##// END OF EJS Templates
branching: merge stable into default
Raphaël Gomès -
r48007:bcafcd77 merge default
parent child Browse files
Show More
@@ -1,127 +1,141 b''
1 stages:
1 stages:
2 - tests
2 - tests
3 - phabricator
3 - phabricator
4
4
5 image: registry.heptapod.net/mercurial/ci-images/mercurial-core:$HG_CI_IMAGE_TAG
5 image: registry.heptapod.net/mercurial/ci-images/mercurial-core:$HG_CI_IMAGE_TAG
6
6
7 variables:
7 variables:
8 PYTHON: python
8 PYTHON: python
9 TEST_HGMODULEPOLICY: "allow"
9 TEST_HGMODULEPOLICY: "allow"
10 HG_CI_IMAGE_TAG: "latest"
10 HG_CI_IMAGE_TAG: "latest"
11 TEST_HGTESTS_ALLOW_NETIO: "0"
11 TEST_HGTESTS_ALLOW_NETIO: "0"
12
12
13 .runtests_template: &runtests
13 .runtests_template: &runtests
14 stage: tests
14 stage: tests
15 # The runner made a clone as root.
15 # The runner made a clone as root.
16 # We make a new clone owned by user used to run the step.
16 # We make a new clone owned by user used to run the step.
17 before_script:
17 before_script:
18 - hg clone . /tmp/mercurial-ci/ --noupdate --config phases.publish=no
18 - hg clone . /tmp/mercurial-ci/ --noupdate --config phases.publish=no
19 - hg -R /tmp/mercurial-ci/ update `hg log --rev '.' --template '{node}'`
19 - hg -R /tmp/mercurial-ci/ update `hg log --rev '.' --template '{node}'`
20 - cd /tmp/mercurial-ci/
20 - cd /tmp/mercurial-ci/
21 - ls -1 tests/test-check-*.* > /tmp/check-tests.txt
21 - ls -1 tests/test-check-*.* > /tmp/check-tests.txt
22 - black --version
22 - black --version
23 - clang-format --version
23 - clang-format --version
24 script:
24 script:
25 - echo "python used, $PYTHON"
25 - echo "python used, $PYTHON"
26 - echo "$RUNTEST_ARGS"
26 - echo "$RUNTEST_ARGS"
27 - HGTESTS_ALLOW_NETIO="$TEST_HGTESTS_ALLOW_NETIO" HGMODULEPOLICY="$TEST_HGMODULEPOLICY" "$PYTHON" tests/run-tests.py --color=always $RUNTEST_ARGS
27 - HGTESTS_ALLOW_NETIO="$TEST_HGTESTS_ALLOW_NETIO" HGMODULEPOLICY="$TEST_HGMODULEPOLICY" "$PYTHON" tests/run-tests.py --color=always $RUNTEST_ARGS
28
28
29 checks-py2:
29 checks-py2:
30 <<: *runtests
30 <<: *runtests
31 variables:
31 variables:
32 RUNTEST_ARGS: "--time --test-list /tmp/check-tests.txt"
32 RUNTEST_ARGS: "--time --test-list /tmp/check-tests.txt"
33
33
34 checks-py3:
34 checks-py3:
35 <<: *runtests
35 <<: *runtests
36 variables:
36 variables:
37 RUNTEST_ARGS: "--time --test-list /tmp/check-tests.txt"
37 RUNTEST_ARGS: "--time --test-list /tmp/check-tests.txt"
38 PYTHON: python3
38 PYTHON: python3
39
39
40 rust-cargo-test-py2: &rust_cargo_test
40 rust-cargo-test-py2: &rust_cargo_test
41 stage: tests
41 stage: tests
42 script:
42 script:
43 - echo "python used, $PYTHON"
43 - echo "python used, $PYTHON"
44 - make rust-tests
44 - make rust-tests
45
45
46 rust-cargo-test-py3:
46 rust-cargo-test-py3:
47 stage: tests
47 stage: tests
48 <<: *rust_cargo_test
48 <<: *rust_cargo_test
49 variables:
49 variables:
50 PYTHON: python3
50 PYTHON: python3
51
51
52 phabricator-refresh:
52 phabricator-refresh:
53 stage: phabricator
53 stage: phabricator
54 variables:
54 variables:
55 DEFAULT_COMMENT: ":white_check_mark: refresh by Heptapod after a successful CI run (:octopus: :green_heart:)"
55 DEFAULT_COMMENT: ":white_check_mark: refresh by Heptapod after a successful CI run (:octopus: :green_heart:)"
56 STABLE_COMMENT: ":white_check_mark: refresh by Heptapod after a successful CI run (:octopus: :green_heart:)\n⚠ This patch is intended for stable ⚠\n{image https://media.giphy.com/media/nYI8SmmChYXK0/source.gif}"
56 STABLE_COMMENT: ":white_check_mark: refresh by Heptapod after a successful CI run (:octopus: :green_heart:)\n⚠ This patch is intended for stable ⚠\n{image https://media.giphy.com/media/nYI8SmmChYXK0/source.gif}"
57 script:
57 script:
58 - |
58 - |
59 if [ `hg branch` == "stable" ]; then
59 if [ `hg branch` == "stable" ]; then
60 ./contrib/phab-refresh-stack.sh --comment "$STABLE_COMMENT";
60 ./contrib/phab-refresh-stack.sh --comment "$STABLE_COMMENT";
61 else
61 else
62 ./contrib/phab-refresh-stack.sh --comment "$DEFAULT_COMMENT";
62 ./contrib/phab-refresh-stack.sh --comment "$DEFAULT_COMMENT";
63 fi
63 fi
64
64
65 test-py2:
65 test-py2:
66 <<: *runtests
66 <<: *runtests
67 variables:
67 variables:
68 RUNTEST_ARGS: " --no-rust --blacklist /tmp/check-tests.txt"
68 RUNTEST_ARGS: " --no-rust --blacklist /tmp/check-tests.txt"
69 TEST_HGMODULEPOLICY: "c"
69 TEST_HGMODULEPOLICY: "c"
70 TEST_HGTESTS_ALLOW_NETIO: "1"
70 TEST_HGTESTS_ALLOW_NETIO: "1"
71
71
72 test-py3:
72 test-py3:
73 <<: *runtests
73 <<: *runtests
74 variables:
74 variables:
75 RUNTEST_ARGS: " --no-rust --blacklist /tmp/check-tests.txt"
75 RUNTEST_ARGS: " --no-rust --blacklist /tmp/check-tests.txt"
76 PYTHON: python3
76 PYTHON: python3
77 TEST_HGMODULEPOLICY: "c"
77 TEST_HGMODULEPOLICY: "c"
78 TEST_HGTESTS_ALLOW_NETIO: "1"
78 TEST_HGTESTS_ALLOW_NETIO: "1"
79
79
80 test-py2-pure:
80 test-py2-pure:
81 <<: *runtests
81 <<: *runtests
82 variables:
82 variables:
83 RUNTEST_ARGS: "--pure --blacklist /tmp/check-tests.txt"
83 RUNTEST_ARGS: "--pure --blacklist /tmp/check-tests.txt"
84 TEST_HGMODULEPOLICY: "py"
84 TEST_HGMODULEPOLICY: "py"
85
85
86 test-py3-pure:
86 test-py3-pure:
87 <<: *runtests
87 <<: *runtests
88 variables:
88 variables:
89 RUNTEST_ARGS: "--pure --blacklist /tmp/check-tests.txt"
89 RUNTEST_ARGS: "--pure --blacklist /tmp/check-tests.txt"
90 PYTHON: python3
90 PYTHON: python3
91 TEST_HGMODULEPOLICY: "py"
91 TEST_HGMODULEPOLICY: "py"
92
92
93 test-py2-rust:
93 test-py2-rust:
94 <<: *runtests
94 <<: *runtests
95 variables:
95 variables:
96 HGWITHRUSTEXT: cpython
96 HGWITHRUSTEXT: cpython
97 RUNTEST_ARGS: "--rust --blacklist /tmp/check-tests.txt"
97 RUNTEST_ARGS: "--rust --blacklist /tmp/check-tests.txt"
98 TEST_HGMODULEPOLICY: "rust+c"
98 TEST_HGMODULEPOLICY: "rust+c"
99
99
100 test-py3-rust:
100 test-py3-rust:
101 <<: *runtests
101 <<: *runtests
102 variables:
102 variables:
103 HGWITHRUSTEXT: cpython
103 HGWITHRUSTEXT: cpython
104 RUNTEST_ARGS: "--rust --blacklist /tmp/check-tests.txt"
104 RUNTEST_ARGS: "--rust --blacklist /tmp/check-tests.txt"
105 PYTHON: python3
105 PYTHON: python3
106 TEST_HGMODULEPOLICY: "rust+c"
106 TEST_HGMODULEPOLICY: "rust+c"
107
107
108 test-py3-rhg:
108 test-py3-rhg:
109 <<: *runtests
109 <<: *runtests
110 variables:
110 variables:
111 HGWITHRUSTEXT: cpython
111 HGWITHRUSTEXT: cpython
112 RUNTEST_ARGS: "--rust --rhg --blacklist /tmp/check-tests.txt"
112 RUNTEST_ARGS: "--rust --rhg --blacklist /tmp/check-tests.txt"
113 PYTHON: python3
113 PYTHON: python3
114 TEST_HGMODULEPOLICY: "rust+c"
114 TEST_HGMODULEPOLICY: "rust+c"
115
115
116 test-py2-chg:
116 test-py2-chg:
117 <<: *runtests
117 <<: *runtests
118 variables:
118 variables:
119 RUNTEST_ARGS: "--blacklist /tmp/check-tests.txt --chg"
119 RUNTEST_ARGS: "--blacklist /tmp/check-tests.txt --chg"
120 TEST_HGMODULEPOLICY: "c"
120 TEST_HGMODULEPOLICY: "c"
121
121
122 test-py3-chg:
122 test-py3-chg:
123 <<: *runtests
123 <<: *runtests
124 variables:
124 variables:
125 PYTHON: python3
125 PYTHON: python3
126 RUNTEST_ARGS: "--blacklist /tmp/check-tests.txt --chg"
126 RUNTEST_ARGS: "--blacklist /tmp/check-tests.txt --chg"
127 TEST_HGMODULEPOLICY: "c"
127 TEST_HGMODULEPOLICY: "c"
128
129 check-pytype-py3:
130 extends: .runtests_template
131 when: manual
132 before_script:
133 - hg clone . /tmp/mercurial-ci/ --noupdate --config phases.publish=no
134 - hg -R /tmp/mercurial-ci/ update `hg log --rev '.' --template '{node}'`
135 - cd /tmp/mercurial-ci/
136 - make local PYTHON=$PYTHON
137 - $PYTHON -m pip install --user -U pytype==2021.04.15
138 variables:
139 RUNTEST_ARGS: " --allow-slow-tests tests/test-check-pytype.t"
140 PYTHON: python3
141 TEST_HGMODULEPOLICY: "c"
@@ -1,405 +1,403 b''
1 # Perforce source for convert extension.
1 # Perforce source for convert extension.
2 #
2 #
3 # Copyright 2009, Frank Kingswood <frank@kingswood-consulting.co.uk>
3 # Copyright 2009, Frank Kingswood <frank@kingswood-consulting.co.uk>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from __future__ import absolute_import
7 from __future__ import absolute_import
8
8
9 import marshal
9 import marshal
10 import re
10 import re
11
11
12 from mercurial.i18n import _
12 from mercurial.i18n import _
13 from mercurial import (
13 from mercurial import (
14 error,
14 error,
15 util,
15 util,
16 )
16 )
17 from mercurial.utils import (
17 from mercurial.utils import (
18 dateutil,
18 dateutil,
19 procutil,
19 procutil,
20 stringutil,
20 stringutil,
21 )
21 )
22
22
23 from . import common
23 from . import common
24
24
25
25
26 def loaditer(f):
26 def loaditer(f):
27 """Yield the dictionary objects generated by p4"""
27 """Yield the dictionary objects generated by p4"""
28 try:
28 try:
29 while True:
29 while True:
30 d = marshal.load(f)
30 d = marshal.load(f)
31 if not d:
31 if not d:
32 break
32 break
33 yield d
33 yield d
34 except EOFError:
34 except EOFError:
35 pass
35 pass
36
36
37
37
38 def decodefilename(filename):
38 def decodefilename(filename):
39 """Perforce escapes special characters @, #, *, or %
39 """Perforce escapes special characters @, #, *, or %
40 with %40, %23, %2A, or %25 respectively
40 with %40, %23, %2A, or %25 respectively
41
41
42 >>> decodefilename(b'portable-net45%252Bnetcore45%252Bwp8%252BMonoAndroid')
42 >>> decodefilename(b'portable-net45%252Bnetcore45%252Bwp8%252BMonoAndroid')
43 'portable-net45%2Bnetcore45%2Bwp8%2BMonoAndroid'
43 'portable-net45%2Bnetcore45%2Bwp8%2BMonoAndroid'
44 >>> decodefilename(b'//Depot/Directory/%2525/%2523/%23%40.%2A')
44 >>> decodefilename(b'//Depot/Directory/%2525/%2523/%23%40.%2A')
45 '//Depot/Directory/%25/%23/#@.*'
45 '//Depot/Directory/%25/%23/#@.*'
46 """
46 """
47 replacements = [
47 replacements = [
48 (b'%2A', b'*'),
48 (b'%2A', b'*'),
49 (b'%23', b'#'),
49 (b'%23', b'#'),
50 (b'%40', b'@'),
50 (b'%40', b'@'),
51 (b'%25', b'%'),
51 (b'%25', b'%'),
52 ]
52 ]
53 for k, v in replacements:
53 for k, v in replacements:
54 filename = filename.replace(k, v)
54 filename = filename.replace(k, v)
55 return filename
55 return filename
56
56
57
57
58 class p4_source(common.converter_source):
58 class p4_source(common.converter_source):
59 def __init__(self, ui, repotype, path, revs=None):
59 def __init__(self, ui, repotype, path, revs=None):
60 # avoid import cycle
60 # avoid import cycle
61 from . import convcmd
61 from . import convcmd
62
62
63 super(p4_source, self).__init__(ui, repotype, path, revs=revs)
63 super(p4_source, self).__init__(ui, repotype, path, revs=revs)
64
64
65 if b"/" in path and not path.startswith(b'//'):
65 if b"/" in path and not path.startswith(b'//'):
66 raise common.NoRepo(
66 raise common.NoRepo(
67 _(b'%s does not look like a P4 repository') % path
67 _(b'%s does not look like a P4 repository') % path
68 )
68 )
69
69
70 common.checktool(b'p4', abort=False)
70 common.checktool(b'p4', abort=False)
71
71
72 self.revmap = {}
72 self.revmap = {}
73 self.encoding = self.ui.config(
73 self.encoding = self.ui.config(
74 b'convert', b'p4.encoding', convcmd.orig_encoding
74 b'convert', b'p4.encoding', convcmd.orig_encoding
75 )
75 )
76 self.re_type = re.compile(
76 self.re_type = re.compile(
77 br"([a-z]+)?(text|binary|symlink|apple|resource|unicode|utf\d+)"
77 br"([a-z]+)?(text|binary|symlink|apple|resource|unicode|utf\d+)"
78 br"(\+\w+)?$"
78 br"(\+\w+)?$"
79 )
79 )
80 self.re_keywords = re.compile(
80 self.re_keywords = re.compile(
81 br"\$(Id|Header|Date|DateTime|Change|File|Revision|Author)"
81 br"\$(Id|Header|Date|DateTime|Change|File|Revision|Author)"
82 br":[^$\n]*\$"
82 br":[^$\n]*\$"
83 )
83 )
84 self.re_keywords_old = re.compile(br"\$(Id|Header):[^$\n]*\$")
84 self.re_keywords_old = re.compile(br"\$(Id|Header):[^$\n]*\$")
85
85
86 if revs and len(revs) > 1:
86 if revs and len(revs) > 1:
87 raise error.Abort(
87 raise error.Abort(
88 _(
88 _(
89 b"p4 source does not support specifying "
89 b"p4 source does not support specifying "
90 b"multiple revisions"
90 b"multiple revisions"
91 )
91 )
92 )
92 )
93
93
94 def setrevmap(self, revmap):
94 def setrevmap(self, revmap):
95 """Sets the parsed revmap dictionary.
95 """Sets the parsed revmap dictionary.
96
96
97 Revmap stores mappings from a source revision to a target revision.
97 Revmap stores mappings from a source revision to a target revision.
98 It is set in convertcmd.convert and provided by the user as a file
98 It is set in convertcmd.convert and provided by the user as a file
99 on the commandline.
99 on the commandline.
100
100
101 Revisions in the map are considered beeing present in the
101 Revisions in the map are considered beeing present in the
102 repository and ignored during _parse(). This allows for incremental
102 repository and ignored during _parse(). This allows for incremental
103 imports if a revmap is provided.
103 imports if a revmap is provided.
104 """
104 """
105 self.revmap = revmap
105 self.revmap = revmap
106
106
107 def _parse_view(self, path):
107 def _parse_view(self, path):
108 """Read changes affecting the path"""
108 """Read changes affecting the path"""
109 cmd = b'p4 -G changes -s submitted %s' % procutil.shellquote(path)
109 cmd = b'p4 -G changes -s submitted %s' % procutil.shellquote(path)
110 stdout = procutil.popen(cmd, mode=b'rb')
110 stdout = procutil.popen(cmd, mode=b'rb')
111 p4changes = {}
111 p4changes = {}
112 for d in loaditer(stdout):
112 for d in loaditer(stdout):
113 c = d.get(b"change", None)
113 c = d.get(b"change", None)
114 if c:
114 if c:
115 p4changes[c] = True
115 p4changes[c] = True
116 return p4changes
116 return p4changes
117
117
118 def _parse(self, ui, path):
118 def _parse(self, ui, path):
119 """Prepare list of P4 filenames and revisions to import"""
119 """Prepare list of P4 filenames and revisions to import"""
120 p4changes = {}
120 p4changes = {}
121 changeset = {}
121 changeset = {}
122 files_map = {}
122 files_map = {}
123 copies_map = {}
123 copies_map = {}
124 localname = {}
124 localname = {}
125 depotname = {}
125 depotname = {}
126 heads = []
126 heads = []
127
127
128 ui.status(_(b'reading p4 views\n'))
128 ui.status(_(b'reading p4 views\n'))
129
129
130 # read client spec or view
130 # read client spec or view
131 if b"/" in path:
131 if b"/" in path:
132 p4changes.update(self._parse_view(path))
132 p4changes.update(self._parse_view(path))
133 if path.startswith(b"//") and path.endswith(b"/..."):
133 if path.startswith(b"//") and path.endswith(b"/..."):
134 views = {path[:-3]: b""}
134 views = {path[:-3]: b""}
135 else:
135 else:
136 views = {b"//": b""}
136 views = {b"//": b""}
137 else:
137 else:
138 cmd = b'p4 -G client -o %s' % procutil.shellquote(path)
138 cmd = b'p4 -G client -o %s' % procutil.shellquote(path)
139 clientspec = marshal.load(procutil.popen(cmd, mode=b'rb'))
139 clientspec = marshal.load(procutil.popen(cmd, mode=b'rb'))
140
140
141 views = {}
141 views = {}
142 for client in clientspec:
142 for client in clientspec:
143 if client.startswith(b"View"):
143 if client.startswith(b"View"):
144 sview, cview = clientspec[client].split()
144 sview, cview = clientspec[client].split()
145 p4changes.update(self._parse_view(sview))
145 p4changes.update(self._parse_view(sview))
146 if sview.endswith(b"...") and cview.endswith(b"..."):
146 if sview.endswith(b"...") and cview.endswith(b"..."):
147 sview = sview[:-3]
147 sview = sview[:-3]
148 cview = cview[:-3]
148 cview = cview[:-3]
149 cview = cview[2:]
149 cview = cview[2:]
150 cview = cview[cview.find(b"/") + 1 :]
150 cview = cview[cview.find(b"/") + 1 :]
151 views[sview] = cview
151 views[sview] = cview
152
152
153 # list of changes that affect our source files
153 # list of changes that affect our source files
154 p4changes = p4changes.keys()
154 p4changes = sorted(p4changes.keys(), key=int)
155 p4changes.sort(key=int)
156
155
157 # list with depot pathnames, longest first
156 # list with depot pathnames, longest first
158 vieworder = views.keys()
157 vieworder = sorted(views.keys(), key=len, reverse=True)
159 vieworder.sort(key=len, reverse=True)
160
158
161 # handle revision limiting
159 # handle revision limiting
162 startrev = self.ui.config(b'convert', b'p4.startrev')
160 startrev = self.ui.config(b'convert', b'p4.startrev')
163
161
164 # now read the full changelists to get the list of file revisions
162 # now read the full changelists to get the list of file revisions
165 ui.status(_(b'collecting p4 changelists\n'))
163 ui.status(_(b'collecting p4 changelists\n'))
166 lastid = None
164 lastid = None
167 for change in p4changes:
165 for change in p4changes:
168 if startrev and int(change) < int(startrev):
166 if startrev and int(change) < int(startrev):
169 continue
167 continue
170 if self.revs and int(change) > int(self.revs[0]):
168 if self.revs and int(change) > int(self.revs[0]):
171 continue
169 continue
172 if change in self.revmap:
170 if change in self.revmap:
173 # Ignore already present revisions, but set the parent pointer.
171 # Ignore already present revisions, but set the parent pointer.
174 lastid = change
172 lastid = change
175 continue
173 continue
176
174
177 if lastid:
175 if lastid:
178 parents = [lastid]
176 parents = [lastid]
179 else:
177 else:
180 parents = []
178 parents = []
181
179
182 d = self._fetch_revision(change)
180 d = self._fetch_revision(change)
183 c = self._construct_commit(d, parents)
181 c = self._construct_commit(d, parents)
184
182
185 descarr = c.desc.splitlines(True)
183 descarr = c.desc.splitlines(True)
186 if len(descarr) > 0:
184 if len(descarr) > 0:
187 shortdesc = descarr[0].rstrip(b'\r\n')
185 shortdesc = descarr[0].rstrip(b'\r\n')
188 else:
186 else:
189 shortdesc = b'**empty changelist description**'
187 shortdesc = b'**empty changelist description**'
190
188
191 t = b'%s %s' % (c.rev, repr(shortdesc)[1:-1])
189 t = b'%s %s' % (c.rev, shortdesc)
192 ui.status(stringutil.ellipsis(t, 80) + b'\n')
190 ui.status(stringutil.ellipsis(t, 80) + b'\n')
193
191
194 files = []
192 files = []
195 copies = {}
193 copies = {}
196 copiedfiles = []
194 copiedfiles = []
197 i = 0
195 i = 0
198 while (b"depotFile%d" % i) in d and (b"rev%d" % i) in d:
196 while (b"depotFile%d" % i) in d and (b"rev%d" % i) in d:
199 oldname = d[b"depotFile%d" % i]
197 oldname = d[b"depotFile%d" % i]
200 filename = None
198 filename = None
201 for v in vieworder:
199 for v in vieworder:
202 if oldname.lower().startswith(v.lower()):
200 if oldname.lower().startswith(v.lower()):
203 filename = decodefilename(views[v] + oldname[len(v) :])
201 filename = decodefilename(views[v] + oldname[len(v) :])
204 break
202 break
205 if filename:
203 if filename:
206 files.append((filename, d[b"rev%d" % i]))
204 files.append((filename, d[b"rev%d" % i]))
207 depotname[filename] = oldname
205 depotname[filename] = oldname
208 if d.get(b"action%d" % i) == b"move/add":
206 if d.get(b"action%d" % i) == b"move/add":
209 copiedfiles.append(filename)
207 copiedfiles.append(filename)
210 localname[oldname] = filename
208 localname[oldname] = filename
211 i += 1
209 i += 1
212
210
213 # Collect information about copied files
211 # Collect information about copied files
214 for filename in copiedfiles:
212 for filename in copiedfiles:
215 oldname = depotname[filename]
213 oldname = depotname[filename]
216
214
217 flcmd = b'p4 -G filelog %s' % procutil.shellquote(oldname)
215 flcmd = b'p4 -G filelog %s' % procutil.shellquote(oldname)
218 flstdout = procutil.popen(flcmd, mode=b'rb')
216 flstdout = procutil.popen(flcmd, mode=b'rb')
219
217
220 copiedfilename = None
218 copiedfilename = None
221 for d in loaditer(flstdout):
219 for d in loaditer(flstdout):
222 copiedoldname = None
220 copiedoldname = None
223
221
224 i = 0
222 i = 0
225 while (b"change%d" % i) in d:
223 while (b"change%d" % i) in d:
226 if (
224 if (
227 d[b"change%d" % i] == change
225 d[b"change%d" % i] == change
228 and d[b"action%d" % i] == b"move/add"
226 and d[b"action%d" % i] == b"move/add"
229 ):
227 ):
230 j = 0
228 j = 0
231 while (b"file%d,%d" % (i, j)) in d:
229 while (b"file%d,%d" % (i, j)) in d:
232 if d[b"how%d,%d" % (i, j)] == b"moved from":
230 if d[b"how%d,%d" % (i, j)] == b"moved from":
233 copiedoldname = d[b"file%d,%d" % (i, j)]
231 copiedoldname = d[b"file%d,%d" % (i, j)]
234 break
232 break
235 j += 1
233 j += 1
236 i += 1
234 i += 1
237
235
238 if copiedoldname and copiedoldname in localname:
236 if copiedoldname and copiedoldname in localname:
239 copiedfilename = localname[copiedoldname]
237 copiedfilename = localname[copiedoldname]
240 break
238 break
241
239
242 if copiedfilename:
240 if copiedfilename:
243 copies[filename] = copiedfilename
241 copies[filename] = copiedfilename
244 else:
242 else:
245 ui.warn(
243 ui.warn(
246 _(b"cannot find source for copied file: %s@%s\n")
244 _(b"cannot find source for copied file: %s@%s\n")
247 % (filename, change)
245 % (filename, change)
248 )
246 )
249
247
250 changeset[change] = c
248 changeset[change] = c
251 files_map[change] = files
249 files_map[change] = files
252 copies_map[change] = copies
250 copies_map[change] = copies
253 lastid = change
251 lastid = change
254
252
255 if lastid and len(changeset) > 0:
253 if lastid and len(changeset) > 0:
256 heads = [lastid]
254 heads = [lastid]
257
255
258 return {
256 return {
259 b'changeset': changeset,
257 b'changeset': changeset,
260 b'files': files_map,
258 b'files': files_map,
261 b'copies': copies_map,
259 b'copies': copies_map,
262 b'heads': heads,
260 b'heads': heads,
263 b'depotname': depotname,
261 b'depotname': depotname,
264 }
262 }
265
263
266 @util.propertycache
264 @util.propertycache
267 def _parse_once(self):
265 def _parse_once(self):
268 return self._parse(self.ui, self.path)
266 return self._parse(self.ui, self.path)
269
267
270 @util.propertycache
268 @util.propertycache
271 def copies(self):
269 def copies(self):
272 return self._parse_once[b'copies']
270 return self._parse_once[b'copies']
273
271
274 @util.propertycache
272 @util.propertycache
275 def files(self):
273 def files(self):
276 return self._parse_once[b'files']
274 return self._parse_once[b'files']
277
275
278 @util.propertycache
276 @util.propertycache
279 def changeset(self):
277 def changeset(self):
280 return self._parse_once[b'changeset']
278 return self._parse_once[b'changeset']
281
279
282 @util.propertycache
280 @util.propertycache
283 def heads(self):
281 def heads(self):
284 return self._parse_once[b'heads']
282 return self._parse_once[b'heads']
285
283
286 @util.propertycache
284 @util.propertycache
287 def depotname(self):
285 def depotname(self):
288 return self._parse_once[b'depotname']
286 return self._parse_once[b'depotname']
289
287
290 def getheads(self):
288 def getheads(self):
291 return self.heads
289 return self.heads
292
290
293 def getfile(self, name, rev):
291 def getfile(self, name, rev):
294 cmd = b'p4 -G print %s' % procutil.shellquote(
292 cmd = b'p4 -G print %s' % procutil.shellquote(
295 b"%s#%s" % (self.depotname[name], rev)
293 b"%s#%s" % (self.depotname[name], rev)
296 )
294 )
297
295
298 lasterror = None
296 lasterror = None
299 while True:
297 while True:
300 stdout = procutil.popen(cmd, mode=b'rb')
298 stdout = procutil.popen(cmd, mode=b'rb')
301
299
302 mode = None
300 mode = None
303 contents = []
301 contents = []
304 keywords = None
302 keywords = None
305
303
306 for d in loaditer(stdout):
304 for d in loaditer(stdout):
307 code = d[b"code"]
305 code = d[b"code"]
308 data = d.get(b"data")
306 data = d.get(b"data")
309
307
310 if code == b"error":
308 if code == b"error":
311 # if this is the first time error happened
309 # if this is the first time error happened
312 # re-attempt getting the file
310 # re-attempt getting the file
313 if not lasterror:
311 if not lasterror:
314 lasterror = IOError(d[b"generic"], data)
312 lasterror = IOError(d[b"generic"], data)
315 # this will exit inner-most for-loop
313 # this will exit inner-most for-loop
316 break
314 break
317 else:
315 else:
318 raise lasterror
316 raise lasterror
319
317
320 elif code == b"stat":
318 elif code == b"stat":
321 action = d.get(b"action")
319 action = d.get(b"action")
322 if action in [b"purge", b"delete", b"move/delete"]:
320 if action in [b"purge", b"delete", b"move/delete"]:
323 return None, None
321 return None, None
324 p4type = self.re_type.match(d[b"type"])
322 p4type = self.re_type.match(d[b"type"])
325 if p4type:
323 if p4type:
326 mode = b""
324 mode = b""
327 flags = (p4type.group(1) or b"") + (
325 flags = (p4type.group(1) or b"") + (
328 p4type.group(3) or b""
326 p4type.group(3) or b""
329 )
327 )
330 if b"x" in flags:
328 if b"x" in flags:
331 mode = b"x"
329 mode = b"x"
332 if p4type.group(2) == b"symlink":
330 if p4type.group(2) == b"symlink":
333 mode = b"l"
331 mode = b"l"
334 if b"ko" in flags:
332 if b"ko" in flags:
335 keywords = self.re_keywords_old
333 keywords = self.re_keywords_old
336 elif b"k" in flags:
334 elif b"k" in flags:
337 keywords = self.re_keywords
335 keywords = self.re_keywords
338
336
339 elif code == b"text" or code == b"binary":
337 elif code == b"text" or code == b"binary":
340 contents.append(data)
338 contents.append(data)
341
339
342 lasterror = None
340 lasterror = None
343
341
344 if not lasterror:
342 if not lasterror:
345 break
343 break
346
344
347 if mode is None:
345 if mode is None:
348 return None, None
346 return None, None
349
347
350 contents = b''.join(contents)
348 contents = b''.join(contents)
351
349
352 if keywords:
350 if keywords:
353 contents = keywords.sub(b"$\\1$", contents)
351 contents = keywords.sub(b"$\\1$", contents)
354 if mode == b"l" and contents.endswith(b"\n"):
352 if mode == b"l" and contents.endswith(b"\n"):
355 contents = contents[:-1]
353 contents = contents[:-1]
356
354
357 return contents, mode
355 return contents, mode
358
356
359 def getchanges(self, rev, full):
357 def getchanges(self, rev, full):
360 if full:
358 if full:
361 raise error.Abort(_(b"convert from p4 does not support --full"))
359 raise error.Abort(_(b"convert from p4 does not support --full"))
362 return self.files[rev], self.copies[rev], set()
360 return self.files[rev], self.copies[rev], set()
363
361
364 def _construct_commit(self, obj, parents=None):
362 def _construct_commit(self, obj, parents=None):
365 """
363 """
366 Constructs a common.commit object from an unmarshalled
364 Constructs a common.commit object from an unmarshalled
367 `p4 describe` output
365 `p4 describe` output
368 """
366 """
369 desc = self.recode(obj.get(b"desc", b""))
367 desc = self.recode(obj.get(b"desc", b""))
370 date = (int(obj[b"time"]), 0) # timezone not set
368 date = (int(obj[b"time"]), 0) # timezone not set
371 if parents is None:
369 if parents is None:
372 parents = []
370 parents = []
373
371
374 return common.commit(
372 return common.commit(
375 author=self.recode(obj[b"user"]),
373 author=self.recode(obj[b"user"]),
376 date=dateutil.datestr(date, b'%Y-%m-%d %H:%M:%S %1%2'),
374 date=dateutil.datestr(date, b'%Y-%m-%d %H:%M:%S %1%2'),
377 parents=parents,
375 parents=parents,
378 desc=desc,
376 desc=desc,
379 branch=None,
377 branch=None,
380 rev=obj[b'change'],
378 rev=obj[b'change'],
381 extra={b"p4": obj[b'change'], b"convert_revision": obj[b'change']},
379 extra={b"p4": obj[b'change'], b"convert_revision": obj[b'change']},
382 )
380 )
383
381
384 def _fetch_revision(self, rev):
382 def _fetch_revision(self, rev):
385 """Return an output of `p4 describe` including author, commit date as
383 """Return an output of `p4 describe` including author, commit date as
386 a dictionary."""
384 a dictionary."""
387 cmd = b"p4 -G describe -s %s" % rev
385 cmd = b"p4 -G describe -s %s" % rev
388 stdout = procutil.popen(cmd, mode=b'rb')
386 stdout = procutil.popen(cmd, mode=b'rb')
389 return marshal.load(stdout)
387 return marshal.load(stdout)
390
388
391 def getcommit(self, rev):
389 def getcommit(self, rev):
392 if rev in self.changeset:
390 if rev in self.changeset:
393 return self.changeset[rev]
391 return self.changeset[rev]
394 elif rev in self.revmap:
392 elif rev in self.revmap:
395 d = self._fetch_revision(rev)
393 d = self._fetch_revision(rev)
396 return self._construct_commit(d, parents=None)
394 return self._construct_commit(d, parents=None)
397 raise error.Abort(
395 raise error.Abort(
398 _(b"cannot find %s in the revmap or parsed changesets") % rev
396 _(b"cannot find %s in the revmap or parsed changesets") % rev
399 )
397 )
400
398
401 def gettags(self):
399 def gettags(self):
402 return {}
400 return {}
403
401
404 def getchangedfiles(self, rev, i):
402 def getchangedfiles(self, rev, i):
405 return sorted([x[0] for x in self.files[rev]])
403 return sorted([x[0] for x in self.files[rev]])
@@ -1,1599 +1,1599 b''
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import os
12 import os
13 import shutil
13 import shutil
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 hex,
18 hex,
19 sha1nodeconstants,
19 sha1nodeconstants,
20 short,
20 short,
21 )
21 )
22 from .pycompat import getattr
22 from .pycompat import getattr
23
23
24 from . import (
24 from . import (
25 bookmarks,
25 bookmarks,
26 bundlerepo,
26 bundlerepo,
27 cacheutil,
27 cacheutil,
28 cmdutil,
28 cmdutil,
29 destutil,
29 destutil,
30 discovery,
30 discovery,
31 error,
31 error,
32 exchange,
32 exchange,
33 extensions,
33 extensions,
34 graphmod,
34 graphmod,
35 httppeer,
35 httppeer,
36 localrepo,
36 localrepo,
37 lock,
37 lock,
38 logcmdutil,
38 logcmdutil,
39 logexchange,
39 logexchange,
40 merge as mergemod,
40 merge as mergemod,
41 mergestate as mergestatemod,
41 mergestate as mergestatemod,
42 narrowspec,
42 narrowspec,
43 phases,
43 phases,
44 requirements,
44 requirements,
45 scmutil,
45 scmutil,
46 sshpeer,
46 sshpeer,
47 statichttprepo,
47 statichttprepo,
48 ui as uimod,
48 ui as uimod,
49 unionrepo,
49 unionrepo,
50 url,
50 url,
51 util,
51 util,
52 verify as verifymod,
52 verify as verifymod,
53 vfs as vfsmod,
53 vfs as vfsmod,
54 )
54 )
55 from .utils import (
55 from .utils import (
56 hashutil,
56 hashutil,
57 stringutil,
57 stringutil,
58 urlutil,
58 urlutil,
59 )
59 )
60
60
61
61
62 release = lock.release
62 release = lock.release
63
63
64 # shared features
64 # shared features
65 sharedbookmarks = b'bookmarks'
65 sharedbookmarks = b'bookmarks'
66
66
67
67
68 def _local(path):
68 def _local(path):
69 path = util.expandpath(urlutil.urllocalpath(path))
69 path = util.expandpath(urlutil.urllocalpath(path))
70
70
71 try:
71 try:
72 # we use os.stat() directly here instead of os.path.isfile()
72 # we use os.stat() directly here instead of os.path.isfile()
73 # because the latter started returning `False` on invalid path
73 # because the latter started returning `False` on invalid path
74 # exceptions starting in 3.8 and we care about handling
74 # exceptions starting in 3.8 and we care about handling
75 # invalid paths specially here.
75 # invalid paths specially here.
76 st = os.stat(path)
76 st = os.stat(path)
77 isfile = stat.S_ISREG(st.st_mode)
77 isfile = stat.S_ISREG(st.st_mode)
78 # Python 2 raises TypeError, Python 3 ValueError.
78 # Python 2 raises TypeError, Python 3 ValueError.
79 except (TypeError, ValueError) as e:
79 except (TypeError, ValueError) as e:
80 raise error.Abort(
80 raise error.Abort(
81 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
81 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
82 )
82 )
83 except OSError:
83 except OSError:
84 isfile = False
84 isfile = False
85
85
86 return isfile and bundlerepo or localrepo
86 return isfile and bundlerepo or localrepo
87
87
88
88
89 def addbranchrevs(lrepo, other, branches, revs):
89 def addbranchrevs(lrepo, other, branches, revs):
90 peer = other.peer() # a courtesy to callers using a localrepo for other
90 peer = other.peer() # a courtesy to callers using a localrepo for other
91 hashbranch, branches = branches
91 hashbranch, branches = branches
92 if not hashbranch and not branches:
92 if not hashbranch and not branches:
93 x = revs or None
93 x = revs or None
94 if revs:
94 if revs:
95 y = revs[0]
95 y = revs[0]
96 else:
96 else:
97 y = None
97 y = None
98 return x, y
98 return x, y
99 if revs:
99 if revs:
100 revs = list(revs)
100 revs = list(revs)
101 else:
101 else:
102 revs = []
102 revs = []
103
103
104 if not peer.capable(b'branchmap'):
104 if not peer.capable(b'branchmap'):
105 if branches:
105 if branches:
106 raise error.Abort(_(b"remote branch lookup not supported"))
106 raise error.Abort(_(b"remote branch lookup not supported"))
107 revs.append(hashbranch)
107 revs.append(hashbranch)
108 return revs, revs[0]
108 return revs, revs[0]
109
109
110 with peer.commandexecutor() as e:
110 with peer.commandexecutor() as e:
111 branchmap = e.callcommand(b'branchmap', {}).result()
111 branchmap = e.callcommand(b'branchmap', {}).result()
112
112
113 def primary(branch):
113 def primary(branch):
114 if branch == b'.':
114 if branch == b'.':
115 if not lrepo:
115 if not lrepo:
116 raise error.Abort(_(b"dirstate branch not accessible"))
116 raise error.Abort(_(b"dirstate branch not accessible"))
117 branch = lrepo.dirstate.branch()
117 branch = lrepo.dirstate.branch()
118 if branch in branchmap:
118 if branch in branchmap:
119 revs.extend(hex(r) for r in reversed(branchmap[branch]))
119 revs.extend(hex(r) for r in reversed(branchmap[branch]))
120 return True
120 return True
121 else:
121 else:
122 return False
122 return False
123
123
124 for branch in branches:
124 for branch in branches:
125 if not primary(branch):
125 if not primary(branch):
126 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
126 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
127 if hashbranch:
127 if hashbranch:
128 if not primary(hashbranch):
128 if not primary(hashbranch):
129 revs.append(hashbranch)
129 revs.append(hashbranch)
130 return revs, revs[0]
130 return revs, revs[0]
131
131
132
132
133 def parseurl(path, branches=None):
133 def parseurl(path, branches=None):
134 '''parse url#branch, returning (url, (branch, branches))'''
134 '''parse url#branch, returning (url, (branch, branches))'''
135 msg = b'parseurl(...) moved to mercurial.utils.urlutil'
135 msg = b'parseurl(...) moved to mercurial.utils.urlutil'
136 util.nouideprecwarn(msg, b'6.0', stacklevel=2)
136 util.nouideprecwarn(msg, b'6.0', stacklevel=2)
137 return urlutil.parseurl(path, branches=branches)
137 return urlutil.parseurl(path, branches=branches)
138
138
139
139
140 schemes = {
140 schemes = {
141 b'bundle': bundlerepo,
141 b'bundle': bundlerepo,
142 b'union': unionrepo,
142 b'union': unionrepo,
143 b'file': _local,
143 b'file': _local,
144 b'http': httppeer,
144 b'http': httppeer,
145 b'https': httppeer,
145 b'https': httppeer,
146 b'ssh': sshpeer,
146 b'ssh': sshpeer,
147 b'static-http': statichttprepo,
147 b'static-http': statichttprepo,
148 }
148 }
149
149
150
150
151 def _peerlookup(path):
151 def _peerlookup(path):
152 u = urlutil.url(path)
152 u = urlutil.url(path)
153 scheme = u.scheme or b'file'
153 scheme = u.scheme or b'file'
154 thing = schemes.get(scheme) or schemes[b'file']
154 thing = schemes.get(scheme) or schemes[b'file']
155 try:
155 try:
156 return thing(path)
156 return thing(path)
157 except TypeError:
157 except TypeError:
158 # we can't test callable(thing) because 'thing' can be an unloaded
158 # we can't test callable(thing) because 'thing' can be an unloaded
159 # module that implements __call__
159 # module that implements __call__
160 if not util.safehasattr(thing, b'instance'):
160 if not util.safehasattr(thing, b'instance'):
161 raise
161 raise
162 return thing
162 return thing
163
163
164
164
165 def islocal(repo):
165 def islocal(repo):
166 '''return true if repo (or path pointing to repo) is local'''
166 '''return true if repo (or path pointing to repo) is local'''
167 if isinstance(repo, bytes):
167 if isinstance(repo, bytes):
168 try:
168 try:
169 return _peerlookup(repo).islocal(repo)
169 return _peerlookup(repo).islocal(repo)
170 except AttributeError:
170 except AttributeError:
171 return False
171 return False
172 return repo.local()
172 return repo.local()
173
173
174
174
175 def openpath(ui, path, sendaccept=True):
175 def openpath(ui, path, sendaccept=True):
176 '''open path with open if local, url.open if remote'''
176 '''open path with open if local, url.open if remote'''
177 pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
177 pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
178 if pathurl.islocal():
178 if pathurl.islocal():
179 return util.posixfile(pathurl.localpath(), b'rb')
179 return util.posixfile(pathurl.localpath(), b'rb')
180 else:
180 else:
181 return url.open(ui, path, sendaccept=sendaccept)
181 return url.open(ui, path, sendaccept=sendaccept)
182
182
183
183
184 # a list of (ui, repo) functions called for wire peer initialization
184 # a list of (ui, repo) functions called for wire peer initialization
185 wirepeersetupfuncs = []
185 wirepeersetupfuncs = []
186
186
187
187
188 def _peerorrepo(
188 def _peerorrepo(
189 ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
189 ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
190 ):
190 ):
191 """return a repository object for the specified path"""
191 """return a repository object for the specified path"""
192 obj = _peerlookup(path).instance(
192 obj = _peerlookup(path).instance(
193 ui, path, create, intents=intents, createopts=createopts
193 ui, path, create, intents=intents, createopts=createopts
194 )
194 )
195 ui = getattr(obj, "ui", ui)
195 ui = getattr(obj, "ui", ui)
196 for f in presetupfuncs or []:
196 for f in presetupfuncs or []:
197 f(ui, obj)
197 f(ui, obj)
198 ui.log(b'extension', b'- executing reposetup hooks\n')
198 ui.log(b'extension', b'- executing reposetup hooks\n')
199 with util.timedcm('all reposetup') as allreposetupstats:
199 with util.timedcm('all reposetup') as allreposetupstats:
200 for name, module in extensions.extensions(ui):
200 for name, module in extensions.extensions(ui):
201 ui.log(b'extension', b' - running reposetup for %s\n', name)
201 ui.log(b'extension', b' - running reposetup for %s\n', name)
202 hook = getattr(module, 'reposetup', None)
202 hook = getattr(module, 'reposetup', None)
203 if hook:
203 if hook:
204 with util.timedcm('reposetup %r', name) as stats:
204 with util.timedcm('reposetup %r', name) as stats:
205 hook(ui, obj)
205 hook(ui, obj)
206 ui.log(
206 ui.log(
207 b'extension', b' > reposetup for %s took %s\n', name, stats
207 b'extension', b' > reposetup for %s took %s\n', name, stats
208 )
208 )
209 ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
209 ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
210 if not obj.local():
210 if not obj.local():
211 for f in wirepeersetupfuncs:
211 for f in wirepeersetupfuncs:
212 f(ui, obj)
212 f(ui, obj)
213 return obj
213 return obj
214
214
215
215
216 def repository(
216 def repository(
217 ui,
217 ui,
218 path=b'',
218 path=b'',
219 create=False,
219 create=False,
220 presetupfuncs=None,
220 presetupfuncs=None,
221 intents=None,
221 intents=None,
222 createopts=None,
222 createopts=None,
223 ):
223 ):
224 """return a repository object for the specified path"""
224 """return a repository object for the specified path"""
225 peer = _peerorrepo(
225 peer = _peerorrepo(
226 ui,
226 ui,
227 path,
227 path,
228 create,
228 create,
229 presetupfuncs=presetupfuncs,
229 presetupfuncs=presetupfuncs,
230 intents=intents,
230 intents=intents,
231 createopts=createopts,
231 createopts=createopts,
232 )
232 )
233 repo = peer.local()
233 repo = peer.local()
234 if not repo:
234 if not repo:
235 raise error.Abort(
235 raise error.Abort(
236 _(b"repository '%s' is not local") % (path or peer.url())
236 _(b"repository '%s' is not local") % (path or peer.url())
237 )
237 )
238 return repo.filtered(b'visible')
238 return repo.filtered(b'visible')
239
239
240
240
241 def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
241 def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
242 '''return a repository peer for the specified path'''
242 '''return a repository peer for the specified path'''
243 rui = remoteui(uiorrepo, opts)
243 rui = remoteui(uiorrepo, opts)
244 return _peerorrepo(
244 return _peerorrepo(
245 rui, path, create, intents=intents, createopts=createopts
245 rui, path, create, intents=intents, createopts=createopts
246 ).peer()
246 ).peer()
247
247
248
248
249 def defaultdest(source):
249 def defaultdest(source):
250 """return default destination of clone if none is given
250 """return default destination of clone if none is given
251
251
252 >>> defaultdest(b'foo')
252 >>> defaultdest(b'foo')
253 'foo'
253 'foo'
254 >>> defaultdest(b'/foo/bar')
254 >>> defaultdest(b'/foo/bar')
255 'bar'
255 'bar'
256 >>> defaultdest(b'/')
256 >>> defaultdest(b'/')
257 ''
257 ''
258 >>> defaultdest(b'')
258 >>> defaultdest(b'')
259 ''
259 ''
260 >>> defaultdest(b'http://example.org/')
260 >>> defaultdest(b'http://example.org/')
261 ''
261 ''
262 >>> defaultdest(b'http://example.org/foo/')
262 >>> defaultdest(b'http://example.org/foo/')
263 'foo'
263 'foo'
264 """
264 """
265 path = urlutil.url(source).path
265 path = urlutil.url(source).path
266 if not path:
266 if not path:
267 return b''
267 return b''
268 return os.path.basename(os.path.normpath(path))
268 return os.path.basename(os.path.normpath(path))
269
269
270
270
271 def sharedreposource(repo):
271 def sharedreposource(repo):
272 """Returns repository object for source repository of a shared repo.
272 """Returns repository object for source repository of a shared repo.
273
273
274 If repo is not a shared repository, returns None.
274 If repo is not a shared repository, returns None.
275 """
275 """
276 if repo.sharedpath == repo.path:
276 if repo.sharedpath == repo.path:
277 return None
277 return None
278
278
279 if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
279 if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
280 return repo.srcrepo
280 return repo.srcrepo
281
281
282 # the sharedpath always ends in the .hg; we want the path to the repo
282 # the sharedpath always ends in the .hg; we want the path to the repo
283 source = repo.vfs.split(repo.sharedpath)[0]
283 source = repo.vfs.split(repo.sharedpath)[0]
284 srcurl, branches = urlutil.parseurl(source)
284 srcurl, branches = urlutil.parseurl(source)
285 srcrepo = repository(repo.ui, srcurl)
285 srcrepo = repository(repo.ui, srcurl)
286 repo.srcrepo = srcrepo
286 repo.srcrepo = srcrepo
287 return srcrepo
287 return srcrepo
288
288
289
289
290 def share(
290 def share(
291 ui,
291 ui,
292 source,
292 source,
293 dest=None,
293 dest=None,
294 update=True,
294 update=True,
295 bookmarks=True,
295 bookmarks=True,
296 defaultpath=None,
296 defaultpath=None,
297 relative=False,
297 relative=False,
298 ):
298 ):
299 '''create a shared repository'''
299 '''create a shared repository'''
300
300
301 if not islocal(source):
301 if not islocal(source):
302 raise error.Abort(_(b'can only share local repositories'))
302 raise error.Abort(_(b'can only share local repositories'))
303
303
304 if not dest:
304 if not dest:
305 dest = defaultdest(source)
305 dest = defaultdest(source)
306 else:
306 else:
307 dest = urlutil.get_clone_path(ui, dest)[1]
307 dest = urlutil.get_clone_path(ui, dest)[1]
308
308
309 if isinstance(source, bytes):
309 if isinstance(source, bytes):
310 origsource, source, branches = urlutil.get_clone_path(ui, source)
310 origsource, source, branches = urlutil.get_clone_path(ui, source)
311 srcrepo = repository(ui, source)
311 srcrepo = repository(ui, source)
312 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
312 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
313 else:
313 else:
314 srcrepo = source.local()
314 srcrepo = source.local()
315 checkout = None
315 checkout = None
316
316
317 shareditems = set()
317 shareditems = set()
318 if bookmarks:
318 if bookmarks:
319 shareditems.add(sharedbookmarks)
319 shareditems.add(sharedbookmarks)
320
320
321 r = repository(
321 r = repository(
322 ui,
322 ui,
323 dest,
323 dest,
324 create=True,
324 create=True,
325 createopts={
325 createopts={
326 b'sharedrepo': srcrepo,
326 b'sharedrepo': srcrepo,
327 b'sharedrelative': relative,
327 b'sharedrelative': relative,
328 b'shareditems': shareditems,
328 b'shareditems': shareditems,
329 },
329 },
330 )
330 )
331
331
332 postshare(srcrepo, r, defaultpath=defaultpath)
332 postshare(srcrepo, r, defaultpath=defaultpath)
333 r = repository(ui, dest)
333 r = repository(ui, dest)
334 _postshareupdate(r, update, checkout=checkout)
334 _postshareupdate(r, update, checkout=checkout)
335 return r
335 return r
336
336
337
337
338 def _prependsourcehgrc(repo):
338 def _prependsourcehgrc(repo):
339 """copies the source repo config and prepend it in current repo .hg/hgrc
339 """copies the source repo config and prepend it in current repo .hg/hgrc
340 on unshare. This is only done if the share was perfomed using share safe
340 on unshare. This is only done if the share was perfomed using share safe
341 method where we share config of source in shares"""
341 method where we share config of source in shares"""
342 srcvfs = vfsmod.vfs(repo.sharedpath)
342 srcvfs = vfsmod.vfs(repo.sharedpath)
343 dstvfs = vfsmod.vfs(repo.path)
343 dstvfs = vfsmod.vfs(repo.path)
344
344
345 if not srcvfs.exists(b'hgrc'):
345 if not srcvfs.exists(b'hgrc'):
346 return
346 return
347
347
348 currentconfig = b''
348 currentconfig = b''
349 if dstvfs.exists(b'hgrc'):
349 if dstvfs.exists(b'hgrc'):
350 currentconfig = dstvfs.read(b'hgrc')
350 currentconfig = dstvfs.read(b'hgrc')
351
351
352 with dstvfs(b'hgrc', b'wb') as fp:
352 with dstvfs(b'hgrc', b'wb') as fp:
353 sourceconfig = srcvfs.read(b'hgrc')
353 sourceconfig = srcvfs.read(b'hgrc')
354 fp.write(b"# Config copied from shared source\n")
354 fp.write(b"# Config copied from shared source\n")
355 fp.write(sourceconfig)
355 fp.write(sourceconfig)
356 fp.write(b'\n')
356 fp.write(b'\n')
357 fp.write(currentconfig)
357 fp.write(currentconfig)
358
358
359
359
360 def unshare(ui, repo):
360 def unshare(ui, repo):
361 """convert a shared repository to a normal one
361 """convert a shared repository to a normal one
362
362
363 Copy the store data to the repo and remove the sharedpath data.
363 Copy the store data to the repo and remove the sharedpath data.
364
364
365 Returns a new repository object representing the unshared repository.
365 Returns a new repository object representing the unshared repository.
366
366
367 The passed repository object is not usable after this function is
367 The passed repository object is not usable after this function is
368 called.
368 called.
369 """
369 """
370
370
371 with repo.lock():
371 with repo.lock():
372 # we use locks here because if we race with commit, we
372 # we use locks here because if we race with commit, we
373 # can end up with extra data in the cloned revlogs that's
373 # can end up with extra data in the cloned revlogs that's
374 # not pointed to by changesets, thus causing verify to
374 # not pointed to by changesets, thus causing verify to
375 # fail
375 # fail
376 destlock = copystore(ui, repo, repo.path)
376 destlock = copystore(ui, repo, repo.path)
377 with destlock or util.nullcontextmanager():
377 with destlock or util.nullcontextmanager():
378 if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
378 if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
379 # we were sharing .hg/hgrc of the share source with the current
379 # we were sharing .hg/hgrc of the share source with the current
380 # repo. We need to copy that while unsharing otherwise it can
380 # repo. We need to copy that while unsharing otherwise it can
381 # disable hooks and other checks
381 # disable hooks and other checks
382 _prependsourcehgrc(repo)
382 _prependsourcehgrc(repo)
383
383
384 sharefile = repo.vfs.join(b'sharedpath')
384 sharefile = repo.vfs.join(b'sharedpath')
385 util.rename(sharefile, sharefile + b'.old')
385 util.rename(sharefile, sharefile + b'.old')
386
386
387 repo.requirements.discard(requirements.SHARED_REQUIREMENT)
387 repo.requirements.discard(requirements.SHARED_REQUIREMENT)
388 repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
388 repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
389 scmutil.writereporequirements(repo)
389 scmutil.writereporequirements(repo)
390
390
391 # Removing share changes some fundamental properties of the repo instance.
391 # Removing share changes some fundamental properties of the repo instance.
392 # So we instantiate a new repo object and operate on it rather than
392 # So we instantiate a new repo object and operate on it rather than
393 # try to keep the existing repo usable.
393 # try to keep the existing repo usable.
394 newrepo = repository(repo.baseui, repo.root, create=False)
394 newrepo = repository(repo.baseui, repo.root, create=False)
395
395
396 # TODO: figure out how to access subrepos that exist, but were previously
396 # TODO: figure out how to access subrepos that exist, but were previously
397 # removed from .hgsub
397 # removed from .hgsub
398 c = newrepo[b'.']
398 c = newrepo[b'.']
399 subs = c.substate
399 subs = c.substate
400 for s in sorted(subs):
400 for s in sorted(subs):
401 c.sub(s).unshare()
401 c.sub(s).unshare()
402
402
403 localrepo.poisonrepository(repo)
403 localrepo.poisonrepository(repo)
404
404
405 return newrepo
405 return newrepo
406
406
407
407
408 def postshare(sourcerepo, destrepo, defaultpath=None):
408 def postshare(sourcerepo, destrepo, defaultpath=None):
409 """Called after a new shared repo is created.
409 """Called after a new shared repo is created.
410
410
411 The new repo only has a requirements file and pointer to the source.
411 The new repo only has a requirements file and pointer to the source.
412 This function configures additional shared data.
412 This function configures additional shared data.
413
413
414 Extensions can wrap this function and write additional entries to
414 Extensions can wrap this function and write additional entries to
415 destrepo/.hg/shared to indicate additional pieces of data to be shared.
415 destrepo/.hg/shared to indicate additional pieces of data to be shared.
416 """
416 """
417 default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
417 default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
418 if default:
418 if default:
419 template = b'[paths]\ndefault = %s\n'
419 template = b'[paths]\ndefault = %s\n'
420 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
420 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
421 if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
421 if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
422 with destrepo.wlock():
422 with destrepo.wlock():
423 narrowspec.copytoworkingcopy(destrepo)
423 narrowspec.copytoworkingcopy(destrepo)
424
424
425
425
426 def _postshareupdate(repo, update, checkout=None):
426 def _postshareupdate(repo, update, checkout=None):
427 """Maybe perform a working directory update after a shared repo is created.
427 """Maybe perform a working directory update after a shared repo is created.
428
428
429 ``update`` can be a boolean or a revision to update to.
429 ``update`` can be a boolean or a revision to update to.
430 """
430 """
431 if not update:
431 if not update:
432 return
432 return
433
433
434 repo.ui.status(_(b"updating working directory\n"))
434 repo.ui.status(_(b"updating working directory\n"))
435 if update is not True:
435 if update is not True:
436 checkout = update
436 checkout = update
437 for test in (checkout, b'default', b'tip'):
437 for test in (checkout, b'default', b'tip'):
438 if test is None:
438 if test is None:
439 continue
439 continue
440 try:
440 try:
441 uprev = repo.lookup(test)
441 uprev = repo.lookup(test)
442 break
442 break
443 except error.RepoLookupError:
443 except error.RepoLookupError:
444 continue
444 continue
445 _update(repo, uprev)
445 _update(repo, uprev)
446
446
447
447
448 def copystore(ui, srcrepo, destpath):
448 def copystore(ui, srcrepo, destpath):
449 """copy files from store of srcrepo in destpath
449 """copy files from store of srcrepo in destpath
450
450
451 returns destlock
451 returns destlock
452 """
452 """
453 destlock = None
453 destlock = None
454 try:
454 try:
455 hardlink = None
455 hardlink = None
456 topic = _(b'linking') if hardlink else _(b'copying')
456 topic = _(b'linking') if hardlink else _(b'copying')
457 with ui.makeprogress(topic, unit=_(b'files')) as progress:
457 with ui.makeprogress(topic, unit=_(b'files')) as progress:
458 num = 0
458 num = 0
459 srcpublishing = srcrepo.publishing()
459 srcpublishing = srcrepo.publishing()
460 srcvfs = vfsmod.vfs(srcrepo.sharedpath)
460 srcvfs = vfsmod.vfs(srcrepo.sharedpath)
461 dstvfs = vfsmod.vfs(destpath)
461 dstvfs = vfsmod.vfs(destpath)
462 for f in srcrepo.store.copylist():
462 for f in srcrepo.store.copylist():
463 if srcpublishing and f.endswith(b'phaseroots'):
463 if srcpublishing and f.endswith(b'phaseroots'):
464 continue
464 continue
465 dstbase = os.path.dirname(f)
465 dstbase = os.path.dirname(f)
466 if dstbase and not dstvfs.exists(dstbase):
466 if dstbase and not dstvfs.exists(dstbase):
467 dstvfs.mkdir(dstbase)
467 dstvfs.mkdir(dstbase)
468 if srcvfs.exists(f):
468 if srcvfs.exists(f):
469 if f.endswith(b'data'):
469 if f.endswith(b'data'):
470 # 'dstbase' may be empty (e.g. revlog format 0)
470 # 'dstbase' may be empty (e.g. revlog format 0)
471 lockfile = os.path.join(dstbase, b"lock")
471 lockfile = os.path.join(dstbase, b"lock")
472 # lock to avoid premature writing to the target
472 # lock to avoid premature writing to the target
473 destlock = lock.lock(dstvfs, lockfile)
473 destlock = lock.lock(dstvfs, lockfile)
474 hardlink, n = util.copyfiles(
474 hardlink, n = util.copyfiles(
475 srcvfs.join(f), dstvfs.join(f), hardlink, progress
475 srcvfs.join(f), dstvfs.join(f), hardlink, progress
476 )
476 )
477 num += n
477 num += n
478 if hardlink:
478 if hardlink:
479 ui.debug(b"linked %d files\n" % num)
479 ui.debug(b"linked %d files\n" % num)
480 else:
480 else:
481 ui.debug(b"copied %d files\n" % num)
481 ui.debug(b"copied %d files\n" % num)
482 return destlock
482 return destlock
483 except: # re-raises
483 except: # re-raises
484 release(destlock)
484 release(destlock)
485 raise
485 raise
486
486
487
487
488 def clonewithshare(
488 def clonewithshare(
489 ui,
489 ui,
490 peeropts,
490 peeropts,
491 sharepath,
491 sharepath,
492 source,
492 source,
493 srcpeer,
493 srcpeer,
494 dest,
494 dest,
495 pull=False,
495 pull=False,
496 rev=None,
496 rev=None,
497 update=True,
497 update=True,
498 stream=False,
498 stream=False,
499 ):
499 ):
500 """Perform a clone using a shared repo.
500 """Perform a clone using a shared repo.
501
501
502 The store for the repository will be located at <sharepath>/.hg. The
502 The store for the repository will be located at <sharepath>/.hg. The
503 specified revisions will be cloned or pulled from "source". A shared repo
503 specified revisions will be cloned or pulled from "source". A shared repo
504 will be created at "dest" and a working copy will be created if "update" is
504 will be created at "dest" and a working copy will be created if "update" is
505 True.
505 True.
506 """
506 """
507 revs = None
507 revs = None
508 if rev:
508 if rev:
509 if not srcpeer.capable(b'lookup'):
509 if not srcpeer.capable(b'lookup'):
510 raise error.Abort(
510 raise error.Abort(
511 _(
511 _(
512 b"src repository does not support "
512 b"src repository does not support "
513 b"revision lookup and so doesn't "
513 b"revision lookup and so doesn't "
514 b"support clone by revision"
514 b"support clone by revision"
515 )
515 )
516 )
516 )
517
517
518 # TODO this is batchable.
518 # TODO this is batchable.
519 remoterevs = []
519 remoterevs = []
520 for r in rev:
520 for r in rev:
521 with srcpeer.commandexecutor() as e:
521 with srcpeer.commandexecutor() as e:
522 remoterevs.append(
522 remoterevs.append(
523 e.callcommand(
523 e.callcommand(
524 b'lookup',
524 b'lookup',
525 {
525 {
526 b'key': r,
526 b'key': r,
527 },
527 },
528 ).result()
528 ).result()
529 )
529 )
530 revs = remoterevs
530 revs = remoterevs
531
531
532 # Obtain a lock before checking for or cloning the pooled repo otherwise
532 # Obtain a lock before checking for or cloning the pooled repo otherwise
533 # 2 clients may race creating or populating it.
533 # 2 clients may race creating or populating it.
534 pooldir = os.path.dirname(sharepath)
534 pooldir = os.path.dirname(sharepath)
535 # lock class requires the directory to exist.
535 # lock class requires the directory to exist.
536 try:
536 try:
537 util.makedir(pooldir, False)
537 util.makedir(pooldir, False)
538 except OSError as e:
538 except OSError as e:
539 if e.errno != errno.EEXIST:
539 if e.errno != errno.EEXIST:
540 raise
540 raise
541
541
542 poolvfs = vfsmod.vfs(pooldir)
542 poolvfs = vfsmod.vfs(pooldir)
543 basename = os.path.basename(sharepath)
543 basename = os.path.basename(sharepath)
544
544
545 with lock.lock(poolvfs, b'%s.lock' % basename):
545 with lock.lock(poolvfs, b'%s.lock' % basename):
546 if os.path.exists(sharepath):
546 if os.path.exists(sharepath):
547 ui.status(
547 ui.status(
548 _(b'(sharing from existing pooled repository %s)\n') % basename
548 _(b'(sharing from existing pooled repository %s)\n') % basename
549 )
549 )
550 else:
550 else:
551 ui.status(
551 ui.status(
552 _(b'(sharing from new pooled repository %s)\n') % basename
552 _(b'(sharing from new pooled repository %s)\n') % basename
553 )
553 )
554 # Always use pull mode because hardlinks in share mode don't work
554 # Always use pull mode because hardlinks in share mode don't work
555 # well. Never update because working copies aren't necessary in
555 # well. Never update because working copies aren't necessary in
556 # share mode.
556 # share mode.
557 clone(
557 clone(
558 ui,
558 ui,
559 peeropts,
559 peeropts,
560 source,
560 source,
561 dest=sharepath,
561 dest=sharepath,
562 pull=True,
562 pull=True,
563 revs=rev,
563 revs=rev,
564 update=False,
564 update=False,
565 stream=stream,
565 stream=stream,
566 )
566 )
567
567
568 # Resolve the value to put in [paths] section for the source.
568 # Resolve the value to put in [paths] section for the source.
569 if islocal(source):
569 if islocal(source):
570 defaultpath = os.path.abspath(urlutil.urllocalpath(source))
570 defaultpath = os.path.abspath(urlutil.urllocalpath(source))
571 else:
571 else:
572 defaultpath = source
572 defaultpath = source
573
573
574 sharerepo = repository(ui, path=sharepath)
574 sharerepo = repository(ui, path=sharepath)
575 destrepo = share(
575 destrepo = share(
576 ui,
576 ui,
577 sharerepo,
577 sharerepo,
578 dest=dest,
578 dest=dest,
579 update=False,
579 update=False,
580 bookmarks=False,
580 bookmarks=False,
581 defaultpath=defaultpath,
581 defaultpath=defaultpath,
582 )
582 )
583
583
584 # We need to perform a pull against the dest repo to fetch bookmarks
584 # We need to perform a pull against the dest repo to fetch bookmarks
585 # and other non-store data that isn't shared by default. In the case of
585 # and other non-store data that isn't shared by default. In the case of
586 # non-existing shared repo, this means we pull from the remote twice. This
586 # non-existing shared repo, this means we pull from the remote twice. This
587 # is a bit weird. But at the time it was implemented, there wasn't an easy
587 # is a bit weird. But at the time it was implemented, there wasn't an easy
588 # way to pull just non-changegroup data.
588 # way to pull just non-changegroup data.
589 exchange.pull(destrepo, srcpeer, heads=revs)
589 exchange.pull(destrepo, srcpeer, heads=revs)
590
590
591 _postshareupdate(destrepo, update)
591 _postshareupdate(destrepo, update)
592
592
593 return srcpeer, peer(ui, peeropts, dest)
593 return srcpeer, peer(ui, peeropts, dest)
594
594
595
595
596 # Recomputing caches is often slow on big repos, so copy them.
596 # Recomputing caches is often slow on big repos, so copy them.
597 def _copycache(srcrepo, dstcachedir, fname):
597 def _copycache(srcrepo, dstcachedir, fname):
598 """copy a cache from srcrepo to destcachedir (if it exists)"""
598 """copy a cache from srcrepo to destcachedir (if it exists)"""
599 srcfname = srcrepo.cachevfs.join(fname)
599 srcfname = srcrepo.cachevfs.join(fname)
600 dstfname = os.path.join(dstcachedir, fname)
600 dstfname = os.path.join(dstcachedir, fname)
601 if os.path.exists(srcfname):
601 if os.path.exists(srcfname):
602 if not os.path.exists(dstcachedir):
602 if not os.path.exists(dstcachedir):
603 os.mkdir(dstcachedir)
603 os.mkdir(dstcachedir)
604 util.copyfile(srcfname, dstfname)
604 util.copyfile(srcfname, dstfname)
605
605
606
606
607 def clone(
607 def clone(
608 ui,
608 ui,
609 peeropts,
609 peeropts,
610 source,
610 source,
611 dest=None,
611 dest=None,
612 pull=False,
612 pull=False,
613 revs=None,
613 revs=None,
614 update=True,
614 update=True,
615 stream=False,
615 stream=False,
616 branch=None,
616 branch=None,
617 shareopts=None,
617 shareopts=None,
618 storeincludepats=None,
618 storeincludepats=None,
619 storeexcludepats=None,
619 storeexcludepats=None,
620 depth=None,
620 depth=None,
621 ):
621 ):
622 """Make a copy of an existing repository.
622 """Make a copy of an existing repository.
623
623
624 Create a copy of an existing repository in a new directory. The
624 Create a copy of an existing repository in a new directory. The
625 source and destination are URLs, as passed to the repository
625 source and destination are URLs, as passed to the repository
626 function. Returns a pair of repository peers, the source and
626 function. Returns a pair of repository peers, the source and
627 newly created destination.
627 newly created destination.
628
628
629 The location of the source is added to the new repository's
629 The location of the source is added to the new repository's
630 .hg/hgrc file, as the default to be used for future pulls and
630 .hg/hgrc file, as the default to be used for future pulls and
631 pushes.
631 pushes.
632
632
633 If an exception is raised, the partly cloned/updated destination
633 If an exception is raised, the partly cloned/updated destination
634 repository will be deleted.
634 repository will be deleted.
635
635
636 Arguments:
636 Arguments:
637
637
638 source: repository object or URL
638 source: repository object or URL
639
639
640 dest: URL of destination repository to create (defaults to base
640 dest: URL of destination repository to create (defaults to base
641 name of source repository)
641 name of source repository)
642
642
643 pull: always pull from source repository, even in local case or if the
643 pull: always pull from source repository, even in local case or if the
644 server prefers streaming
644 server prefers streaming
645
645
646 stream: stream raw data uncompressed from repository (fast over
646 stream: stream raw data uncompressed from repository (fast over
647 LAN, slow over WAN)
647 LAN, slow over WAN)
648
648
649 revs: revision to clone up to (implies pull=True)
649 revs: revision to clone up to (implies pull=True)
650
650
651 update: update working directory after clone completes, if
651 update: update working directory after clone completes, if
652 destination is local repository (True means update to default rev,
652 destination is local repository (True means update to default rev,
653 anything else is treated as a revision)
653 anything else is treated as a revision)
654
654
655 branch: branches to clone
655 branch: branches to clone
656
656
657 shareopts: dict of options to control auto sharing behavior. The "pool" key
657 shareopts: dict of options to control auto sharing behavior. The "pool" key
658 activates auto sharing mode and defines the directory for stores. The
658 activates auto sharing mode and defines the directory for stores. The
659 "mode" key determines how to construct the directory name of the shared
659 "mode" key determines how to construct the directory name of the shared
660 repository. "identity" means the name is derived from the node of the first
660 repository. "identity" means the name is derived from the node of the first
661 changeset in the repository. "remote" means the name is derived from the
661 changeset in the repository. "remote" means the name is derived from the
662 remote's path/URL. Defaults to "identity."
662 remote's path/URL. Defaults to "identity."
663
663
664 storeincludepats and storeexcludepats: sets of file patterns to include and
664 storeincludepats and storeexcludepats: sets of file patterns to include and
665 exclude in the repository copy, respectively. If not defined, all files
665 exclude in the repository copy, respectively. If not defined, all files
666 will be included (a "full" clone). Otherwise a "narrow" clone containing
666 will be included (a "full" clone). Otherwise a "narrow" clone containing
667 only the requested files will be performed. If ``storeincludepats`` is not
667 only the requested files will be performed. If ``storeincludepats`` is not
668 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
668 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
669 ``path:.``. If both are empty sets, no files will be cloned.
669 ``path:.``. If both are empty sets, no files will be cloned.
670 """
670 """
671
671
672 if isinstance(source, bytes):
672 if isinstance(source, bytes):
673 src = urlutil.get_clone_path(ui, source, branch)
673 src = urlutil.get_clone_path(ui, source, branch)
674 origsource, source, branches = src
674 origsource, source, branches = src
675 srcpeer = peer(ui, peeropts, source)
675 srcpeer = peer(ui, peeropts, source)
676 else:
676 else:
677 srcpeer = source.peer() # in case we were called with a localrepo
677 srcpeer = source.peer() # in case we were called with a localrepo
678 branches = (None, branch or [])
678 branches = (None, branch or [])
679 origsource = source = srcpeer.url()
679 origsource = source = srcpeer.url()
680 srclock = destlock = cleandir = None
680 srclock = destlock = cleandir = None
681 destpeer = None
681 destpeer = None
682 try:
682 try:
683 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
683 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
684
684
685 if dest is None:
685 if dest is None:
686 dest = defaultdest(source)
686 dest = defaultdest(source)
687 if dest:
687 if dest:
688 ui.status(_(b"destination directory: %s\n") % dest)
688 ui.status(_(b"destination directory: %s\n") % dest)
689 else:
689 else:
690 dest = urlutil.get_clone_path(ui, dest)[0]
690 dest = urlutil.get_clone_path(ui, dest)[0]
691
691
692 dest = urlutil.urllocalpath(dest)
692 dest = urlutil.urllocalpath(dest)
693 source = urlutil.urllocalpath(source)
693 source = urlutil.urllocalpath(source)
694
694
695 if not dest:
695 if not dest:
696 raise error.InputError(_(b"empty destination path is not valid"))
696 raise error.InputError(_(b"empty destination path is not valid"))
697
697
698 destvfs = vfsmod.vfs(dest, expandpath=True)
698 destvfs = vfsmod.vfs(dest, expandpath=True)
699 if destvfs.lexists():
699 if destvfs.lexists():
700 if not destvfs.isdir():
700 if not destvfs.isdir():
701 raise error.InputError(
701 raise error.InputError(
702 _(b"destination '%s' already exists") % dest
702 _(b"destination '%s' already exists") % dest
703 )
703 )
704 elif destvfs.listdir():
704 elif destvfs.listdir():
705 raise error.InputError(
705 raise error.InputError(
706 _(b"destination '%s' is not empty") % dest
706 _(b"destination '%s' is not empty") % dest
707 )
707 )
708
708
709 createopts = {}
709 createopts = {}
710 narrow = False
710 narrow = False
711
711
712 if storeincludepats is not None:
712 if storeincludepats is not None:
713 narrowspec.validatepatterns(storeincludepats)
713 narrowspec.validatepatterns(storeincludepats)
714 narrow = True
714 narrow = True
715
715
716 if storeexcludepats is not None:
716 if storeexcludepats is not None:
717 narrowspec.validatepatterns(storeexcludepats)
717 narrowspec.validatepatterns(storeexcludepats)
718 narrow = True
718 narrow = True
719
719
720 if narrow:
720 if narrow:
721 # Include everything by default if only exclusion patterns defined.
721 # Include everything by default if only exclusion patterns defined.
722 if storeexcludepats and not storeincludepats:
722 if storeexcludepats and not storeincludepats:
723 storeincludepats = {b'path:.'}
723 storeincludepats = {b'path:.'}
724
724
725 createopts[b'narrowfiles'] = True
725 createopts[b'narrowfiles'] = True
726
726
727 if depth:
727 if depth:
728 createopts[b'shallowfilestore'] = True
728 createopts[b'shallowfilestore'] = True
729
729
730 if srcpeer.capable(b'lfs-serve'):
730 if srcpeer.capable(b'lfs-serve'):
731 # Repository creation honors the config if it disabled the extension, so
731 # Repository creation honors the config if it disabled the extension, so
732 # we can't just announce that lfs will be enabled. This check avoids
732 # we can't just announce that lfs will be enabled. This check avoids
733 # saying that lfs will be enabled, and then saying it's an unknown
733 # saying that lfs will be enabled, and then saying it's an unknown
734 # feature. The lfs creation option is set in either case so that a
734 # feature. The lfs creation option is set in either case so that a
735 # requirement is added. If the extension is explicitly disabled but the
735 # requirement is added. If the extension is explicitly disabled but the
736 # requirement is set, the clone aborts early, before transferring any
736 # requirement is set, the clone aborts early, before transferring any
737 # data.
737 # data.
738 createopts[b'lfs'] = True
738 createopts[b'lfs'] = True
739
739
740 if extensions.disabled_help(b'lfs'):
740 if extensions.disabled_help(b'lfs'):
741 ui.status(
741 ui.status(
742 _(
742 _(
743 b'(remote is using large file support (lfs), but it is '
743 b'(remote is using large file support (lfs), but it is '
744 b'explicitly disabled in the local configuration)\n'
744 b'explicitly disabled in the local configuration)\n'
745 )
745 )
746 )
746 )
747 else:
747 else:
748 ui.status(
748 ui.status(
749 _(
749 _(
750 b'(remote is using large file support (lfs); lfs will '
750 b'(remote is using large file support (lfs); lfs will '
751 b'be enabled for this repository)\n'
751 b'be enabled for this repository)\n'
752 )
752 )
753 )
753 )
754
754
755 shareopts = shareopts or {}
755 shareopts = shareopts or {}
756 sharepool = shareopts.get(b'pool')
756 sharepool = shareopts.get(b'pool')
757 sharenamemode = shareopts.get(b'mode')
757 sharenamemode = shareopts.get(b'mode')
758 if sharepool and islocal(dest):
758 if sharepool and islocal(dest):
759 sharepath = None
759 sharepath = None
760 if sharenamemode == b'identity':
760 if sharenamemode == b'identity':
761 # Resolve the name from the initial changeset in the remote
761 # Resolve the name from the initial changeset in the remote
762 # repository. This returns nullid when the remote is empty. It
762 # repository. This returns nullid when the remote is empty. It
763 # raises RepoLookupError if revision 0 is filtered or otherwise
763 # raises RepoLookupError if revision 0 is filtered or otherwise
764 # not available. If we fail to resolve, sharing is not enabled.
764 # not available. If we fail to resolve, sharing is not enabled.
765 try:
765 try:
766 with srcpeer.commandexecutor() as e:
766 with srcpeer.commandexecutor() as e:
767 rootnode = e.callcommand(
767 rootnode = e.callcommand(
768 b'lookup',
768 b'lookup',
769 {
769 {
770 b'key': b'0',
770 b'key': b'0',
771 },
771 },
772 ).result()
772 ).result()
773
773
774 if rootnode != sha1nodeconstants.nullid:
774 if rootnode != sha1nodeconstants.nullid:
775 sharepath = os.path.join(sharepool, hex(rootnode))
775 sharepath = os.path.join(sharepool, hex(rootnode))
776 else:
776 else:
777 ui.status(
777 ui.status(
778 _(
778 _(
779 b'(not using pooled storage: '
779 b'(not using pooled storage: '
780 b'remote appears to be empty)\n'
780 b'remote appears to be empty)\n'
781 )
781 )
782 )
782 )
783 except error.RepoLookupError:
783 except error.RepoLookupError:
784 ui.status(
784 ui.status(
785 _(
785 _(
786 b'(not using pooled storage: '
786 b'(not using pooled storage: '
787 b'unable to resolve identity of remote)\n'
787 b'unable to resolve identity of remote)\n'
788 )
788 )
789 )
789 )
790 elif sharenamemode == b'remote':
790 elif sharenamemode == b'remote':
791 sharepath = os.path.join(
791 sharepath = os.path.join(
792 sharepool, hex(hashutil.sha1(source).digest())
792 sharepool, hex(hashutil.sha1(source).digest())
793 )
793 )
794 else:
794 else:
795 raise error.Abort(
795 raise error.Abort(
796 _(b'unknown share naming mode: %s') % sharenamemode
796 _(b'unknown share naming mode: %s') % sharenamemode
797 )
797 )
798
798
799 # TODO this is a somewhat arbitrary restriction.
799 # TODO this is a somewhat arbitrary restriction.
800 if narrow:
800 if narrow:
801 ui.status(
801 ui.status(
802 _(b'(pooled storage not supported for narrow clones)\n')
802 _(b'(pooled storage not supported for narrow clones)\n')
803 )
803 )
804 sharepath = None
804 sharepath = None
805
805
806 if sharepath:
806 if sharepath:
807 return clonewithshare(
807 return clonewithshare(
808 ui,
808 ui,
809 peeropts,
809 peeropts,
810 sharepath,
810 sharepath,
811 source,
811 source,
812 srcpeer,
812 srcpeer,
813 dest,
813 dest,
814 pull=pull,
814 pull=pull,
815 rev=revs,
815 rev=revs,
816 update=update,
816 update=update,
817 stream=stream,
817 stream=stream,
818 )
818 )
819
819
820 srcrepo = srcpeer.local()
820 srcrepo = srcpeer.local()
821
821
822 abspath = origsource
822 abspath = origsource
823 if islocal(origsource):
823 if islocal(origsource):
824 abspath = os.path.abspath(urlutil.urllocalpath(origsource))
824 abspath = os.path.abspath(urlutil.urllocalpath(origsource))
825
825
826 if islocal(dest):
826 if islocal(dest):
827 cleandir = dest
827 cleandir = dest
828
828
829 copy = False
829 copy = False
830 if (
830 if (
831 srcrepo
831 srcrepo
832 and srcrepo.cancopy()
832 and srcrepo.cancopy()
833 and islocal(dest)
833 and islocal(dest)
834 and not phases.hassecret(srcrepo)
834 and not phases.hassecret(srcrepo)
835 ):
835 ):
836 copy = not pull and not revs
836 copy = not pull and not revs
837
837
838 # TODO this is a somewhat arbitrary restriction.
838 # TODO this is a somewhat arbitrary restriction.
839 if narrow:
839 if narrow:
840 copy = False
840 copy = False
841
841
842 if copy:
842 if copy:
843 try:
843 try:
844 # we use a lock here because if we race with commit, we
844 # we use a lock here because if we race with commit, we
845 # can end up with extra data in the cloned revlogs that's
845 # can end up with extra data in the cloned revlogs that's
846 # not pointed to by changesets, thus causing verify to
846 # not pointed to by changesets, thus causing verify to
847 # fail
847 # fail
848 srclock = srcrepo.lock(wait=False)
848 srclock = srcrepo.lock(wait=False)
849 except error.LockError:
849 except error.LockError:
850 copy = False
850 copy = False
851
851
852 if copy:
852 if copy:
853 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
853 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
854 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
854 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
855 if not os.path.exists(dest):
855 if not os.path.exists(dest):
856 util.makedirs(dest)
856 util.makedirs(dest)
857 else:
857 else:
858 # only clean up directories we create ourselves
858 # only clean up directories we create ourselves
859 cleandir = hgdir
859 cleandir = hgdir
860 try:
860 try:
861 destpath = hgdir
861 destpath = hgdir
862 util.makedir(destpath, notindexed=True)
862 util.makedir(destpath, notindexed=True)
863 except OSError as inst:
863 except OSError as inst:
864 if inst.errno == errno.EEXIST:
864 if inst.errno == errno.EEXIST:
865 cleandir = None
865 cleandir = None
866 raise error.Abort(
866 raise error.Abort(
867 _(b"destination '%s' already exists") % dest
867 _(b"destination '%s' already exists") % dest
868 )
868 )
869 raise
869 raise
870
870
871 destlock = copystore(ui, srcrepo, destpath)
871 destlock = copystore(ui, srcrepo, destpath)
872 # copy bookmarks over
872 # copy bookmarks over
873 srcbookmarks = srcrepo.vfs.join(b'bookmarks')
873 srcbookmarks = srcrepo.vfs.join(b'bookmarks')
874 dstbookmarks = os.path.join(destpath, b'bookmarks')
874 dstbookmarks = os.path.join(destpath, b'bookmarks')
875 if os.path.exists(srcbookmarks):
875 if os.path.exists(srcbookmarks):
876 util.copyfile(srcbookmarks, dstbookmarks)
876 util.copyfile(srcbookmarks, dstbookmarks)
877
877
878 dstcachedir = os.path.join(destpath, b'cache')
878 dstcachedir = os.path.join(destpath, b'cache')
879 for cache in cacheutil.cachetocopy(srcrepo):
879 for cache in cacheutil.cachetocopy(srcrepo):
880 _copycache(srcrepo, dstcachedir, cache)
880 _copycache(srcrepo, dstcachedir, cache)
881
881
882 # we need to re-init the repo after manually copying the data
882 # we need to re-init the repo after manually copying the data
883 # into it
883 # into it
884 destpeer = peer(srcrepo, peeropts, dest)
884 destpeer = peer(srcrepo, peeropts, dest)
885 srcrepo.hook(
885 srcrepo.hook(
886 b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
886 b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
887 )
887 )
888 else:
888 else:
889 try:
889 try:
890 # only pass ui when no srcrepo
890 # only pass ui when no srcrepo
891 destpeer = peer(
891 destpeer = peer(
892 srcrepo or ui,
892 srcrepo or ui,
893 peeropts,
893 peeropts,
894 dest,
894 dest,
895 create=True,
895 create=True,
896 createopts=createopts,
896 createopts=createopts,
897 )
897 )
898 except OSError as inst:
898 except OSError as inst:
899 if inst.errno == errno.EEXIST:
899 if inst.errno == errno.EEXIST:
900 cleandir = None
900 cleandir = None
901 raise error.Abort(
901 raise error.Abort(
902 _(b"destination '%s' already exists") % dest
902 _(b"destination '%s' already exists") % dest
903 )
903 )
904 raise
904 raise
905
905
906 if revs:
906 if revs:
907 if not srcpeer.capable(b'lookup'):
907 if not srcpeer.capable(b'lookup'):
908 raise error.Abort(
908 raise error.Abort(
909 _(
909 _(
910 b"src repository does not support "
910 b"src repository does not support "
911 b"revision lookup and so doesn't "
911 b"revision lookup and so doesn't "
912 b"support clone by revision"
912 b"support clone by revision"
913 )
913 )
914 )
914 )
915
915
916 # TODO this is batchable.
916 # TODO this is batchable.
917 remoterevs = []
917 remoterevs = []
918 for rev in revs:
918 for rev in revs:
919 with srcpeer.commandexecutor() as e:
919 with srcpeer.commandexecutor() as e:
920 remoterevs.append(
920 remoterevs.append(
921 e.callcommand(
921 e.callcommand(
922 b'lookup',
922 b'lookup',
923 {
923 {
924 b'key': rev,
924 b'key': rev,
925 },
925 },
926 ).result()
926 ).result()
927 )
927 )
928 revs = remoterevs
928 revs = remoterevs
929
929
930 checkout = revs[0]
930 checkout = revs[0]
931 else:
931 else:
932 revs = None
932 revs = None
933 local = destpeer.local()
933 local = destpeer.local()
934 if local:
934 if local:
935 if narrow:
935 if narrow:
936 with local.wlock(), local.lock():
936 with local.wlock(), local.lock():
937 local.setnarrowpats(storeincludepats, storeexcludepats)
937 local.setnarrowpats(storeincludepats, storeexcludepats)
938 narrowspec.copytoworkingcopy(local)
938 narrowspec.copytoworkingcopy(local)
939
939
940 u = urlutil.url(abspath)
940 u = urlutil.url(abspath)
941 defaulturl = bytes(u)
941 defaulturl = bytes(u)
942 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
942 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
943 if not stream:
943 if not stream:
944 if pull:
944 if pull:
945 stream = False
945 stream = False
946 else:
946 else:
947 stream = None
947 stream = None
948 # internal config: ui.quietbookmarkmove
948 # internal config: ui.quietbookmarkmove
949 overrides = {(b'ui', b'quietbookmarkmove'): True}
949 overrides = {(b'ui', b'quietbookmarkmove'): True}
950 with local.ui.configoverride(overrides, b'clone'):
950 with local.ui.configoverride(overrides, b'clone'):
951 exchange.pull(
951 exchange.pull(
952 local,
952 local,
953 srcpeer,
953 srcpeer,
954 revs,
954 revs,
955 streamclonerequested=stream,
955 streamclonerequested=stream,
956 includepats=storeincludepats,
956 includepats=storeincludepats,
957 excludepats=storeexcludepats,
957 excludepats=storeexcludepats,
958 depth=depth,
958 depth=depth,
959 )
959 )
960 elif srcrepo:
960 elif srcrepo:
961 # TODO lift restriction once exchange.push() accepts narrow
961 # TODO lift restriction once exchange.push() accepts narrow
962 # push.
962 # push.
963 if narrow:
963 if narrow:
964 raise error.Abort(
964 raise error.Abort(
965 _(
965 _(
966 b'narrow clone not available for '
966 b'narrow clone not available for '
967 b'remote destinations'
967 b'remote destinations'
968 )
968 )
969 )
969 )
970
970
971 exchange.push(
971 exchange.push(
972 srcrepo,
972 srcrepo,
973 destpeer,
973 destpeer,
974 revs=revs,
974 revs=revs,
975 bookmarks=srcrepo._bookmarks.keys(),
975 bookmarks=srcrepo._bookmarks.keys(),
976 )
976 )
977 else:
977 else:
978 raise error.Abort(
978 raise error.Abort(
979 _(b"clone from remote to remote not supported")
979 _(b"clone from remote to remote not supported")
980 )
980 )
981
981
982 cleandir = None
982 cleandir = None
983
983
984 destrepo = destpeer.local()
984 destrepo = destpeer.local()
985 if destrepo:
985 if destrepo:
986 template = uimod.samplehgrcs[b'cloned']
986 template = uimod.samplehgrcs[b'cloned']
987 u = urlutil.url(abspath)
987 u = urlutil.url(abspath)
988 u.passwd = None
988 u.passwd = None
989 defaulturl = bytes(u)
989 defaulturl = bytes(u)
990 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
990 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
991 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
991 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
992
992
993 if ui.configbool(b'experimental', b'remotenames'):
993 if ui.configbool(b'experimental', b'remotenames'):
994 logexchange.pullremotenames(destrepo, srcpeer)
994 logexchange.pullremotenames(destrepo, srcpeer)
995
995
996 if update:
996 if update:
997 if update is not True:
997 if update is not True:
998 with srcpeer.commandexecutor() as e:
998 with srcpeer.commandexecutor() as e:
999 checkout = e.callcommand(
999 checkout = e.callcommand(
1000 b'lookup',
1000 b'lookup',
1001 {
1001 {
1002 b'key': update,
1002 b'key': update,
1003 },
1003 },
1004 ).result()
1004 ).result()
1005
1005
1006 uprev = None
1006 uprev = None
1007 status = None
1007 status = None
1008 if checkout is not None:
1008 if checkout is not None:
1009 # Some extensions (at least hg-git and hg-subversion) have
1009 # Some extensions (at least hg-git and hg-subversion) have
1010 # a peer.lookup() implementation that returns a name instead
1010 # a peer.lookup() implementation that returns a name instead
1011 # of a nodeid. We work around it here until we've figured
1011 # of a nodeid. We work around it here until we've figured
1012 # out a better solution.
1012 # out a better solution.
1013 if len(checkout) == 20 and checkout in destrepo:
1013 if len(checkout) == 20 and checkout in destrepo:
1014 uprev = checkout
1014 uprev = checkout
1015 elif scmutil.isrevsymbol(destrepo, checkout):
1015 elif scmutil.isrevsymbol(destrepo, checkout):
1016 uprev = scmutil.revsymbol(destrepo, checkout).node()
1016 uprev = scmutil.revsymbol(destrepo, checkout).node()
1017 else:
1017 else:
1018 if update is not True:
1018 if update is not True:
1019 try:
1019 try:
1020 uprev = destrepo.lookup(update)
1020 uprev = destrepo.lookup(update)
1021 except error.RepoLookupError:
1021 except error.RepoLookupError:
1022 pass
1022 pass
1023 if uprev is None:
1023 if uprev is None:
1024 try:
1024 try:
1025 if destrepo._activebookmark:
1025 if destrepo._activebookmark:
1026 uprev = destrepo.lookup(destrepo._activebookmark)
1026 uprev = destrepo.lookup(destrepo._activebookmark)
1027 update = destrepo._activebookmark
1027 update = destrepo._activebookmark
1028 else:
1028 else:
1029 uprev = destrepo._bookmarks[b'@']
1029 uprev = destrepo._bookmarks[b'@']
1030 update = b'@'
1030 update = b'@'
1031 bn = destrepo[uprev].branch()
1031 bn = destrepo[uprev].branch()
1032 if bn == b'default':
1032 if bn == b'default':
1033 status = _(b"updating to bookmark %s\n" % update)
1033 status = _(b"updating to bookmark %s\n" % update)
1034 else:
1034 else:
1035 status = (
1035 status = (
1036 _(b"updating to bookmark %s on branch %s\n")
1036 _(b"updating to bookmark %s on branch %s\n")
1037 ) % (update, bn)
1037 ) % (update, bn)
1038 except KeyError:
1038 except KeyError:
1039 try:
1039 try:
1040 uprev = destrepo.branchtip(b'default')
1040 uprev = destrepo.branchtip(b'default')
1041 except error.RepoLookupError:
1041 except error.RepoLookupError:
1042 uprev = destrepo.lookup(b'tip')
1042 uprev = destrepo.lookup(b'tip')
1043 if not status:
1043 if not status:
1044 bn = destrepo[uprev].branch()
1044 bn = destrepo[uprev].branch()
1045 status = _(b"updating to branch %s\n") % bn
1045 status = _(b"updating to branch %s\n") % bn
1046 destrepo.ui.status(status)
1046 destrepo.ui.status(status)
1047 _update(destrepo, uprev)
1047 _update(destrepo, uprev)
1048 if update in destrepo._bookmarks:
1048 if update in destrepo._bookmarks:
1049 bookmarks.activate(destrepo, update)
1049 bookmarks.activate(destrepo, update)
1050 if destlock is not None:
1050 if destlock is not None:
1051 release(destlock)
1051 release(destlock)
1052 # here is a tiny windows were someone could end up writing the
1052 # here is a tiny windows were someone could end up writing the
1053 # repository before the cache are sure to be warm. This is "fine"
1053 # repository before the cache are sure to be warm. This is "fine"
1054 # as the only "bad" outcome would be some slowness. That potential
1054 # as the only "bad" outcome would be some slowness. That potential
1055 # slowness already affect reader.
1055 # slowness already affect reader.
1056 with destrepo.lock():
1056 with destrepo.lock():
1057 destrepo.updatecaches(full=True)
1057 destrepo.updatecaches(full=b"post-clone")
1058 finally:
1058 finally:
1059 release(srclock, destlock)
1059 release(srclock, destlock)
1060 if cleandir is not None:
1060 if cleandir is not None:
1061 shutil.rmtree(cleandir, True)
1061 shutil.rmtree(cleandir, True)
1062 if srcpeer is not None:
1062 if srcpeer is not None:
1063 srcpeer.close()
1063 srcpeer.close()
1064 if destpeer and destpeer.local() is None:
1064 if destpeer and destpeer.local() is None:
1065 destpeer.close()
1065 destpeer.close()
1066 return srcpeer, destpeer
1066 return srcpeer, destpeer
1067
1067
1068
1068
1069 def _showstats(repo, stats, quietempty=False):
1069 def _showstats(repo, stats, quietempty=False):
1070 if quietempty and stats.isempty():
1070 if quietempty and stats.isempty():
1071 return
1071 return
1072 repo.ui.status(
1072 repo.ui.status(
1073 _(
1073 _(
1074 b"%d files updated, %d files merged, "
1074 b"%d files updated, %d files merged, "
1075 b"%d files removed, %d files unresolved\n"
1075 b"%d files removed, %d files unresolved\n"
1076 )
1076 )
1077 % (
1077 % (
1078 stats.updatedcount,
1078 stats.updatedcount,
1079 stats.mergedcount,
1079 stats.mergedcount,
1080 stats.removedcount,
1080 stats.removedcount,
1081 stats.unresolvedcount,
1081 stats.unresolvedcount,
1082 )
1082 )
1083 )
1083 )
1084
1084
1085
1085
1086 def updaterepo(repo, node, overwrite, updatecheck=None):
1086 def updaterepo(repo, node, overwrite, updatecheck=None):
1087 """Update the working directory to node.
1087 """Update the working directory to node.
1088
1088
1089 When overwrite is set, changes are clobbered, merged else
1089 When overwrite is set, changes are clobbered, merged else
1090
1090
1091 returns stats (see pydoc mercurial.merge.applyupdates)"""
1091 returns stats (see pydoc mercurial.merge.applyupdates)"""
1092 repo.ui.deprecwarn(
1092 repo.ui.deprecwarn(
1093 b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
1093 b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
1094 b'5.7',
1094 b'5.7',
1095 )
1095 )
1096 return mergemod._update(
1096 return mergemod._update(
1097 repo,
1097 repo,
1098 node,
1098 node,
1099 branchmerge=False,
1099 branchmerge=False,
1100 force=overwrite,
1100 force=overwrite,
1101 labels=[b'working copy', b'destination'],
1101 labels=[b'working copy', b'destination'],
1102 updatecheck=updatecheck,
1102 updatecheck=updatecheck,
1103 )
1103 )
1104
1104
1105
1105
1106 def update(repo, node, quietempty=False, updatecheck=None):
1106 def update(repo, node, quietempty=False, updatecheck=None):
1107 """update the working directory to node"""
1107 """update the working directory to node"""
1108 stats = mergemod.update(repo[node], updatecheck=updatecheck)
1108 stats = mergemod.update(repo[node], updatecheck=updatecheck)
1109 _showstats(repo, stats, quietempty)
1109 _showstats(repo, stats, quietempty)
1110 if stats.unresolvedcount:
1110 if stats.unresolvedcount:
1111 repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
1111 repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
1112 return stats.unresolvedcount > 0
1112 return stats.unresolvedcount > 0
1113
1113
1114
1114
1115 # naming conflict in clone()
1115 # naming conflict in clone()
1116 _update = update
1116 _update = update
1117
1117
1118
1118
1119 def clean(repo, node, show_stats=True, quietempty=False):
1119 def clean(repo, node, show_stats=True, quietempty=False):
1120 """forcibly switch the working directory to node, clobbering changes"""
1120 """forcibly switch the working directory to node, clobbering changes"""
1121 stats = mergemod.clean_update(repo[node])
1121 stats = mergemod.clean_update(repo[node])
1122 assert stats.unresolvedcount == 0
1122 assert stats.unresolvedcount == 0
1123 if show_stats:
1123 if show_stats:
1124 _showstats(repo, stats, quietempty)
1124 _showstats(repo, stats, quietempty)
1125 return False
1125 return False
1126
1126
1127
1127
1128 # naming conflict in updatetotally()
1128 # naming conflict in updatetotally()
1129 _clean = clean
1129 _clean = clean
1130
1130
1131 _VALID_UPDATECHECKS = {
1131 _VALID_UPDATECHECKS = {
1132 mergemod.UPDATECHECK_ABORT,
1132 mergemod.UPDATECHECK_ABORT,
1133 mergemod.UPDATECHECK_NONE,
1133 mergemod.UPDATECHECK_NONE,
1134 mergemod.UPDATECHECK_LINEAR,
1134 mergemod.UPDATECHECK_LINEAR,
1135 mergemod.UPDATECHECK_NO_CONFLICT,
1135 mergemod.UPDATECHECK_NO_CONFLICT,
1136 }
1136 }
1137
1137
1138
1138
1139 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
1139 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
1140 """Update the working directory with extra care for non-file components
1140 """Update the working directory with extra care for non-file components
1141
1141
1142 This takes care of non-file components below:
1142 This takes care of non-file components below:
1143
1143
1144 :bookmark: might be advanced or (in)activated
1144 :bookmark: might be advanced or (in)activated
1145
1145
1146 This takes arguments below:
1146 This takes arguments below:
1147
1147
1148 :checkout: to which revision the working directory is updated
1148 :checkout: to which revision the working directory is updated
1149 :brev: a name, which might be a bookmark to be activated after updating
1149 :brev: a name, which might be a bookmark to be activated after updating
1150 :clean: whether changes in the working directory can be discarded
1150 :clean: whether changes in the working directory can be discarded
1151 :updatecheck: how to deal with a dirty working directory
1151 :updatecheck: how to deal with a dirty working directory
1152
1152
1153 Valid values for updatecheck are the UPDATECHECK_* constants
1153 Valid values for updatecheck are the UPDATECHECK_* constants
1154 defined in the merge module. Passing `None` will result in using the
1154 defined in the merge module. Passing `None` will result in using the
1155 configured default.
1155 configured default.
1156
1156
1157 * ABORT: abort if the working directory is dirty
1157 * ABORT: abort if the working directory is dirty
1158 * NONE: don't check (merge working directory changes into destination)
1158 * NONE: don't check (merge working directory changes into destination)
1159 * LINEAR: check that update is linear before merging working directory
1159 * LINEAR: check that update is linear before merging working directory
1160 changes into destination
1160 changes into destination
1161 * NO_CONFLICT: check that the update does not result in file merges
1161 * NO_CONFLICT: check that the update does not result in file merges
1162
1162
1163 This returns whether conflict is detected at updating or not.
1163 This returns whether conflict is detected at updating or not.
1164 """
1164 """
1165 if updatecheck is None:
1165 if updatecheck is None:
1166 updatecheck = ui.config(b'commands', b'update.check')
1166 updatecheck = ui.config(b'commands', b'update.check')
1167 if updatecheck not in _VALID_UPDATECHECKS:
1167 if updatecheck not in _VALID_UPDATECHECKS:
1168 # If not configured, or invalid value configured
1168 # If not configured, or invalid value configured
1169 updatecheck = mergemod.UPDATECHECK_LINEAR
1169 updatecheck = mergemod.UPDATECHECK_LINEAR
1170 if updatecheck not in _VALID_UPDATECHECKS:
1170 if updatecheck not in _VALID_UPDATECHECKS:
1171 raise ValueError(
1171 raise ValueError(
1172 r'Invalid updatecheck value %r (can accept %r)'
1172 r'Invalid updatecheck value %r (can accept %r)'
1173 % (updatecheck, _VALID_UPDATECHECKS)
1173 % (updatecheck, _VALID_UPDATECHECKS)
1174 )
1174 )
1175 with repo.wlock():
1175 with repo.wlock():
1176 movemarkfrom = None
1176 movemarkfrom = None
1177 warndest = False
1177 warndest = False
1178 if checkout is None:
1178 if checkout is None:
1179 updata = destutil.destupdate(repo, clean=clean)
1179 updata = destutil.destupdate(repo, clean=clean)
1180 checkout, movemarkfrom, brev = updata
1180 checkout, movemarkfrom, brev = updata
1181 warndest = True
1181 warndest = True
1182
1182
1183 if clean:
1183 if clean:
1184 ret = _clean(repo, checkout)
1184 ret = _clean(repo, checkout)
1185 else:
1185 else:
1186 if updatecheck == mergemod.UPDATECHECK_ABORT:
1186 if updatecheck == mergemod.UPDATECHECK_ABORT:
1187 cmdutil.bailifchanged(repo, merge=False)
1187 cmdutil.bailifchanged(repo, merge=False)
1188 updatecheck = mergemod.UPDATECHECK_NONE
1188 updatecheck = mergemod.UPDATECHECK_NONE
1189 ret = _update(repo, checkout, updatecheck=updatecheck)
1189 ret = _update(repo, checkout, updatecheck=updatecheck)
1190
1190
1191 if not ret and movemarkfrom:
1191 if not ret and movemarkfrom:
1192 if movemarkfrom == repo[b'.'].node():
1192 if movemarkfrom == repo[b'.'].node():
1193 pass # no-op update
1193 pass # no-op update
1194 elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
1194 elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
1195 b = ui.label(repo._activebookmark, b'bookmarks.active')
1195 b = ui.label(repo._activebookmark, b'bookmarks.active')
1196 ui.status(_(b"updating bookmark %s\n") % b)
1196 ui.status(_(b"updating bookmark %s\n") % b)
1197 else:
1197 else:
1198 # this can happen with a non-linear update
1198 # this can happen with a non-linear update
1199 b = ui.label(repo._activebookmark, b'bookmarks')
1199 b = ui.label(repo._activebookmark, b'bookmarks')
1200 ui.status(_(b"(leaving bookmark %s)\n") % b)
1200 ui.status(_(b"(leaving bookmark %s)\n") % b)
1201 bookmarks.deactivate(repo)
1201 bookmarks.deactivate(repo)
1202 elif brev in repo._bookmarks:
1202 elif brev in repo._bookmarks:
1203 if brev != repo._activebookmark:
1203 if brev != repo._activebookmark:
1204 b = ui.label(brev, b'bookmarks.active')
1204 b = ui.label(brev, b'bookmarks.active')
1205 ui.status(_(b"(activating bookmark %s)\n") % b)
1205 ui.status(_(b"(activating bookmark %s)\n") % b)
1206 bookmarks.activate(repo, brev)
1206 bookmarks.activate(repo, brev)
1207 elif brev:
1207 elif brev:
1208 if repo._activebookmark:
1208 if repo._activebookmark:
1209 b = ui.label(repo._activebookmark, b'bookmarks')
1209 b = ui.label(repo._activebookmark, b'bookmarks')
1210 ui.status(_(b"(leaving bookmark %s)\n") % b)
1210 ui.status(_(b"(leaving bookmark %s)\n") % b)
1211 bookmarks.deactivate(repo)
1211 bookmarks.deactivate(repo)
1212
1212
1213 if warndest:
1213 if warndest:
1214 destutil.statusotherdests(ui, repo)
1214 destutil.statusotherdests(ui, repo)
1215
1215
1216 return ret
1216 return ret
1217
1217
1218
1218
1219 def merge(
1219 def merge(
1220 ctx,
1220 ctx,
1221 force=False,
1221 force=False,
1222 remind=True,
1222 remind=True,
1223 labels=None,
1223 labels=None,
1224 ):
1224 ):
1225 """Branch merge with node, resolving changes. Return true if any
1225 """Branch merge with node, resolving changes. Return true if any
1226 unresolved conflicts."""
1226 unresolved conflicts."""
1227 repo = ctx.repo()
1227 repo = ctx.repo()
1228 stats = mergemod.merge(ctx, force=force, labels=labels)
1228 stats = mergemod.merge(ctx, force=force, labels=labels)
1229 _showstats(repo, stats)
1229 _showstats(repo, stats)
1230 if stats.unresolvedcount:
1230 if stats.unresolvedcount:
1231 repo.ui.status(
1231 repo.ui.status(
1232 _(
1232 _(
1233 b"use 'hg resolve' to retry unresolved file merges "
1233 b"use 'hg resolve' to retry unresolved file merges "
1234 b"or 'hg merge --abort' to abandon\n"
1234 b"or 'hg merge --abort' to abandon\n"
1235 )
1235 )
1236 )
1236 )
1237 elif remind:
1237 elif remind:
1238 repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
1238 repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
1239 return stats.unresolvedcount > 0
1239 return stats.unresolvedcount > 0
1240
1240
1241
1241
1242 def abortmerge(ui, repo):
1242 def abortmerge(ui, repo):
1243 ms = mergestatemod.mergestate.read(repo)
1243 ms = mergestatemod.mergestate.read(repo)
1244 if ms.active():
1244 if ms.active():
1245 # there were conflicts
1245 # there were conflicts
1246 node = ms.localctx.hex()
1246 node = ms.localctx.hex()
1247 else:
1247 else:
1248 # there were no conficts, mergestate was not stored
1248 # there were no conficts, mergestate was not stored
1249 node = repo[b'.'].hex()
1249 node = repo[b'.'].hex()
1250
1250
1251 repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
1251 repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
1252 stats = mergemod.clean_update(repo[node])
1252 stats = mergemod.clean_update(repo[node])
1253 assert stats.unresolvedcount == 0
1253 assert stats.unresolvedcount == 0
1254 _showstats(repo, stats)
1254 _showstats(repo, stats)
1255
1255
1256
1256
1257 def _incoming(
1257 def _incoming(
1258 displaychlist,
1258 displaychlist,
1259 subreporecurse,
1259 subreporecurse,
1260 ui,
1260 ui,
1261 repo,
1261 repo,
1262 source,
1262 source,
1263 opts,
1263 opts,
1264 buffered=False,
1264 buffered=False,
1265 subpath=None,
1265 subpath=None,
1266 ):
1266 ):
1267 """
1267 """
1268 Helper for incoming / gincoming.
1268 Helper for incoming / gincoming.
1269 displaychlist gets called with
1269 displaychlist gets called with
1270 (remoterepo, incomingchangesetlist, displayer) parameters,
1270 (remoterepo, incomingchangesetlist, displayer) parameters,
1271 and is supposed to contain only code that can't be unified.
1271 and is supposed to contain only code that can't be unified.
1272 """
1272 """
1273 srcs = urlutil.get_pull_paths(repo, ui, [source], opts.get(b'branch'))
1273 srcs = urlutil.get_pull_paths(repo, ui, [source], opts.get(b'branch'))
1274 srcs = list(srcs)
1274 srcs = list(srcs)
1275 if len(srcs) != 1:
1275 if len(srcs) != 1:
1276 msg = _(b'for now, incoming supports only a single source, %d provided')
1276 msg = _(b'for now, incoming supports only a single source, %d provided')
1277 msg %= len(srcs)
1277 msg %= len(srcs)
1278 raise error.Abort(msg)
1278 raise error.Abort(msg)
1279 source, branches = srcs[0]
1279 source, branches = srcs[0]
1280 if subpath is not None:
1280 if subpath is not None:
1281 subpath = urlutil.url(subpath)
1281 subpath = urlutil.url(subpath)
1282 if subpath.isabs():
1282 if subpath.isabs():
1283 source = bytes(subpath)
1283 source = bytes(subpath)
1284 else:
1284 else:
1285 p = urlutil.url(source)
1285 p = urlutil.url(source)
1286 p.path = os.path.normpath(b'%s/%s' % (p.path, subpath))
1286 p.path = os.path.normpath(b'%s/%s' % (p.path, subpath))
1287 source = bytes(p)
1287 source = bytes(p)
1288 other = peer(repo, opts, source)
1288 other = peer(repo, opts, source)
1289 cleanupfn = other.close
1289 cleanupfn = other.close
1290 try:
1290 try:
1291 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
1291 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
1292 revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))
1292 revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))
1293
1293
1294 if revs:
1294 if revs:
1295 revs = [other.lookup(rev) for rev in revs]
1295 revs = [other.lookup(rev) for rev in revs]
1296 other, chlist, cleanupfn = bundlerepo.getremotechanges(
1296 other, chlist, cleanupfn = bundlerepo.getremotechanges(
1297 ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
1297 ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
1298 )
1298 )
1299
1299
1300 if not chlist:
1300 if not chlist:
1301 ui.status(_(b"no changes found\n"))
1301 ui.status(_(b"no changes found\n"))
1302 return subreporecurse()
1302 return subreporecurse()
1303 ui.pager(b'incoming')
1303 ui.pager(b'incoming')
1304 displayer = logcmdutil.changesetdisplayer(
1304 displayer = logcmdutil.changesetdisplayer(
1305 ui, other, opts, buffered=buffered
1305 ui, other, opts, buffered=buffered
1306 )
1306 )
1307 displaychlist(other, chlist, displayer)
1307 displaychlist(other, chlist, displayer)
1308 displayer.close()
1308 displayer.close()
1309 finally:
1309 finally:
1310 cleanupfn()
1310 cleanupfn()
1311 subreporecurse()
1311 subreporecurse()
1312 return 0 # exit code is zero since we found incoming changes
1312 return 0 # exit code is zero since we found incoming changes
1313
1313
1314
1314
1315 def incoming(ui, repo, source, opts, subpath=None):
1315 def incoming(ui, repo, source, opts, subpath=None):
1316 def subreporecurse():
1316 def subreporecurse():
1317 ret = 1
1317 ret = 1
1318 if opts.get(b'subrepos'):
1318 if opts.get(b'subrepos'):
1319 ctx = repo[None]
1319 ctx = repo[None]
1320 for subpath in sorted(ctx.substate):
1320 for subpath in sorted(ctx.substate):
1321 sub = ctx.sub(subpath)
1321 sub = ctx.sub(subpath)
1322 ret = min(ret, sub.incoming(ui, source, opts))
1322 ret = min(ret, sub.incoming(ui, source, opts))
1323 return ret
1323 return ret
1324
1324
1325 def display(other, chlist, displayer):
1325 def display(other, chlist, displayer):
1326 limit = logcmdutil.getlimit(opts)
1326 limit = logcmdutil.getlimit(opts)
1327 if opts.get(b'newest_first'):
1327 if opts.get(b'newest_first'):
1328 chlist.reverse()
1328 chlist.reverse()
1329 count = 0
1329 count = 0
1330 for n in chlist:
1330 for n in chlist:
1331 if limit is not None and count >= limit:
1331 if limit is not None and count >= limit:
1332 break
1332 break
1333 parents = [
1333 parents = [
1334 p for p in other.changelog.parents(n) if p != repo.nullid
1334 p for p in other.changelog.parents(n) if p != repo.nullid
1335 ]
1335 ]
1336 if opts.get(b'no_merges') and len(parents) == 2:
1336 if opts.get(b'no_merges') and len(parents) == 2:
1337 continue
1337 continue
1338 count += 1
1338 count += 1
1339 displayer.show(other[n])
1339 displayer.show(other[n])
1340
1340
1341 return _incoming(
1341 return _incoming(
1342 display, subreporecurse, ui, repo, source, opts, subpath=subpath
1342 display, subreporecurse, ui, repo, source, opts, subpath=subpath
1343 )
1343 )
1344
1344
1345
1345
1346 def _outgoing(ui, repo, dests, opts, subpath=None):
1346 def _outgoing(ui, repo, dests, opts, subpath=None):
1347 out = set()
1347 out = set()
1348 others = []
1348 others = []
1349 for path in urlutil.get_push_paths(repo, ui, dests):
1349 for path in urlutil.get_push_paths(repo, ui, dests):
1350 dest = path.pushloc or path.loc
1350 dest = path.pushloc or path.loc
1351 if subpath is not None:
1351 if subpath is not None:
1352 subpath = urlutil.url(subpath)
1352 subpath = urlutil.url(subpath)
1353 if subpath.isabs():
1353 if subpath.isabs():
1354 dest = bytes(subpath)
1354 dest = bytes(subpath)
1355 else:
1355 else:
1356 p = urlutil.url(dest)
1356 p = urlutil.url(dest)
1357 p.path = os.path.normpath(b'%s/%s' % (p.path, subpath))
1357 p.path = os.path.normpath(b'%s/%s' % (p.path, subpath))
1358 dest = bytes(p)
1358 dest = bytes(p)
1359 branches = path.branch, opts.get(b'branch') or []
1359 branches = path.branch, opts.get(b'branch') or []
1360
1360
1361 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
1361 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
1362 revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
1362 revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
1363 if revs:
1363 if revs:
1364 revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]
1364 revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]
1365
1365
1366 other = peer(repo, opts, dest)
1366 other = peer(repo, opts, dest)
1367 try:
1367 try:
1368 outgoing = discovery.findcommonoutgoing(
1368 outgoing = discovery.findcommonoutgoing(
1369 repo, other, revs, force=opts.get(b'force')
1369 repo, other, revs, force=opts.get(b'force')
1370 )
1370 )
1371 o = outgoing.missing
1371 o = outgoing.missing
1372 out.update(o)
1372 out.update(o)
1373 if not o:
1373 if not o:
1374 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
1374 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
1375 others.append(other)
1375 others.append(other)
1376 except: # re-raises
1376 except: # re-raises
1377 other.close()
1377 other.close()
1378 raise
1378 raise
1379 # make sure this is ordered by revision number
1379 # make sure this is ordered by revision number
1380 outgoing_revs = list(out)
1380 outgoing_revs = list(out)
1381 cl = repo.changelog
1381 cl = repo.changelog
1382 outgoing_revs.sort(key=cl.rev)
1382 outgoing_revs.sort(key=cl.rev)
1383 return outgoing_revs, others
1383 return outgoing_revs, others
1384
1384
1385
1385
1386 def _outgoing_recurse(ui, repo, dests, opts):
1386 def _outgoing_recurse(ui, repo, dests, opts):
1387 ret = 1
1387 ret = 1
1388 if opts.get(b'subrepos'):
1388 if opts.get(b'subrepos'):
1389 ctx = repo[None]
1389 ctx = repo[None]
1390 for subpath in sorted(ctx.substate):
1390 for subpath in sorted(ctx.substate):
1391 sub = ctx.sub(subpath)
1391 sub = ctx.sub(subpath)
1392 ret = min(ret, sub.outgoing(ui, dests, opts))
1392 ret = min(ret, sub.outgoing(ui, dests, opts))
1393 return ret
1393 return ret
1394
1394
1395
1395
1396 def _outgoing_filter(repo, revs, opts):
1396 def _outgoing_filter(repo, revs, opts):
1397 """apply revision filtering/ordering option for outgoing"""
1397 """apply revision filtering/ordering option for outgoing"""
1398 limit = logcmdutil.getlimit(opts)
1398 limit = logcmdutil.getlimit(opts)
1399 no_merges = opts.get(b'no_merges')
1399 no_merges = opts.get(b'no_merges')
1400 if opts.get(b'newest_first'):
1400 if opts.get(b'newest_first'):
1401 revs.reverse()
1401 revs.reverse()
1402 if limit is None and not no_merges:
1402 if limit is None and not no_merges:
1403 for r in revs:
1403 for r in revs:
1404 yield r
1404 yield r
1405 return
1405 return
1406
1406
1407 count = 0
1407 count = 0
1408 cl = repo.changelog
1408 cl = repo.changelog
1409 for n in revs:
1409 for n in revs:
1410 if limit is not None and count >= limit:
1410 if limit is not None and count >= limit:
1411 break
1411 break
1412 parents = [p for p in cl.parents(n) if p != repo.nullid]
1412 parents = [p for p in cl.parents(n) if p != repo.nullid]
1413 if no_merges and len(parents) == 2:
1413 if no_merges and len(parents) == 2:
1414 continue
1414 continue
1415 count += 1
1415 count += 1
1416 yield n
1416 yield n
1417
1417
1418
1418
1419 def outgoing(ui, repo, dests, opts, subpath=None):
1419 def outgoing(ui, repo, dests, opts, subpath=None):
1420 if opts.get(b'graph'):
1420 if opts.get(b'graph'):
1421 logcmdutil.checkunsupportedgraphflags([], opts)
1421 logcmdutil.checkunsupportedgraphflags([], opts)
1422 o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
1422 o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
1423 ret = 1
1423 ret = 1
1424 try:
1424 try:
1425 if o:
1425 if o:
1426 ret = 0
1426 ret = 0
1427
1427
1428 if opts.get(b'graph'):
1428 if opts.get(b'graph'):
1429 revdag = logcmdutil.graphrevs(repo, o, opts)
1429 revdag = logcmdutil.graphrevs(repo, o, opts)
1430 ui.pager(b'outgoing')
1430 ui.pager(b'outgoing')
1431 displayer = logcmdutil.changesetdisplayer(
1431 displayer = logcmdutil.changesetdisplayer(
1432 ui, repo, opts, buffered=True
1432 ui, repo, opts, buffered=True
1433 )
1433 )
1434 logcmdutil.displaygraph(
1434 logcmdutil.displaygraph(
1435 ui, repo, revdag, displayer, graphmod.asciiedges
1435 ui, repo, revdag, displayer, graphmod.asciiedges
1436 )
1436 )
1437 else:
1437 else:
1438 ui.pager(b'outgoing')
1438 ui.pager(b'outgoing')
1439 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
1439 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
1440 for n in _outgoing_filter(repo, o, opts):
1440 for n in _outgoing_filter(repo, o, opts):
1441 displayer.show(repo[n])
1441 displayer.show(repo[n])
1442 displayer.close()
1442 displayer.close()
1443 for oth in others:
1443 for oth in others:
1444 cmdutil.outgoinghooks(ui, repo, oth, opts, o)
1444 cmdutil.outgoinghooks(ui, repo, oth, opts, o)
1445 ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
1445 ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
1446 return ret # exit code is zero since we found outgoing changes
1446 return ret # exit code is zero since we found outgoing changes
1447 finally:
1447 finally:
1448 for oth in others:
1448 for oth in others:
1449 oth.close()
1449 oth.close()
1450
1450
1451
1451
1452 def verify(repo, level=None):
1452 def verify(repo, level=None):
1453 """verify the consistency of a repository"""
1453 """verify the consistency of a repository"""
1454 ret = verifymod.verify(repo, level=level)
1454 ret = verifymod.verify(repo, level=level)
1455
1455
1456 # Broken subrepo references in hidden csets don't seem worth worrying about,
1456 # Broken subrepo references in hidden csets don't seem worth worrying about,
1457 # since they can't be pushed/pulled, and --hidden can be used if they are a
1457 # since they can't be pushed/pulled, and --hidden can be used if they are a
1458 # concern.
1458 # concern.
1459
1459
1460 # pathto() is needed for -R case
1460 # pathto() is needed for -R case
1461 revs = repo.revs(
1461 revs = repo.revs(
1462 b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
1462 b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
1463 )
1463 )
1464
1464
1465 if revs:
1465 if revs:
1466 repo.ui.status(_(b'checking subrepo links\n'))
1466 repo.ui.status(_(b'checking subrepo links\n'))
1467 for rev in revs:
1467 for rev in revs:
1468 ctx = repo[rev]
1468 ctx = repo[rev]
1469 try:
1469 try:
1470 for subpath in ctx.substate:
1470 for subpath in ctx.substate:
1471 try:
1471 try:
1472 ret = (
1472 ret = (
1473 ctx.sub(subpath, allowcreate=False).verify() or ret
1473 ctx.sub(subpath, allowcreate=False).verify() or ret
1474 )
1474 )
1475 except error.RepoError as e:
1475 except error.RepoError as e:
1476 repo.ui.warn(b'%d: %s\n' % (rev, e))
1476 repo.ui.warn(b'%d: %s\n' % (rev, e))
1477 except Exception:
1477 except Exception:
1478 repo.ui.warn(
1478 repo.ui.warn(
1479 _(b'.hgsubstate is corrupt in revision %s\n')
1479 _(b'.hgsubstate is corrupt in revision %s\n')
1480 % short(ctx.node())
1480 % short(ctx.node())
1481 )
1481 )
1482
1482
1483 return ret
1483 return ret
1484
1484
1485
1485
1486 def remoteui(src, opts):
1486 def remoteui(src, opts):
1487 """build a remote ui from ui or repo and opts"""
1487 """build a remote ui from ui or repo and opts"""
1488 if util.safehasattr(src, b'baseui'): # looks like a repository
1488 if util.safehasattr(src, b'baseui'): # looks like a repository
1489 dst = src.baseui.copy() # drop repo-specific config
1489 dst = src.baseui.copy() # drop repo-specific config
1490 src = src.ui # copy target options from repo
1490 src = src.ui # copy target options from repo
1491 else: # assume it's a global ui object
1491 else: # assume it's a global ui object
1492 dst = src.copy() # keep all global options
1492 dst = src.copy() # keep all global options
1493
1493
1494 # copy ssh-specific options
1494 # copy ssh-specific options
1495 for o in b'ssh', b'remotecmd':
1495 for o in b'ssh', b'remotecmd':
1496 v = opts.get(o) or src.config(b'ui', o)
1496 v = opts.get(o) or src.config(b'ui', o)
1497 if v:
1497 if v:
1498 dst.setconfig(b"ui", o, v, b'copied')
1498 dst.setconfig(b"ui", o, v, b'copied')
1499
1499
1500 # copy bundle-specific options
1500 # copy bundle-specific options
1501 r = src.config(b'bundle', b'mainreporoot')
1501 r = src.config(b'bundle', b'mainreporoot')
1502 if r:
1502 if r:
1503 dst.setconfig(b'bundle', b'mainreporoot', r, b'copied')
1503 dst.setconfig(b'bundle', b'mainreporoot', r, b'copied')
1504
1504
1505 # copy selected local settings to the remote ui
1505 # copy selected local settings to the remote ui
1506 for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
1506 for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
1507 for key, val in src.configitems(sect):
1507 for key, val in src.configitems(sect):
1508 dst.setconfig(sect, key, val, b'copied')
1508 dst.setconfig(sect, key, val, b'copied')
1509 v = src.config(b'web', b'cacerts')
1509 v = src.config(b'web', b'cacerts')
1510 if v:
1510 if v:
1511 dst.setconfig(b'web', b'cacerts', util.expandpath(v), b'copied')
1511 dst.setconfig(b'web', b'cacerts', util.expandpath(v), b'copied')
1512
1512
1513 return dst
1513 return dst
1514
1514
1515
1515
1516 # Files of interest
1516 # Files of interest
1517 # Used to check if the repository has changed looking at mtime and size of
1517 # Used to check if the repository has changed looking at mtime and size of
1518 # these files.
1518 # these files.
1519 foi = [
1519 foi = [
1520 (b'spath', b'00changelog.i'),
1520 (b'spath', b'00changelog.i'),
1521 (b'spath', b'phaseroots'), # ! phase can change content at the same size
1521 (b'spath', b'phaseroots'), # ! phase can change content at the same size
1522 (b'spath', b'obsstore'),
1522 (b'spath', b'obsstore'),
1523 (b'path', b'bookmarks'), # ! bookmark can change content at the same size
1523 (b'path', b'bookmarks'), # ! bookmark can change content at the same size
1524 ]
1524 ]
1525
1525
1526
1526
1527 class cachedlocalrepo(object):
1527 class cachedlocalrepo(object):
1528 """Holds a localrepository that can be cached and reused."""
1528 """Holds a localrepository that can be cached and reused."""
1529
1529
1530 def __init__(self, repo):
1530 def __init__(self, repo):
1531 """Create a new cached repo from an existing repo.
1531 """Create a new cached repo from an existing repo.
1532
1532
1533 We assume the passed in repo was recently created. If the
1533 We assume the passed in repo was recently created. If the
1534 repo has changed between when it was created and when it was
1534 repo has changed between when it was created and when it was
1535 turned into a cache, it may not refresh properly.
1535 turned into a cache, it may not refresh properly.
1536 """
1536 """
1537 assert isinstance(repo, localrepo.localrepository)
1537 assert isinstance(repo, localrepo.localrepository)
1538 self._repo = repo
1538 self._repo = repo
1539 self._state, self.mtime = self._repostate()
1539 self._state, self.mtime = self._repostate()
1540 self._filtername = repo.filtername
1540 self._filtername = repo.filtername
1541
1541
1542 def fetch(self):
1542 def fetch(self):
1543 """Refresh (if necessary) and return a repository.
1543 """Refresh (if necessary) and return a repository.
1544
1544
1545 If the cached instance is out of date, it will be recreated
1545 If the cached instance is out of date, it will be recreated
1546 automatically and returned.
1546 automatically and returned.
1547
1547
1548 Returns a tuple of the repo and a boolean indicating whether a new
1548 Returns a tuple of the repo and a boolean indicating whether a new
1549 repo instance was created.
1549 repo instance was created.
1550 """
1550 """
1551 # We compare the mtimes and sizes of some well-known files to
1551 # We compare the mtimes and sizes of some well-known files to
1552 # determine if the repo changed. This is not precise, as mtimes
1552 # determine if the repo changed. This is not precise, as mtimes
1553 # are susceptible to clock skew and imprecise filesystems and
1553 # are susceptible to clock skew and imprecise filesystems and
1554 # file content can change while maintaining the same size.
1554 # file content can change while maintaining the same size.
1555
1555
1556 state, mtime = self._repostate()
1556 state, mtime = self._repostate()
1557 if state == self._state:
1557 if state == self._state:
1558 return self._repo, False
1558 return self._repo, False
1559
1559
1560 repo = repository(self._repo.baseui, self._repo.url())
1560 repo = repository(self._repo.baseui, self._repo.url())
1561 if self._filtername:
1561 if self._filtername:
1562 self._repo = repo.filtered(self._filtername)
1562 self._repo = repo.filtered(self._filtername)
1563 else:
1563 else:
1564 self._repo = repo.unfiltered()
1564 self._repo = repo.unfiltered()
1565 self._state = state
1565 self._state = state
1566 self.mtime = mtime
1566 self.mtime = mtime
1567
1567
1568 return self._repo, True
1568 return self._repo, True
1569
1569
1570 def _repostate(self):
1570 def _repostate(self):
1571 state = []
1571 state = []
1572 maxmtime = -1
1572 maxmtime = -1
1573 for attr, fname in foi:
1573 for attr, fname in foi:
1574 prefix = getattr(self._repo, attr)
1574 prefix = getattr(self._repo, attr)
1575 p = os.path.join(prefix, fname)
1575 p = os.path.join(prefix, fname)
1576 try:
1576 try:
1577 st = os.stat(p)
1577 st = os.stat(p)
1578 except OSError:
1578 except OSError:
1579 st = os.stat(prefix)
1579 st = os.stat(prefix)
1580 state.append((st[stat.ST_MTIME], st.st_size))
1580 state.append((st[stat.ST_MTIME], st.st_size))
1581 maxmtime = max(maxmtime, st[stat.ST_MTIME])
1581 maxmtime = max(maxmtime, st[stat.ST_MTIME])
1582
1582
1583 return tuple(state), maxmtime
1583 return tuple(state), maxmtime
1584
1584
1585 def copy(self):
1585 def copy(self):
1586 """Obtain a copy of this class instance.
1586 """Obtain a copy of this class instance.
1587
1587
1588 A new localrepository instance is obtained. The new instance should be
1588 A new localrepository instance is obtained. The new instance should be
1589 completely independent of the original.
1589 completely independent of the original.
1590 """
1590 """
1591 repo = repository(self._repo.baseui, self._repo.origroot)
1591 repo = repository(self._repo.baseui, self._repo.origroot)
1592 if self._filtername:
1592 if self._filtername:
1593 repo = repo.filtered(self._filtername)
1593 repo = repo.filtered(self._filtername)
1594 else:
1594 else:
1595 repo = repo.unfiltered()
1595 repo = repo.unfiltered()
1596 c = cachedlocalrepo(repo)
1596 c = cachedlocalrepo(repo)
1597 c._state = self._state
1597 c._state = self._state
1598 c.mtime = self.mtime
1598 c.mtime = self.mtime
1599 return c
1599 return c
@@ -1,3763 +1,3769 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import functools
11 import functools
12 import os
12 import os
13 import random
13 import random
14 import sys
14 import sys
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 bin,
20 bin,
21 hex,
21 hex,
22 nullrev,
22 nullrev,
23 sha1nodeconstants,
23 sha1nodeconstants,
24 short,
24 short,
25 )
25 )
26 from .pycompat import (
26 from .pycompat import (
27 delattr,
27 delattr,
28 getattr,
28 getattr,
29 )
29 )
30 from . import (
30 from . import (
31 bookmarks,
31 bookmarks,
32 branchmap,
32 branchmap,
33 bundle2,
33 bundle2,
34 bundlecaches,
34 bundlecaches,
35 changegroup,
35 changegroup,
36 color,
36 color,
37 commit,
37 commit,
38 context,
38 context,
39 dirstate,
39 dirstate,
40 dirstateguard,
40 dirstateguard,
41 discovery,
41 discovery,
42 encoding,
42 encoding,
43 error,
43 error,
44 exchange,
44 exchange,
45 extensions,
45 extensions,
46 filelog,
46 filelog,
47 hook,
47 hook,
48 lock as lockmod,
48 lock as lockmod,
49 match as matchmod,
49 match as matchmod,
50 mergestate as mergestatemod,
50 mergestate as mergestatemod,
51 mergeutil,
51 mergeutil,
52 namespaces,
52 namespaces,
53 narrowspec,
53 narrowspec,
54 obsolete,
54 obsolete,
55 pathutil,
55 pathutil,
56 phases,
56 phases,
57 pushkey,
57 pushkey,
58 pycompat,
58 pycompat,
59 rcutil,
59 rcutil,
60 repoview,
60 repoview,
61 requirements as requirementsmod,
61 requirements as requirementsmod,
62 revlog,
62 revlog,
63 revset,
63 revset,
64 revsetlang,
64 revsetlang,
65 scmutil,
65 scmutil,
66 sparse,
66 sparse,
67 store as storemod,
67 store as storemod,
68 subrepoutil,
68 subrepoutil,
69 tags as tagsmod,
69 tags as tagsmod,
70 transaction,
70 transaction,
71 txnutil,
71 txnutil,
72 util,
72 util,
73 vfs as vfsmod,
73 vfs as vfsmod,
74 wireprototypes,
74 wireprototypes,
75 )
75 )
76
76
77 from .interfaces import (
77 from .interfaces import (
78 repository,
78 repository,
79 util as interfaceutil,
79 util as interfaceutil,
80 )
80 )
81
81
82 from .utils import (
82 from .utils import (
83 hashutil,
83 hashutil,
84 procutil,
84 procutil,
85 stringutil,
85 stringutil,
86 urlutil,
86 urlutil,
87 )
87 )
88
88
89 from .revlogutils import (
89 from .revlogutils import (
90 concurrency_checker as revlogchecker,
90 concurrency_checker as revlogchecker,
91 constants as revlogconst,
91 constants as revlogconst,
92 sidedata as sidedatamod,
92 sidedata as sidedatamod,
93 )
93 )
94
94
95 release = lockmod.release
95 release = lockmod.release
96 urlerr = util.urlerr
96 urlerr = util.urlerr
97 urlreq = util.urlreq
97 urlreq = util.urlreq
98
98
99 # set of (path, vfs-location) tuples. vfs-location is:
99 # set of (path, vfs-location) tuples. vfs-location is:
100 # - 'plain for vfs relative paths
100 # - 'plain for vfs relative paths
101 # - '' for svfs relative paths
101 # - '' for svfs relative paths
102 _cachedfiles = set()
102 _cachedfiles = set()
103
103
104
104
105 class _basefilecache(scmutil.filecache):
105 class _basefilecache(scmutil.filecache):
106 """All filecache usage on repo are done for logic that should be unfiltered"""
106 """All filecache usage on repo are done for logic that should be unfiltered"""
107
107
108 def __get__(self, repo, type=None):
108 def __get__(self, repo, type=None):
109 if repo is None:
109 if repo is None:
110 return self
110 return self
111 # proxy to unfiltered __dict__ since filtered repo has no entry
111 # proxy to unfiltered __dict__ since filtered repo has no entry
112 unfi = repo.unfiltered()
112 unfi = repo.unfiltered()
113 try:
113 try:
114 return unfi.__dict__[self.sname]
114 return unfi.__dict__[self.sname]
115 except KeyError:
115 except KeyError:
116 pass
116 pass
117 return super(_basefilecache, self).__get__(unfi, type)
117 return super(_basefilecache, self).__get__(unfi, type)
118
118
119 def set(self, repo, value):
119 def set(self, repo, value):
120 return super(_basefilecache, self).set(repo.unfiltered(), value)
120 return super(_basefilecache, self).set(repo.unfiltered(), value)
121
121
122
122
123 class repofilecache(_basefilecache):
123 class repofilecache(_basefilecache):
124 """filecache for files in .hg but outside of .hg/store"""
124 """filecache for files in .hg but outside of .hg/store"""
125
125
126 def __init__(self, *paths):
126 def __init__(self, *paths):
127 super(repofilecache, self).__init__(*paths)
127 super(repofilecache, self).__init__(*paths)
128 for path in paths:
128 for path in paths:
129 _cachedfiles.add((path, b'plain'))
129 _cachedfiles.add((path, b'plain'))
130
130
131 def join(self, obj, fname):
131 def join(self, obj, fname):
132 return obj.vfs.join(fname)
132 return obj.vfs.join(fname)
133
133
134
134
135 class storecache(_basefilecache):
135 class storecache(_basefilecache):
136 """filecache for files in the store"""
136 """filecache for files in the store"""
137
137
138 def __init__(self, *paths):
138 def __init__(self, *paths):
139 super(storecache, self).__init__(*paths)
139 super(storecache, self).__init__(*paths)
140 for path in paths:
140 for path in paths:
141 _cachedfiles.add((path, b''))
141 _cachedfiles.add((path, b''))
142
142
143 def join(self, obj, fname):
143 def join(self, obj, fname):
144 return obj.sjoin(fname)
144 return obj.sjoin(fname)
145
145
146
146
147 class mixedrepostorecache(_basefilecache):
147 class mixedrepostorecache(_basefilecache):
148 """filecache for a mix files in .hg/store and outside"""
148 """filecache for a mix files in .hg/store and outside"""
149
149
150 def __init__(self, *pathsandlocations):
150 def __init__(self, *pathsandlocations):
151 # scmutil.filecache only uses the path for passing back into our
151 # scmutil.filecache only uses the path for passing back into our
152 # join(), so we can safely pass a list of paths and locations
152 # join(), so we can safely pass a list of paths and locations
153 super(mixedrepostorecache, self).__init__(*pathsandlocations)
153 super(mixedrepostorecache, self).__init__(*pathsandlocations)
154 _cachedfiles.update(pathsandlocations)
154 _cachedfiles.update(pathsandlocations)
155
155
156 def join(self, obj, fnameandlocation):
156 def join(self, obj, fnameandlocation):
157 fname, location = fnameandlocation
157 fname, location = fnameandlocation
158 if location == b'plain':
158 if location == b'plain':
159 return obj.vfs.join(fname)
159 return obj.vfs.join(fname)
160 else:
160 else:
161 if location != b'':
161 if location != b'':
162 raise error.ProgrammingError(
162 raise error.ProgrammingError(
163 b'unexpected location: %s' % location
163 b'unexpected location: %s' % location
164 )
164 )
165 return obj.sjoin(fname)
165 return obj.sjoin(fname)
166
166
167
167
168 def isfilecached(repo, name):
168 def isfilecached(repo, name):
169 """check if a repo has already cached "name" filecache-ed property
169 """check if a repo has already cached "name" filecache-ed property
170
170
171 This returns (cachedobj-or-None, iscached) tuple.
171 This returns (cachedobj-or-None, iscached) tuple.
172 """
172 """
173 cacheentry = repo.unfiltered()._filecache.get(name, None)
173 cacheentry = repo.unfiltered()._filecache.get(name, None)
174 if not cacheentry:
174 if not cacheentry:
175 return None, False
175 return None, False
176 return cacheentry.obj, True
176 return cacheentry.obj, True
177
177
178
178
179 class unfilteredpropertycache(util.propertycache):
179 class unfilteredpropertycache(util.propertycache):
180 """propertycache that apply to unfiltered repo only"""
180 """propertycache that apply to unfiltered repo only"""
181
181
182 def __get__(self, repo, type=None):
182 def __get__(self, repo, type=None):
183 unfi = repo.unfiltered()
183 unfi = repo.unfiltered()
184 if unfi is repo:
184 if unfi is repo:
185 return super(unfilteredpropertycache, self).__get__(unfi)
185 return super(unfilteredpropertycache, self).__get__(unfi)
186 return getattr(unfi, self.name)
186 return getattr(unfi, self.name)
187
187
188
188
189 class filteredpropertycache(util.propertycache):
189 class filteredpropertycache(util.propertycache):
190 """propertycache that must take filtering in account"""
190 """propertycache that must take filtering in account"""
191
191
192 def cachevalue(self, obj, value):
192 def cachevalue(self, obj, value):
193 object.__setattr__(obj, self.name, value)
193 object.__setattr__(obj, self.name, value)
194
194
195
195
196 def hasunfilteredcache(repo, name):
196 def hasunfilteredcache(repo, name):
197 """check if a repo has an unfilteredpropertycache value for <name>"""
197 """check if a repo has an unfilteredpropertycache value for <name>"""
198 return name in vars(repo.unfiltered())
198 return name in vars(repo.unfiltered())
199
199
200
200
201 def unfilteredmethod(orig):
201 def unfilteredmethod(orig):
202 """decorate method that always need to be run on unfiltered version"""
202 """decorate method that always need to be run on unfiltered version"""
203
203
204 @functools.wraps(orig)
204 @functools.wraps(orig)
205 def wrapper(repo, *args, **kwargs):
205 def wrapper(repo, *args, **kwargs):
206 return orig(repo.unfiltered(), *args, **kwargs)
206 return orig(repo.unfiltered(), *args, **kwargs)
207
207
208 return wrapper
208 return wrapper
209
209
210
210
211 moderncaps = {
211 moderncaps = {
212 b'lookup',
212 b'lookup',
213 b'branchmap',
213 b'branchmap',
214 b'pushkey',
214 b'pushkey',
215 b'known',
215 b'known',
216 b'getbundle',
216 b'getbundle',
217 b'unbundle',
217 b'unbundle',
218 }
218 }
219 legacycaps = moderncaps.union({b'changegroupsubset'})
219 legacycaps = moderncaps.union({b'changegroupsubset'})
220
220
221
221
222 @interfaceutil.implementer(repository.ipeercommandexecutor)
222 @interfaceutil.implementer(repository.ipeercommandexecutor)
223 class localcommandexecutor(object):
223 class localcommandexecutor(object):
224 def __init__(self, peer):
224 def __init__(self, peer):
225 self._peer = peer
225 self._peer = peer
226 self._sent = False
226 self._sent = False
227 self._closed = False
227 self._closed = False
228
228
229 def __enter__(self):
229 def __enter__(self):
230 return self
230 return self
231
231
232 def __exit__(self, exctype, excvalue, exctb):
232 def __exit__(self, exctype, excvalue, exctb):
233 self.close()
233 self.close()
234
234
235 def callcommand(self, command, args):
235 def callcommand(self, command, args):
236 if self._sent:
236 if self._sent:
237 raise error.ProgrammingError(
237 raise error.ProgrammingError(
238 b'callcommand() cannot be used after sendcommands()'
238 b'callcommand() cannot be used after sendcommands()'
239 )
239 )
240
240
241 if self._closed:
241 if self._closed:
242 raise error.ProgrammingError(
242 raise error.ProgrammingError(
243 b'callcommand() cannot be used after close()'
243 b'callcommand() cannot be used after close()'
244 )
244 )
245
245
246 # We don't need to support anything fancy. Just call the named
246 # We don't need to support anything fancy. Just call the named
247 # method on the peer and return a resolved future.
247 # method on the peer and return a resolved future.
248 fn = getattr(self._peer, pycompat.sysstr(command))
248 fn = getattr(self._peer, pycompat.sysstr(command))
249
249
250 f = pycompat.futures.Future()
250 f = pycompat.futures.Future()
251
251
252 try:
252 try:
253 result = fn(**pycompat.strkwargs(args))
253 result = fn(**pycompat.strkwargs(args))
254 except Exception:
254 except Exception:
255 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
255 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
256 else:
256 else:
257 f.set_result(result)
257 f.set_result(result)
258
258
259 return f
259 return f
260
260
261 def sendcommands(self):
261 def sendcommands(self):
262 self._sent = True
262 self._sent = True
263
263
264 def close(self):
264 def close(self):
265 self._closed = True
265 self._closed = True
266
266
267
267
268 @interfaceutil.implementer(repository.ipeercommands)
268 @interfaceutil.implementer(repository.ipeercommands)
269 class localpeer(repository.peer):
269 class localpeer(repository.peer):
270 '''peer for a local repo; reflects only the most recent API'''
270 '''peer for a local repo; reflects only the most recent API'''
271
271
272 def __init__(self, repo, caps=None):
272 def __init__(self, repo, caps=None):
273 super(localpeer, self).__init__()
273 super(localpeer, self).__init__()
274
274
275 if caps is None:
275 if caps is None:
276 caps = moderncaps.copy()
276 caps = moderncaps.copy()
277 self._repo = repo.filtered(b'served')
277 self._repo = repo.filtered(b'served')
278 self.ui = repo.ui
278 self.ui = repo.ui
279
279
280 if repo._wanted_sidedata:
280 if repo._wanted_sidedata:
281 formatted = bundle2.format_remote_wanted_sidedata(repo)
281 formatted = bundle2.format_remote_wanted_sidedata(repo)
282 caps.add(b'exp-wanted-sidedata=' + formatted)
282 caps.add(b'exp-wanted-sidedata=' + formatted)
283
283
284 self._caps = repo._restrictcapabilities(caps)
284 self._caps = repo._restrictcapabilities(caps)
285
285
286 # Begin of _basepeer interface.
286 # Begin of _basepeer interface.
287
287
288 def url(self):
288 def url(self):
289 return self._repo.url()
289 return self._repo.url()
290
290
291 def local(self):
291 def local(self):
292 return self._repo
292 return self._repo
293
293
294 def peer(self):
294 def peer(self):
295 return self
295 return self
296
296
297 def canpush(self):
297 def canpush(self):
298 return True
298 return True
299
299
300 def close(self):
300 def close(self):
301 self._repo.close()
301 self._repo.close()
302
302
303 # End of _basepeer interface.
303 # End of _basepeer interface.
304
304
305 # Begin of _basewirecommands interface.
305 # Begin of _basewirecommands interface.
306
306
307 def branchmap(self):
307 def branchmap(self):
308 return self._repo.branchmap()
308 return self._repo.branchmap()
309
309
310 def capabilities(self):
310 def capabilities(self):
311 return self._caps
311 return self._caps
312
312
313 def clonebundles(self):
313 def clonebundles(self):
314 return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)
314 return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)
315
315
316 def debugwireargs(self, one, two, three=None, four=None, five=None):
316 def debugwireargs(self, one, two, three=None, four=None, five=None):
317 """Used to test argument passing over the wire"""
317 """Used to test argument passing over the wire"""
318 return b"%s %s %s %s %s" % (
318 return b"%s %s %s %s %s" % (
319 one,
319 one,
320 two,
320 two,
321 pycompat.bytestr(three),
321 pycompat.bytestr(three),
322 pycompat.bytestr(four),
322 pycompat.bytestr(four),
323 pycompat.bytestr(five),
323 pycompat.bytestr(five),
324 )
324 )
325
325
326 def getbundle(
326 def getbundle(
327 self,
327 self,
328 source,
328 source,
329 heads=None,
329 heads=None,
330 common=None,
330 common=None,
331 bundlecaps=None,
331 bundlecaps=None,
332 remote_sidedata=None,
332 remote_sidedata=None,
333 **kwargs
333 **kwargs
334 ):
334 ):
335 chunks = exchange.getbundlechunks(
335 chunks = exchange.getbundlechunks(
336 self._repo,
336 self._repo,
337 source,
337 source,
338 heads=heads,
338 heads=heads,
339 common=common,
339 common=common,
340 bundlecaps=bundlecaps,
340 bundlecaps=bundlecaps,
341 remote_sidedata=remote_sidedata,
341 remote_sidedata=remote_sidedata,
342 **kwargs
342 **kwargs
343 )[1]
343 )[1]
344 cb = util.chunkbuffer(chunks)
344 cb = util.chunkbuffer(chunks)
345
345
346 if exchange.bundle2requested(bundlecaps):
346 if exchange.bundle2requested(bundlecaps):
347 # When requesting a bundle2, getbundle returns a stream to make the
347 # When requesting a bundle2, getbundle returns a stream to make the
348 # wire level function happier. We need to build a proper object
348 # wire level function happier. We need to build a proper object
349 # from it in local peer.
349 # from it in local peer.
350 return bundle2.getunbundler(self.ui, cb)
350 return bundle2.getunbundler(self.ui, cb)
351 else:
351 else:
352 return changegroup.getunbundler(b'01', cb, None)
352 return changegroup.getunbundler(b'01', cb, None)
353
353
354 def heads(self):
354 def heads(self):
355 return self._repo.heads()
355 return self._repo.heads()
356
356
357 def known(self, nodes):
357 def known(self, nodes):
358 return self._repo.known(nodes)
358 return self._repo.known(nodes)
359
359
360 def listkeys(self, namespace):
360 def listkeys(self, namespace):
361 return self._repo.listkeys(namespace)
361 return self._repo.listkeys(namespace)
362
362
363 def lookup(self, key):
363 def lookup(self, key):
364 return self._repo.lookup(key)
364 return self._repo.lookup(key)
365
365
366 def pushkey(self, namespace, key, old, new):
366 def pushkey(self, namespace, key, old, new):
367 return self._repo.pushkey(namespace, key, old, new)
367 return self._repo.pushkey(namespace, key, old, new)
368
368
369 def stream_out(self):
369 def stream_out(self):
370 raise error.Abort(_(b'cannot perform stream clone against local peer'))
370 raise error.Abort(_(b'cannot perform stream clone against local peer'))
371
371
372 def unbundle(self, bundle, heads, url):
372 def unbundle(self, bundle, heads, url):
373 """apply a bundle on a repo
373 """apply a bundle on a repo
374
374
375 This function handles the repo locking itself."""
375 This function handles the repo locking itself."""
376 try:
376 try:
377 try:
377 try:
378 bundle = exchange.readbundle(self.ui, bundle, None)
378 bundle = exchange.readbundle(self.ui, bundle, None)
379 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
379 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
380 if util.safehasattr(ret, b'getchunks'):
380 if util.safehasattr(ret, b'getchunks'):
381 # This is a bundle20 object, turn it into an unbundler.
381 # This is a bundle20 object, turn it into an unbundler.
382 # This little dance should be dropped eventually when the
382 # This little dance should be dropped eventually when the
383 # API is finally improved.
383 # API is finally improved.
384 stream = util.chunkbuffer(ret.getchunks())
384 stream = util.chunkbuffer(ret.getchunks())
385 ret = bundle2.getunbundler(self.ui, stream)
385 ret = bundle2.getunbundler(self.ui, stream)
386 return ret
386 return ret
387 except Exception as exc:
387 except Exception as exc:
388 # If the exception contains output salvaged from a bundle2
388 # If the exception contains output salvaged from a bundle2
389 # reply, we need to make sure it is printed before continuing
389 # reply, we need to make sure it is printed before continuing
390 # to fail. So we build a bundle2 with such output and consume
390 # to fail. So we build a bundle2 with such output and consume
391 # it directly.
391 # it directly.
392 #
392 #
393 # This is not very elegant but allows a "simple" solution for
393 # This is not very elegant but allows a "simple" solution for
394 # issue4594
394 # issue4594
395 output = getattr(exc, '_bundle2salvagedoutput', ())
395 output = getattr(exc, '_bundle2salvagedoutput', ())
396 if output:
396 if output:
397 bundler = bundle2.bundle20(self._repo.ui)
397 bundler = bundle2.bundle20(self._repo.ui)
398 for out in output:
398 for out in output:
399 bundler.addpart(out)
399 bundler.addpart(out)
400 stream = util.chunkbuffer(bundler.getchunks())
400 stream = util.chunkbuffer(bundler.getchunks())
401 b = bundle2.getunbundler(self.ui, stream)
401 b = bundle2.getunbundler(self.ui, stream)
402 bundle2.processbundle(self._repo, b)
402 bundle2.processbundle(self._repo, b)
403 raise
403 raise
404 except error.PushRaced as exc:
404 except error.PushRaced as exc:
405 raise error.ResponseError(
405 raise error.ResponseError(
406 _(b'push failed:'), stringutil.forcebytestr(exc)
406 _(b'push failed:'), stringutil.forcebytestr(exc)
407 )
407 )
408
408
409 # End of _basewirecommands interface.
409 # End of _basewirecommands interface.
410
410
411 # Begin of peer interface.
411 # Begin of peer interface.
412
412
413 def commandexecutor(self):
413 def commandexecutor(self):
414 return localcommandexecutor(self)
414 return localcommandexecutor(self)
415
415
416 # End of peer interface.
416 # End of peer interface.
417
417
418
418
419 @interfaceutil.implementer(repository.ipeerlegacycommands)
419 @interfaceutil.implementer(repository.ipeerlegacycommands)
420 class locallegacypeer(localpeer):
420 class locallegacypeer(localpeer):
421 """peer extension which implements legacy methods too; used for tests with
421 """peer extension which implements legacy methods too; used for tests with
422 restricted capabilities"""
422 restricted capabilities"""
423
423
424 def __init__(self, repo):
424 def __init__(self, repo):
425 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
425 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
426
426
427 # Begin of baselegacywirecommands interface.
427 # Begin of baselegacywirecommands interface.
428
428
429 def between(self, pairs):
429 def between(self, pairs):
430 return self._repo.between(pairs)
430 return self._repo.between(pairs)
431
431
432 def branches(self, nodes):
432 def branches(self, nodes):
433 return self._repo.branches(nodes)
433 return self._repo.branches(nodes)
434
434
435 def changegroup(self, nodes, source):
435 def changegroup(self, nodes, source):
436 outgoing = discovery.outgoing(
436 outgoing = discovery.outgoing(
437 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
437 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
438 )
438 )
439 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
439 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
440
440
441 def changegroupsubset(self, bases, heads, source):
441 def changegroupsubset(self, bases, heads, source):
442 outgoing = discovery.outgoing(
442 outgoing = discovery.outgoing(
443 self._repo, missingroots=bases, ancestorsof=heads
443 self._repo, missingroots=bases, ancestorsof=heads
444 )
444 )
445 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
445 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
446
446
447 # End of baselegacywirecommands interface.
447 # End of baselegacywirecommands interface.
448
448
449
449
450 # Functions receiving (ui, features) that extensions can register to impact
450 # Functions receiving (ui, features) that extensions can register to impact
451 # the ability to load repositories with custom requirements. Only
451 # the ability to load repositories with custom requirements. Only
452 # functions defined in loaded extensions are called.
452 # functions defined in loaded extensions are called.
453 #
453 #
454 # The function receives a set of requirement strings that the repository
454 # The function receives a set of requirement strings that the repository
455 # is capable of opening. Functions will typically add elements to the
455 # is capable of opening. Functions will typically add elements to the
456 # set to reflect that the extension knows how to handle that requirements.
456 # set to reflect that the extension knows how to handle that requirements.
457 featuresetupfuncs = set()
457 featuresetupfuncs = set()
458
458
459
459
460 def _getsharedvfs(hgvfs, requirements):
460 def _getsharedvfs(hgvfs, requirements):
461 """returns the vfs object pointing to root of shared source
461 """returns the vfs object pointing to root of shared source
462 repo for a shared repository
462 repo for a shared repository
463
463
464 hgvfs is vfs pointing at .hg/ of current repo (shared one)
464 hgvfs is vfs pointing at .hg/ of current repo (shared one)
465 requirements is a set of requirements of current repo (shared one)
465 requirements is a set of requirements of current repo (shared one)
466 """
466 """
467 # The ``shared`` or ``relshared`` requirements indicate the
467 # The ``shared`` or ``relshared`` requirements indicate the
468 # store lives in the path contained in the ``.hg/sharedpath`` file.
468 # store lives in the path contained in the ``.hg/sharedpath`` file.
469 # This is an absolute path for ``shared`` and relative to
469 # This is an absolute path for ``shared`` and relative to
470 # ``.hg/`` for ``relshared``.
470 # ``.hg/`` for ``relshared``.
471 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
471 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
472 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
472 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
473 sharedpath = util.normpath(hgvfs.join(sharedpath))
473 sharedpath = util.normpath(hgvfs.join(sharedpath))
474
474
475 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
475 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
476
476
477 if not sharedvfs.exists():
477 if not sharedvfs.exists():
478 raise error.RepoError(
478 raise error.RepoError(
479 _(b'.hg/sharedpath points to nonexistent directory %s')
479 _(b'.hg/sharedpath points to nonexistent directory %s')
480 % sharedvfs.base
480 % sharedvfs.base
481 )
481 )
482 return sharedvfs
482 return sharedvfs
483
483
484
484
485 def _readrequires(vfs, allowmissing):
485 def _readrequires(vfs, allowmissing):
486 """reads the require file present at root of this vfs
486 """reads the require file present at root of this vfs
487 and return a set of requirements
487 and return a set of requirements
488
488
489 If allowmissing is True, we suppress ENOENT if raised"""
489 If allowmissing is True, we suppress ENOENT if raised"""
490 # requires file contains a newline-delimited list of
490 # requires file contains a newline-delimited list of
491 # features/capabilities the opener (us) must have in order to use
491 # features/capabilities the opener (us) must have in order to use
492 # the repository. This file was introduced in Mercurial 0.9.2,
492 # the repository. This file was introduced in Mercurial 0.9.2,
493 # which means very old repositories may not have one. We assume
493 # which means very old repositories may not have one. We assume
494 # a missing file translates to no requirements.
494 # a missing file translates to no requirements.
495 try:
495 try:
496 requirements = set(vfs.read(b'requires').splitlines())
496 requirements = set(vfs.read(b'requires').splitlines())
497 except IOError as e:
497 except IOError as e:
498 if not (allowmissing and e.errno == errno.ENOENT):
498 if not (allowmissing and e.errno == errno.ENOENT):
499 raise
499 raise
500 requirements = set()
500 requirements = set()
501 return requirements
501 return requirements
502
502
503
503
504 def makelocalrepository(baseui, path, intents=None):
504 def makelocalrepository(baseui, path, intents=None):
505 """Create a local repository object.
505 """Create a local repository object.
506
506
507 Given arguments needed to construct a local repository, this function
507 Given arguments needed to construct a local repository, this function
508 performs various early repository loading functionality (such as
508 performs various early repository loading functionality (such as
509 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
509 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
510 the repository can be opened, derives a type suitable for representing
510 the repository can be opened, derives a type suitable for representing
511 that repository, and returns an instance of it.
511 that repository, and returns an instance of it.
512
512
513 The returned object conforms to the ``repository.completelocalrepository``
513 The returned object conforms to the ``repository.completelocalrepository``
514 interface.
514 interface.
515
515
516 The repository type is derived by calling a series of factory functions
516 The repository type is derived by calling a series of factory functions
517 for each aspect/interface of the final repository. These are defined by
517 for each aspect/interface of the final repository. These are defined by
518 ``REPO_INTERFACES``.
518 ``REPO_INTERFACES``.
519
519
520 Each factory function is called to produce a type implementing a specific
520 Each factory function is called to produce a type implementing a specific
521 interface. The cumulative list of returned types will be combined into a
521 interface. The cumulative list of returned types will be combined into a
522 new type and that type will be instantiated to represent the local
522 new type and that type will be instantiated to represent the local
523 repository.
523 repository.
524
524
525 The factory functions each receive various state that may be consulted
525 The factory functions each receive various state that may be consulted
526 as part of deriving a type.
526 as part of deriving a type.
527
527
528 Extensions should wrap these factory functions to customize repository type
528 Extensions should wrap these factory functions to customize repository type
529 creation. Note that an extension's wrapped function may be called even if
529 creation. Note that an extension's wrapped function may be called even if
530 that extension is not loaded for the repo being constructed. Extensions
530 that extension is not loaded for the repo being constructed. Extensions
531 should check if their ``__name__`` appears in the
531 should check if their ``__name__`` appears in the
532 ``extensionmodulenames`` set passed to the factory function and no-op if
532 ``extensionmodulenames`` set passed to the factory function and no-op if
533 not.
533 not.
534 """
534 """
535 ui = baseui.copy()
535 ui = baseui.copy()
536 # Prevent copying repo configuration.
536 # Prevent copying repo configuration.
537 ui.copy = baseui.copy
537 ui.copy = baseui.copy
538
538
539 # Working directory VFS rooted at repository root.
539 # Working directory VFS rooted at repository root.
540 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
540 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
541
541
542 # Main VFS for .hg/ directory.
542 # Main VFS for .hg/ directory.
543 hgpath = wdirvfs.join(b'.hg')
543 hgpath = wdirvfs.join(b'.hg')
544 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
544 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
545 # Whether this repository is shared one or not
545 # Whether this repository is shared one or not
546 shared = False
546 shared = False
547 # If this repository is shared, vfs pointing to shared repo
547 # If this repository is shared, vfs pointing to shared repo
548 sharedvfs = None
548 sharedvfs = None
549
549
550 # The .hg/ path should exist and should be a directory. All other
550 # The .hg/ path should exist and should be a directory. All other
551 # cases are errors.
551 # cases are errors.
552 if not hgvfs.isdir():
552 if not hgvfs.isdir():
553 try:
553 try:
554 hgvfs.stat()
554 hgvfs.stat()
555 except OSError as e:
555 except OSError as e:
556 if e.errno != errno.ENOENT:
556 if e.errno != errno.ENOENT:
557 raise
557 raise
558 except ValueError as e:
558 except ValueError as e:
559 # Can be raised on Python 3.8 when path is invalid.
559 # Can be raised on Python 3.8 when path is invalid.
560 raise error.Abort(
560 raise error.Abort(
561 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
561 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
562 )
562 )
563
563
564 raise error.RepoError(_(b'repository %s not found') % path)
564 raise error.RepoError(_(b'repository %s not found') % path)
565
565
566 requirements = _readrequires(hgvfs, True)
566 requirements = _readrequires(hgvfs, True)
567 shared = (
567 shared = (
568 requirementsmod.SHARED_REQUIREMENT in requirements
568 requirementsmod.SHARED_REQUIREMENT in requirements
569 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
569 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
570 )
570 )
571 storevfs = None
571 storevfs = None
572 if shared:
572 if shared:
573 # This is a shared repo
573 # This is a shared repo
574 sharedvfs = _getsharedvfs(hgvfs, requirements)
574 sharedvfs = _getsharedvfs(hgvfs, requirements)
575 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
575 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
576 else:
576 else:
577 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
577 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
578
578
579 # if .hg/requires contains the sharesafe requirement, it means
579 # if .hg/requires contains the sharesafe requirement, it means
580 # there exists a `.hg/store/requires` too and we should read it
580 # there exists a `.hg/store/requires` too and we should read it
581 # NOTE: presence of SHARESAFE_REQUIREMENT imply that store requirement
581 # NOTE: presence of SHARESAFE_REQUIREMENT imply that store requirement
582 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
582 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
583 # is not present, refer checkrequirementscompat() for that
583 # is not present, refer checkrequirementscompat() for that
584 #
584 #
585 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
585 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
586 # repository was shared the old way. We check the share source .hg/requires
586 # repository was shared the old way. We check the share source .hg/requires
587 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
587 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
588 # to be reshared
588 # to be reshared
589 hint = _(b"see `hg help config.format.use-share-safe` for more information")
589 hint = _(b"see `hg help config.format.use-share-safe` for more information")
590 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
590 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
591
591
592 if (
592 if (
593 shared
593 shared
594 and requirementsmod.SHARESAFE_REQUIREMENT
594 and requirementsmod.SHARESAFE_REQUIREMENT
595 not in _readrequires(sharedvfs, True)
595 not in _readrequires(sharedvfs, True)
596 ):
596 ):
597 mismatch_warn = ui.configbool(
597 mismatch_warn = ui.configbool(
598 b'share', b'safe-mismatch.source-not-safe.warn'
598 b'share', b'safe-mismatch.source-not-safe.warn'
599 )
599 )
600 mismatch_config = ui.config(
600 mismatch_config = ui.config(
601 b'share', b'safe-mismatch.source-not-safe'
601 b'share', b'safe-mismatch.source-not-safe'
602 )
602 )
603 if mismatch_config in (
603 if mismatch_config in (
604 b'downgrade-allow',
604 b'downgrade-allow',
605 b'allow',
605 b'allow',
606 b'downgrade-abort',
606 b'downgrade-abort',
607 ):
607 ):
608 # prevent cyclic import localrepo -> upgrade -> localrepo
608 # prevent cyclic import localrepo -> upgrade -> localrepo
609 from . import upgrade
609 from . import upgrade
610
610
611 upgrade.downgrade_share_to_non_safe(
611 upgrade.downgrade_share_to_non_safe(
612 ui,
612 ui,
613 hgvfs,
613 hgvfs,
614 sharedvfs,
614 sharedvfs,
615 requirements,
615 requirements,
616 mismatch_config,
616 mismatch_config,
617 mismatch_warn,
617 mismatch_warn,
618 )
618 )
619 elif mismatch_config == b'abort':
619 elif mismatch_config == b'abort':
620 raise error.Abort(
620 raise error.Abort(
621 _(b"share source does not support share-safe requirement"),
621 _(b"share source does not support share-safe requirement"),
622 hint=hint,
622 hint=hint,
623 )
623 )
624 else:
624 else:
625 raise error.Abort(
625 raise error.Abort(
626 _(
626 _(
627 b"share-safe mismatch with source.\nUnrecognized"
627 b"share-safe mismatch with source.\nUnrecognized"
628 b" value '%s' of `share.safe-mismatch.source-not-safe`"
628 b" value '%s' of `share.safe-mismatch.source-not-safe`"
629 b" set."
629 b" set."
630 )
630 )
631 % mismatch_config,
631 % mismatch_config,
632 hint=hint,
632 hint=hint,
633 )
633 )
634 else:
634 else:
635 requirements |= _readrequires(storevfs, False)
635 requirements |= _readrequires(storevfs, False)
636 elif shared:
636 elif shared:
637 sourcerequires = _readrequires(sharedvfs, False)
637 sourcerequires = _readrequires(sharedvfs, False)
638 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
638 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
639 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
639 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
640 mismatch_warn = ui.configbool(
640 mismatch_warn = ui.configbool(
641 b'share', b'safe-mismatch.source-safe.warn'
641 b'share', b'safe-mismatch.source-safe.warn'
642 )
642 )
643 if mismatch_config in (
643 if mismatch_config in (
644 b'upgrade-allow',
644 b'upgrade-allow',
645 b'allow',
645 b'allow',
646 b'upgrade-abort',
646 b'upgrade-abort',
647 ):
647 ):
648 # prevent cyclic import localrepo -> upgrade -> localrepo
648 # prevent cyclic import localrepo -> upgrade -> localrepo
649 from . import upgrade
649 from . import upgrade
650
650
651 upgrade.upgrade_share_to_safe(
651 upgrade.upgrade_share_to_safe(
652 ui,
652 ui,
653 hgvfs,
653 hgvfs,
654 storevfs,
654 storevfs,
655 requirements,
655 requirements,
656 mismatch_config,
656 mismatch_config,
657 mismatch_warn,
657 mismatch_warn,
658 )
658 )
659 elif mismatch_config == b'abort':
659 elif mismatch_config == b'abort':
660 raise error.Abort(
660 raise error.Abort(
661 _(
661 _(
662 b'version mismatch: source uses share-safe'
662 b'version mismatch: source uses share-safe'
663 b' functionality while the current share does not'
663 b' functionality while the current share does not'
664 ),
664 ),
665 hint=hint,
665 hint=hint,
666 )
666 )
667 else:
667 else:
668 raise error.Abort(
668 raise error.Abort(
669 _(
669 _(
670 b"share-safe mismatch with source.\nUnrecognized"
670 b"share-safe mismatch with source.\nUnrecognized"
671 b" value '%s' of `share.safe-mismatch.source-safe` set."
671 b" value '%s' of `share.safe-mismatch.source-safe` set."
672 )
672 )
673 % mismatch_config,
673 % mismatch_config,
674 hint=hint,
674 hint=hint,
675 )
675 )
676
676
677 # The .hg/hgrc file may load extensions or contain config options
677 # The .hg/hgrc file may load extensions or contain config options
678 # that influence repository construction. Attempt to load it and
678 # that influence repository construction. Attempt to load it and
679 # process any new extensions that it may have pulled in.
679 # process any new extensions that it may have pulled in.
680 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
680 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
681 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
681 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
682 extensions.loadall(ui)
682 extensions.loadall(ui)
683 extensions.populateui(ui)
683 extensions.populateui(ui)
684
684
685 # Set of module names of extensions loaded for this repository.
685 # Set of module names of extensions loaded for this repository.
686 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
686 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
687
687
688 supportedrequirements = gathersupportedrequirements(ui)
688 supportedrequirements = gathersupportedrequirements(ui)
689
689
690 # We first validate the requirements are known.
690 # We first validate the requirements are known.
691 ensurerequirementsrecognized(requirements, supportedrequirements)
691 ensurerequirementsrecognized(requirements, supportedrequirements)
692
692
693 # Then we validate that the known set is reasonable to use together.
693 # Then we validate that the known set is reasonable to use together.
694 ensurerequirementscompatible(ui, requirements)
694 ensurerequirementscompatible(ui, requirements)
695
695
696 # TODO there are unhandled edge cases related to opening repositories with
696 # TODO there are unhandled edge cases related to opening repositories with
697 # shared storage. If storage is shared, we should also test for requirements
697 # shared storage. If storage is shared, we should also test for requirements
698 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
698 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
699 # that repo, as that repo may load extensions needed to open it. This is a
699 # that repo, as that repo may load extensions needed to open it. This is a
700 # bit complicated because we don't want the other hgrc to overwrite settings
700 # bit complicated because we don't want the other hgrc to overwrite settings
701 # in this hgrc.
701 # in this hgrc.
702 #
702 #
703 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
703 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
704 # file when sharing repos. But if a requirement is added after the share is
704 # file when sharing repos. But if a requirement is added after the share is
705 # performed, thereby introducing a new requirement for the opener, we may
705 # performed, thereby introducing a new requirement for the opener, we may
706 # will not see that and could encounter a run-time error interacting with
706 # will not see that and could encounter a run-time error interacting with
707 # that shared store since it has an unknown-to-us requirement.
707 # that shared store since it has an unknown-to-us requirement.
708
708
709 # At this point, we know we should be capable of opening the repository.
709 # At this point, we know we should be capable of opening the repository.
710 # Now get on with doing that.
710 # Now get on with doing that.
711
711
712 features = set()
712 features = set()
713
713
714 # The "store" part of the repository holds versioned data. How it is
714 # The "store" part of the repository holds versioned data. How it is
715 # accessed is determined by various requirements. If `shared` or
715 # accessed is determined by various requirements. If `shared` or
716 # `relshared` requirements are present, this indicates current repository
716 # `relshared` requirements are present, this indicates current repository
717 # is a share and store exists in path mentioned in `.hg/sharedpath`
717 # is a share and store exists in path mentioned in `.hg/sharedpath`
718 if shared:
718 if shared:
719 storebasepath = sharedvfs.base
719 storebasepath = sharedvfs.base
720 cachepath = sharedvfs.join(b'cache')
720 cachepath = sharedvfs.join(b'cache')
721 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
721 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
722 else:
722 else:
723 storebasepath = hgvfs.base
723 storebasepath = hgvfs.base
724 cachepath = hgvfs.join(b'cache')
724 cachepath = hgvfs.join(b'cache')
725 wcachepath = hgvfs.join(b'wcache')
725 wcachepath = hgvfs.join(b'wcache')
726
726
727 # The store has changed over time and the exact layout is dictated by
727 # The store has changed over time and the exact layout is dictated by
728 # requirements. The store interface abstracts differences across all
728 # requirements. The store interface abstracts differences across all
729 # of them.
729 # of them.
730 store = makestore(
730 store = makestore(
731 requirements,
731 requirements,
732 storebasepath,
732 storebasepath,
733 lambda base: vfsmod.vfs(base, cacheaudited=True),
733 lambda base: vfsmod.vfs(base, cacheaudited=True),
734 )
734 )
735 hgvfs.createmode = store.createmode
735 hgvfs.createmode = store.createmode
736
736
737 storevfs = store.vfs
737 storevfs = store.vfs
738 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
738 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
739
739
740 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
740 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
741 features.add(repository.REPO_FEATURE_SIDE_DATA)
741 features.add(repository.REPO_FEATURE_SIDE_DATA)
742
742
743 # The cache vfs is used to manage cache files.
743 # The cache vfs is used to manage cache files.
744 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
744 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
745 cachevfs.createmode = store.createmode
745 cachevfs.createmode = store.createmode
746 # The cache vfs is used to manage cache files related to the working copy
746 # The cache vfs is used to manage cache files related to the working copy
747 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
747 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
748 wcachevfs.createmode = store.createmode
748 wcachevfs.createmode = store.createmode
749
749
750 # Now resolve the type for the repository object. We do this by repeatedly
750 # Now resolve the type for the repository object. We do this by repeatedly
751 # calling a factory function to produces types for specific aspects of the
751 # calling a factory function to produces types for specific aspects of the
752 # repo's operation. The aggregate returned types are used as base classes
752 # repo's operation. The aggregate returned types are used as base classes
753 # for a dynamically-derived type, which will represent our new repository.
753 # for a dynamically-derived type, which will represent our new repository.
754
754
755 bases = []
755 bases = []
756 extrastate = {}
756 extrastate = {}
757
757
758 for iface, fn in REPO_INTERFACES:
758 for iface, fn in REPO_INTERFACES:
759 # We pass all potentially useful state to give extensions tons of
759 # We pass all potentially useful state to give extensions tons of
760 # flexibility.
760 # flexibility.
761 typ = fn()(
761 typ = fn()(
762 ui=ui,
762 ui=ui,
763 intents=intents,
763 intents=intents,
764 requirements=requirements,
764 requirements=requirements,
765 features=features,
765 features=features,
766 wdirvfs=wdirvfs,
766 wdirvfs=wdirvfs,
767 hgvfs=hgvfs,
767 hgvfs=hgvfs,
768 store=store,
768 store=store,
769 storevfs=storevfs,
769 storevfs=storevfs,
770 storeoptions=storevfs.options,
770 storeoptions=storevfs.options,
771 cachevfs=cachevfs,
771 cachevfs=cachevfs,
772 wcachevfs=wcachevfs,
772 wcachevfs=wcachevfs,
773 extensionmodulenames=extensionmodulenames,
773 extensionmodulenames=extensionmodulenames,
774 extrastate=extrastate,
774 extrastate=extrastate,
775 baseclasses=bases,
775 baseclasses=bases,
776 )
776 )
777
777
778 if not isinstance(typ, type):
778 if not isinstance(typ, type):
779 raise error.ProgrammingError(
779 raise error.ProgrammingError(
780 b'unable to construct type for %s' % iface
780 b'unable to construct type for %s' % iface
781 )
781 )
782
782
783 bases.append(typ)
783 bases.append(typ)
784
784
785 # type() allows you to use characters in type names that wouldn't be
785 # type() allows you to use characters in type names that wouldn't be
786 # recognized as Python symbols in source code. We abuse that to add
786 # recognized as Python symbols in source code. We abuse that to add
787 # rich information about our constructed repo.
787 # rich information about our constructed repo.
788 name = pycompat.sysstr(
788 name = pycompat.sysstr(
789 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
789 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
790 )
790 )
791
791
792 cls = type(name, tuple(bases), {})
792 cls = type(name, tuple(bases), {})
793
793
794 return cls(
794 return cls(
795 baseui=baseui,
795 baseui=baseui,
796 ui=ui,
796 ui=ui,
797 origroot=path,
797 origroot=path,
798 wdirvfs=wdirvfs,
798 wdirvfs=wdirvfs,
799 hgvfs=hgvfs,
799 hgvfs=hgvfs,
800 requirements=requirements,
800 requirements=requirements,
801 supportedrequirements=supportedrequirements,
801 supportedrequirements=supportedrequirements,
802 sharedpath=storebasepath,
802 sharedpath=storebasepath,
803 store=store,
803 store=store,
804 cachevfs=cachevfs,
804 cachevfs=cachevfs,
805 wcachevfs=wcachevfs,
805 wcachevfs=wcachevfs,
806 features=features,
806 features=features,
807 intents=intents,
807 intents=intents,
808 )
808 )
809
809
810
810
811 def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
811 def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
812 """Load hgrc files/content into a ui instance.
812 """Load hgrc files/content into a ui instance.
813
813
814 This is called during repository opening to load any additional
814 This is called during repository opening to load any additional
815 config files or settings relevant to the current repository.
815 config files or settings relevant to the current repository.
816
816
817 Returns a bool indicating whether any additional configs were loaded.
817 Returns a bool indicating whether any additional configs were loaded.
818
818
819 Extensions should monkeypatch this function to modify how per-repo
819 Extensions should monkeypatch this function to modify how per-repo
820 configs are loaded. For example, an extension may wish to pull in
820 configs are loaded. For example, an extension may wish to pull in
821 configs from alternate files or sources.
821 configs from alternate files or sources.
822
822
823 sharedvfs is vfs object pointing to source repo if the current one is a
823 sharedvfs is vfs object pointing to source repo if the current one is a
824 shared one
824 shared one
825 """
825 """
826 if not rcutil.use_repo_hgrc():
826 if not rcutil.use_repo_hgrc():
827 return False
827 return False
828
828
829 ret = False
829 ret = False
830 # first load config from shared source if we has to
830 # first load config from shared source if we has to
831 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
831 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
832 try:
832 try:
833 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
833 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
834 ret = True
834 ret = True
835 except IOError:
835 except IOError:
836 pass
836 pass
837
837
838 try:
838 try:
839 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
839 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
840 ret = True
840 ret = True
841 except IOError:
841 except IOError:
842 pass
842 pass
843
843
844 try:
844 try:
845 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
845 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
846 ret = True
846 ret = True
847 except IOError:
847 except IOError:
848 pass
848 pass
849
849
850 return ret
850 return ret
851
851
852
852
853 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
853 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
854 """Perform additional actions after .hg/hgrc is loaded.
854 """Perform additional actions after .hg/hgrc is loaded.
855
855
856 This function is called during repository loading immediately after
856 This function is called during repository loading immediately after
857 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
857 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
858
858
859 The function can be used to validate configs, automatically add
859 The function can be used to validate configs, automatically add
860 options (including extensions) based on requirements, etc.
860 options (including extensions) based on requirements, etc.
861 """
861 """
862
862
863 # Map of requirements to list of extensions to load automatically when
863 # Map of requirements to list of extensions to load automatically when
864 # requirement is present.
864 # requirement is present.
865 autoextensions = {
865 autoextensions = {
866 b'git': [b'git'],
866 b'git': [b'git'],
867 b'largefiles': [b'largefiles'],
867 b'largefiles': [b'largefiles'],
868 b'lfs': [b'lfs'],
868 b'lfs': [b'lfs'],
869 }
869 }
870
870
871 for requirement, names in sorted(autoextensions.items()):
871 for requirement, names in sorted(autoextensions.items()):
872 if requirement not in requirements:
872 if requirement not in requirements:
873 continue
873 continue
874
874
875 for name in names:
875 for name in names:
876 if not ui.hasconfig(b'extensions', name):
876 if not ui.hasconfig(b'extensions', name):
877 ui.setconfig(b'extensions', name, b'', source=b'autoload')
877 ui.setconfig(b'extensions', name, b'', source=b'autoload')
878
878
879
879
880 def gathersupportedrequirements(ui):
880 def gathersupportedrequirements(ui):
881 """Determine the complete set of recognized requirements."""
881 """Determine the complete set of recognized requirements."""
882 # Start with all requirements supported by this file.
882 # Start with all requirements supported by this file.
883 supported = set(localrepository._basesupported)
883 supported = set(localrepository._basesupported)
884
884
885 # Execute ``featuresetupfuncs`` entries if they belong to an extension
885 # Execute ``featuresetupfuncs`` entries if they belong to an extension
886 # relevant to this ui instance.
886 # relevant to this ui instance.
887 modules = {m.__name__ for n, m in extensions.extensions(ui)}
887 modules = {m.__name__ for n, m in extensions.extensions(ui)}
888
888
889 for fn in featuresetupfuncs:
889 for fn in featuresetupfuncs:
890 if fn.__module__ in modules:
890 if fn.__module__ in modules:
891 fn(ui, supported)
891 fn(ui, supported)
892
892
893 # Add derived requirements from registered compression engines.
893 # Add derived requirements from registered compression engines.
894 for name in util.compengines:
894 for name in util.compengines:
895 engine = util.compengines[name]
895 engine = util.compengines[name]
896 if engine.available() and engine.revlogheader():
896 if engine.available() and engine.revlogheader():
897 supported.add(b'exp-compression-%s' % name)
897 supported.add(b'exp-compression-%s' % name)
898 if engine.name() == b'zstd':
898 if engine.name() == b'zstd':
899 supported.add(b'revlog-compression-zstd')
899 supported.add(b'revlog-compression-zstd')
900
900
901 return supported
901 return supported
902
902
903
903
904 def ensurerequirementsrecognized(requirements, supported):
904 def ensurerequirementsrecognized(requirements, supported):
905 """Validate that a set of local requirements is recognized.
905 """Validate that a set of local requirements is recognized.
906
906
907 Receives a set of requirements. Raises an ``error.RepoError`` if there
907 Receives a set of requirements. Raises an ``error.RepoError`` if there
908 exists any requirement in that set that currently loaded code doesn't
908 exists any requirement in that set that currently loaded code doesn't
909 recognize.
909 recognize.
910
910
911 Returns a set of supported requirements.
911 Returns a set of supported requirements.
912 """
912 """
913 missing = set()
913 missing = set()
914
914
915 for requirement in requirements:
915 for requirement in requirements:
916 if requirement in supported:
916 if requirement in supported:
917 continue
917 continue
918
918
919 if not requirement or not requirement[0:1].isalnum():
919 if not requirement or not requirement[0:1].isalnum():
920 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
920 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
921
921
922 missing.add(requirement)
922 missing.add(requirement)
923
923
924 if missing:
924 if missing:
925 raise error.RequirementError(
925 raise error.RequirementError(
926 _(b'repository requires features unknown to this Mercurial: %s')
926 _(b'repository requires features unknown to this Mercurial: %s')
927 % b' '.join(sorted(missing)),
927 % b' '.join(sorted(missing)),
928 hint=_(
928 hint=_(
929 b'see https://mercurial-scm.org/wiki/MissingRequirement '
929 b'see https://mercurial-scm.org/wiki/MissingRequirement '
930 b'for more information'
930 b'for more information'
931 ),
931 ),
932 )
932 )
933
933
934
934
935 def ensurerequirementscompatible(ui, requirements):
935 def ensurerequirementscompatible(ui, requirements):
936 """Validates that a set of recognized requirements is mutually compatible.
936 """Validates that a set of recognized requirements is mutually compatible.
937
937
938 Some requirements may not be compatible with others or require
938 Some requirements may not be compatible with others or require
939 config options that aren't enabled. This function is called during
939 config options that aren't enabled. This function is called during
940 repository opening to ensure that the set of requirements needed
940 repository opening to ensure that the set of requirements needed
941 to open a repository is sane and compatible with config options.
941 to open a repository is sane and compatible with config options.
942
942
943 Extensions can monkeypatch this function to perform additional
943 Extensions can monkeypatch this function to perform additional
944 checking.
944 checking.
945
945
946 ``error.RepoError`` should be raised on failure.
946 ``error.RepoError`` should be raised on failure.
947 """
947 """
948 if (
948 if (
949 requirementsmod.SPARSE_REQUIREMENT in requirements
949 requirementsmod.SPARSE_REQUIREMENT in requirements
950 and not sparse.enabled
950 and not sparse.enabled
951 ):
951 ):
952 raise error.RepoError(
952 raise error.RepoError(
953 _(
953 _(
954 b'repository is using sparse feature but '
954 b'repository is using sparse feature but '
955 b'sparse is not enabled; enable the '
955 b'sparse is not enabled; enable the '
956 b'"sparse" extensions to access'
956 b'"sparse" extensions to access'
957 )
957 )
958 )
958 )
959
959
960
960
961 def makestore(requirements, path, vfstype):
961 def makestore(requirements, path, vfstype):
962 """Construct a storage object for a repository."""
962 """Construct a storage object for a repository."""
963 if requirementsmod.STORE_REQUIREMENT in requirements:
963 if requirementsmod.STORE_REQUIREMENT in requirements:
964 if requirementsmod.FNCACHE_REQUIREMENT in requirements:
964 if requirementsmod.FNCACHE_REQUIREMENT in requirements:
965 dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
965 dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
966 return storemod.fncachestore(path, vfstype, dotencode)
966 return storemod.fncachestore(path, vfstype, dotencode)
967
967
968 return storemod.encodedstore(path, vfstype)
968 return storemod.encodedstore(path, vfstype)
969
969
970 return storemod.basicstore(path, vfstype)
970 return storemod.basicstore(path, vfstype)
971
971
972
972
973 def resolvestorevfsoptions(ui, requirements, features):
973 def resolvestorevfsoptions(ui, requirements, features):
974 """Resolve the options to pass to the store vfs opener.
974 """Resolve the options to pass to the store vfs opener.
975
975
976 The returned dict is used to influence behavior of the storage layer.
976 The returned dict is used to influence behavior of the storage layer.
977 """
977 """
978 options = {}
978 options = {}
979
979
980 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
980 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
981 options[b'treemanifest'] = True
981 options[b'treemanifest'] = True
982
982
983 # experimental config: format.manifestcachesize
983 # experimental config: format.manifestcachesize
984 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
984 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
985 if manifestcachesize is not None:
985 if manifestcachesize is not None:
986 options[b'manifestcachesize'] = manifestcachesize
986 options[b'manifestcachesize'] = manifestcachesize
987
987
988 # In the absence of another requirement superseding a revlog-related
988 # In the absence of another requirement superseding a revlog-related
989 # requirement, we have to assume the repo is using revlog version 0.
989 # requirement, we have to assume the repo is using revlog version 0.
990 # This revlog format is super old and we don't bother trying to parse
990 # This revlog format is super old and we don't bother trying to parse
991 # opener options for it because those options wouldn't do anything
991 # opener options for it because those options wouldn't do anything
992 # meaningful on such old repos.
992 # meaningful on such old repos.
993 if (
993 if (
994 requirementsmod.REVLOGV1_REQUIREMENT in requirements
994 requirementsmod.REVLOGV1_REQUIREMENT in requirements
995 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
995 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
996 ):
996 ):
997 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
997 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
998 else: # explicitly mark repo as using revlogv0
998 else: # explicitly mark repo as using revlogv0
999 options[b'revlogv0'] = True
999 options[b'revlogv0'] = True
1000
1000
1001 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
1001 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
1002 options[b'copies-storage'] = b'changeset-sidedata'
1002 options[b'copies-storage'] = b'changeset-sidedata'
1003 else:
1003 else:
1004 writecopiesto = ui.config(b'experimental', b'copies.write-to')
1004 writecopiesto = ui.config(b'experimental', b'copies.write-to')
1005 copiesextramode = (b'changeset-only', b'compatibility')
1005 copiesextramode = (b'changeset-only', b'compatibility')
1006 if writecopiesto in copiesextramode:
1006 if writecopiesto in copiesextramode:
1007 options[b'copies-storage'] = b'extra'
1007 options[b'copies-storage'] = b'extra'
1008
1008
1009 return options
1009 return options
1010
1010
1011
1011
1012 def resolverevlogstorevfsoptions(ui, requirements, features):
1012 def resolverevlogstorevfsoptions(ui, requirements, features):
1013 """Resolve opener options specific to revlogs."""
1013 """Resolve opener options specific to revlogs."""
1014
1014
1015 options = {}
1015 options = {}
1016 options[b'flagprocessors'] = {}
1016 options[b'flagprocessors'] = {}
1017
1017
1018 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1018 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1019 options[b'revlogv1'] = True
1019 options[b'revlogv1'] = True
1020 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1020 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1021 options[b'revlogv2'] = True
1021 options[b'revlogv2'] = True
1022
1022
1023 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1023 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1024 options[b'generaldelta'] = True
1024 options[b'generaldelta'] = True
1025
1025
1026 # experimental config: format.chunkcachesize
1026 # experimental config: format.chunkcachesize
1027 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1027 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1028 if chunkcachesize is not None:
1028 if chunkcachesize is not None:
1029 options[b'chunkcachesize'] = chunkcachesize
1029 options[b'chunkcachesize'] = chunkcachesize
1030
1030
1031 deltabothparents = ui.configbool(
1031 deltabothparents = ui.configbool(
1032 b'storage', b'revlog.optimize-delta-parent-choice'
1032 b'storage', b'revlog.optimize-delta-parent-choice'
1033 )
1033 )
1034 options[b'deltabothparents'] = deltabothparents
1034 options[b'deltabothparents'] = deltabothparents
1035
1035
1036 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1036 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1037 lazydeltabase = False
1037 lazydeltabase = False
1038 if lazydelta:
1038 if lazydelta:
1039 lazydeltabase = ui.configbool(
1039 lazydeltabase = ui.configbool(
1040 b'storage', b'revlog.reuse-external-delta-parent'
1040 b'storage', b'revlog.reuse-external-delta-parent'
1041 )
1041 )
1042 if lazydeltabase is None:
1042 if lazydeltabase is None:
1043 lazydeltabase = not scmutil.gddeltaconfig(ui)
1043 lazydeltabase = not scmutil.gddeltaconfig(ui)
1044 options[b'lazydelta'] = lazydelta
1044 options[b'lazydelta'] = lazydelta
1045 options[b'lazydeltabase'] = lazydeltabase
1045 options[b'lazydeltabase'] = lazydeltabase
1046
1046
1047 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1047 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1048 if 0 <= chainspan:
1048 if 0 <= chainspan:
1049 options[b'maxdeltachainspan'] = chainspan
1049 options[b'maxdeltachainspan'] = chainspan
1050
1050
1051 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1051 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1052 if mmapindexthreshold is not None:
1052 if mmapindexthreshold is not None:
1053 options[b'mmapindexthreshold'] = mmapindexthreshold
1053 options[b'mmapindexthreshold'] = mmapindexthreshold
1054
1054
1055 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1055 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1056 srdensitythres = float(
1056 srdensitythres = float(
1057 ui.config(b'experimental', b'sparse-read.density-threshold')
1057 ui.config(b'experimental', b'sparse-read.density-threshold')
1058 )
1058 )
1059 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1059 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1060 options[b'with-sparse-read'] = withsparseread
1060 options[b'with-sparse-read'] = withsparseread
1061 options[b'sparse-read-density-threshold'] = srdensitythres
1061 options[b'sparse-read-density-threshold'] = srdensitythres
1062 options[b'sparse-read-min-gap-size'] = srmingapsize
1062 options[b'sparse-read-min-gap-size'] = srmingapsize
1063
1063
1064 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1064 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1065 options[b'sparse-revlog'] = sparserevlog
1065 options[b'sparse-revlog'] = sparserevlog
1066 if sparserevlog:
1066 if sparserevlog:
1067 options[b'generaldelta'] = True
1067 options[b'generaldelta'] = True
1068
1068
1069 maxchainlen = None
1069 maxchainlen = None
1070 if sparserevlog:
1070 if sparserevlog:
1071 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1071 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1072 # experimental config: format.maxchainlen
1072 # experimental config: format.maxchainlen
1073 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1073 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1074 if maxchainlen is not None:
1074 if maxchainlen is not None:
1075 options[b'maxchainlen'] = maxchainlen
1075 options[b'maxchainlen'] = maxchainlen
1076
1076
1077 for r in requirements:
1077 for r in requirements:
1078 # we allow multiple compression engine requirement to co-exist because
1078 # we allow multiple compression engine requirement to co-exist because
1079 # strickly speaking, revlog seems to support mixed compression style.
1079 # strickly speaking, revlog seems to support mixed compression style.
1080 #
1080 #
1081 # The compression used for new entries will be "the last one"
1081 # The compression used for new entries will be "the last one"
1082 prefix = r.startswith
1082 prefix = r.startswith
1083 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1083 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1084 options[b'compengine'] = r.split(b'-', 2)[2]
1084 options[b'compengine'] = r.split(b'-', 2)[2]
1085
1085
1086 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1086 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1087 if options[b'zlib.level'] is not None:
1087 if options[b'zlib.level'] is not None:
1088 if not (0 <= options[b'zlib.level'] <= 9):
1088 if not (0 <= options[b'zlib.level'] <= 9):
1089 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1089 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1090 raise error.Abort(msg % options[b'zlib.level'])
1090 raise error.Abort(msg % options[b'zlib.level'])
1091 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1091 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1092 if options[b'zstd.level'] is not None:
1092 if options[b'zstd.level'] is not None:
1093 if not (0 <= options[b'zstd.level'] <= 22):
1093 if not (0 <= options[b'zstd.level'] <= 22):
1094 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1094 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1095 raise error.Abort(msg % options[b'zstd.level'])
1095 raise error.Abort(msg % options[b'zstd.level'])
1096
1096
1097 if requirementsmod.NARROW_REQUIREMENT in requirements:
1097 if requirementsmod.NARROW_REQUIREMENT in requirements:
1098 options[b'enableellipsis'] = True
1098 options[b'enableellipsis'] = True
1099
1099
1100 if ui.configbool(b'experimental', b'rust.index'):
1100 if ui.configbool(b'experimental', b'rust.index'):
1101 options[b'rust.index'] = True
1101 options[b'rust.index'] = True
1102 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1102 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1103 slow_path = ui.config(
1103 slow_path = ui.config(
1104 b'storage', b'revlog.persistent-nodemap.slow-path'
1104 b'storage', b'revlog.persistent-nodemap.slow-path'
1105 )
1105 )
1106 if slow_path not in (b'allow', b'warn', b'abort'):
1106 if slow_path not in (b'allow', b'warn', b'abort'):
1107 default = ui.config_default(
1107 default = ui.config_default(
1108 b'storage', b'revlog.persistent-nodemap.slow-path'
1108 b'storage', b'revlog.persistent-nodemap.slow-path'
1109 )
1109 )
1110 msg = _(
1110 msg = _(
1111 b'unknown value for config '
1111 b'unknown value for config '
1112 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1112 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1113 )
1113 )
1114 ui.warn(msg % slow_path)
1114 ui.warn(msg % slow_path)
1115 if not ui.quiet:
1115 if not ui.quiet:
1116 ui.warn(_(b'falling back to default value: %s\n') % default)
1116 ui.warn(_(b'falling back to default value: %s\n') % default)
1117 slow_path = default
1117 slow_path = default
1118
1118
1119 msg = _(
1119 msg = _(
1120 b"accessing `persistent-nodemap` repository without associated "
1120 b"accessing `persistent-nodemap` repository without associated "
1121 b"fast implementation."
1121 b"fast implementation."
1122 )
1122 )
1123 hint = _(
1123 hint = _(
1124 b"check `hg help config.format.use-persistent-nodemap` "
1124 b"check `hg help config.format.use-persistent-nodemap` "
1125 b"for details"
1125 b"for details"
1126 )
1126 )
1127 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1127 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1128 if slow_path == b'warn':
1128 if slow_path == b'warn':
1129 msg = b"warning: " + msg + b'\n'
1129 msg = b"warning: " + msg + b'\n'
1130 ui.warn(msg)
1130 ui.warn(msg)
1131 if not ui.quiet:
1131 if not ui.quiet:
1132 hint = b'(' + hint + b')\n'
1132 hint = b'(' + hint + b')\n'
1133 ui.warn(hint)
1133 ui.warn(hint)
1134 if slow_path == b'abort':
1134 if slow_path == b'abort':
1135 raise error.Abort(msg, hint=hint)
1135 raise error.Abort(msg, hint=hint)
1136 options[b'persistent-nodemap'] = True
1136 options[b'persistent-nodemap'] = True
1137 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1137 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1138 options[b'persistent-nodemap.mmap'] = True
1138 options[b'persistent-nodemap.mmap'] = True
1139 if ui.configbool(b'devel', b'persistent-nodemap'):
1139 if ui.configbool(b'devel', b'persistent-nodemap'):
1140 options[b'devel-force-nodemap'] = True
1140 options[b'devel-force-nodemap'] = True
1141
1141
1142 return options
1142 return options
1143
1143
1144
1144
1145 def makemain(**kwargs):
1145 def makemain(**kwargs):
1146 """Produce a type conforming to ``ilocalrepositorymain``."""
1146 """Produce a type conforming to ``ilocalrepositorymain``."""
1147 return localrepository
1147 return localrepository
1148
1148
1149
1149
1150 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1150 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1151 class revlogfilestorage(object):
1151 class revlogfilestorage(object):
1152 """File storage when using revlogs."""
1152 """File storage when using revlogs."""
1153
1153
1154 def file(self, path):
1154 def file(self, path):
1155 if path.startswith(b'/'):
1155 if path.startswith(b'/'):
1156 path = path[1:]
1156 path = path[1:]
1157
1157
1158 return filelog.filelog(self.svfs, path)
1158 return filelog.filelog(self.svfs, path)
1159
1159
1160
1160
1161 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1161 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1162 class revlognarrowfilestorage(object):
1162 class revlognarrowfilestorage(object):
1163 """File storage when using revlogs and narrow files."""
1163 """File storage when using revlogs and narrow files."""
1164
1164
1165 def file(self, path):
1165 def file(self, path):
1166 if path.startswith(b'/'):
1166 if path.startswith(b'/'):
1167 path = path[1:]
1167 path = path[1:]
1168
1168
1169 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1169 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1170
1170
1171
1171
1172 def makefilestorage(requirements, features, **kwargs):
1172 def makefilestorage(requirements, features, **kwargs):
1173 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1173 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1174 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1174 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1175 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1175 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1176
1176
1177 if requirementsmod.NARROW_REQUIREMENT in requirements:
1177 if requirementsmod.NARROW_REQUIREMENT in requirements:
1178 return revlognarrowfilestorage
1178 return revlognarrowfilestorage
1179 else:
1179 else:
1180 return revlogfilestorage
1180 return revlogfilestorage
1181
1181
1182
1182
1183 # List of repository interfaces and factory functions for them. Each
1183 # List of repository interfaces and factory functions for them. Each
1184 # will be called in order during ``makelocalrepository()`` to iteratively
1184 # will be called in order during ``makelocalrepository()`` to iteratively
1185 # derive the final type for a local repository instance. We capture the
1185 # derive the final type for a local repository instance. We capture the
1186 # function as a lambda so we don't hold a reference and the module-level
1186 # function as a lambda so we don't hold a reference and the module-level
1187 # functions can be wrapped.
1187 # functions can be wrapped.
1188 REPO_INTERFACES = [
1188 REPO_INTERFACES = [
1189 (repository.ilocalrepositorymain, lambda: makemain),
1189 (repository.ilocalrepositorymain, lambda: makemain),
1190 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1190 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1191 ]
1191 ]
1192
1192
1193
1193
1194 @interfaceutil.implementer(repository.ilocalrepositorymain)
1194 @interfaceutil.implementer(repository.ilocalrepositorymain)
1195 class localrepository(object):
1195 class localrepository(object):
1196 """Main class for representing local repositories.
1196 """Main class for representing local repositories.
1197
1197
1198 All local repositories are instances of this class.
1198 All local repositories are instances of this class.
1199
1199
1200 Constructed on its own, instances of this class are not usable as
1200 Constructed on its own, instances of this class are not usable as
1201 repository objects. To obtain a usable repository object, call
1201 repository objects. To obtain a usable repository object, call
1202 ``hg.repository()``, ``localrepo.instance()``, or
1202 ``hg.repository()``, ``localrepo.instance()``, or
1203 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1203 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1204 ``instance()`` adds support for creating new repositories.
1204 ``instance()`` adds support for creating new repositories.
1205 ``hg.repository()`` adds more extension integration, including calling
1205 ``hg.repository()`` adds more extension integration, including calling
1206 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1206 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1207 used.
1207 used.
1208 """
1208 """
1209
1209
1210 # obsolete experimental requirements:
1210 # obsolete experimental requirements:
1211 # - manifestv2: An experimental new manifest format that allowed
1211 # - manifestv2: An experimental new manifest format that allowed
1212 # for stem compression of long paths. Experiment ended up not
1212 # for stem compression of long paths. Experiment ended up not
1213 # being successful (repository sizes went up due to worse delta
1213 # being successful (repository sizes went up due to worse delta
1214 # chains), and the code was deleted in 4.6.
1214 # chains), and the code was deleted in 4.6.
1215 supportedformats = {
1215 supportedformats = {
1216 requirementsmod.REVLOGV1_REQUIREMENT,
1216 requirementsmod.REVLOGV1_REQUIREMENT,
1217 requirementsmod.GENERALDELTA_REQUIREMENT,
1217 requirementsmod.GENERALDELTA_REQUIREMENT,
1218 requirementsmod.TREEMANIFEST_REQUIREMENT,
1218 requirementsmod.TREEMANIFEST_REQUIREMENT,
1219 requirementsmod.COPIESSDC_REQUIREMENT,
1219 requirementsmod.COPIESSDC_REQUIREMENT,
1220 requirementsmod.REVLOGV2_REQUIREMENT,
1220 requirementsmod.REVLOGV2_REQUIREMENT,
1221 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1221 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1222 requirementsmod.NODEMAP_REQUIREMENT,
1222 requirementsmod.NODEMAP_REQUIREMENT,
1223 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1223 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1224 requirementsmod.SHARESAFE_REQUIREMENT,
1224 requirementsmod.SHARESAFE_REQUIREMENT,
1225 }
1225 }
1226 _basesupported = supportedformats | {
1226 _basesupported = supportedformats | {
1227 requirementsmod.STORE_REQUIREMENT,
1227 requirementsmod.STORE_REQUIREMENT,
1228 requirementsmod.FNCACHE_REQUIREMENT,
1228 requirementsmod.FNCACHE_REQUIREMENT,
1229 requirementsmod.SHARED_REQUIREMENT,
1229 requirementsmod.SHARED_REQUIREMENT,
1230 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1230 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1231 requirementsmod.DOTENCODE_REQUIREMENT,
1231 requirementsmod.DOTENCODE_REQUIREMENT,
1232 requirementsmod.SPARSE_REQUIREMENT,
1232 requirementsmod.SPARSE_REQUIREMENT,
1233 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1233 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1234 }
1234 }
1235
1235
1236 # list of prefix for file which can be written without 'wlock'
1236 # list of prefix for file which can be written without 'wlock'
1237 # Extensions should extend this list when needed
1237 # Extensions should extend this list when needed
1238 _wlockfreeprefix = {
1238 _wlockfreeprefix = {
1239 # We migh consider requiring 'wlock' for the next
1239 # We migh consider requiring 'wlock' for the next
1240 # two, but pretty much all the existing code assume
1240 # two, but pretty much all the existing code assume
1241 # wlock is not needed so we keep them excluded for
1241 # wlock is not needed so we keep them excluded for
1242 # now.
1242 # now.
1243 b'hgrc',
1243 b'hgrc',
1244 b'requires',
1244 b'requires',
1245 # XXX cache is a complicatged business someone
1245 # XXX cache is a complicatged business someone
1246 # should investigate this in depth at some point
1246 # should investigate this in depth at some point
1247 b'cache/',
1247 b'cache/',
1248 # XXX shouldn't be dirstate covered by the wlock?
1248 # XXX shouldn't be dirstate covered by the wlock?
1249 b'dirstate',
1249 b'dirstate',
1250 # XXX bisect was still a bit too messy at the time
1250 # XXX bisect was still a bit too messy at the time
1251 # this changeset was introduced. Someone should fix
1251 # this changeset was introduced. Someone should fix
1252 # the remainig bit and drop this line
1252 # the remainig bit and drop this line
1253 b'bisect.state',
1253 b'bisect.state',
1254 }
1254 }
1255
1255
1256 def __init__(
1256 def __init__(
1257 self,
1257 self,
1258 baseui,
1258 baseui,
1259 ui,
1259 ui,
1260 origroot,
1260 origroot,
1261 wdirvfs,
1261 wdirvfs,
1262 hgvfs,
1262 hgvfs,
1263 requirements,
1263 requirements,
1264 supportedrequirements,
1264 supportedrequirements,
1265 sharedpath,
1265 sharedpath,
1266 store,
1266 store,
1267 cachevfs,
1267 cachevfs,
1268 wcachevfs,
1268 wcachevfs,
1269 features,
1269 features,
1270 intents=None,
1270 intents=None,
1271 ):
1271 ):
1272 """Create a new local repository instance.
1272 """Create a new local repository instance.
1273
1273
1274 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1274 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1275 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1275 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1276 object.
1276 object.
1277
1277
1278 Arguments:
1278 Arguments:
1279
1279
1280 baseui
1280 baseui
1281 ``ui.ui`` instance that ``ui`` argument was based off of.
1281 ``ui.ui`` instance that ``ui`` argument was based off of.
1282
1282
1283 ui
1283 ui
1284 ``ui.ui`` instance for use by the repository.
1284 ``ui.ui`` instance for use by the repository.
1285
1285
1286 origroot
1286 origroot
1287 ``bytes`` path to working directory root of this repository.
1287 ``bytes`` path to working directory root of this repository.
1288
1288
1289 wdirvfs
1289 wdirvfs
1290 ``vfs.vfs`` rooted at the working directory.
1290 ``vfs.vfs`` rooted at the working directory.
1291
1291
1292 hgvfs
1292 hgvfs
1293 ``vfs.vfs`` rooted at .hg/
1293 ``vfs.vfs`` rooted at .hg/
1294
1294
1295 requirements
1295 requirements
1296 ``set`` of bytestrings representing repository opening requirements.
1296 ``set`` of bytestrings representing repository opening requirements.
1297
1297
1298 supportedrequirements
1298 supportedrequirements
1299 ``set`` of bytestrings representing repository requirements that we
1299 ``set`` of bytestrings representing repository requirements that we
1300 know how to open. May be a supetset of ``requirements``.
1300 know how to open. May be a supetset of ``requirements``.
1301
1301
1302 sharedpath
1302 sharedpath
1303 ``bytes`` Defining path to storage base directory. Points to a
1303 ``bytes`` Defining path to storage base directory. Points to a
1304 ``.hg/`` directory somewhere.
1304 ``.hg/`` directory somewhere.
1305
1305
1306 store
1306 store
1307 ``store.basicstore`` (or derived) instance providing access to
1307 ``store.basicstore`` (or derived) instance providing access to
1308 versioned storage.
1308 versioned storage.
1309
1309
1310 cachevfs
1310 cachevfs
1311 ``vfs.vfs`` used for cache files.
1311 ``vfs.vfs`` used for cache files.
1312
1312
1313 wcachevfs
1313 wcachevfs
1314 ``vfs.vfs`` used for cache files related to the working copy.
1314 ``vfs.vfs`` used for cache files related to the working copy.
1315
1315
1316 features
1316 features
1317 ``set`` of bytestrings defining features/capabilities of this
1317 ``set`` of bytestrings defining features/capabilities of this
1318 instance.
1318 instance.
1319
1319
1320 intents
1320 intents
1321 ``set`` of system strings indicating what this repo will be used
1321 ``set`` of system strings indicating what this repo will be used
1322 for.
1322 for.
1323 """
1323 """
1324 self.baseui = baseui
1324 self.baseui = baseui
1325 self.ui = ui
1325 self.ui = ui
1326 self.origroot = origroot
1326 self.origroot = origroot
1327 # vfs rooted at working directory.
1327 # vfs rooted at working directory.
1328 self.wvfs = wdirvfs
1328 self.wvfs = wdirvfs
1329 self.root = wdirvfs.base
1329 self.root = wdirvfs.base
1330 # vfs rooted at .hg/. Used to access most non-store paths.
1330 # vfs rooted at .hg/. Used to access most non-store paths.
1331 self.vfs = hgvfs
1331 self.vfs = hgvfs
1332 self.path = hgvfs.base
1332 self.path = hgvfs.base
1333 self.requirements = requirements
1333 self.requirements = requirements
1334 self.nodeconstants = sha1nodeconstants
1334 self.nodeconstants = sha1nodeconstants
1335 self.nullid = self.nodeconstants.nullid
1335 self.nullid = self.nodeconstants.nullid
1336 self.supported = supportedrequirements
1336 self.supported = supportedrequirements
1337 self.sharedpath = sharedpath
1337 self.sharedpath = sharedpath
1338 self.store = store
1338 self.store = store
1339 self.cachevfs = cachevfs
1339 self.cachevfs = cachevfs
1340 self.wcachevfs = wcachevfs
1340 self.wcachevfs = wcachevfs
1341 self.features = features
1341 self.features = features
1342
1342
1343 self.filtername = None
1343 self.filtername = None
1344
1344
1345 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1345 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1346 b'devel', b'check-locks'
1346 b'devel', b'check-locks'
1347 ):
1347 ):
1348 self.vfs.audit = self._getvfsward(self.vfs.audit)
1348 self.vfs.audit = self._getvfsward(self.vfs.audit)
1349 # A list of callback to shape the phase if no data were found.
1349 # A list of callback to shape the phase if no data were found.
1350 # Callback are in the form: func(repo, roots) --> processed root.
1350 # Callback are in the form: func(repo, roots) --> processed root.
1351 # This list it to be filled by extension during repo setup
1351 # This list it to be filled by extension during repo setup
1352 self._phasedefaults = []
1352 self._phasedefaults = []
1353
1353
1354 color.setup(self.ui)
1354 color.setup(self.ui)
1355
1355
1356 self.spath = self.store.path
1356 self.spath = self.store.path
1357 self.svfs = self.store.vfs
1357 self.svfs = self.store.vfs
1358 self.sjoin = self.store.join
1358 self.sjoin = self.store.join
1359 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1359 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1360 b'devel', b'check-locks'
1360 b'devel', b'check-locks'
1361 ):
1361 ):
1362 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1362 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1363 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1363 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1364 else: # standard vfs
1364 else: # standard vfs
1365 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1365 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1366
1366
1367 self._dirstatevalidatewarned = False
1367 self._dirstatevalidatewarned = False
1368
1368
1369 self._branchcaches = branchmap.BranchMapCache()
1369 self._branchcaches = branchmap.BranchMapCache()
1370 self._revbranchcache = None
1370 self._revbranchcache = None
1371 self._filterpats = {}
1371 self._filterpats = {}
1372 self._datafilters = {}
1372 self._datafilters = {}
1373 self._transref = self._lockref = self._wlockref = None
1373 self._transref = self._lockref = self._wlockref = None
1374
1374
1375 # A cache for various files under .hg/ that tracks file changes,
1375 # A cache for various files under .hg/ that tracks file changes,
1376 # (used by the filecache decorator)
1376 # (used by the filecache decorator)
1377 #
1377 #
1378 # Maps a property name to its util.filecacheentry
1378 # Maps a property name to its util.filecacheentry
1379 self._filecache = {}
1379 self._filecache = {}
1380
1380
1381 # hold sets of revision to be filtered
1381 # hold sets of revision to be filtered
1382 # should be cleared when something might have changed the filter value:
1382 # should be cleared when something might have changed the filter value:
1383 # - new changesets,
1383 # - new changesets,
1384 # - phase change,
1384 # - phase change,
1385 # - new obsolescence marker,
1385 # - new obsolescence marker,
1386 # - working directory parent change,
1386 # - working directory parent change,
1387 # - bookmark changes
1387 # - bookmark changes
1388 self.filteredrevcache = {}
1388 self.filteredrevcache = {}
1389
1389
1390 # post-dirstate-status hooks
1390 # post-dirstate-status hooks
1391 self._postdsstatus = []
1391 self._postdsstatus = []
1392
1392
1393 # generic mapping between names and nodes
1393 # generic mapping between names and nodes
1394 self.names = namespaces.namespaces()
1394 self.names = namespaces.namespaces()
1395
1395
1396 # Key to signature value.
1396 # Key to signature value.
1397 self._sparsesignaturecache = {}
1397 self._sparsesignaturecache = {}
1398 # Signature to cached matcher instance.
1398 # Signature to cached matcher instance.
1399 self._sparsematchercache = {}
1399 self._sparsematchercache = {}
1400
1400
1401 self._extrafilterid = repoview.extrafilter(ui)
1401 self._extrafilterid = repoview.extrafilter(ui)
1402
1402
1403 self.filecopiesmode = None
1403 self.filecopiesmode = None
1404 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1404 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1405 self.filecopiesmode = b'changeset-sidedata'
1405 self.filecopiesmode = b'changeset-sidedata'
1406
1406
1407 self._wanted_sidedata = set()
1407 self._wanted_sidedata = set()
1408 self._sidedata_computers = {}
1408 self._sidedata_computers = {}
1409 sidedatamod.set_sidedata_spec_for_repo(self)
1409 sidedatamod.set_sidedata_spec_for_repo(self)
1410
1410
1411 def _getvfsward(self, origfunc):
1411 def _getvfsward(self, origfunc):
1412 """build a ward for self.vfs"""
1412 """build a ward for self.vfs"""
1413 rref = weakref.ref(self)
1413 rref = weakref.ref(self)
1414
1414
1415 def checkvfs(path, mode=None):
1415 def checkvfs(path, mode=None):
1416 ret = origfunc(path, mode=mode)
1416 ret = origfunc(path, mode=mode)
1417 repo = rref()
1417 repo = rref()
1418 if (
1418 if (
1419 repo is None
1419 repo is None
1420 or not util.safehasattr(repo, b'_wlockref')
1420 or not util.safehasattr(repo, b'_wlockref')
1421 or not util.safehasattr(repo, b'_lockref')
1421 or not util.safehasattr(repo, b'_lockref')
1422 ):
1422 ):
1423 return
1423 return
1424 if mode in (None, b'r', b'rb'):
1424 if mode in (None, b'r', b'rb'):
1425 return
1425 return
1426 if path.startswith(repo.path):
1426 if path.startswith(repo.path):
1427 # truncate name relative to the repository (.hg)
1427 # truncate name relative to the repository (.hg)
1428 path = path[len(repo.path) + 1 :]
1428 path = path[len(repo.path) + 1 :]
1429 if path.startswith(b'cache/'):
1429 if path.startswith(b'cache/'):
1430 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1430 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1431 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1431 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1432 # path prefixes covered by 'lock'
1432 # path prefixes covered by 'lock'
1433 vfs_path_prefixes = (
1433 vfs_path_prefixes = (
1434 b'journal.',
1434 b'journal.',
1435 b'undo.',
1435 b'undo.',
1436 b'strip-backup/',
1436 b'strip-backup/',
1437 b'cache/',
1437 b'cache/',
1438 )
1438 )
1439 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1439 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1440 if repo._currentlock(repo._lockref) is None:
1440 if repo._currentlock(repo._lockref) is None:
1441 repo.ui.develwarn(
1441 repo.ui.develwarn(
1442 b'write with no lock: "%s"' % path,
1442 b'write with no lock: "%s"' % path,
1443 stacklevel=3,
1443 stacklevel=3,
1444 config=b'check-locks',
1444 config=b'check-locks',
1445 )
1445 )
1446 elif repo._currentlock(repo._wlockref) is None:
1446 elif repo._currentlock(repo._wlockref) is None:
1447 # rest of vfs files are covered by 'wlock'
1447 # rest of vfs files are covered by 'wlock'
1448 #
1448 #
1449 # exclude special files
1449 # exclude special files
1450 for prefix in self._wlockfreeprefix:
1450 for prefix in self._wlockfreeprefix:
1451 if path.startswith(prefix):
1451 if path.startswith(prefix):
1452 return
1452 return
1453 repo.ui.develwarn(
1453 repo.ui.develwarn(
1454 b'write with no wlock: "%s"' % path,
1454 b'write with no wlock: "%s"' % path,
1455 stacklevel=3,
1455 stacklevel=3,
1456 config=b'check-locks',
1456 config=b'check-locks',
1457 )
1457 )
1458 return ret
1458 return ret
1459
1459
1460 return checkvfs
1460 return checkvfs
1461
1461
1462 def _getsvfsward(self, origfunc):
1462 def _getsvfsward(self, origfunc):
1463 """build a ward for self.svfs"""
1463 """build a ward for self.svfs"""
1464 rref = weakref.ref(self)
1464 rref = weakref.ref(self)
1465
1465
1466 def checksvfs(path, mode=None):
1466 def checksvfs(path, mode=None):
1467 ret = origfunc(path, mode=mode)
1467 ret = origfunc(path, mode=mode)
1468 repo = rref()
1468 repo = rref()
1469 if repo is None or not util.safehasattr(repo, b'_lockref'):
1469 if repo is None or not util.safehasattr(repo, b'_lockref'):
1470 return
1470 return
1471 if mode in (None, b'r', b'rb'):
1471 if mode in (None, b'r', b'rb'):
1472 return
1472 return
1473 if path.startswith(repo.sharedpath):
1473 if path.startswith(repo.sharedpath):
1474 # truncate name relative to the repository (.hg)
1474 # truncate name relative to the repository (.hg)
1475 path = path[len(repo.sharedpath) + 1 :]
1475 path = path[len(repo.sharedpath) + 1 :]
1476 if repo._currentlock(repo._lockref) is None:
1476 if repo._currentlock(repo._lockref) is None:
1477 repo.ui.develwarn(
1477 repo.ui.develwarn(
1478 b'write with no lock: "%s"' % path, stacklevel=4
1478 b'write with no lock: "%s"' % path, stacklevel=4
1479 )
1479 )
1480 return ret
1480 return ret
1481
1481
1482 return checksvfs
1482 return checksvfs
1483
1483
1484 def close(self):
1484 def close(self):
1485 self._writecaches()
1485 self._writecaches()
1486
1486
1487 def _writecaches(self):
1487 def _writecaches(self):
1488 if self._revbranchcache:
1488 if self._revbranchcache:
1489 self._revbranchcache.write()
1489 self._revbranchcache.write()
1490
1490
1491 def _restrictcapabilities(self, caps):
1491 def _restrictcapabilities(self, caps):
1492 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1492 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1493 caps = set(caps)
1493 caps = set(caps)
1494 capsblob = bundle2.encodecaps(
1494 capsblob = bundle2.encodecaps(
1495 bundle2.getrepocaps(self, role=b'client')
1495 bundle2.getrepocaps(self, role=b'client')
1496 )
1496 )
1497 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1497 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1498 if self.ui.configbool(b'experimental', b'narrow'):
1498 if self.ui.configbool(b'experimental', b'narrow'):
1499 caps.add(wireprototypes.NARROWCAP)
1499 caps.add(wireprototypes.NARROWCAP)
1500 return caps
1500 return caps
1501
1501
1502 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1502 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1503 # self -> auditor -> self._checknested -> self
1503 # self -> auditor -> self._checknested -> self
1504
1504
1505 @property
1505 @property
1506 def auditor(self):
1506 def auditor(self):
1507 # This is only used by context.workingctx.match in order to
1507 # This is only used by context.workingctx.match in order to
1508 # detect files in subrepos.
1508 # detect files in subrepos.
1509 return pathutil.pathauditor(self.root, callback=self._checknested)
1509 return pathutil.pathauditor(self.root, callback=self._checknested)
1510
1510
1511 @property
1511 @property
1512 def nofsauditor(self):
1512 def nofsauditor(self):
1513 # This is only used by context.basectx.match in order to detect
1513 # This is only used by context.basectx.match in order to detect
1514 # files in subrepos.
1514 # files in subrepos.
1515 return pathutil.pathauditor(
1515 return pathutil.pathauditor(
1516 self.root, callback=self._checknested, realfs=False, cached=True
1516 self.root, callback=self._checknested, realfs=False, cached=True
1517 )
1517 )
1518
1518
1519 def _checknested(self, path):
1519 def _checknested(self, path):
1520 """Determine if path is a legal nested repository."""
1520 """Determine if path is a legal nested repository."""
1521 if not path.startswith(self.root):
1521 if not path.startswith(self.root):
1522 return False
1522 return False
1523 subpath = path[len(self.root) + 1 :]
1523 subpath = path[len(self.root) + 1 :]
1524 normsubpath = util.pconvert(subpath)
1524 normsubpath = util.pconvert(subpath)
1525
1525
1526 # XXX: Checking against the current working copy is wrong in
1526 # XXX: Checking against the current working copy is wrong in
1527 # the sense that it can reject things like
1527 # the sense that it can reject things like
1528 #
1528 #
1529 # $ hg cat -r 10 sub/x.txt
1529 # $ hg cat -r 10 sub/x.txt
1530 #
1530 #
1531 # if sub/ is no longer a subrepository in the working copy
1531 # if sub/ is no longer a subrepository in the working copy
1532 # parent revision.
1532 # parent revision.
1533 #
1533 #
1534 # However, it can of course also allow things that would have
1534 # However, it can of course also allow things that would have
1535 # been rejected before, such as the above cat command if sub/
1535 # been rejected before, such as the above cat command if sub/
1536 # is a subrepository now, but was a normal directory before.
1536 # is a subrepository now, but was a normal directory before.
1537 # The old path auditor would have rejected by mistake since it
1537 # The old path auditor would have rejected by mistake since it
1538 # panics when it sees sub/.hg/.
1538 # panics when it sees sub/.hg/.
1539 #
1539 #
1540 # All in all, checking against the working copy seems sensible
1540 # All in all, checking against the working copy seems sensible
1541 # since we want to prevent access to nested repositories on
1541 # since we want to prevent access to nested repositories on
1542 # the filesystem *now*.
1542 # the filesystem *now*.
1543 ctx = self[None]
1543 ctx = self[None]
1544 parts = util.splitpath(subpath)
1544 parts = util.splitpath(subpath)
1545 while parts:
1545 while parts:
1546 prefix = b'/'.join(parts)
1546 prefix = b'/'.join(parts)
1547 if prefix in ctx.substate:
1547 if prefix in ctx.substate:
1548 if prefix == normsubpath:
1548 if prefix == normsubpath:
1549 return True
1549 return True
1550 else:
1550 else:
1551 sub = ctx.sub(prefix)
1551 sub = ctx.sub(prefix)
1552 return sub.checknested(subpath[len(prefix) + 1 :])
1552 return sub.checknested(subpath[len(prefix) + 1 :])
1553 else:
1553 else:
1554 parts.pop()
1554 parts.pop()
1555 return False
1555 return False
1556
1556
1557 def peer(self):
1557 def peer(self):
1558 return localpeer(self) # not cached to avoid reference cycle
1558 return localpeer(self) # not cached to avoid reference cycle
1559
1559
1560 def unfiltered(self):
1560 def unfiltered(self):
1561 """Return unfiltered version of the repository
1561 """Return unfiltered version of the repository
1562
1562
1563 Intended to be overwritten by filtered repo."""
1563 Intended to be overwritten by filtered repo."""
1564 return self
1564 return self
1565
1565
1566 def filtered(self, name, visibilityexceptions=None):
1566 def filtered(self, name, visibilityexceptions=None):
1567 """Return a filtered version of a repository
1567 """Return a filtered version of a repository
1568
1568
1569 The `name` parameter is the identifier of the requested view. This
1569 The `name` parameter is the identifier of the requested view. This
1570 will return a repoview object set "exactly" to the specified view.
1570 will return a repoview object set "exactly" to the specified view.
1571
1571
1572 This function does not apply recursive filtering to a repository. For
1572 This function does not apply recursive filtering to a repository. For
1573 example calling `repo.filtered("served")` will return a repoview using
1573 example calling `repo.filtered("served")` will return a repoview using
1574 the "served" view, regardless of the initial view used by `repo`.
1574 the "served" view, regardless of the initial view used by `repo`.
1575
1575
1576 In other word, there is always only one level of `repoview` "filtering".
1576 In other word, there is always only one level of `repoview` "filtering".
1577 """
1577 """
1578 if self._extrafilterid is not None and b'%' not in name:
1578 if self._extrafilterid is not None and b'%' not in name:
1579 name = name + b'%' + self._extrafilterid
1579 name = name + b'%' + self._extrafilterid
1580
1580
1581 cls = repoview.newtype(self.unfiltered().__class__)
1581 cls = repoview.newtype(self.unfiltered().__class__)
1582 return cls(self, name, visibilityexceptions)
1582 return cls(self, name, visibilityexceptions)
1583
1583
1584 @mixedrepostorecache(
1584 @mixedrepostorecache(
1585 (b'bookmarks', b'plain'),
1585 (b'bookmarks', b'plain'),
1586 (b'bookmarks.current', b'plain'),
1586 (b'bookmarks.current', b'plain'),
1587 (b'bookmarks', b''),
1587 (b'bookmarks', b''),
1588 (b'00changelog.i', b''),
1588 (b'00changelog.i', b''),
1589 )
1589 )
1590 def _bookmarks(self):
1590 def _bookmarks(self):
1591 # Since the multiple files involved in the transaction cannot be
1591 # Since the multiple files involved in the transaction cannot be
1592 # written atomically (with current repository format), there is a race
1592 # written atomically (with current repository format), there is a race
1593 # condition here.
1593 # condition here.
1594 #
1594 #
1595 # 1) changelog content A is read
1595 # 1) changelog content A is read
1596 # 2) outside transaction update changelog to content B
1596 # 2) outside transaction update changelog to content B
1597 # 3) outside transaction update bookmark file referring to content B
1597 # 3) outside transaction update bookmark file referring to content B
1598 # 4) bookmarks file content is read and filtered against changelog-A
1598 # 4) bookmarks file content is read and filtered against changelog-A
1599 #
1599 #
1600 # When this happens, bookmarks against nodes missing from A are dropped.
1600 # When this happens, bookmarks against nodes missing from A are dropped.
1601 #
1601 #
1602 # Having this happening during read is not great, but it become worse
1602 # Having this happening during read is not great, but it become worse
1603 # when this happen during write because the bookmarks to the "unknown"
1603 # when this happen during write because the bookmarks to the "unknown"
1604 # nodes will be dropped for good. However, writes happen within locks.
1604 # nodes will be dropped for good. However, writes happen within locks.
1605 # This locking makes it possible to have a race free consistent read.
1605 # This locking makes it possible to have a race free consistent read.
1606 # For this purpose data read from disc before locking are
1606 # For this purpose data read from disc before locking are
1607 # "invalidated" right after the locks are taken. This invalidations are
1607 # "invalidated" right after the locks are taken. This invalidations are
1608 # "light", the `filecache` mechanism keep the data in memory and will
1608 # "light", the `filecache` mechanism keep the data in memory and will
1609 # reuse them if the underlying files did not changed. Not parsing the
1609 # reuse them if the underlying files did not changed. Not parsing the
1610 # same data multiple times helps performances.
1610 # same data multiple times helps performances.
1611 #
1611 #
1612 # Unfortunately in the case describe above, the files tracked by the
1612 # Unfortunately in the case describe above, the files tracked by the
1613 # bookmarks file cache might not have changed, but the in-memory
1613 # bookmarks file cache might not have changed, but the in-memory
1614 # content is still "wrong" because we used an older changelog content
1614 # content is still "wrong" because we used an older changelog content
1615 # to process the on-disk data. So after locking, the changelog would be
1615 # to process the on-disk data. So after locking, the changelog would be
1616 # refreshed but `_bookmarks` would be preserved.
1616 # refreshed but `_bookmarks` would be preserved.
1617 # Adding `00changelog.i` to the list of tracked file is not
1617 # Adding `00changelog.i` to the list of tracked file is not
1618 # enough, because at the time we build the content for `_bookmarks` in
1618 # enough, because at the time we build the content for `_bookmarks` in
1619 # (4), the changelog file has already diverged from the content used
1619 # (4), the changelog file has already diverged from the content used
1620 # for loading `changelog` in (1)
1620 # for loading `changelog` in (1)
1621 #
1621 #
1622 # To prevent the issue, we force the changelog to be explicitly
1622 # To prevent the issue, we force the changelog to be explicitly
1623 # reloaded while computing `_bookmarks`. The data race can still happen
1623 # reloaded while computing `_bookmarks`. The data race can still happen
1624 # without the lock (with a narrower window), but it would no longer go
1624 # without the lock (with a narrower window), but it would no longer go
1625 # undetected during the lock time refresh.
1625 # undetected during the lock time refresh.
1626 #
1626 #
1627 # The new schedule is as follow
1627 # The new schedule is as follow
1628 #
1628 #
1629 # 1) filecache logic detect that `_bookmarks` needs to be computed
1629 # 1) filecache logic detect that `_bookmarks` needs to be computed
1630 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1630 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1631 # 3) We force `changelog` filecache to be tested
1631 # 3) We force `changelog` filecache to be tested
1632 # 4) cachestat for `changelog` are captured (for changelog)
1632 # 4) cachestat for `changelog` are captured (for changelog)
1633 # 5) `_bookmarks` is computed and cached
1633 # 5) `_bookmarks` is computed and cached
1634 #
1634 #
1635 # The step in (3) ensure we have a changelog at least as recent as the
1635 # The step in (3) ensure we have a changelog at least as recent as the
1636 # cache stat computed in (1). As a result at locking time:
1636 # cache stat computed in (1). As a result at locking time:
1637 # * if the changelog did not changed since (1) -> we can reuse the data
1637 # * if the changelog did not changed since (1) -> we can reuse the data
1638 # * otherwise -> the bookmarks get refreshed.
1638 # * otherwise -> the bookmarks get refreshed.
1639 self._refreshchangelog()
1639 self._refreshchangelog()
1640 return bookmarks.bmstore(self)
1640 return bookmarks.bmstore(self)
1641
1641
1642 def _refreshchangelog(self):
1642 def _refreshchangelog(self):
1643 """make sure the in memory changelog match the on-disk one"""
1643 """make sure the in memory changelog match the on-disk one"""
1644 if 'changelog' in vars(self) and self.currenttransaction() is None:
1644 if 'changelog' in vars(self) and self.currenttransaction() is None:
1645 del self.changelog
1645 del self.changelog
1646
1646
1647 @property
1647 @property
1648 def _activebookmark(self):
1648 def _activebookmark(self):
1649 return self._bookmarks.active
1649 return self._bookmarks.active
1650
1650
1651 # _phasesets depend on changelog. what we need is to call
1651 # _phasesets depend on changelog. what we need is to call
1652 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1652 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1653 # can't be easily expressed in filecache mechanism.
1653 # can't be easily expressed in filecache mechanism.
1654 @storecache(b'phaseroots', b'00changelog.i')
1654 @storecache(b'phaseroots', b'00changelog.i')
1655 def _phasecache(self):
1655 def _phasecache(self):
1656 return phases.phasecache(self, self._phasedefaults)
1656 return phases.phasecache(self, self._phasedefaults)
1657
1657
1658 @storecache(b'obsstore')
1658 @storecache(b'obsstore')
1659 def obsstore(self):
1659 def obsstore(self):
1660 return obsolete.makestore(self.ui, self)
1660 return obsolete.makestore(self.ui, self)
1661
1661
1662 @storecache(b'00changelog.i')
1662 @storecache(b'00changelog.i')
1663 def changelog(self):
1663 def changelog(self):
1664 # load dirstate before changelog to avoid race see issue6303
1664 # load dirstate before changelog to avoid race see issue6303
1665 self.dirstate.prefetch_parents()
1665 self.dirstate.prefetch_parents()
1666 return self.store.changelog(
1666 return self.store.changelog(
1667 txnutil.mayhavepending(self.root),
1667 txnutil.mayhavepending(self.root),
1668 concurrencychecker=revlogchecker.get_checker(self.ui, b'changelog'),
1668 concurrencychecker=revlogchecker.get_checker(self.ui, b'changelog'),
1669 )
1669 )
1670
1670
1671 @storecache(b'00manifest.i')
1671 @storecache(b'00manifest.i')
1672 def manifestlog(self):
1672 def manifestlog(self):
1673 return self.store.manifestlog(self, self._storenarrowmatch)
1673 return self.store.manifestlog(self, self._storenarrowmatch)
1674
1674
1675 @repofilecache(b'dirstate')
1675 @repofilecache(b'dirstate')
1676 def dirstate(self):
1676 def dirstate(self):
1677 return self._makedirstate()
1677 return self._makedirstate()
1678
1678
1679 def _makedirstate(self):
1679 def _makedirstate(self):
1680 """Extension point for wrapping the dirstate per-repo."""
1680 """Extension point for wrapping the dirstate per-repo."""
1681 sparsematchfn = lambda: sparse.matcher(self)
1681 sparsematchfn = lambda: sparse.matcher(self)
1682
1682
1683 return dirstate.dirstate(
1683 return dirstate.dirstate(
1684 self.vfs,
1684 self.vfs,
1685 self.ui,
1685 self.ui,
1686 self.root,
1686 self.root,
1687 self._dirstatevalidate,
1687 self._dirstatevalidate,
1688 sparsematchfn,
1688 sparsematchfn,
1689 self.nodeconstants,
1689 self.nodeconstants,
1690 )
1690 )
1691
1691
1692 def _dirstatevalidate(self, node):
1692 def _dirstatevalidate(self, node):
1693 try:
1693 try:
1694 self.changelog.rev(node)
1694 self.changelog.rev(node)
1695 return node
1695 return node
1696 except error.LookupError:
1696 except error.LookupError:
1697 if not self._dirstatevalidatewarned:
1697 if not self._dirstatevalidatewarned:
1698 self._dirstatevalidatewarned = True
1698 self._dirstatevalidatewarned = True
1699 self.ui.warn(
1699 self.ui.warn(
1700 _(b"warning: ignoring unknown working parent %s!\n")
1700 _(b"warning: ignoring unknown working parent %s!\n")
1701 % short(node)
1701 % short(node)
1702 )
1702 )
1703 return self.nullid
1703 return self.nullid
1704
1704
1705 @storecache(narrowspec.FILENAME)
1705 @storecache(narrowspec.FILENAME)
1706 def narrowpats(self):
1706 def narrowpats(self):
1707 """matcher patterns for this repository's narrowspec
1707 """matcher patterns for this repository's narrowspec
1708
1708
1709 A tuple of (includes, excludes).
1709 A tuple of (includes, excludes).
1710 """
1710 """
1711 return narrowspec.load(self)
1711 return narrowspec.load(self)
1712
1712
1713 @storecache(narrowspec.FILENAME)
1713 @storecache(narrowspec.FILENAME)
1714 def _storenarrowmatch(self):
1714 def _storenarrowmatch(self):
1715 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1715 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1716 return matchmod.always()
1716 return matchmod.always()
1717 include, exclude = self.narrowpats
1717 include, exclude = self.narrowpats
1718 return narrowspec.match(self.root, include=include, exclude=exclude)
1718 return narrowspec.match(self.root, include=include, exclude=exclude)
1719
1719
1720 @storecache(narrowspec.FILENAME)
1720 @storecache(narrowspec.FILENAME)
1721 def _narrowmatch(self):
1721 def _narrowmatch(self):
1722 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1722 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1723 return matchmod.always()
1723 return matchmod.always()
1724 narrowspec.checkworkingcopynarrowspec(self)
1724 narrowspec.checkworkingcopynarrowspec(self)
1725 include, exclude = self.narrowpats
1725 include, exclude = self.narrowpats
1726 return narrowspec.match(self.root, include=include, exclude=exclude)
1726 return narrowspec.match(self.root, include=include, exclude=exclude)
1727
1727
1728 def narrowmatch(self, match=None, includeexact=False):
1728 def narrowmatch(self, match=None, includeexact=False):
1729 """matcher corresponding the the repo's narrowspec
1729 """matcher corresponding the the repo's narrowspec
1730
1730
1731 If `match` is given, then that will be intersected with the narrow
1731 If `match` is given, then that will be intersected with the narrow
1732 matcher.
1732 matcher.
1733
1733
1734 If `includeexact` is True, then any exact matches from `match` will
1734 If `includeexact` is True, then any exact matches from `match` will
1735 be included even if they're outside the narrowspec.
1735 be included even if they're outside the narrowspec.
1736 """
1736 """
1737 if match:
1737 if match:
1738 if includeexact and not self._narrowmatch.always():
1738 if includeexact and not self._narrowmatch.always():
1739 # do not exclude explicitly-specified paths so that they can
1739 # do not exclude explicitly-specified paths so that they can
1740 # be warned later on
1740 # be warned later on
1741 em = matchmod.exact(match.files())
1741 em = matchmod.exact(match.files())
1742 nm = matchmod.unionmatcher([self._narrowmatch, em])
1742 nm = matchmod.unionmatcher([self._narrowmatch, em])
1743 return matchmod.intersectmatchers(match, nm)
1743 return matchmod.intersectmatchers(match, nm)
1744 return matchmod.intersectmatchers(match, self._narrowmatch)
1744 return matchmod.intersectmatchers(match, self._narrowmatch)
1745 return self._narrowmatch
1745 return self._narrowmatch
1746
1746
1747 def setnarrowpats(self, newincludes, newexcludes):
1747 def setnarrowpats(self, newincludes, newexcludes):
1748 narrowspec.save(self, newincludes, newexcludes)
1748 narrowspec.save(self, newincludes, newexcludes)
1749 self.invalidate(clearfilecache=True)
1749 self.invalidate(clearfilecache=True)
1750
1750
1751 @unfilteredpropertycache
1751 @unfilteredpropertycache
1752 def _quick_access_changeid_null(self):
1752 def _quick_access_changeid_null(self):
1753 return {
1753 return {
1754 b'null': (nullrev, self.nodeconstants.nullid),
1754 b'null': (nullrev, self.nodeconstants.nullid),
1755 nullrev: (nullrev, self.nodeconstants.nullid),
1755 nullrev: (nullrev, self.nodeconstants.nullid),
1756 self.nullid: (nullrev, self.nullid),
1756 self.nullid: (nullrev, self.nullid),
1757 }
1757 }
1758
1758
1759 @unfilteredpropertycache
1759 @unfilteredpropertycache
1760 def _quick_access_changeid_wc(self):
1760 def _quick_access_changeid_wc(self):
1761 # also fast path access to the working copy parents
1761 # also fast path access to the working copy parents
1762 # however, only do it for filter that ensure wc is visible.
1762 # however, only do it for filter that ensure wc is visible.
1763 quick = self._quick_access_changeid_null.copy()
1763 quick = self._quick_access_changeid_null.copy()
1764 cl = self.unfiltered().changelog
1764 cl = self.unfiltered().changelog
1765 for node in self.dirstate.parents():
1765 for node in self.dirstate.parents():
1766 if node == self.nullid:
1766 if node == self.nullid:
1767 continue
1767 continue
1768 rev = cl.index.get_rev(node)
1768 rev = cl.index.get_rev(node)
1769 if rev is None:
1769 if rev is None:
1770 # unknown working copy parent case:
1770 # unknown working copy parent case:
1771 #
1771 #
1772 # skip the fast path and let higher code deal with it
1772 # skip the fast path and let higher code deal with it
1773 continue
1773 continue
1774 pair = (rev, node)
1774 pair = (rev, node)
1775 quick[rev] = pair
1775 quick[rev] = pair
1776 quick[node] = pair
1776 quick[node] = pair
1777 # also add the parents of the parents
1777 # also add the parents of the parents
1778 for r in cl.parentrevs(rev):
1778 for r in cl.parentrevs(rev):
1779 if r == nullrev:
1779 if r == nullrev:
1780 continue
1780 continue
1781 n = cl.node(r)
1781 n = cl.node(r)
1782 pair = (r, n)
1782 pair = (r, n)
1783 quick[r] = pair
1783 quick[r] = pair
1784 quick[n] = pair
1784 quick[n] = pair
1785 p1node = self.dirstate.p1()
1785 p1node = self.dirstate.p1()
1786 if p1node != self.nullid:
1786 if p1node != self.nullid:
1787 quick[b'.'] = quick[p1node]
1787 quick[b'.'] = quick[p1node]
1788 return quick
1788 return quick
1789
1789
1790 @unfilteredmethod
1790 @unfilteredmethod
1791 def _quick_access_changeid_invalidate(self):
1791 def _quick_access_changeid_invalidate(self):
1792 if '_quick_access_changeid_wc' in vars(self):
1792 if '_quick_access_changeid_wc' in vars(self):
1793 del self.__dict__['_quick_access_changeid_wc']
1793 del self.__dict__['_quick_access_changeid_wc']
1794
1794
1795 @property
1795 @property
1796 def _quick_access_changeid(self):
1796 def _quick_access_changeid(self):
1797 """an helper dictionnary for __getitem__ calls
1797 """an helper dictionnary for __getitem__ calls
1798
1798
1799 This contains a list of symbol we can recognise right away without
1799 This contains a list of symbol we can recognise right away without
1800 further processing.
1800 further processing.
1801 """
1801 """
1802 if self.filtername in repoview.filter_has_wc:
1802 if self.filtername in repoview.filter_has_wc:
1803 return self._quick_access_changeid_wc
1803 return self._quick_access_changeid_wc
1804 return self._quick_access_changeid_null
1804 return self._quick_access_changeid_null
1805
1805
1806 def __getitem__(self, changeid):
1806 def __getitem__(self, changeid):
1807 # dealing with special cases
1807 # dealing with special cases
1808 if changeid is None:
1808 if changeid is None:
1809 return context.workingctx(self)
1809 return context.workingctx(self)
1810 if isinstance(changeid, context.basectx):
1810 if isinstance(changeid, context.basectx):
1811 return changeid
1811 return changeid
1812
1812
1813 # dealing with multiple revisions
1813 # dealing with multiple revisions
1814 if isinstance(changeid, slice):
1814 if isinstance(changeid, slice):
1815 # wdirrev isn't contiguous so the slice shouldn't include it
1815 # wdirrev isn't contiguous so the slice shouldn't include it
1816 return [
1816 return [
1817 self[i]
1817 self[i]
1818 for i in pycompat.xrange(*changeid.indices(len(self)))
1818 for i in pycompat.xrange(*changeid.indices(len(self)))
1819 if i not in self.changelog.filteredrevs
1819 if i not in self.changelog.filteredrevs
1820 ]
1820 ]
1821
1821
1822 # dealing with some special values
1822 # dealing with some special values
1823 quick_access = self._quick_access_changeid.get(changeid)
1823 quick_access = self._quick_access_changeid.get(changeid)
1824 if quick_access is not None:
1824 if quick_access is not None:
1825 rev, node = quick_access
1825 rev, node = quick_access
1826 return context.changectx(self, rev, node, maybe_filtered=False)
1826 return context.changectx(self, rev, node, maybe_filtered=False)
1827 if changeid == b'tip':
1827 if changeid == b'tip':
1828 node = self.changelog.tip()
1828 node = self.changelog.tip()
1829 rev = self.changelog.rev(node)
1829 rev = self.changelog.rev(node)
1830 return context.changectx(self, rev, node)
1830 return context.changectx(self, rev, node)
1831
1831
1832 # dealing with arbitrary values
1832 # dealing with arbitrary values
1833 try:
1833 try:
1834 if isinstance(changeid, int):
1834 if isinstance(changeid, int):
1835 node = self.changelog.node(changeid)
1835 node = self.changelog.node(changeid)
1836 rev = changeid
1836 rev = changeid
1837 elif changeid == b'.':
1837 elif changeid == b'.':
1838 # this is a hack to delay/avoid loading obsmarkers
1838 # this is a hack to delay/avoid loading obsmarkers
1839 # when we know that '.' won't be hidden
1839 # when we know that '.' won't be hidden
1840 node = self.dirstate.p1()
1840 node = self.dirstate.p1()
1841 rev = self.unfiltered().changelog.rev(node)
1841 rev = self.unfiltered().changelog.rev(node)
1842 elif len(changeid) == self.nodeconstants.nodelen:
1842 elif len(changeid) == self.nodeconstants.nodelen:
1843 try:
1843 try:
1844 node = changeid
1844 node = changeid
1845 rev = self.changelog.rev(changeid)
1845 rev = self.changelog.rev(changeid)
1846 except error.FilteredLookupError:
1846 except error.FilteredLookupError:
1847 changeid = hex(changeid) # for the error message
1847 changeid = hex(changeid) # for the error message
1848 raise
1848 raise
1849 except LookupError:
1849 except LookupError:
1850 # check if it might have come from damaged dirstate
1850 # check if it might have come from damaged dirstate
1851 #
1851 #
1852 # XXX we could avoid the unfiltered if we had a recognizable
1852 # XXX we could avoid the unfiltered if we had a recognizable
1853 # exception for filtered changeset access
1853 # exception for filtered changeset access
1854 if (
1854 if (
1855 self.local()
1855 self.local()
1856 and changeid in self.unfiltered().dirstate.parents()
1856 and changeid in self.unfiltered().dirstate.parents()
1857 ):
1857 ):
1858 msg = _(b"working directory has unknown parent '%s'!")
1858 msg = _(b"working directory has unknown parent '%s'!")
1859 raise error.Abort(msg % short(changeid))
1859 raise error.Abort(msg % short(changeid))
1860 changeid = hex(changeid) # for the error message
1860 changeid = hex(changeid) # for the error message
1861 raise
1861 raise
1862
1862
1863 elif len(changeid) == 2 * self.nodeconstants.nodelen:
1863 elif len(changeid) == 2 * self.nodeconstants.nodelen:
1864 node = bin(changeid)
1864 node = bin(changeid)
1865 rev = self.changelog.rev(node)
1865 rev = self.changelog.rev(node)
1866 else:
1866 else:
1867 raise error.ProgrammingError(
1867 raise error.ProgrammingError(
1868 b"unsupported changeid '%s' of type %s"
1868 b"unsupported changeid '%s' of type %s"
1869 % (changeid, pycompat.bytestr(type(changeid)))
1869 % (changeid, pycompat.bytestr(type(changeid)))
1870 )
1870 )
1871
1871
1872 return context.changectx(self, rev, node)
1872 return context.changectx(self, rev, node)
1873
1873
1874 except (error.FilteredIndexError, error.FilteredLookupError):
1874 except (error.FilteredIndexError, error.FilteredLookupError):
1875 raise error.FilteredRepoLookupError(
1875 raise error.FilteredRepoLookupError(
1876 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1876 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1877 )
1877 )
1878 except (IndexError, LookupError):
1878 except (IndexError, LookupError):
1879 raise error.RepoLookupError(
1879 raise error.RepoLookupError(
1880 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1880 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1881 )
1881 )
1882 except error.WdirUnsupported:
1882 except error.WdirUnsupported:
1883 return context.workingctx(self)
1883 return context.workingctx(self)
1884
1884
1885 def __contains__(self, changeid):
1885 def __contains__(self, changeid):
1886 """True if the given changeid exists"""
1886 """True if the given changeid exists"""
1887 try:
1887 try:
1888 self[changeid]
1888 self[changeid]
1889 return True
1889 return True
1890 except error.RepoLookupError:
1890 except error.RepoLookupError:
1891 return False
1891 return False
1892
1892
1893 def __nonzero__(self):
1893 def __nonzero__(self):
1894 return True
1894 return True
1895
1895
1896 __bool__ = __nonzero__
1896 __bool__ = __nonzero__
1897
1897
1898 def __len__(self):
1898 def __len__(self):
1899 # no need to pay the cost of repoview.changelog
1899 # no need to pay the cost of repoview.changelog
1900 unfi = self.unfiltered()
1900 unfi = self.unfiltered()
1901 return len(unfi.changelog)
1901 return len(unfi.changelog)
1902
1902
1903 def __iter__(self):
1903 def __iter__(self):
1904 return iter(self.changelog)
1904 return iter(self.changelog)
1905
1905
1906 def revs(self, expr, *args):
1906 def revs(self, expr, *args):
1907 """Find revisions matching a revset.
1907 """Find revisions matching a revset.
1908
1908
1909 The revset is specified as a string ``expr`` that may contain
1909 The revset is specified as a string ``expr`` that may contain
1910 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1910 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1911
1911
1912 Revset aliases from the configuration are not expanded. To expand
1912 Revset aliases from the configuration are not expanded. To expand
1913 user aliases, consider calling ``scmutil.revrange()`` or
1913 user aliases, consider calling ``scmutil.revrange()`` or
1914 ``repo.anyrevs([expr], user=True)``.
1914 ``repo.anyrevs([expr], user=True)``.
1915
1915
1916 Returns a smartset.abstractsmartset, which is a list-like interface
1916 Returns a smartset.abstractsmartset, which is a list-like interface
1917 that contains integer revisions.
1917 that contains integer revisions.
1918 """
1918 """
1919 tree = revsetlang.spectree(expr, *args)
1919 tree = revsetlang.spectree(expr, *args)
1920 return revset.makematcher(tree)(self)
1920 return revset.makematcher(tree)(self)
1921
1921
1922 def set(self, expr, *args):
1922 def set(self, expr, *args):
1923 """Find revisions matching a revset and emit changectx instances.
1923 """Find revisions matching a revset and emit changectx instances.
1924
1924
1925 This is a convenience wrapper around ``revs()`` that iterates the
1925 This is a convenience wrapper around ``revs()`` that iterates the
1926 result and is a generator of changectx instances.
1926 result and is a generator of changectx instances.
1927
1927
1928 Revset aliases from the configuration are not expanded. To expand
1928 Revset aliases from the configuration are not expanded. To expand
1929 user aliases, consider calling ``scmutil.revrange()``.
1929 user aliases, consider calling ``scmutil.revrange()``.
1930 """
1930 """
1931 for r in self.revs(expr, *args):
1931 for r in self.revs(expr, *args):
1932 yield self[r]
1932 yield self[r]
1933
1933
1934 def anyrevs(self, specs, user=False, localalias=None):
1934 def anyrevs(self, specs, user=False, localalias=None):
1935 """Find revisions matching one of the given revsets.
1935 """Find revisions matching one of the given revsets.
1936
1936
1937 Revset aliases from the configuration are not expanded by default. To
1937 Revset aliases from the configuration are not expanded by default. To
1938 expand user aliases, specify ``user=True``. To provide some local
1938 expand user aliases, specify ``user=True``. To provide some local
1939 definitions overriding user aliases, set ``localalias`` to
1939 definitions overriding user aliases, set ``localalias`` to
1940 ``{name: definitionstring}``.
1940 ``{name: definitionstring}``.
1941 """
1941 """
1942 if specs == [b'null']:
1942 if specs == [b'null']:
1943 return revset.baseset([nullrev])
1943 return revset.baseset([nullrev])
1944 if specs == [b'.']:
1944 if specs == [b'.']:
1945 quick_data = self._quick_access_changeid.get(b'.')
1945 quick_data = self._quick_access_changeid.get(b'.')
1946 if quick_data is not None:
1946 if quick_data is not None:
1947 return revset.baseset([quick_data[0]])
1947 return revset.baseset([quick_data[0]])
1948 if user:
1948 if user:
1949 m = revset.matchany(
1949 m = revset.matchany(
1950 self.ui,
1950 self.ui,
1951 specs,
1951 specs,
1952 lookup=revset.lookupfn(self),
1952 lookup=revset.lookupfn(self),
1953 localalias=localalias,
1953 localalias=localalias,
1954 )
1954 )
1955 else:
1955 else:
1956 m = revset.matchany(None, specs, localalias=localalias)
1956 m = revset.matchany(None, specs, localalias=localalias)
1957 return m(self)
1957 return m(self)
1958
1958
1959 def url(self):
1959 def url(self):
1960 return b'file:' + self.root
1960 return b'file:' + self.root
1961
1961
1962 def hook(self, name, throw=False, **args):
1962 def hook(self, name, throw=False, **args):
1963 """Call a hook, passing this repo instance.
1963 """Call a hook, passing this repo instance.
1964
1964
1965 This a convenience method to aid invoking hooks. Extensions likely
1965 This a convenience method to aid invoking hooks. Extensions likely
1966 won't call this unless they have registered a custom hook or are
1966 won't call this unless they have registered a custom hook or are
1967 replacing code that is expected to call a hook.
1967 replacing code that is expected to call a hook.
1968 """
1968 """
1969 return hook.hook(self.ui, self, name, throw, **args)
1969 return hook.hook(self.ui, self, name, throw, **args)
1970
1970
1971 @filteredpropertycache
1971 @filteredpropertycache
1972 def _tagscache(self):
1972 def _tagscache(self):
1973 """Returns a tagscache object that contains various tags related
1973 """Returns a tagscache object that contains various tags related
1974 caches."""
1974 caches."""
1975
1975
1976 # This simplifies its cache management by having one decorated
1976 # This simplifies its cache management by having one decorated
1977 # function (this one) and the rest simply fetch things from it.
1977 # function (this one) and the rest simply fetch things from it.
1978 class tagscache(object):
1978 class tagscache(object):
1979 def __init__(self):
1979 def __init__(self):
1980 # These two define the set of tags for this repository. tags
1980 # These two define the set of tags for this repository. tags
1981 # maps tag name to node; tagtypes maps tag name to 'global' or
1981 # maps tag name to node; tagtypes maps tag name to 'global' or
1982 # 'local'. (Global tags are defined by .hgtags across all
1982 # 'local'. (Global tags are defined by .hgtags across all
1983 # heads, and local tags are defined in .hg/localtags.)
1983 # heads, and local tags are defined in .hg/localtags.)
1984 # They constitute the in-memory cache of tags.
1984 # They constitute the in-memory cache of tags.
1985 self.tags = self.tagtypes = None
1985 self.tags = self.tagtypes = None
1986
1986
1987 self.nodetagscache = self.tagslist = None
1987 self.nodetagscache = self.tagslist = None
1988
1988
1989 cache = tagscache()
1989 cache = tagscache()
1990 cache.tags, cache.tagtypes = self._findtags()
1990 cache.tags, cache.tagtypes = self._findtags()
1991
1991
1992 return cache
1992 return cache
1993
1993
1994 def tags(self):
1994 def tags(self):
1995 '''return a mapping of tag to node'''
1995 '''return a mapping of tag to node'''
1996 t = {}
1996 t = {}
1997 if self.changelog.filteredrevs:
1997 if self.changelog.filteredrevs:
1998 tags, tt = self._findtags()
1998 tags, tt = self._findtags()
1999 else:
1999 else:
2000 tags = self._tagscache.tags
2000 tags = self._tagscache.tags
2001 rev = self.changelog.rev
2001 rev = self.changelog.rev
2002 for k, v in pycompat.iteritems(tags):
2002 for k, v in pycompat.iteritems(tags):
2003 try:
2003 try:
2004 # ignore tags to unknown nodes
2004 # ignore tags to unknown nodes
2005 rev(v)
2005 rev(v)
2006 t[k] = v
2006 t[k] = v
2007 except (error.LookupError, ValueError):
2007 except (error.LookupError, ValueError):
2008 pass
2008 pass
2009 return t
2009 return t
2010
2010
2011 def _findtags(self):
2011 def _findtags(self):
2012 """Do the hard work of finding tags. Return a pair of dicts
2012 """Do the hard work of finding tags. Return a pair of dicts
2013 (tags, tagtypes) where tags maps tag name to node, and tagtypes
2013 (tags, tagtypes) where tags maps tag name to node, and tagtypes
2014 maps tag name to a string like \'global\' or \'local\'.
2014 maps tag name to a string like \'global\' or \'local\'.
2015 Subclasses or extensions are free to add their own tags, but
2015 Subclasses or extensions are free to add their own tags, but
2016 should be aware that the returned dicts will be retained for the
2016 should be aware that the returned dicts will be retained for the
2017 duration of the localrepo object."""
2017 duration of the localrepo object."""
2018
2018
2019 # XXX what tagtype should subclasses/extensions use? Currently
2019 # XXX what tagtype should subclasses/extensions use? Currently
2020 # mq and bookmarks add tags, but do not set the tagtype at all.
2020 # mq and bookmarks add tags, but do not set the tagtype at all.
2021 # Should each extension invent its own tag type? Should there
2021 # Should each extension invent its own tag type? Should there
2022 # be one tagtype for all such "virtual" tags? Or is the status
2022 # be one tagtype for all such "virtual" tags? Or is the status
2023 # quo fine?
2023 # quo fine?
2024
2024
2025 # map tag name to (node, hist)
2025 # map tag name to (node, hist)
2026 alltags = tagsmod.findglobaltags(self.ui, self)
2026 alltags = tagsmod.findglobaltags(self.ui, self)
2027 # map tag name to tag type
2027 # map tag name to tag type
2028 tagtypes = {tag: b'global' for tag in alltags}
2028 tagtypes = {tag: b'global' for tag in alltags}
2029
2029
2030 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
2030 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
2031
2031
2032 # Build the return dicts. Have to re-encode tag names because
2032 # Build the return dicts. Have to re-encode tag names because
2033 # the tags module always uses UTF-8 (in order not to lose info
2033 # the tags module always uses UTF-8 (in order not to lose info
2034 # writing to the cache), but the rest of Mercurial wants them in
2034 # writing to the cache), but the rest of Mercurial wants them in
2035 # local encoding.
2035 # local encoding.
2036 tags = {}
2036 tags = {}
2037 for (name, (node, hist)) in pycompat.iteritems(alltags):
2037 for (name, (node, hist)) in pycompat.iteritems(alltags):
2038 if node != self.nullid:
2038 if node != self.nullid:
2039 tags[encoding.tolocal(name)] = node
2039 tags[encoding.tolocal(name)] = node
2040 tags[b'tip'] = self.changelog.tip()
2040 tags[b'tip'] = self.changelog.tip()
2041 tagtypes = {
2041 tagtypes = {
2042 encoding.tolocal(name): value
2042 encoding.tolocal(name): value
2043 for (name, value) in pycompat.iteritems(tagtypes)
2043 for (name, value) in pycompat.iteritems(tagtypes)
2044 }
2044 }
2045 return (tags, tagtypes)
2045 return (tags, tagtypes)
2046
2046
2047 def tagtype(self, tagname):
2047 def tagtype(self, tagname):
2048 """
2048 """
2049 return the type of the given tag. result can be:
2049 return the type of the given tag. result can be:
2050
2050
2051 'local' : a local tag
2051 'local' : a local tag
2052 'global' : a global tag
2052 'global' : a global tag
2053 None : tag does not exist
2053 None : tag does not exist
2054 """
2054 """
2055
2055
2056 return self._tagscache.tagtypes.get(tagname)
2056 return self._tagscache.tagtypes.get(tagname)
2057
2057
2058 def tagslist(self):
2058 def tagslist(self):
2059 '''return a list of tags ordered by revision'''
2059 '''return a list of tags ordered by revision'''
2060 if not self._tagscache.tagslist:
2060 if not self._tagscache.tagslist:
2061 l = []
2061 l = []
2062 for t, n in pycompat.iteritems(self.tags()):
2062 for t, n in pycompat.iteritems(self.tags()):
2063 l.append((self.changelog.rev(n), t, n))
2063 l.append((self.changelog.rev(n), t, n))
2064 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
2064 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
2065
2065
2066 return self._tagscache.tagslist
2066 return self._tagscache.tagslist
2067
2067
2068 def nodetags(self, node):
2068 def nodetags(self, node):
2069 '''return the tags associated with a node'''
2069 '''return the tags associated with a node'''
2070 if not self._tagscache.nodetagscache:
2070 if not self._tagscache.nodetagscache:
2071 nodetagscache = {}
2071 nodetagscache = {}
2072 for t, n in pycompat.iteritems(self._tagscache.tags):
2072 for t, n in pycompat.iteritems(self._tagscache.tags):
2073 nodetagscache.setdefault(n, []).append(t)
2073 nodetagscache.setdefault(n, []).append(t)
2074 for tags in pycompat.itervalues(nodetagscache):
2074 for tags in pycompat.itervalues(nodetagscache):
2075 tags.sort()
2075 tags.sort()
2076 self._tagscache.nodetagscache = nodetagscache
2076 self._tagscache.nodetagscache = nodetagscache
2077 return self._tagscache.nodetagscache.get(node, [])
2077 return self._tagscache.nodetagscache.get(node, [])
2078
2078
2079 def nodebookmarks(self, node):
2079 def nodebookmarks(self, node):
2080 """return the list of bookmarks pointing to the specified node"""
2080 """return the list of bookmarks pointing to the specified node"""
2081 return self._bookmarks.names(node)
2081 return self._bookmarks.names(node)
2082
2082
2083 def branchmap(self):
2083 def branchmap(self):
2084 """returns a dictionary {branch: [branchheads]} with branchheads
2084 """returns a dictionary {branch: [branchheads]} with branchheads
2085 ordered by increasing revision number"""
2085 ordered by increasing revision number"""
2086 return self._branchcaches[self]
2086 return self._branchcaches[self]
2087
2087
2088 @unfilteredmethod
2088 @unfilteredmethod
2089 def revbranchcache(self):
2089 def revbranchcache(self):
2090 if not self._revbranchcache:
2090 if not self._revbranchcache:
2091 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
2091 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
2092 return self._revbranchcache
2092 return self._revbranchcache
2093
2093
2094 def register_changeset(self, rev, changelogrevision):
2094 def register_changeset(self, rev, changelogrevision):
2095 self.revbranchcache().setdata(rev, changelogrevision)
2095 self.revbranchcache().setdata(rev, changelogrevision)
2096
2096
2097 def branchtip(self, branch, ignoremissing=False):
2097 def branchtip(self, branch, ignoremissing=False):
2098 """return the tip node for a given branch
2098 """return the tip node for a given branch
2099
2099
2100 If ignoremissing is True, then this method will not raise an error.
2100 If ignoremissing is True, then this method will not raise an error.
2101 This is helpful for callers that only expect None for a missing branch
2101 This is helpful for callers that only expect None for a missing branch
2102 (e.g. namespace).
2102 (e.g. namespace).
2103
2103
2104 """
2104 """
2105 try:
2105 try:
2106 return self.branchmap().branchtip(branch)
2106 return self.branchmap().branchtip(branch)
2107 except KeyError:
2107 except KeyError:
2108 if not ignoremissing:
2108 if not ignoremissing:
2109 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2109 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2110 else:
2110 else:
2111 pass
2111 pass
2112
2112
2113 def lookup(self, key):
2113 def lookup(self, key):
2114 node = scmutil.revsymbol(self, key).node()
2114 node = scmutil.revsymbol(self, key).node()
2115 if node is None:
2115 if node is None:
2116 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2116 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2117 return node
2117 return node
2118
2118
2119 def lookupbranch(self, key):
2119 def lookupbranch(self, key):
2120 if self.branchmap().hasbranch(key):
2120 if self.branchmap().hasbranch(key):
2121 return key
2121 return key
2122
2122
2123 return scmutil.revsymbol(self, key).branch()
2123 return scmutil.revsymbol(self, key).branch()
2124
2124
2125 def known(self, nodes):
2125 def known(self, nodes):
2126 cl = self.changelog
2126 cl = self.changelog
2127 get_rev = cl.index.get_rev
2127 get_rev = cl.index.get_rev
2128 filtered = cl.filteredrevs
2128 filtered = cl.filteredrevs
2129 result = []
2129 result = []
2130 for n in nodes:
2130 for n in nodes:
2131 r = get_rev(n)
2131 r = get_rev(n)
2132 resp = not (r is None or r in filtered)
2132 resp = not (r is None or r in filtered)
2133 result.append(resp)
2133 result.append(resp)
2134 return result
2134 return result
2135
2135
2136 def local(self):
2136 def local(self):
2137 return self
2137 return self
2138
2138
2139 def publishing(self):
2139 def publishing(self):
2140 # it's safe (and desirable) to trust the publish flag unconditionally
2140 # it's safe (and desirable) to trust the publish flag unconditionally
2141 # so that we don't finalize changes shared between users via ssh or nfs
2141 # so that we don't finalize changes shared between users via ssh or nfs
2142 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2142 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2143
2143
2144 def cancopy(self):
2144 def cancopy(self):
2145 # so statichttprepo's override of local() works
2145 # so statichttprepo's override of local() works
2146 if not self.local():
2146 if not self.local():
2147 return False
2147 return False
2148 if not self.publishing():
2148 if not self.publishing():
2149 return True
2149 return True
2150 # if publishing we can't copy if there is filtered content
2150 # if publishing we can't copy if there is filtered content
2151 return not self.filtered(b'visible').changelog.filteredrevs
2151 return not self.filtered(b'visible').changelog.filteredrevs
2152
2152
2153 def shared(self):
2153 def shared(self):
2154 '''the type of shared repository (None if not shared)'''
2154 '''the type of shared repository (None if not shared)'''
2155 if self.sharedpath != self.path:
2155 if self.sharedpath != self.path:
2156 return b'store'
2156 return b'store'
2157 return None
2157 return None
2158
2158
2159 def wjoin(self, f, *insidef):
2159 def wjoin(self, f, *insidef):
2160 return self.vfs.reljoin(self.root, f, *insidef)
2160 return self.vfs.reljoin(self.root, f, *insidef)
2161
2161
2162 def setparents(self, p1, p2=None):
2162 def setparents(self, p1, p2=None):
2163 if p2 is None:
2163 if p2 is None:
2164 p2 = self.nullid
2164 p2 = self.nullid
2165 self[None].setparents(p1, p2)
2165 self[None].setparents(p1, p2)
2166 self._quick_access_changeid_invalidate()
2166 self._quick_access_changeid_invalidate()
2167
2167
2168 def filectx(self, path, changeid=None, fileid=None, changectx=None):
2168 def filectx(self, path, changeid=None, fileid=None, changectx=None):
2169 """changeid must be a changeset revision, if specified.
2169 """changeid must be a changeset revision, if specified.
2170 fileid can be a file revision or node."""
2170 fileid can be a file revision or node."""
2171 return context.filectx(
2171 return context.filectx(
2172 self, path, changeid, fileid, changectx=changectx
2172 self, path, changeid, fileid, changectx=changectx
2173 )
2173 )
2174
2174
2175 def getcwd(self):
2175 def getcwd(self):
2176 return self.dirstate.getcwd()
2176 return self.dirstate.getcwd()
2177
2177
2178 def pathto(self, f, cwd=None):
2178 def pathto(self, f, cwd=None):
2179 return self.dirstate.pathto(f, cwd)
2179 return self.dirstate.pathto(f, cwd)
2180
2180
2181 def _loadfilter(self, filter):
2181 def _loadfilter(self, filter):
2182 if filter not in self._filterpats:
2182 if filter not in self._filterpats:
2183 l = []
2183 l = []
2184 for pat, cmd in self.ui.configitems(filter):
2184 for pat, cmd in self.ui.configitems(filter):
2185 if cmd == b'!':
2185 if cmd == b'!':
2186 continue
2186 continue
2187 mf = matchmod.match(self.root, b'', [pat])
2187 mf = matchmod.match(self.root, b'', [pat])
2188 fn = None
2188 fn = None
2189 params = cmd
2189 params = cmd
2190 for name, filterfn in pycompat.iteritems(self._datafilters):
2190 for name, filterfn in pycompat.iteritems(self._datafilters):
2191 if cmd.startswith(name):
2191 if cmd.startswith(name):
2192 fn = filterfn
2192 fn = filterfn
2193 params = cmd[len(name) :].lstrip()
2193 params = cmd[len(name) :].lstrip()
2194 break
2194 break
2195 if not fn:
2195 if not fn:
2196 fn = lambda s, c, **kwargs: procutil.filter(s, c)
2196 fn = lambda s, c, **kwargs: procutil.filter(s, c)
2197 fn.__name__ = 'commandfilter'
2197 fn.__name__ = 'commandfilter'
2198 # Wrap old filters not supporting keyword arguments
2198 # Wrap old filters not supporting keyword arguments
2199 if not pycompat.getargspec(fn)[2]:
2199 if not pycompat.getargspec(fn)[2]:
2200 oldfn = fn
2200 oldfn = fn
2201 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
2201 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
2202 fn.__name__ = 'compat-' + oldfn.__name__
2202 fn.__name__ = 'compat-' + oldfn.__name__
2203 l.append((mf, fn, params))
2203 l.append((mf, fn, params))
2204 self._filterpats[filter] = l
2204 self._filterpats[filter] = l
2205 return self._filterpats[filter]
2205 return self._filterpats[filter]
2206
2206
2207 def _filter(self, filterpats, filename, data):
2207 def _filter(self, filterpats, filename, data):
2208 for mf, fn, cmd in filterpats:
2208 for mf, fn, cmd in filterpats:
2209 if mf(filename):
2209 if mf(filename):
2210 self.ui.debug(
2210 self.ui.debug(
2211 b"filtering %s through %s\n"
2211 b"filtering %s through %s\n"
2212 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2212 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2213 )
2213 )
2214 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2214 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2215 break
2215 break
2216
2216
2217 return data
2217 return data
2218
2218
2219 @unfilteredpropertycache
2219 @unfilteredpropertycache
2220 def _encodefilterpats(self):
2220 def _encodefilterpats(self):
2221 return self._loadfilter(b'encode')
2221 return self._loadfilter(b'encode')
2222
2222
2223 @unfilteredpropertycache
2223 @unfilteredpropertycache
2224 def _decodefilterpats(self):
2224 def _decodefilterpats(self):
2225 return self._loadfilter(b'decode')
2225 return self._loadfilter(b'decode')
2226
2226
2227 def adddatafilter(self, name, filter):
2227 def adddatafilter(self, name, filter):
2228 self._datafilters[name] = filter
2228 self._datafilters[name] = filter
2229
2229
2230 def wread(self, filename):
2230 def wread(self, filename):
2231 if self.wvfs.islink(filename):
2231 if self.wvfs.islink(filename):
2232 data = self.wvfs.readlink(filename)
2232 data = self.wvfs.readlink(filename)
2233 else:
2233 else:
2234 data = self.wvfs.read(filename)
2234 data = self.wvfs.read(filename)
2235 return self._filter(self._encodefilterpats, filename, data)
2235 return self._filter(self._encodefilterpats, filename, data)
2236
2236
2237 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2237 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2238 """write ``data`` into ``filename`` in the working directory
2238 """write ``data`` into ``filename`` in the working directory
2239
2239
2240 This returns length of written (maybe decoded) data.
2240 This returns length of written (maybe decoded) data.
2241 """
2241 """
2242 data = self._filter(self._decodefilterpats, filename, data)
2242 data = self._filter(self._decodefilterpats, filename, data)
2243 if b'l' in flags:
2243 if b'l' in flags:
2244 self.wvfs.symlink(data, filename)
2244 self.wvfs.symlink(data, filename)
2245 else:
2245 else:
2246 self.wvfs.write(
2246 self.wvfs.write(
2247 filename, data, backgroundclose=backgroundclose, **kwargs
2247 filename, data, backgroundclose=backgroundclose, **kwargs
2248 )
2248 )
2249 if b'x' in flags:
2249 if b'x' in flags:
2250 self.wvfs.setflags(filename, False, True)
2250 self.wvfs.setflags(filename, False, True)
2251 else:
2251 else:
2252 self.wvfs.setflags(filename, False, False)
2252 self.wvfs.setflags(filename, False, False)
2253 return len(data)
2253 return len(data)
2254
2254
2255 def wwritedata(self, filename, data):
2255 def wwritedata(self, filename, data):
2256 return self._filter(self._decodefilterpats, filename, data)
2256 return self._filter(self._decodefilterpats, filename, data)
2257
2257
2258 def currenttransaction(self):
2258 def currenttransaction(self):
2259 """return the current transaction or None if non exists"""
2259 """return the current transaction or None if non exists"""
2260 if self._transref:
2260 if self._transref:
2261 tr = self._transref()
2261 tr = self._transref()
2262 else:
2262 else:
2263 tr = None
2263 tr = None
2264
2264
2265 if tr and tr.running():
2265 if tr and tr.running():
2266 return tr
2266 return tr
2267 return None
2267 return None
2268
2268
2269 def transaction(self, desc, report=None):
2269 def transaction(self, desc, report=None):
2270 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2270 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2271 b'devel', b'check-locks'
2271 b'devel', b'check-locks'
2272 ):
2272 ):
2273 if self._currentlock(self._lockref) is None:
2273 if self._currentlock(self._lockref) is None:
2274 raise error.ProgrammingError(b'transaction requires locking')
2274 raise error.ProgrammingError(b'transaction requires locking')
2275 tr = self.currenttransaction()
2275 tr = self.currenttransaction()
2276 if tr is not None:
2276 if tr is not None:
2277 return tr.nest(name=desc)
2277 return tr.nest(name=desc)
2278
2278
2279 # abort here if the journal already exists
2279 # abort here if the journal already exists
2280 if self.svfs.exists(b"journal"):
2280 if self.svfs.exists(b"journal"):
2281 raise error.RepoError(
2281 raise error.RepoError(
2282 _(b"abandoned transaction found"),
2282 _(b"abandoned transaction found"),
2283 hint=_(b"run 'hg recover' to clean up transaction"),
2283 hint=_(b"run 'hg recover' to clean up transaction"),
2284 )
2284 )
2285
2285
2286 idbase = b"%.40f#%f" % (random.random(), time.time())
2286 idbase = b"%.40f#%f" % (random.random(), time.time())
2287 ha = hex(hashutil.sha1(idbase).digest())
2287 ha = hex(hashutil.sha1(idbase).digest())
2288 txnid = b'TXN:' + ha
2288 txnid = b'TXN:' + ha
2289 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2289 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2290
2290
2291 self._writejournal(desc)
2291 self._writejournal(desc)
2292 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2292 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2293 if report:
2293 if report:
2294 rp = report
2294 rp = report
2295 else:
2295 else:
2296 rp = self.ui.warn
2296 rp = self.ui.warn
2297 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2297 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2298 # we must avoid cyclic reference between repo and transaction.
2298 # we must avoid cyclic reference between repo and transaction.
2299 reporef = weakref.ref(self)
2299 reporef = weakref.ref(self)
2300 # Code to track tag movement
2300 # Code to track tag movement
2301 #
2301 #
2302 # Since tags are all handled as file content, it is actually quite hard
2302 # Since tags are all handled as file content, it is actually quite hard
2303 # to track these movement from a code perspective. So we fallback to a
2303 # to track these movement from a code perspective. So we fallback to a
2304 # tracking at the repository level. One could envision to track changes
2304 # tracking at the repository level. One could envision to track changes
2305 # to the '.hgtags' file through changegroup apply but that fails to
2305 # to the '.hgtags' file through changegroup apply but that fails to
2306 # cope with case where transaction expose new heads without changegroup
2306 # cope with case where transaction expose new heads without changegroup
2307 # being involved (eg: phase movement).
2307 # being involved (eg: phase movement).
2308 #
2308 #
2309 # For now, We gate the feature behind a flag since this likely comes
2309 # For now, We gate the feature behind a flag since this likely comes
2310 # with performance impacts. The current code run more often than needed
2310 # with performance impacts. The current code run more often than needed
2311 # and do not use caches as much as it could. The current focus is on
2311 # and do not use caches as much as it could. The current focus is on
2312 # the behavior of the feature so we disable it by default. The flag
2312 # the behavior of the feature so we disable it by default. The flag
2313 # will be removed when we are happy with the performance impact.
2313 # will be removed when we are happy with the performance impact.
2314 #
2314 #
2315 # Once this feature is no longer experimental move the following
2315 # Once this feature is no longer experimental move the following
2316 # documentation to the appropriate help section:
2316 # documentation to the appropriate help section:
2317 #
2317 #
2318 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2318 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2319 # tags (new or changed or deleted tags). In addition the details of
2319 # tags (new or changed or deleted tags). In addition the details of
2320 # these changes are made available in a file at:
2320 # these changes are made available in a file at:
2321 # ``REPOROOT/.hg/changes/tags.changes``.
2321 # ``REPOROOT/.hg/changes/tags.changes``.
2322 # Make sure you check for HG_TAG_MOVED before reading that file as it
2322 # Make sure you check for HG_TAG_MOVED before reading that file as it
2323 # might exist from a previous transaction even if no tag were touched
2323 # might exist from a previous transaction even if no tag were touched
2324 # in this one. Changes are recorded in a line base format::
2324 # in this one. Changes are recorded in a line base format::
2325 #
2325 #
2326 # <action> <hex-node> <tag-name>\n
2326 # <action> <hex-node> <tag-name>\n
2327 #
2327 #
2328 # Actions are defined as follow:
2328 # Actions are defined as follow:
2329 # "-R": tag is removed,
2329 # "-R": tag is removed,
2330 # "+A": tag is added,
2330 # "+A": tag is added,
2331 # "-M": tag is moved (old value),
2331 # "-M": tag is moved (old value),
2332 # "+M": tag is moved (new value),
2332 # "+M": tag is moved (new value),
2333 tracktags = lambda x: None
2333 tracktags = lambda x: None
2334 # experimental config: experimental.hook-track-tags
2334 # experimental config: experimental.hook-track-tags
2335 shouldtracktags = self.ui.configbool(
2335 shouldtracktags = self.ui.configbool(
2336 b'experimental', b'hook-track-tags'
2336 b'experimental', b'hook-track-tags'
2337 )
2337 )
2338 if desc != b'strip' and shouldtracktags:
2338 if desc != b'strip' and shouldtracktags:
2339 oldheads = self.changelog.headrevs()
2339 oldheads = self.changelog.headrevs()
2340
2340
2341 def tracktags(tr2):
2341 def tracktags(tr2):
2342 repo = reporef()
2342 repo = reporef()
2343 assert repo is not None # help pytype
2343 assert repo is not None # help pytype
2344 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2344 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2345 newheads = repo.changelog.headrevs()
2345 newheads = repo.changelog.headrevs()
2346 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2346 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2347 # notes: we compare lists here.
2347 # notes: we compare lists here.
2348 # As we do it only once buiding set would not be cheaper
2348 # As we do it only once buiding set would not be cheaper
2349 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2349 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2350 if changes:
2350 if changes:
2351 tr2.hookargs[b'tag_moved'] = b'1'
2351 tr2.hookargs[b'tag_moved'] = b'1'
2352 with repo.vfs(
2352 with repo.vfs(
2353 b'changes/tags.changes', b'w', atomictemp=True
2353 b'changes/tags.changes', b'w', atomictemp=True
2354 ) as changesfile:
2354 ) as changesfile:
2355 # note: we do not register the file to the transaction
2355 # note: we do not register the file to the transaction
2356 # because we needs it to still exist on the transaction
2356 # because we needs it to still exist on the transaction
2357 # is close (for txnclose hooks)
2357 # is close (for txnclose hooks)
2358 tagsmod.writediff(changesfile, changes)
2358 tagsmod.writediff(changesfile, changes)
2359
2359
2360 def validate(tr2):
2360 def validate(tr2):
2361 """will run pre-closing hooks"""
2361 """will run pre-closing hooks"""
2362 # XXX the transaction API is a bit lacking here so we take a hacky
2362 # XXX the transaction API is a bit lacking here so we take a hacky
2363 # path for now
2363 # path for now
2364 #
2364 #
2365 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
2365 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
2366 # dict is copied before these run. In addition we needs the data
2366 # dict is copied before these run. In addition we needs the data
2367 # available to in memory hooks too.
2367 # available to in memory hooks too.
2368 #
2368 #
2369 # Moreover, we also need to make sure this runs before txnclose
2369 # Moreover, we also need to make sure this runs before txnclose
2370 # hooks and there is no "pending" mechanism that would execute
2370 # hooks and there is no "pending" mechanism that would execute
2371 # logic only if hooks are about to run.
2371 # logic only if hooks are about to run.
2372 #
2372 #
2373 # Fixing this limitation of the transaction is also needed to track
2373 # Fixing this limitation of the transaction is also needed to track
2374 # other families of changes (bookmarks, phases, obsolescence).
2374 # other families of changes (bookmarks, phases, obsolescence).
2375 #
2375 #
2376 # This will have to be fixed before we remove the experimental
2376 # This will have to be fixed before we remove the experimental
2377 # gating.
2377 # gating.
2378 tracktags(tr2)
2378 tracktags(tr2)
2379 repo = reporef()
2379 repo = reporef()
2380 assert repo is not None # help pytype
2380 assert repo is not None # help pytype
2381
2381
2382 singleheadopt = (b'experimental', b'single-head-per-branch')
2382 singleheadopt = (b'experimental', b'single-head-per-branch')
2383 singlehead = repo.ui.configbool(*singleheadopt)
2383 singlehead = repo.ui.configbool(*singleheadopt)
2384 if singlehead:
2384 if singlehead:
2385 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2385 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2386 accountclosed = singleheadsub.get(
2386 accountclosed = singleheadsub.get(
2387 b"account-closed-heads", False
2387 b"account-closed-heads", False
2388 )
2388 )
2389 if singleheadsub.get(b"public-changes-only", False):
2389 if singleheadsub.get(b"public-changes-only", False):
2390 filtername = b"immutable"
2390 filtername = b"immutable"
2391 else:
2391 else:
2392 filtername = b"visible"
2392 filtername = b"visible"
2393 scmutil.enforcesinglehead(
2393 scmutil.enforcesinglehead(
2394 repo, tr2, desc, accountclosed, filtername
2394 repo, tr2, desc, accountclosed, filtername
2395 )
2395 )
2396 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2396 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2397 for name, (old, new) in sorted(
2397 for name, (old, new) in sorted(
2398 tr.changes[b'bookmarks'].items()
2398 tr.changes[b'bookmarks'].items()
2399 ):
2399 ):
2400 args = tr.hookargs.copy()
2400 args = tr.hookargs.copy()
2401 args.update(bookmarks.preparehookargs(name, old, new))
2401 args.update(bookmarks.preparehookargs(name, old, new))
2402 repo.hook(
2402 repo.hook(
2403 b'pretxnclose-bookmark',
2403 b'pretxnclose-bookmark',
2404 throw=True,
2404 throw=True,
2405 **pycompat.strkwargs(args)
2405 **pycompat.strkwargs(args)
2406 )
2406 )
2407 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2407 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2408 cl = repo.unfiltered().changelog
2408 cl = repo.unfiltered().changelog
2409 for revs, (old, new) in tr.changes[b'phases']:
2409 for revs, (old, new) in tr.changes[b'phases']:
2410 for rev in revs:
2410 for rev in revs:
2411 args = tr.hookargs.copy()
2411 args = tr.hookargs.copy()
2412 node = hex(cl.node(rev))
2412 node = hex(cl.node(rev))
2413 args.update(phases.preparehookargs(node, old, new))
2413 args.update(phases.preparehookargs(node, old, new))
2414 repo.hook(
2414 repo.hook(
2415 b'pretxnclose-phase',
2415 b'pretxnclose-phase',
2416 throw=True,
2416 throw=True,
2417 **pycompat.strkwargs(args)
2417 **pycompat.strkwargs(args)
2418 )
2418 )
2419
2419
2420 repo.hook(
2420 repo.hook(
2421 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2421 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2422 )
2422 )
2423
2423
2424 def releasefn(tr, success):
2424 def releasefn(tr, success):
2425 repo = reporef()
2425 repo = reporef()
2426 if repo is None:
2426 if repo is None:
2427 # If the repo has been GC'd (and this release function is being
2427 # If the repo has been GC'd (and this release function is being
2428 # called from transaction.__del__), there's not much we can do,
2428 # called from transaction.__del__), there's not much we can do,
2429 # so just leave the unfinished transaction there and let the
2429 # so just leave the unfinished transaction there and let the
2430 # user run `hg recover`.
2430 # user run `hg recover`.
2431 return
2431 return
2432 if success:
2432 if success:
2433 # this should be explicitly invoked here, because
2433 # this should be explicitly invoked here, because
2434 # in-memory changes aren't written out at closing
2434 # in-memory changes aren't written out at closing
2435 # transaction, if tr.addfilegenerator (via
2435 # transaction, if tr.addfilegenerator (via
2436 # dirstate.write or so) isn't invoked while
2436 # dirstate.write or so) isn't invoked while
2437 # transaction running
2437 # transaction running
2438 repo.dirstate.write(None)
2438 repo.dirstate.write(None)
2439 else:
2439 else:
2440 # discard all changes (including ones already written
2440 # discard all changes (including ones already written
2441 # out) in this transaction
2441 # out) in this transaction
2442 narrowspec.restorebackup(self, b'journal.narrowspec')
2442 narrowspec.restorebackup(self, b'journal.narrowspec')
2443 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2443 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2444 repo.dirstate.restorebackup(None, b'journal.dirstate')
2444 repo.dirstate.restorebackup(None, b'journal.dirstate')
2445
2445
2446 repo.invalidate(clearfilecache=True)
2446 repo.invalidate(clearfilecache=True)
2447
2447
2448 tr = transaction.transaction(
2448 tr = transaction.transaction(
2449 rp,
2449 rp,
2450 self.svfs,
2450 self.svfs,
2451 vfsmap,
2451 vfsmap,
2452 b"journal",
2452 b"journal",
2453 b"undo",
2453 b"undo",
2454 aftertrans(renames),
2454 aftertrans(renames),
2455 self.store.createmode,
2455 self.store.createmode,
2456 validator=validate,
2456 validator=validate,
2457 releasefn=releasefn,
2457 releasefn=releasefn,
2458 checkambigfiles=_cachedfiles,
2458 checkambigfiles=_cachedfiles,
2459 name=desc,
2459 name=desc,
2460 )
2460 )
2461 tr.changes[b'origrepolen'] = len(self)
2461 tr.changes[b'origrepolen'] = len(self)
2462 tr.changes[b'obsmarkers'] = set()
2462 tr.changes[b'obsmarkers'] = set()
2463 tr.changes[b'phases'] = []
2463 tr.changes[b'phases'] = []
2464 tr.changes[b'bookmarks'] = {}
2464 tr.changes[b'bookmarks'] = {}
2465
2465
2466 tr.hookargs[b'txnid'] = txnid
2466 tr.hookargs[b'txnid'] = txnid
2467 tr.hookargs[b'txnname'] = desc
2467 tr.hookargs[b'txnname'] = desc
2468 tr.hookargs[b'changes'] = tr.changes
2468 tr.hookargs[b'changes'] = tr.changes
2469 # note: writing the fncache only during finalize mean that the file is
2469 # note: writing the fncache only during finalize mean that the file is
2470 # outdated when running hooks. As fncache is used for streaming clone,
2470 # outdated when running hooks. As fncache is used for streaming clone,
2471 # this is not expected to break anything that happen during the hooks.
2471 # this is not expected to break anything that happen during the hooks.
2472 tr.addfinalize(b'flush-fncache', self.store.write)
2472 tr.addfinalize(b'flush-fncache', self.store.write)
2473
2473
2474 def txnclosehook(tr2):
2474 def txnclosehook(tr2):
2475 """To be run if transaction is successful, will schedule a hook run"""
2475 """To be run if transaction is successful, will schedule a hook run"""
2476 # Don't reference tr2 in hook() so we don't hold a reference.
2476 # Don't reference tr2 in hook() so we don't hold a reference.
2477 # This reduces memory consumption when there are multiple
2477 # This reduces memory consumption when there are multiple
2478 # transactions per lock. This can likely go away if issue5045
2478 # transactions per lock. This can likely go away if issue5045
2479 # fixes the function accumulation.
2479 # fixes the function accumulation.
2480 hookargs = tr2.hookargs
2480 hookargs = tr2.hookargs
2481
2481
2482 def hookfunc(unused_success):
2482 def hookfunc(unused_success):
2483 repo = reporef()
2483 repo = reporef()
2484 assert repo is not None # help pytype
2484 assert repo is not None # help pytype
2485
2485
2486 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2486 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2487 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2487 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2488 for name, (old, new) in bmchanges:
2488 for name, (old, new) in bmchanges:
2489 args = tr.hookargs.copy()
2489 args = tr.hookargs.copy()
2490 args.update(bookmarks.preparehookargs(name, old, new))
2490 args.update(bookmarks.preparehookargs(name, old, new))
2491 repo.hook(
2491 repo.hook(
2492 b'txnclose-bookmark',
2492 b'txnclose-bookmark',
2493 throw=False,
2493 throw=False,
2494 **pycompat.strkwargs(args)
2494 **pycompat.strkwargs(args)
2495 )
2495 )
2496
2496
2497 if hook.hashook(repo.ui, b'txnclose-phase'):
2497 if hook.hashook(repo.ui, b'txnclose-phase'):
2498 cl = repo.unfiltered().changelog
2498 cl = repo.unfiltered().changelog
2499 phasemv = sorted(
2499 phasemv = sorted(
2500 tr.changes[b'phases'], key=lambda r: r[0][0]
2500 tr.changes[b'phases'], key=lambda r: r[0][0]
2501 )
2501 )
2502 for revs, (old, new) in phasemv:
2502 for revs, (old, new) in phasemv:
2503 for rev in revs:
2503 for rev in revs:
2504 args = tr.hookargs.copy()
2504 args = tr.hookargs.copy()
2505 node = hex(cl.node(rev))
2505 node = hex(cl.node(rev))
2506 args.update(phases.preparehookargs(node, old, new))
2506 args.update(phases.preparehookargs(node, old, new))
2507 repo.hook(
2507 repo.hook(
2508 b'txnclose-phase',
2508 b'txnclose-phase',
2509 throw=False,
2509 throw=False,
2510 **pycompat.strkwargs(args)
2510 **pycompat.strkwargs(args)
2511 )
2511 )
2512
2512
2513 repo.hook(
2513 repo.hook(
2514 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2514 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2515 )
2515 )
2516
2516
2517 repo = reporef()
2517 repo = reporef()
2518 assert repo is not None # help pytype
2518 assert repo is not None # help pytype
2519 repo._afterlock(hookfunc)
2519 repo._afterlock(hookfunc)
2520
2520
2521 tr.addfinalize(b'txnclose-hook', txnclosehook)
2521 tr.addfinalize(b'txnclose-hook', txnclosehook)
2522 # Include a leading "-" to make it happen before the transaction summary
2522 # Include a leading "-" to make it happen before the transaction summary
2523 # reports registered via scmutil.registersummarycallback() whose names
2523 # reports registered via scmutil.registersummarycallback() whose names
2524 # are 00-txnreport etc. That way, the caches will be warm when the
2524 # are 00-txnreport etc. That way, the caches will be warm when the
2525 # callbacks run.
2525 # callbacks run.
2526 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2526 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2527
2527
2528 def txnaborthook(tr2):
2528 def txnaborthook(tr2):
2529 """To be run if transaction is aborted"""
2529 """To be run if transaction is aborted"""
2530 repo = reporef()
2530 repo = reporef()
2531 assert repo is not None # help pytype
2531 assert repo is not None # help pytype
2532 repo.hook(
2532 repo.hook(
2533 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2533 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2534 )
2534 )
2535
2535
2536 tr.addabort(b'txnabort-hook', txnaborthook)
2536 tr.addabort(b'txnabort-hook', txnaborthook)
2537 # avoid eager cache invalidation. in-memory data should be identical
2537 # avoid eager cache invalidation. in-memory data should be identical
2538 # to stored data if transaction has no error.
2538 # to stored data if transaction has no error.
2539 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2539 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2540 self._transref = weakref.ref(tr)
2540 self._transref = weakref.ref(tr)
2541 scmutil.registersummarycallback(self, tr, desc)
2541 scmutil.registersummarycallback(self, tr, desc)
2542 return tr
2542 return tr
2543
2543
2544 def _journalfiles(self):
2544 def _journalfiles(self):
2545 return (
2545 return (
2546 (self.svfs, b'journal'),
2546 (self.svfs, b'journal'),
2547 (self.svfs, b'journal.narrowspec'),
2547 (self.svfs, b'journal.narrowspec'),
2548 (self.vfs, b'journal.narrowspec.dirstate'),
2548 (self.vfs, b'journal.narrowspec.dirstate'),
2549 (self.vfs, b'journal.dirstate'),
2549 (self.vfs, b'journal.dirstate'),
2550 (self.vfs, b'journal.branch'),
2550 (self.vfs, b'journal.branch'),
2551 (self.vfs, b'journal.desc'),
2551 (self.vfs, b'journal.desc'),
2552 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2552 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2553 (self.svfs, b'journal.phaseroots'),
2553 (self.svfs, b'journal.phaseroots'),
2554 )
2554 )
2555
2555
2556 def undofiles(self):
2556 def undofiles(self):
2557 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2557 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2558
2558
2559 @unfilteredmethod
2559 @unfilteredmethod
2560 def _writejournal(self, desc):
2560 def _writejournal(self, desc):
2561 self.dirstate.savebackup(None, b'journal.dirstate')
2561 self.dirstate.savebackup(None, b'journal.dirstate')
2562 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2562 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2563 narrowspec.savebackup(self, b'journal.narrowspec')
2563 narrowspec.savebackup(self, b'journal.narrowspec')
2564 self.vfs.write(
2564 self.vfs.write(
2565 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2565 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2566 )
2566 )
2567 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2567 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2568 bookmarksvfs = bookmarks.bookmarksvfs(self)
2568 bookmarksvfs = bookmarks.bookmarksvfs(self)
2569 bookmarksvfs.write(
2569 bookmarksvfs.write(
2570 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2570 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2571 )
2571 )
2572 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2572 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2573
2573
2574 def recover(self):
2574 def recover(self):
2575 with self.lock():
2575 with self.lock():
2576 if self.svfs.exists(b"journal"):
2576 if self.svfs.exists(b"journal"):
2577 self.ui.status(_(b"rolling back interrupted transaction\n"))
2577 self.ui.status(_(b"rolling back interrupted transaction\n"))
2578 vfsmap = {
2578 vfsmap = {
2579 b'': self.svfs,
2579 b'': self.svfs,
2580 b'plain': self.vfs,
2580 b'plain': self.vfs,
2581 }
2581 }
2582 transaction.rollback(
2582 transaction.rollback(
2583 self.svfs,
2583 self.svfs,
2584 vfsmap,
2584 vfsmap,
2585 b"journal",
2585 b"journal",
2586 self.ui.warn,
2586 self.ui.warn,
2587 checkambigfiles=_cachedfiles,
2587 checkambigfiles=_cachedfiles,
2588 )
2588 )
2589 self.invalidate()
2589 self.invalidate()
2590 return True
2590 return True
2591 else:
2591 else:
2592 self.ui.warn(_(b"no interrupted transaction available\n"))
2592 self.ui.warn(_(b"no interrupted transaction available\n"))
2593 return False
2593 return False
2594
2594
2595 def rollback(self, dryrun=False, force=False):
2595 def rollback(self, dryrun=False, force=False):
2596 wlock = lock = dsguard = None
2596 wlock = lock = dsguard = None
2597 try:
2597 try:
2598 wlock = self.wlock()
2598 wlock = self.wlock()
2599 lock = self.lock()
2599 lock = self.lock()
2600 if self.svfs.exists(b"undo"):
2600 if self.svfs.exists(b"undo"):
2601 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2601 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2602
2602
2603 return self._rollback(dryrun, force, dsguard)
2603 return self._rollback(dryrun, force, dsguard)
2604 else:
2604 else:
2605 self.ui.warn(_(b"no rollback information available\n"))
2605 self.ui.warn(_(b"no rollback information available\n"))
2606 return 1
2606 return 1
2607 finally:
2607 finally:
2608 release(dsguard, lock, wlock)
2608 release(dsguard, lock, wlock)
2609
2609
2610 @unfilteredmethod # Until we get smarter cache management
2610 @unfilteredmethod # Until we get smarter cache management
2611 def _rollback(self, dryrun, force, dsguard):
2611 def _rollback(self, dryrun, force, dsguard):
2612 ui = self.ui
2612 ui = self.ui
2613 try:
2613 try:
2614 args = self.vfs.read(b'undo.desc').splitlines()
2614 args = self.vfs.read(b'undo.desc').splitlines()
2615 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2615 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2616 if len(args) >= 3:
2616 if len(args) >= 3:
2617 detail = args[2]
2617 detail = args[2]
2618 oldtip = oldlen - 1
2618 oldtip = oldlen - 1
2619
2619
2620 if detail and ui.verbose:
2620 if detail and ui.verbose:
2621 msg = _(
2621 msg = _(
2622 b'repository tip rolled back to revision %d'
2622 b'repository tip rolled back to revision %d'
2623 b' (undo %s: %s)\n'
2623 b' (undo %s: %s)\n'
2624 ) % (oldtip, desc, detail)
2624 ) % (oldtip, desc, detail)
2625 else:
2625 else:
2626 msg = _(
2626 msg = _(
2627 b'repository tip rolled back to revision %d (undo %s)\n'
2627 b'repository tip rolled back to revision %d (undo %s)\n'
2628 ) % (oldtip, desc)
2628 ) % (oldtip, desc)
2629 except IOError:
2629 except IOError:
2630 msg = _(b'rolling back unknown transaction\n')
2630 msg = _(b'rolling back unknown transaction\n')
2631 desc = None
2631 desc = None
2632
2632
2633 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2633 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2634 raise error.Abort(
2634 raise error.Abort(
2635 _(
2635 _(
2636 b'rollback of last commit while not checked out '
2636 b'rollback of last commit while not checked out '
2637 b'may lose data'
2637 b'may lose data'
2638 ),
2638 ),
2639 hint=_(b'use -f to force'),
2639 hint=_(b'use -f to force'),
2640 )
2640 )
2641
2641
2642 ui.status(msg)
2642 ui.status(msg)
2643 if dryrun:
2643 if dryrun:
2644 return 0
2644 return 0
2645
2645
2646 parents = self.dirstate.parents()
2646 parents = self.dirstate.parents()
2647 self.destroying()
2647 self.destroying()
2648 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2648 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2649 transaction.rollback(
2649 transaction.rollback(
2650 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2650 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2651 )
2651 )
2652 bookmarksvfs = bookmarks.bookmarksvfs(self)
2652 bookmarksvfs = bookmarks.bookmarksvfs(self)
2653 if bookmarksvfs.exists(b'undo.bookmarks'):
2653 if bookmarksvfs.exists(b'undo.bookmarks'):
2654 bookmarksvfs.rename(
2654 bookmarksvfs.rename(
2655 b'undo.bookmarks', b'bookmarks', checkambig=True
2655 b'undo.bookmarks', b'bookmarks', checkambig=True
2656 )
2656 )
2657 if self.svfs.exists(b'undo.phaseroots'):
2657 if self.svfs.exists(b'undo.phaseroots'):
2658 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2658 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2659 self.invalidate()
2659 self.invalidate()
2660
2660
2661 has_node = self.changelog.index.has_node
2661 has_node = self.changelog.index.has_node
2662 parentgone = any(not has_node(p) for p in parents)
2662 parentgone = any(not has_node(p) for p in parents)
2663 if parentgone:
2663 if parentgone:
2664 # prevent dirstateguard from overwriting already restored one
2664 # prevent dirstateguard from overwriting already restored one
2665 dsguard.close()
2665 dsguard.close()
2666
2666
2667 narrowspec.restorebackup(self, b'undo.narrowspec')
2667 narrowspec.restorebackup(self, b'undo.narrowspec')
2668 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2668 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2669 self.dirstate.restorebackup(None, b'undo.dirstate')
2669 self.dirstate.restorebackup(None, b'undo.dirstate')
2670 try:
2670 try:
2671 branch = self.vfs.read(b'undo.branch')
2671 branch = self.vfs.read(b'undo.branch')
2672 self.dirstate.setbranch(encoding.tolocal(branch))
2672 self.dirstate.setbranch(encoding.tolocal(branch))
2673 except IOError:
2673 except IOError:
2674 ui.warn(
2674 ui.warn(
2675 _(
2675 _(
2676 b'named branch could not be reset: '
2676 b'named branch could not be reset: '
2677 b'current branch is still \'%s\'\n'
2677 b'current branch is still \'%s\'\n'
2678 )
2678 )
2679 % self.dirstate.branch()
2679 % self.dirstate.branch()
2680 )
2680 )
2681
2681
2682 parents = tuple([p.rev() for p in self[None].parents()])
2682 parents = tuple([p.rev() for p in self[None].parents()])
2683 if len(parents) > 1:
2683 if len(parents) > 1:
2684 ui.status(
2684 ui.status(
2685 _(
2685 _(
2686 b'working directory now based on '
2686 b'working directory now based on '
2687 b'revisions %d and %d\n'
2687 b'revisions %d and %d\n'
2688 )
2688 )
2689 % parents
2689 % parents
2690 )
2690 )
2691 else:
2691 else:
2692 ui.status(
2692 ui.status(
2693 _(b'working directory now based on revision %d\n') % parents
2693 _(b'working directory now based on revision %d\n') % parents
2694 )
2694 )
2695 mergestatemod.mergestate.clean(self)
2695 mergestatemod.mergestate.clean(self)
2696
2696
2697 # TODO: if we know which new heads may result from this rollback, pass
2697 # TODO: if we know which new heads may result from this rollback, pass
2698 # them to destroy(), which will prevent the branchhead cache from being
2698 # them to destroy(), which will prevent the branchhead cache from being
2699 # invalidated.
2699 # invalidated.
2700 self.destroyed()
2700 self.destroyed()
2701 return 0
2701 return 0
2702
2702
2703 def _buildcacheupdater(self, newtransaction):
2703 def _buildcacheupdater(self, newtransaction):
2704 """called during transaction to build the callback updating cache
2704 """called during transaction to build the callback updating cache
2705
2705
2706 Lives on the repository to help extension who might want to augment
2706 Lives on the repository to help extension who might want to augment
2707 this logic. For this purpose, the created transaction is passed to the
2707 this logic. For this purpose, the created transaction is passed to the
2708 method.
2708 method.
2709 """
2709 """
2710 # we must avoid cyclic reference between repo and transaction.
2710 # we must avoid cyclic reference between repo and transaction.
2711 reporef = weakref.ref(self)
2711 reporef = weakref.ref(self)
2712
2712
2713 def updater(tr):
2713 def updater(tr):
2714 repo = reporef()
2714 repo = reporef()
2715 assert repo is not None # help pytype
2715 assert repo is not None # help pytype
2716 repo.updatecaches(tr)
2716 repo.updatecaches(tr)
2717
2717
2718 return updater
2718 return updater
2719
2719
2720 @unfilteredmethod
2720 @unfilteredmethod
2721 def updatecaches(self, tr=None, full=False):
2721 def updatecaches(self, tr=None, full=False):
2722 """warm appropriate caches
2722 """warm appropriate caches
2723
2723
2724 If this function is called after a transaction closed. The transaction
2724 If this function is called after a transaction closed. The transaction
2725 will be available in the 'tr' argument. This can be used to selectively
2725 will be available in the 'tr' argument. This can be used to selectively
2726 update caches relevant to the changes in that transaction.
2726 update caches relevant to the changes in that transaction.
2727
2727
2728 If 'full' is set, make sure all caches the function knows about have
2728 If 'full' is set, make sure all caches the function knows about have
2729 up-to-date data. Even the ones usually loaded more lazily.
2729 up-to-date data. Even the ones usually loaded more lazily.
2730
2731 The `full` argument can take a special "post-clone" value. In this case
2732 the cache warming is made after a clone and of the slower cache might
2733 be skipped, namely the `.fnodetags` one. This argument is 5.8 specific
2734 as we plan for a cleaner way to deal with this for 5.9.
2730 """
2735 """
2731 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2736 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2732 # During strip, many caches are invalid but
2737 # During strip, many caches are invalid but
2733 # later call to `destroyed` will refresh them.
2738 # later call to `destroyed` will refresh them.
2734 return
2739 return
2735
2740
2736 if tr is None or tr.changes[b'origrepolen'] < len(self):
2741 if tr is None or tr.changes[b'origrepolen'] < len(self):
2737 # accessing the 'served' branchmap should refresh all the others,
2742 # accessing the 'served' branchmap should refresh all the others,
2738 self.ui.debug(b'updating the branch cache\n')
2743 self.ui.debug(b'updating the branch cache\n')
2739 self.filtered(b'served').branchmap()
2744 self.filtered(b'served').branchmap()
2740 self.filtered(b'served.hidden').branchmap()
2745 self.filtered(b'served.hidden').branchmap()
2741
2746
2742 if full:
2747 if full:
2743 unfi = self.unfiltered()
2748 unfi = self.unfiltered()
2744
2749
2745 self.changelog.update_caches(transaction=tr)
2750 self.changelog.update_caches(transaction=tr)
2746 self.manifestlog.update_caches(transaction=tr)
2751 self.manifestlog.update_caches(transaction=tr)
2747
2752
2748 rbc = unfi.revbranchcache()
2753 rbc = unfi.revbranchcache()
2749 for r in unfi.changelog:
2754 for r in unfi.changelog:
2750 rbc.branchinfo(r)
2755 rbc.branchinfo(r)
2751 rbc.write()
2756 rbc.write()
2752
2757
2753 # ensure the working copy parents are in the manifestfulltextcache
2758 # ensure the working copy parents are in the manifestfulltextcache
2754 for ctx in self[b'.'].parents():
2759 for ctx in self[b'.'].parents():
2755 ctx.manifest() # accessing the manifest is enough
2760 ctx.manifest() # accessing the manifest is enough
2756
2761
2757 # accessing fnode cache warms the cache
2762 if not full == b"post-clone":
2758 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2763 # accessing fnode cache warms the cache
2764 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2759 # accessing tags warm the cache
2765 # accessing tags warm the cache
2760 self.tags()
2766 self.tags()
2761 self.filtered(b'served').tags()
2767 self.filtered(b'served').tags()
2762
2768
2763 # The `full` arg is documented as updating even the lazily-loaded
2769 # The `full` arg is documented as updating even the lazily-loaded
2764 # caches immediately, so we're forcing a write to cause these caches
2770 # caches immediately, so we're forcing a write to cause these caches
2765 # to be warmed up even if they haven't explicitly been requested
2771 # to be warmed up even if they haven't explicitly been requested
2766 # yet (if they've never been used by hg, they won't ever have been
2772 # yet (if they've never been used by hg, they won't ever have been
2767 # written, even if they're a subset of another kind of cache that
2773 # written, even if they're a subset of another kind of cache that
2768 # *has* been used).
2774 # *has* been used).
2769 for filt in repoview.filtertable.keys():
2775 for filt in repoview.filtertable.keys():
2770 filtered = self.filtered(filt)
2776 filtered = self.filtered(filt)
2771 filtered.branchmap().write(filtered)
2777 filtered.branchmap().write(filtered)
2772
2778
2773 def invalidatecaches(self):
2779 def invalidatecaches(self):
2774
2780
2775 if '_tagscache' in vars(self):
2781 if '_tagscache' in vars(self):
2776 # can't use delattr on proxy
2782 # can't use delattr on proxy
2777 del self.__dict__['_tagscache']
2783 del self.__dict__['_tagscache']
2778
2784
2779 self._branchcaches.clear()
2785 self._branchcaches.clear()
2780 self.invalidatevolatilesets()
2786 self.invalidatevolatilesets()
2781 self._sparsesignaturecache.clear()
2787 self._sparsesignaturecache.clear()
2782
2788
2783 def invalidatevolatilesets(self):
2789 def invalidatevolatilesets(self):
2784 self.filteredrevcache.clear()
2790 self.filteredrevcache.clear()
2785 obsolete.clearobscaches(self)
2791 obsolete.clearobscaches(self)
2786 self._quick_access_changeid_invalidate()
2792 self._quick_access_changeid_invalidate()
2787
2793
2788 def invalidatedirstate(self):
2794 def invalidatedirstate(self):
2789 """Invalidates the dirstate, causing the next call to dirstate
2795 """Invalidates the dirstate, causing the next call to dirstate
2790 to check if it was modified since the last time it was read,
2796 to check if it was modified since the last time it was read,
2791 rereading it if it has.
2797 rereading it if it has.
2792
2798
2793 This is different to dirstate.invalidate() that it doesn't always
2799 This is different to dirstate.invalidate() that it doesn't always
2794 rereads the dirstate. Use dirstate.invalidate() if you want to
2800 rereads the dirstate. Use dirstate.invalidate() if you want to
2795 explicitly read the dirstate again (i.e. restoring it to a previous
2801 explicitly read the dirstate again (i.e. restoring it to a previous
2796 known good state)."""
2802 known good state)."""
2797 if hasunfilteredcache(self, 'dirstate'):
2803 if hasunfilteredcache(self, 'dirstate'):
2798 for k in self.dirstate._filecache:
2804 for k in self.dirstate._filecache:
2799 try:
2805 try:
2800 delattr(self.dirstate, k)
2806 delattr(self.dirstate, k)
2801 except AttributeError:
2807 except AttributeError:
2802 pass
2808 pass
2803 delattr(self.unfiltered(), 'dirstate')
2809 delattr(self.unfiltered(), 'dirstate')
2804
2810
2805 def invalidate(self, clearfilecache=False):
2811 def invalidate(self, clearfilecache=False):
2806 """Invalidates both store and non-store parts other than dirstate
2812 """Invalidates both store and non-store parts other than dirstate
2807
2813
2808 If a transaction is running, invalidation of store is omitted,
2814 If a transaction is running, invalidation of store is omitted,
2809 because discarding in-memory changes might cause inconsistency
2815 because discarding in-memory changes might cause inconsistency
2810 (e.g. incomplete fncache causes unintentional failure, but
2816 (e.g. incomplete fncache causes unintentional failure, but
2811 redundant one doesn't).
2817 redundant one doesn't).
2812 """
2818 """
2813 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2819 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2814 for k in list(self._filecache.keys()):
2820 for k in list(self._filecache.keys()):
2815 # dirstate is invalidated separately in invalidatedirstate()
2821 # dirstate is invalidated separately in invalidatedirstate()
2816 if k == b'dirstate':
2822 if k == b'dirstate':
2817 continue
2823 continue
2818 if (
2824 if (
2819 k == b'changelog'
2825 k == b'changelog'
2820 and self.currenttransaction()
2826 and self.currenttransaction()
2821 and self.changelog._delayed
2827 and self.changelog._delayed
2822 ):
2828 ):
2823 # The changelog object may store unwritten revisions. We don't
2829 # The changelog object may store unwritten revisions. We don't
2824 # want to lose them.
2830 # want to lose them.
2825 # TODO: Solve the problem instead of working around it.
2831 # TODO: Solve the problem instead of working around it.
2826 continue
2832 continue
2827
2833
2828 if clearfilecache:
2834 if clearfilecache:
2829 del self._filecache[k]
2835 del self._filecache[k]
2830 try:
2836 try:
2831 delattr(unfiltered, k)
2837 delattr(unfiltered, k)
2832 except AttributeError:
2838 except AttributeError:
2833 pass
2839 pass
2834 self.invalidatecaches()
2840 self.invalidatecaches()
2835 if not self.currenttransaction():
2841 if not self.currenttransaction():
2836 # TODO: Changing contents of store outside transaction
2842 # TODO: Changing contents of store outside transaction
2837 # causes inconsistency. We should make in-memory store
2843 # causes inconsistency. We should make in-memory store
2838 # changes detectable, and abort if changed.
2844 # changes detectable, and abort if changed.
2839 self.store.invalidatecaches()
2845 self.store.invalidatecaches()
2840
2846
2841 def invalidateall(self):
2847 def invalidateall(self):
2842 """Fully invalidates both store and non-store parts, causing the
2848 """Fully invalidates both store and non-store parts, causing the
2843 subsequent operation to reread any outside changes."""
2849 subsequent operation to reread any outside changes."""
2844 # extension should hook this to invalidate its caches
2850 # extension should hook this to invalidate its caches
2845 self.invalidate()
2851 self.invalidate()
2846 self.invalidatedirstate()
2852 self.invalidatedirstate()
2847
2853
2848 @unfilteredmethod
2854 @unfilteredmethod
2849 def _refreshfilecachestats(self, tr):
2855 def _refreshfilecachestats(self, tr):
2850 """Reload stats of cached files so that they are flagged as valid"""
2856 """Reload stats of cached files so that they are flagged as valid"""
2851 for k, ce in self._filecache.items():
2857 for k, ce in self._filecache.items():
2852 k = pycompat.sysstr(k)
2858 k = pycompat.sysstr(k)
2853 if k == 'dirstate' or k not in self.__dict__:
2859 if k == 'dirstate' or k not in self.__dict__:
2854 continue
2860 continue
2855 ce.refresh()
2861 ce.refresh()
2856
2862
2857 def _lock(
2863 def _lock(
2858 self,
2864 self,
2859 vfs,
2865 vfs,
2860 lockname,
2866 lockname,
2861 wait,
2867 wait,
2862 releasefn,
2868 releasefn,
2863 acquirefn,
2869 acquirefn,
2864 desc,
2870 desc,
2865 ):
2871 ):
2866 timeout = 0
2872 timeout = 0
2867 warntimeout = 0
2873 warntimeout = 0
2868 if wait:
2874 if wait:
2869 timeout = self.ui.configint(b"ui", b"timeout")
2875 timeout = self.ui.configint(b"ui", b"timeout")
2870 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2876 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2871 # internal config: ui.signal-safe-lock
2877 # internal config: ui.signal-safe-lock
2872 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2878 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2873
2879
2874 l = lockmod.trylock(
2880 l = lockmod.trylock(
2875 self.ui,
2881 self.ui,
2876 vfs,
2882 vfs,
2877 lockname,
2883 lockname,
2878 timeout,
2884 timeout,
2879 warntimeout,
2885 warntimeout,
2880 releasefn=releasefn,
2886 releasefn=releasefn,
2881 acquirefn=acquirefn,
2887 acquirefn=acquirefn,
2882 desc=desc,
2888 desc=desc,
2883 signalsafe=signalsafe,
2889 signalsafe=signalsafe,
2884 )
2890 )
2885 return l
2891 return l
2886
2892
2887 def _afterlock(self, callback):
2893 def _afterlock(self, callback):
2888 """add a callback to be run when the repository is fully unlocked
2894 """add a callback to be run when the repository is fully unlocked
2889
2895
2890 The callback will be executed when the outermost lock is released
2896 The callback will be executed when the outermost lock is released
2891 (with wlock being higher level than 'lock')."""
2897 (with wlock being higher level than 'lock')."""
2892 for ref in (self._wlockref, self._lockref):
2898 for ref in (self._wlockref, self._lockref):
2893 l = ref and ref()
2899 l = ref and ref()
2894 if l and l.held:
2900 if l and l.held:
2895 l.postrelease.append(callback)
2901 l.postrelease.append(callback)
2896 break
2902 break
2897 else: # no lock have been found.
2903 else: # no lock have been found.
2898 callback(True)
2904 callback(True)
2899
2905
2900 def lock(self, wait=True):
2906 def lock(self, wait=True):
2901 """Lock the repository store (.hg/store) and return a weak reference
2907 """Lock the repository store (.hg/store) and return a weak reference
2902 to the lock. Use this before modifying the store (e.g. committing or
2908 to the lock. Use this before modifying the store (e.g. committing or
2903 stripping). If you are opening a transaction, get a lock as well.)
2909 stripping). If you are opening a transaction, get a lock as well.)
2904
2910
2905 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
2911 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
2906 'wlock' first to avoid a dead-lock hazard."""
2912 'wlock' first to avoid a dead-lock hazard."""
2907 l = self._currentlock(self._lockref)
2913 l = self._currentlock(self._lockref)
2908 if l is not None:
2914 if l is not None:
2909 l.lock()
2915 l.lock()
2910 return l
2916 return l
2911
2917
2912 l = self._lock(
2918 l = self._lock(
2913 vfs=self.svfs,
2919 vfs=self.svfs,
2914 lockname=b"lock",
2920 lockname=b"lock",
2915 wait=wait,
2921 wait=wait,
2916 releasefn=None,
2922 releasefn=None,
2917 acquirefn=self.invalidate,
2923 acquirefn=self.invalidate,
2918 desc=_(b'repository %s') % self.origroot,
2924 desc=_(b'repository %s') % self.origroot,
2919 )
2925 )
2920 self._lockref = weakref.ref(l)
2926 self._lockref = weakref.ref(l)
2921 return l
2927 return l
2922
2928
2923 def wlock(self, wait=True):
2929 def wlock(self, wait=True):
2924 """Lock the non-store parts of the repository (everything under
2930 """Lock the non-store parts of the repository (everything under
2925 .hg except .hg/store) and return a weak reference to the lock.
2931 .hg except .hg/store) and return a weak reference to the lock.
2926
2932
2927 Use this before modifying files in .hg.
2933 Use this before modifying files in .hg.
2928
2934
2929 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
2935 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
2930 'wlock' first to avoid a dead-lock hazard."""
2936 'wlock' first to avoid a dead-lock hazard."""
2931 l = self._wlockref() if self._wlockref else None
2937 l = self._wlockref() if self._wlockref else None
2932 if l is not None and l.held:
2938 if l is not None and l.held:
2933 l.lock()
2939 l.lock()
2934 return l
2940 return l
2935
2941
2936 # We do not need to check for non-waiting lock acquisition. Such
2942 # We do not need to check for non-waiting lock acquisition. Such
2937 # acquisition would not cause dead-lock as they would just fail.
2943 # acquisition would not cause dead-lock as they would just fail.
2938 if wait and (
2944 if wait and (
2939 self.ui.configbool(b'devel', b'all-warnings')
2945 self.ui.configbool(b'devel', b'all-warnings')
2940 or self.ui.configbool(b'devel', b'check-locks')
2946 or self.ui.configbool(b'devel', b'check-locks')
2941 ):
2947 ):
2942 if self._currentlock(self._lockref) is not None:
2948 if self._currentlock(self._lockref) is not None:
2943 self.ui.develwarn(b'"wlock" acquired after "lock"')
2949 self.ui.develwarn(b'"wlock" acquired after "lock"')
2944
2950
2945 def unlock():
2951 def unlock():
2946 if self.dirstate.pendingparentchange():
2952 if self.dirstate.pendingparentchange():
2947 self.dirstate.invalidate()
2953 self.dirstate.invalidate()
2948 else:
2954 else:
2949 self.dirstate.write(None)
2955 self.dirstate.write(None)
2950
2956
2951 self._filecache[b'dirstate'].refresh()
2957 self._filecache[b'dirstate'].refresh()
2952
2958
2953 l = self._lock(
2959 l = self._lock(
2954 self.vfs,
2960 self.vfs,
2955 b"wlock",
2961 b"wlock",
2956 wait,
2962 wait,
2957 unlock,
2963 unlock,
2958 self.invalidatedirstate,
2964 self.invalidatedirstate,
2959 _(b'working directory of %s') % self.origroot,
2965 _(b'working directory of %s') % self.origroot,
2960 )
2966 )
2961 self._wlockref = weakref.ref(l)
2967 self._wlockref = weakref.ref(l)
2962 return l
2968 return l
2963
2969
2964 def _currentlock(self, lockref):
2970 def _currentlock(self, lockref):
2965 """Returns the lock if it's held, or None if it's not."""
2971 """Returns the lock if it's held, or None if it's not."""
2966 if lockref is None:
2972 if lockref is None:
2967 return None
2973 return None
2968 l = lockref()
2974 l = lockref()
2969 if l is None or not l.held:
2975 if l is None or not l.held:
2970 return None
2976 return None
2971 return l
2977 return l
2972
2978
2973 def currentwlock(self):
2979 def currentwlock(self):
2974 """Returns the wlock if it's held, or None if it's not."""
2980 """Returns the wlock if it's held, or None if it's not."""
2975 return self._currentlock(self._wlockref)
2981 return self._currentlock(self._wlockref)
2976
2982
2977 def checkcommitpatterns(self, wctx, match, status, fail):
2983 def checkcommitpatterns(self, wctx, match, status, fail):
2978 """check for commit arguments that aren't committable"""
2984 """check for commit arguments that aren't committable"""
2979 if match.isexact() or match.prefix():
2985 if match.isexact() or match.prefix():
2980 matched = set(status.modified + status.added + status.removed)
2986 matched = set(status.modified + status.added + status.removed)
2981
2987
2982 for f in match.files():
2988 for f in match.files():
2983 f = self.dirstate.normalize(f)
2989 f = self.dirstate.normalize(f)
2984 if f == b'.' or f in matched or f in wctx.substate:
2990 if f == b'.' or f in matched or f in wctx.substate:
2985 continue
2991 continue
2986 if f in status.deleted:
2992 if f in status.deleted:
2987 fail(f, _(b'file not found!'))
2993 fail(f, _(b'file not found!'))
2988 # Is it a directory that exists or used to exist?
2994 # Is it a directory that exists or used to exist?
2989 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
2995 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
2990 d = f + b'/'
2996 d = f + b'/'
2991 for mf in matched:
2997 for mf in matched:
2992 if mf.startswith(d):
2998 if mf.startswith(d):
2993 break
2999 break
2994 else:
3000 else:
2995 fail(f, _(b"no match under directory!"))
3001 fail(f, _(b"no match under directory!"))
2996 elif f not in self.dirstate:
3002 elif f not in self.dirstate:
2997 fail(f, _(b"file not tracked!"))
3003 fail(f, _(b"file not tracked!"))
2998
3004
2999 @unfilteredmethod
3005 @unfilteredmethod
3000 def commit(
3006 def commit(
3001 self,
3007 self,
3002 text=b"",
3008 text=b"",
3003 user=None,
3009 user=None,
3004 date=None,
3010 date=None,
3005 match=None,
3011 match=None,
3006 force=False,
3012 force=False,
3007 editor=None,
3013 editor=None,
3008 extra=None,
3014 extra=None,
3009 ):
3015 ):
3010 """Add a new revision to current repository.
3016 """Add a new revision to current repository.
3011
3017
3012 Revision information is gathered from the working directory,
3018 Revision information is gathered from the working directory,
3013 match can be used to filter the committed files. If editor is
3019 match can be used to filter the committed files. If editor is
3014 supplied, it is called to get a commit message.
3020 supplied, it is called to get a commit message.
3015 """
3021 """
3016 if extra is None:
3022 if extra is None:
3017 extra = {}
3023 extra = {}
3018
3024
3019 def fail(f, msg):
3025 def fail(f, msg):
3020 raise error.InputError(b'%s: %s' % (f, msg))
3026 raise error.InputError(b'%s: %s' % (f, msg))
3021
3027
3022 if not match:
3028 if not match:
3023 match = matchmod.always()
3029 match = matchmod.always()
3024
3030
3025 if not force:
3031 if not force:
3026 match.bad = fail
3032 match.bad = fail
3027
3033
3028 # lock() for recent changelog (see issue4368)
3034 # lock() for recent changelog (see issue4368)
3029 with self.wlock(), self.lock():
3035 with self.wlock(), self.lock():
3030 wctx = self[None]
3036 wctx = self[None]
3031 merge = len(wctx.parents()) > 1
3037 merge = len(wctx.parents()) > 1
3032
3038
3033 if not force and merge and not match.always():
3039 if not force and merge and not match.always():
3034 raise error.Abort(
3040 raise error.Abort(
3035 _(
3041 _(
3036 b'cannot partially commit a merge '
3042 b'cannot partially commit a merge '
3037 b'(do not specify files or patterns)'
3043 b'(do not specify files or patterns)'
3038 )
3044 )
3039 )
3045 )
3040
3046
3041 status = self.status(match=match, clean=force)
3047 status = self.status(match=match, clean=force)
3042 if force:
3048 if force:
3043 status.modified.extend(
3049 status.modified.extend(
3044 status.clean
3050 status.clean
3045 ) # mq may commit clean files
3051 ) # mq may commit clean files
3046
3052
3047 # check subrepos
3053 # check subrepos
3048 subs, commitsubs, newstate = subrepoutil.precommit(
3054 subs, commitsubs, newstate = subrepoutil.precommit(
3049 self.ui, wctx, status, match, force=force
3055 self.ui, wctx, status, match, force=force
3050 )
3056 )
3051
3057
3052 # make sure all explicit patterns are matched
3058 # make sure all explicit patterns are matched
3053 if not force:
3059 if not force:
3054 self.checkcommitpatterns(wctx, match, status, fail)
3060 self.checkcommitpatterns(wctx, match, status, fail)
3055
3061
3056 cctx = context.workingcommitctx(
3062 cctx = context.workingcommitctx(
3057 self, status, text, user, date, extra
3063 self, status, text, user, date, extra
3058 )
3064 )
3059
3065
3060 ms = mergestatemod.mergestate.read(self)
3066 ms = mergestatemod.mergestate.read(self)
3061 mergeutil.checkunresolved(ms)
3067 mergeutil.checkunresolved(ms)
3062
3068
3063 # internal config: ui.allowemptycommit
3069 # internal config: ui.allowemptycommit
3064 if cctx.isempty() and not self.ui.configbool(
3070 if cctx.isempty() and not self.ui.configbool(
3065 b'ui', b'allowemptycommit'
3071 b'ui', b'allowemptycommit'
3066 ):
3072 ):
3067 self.ui.debug(b'nothing to commit, clearing merge state\n')
3073 self.ui.debug(b'nothing to commit, clearing merge state\n')
3068 ms.reset()
3074 ms.reset()
3069 return None
3075 return None
3070
3076
3071 if merge and cctx.deleted():
3077 if merge and cctx.deleted():
3072 raise error.Abort(_(b"cannot commit merge with missing files"))
3078 raise error.Abort(_(b"cannot commit merge with missing files"))
3073
3079
3074 if editor:
3080 if editor:
3075 cctx._text = editor(self, cctx, subs)
3081 cctx._text = editor(self, cctx, subs)
3076 edited = text != cctx._text
3082 edited = text != cctx._text
3077
3083
3078 # Save commit message in case this transaction gets rolled back
3084 # Save commit message in case this transaction gets rolled back
3079 # (e.g. by a pretxncommit hook). Leave the content alone on
3085 # (e.g. by a pretxncommit hook). Leave the content alone on
3080 # the assumption that the user will use the same editor again.
3086 # the assumption that the user will use the same editor again.
3081 msgfn = self.savecommitmessage(cctx._text)
3087 msgfn = self.savecommitmessage(cctx._text)
3082
3088
3083 # commit subs and write new state
3089 # commit subs and write new state
3084 if subs:
3090 if subs:
3085 uipathfn = scmutil.getuipathfn(self)
3091 uipathfn = scmutil.getuipathfn(self)
3086 for s in sorted(commitsubs):
3092 for s in sorted(commitsubs):
3087 sub = wctx.sub(s)
3093 sub = wctx.sub(s)
3088 self.ui.status(
3094 self.ui.status(
3089 _(b'committing subrepository %s\n')
3095 _(b'committing subrepository %s\n')
3090 % uipathfn(subrepoutil.subrelpath(sub))
3096 % uipathfn(subrepoutil.subrelpath(sub))
3091 )
3097 )
3092 sr = sub.commit(cctx._text, user, date)
3098 sr = sub.commit(cctx._text, user, date)
3093 newstate[s] = (newstate[s][0], sr)
3099 newstate[s] = (newstate[s][0], sr)
3094 subrepoutil.writestate(self, newstate)
3100 subrepoutil.writestate(self, newstate)
3095
3101
3096 p1, p2 = self.dirstate.parents()
3102 p1, p2 = self.dirstate.parents()
3097 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3103 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3098 try:
3104 try:
3099 self.hook(
3105 self.hook(
3100 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3106 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3101 )
3107 )
3102 with self.transaction(b'commit'):
3108 with self.transaction(b'commit'):
3103 ret = self.commitctx(cctx, True)
3109 ret = self.commitctx(cctx, True)
3104 # update bookmarks, dirstate and mergestate
3110 # update bookmarks, dirstate and mergestate
3105 bookmarks.update(self, [p1, p2], ret)
3111 bookmarks.update(self, [p1, p2], ret)
3106 cctx.markcommitted(ret)
3112 cctx.markcommitted(ret)
3107 ms.reset()
3113 ms.reset()
3108 except: # re-raises
3114 except: # re-raises
3109 if edited:
3115 if edited:
3110 self.ui.write(
3116 self.ui.write(
3111 _(b'note: commit message saved in %s\n') % msgfn
3117 _(b'note: commit message saved in %s\n') % msgfn
3112 )
3118 )
3113 self.ui.write(
3119 self.ui.write(
3114 _(
3120 _(
3115 b"note: use 'hg commit --logfile "
3121 b"note: use 'hg commit --logfile "
3116 b".hg/last-message.txt --edit' to reuse it\n"
3122 b".hg/last-message.txt --edit' to reuse it\n"
3117 )
3123 )
3118 )
3124 )
3119 raise
3125 raise
3120
3126
3121 def commithook(unused_success):
3127 def commithook(unused_success):
3122 # hack for command that use a temporary commit (eg: histedit)
3128 # hack for command that use a temporary commit (eg: histedit)
3123 # temporary commit got stripped before hook release
3129 # temporary commit got stripped before hook release
3124 if self.changelog.hasnode(ret):
3130 if self.changelog.hasnode(ret):
3125 self.hook(
3131 self.hook(
3126 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3132 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3127 )
3133 )
3128
3134
3129 self._afterlock(commithook)
3135 self._afterlock(commithook)
3130 return ret
3136 return ret
3131
3137
3132 @unfilteredmethod
3138 @unfilteredmethod
3133 def commitctx(self, ctx, error=False, origctx=None):
3139 def commitctx(self, ctx, error=False, origctx=None):
3134 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3140 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3135
3141
3136 @unfilteredmethod
3142 @unfilteredmethod
3137 def destroying(self):
3143 def destroying(self):
3138 """Inform the repository that nodes are about to be destroyed.
3144 """Inform the repository that nodes are about to be destroyed.
3139 Intended for use by strip and rollback, so there's a common
3145 Intended for use by strip and rollback, so there's a common
3140 place for anything that has to be done before destroying history.
3146 place for anything that has to be done before destroying history.
3141
3147
3142 This is mostly useful for saving state that is in memory and waiting
3148 This is mostly useful for saving state that is in memory and waiting
3143 to be flushed when the current lock is released. Because a call to
3149 to be flushed when the current lock is released. Because a call to
3144 destroyed is imminent, the repo will be invalidated causing those
3150 destroyed is imminent, the repo will be invalidated causing those
3145 changes to stay in memory (waiting for the next unlock), or vanish
3151 changes to stay in memory (waiting for the next unlock), or vanish
3146 completely.
3152 completely.
3147 """
3153 """
3148 # When using the same lock to commit and strip, the phasecache is left
3154 # When using the same lock to commit and strip, the phasecache is left
3149 # dirty after committing. Then when we strip, the repo is invalidated,
3155 # dirty after committing. Then when we strip, the repo is invalidated,
3150 # causing those changes to disappear.
3156 # causing those changes to disappear.
3151 if '_phasecache' in vars(self):
3157 if '_phasecache' in vars(self):
3152 self._phasecache.write()
3158 self._phasecache.write()
3153
3159
3154 @unfilteredmethod
3160 @unfilteredmethod
3155 def destroyed(self):
3161 def destroyed(self):
3156 """Inform the repository that nodes have been destroyed.
3162 """Inform the repository that nodes have been destroyed.
3157 Intended for use by strip and rollback, so there's a common
3163 Intended for use by strip and rollback, so there's a common
3158 place for anything that has to be done after destroying history.
3164 place for anything that has to be done after destroying history.
3159 """
3165 """
3160 # When one tries to:
3166 # When one tries to:
3161 # 1) destroy nodes thus calling this method (e.g. strip)
3167 # 1) destroy nodes thus calling this method (e.g. strip)
3162 # 2) use phasecache somewhere (e.g. commit)
3168 # 2) use phasecache somewhere (e.g. commit)
3163 #
3169 #
3164 # then 2) will fail because the phasecache contains nodes that were
3170 # then 2) will fail because the phasecache contains nodes that were
3165 # removed. We can either remove phasecache from the filecache,
3171 # removed. We can either remove phasecache from the filecache,
3166 # causing it to reload next time it is accessed, or simply filter
3172 # causing it to reload next time it is accessed, or simply filter
3167 # the removed nodes now and write the updated cache.
3173 # the removed nodes now and write the updated cache.
3168 self._phasecache.filterunknown(self)
3174 self._phasecache.filterunknown(self)
3169 self._phasecache.write()
3175 self._phasecache.write()
3170
3176
3171 # refresh all repository caches
3177 # refresh all repository caches
3172 self.updatecaches()
3178 self.updatecaches()
3173
3179
3174 # Ensure the persistent tag cache is updated. Doing it now
3180 # Ensure the persistent tag cache is updated. Doing it now
3175 # means that the tag cache only has to worry about destroyed
3181 # means that the tag cache only has to worry about destroyed
3176 # heads immediately after a strip/rollback. That in turn
3182 # heads immediately after a strip/rollback. That in turn
3177 # guarantees that "cachetip == currenttip" (comparing both rev
3183 # guarantees that "cachetip == currenttip" (comparing both rev
3178 # and node) always means no nodes have been added or destroyed.
3184 # and node) always means no nodes have been added or destroyed.
3179
3185
3180 # XXX this is suboptimal when qrefresh'ing: we strip the current
3186 # XXX this is suboptimal when qrefresh'ing: we strip the current
3181 # head, refresh the tag cache, then immediately add a new head.
3187 # head, refresh the tag cache, then immediately add a new head.
3182 # But I think doing it this way is necessary for the "instant
3188 # But I think doing it this way is necessary for the "instant
3183 # tag cache retrieval" case to work.
3189 # tag cache retrieval" case to work.
3184 self.invalidate()
3190 self.invalidate()
3185
3191
3186 def status(
3192 def status(
3187 self,
3193 self,
3188 node1=b'.',
3194 node1=b'.',
3189 node2=None,
3195 node2=None,
3190 match=None,
3196 match=None,
3191 ignored=False,
3197 ignored=False,
3192 clean=False,
3198 clean=False,
3193 unknown=False,
3199 unknown=False,
3194 listsubrepos=False,
3200 listsubrepos=False,
3195 ):
3201 ):
3196 '''a convenience method that calls node1.status(node2)'''
3202 '''a convenience method that calls node1.status(node2)'''
3197 return self[node1].status(
3203 return self[node1].status(
3198 node2, match, ignored, clean, unknown, listsubrepos
3204 node2, match, ignored, clean, unknown, listsubrepos
3199 )
3205 )
3200
3206
3201 def addpostdsstatus(self, ps):
3207 def addpostdsstatus(self, ps):
3202 """Add a callback to run within the wlock, at the point at which status
3208 """Add a callback to run within the wlock, at the point at which status
3203 fixups happen.
3209 fixups happen.
3204
3210
3205 On status completion, callback(wctx, status) will be called with the
3211 On status completion, callback(wctx, status) will be called with the
3206 wlock held, unless the dirstate has changed from underneath or the wlock
3212 wlock held, unless the dirstate has changed from underneath or the wlock
3207 couldn't be grabbed.
3213 couldn't be grabbed.
3208
3214
3209 Callbacks should not capture and use a cached copy of the dirstate --
3215 Callbacks should not capture and use a cached copy of the dirstate --
3210 it might change in the meanwhile. Instead, they should access the
3216 it might change in the meanwhile. Instead, they should access the
3211 dirstate via wctx.repo().dirstate.
3217 dirstate via wctx.repo().dirstate.
3212
3218
3213 This list is emptied out after each status run -- extensions should
3219 This list is emptied out after each status run -- extensions should
3214 make sure it adds to this list each time dirstate.status is called.
3220 make sure it adds to this list each time dirstate.status is called.
3215 Extensions should also make sure they don't call this for statuses
3221 Extensions should also make sure they don't call this for statuses
3216 that don't involve the dirstate.
3222 that don't involve the dirstate.
3217 """
3223 """
3218
3224
3219 # The list is located here for uniqueness reasons -- it is actually
3225 # The list is located here for uniqueness reasons -- it is actually
3220 # managed by the workingctx, but that isn't unique per-repo.
3226 # managed by the workingctx, but that isn't unique per-repo.
3221 self._postdsstatus.append(ps)
3227 self._postdsstatus.append(ps)
3222
3228
3223 def postdsstatus(self):
3229 def postdsstatus(self):
3224 """Used by workingctx to get the list of post-dirstate-status hooks."""
3230 """Used by workingctx to get the list of post-dirstate-status hooks."""
3225 return self._postdsstatus
3231 return self._postdsstatus
3226
3232
3227 def clearpostdsstatus(self):
3233 def clearpostdsstatus(self):
3228 """Used by workingctx to clear post-dirstate-status hooks."""
3234 """Used by workingctx to clear post-dirstate-status hooks."""
3229 del self._postdsstatus[:]
3235 del self._postdsstatus[:]
3230
3236
3231 def heads(self, start=None):
3237 def heads(self, start=None):
3232 if start is None:
3238 if start is None:
3233 cl = self.changelog
3239 cl = self.changelog
3234 headrevs = reversed(cl.headrevs())
3240 headrevs = reversed(cl.headrevs())
3235 return [cl.node(rev) for rev in headrevs]
3241 return [cl.node(rev) for rev in headrevs]
3236
3242
3237 heads = self.changelog.heads(start)
3243 heads = self.changelog.heads(start)
3238 # sort the output in rev descending order
3244 # sort the output in rev descending order
3239 return sorted(heads, key=self.changelog.rev, reverse=True)
3245 return sorted(heads, key=self.changelog.rev, reverse=True)
3240
3246
3241 def branchheads(self, branch=None, start=None, closed=False):
3247 def branchheads(self, branch=None, start=None, closed=False):
3242 """return a (possibly filtered) list of heads for the given branch
3248 """return a (possibly filtered) list of heads for the given branch
3243
3249
3244 Heads are returned in topological order, from newest to oldest.
3250 Heads are returned in topological order, from newest to oldest.
3245 If branch is None, use the dirstate branch.
3251 If branch is None, use the dirstate branch.
3246 If start is not None, return only heads reachable from start.
3252 If start is not None, return only heads reachable from start.
3247 If closed is True, return heads that are marked as closed as well.
3253 If closed is True, return heads that are marked as closed as well.
3248 """
3254 """
3249 if branch is None:
3255 if branch is None:
3250 branch = self[None].branch()
3256 branch = self[None].branch()
3251 branches = self.branchmap()
3257 branches = self.branchmap()
3252 if not branches.hasbranch(branch):
3258 if not branches.hasbranch(branch):
3253 return []
3259 return []
3254 # the cache returns heads ordered lowest to highest
3260 # the cache returns heads ordered lowest to highest
3255 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3261 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3256 if start is not None:
3262 if start is not None:
3257 # filter out the heads that cannot be reached from startrev
3263 # filter out the heads that cannot be reached from startrev
3258 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3264 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3259 bheads = [h for h in bheads if h in fbheads]
3265 bheads = [h for h in bheads if h in fbheads]
3260 return bheads
3266 return bheads
3261
3267
3262 def branches(self, nodes):
3268 def branches(self, nodes):
3263 if not nodes:
3269 if not nodes:
3264 nodes = [self.changelog.tip()]
3270 nodes = [self.changelog.tip()]
3265 b = []
3271 b = []
3266 for n in nodes:
3272 for n in nodes:
3267 t = n
3273 t = n
3268 while True:
3274 while True:
3269 p = self.changelog.parents(n)
3275 p = self.changelog.parents(n)
3270 if p[1] != self.nullid or p[0] == self.nullid:
3276 if p[1] != self.nullid or p[0] == self.nullid:
3271 b.append((t, n, p[0], p[1]))
3277 b.append((t, n, p[0], p[1]))
3272 break
3278 break
3273 n = p[0]
3279 n = p[0]
3274 return b
3280 return b
3275
3281
3276 def between(self, pairs):
3282 def between(self, pairs):
3277 r = []
3283 r = []
3278
3284
3279 for top, bottom in pairs:
3285 for top, bottom in pairs:
3280 n, l, i = top, [], 0
3286 n, l, i = top, [], 0
3281 f = 1
3287 f = 1
3282
3288
3283 while n != bottom and n != self.nullid:
3289 while n != bottom and n != self.nullid:
3284 p = self.changelog.parents(n)[0]
3290 p = self.changelog.parents(n)[0]
3285 if i == f:
3291 if i == f:
3286 l.append(n)
3292 l.append(n)
3287 f = f * 2
3293 f = f * 2
3288 n = p
3294 n = p
3289 i += 1
3295 i += 1
3290
3296
3291 r.append(l)
3297 r.append(l)
3292
3298
3293 return r
3299 return r
3294
3300
3295 def checkpush(self, pushop):
3301 def checkpush(self, pushop):
3296 """Extensions can override this function if additional checks have
3302 """Extensions can override this function if additional checks have
3297 to be performed before pushing, or call it if they override push
3303 to be performed before pushing, or call it if they override push
3298 command.
3304 command.
3299 """
3305 """
3300
3306
3301 @unfilteredpropertycache
3307 @unfilteredpropertycache
3302 def prepushoutgoinghooks(self):
3308 def prepushoutgoinghooks(self):
3303 """Return util.hooks consists of a pushop with repo, remote, outgoing
3309 """Return util.hooks consists of a pushop with repo, remote, outgoing
3304 methods, which are called before pushing changesets.
3310 methods, which are called before pushing changesets.
3305 """
3311 """
3306 return util.hooks()
3312 return util.hooks()
3307
3313
3308 def pushkey(self, namespace, key, old, new):
3314 def pushkey(self, namespace, key, old, new):
3309 try:
3315 try:
3310 tr = self.currenttransaction()
3316 tr = self.currenttransaction()
3311 hookargs = {}
3317 hookargs = {}
3312 if tr is not None:
3318 if tr is not None:
3313 hookargs.update(tr.hookargs)
3319 hookargs.update(tr.hookargs)
3314 hookargs = pycompat.strkwargs(hookargs)
3320 hookargs = pycompat.strkwargs(hookargs)
3315 hookargs['namespace'] = namespace
3321 hookargs['namespace'] = namespace
3316 hookargs['key'] = key
3322 hookargs['key'] = key
3317 hookargs['old'] = old
3323 hookargs['old'] = old
3318 hookargs['new'] = new
3324 hookargs['new'] = new
3319 self.hook(b'prepushkey', throw=True, **hookargs)
3325 self.hook(b'prepushkey', throw=True, **hookargs)
3320 except error.HookAbort as exc:
3326 except error.HookAbort as exc:
3321 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3327 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3322 if exc.hint:
3328 if exc.hint:
3323 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3329 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3324 return False
3330 return False
3325 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3331 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3326 ret = pushkey.push(self, namespace, key, old, new)
3332 ret = pushkey.push(self, namespace, key, old, new)
3327
3333
3328 def runhook(unused_success):
3334 def runhook(unused_success):
3329 self.hook(
3335 self.hook(
3330 b'pushkey',
3336 b'pushkey',
3331 namespace=namespace,
3337 namespace=namespace,
3332 key=key,
3338 key=key,
3333 old=old,
3339 old=old,
3334 new=new,
3340 new=new,
3335 ret=ret,
3341 ret=ret,
3336 )
3342 )
3337
3343
3338 self._afterlock(runhook)
3344 self._afterlock(runhook)
3339 return ret
3345 return ret
3340
3346
3341 def listkeys(self, namespace):
3347 def listkeys(self, namespace):
3342 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3348 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3343 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3349 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3344 values = pushkey.list(self, namespace)
3350 values = pushkey.list(self, namespace)
3345 self.hook(b'listkeys', namespace=namespace, values=values)
3351 self.hook(b'listkeys', namespace=namespace, values=values)
3346 return values
3352 return values
3347
3353
3348 def debugwireargs(self, one, two, three=None, four=None, five=None):
3354 def debugwireargs(self, one, two, three=None, four=None, five=None):
3349 '''used to test argument passing over the wire'''
3355 '''used to test argument passing over the wire'''
3350 return b"%s %s %s %s %s" % (
3356 return b"%s %s %s %s %s" % (
3351 one,
3357 one,
3352 two,
3358 two,
3353 pycompat.bytestr(three),
3359 pycompat.bytestr(three),
3354 pycompat.bytestr(four),
3360 pycompat.bytestr(four),
3355 pycompat.bytestr(five),
3361 pycompat.bytestr(five),
3356 )
3362 )
3357
3363
3358 def savecommitmessage(self, text):
3364 def savecommitmessage(self, text):
3359 fp = self.vfs(b'last-message.txt', b'wb')
3365 fp = self.vfs(b'last-message.txt', b'wb')
3360 try:
3366 try:
3361 fp.write(text)
3367 fp.write(text)
3362 finally:
3368 finally:
3363 fp.close()
3369 fp.close()
3364 return self.pathto(fp.name[len(self.root) + 1 :])
3370 return self.pathto(fp.name[len(self.root) + 1 :])
3365
3371
3366 def register_wanted_sidedata(self, category):
3372 def register_wanted_sidedata(self, category):
3367 if requirementsmod.REVLOGV2_REQUIREMENT not in self.requirements:
3373 if requirementsmod.REVLOGV2_REQUIREMENT not in self.requirements:
3368 # Only revlogv2 repos can want sidedata.
3374 # Only revlogv2 repos can want sidedata.
3369 return
3375 return
3370 self._wanted_sidedata.add(pycompat.bytestr(category))
3376 self._wanted_sidedata.add(pycompat.bytestr(category))
3371
3377
3372 def register_sidedata_computer(
3378 def register_sidedata_computer(
3373 self, kind, category, keys, computer, flags, replace=False
3379 self, kind, category, keys, computer, flags, replace=False
3374 ):
3380 ):
3375 if kind not in revlogconst.ALL_KINDS:
3381 if kind not in revlogconst.ALL_KINDS:
3376 msg = _(b"unexpected revlog kind '%s'.")
3382 msg = _(b"unexpected revlog kind '%s'.")
3377 raise error.ProgrammingError(msg % kind)
3383 raise error.ProgrammingError(msg % kind)
3378 category = pycompat.bytestr(category)
3384 category = pycompat.bytestr(category)
3379 already_registered = category in self._sidedata_computers.get(kind, [])
3385 already_registered = category in self._sidedata_computers.get(kind, [])
3380 if already_registered and not replace:
3386 if already_registered and not replace:
3381 msg = _(
3387 msg = _(
3382 b"cannot register a sidedata computer twice for category '%s'."
3388 b"cannot register a sidedata computer twice for category '%s'."
3383 )
3389 )
3384 raise error.ProgrammingError(msg % category)
3390 raise error.ProgrammingError(msg % category)
3385 if replace and not already_registered:
3391 if replace and not already_registered:
3386 msg = _(
3392 msg = _(
3387 b"cannot replace a sidedata computer that isn't registered "
3393 b"cannot replace a sidedata computer that isn't registered "
3388 b"for category '%s'."
3394 b"for category '%s'."
3389 )
3395 )
3390 raise error.ProgrammingError(msg % category)
3396 raise error.ProgrammingError(msg % category)
3391 self._sidedata_computers.setdefault(kind, {})
3397 self._sidedata_computers.setdefault(kind, {})
3392 self._sidedata_computers[kind][category] = (keys, computer, flags)
3398 self._sidedata_computers[kind][category] = (keys, computer, flags)
3393
3399
3394
3400
3395 # used to avoid circular references so destructors work
3401 # used to avoid circular references so destructors work
3396 def aftertrans(files):
3402 def aftertrans(files):
3397 renamefiles = [tuple(t) for t in files]
3403 renamefiles = [tuple(t) for t in files]
3398
3404
3399 def a():
3405 def a():
3400 for vfs, src, dest in renamefiles:
3406 for vfs, src, dest in renamefiles:
3401 # if src and dest refer to a same file, vfs.rename is a no-op,
3407 # if src and dest refer to a same file, vfs.rename is a no-op,
3402 # leaving both src and dest on disk. delete dest to make sure
3408 # leaving both src and dest on disk. delete dest to make sure
3403 # the rename couldn't be such a no-op.
3409 # the rename couldn't be such a no-op.
3404 vfs.tryunlink(dest)
3410 vfs.tryunlink(dest)
3405 try:
3411 try:
3406 vfs.rename(src, dest)
3412 vfs.rename(src, dest)
3407 except OSError: # journal file does not yet exist
3413 except OSError: # journal file does not yet exist
3408 pass
3414 pass
3409
3415
3410 return a
3416 return a
3411
3417
3412
3418
3413 def undoname(fn):
3419 def undoname(fn):
3414 base, name = os.path.split(fn)
3420 base, name = os.path.split(fn)
3415 assert name.startswith(b'journal')
3421 assert name.startswith(b'journal')
3416 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3422 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3417
3423
3418
3424
3419 def instance(ui, path, create, intents=None, createopts=None):
3425 def instance(ui, path, create, intents=None, createopts=None):
3420 localpath = urlutil.urllocalpath(path)
3426 localpath = urlutil.urllocalpath(path)
3421 if create:
3427 if create:
3422 createrepository(ui, localpath, createopts=createopts)
3428 createrepository(ui, localpath, createopts=createopts)
3423
3429
3424 return makelocalrepository(ui, localpath, intents=intents)
3430 return makelocalrepository(ui, localpath, intents=intents)
3425
3431
3426
3432
3427 def islocal(path):
3433 def islocal(path):
3428 return True
3434 return True
3429
3435
3430
3436
3431 def defaultcreateopts(ui, createopts=None):
3437 def defaultcreateopts(ui, createopts=None):
3432 """Populate the default creation options for a repository.
3438 """Populate the default creation options for a repository.
3433
3439
3434 A dictionary of explicitly requested creation options can be passed
3440 A dictionary of explicitly requested creation options can be passed
3435 in. Missing keys will be populated.
3441 in. Missing keys will be populated.
3436 """
3442 """
3437 createopts = dict(createopts or {})
3443 createopts = dict(createopts or {})
3438
3444
3439 if b'backend' not in createopts:
3445 if b'backend' not in createopts:
3440 # experimental config: storage.new-repo-backend
3446 # experimental config: storage.new-repo-backend
3441 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3447 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3442
3448
3443 return createopts
3449 return createopts
3444
3450
3445
3451
3446 def newreporequirements(ui, createopts):
3452 def newreporequirements(ui, createopts):
3447 """Determine the set of requirements for a new local repository.
3453 """Determine the set of requirements for a new local repository.
3448
3454
3449 Extensions can wrap this function to specify custom requirements for
3455 Extensions can wrap this function to specify custom requirements for
3450 new repositories.
3456 new repositories.
3451 """
3457 """
3452 # If the repo is being created from a shared repository, we copy
3458 # If the repo is being created from a shared repository, we copy
3453 # its requirements.
3459 # its requirements.
3454 if b'sharedrepo' in createopts:
3460 if b'sharedrepo' in createopts:
3455 requirements = set(createopts[b'sharedrepo'].requirements)
3461 requirements = set(createopts[b'sharedrepo'].requirements)
3456 if createopts.get(b'sharedrelative'):
3462 if createopts.get(b'sharedrelative'):
3457 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3463 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3458 else:
3464 else:
3459 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3465 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3460
3466
3461 return requirements
3467 return requirements
3462
3468
3463 if b'backend' not in createopts:
3469 if b'backend' not in createopts:
3464 raise error.ProgrammingError(
3470 raise error.ProgrammingError(
3465 b'backend key not present in createopts; '
3471 b'backend key not present in createopts; '
3466 b'was defaultcreateopts() called?'
3472 b'was defaultcreateopts() called?'
3467 )
3473 )
3468
3474
3469 if createopts[b'backend'] != b'revlogv1':
3475 if createopts[b'backend'] != b'revlogv1':
3470 raise error.Abort(
3476 raise error.Abort(
3471 _(
3477 _(
3472 b'unable to determine repository requirements for '
3478 b'unable to determine repository requirements for '
3473 b'storage backend: %s'
3479 b'storage backend: %s'
3474 )
3480 )
3475 % createopts[b'backend']
3481 % createopts[b'backend']
3476 )
3482 )
3477
3483
3478 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3484 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3479 if ui.configbool(b'format', b'usestore'):
3485 if ui.configbool(b'format', b'usestore'):
3480 requirements.add(requirementsmod.STORE_REQUIREMENT)
3486 requirements.add(requirementsmod.STORE_REQUIREMENT)
3481 if ui.configbool(b'format', b'usefncache'):
3487 if ui.configbool(b'format', b'usefncache'):
3482 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3488 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3483 if ui.configbool(b'format', b'dotencode'):
3489 if ui.configbool(b'format', b'dotencode'):
3484 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3490 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3485
3491
3486 compengines = ui.configlist(b'format', b'revlog-compression')
3492 compengines = ui.configlist(b'format', b'revlog-compression')
3487 for compengine in compengines:
3493 for compengine in compengines:
3488 if compengine in util.compengines:
3494 if compengine in util.compengines:
3489 engine = util.compengines[compengine]
3495 engine = util.compengines[compengine]
3490 if engine.available() and engine.revlogheader():
3496 if engine.available() and engine.revlogheader():
3491 break
3497 break
3492 else:
3498 else:
3493 raise error.Abort(
3499 raise error.Abort(
3494 _(
3500 _(
3495 b'compression engines %s defined by '
3501 b'compression engines %s defined by '
3496 b'format.revlog-compression not available'
3502 b'format.revlog-compression not available'
3497 )
3503 )
3498 % b', '.join(b'"%s"' % e for e in compengines),
3504 % b', '.join(b'"%s"' % e for e in compengines),
3499 hint=_(
3505 hint=_(
3500 b'run "hg debuginstall" to list available '
3506 b'run "hg debuginstall" to list available '
3501 b'compression engines'
3507 b'compression engines'
3502 ),
3508 ),
3503 )
3509 )
3504
3510
3505 # zlib is the historical default and doesn't need an explicit requirement.
3511 # zlib is the historical default and doesn't need an explicit requirement.
3506 if compengine == b'zstd':
3512 if compengine == b'zstd':
3507 requirements.add(b'revlog-compression-zstd')
3513 requirements.add(b'revlog-compression-zstd')
3508 elif compengine != b'zlib':
3514 elif compengine != b'zlib':
3509 requirements.add(b'exp-compression-%s' % compengine)
3515 requirements.add(b'exp-compression-%s' % compengine)
3510
3516
3511 if scmutil.gdinitconfig(ui):
3517 if scmutil.gdinitconfig(ui):
3512 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3518 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3513 if ui.configbool(b'format', b'sparse-revlog'):
3519 if ui.configbool(b'format', b'sparse-revlog'):
3514 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3520 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3515
3521
3516 # experimental config: format.exp-use-copies-side-data-changeset
3522 # experimental config: format.exp-use-copies-side-data-changeset
3517 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3523 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3518 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3524 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3519 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3525 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3520 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3526 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3521 if ui.configbool(b'experimental', b'treemanifest'):
3527 if ui.configbool(b'experimental', b'treemanifest'):
3522 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3528 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3523
3529
3524 revlogv2 = ui.config(b'experimental', b'revlogv2')
3530 revlogv2 = ui.config(b'experimental', b'revlogv2')
3525 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3531 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3526 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3532 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3527 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3533 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3528 # experimental config: format.internal-phase
3534 # experimental config: format.internal-phase
3529 if ui.configbool(b'format', b'internal-phase'):
3535 if ui.configbool(b'format', b'internal-phase'):
3530 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3536 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3531
3537
3532 if createopts.get(b'narrowfiles'):
3538 if createopts.get(b'narrowfiles'):
3533 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3539 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3534
3540
3535 if createopts.get(b'lfs'):
3541 if createopts.get(b'lfs'):
3536 requirements.add(b'lfs')
3542 requirements.add(b'lfs')
3537
3543
3538 if ui.configbool(b'format', b'bookmarks-in-store'):
3544 if ui.configbool(b'format', b'bookmarks-in-store'):
3539 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3545 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3540
3546
3541 if ui.configbool(b'format', b'use-persistent-nodemap'):
3547 if ui.configbool(b'format', b'use-persistent-nodemap'):
3542 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3548 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3543
3549
3544 # if share-safe is enabled, let's create the new repository with the new
3550 # if share-safe is enabled, let's create the new repository with the new
3545 # requirement
3551 # requirement
3546 if ui.configbool(b'format', b'use-share-safe'):
3552 if ui.configbool(b'format', b'use-share-safe'):
3547 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3553 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3548
3554
3549 return requirements
3555 return requirements
3550
3556
3551
3557
3552 def checkrequirementscompat(ui, requirements):
3558 def checkrequirementscompat(ui, requirements):
3553 """Checks compatibility of repository requirements enabled and disabled.
3559 """Checks compatibility of repository requirements enabled and disabled.
3554
3560
3555 Returns a set of requirements which needs to be dropped because dependend
3561 Returns a set of requirements which needs to be dropped because dependend
3556 requirements are not enabled. Also warns users about it"""
3562 requirements are not enabled. Also warns users about it"""
3557
3563
3558 dropped = set()
3564 dropped = set()
3559
3565
3560 if requirementsmod.STORE_REQUIREMENT not in requirements:
3566 if requirementsmod.STORE_REQUIREMENT not in requirements:
3561 if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3567 if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3562 ui.warn(
3568 ui.warn(
3563 _(
3569 _(
3564 b'ignoring enabled \'format.bookmarks-in-store\' config '
3570 b'ignoring enabled \'format.bookmarks-in-store\' config '
3565 b'beacuse it is incompatible with disabled '
3571 b'beacuse it is incompatible with disabled '
3566 b'\'format.usestore\' config\n'
3572 b'\'format.usestore\' config\n'
3567 )
3573 )
3568 )
3574 )
3569 dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3575 dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3570
3576
3571 if (
3577 if (
3572 requirementsmod.SHARED_REQUIREMENT in requirements
3578 requirementsmod.SHARED_REQUIREMENT in requirements
3573 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3579 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3574 ):
3580 ):
3575 raise error.Abort(
3581 raise error.Abort(
3576 _(
3582 _(
3577 b"cannot create shared repository as source was created"
3583 b"cannot create shared repository as source was created"
3578 b" with 'format.usestore' config disabled"
3584 b" with 'format.usestore' config disabled"
3579 )
3585 )
3580 )
3586 )
3581
3587
3582 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3588 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3583 ui.warn(
3589 ui.warn(
3584 _(
3590 _(
3585 b"ignoring enabled 'format.use-share-safe' config because "
3591 b"ignoring enabled 'format.use-share-safe' config because "
3586 b"it is incompatible with disabled 'format.usestore'"
3592 b"it is incompatible with disabled 'format.usestore'"
3587 b" config\n"
3593 b" config\n"
3588 )
3594 )
3589 )
3595 )
3590 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3596 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3591
3597
3592 return dropped
3598 return dropped
3593
3599
3594
3600
3595 def filterknowncreateopts(ui, createopts):
3601 def filterknowncreateopts(ui, createopts):
3596 """Filters a dict of repo creation options against options that are known.
3602 """Filters a dict of repo creation options against options that are known.
3597
3603
3598 Receives a dict of repo creation options and returns a dict of those
3604 Receives a dict of repo creation options and returns a dict of those
3599 options that we don't know how to handle.
3605 options that we don't know how to handle.
3600
3606
3601 This function is called as part of repository creation. If the
3607 This function is called as part of repository creation. If the
3602 returned dict contains any items, repository creation will not
3608 returned dict contains any items, repository creation will not
3603 be allowed, as it means there was a request to create a repository
3609 be allowed, as it means there was a request to create a repository
3604 with options not recognized by loaded code.
3610 with options not recognized by loaded code.
3605
3611
3606 Extensions can wrap this function to filter out creation options
3612 Extensions can wrap this function to filter out creation options
3607 they know how to handle.
3613 they know how to handle.
3608 """
3614 """
3609 known = {
3615 known = {
3610 b'backend',
3616 b'backend',
3611 b'lfs',
3617 b'lfs',
3612 b'narrowfiles',
3618 b'narrowfiles',
3613 b'sharedrepo',
3619 b'sharedrepo',
3614 b'sharedrelative',
3620 b'sharedrelative',
3615 b'shareditems',
3621 b'shareditems',
3616 b'shallowfilestore',
3622 b'shallowfilestore',
3617 }
3623 }
3618
3624
3619 return {k: v for k, v in createopts.items() if k not in known}
3625 return {k: v for k, v in createopts.items() if k not in known}
3620
3626
3621
3627
3622 def createrepository(ui, path, createopts=None):
3628 def createrepository(ui, path, createopts=None):
3623 """Create a new repository in a vfs.
3629 """Create a new repository in a vfs.
3624
3630
3625 ``path`` path to the new repo's working directory.
3631 ``path`` path to the new repo's working directory.
3626 ``createopts`` options for the new repository.
3632 ``createopts`` options for the new repository.
3627
3633
3628 The following keys for ``createopts`` are recognized:
3634 The following keys for ``createopts`` are recognized:
3629
3635
3630 backend
3636 backend
3631 The storage backend to use.
3637 The storage backend to use.
3632 lfs
3638 lfs
3633 Repository will be created with ``lfs`` requirement. The lfs extension
3639 Repository will be created with ``lfs`` requirement. The lfs extension
3634 will automatically be loaded when the repository is accessed.
3640 will automatically be loaded when the repository is accessed.
3635 narrowfiles
3641 narrowfiles
3636 Set up repository to support narrow file storage.
3642 Set up repository to support narrow file storage.
3637 sharedrepo
3643 sharedrepo
3638 Repository object from which storage should be shared.
3644 Repository object from which storage should be shared.
3639 sharedrelative
3645 sharedrelative
3640 Boolean indicating if the path to the shared repo should be
3646 Boolean indicating if the path to the shared repo should be
3641 stored as relative. By default, the pointer to the "parent" repo
3647 stored as relative. By default, the pointer to the "parent" repo
3642 is stored as an absolute path.
3648 is stored as an absolute path.
3643 shareditems
3649 shareditems
3644 Set of items to share to the new repository (in addition to storage).
3650 Set of items to share to the new repository (in addition to storage).
3645 shallowfilestore
3651 shallowfilestore
3646 Indicates that storage for files should be shallow (not all ancestor
3652 Indicates that storage for files should be shallow (not all ancestor
3647 revisions are known).
3653 revisions are known).
3648 """
3654 """
3649 createopts = defaultcreateopts(ui, createopts=createopts)
3655 createopts = defaultcreateopts(ui, createopts=createopts)
3650
3656
3651 unknownopts = filterknowncreateopts(ui, createopts)
3657 unknownopts = filterknowncreateopts(ui, createopts)
3652
3658
3653 if not isinstance(unknownopts, dict):
3659 if not isinstance(unknownopts, dict):
3654 raise error.ProgrammingError(
3660 raise error.ProgrammingError(
3655 b'filterknowncreateopts() did not return a dict'
3661 b'filterknowncreateopts() did not return a dict'
3656 )
3662 )
3657
3663
3658 if unknownopts:
3664 if unknownopts:
3659 raise error.Abort(
3665 raise error.Abort(
3660 _(
3666 _(
3661 b'unable to create repository because of unknown '
3667 b'unable to create repository because of unknown '
3662 b'creation option: %s'
3668 b'creation option: %s'
3663 )
3669 )
3664 % b', '.join(sorted(unknownopts)),
3670 % b', '.join(sorted(unknownopts)),
3665 hint=_(b'is a required extension not loaded?'),
3671 hint=_(b'is a required extension not loaded?'),
3666 )
3672 )
3667
3673
3668 requirements = newreporequirements(ui, createopts=createopts)
3674 requirements = newreporequirements(ui, createopts=createopts)
3669 requirements -= checkrequirementscompat(ui, requirements)
3675 requirements -= checkrequirementscompat(ui, requirements)
3670
3676
3671 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3677 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3672
3678
3673 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3679 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3674 if hgvfs.exists():
3680 if hgvfs.exists():
3675 raise error.RepoError(_(b'repository %s already exists') % path)
3681 raise error.RepoError(_(b'repository %s already exists') % path)
3676
3682
3677 if b'sharedrepo' in createopts:
3683 if b'sharedrepo' in createopts:
3678 sharedpath = createopts[b'sharedrepo'].sharedpath
3684 sharedpath = createopts[b'sharedrepo'].sharedpath
3679
3685
3680 if createopts.get(b'sharedrelative'):
3686 if createopts.get(b'sharedrelative'):
3681 try:
3687 try:
3682 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3688 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3683 sharedpath = util.pconvert(sharedpath)
3689 sharedpath = util.pconvert(sharedpath)
3684 except (IOError, ValueError) as e:
3690 except (IOError, ValueError) as e:
3685 # ValueError is raised on Windows if the drive letters differ
3691 # ValueError is raised on Windows if the drive letters differ
3686 # on each path.
3692 # on each path.
3687 raise error.Abort(
3693 raise error.Abort(
3688 _(b'cannot calculate relative path'),
3694 _(b'cannot calculate relative path'),
3689 hint=stringutil.forcebytestr(e),
3695 hint=stringutil.forcebytestr(e),
3690 )
3696 )
3691
3697
3692 if not wdirvfs.exists():
3698 if not wdirvfs.exists():
3693 wdirvfs.makedirs()
3699 wdirvfs.makedirs()
3694
3700
3695 hgvfs.makedir(notindexed=True)
3701 hgvfs.makedir(notindexed=True)
3696 if b'sharedrepo' not in createopts:
3702 if b'sharedrepo' not in createopts:
3697 hgvfs.mkdir(b'cache')
3703 hgvfs.mkdir(b'cache')
3698 hgvfs.mkdir(b'wcache')
3704 hgvfs.mkdir(b'wcache')
3699
3705
3700 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3706 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3701 if has_store and b'sharedrepo' not in createopts:
3707 if has_store and b'sharedrepo' not in createopts:
3702 hgvfs.mkdir(b'store')
3708 hgvfs.mkdir(b'store')
3703
3709
3704 # We create an invalid changelog outside the store so very old
3710 # We create an invalid changelog outside the store so very old
3705 # Mercurial versions (which didn't know about the requirements
3711 # Mercurial versions (which didn't know about the requirements
3706 # file) encounter an error on reading the changelog. This
3712 # file) encounter an error on reading the changelog. This
3707 # effectively locks out old clients and prevents them from
3713 # effectively locks out old clients and prevents them from
3708 # mucking with a repo in an unknown format.
3714 # mucking with a repo in an unknown format.
3709 #
3715 #
3710 # The revlog header has version 65535, which won't be recognized by
3716 # The revlog header has version 65535, which won't be recognized by
3711 # such old clients.
3717 # such old clients.
3712 hgvfs.append(
3718 hgvfs.append(
3713 b'00changelog.i',
3719 b'00changelog.i',
3714 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3720 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3715 b'layout',
3721 b'layout',
3716 )
3722 )
3717
3723
3718 # Filter the requirements into working copy and store ones
3724 # Filter the requirements into working copy and store ones
3719 wcreq, storereq = scmutil.filterrequirements(requirements)
3725 wcreq, storereq = scmutil.filterrequirements(requirements)
3720 # write working copy ones
3726 # write working copy ones
3721 scmutil.writerequires(hgvfs, wcreq)
3727 scmutil.writerequires(hgvfs, wcreq)
3722 # If there are store requirements and the current repository
3728 # If there are store requirements and the current repository
3723 # is not a shared one, write stored requirements
3729 # is not a shared one, write stored requirements
3724 # For new shared repository, we don't need to write the store
3730 # For new shared repository, we don't need to write the store
3725 # requirements as they are already present in store requires
3731 # requirements as they are already present in store requires
3726 if storereq and b'sharedrepo' not in createopts:
3732 if storereq and b'sharedrepo' not in createopts:
3727 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3733 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3728 scmutil.writerequires(storevfs, storereq)
3734 scmutil.writerequires(storevfs, storereq)
3729
3735
3730 # Write out file telling readers where to find the shared store.
3736 # Write out file telling readers where to find the shared store.
3731 if b'sharedrepo' in createopts:
3737 if b'sharedrepo' in createopts:
3732 hgvfs.write(b'sharedpath', sharedpath)
3738 hgvfs.write(b'sharedpath', sharedpath)
3733
3739
3734 if createopts.get(b'shareditems'):
3740 if createopts.get(b'shareditems'):
3735 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3741 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3736 hgvfs.write(b'shared', shared)
3742 hgvfs.write(b'shared', shared)
3737
3743
3738
3744
3739 def poisonrepository(repo):
3745 def poisonrepository(repo):
3740 """Poison a repository instance so it can no longer be used."""
3746 """Poison a repository instance so it can no longer be used."""
3741 # Perform any cleanup on the instance.
3747 # Perform any cleanup on the instance.
3742 repo.close()
3748 repo.close()
3743
3749
3744 # Our strategy is to replace the type of the object with one that
3750 # Our strategy is to replace the type of the object with one that
3745 # has all attribute lookups result in error.
3751 # has all attribute lookups result in error.
3746 #
3752 #
3747 # But we have to allow the close() method because some constructors
3753 # But we have to allow the close() method because some constructors
3748 # of repos call close() on repo references.
3754 # of repos call close() on repo references.
3749 class poisonedrepository(object):
3755 class poisonedrepository(object):
3750 def __getattribute__(self, item):
3756 def __getattribute__(self, item):
3751 if item == 'close':
3757 if item == 'close':
3752 return object.__getattribute__(self, item)
3758 return object.__getattribute__(self, item)
3753
3759
3754 raise error.ProgrammingError(
3760 raise error.ProgrammingError(
3755 b'repo instances should not be used after unshare'
3761 b'repo instances should not be used after unshare'
3756 )
3762 )
3757
3763
3758 def close(self):
3764 def close(self):
3759 pass
3765 pass
3760
3766
3761 # We may have a repoview, which intercepts __setattr__. So be sure
3767 # We may have a repoview, which intercepts __setattr__. So be sure
3762 # we operate at the lowest level possible.
3768 # we operate at the lowest level possible.
3763 object.__setattr__(repo, '__class__', poisonedrepository)
3769 object.__setattr__(repo, '__class__', poisonedrepository)
@@ -1,783 +1,787 b''
1 # posix.py - Posix utility function implementations for Mercurial
1 # posix.py - Posix utility function implementations for Mercurial
2 #
2 #
3 # Copyright 2005-2009 Olivia Mackall <olivia@selenic.com> and others
3 # Copyright 2005-2009 Olivia Mackall <olivia@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import fcntl
11 import fcntl
12 import getpass
12 import getpass
13 import grp
13 import grp
14 import os
14 import os
15 import pwd
15 import pwd
16 import re
16 import re
17 import select
17 import select
18 import stat
18 import stat
19 import sys
19 import sys
20 import tempfile
20 import tempfile
21 import unicodedata
21 import unicodedata
22
22
23 from .i18n import _
23 from .i18n import _
24 from .pycompat import (
24 from .pycompat import (
25 getattr,
25 getattr,
26 open,
26 open,
27 )
27 )
28 from . import (
28 from . import (
29 encoding,
29 encoding,
30 error,
30 error,
31 policy,
31 policy,
32 pycompat,
32 pycompat,
33 )
33 )
34
34
35 osutil = policy.importmod('osutil')
35 osutil = policy.importmod('osutil')
36
36
37 normpath = os.path.normpath
37 normpath = os.path.normpath
38 samestat = os.path.samestat
38 samestat = os.path.samestat
39 try:
39 try:
40 oslink = os.link
40 oslink = os.link
41 except AttributeError:
41 except AttributeError:
42 # Some platforms build Python without os.link on systems that are
42 # Some platforms build Python without os.link on systems that are
43 # vaguely unix-like but don't have hardlink support. For those
43 # vaguely unix-like but don't have hardlink support. For those
44 # poor souls, just say we tried and that it failed so we fall back
44 # poor souls, just say we tried and that it failed so we fall back
45 # to copies.
45 # to copies.
46 def oslink(src, dst):
46 def oslink(src, dst):
47 raise OSError(
47 raise OSError(
48 errno.EINVAL, b'hardlinks not supported: %s to %s' % (src, dst)
48 errno.EINVAL, b'hardlinks not supported: %s to %s' % (src, dst)
49 )
49 )
50
50
51
51
52 readlink = os.readlink
52 readlink = os.readlink
53 unlink = os.unlink
53 unlink = os.unlink
54 rename = os.rename
54 rename = os.rename
55 removedirs = os.removedirs
55 removedirs = os.removedirs
56 expandglobs = False
56 expandglobs = False
57
57
58 umask = os.umask(0)
58 umask = os.umask(0)
59 os.umask(umask)
59 os.umask(umask)
60
60
61 if not pycompat.ispy3:
61 if not pycompat.ispy3:
62
62
63 def posixfile(name, mode='r', buffering=-1):
63 def posixfile(name, mode='r', buffering=-1):
64 fp = open(name, mode=mode, buffering=buffering)
64 fp = open(name, mode=mode, buffering=buffering)
65 # The position when opening in append mode is implementation defined, so
65 # The position when opening in append mode is implementation defined, so
66 # make it consistent by always seeking to the end.
66 # make it consistent by always seeking to the end.
67 if 'a' in mode:
67 if 'a' in mode:
68 fp.seek(0, os.SEEK_END)
68 fp.seek(0, os.SEEK_END)
69 return fp
69 return fp
70
70
71
71
72 else:
72 else:
73 # The underlying file object seeks as required in Python 3:
73 # The underlying file object seeks as required in Python 3:
74 # https://github.com/python/cpython/blob/v3.7.3/Modules/_io/fileio.c#L474
74 # https://github.com/python/cpython/blob/v3.7.3/Modules/_io/fileio.c#L474
75 posixfile = open
75 posixfile = open
76
76
77
77
78 def split(p):
78 def split(p):
79 """Same as posixpath.split, but faster
79 """Same as posixpath.split, but faster
80
80
81 >>> import posixpath
81 >>> import posixpath
82 >>> for f in [b'/absolute/path/to/file',
82 >>> for f in [b'/absolute/path/to/file',
83 ... b'relative/path/to/file',
83 ... b'relative/path/to/file',
84 ... b'file_alone',
84 ... b'file_alone',
85 ... b'path/to/directory/',
85 ... b'path/to/directory/',
86 ... b'/multiple/path//separators',
86 ... b'/multiple/path//separators',
87 ... b'/file_at_root',
87 ... b'/file_at_root',
88 ... b'///multiple_leading_separators_at_root',
88 ... b'///multiple_leading_separators_at_root',
89 ... b'']:
89 ... b'']:
90 ... assert split(f) == posixpath.split(f), f
90 ... assert split(f) == posixpath.split(f), f
91 """
91 """
92 ht = p.rsplit(b'/', 1)
92 ht = p.rsplit(b'/', 1)
93 if len(ht) == 1:
93 if len(ht) == 1:
94 return b'', p
94 return b'', p
95 nh = ht[0].rstrip(b'/')
95 nh = ht[0].rstrip(b'/')
96 if nh:
96 if nh:
97 return nh, ht[1]
97 return nh, ht[1]
98 return ht[0] + b'/', ht[1]
98 return ht[0] + b'/', ht[1]
99
99
100
100
101 def openhardlinks():
101 def openhardlinks():
102 '''return true if it is safe to hold open file handles to hardlinks'''
102 '''return true if it is safe to hold open file handles to hardlinks'''
103 return True
103 return True
104
104
105
105
106 def nlinks(name):
106 def nlinks(name):
107 '''return number of hardlinks for the given file'''
107 '''return number of hardlinks for the given file'''
108 return os.lstat(name).st_nlink
108 return os.lstat(name).st_nlink
109
109
110
110
111 def parsepatchoutput(output_line):
111 def parsepatchoutput(output_line):
112 """parses the output produced by patch and returns the filename"""
112 """parses the output produced by patch and returns the filename"""
113 pf = output_line[14:]
113 pf = output_line[14:]
114 if pycompat.sysplatform == b'OpenVMS':
114 if pycompat.sysplatform == b'OpenVMS':
115 if pf[0] == b'`':
115 if pf[0] == b'`':
116 pf = pf[1:-1] # Remove the quotes
116 pf = pf[1:-1] # Remove the quotes
117 else:
117 else:
118 if pf.startswith(b"'") and pf.endswith(b"'") and b" " in pf:
118 if pf.startswith(b"'") and pf.endswith(b"'") and b" " in pf:
119 pf = pf[1:-1] # Remove the quotes
119 pf = pf[1:-1] # Remove the quotes
120 return pf
120 return pf
121
121
122
122
123 def sshargs(sshcmd, host, user, port):
123 def sshargs(sshcmd, host, user, port):
124 '''Build argument list for ssh'''
124 '''Build argument list for ssh'''
125 args = user and (b"%s@%s" % (user, host)) or host
125 args = user and (b"%s@%s" % (user, host)) or host
126 if b'-' in args[:1]:
126 if b'-' in args[:1]:
127 raise error.Abort(
127 raise error.Abort(
128 _(b'illegal ssh hostname or username starting with -: %s') % args
128 _(b'illegal ssh hostname or username starting with -: %s') % args
129 )
129 )
130 args = shellquote(args)
130 args = shellquote(args)
131 if port:
131 if port:
132 args = b'-p %s %s' % (shellquote(port), args)
132 args = b'-p %s %s' % (shellquote(port), args)
133 return args
133 return args
134
134
135
135
136 def isexec(f):
136 def isexec(f):
137 """check whether a file is executable"""
137 """check whether a file is executable"""
138 return os.lstat(f).st_mode & 0o100 != 0
138 return os.lstat(f).st_mode & 0o100 != 0
139
139
140
140
141 def setflags(f, l, x):
141 def setflags(f, l, x):
142 st = os.lstat(f)
142 st = os.lstat(f)
143 s = st.st_mode
143 s = st.st_mode
144 if l:
144 if l:
145 if not stat.S_ISLNK(s):
145 if not stat.S_ISLNK(s):
146 # switch file to link
146 # switch file to link
147 with open(f, b'rb') as fp:
147 with open(f, b'rb') as fp:
148 data = fp.read()
148 data = fp.read()
149 unlink(f)
149 unlink(f)
150 try:
150 try:
151 os.symlink(data, f)
151 os.symlink(data, f)
152 except OSError:
152 except OSError:
153 # failed to make a link, rewrite file
153 # failed to make a link, rewrite file
154 with open(f, b"wb") as fp:
154 with open(f, b"wb") as fp:
155 fp.write(data)
155 fp.write(data)
156
156
157 # no chmod needed at this point
157 # no chmod needed at this point
158 return
158 return
159 if stat.S_ISLNK(s):
159 if stat.S_ISLNK(s):
160 # switch link to file
160 # switch link to file
161 data = os.readlink(f)
161 data = os.readlink(f)
162 unlink(f)
162 unlink(f)
163 with open(f, b"wb") as fp:
163 with open(f, b"wb") as fp:
164 fp.write(data)
164 fp.write(data)
165 s = 0o666 & ~umask # avoid restatting for chmod
165 s = 0o666 & ~umask # avoid restatting for chmod
166
166
167 sx = s & 0o100
167 sx = s & 0o100
168 if st.st_nlink > 1 and bool(x) != bool(sx):
168 if st.st_nlink > 1 and bool(x) != bool(sx):
169 # the file is a hardlink, break it
169 # the file is a hardlink, break it
170 with open(f, b"rb") as fp:
170 with open(f, b"rb") as fp:
171 data = fp.read()
171 data = fp.read()
172 unlink(f)
172 unlink(f)
173 with open(f, b"wb") as fp:
173 with open(f, b"wb") as fp:
174 fp.write(data)
174 fp.write(data)
175
175
176 if x and not sx:
176 if x and not sx:
177 # Turn on +x for every +r bit when making a file executable
177 # Turn on +x for every +r bit when making a file executable
178 # and obey umask.
178 # and obey umask.
179 os.chmod(f, s | (s & 0o444) >> 2 & ~umask)
179 os.chmod(f, s | (s & 0o444) >> 2 & ~umask)
180 elif not x and sx:
180 elif not x and sx:
181 # Turn off all +x bits
181 # Turn off all +x bits
182 os.chmod(f, s & 0o666)
182 os.chmod(f, s & 0o666)
183
183
184
184
185 def copymode(src, dst, mode=None, enforcewritable=False):
185 def copymode(src, dst, mode=None, enforcewritable=False):
186 """Copy the file mode from the file at path src to dst.
186 """Copy the file mode from the file at path src to dst.
187 If src doesn't exist, we're using mode instead. If mode is None, we're
187 If src doesn't exist, we're using mode instead. If mode is None, we're
188 using umask."""
188 using umask."""
189 try:
189 try:
190 st_mode = os.lstat(src).st_mode & 0o777
190 st_mode = os.lstat(src).st_mode & 0o777
191 except OSError as inst:
191 except OSError as inst:
192 if inst.errno != errno.ENOENT:
192 if inst.errno != errno.ENOENT:
193 raise
193 raise
194 st_mode = mode
194 st_mode = mode
195 if st_mode is None:
195 if st_mode is None:
196 st_mode = ~umask
196 st_mode = ~umask
197 st_mode &= 0o666
197 st_mode &= 0o666
198
198
199 new_mode = st_mode
199 new_mode = st_mode
200
200
201 if enforcewritable:
201 if enforcewritable:
202 new_mode |= stat.S_IWUSR
202 new_mode |= stat.S_IWUSR
203
203
204 os.chmod(dst, new_mode)
204 os.chmod(dst, new_mode)
205
205
206
206
207 def checkexec(path):
207 def checkexec(path):
208 """
208 """
209 Check whether the given path is on a filesystem with UNIX-like exec flags
209 Check whether the given path is on a filesystem with UNIX-like exec flags
210
210
211 Requires a directory (like /foo/.hg)
211 Requires a directory (like /foo/.hg)
212 """
212 """
213
213
214 # VFAT on some Linux versions can flip mode but it doesn't persist
214 # VFAT on some Linux versions can flip mode but it doesn't persist
215 # a FS remount. Frequently we can detect it if files are created
215 # a FS remount. Frequently we can detect it if files are created
216 # with exec bit on.
216 # with exec bit on.
217
217
218 try:
218 try:
219 EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
219 EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
220 basedir = os.path.join(path, b'.hg')
220 basedir = os.path.join(path, b'.hg')
221 cachedir = os.path.join(basedir, b'wcache')
221 cachedir = os.path.join(basedir, b'wcache')
222 storedir = os.path.join(basedir, b'store')
222 storedir = os.path.join(basedir, b'store')
223 if not os.path.exists(cachedir):
223 if not os.path.exists(cachedir):
224 try:
224 try:
225 # we want to create the 'cache' directory, not the '.hg' one.
225 # we want to create the 'cache' directory, not the '.hg' one.
226 # Automatically creating '.hg' directory could silently spawn
226 # Automatically creating '.hg' directory could silently spawn
227 # invalid Mercurial repositories. That seems like a bad idea.
227 # invalid Mercurial repositories. That seems like a bad idea.
228 os.mkdir(cachedir)
228 os.mkdir(cachedir)
229 if os.path.exists(storedir):
229 if os.path.exists(storedir):
230 copymode(storedir, cachedir)
230 copymode(storedir, cachedir)
231 else:
231 else:
232 copymode(basedir, cachedir)
232 copymode(basedir, cachedir)
233 except (IOError, OSError):
233 except (IOError, OSError):
234 # we other fallback logic triggers
234 # we other fallback logic triggers
235 pass
235 pass
236 if os.path.isdir(cachedir):
236 if os.path.isdir(cachedir):
237 checkisexec = os.path.join(cachedir, b'checkisexec')
237 checkisexec = os.path.join(cachedir, b'checkisexec')
238 checknoexec = os.path.join(cachedir, b'checknoexec')
238 checknoexec = os.path.join(cachedir, b'checknoexec')
239
239
240 try:
240 try:
241 m = os.stat(checkisexec).st_mode
241 m = os.stat(checkisexec).st_mode
242 except OSError as e:
242 except OSError as e:
243 if e.errno != errno.ENOENT:
243 if e.errno != errno.ENOENT:
244 raise
244 raise
245 # checkisexec does not exist - fall through ...
245 # checkisexec does not exist - fall through ...
246 else:
246 else:
247 # checkisexec exists, check if it actually is exec
247 # checkisexec exists, check if it actually is exec
248 if m & EXECFLAGS != 0:
248 if m & EXECFLAGS != 0:
249 # ensure checkisexec exists, check it isn't exec
249 # ensure checkisexec exists, check it isn't exec
250 try:
250 try:
251 m = os.stat(checknoexec).st_mode
251 m = os.stat(checknoexec).st_mode
252 except OSError as e:
252 except OSError as e:
253 if e.errno != errno.ENOENT:
253 if e.errno != errno.ENOENT:
254 raise
254 raise
255 open(checknoexec, b'w').close() # might fail
255 open(checknoexec, b'w').close() # might fail
256 m = os.stat(checknoexec).st_mode
256 m = os.stat(checknoexec).st_mode
257 if m & EXECFLAGS == 0:
257 if m & EXECFLAGS == 0:
258 # check-exec is exec and check-no-exec is not exec
258 # check-exec is exec and check-no-exec is not exec
259 return True
259 return True
260 # checknoexec exists but is exec - delete it
260 # checknoexec exists but is exec - delete it
261 unlink(checknoexec)
261 unlink(checknoexec)
262 # checkisexec exists but is not exec - delete it
262 # checkisexec exists but is not exec - delete it
263 unlink(checkisexec)
263 unlink(checkisexec)
264
264
265 # check using one file, leave it as checkisexec
265 # check using one file, leave it as checkisexec
266 checkdir = cachedir
266 checkdir = cachedir
267 else:
267 else:
268 # check directly in path and don't leave checkisexec behind
268 # check directly in path and don't leave checkisexec behind
269 checkdir = path
269 checkdir = path
270 checkisexec = None
270 checkisexec = None
271 fh, fn = pycompat.mkstemp(dir=checkdir, prefix=b'hg-checkexec-')
271 fh, fn = pycompat.mkstemp(dir=checkdir, prefix=b'hg-checkexec-')
272 try:
272 try:
273 os.close(fh)
273 os.close(fh)
274 m = os.stat(fn).st_mode
274 m = os.stat(fn).st_mode
275 if m & EXECFLAGS == 0:
275 if m & EXECFLAGS == 0:
276 os.chmod(fn, m & 0o777 | EXECFLAGS)
276 os.chmod(fn, m & 0o777 | EXECFLAGS)
277 if os.stat(fn).st_mode & EXECFLAGS != 0:
277 if os.stat(fn).st_mode & EXECFLAGS != 0:
278 if checkisexec is not None:
278 if checkisexec is not None:
279 os.rename(fn, checkisexec)
279 os.rename(fn, checkisexec)
280 fn = None
280 fn = None
281 return True
281 return True
282 finally:
282 finally:
283 if fn is not None:
283 if fn is not None:
284 unlink(fn)
284 unlink(fn)
285 except (IOError, OSError):
285 except (IOError, OSError):
286 # we don't care, the user probably won't be able to commit anyway
286 # we don't care, the user probably won't be able to commit anyway
287 return False
287 return False
288
288
289
289
290 def checklink(path):
290 def checklink(path):
291 """check whether the given path is on a symlink-capable filesystem"""
291 """check whether the given path is on a symlink-capable filesystem"""
292 # mktemp is not racy because symlink creation will fail if the
292 # mktemp is not racy because symlink creation will fail if the
293 # file already exists
293 # file already exists
294 while True:
294 while True:
295 cachedir = os.path.join(path, b'.hg', b'wcache')
295 cachedir = os.path.join(path, b'.hg', b'wcache')
296 checklink = os.path.join(cachedir, b'checklink')
296 checklink = os.path.join(cachedir, b'checklink')
297 # try fast path, read only
297 # try fast path, read only
298 if os.path.islink(checklink):
298 if os.path.islink(checklink):
299 return True
299 return True
300 if os.path.isdir(cachedir):
300 if os.path.isdir(cachedir):
301 checkdir = cachedir
301 checkdir = cachedir
302 else:
302 else:
303 checkdir = path
303 checkdir = path
304 cachedir = None
304 cachedir = None
305 name = tempfile.mktemp(
305 name = tempfile.mktemp(
306 dir=pycompat.fsdecode(checkdir), prefix=r'checklink-'
306 dir=pycompat.fsdecode(checkdir), prefix=r'checklink-'
307 )
307 )
308 name = pycompat.fsencode(name)
308 name = pycompat.fsencode(name)
309 try:
309 try:
310 fd = None
310 fd = None
311 if cachedir is None:
311 if cachedir is None:
312 fd = pycompat.namedtempfile(
312 fd = pycompat.namedtempfile(
313 dir=checkdir, prefix=b'hg-checklink-'
313 dir=checkdir, prefix=b'hg-checklink-'
314 )
314 )
315 target = os.path.basename(fd.name)
315 target = os.path.basename(fd.name)
316 else:
316 else:
317 # create a fixed file to link to; doesn't matter if it
317 # create a fixed file to link to; doesn't matter if it
318 # already exists.
318 # already exists.
319 target = b'checklink-target'
319 target = b'checklink-target'
320 try:
320 try:
321 fullpath = os.path.join(cachedir, target)
321 fullpath = os.path.join(cachedir, target)
322 open(fullpath, b'w').close()
322 open(fullpath, b'w').close()
323 except IOError as inst:
323 except IOError as inst:
324 # pytype: disable=unsupported-operands
324 # pytype: disable=unsupported-operands
325 if inst[0] == errno.EACCES:
325 if inst[0] == errno.EACCES:
326 # pytype: enable=unsupported-operands
326 # pytype: enable=unsupported-operands
327
327
328 # If we can't write to cachedir, just pretend
328 # If we can't write to cachedir, just pretend
329 # that the fs is readonly and by association
329 # that the fs is readonly and by association
330 # that the fs won't support symlinks. This
330 # that the fs won't support symlinks. This
331 # seems like the least dangerous way to avoid
331 # seems like the least dangerous way to avoid
332 # data loss.
332 # data loss.
333 return False
333 return False
334 raise
334 raise
335 try:
335 try:
336 os.symlink(target, name)
336 os.symlink(target, name)
337 if cachedir is None:
337 if cachedir is None:
338 unlink(name)
338 unlink(name)
339 else:
339 else:
340 try:
340 try:
341 os.rename(name, checklink)
341 os.rename(name, checklink)
342 except OSError:
342 except OSError:
343 unlink(name)
343 unlink(name)
344 return True
344 return True
345 except OSError as inst:
345 except OSError as inst:
346 # link creation might race, try again
346 # link creation might race, try again
347 if inst.errno == errno.EEXIST:
347 if inst.errno == errno.EEXIST:
348 continue
348 continue
349 raise
349 raise
350 finally:
350 finally:
351 if fd is not None:
351 if fd is not None:
352 fd.close()
352 fd.close()
353 except AttributeError:
353 except AttributeError:
354 return False
354 return False
355 except OSError as inst:
355 except OSError as inst:
356 # sshfs might report failure while successfully creating the link
356 # sshfs might report failure while successfully creating the link
357 if inst.errno == errno.EIO and os.path.exists(name):
357 if inst.errno == errno.EIO and os.path.exists(name):
358 unlink(name)
358 unlink(name)
359 return False
359 return False
360
360
361
361
362 def checkosfilename(path):
362 def checkosfilename(path):
363 """Check that the base-relative path is a valid filename on this platform.
363 """Check that the base-relative path is a valid filename on this platform.
364 Returns None if the path is ok, or a UI string describing the problem."""
364 Returns None if the path is ok, or a UI string describing the problem."""
365 return None # on posix platforms, every path is ok
365 return None # on posix platforms, every path is ok
366
366
367
367
368 def getfsmountpoint(dirpath):
368 def getfsmountpoint(dirpath):
369 """Get the filesystem mount point from a directory (best-effort)
369 """Get the filesystem mount point from a directory (best-effort)
370
370
371 Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc.
371 Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc.
372 """
372 """
373 return getattr(osutil, 'getfsmountpoint', lambda x: None)(dirpath)
373 return getattr(osutil, 'getfsmountpoint', lambda x: None)(dirpath)
374
374
375
375
376 def getfstype(dirpath):
376 def getfstype(dirpath):
377 """Get the filesystem type name from a directory (best-effort)
377 """Get the filesystem type name from a directory (best-effort)
378
378
379 Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc.
379 Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc.
380 """
380 """
381 return getattr(osutil, 'getfstype', lambda x: None)(dirpath)
381 return getattr(osutil, 'getfstype', lambda x: None)(dirpath)
382
382
383
383
384 def get_password():
385 return encoding.strtolocal(getpass.getpass(''))
386
387
384 def setbinary(fd):
388 def setbinary(fd):
385 pass
389 pass
386
390
387
391
388 def pconvert(path):
392 def pconvert(path):
389 return path
393 return path
390
394
391
395
392 def localpath(path):
396 def localpath(path):
393 return path
397 return path
394
398
395
399
396 def samefile(fpath1, fpath2):
400 def samefile(fpath1, fpath2):
397 """Returns whether path1 and path2 refer to the same file. This is only
401 """Returns whether path1 and path2 refer to the same file. This is only
398 guaranteed to work for files, not directories."""
402 guaranteed to work for files, not directories."""
399 return os.path.samefile(fpath1, fpath2)
403 return os.path.samefile(fpath1, fpath2)
400
404
401
405
402 def samedevice(fpath1, fpath2):
406 def samedevice(fpath1, fpath2):
403 """Returns whether fpath1 and fpath2 are on the same device. This is only
407 """Returns whether fpath1 and fpath2 are on the same device. This is only
404 guaranteed to work for files, not directories."""
408 guaranteed to work for files, not directories."""
405 st1 = os.lstat(fpath1)
409 st1 = os.lstat(fpath1)
406 st2 = os.lstat(fpath2)
410 st2 = os.lstat(fpath2)
407 return st1.st_dev == st2.st_dev
411 return st1.st_dev == st2.st_dev
408
412
409
413
410 # os.path.normcase is a no-op, which doesn't help us on non-native filesystems
414 # os.path.normcase is a no-op, which doesn't help us on non-native filesystems
411 def normcase(path):
415 def normcase(path):
412 return path.lower()
416 return path.lower()
413
417
414
418
415 # what normcase does to ASCII strings
419 # what normcase does to ASCII strings
416 normcasespec = encoding.normcasespecs.lower
420 normcasespec = encoding.normcasespecs.lower
417 # fallback normcase function for non-ASCII strings
421 # fallback normcase function for non-ASCII strings
418 normcasefallback = normcase
422 normcasefallback = normcase
419
423
420 if pycompat.isdarwin:
424 if pycompat.isdarwin:
421
425
422 def normcase(path):
426 def normcase(path):
423 """
427 """
424 Normalize a filename for OS X-compatible comparison:
428 Normalize a filename for OS X-compatible comparison:
425 - escape-encode invalid characters
429 - escape-encode invalid characters
426 - decompose to NFD
430 - decompose to NFD
427 - lowercase
431 - lowercase
428 - omit ignored characters [200c-200f, 202a-202e, 206a-206f,feff]
432 - omit ignored characters [200c-200f, 202a-202e, 206a-206f,feff]
429
433
430 >>> normcase(b'UPPER')
434 >>> normcase(b'UPPER')
431 'upper'
435 'upper'
432 >>> normcase(b'Caf\\xc3\\xa9')
436 >>> normcase(b'Caf\\xc3\\xa9')
433 'cafe\\xcc\\x81'
437 'cafe\\xcc\\x81'
434 >>> normcase(b'\\xc3\\x89')
438 >>> normcase(b'\\xc3\\x89')
435 'e\\xcc\\x81'
439 'e\\xcc\\x81'
436 >>> normcase(b'\\xb8\\xca\\xc3\\xca\\xbe\\xc8.JPG') # issue3918
440 >>> normcase(b'\\xb8\\xca\\xc3\\xca\\xbe\\xc8.JPG') # issue3918
437 '%b8%ca%c3\\xca\\xbe%c8.jpg'
441 '%b8%ca%c3\\xca\\xbe%c8.jpg'
438 """
442 """
439
443
440 try:
444 try:
441 return encoding.asciilower(path) # exception for non-ASCII
445 return encoding.asciilower(path) # exception for non-ASCII
442 except UnicodeDecodeError:
446 except UnicodeDecodeError:
443 return normcasefallback(path)
447 return normcasefallback(path)
444
448
445 normcasespec = encoding.normcasespecs.lower
449 normcasespec = encoding.normcasespecs.lower
446
450
447 def normcasefallback(path):
451 def normcasefallback(path):
448 try:
452 try:
449 u = path.decode('utf-8')
453 u = path.decode('utf-8')
450 except UnicodeDecodeError:
454 except UnicodeDecodeError:
451 # OS X percent-encodes any bytes that aren't valid utf-8
455 # OS X percent-encodes any bytes that aren't valid utf-8
452 s = b''
456 s = b''
453 pos = 0
457 pos = 0
454 l = len(path)
458 l = len(path)
455 while pos < l:
459 while pos < l:
456 try:
460 try:
457 c = encoding.getutf8char(path, pos)
461 c = encoding.getutf8char(path, pos)
458 pos += len(c)
462 pos += len(c)
459 except ValueError:
463 except ValueError:
460 c = b'%%%02X' % ord(path[pos : pos + 1])
464 c = b'%%%02X' % ord(path[pos : pos + 1])
461 pos += 1
465 pos += 1
462 s += c
466 s += c
463
467
464 u = s.decode('utf-8')
468 u = s.decode('utf-8')
465
469
466 # Decompose then lowercase (HFS+ technote specifies lower)
470 # Decompose then lowercase (HFS+ technote specifies lower)
467 enc = unicodedata.normalize('NFD', u).lower().encode('utf-8')
471 enc = unicodedata.normalize('NFD', u).lower().encode('utf-8')
468 # drop HFS+ ignored characters
472 # drop HFS+ ignored characters
469 return encoding.hfsignoreclean(enc)
473 return encoding.hfsignoreclean(enc)
470
474
471
475
472 if pycompat.sysplatform == b'cygwin':
476 if pycompat.sysplatform == b'cygwin':
473 # workaround for cygwin, in which mount point part of path is
477 # workaround for cygwin, in which mount point part of path is
474 # treated as case sensitive, even though underlying NTFS is case
478 # treated as case sensitive, even though underlying NTFS is case
475 # insensitive.
479 # insensitive.
476
480
477 # default mount points
481 # default mount points
478 cygwinmountpoints = sorted(
482 cygwinmountpoints = sorted(
479 [
483 [
480 b"/usr/bin",
484 b"/usr/bin",
481 b"/usr/lib",
485 b"/usr/lib",
482 b"/cygdrive",
486 b"/cygdrive",
483 ],
487 ],
484 reverse=True,
488 reverse=True,
485 )
489 )
486
490
487 # use upper-ing as normcase as same as NTFS workaround
491 # use upper-ing as normcase as same as NTFS workaround
488 def normcase(path):
492 def normcase(path):
489 pathlen = len(path)
493 pathlen = len(path)
490 if (pathlen == 0) or (path[0] != pycompat.ossep):
494 if (pathlen == 0) or (path[0] != pycompat.ossep):
491 # treat as relative
495 # treat as relative
492 return encoding.upper(path)
496 return encoding.upper(path)
493
497
494 # to preserve case of mountpoint part
498 # to preserve case of mountpoint part
495 for mp in cygwinmountpoints:
499 for mp in cygwinmountpoints:
496 if not path.startswith(mp):
500 if not path.startswith(mp):
497 continue
501 continue
498
502
499 mplen = len(mp)
503 mplen = len(mp)
500 if mplen == pathlen: # mount point itself
504 if mplen == pathlen: # mount point itself
501 return mp
505 return mp
502 if path[mplen] == pycompat.ossep:
506 if path[mplen] == pycompat.ossep:
503 return mp + encoding.upper(path[mplen:])
507 return mp + encoding.upper(path[mplen:])
504
508
505 return encoding.upper(path)
509 return encoding.upper(path)
506
510
507 normcasespec = encoding.normcasespecs.other
511 normcasespec = encoding.normcasespecs.other
508 normcasefallback = normcase
512 normcasefallback = normcase
509
513
510 # Cygwin translates native ACLs to POSIX permissions,
514 # Cygwin translates native ACLs to POSIX permissions,
511 # but these translations are not supported by native
515 # but these translations are not supported by native
512 # tools, so the exec bit tends to be set erroneously.
516 # tools, so the exec bit tends to be set erroneously.
513 # Therefore, disable executable bit access on Cygwin.
517 # Therefore, disable executable bit access on Cygwin.
514 def checkexec(path):
518 def checkexec(path):
515 return False
519 return False
516
520
517 # Similarly, Cygwin's symlink emulation is likely to create
521 # Similarly, Cygwin's symlink emulation is likely to create
518 # problems when Mercurial is used from both Cygwin and native
522 # problems when Mercurial is used from both Cygwin and native
519 # Windows, with other native tools, or on shared volumes
523 # Windows, with other native tools, or on shared volumes
520 def checklink(path):
524 def checklink(path):
521 return False
525 return False
522
526
523
527
524 _needsshellquote = None
528 _needsshellquote = None
525
529
526
530
527 def shellquote(s):
531 def shellquote(s):
528 if pycompat.sysplatform == b'OpenVMS':
532 if pycompat.sysplatform == b'OpenVMS':
529 return b'"%s"' % s
533 return b'"%s"' % s
530 global _needsshellquote
534 global _needsshellquote
531 if _needsshellquote is None:
535 if _needsshellquote is None:
532 _needsshellquote = re.compile(br'[^a-zA-Z0-9._/+-]').search
536 _needsshellquote = re.compile(br'[^a-zA-Z0-9._/+-]').search
533 if s and not _needsshellquote(s):
537 if s and not _needsshellquote(s):
534 # "s" shouldn't have to be quoted
538 # "s" shouldn't have to be quoted
535 return s
539 return s
536 else:
540 else:
537 return b"'%s'" % s.replace(b"'", b"'\\''")
541 return b"'%s'" % s.replace(b"'", b"'\\''")
538
542
539
543
540 def shellsplit(s):
544 def shellsplit(s):
541 """Parse a command string in POSIX shell way (best-effort)"""
545 """Parse a command string in POSIX shell way (best-effort)"""
542 return pycompat.shlexsplit(s, posix=True)
546 return pycompat.shlexsplit(s, posix=True)
543
547
544
548
545 def testpid(pid):
549 def testpid(pid):
546 '''return False if pid dead, True if running or not sure'''
550 '''return False if pid dead, True if running or not sure'''
547 if pycompat.sysplatform == b'OpenVMS':
551 if pycompat.sysplatform == b'OpenVMS':
548 return True
552 return True
549 try:
553 try:
550 os.kill(pid, 0)
554 os.kill(pid, 0)
551 return True
555 return True
552 except OSError as inst:
556 except OSError as inst:
553 return inst.errno != errno.ESRCH
557 return inst.errno != errno.ESRCH
554
558
555
559
556 def isowner(st):
560 def isowner(st):
557 """Return True if the stat object st is from the current user."""
561 """Return True if the stat object st is from the current user."""
558 return st.st_uid == os.getuid()
562 return st.st_uid == os.getuid()
559
563
560
564
561 def findexe(command):
565 def findexe(command):
562 """Find executable for command searching like which does.
566 """Find executable for command searching like which does.
563 If command is a basename then PATH is searched for command.
567 If command is a basename then PATH is searched for command.
564 PATH isn't searched if command is an absolute or relative path.
568 PATH isn't searched if command is an absolute or relative path.
565 If command isn't found None is returned."""
569 If command isn't found None is returned."""
566 if pycompat.sysplatform == b'OpenVMS':
570 if pycompat.sysplatform == b'OpenVMS':
567 return command
571 return command
568
572
569 def findexisting(executable):
573 def findexisting(executable):
570 b'Will return executable if existing file'
574 b'Will return executable if existing file'
571 if os.path.isfile(executable) and os.access(executable, os.X_OK):
575 if os.path.isfile(executable) and os.access(executable, os.X_OK):
572 return executable
576 return executable
573 return None
577 return None
574
578
575 if pycompat.ossep in command:
579 if pycompat.ossep in command:
576 return findexisting(command)
580 return findexisting(command)
577
581
578 if pycompat.sysplatform == b'plan9':
582 if pycompat.sysplatform == b'plan9':
579 return findexisting(os.path.join(b'/bin', command))
583 return findexisting(os.path.join(b'/bin', command))
580
584
581 for path in encoding.environ.get(b'PATH', b'').split(pycompat.ospathsep):
585 for path in encoding.environ.get(b'PATH', b'').split(pycompat.ospathsep):
582 executable = findexisting(os.path.join(path, command))
586 executable = findexisting(os.path.join(path, command))
583 if executable is not None:
587 if executable is not None:
584 return executable
588 return executable
585 return None
589 return None
586
590
587
591
588 def setsignalhandler():
592 def setsignalhandler():
589 pass
593 pass
590
594
591
595
592 _wantedkinds = {stat.S_IFREG, stat.S_IFLNK}
596 _wantedkinds = {stat.S_IFREG, stat.S_IFLNK}
593
597
594
598
595 def statfiles(files):
599 def statfiles(files):
596 """Stat each file in files. Yield each stat, or None if a file does not
600 """Stat each file in files. Yield each stat, or None if a file does not
597 exist or has a type we don't care about."""
601 exist or has a type we don't care about."""
598 lstat = os.lstat
602 lstat = os.lstat
599 getkind = stat.S_IFMT
603 getkind = stat.S_IFMT
600 for nf in files:
604 for nf in files:
601 try:
605 try:
602 st = lstat(nf)
606 st = lstat(nf)
603 if getkind(st.st_mode) not in _wantedkinds:
607 if getkind(st.st_mode) not in _wantedkinds:
604 st = None
608 st = None
605 except OSError as err:
609 except OSError as err:
606 if err.errno not in (errno.ENOENT, errno.ENOTDIR):
610 if err.errno not in (errno.ENOENT, errno.ENOTDIR):
607 raise
611 raise
608 st = None
612 st = None
609 yield st
613 yield st
610
614
611
615
612 def getuser():
616 def getuser():
613 '''return name of current user'''
617 '''return name of current user'''
614 return pycompat.fsencode(getpass.getuser())
618 return pycompat.fsencode(getpass.getuser())
615
619
616
620
617 def username(uid=None):
621 def username(uid=None):
618 """Return the name of the user with the given uid.
622 """Return the name of the user with the given uid.
619
623
620 If uid is None, return the name of the current user."""
624 If uid is None, return the name of the current user."""
621
625
622 if uid is None:
626 if uid is None:
623 uid = os.getuid()
627 uid = os.getuid()
624 try:
628 try:
625 return pycompat.fsencode(pwd.getpwuid(uid)[0])
629 return pycompat.fsencode(pwd.getpwuid(uid)[0])
626 except KeyError:
630 except KeyError:
627 return b'%d' % uid
631 return b'%d' % uid
628
632
629
633
630 def groupname(gid=None):
634 def groupname(gid=None):
631 """Return the name of the group with the given gid.
635 """Return the name of the group with the given gid.
632
636
633 If gid is None, return the name of the current group."""
637 If gid is None, return the name of the current group."""
634
638
635 if gid is None:
639 if gid is None:
636 gid = os.getgid()
640 gid = os.getgid()
637 try:
641 try:
638 return pycompat.fsencode(grp.getgrgid(gid)[0])
642 return pycompat.fsencode(grp.getgrgid(gid)[0])
639 except KeyError:
643 except KeyError:
640 return pycompat.bytestr(gid)
644 return pycompat.bytestr(gid)
641
645
642
646
643 def groupmembers(name):
647 def groupmembers(name):
644 """Return the list of members of the group with the given
648 """Return the list of members of the group with the given
645 name, KeyError if the group does not exist.
649 name, KeyError if the group does not exist.
646 """
650 """
647 name = pycompat.fsdecode(name)
651 name = pycompat.fsdecode(name)
648 return pycompat.rapply(pycompat.fsencode, list(grp.getgrnam(name).gr_mem))
652 return pycompat.rapply(pycompat.fsencode, list(grp.getgrnam(name).gr_mem))
649
653
650
654
651 def spawndetached(args):
655 def spawndetached(args):
652 return os.spawnvp(os.P_NOWAIT | getattr(os, 'P_DETACH', 0), args[0], args)
656 return os.spawnvp(os.P_NOWAIT | getattr(os, 'P_DETACH', 0), args[0], args)
653
657
654
658
655 def gethgcmd():
659 def gethgcmd():
656 return sys.argv[:1]
660 return sys.argv[:1]
657
661
658
662
659 def makedir(path, notindexed):
663 def makedir(path, notindexed):
660 os.mkdir(path)
664 os.mkdir(path)
661
665
662
666
663 def lookupreg(key, name=None, scope=None):
667 def lookupreg(key, name=None, scope=None):
664 return None
668 return None
665
669
666
670
667 def hidewindow():
671 def hidewindow():
668 """Hide current shell window.
672 """Hide current shell window.
669
673
670 Used to hide the window opened when starting asynchronous
674 Used to hide the window opened when starting asynchronous
671 child process under Windows, unneeded on other systems.
675 child process under Windows, unneeded on other systems.
672 """
676 """
673 pass
677 pass
674
678
675
679
676 class cachestat(object):
680 class cachestat(object):
677 def __init__(self, path):
681 def __init__(self, path):
678 self.stat = os.stat(path)
682 self.stat = os.stat(path)
679
683
680 def cacheable(self):
684 def cacheable(self):
681 return bool(self.stat.st_ino)
685 return bool(self.stat.st_ino)
682
686
683 __hash__ = object.__hash__
687 __hash__ = object.__hash__
684
688
685 def __eq__(self, other):
689 def __eq__(self, other):
686 try:
690 try:
687 # Only dev, ino, size, mtime and atime are likely to change. Out
691 # Only dev, ino, size, mtime and atime are likely to change. Out
688 # of these, we shouldn't compare atime but should compare the
692 # of these, we shouldn't compare atime but should compare the
689 # rest. However, one of the other fields changing indicates
693 # rest. However, one of the other fields changing indicates
690 # something fishy going on, so return False if anything but atime
694 # something fishy going on, so return False if anything but atime
691 # changes.
695 # changes.
692 return (
696 return (
693 self.stat.st_mode == other.stat.st_mode
697 self.stat.st_mode == other.stat.st_mode
694 and self.stat.st_ino == other.stat.st_ino
698 and self.stat.st_ino == other.stat.st_ino
695 and self.stat.st_dev == other.stat.st_dev
699 and self.stat.st_dev == other.stat.st_dev
696 and self.stat.st_nlink == other.stat.st_nlink
700 and self.stat.st_nlink == other.stat.st_nlink
697 and self.stat.st_uid == other.stat.st_uid
701 and self.stat.st_uid == other.stat.st_uid
698 and self.stat.st_gid == other.stat.st_gid
702 and self.stat.st_gid == other.stat.st_gid
699 and self.stat.st_size == other.stat.st_size
703 and self.stat.st_size == other.stat.st_size
700 and self.stat[stat.ST_MTIME] == other.stat[stat.ST_MTIME]
704 and self.stat[stat.ST_MTIME] == other.stat[stat.ST_MTIME]
701 and self.stat[stat.ST_CTIME] == other.stat[stat.ST_CTIME]
705 and self.stat[stat.ST_CTIME] == other.stat[stat.ST_CTIME]
702 )
706 )
703 except AttributeError:
707 except AttributeError:
704 return False
708 return False
705
709
706 def __ne__(self, other):
710 def __ne__(self, other):
707 return not self == other
711 return not self == other
708
712
709
713
710 def statislink(st):
714 def statislink(st):
711 '''check whether a stat result is a symlink'''
715 '''check whether a stat result is a symlink'''
712 return st and stat.S_ISLNK(st.st_mode)
716 return st and stat.S_ISLNK(st.st_mode)
713
717
714
718
715 def statisexec(st):
719 def statisexec(st):
716 '''check whether a stat result is an executable file'''
720 '''check whether a stat result is an executable file'''
717 return st and (st.st_mode & 0o100 != 0)
721 return st and (st.st_mode & 0o100 != 0)
718
722
719
723
720 def poll(fds):
724 def poll(fds):
721 """block until something happens on any file descriptor
725 """block until something happens on any file descriptor
722
726
723 This is a generic helper that will check for any activity
727 This is a generic helper that will check for any activity
724 (read, write. exception) and return the list of touched files.
728 (read, write. exception) and return the list of touched files.
725
729
726 In unsupported cases, it will raise a NotImplementedError"""
730 In unsupported cases, it will raise a NotImplementedError"""
727 try:
731 try:
728 while True:
732 while True:
729 try:
733 try:
730 res = select.select(fds, fds, fds)
734 res = select.select(fds, fds, fds)
731 break
735 break
732 except select.error as inst:
736 except select.error as inst:
733 if inst.args[0] == errno.EINTR:
737 if inst.args[0] == errno.EINTR:
734 continue
738 continue
735 raise
739 raise
736 except ValueError: # out of range file descriptor
740 except ValueError: # out of range file descriptor
737 raise NotImplementedError()
741 raise NotImplementedError()
738 return sorted(list(set(sum(res, []))))
742 return sorted(list(set(sum(res, []))))
739
743
740
744
741 def readpipe(pipe):
745 def readpipe(pipe):
742 """Read all available data from a pipe."""
746 """Read all available data from a pipe."""
743 # We can't fstat() a pipe because Linux will always report 0.
747 # We can't fstat() a pipe because Linux will always report 0.
744 # So, we set the pipe to non-blocking mode and read everything
748 # So, we set the pipe to non-blocking mode and read everything
745 # that's available.
749 # that's available.
746 flags = fcntl.fcntl(pipe, fcntl.F_GETFL)
750 flags = fcntl.fcntl(pipe, fcntl.F_GETFL)
747 flags |= os.O_NONBLOCK
751 flags |= os.O_NONBLOCK
748 oldflags = fcntl.fcntl(pipe, fcntl.F_SETFL, flags)
752 oldflags = fcntl.fcntl(pipe, fcntl.F_SETFL, flags)
749
753
750 try:
754 try:
751 chunks = []
755 chunks = []
752 while True:
756 while True:
753 try:
757 try:
754 s = pipe.read()
758 s = pipe.read()
755 if not s:
759 if not s:
756 break
760 break
757 chunks.append(s)
761 chunks.append(s)
758 except IOError:
762 except IOError:
759 break
763 break
760
764
761 return b''.join(chunks)
765 return b''.join(chunks)
762 finally:
766 finally:
763 fcntl.fcntl(pipe, fcntl.F_SETFL, oldflags)
767 fcntl.fcntl(pipe, fcntl.F_SETFL, oldflags)
764
768
765
769
766 def bindunixsocket(sock, path):
770 def bindunixsocket(sock, path):
767 """Bind the UNIX domain socket to the specified path"""
771 """Bind the UNIX domain socket to the specified path"""
768 # use relative path instead of full path at bind() if possible, since
772 # use relative path instead of full path at bind() if possible, since
769 # AF_UNIX path has very small length limit (107 chars) on common
773 # AF_UNIX path has very small length limit (107 chars) on common
770 # platforms (see sys/un.h)
774 # platforms (see sys/un.h)
771 dirname, basename = os.path.split(path)
775 dirname, basename = os.path.split(path)
772 bakwdfd = None
776 bakwdfd = None
773
777
774 try:
778 try:
775 if dirname:
779 if dirname:
776 bakwdfd = os.open(b'.', os.O_DIRECTORY)
780 bakwdfd = os.open(b'.', os.O_DIRECTORY)
777 os.chdir(dirname)
781 os.chdir(dirname)
778 sock.bind(basename)
782 sock.bind(basename)
779 if bakwdfd:
783 if bakwdfd:
780 os.fchdir(bakwdfd)
784 os.fchdir(bakwdfd)
781 finally:
785 finally:
782 if bakwdfd:
786 if bakwdfd:
783 os.close(bakwdfd)
787 os.close(bakwdfd)
@@ -1,2230 +1,2229 b''
1 # ui.py - user interface bits for mercurial
1 # ui.py - user interface bits for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import datetime
12 import datetime
13 import errno
13 import errno
14 import getpass
15 import inspect
14 import inspect
16 import os
15 import os
17 import re
16 import re
18 import signal
17 import signal
19 import socket
18 import socket
20 import subprocess
19 import subprocess
21 import sys
20 import sys
22 import traceback
21 import traceback
23
22
24 from .i18n import _
23 from .i18n import _
25 from .node import hex
24 from .node import hex
26 from .pycompat import (
25 from .pycompat import (
27 getattr,
26 getattr,
28 open,
27 open,
29 )
28 )
30
29
31 from . import (
30 from . import (
32 color,
31 color,
33 config,
32 config,
34 configitems,
33 configitems,
35 encoding,
34 encoding,
36 error,
35 error,
37 formatter,
36 formatter,
38 loggingutil,
37 loggingutil,
39 progress,
38 progress,
40 pycompat,
39 pycompat,
41 rcutil,
40 rcutil,
42 scmutil,
41 scmutil,
43 util,
42 util,
44 )
43 )
45 from .utils import (
44 from .utils import (
46 dateutil,
45 dateutil,
47 procutil,
46 procutil,
48 resourceutil,
47 resourceutil,
49 stringutil,
48 stringutil,
50 urlutil,
49 urlutil,
51 )
50 )
52
51
53 urlreq = util.urlreq
52 urlreq = util.urlreq
54
53
55 # for use with str.translate(None, _keepalnum), to keep just alphanumerics
54 # for use with str.translate(None, _keepalnum), to keep just alphanumerics
56 _keepalnum = b''.join(
55 _keepalnum = b''.join(
57 c for c in map(pycompat.bytechr, range(256)) if not c.isalnum()
56 c for c in map(pycompat.bytechr, range(256)) if not c.isalnum()
58 )
57 )
59
58
60 # The config knobs that will be altered (if unset) by ui.tweakdefaults.
59 # The config knobs that will be altered (if unset) by ui.tweakdefaults.
61 tweakrc = b"""
60 tweakrc = b"""
62 [ui]
61 [ui]
63 # The rollback command is dangerous. As a rule, don't use it.
62 # The rollback command is dangerous. As a rule, don't use it.
64 rollback = False
63 rollback = False
65 # Make `hg status` report copy information
64 # Make `hg status` report copy information
66 statuscopies = yes
65 statuscopies = yes
67 # Prefer curses UIs when available. Revert to plain-text with `text`.
66 # Prefer curses UIs when available. Revert to plain-text with `text`.
68 interface = curses
67 interface = curses
69 # Make compatible commands emit cwd-relative paths by default.
68 # Make compatible commands emit cwd-relative paths by default.
70 relative-paths = yes
69 relative-paths = yes
71
70
72 [commands]
71 [commands]
73 # Grep working directory by default.
72 # Grep working directory by default.
74 grep.all-files = True
73 grep.all-files = True
75 # Refuse to perform an `hg update` that would cause a file content merge
74 # Refuse to perform an `hg update` that would cause a file content merge
76 update.check = noconflict
75 update.check = noconflict
77 # Show conflicts information in `hg status`
76 # Show conflicts information in `hg status`
78 status.verbose = True
77 status.verbose = True
79 # Make `hg resolve` with no action (like `-m`) fail instead of re-merging.
78 # Make `hg resolve` with no action (like `-m`) fail instead of re-merging.
80 resolve.explicit-re-merge = True
79 resolve.explicit-re-merge = True
81
80
82 [diff]
81 [diff]
83 git = 1
82 git = 1
84 showfunc = 1
83 showfunc = 1
85 word-diff = 1
84 word-diff = 1
86 """
85 """
87
86
88 samplehgrcs = {
87 samplehgrcs = {
89 b'user': b"""# example user config (see 'hg help config' for more info)
88 b'user': b"""# example user config (see 'hg help config' for more info)
90 [ui]
89 [ui]
91 # name and email, e.g.
90 # name and email, e.g.
92 # username = Jane Doe <jdoe@example.com>
91 # username = Jane Doe <jdoe@example.com>
93 username =
92 username =
94
93
95 # We recommend enabling tweakdefaults to get slight improvements to
94 # We recommend enabling tweakdefaults to get slight improvements to
96 # the UI over time. Make sure to set HGPLAIN in the environment when
95 # the UI over time. Make sure to set HGPLAIN in the environment when
97 # writing scripts!
96 # writing scripts!
98 # tweakdefaults = True
97 # tweakdefaults = True
99
98
100 # uncomment to disable color in command output
99 # uncomment to disable color in command output
101 # (see 'hg help color' for details)
100 # (see 'hg help color' for details)
102 # color = never
101 # color = never
103
102
104 # uncomment to disable command output pagination
103 # uncomment to disable command output pagination
105 # (see 'hg help pager' for details)
104 # (see 'hg help pager' for details)
106 # paginate = never
105 # paginate = never
107
106
108 [extensions]
107 [extensions]
109 # uncomment the lines below to enable some popular extensions
108 # uncomment the lines below to enable some popular extensions
110 # (see 'hg help extensions' for more info)
109 # (see 'hg help extensions' for more info)
111 #
110 #
112 # histedit =
111 # histedit =
113 # rebase =
112 # rebase =
114 # uncommit =
113 # uncommit =
115 """,
114 """,
116 b'cloned': b"""# example repository config (see 'hg help config' for more info)
115 b'cloned': b"""# example repository config (see 'hg help config' for more info)
117 [paths]
116 [paths]
118 default = %s
117 default = %s
119
118
120 # path aliases to other clones of this repo in URLs or filesystem paths
119 # path aliases to other clones of this repo in URLs or filesystem paths
121 # (see 'hg help config.paths' for more info)
120 # (see 'hg help config.paths' for more info)
122 #
121 #
123 # default:pushurl = ssh://jdoe@example.net/hg/jdoes-fork
122 # default:pushurl = ssh://jdoe@example.net/hg/jdoes-fork
124 # my-fork = ssh://jdoe@example.net/hg/jdoes-fork
123 # my-fork = ssh://jdoe@example.net/hg/jdoes-fork
125 # my-clone = /home/jdoe/jdoes-clone
124 # my-clone = /home/jdoe/jdoes-clone
126
125
127 [ui]
126 [ui]
128 # name and email (local to this repository, optional), e.g.
127 # name and email (local to this repository, optional), e.g.
129 # username = Jane Doe <jdoe@example.com>
128 # username = Jane Doe <jdoe@example.com>
130 """,
129 """,
131 b'local': b"""# example repository config (see 'hg help config' for more info)
130 b'local': b"""# example repository config (see 'hg help config' for more info)
132 [paths]
131 [paths]
133 # path aliases to other clones of this repo in URLs or filesystem paths
132 # path aliases to other clones of this repo in URLs or filesystem paths
134 # (see 'hg help config.paths' for more info)
133 # (see 'hg help config.paths' for more info)
135 #
134 #
136 # default = http://example.com/hg/example-repo
135 # default = http://example.com/hg/example-repo
137 # default:pushurl = ssh://jdoe@example.net/hg/jdoes-fork
136 # default:pushurl = ssh://jdoe@example.net/hg/jdoes-fork
138 # my-fork = ssh://jdoe@example.net/hg/jdoes-fork
137 # my-fork = ssh://jdoe@example.net/hg/jdoes-fork
139 # my-clone = /home/jdoe/jdoes-clone
138 # my-clone = /home/jdoe/jdoes-clone
140
139
141 [ui]
140 [ui]
142 # name and email (local to this repository, optional), e.g.
141 # name and email (local to this repository, optional), e.g.
143 # username = Jane Doe <jdoe@example.com>
142 # username = Jane Doe <jdoe@example.com>
144 """,
143 """,
145 b'global': b"""# example system-wide hg config (see 'hg help config' for more info)
144 b'global': b"""# example system-wide hg config (see 'hg help config' for more info)
146
145
147 [ui]
146 [ui]
148 # uncomment to disable color in command output
147 # uncomment to disable color in command output
149 # (see 'hg help color' for details)
148 # (see 'hg help color' for details)
150 # color = never
149 # color = never
151
150
152 # uncomment to disable command output pagination
151 # uncomment to disable command output pagination
153 # (see 'hg help pager' for details)
152 # (see 'hg help pager' for details)
154 # paginate = never
153 # paginate = never
155
154
156 [extensions]
155 [extensions]
157 # uncomment the lines below to enable some popular extensions
156 # uncomment the lines below to enable some popular extensions
158 # (see 'hg help extensions' for more info)
157 # (see 'hg help extensions' for more info)
159 #
158 #
160 # blackbox =
159 # blackbox =
161 # churn =
160 # churn =
162 """,
161 """,
163 }
162 }
164
163
165
164
166 def _maybestrurl(maybebytes):
165 def _maybestrurl(maybebytes):
167 return pycompat.rapply(pycompat.strurl, maybebytes)
166 return pycompat.rapply(pycompat.strurl, maybebytes)
168
167
169
168
170 def _maybebytesurl(maybestr):
169 def _maybebytesurl(maybestr):
171 return pycompat.rapply(pycompat.bytesurl, maybestr)
170 return pycompat.rapply(pycompat.bytesurl, maybestr)
172
171
173
172
174 class httppasswordmgrdbproxy(object):
173 class httppasswordmgrdbproxy(object):
175 """Delays loading urllib2 until it's needed."""
174 """Delays loading urllib2 until it's needed."""
176
175
177 def __init__(self):
176 def __init__(self):
178 self._mgr = None
177 self._mgr = None
179
178
180 def _get_mgr(self):
179 def _get_mgr(self):
181 if self._mgr is None:
180 if self._mgr is None:
182 self._mgr = urlreq.httppasswordmgrwithdefaultrealm()
181 self._mgr = urlreq.httppasswordmgrwithdefaultrealm()
183 return self._mgr
182 return self._mgr
184
183
185 def add_password(self, realm, uris, user, passwd):
184 def add_password(self, realm, uris, user, passwd):
186 return self._get_mgr().add_password(
185 return self._get_mgr().add_password(
187 _maybestrurl(realm),
186 _maybestrurl(realm),
188 _maybestrurl(uris),
187 _maybestrurl(uris),
189 _maybestrurl(user),
188 _maybestrurl(user),
190 _maybestrurl(passwd),
189 _maybestrurl(passwd),
191 )
190 )
192
191
193 def find_user_password(self, realm, uri):
192 def find_user_password(self, realm, uri):
194 mgr = self._get_mgr()
193 mgr = self._get_mgr()
195 return _maybebytesurl(
194 return _maybebytesurl(
196 mgr.find_user_password(_maybestrurl(realm), _maybestrurl(uri))
195 mgr.find_user_password(_maybestrurl(realm), _maybestrurl(uri))
197 )
196 )
198
197
199
198
200 def _catchterm(*args):
199 def _catchterm(*args):
201 raise error.SignalInterrupt
200 raise error.SignalInterrupt
202
201
203
202
204 # unique object used to detect no default value has been provided when
203 # unique object used to detect no default value has been provided when
205 # retrieving configuration value.
204 # retrieving configuration value.
206 _unset = object()
205 _unset = object()
207
206
208 # _reqexithandlers: callbacks run at the end of a request
207 # _reqexithandlers: callbacks run at the end of a request
209 _reqexithandlers = []
208 _reqexithandlers = []
210
209
211
210
212 class ui(object):
211 class ui(object):
213 def __init__(self, src=None):
212 def __init__(self, src=None):
214 """Create a fresh new ui object if no src given
213 """Create a fresh new ui object if no src given
215
214
216 Use uimod.ui.load() to create a ui which knows global and user configs.
215 Use uimod.ui.load() to create a ui which knows global and user configs.
217 In most cases, you should use ui.copy() to create a copy of an existing
216 In most cases, you should use ui.copy() to create a copy of an existing
218 ui object.
217 ui object.
219 """
218 """
220 # _buffers: used for temporary capture of output
219 # _buffers: used for temporary capture of output
221 self._buffers = []
220 self._buffers = []
222 # 3-tuple describing how each buffer in the stack behaves.
221 # 3-tuple describing how each buffer in the stack behaves.
223 # Values are (capture stderr, capture subprocesses, apply labels).
222 # Values are (capture stderr, capture subprocesses, apply labels).
224 self._bufferstates = []
223 self._bufferstates = []
225 # When a buffer is active, defines whether we are expanding labels.
224 # When a buffer is active, defines whether we are expanding labels.
226 # This exists to prevent an extra list lookup.
225 # This exists to prevent an extra list lookup.
227 self._bufferapplylabels = None
226 self._bufferapplylabels = None
228 self.quiet = self.verbose = self.debugflag = self.tracebackflag = False
227 self.quiet = self.verbose = self.debugflag = self.tracebackflag = False
229 self._reportuntrusted = True
228 self._reportuntrusted = True
230 self._knownconfig = configitems.coreitems
229 self._knownconfig = configitems.coreitems
231 self._ocfg = config.config() # overlay
230 self._ocfg = config.config() # overlay
232 self._tcfg = config.config() # trusted
231 self._tcfg = config.config() # trusted
233 self._ucfg = config.config() # untrusted
232 self._ucfg = config.config() # untrusted
234 self._trustusers = set()
233 self._trustusers = set()
235 self._trustgroups = set()
234 self._trustgroups = set()
236 self.callhooks = True
235 self.callhooks = True
237 # Insecure server connections requested.
236 # Insecure server connections requested.
238 self.insecureconnections = False
237 self.insecureconnections = False
239 # Blocked time
238 # Blocked time
240 self.logblockedtimes = False
239 self.logblockedtimes = False
241 # color mode: see mercurial/color.py for possible value
240 # color mode: see mercurial/color.py for possible value
242 self._colormode = None
241 self._colormode = None
243 self._terminfoparams = {}
242 self._terminfoparams = {}
244 self._styles = {}
243 self._styles = {}
245 self._uninterruptible = False
244 self._uninterruptible = False
246 self.showtimestamp = False
245 self.showtimestamp = False
247
246
248 if src:
247 if src:
249 self._fout = src._fout
248 self._fout = src._fout
250 self._ferr = src._ferr
249 self._ferr = src._ferr
251 self._fin = src._fin
250 self._fin = src._fin
252 self._fmsg = src._fmsg
251 self._fmsg = src._fmsg
253 self._fmsgout = src._fmsgout
252 self._fmsgout = src._fmsgout
254 self._fmsgerr = src._fmsgerr
253 self._fmsgerr = src._fmsgerr
255 self._finoutredirected = src._finoutredirected
254 self._finoutredirected = src._finoutredirected
256 self._loggers = src._loggers.copy()
255 self._loggers = src._loggers.copy()
257 self.pageractive = src.pageractive
256 self.pageractive = src.pageractive
258 self._disablepager = src._disablepager
257 self._disablepager = src._disablepager
259 self._tweaked = src._tweaked
258 self._tweaked = src._tweaked
260
259
261 self._tcfg = src._tcfg.copy()
260 self._tcfg = src._tcfg.copy()
262 self._ucfg = src._ucfg.copy()
261 self._ucfg = src._ucfg.copy()
263 self._ocfg = src._ocfg.copy()
262 self._ocfg = src._ocfg.copy()
264 self._trustusers = src._trustusers.copy()
263 self._trustusers = src._trustusers.copy()
265 self._trustgroups = src._trustgroups.copy()
264 self._trustgroups = src._trustgroups.copy()
266 self.environ = src.environ
265 self.environ = src.environ
267 self.callhooks = src.callhooks
266 self.callhooks = src.callhooks
268 self.insecureconnections = src.insecureconnections
267 self.insecureconnections = src.insecureconnections
269 self._colormode = src._colormode
268 self._colormode = src._colormode
270 self._terminfoparams = src._terminfoparams.copy()
269 self._terminfoparams = src._terminfoparams.copy()
271 self._styles = src._styles.copy()
270 self._styles = src._styles.copy()
272
271
273 self.fixconfig()
272 self.fixconfig()
274
273
275 self.httppasswordmgrdb = src.httppasswordmgrdb
274 self.httppasswordmgrdb = src.httppasswordmgrdb
276 self._blockedtimes = src._blockedtimes
275 self._blockedtimes = src._blockedtimes
277 else:
276 else:
278 self._fout = procutil.stdout
277 self._fout = procutil.stdout
279 self._ferr = procutil.stderr
278 self._ferr = procutil.stderr
280 self._fin = procutil.stdin
279 self._fin = procutil.stdin
281 self._fmsg = None
280 self._fmsg = None
282 self._fmsgout = self.fout # configurable
281 self._fmsgout = self.fout # configurable
283 self._fmsgerr = self.ferr # configurable
282 self._fmsgerr = self.ferr # configurable
284 self._finoutredirected = False
283 self._finoutredirected = False
285 self._loggers = {}
284 self._loggers = {}
286 self.pageractive = False
285 self.pageractive = False
287 self._disablepager = False
286 self._disablepager = False
288 self._tweaked = False
287 self._tweaked = False
289
288
290 # shared read-only environment
289 # shared read-only environment
291 self.environ = encoding.environ
290 self.environ = encoding.environ
292
291
293 self.httppasswordmgrdb = httppasswordmgrdbproxy()
292 self.httppasswordmgrdb = httppasswordmgrdbproxy()
294 self._blockedtimes = collections.defaultdict(int)
293 self._blockedtimes = collections.defaultdict(int)
295
294
296 allowed = self.configlist(b'experimental', b'exportableenviron')
295 allowed = self.configlist(b'experimental', b'exportableenviron')
297 if b'*' in allowed:
296 if b'*' in allowed:
298 self._exportableenviron = self.environ
297 self._exportableenviron = self.environ
299 else:
298 else:
300 self._exportableenviron = {}
299 self._exportableenviron = {}
301 for k in allowed:
300 for k in allowed:
302 if k in self.environ:
301 if k in self.environ:
303 self._exportableenviron[k] = self.environ[k]
302 self._exportableenviron[k] = self.environ[k]
304
303
305 def _new_source(self):
304 def _new_source(self):
306 self._ocfg.new_source()
305 self._ocfg.new_source()
307 self._tcfg.new_source()
306 self._tcfg.new_source()
308 self._ucfg.new_source()
307 self._ucfg.new_source()
309
308
310 @classmethod
309 @classmethod
311 def load(cls):
310 def load(cls):
312 """Create a ui and load global and user configs"""
311 """Create a ui and load global and user configs"""
313 u = cls()
312 u = cls()
314 # we always trust global config files and environment variables
313 # we always trust global config files and environment variables
315 for t, f in rcutil.rccomponents():
314 for t, f in rcutil.rccomponents():
316 if t == b'path':
315 if t == b'path':
317 u.readconfig(f, trust=True)
316 u.readconfig(f, trust=True)
318 elif t == b'resource':
317 elif t == b'resource':
319 u.read_resource_config(f, trust=True)
318 u.read_resource_config(f, trust=True)
320 elif t == b'items':
319 elif t == b'items':
321 u._new_source()
320 u._new_source()
322 sections = set()
321 sections = set()
323 for section, name, value, source in f:
322 for section, name, value, source in f:
324 # do not set u._ocfg
323 # do not set u._ocfg
325 # XXX clean this up once immutable config object is a thing
324 # XXX clean this up once immutable config object is a thing
326 u._tcfg.set(section, name, value, source)
325 u._tcfg.set(section, name, value, source)
327 u._ucfg.set(section, name, value, source)
326 u._ucfg.set(section, name, value, source)
328 sections.add(section)
327 sections.add(section)
329 for section in sections:
328 for section in sections:
330 u.fixconfig(section=section)
329 u.fixconfig(section=section)
331 else:
330 else:
332 raise error.ProgrammingError(b'unknown rctype: %s' % t)
331 raise error.ProgrammingError(b'unknown rctype: %s' % t)
333 u._maybetweakdefaults()
332 u._maybetweakdefaults()
334 u._new_source() # anything after that is a different level
333 u._new_source() # anything after that is a different level
335 return u
334 return u
336
335
337 def _maybetweakdefaults(self):
336 def _maybetweakdefaults(self):
338 if not self.configbool(b'ui', b'tweakdefaults'):
337 if not self.configbool(b'ui', b'tweakdefaults'):
339 return
338 return
340 if self._tweaked or self.plain(b'tweakdefaults'):
339 if self._tweaked or self.plain(b'tweakdefaults'):
341 return
340 return
342
341
343 # Note: it is SUPER IMPORTANT that you set self._tweaked to
342 # Note: it is SUPER IMPORTANT that you set self._tweaked to
344 # True *before* any calls to setconfig(), otherwise you'll get
343 # True *before* any calls to setconfig(), otherwise you'll get
345 # infinite recursion between setconfig and this method.
344 # infinite recursion between setconfig and this method.
346 #
345 #
347 # TODO: We should extract an inner method in setconfig() to
346 # TODO: We should extract an inner method in setconfig() to
348 # avoid this weirdness.
347 # avoid this weirdness.
349 self._tweaked = True
348 self._tweaked = True
350 tmpcfg = config.config()
349 tmpcfg = config.config()
351 tmpcfg.parse(b'<tweakdefaults>', tweakrc)
350 tmpcfg.parse(b'<tweakdefaults>', tweakrc)
352 for section in tmpcfg:
351 for section in tmpcfg:
353 for name, value in tmpcfg.items(section):
352 for name, value in tmpcfg.items(section):
354 if not self.hasconfig(section, name):
353 if not self.hasconfig(section, name):
355 self.setconfig(section, name, value, b"<tweakdefaults>")
354 self.setconfig(section, name, value, b"<tweakdefaults>")
356
355
357 def copy(self):
356 def copy(self):
358 return self.__class__(self)
357 return self.__class__(self)
359
358
360 def resetstate(self):
359 def resetstate(self):
361 """Clear internal state that shouldn't persist across commands"""
360 """Clear internal state that shouldn't persist across commands"""
362 if self._progbar:
361 if self._progbar:
363 self._progbar.resetstate() # reset last-print time of progress bar
362 self._progbar.resetstate() # reset last-print time of progress bar
364 self.httppasswordmgrdb = httppasswordmgrdbproxy()
363 self.httppasswordmgrdb = httppasswordmgrdbproxy()
365
364
366 @contextlib.contextmanager
365 @contextlib.contextmanager
367 def timeblockedsection(self, key):
366 def timeblockedsection(self, key):
368 # this is open-coded below - search for timeblockedsection to find them
367 # this is open-coded below - search for timeblockedsection to find them
369 starttime = util.timer()
368 starttime = util.timer()
370 try:
369 try:
371 yield
370 yield
372 finally:
371 finally:
373 self._blockedtimes[key + b'_blocked'] += (
372 self._blockedtimes[key + b'_blocked'] += (
374 util.timer() - starttime
373 util.timer() - starttime
375 ) * 1000
374 ) * 1000
376
375
377 @contextlib.contextmanager
376 @contextlib.contextmanager
378 def uninterruptible(self):
377 def uninterruptible(self):
379 """Mark an operation as unsafe.
378 """Mark an operation as unsafe.
380
379
381 Most operations on a repository are safe to interrupt, but a
380 Most operations on a repository are safe to interrupt, but a
382 few are risky (for example repair.strip). This context manager
381 few are risky (for example repair.strip). This context manager
383 lets you advise Mercurial that something risky is happening so
382 lets you advise Mercurial that something risky is happening so
384 that control-C etc can be blocked if desired.
383 that control-C etc can be blocked if desired.
385 """
384 """
386 enabled = self.configbool(b'experimental', b'nointerrupt')
385 enabled = self.configbool(b'experimental', b'nointerrupt')
387 if enabled and self.configbool(
386 if enabled and self.configbool(
388 b'experimental', b'nointerrupt-interactiveonly'
387 b'experimental', b'nointerrupt-interactiveonly'
389 ):
388 ):
390 enabled = self.interactive()
389 enabled = self.interactive()
391 if self._uninterruptible or not enabled:
390 if self._uninterruptible or not enabled:
392 # if nointerrupt support is turned off, the process isn't
391 # if nointerrupt support is turned off, the process isn't
393 # interactive, or we're already in an uninterruptible
392 # interactive, or we're already in an uninterruptible
394 # block, do nothing.
393 # block, do nothing.
395 yield
394 yield
396 return
395 return
397
396
398 def warn():
397 def warn():
399 self.warn(_(b"shutting down cleanly\n"))
398 self.warn(_(b"shutting down cleanly\n"))
400 self.warn(
399 self.warn(
401 _(b"press ^C again to terminate immediately (dangerous)\n")
400 _(b"press ^C again to terminate immediately (dangerous)\n")
402 )
401 )
403 return True
402 return True
404
403
405 with procutil.uninterruptible(warn):
404 with procutil.uninterruptible(warn):
406 try:
405 try:
407 self._uninterruptible = True
406 self._uninterruptible = True
408 yield
407 yield
409 finally:
408 finally:
410 self._uninterruptible = False
409 self._uninterruptible = False
411
410
412 def formatter(self, topic, opts):
411 def formatter(self, topic, opts):
413 return formatter.formatter(self, self, topic, opts)
412 return formatter.formatter(self, self, topic, opts)
414
413
415 def _trusted(self, fp, f):
414 def _trusted(self, fp, f):
416 st = util.fstat(fp)
415 st = util.fstat(fp)
417 if util.isowner(st):
416 if util.isowner(st):
418 return True
417 return True
419
418
420 tusers, tgroups = self._trustusers, self._trustgroups
419 tusers, tgroups = self._trustusers, self._trustgroups
421 if b'*' in tusers or b'*' in tgroups:
420 if b'*' in tusers or b'*' in tgroups:
422 return True
421 return True
423
422
424 user = util.username(st.st_uid)
423 user = util.username(st.st_uid)
425 group = util.groupname(st.st_gid)
424 group = util.groupname(st.st_gid)
426 if user in tusers or group in tgroups or user == util.username():
425 if user in tusers or group in tgroups or user == util.username():
427 return True
426 return True
428
427
429 if self._reportuntrusted:
428 if self._reportuntrusted:
430 self.warn(
429 self.warn(
431 _(
430 _(
432 b'not trusting file %s from untrusted '
431 b'not trusting file %s from untrusted '
433 b'user %s, group %s\n'
432 b'user %s, group %s\n'
434 )
433 )
435 % (f, user, group)
434 % (f, user, group)
436 )
435 )
437 return False
436 return False
438
437
439 def read_resource_config(
438 def read_resource_config(
440 self, name, root=None, trust=False, sections=None, remap=None
439 self, name, root=None, trust=False, sections=None, remap=None
441 ):
440 ):
442 try:
441 try:
443 fp = resourceutil.open_resource(name[0], name[1])
442 fp = resourceutil.open_resource(name[0], name[1])
444 except IOError:
443 except IOError:
445 if not sections: # ignore unless we were looking for something
444 if not sections: # ignore unless we were looking for something
446 return
445 return
447 raise
446 raise
448
447
449 self._readconfig(
448 self._readconfig(
450 b'resource:%s.%s' % name, fp, root, trust, sections, remap
449 b'resource:%s.%s' % name, fp, root, trust, sections, remap
451 )
450 )
452
451
453 def readconfig(
452 def readconfig(
454 self, filename, root=None, trust=False, sections=None, remap=None
453 self, filename, root=None, trust=False, sections=None, remap=None
455 ):
454 ):
456 try:
455 try:
457 fp = open(filename, 'rb')
456 fp = open(filename, 'rb')
458 except IOError:
457 except IOError:
459 if not sections: # ignore unless we were looking for something
458 if not sections: # ignore unless we were looking for something
460 return
459 return
461 raise
460 raise
462
461
463 self._readconfig(filename, fp, root, trust, sections, remap)
462 self._readconfig(filename, fp, root, trust, sections, remap)
464
463
465 def _readconfig(
464 def _readconfig(
466 self, filename, fp, root=None, trust=False, sections=None, remap=None
465 self, filename, fp, root=None, trust=False, sections=None, remap=None
467 ):
466 ):
468 with fp:
467 with fp:
469 cfg = config.config()
468 cfg = config.config()
470 trusted = sections or trust or self._trusted(fp, filename)
469 trusted = sections or trust or self._trusted(fp, filename)
471
470
472 try:
471 try:
473 cfg.read(filename, fp, sections=sections, remap=remap)
472 cfg.read(filename, fp, sections=sections, remap=remap)
474 except error.ConfigError as inst:
473 except error.ConfigError as inst:
475 if trusted:
474 if trusted:
476 raise
475 raise
477 self.warn(
476 self.warn(
478 _(b'ignored %s: %s\n') % (inst.location, inst.message)
477 _(b'ignored %s: %s\n') % (inst.location, inst.message)
479 )
478 )
480
479
481 self._applyconfig(cfg, trusted, root)
480 self._applyconfig(cfg, trusted, root)
482
481
483 def applyconfig(self, configitems, source=b"", root=None):
482 def applyconfig(self, configitems, source=b"", root=None):
484 """Add configitems from a non-file source. Unlike with ``setconfig()``,
483 """Add configitems from a non-file source. Unlike with ``setconfig()``,
485 they can be overridden by subsequent config file reads. The items are
484 they can be overridden by subsequent config file reads. The items are
486 in the same format as ``configoverride()``, namely a dict of the
485 in the same format as ``configoverride()``, namely a dict of the
487 following structures: {(section, name) : value}
486 following structures: {(section, name) : value}
488
487
489 Typically this is used by extensions that inject themselves into the
488 Typically this is used by extensions that inject themselves into the
490 config file load procedure by monkeypatching ``localrepo.loadhgrc()``.
489 config file load procedure by monkeypatching ``localrepo.loadhgrc()``.
491 """
490 """
492 cfg = config.config()
491 cfg = config.config()
493
492
494 for (section, name), value in configitems.items():
493 for (section, name), value in configitems.items():
495 cfg.set(section, name, value, source)
494 cfg.set(section, name, value, source)
496
495
497 self._applyconfig(cfg, True, root)
496 self._applyconfig(cfg, True, root)
498
497
499 def _applyconfig(self, cfg, trusted, root):
498 def _applyconfig(self, cfg, trusted, root):
500 if self.plain():
499 if self.plain():
501 for k in (
500 for k in (
502 b'debug',
501 b'debug',
503 b'fallbackencoding',
502 b'fallbackencoding',
504 b'quiet',
503 b'quiet',
505 b'slash',
504 b'slash',
506 b'logtemplate',
505 b'logtemplate',
507 b'message-output',
506 b'message-output',
508 b'statuscopies',
507 b'statuscopies',
509 b'style',
508 b'style',
510 b'traceback',
509 b'traceback',
511 b'verbose',
510 b'verbose',
512 ):
511 ):
513 if k in cfg[b'ui']:
512 if k in cfg[b'ui']:
514 del cfg[b'ui'][k]
513 del cfg[b'ui'][k]
515 for k, v in cfg.items(b'defaults'):
514 for k, v in cfg.items(b'defaults'):
516 del cfg[b'defaults'][k]
515 del cfg[b'defaults'][k]
517 for k, v in cfg.items(b'commands'):
516 for k, v in cfg.items(b'commands'):
518 del cfg[b'commands'][k]
517 del cfg[b'commands'][k]
519 for k, v in cfg.items(b'command-templates'):
518 for k, v in cfg.items(b'command-templates'):
520 del cfg[b'command-templates'][k]
519 del cfg[b'command-templates'][k]
521 # Don't remove aliases from the configuration if in the exceptionlist
520 # Don't remove aliases from the configuration if in the exceptionlist
522 if self.plain(b'alias'):
521 if self.plain(b'alias'):
523 for k, v in cfg.items(b'alias'):
522 for k, v in cfg.items(b'alias'):
524 del cfg[b'alias'][k]
523 del cfg[b'alias'][k]
525 if self.plain(b'revsetalias'):
524 if self.plain(b'revsetalias'):
526 for k, v in cfg.items(b'revsetalias'):
525 for k, v in cfg.items(b'revsetalias'):
527 del cfg[b'revsetalias'][k]
526 del cfg[b'revsetalias'][k]
528 if self.plain(b'templatealias'):
527 if self.plain(b'templatealias'):
529 for k, v in cfg.items(b'templatealias'):
528 for k, v in cfg.items(b'templatealias'):
530 del cfg[b'templatealias'][k]
529 del cfg[b'templatealias'][k]
531
530
532 if trusted:
531 if trusted:
533 self._tcfg.update(cfg)
532 self._tcfg.update(cfg)
534 self._tcfg.update(self._ocfg)
533 self._tcfg.update(self._ocfg)
535 self._ucfg.update(cfg)
534 self._ucfg.update(cfg)
536 self._ucfg.update(self._ocfg)
535 self._ucfg.update(self._ocfg)
537
536
538 if root is None:
537 if root is None:
539 root = os.path.expanduser(b'~')
538 root = os.path.expanduser(b'~')
540 self.fixconfig(root=root)
539 self.fixconfig(root=root)
541
540
542 def fixconfig(self, root=None, section=None):
541 def fixconfig(self, root=None, section=None):
543 if section in (None, b'paths'):
542 if section in (None, b'paths'):
544 # expand vars and ~
543 # expand vars and ~
545 # translate paths relative to root (or home) into absolute paths
544 # translate paths relative to root (or home) into absolute paths
546 root = root or encoding.getcwd()
545 root = root or encoding.getcwd()
547 for c in self._tcfg, self._ucfg, self._ocfg:
546 for c in self._tcfg, self._ucfg, self._ocfg:
548 for n, p in c.items(b'paths'):
547 for n, p in c.items(b'paths'):
549 # Ignore sub-options.
548 # Ignore sub-options.
550 if b':' in n:
549 if b':' in n:
551 continue
550 continue
552 if not p:
551 if not p:
553 continue
552 continue
554 if b'%%' in p:
553 if b'%%' in p:
555 s = self.configsource(b'paths', n) or b'none'
554 s = self.configsource(b'paths', n) or b'none'
556 self.warn(
555 self.warn(
557 _(b"(deprecated '%%' in path %s=%s from %s)\n")
556 _(b"(deprecated '%%' in path %s=%s from %s)\n")
558 % (n, p, s)
557 % (n, p, s)
559 )
558 )
560 p = p.replace(b'%%', b'%')
559 p = p.replace(b'%%', b'%')
561 p = util.expandpath(p)
560 p = util.expandpath(p)
562 if not urlutil.hasscheme(p) and not os.path.isabs(p):
561 if not urlutil.hasscheme(p) and not os.path.isabs(p):
563 p = os.path.normpath(os.path.join(root, p))
562 p = os.path.normpath(os.path.join(root, p))
564 c.alter(b"paths", n, p)
563 c.alter(b"paths", n, p)
565
564
566 if section in (None, b'ui'):
565 if section in (None, b'ui'):
567 # update ui options
566 # update ui options
568 self._fmsgout, self._fmsgerr = _selectmsgdests(self)
567 self._fmsgout, self._fmsgerr = _selectmsgdests(self)
569 self.debugflag = self.configbool(b'ui', b'debug')
568 self.debugflag = self.configbool(b'ui', b'debug')
570 self.verbose = self.debugflag or self.configbool(b'ui', b'verbose')
569 self.verbose = self.debugflag or self.configbool(b'ui', b'verbose')
571 self.quiet = not self.debugflag and self.configbool(b'ui', b'quiet')
570 self.quiet = not self.debugflag and self.configbool(b'ui', b'quiet')
572 if self.verbose and self.quiet:
571 if self.verbose and self.quiet:
573 self.quiet = self.verbose = False
572 self.quiet = self.verbose = False
574 self._reportuntrusted = self.debugflag or self.configbool(
573 self._reportuntrusted = self.debugflag or self.configbool(
575 b"ui", b"report_untrusted"
574 b"ui", b"report_untrusted"
576 )
575 )
577 self.showtimestamp = self.configbool(b'ui', b'timestamp-output')
576 self.showtimestamp = self.configbool(b'ui', b'timestamp-output')
578 self.tracebackflag = self.configbool(b'ui', b'traceback')
577 self.tracebackflag = self.configbool(b'ui', b'traceback')
579 self.logblockedtimes = self.configbool(b'ui', b'logblockedtimes')
578 self.logblockedtimes = self.configbool(b'ui', b'logblockedtimes')
580
579
581 if section in (None, b'trusted'):
580 if section in (None, b'trusted'):
582 # update trust information
581 # update trust information
583 self._trustusers.update(self.configlist(b'trusted', b'users'))
582 self._trustusers.update(self.configlist(b'trusted', b'users'))
584 self._trustgroups.update(self.configlist(b'trusted', b'groups'))
583 self._trustgroups.update(self.configlist(b'trusted', b'groups'))
585
584
586 if section in (None, b'devel', b'ui') and self.debugflag:
585 if section in (None, b'devel', b'ui') and self.debugflag:
587 tracked = set()
586 tracked = set()
588 if self.configbool(b'devel', b'debug.extensions'):
587 if self.configbool(b'devel', b'debug.extensions'):
589 tracked.add(b'extension')
588 tracked.add(b'extension')
590 if tracked:
589 if tracked:
591 logger = loggingutil.fileobjectlogger(self._ferr, tracked)
590 logger = loggingutil.fileobjectlogger(self._ferr, tracked)
592 self.setlogger(b'debug', logger)
591 self.setlogger(b'debug', logger)
593
592
594 def backupconfig(self, section, item):
593 def backupconfig(self, section, item):
595 return (
594 return (
596 self._ocfg.backup(section, item),
595 self._ocfg.backup(section, item),
597 self._tcfg.backup(section, item),
596 self._tcfg.backup(section, item),
598 self._ucfg.backup(section, item),
597 self._ucfg.backup(section, item),
599 )
598 )
600
599
601 def restoreconfig(self, data):
600 def restoreconfig(self, data):
602 self._ocfg.restore(data[0])
601 self._ocfg.restore(data[0])
603 self._tcfg.restore(data[1])
602 self._tcfg.restore(data[1])
604 self._ucfg.restore(data[2])
603 self._ucfg.restore(data[2])
605
604
606 def setconfig(self, section, name, value, source=b''):
605 def setconfig(self, section, name, value, source=b''):
607 for cfg in (self._ocfg, self._tcfg, self._ucfg):
606 for cfg in (self._ocfg, self._tcfg, self._ucfg):
608 cfg.set(section, name, value, source)
607 cfg.set(section, name, value, source)
609 self.fixconfig(section=section)
608 self.fixconfig(section=section)
610 self._maybetweakdefaults()
609 self._maybetweakdefaults()
611
610
612 def _data(self, untrusted):
611 def _data(self, untrusted):
613 return untrusted and self._ucfg or self._tcfg
612 return untrusted and self._ucfg or self._tcfg
614
613
615 def configsource(self, section, name, untrusted=False):
614 def configsource(self, section, name, untrusted=False):
616 return self._data(untrusted).source(section, name)
615 return self._data(untrusted).source(section, name)
617
616
618 def config(self, section, name, default=_unset, untrusted=False):
617 def config(self, section, name, default=_unset, untrusted=False):
619 """return the plain string version of a config"""
618 """return the plain string version of a config"""
620 value = self._config(
619 value = self._config(
621 section, name, default=default, untrusted=untrusted
620 section, name, default=default, untrusted=untrusted
622 )
621 )
623 if value is _unset:
622 if value is _unset:
624 return None
623 return None
625 return value
624 return value
626
625
627 def _config(self, section, name, default=_unset, untrusted=False):
626 def _config(self, section, name, default=_unset, untrusted=False):
628 value = itemdefault = default
627 value = itemdefault = default
629 item = self._knownconfig.get(section, {}).get(name)
628 item = self._knownconfig.get(section, {}).get(name)
630 alternates = [(section, name)]
629 alternates = [(section, name)]
631
630
632 if item is not None:
631 if item is not None:
633 alternates.extend(item.alias)
632 alternates.extend(item.alias)
634 if callable(item.default):
633 if callable(item.default):
635 itemdefault = item.default()
634 itemdefault = item.default()
636 else:
635 else:
637 itemdefault = item.default
636 itemdefault = item.default
638 else:
637 else:
639 msg = b"accessing unregistered config item: '%s.%s'"
638 msg = b"accessing unregistered config item: '%s.%s'"
640 msg %= (section, name)
639 msg %= (section, name)
641 self.develwarn(msg, 2, b'warn-config-unknown')
640 self.develwarn(msg, 2, b'warn-config-unknown')
642
641
643 if default is _unset:
642 if default is _unset:
644 if item is None:
643 if item is None:
645 value = default
644 value = default
646 elif item.default is configitems.dynamicdefault:
645 elif item.default is configitems.dynamicdefault:
647 value = None
646 value = None
648 msg = b"config item requires an explicit default value: '%s.%s'"
647 msg = b"config item requires an explicit default value: '%s.%s'"
649 msg %= (section, name)
648 msg %= (section, name)
650 self.develwarn(msg, 2, b'warn-config-default')
649 self.develwarn(msg, 2, b'warn-config-default')
651 else:
650 else:
652 value = itemdefault
651 value = itemdefault
653 elif (
652 elif (
654 item is not None
653 item is not None
655 and item.default is not configitems.dynamicdefault
654 and item.default is not configitems.dynamicdefault
656 and default != itemdefault
655 and default != itemdefault
657 ):
656 ):
658 msg = (
657 msg = (
659 b"specifying a mismatched default value for a registered "
658 b"specifying a mismatched default value for a registered "
660 b"config item: '%s.%s' '%s'"
659 b"config item: '%s.%s' '%s'"
661 )
660 )
662 msg %= (section, name, pycompat.bytestr(default))
661 msg %= (section, name, pycompat.bytestr(default))
663 self.develwarn(msg, 2, b'warn-config-default')
662 self.develwarn(msg, 2, b'warn-config-default')
664
663
665 candidates = []
664 candidates = []
666 config = self._data(untrusted)
665 config = self._data(untrusted)
667 for s, n in alternates:
666 for s, n in alternates:
668 candidate = config.get(s, n, None)
667 candidate = config.get(s, n, None)
669 if candidate is not None:
668 if candidate is not None:
670 candidates.append((s, n, candidate))
669 candidates.append((s, n, candidate))
671 if candidates:
670 if candidates:
672
671
673 def level(x):
672 def level(x):
674 return config.level(x[0], x[1])
673 return config.level(x[0], x[1])
675
674
676 value = max(candidates, key=level)[2]
675 value = max(candidates, key=level)[2]
677
676
678 if self.debugflag and not untrusted and self._reportuntrusted:
677 if self.debugflag and not untrusted and self._reportuntrusted:
679 for s, n in alternates:
678 for s, n in alternates:
680 uvalue = self._ucfg.get(s, n)
679 uvalue = self._ucfg.get(s, n)
681 if uvalue is not None and uvalue != value:
680 if uvalue is not None and uvalue != value:
682 self.debug(
681 self.debug(
683 b"ignoring untrusted configuration option "
682 b"ignoring untrusted configuration option "
684 b"%s.%s = %s\n" % (s, n, uvalue)
683 b"%s.%s = %s\n" % (s, n, uvalue)
685 )
684 )
686 return value
685 return value
687
686
688 def config_default(self, section, name):
687 def config_default(self, section, name):
689 """return the default value for a config option
688 """return the default value for a config option
690
689
691 The default is returned "raw", for example if it is a callable, the
690 The default is returned "raw", for example if it is a callable, the
692 callable was not called.
691 callable was not called.
693 """
692 """
694 item = self._knownconfig.get(section, {}).get(name)
693 item = self._knownconfig.get(section, {}).get(name)
695
694
696 if item is None:
695 if item is None:
697 raise KeyError((section, name))
696 raise KeyError((section, name))
698 return item.default
697 return item.default
699
698
700 def configsuboptions(self, section, name, default=_unset, untrusted=False):
699 def configsuboptions(self, section, name, default=_unset, untrusted=False):
701 """Get a config option and all sub-options.
700 """Get a config option and all sub-options.
702
701
703 Some config options have sub-options that are declared with the
702 Some config options have sub-options that are declared with the
704 format "key:opt = value". This method is used to return the main
703 format "key:opt = value". This method is used to return the main
705 option and all its declared sub-options.
704 option and all its declared sub-options.
706
705
707 Returns a 2-tuple of ``(option, sub-options)``, where `sub-options``
706 Returns a 2-tuple of ``(option, sub-options)``, where `sub-options``
708 is a dict of defined sub-options where keys and values are strings.
707 is a dict of defined sub-options where keys and values are strings.
709 """
708 """
710 main = self.config(section, name, default, untrusted=untrusted)
709 main = self.config(section, name, default, untrusted=untrusted)
711 data = self._data(untrusted)
710 data = self._data(untrusted)
712 sub = {}
711 sub = {}
713 prefix = b'%s:' % name
712 prefix = b'%s:' % name
714 for k, v in data.items(section):
713 for k, v in data.items(section):
715 if k.startswith(prefix):
714 if k.startswith(prefix):
716 sub[k[len(prefix) :]] = v
715 sub[k[len(prefix) :]] = v
717
716
718 if self.debugflag and not untrusted and self._reportuntrusted:
717 if self.debugflag and not untrusted and self._reportuntrusted:
719 for k, v in sub.items():
718 for k, v in sub.items():
720 uvalue = self._ucfg.get(section, b'%s:%s' % (name, k))
719 uvalue = self._ucfg.get(section, b'%s:%s' % (name, k))
721 if uvalue is not None and uvalue != v:
720 if uvalue is not None and uvalue != v:
722 self.debug(
721 self.debug(
723 b'ignoring untrusted configuration option '
722 b'ignoring untrusted configuration option '
724 b'%s:%s.%s = %s\n' % (section, name, k, uvalue)
723 b'%s:%s.%s = %s\n' % (section, name, k, uvalue)
725 )
724 )
726
725
727 return main, sub
726 return main, sub
728
727
729 def configpath(self, section, name, default=_unset, untrusted=False):
728 def configpath(self, section, name, default=_unset, untrusted=False):
730 """get a path config item, expanded relative to repo root or config
729 """get a path config item, expanded relative to repo root or config
731 file"""
730 file"""
732 v = self.config(section, name, default, untrusted)
731 v = self.config(section, name, default, untrusted)
733 if v is None:
732 if v is None:
734 return None
733 return None
735 if not os.path.isabs(v) or b"://" not in v:
734 if not os.path.isabs(v) or b"://" not in v:
736 src = self.configsource(section, name, untrusted)
735 src = self.configsource(section, name, untrusted)
737 if b':' in src:
736 if b':' in src:
738 base = os.path.dirname(src.rsplit(b':')[0])
737 base = os.path.dirname(src.rsplit(b':')[0])
739 v = os.path.join(base, os.path.expanduser(v))
738 v = os.path.join(base, os.path.expanduser(v))
740 return v
739 return v
741
740
742 def configbool(self, section, name, default=_unset, untrusted=False):
741 def configbool(self, section, name, default=_unset, untrusted=False):
743 """parse a configuration element as a boolean
742 """parse a configuration element as a boolean
744
743
745 >>> u = ui(); s = b'foo'
744 >>> u = ui(); s = b'foo'
746 >>> u.setconfig(s, b'true', b'yes')
745 >>> u.setconfig(s, b'true', b'yes')
747 >>> u.configbool(s, b'true')
746 >>> u.configbool(s, b'true')
748 True
747 True
749 >>> u.setconfig(s, b'false', b'no')
748 >>> u.setconfig(s, b'false', b'no')
750 >>> u.configbool(s, b'false')
749 >>> u.configbool(s, b'false')
751 False
750 False
752 >>> u.configbool(s, b'unknown')
751 >>> u.configbool(s, b'unknown')
753 False
752 False
754 >>> u.configbool(s, b'unknown', True)
753 >>> u.configbool(s, b'unknown', True)
755 True
754 True
756 >>> u.setconfig(s, b'invalid', b'somevalue')
755 >>> u.setconfig(s, b'invalid', b'somevalue')
757 >>> u.configbool(s, b'invalid')
756 >>> u.configbool(s, b'invalid')
758 Traceback (most recent call last):
757 Traceback (most recent call last):
759 ...
758 ...
760 ConfigError: foo.invalid is not a boolean ('somevalue')
759 ConfigError: foo.invalid is not a boolean ('somevalue')
761 """
760 """
762
761
763 v = self._config(section, name, default, untrusted=untrusted)
762 v = self._config(section, name, default, untrusted=untrusted)
764 if v is None:
763 if v is None:
765 return v
764 return v
766 if v is _unset:
765 if v is _unset:
767 if default is _unset:
766 if default is _unset:
768 return False
767 return False
769 return default
768 return default
770 if isinstance(v, bool):
769 if isinstance(v, bool):
771 return v
770 return v
772 b = stringutil.parsebool(v)
771 b = stringutil.parsebool(v)
773 if b is None:
772 if b is None:
774 raise error.ConfigError(
773 raise error.ConfigError(
775 _(b"%s.%s is not a boolean ('%s')") % (section, name, v)
774 _(b"%s.%s is not a boolean ('%s')") % (section, name, v)
776 )
775 )
777 return b
776 return b
778
777
779 def configwith(
778 def configwith(
780 self, convert, section, name, default=_unset, desc=None, untrusted=False
779 self, convert, section, name, default=_unset, desc=None, untrusted=False
781 ):
780 ):
782 """parse a configuration element with a conversion function
781 """parse a configuration element with a conversion function
783
782
784 >>> u = ui(); s = b'foo'
783 >>> u = ui(); s = b'foo'
785 >>> u.setconfig(s, b'float1', b'42')
784 >>> u.setconfig(s, b'float1', b'42')
786 >>> u.configwith(float, s, b'float1')
785 >>> u.configwith(float, s, b'float1')
787 42.0
786 42.0
788 >>> u.setconfig(s, b'float2', b'-4.25')
787 >>> u.setconfig(s, b'float2', b'-4.25')
789 >>> u.configwith(float, s, b'float2')
788 >>> u.configwith(float, s, b'float2')
790 -4.25
789 -4.25
791 >>> u.configwith(float, s, b'unknown', 7)
790 >>> u.configwith(float, s, b'unknown', 7)
792 7.0
791 7.0
793 >>> u.setconfig(s, b'invalid', b'somevalue')
792 >>> u.setconfig(s, b'invalid', b'somevalue')
794 >>> u.configwith(float, s, b'invalid')
793 >>> u.configwith(float, s, b'invalid')
795 Traceback (most recent call last):
794 Traceback (most recent call last):
796 ...
795 ...
797 ConfigError: foo.invalid is not a valid float ('somevalue')
796 ConfigError: foo.invalid is not a valid float ('somevalue')
798 >>> u.configwith(float, s, b'invalid', desc=b'womble')
797 >>> u.configwith(float, s, b'invalid', desc=b'womble')
799 Traceback (most recent call last):
798 Traceback (most recent call last):
800 ...
799 ...
801 ConfigError: foo.invalid is not a valid womble ('somevalue')
800 ConfigError: foo.invalid is not a valid womble ('somevalue')
802 """
801 """
803
802
804 v = self.config(section, name, default, untrusted)
803 v = self.config(section, name, default, untrusted)
805 if v is None:
804 if v is None:
806 return v # do not attempt to convert None
805 return v # do not attempt to convert None
807 try:
806 try:
808 return convert(v)
807 return convert(v)
809 except (ValueError, error.ParseError):
808 except (ValueError, error.ParseError):
810 if desc is None:
809 if desc is None:
811 desc = pycompat.sysbytes(convert.__name__)
810 desc = pycompat.sysbytes(convert.__name__)
812 raise error.ConfigError(
811 raise error.ConfigError(
813 _(b"%s.%s is not a valid %s ('%s')") % (section, name, desc, v)
812 _(b"%s.%s is not a valid %s ('%s')") % (section, name, desc, v)
814 )
813 )
815
814
816 def configint(self, section, name, default=_unset, untrusted=False):
815 def configint(self, section, name, default=_unset, untrusted=False):
817 """parse a configuration element as an integer
816 """parse a configuration element as an integer
818
817
819 >>> u = ui(); s = b'foo'
818 >>> u = ui(); s = b'foo'
820 >>> u.setconfig(s, b'int1', b'42')
819 >>> u.setconfig(s, b'int1', b'42')
821 >>> u.configint(s, b'int1')
820 >>> u.configint(s, b'int1')
822 42
821 42
823 >>> u.setconfig(s, b'int2', b'-42')
822 >>> u.setconfig(s, b'int2', b'-42')
824 >>> u.configint(s, b'int2')
823 >>> u.configint(s, b'int2')
825 -42
824 -42
826 >>> u.configint(s, b'unknown', 7)
825 >>> u.configint(s, b'unknown', 7)
827 7
826 7
828 >>> u.setconfig(s, b'invalid', b'somevalue')
827 >>> u.setconfig(s, b'invalid', b'somevalue')
829 >>> u.configint(s, b'invalid')
828 >>> u.configint(s, b'invalid')
830 Traceback (most recent call last):
829 Traceback (most recent call last):
831 ...
830 ...
832 ConfigError: foo.invalid is not a valid integer ('somevalue')
831 ConfigError: foo.invalid is not a valid integer ('somevalue')
833 """
832 """
834
833
835 return self.configwith(
834 return self.configwith(
836 int, section, name, default, b'integer', untrusted
835 int, section, name, default, b'integer', untrusted
837 )
836 )
838
837
839 def configbytes(self, section, name, default=_unset, untrusted=False):
838 def configbytes(self, section, name, default=_unset, untrusted=False):
840 """parse a configuration element as a quantity in bytes
839 """parse a configuration element as a quantity in bytes
841
840
842 Units can be specified as b (bytes), k or kb (kilobytes), m or
841 Units can be specified as b (bytes), k or kb (kilobytes), m or
843 mb (megabytes), g or gb (gigabytes).
842 mb (megabytes), g or gb (gigabytes).
844
843
845 >>> u = ui(); s = b'foo'
844 >>> u = ui(); s = b'foo'
846 >>> u.setconfig(s, b'val1', b'42')
845 >>> u.setconfig(s, b'val1', b'42')
847 >>> u.configbytes(s, b'val1')
846 >>> u.configbytes(s, b'val1')
848 42
847 42
849 >>> u.setconfig(s, b'val2', b'42.5 kb')
848 >>> u.setconfig(s, b'val2', b'42.5 kb')
850 >>> u.configbytes(s, b'val2')
849 >>> u.configbytes(s, b'val2')
851 43520
850 43520
852 >>> u.configbytes(s, b'unknown', b'7 MB')
851 >>> u.configbytes(s, b'unknown', b'7 MB')
853 7340032
852 7340032
854 >>> u.setconfig(s, b'invalid', b'somevalue')
853 >>> u.setconfig(s, b'invalid', b'somevalue')
855 >>> u.configbytes(s, b'invalid')
854 >>> u.configbytes(s, b'invalid')
856 Traceback (most recent call last):
855 Traceback (most recent call last):
857 ...
856 ...
858 ConfigError: foo.invalid is not a byte quantity ('somevalue')
857 ConfigError: foo.invalid is not a byte quantity ('somevalue')
859 """
858 """
860
859
861 value = self._config(section, name, default, untrusted)
860 value = self._config(section, name, default, untrusted)
862 if value is _unset:
861 if value is _unset:
863 if default is _unset:
862 if default is _unset:
864 default = 0
863 default = 0
865 value = default
864 value = default
866 if not isinstance(value, bytes):
865 if not isinstance(value, bytes):
867 return value
866 return value
868 try:
867 try:
869 return util.sizetoint(value)
868 return util.sizetoint(value)
870 except error.ParseError:
869 except error.ParseError:
871 raise error.ConfigError(
870 raise error.ConfigError(
872 _(b"%s.%s is not a byte quantity ('%s')")
871 _(b"%s.%s is not a byte quantity ('%s')")
873 % (section, name, value)
872 % (section, name, value)
874 )
873 )
875
874
876 def configlist(self, section, name, default=_unset, untrusted=False):
875 def configlist(self, section, name, default=_unset, untrusted=False):
877 """parse a configuration element as a list of comma/space separated
876 """parse a configuration element as a list of comma/space separated
878 strings
877 strings
879
878
880 >>> u = ui(); s = b'foo'
879 >>> u = ui(); s = b'foo'
881 >>> u.setconfig(s, b'list1', b'this,is "a small" ,test')
880 >>> u.setconfig(s, b'list1', b'this,is "a small" ,test')
882 >>> u.configlist(s, b'list1')
881 >>> u.configlist(s, b'list1')
883 ['this', 'is', 'a small', 'test']
882 ['this', 'is', 'a small', 'test']
884 >>> u.setconfig(s, b'list2', b'this, is "a small" , test ')
883 >>> u.setconfig(s, b'list2', b'this, is "a small" , test ')
885 >>> u.configlist(s, b'list2')
884 >>> u.configlist(s, b'list2')
886 ['this', 'is', 'a small', 'test']
885 ['this', 'is', 'a small', 'test']
887 """
886 """
888 # default is not always a list
887 # default is not always a list
889 v = self.configwith(
888 v = self.configwith(
890 stringutil.parselist, section, name, default, b'list', untrusted
889 stringutil.parselist, section, name, default, b'list', untrusted
891 )
890 )
892 if isinstance(v, bytes):
891 if isinstance(v, bytes):
893 return stringutil.parselist(v)
892 return stringutil.parselist(v)
894 elif v is None:
893 elif v is None:
895 return []
894 return []
896 return v
895 return v
897
896
898 def configdate(self, section, name, default=_unset, untrusted=False):
897 def configdate(self, section, name, default=_unset, untrusted=False):
899 """parse a configuration element as a tuple of ints
898 """parse a configuration element as a tuple of ints
900
899
901 >>> u = ui(); s = b'foo'
900 >>> u = ui(); s = b'foo'
902 >>> u.setconfig(s, b'date', b'0 0')
901 >>> u.setconfig(s, b'date', b'0 0')
903 >>> u.configdate(s, b'date')
902 >>> u.configdate(s, b'date')
904 (0, 0)
903 (0, 0)
905 """
904 """
906 if self.config(section, name, default, untrusted):
905 if self.config(section, name, default, untrusted):
907 return self.configwith(
906 return self.configwith(
908 dateutil.parsedate, section, name, default, b'date', untrusted
907 dateutil.parsedate, section, name, default, b'date', untrusted
909 )
908 )
910 if default is _unset:
909 if default is _unset:
911 return None
910 return None
912 return default
911 return default
913
912
914 def configdefault(self, section, name):
913 def configdefault(self, section, name):
915 """returns the default value of the config item"""
914 """returns the default value of the config item"""
916 item = self._knownconfig.get(section, {}).get(name)
915 item = self._knownconfig.get(section, {}).get(name)
917 itemdefault = None
916 itemdefault = None
918 if item is not None:
917 if item is not None:
919 if callable(item.default):
918 if callable(item.default):
920 itemdefault = item.default()
919 itemdefault = item.default()
921 else:
920 else:
922 itemdefault = item.default
921 itemdefault = item.default
923 return itemdefault
922 return itemdefault
924
923
925 def hasconfig(self, section, name, untrusted=False):
924 def hasconfig(self, section, name, untrusted=False):
926 return self._data(untrusted).hasitem(section, name)
925 return self._data(untrusted).hasitem(section, name)
927
926
928 def has_section(self, section, untrusted=False):
927 def has_section(self, section, untrusted=False):
929 '''tell whether section exists in config.'''
928 '''tell whether section exists in config.'''
930 return section in self._data(untrusted)
929 return section in self._data(untrusted)
931
930
932 def configitems(self, section, untrusted=False, ignoresub=False):
931 def configitems(self, section, untrusted=False, ignoresub=False):
933 items = self._data(untrusted).items(section)
932 items = self._data(untrusted).items(section)
934 if ignoresub:
933 if ignoresub:
935 items = [i for i in items if b':' not in i[0]]
934 items = [i for i in items if b':' not in i[0]]
936 if self.debugflag and not untrusted and self._reportuntrusted:
935 if self.debugflag and not untrusted and self._reportuntrusted:
937 for k, v in self._ucfg.items(section):
936 for k, v in self._ucfg.items(section):
938 if self._tcfg.get(section, k) != v:
937 if self._tcfg.get(section, k) != v:
939 self.debug(
938 self.debug(
940 b"ignoring untrusted configuration option "
939 b"ignoring untrusted configuration option "
941 b"%s.%s = %s\n" % (section, k, v)
940 b"%s.%s = %s\n" % (section, k, v)
942 )
941 )
943 return items
942 return items
944
943
945 def walkconfig(self, untrusted=False):
944 def walkconfig(self, untrusted=False):
946 cfg = self._data(untrusted)
945 cfg = self._data(untrusted)
947 for section in cfg.sections():
946 for section in cfg.sections():
948 for name, value in self.configitems(section, untrusted):
947 for name, value in self.configitems(section, untrusted):
949 yield section, name, value
948 yield section, name, value
950
949
951 def plain(self, feature=None):
950 def plain(self, feature=None):
952 """is plain mode active?
951 """is plain mode active?
953
952
954 Plain mode means that all configuration variables which affect
953 Plain mode means that all configuration variables which affect
955 the behavior and output of Mercurial should be
954 the behavior and output of Mercurial should be
956 ignored. Additionally, the output should be stable,
955 ignored. Additionally, the output should be stable,
957 reproducible and suitable for use in scripts or applications.
956 reproducible and suitable for use in scripts or applications.
958
957
959 The only way to trigger plain mode is by setting either the
958 The only way to trigger plain mode is by setting either the
960 `HGPLAIN' or `HGPLAINEXCEPT' environment variables.
959 `HGPLAIN' or `HGPLAINEXCEPT' environment variables.
961
960
962 The return value can either be
961 The return value can either be
963 - False if HGPLAIN is not set, or feature is in HGPLAINEXCEPT
962 - False if HGPLAIN is not set, or feature is in HGPLAINEXCEPT
964 - False if feature is disabled by default and not included in HGPLAIN
963 - False if feature is disabled by default and not included in HGPLAIN
965 - True otherwise
964 - True otherwise
966 """
965 """
967 if (
966 if (
968 b'HGPLAIN' not in encoding.environ
967 b'HGPLAIN' not in encoding.environ
969 and b'HGPLAINEXCEPT' not in encoding.environ
968 and b'HGPLAINEXCEPT' not in encoding.environ
970 ):
969 ):
971 return False
970 return False
972 exceptions = (
971 exceptions = (
973 encoding.environ.get(b'HGPLAINEXCEPT', b'').strip().split(b',')
972 encoding.environ.get(b'HGPLAINEXCEPT', b'').strip().split(b',')
974 )
973 )
975 # TODO: add support for HGPLAIN=+feature,-feature syntax
974 # TODO: add support for HGPLAIN=+feature,-feature syntax
976 if b'+strictflags' not in encoding.environ.get(b'HGPLAIN', b'').split(
975 if b'+strictflags' not in encoding.environ.get(b'HGPLAIN', b'').split(
977 b','
976 b','
978 ):
977 ):
979 exceptions.append(b'strictflags')
978 exceptions.append(b'strictflags')
980 if feature and exceptions:
979 if feature and exceptions:
981 return feature not in exceptions
980 return feature not in exceptions
982 return True
981 return True
983
982
984 def username(self, acceptempty=False):
983 def username(self, acceptempty=False):
985 """Return default username to be used in commits.
984 """Return default username to be used in commits.
986
985
987 Searched in this order: $HGUSER, [ui] section of hgrcs, $EMAIL
986 Searched in this order: $HGUSER, [ui] section of hgrcs, $EMAIL
988 and stop searching if one of these is set.
987 and stop searching if one of these is set.
989 If not found and acceptempty is True, returns None.
988 If not found and acceptempty is True, returns None.
990 If not found and ui.askusername is True, ask the user, else use
989 If not found and ui.askusername is True, ask the user, else use
991 ($LOGNAME or $USER or $LNAME or $USERNAME) + "@full.hostname".
990 ($LOGNAME or $USER or $LNAME or $USERNAME) + "@full.hostname".
992 If no username could be found, raise an Abort error.
991 If no username could be found, raise an Abort error.
993 """
992 """
994 user = encoding.environ.get(b"HGUSER")
993 user = encoding.environ.get(b"HGUSER")
995 if user is None:
994 if user is None:
996 user = self.config(b"ui", b"username")
995 user = self.config(b"ui", b"username")
997 if user is not None:
996 if user is not None:
998 user = os.path.expandvars(user)
997 user = os.path.expandvars(user)
999 if user is None:
998 if user is None:
1000 user = encoding.environ.get(b"EMAIL")
999 user = encoding.environ.get(b"EMAIL")
1001 if user is None and acceptempty:
1000 if user is None and acceptempty:
1002 return user
1001 return user
1003 if user is None and self.configbool(b"ui", b"askusername"):
1002 if user is None and self.configbool(b"ui", b"askusername"):
1004 user = self.prompt(_(b"enter a commit username:"), default=None)
1003 user = self.prompt(_(b"enter a commit username:"), default=None)
1005 if user is None and not self.interactive():
1004 if user is None and not self.interactive():
1006 try:
1005 try:
1007 user = b'%s@%s' % (
1006 user = b'%s@%s' % (
1008 procutil.getuser(),
1007 procutil.getuser(),
1009 encoding.strtolocal(socket.getfqdn()),
1008 encoding.strtolocal(socket.getfqdn()),
1010 )
1009 )
1011 self.warn(_(b"no username found, using '%s' instead\n") % user)
1010 self.warn(_(b"no username found, using '%s' instead\n") % user)
1012 except KeyError:
1011 except KeyError:
1013 pass
1012 pass
1014 if not user:
1013 if not user:
1015 raise error.Abort(
1014 raise error.Abort(
1016 _(b'no username supplied'),
1015 _(b'no username supplied'),
1017 hint=_(b"use 'hg config --edit' " b'to set your username'),
1016 hint=_(b"use 'hg config --edit' " b'to set your username'),
1018 )
1017 )
1019 if b"\n" in user:
1018 if b"\n" in user:
1020 raise error.Abort(
1019 raise error.Abort(
1021 _(b"username %r contains a newline\n") % pycompat.bytestr(user)
1020 _(b"username %r contains a newline\n") % pycompat.bytestr(user)
1022 )
1021 )
1023 return user
1022 return user
1024
1023
1025 def shortuser(self, user):
1024 def shortuser(self, user):
1026 """Return a short representation of a user name or email address."""
1025 """Return a short representation of a user name or email address."""
1027 if not self.verbose:
1026 if not self.verbose:
1028 user = stringutil.shortuser(user)
1027 user = stringutil.shortuser(user)
1029 return user
1028 return user
1030
1029
1031 def expandpath(self, loc, default=None):
1030 def expandpath(self, loc, default=None):
1032 """Return repository location relative to cwd or from [paths]"""
1031 """Return repository location relative to cwd or from [paths]"""
1033 msg = b'ui.expandpath is deprecated, use `get_*` functions from urlutil'
1032 msg = b'ui.expandpath is deprecated, use `get_*` functions from urlutil'
1034 self.deprecwarn(msg, b'6.0')
1033 self.deprecwarn(msg, b'6.0')
1035 try:
1034 try:
1036 p = self.getpath(loc)
1035 p = self.getpath(loc)
1037 if p:
1036 if p:
1038 return p.rawloc
1037 return p.rawloc
1039 except error.RepoError:
1038 except error.RepoError:
1040 pass
1039 pass
1041
1040
1042 if default:
1041 if default:
1043 try:
1042 try:
1044 p = self.getpath(default)
1043 p = self.getpath(default)
1045 if p:
1044 if p:
1046 return p.rawloc
1045 return p.rawloc
1047 except error.RepoError:
1046 except error.RepoError:
1048 pass
1047 pass
1049
1048
1050 return loc
1049 return loc
1051
1050
1052 @util.propertycache
1051 @util.propertycache
1053 def paths(self):
1052 def paths(self):
1054 return urlutil.paths(self)
1053 return urlutil.paths(self)
1055
1054
1056 def getpath(self, *args, **kwargs):
1055 def getpath(self, *args, **kwargs):
1057 """see paths.getpath for details
1056 """see paths.getpath for details
1058
1057
1059 This method exist as `getpath` need a ui for potential warning message.
1058 This method exist as `getpath` need a ui for potential warning message.
1060 """
1059 """
1061 msg = b'ui.getpath is deprecated, use `get_*` functions from urlutil'
1060 msg = b'ui.getpath is deprecated, use `get_*` functions from urlutil'
1062 self.deprecwarn(msg, '6.0')
1061 self.deprecwarn(msg, '6.0')
1063 return self.paths.getpath(self, *args, **kwargs)
1062 return self.paths.getpath(self, *args, **kwargs)
1064
1063
1065 @property
1064 @property
1066 def fout(self):
1065 def fout(self):
1067 return self._fout
1066 return self._fout
1068
1067
1069 @fout.setter
1068 @fout.setter
1070 def fout(self, f):
1069 def fout(self, f):
1071 self._fout = f
1070 self._fout = f
1072 self._fmsgout, self._fmsgerr = _selectmsgdests(self)
1071 self._fmsgout, self._fmsgerr = _selectmsgdests(self)
1073
1072
1074 @property
1073 @property
1075 def ferr(self):
1074 def ferr(self):
1076 return self._ferr
1075 return self._ferr
1077
1076
1078 @ferr.setter
1077 @ferr.setter
1079 def ferr(self, f):
1078 def ferr(self, f):
1080 self._ferr = f
1079 self._ferr = f
1081 self._fmsgout, self._fmsgerr = _selectmsgdests(self)
1080 self._fmsgout, self._fmsgerr = _selectmsgdests(self)
1082
1081
1083 @property
1082 @property
1084 def fin(self):
1083 def fin(self):
1085 return self._fin
1084 return self._fin
1086
1085
1087 @fin.setter
1086 @fin.setter
1088 def fin(self, f):
1087 def fin(self, f):
1089 self._fin = f
1088 self._fin = f
1090
1089
1091 @property
1090 @property
1092 def fmsg(self):
1091 def fmsg(self):
1093 """Stream dedicated for status/error messages; may be None if
1092 """Stream dedicated for status/error messages; may be None if
1094 fout/ferr are used"""
1093 fout/ferr are used"""
1095 return self._fmsg
1094 return self._fmsg
1096
1095
1097 @fmsg.setter
1096 @fmsg.setter
1098 def fmsg(self, f):
1097 def fmsg(self, f):
1099 self._fmsg = f
1098 self._fmsg = f
1100 self._fmsgout, self._fmsgerr = _selectmsgdests(self)
1099 self._fmsgout, self._fmsgerr = _selectmsgdests(self)
1101
1100
1102 def pushbuffer(self, error=False, subproc=False, labeled=False):
1101 def pushbuffer(self, error=False, subproc=False, labeled=False):
1103 """install a buffer to capture standard output of the ui object
1102 """install a buffer to capture standard output of the ui object
1104
1103
1105 If error is True, the error output will be captured too.
1104 If error is True, the error output will be captured too.
1106
1105
1107 If subproc is True, output from subprocesses (typically hooks) will be
1106 If subproc is True, output from subprocesses (typically hooks) will be
1108 captured too.
1107 captured too.
1109
1108
1110 If labeled is True, any labels associated with buffered
1109 If labeled is True, any labels associated with buffered
1111 output will be handled. By default, this has no effect
1110 output will be handled. By default, this has no effect
1112 on the output returned, but extensions and GUI tools may
1111 on the output returned, but extensions and GUI tools may
1113 handle this argument and returned styled output. If output
1112 handle this argument and returned styled output. If output
1114 is being buffered so it can be captured and parsed or
1113 is being buffered so it can be captured and parsed or
1115 processed, labeled should not be set to True.
1114 processed, labeled should not be set to True.
1116 """
1115 """
1117 self._buffers.append([])
1116 self._buffers.append([])
1118 self._bufferstates.append((error, subproc, labeled))
1117 self._bufferstates.append((error, subproc, labeled))
1119 self._bufferapplylabels = labeled
1118 self._bufferapplylabels = labeled
1120
1119
1121 def popbuffer(self):
1120 def popbuffer(self):
1122 '''pop the last buffer and return the buffered output'''
1121 '''pop the last buffer and return the buffered output'''
1123 self._bufferstates.pop()
1122 self._bufferstates.pop()
1124 if self._bufferstates:
1123 if self._bufferstates:
1125 self._bufferapplylabels = self._bufferstates[-1][2]
1124 self._bufferapplylabels = self._bufferstates[-1][2]
1126 else:
1125 else:
1127 self._bufferapplylabels = None
1126 self._bufferapplylabels = None
1128
1127
1129 return b"".join(self._buffers.pop())
1128 return b"".join(self._buffers.pop())
1130
1129
1131 def _isbuffered(self, dest):
1130 def _isbuffered(self, dest):
1132 if dest is self._fout:
1131 if dest is self._fout:
1133 return bool(self._buffers)
1132 return bool(self._buffers)
1134 if dest is self._ferr:
1133 if dest is self._ferr:
1135 return bool(self._bufferstates and self._bufferstates[-1][0])
1134 return bool(self._bufferstates and self._bufferstates[-1][0])
1136 return False
1135 return False
1137
1136
1138 def canwritewithoutlabels(self):
1137 def canwritewithoutlabels(self):
1139 '''check if write skips the label'''
1138 '''check if write skips the label'''
1140 if self._buffers and not self._bufferapplylabels:
1139 if self._buffers and not self._bufferapplylabels:
1141 return True
1140 return True
1142 return self._colormode is None
1141 return self._colormode is None
1143
1142
1144 def canbatchlabeledwrites(self):
1143 def canbatchlabeledwrites(self):
1145 '''check if write calls with labels are batchable'''
1144 '''check if write calls with labels are batchable'''
1146 # Windows color printing is special, see ``write``.
1145 # Windows color printing is special, see ``write``.
1147 return self._colormode != b'win32'
1146 return self._colormode != b'win32'
1148
1147
1149 def write(self, *args, **opts):
1148 def write(self, *args, **opts):
1150 """write args to output
1149 """write args to output
1151
1150
1152 By default, this method simply writes to the buffer or stdout.
1151 By default, this method simply writes to the buffer or stdout.
1153 Color mode can be set on the UI class to have the output decorated
1152 Color mode can be set on the UI class to have the output decorated
1154 with color modifier before being written to stdout.
1153 with color modifier before being written to stdout.
1155
1154
1156 The color used is controlled by an optional keyword argument, "label".
1155 The color used is controlled by an optional keyword argument, "label".
1157 This should be a string containing label names separated by space.
1156 This should be a string containing label names separated by space.
1158 Label names take the form of "topic.type". For example, ui.debug()
1157 Label names take the form of "topic.type". For example, ui.debug()
1159 issues a label of "ui.debug".
1158 issues a label of "ui.debug".
1160
1159
1161 Progress reports via stderr are normally cleared before writing as
1160 Progress reports via stderr are normally cleared before writing as
1162 stdout and stderr go to the same terminal. This can be skipped with
1161 stdout and stderr go to the same terminal. This can be skipped with
1163 the optional keyword argument "keepprogressbar". The progress bar
1162 the optional keyword argument "keepprogressbar". The progress bar
1164 will continue to occupy a partial line on stderr in that case.
1163 will continue to occupy a partial line on stderr in that case.
1165 This functionality is intended when Mercurial acts as data source
1164 This functionality is intended when Mercurial acts as data source
1166 in a pipe.
1165 in a pipe.
1167
1166
1168 When labeling output for a specific command, a label of
1167 When labeling output for a specific command, a label of
1169 "cmdname.type" is recommended. For example, status issues
1168 "cmdname.type" is recommended. For example, status issues
1170 a label of "status.modified" for modified files.
1169 a label of "status.modified" for modified files.
1171 """
1170 """
1172 dest = self._fout
1171 dest = self._fout
1173
1172
1174 # inlined _write() for speed
1173 # inlined _write() for speed
1175 if self._buffers:
1174 if self._buffers:
1176 label = opts.get('label', b'')
1175 label = opts.get('label', b'')
1177 if label and self._bufferapplylabels:
1176 if label and self._bufferapplylabels:
1178 self._buffers[-1].extend(self.label(a, label) for a in args)
1177 self._buffers[-1].extend(self.label(a, label) for a in args)
1179 else:
1178 else:
1180 self._buffers[-1].extend(args)
1179 self._buffers[-1].extend(args)
1181 return
1180 return
1182
1181
1183 # inlined _writenobuf() for speed
1182 # inlined _writenobuf() for speed
1184 if not opts.get('keepprogressbar', False):
1183 if not opts.get('keepprogressbar', False):
1185 self._progclear()
1184 self._progclear()
1186 msg = b''.join(args)
1185 msg = b''.join(args)
1187
1186
1188 # opencode timeblockedsection because this is a critical path
1187 # opencode timeblockedsection because this is a critical path
1189 starttime = util.timer()
1188 starttime = util.timer()
1190 try:
1189 try:
1191 if self._colormode == b'win32':
1190 if self._colormode == b'win32':
1192 # windows color printing is its own can of crab, defer to
1191 # windows color printing is its own can of crab, defer to
1193 # the color module and that is it.
1192 # the color module and that is it.
1194 color.win32print(self, dest.write, msg, **opts)
1193 color.win32print(self, dest.write, msg, **opts)
1195 else:
1194 else:
1196 if self._colormode is not None:
1195 if self._colormode is not None:
1197 label = opts.get('label', b'')
1196 label = opts.get('label', b'')
1198 msg = self.label(msg, label)
1197 msg = self.label(msg, label)
1199 dest.write(msg)
1198 dest.write(msg)
1200 except IOError as err:
1199 except IOError as err:
1201 raise error.StdioError(err)
1200 raise error.StdioError(err)
1202 finally:
1201 finally:
1203 self._blockedtimes[b'stdio_blocked'] += (
1202 self._blockedtimes[b'stdio_blocked'] += (
1204 util.timer() - starttime
1203 util.timer() - starttime
1205 ) * 1000
1204 ) * 1000
1206
1205
1207 def write_err(self, *args, **opts):
1206 def write_err(self, *args, **opts):
1208 self._write(self._ferr, *args, **opts)
1207 self._write(self._ferr, *args, **opts)
1209
1208
1210 def _write(self, dest, *args, **opts):
1209 def _write(self, dest, *args, **opts):
1211 # update write() as well if you touch this code
1210 # update write() as well if you touch this code
1212 if self._isbuffered(dest):
1211 if self._isbuffered(dest):
1213 label = opts.get('label', b'')
1212 label = opts.get('label', b'')
1214 if label and self._bufferapplylabels:
1213 if label and self._bufferapplylabels:
1215 self._buffers[-1].extend(self.label(a, label) for a in args)
1214 self._buffers[-1].extend(self.label(a, label) for a in args)
1216 else:
1215 else:
1217 self._buffers[-1].extend(args)
1216 self._buffers[-1].extend(args)
1218 else:
1217 else:
1219 self._writenobuf(dest, *args, **opts)
1218 self._writenobuf(dest, *args, **opts)
1220
1219
1221 def _writenobuf(self, dest, *args, **opts):
1220 def _writenobuf(self, dest, *args, **opts):
1222 # update write() as well if you touch this code
1221 # update write() as well if you touch this code
1223 if not opts.get('keepprogressbar', False):
1222 if not opts.get('keepprogressbar', False):
1224 self._progclear()
1223 self._progclear()
1225 msg = b''.join(args)
1224 msg = b''.join(args)
1226
1225
1227 # opencode timeblockedsection because this is a critical path
1226 # opencode timeblockedsection because this is a critical path
1228 starttime = util.timer()
1227 starttime = util.timer()
1229 try:
1228 try:
1230 if dest is self._ferr and not getattr(self._fout, 'closed', False):
1229 if dest is self._ferr and not getattr(self._fout, 'closed', False):
1231 self._fout.flush()
1230 self._fout.flush()
1232 if getattr(dest, 'structured', False):
1231 if getattr(dest, 'structured', False):
1233 # channel for machine-readable output with metadata, where
1232 # channel for machine-readable output with metadata, where
1234 # no extra colorization is necessary.
1233 # no extra colorization is necessary.
1235 dest.write(msg, **opts)
1234 dest.write(msg, **opts)
1236 elif self._colormode == b'win32':
1235 elif self._colormode == b'win32':
1237 # windows color printing is its own can of crab, defer to
1236 # windows color printing is its own can of crab, defer to
1238 # the color module and that is it.
1237 # the color module and that is it.
1239 color.win32print(self, dest.write, msg, **opts)
1238 color.win32print(self, dest.write, msg, **opts)
1240 else:
1239 else:
1241 if self._colormode is not None:
1240 if self._colormode is not None:
1242 label = opts.get('label', b'')
1241 label = opts.get('label', b'')
1243 msg = self.label(msg, label)
1242 msg = self.label(msg, label)
1244 dest.write(msg)
1243 dest.write(msg)
1245 # stderr may be buffered under win32 when redirected to files,
1244 # stderr may be buffered under win32 when redirected to files,
1246 # including stdout.
1245 # including stdout.
1247 if dest is self._ferr and not getattr(dest, 'closed', False):
1246 if dest is self._ferr and not getattr(dest, 'closed', False):
1248 dest.flush()
1247 dest.flush()
1249 except IOError as err:
1248 except IOError as err:
1250 if dest is self._ferr and err.errno in (
1249 if dest is self._ferr and err.errno in (
1251 errno.EPIPE,
1250 errno.EPIPE,
1252 errno.EIO,
1251 errno.EIO,
1253 errno.EBADF,
1252 errno.EBADF,
1254 ):
1253 ):
1255 # no way to report the error, so ignore it
1254 # no way to report the error, so ignore it
1256 return
1255 return
1257 raise error.StdioError(err)
1256 raise error.StdioError(err)
1258 finally:
1257 finally:
1259 self._blockedtimes[b'stdio_blocked'] += (
1258 self._blockedtimes[b'stdio_blocked'] += (
1260 util.timer() - starttime
1259 util.timer() - starttime
1261 ) * 1000
1260 ) * 1000
1262
1261
1263 def _writemsg(self, dest, *args, **opts):
1262 def _writemsg(self, dest, *args, **opts):
1264 timestamp = self.showtimestamp and opts.get('type') in {
1263 timestamp = self.showtimestamp and opts.get('type') in {
1265 b'debug',
1264 b'debug',
1266 b'error',
1265 b'error',
1267 b'note',
1266 b'note',
1268 b'status',
1267 b'status',
1269 b'warning',
1268 b'warning',
1270 }
1269 }
1271 if timestamp:
1270 if timestamp:
1272 args = (
1271 args = (
1273 b'[%s] '
1272 b'[%s] '
1274 % pycompat.bytestr(datetime.datetime.now().isoformat()),
1273 % pycompat.bytestr(datetime.datetime.now().isoformat()),
1275 ) + args
1274 ) + args
1276 _writemsgwith(self._write, dest, *args, **opts)
1275 _writemsgwith(self._write, dest, *args, **opts)
1277 if timestamp:
1276 if timestamp:
1278 dest.flush()
1277 dest.flush()
1279
1278
1280 def _writemsgnobuf(self, dest, *args, **opts):
1279 def _writemsgnobuf(self, dest, *args, **opts):
1281 _writemsgwith(self._writenobuf, dest, *args, **opts)
1280 _writemsgwith(self._writenobuf, dest, *args, **opts)
1282
1281
1283 def flush(self):
1282 def flush(self):
1284 # opencode timeblockedsection because this is a critical path
1283 # opencode timeblockedsection because this is a critical path
1285 starttime = util.timer()
1284 starttime = util.timer()
1286 try:
1285 try:
1287 try:
1286 try:
1288 self._fout.flush()
1287 self._fout.flush()
1289 except IOError as err:
1288 except IOError as err:
1290 if err.errno not in (errno.EPIPE, errno.EIO, errno.EBADF):
1289 if err.errno not in (errno.EPIPE, errno.EIO, errno.EBADF):
1291 raise error.StdioError(err)
1290 raise error.StdioError(err)
1292 finally:
1291 finally:
1293 try:
1292 try:
1294 self._ferr.flush()
1293 self._ferr.flush()
1295 except IOError as err:
1294 except IOError as err:
1296 if err.errno not in (errno.EPIPE, errno.EIO, errno.EBADF):
1295 if err.errno not in (errno.EPIPE, errno.EIO, errno.EBADF):
1297 raise error.StdioError(err)
1296 raise error.StdioError(err)
1298 finally:
1297 finally:
1299 self._blockedtimes[b'stdio_blocked'] += (
1298 self._blockedtimes[b'stdio_blocked'] += (
1300 util.timer() - starttime
1299 util.timer() - starttime
1301 ) * 1000
1300 ) * 1000
1302
1301
1303 def _isatty(self, fh):
1302 def _isatty(self, fh):
1304 if self.configbool(b'ui', b'nontty'):
1303 if self.configbool(b'ui', b'nontty'):
1305 return False
1304 return False
1306 return procutil.isatty(fh)
1305 return procutil.isatty(fh)
1307
1306
1308 def protectfinout(self):
1307 def protectfinout(self):
1309 """Duplicate ui streams and redirect original if they are stdio
1308 """Duplicate ui streams and redirect original if they are stdio
1310
1309
1311 Returns (fin, fout) which point to the original ui fds, but may be
1310 Returns (fin, fout) which point to the original ui fds, but may be
1312 copy of them. The returned streams can be considered "owned" in that
1311 copy of them. The returned streams can be considered "owned" in that
1313 print(), exec(), etc. never reach to them.
1312 print(), exec(), etc. never reach to them.
1314 """
1313 """
1315 if self._finoutredirected:
1314 if self._finoutredirected:
1316 # if already redirected, protectstdio() would just create another
1315 # if already redirected, protectstdio() would just create another
1317 # nullfd pair, which is equivalent to returning self._fin/_fout.
1316 # nullfd pair, which is equivalent to returning self._fin/_fout.
1318 return self._fin, self._fout
1317 return self._fin, self._fout
1319 fin, fout = procutil.protectstdio(self._fin, self._fout)
1318 fin, fout = procutil.protectstdio(self._fin, self._fout)
1320 self._finoutredirected = (fin, fout) != (self._fin, self._fout)
1319 self._finoutredirected = (fin, fout) != (self._fin, self._fout)
1321 return fin, fout
1320 return fin, fout
1322
1321
1323 def restorefinout(self, fin, fout):
1322 def restorefinout(self, fin, fout):
1324 """Restore ui streams from possibly duplicated (fin, fout)"""
1323 """Restore ui streams from possibly duplicated (fin, fout)"""
1325 if (fin, fout) == (self._fin, self._fout):
1324 if (fin, fout) == (self._fin, self._fout):
1326 return
1325 return
1327 procutil.restorestdio(self._fin, self._fout, fin, fout)
1326 procutil.restorestdio(self._fin, self._fout, fin, fout)
1328 # protectfinout() won't create more than one duplicated streams,
1327 # protectfinout() won't create more than one duplicated streams,
1329 # so we can just turn the redirection flag off.
1328 # so we can just turn the redirection flag off.
1330 self._finoutredirected = False
1329 self._finoutredirected = False
1331
1330
1332 @contextlib.contextmanager
1331 @contextlib.contextmanager
1333 def protectedfinout(self):
1332 def protectedfinout(self):
1334 """Run code block with protected standard streams"""
1333 """Run code block with protected standard streams"""
1335 fin, fout = self.protectfinout()
1334 fin, fout = self.protectfinout()
1336 try:
1335 try:
1337 yield fin, fout
1336 yield fin, fout
1338 finally:
1337 finally:
1339 self.restorefinout(fin, fout)
1338 self.restorefinout(fin, fout)
1340
1339
1341 def disablepager(self):
1340 def disablepager(self):
1342 self._disablepager = True
1341 self._disablepager = True
1343
1342
1344 def pager(self, command):
1343 def pager(self, command):
1345 """Start a pager for subsequent command output.
1344 """Start a pager for subsequent command output.
1346
1345
1347 Commands which produce a long stream of output should call
1346 Commands which produce a long stream of output should call
1348 this function to activate the user's preferred pagination
1347 this function to activate the user's preferred pagination
1349 mechanism (which may be no pager). Calling this function
1348 mechanism (which may be no pager). Calling this function
1350 precludes any future use of interactive functionality, such as
1349 precludes any future use of interactive functionality, such as
1351 prompting the user or activating curses.
1350 prompting the user or activating curses.
1352
1351
1353 Args:
1352 Args:
1354 command: The full, non-aliased name of the command. That is, "log"
1353 command: The full, non-aliased name of the command. That is, "log"
1355 not "history, "summary" not "summ", etc.
1354 not "history, "summary" not "summ", etc.
1356 """
1355 """
1357 if self._disablepager or self.pageractive:
1356 if self._disablepager or self.pageractive:
1358 # how pager should do is already determined
1357 # how pager should do is already determined
1359 return
1358 return
1360
1359
1361 if not command.startswith(b'internal-always-') and (
1360 if not command.startswith(b'internal-always-') and (
1362 # explicit --pager=on (= 'internal-always-' prefix) should
1361 # explicit --pager=on (= 'internal-always-' prefix) should
1363 # take precedence over disabling factors below
1362 # take precedence over disabling factors below
1364 command in self.configlist(b'pager', b'ignore')
1363 command in self.configlist(b'pager', b'ignore')
1365 or not self.configbool(b'ui', b'paginate')
1364 or not self.configbool(b'ui', b'paginate')
1366 or not self.configbool(b'pager', b'attend-' + command, True)
1365 or not self.configbool(b'pager', b'attend-' + command, True)
1367 or encoding.environ.get(b'TERM') == b'dumb'
1366 or encoding.environ.get(b'TERM') == b'dumb'
1368 # TODO: if we want to allow HGPLAINEXCEPT=pager,
1367 # TODO: if we want to allow HGPLAINEXCEPT=pager,
1369 # formatted() will need some adjustment.
1368 # formatted() will need some adjustment.
1370 or not self.formatted()
1369 or not self.formatted()
1371 or self.plain()
1370 or self.plain()
1372 or self._buffers
1371 or self._buffers
1373 # TODO: expose debugger-enabled on the UI object
1372 # TODO: expose debugger-enabled on the UI object
1374 or b'--debugger' in pycompat.sysargv
1373 or b'--debugger' in pycompat.sysargv
1375 ):
1374 ):
1376 # We only want to paginate if the ui appears to be
1375 # We only want to paginate if the ui appears to be
1377 # interactive, the user didn't say HGPLAIN or
1376 # interactive, the user didn't say HGPLAIN or
1378 # HGPLAINEXCEPT=pager, and the user didn't specify --debug.
1377 # HGPLAINEXCEPT=pager, and the user didn't specify --debug.
1379 return
1378 return
1380
1379
1381 pagercmd = self.config(b'pager', b'pager', rcutil.fallbackpager)
1380 pagercmd = self.config(b'pager', b'pager', rcutil.fallbackpager)
1382 if not pagercmd:
1381 if not pagercmd:
1383 return
1382 return
1384
1383
1385 pagerenv = {}
1384 pagerenv = {}
1386 for name, value in rcutil.defaultpagerenv().items():
1385 for name, value in rcutil.defaultpagerenv().items():
1387 if name not in encoding.environ:
1386 if name not in encoding.environ:
1388 pagerenv[name] = value
1387 pagerenv[name] = value
1389
1388
1390 self.debug(
1389 self.debug(
1391 b'starting pager for command %s\n' % stringutil.pprint(command)
1390 b'starting pager for command %s\n' % stringutil.pprint(command)
1392 )
1391 )
1393 self.flush()
1392 self.flush()
1394
1393
1395 wasformatted = self.formatted()
1394 wasformatted = self.formatted()
1396 if util.safehasattr(signal, b"SIGPIPE"):
1395 if util.safehasattr(signal, b"SIGPIPE"):
1397 signal.signal(signal.SIGPIPE, _catchterm)
1396 signal.signal(signal.SIGPIPE, _catchterm)
1398 if self._runpager(pagercmd, pagerenv):
1397 if self._runpager(pagercmd, pagerenv):
1399 self.pageractive = True
1398 self.pageractive = True
1400 # Preserve the formatted-ness of the UI. This is important
1399 # Preserve the formatted-ness of the UI. This is important
1401 # because we mess with stdout, which might confuse
1400 # because we mess with stdout, which might confuse
1402 # auto-detection of things being formatted.
1401 # auto-detection of things being formatted.
1403 self.setconfig(b'ui', b'formatted', wasformatted, b'pager')
1402 self.setconfig(b'ui', b'formatted', wasformatted, b'pager')
1404 self.setconfig(b'ui', b'interactive', False, b'pager')
1403 self.setconfig(b'ui', b'interactive', False, b'pager')
1405
1404
1406 # If pagermode differs from color.mode, reconfigure color now that
1405 # If pagermode differs from color.mode, reconfigure color now that
1407 # pageractive is set.
1406 # pageractive is set.
1408 cm = self._colormode
1407 cm = self._colormode
1409 if cm != self.config(b'color', b'pagermode', cm):
1408 if cm != self.config(b'color', b'pagermode', cm):
1410 color.setup(self)
1409 color.setup(self)
1411 else:
1410 else:
1412 # If the pager can't be spawned in dispatch when --pager=on is
1411 # If the pager can't be spawned in dispatch when --pager=on is
1413 # given, don't try again when the command runs, to avoid a duplicate
1412 # given, don't try again when the command runs, to avoid a duplicate
1414 # warning about a missing pager command.
1413 # warning about a missing pager command.
1415 self.disablepager()
1414 self.disablepager()
1416
1415
1417 def _runpager(self, command, env=None):
1416 def _runpager(self, command, env=None):
1418 """Actually start the pager and set up file descriptors.
1417 """Actually start the pager and set up file descriptors.
1419
1418
1420 This is separate in part so that extensions (like chg) can
1419 This is separate in part so that extensions (like chg) can
1421 override how a pager is invoked.
1420 override how a pager is invoked.
1422 """
1421 """
1423 if command == b'cat':
1422 if command == b'cat':
1424 # Save ourselves some work.
1423 # Save ourselves some work.
1425 return False
1424 return False
1426 # If the command doesn't contain any of these characters, we
1425 # If the command doesn't contain any of these characters, we
1427 # assume it's a binary and exec it directly. This means for
1426 # assume it's a binary and exec it directly. This means for
1428 # simple pager command configurations, we can degrade
1427 # simple pager command configurations, we can degrade
1429 # gracefully and tell the user about their broken pager.
1428 # gracefully and tell the user about their broken pager.
1430 shell = any(c in command for c in b"|&;<>()$`\\\"' \t\n*?[#~=%")
1429 shell = any(c in command for c in b"|&;<>()$`\\\"' \t\n*?[#~=%")
1431
1430
1432 if pycompat.iswindows and not shell:
1431 if pycompat.iswindows and not shell:
1433 # Window's built-in `more` cannot be invoked with shell=False, but
1432 # Window's built-in `more` cannot be invoked with shell=False, but
1434 # its `more.com` can. Hide this implementation detail from the
1433 # its `more.com` can. Hide this implementation detail from the
1435 # user so we can also get sane bad PAGER behavior. MSYS has
1434 # user so we can also get sane bad PAGER behavior. MSYS has
1436 # `more.exe`, so do a cmd.exe style resolution of the executable to
1435 # `more.exe`, so do a cmd.exe style resolution of the executable to
1437 # determine which one to use.
1436 # determine which one to use.
1438 fullcmd = procutil.findexe(command)
1437 fullcmd = procutil.findexe(command)
1439 if not fullcmd:
1438 if not fullcmd:
1440 self.warn(
1439 self.warn(
1441 _(b"missing pager command '%s', skipping pager\n") % command
1440 _(b"missing pager command '%s', skipping pager\n") % command
1442 )
1441 )
1443 return False
1442 return False
1444
1443
1445 command = fullcmd
1444 command = fullcmd
1446
1445
1447 try:
1446 try:
1448 pager = subprocess.Popen(
1447 pager = subprocess.Popen(
1449 procutil.tonativestr(command),
1448 procutil.tonativestr(command),
1450 shell=shell,
1449 shell=shell,
1451 bufsize=-1,
1450 bufsize=-1,
1452 close_fds=procutil.closefds,
1451 close_fds=procutil.closefds,
1453 stdin=subprocess.PIPE,
1452 stdin=subprocess.PIPE,
1454 stdout=procutil.stdout,
1453 stdout=procutil.stdout,
1455 stderr=procutil.stderr,
1454 stderr=procutil.stderr,
1456 env=procutil.tonativeenv(procutil.shellenviron(env)),
1455 env=procutil.tonativeenv(procutil.shellenviron(env)),
1457 )
1456 )
1458 except OSError as e:
1457 except OSError as e:
1459 if e.errno == errno.ENOENT and not shell:
1458 if e.errno == errno.ENOENT and not shell:
1460 self.warn(
1459 self.warn(
1461 _(b"missing pager command '%s', skipping pager\n") % command
1460 _(b"missing pager command '%s', skipping pager\n") % command
1462 )
1461 )
1463 return False
1462 return False
1464 raise
1463 raise
1465
1464
1466 # back up original file descriptors
1465 # back up original file descriptors
1467 stdoutfd = os.dup(procutil.stdout.fileno())
1466 stdoutfd = os.dup(procutil.stdout.fileno())
1468 stderrfd = os.dup(procutil.stderr.fileno())
1467 stderrfd = os.dup(procutil.stderr.fileno())
1469
1468
1470 os.dup2(pager.stdin.fileno(), procutil.stdout.fileno())
1469 os.dup2(pager.stdin.fileno(), procutil.stdout.fileno())
1471 if self._isatty(procutil.stderr):
1470 if self._isatty(procutil.stderr):
1472 os.dup2(pager.stdin.fileno(), procutil.stderr.fileno())
1471 os.dup2(pager.stdin.fileno(), procutil.stderr.fileno())
1473
1472
1474 @self.atexit
1473 @self.atexit
1475 def killpager():
1474 def killpager():
1476 if util.safehasattr(signal, b"SIGINT"):
1475 if util.safehasattr(signal, b"SIGINT"):
1477 signal.signal(signal.SIGINT, signal.SIG_IGN)
1476 signal.signal(signal.SIGINT, signal.SIG_IGN)
1478 # restore original fds, closing pager.stdin copies in the process
1477 # restore original fds, closing pager.stdin copies in the process
1479 os.dup2(stdoutfd, procutil.stdout.fileno())
1478 os.dup2(stdoutfd, procutil.stdout.fileno())
1480 os.dup2(stderrfd, procutil.stderr.fileno())
1479 os.dup2(stderrfd, procutil.stderr.fileno())
1481 pager.stdin.close()
1480 pager.stdin.close()
1482 pager.wait()
1481 pager.wait()
1483
1482
1484 return True
1483 return True
1485
1484
1486 @property
1485 @property
1487 def _exithandlers(self):
1486 def _exithandlers(self):
1488 return _reqexithandlers
1487 return _reqexithandlers
1489
1488
1490 def atexit(self, func, *args, **kwargs):
1489 def atexit(self, func, *args, **kwargs):
1491 """register a function to run after dispatching a request
1490 """register a function to run after dispatching a request
1492
1491
1493 Handlers do not stay registered across request boundaries."""
1492 Handlers do not stay registered across request boundaries."""
1494 self._exithandlers.append((func, args, kwargs))
1493 self._exithandlers.append((func, args, kwargs))
1495 return func
1494 return func
1496
1495
1497 def interface(self, feature):
1496 def interface(self, feature):
1498 """what interface to use for interactive console features?
1497 """what interface to use for interactive console features?
1499
1498
1500 The interface is controlled by the value of `ui.interface` but also by
1499 The interface is controlled by the value of `ui.interface` but also by
1501 the value of feature-specific configuration. For example:
1500 the value of feature-specific configuration. For example:
1502
1501
1503 ui.interface.histedit = text
1502 ui.interface.histedit = text
1504 ui.interface.chunkselector = curses
1503 ui.interface.chunkselector = curses
1505
1504
1506 Here the features are "histedit" and "chunkselector".
1505 Here the features are "histedit" and "chunkselector".
1507
1506
1508 The configuration above means that the default interfaces for commands
1507 The configuration above means that the default interfaces for commands
1509 is curses, the interface for histedit is text and the interface for
1508 is curses, the interface for histedit is text and the interface for
1510 selecting chunk is crecord (the best curses interface available).
1509 selecting chunk is crecord (the best curses interface available).
1511
1510
1512 Consider the following example:
1511 Consider the following example:
1513 ui.interface = curses
1512 ui.interface = curses
1514 ui.interface.histedit = text
1513 ui.interface.histedit = text
1515
1514
1516 Then histedit will use the text interface and chunkselector will use
1515 Then histedit will use the text interface and chunkselector will use
1517 the default curses interface (crecord at the moment).
1516 the default curses interface (crecord at the moment).
1518 """
1517 """
1519 alldefaults = frozenset([b"text", b"curses"])
1518 alldefaults = frozenset([b"text", b"curses"])
1520
1519
1521 featureinterfaces = {
1520 featureinterfaces = {
1522 b"chunkselector": [
1521 b"chunkselector": [
1523 b"text",
1522 b"text",
1524 b"curses",
1523 b"curses",
1525 ],
1524 ],
1526 b"histedit": [
1525 b"histedit": [
1527 b"text",
1526 b"text",
1528 b"curses",
1527 b"curses",
1529 ],
1528 ],
1530 }
1529 }
1531
1530
1532 # Feature-specific interface
1531 # Feature-specific interface
1533 if feature not in featureinterfaces.keys():
1532 if feature not in featureinterfaces.keys():
1534 # Programming error, not user error
1533 # Programming error, not user error
1535 raise ValueError(b"Unknown feature requested %s" % feature)
1534 raise ValueError(b"Unknown feature requested %s" % feature)
1536
1535
1537 availableinterfaces = frozenset(featureinterfaces[feature])
1536 availableinterfaces = frozenset(featureinterfaces[feature])
1538 if alldefaults > availableinterfaces:
1537 if alldefaults > availableinterfaces:
1539 # Programming error, not user error. We need a use case to
1538 # Programming error, not user error. We need a use case to
1540 # define the right thing to do here.
1539 # define the right thing to do here.
1541 raise ValueError(
1540 raise ValueError(
1542 b"Feature %s does not handle all default interfaces" % feature
1541 b"Feature %s does not handle all default interfaces" % feature
1543 )
1542 )
1544
1543
1545 if self.plain() or encoding.environ.get(b'TERM') == b'dumb':
1544 if self.plain() or encoding.environ.get(b'TERM') == b'dumb':
1546 return b"text"
1545 return b"text"
1547
1546
1548 # Default interface for all the features
1547 # Default interface for all the features
1549 defaultinterface = b"text"
1548 defaultinterface = b"text"
1550 i = self.config(b"ui", b"interface")
1549 i = self.config(b"ui", b"interface")
1551 if i in alldefaults:
1550 if i in alldefaults:
1552 defaultinterface = i
1551 defaultinterface = i
1553
1552
1554 choseninterface = defaultinterface
1553 choseninterface = defaultinterface
1555 f = self.config(b"ui", b"interface.%s" % feature)
1554 f = self.config(b"ui", b"interface.%s" % feature)
1556 if f in availableinterfaces:
1555 if f in availableinterfaces:
1557 choseninterface = f
1556 choseninterface = f
1558
1557
1559 if i is not None and defaultinterface != i:
1558 if i is not None and defaultinterface != i:
1560 if f is not None:
1559 if f is not None:
1561 self.warn(_(b"invalid value for ui.interface: %s\n") % (i,))
1560 self.warn(_(b"invalid value for ui.interface: %s\n") % (i,))
1562 else:
1561 else:
1563 self.warn(
1562 self.warn(
1564 _(b"invalid value for ui.interface: %s (using %s)\n")
1563 _(b"invalid value for ui.interface: %s (using %s)\n")
1565 % (i, choseninterface)
1564 % (i, choseninterface)
1566 )
1565 )
1567 if f is not None and choseninterface != f:
1566 if f is not None and choseninterface != f:
1568 self.warn(
1567 self.warn(
1569 _(b"invalid value for ui.interface.%s: %s (using %s)\n")
1568 _(b"invalid value for ui.interface.%s: %s (using %s)\n")
1570 % (feature, f, choseninterface)
1569 % (feature, f, choseninterface)
1571 )
1570 )
1572
1571
1573 return choseninterface
1572 return choseninterface
1574
1573
1575 def interactive(self):
1574 def interactive(self):
1576 """is interactive input allowed?
1575 """is interactive input allowed?
1577
1576
1578 An interactive session is a session where input can be reasonably read
1577 An interactive session is a session where input can be reasonably read
1579 from `sys.stdin'. If this function returns false, any attempt to read
1578 from `sys.stdin'. If this function returns false, any attempt to read
1580 from stdin should fail with an error, unless a sensible default has been
1579 from stdin should fail with an error, unless a sensible default has been
1581 specified.
1580 specified.
1582
1581
1583 Interactiveness is triggered by the value of the `ui.interactive'
1582 Interactiveness is triggered by the value of the `ui.interactive'
1584 configuration variable or - if it is unset - when `sys.stdin' points
1583 configuration variable or - if it is unset - when `sys.stdin' points
1585 to a terminal device.
1584 to a terminal device.
1586
1585
1587 This function refers to input only; for output, see `ui.formatted()'.
1586 This function refers to input only; for output, see `ui.formatted()'.
1588 """
1587 """
1589 i = self.configbool(b"ui", b"interactive")
1588 i = self.configbool(b"ui", b"interactive")
1590 if i is None:
1589 if i is None:
1591 # some environments replace stdin without implementing isatty
1590 # some environments replace stdin without implementing isatty
1592 # usually those are non-interactive
1591 # usually those are non-interactive
1593 return self._isatty(self._fin)
1592 return self._isatty(self._fin)
1594
1593
1595 return i
1594 return i
1596
1595
1597 def termwidth(self):
1596 def termwidth(self):
1598 """how wide is the terminal in columns?"""
1597 """how wide is the terminal in columns?"""
1599 if b'COLUMNS' in encoding.environ:
1598 if b'COLUMNS' in encoding.environ:
1600 try:
1599 try:
1601 return int(encoding.environ[b'COLUMNS'])
1600 return int(encoding.environ[b'COLUMNS'])
1602 except ValueError:
1601 except ValueError:
1603 pass
1602 pass
1604 return scmutil.termsize(self)[0]
1603 return scmutil.termsize(self)[0]
1605
1604
1606 def formatted(self):
1605 def formatted(self):
1607 """should formatted output be used?
1606 """should formatted output be used?
1608
1607
1609 It is often desirable to format the output to suite the output medium.
1608 It is often desirable to format the output to suite the output medium.
1610 Examples of this are truncating long lines or colorizing messages.
1609 Examples of this are truncating long lines or colorizing messages.
1611 However, this is not often not desirable when piping output into other
1610 However, this is not often not desirable when piping output into other
1612 utilities, e.g. `grep'.
1611 utilities, e.g. `grep'.
1613
1612
1614 Formatted output is triggered by the value of the `ui.formatted'
1613 Formatted output is triggered by the value of the `ui.formatted'
1615 configuration variable or - if it is unset - when `sys.stdout' points
1614 configuration variable or - if it is unset - when `sys.stdout' points
1616 to a terminal device. Please note that `ui.formatted' should be
1615 to a terminal device. Please note that `ui.formatted' should be
1617 considered an implementation detail; it is not intended for use outside
1616 considered an implementation detail; it is not intended for use outside
1618 Mercurial or its extensions.
1617 Mercurial or its extensions.
1619
1618
1620 This function refers to output only; for input, see `ui.interactive()'.
1619 This function refers to output only; for input, see `ui.interactive()'.
1621 This function always returns false when in plain mode, see `ui.plain()'.
1620 This function always returns false when in plain mode, see `ui.plain()'.
1622 """
1621 """
1623 if self.plain():
1622 if self.plain():
1624 return False
1623 return False
1625
1624
1626 i = self.configbool(b"ui", b"formatted")
1625 i = self.configbool(b"ui", b"formatted")
1627 if i is None:
1626 if i is None:
1628 # some environments replace stdout without implementing isatty
1627 # some environments replace stdout without implementing isatty
1629 # usually those are non-interactive
1628 # usually those are non-interactive
1630 return self._isatty(self._fout)
1629 return self._isatty(self._fout)
1631
1630
1632 return i
1631 return i
1633
1632
1634 def _readline(self, prompt=b' ', promptopts=None):
1633 def _readline(self, prompt=b' ', promptopts=None):
1635 # Replacing stdin/stdout temporarily is a hard problem on Python 3
1634 # Replacing stdin/stdout temporarily is a hard problem on Python 3
1636 # because they have to be text streams with *no buffering*. Instead,
1635 # because they have to be text streams with *no buffering*. Instead,
1637 # we use rawinput() only if call_readline() will be invoked by
1636 # we use rawinput() only if call_readline() will be invoked by
1638 # PyOS_Readline(), so no I/O will be made at Python layer.
1637 # PyOS_Readline(), so no I/O will be made at Python layer.
1639 usereadline = (
1638 usereadline = (
1640 self._isatty(self._fin)
1639 self._isatty(self._fin)
1641 and self._isatty(self._fout)
1640 and self._isatty(self._fout)
1642 and procutil.isstdin(self._fin)
1641 and procutil.isstdin(self._fin)
1643 and procutil.isstdout(self._fout)
1642 and procutil.isstdout(self._fout)
1644 )
1643 )
1645 if usereadline:
1644 if usereadline:
1646 try:
1645 try:
1647 # magically add command line editing support, where
1646 # magically add command line editing support, where
1648 # available
1647 # available
1649 import readline
1648 import readline
1650
1649
1651 # force demandimport to really load the module
1650 # force demandimport to really load the module
1652 readline.read_history_file
1651 readline.read_history_file
1653 # windows sometimes raises something other than ImportError
1652 # windows sometimes raises something other than ImportError
1654 except Exception:
1653 except Exception:
1655 usereadline = False
1654 usereadline = False
1656
1655
1657 if self._colormode == b'win32' or not usereadline:
1656 if self._colormode == b'win32' or not usereadline:
1658 if not promptopts:
1657 if not promptopts:
1659 promptopts = {}
1658 promptopts = {}
1660 self._writemsgnobuf(
1659 self._writemsgnobuf(
1661 self._fmsgout, prompt, type=b'prompt', **promptopts
1660 self._fmsgout, prompt, type=b'prompt', **promptopts
1662 )
1661 )
1663 self.flush()
1662 self.flush()
1664 prompt = b' '
1663 prompt = b' '
1665 else:
1664 else:
1666 prompt = self.label(prompt, b'ui.prompt') + b' '
1665 prompt = self.label(prompt, b'ui.prompt') + b' '
1667
1666
1668 # prompt ' ' must exist; otherwise readline may delete entire line
1667 # prompt ' ' must exist; otherwise readline may delete entire line
1669 # - http://bugs.python.org/issue12833
1668 # - http://bugs.python.org/issue12833
1670 with self.timeblockedsection(b'stdio'):
1669 with self.timeblockedsection(b'stdio'):
1671 if usereadline:
1670 if usereadline:
1672 self.flush()
1671 self.flush()
1673 prompt = encoding.strfromlocal(prompt)
1672 prompt = encoding.strfromlocal(prompt)
1674 line = encoding.strtolocal(pycompat.rawinput(prompt))
1673 line = encoding.strtolocal(pycompat.rawinput(prompt))
1675 # When stdin is in binary mode on Windows, it can cause
1674 # When stdin is in binary mode on Windows, it can cause
1676 # raw_input() to emit an extra trailing carriage return
1675 # raw_input() to emit an extra trailing carriage return
1677 if pycompat.oslinesep == b'\r\n' and line.endswith(b'\r'):
1676 if pycompat.oslinesep == b'\r\n' and line.endswith(b'\r'):
1678 line = line[:-1]
1677 line = line[:-1]
1679 else:
1678 else:
1680 self._fout.write(pycompat.bytestr(prompt))
1679 self._fout.write(pycompat.bytestr(prompt))
1681 self._fout.flush()
1680 self._fout.flush()
1682 line = self._fin.readline()
1681 line = self._fin.readline()
1683 if not line:
1682 if not line:
1684 raise EOFError
1683 raise EOFError
1685 line = line.rstrip(pycompat.oslinesep)
1684 line = line.rstrip(pycompat.oslinesep)
1686
1685
1687 return line
1686 return line
1688
1687
1689 def prompt(self, msg, default=b"y"):
1688 def prompt(self, msg, default=b"y"):
1690 """Prompt user with msg, read response.
1689 """Prompt user with msg, read response.
1691 If ui is not interactive, the default is returned.
1690 If ui is not interactive, the default is returned.
1692 """
1691 """
1693 return self._prompt(msg, default=default)
1692 return self._prompt(msg, default=default)
1694
1693
1695 def _prompt(self, msg, **opts):
1694 def _prompt(self, msg, **opts):
1696 default = opts['default']
1695 default = opts['default']
1697 if not self.interactive():
1696 if not self.interactive():
1698 self._writemsg(self._fmsgout, msg, b' ', type=b'prompt', **opts)
1697 self._writemsg(self._fmsgout, msg, b' ', type=b'prompt', **opts)
1699 self._writemsg(
1698 self._writemsg(
1700 self._fmsgout, default or b'', b"\n", type=b'promptecho'
1699 self._fmsgout, default or b'', b"\n", type=b'promptecho'
1701 )
1700 )
1702 return default
1701 return default
1703 try:
1702 try:
1704 r = self._readline(prompt=msg, promptopts=opts)
1703 r = self._readline(prompt=msg, promptopts=opts)
1705 if not r:
1704 if not r:
1706 r = default
1705 r = default
1707 if self.configbool(b'ui', b'promptecho'):
1706 if self.configbool(b'ui', b'promptecho'):
1708 self._writemsg(
1707 self._writemsg(
1709 self._fmsgout, r or b'', b"\n", type=b'promptecho'
1708 self._fmsgout, r or b'', b"\n", type=b'promptecho'
1710 )
1709 )
1711 return r
1710 return r
1712 except EOFError:
1711 except EOFError:
1713 raise error.ResponseExpected()
1712 raise error.ResponseExpected()
1714
1713
1715 @staticmethod
1714 @staticmethod
1716 def extractchoices(prompt):
1715 def extractchoices(prompt):
1717 """Extract prompt message and list of choices from specified prompt.
1716 """Extract prompt message and list of choices from specified prompt.
1718
1717
1719 This returns tuple "(message, choices)", and "choices" is the
1718 This returns tuple "(message, choices)", and "choices" is the
1720 list of tuple "(response character, text without &)".
1719 list of tuple "(response character, text without &)".
1721
1720
1722 >>> ui.extractchoices(b"awake? $$ &Yes $$ &No")
1721 >>> ui.extractchoices(b"awake? $$ &Yes $$ &No")
1723 ('awake? ', [('y', 'Yes'), ('n', 'No')])
1722 ('awake? ', [('y', 'Yes'), ('n', 'No')])
1724 >>> ui.extractchoices(b"line\\nbreak? $$ &Yes $$ &No")
1723 >>> ui.extractchoices(b"line\\nbreak? $$ &Yes $$ &No")
1725 ('line\\nbreak? ', [('y', 'Yes'), ('n', 'No')])
1724 ('line\\nbreak? ', [('y', 'Yes'), ('n', 'No')])
1726 >>> ui.extractchoices(b"want lots of $$money$$?$$Ye&s$$N&o")
1725 >>> ui.extractchoices(b"want lots of $$money$$?$$Ye&s$$N&o")
1727 ('want lots of $$money$$?', [('s', 'Yes'), ('o', 'No')])
1726 ('want lots of $$money$$?', [('s', 'Yes'), ('o', 'No')])
1728 """
1727 """
1729
1728
1730 # Sadly, the prompt string may have been built with a filename
1729 # Sadly, the prompt string may have been built with a filename
1731 # containing "$$" so let's try to find the first valid-looking
1730 # containing "$$" so let's try to find the first valid-looking
1732 # prompt to start parsing. Sadly, we also can't rely on
1731 # prompt to start parsing. Sadly, we also can't rely on
1733 # choices containing spaces, ASCII, or basically anything
1732 # choices containing spaces, ASCII, or basically anything
1734 # except an ampersand followed by a character.
1733 # except an ampersand followed by a character.
1735 m = re.match(br'(?s)(.+?)\$\$([^$]*&[^ $].*)', prompt)
1734 m = re.match(br'(?s)(.+?)\$\$([^$]*&[^ $].*)', prompt)
1736 msg = m.group(1)
1735 msg = m.group(1)
1737 choices = [p.strip(b' ') for p in m.group(2).split(b'$$')]
1736 choices = [p.strip(b' ') for p in m.group(2).split(b'$$')]
1738
1737
1739 def choicetuple(s):
1738 def choicetuple(s):
1740 ampidx = s.index(b'&')
1739 ampidx = s.index(b'&')
1741 return s[ampidx + 1 : ampidx + 2].lower(), s.replace(b'&', b'', 1)
1740 return s[ampidx + 1 : ampidx + 2].lower(), s.replace(b'&', b'', 1)
1742
1741
1743 return (msg, [choicetuple(s) for s in choices])
1742 return (msg, [choicetuple(s) for s in choices])
1744
1743
1745 def promptchoice(self, prompt, default=0):
1744 def promptchoice(self, prompt, default=0):
1746 """Prompt user with a message, read response, and ensure it matches
1745 """Prompt user with a message, read response, and ensure it matches
1747 one of the provided choices. The prompt is formatted as follows:
1746 one of the provided choices. The prompt is formatted as follows:
1748
1747
1749 "would you like fries with that (Yn)? $$ &Yes $$ &No"
1748 "would you like fries with that (Yn)? $$ &Yes $$ &No"
1750
1749
1751 The index of the choice is returned. Responses are case
1750 The index of the choice is returned. Responses are case
1752 insensitive. If ui is not interactive, the default is
1751 insensitive. If ui is not interactive, the default is
1753 returned.
1752 returned.
1754 """
1753 """
1755
1754
1756 msg, choices = self.extractchoices(prompt)
1755 msg, choices = self.extractchoices(prompt)
1757 resps = [r for r, t in choices]
1756 resps = [r for r, t in choices]
1758 while True:
1757 while True:
1759 r = self._prompt(msg, default=resps[default], choices=choices)
1758 r = self._prompt(msg, default=resps[default], choices=choices)
1760 if r.lower() in resps:
1759 if r.lower() in resps:
1761 return resps.index(r.lower())
1760 return resps.index(r.lower())
1762 # TODO: shouldn't it be a warning?
1761 # TODO: shouldn't it be a warning?
1763 self._writemsg(self._fmsgout, _(b"unrecognized response\n"))
1762 self._writemsg(self._fmsgout, _(b"unrecognized response\n"))
1764
1763
1765 def getpass(self, prompt=None, default=None):
1764 def getpass(self, prompt=None, default=None):
1766 if not self.interactive():
1765 if not self.interactive():
1767 return default
1766 return default
1768 try:
1767 try:
1769 self._writemsg(
1768 self._writemsg(
1770 self._fmsgerr,
1769 self._fmsgerr,
1771 prompt or _(b'password: '),
1770 prompt or _(b'password: '),
1772 type=b'prompt',
1771 type=b'prompt',
1773 password=True,
1772 password=True,
1774 )
1773 )
1775 # disable getpass() only if explicitly specified. it's still valid
1774 # disable getpass() only if explicitly specified. it's still valid
1776 # to interact with tty even if fin is not a tty.
1775 # to interact with tty even if fin is not a tty.
1777 with self.timeblockedsection(b'stdio'):
1776 with self.timeblockedsection(b'stdio'):
1778 if self.configbool(b'ui', b'nontty'):
1777 if self.configbool(b'ui', b'nontty'):
1779 l = self._fin.readline()
1778 l = self._fin.readline()
1780 if not l:
1779 if not l:
1781 raise EOFError
1780 raise EOFError
1782 return l.rstrip(b'\n')
1781 return l.rstrip(b'\n')
1783 else:
1782 else:
1784 return encoding.strtolocal(getpass.getpass(''))
1783 return util.get_password()
1785 except EOFError:
1784 except EOFError:
1786 raise error.ResponseExpected()
1785 raise error.ResponseExpected()
1787
1786
1788 def status(self, *msg, **opts):
1787 def status(self, *msg, **opts):
1789 """write status message to output (if ui.quiet is False)
1788 """write status message to output (if ui.quiet is False)
1790
1789
1791 This adds an output label of "ui.status".
1790 This adds an output label of "ui.status".
1792 """
1791 """
1793 if not self.quiet:
1792 if not self.quiet:
1794 self._writemsg(self._fmsgout, type=b'status', *msg, **opts)
1793 self._writemsg(self._fmsgout, type=b'status', *msg, **opts)
1795
1794
1796 def warn(self, *msg, **opts):
1795 def warn(self, *msg, **opts):
1797 """write warning message to output (stderr)
1796 """write warning message to output (stderr)
1798
1797
1799 This adds an output label of "ui.warning".
1798 This adds an output label of "ui.warning".
1800 """
1799 """
1801 self._writemsg(self._fmsgerr, type=b'warning', *msg, **opts)
1800 self._writemsg(self._fmsgerr, type=b'warning', *msg, **opts)
1802
1801
1803 def error(self, *msg, **opts):
1802 def error(self, *msg, **opts):
1804 """write error message to output (stderr)
1803 """write error message to output (stderr)
1805
1804
1806 This adds an output label of "ui.error".
1805 This adds an output label of "ui.error".
1807 """
1806 """
1808 self._writemsg(self._fmsgerr, type=b'error', *msg, **opts)
1807 self._writemsg(self._fmsgerr, type=b'error', *msg, **opts)
1809
1808
1810 def note(self, *msg, **opts):
1809 def note(self, *msg, **opts):
1811 """write note to output (if ui.verbose is True)
1810 """write note to output (if ui.verbose is True)
1812
1811
1813 This adds an output label of "ui.note".
1812 This adds an output label of "ui.note".
1814 """
1813 """
1815 if self.verbose:
1814 if self.verbose:
1816 self._writemsg(self._fmsgout, type=b'note', *msg, **opts)
1815 self._writemsg(self._fmsgout, type=b'note', *msg, **opts)
1817
1816
1818 def debug(self, *msg, **opts):
1817 def debug(self, *msg, **opts):
1819 """write debug message to output (if ui.debugflag is True)
1818 """write debug message to output (if ui.debugflag is True)
1820
1819
1821 This adds an output label of "ui.debug".
1820 This adds an output label of "ui.debug".
1822 """
1821 """
1823 if self.debugflag:
1822 if self.debugflag:
1824 self._writemsg(self._fmsgout, type=b'debug', *msg, **opts)
1823 self._writemsg(self._fmsgout, type=b'debug', *msg, **opts)
1825 self.log(b'debug', b'%s', b''.join(msg))
1824 self.log(b'debug', b'%s', b''.join(msg))
1826
1825
1827 # Aliases to defeat check-code.
1826 # Aliases to defeat check-code.
1828 statusnoi18n = status
1827 statusnoi18n = status
1829 notenoi18n = note
1828 notenoi18n = note
1830 warnnoi18n = warn
1829 warnnoi18n = warn
1831 writenoi18n = write
1830 writenoi18n = write
1832
1831
1833 def edit(
1832 def edit(
1834 self,
1833 self,
1835 text,
1834 text,
1836 user,
1835 user,
1837 extra=None,
1836 extra=None,
1838 editform=None,
1837 editform=None,
1839 pending=None,
1838 pending=None,
1840 repopath=None,
1839 repopath=None,
1841 action=None,
1840 action=None,
1842 ):
1841 ):
1843 if action is None:
1842 if action is None:
1844 self.develwarn(
1843 self.develwarn(
1845 b'action is None but will soon be a required '
1844 b'action is None but will soon be a required '
1846 b'parameter to ui.edit()'
1845 b'parameter to ui.edit()'
1847 )
1846 )
1848 extra_defaults = {
1847 extra_defaults = {
1849 b'prefix': b'editor',
1848 b'prefix': b'editor',
1850 b'suffix': b'.txt',
1849 b'suffix': b'.txt',
1851 }
1850 }
1852 if extra is not None:
1851 if extra is not None:
1853 if extra.get(b'suffix') is not None:
1852 if extra.get(b'suffix') is not None:
1854 self.develwarn(
1853 self.develwarn(
1855 b'extra.suffix is not None but will soon be '
1854 b'extra.suffix is not None but will soon be '
1856 b'ignored by ui.edit()'
1855 b'ignored by ui.edit()'
1857 )
1856 )
1858 extra_defaults.update(extra)
1857 extra_defaults.update(extra)
1859 extra = extra_defaults
1858 extra = extra_defaults
1860
1859
1861 if action == b'diff':
1860 if action == b'diff':
1862 suffix = b'.diff'
1861 suffix = b'.diff'
1863 elif action:
1862 elif action:
1864 suffix = b'.%s.hg.txt' % action
1863 suffix = b'.%s.hg.txt' % action
1865 else:
1864 else:
1866 suffix = extra[b'suffix']
1865 suffix = extra[b'suffix']
1867
1866
1868 rdir = None
1867 rdir = None
1869 if self.configbool(b'experimental', b'editortmpinhg'):
1868 if self.configbool(b'experimental', b'editortmpinhg'):
1870 rdir = repopath
1869 rdir = repopath
1871 (fd, name) = pycompat.mkstemp(
1870 (fd, name) = pycompat.mkstemp(
1872 prefix=b'hg-' + extra[b'prefix'] + b'-', suffix=suffix, dir=rdir
1871 prefix=b'hg-' + extra[b'prefix'] + b'-', suffix=suffix, dir=rdir
1873 )
1872 )
1874 try:
1873 try:
1875 with os.fdopen(fd, 'wb') as f:
1874 with os.fdopen(fd, 'wb') as f:
1876 f.write(util.tonativeeol(text))
1875 f.write(util.tonativeeol(text))
1877
1876
1878 environ = {b'HGUSER': user}
1877 environ = {b'HGUSER': user}
1879 if b'transplant_source' in extra:
1878 if b'transplant_source' in extra:
1880 environ.update(
1879 environ.update(
1881 {b'HGREVISION': hex(extra[b'transplant_source'])}
1880 {b'HGREVISION': hex(extra[b'transplant_source'])}
1882 )
1881 )
1883 for label in (b'intermediate-source', b'source', b'rebase_source'):
1882 for label in (b'intermediate-source', b'source', b'rebase_source'):
1884 if label in extra:
1883 if label in extra:
1885 environ.update({b'HGREVISION': extra[label]})
1884 environ.update({b'HGREVISION': extra[label]})
1886 break
1885 break
1887 if editform:
1886 if editform:
1888 environ.update({b'HGEDITFORM': editform})
1887 environ.update({b'HGEDITFORM': editform})
1889 if pending:
1888 if pending:
1890 environ.update({b'HG_PENDING': pending})
1889 environ.update({b'HG_PENDING': pending})
1891
1890
1892 editor = self.geteditor()
1891 editor = self.geteditor()
1893
1892
1894 self.system(
1893 self.system(
1895 b"%s \"%s\"" % (editor, name),
1894 b"%s \"%s\"" % (editor, name),
1896 environ=environ,
1895 environ=environ,
1897 onerr=error.CanceledError,
1896 onerr=error.CanceledError,
1898 errprefix=_(b"edit failed"),
1897 errprefix=_(b"edit failed"),
1899 blockedtag=b'editor',
1898 blockedtag=b'editor',
1900 )
1899 )
1901
1900
1902 with open(name, 'rb') as f:
1901 with open(name, 'rb') as f:
1903 t = util.fromnativeeol(f.read())
1902 t = util.fromnativeeol(f.read())
1904 finally:
1903 finally:
1905 os.unlink(name)
1904 os.unlink(name)
1906
1905
1907 return t
1906 return t
1908
1907
1909 def system(
1908 def system(
1910 self,
1909 self,
1911 cmd,
1910 cmd,
1912 environ=None,
1911 environ=None,
1913 cwd=None,
1912 cwd=None,
1914 onerr=None,
1913 onerr=None,
1915 errprefix=None,
1914 errprefix=None,
1916 blockedtag=None,
1915 blockedtag=None,
1917 ):
1916 ):
1918 """execute shell command with appropriate output stream. command
1917 """execute shell command with appropriate output stream. command
1919 output will be redirected if fout is not stdout.
1918 output will be redirected if fout is not stdout.
1920
1919
1921 if command fails and onerr is None, return status, else raise onerr
1920 if command fails and onerr is None, return status, else raise onerr
1922 object as exception.
1921 object as exception.
1923 """
1922 """
1924 if blockedtag is None:
1923 if blockedtag is None:
1925 # Long cmds tend to be because of an absolute path on cmd. Keep
1924 # Long cmds tend to be because of an absolute path on cmd. Keep
1926 # the tail end instead
1925 # the tail end instead
1927 cmdsuffix = cmd.translate(None, _keepalnum)[-85:]
1926 cmdsuffix = cmd.translate(None, _keepalnum)[-85:]
1928 blockedtag = b'unknown_system_' + cmdsuffix
1927 blockedtag = b'unknown_system_' + cmdsuffix
1929 out = self._fout
1928 out = self._fout
1930 if any(s[1] for s in self._bufferstates):
1929 if any(s[1] for s in self._bufferstates):
1931 out = self
1930 out = self
1932 with self.timeblockedsection(blockedtag):
1931 with self.timeblockedsection(blockedtag):
1933 rc = self._runsystem(cmd, environ=environ, cwd=cwd, out=out)
1932 rc = self._runsystem(cmd, environ=environ, cwd=cwd, out=out)
1934 if rc and onerr:
1933 if rc and onerr:
1935 errmsg = b'%s %s' % (
1934 errmsg = b'%s %s' % (
1936 procutil.shellsplit(cmd)[0],
1935 procutil.shellsplit(cmd)[0],
1937 procutil.explainexit(rc),
1936 procutil.explainexit(rc),
1938 )
1937 )
1939 if errprefix:
1938 if errprefix:
1940 errmsg = b'%s: %s' % (errprefix, errmsg)
1939 errmsg = b'%s: %s' % (errprefix, errmsg)
1941 raise onerr(errmsg)
1940 raise onerr(errmsg)
1942 return rc
1941 return rc
1943
1942
1944 def _runsystem(self, cmd, environ, cwd, out):
1943 def _runsystem(self, cmd, environ, cwd, out):
1945 """actually execute the given shell command (can be overridden by
1944 """actually execute the given shell command (can be overridden by
1946 extensions like chg)"""
1945 extensions like chg)"""
1947 return procutil.system(cmd, environ=environ, cwd=cwd, out=out)
1946 return procutil.system(cmd, environ=environ, cwd=cwd, out=out)
1948
1947
1949 def traceback(self, exc=None, force=False):
1948 def traceback(self, exc=None, force=False):
1950 """print exception traceback if traceback printing enabled or forced.
1949 """print exception traceback if traceback printing enabled or forced.
1951 only to call in exception handler. returns true if traceback
1950 only to call in exception handler. returns true if traceback
1952 printed."""
1951 printed."""
1953 if self.tracebackflag or force:
1952 if self.tracebackflag or force:
1954 if exc is None:
1953 if exc is None:
1955 exc = sys.exc_info()
1954 exc = sys.exc_info()
1956 cause = getattr(exc[1], 'cause', None)
1955 cause = getattr(exc[1], 'cause', None)
1957
1956
1958 if cause is not None:
1957 if cause is not None:
1959 causetb = traceback.format_tb(cause[2])
1958 causetb = traceback.format_tb(cause[2])
1960 exctb = traceback.format_tb(exc[2])
1959 exctb = traceback.format_tb(exc[2])
1961 exconly = traceback.format_exception_only(cause[0], cause[1])
1960 exconly = traceback.format_exception_only(cause[0], cause[1])
1962
1961
1963 # exclude frame where 'exc' was chained and rethrown from exctb
1962 # exclude frame where 'exc' was chained and rethrown from exctb
1964 self.write_err(
1963 self.write_err(
1965 b'Traceback (most recent call last):\n',
1964 b'Traceback (most recent call last):\n',
1966 encoding.strtolocal(''.join(exctb[:-1])),
1965 encoding.strtolocal(''.join(exctb[:-1])),
1967 encoding.strtolocal(''.join(causetb)),
1966 encoding.strtolocal(''.join(causetb)),
1968 encoding.strtolocal(''.join(exconly)),
1967 encoding.strtolocal(''.join(exconly)),
1969 )
1968 )
1970 else:
1969 else:
1971 output = traceback.format_exception(exc[0], exc[1], exc[2])
1970 output = traceback.format_exception(exc[0], exc[1], exc[2])
1972 self.write_err(encoding.strtolocal(''.join(output)))
1971 self.write_err(encoding.strtolocal(''.join(output)))
1973 return self.tracebackflag or force
1972 return self.tracebackflag or force
1974
1973
1975 def geteditor(self):
1974 def geteditor(self):
1976 '''return editor to use'''
1975 '''return editor to use'''
1977 if pycompat.sysplatform == b'plan9':
1976 if pycompat.sysplatform == b'plan9':
1978 # vi is the MIPS instruction simulator on Plan 9. We
1977 # vi is the MIPS instruction simulator on Plan 9. We
1979 # instead default to E to plumb commit messages to
1978 # instead default to E to plumb commit messages to
1980 # avoid confusion.
1979 # avoid confusion.
1981 editor = b'E'
1980 editor = b'E'
1982 elif pycompat.isdarwin:
1981 elif pycompat.isdarwin:
1983 # vi on darwin is POSIX compatible to a fault, and that includes
1982 # vi on darwin is POSIX compatible to a fault, and that includes
1984 # exiting non-zero if you make any mistake when running an ex
1983 # exiting non-zero if you make any mistake when running an ex
1985 # command. Proof: `vi -c ':unknown' -c ':qa'; echo $?` produces 1,
1984 # command. Proof: `vi -c ':unknown' -c ':qa'; echo $?` produces 1,
1986 # while s/vi/vim/ doesn't.
1985 # while s/vi/vim/ doesn't.
1987 editor = b'vim'
1986 editor = b'vim'
1988 else:
1987 else:
1989 editor = b'vi'
1988 editor = b'vi'
1990 return encoding.environ.get(b"HGEDITOR") or self.config(
1989 return encoding.environ.get(b"HGEDITOR") or self.config(
1991 b"ui", b"editor", editor
1990 b"ui", b"editor", editor
1992 )
1991 )
1993
1992
1994 @util.propertycache
1993 @util.propertycache
1995 def _progbar(self):
1994 def _progbar(self):
1996 """setup the progbar singleton to the ui object"""
1995 """setup the progbar singleton to the ui object"""
1997 if (
1996 if (
1998 self.quiet
1997 self.quiet
1999 or self.debugflag
1998 or self.debugflag
2000 or self.configbool(b'progress', b'disable')
1999 or self.configbool(b'progress', b'disable')
2001 or not progress.shouldprint(self)
2000 or not progress.shouldprint(self)
2002 ):
2001 ):
2003 return None
2002 return None
2004 return getprogbar(self)
2003 return getprogbar(self)
2005
2004
2006 def _progclear(self):
2005 def _progclear(self):
2007 """clear progress bar output if any. use it before any output"""
2006 """clear progress bar output if any. use it before any output"""
2008 if not haveprogbar(): # nothing loaded yet
2007 if not haveprogbar(): # nothing loaded yet
2009 return
2008 return
2010 if self._progbar is not None and self._progbar.printed:
2009 if self._progbar is not None and self._progbar.printed:
2011 self._progbar.clear()
2010 self._progbar.clear()
2012
2011
2013 def makeprogress(self, topic, unit=b"", total=None):
2012 def makeprogress(self, topic, unit=b"", total=None):
2014 """Create a progress helper for the specified topic"""
2013 """Create a progress helper for the specified topic"""
2015 if getattr(self._fmsgerr, 'structured', False):
2014 if getattr(self._fmsgerr, 'structured', False):
2016 # channel for machine-readable output with metadata, just send
2015 # channel for machine-readable output with metadata, just send
2017 # raw information
2016 # raw information
2018 # TODO: consider porting some useful information (e.g. estimated
2017 # TODO: consider porting some useful information (e.g. estimated
2019 # time) from progbar. we might want to support update delay to
2018 # time) from progbar. we might want to support update delay to
2020 # reduce the cost of transferring progress messages.
2019 # reduce the cost of transferring progress messages.
2021 def updatebar(topic, pos, item, unit, total):
2020 def updatebar(topic, pos, item, unit, total):
2022 self._fmsgerr.write(
2021 self._fmsgerr.write(
2023 None,
2022 None,
2024 type=b'progress',
2023 type=b'progress',
2025 topic=topic,
2024 topic=topic,
2026 pos=pos,
2025 pos=pos,
2027 item=item,
2026 item=item,
2028 unit=unit,
2027 unit=unit,
2029 total=total,
2028 total=total,
2030 )
2029 )
2031
2030
2032 elif self._progbar is not None:
2031 elif self._progbar is not None:
2033 updatebar = self._progbar.progress
2032 updatebar = self._progbar.progress
2034 else:
2033 else:
2035
2034
2036 def updatebar(topic, pos, item, unit, total):
2035 def updatebar(topic, pos, item, unit, total):
2037 pass
2036 pass
2038
2037
2039 return scmutil.progress(self, updatebar, topic, unit, total)
2038 return scmutil.progress(self, updatebar, topic, unit, total)
2040
2039
2041 def getlogger(self, name):
2040 def getlogger(self, name):
2042 """Returns a logger of the given name; or None if not registered"""
2041 """Returns a logger of the given name; or None if not registered"""
2043 return self._loggers.get(name)
2042 return self._loggers.get(name)
2044
2043
2045 def setlogger(self, name, logger):
2044 def setlogger(self, name, logger):
2046 """Install logger which can be identified later by the given name
2045 """Install logger which can be identified later by the given name
2047
2046
2048 More than one loggers can be registered. Use extension or module
2047 More than one loggers can be registered. Use extension or module
2049 name to uniquely identify the logger instance.
2048 name to uniquely identify the logger instance.
2050 """
2049 """
2051 self._loggers[name] = logger
2050 self._loggers[name] = logger
2052
2051
2053 def log(self, event, msgfmt, *msgargs, **opts):
2052 def log(self, event, msgfmt, *msgargs, **opts):
2054 """hook for logging facility extensions
2053 """hook for logging facility extensions
2055
2054
2056 event should be a readily-identifiable subsystem, which will
2055 event should be a readily-identifiable subsystem, which will
2057 allow filtering.
2056 allow filtering.
2058
2057
2059 msgfmt should be a newline-terminated format string to log, and
2058 msgfmt should be a newline-terminated format string to log, and
2060 *msgargs are %-formatted into it.
2059 *msgargs are %-formatted into it.
2061
2060
2062 **opts currently has no defined meanings.
2061 **opts currently has no defined meanings.
2063 """
2062 """
2064 if not self._loggers:
2063 if not self._loggers:
2065 return
2064 return
2066 activeloggers = [
2065 activeloggers = [
2067 l for l in pycompat.itervalues(self._loggers) if l.tracked(event)
2066 l for l in pycompat.itervalues(self._loggers) if l.tracked(event)
2068 ]
2067 ]
2069 if not activeloggers:
2068 if not activeloggers:
2070 return
2069 return
2071 msg = msgfmt % msgargs
2070 msg = msgfmt % msgargs
2072 opts = pycompat.byteskwargs(opts)
2071 opts = pycompat.byteskwargs(opts)
2073 # guard against recursion from e.g. ui.debug()
2072 # guard against recursion from e.g. ui.debug()
2074 registeredloggers = self._loggers
2073 registeredloggers = self._loggers
2075 self._loggers = {}
2074 self._loggers = {}
2076 try:
2075 try:
2077 for logger in activeloggers:
2076 for logger in activeloggers:
2078 logger.log(self, event, msg, opts)
2077 logger.log(self, event, msg, opts)
2079 finally:
2078 finally:
2080 self._loggers = registeredloggers
2079 self._loggers = registeredloggers
2081
2080
2082 def label(self, msg, label):
2081 def label(self, msg, label):
2083 """style msg based on supplied label
2082 """style msg based on supplied label
2084
2083
2085 If some color mode is enabled, this will add the necessary control
2084 If some color mode is enabled, this will add the necessary control
2086 characters to apply such color. In addition, 'debug' color mode adds
2085 characters to apply such color. In addition, 'debug' color mode adds
2087 markup showing which label affects a piece of text.
2086 markup showing which label affects a piece of text.
2088
2087
2089 ui.write(s, 'label') is equivalent to
2088 ui.write(s, 'label') is equivalent to
2090 ui.write(ui.label(s, 'label')).
2089 ui.write(ui.label(s, 'label')).
2091 """
2090 """
2092 if self._colormode is not None:
2091 if self._colormode is not None:
2093 return color.colorlabel(self, msg, label)
2092 return color.colorlabel(self, msg, label)
2094 return msg
2093 return msg
2095
2094
2096 def develwarn(self, msg, stacklevel=1, config=None):
2095 def develwarn(self, msg, stacklevel=1, config=None):
2097 """issue a developer warning message
2096 """issue a developer warning message
2098
2097
2099 Use 'stacklevel' to report the offender some layers further up in the
2098 Use 'stacklevel' to report the offender some layers further up in the
2100 stack.
2099 stack.
2101 """
2100 """
2102 if not self.configbool(b'devel', b'all-warnings'):
2101 if not self.configbool(b'devel', b'all-warnings'):
2103 if config is None or not self.configbool(b'devel', config):
2102 if config is None or not self.configbool(b'devel', config):
2104 return
2103 return
2105 msg = b'devel-warn: ' + msg
2104 msg = b'devel-warn: ' + msg
2106 stacklevel += 1 # get in develwarn
2105 stacklevel += 1 # get in develwarn
2107 if self.tracebackflag:
2106 if self.tracebackflag:
2108 util.debugstacktrace(msg, stacklevel, self._ferr, self._fout)
2107 util.debugstacktrace(msg, stacklevel, self._ferr, self._fout)
2109 self.log(
2108 self.log(
2110 b'develwarn',
2109 b'develwarn',
2111 b'%s at:\n%s'
2110 b'%s at:\n%s'
2112 % (msg, b''.join(util.getstackframes(stacklevel))),
2111 % (msg, b''.join(util.getstackframes(stacklevel))),
2113 )
2112 )
2114 else:
2113 else:
2115 curframe = inspect.currentframe()
2114 curframe = inspect.currentframe()
2116 calframe = inspect.getouterframes(curframe, 2)
2115 calframe = inspect.getouterframes(curframe, 2)
2117 fname, lineno, fmsg = calframe[stacklevel][1:4]
2116 fname, lineno, fmsg = calframe[stacklevel][1:4]
2118 fname, fmsg = pycompat.sysbytes(fname), pycompat.sysbytes(fmsg)
2117 fname, fmsg = pycompat.sysbytes(fname), pycompat.sysbytes(fmsg)
2119 self.write_err(b'%s at: %s:%d (%s)\n' % (msg, fname, lineno, fmsg))
2118 self.write_err(b'%s at: %s:%d (%s)\n' % (msg, fname, lineno, fmsg))
2120 self.log(
2119 self.log(
2121 b'develwarn', b'%s at: %s:%d (%s)\n', msg, fname, lineno, fmsg
2120 b'develwarn', b'%s at: %s:%d (%s)\n', msg, fname, lineno, fmsg
2122 )
2121 )
2123
2122
2124 # avoid cycles
2123 # avoid cycles
2125 del curframe
2124 del curframe
2126 del calframe
2125 del calframe
2127
2126
2128 def deprecwarn(self, msg, version, stacklevel=2):
2127 def deprecwarn(self, msg, version, stacklevel=2):
2129 """issue a deprecation warning
2128 """issue a deprecation warning
2130
2129
2131 - msg: message explaining what is deprecated and how to upgrade,
2130 - msg: message explaining what is deprecated and how to upgrade,
2132 - version: last version where the API will be supported,
2131 - version: last version where the API will be supported,
2133 """
2132 """
2134 if not (
2133 if not (
2135 self.configbool(b'devel', b'all-warnings')
2134 self.configbool(b'devel', b'all-warnings')
2136 or self.configbool(b'devel', b'deprec-warn')
2135 or self.configbool(b'devel', b'deprec-warn')
2137 ):
2136 ):
2138 return
2137 return
2139 msg += (
2138 msg += (
2140 b"\n(compatibility will be dropped after Mercurial-%s,"
2139 b"\n(compatibility will be dropped after Mercurial-%s,"
2141 b" update your code.)"
2140 b" update your code.)"
2142 ) % version
2141 ) % version
2143 self.develwarn(msg, stacklevel=stacklevel, config=b'deprec-warn')
2142 self.develwarn(msg, stacklevel=stacklevel, config=b'deprec-warn')
2144
2143
2145 def exportableenviron(self):
2144 def exportableenviron(self):
2146 """The environment variables that are safe to export, e.g. through
2145 """The environment variables that are safe to export, e.g. through
2147 hgweb.
2146 hgweb.
2148 """
2147 """
2149 return self._exportableenviron
2148 return self._exportableenviron
2150
2149
2151 @contextlib.contextmanager
2150 @contextlib.contextmanager
2152 def configoverride(self, overrides, source=b""):
2151 def configoverride(self, overrides, source=b""):
2153 """Context manager for temporary config overrides
2152 """Context manager for temporary config overrides
2154 `overrides` must be a dict of the following structure:
2153 `overrides` must be a dict of the following structure:
2155 {(section, name) : value}"""
2154 {(section, name) : value}"""
2156 backups = {}
2155 backups = {}
2157 try:
2156 try:
2158 for (section, name), value in overrides.items():
2157 for (section, name), value in overrides.items():
2159 backups[(section, name)] = self.backupconfig(section, name)
2158 backups[(section, name)] = self.backupconfig(section, name)
2160 self.setconfig(section, name, value, source)
2159 self.setconfig(section, name, value, source)
2161 yield
2160 yield
2162 finally:
2161 finally:
2163 for __, backup in backups.items():
2162 for __, backup in backups.items():
2164 self.restoreconfig(backup)
2163 self.restoreconfig(backup)
2165 # just restoring ui.quiet config to the previous value is not enough
2164 # just restoring ui.quiet config to the previous value is not enough
2166 # as it does not update ui.quiet class member
2165 # as it does not update ui.quiet class member
2167 if (b'ui', b'quiet') in overrides:
2166 if (b'ui', b'quiet') in overrides:
2168 self.fixconfig(section=b'ui')
2167 self.fixconfig(section=b'ui')
2169
2168
2170 def estimatememory(self):
2169 def estimatememory(self):
2171 """Provide an estimate for the available system memory in Bytes.
2170 """Provide an estimate for the available system memory in Bytes.
2172
2171
2173 This can be overriden via ui.available-memory. It returns None, if
2172 This can be overriden via ui.available-memory. It returns None, if
2174 no estimate can be computed.
2173 no estimate can be computed.
2175 """
2174 """
2176 value = self.config(b'ui', b'available-memory')
2175 value = self.config(b'ui', b'available-memory')
2177 if value is not None:
2176 if value is not None:
2178 try:
2177 try:
2179 return util.sizetoint(value)
2178 return util.sizetoint(value)
2180 except error.ParseError:
2179 except error.ParseError:
2181 raise error.ConfigError(
2180 raise error.ConfigError(
2182 _(b"ui.available-memory value is invalid ('%s')") % value
2181 _(b"ui.available-memory value is invalid ('%s')") % value
2183 )
2182 )
2184 return util._estimatememory()
2183 return util._estimatememory()
2185
2184
2186
2185
2187 # we instantiate one globally shared progress bar to avoid
2186 # we instantiate one globally shared progress bar to avoid
2188 # competing progress bars when multiple UI objects get created
2187 # competing progress bars when multiple UI objects get created
2189 _progresssingleton = None
2188 _progresssingleton = None
2190
2189
2191
2190
2192 def getprogbar(ui):
2191 def getprogbar(ui):
2193 global _progresssingleton
2192 global _progresssingleton
2194 if _progresssingleton is None:
2193 if _progresssingleton is None:
2195 # passing 'ui' object to the singleton is fishy,
2194 # passing 'ui' object to the singleton is fishy,
2196 # this is how the extension used to work but feel free to rework it.
2195 # this is how the extension used to work but feel free to rework it.
2197 _progresssingleton = progress.progbar(ui)
2196 _progresssingleton = progress.progbar(ui)
2198 return _progresssingleton
2197 return _progresssingleton
2199
2198
2200
2199
2201 def haveprogbar():
2200 def haveprogbar():
2202 return _progresssingleton is not None
2201 return _progresssingleton is not None
2203
2202
2204
2203
2205 def _selectmsgdests(ui):
2204 def _selectmsgdests(ui):
2206 name = ui.config(b'ui', b'message-output')
2205 name = ui.config(b'ui', b'message-output')
2207 if name == b'channel':
2206 if name == b'channel':
2208 if ui.fmsg:
2207 if ui.fmsg:
2209 return ui.fmsg, ui.fmsg
2208 return ui.fmsg, ui.fmsg
2210 else:
2209 else:
2211 # fall back to ferr if channel isn't ready so that status/error
2210 # fall back to ferr if channel isn't ready so that status/error
2212 # messages can be printed
2211 # messages can be printed
2213 return ui.ferr, ui.ferr
2212 return ui.ferr, ui.ferr
2214 if name == b'stdio':
2213 if name == b'stdio':
2215 return ui.fout, ui.ferr
2214 return ui.fout, ui.ferr
2216 if name == b'stderr':
2215 if name == b'stderr':
2217 return ui.ferr, ui.ferr
2216 return ui.ferr, ui.ferr
2218 raise error.Abort(b'invalid ui.message-output destination: %s' % name)
2217 raise error.Abort(b'invalid ui.message-output destination: %s' % name)
2219
2218
2220
2219
2221 def _writemsgwith(write, dest, *args, **opts):
2220 def _writemsgwith(write, dest, *args, **opts):
2222 """Write ui message with the given ui._write*() function
2221 """Write ui message with the given ui._write*() function
2223
2222
2224 The specified message type is translated to 'ui.<type>' label if the dest
2223 The specified message type is translated to 'ui.<type>' label if the dest
2225 isn't a structured channel, so that the message will be colorized.
2224 isn't a structured channel, so that the message will be colorized.
2226 """
2225 """
2227 # TODO: maybe change 'type' to a mandatory option
2226 # TODO: maybe change 'type' to a mandatory option
2228 if 'type' in opts and not getattr(dest, 'structured', False):
2227 if 'type' in opts and not getattr(dest, 'structured', False):
2229 opts['label'] = opts.get('label', b'') + b' ui.%s' % opts.pop('type')
2228 opts['label'] = opts.get('label', b'') + b' ui.%s' % opts.pop('type')
2230 write(dest, *args, **opts)
2229 write(dest, *args, **opts)
@@ -1,3379 +1,3380 b''
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from __future__ import absolute_import, print_function
16 from __future__ import absolute_import, print_function
17
17
18 import abc
18 import abc
19 import collections
19 import collections
20 import contextlib
20 import contextlib
21 import errno
21 import errno
22 import gc
22 import gc
23 import hashlib
23 import hashlib
24 import itertools
24 import itertools
25 import locale
25 import locale
26 import mmap
26 import mmap
27 import os
27 import os
28 import platform as pyplatform
28 import platform as pyplatform
29 import re as remod
29 import re as remod
30 import shutil
30 import shutil
31 import stat
31 import stat
32 import sys
32 import sys
33 import time
33 import time
34 import traceback
34 import traceback
35 import warnings
35 import warnings
36
36
37 from .node import hex
37 from .node import hex
38 from .thirdparty import attr
38 from .thirdparty import attr
39 from .pycompat import (
39 from .pycompat import (
40 delattr,
40 delattr,
41 getattr,
41 getattr,
42 open,
42 open,
43 setattr,
43 setattr,
44 )
44 )
45 from .node import hex
45 from .node import hex
46 from hgdemandimport import tracing
46 from hgdemandimport import tracing
47 from . import (
47 from . import (
48 encoding,
48 encoding,
49 error,
49 error,
50 i18n,
50 i18n,
51 policy,
51 policy,
52 pycompat,
52 pycompat,
53 urllibcompat,
53 urllibcompat,
54 )
54 )
55 from .utils import (
55 from .utils import (
56 compression,
56 compression,
57 hashutil,
57 hashutil,
58 procutil,
58 procutil,
59 stringutil,
59 stringutil,
60 urlutil,
60 urlutil,
61 )
61 )
62
62
63 if pycompat.TYPE_CHECKING:
63 if pycompat.TYPE_CHECKING:
64 from typing import (
64 from typing import (
65 Iterator,
65 Iterator,
66 List,
66 List,
67 Optional,
67 Optional,
68 Tuple,
68 Tuple,
69 )
69 )
70
70
71
71
72 base85 = policy.importmod('base85')
72 base85 = policy.importmod('base85')
73 osutil = policy.importmod('osutil')
73 osutil = policy.importmod('osutil')
74
74
75 b85decode = base85.b85decode
75 b85decode = base85.b85decode
76 b85encode = base85.b85encode
76 b85encode = base85.b85encode
77
77
78 cookielib = pycompat.cookielib
78 cookielib = pycompat.cookielib
79 httplib = pycompat.httplib
79 httplib = pycompat.httplib
80 pickle = pycompat.pickle
80 pickle = pycompat.pickle
81 safehasattr = pycompat.safehasattr
81 safehasattr = pycompat.safehasattr
82 socketserver = pycompat.socketserver
82 socketserver = pycompat.socketserver
83 bytesio = pycompat.bytesio
83 bytesio = pycompat.bytesio
84 # TODO deprecate stringio name, as it is a lie on Python 3.
84 # TODO deprecate stringio name, as it is a lie on Python 3.
85 stringio = bytesio
85 stringio = bytesio
86 xmlrpclib = pycompat.xmlrpclib
86 xmlrpclib = pycompat.xmlrpclib
87
87
88 httpserver = urllibcompat.httpserver
88 httpserver = urllibcompat.httpserver
89 urlerr = urllibcompat.urlerr
89 urlerr = urllibcompat.urlerr
90 urlreq = urllibcompat.urlreq
90 urlreq = urllibcompat.urlreq
91
91
92 # workaround for win32mbcs
92 # workaround for win32mbcs
93 _filenamebytestr = pycompat.bytestr
93 _filenamebytestr = pycompat.bytestr
94
94
95 if pycompat.iswindows:
95 if pycompat.iswindows:
96 from . import windows as platform
96 from . import windows as platform
97 else:
97 else:
98 from . import posix as platform
98 from . import posix as platform
99
99
100 _ = i18n._
100 _ = i18n._
101
101
102 bindunixsocket = platform.bindunixsocket
102 bindunixsocket = platform.bindunixsocket
103 cachestat = platform.cachestat
103 cachestat = platform.cachestat
104 checkexec = platform.checkexec
104 checkexec = platform.checkexec
105 checklink = platform.checklink
105 checklink = platform.checklink
106 copymode = platform.copymode
106 copymode = platform.copymode
107 expandglobs = platform.expandglobs
107 expandglobs = platform.expandglobs
108 getfsmountpoint = platform.getfsmountpoint
108 getfsmountpoint = platform.getfsmountpoint
109 getfstype = platform.getfstype
109 getfstype = platform.getfstype
110 get_password = platform.get_password
110 groupmembers = platform.groupmembers
111 groupmembers = platform.groupmembers
111 groupname = platform.groupname
112 groupname = platform.groupname
112 isexec = platform.isexec
113 isexec = platform.isexec
113 isowner = platform.isowner
114 isowner = platform.isowner
114 listdir = osutil.listdir
115 listdir = osutil.listdir
115 localpath = platform.localpath
116 localpath = platform.localpath
116 lookupreg = platform.lookupreg
117 lookupreg = platform.lookupreg
117 makedir = platform.makedir
118 makedir = platform.makedir
118 nlinks = platform.nlinks
119 nlinks = platform.nlinks
119 normpath = platform.normpath
120 normpath = platform.normpath
120 normcase = platform.normcase
121 normcase = platform.normcase
121 normcasespec = platform.normcasespec
122 normcasespec = platform.normcasespec
122 normcasefallback = platform.normcasefallback
123 normcasefallback = platform.normcasefallback
123 openhardlinks = platform.openhardlinks
124 openhardlinks = platform.openhardlinks
124 oslink = platform.oslink
125 oslink = platform.oslink
125 parsepatchoutput = platform.parsepatchoutput
126 parsepatchoutput = platform.parsepatchoutput
126 pconvert = platform.pconvert
127 pconvert = platform.pconvert
127 poll = platform.poll
128 poll = platform.poll
128 posixfile = platform.posixfile
129 posixfile = platform.posixfile
129 readlink = platform.readlink
130 readlink = platform.readlink
130 rename = platform.rename
131 rename = platform.rename
131 removedirs = platform.removedirs
132 removedirs = platform.removedirs
132 samedevice = platform.samedevice
133 samedevice = platform.samedevice
133 samefile = platform.samefile
134 samefile = platform.samefile
134 samestat = platform.samestat
135 samestat = platform.samestat
135 setflags = platform.setflags
136 setflags = platform.setflags
136 split = platform.split
137 split = platform.split
137 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
138 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
138 statisexec = platform.statisexec
139 statisexec = platform.statisexec
139 statislink = platform.statislink
140 statislink = platform.statislink
140 umask = platform.umask
141 umask = platform.umask
141 unlink = platform.unlink
142 unlink = platform.unlink
142 username = platform.username
143 username = platform.username
143
144
144
145
145 def setumask(val):
146 def setumask(val):
146 # type: (int) -> None
147 # type: (int) -> None
147 '''updates the umask. used by chg server'''
148 '''updates the umask. used by chg server'''
148 if pycompat.iswindows:
149 if pycompat.iswindows:
149 return
150 return
150 os.umask(val)
151 os.umask(val)
151 global umask
152 global umask
152 platform.umask = umask = val & 0o777
153 platform.umask = umask = val & 0o777
153
154
154
155
155 # small compat layer
156 # small compat layer
156 compengines = compression.compengines
157 compengines = compression.compengines
157 SERVERROLE = compression.SERVERROLE
158 SERVERROLE = compression.SERVERROLE
158 CLIENTROLE = compression.CLIENTROLE
159 CLIENTROLE = compression.CLIENTROLE
159
160
160 try:
161 try:
161 recvfds = osutil.recvfds
162 recvfds = osutil.recvfds
162 except AttributeError:
163 except AttributeError:
163 pass
164 pass
164
165
165 # Python compatibility
166 # Python compatibility
166
167
167 _notset = object()
168 _notset = object()
168
169
169
170
170 def bitsfrom(container):
171 def bitsfrom(container):
171 bits = 0
172 bits = 0
172 for bit in container:
173 for bit in container:
173 bits |= bit
174 bits |= bit
174 return bits
175 return bits
175
176
176
177
177 # python 2.6 still have deprecation warning enabled by default. We do not want
178 # python 2.6 still have deprecation warning enabled by default. We do not want
178 # to display anything to standard user so detect if we are running test and
179 # to display anything to standard user so detect if we are running test and
179 # only use python deprecation warning in this case.
180 # only use python deprecation warning in this case.
180 _dowarn = bool(encoding.environ.get(b'HGEMITWARNINGS'))
181 _dowarn = bool(encoding.environ.get(b'HGEMITWARNINGS'))
181 if _dowarn:
182 if _dowarn:
182 # explicitly unfilter our warning for python 2.7
183 # explicitly unfilter our warning for python 2.7
183 #
184 #
184 # The option of setting PYTHONWARNINGS in the test runner was investigated.
185 # The option of setting PYTHONWARNINGS in the test runner was investigated.
185 # However, module name set through PYTHONWARNINGS was exactly matched, so
186 # However, module name set through PYTHONWARNINGS was exactly matched, so
186 # we cannot set 'mercurial' and have it match eg: 'mercurial.scmutil'. This
187 # we cannot set 'mercurial' and have it match eg: 'mercurial.scmutil'. This
187 # makes the whole PYTHONWARNINGS thing useless for our usecase.
188 # makes the whole PYTHONWARNINGS thing useless for our usecase.
188 warnings.filterwarnings('default', '', DeprecationWarning, 'mercurial')
189 warnings.filterwarnings('default', '', DeprecationWarning, 'mercurial')
189 warnings.filterwarnings('default', '', DeprecationWarning, 'hgext')
190 warnings.filterwarnings('default', '', DeprecationWarning, 'hgext')
190 warnings.filterwarnings('default', '', DeprecationWarning, 'hgext3rd')
191 warnings.filterwarnings('default', '', DeprecationWarning, 'hgext3rd')
191 if _dowarn and pycompat.ispy3:
192 if _dowarn and pycompat.ispy3:
192 # silence warning emitted by passing user string to re.sub()
193 # silence warning emitted by passing user string to re.sub()
193 warnings.filterwarnings(
194 warnings.filterwarnings(
194 'ignore', 'bad escape', DeprecationWarning, 'mercurial'
195 'ignore', 'bad escape', DeprecationWarning, 'mercurial'
195 )
196 )
196 warnings.filterwarnings(
197 warnings.filterwarnings(
197 'ignore', 'invalid escape sequence', DeprecationWarning, 'mercurial'
198 'ignore', 'invalid escape sequence', DeprecationWarning, 'mercurial'
198 )
199 )
199 # TODO: reinvent imp.is_frozen()
200 # TODO: reinvent imp.is_frozen()
200 warnings.filterwarnings(
201 warnings.filterwarnings(
201 'ignore',
202 'ignore',
202 'the imp module is deprecated',
203 'the imp module is deprecated',
203 DeprecationWarning,
204 DeprecationWarning,
204 'mercurial',
205 'mercurial',
205 )
206 )
206
207
207
208
208 def nouideprecwarn(msg, version, stacklevel=1):
209 def nouideprecwarn(msg, version, stacklevel=1):
209 """Issue an python native deprecation warning
210 """Issue an python native deprecation warning
210
211
211 This is a noop outside of tests, use 'ui.deprecwarn' when possible.
212 This is a noop outside of tests, use 'ui.deprecwarn' when possible.
212 """
213 """
213 if _dowarn:
214 if _dowarn:
214 msg += (
215 msg += (
215 b"\n(compatibility will be dropped after Mercurial-%s,"
216 b"\n(compatibility will be dropped after Mercurial-%s,"
216 b" update your code.)"
217 b" update your code.)"
217 ) % version
218 ) % version
218 warnings.warn(pycompat.sysstr(msg), DeprecationWarning, stacklevel + 1)
219 warnings.warn(pycompat.sysstr(msg), DeprecationWarning, stacklevel + 1)
219 # on python 3 with chg, we will need to explicitly flush the output
220 # on python 3 with chg, we will need to explicitly flush the output
220 sys.stderr.flush()
221 sys.stderr.flush()
221
222
222
223
223 DIGESTS = {
224 DIGESTS = {
224 b'md5': hashlib.md5,
225 b'md5': hashlib.md5,
225 b'sha1': hashutil.sha1,
226 b'sha1': hashutil.sha1,
226 b'sha512': hashlib.sha512,
227 b'sha512': hashlib.sha512,
227 }
228 }
228 # List of digest types from strongest to weakest
229 # List of digest types from strongest to weakest
229 DIGESTS_BY_STRENGTH = [b'sha512', b'sha1', b'md5']
230 DIGESTS_BY_STRENGTH = [b'sha512', b'sha1', b'md5']
230
231
231 for k in DIGESTS_BY_STRENGTH:
232 for k in DIGESTS_BY_STRENGTH:
232 assert k in DIGESTS
233 assert k in DIGESTS
233
234
234
235
235 class digester(object):
236 class digester(object):
236 """helper to compute digests.
237 """helper to compute digests.
237
238
238 This helper can be used to compute one or more digests given their name.
239 This helper can be used to compute one or more digests given their name.
239
240
240 >>> d = digester([b'md5', b'sha1'])
241 >>> d = digester([b'md5', b'sha1'])
241 >>> d.update(b'foo')
242 >>> d.update(b'foo')
242 >>> [k for k in sorted(d)]
243 >>> [k for k in sorted(d)]
243 ['md5', 'sha1']
244 ['md5', 'sha1']
244 >>> d[b'md5']
245 >>> d[b'md5']
245 'acbd18db4cc2f85cedef654fccc4a4d8'
246 'acbd18db4cc2f85cedef654fccc4a4d8'
246 >>> d[b'sha1']
247 >>> d[b'sha1']
247 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
248 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
248 >>> digester.preferred([b'md5', b'sha1'])
249 >>> digester.preferred([b'md5', b'sha1'])
249 'sha1'
250 'sha1'
250 """
251 """
251
252
252 def __init__(self, digests, s=b''):
253 def __init__(self, digests, s=b''):
253 self._hashes = {}
254 self._hashes = {}
254 for k in digests:
255 for k in digests:
255 if k not in DIGESTS:
256 if k not in DIGESTS:
256 raise error.Abort(_(b'unknown digest type: %s') % k)
257 raise error.Abort(_(b'unknown digest type: %s') % k)
257 self._hashes[k] = DIGESTS[k]()
258 self._hashes[k] = DIGESTS[k]()
258 if s:
259 if s:
259 self.update(s)
260 self.update(s)
260
261
261 def update(self, data):
262 def update(self, data):
262 for h in self._hashes.values():
263 for h in self._hashes.values():
263 h.update(data)
264 h.update(data)
264
265
265 def __getitem__(self, key):
266 def __getitem__(self, key):
266 if key not in DIGESTS:
267 if key not in DIGESTS:
267 raise error.Abort(_(b'unknown digest type: %s') % k)
268 raise error.Abort(_(b'unknown digest type: %s') % k)
268 return hex(self._hashes[key].digest())
269 return hex(self._hashes[key].digest())
269
270
270 def __iter__(self):
271 def __iter__(self):
271 return iter(self._hashes)
272 return iter(self._hashes)
272
273
273 @staticmethod
274 @staticmethod
274 def preferred(supported):
275 def preferred(supported):
275 """returns the strongest digest type in both supported and DIGESTS."""
276 """returns the strongest digest type in both supported and DIGESTS."""
276
277
277 for k in DIGESTS_BY_STRENGTH:
278 for k in DIGESTS_BY_STRENGTH:
278 if k in supported:
279 if k in supported:
279 return k
280 return k
280 return None
281 return None
281
282
282
283
283 class digestchecker(object):
284 class digestchecker(object):
284 """file handle wrapper that additionally checks content against a given
285 """file handle wrapper that additionally checks content against a given
285 size and digests.
286 size and digests.
286
287
287 d = digestchecker(fh, size, {'md5': '...'})
288 d = digestchecker(fh, size, {'md5': '...'})
288
289
289 When multiple digests are given, all of them are validated.
290 When multiple digests are given, all of them are validated.
290 """
291 """
291
292
292 def __init__(self, fh, size, digests):
293 def __init__(self, fh, size, digests):
293 self._fh = fh
294 self._fh = fh
294 self._size = size
295 self._size = size
295 self._got = 0
296 self._got = 0
296 self._digests = dict(digests)
297 self._digests = dict(digests)
297 self._digester = digester(self._digests.keys())
298 self._digester = digester(self._digests.keys())
298
299
299 def read(self, length=-1):
300 def read(self, length=-1):
300 content = self._fh.read(length)
301 content = self._fh.read(length)
301 self._digester.update(content)
302 self._digester.update(content)
302 self._got += len(content)
303 self._got += len(content)
303 return content
304 return content
304
305
305 def validate(self):
306 def validate(self):
306 if self._size != self._got:
307 if self._size != self._got:
307 raise error.Abort(
308 raise error.Abort(
308 _(b'size mismatch: expected %d, got %d')
309 _(b'size mismatch: expected %d, got %d')
309 % (self._size, self._got)
310 % (self._size, self._got)
310 )
311 )
311 for k, v in self._digests.items():
312 for k, v in self._digests.items():
312 if v != self._digester[k]:
313 if v != self._digester[k]:
313 # i18n: first parameter is a digest name
314 # i18n: first parameter is a digest name
314 raise error.Abort(
315 raise error.Abort(
315 _(b'%s mismatch: expected %s, got %s')
316 _(b'%s mismatch: expected %s, got %s')
316 % (k, v, self._digester[k])
317 % (k, v, self._digester[k])
317 )
318 )
318
319
319
320
320 try:
321 try:
321 buffer = buffer # pytype: disable=name-error
322 buffer = buffer # pytype: disable=name-error
322 except NameError:
323 except NameError:
323
324
324 def buffer(sliceable, offset=0, length=None):
325 def buffer(sliceable, offset=0, length=None):
325 if length is not None:
326 if length is not None:
326 return memoryview(sliceable)[offset : offset + length]
327 return memoryview(sliceable)[offset : offset + length]
327 return memoryview(sliceable)[offset:]
328 return memoryview(sliceable)[offset:]
328
329
329
330
330 _chunksize = 4096
331 _chunksize = 4096
331
332
332
333
333 class bufferedinputpipe(object):
334 class bufferedinputpipe(object):
334 """a manually buffered input pipe
335 """a manually buffered input pipe
335
336
336 Python will not let us use buffered IO and lazy reading with 'polling' at
337 Python will not let us use buffered IO and lazy reading with 'polling' at
337 the same time. We cannot probe the buffer state and select will not detect
338 the same time. We cannot probe the buffer state and select will not detect
338 that data are ready to read if they are already buffered.
339 that data are ready to read if they are already buffered.
339
340
340 This class let us work around that by implementing its own buffering
341 This class let us work around that by implementing its own buffering
341 (allowing efficient readline) while offering a way to know if the buffer is
342 (allowing efficient readline) while offering a way to know if the buffer is
342 empty from the output (allowing collaboration of the buffer with polling).
343 empty from the output (allowing collaboration of the buffer with polling).
343
344
344 This class lives in the 'util' module because it makes use of the 'os'
345 This class lives in the 'util' module because it makes use of the 'os'
345 module from the python stdlib.
346 module from the python stdlib.
346 """
347 """
347
348
348 def __new__(cls, fh):
349 def __new__(cls, fh):
349 # If we receive a fileobjectproxy, we need to use a variation of this
350 # If we receive a fileobjectproxy, we need to use a variation of this
350 # class that notifies observers about activity.
351 # class that notifies observers about activity.
351 if isinstance(fh, fileobjectproxy):
352 if isinstance(fh, fileobjectproxy):
352 cls = observedbufferedinputpipe
353 cls = observedbufferedinputpipe
353
354
354 return super(bufferedinputpipe, cls).__new__(cls)
355 return super(bufferedinputpipe, cls).__new__(cls)
355
356
356 def __init__(self, input):
357 def __init__(self, input):
357 self._input = input
358 self._input = input
358 self._buffer = []
359 self._buffer = []
359 self._eof = False
360 self._eof = False
360 self._lenbuf = 0
361 self._lenbuf = 0
361
362
362 @property
363 @property
363 def hasbuffer(self):
364 def hasbuffer(self):
364 """True is any data is currently buffered
365 """True is any data is currently buffered
365
366
366 This will be used externally a pre-step for polling IO. If there is
367 This will be used externally a pre-step for polling IO. If there is
367 already data then no polling should be set in place."""
368 already data then no polling should be set in place."""
368 return bool(self._buffer)
369 return bool(self._buffer)
369
370
370 @property
371 @property
371 def closed(self):
372 def closed(self):
372 return self._input.closed
373 return self._input.closed
373
374
374 def fileno(self):
375 def fileno(self):
375 return self._input.fileno()
376 return self._input.fileno()
376
377
377 def close(self):
378 def close(self):
378 return self._input.close()
379 return self._input.close()
379
380
380 def read(self, size):
381 def read(self, size):
381 while (not self._eof) and (self._lenbuf < size):
382 while (not self._eof) and (self._lenbuf < size):
382 self._fillbuffer()
383 self._fillbuffer()
383 return self._frombuffer(size)
384 return self._frombuffer(size)
384
385
385 def unbufferedread(self, size):
386 def unbufferedread(self, size):
386 if not self._eof and self._lenbuf == 0:
387 if not self._eof and self._lenbuf == 0:
387 self._fillbuffer(max(size, _chunksize))
388 self._fillbuffer(max(size, _chunksize))
388 return self._frombuffer(min(self._lenbuf, size))
389 return self._frombuffer(min(self._lenbuf, size))
389
390
390 def readline(self, *args, **kwargs):
391 def readline(self, *args, **kwargs):
391 if len(self._buffer) > 1:
392 if len(self._buffer) > 1:
392 # this should not happen because both read and readline end with a
393 # this should not happen because both read and readline end with a
393 # _frombuffer call that collapse it.
394 # _frombuffer call that collapse it.
394 self._buffer = [b''.join(self._buffer)]
395 self._buffer = [b''.join(self._buffer)]
395 self._lenbuf = len(self._buffer[0])
396 self._lenbuf = len(self._buffer[0])
396 lfi = -1
397 lfi = -1
397 if self._buffer:
398 if self._buffer:
398 lfi = self._buffer[-1].find(b'\n')
399 lfi = self._buffer[-1].find(b'\n')
399 while (not self._eof) and lfi < 0:
400 while (not self._eof) and lfi < 0:
400 self._fillbuffer()
401 self._fillbuffer()
401 if self._buffer:
402 if self._buffer:
402 lfi = self._buffer[-1].find(b'\n')
403 lfi = self._buffer[-1].find(b'\n')
403 size = lfi + 1
404 size = lfi + 1
404 if lfi < 0: # end of file
405 if lfi < 0: # end of file
405 size = self._lenbuf
406 size = self._lenbuf
406 elif len(self._buffer) > 1:
407 elif len(self._buffer) > 1:
407 # we need to take previous chunks into account
408 # we need to take previous chunks into account
408 size += self._lenbuf - len(self._buffer[-1])
409 size += self._lenbuf - len(self._buffer[-1])
409 return self._frombuffer(size)
410 return self._frombuffer(size)
410
411
411 def _frombuffer(self, size):
412 def _frombuffer(self, size):
412 """return at most 'size' data from the buffer
413 """return at most 'size' data from the buffer
413
414
414 The data are removed from the buffer."""
415 The data are removed from the buffer."""
415 if size == 0 or not self._buffer:
416 if size == 0 or not self._buffer:
416 return b''
417 return b''
417 buf = self._buffer[0]
418 buf = self._buffer[0]
418 if len(self._buffer) > 1:
419 if len(self._buffer) > 1:
419 buf = b''.join(self._buffer)
420 buf = b''.join(self._buffer)
420
421
421 data = buf[:size]
422 data = buf[:size]
422 buf = buf[len(data) :]
423 buf = buf[len(data) :]
423 if buf:
424 if buf:
424 self._buffer = [buf]
425 self._buffer = [buf]
425 self._lenbuf = len(buf)
426 self._lenbuf = len(buf)
426 else:
427 else:
427 self._buffer = []
428 self._buffer = []
428 self._lenbuf = 0
429 self._lenbuf = 0
429 return data
430 return data
430
431
431 def _fillbuffer(self, size=_chunksize):
432 def _fillbuffer(self, size=_chunksize):
432 """read data to the buffer"""
433 """read data to the buffer"""
433 data = os.read(self._input.fileno(), size)
434 data = os.read(self._input.fileno(), size)
434 if not data:
435 if not data:
435 self._eof = True
436 self._eof = True
436 else:
437 else:
437 self._lenbuf += len(data)
438 self._lenbuf += len(data)
438 self._buffer.append(data)
439 self._buffer.append(data)
439
440
440 return data
441 return data
441
442
442
443
443 def mmapread(fp, size=None):
444 def mmapread(fp, size=None):
444 if size == 0:
445 if size == 0:
445 # size of 0 to mmap.mmap() means "all data"
446 # size of 0 to mmap.mmap() means "all data"
446 # rather than "zero bytes", so special case that.
447 # rather than "zero bytes", so special case that.
447 return b''
448 return b''
448 elif size is None:
449 elif size is None:
449 size = 0
450 size = 0
450 try:
451 try:
451 fd = getattr(fp, 'fileno', lambda: fp)()
452 fd = getattr(fp, 'fileno', lambda: fp)()
452 return mmap.mmap(fd, size, access=mmap.ACCESS_READ)
453 return mmap.mmap(fd, size, access=mmap.ACCESS_READ)
453 except ValueError:
454 except ValueError:
454 # Empty files cannot be mmapped, but mmapread should still work. Check
455 # Empty files cannot be mmapped, but mmapread should still work. Check
455 # if the file is empty, and if so, return an empty buffer.
456 # if the file is empty, and if so, return an empty buffer.
456 if os.fstat(fd).st_size == 0:
457 if os.fstat(fd).st_size == 0:
457 return b''
458 return b''
458 raise
459 raise
459
460
460
461
461 class fileobjectproxy(object):
462 class fileobjectproxy(object):
462 """A proxy around file objects that tells a watcher when events occur.
463 """A proxy around file objects that tells a watcher when events occur.
463
464
464 This type is intended to only be used for testing purposes. Think hard
465 This type is intended to only be used for testing purposes. Think hard
465 before using it in important code.
466 before using it in important code.
466 """
467 """
467
468
468 __slots__ = (
469 __slots__ = (
469 '_orig',
470 '_orig',
470 '_observer',
471 '_observer',
471 )
472 )
472
473
473 def __init__(self, fh, observer):
474 def __init__(self, fh, observer):
474 object.__setattr__(self, '_orig', fh)
475 object.__setattr__(self, '_orig', fh)
475 object.__setattr__(self, '_observer', observer)
476 object.__setattr__(self, '_observer', observer)
476
477
477 def __getattribute__(self, name):
478 def __getattribute__(self, name):
478 ours = {
479 ours = {
479 '_observer',
480 '_observer',
480 # IOBase
481 # IOBase
481 'close',
482 'close',
482 # closed if a property
483 # closed if a property
483 'fileno',
484 'fileno',
484 'flush',
485 'flush',
485 'isatty',
486 'isatty',
486 'readable',
487 'readable',
487 'readline',
488 'readline',
488 'readlines',
489 'readlines',
489 'seek',
490 'seek',
490 'seekable',
491 'seekable',
491 'tell',
492 'tell',
492 'truncate',
493 'truncate',
493 'writable',
494 'writable',
494 'writelines',
495 'writelines',
495 # RawIOBase
496 # RawIOBase
496 'read',
497 'read',
497 'readall',
498 'readall',
498 'readinto',
499 'readinto',
499 'write',
500 'write',
500 # BufferedIOBase
501 # BufferedIOBase
501 # raw is a property
502 # raw is a property
502 'detach',
503 'detach',
503 # read defined above
504 # read defined above
504 'read1',
505 'read1',
505 # readinto defined above
506 # readinto defined above
506 # write defined above
507 # write defined above
507 }
508 }
508
509
509 # We only observe some methods.
510 # We only observe some methods.
510 if name in ours:
511 if name in ours:
511 return object.__getattribute__(self, name)
512 return object.__getattribute__(self, name)
512
513
513 return getattr(object.__getattribute__(self, '_orig'), name)
514 return getattr(object.__getattribute__(self, '_orig'), name)
514
515
515 def __nonzero__(self):
516 def __nonzero__(self):
516 return bool(object.__getattribute__(self, '_orig'))
517 return bool(object.__getattribute__(self, '_orig'))
517
518
518 __bool__ = __nonzero__
519 __bool__ = __nonzero__
519
520
520 def __delattr__(self, name):
521 def __delattr__(self, name):
521 return delattr(object.__getattribute__(self, '_orig'), name)
522 return delattr(object.__getattribute__(self, '_orig'), name)
522
523
523 def __setattr__(self, name, value):
524 def __setattr__(self, name, value):
524 return setattr(object.__getattribute__(self, '_orig'), name, value)
525 return setattr(object.__getattribute__(self, '_orig'), name, value)
525
526
526 def __iter__(self):
527 def __iter__(self):
527 return object.__getattribute__(self, '_orig').__iter__()
528 return object.__getattribute__(self, '_orig').__iter__()
528
529
529 def _observedcall(self, name, *args, **kwargs):
530 def _observedcall(self, name, *args, **kwargs):
530 # Call the original object.
531 # Call the original object.
531 orig = object.__getattribute__(self, '_orig')
532 orig = object.__getattribute__(self, '_orig')
532 res = getattr(orig, name)(*args, **kwargs)
533 res = getattr(orig, name)(*args, **kwargs)
533
534
534 # Call a method on the observer of the same name with arguments
535 # Call a method on the observer of the same name with arguments
535 # so it can react, log, etc.
536 # so it can react, log, etc.
536 observer = object.__getattribute__(self, '_observer')
537 observer = object.__getattribute__(self, '_observer')
537 fn = getattr(observer, name, None)
538 fn = getattr(observer, name, None)
538 if fn:
539 if fn:
539 fn(res, *args, **kwargs)
540 fn(res, *args, **kwargs)
540
541
541 return res
542 return res
542
543
543 def close(self, *args, **kwargs):
544 def close(self, *args, **kwargs):
544 return object.__getattribute__(self, '_observedcall')(
545 return object.__getattribute__(self, '_observedcall')(
545 'close', *args, **kwargs
546 'close', *args, **kwargs
546 )
547 )
547
548
548 def fileno(self, *args, **kwargs):
549 def fileno(self, *args, **kwargs):
549 return object.__getattribute__(self, '_observedcall')(
550 return object.__getattribute__(self, '_observedcall')(
550 'fileno', *args, **kwargs
551 'fileno', *args, **kwargs
551 )
552 )
552
553
553 def flush(self, *args, **kwargs):
554 def flush(self, *args, **kwargs):
554 return object.__getattribute__(self, '_observedcall')(
555 return object.__getattribute__(self, '_observedcall')(
555 'flush', *args, **kwargs
556 'flush', *args, **kwargs
556 )
557 )
557
558
558 def isatty(self, *args, **kwargs):
559 def isatty(self, *args, **kwargs):
559 return object.__getattribute__(self, '_observedcall')(
560 return object.__getattribute__(self, '_observedcall')(
560 'isatty', *args, **kwargs
561 'isatty', *args, **kwargs
561 )
562 )
562
563
563 def readable(self, *args, **kwargs):
564 def readable(self, *args, **kwargs):
564 return object.__getattribute__(self, '_observedcall')(
565 return object.__getattribute__(self, '_observedcall')(
565 'readable', *args, **kwargs
566 'readable', *args, **kwargs
566 )
567 )
567
568
568 def readline(self, *args, **kwargs):
569 def readline(self, *args, **kwargs):
569 return object.__getattribute__(self, '_observedcall')(
570 return object.__getattribute__(self, '_observedcall')(
570 'readline', *args, **kwargs
571 'readline', *args, **kwargs
571 )
572 )
572
573
573 def readlines(self, *args, **kwargs):
574 def readlines(self, *args, **kwargs):
574 return object.__getattribute__(self, '_observedcall')(
575 return object.__getattribute__(self, '_observedcall')(
575 'readlines', *args, **kwargs
576 'readlines', *args, **kwargs
576 )
577 )
577
578
578 def seek(self, *args, **kwargs):
579 def seek(self, *args, **kwargs):
579 return object.__getattribute__(self, '_observedcall')(
580 return object.__getattribute__(self, '_observedcall')(
580 'seek', *args, **kwargs
581 'seek', *args, **kwargs
581 )
582 )
582
583
583 def seekable(self, *args, **kwargs):
584 def seekable(self, *args, **kwargs):
584 return object.__getattribute__(self, '_observedcall')(
585 return object.__getattribute__(self, '_observedcall')(
585 'seekable', *args, **kwargs
586 'seekable', *args, **kwargs
586 )
587 )
587
588
588 def tell(self, *args, **kwargs):
589 def tell(self, *args, **kwargs):
589 return object.__getattribute__(self, '_observedcall')(
590 return object.__getattribute__(self, '_observedcall')(
590 'tell', *args, **kwargs
591 'tell', *args, **kwargs
591 )
592 )
592
593
593 def truncate(self, *args, **kwargs):
594 def truncate(self, *args, **kwargs):
594 return object.__getattribute__(self, '_observedcall')(
595 return object.__getattribute__(self, '_observedcall')(
595 'truncate', *args, **kwargs
596 'truncate', *args, **kwargs
596 )
597 )
597
598
598 def writable(self, *args, **kwargs):
599 def writable(self, *args, **kwargs):
599 return object.__getattribute__(self, '_observedcall')(
600 return object.__getattribute__(self, '_observedcall')(
600 'writable', *args, **kwargs
601 'writable', *args, **kwargs
601 )
602 )
602
603
603 def writelines(self, *args, **kwargs):
604 def writelines(self, *args, **kwargs):
604 return object.__getattribute__(self, '_observedcall')(
605 return object.__getattribute__(self, '_observedcall')(
605 'writelines', *args, **kwargs
606 'writelines', *args, **kwargs
606 )
607 )
607
608
608 def read(self, *args, **kwargs):
609 def read(self, *args, **kwargs):
609 return object.__getattribute__(self, '_observedcall')(
610 return object.__getattribute__(self, '_observedcall')(
610 'read', *args, **kwargs
611 'read', *args, **kwargs
611 )
612 )
612
613
613 def readall(self, *args, **kwargs):
614 def readall(self, *args, **kwargs):
614 return object.__getattribute__(self, '_observedcall')(
615 return object.__getattribute__(self, '_observedcall')(
615 'readall', *args, **kwargs
616 'readall', *args, **kwargs
616 )
617 )
617
618
618 def readinto(self, *args, **kwargs):
619 def readinto(self, *args, **kwargs):
619 return object.__getattribute__(self, '_observedcall')(
620 return object.__getattribute__(self, '_observedcall')(
620 'readinto', *args, **kwargs
621 'readinto', *args, **kwargs
621 )
622 )
622
623
623 def write(self, *args, **kwargs):
624 def write(self, *args, **kwargs):
624 return object.__getattribute__(self, '_observedcall')(
625 return object.__getattribute__(self, '_observedcall')(
625 'write', *args, **kwargs
626 'write', *args, **kwargs
626 )
627 )
627
628
628 def detach(self, *args, **kwargs):
629 def detach(self, *args, **kwargs):
629 return object.__getattribute__(self, '_observedcall')(
630 return object.__getattribute__(self, '_observedcall')(
630 'detach', *args, **kwargs
631 'detach', *args, **kwargs
631 )
632 )
632
633
633 def read1(self, *args, **kwargs):
634 def read1(self, *args, **kwargs):
634 return object.__getattribute__(self, '_observedcall')(
635 return object.__getattribute__(self, '_observedcall')(
635 'read1', *args, **kwargs
636 'read1', *args, **kwargs
636 )
637 )
637
638
638
639
639 class observedbufferedinputpipe(bufferedinputpipe):
640 class observedbufferedinputpipe(bufferedinputpipe):
640 """A variation of bufferedinputpipe that is aware of fileobjectproxy.
641 """A variation of bufferedinputpipe that is aware of fileobjectproxy.
641
642
642 ``bufferedinputpipe`` makes low-level calls to ``os.read()`` that
643 ``bufferedinputpipe`` makes low-level calls to ``os.read()`` that
643 bypass ``fileobjectproxy``. Because of this, we need to make
644 bypass ``fileobjectproxy``. Because of this, we need to make
644 ``bufferedinputpipe`` aware of these operations.
645 ``bufferedinputpipe`` aware of these operations.
645
646
646 This variation of ``bufferedinputpipe`` can notify observers about
647 This variation of ``bufferedinputpipe`` can notify observers about
647 ``os.read()`` events. It also re-publishes other events, such as
648 ``os.read()`` events. It also re-publishes other events, such as
648 ``read()`` and ``readline()``.
649 ``read()`` and ``readline()``.
649 """
650 """
650
651
651 def _fillbuffer(self):
652 def _fillbuffer(self):
652 res = super(observedbufferedinputpipe, self)._fillbuffer()
653 res = super(observedbufferedinputpipe, self)._fillbuffer()
653
654
654 fn = getattr(self._input._observer, 'osread', None)
655 fn = getattr(self._input._observer, 'osread', None)
655 if fn:
656 if fn:
656 fn(res, _chunksize)
657 fn(res, _chunksize)
657
658
658 return res
659 return res
659
660
660 # We use different observer methods because the operation isn't
661 # We use different observer methods because the operation isn't
661 # performed on the actual file object but on us.
662 # performed on the actual file object but on us.
662 def read(self, size):
663 def read(self, size):
663 res = super(observedbufferedinputpipe, self).read(size)
664 res = super(observedbufferedinputpipe, self).read(size)
664
665
665 fn = getattr(self._input._observer, 'bufferedread', None)
666 fn = getattr(self._input._observer, 'bufferedread', None)
666 if fn:
667 if fn:
667 fn(res, size)
668 fn(res, size)
668
669
669 return res
670 return res
670
671
671 def readline(self, *args, **kwargs):
672 def readline(self, *args, **kwargs):
672 res = super(observedbufferedinputpipe, self).readline(*args, **kwargs)
673 res = super(observedbufferedinputpipe, self).readline(*args, **kwargs)
673
674
674 fn = getattr(self._input._observer, 'bufferedreadline', None)
675 fn = getattr(self._input._observer, 'bufferedreadline', None)
675 if fn:
676 if fn:
676 fn(res)
677 fn(res)
677
678
678 return res
679 return res
679
680
680
681
681 PROXIED_SOCKET_METHODS = {
682 PROXIED_SOCKET_METHODS = {
682 'makefile',
683 'makefile',
683 'recv',
684 'recv',
684 'recvfrom',
685 'recvfrom',
685 'recvfrom_into',
686 'recvfrom_into',
686 'recv_into',
687 'recv_into',
687 'send',
688 'send',
688 'sendall',
689 'sendall',
689 'sendto',
690 'sendto',
690 'setblocking',
691 'setblocking',
691 'settimeout',
692 'settimeout',
692 'gettimeout',
693 'gettimeout',
693 'setsockopt',
694 'setsockopt',
694 }
695 }
695
696
696
697
697 class socketproxy(object):
698 class socketproxy(object):
698 """A proxy around a socket that tells a watcher when events occur.
699 """A proxy around a socket that tells a watcher when events occur.
699
700
700 This is like ``fileobjectproxy`` except for sockets.
701 This is like ``fileobjectproxy`` except for sockets.
701
702
702 This type is intended to only be used for testing purposes. Think hard
703 This type is intended to only be used for testing purposes. Think hard
703 before using it in important code.
704 before using it in important code.
704 """
705 """
705
706
706 __slots__ = (
707 __slots__ = (
707 '_orig',
708 '_orig',
708 '_observer',
709 '_observer',
709 )
710 )
710
711
711 def __init__(self, sock, observer):
712 def __init__(self, sock, observer):
712 object.__setattr__(self, '_orig', sock)
713 object.__setattr__(self, '_orig', sock)
713 object.__setattr__(self, '_observer', observer)
714 object.__setattr__(self, '_observer', observer)
714
715
715 def __getattribute__(self, name):
716 def __getattribute__(self, name):
716 if name in PROXIED_SOCKET_METHODS:
717 if name in PROXIED_SOCKET_METHODS:
717 return object.__getattribute__(self, name)
718 return object.__getattribute__(self, name)
718
719
719 return getattr(object.__getattribute__(self, '_orig'), name)
720 return getattr(object.__getattribute__(self, '_orig'), name)
720
721
721 def __delattr__(self, name):
722 def __delattr__(self, name):
722 return delattr(object.__getattribute__(self, '_orig'), name)
723 return delattr(object.__getattribute__(self, '_orig'), name)
723
724
724 def __setattr__(self, name, value):
725 def __setattr__(self, name, value):
725 return setattr(object.__getattribute__(self, '_orig'), name, value)
726 return setattr(object.__getattribute__(self, '_orig'), name, value)
726
727
727 def __nonzero__(self):
728 def __nonzero__(self):
728 return bool(object.__getattribute__(self, '_orig'))
729 return bool(object.__getattribute__(self, '_orig'))
729
730
730 __bool__ = __nonzero__
731 __bool__ = __nonzero__
731
732
732 def _observedcall(self, name, *args, **kwargs):
733 def _observedcall(self, name, *args, **kwargs):
733 # Call the original object.
734 # Call the original object.
734 orig = object.__getattribute__(self, '_orig')
735 orig = object.__getattribute__(self, '_orig')
735 res = getattr(orig, name)(*args, **kwargs)
736 res = getattr(orig, name)(*args, **kwargs)
736
737
737 # Call a method on the observer of the same name with arguments
738 # Call a method on the observer of the same name with arguments
738 # so it can react, log, etc.
739 # so it can react, log, etc.
739 observer = object.__getattribute__(self, '_observer')
740 observer = object.__getattribute__(self, '_observer')
740 fn = getattr(observer, name, None)
741 fn = getattr(observer, name, None)
741 if fn:
742 if fn:
742 fn(res, *args, **kwargs)
743 fn(res, *args, **kwargs)
743
744
744 return res
745 return res
745
746
746 def makefile(self, *args, **kwargs):
747 def makefile(self, *args, **kwargs):
747 res = object.__getattribute__(self, '_observedcall')(
748 res = object.__getattribute__(self, '_observedcall')(
748 'makefile', *args, **kwargs
749 'makefile', *args, **kwargs
749 )
750 )
750
751
751 # The file object may be used for I/O. So we turn it into a
752 # The file object may be used for I/O. So we turn it into a
752 # proxy using our observer.
753 # proxy using our observer.
753 observer = object.__getattribute__(self, '_observer')
754 observer = object.__getattribute__(self, '_observer')
754 return makeloggingfileobject(
755 return makeloggingfileobject(
755 observer.fh,
756 observer.fh,
756 res,
757 res,
757 observer.name,
758 observer.name,
758 reads=observer.reads,
759 reads=observer.reads,
759 writes=observer.writes,
760 writes=observer.writes,
760 logdata=observer.logdata,
761 logdata=observer.logdata,
761 logdataapis=observer.logdataapis,
762 logdataapis=observer.logdataapis,
762 )
763 )
763
764
764 def recv(self, *args, **kwargs):
765 def recv(self, *args, **kwargs):
765 return object.__getattribute__(self, '_observedcall')(
766 return object.__getattribute__(self, '_observedcall')(
766 'recv', *args, **kwargs
767 'recv', *args, **kwargs
767 )
768 )
768
769
769 def recvfrom(self, *args, **kwargs):
770 def recvfrom(self, *args, **kwargs):
770 return object.__getattribute__(self, '_observedcall')(
771 return object.__getattribute__(self, '_observedcall')(
771 'recvfrom', *args, **kwargs
772 'recvfrom', *args, **kwargs
772 )
773 )
773
774
774 def recvfrom_into(self, *args, **kwargs):
775 def recvfrom_into(self, *args, **kwargs):
775 return object.__getattribute__(self, '_observedcall')(
776 return object.__getattribute__(self, '_observedcall')(
776 'recvfrom_into', *args, **kwargs
777 'recvfrom_into', *args, **kwargs
777 )
778 )
778
779
779 def recv_into(self, *args, **kwargs):
780 def recv_into(self, *args, **kwargs):
780 return object.__getattribute__(self, '_observedcall')(
781 return object.__getattribute__(self, '_observedcall')(
781 'recv_info', *args, **kwargs
782 'recv_info', *args, **kwargs
782 )
783 )
783
784
784 def send(self, *args, **kwargs):
785 def send(self, *args, **kwargs):
785 return object.__getattribute__(self, '_observedcall')(
786 return object.__getattribute__(self, '_observedcall')(
786 'send', *args, **kwargs
787 'send', *args, **kwargs
787 )
788 )
788
789
789 def sendall(self, *args, **kwargs):
790 def sendall(self, *args, **kwargs):
790 return object.__getattribute__(self, '_observedcall')(
791 return object.__getattribute__(self, '_observedcall')(
791 'sendall', *args, **kwargs
792 'sendall', *args, **kwargs
792 )
793 )
793
794
794 def sendto(self, *args, **kwargs):
795 def sendto(self, *args, **kwargs):
795 return object.__getattribute__(self, '_observedcall')(
796 return object.__getattribute__(self, '_observedcall')(
796 'sendto', *args, **kwargs
797 'sendto', *args, **kwargs
797 )
798 )
798
799
799 def setblocking(self, *args, **kwargs):
800 def setblocking(self, *args, **kwargs):
800 return object.__getattribute__(self, '_observedcall')(
801 return object.__getattribute__(self, '_observedcall')(
801 'setblocking', *args, **kwargs
802 'setblocking', *args, **kwargs
802 )
803 )
803
804
804 def settimeout(self, *args, **kwargs):
805 def settimeout(self, *args, **kwargs):
805 return object.__getattribute__(self, '_observedcall')(
806 return object.__getattribute__(self, '_observedcall')(
806 'settimeout', *args, **kwargs
807 'settimeout', *args, **kwargs
807 )
808 )
808
809
809 def gettimeout(self, *args, **kwargs):
810 def gettimeout(self, *args, **kwargs):
810 return object.__getattribute__(self, '_observedcall')(
811 return object.__getattribute__(self, '_observedcall')(
811 'gettimeout', *args, **kwargs
812 'gettimeout', *args, **kwargs
812 )
813 )
813
814
814 def setsockopt(self, *args, **kwargs):
815 def setsockopt(self, *args, **kwargs):
815 return object.__getattribute__(self, '_observedcall')(
816 return object.__getattribute__(self, '_observedcall')(
816 'setsockopt', *args, **kwargs
817 'setsockopt', *args, **kwargs
817 )
818 )
818
819
819
820
820 class baseproxyobserver(object):
821 class baseproxyobserver(object):
821 def __init__(self, fh, name, logdata, logdataapis):
822 def __init__(self, fh, name, logdata, logdataapis):
822 self.fh = fh
823 self.fh = fh
823 self.name = name
824 self.name = name
824 self.logdata = logdata
825 self.logdata = logdata
825 self.logdataapis = logdataapis
826 self.logdataapis = logdataapis
826
827
827 def _writedata(self, data):
828 def _writedata(self, data):
828 if not self.logdata:
829 if not self.logdata:
829 if self.logdataapis:
830 if self.logdataapis:
830 self.fh.write(b'\n')
831 self.fh.write(b'\n')
831 self.fh.flush()
832 self.fh.flush()
832 return
833 return
833
834
834 # Simple case writes all data on a single line.
835 # Simple case writes all data on a single line.
835 if b'\n' not in data:
836 if b'\n' not in data:
836 if self.logdataapis:
837 if self.logdataapis:
837 self.fh.write(b': %s\n' % stringutil.escapestr(data))
838 self.fh.write(b': %s\n' % stringutil.escapestr(data))
838 else:
839 else:
839 self.fh.write(
840 self.fh.write(
840 b'%s> %s\n' % (self.name, stringutil.escapestr(data))
841 b'%s> %s\n' % (self.name, stringutil.escapestr(data))
841 )
842 )
842 self.fh.flush()
843 self.fh.flush()
843 return
844 return
844
845
845 # Data with newlines is written to multiple lines.
846 # Data with newlines is written to multiple lines.
846 if self.logdataapis:
847 if self.logdataapis:
847 self.fh.write(b':\n')
848 self.fh.write(b':\n')
848
849
849 lines = data.splitlines(True)
850 lines = data.splitlines(True)
850 for line in lines:
851 for line in lines:
851 self.fh.write(
852 self.fh.write(
852 b'%s> %s\n' % (self.name, stringutil.escapestr(line))
853 b'%s> %s\n' % (self.name, stringutil.escapestr(line))
853 )
854 )
854 self.fh.flush()
855 self.fh.flush()
855
856
856
857
857 class fileobjectobserver(baseproxyobserver):
858 class fileobjectobserver(baseproxyobserver):
858 """Logs file object activity."""
859 """Logs file object activity."""
859
860
860 def __init__(
861 def __init__(
861 self, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
862 self, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
862 ):
863 ):
863 super(fileobjectobserver, self).__init__(fh, name, logdata, logdataapis)
864 super(fileobjectobserver, self).__init__(fh, name, logdata, logdataapis)
864 self.reads = reads
865 self.reads = reads
865 self.writes = writes
866 self.writes = writes
866
867
867 def read(self, res, size=-1):
868 def read(self, res, size=-1):
868 if not self.reads:
869 if not self.reads:
869 return
870 return
870 # Python 3 can return None from reads at EOF instead of empty strings.
871 # Python 3 can return None from reads at EOF instead of empty strings.
871 if res is None:
872 if res is None:
872 res = b''
873 res = b''
873
874
874 if size == -1 and res == b'':
875 if size == -1 and res == b'':
875 # Suppress pointless read(-1) calls that return
876 # Suppress pointless read(-1) calls that return
876 # nothing. These happen _a lot_ on Python 3, and there
877 # nothing. These happen _a lot_ on Python 3, and there
877 # doesn't seem to be a better workaround to have matching
878 # doesn't seem to be a better workaround to have matching
878 # Python 2 and 3 behavior. :(
879 # Python 2 and 3 behavior. :(
879 return
880 return
880
881
881 if self.logdataapis:
882 if self.logdataapis:
882 self.fh.write(b'%s> read(%d) -> %d' % (self.name, size, len(res)))
883 self.fh.write(b'%s> read(%d) -> %d' % (self.name, size, len(res)))
883
884
884 self._writedata(res)
885 self._writedata(res)
885
886
886 def readline(self, res, limit=-1):
887 def readline(self, res, limit=-1):
887 if not self.reads:
888 if not self.reads:
888 return
889 return
889
890
890 if self.logdataapis:
891 if self.logdataapis:
891 self.fh.write(b'%s> readline() -> %d' % (self.name, len(res)))
892 self.fh.write(b'%s> readline() -> %d' % (self.name, len(res)))
892
893
893 self._writedata(res)
894 self._writedata(res)
894
895
895 def readinto(self, res, dest):
896 def readinto(self, res, dest):
896 if not self.reads:
897 if not self.reads:
897 return
898 return
898
899
899 if self.logdataapis:
900 if self.logdataapis:
900 self.fh.write(
901 self.fh.write(
901 b'%s> readinto(%d) -> %r' % (self.name, len(dest), res)
902 b'%s> readinto(%d) -> %r' % (self.name, len(dest), res)
902 )
903 )
903
904
904 data = dest[0:res] if res is not None else b''
905 data = dest[0:res] if res is not None else b''
905
906
906 # _writedata() uses "in" operator and is confused by memoryview because
907 # _writedata() uses "in" operator and is confused by memoryview because
907 # characters are ints on Python 3.
908 # characters are ints on Python 3.
908 if isinstance(data, memoryview):
909 if isinstance(data, memoryview):
909 data = data.tobytes()
910 data = data.tobytes()
910
911
911 self._writedata(data)
912 self._writedata(data)
912
913
913 def write(self, res, data):
914 def write(self, res, data):
914 if not self.writes:
915 if not self.writes:
915 return
916 return
916
917
917 # Python 2 returns None from some write() calls. Python 3 (reasonably)
918 # Python 2 returns None from some write() calls. Python 3 (reasonably)
918 # returns the integer bytes written.
919 # returns the integer bytes written.
919 if res is None and data:
920 if res is None and data:
920 res = len(data)
921 res = len(data)
921
922
922 if self.logdataapis:
923 if self.logdataapis:
923 self.fh.write(b'%s> write(%d) -> %r' % (self.name, len(data), res))
924 self.fh.write(b'%s> write(%d) -> %r' % (self.name, len(data), res))
924
925
925 self._writedata(data)
926 self._writedata(data)
926
927
927 def flush(self, res):
928 def flush(self, res):
928 if not self.writes:
929 if not self.writes:
929 return
930 return
930
931
931 self.fh.write(b'%s> flush() -> %r\n' % (self.name, res))
932 self.fh.write(b'%s> flush() -> %r\n' % (self.name, res))
932
933
933 # For observedbufferedinputpipe.
934 # For observedbufferedinputpipe.
934 def bufferedread(self, res, size):
935 def bufferedread(self, res, size):
935 if not self.reads:
936 if not self.reads:
936 return
937 return
937
938
938 if self.logdataapis:
939 if self.logdataapis:
939 self.fh.write(
940 self.fh.write(
940 b'%s> bufferedread(%d) -> %d' % (self.name, size, len(res))
941 b'%s> bufferedread(%d) -> %d' % (self.name, size, len(res))
941 )
942 )
942
943
943 self._writedata(res)
944 self._writedata(res)
944
945
945 def bufferedreadline(self, res):
946 def bufferedreadline(self, res):
946 if not self.reads:
947 if not self.reads:
947 return
948 return
948
949
949 if self.logdataapis:
950 if self.logdataapis:
950 self.fh.write(
951 self.fh.write(
951 b'%s> bufferedreadline() -> %d' % (self.name, len(res))
952 b'%s> bufferedreadline() -> %d' % (self.name, len(res))
952 )
953 )
953
954
954 self._writedata(res)
955 self._writedata(res)
955
956
956
957
def makeloggingfileobject(
    logh, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
):
    """Turn a file object into a logging file object.

    ``logh`` receives the log output; ``fh`` is the file object to wrap;
    ``name`` labels each logged line. The boolean flags select which
    operations are logged and whether raw data / API calls are recorded.
    Returns a ``fileobjectproxy`` wrapping ``fh``.
    """

    observer = fileobjectobserver(
        logh,
        name,
        reads=reads,
        writes=writes,
        logdata=logdata,
        logdataapis=logdataapis,
    )
    return fileobjectproxy(fh, observer)
971
972
972
973
class socketobserver(baseproxyobserver):
    """Logs socket activity."""

    def __init__(
        self,
        fh,
        name,
        reads=True,
        writes=True,
        states=True,
        logdata=False,
        logdataapis=True,
    ):
        super(socketobserver, self).__init__(fh, name, logdata, logdataapis)
        # Which categories of socket activity to log.
        self.reads = reads
        self.writes = writes
        self.states = states

    def makefile(self, res, mode=None, bufsize=None):
        """Log a ``makefile(mode, bufsize)`` call."""
        if not self.states:
            return

        self.fh.write(b'%s> makefile(%r, %r)\n' % (self.name, mode, bufsize))

    def recv(self, res, size, flags=0):
        """Log a ``recv(size, flags)`` call that returned ``res``."""
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> recv(%d, %d) -> %d' % (self.name, size, flags, len(res))
            )
        self._writedata(res)

    def recvfrom(self, res, size, flags=0):
        """Log a ``recvfrom`` call; ``res`` is a (data, address) pair."""
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> recvfrom(%d, %d) -> %d'
                % (self.name, size, flags, len(res[0]))
            )

        self._writedata(res[0])

    def recvfrom_into(self, res, buf, size, flags=0):
        """Log a ``recvfrom_into``; ``res`` is (nbytes, address)."""
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> recvfrom_into(%d, %d) -> %d'
                % (self.name, size, flags, res[0])
            )

        self._writedata(buf[0 : res[0]])

    def recv_into(self, res, buf, size=0, flags=0):
        """Log a ``recv_into``; ``res`` is the byte count received."""
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> recv_into(%d, %d) -> %d' % (self.name, size, flags, res)
            )

        self._writedata(buf[0:res])

    def send(self, res, data, flags=0):
        """Log a ``send(data, flags)`` call that returned ``res``."""
        if not self.writes:
            return

        self.fh.write(
            b'%s> send(%d, %d) -> %d' % (self.name, len(data), flags, len(res))
        )
        self._writedata(data)

    def sendall(self, res, data, flags=0):
        """Log a ``sendall(data, flags)`` call."""
        if not self.writes:
            return

        if self.logdataapis:
            # Returns None on success. So don't bother reporting return value.
            self.fh.write(
                b'%s> sendall(%d, %d)' % (self.name, len(data), flags)
            )

        self._writedata(data)

    def sendto(self, res, data, flagsoraddress, address=None):
        """Log a ``sendto`` call; the 3rd positional arg is flags only
        when an explicit ``address`` is given (mirrors socket.sendto)."""
        if not self.writes:
            return

        if address:
            flags = flagsoraddress
        else:
            flags = 0

        if self.logdataapis:
            self.fh.write(
                b'%s> sendto(%d, %d, %r) -> %d'
                % (self.name, len(data), flags, address, res)
            )

        self._writedata(data)

    def setblocking(self, res, flag):
        """Log a ``setblocking(flag)`` state change."""
        if not self.states:
            return

        self.fh.write(b'%s> setblocking(%r)\n' % (self.name, flag))

    def settimeout(self, res, value):
        """Log a ``settimeout(value)`` state change."""
        if not self.states:
            return

        self.fh.write(b'%s> settimeout(%r)\n' % (self.name, value))

    def gettimeout(self, res):
        """Log a ``gettimeout()`` call that returned ``res``."""
        if not self.states:
            return

        self.fh.write(b'%s> gettimeout() -> %f\n' % (self.name, res))

    def setsockopt(self, res, level, optname, value):
        """Log a ``setsockopt(level, optname, value)`` call."""
        if not self.states:
            return

        self.fh.write(
            b'%s> setsockopt(%r, %r, %r) -> %r\n'
            % (self.name, level, optname, value, res)
        )
1106
1107
1107
1108
def makeloggingsocket(
    logh,
    fh,
    name,
    reads=True,
    writes=True,
    states=True,
    logdata=False,
    logdataapis=True,
):
    """Turn a socket into a logging socket.

    ``logh`` receives the log output; ``fh`` is the socket to wrap;
    ``name`` labels each logged line. The boolean flags select which
    operations (reads, writes, state changes) are logged and whether raw
    data / API calls are recorded. Returns a ``socketproxy``.
    """

    observer = socketobserver(
        logh,
        name,
        reads=reads,
        writes=writes,
        states=states,
        logdata=logdata,
        logdataapis=logdataapis,
    )
    return socketproxy(fh, observer)
1130
1131
1131
1132
def version():
    """Return version information if available.

    Falls back to ``b'unknown'`` when the generated ``__version__``
    module cannot be imported (e.g. running from a source checkout).
    """
    try:
        from . import __version__

        return __version__.version
    except ImportError:
        return b'unknown'
1140
1141
1141
1142
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = b'3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple(b'3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = b'3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)

    >>> v = b'3.9-rc'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc')

    >>> v = b'3.9-rc+2-02a8fea4289b'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc+2-02a8fea4289b')

    >>> versiontuple(b'4.6rc0')
    (4, 6, None, 'rc0')
    >>> versiontuple(b'4.6rc0+12-425d55e54f98')
    (4, 6, None, 'rc0+12-425d55e54f98')
    >>> versiontuple(b'.1.2.3')
    (None, None, None, '.1.2.3')
    >>> versiontuple(b'12.34..5')
    (12, 34, None, '..5')
    >>> versiontuple(b'1.2.3.4.5.6')
    (1, 2, 3, '.4.5.6')
    """
    if not v:
        v = version()
    # Up to three dot-separated numeric components, then an optional
    # +/- separated "extra" suffix.
    m = remod.match(br'(\d+(?:\.\d+){,2})[+-]?(.*)', v)
    if not m:
        vparts, extra = b'', v
    elif m.group(2):
        vparts, extra = m.groups()
    else:
        vparts, extra = m.group(1), None

    assert vparts is not None  # help pytype

    vints = []
    for i in vparts.split(b'.'):
        try:
            vints.append(int(i))
        except ValueError:
            break
    # (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)
1225
1226
1226
1227
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    if func.__code__.co_argcount == 0:
        # Zero-arg functions: memoize the single result in a one-element list.
        listcache = []

        def f():
            if len(listcache) == 0:
                listcache.append(func())
            return listcache[0]

        return f
    cache = {}
    if func.__code__.co_argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]

    else:

        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f
1256
1257
1257
1258
class cow(object):
    """helper class to make copy-on-write easier

    Call preparewrite before doing any writes.
    """

    def preparewrite(self):
        """call this before writes, return self or a copied new object"""
        # _copied counts outstanding cheap copies; a nonzero count means
        # someone else may still hold a reference, so clone before writing.
        if getattr(self, '_copied', 0):
            self._copied -= 1
            # Function cow.__init__ expects 1 arg(s), got 2 [wrong-arg-count]
            return self.__class__(self)  # pytype: disable=wrong-arg-count
        return self

    def copy(self):
        """always do a cheap copy"""
        self._copied = getattr(self, '_copied', 0) + 1
        return self
1276
1277
1277
1278
class sortdict(collections.OrderedDict):
    """a simple sorted dictionary

    >>> d1 = sortdict([(b'a', 0), (b'b', 1)])
    >>> d2 = d1.copy()
    >>> d2
    sortdict([('a', 0), ('b', 1)])
    >>> d2.update([(b'a', 2)])
    >>> list(d2.keys()) # should still be in last-set order
    ['b', 'a']
    >>> d1.insert(1, b'a.5', 0.5)
    >>> d1
    sortdict([('a', 0), ('a.5', 0.5), ('b', 1)])
    """

    def __setitem__(self, key, value):
        # Re-setting a key moves it to the end (last-set order).
        if key in self:
            del self[key]
        super(sortdict, self).__setitem__(key, value)

    if pycompat.ispypy:
        # __setitem__() isn't called as of PyPy 5.8.0
        def update(self, src, **f):
            if isinstance(src, dict):
                src = pycompat.iteritems(src)
            for k, v in src:
                self[k] = v
            for k in f:
                self[k] = f[k]

    def insert(self, position, key, value):
        """Insert ``key: value`` at ``position``, shifting later items."""
        for (i, (k, v)) in enumerate(list(self.items())):
            if i == position:
                self[key] = value
            if i >= position:
                # Re-append items at/after the insertion point to keep order.
                del self[k]
                self[k] = v
1315
1316
1316
1317
class cowdict(cow, dict):
    """copy-on-write dict

    Be sure to call d = d.preparewrite() before writing to d.

    >>> a = cowdict()
    >>> a is a.preparewrite()
    True
    >>> b = a.copy()
    >>> b is a
    True
    >>> c = b.copy()
    >>> c is a
    True
    >>> a = a.preparewrite()
    >>> b is a
    False
    >>> a is a.preparewrite()
    True
    >>> c = c.preparewrite()
    >>> b is c
    False
    >>> b is b.preparewrite()
    True
    """
1342
1343
1343
1344
class cowsortdict(cow, sortdict):
    """copy-on-write sortdict

    Be sure to call d = d.preparewrite() before writing to d.
    """
1349
1350
1350
1351
class transactional(object):  # pytype: disable=ignored-metaclass
    """Base class for making a transactional type into a context manager."""

    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def close(self):
        """Successfully closes the transaction."""

    @abc.abstractmethod
    def release(self):
        """Marks the end of the transaction.

        If the transaction has not been closed, it will be aborted.
        """

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Close only on clean exit; always release, even if close() raises.
        try:
            if exc_type is None:
                self.close()
        finally:
            self.release()
1376
1377
1377
1378
@contextlib.contextmanager
def acceptintervention(tr=None):
    """A context manager that closes the transaction on InterventionRequired

    If no transaction was provided, this simply runs the body and returns
    """
    if not tr:
        yield
        return
    try:
        yield
        tr.close()
    except error.InterventionRequired:
        # Commit what we have so far even though intervention is needed,
        # then re-raise for the caller to handle.
        tr.close()
        raise
    finally:
        tr.release()
1395
1396
1396
1397
@contextlib.contextmanager
def nullcontextmanager(enter_result=None):
    """A no-op context manager yielding ``enter_result`` unchanged."""
    yield enter_result
1400
1401
1401
1402
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """

    __slots__ = ('next', 'prev', 'key', 'value', 'cost')

    def __init__(self):
        # A fresh node links to itself, forming a 1-element circular list.
        self.next = self
        self.prev = self

        # _notset is the module-level sentinel marking an empty slot.
        self.key = _notset
        self.value = None
        self.cost = 0

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
        self.value = None
        self.cost = 0
1424
1425
1425
1426
1426 class lrucachedict(object):
1427 class lrucachedict(object):
1427 """Dict that caches most recent accesses and sets.
1428 """Dict that caches most recent accesses and sets.
1428
1429
1429 The dict consists of an actual backing dict - indexed by original
1430 The dict consists of an actual backing dict - indexed by original
1430 key - and a doubly linked circular list defining the order of entries in
1431 key - and a doubly linked circular list defining the order of entries in
1431 the cache.
1432 the cache.
1432
1433
1433 The head node is the newest entry in the cache. If the cache is full,
1434 The head node is the newest entry in the cache. If the cache is full,
1434 we recycle head.prev and make it the new head. Cache accesses result in
1435 we recycle head.prev and make it the new head. Cache accesses result in
1435 the node being moved to before the existing head and being marked as the
1436 the node being moved to before the existing head and being marked as the
1436 new head node.
1437 new head node.
1437
1438
1438 Items in the cache can be inserted with an optional "cost" value. This is
1439 Items in the cache can be inserted with an optional "cost" value. This is
1439 simply an integer that is specified by the caller. The cache can be queried
1440 simply an integer that is specified by the caller. The cache can be queried
1440 for the total cost of all items presently in the cache.
1441 for the total cost of all items presently in the cache.
1441
1442
1442 The cache can also define a maximum cost. If a cache insertion would
1443 The cache can also define a maximum cost. If a cache insertion would
1443 cause the total cost of the cache to go beyond the maximum cost limit,
1444 cause the total cost of the cache to go beyond the maximum cost limit,
1444 nodes will be evicted to make room for the new code. This can be used
1445 nodes will be evicted to make room for the new code. This can be used
1445 to e.g. set a max memory limit and associate an estimated bytes size
1446 to e.g. set a max memory limit and associate an estimated bytes size
1446 cost to each item in the cache. By default, no maximum cost is enforced.
1447 cost to each item in the cache. By default, no maximum cost is enforced.
1447 """
1448 """
1448
1449
1449 def __init__(self, max, maxcost=0):
1450 def __init__(self, max, maxcost=0):
1450 self._cache = {}
1451 self._cache = {}
1451
1452
1452 self._head = _lrucachenode()
1453 self._head = _lrucachenode()
1453 self._size = 1
1454 self._size = 1
1454 self.capacity = max
1455 self.capacity = max
1455 self.totalcost = 0
1456 self.totalcost = 0
1456 self.maxcost = maxcost
1457 self.maxcost = maxcost
1457
1458
1458 def __len__(self):
1459 def __len__(self):
1459 return len(self._cache)
1460 return len(self._cache)
1460
1461
1461 def __contains__(self, k):
1462 def __contains__(self, k):
1462 return k in self._cache
1463 return k in self._cache
1463
1464
1464 def __iter__(self):
1465 def __iter__(self):
1465 # We don't have to iterate in cache order, but why not.
1466 # We don't have to iterate in cache order, but why not.
1466 n = self._head
1467 n = self._head
1467 for i in range(len(self._cache)):
1468 for i in range(len(self._cache)):
1468 yield n.key
1469 yield n.key
1469 n = n.next
1470 n = n.next
1470
1471
1471 def __getitem__(self, k):
1472 def __getitem__(self, k):
1472 node = self._cache[k]
1473 node = self._cache[k]
1473 self._movetohead(node)
1474 self._movetohead(node)
1474 return node.value
1475 return node.value
1475
1476
1476 def insert(self, k, v, cost=0):
1477 def insert(self, k, v, cost=0):
1477 """Insert a new item in the cache with optional cost value."""
1478 """Insert a new item in the cache with optional cost value."""
1478 node = self._cache.get(k)
1479 node = self._cache.get(k)
1479 # Replace existing value and mark as newest.
1480 # Replace existing value and mark as newest.
1480 if node is not None:
1481 if node is not None:
1481 self.totalcost -= node.cost
1482 self.totalcost -= node.cost
1482 node.value = v
1483 node.value = v
1483 node.cost = cost
1484 node.cost = cost
1484 self.totalcost += cost
1485 self.totalcost += cost
1485 self._movetohead(node)
1486 self._movetohead(node)
1486
1487
1487 if self.maxcost:
1488 if self.maxcost:
1488 self._enforcecostlimit()
1489 self._enforcecostlimit()
1489
1490
1490 return
1491 return
1491
1492
1492 if self._size < self.capacity:
1493 if self._size < self.capacity:
1493 node = self._addcapacity()
1494 node = self._addcapacity()
1494 else:
1495 else:
1495 # Grab the last/oldest item.
1496 # Grab the last/oldest item.
1496 node = self._head.prev
1497 node = self._head.prev
1497
1498
1498 # At capacity. Kill the old entry.
1499 # At capacity. Kill the old entry.
1499 if node.key is not _notset:
1500 if node.key is not _notset:
1500 self.totalcost -= node.cost
1501 self.totalcost -= node.cost
1501 del self._cache[node.key]
1502 del self._cache[node.key]
1502
1503
1503 node.key = k
1504 node.key = k
1504 node.value = v
1505 node.value = v
1505 node.cost = cost
1506 node.cost = cost
1506 self.totalcost += cost
1507 self.totalcost += cost
1507 self._cache[k] = node
1508 self._cache[k] = node
1508 # And mark it as newest entry. No need to adjust order since it
1509 # And mark it as newest entry. No need to adjust order since it
1509 # is already self._head.prev.
1510 # is already self._head.prev.
1510 self._head = node
1511 self._head = node
1511
1512
1512 if self.maxcost:
1513 if self.maxcost:
1513 self._enforcecostlimit()
1514 self._enforcecostlimit()
1514
1515
1515 def __setitem__(self, k, v):
1516 def __setitem__(self, k, v):
1516 self.insert(k, v)
1517 self.insert(k, v)
1517
1518
1518 def __delitem__(self, k):
1519 def __delitem__(self, k):
1519 self.pop(k)
1520 self.pop(k)
1520
1521
1521 def pop(self, k, default=_notset):
1522 def pop(self, k, default=_notset):
1522 try:
1523 try:
1523 node = self._cache.pop(k)
1524 node = self._cache.pop(k)
1524 except KeyError:
1525 except KeyError:
1525 if default is _notset:
1526 if default is _notset:
1526 raise
1527 raise
1527 return default
1528 return default
1528
1529
1529 assert node is not None # help pytype
1530 assert node is not None # help pytype
1530 value = node.value
1531 value = node.value
1531 self.totalcost -= node.cost
1532 self.totalcost -= node.cost
1532 node.markempty()
1533 node.markempty()
1533
1534
1534 # Temporarily mark as newest item before re-adjusting head to make
1535 # Temporarily mark as newest item before re-adjusting head to make
1535 # this node the oldest item.
1536 # this node the oldest item.
1536 self._movetohead(node)
1537 self._movetohead(node)
1537 self._head = node.next
1538 self._head = node.next
1538
1539
1539 return value
1540 return value
1540
1541
1541 # Additional dict methods.
1542 # Additional dict methods.
1542
1543
1543 def get(self, k, default=None):
1544 def get(self, k, default=None):
1544 try:
1545 try:
1545 return self.__getitem__(k)
1546 return self.__getitem__(k)
1546 except KeyError:
1547 except KeyError:
1547 return default
1548 return default
1548
1549
    def peek(self, k, default=_notset):
        """Get the specified item without moving it to the head

        Unlike get(), this doesn't mutate the internal state. But be aware
        that it doesn't mean peek() is thread safe.
        """
        try:
            node = self._cache[k]
            assert node is not None  # help pytype
            return node.value
        except KeyError:
            # Mirror pop(): raise when no default was supplied.
            if default is _notset:
                raise
            return default
1563
1564
    def clear(self):
        """Empty the cache, keeping the allocated linked-list nodes."""
        # Walk forward from the head, emptying each live node. Empty nodes
        # carry the _notset sentinel as their key, so the walk stops once it
        # reaches a node that was already empty (or, presumably, wraps onto
        # a node this loop just emptied — assumes markempty() resets n.key
        # to _notset; confirm against the node class definition).
        n = self._head
        while n.key is not _notset:
            self.totalcost -= n.cost
            n.markempty()
            n = n.next

        self._cache.clear()
1572
1573
    def copy(self, capacity=None, maxcost=0):
        """Create a new cache as a copy of the current one.

        By default, the new cache has the same capacity as the existing one.
        But, the cache capacity can be changed as part of performing the
        copy.

        Items in the copy have an insertion/access order matching this
        instance.
        """

        # Fall back to this instance's limits when none were requested.
        capacity = capacity or self.capacity
        maxcost = maxcost or self.maxcost
        result = lrucachedict(capacity, maxcost=maxcost)

        # We copy entries by iterating in oldest-to-newest order so the copy
        # has the correct ordering.

        # Find the first non-empty entry.
        n = self._head.prev
        while n.key is _notset and n is not self._head:
            n = n.prev

        # We could potentially skip the first N items when decreasing capacity.
        # But let's keep it simple unless it is a performance problem.
        for i in range(len(self._cache)):
            result.insert(n.key, n.value, cost=n.cost)
            n = n.prev

        return result
1603
1604
    def popoldest(self):
        """Remove the oldest item from the cache.

        Returns the (key, value) describing the removed cache entry.

        Returns None (implicitly) when the cache is empty.
        """
        if not self._cache:
            return

        # Walk the linked list backwards starting at tail node until we hit
        # a non-empty node.
        n = self._head.prev

        assert n is not None  # help pytype

        while n.key is _notset:
            n = n.prev

        assert n is not None  # help pytype

        key, value = n.key, n.value

        # And remove it from the cache and mark it as empty.
        del self._cache[n.key]
        self.totalcost -= n.cost
        n.markempty()

        return key, value
1631
1632
    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        ``node`` must already be linked into the circular list.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node
1678
1679
    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.

        Returns the newly created (empty) node.
        """
        head = self._head
        node = _lrucachenode()
        # Splice the fresh node between the current tail (head.prev) and
        # the head, making it the new tail / oldest slot.
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node
1692
1693
    def _enforcecostlimit(self):
        """Evict oldest entries until total cost falls under the target."""
        # This should run after an insertion. It should only be called if total
        # cost limits are being enforced.
        # The most recently inserted node is never evicted.
        if len(self) <= 1 or self.totalcost <= self.maxcost:
            return

        # This is logically equivalent to calling popoldest() until we
        # free up enough cost. We don't do that since popoldest() needs
        # to walk the linked list and doing this in a loop would be
        # quadratic. So we find the first non-empty node and then
        # walk nodes until we free up enough capacity.
        #
        # If we only removed the minimum number of nodes to free enough
        # cost at insert time, chances are high that the next insert would
        # also require pruning. This would effectively constitute quadratic
        # behavior for insert-heavy workloads. To mitigate this, we set a
        # target cost that is a percentage of the max cost. This will tend
        # to free more nodes when the high water mark is reached, which
        # lowers the chances of needing to prune on the subsequent insert.
        targetcost = int(self.maxcost * 0.75)

        # Skip over empty slots at the tail to reach the oldest live entry.
        n = self._head.prev
        while n.key is _notset:
            n = n.prev

        while len(self) > 1 and self.totalcost > targetcost:
            del self._cache[n.key]
            self.totalcost -= n.cost
            n.markempty()
            n = n.prev
1724
1725
1725
1726
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    results = {}
    lru = collections.deque()

    if func.__code__.co_argcount == 1:
        # Single-argument callables are keyed directly by the argument
        # (which must therefore be hashable).
        def f(arg):
            if arg in results:
                # Cache hit: refresh recency below.
                lru.remove(arg)
            else:
                # Evict the least recently used entry once we hold more
                # than 20 results, then compute and remember the new one.
                if len(results) > 20:
                    del results[lru.popleft()]
                results[arg] = func(arg)
            lru.append(arg)
            return results[arg]

    else:
        # Multi-argument callables are keyed by the positional-args tuple.
        def f(*args):
            if args in results:
                lru.remove(args)
            else:
                if len(results) > 20:
                    del results[lru.popleft()]
                results[args] = func(*args)
            lru.append(args)
            return results[args]

    return f
1755
1756
1756
1757
class propertycache(object):
    """Descriptor caching a computed attribute value on the instance.

    On first access the wrapped function is invoked and its result is
    stored in the instance's __dict__ under the function's name. Because
    this is a non-data descriptor (no __set__), later lookups find the
    cached value in the instance dict and bypass the descriptor entirely.
    """

    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        result = self.func(obj)
        self.cachevalue(obj, result)
        return result

    def cachevalue(self, obj, value):
        # Overridable hook for subclasses that need a different store.
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
1770
1771
1771
1772
def clearcachedproperty(obj, prop):
    '''clear a cached property value, if one has been set'''
    # Attribute names live in __dict__ as native str; normalize bytes input.
    prop = pycompat.sysstr(prop)
    if prop in obj.__dict__:
        del obj.__dict__[prop]
1777
1778
1778
1779
def increasingchunks(source, min=1024, max=65536):
    """return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max"""

    def log2(x):
        # Floor of the base-2 logarithm; defined as 0 for x == 0.
        return x.bit_length() - 1 if x else 0

    pending = []
    pendinglen = 0
    for piece in source:
        pending.append(piece)
        pendinglen += len(piece)
        if pendinglen < min:
            continue
        if min < max:
            # Double the target chunk size, but jump straight to the
            # largest power of two not exceeding what we actually
            # buffered, and never beyond max.
            min = min << 1
            nmin = 1 << log2(pendinglen)
            if nmin > min:
                min = nmin
            if min > max:
                min = max
        yield b''.join(pending)
        pending = []
        pendinglen = 0
    # Flush whatever remains, even if it is smaller than min.
    if pending:
        yield b''.join(pending)
1810
1811
1811
1812
def always(fn):
    # Predicate that accepts any argument (the argument is ignored).
    return True
1814
1815
1815
1816
def never(fn):
    # Predicate that rejects any argument (the argument is ignored).
    return False
1818
1819
1819
1820
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue has been fixed in 2.7. But it still affects
    CPython's performance.
    """

    def wrapper(*args, **kwargs):
        # Remember the collector state so we never re-enable a collector
        # the caller had deliberately switched off.
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            if wasenabled:
                gc.enable()

    return wrapper
1844
1845
1845
1846
if pycompat.ispypy:
    # PyPy runs slower with gc disabled
    # Replace nogc with an identity decorator: return the function as-is.
    nogc = lambda x: x
1849
1850
1850
1851
def pathto(root, n1, n2):
    # type: (bytes, bytes, bytes) -> bytes
    """return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    """
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        # On different drives (Windows), a relative path is impossible;
        # return n2 anchored at root instead.
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            return os.path.join(root, localpath(n2))
        n2 = b'/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split(b'/')
    a.reverse()
    b.reverse()
    # Strip the common prefix of both paths (compared from the start,
    # i.e. from the ends of the reversed lists).
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    # One '..' per remaining component of n1, then descend into n2.
    return pycompat.ossep.join(([b'..'] * len(a)) + b) or b'.'
1877
1878
1878
1879
def checksignature(func, depth=1):
    '''wrap a function with code to check for calling errors'''

    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # Only translate to SignatureError when the TypeError was raised
            # at the expected traceback depth, i.e. by the call itself rather
            # than from deeper inside func's body.
            if len(traceback.extract_tb(sys.exc_info()[2])) == depth:
                raise error.SignatureError
            raise

    return check
1891
1892
1892
1893
# a whitelist of known filesystems where hardlink works reliably
_hardlinkfswhitelist = {
    b'apfs',
    b'btrfs',
    b'ext2',
    b'ext3',
    b'ext4',
    b'hfs',
    b'jfs',
    b'NTFS',
    b'reiserfs',
    b'tmpfs',
    b'ufs',
    b'xfs',
    b'zfs',
}
1909
1910
1910
1911
def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
    """copy a file, preserving mode and optionally other stat info like
    atime/mtime

    checkambig argument is used with filestat, and is useful only if
    destination file is guarded by any lock (e.g. repo.lock or
    repo.wlock).

    copystat and checkambig should be exclusive.
    """
    assert not (copystat and checkambig)
    oldstat = None
    if os.path.lexists(dest):
        if checkambig:
            # Snapshot the existing destination's stat so we can detect
            # mtime ambiguity after the copy.
            oldstat = checkambig and filestat.frompath(dest)
        unlink(dest)
    if hardlink:
        # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
        # unless we are confident that dest is on a whitelisted filesystem.
        try:
            fstype = getfstype(os.path.dirname(dest))
        except OSError:
            fstype = None
        if fstype not in _hardlinkfswhitelist:
            hardlink = False
    if hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass  # fall back to normal copy
    if os.path.islink(src):
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't needed
        # for them anyway
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
            if oldstat and oldstat.stat:
                newstat = filestat.frompath(dest)
                if newstat.isambig(oldstat):
                    # stat of copied file is ambiguous to original one
                    # Bump mtime by one second (wrapping at 2**31) so the
                    # new file can be distinguished from the old one.
                    advanced = (
                        oldstat.stat[stat.ST_MTIME] + 1
                    ) & 0x7FFFFFFF
                    os.utime(dest, (advanced, advanced))
        except shutil.Error as inst:
            raise error.Abort(stringutil.forcebytestr(inst))
1964
1965
1965
1966
def copyfiles(src, dst, hardlink=None, progress=None):
    """Copy a directory tree using hardlinks if possible.

    Returns a (hardlink, num) tuple: whether hardlinking ended up being
    used, and how many files were copied/linked.
    """
    num = 0

    def settopic():
        if progress:
            progress.topic = _(b'linking') if hardlink else _(b'copying')

    if os.path.isdir(src):
        if hardlink is None:
            # Hardlinking only works within one filesystem: decide by
            # comparing the devices of src and dst's parent.
            hardlink = (
                os.stat(src).st_dev == os.stat(os.path.dirname(dst)).st_dev
            )
        settopic()
        os.mkdir(dst)
        for name, kind in listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            # Recurse; a child may discover hardlinking doesn't work and
            # propagate hardlink=False back up.
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress)
            num += n
    else:
        if hardlink is None:
            hardlink = (
                os.stat(os.path.dirname(src)).st_dev
                == os.stat(os.path.dirname(dst)).st_dev
            )
        settopic()

        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # Fall back to plain copying for this and later files.
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        if progress:
            progress.increment()

    return hardlink, num
2007
2008
2008
2009
# Base filenames (compared lowercase, extension stripped) that Windows
# reserves for devices and therefore cannot be used as file names.
_winreservednames = {
    b'con',
    b'prn',
    b'aux',
    b'nul',
    b'com1',
    b'com2',
    b'com3',
    b'com4',
    b'com5',
    b'com6',
    b'com7',
    b'com8',
    b'com9',
    b'lpt1',
    b'lpt2',
    b'lpt3',
    b'lpt4',
    b'lpt5',
    b'lpt6',
    b'lpt7',
    b'lpt8',
    b'lpt9',
}
# Characters that may not appear anywhere in a Windows filename.
_winreservedchars = b':*?"<>|'
2034
2035
2035
2036
def checkwinfilename(path):
    # type: (bytes) -> Optional[bytes]
    r"""Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename(b"just/a/normal/path")
    >>> checkwinfilename(b"foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename(b"foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/xml.con")
    >>> checkwinfilename(b"foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename(b"foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename(b"../bar")
    >>> checkwinfilename(b"foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename(b"foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    """
    if path.endswith(b'\\'):
        return _(b"filename ends with '\\', which is invalid on Windows")
    if b'\\/' in path:
        return _(b"directory name ends with '\\', which is invalid on Windows")
    # Check each path component, treating '\' and '/' alike as separators.
    for n in path.replace(b'\\', b'/').split(b'/'):
        if not n:
            continue
        for c in _filenamebytestr(n):
            if c in _winreservedchars:
                return (
                    _(
                        b"filename contains '%s', which is reserved "
                        b"on Windows"
                    )
                    % c
                )
            # Control characters (0x00-0x1f) are invalid in Windows names.
            if ord(c) <= 31:
                return _(
                    b"filename contains '%s', which is invalid on Windows"
                ) % stringutil.escapestr(c)
        # Device names are reserved regardless of extension ('con.xml').
        base = n.split(b'.')[0]
        if base and base.lower() in _winreservednames:
            return (
                _(b"filename contains '%s', which is reserved on Windows")
                % base
            )
        t = n[-1:]
        # A trailing dot or space is disallowed, except for the special
        # components '.' and '..' (the substring test n not in b'..'
        # deliberately matches both of those).
        if t in b'. ' and n not in b'..':
            return (
                _(
                    b"filename ends with '%s', which is not allowed "
                    b"on Windows"
                )
                % t
            )
2096
2097
2097
2098
# Prefer time.perf_counter when available; fall back per-platform below.
timer = getattr(time, "perf_counter", None)

if pycompat.iswindows:
    checkosfilename = checkwinfilename
    if not timer:
        timer = time.clock
else:
    # mercurial.windows doesn't have platform.checkosfilename
    checkosfilename = platform.checkosfilename  # pytype: disable=module-attr
    if not timer:
        timer = time.time
2109
2110
2110
2111
def makelock(info, pathname):
    """Create a lock file atomically if possible

    This may leave a stale lock file if symlink isn't supported and signal
    interrupt is enabled.
    """
    try:
        # Preferred: a symlink whose target encodes the lock info; symlink
        # creation is atomic and fails if the path already exists.
        return os.symlink(info, pathname)
    except OSError as why:
        # An existing lock (EEXIST) is propagated to the caller; other
        # symlink failures fall through to the regular-file path below.
        if why.errno == errno.EEXIST:
            raise
    except AttributeError:  # no symlink in os
        pass

    # Fallback: exclusive creation of a regular file holding the info.
    flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL | getattr(os, 'O_BINARY', 0)
    ld = os.open(pathname, flags)
    os.write(ld, info)
    os.close(ld)
2129
2130
2130
2131
def readlock(pathname):
    # type: (bytes) -> bytes
    """Read the info stored by makelock(), from symlink or regular file."""
    try:
        return readlink(pathname)
    except OSError as why:
        # EINVAL: not a symlink; ENOSYS: readlink unsupported. Either way,
        # fall through and read the lock as a regular file.
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError:  # no symlink in os
        pass
    with posixfile(pathname, b'rb') as fp:
        return fp.read()
2142
2143
2143
2144
2144 def fstat(fp):
2145 def fstat(fp):
2145 '''stat file object that may not have fileno method.'''
2146 '''stat file object that may not have fileno method.'''
2146 try:
2147 try:
2147 return os.fstat(fp.fileno())
2148 return os.fstat(fp.fileno())
2148 except AttributeError:
2149 except AttributeError:
2149 return os.stat(fp.name)
2150 return os.stat(fp.name)
2150
2151
2151
2152
2152 # File system features
2153 # File system features
2153
2154
2154
2155
2155 def fscasesensitive(path):
2156 def fscasesensitive(path):
2156 # type: (bytes) -> bool
2157 # type: (bytes) -> bool
2157 """
2158 """
2158 Return true if the given path is on a case-sensitive filesystem
2159 Return true if the given path is on a case-sensitive filesystem
2159
2160
2160 Requires a path (like /foo/.hg) ending with a foldable final
2161 Requires a path (like /foo/.hg) ending with a foldable final
2161 directory component.
2162 directory component.
2162 """
2163 """
2163 s1 = os.lstat(path)
2164 s1 = os.lstat(path)
2164 d, b = os.path.split(path)
2165 d, b = os.path.split(path)
2165 b2 = b.upper()
2166 b2 = b.upper()
2166 if b == b2:
2167 if b == b2:
2167 b2 = b.lower()
2168 b2 = b.lower()
2168 if b == b2:
2169 if b == b2:
2169 return True # no evidence against case sensitivity
2170 return True # no evidence against case sensitivity
2170 p2 = os.path.join(d, b2)
2171 p2 = os.path.join(d, b2)
2171 try:
2172 try:
2172 s2 = os.lstat(p2)
2173 s2 = os.lstat(p2)
2173 if s2 == s1:
2174 if s2 == s1:
2174 return False
2175 return False
2175 return True
2176 return True
2176 except OSError:
2177 except OSError:
2177 return True
2178 return True
2178
2179
2179
2180
2180 _re2_input = lambda x: x
2181 _re2_input = lambda x: x
2181 try:
2182 try:
2182 import re2 # pytype: disable=import-error
2183 import re2 # pytype: disable=import-error
2183
2184
2184 _re2 = None
2185 _re2 = None
2185 except ImportError:
2186 except ImportError:
2186 _re2 = False
2187 _re2 = False
2187
2188
2188
2189
2189 class _re(object):
2190 class _re(object):
2190 def _checkre2(self):
2191 def _checkre2(self):
2191 global _re2
2192 global _re2
2192 global _re2_input
2193 global _re2_input
2193
2194
2194 check_pattern = br'\[([^\[]+)\]'
2195 check_pattern = br'\[([^\[]+)\]'
2195 check_input = b'[ui]'
2196 check_input = b'[ui]'
2196 try:
2197 try:
2197 # check if match works, see issue3964
2198 # check if match works, see issue3964
2198 _re2 = bool(re2.match(check_pattern, check_input))
2199 _re2 = bool(re2.match(check_pattern, check_input))
2199 except ImportError:
2200 except ImportError:
2200 _re2 = False
2201 _re2 = False
2201 except TypeError:
2202 except TypeError:
2202 # the `pyre-2` project provides a re2 module that accept bytes
2203 # the `pyre-2` project provides a re2 module that accept bytes
2203 # the `fb-re2` project provides a re2 module that acccept sysstr
2204 # the `fb-re2` project provides a re2 module that acccept sysstr
2204 check_pattern = pycompat.sysstr(check_pattern)
2205 check_pattern = pycompat.sysstr(check_pattern)
2205 check_input = pycompat.sysstr(check_input)
2206 check_input = pycompat.sysstr(check_input)
2206 _re2 = bool(re2.match(check_pattern, check_input))
2207 _re2 = bool(re2.match(check_pattern, check_input))
2207 _re2_input = pycompat.sysstr
2208 _re2_input = pycompat.sysstr
2208
2209
2209 def compile(self, pat, flags=0):
2210 def compile(self, pat, flags=0):
2210 """Compile a regular expression, using re2 if possible
2211 """Compile a regular expression, using re2 if possible
2211
2212
2212 For best performance, use only re2-compatible regexp features. The
2213 For best performance, use only re2-compatible regexp features. The
2213 only flags from the re module that are re2-compatible are
2214 only flags from the re module that are re2-compatible are
2214 IGNORECASE and MULTILINE."""
2215 IGNORECASE and MULTILINE."""
2215 if _re2 is None:
2216 if _re2 is None:
2216 self._checkre2()
2217 self._checkre2()
2217 if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
2218 if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
2218 if flags & remod.IGNORECASE:
2219 if flags & remod.IGNORECASE:
2219 pat = b'(?i)' + pat
2220 pat = b'(?i)' + pat
2220 if flags & remod.MULTILINE:
2221 if flags & remod.MULTILINE:
2221 pat = b'(?m)' + pat
2222 pat = b'(?m)' + pat
2222 try:
2223 try:
2223 return re2.compile(_re2_input(pat))
2224 return re2.compile(_re2_input(pat))
2224 except re2.error:
2225 except re2.error:
2225 pass
2226 pass
2226 return remod.compile(pat, flags)
2227 return remod.compile(pat, flags)
2227
2228
2228 @propertycache
2229 @propertycache
2229 def escape(self):
2230 def escape(self):
2230 """Return the version of escape corresponding to self.compile.
2231 """Return the version of escape corresponding to self.compile.
2231
2232
2232 This is imperfect because whether re2 or re is used for a particular
2233 This is imperfect because whether re2 or re is used for a particular
2233 function depends on the flags, etc, but it's the best we can do.
2234 function depends on the flags, etc, but it's the best we can do.
2234 """
2235 """
2235 global _re2
2236 global _re2
2236 if _re2 is None:
2237 if _re2 is None:
2237 self._checkre2()
2238 self._checkre2()
2238 if _re2:
2239 if _re2:
2239 return re2.escape
2240 return re2.escape
2240 else:
2241 else:
2241 return remod.escape
2242 return remod.escape
2242
2243
2243
2244
2244 re = _re()
2245 re = _re()
2245
2246
2246 _fspathcache = {}
2247 _fspathcache = {}
2247
2248
2248
2249
2249 def fspath(name, root):
2250 def fspath(name, root):
2250 # type: (bytes, bytes) -> bytes
2251 # type: (bytes, bytes) -> bytes
2251 """Get name in the case stored in the filesystem
2252 """Get name in the case stored in the filesystem
2252
2253
2253 The name should be relative to root, and be normcase-ed for efficiency.
2254 The name should be relative to root, and be normcase-ed for efficiency.
2254
2255
2255 Note that this function is unnecessary, and should not be
2256 Note that this function is unnecessary, and should not be
2256 called, for case-sensitive filesystems (simply because it's expensive).
2257 called, for case-sensitive filesystems (simply because it's expensive).
2257
2258
2258 The root should be normcase-ed, too.
2259 The root should be normcase-ed, too.
2259 """
2260 """
2260
2261
2261 def _makefspathcacheentry(dir):
2262 def _makefspathcacheentry(dir):
2262 return {normcase(n): n for n in os.listdir(dir)}
2263 return {normcase(n): n for n in os.listdir(dir)}
2263
2264
2264 seps = pycompat.ossep
2265 seps = pycompat.ossep
2265 if pycompat.osaltsep:
2266 if pycompat.osaltsep:
2266 seps = seps + pycompat.osaltsep
2267 seps = seps + pycompat.osaltsep
2267 # Protect backslashes. This gets silly very quickly.
2268 # Protect backslashes. This gets silly very quickly.
2268 seps.replace(b'\\', b'\\\\')
2269 seps.replace(b'\\', b'\\\\')
2269 pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
2270 pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
2270 dir = os.path.normpath(root)
2271 dir = os.path.normpath(root)
2271 result = []
2272 result = []
2272 for part, sep in pattern.findall(name):
2273 for part, sep in pattern.findall(name):
2273 if sep:
2274 if sep:
2274 result.append(sep)
2275 result.append(sep)
2275 continue
2276 continue
2276
2277
2277 if dir not in _fspathcache:
2278 if dir not in _fspathcache:
2278 _fspathcache[dir] = _makefspathcacheentry(dir)
2279 _fspathcache[dir] = _makefspathcacheentry(dir)
2279 contents = _fspathcache[dir]
2280 contents = _fspathcache[dir]
2280
2281
2281 found = contents.get(part)
2282 found = contents.get(part)
2282 if not found:
2283 if not found:
2283 # retry "once per directory" per "dirstate.walk" which
2284 # retry "once per directory" per "dirstate.walk" which
2284 # may take place for each patches of "hg qpush", for example
2285 # may take place for each patches of "hg qpush", for example
2285 _fspathcache[dir] = contents = _makefspathcacheentry(dir)
2286 _fspathcache[dir] = contents = _makefspathcacheentry(dir)
2286 found = contents.get(part)
2287 found = contents.get(part)
2287
2288
2288 result.append(found or part)
2289 result.append(found or part)
2289 dir = os.path.join(dir, part)
2290 dir = os.path.join(dir, part)
2290
2291
2291 return b''.join(result)
2292 return b''.join(result)
2292
2293
2293
2294
2294 def checknlink(testfile):
2295 def checknlink(testfile):
2295 # type: (bytes) -> bool
2296 # type: (bytes) -> bool
2296 '''check whether hardlink count reporting works properly'''
2297 '''check whether hardlink count reporting works properly'''
2297
2298
2298 # testfile may be open, so we need a separate file for checking to
2299 # testfile may be open, so we need a separate file for checking to
2299 # work around issue2543 (or testfile may get lost on Samba shares)
2300 # work around issue2543 (or testfile may get lost on Samba shares)
2300 f1, f2, fp = None, None, None
2301 f1, f2, fp = None, None, None
2301 try:
2302 try:
2302 fd, f1 = pycompat.mkstemp(
2303 fd, f1 = pycompat.mkstemp(
2303 prefix=b'.%s-' % os.path.basename(testfile),
2304 prefix=b'.%s-' % os.path.basename(testfile),
2304 suffix=b'1~',
2305 suffix=b'1~',
2305 dir=os.path.dirname(testfile),
2306 dir=os.path.dirname(testfile),
2306 )
2307 )
2307 os.close(fd)
2308 os.close(fd)
2308 f2 = b'%s2~' % f1[:-2]
2309 f2 = b'%s2~' % f1[:-2]
2309
2310
2310 oslink(f1, f2)
2311 oslink(f1, f2)
2311 # nlinks() may behave differently for files on Windows shares if
2312 # nlinks() may behave differently for files on Windows shares if
2312 # the file is open.
2313 # the file is open.
2313 fp = posixfile(f2)
2314 fp = posixfile(f2)
2314 return nlinks(f2) > 1
2315 return nlinks(f2) > 1
2315 except OSError:
2316 except OSError:
2316 return False
2317 return False
2317 finally:
2318 finally:
2318 if fp is not None:
2319 if fp is not None:
2319 fp.close()
2320 fp.close()
2320 for f in (f1, f2):
2321 for f in (f1, f2):
2321 try:
2322 try:
2322 if f is not None:
2323 if f is not None:
2323 os.unlink(f)
2324 os.unlink(f)
2324 except OSError:
2325 except OSError:
2325 pass
2326 pass
2326
2327
2327
2328
2328 def endswithsep(path):
2329 def endswithsep(path):
2329 # type: (bytes) -> bool
2330 # type: (bytes) -> bool
2330 '''Check path ends with os.sep or os.altsep.'''
2331 '''Check path ends with os.sep or os.altsep.'''
2331 return bool( # help pytype
2332 return bool( # help pytype
2332 path.endswith(pycompat.ossep)
2333 path.endswith(pycompat.ossep)
2333 or pycompat.osaltsep
2334 or pycompat.osaltsep
2334 and path.endswith(pycompat.osaltsep)
2335 and path.endswith(pycompat.osaltsep)
2335 )
2336 )
2336
2337
2337
2338
2338 def splitpath(path):
2339 def splitpath(path):
2339 # type: (bytes) -> List[bytes]
2340 # type: (bytes) -> List[bytes]
2340 """Split path by os.sep.
2341 """Split path by os.sep.
2341 Note that this function does not use os.altsep because this is
2342 Note that this function does not use os.altsep because this is
2342 an alternative of simple "xxx.split(os.sep)".
2343 an alternative of simple "xxx.split(os.sep)".
2343 It is recommended to use os.path.normpath() before using this
2344 It is recommended to use os.path.normpath() before using this
2344 function if need."""
2345 function if need."""
2345 return path.split(pycompat.ossep)
2346 return path.split(pycompat.ossep)
2346
2347
2347
2348
2348 def mktempcopy(name, emptyok=False, createmode=None, enforcewritable=False):
2349 def mktempcopy(name, emptyok=False, createmode=None, enforcewritable=False):
2349 """Create a temporary file with the same contents from name
2350 """Create a temporary file with the same contents from name
2350
2351
2351 The permission bits are copied from the original file.
2352 The permission bits are copied from the original file.
2352
2353
2353 If the temporary file is going to be truncated immediately, you
2354 If the temporary file is going to be truncated immediately, you
2354 can use emptyok=True as an optimization.
2355 can use emptyok=True as an optimization.
2355
2356
2356 Returns the name of the temporary file.
2357 Returns the name of the temporary file.
2357 """
2358 """
2358 d, fn = os.path.split(name)
2359 d, fn = os.path.split(name)
2359 fd, temp = pycompat.mkstemp(prefix=b'.%s-' % fn, suffix=b'~', dir=d)
2360 fd, temp = pycompat.mkstemp(prefix=b'.%s-' % fn, suffix=b'~', dir=d)
2360 os.close(fd)
2361 os.close(fd)
2361 # Temporary files are created with mode 0600, which is usually not
2362 # Temporary files are created with mode 0600, which is usually not
2362 # what we want. If the original file already exists, just copy
2363 # what we want. If the original file already exists, just copy
2363 # its mode. Otherwise, manually obey umask.
2364 # its mode. Otherwise, manually obey umask.
2364 copymode(name, temp, createmode, enforcewritable)
2365 copymode(name, temp, createmode, enforcewritable)
2365
2366
2366 if emptyok:
2367 if emptyok:
2367 return temp
2368 return temp
2368 try:
2369 try:
2369 try:
2370 try:
2370 ifp = posixfile(name, b"rb")
2371 ifp = posixfile(name, b"rb")
2371 except IOError as inst:
2372 except IOError as inst:
2372 if inst.errno == errno.ENOENT:
2373 if inst.errno == errno.ENOENT:
2373 return temp
2374 return temp
2374 if not getattr(inst, 'filename', None):
2375 if not getattr(inst, 'filename', None):
2375 inst.filename = name
2376 inst.filename = name
2376 raise
2377 raise
2377 ofp = posixfile(temp, b"wb")
2378 ofp = posixfile(temp, b"wb")
2378 for chunk in filechunkiter(ifp):
2379 for chunk in filechunkiter(ifp):
2379 ofp.write(chunk)
2380 ofp.write(chunk)
2380 ifp.close()
2381 ifp.close()
2381 ofp.close()
2382 ofp.close()
2382 except: # re-raises
2383 except: # re-raises
2383 try:
2384 try:
2384 os.unlink(temp)
2385 os.unlink(temp)
2385 except OSError:
2386 except OSError:
2386 pass
2387 pass
2387 raise
2388 raise
2388 return temp
2389 return temp
2389
2390
2390
2391
2391 class filestat(object):
2392 class filestat(object):
2392 """help to exactly detect change of a file
2393 """help to exactly detect change of a file
2393
2394
2394 'stat' attribute is result of 'os.stat()' if specified 'path'
2395 'stat' attribute is result of 'os.stat()' if specified 'path'
2395 exists. Otherwise, it is None. This can avoid preparative
2396 exists. Otherwise, it is None. This can avoid preparative
2396 'exists()' examination on client side of this class.
2397 'exists()' examination on client side of this class.
2397 """
2398 """
2398
2399
2399 def __init__(self, stat):
2400 def __init__(self, stat):
2400 self.stat = stat
2401 self.stat = stat
2401
2402
2402 @classmethod
2403 @classmethod
2403 def frompath(cls, path):
2404 def frompath(cls, path):
2404 try:
2405 try:
2405 stat = os.stat(path)
2406 stat = os.stat(path)
2406 except OSError as err:
2407 except OSError as err:
2407 if err.errno != errno.ENOENT:
2408 if err.errno != errno.ENOENT:
2408 raise
2409 raise
2409 stat = None
2410 stat = None
2410 return cls(stat)
2411 return cls(stat)
2411
2412
2412 @classmethod
2413 @classmethod
2413 def fromfp(cls, fp):
2414 def fromfp(cls, fp):
2414 stat = os.fstat(fp.fileno())
2415 stat = os.fstat(fp.fileno())
2415 return cls(stat)
2416 return cls(stat)
2416
2417
2417 __hash__ = object.__hash__
2418 __hash__ = object.__hash__
2418
2419
2419 def __eq__(self, old):
2420 def __eq__(self, old):
2420 try:
2421 try:
2421 # if ambiguity between stat of new and old file is
2422 # if ambiguity between stat of new and old file is
2422 # avoided, comparison of size, ctime and mtime is enough
2423 # avoided, comparison of size, ctime and mtime is enough
2423 # to exactly detect change of a file regardless of platform
2424 # to exactly detect change of a file regardless of platform
2424 return (
2425 return (
2425 self.stat.st_size == old.stat.st_size
2426 self.stat.st_size == old.stat.st_size
2426 and self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
2427 and self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
2427 and self.stat[stat.ST_MTIME] == old.stat[stat.ST_MTIME]
2428 and self.stat[stat.ST_MTIME] == old.stat[stat.ST_MTIME]
2428 )
2429 )
2429 except AttributeError:
2430 except AttributeError:
2430 pass
2431 pass
2431 try:
2432 try:
2432 return self.stat is None and old.stat is None
2433 return self.stat is None and old.stat is None
2433 except AttributeError:
2434 except AttributeError:
2434 return False
2435 return False
2435
2436
2436 def isambig(self, old):
2437 def isambig(self, old):
2437 """Examine whether new (= self) stat is ambiguous against old one
2438 """Examine whether new (= self) stat is ambiguous against old one
2438
2439
2439 "S[N]" below means stat of a file at N-th change:
2440 "S[N]" below means stat of a file at N-th change:
2440
2441
2441 - S[n-1].ctime < S[n].ctime: can detect change of a file
2442 - S[n-1].ctime < S[n].ctime: can detect change of a file
2442 - S[n-1].ctime == S[n].ctime
2443 - S[n-1].ctime == S[n].ctime
2443 - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
2444 - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
2444 - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
2445 - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
2445 - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
2446 - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
2446 - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)
2447 - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)
2447
2448
2448 Case (*2) above means that a file was changed twice or more at
2449 Case (*2) above means that a file was changed twice or more at
2449 same time in sec (= S[n-1].ctime), and comparison of timestamp
2450 same time in sec (= S[n-1].ctime), and comparison of timestamp
2450 is ambiguous.
2451 is ambiguous.
2451
2452
2452 Base idea to avoid such ambiguity is "advance mtime 1 sec, if
2453 Base idea to avoid such ambiguity is "advance mtime 1 sec, if
2453 timestamp is ambiguous".
2454 timestamp is ambiguous".
2454
2455
2455 But advancing mtime only in case (*2) doesn't work as
2456 But advancing mtime only in case (*2) doesn't work as
2456 expected, because naturally advanced S[n].mtime in case (*1)
2457 expected, because naturally advanced S[n].mtime in case (*1)
2457 might be equal to manually advanced S[n-1 or earlier].mtime.
2458 might be equal to manually advanced S[n-1 or earlier].mtime.
2458
2459
2459 Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
2460 Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
2460 treated as ambiguous regardless of mtime, to avoid overlooking
2461 treated as ambiguous regardless of mtime, to avoid overlooking
2461 by confliction between such mtime.
2462 by confliction between such mtime.
2462
2463
2463 Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
2464 Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
2464 S[n].mtime", even if size of a file isn't changed.
2465 S[n].mtime", even if size of a file isn't changed.
2465 """
2466 """
2466 try:
2467 try:
2467 return self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
2468 return self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
2468 except AttributeError:
2469 except AttributeError:
2469 return False
2470 return False
2470
2471
2471 def avoidambig(self, path, old):
2472 def avoidambig(self, path, old):
2472 """Change file stat of specified path to avoid ambiguity
2473 """Change file stat of specified path to avoid ambiguity
2473
2474
2474 'old' should be previous filestat of 'path'.
2475 'old' should be previous filestat of 'path'.
2475
2476
2476 This skips avoiding ambiguity, if a process doesn't have
2477 This skips avoiding ambiguity, if a process doesn't have
2477 appropriate privileges for 'path'. This returns False in this
2478 appropriate privileges for 'path'. This returns False in this
2478 case.
2479 case.
2479
2480
2480 Otherwise, this returns True, as "ambiguity is avoided".
2481 Otherwise, this returns True, as "ambiguity is avoided".
2481 """
2482 """
2482 advanced = (old.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
2483 advanced = (old.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
2483 try:
2484 try:
2484 os.utime(path, (advanced, advanced))
2485 os.utime(path, (advanced, advanced))
2485 except OSError as inst:
2486 except OSError as inst:
2486 if inst.errno == errno.EPERM:
2487 if inst.errno == errno.EPERM:
2487 # utime() on the file created by another user causes EPERM,
2488 # utime() on the file created by another user causes EPERM,
2488 # if a process doesn't have appropriate privileges
2489 # if a process doesn't have appropriate privileges
2489 return False
2490 return False
2490 raise
2491 raise
2491 return True
2492 return True
2492
2493
2493 def __ne__(self, other):
2494 def __ne__(self, other):
2494 return not self == other
2495 return not self == other
2495
2496
2496
2497
2497 class atomictempfile(object):
2498 class atomictempfile(object):
2498 """writable file object that atomically updates a file
2499 """writable file object that atomically updates a file
2499
2500
2500 All writes will go to a temporary copy of the original file. Call
2501 All writes will go to a temporary copy of the original file. Call
2501 close() when you are done writing, and atomictempfile will rename
2502 close() when you are done writing, and atomictempfile will rename
2502 the temporary copy to the original name, making the changes
2503 the temporary copy to the original name, making the changes
2503 visible. If the object is destroyed without being closed, all your
2504 visible. If the object is destroyed without being closed, all your
2504 writes are discarded.
2505 writes are discarded.
2505
2506
2506 checkambig argument of constructor is used with filestat, and is
2507 checkambig argument of constructor is used with filestat, and is
2507 useful only if target file is guarded by any lock (e.g. repo.lock
2508 useful only if target file is guarded by any lock (e.g. repo.lock
2508 or repo.wlock).
2509 or repo.wlock).
2509 """
2510 """
2510
2511
2511 def __init__(self, name, mode=b'w+b', createmode=None, checkambig=False):
2512 def __init__(self, name, mode=b'w+b', createmode=None, checkambig=False):
2512 self.__name = name # permanent name
2513 self.__name = name # permanent name
2513 self._tempname = mktempcopy(
2514 self._tempname = mktempcopy(
2514 name,
2515 name,
2515 emptyok=(b'w' in mode),
2516 emptyok=(b'w' in mode),
2516 createmode=createmode,
2517 createmode=createmode,
2517 enforcewritable=(b'w' in mode),
2518 enforcewritable=(b'w' in mode),
2518 )
2519 )
2519
2520
2520 self._fp = posixfile(self._tempname, mode)
2521 self._fp = posixfile(self._tempname, mode)
2521 self._checkambig = checkambig
2522 self._checkambig = checkambig
2522
2523
2523 # delegated methods
2524 # delegated methods
2524 self.read = self._fp.read
2525 self.read = self._fp.read
2525 self.write = self._fp.write
2526 self.write = self._fp.write
2526 self.seek = self._fp.seek
2527 self.seek = self._fp.seek
2527 self.tell = self._fp.tell
2528 self.tell = self._fp.tell
2528 self.fileno = self._fp.fileno
2529 self.fileno = self._fp.fileno
2529
2530
2530 def close(self):
2531 def close(self):
2531 if not self._fp.closed:
2532 if not self._fp.closed:
2532 self._fp.close()
2533 self._fp.close()
2533 filename = localpath(self.__name)
2534 filename = localpath(self.__name)
2534 oldstat = self._checkambig and filestat.frompath(filename)
2535 oldstat = self._checkambig and filestat.frompath(filename)
2535 if oldstat and oldstat.stat:
2536 if oldstat and oldstat.stat:
2536 rename(self._tempname, filename)
2537 rename(self._tempname, filename)
2537 newstat = filestat.frompath(filename)
2538 newstat = filestat.frompath(filename)
2538 if newstat.isambig(oldstat):
2539 if newstat.isambig(oldstat):
2539 # stat of changed file is ambiguous to original one
2540 # stat of changed file is ambiguous to original one
2540 advanced = (oldstat.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
2541 advanced = (oldstat.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
2541 os.utime(filename, (advanced, advanced))
2542 os.utime(filename, (advanced, advanced))
2542 else:
2543 else:
2543 rename(self._tempname, filename)
2544 rename(self._tempname, filename)
2544
2545
2545 def discard(self):
2546 def discard(self):
2546 if not self._fp.closed:
2547 if not self._fp.closed:
2547 try:
2548 try:
2548 os.unlink(self._tempname)
2549 os.unlink(self._tempname)
2549 except OSError:
2550 except OSError:
2550 pass
2551 pass
2551 self._fp.close()
2552 self._fp.close()
2552
2553
2553 def __del__(self):
2554 def __del__(self):
2554 if safehasattr(self, '_fp'): # constructor actually did something
2555 if safehasattr(self, '_fp'): # constructor actually did something
2555 self.discard()
2556 self.discard()
2556
2557
2557 def __enter__(self):
2558 def __enter__(self):
2558 return self
2559 return self
2559
2560
2560 def __exit__(self, exctype, excvalue, traceback):
2561 def __exit__(self, exctype, excvalue, traceback):
2561 if exctype is not None:
2562 if exctype is not None:
2562 self.discard()
2563 self.discard()
2563 else:
2564 else:
2564 self.close()
2565 self.close()
2565
2566
2566
2567
2567 def unlinkpath(f, ignoremissing=False, rmdir=True):
2568 def unlinkpath(f, ignoremissing=False, rmdir=True):
2568 # type: (bytes, bool, bool) -> None
2569 # type: (bytes, bool, bool) -> None
2569 """unlink and remove the directory if it is empty"""
2570 """unlink and remove the directory if it is empty"""
2570 if ignoremissing:
2571 if ignoremissing:
2571 tryunlink(f)
2572 tryunlink(f)
2572 else:
2573 else:
2573 unlink(f)
2574 unlink(f)
2574 if rmdir:
2575 if rmdir:
2575 # try removing directories that might now be empty
2576 # try removing directories that might now be empty
2576 try:
2577 try:
2577 removedirs(os.path.dirname(f))
2578 removedirs(os.path.dirname(f))
2578 except OSError:
2579 except OSError:
2579 pass
2580 pass
2580
2581
2581
2582
2582 def tryunlink(f):
2583 def tryunlink(f):
2583 # type: (bytes) -> None
2584 # type: (bytes) -> None
2584 """Attempt to remove a file, ignoring ENOENT errors."""
2585 """Attempt to remove a file, ignoring ENOENT errors."""
2585 try:
2586 try:
2586 unlink(f)
2587 unlink(f)
2587 except OSError as e:
2588 except OSError as e:
2588 if e.errno != errno.ENOENT:
2589 if e.errno != errno.ENOENT:
2589 raise
2590 raise
2590
2591
2591
2592
2592 def makedirs(name, mode=None, notindexed=False):
2593 def makedirs(name, mode=None, notindexed=False):
2593 # type: (bytes, Optional[int], bool) -> None
2594 # type: (bytes, Optional[int], bool) -> None
2594 """recursive directory creation with parent mode inheritance
2595 """recursive directory creation with parent mode inheritance
2595
2596
2596 Newly created directories are marked as "not to be indexed by
2597 Newly created directories are marked as "not to be indexed by
2597 the content indexing service", if ``notindexed`` is specified
2598 the content indexing service", if ``notindexed`` is specified
2598 for "write" mode access.
2599 for "write" mode access.
2599 """
2600 """
2600 try:
2601 try:
2601 makedir(name, notindexed)
2602 makedir(name, notindexed)
2602 except OSError as err:
2603 except OSError as err:
2603 if err.errno == errno.EEXIST:
2604 if err.errno == errno.EEXIST:
2604 return
2605 return
2605 if err.errno != errno.ENOENT or not name:
2606 if err.errno != errno.ENOENT or not name:
2606 raise
2607 raise
2607 parent = os.path.dirname(os.path.abspath(name))
2608 parent = os.path.dirname(os.path.abspath(name))
2608 if parent == name:
2609 if parent == name:
2609 raise
2610 raise
2610 makedirs(parent, mode, notindexed)
2611 makedirs(parent, mode, notindexed)
2611 try:
2612 try:
2612 makedir(name, notindexed)
2613 makedir(name, notindexed)
2613 except OSError as err:
2614 except OSError as err:
2614 # Catch EEXIST to handle races
2615 # Catch EEXIST to handle races
2615 if err.errno == errno.EEXIST:
2616 if err.errno == errno.EEXIST:
2616 return
2617 return
2617 raise
2618 raise
2618 if mode is not None:
2619 if mode is not None:
2619 os.chmod(name, mode)
2620 os.chmod(name, mode)
2620
2621
2621
2622
2622 def readfile(path):
2623 def readfile(path):
2623 # type: (bytes) -> bytes
2624 # type: (bytes) -> bytes
2624 with open(path, b'rb') as fp:
2625 with open(path, b'rb') as fp:
2625 return fp.read()
2626 return fp.read()
2626
2627
2627
2628
2628 def writefile(path, text):
2629 def writefile(path, text):
2629 # type: (bytes, bytes) -> None
2630 # type: (bytes, bytes) -> None
2630 with open(path, b'wb') as fp:
2631 with open(path, b'wb') as fp:
2631 fp.write(text)
2632 fp.write(text)
2632
2633
2633
2634
2634 def appendfile(path, text):
2635 def appendfile(path, text):
2635 # type: (bytes, bytes) -> None
2636 # type: (bytes, bytes) -> None
2636 with open(path, b'ab') as fp:
2637 with open(path, b'ab') as fp:
2637 fp.write(text)
2638 fp.write(text)
2638
2639
2639
2640
2640 class chunkbuffer(object):
2641 class chunkbuffer(object):
2641 """Allow arbitrary sized chunks of data to be efficiently read from an
2642 """Allow arbitrary sized chunks of data to be efficiently read from an
2642 iterator over chunks of arbitrary size."""
2643 iterator over chunks of arbitrary size."""
2643
2644
2644 def __init__(self, in_iter):
2645 def __init__(self, in_iter):
2645 """in_iter is the iterator that's iterating over the input chunks."""
2646 """in_iter is the iterator that's iterating over the input chunks."""
2646
2647
2647 def splitbig(chunks):
2648 def splitbig(chunks):
2648 for chunk in chunks:
2649 for chunk in chunks:
2649 if len(chunk) > 2 ** 20:
2650 if len(chunk) > 2 ** 20:
2650 pos = 0
2651 pos = 0
2651 while pos < len(chunk):
2652 while pos < len(chunk):
2652 end = pos + 2 ** 18
2653 end = pos + 2 ** 18
2653 yield chunk[pos:end]
2654 yield chunk[pos:end]
2654 pos = end
2655 pos = end
2655 else:
2656 else:
2656 yield chunk
2657 yield chunk
2657
2658
2658 self.iter = splitbig(in_iter)
2659 self.iter = splitbig(in_iter)
2659 self._queue = collections.deque()
2660 self._queue = collections.deque()
2660 self._chunkoffset = 0
2661 self._chunkoffset = 0
2661
2662
2662 def read(self, l=None):
2663 def read(self, l=None):
2663 """Read L bytes of data from the iterator of chunks of data.
2664 """Read L bytes of data from the iterator of chunks of data.
2664 Returns less than L bytes if the iterator runs dry.
2665 Returns less than L bytes if the iterator runs dry.
2665
2666
2666 If size parameter is omitted, read everything"""
2667 If size parameter is omitted, read everything"""
2667 if l is None:
2668 if l is None:
2668 return b''.join(self.iter)
2669 return b''.join(self.iter)
2669
2670
2670 left = l
2671 left = l
2671 buf = []
2672 buf = []
2672 queue = self._queue
2673 queue = self._queue
2673 while left > 0:
2674 while left > 0:
2674 # refill the queue
2675 # refill the queue
2675 if not queue:
2676 if not queue:
2676 target = 2 ** 18
2677 target = 2 ** 18
2677 for chunk in self.iter:
2678 for chunk in self.iter:
2678 queue.append(chunk)
2679 queue.append(chunk)
2679 target -= len(chunk)
2680 target -= len(chunk)
2680 if target <= 0:
2681 if target <= 0:
2681 break
2682 break
2682 if not queue:
2683 if not queue:
2683 break
2684 break
2684
2685
2685 # The easy way to do this would be to queue.popleft(), modify the
2686 # The easy way to do this would be to queue.popleft(), modify the
2686 # chunk (if necessary), then queue.appendleft(). However, for cases
2687 # chunk (if necessary), then queue.appendleft(). However, for cases
2687 # where we read partial chunk content, this incurs 2 dequeue
2688 # where we read partial chunk content, this incurs 2 dequeue
2688 # mutations and creates a new str for the remaining chunk in the
2689 # mutations and creates a new str for the remaining chunk in the
2689 # queue. Our code below avoids this overhead.
2690 # queue. Our code below avoids this overhead.
2690
2691
2691 chunk = queue[0]
2692 chunk = queue[0]
2692 chunkl = len(chunk)
2693 chunkl = len(chunk)
2693 offset = self._chunkoffset
2694 offset = self._chunkoffset
2694
2695
2695 # Use full chunk.
2696 # Use full chunk.
2696 if offset == 0 and left >= chunkl:
2697 if offset == 0 and left >= chunkl:
2697 left -= chunkl
2698 left -= chunkl
2698 queue.popleft()
2699 queue.popleft()
2699 buf.append(chunk)
2700 buf.append(chunk)
2700 # self._chunkoffset remains at 0.
2701 # self._chunkoffset remains at 0.
2701 continue
2702 continue
2702
2703
2703 chunkremaining = chunkl - offset
2704 chunkremaining = chunkl - offset
2704
2705
2705 # Use all of unconsumed part of chunk.
2706 # Use all of unconsumed part of chunk.
2706 if left >= chunkremaining:
2707 if left >= chunkremaining:
2707 left -= chunkremaining
2708 left -= chunkremaining
2708 queue.popleft()
2709 queue.popleft()
2709 # offset == 0 is enabled by block above, so this won't merely
2710 # offset == 0 is enabled by block above, so this won't merely
2710 # copy via ``chunk[0:]``.
2711 # copy via ``chunk[0:]``.
2711 buf.append(chunk[offset:])
2712 buf.append(chunk[offset:])
2712 self._chunkoffset = 0
2713 self._chunkoffset = 0
2713
2714
2714 # Partial chunk needed.
2715 # Partial chunk needed.
2715 else:
2716 else:
2716 buf.append(chunk[offset : offset + left])
2717 buf.append(chunk[offset : offset + left])
2717 self._chunkoffset += left
2718 self._chunkoffset += left
2718 left -= chunkremaining
2719 left -= chunkremaining
2719
2720
2720 return b''.join(buf)
2721 return b''.join(buf)
2721
2722
2722
2723
def filechunkiter(f, size=131072, limit=None):
    """Generate the data of file ``f`` in chunks of ``size`` bytes
    (default 131072), reading at most ``limit`` bytes in total (default:
    read everything).  A chunk may be shorter than ``size`` when it is
    the last one, or when ``f`` is a socket or similar object that can
    return less data than requested.
    """
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        nbytes = size if limit is None else min(limit, size)
        # When nbytes is 0, short-circuit to a falsy value without reading.
        s = nbytes and f.read(nbytes)
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s
class cappedreader(object):
    """A file object proxy that allows reading up to N bytes.

    Given a source file object, instances of this type allow reading up to
    N bytes from that source file object. Attempts to read past the allowed
    limit are treated as EOF.

    It is assumed that I/O is not performed on the original file object
    in addition to I/O that is performed by this instance. If there is,
    state tracking will get out of sync and unexpected results will ensue.
    """

    def __init__(self, fh, limit):
        """Allow reading up to <limit> bytes from <fh>."""
        self._fh = fh
        # Number of bytes still allowed to be read.
        self._left = limit

    def read(self, n=-1):
        # Budget exhausted: behave like EOF.
        if not self._left:
            return b''

        # A negative n means "read everything remaining".
        wanted = self._left if n < 0 else min(n, self._left)
        data = self._fh.read(wanted)
        self._left -= len(data)
        assert self._left >= 0

        return data

    def readinto(self, b):
        res = self.read(len(b))
        if res is None:
            return None

        b[0 : len(res)] = res
        return len(res)
def unitcountfn(*unittable):
    """Return a function that renders a readable count of some quantity.

    Each row of ``unittable`` is ``(multiplier, divisor, format)``.  The
    first row whose threshold (``divisor * multiplier``) does not exceed
    the magnitude of the value is used; the value is scaled by that
    row's divisor before formatting.  The final row's format is the
    fallback, applied to the unscaled count.
    """

    def render(count):
        for factor, unit, fmt in unittable:
            if abs(count) >= unit * factor:
                return fmt % (count / float(unit))
        # No row matched: fall back to the last format with the raw count.
        return unittable[-1][2] % count

    return render
def processlinerange(fromline, toline):
    # type: (int, int) -> Tuple[int, int]
    """Check that linerange <fromline>:<toline> makes sense and return a
    0-based range.

    >>> processlinerange(10, 20)
    (9, 20)
    >>> processlinerange(2, 1)
    Traceback (most recent call last):
        ...
    ParseError: line range must be positive
    >>> processlinerange(0, 5)
    Traceback (most recent call last):
        ...
    ParseError: fromline must be strictly positive
    """
    if fromline > toline:
        raise error.ParseError(_(b"line range must be positive"))
    if fromline < 1:
        raise error.ParseError(_(b"fromline must be strictly positive"))
    # Callers expect a half-open, 0-based (start, stop) pair.
    return fromline - 1, toline
# Render a byte quantity with a binary-scaled suffix (GB/MB/KB/bytes).
# Rows are ordered from the largest threshold down; the final row is the
# fallback used by unitcountfn for values below 100 bytes.
bytecount = unitcountfn(
    (100, 1 << 30, _(b'%.0f GB')),
    (10, 1 << 30, _(b'%.1f GB')),
    (1, 1 << 30, _(b'%.2f GB')),
    (100, 1 << 20, _(b'%.0f MB')),
    (10, 1 << 20, _(b'%.1f MB')),
    (1, 1 << 20, _(b'%.2f MB')),
    (100, 1 << 10, _(b'%.0f KB')),
    (10, 1 << 10, _(b'%.1f KB')),
    (1, 1 << 10, _(b'%.2f KB')),
    (1, 1, _(b'%.0f bytes')),
)
class transformingwriter(object):
    """Writable file wrapper that passes all data through a transform
    function before handing it to the wrapped file object."""

    def __init__(self, fp, encode):
        self._fp = fp
        # Callable applied to every payload passed to write().
        self._encode = encode

    def close(self):
        self._fp.close()

    def flush(self):
        self._fp.flush()

    def write(self, data):
        # Returns whatever the underlying write() returns.
        return self._fp.write(self._encode(data))
2850 # Matches a single EOL which can either be a CRLF where repeated CR
2851 # Matches a single EOL which can either be a CRLF where repeated CR
2851 # are removed or a LF. We do not care about old Macintosh files, so a
2852 # are removed or a LF. We do not care about old Macintosh files, so a
2852 # stray CR is an error.
2853 # stray CR is an error.
2853 _eolre = remod.compile(br'\r*\n')
2854 _eolre = remod.compile(br'\r*\n')
2854
2855
2855
2856
def tolf(s):
    # type: (bytes) -> bytes
    """Normalize every line ending in ``s`` to a bare LF."""
    return _eolre.sub(b'\n', s)
def tocrlf(s):
    # type: (bytes) -> bytes
    """Normalize every line ending in ``s`` to CRLF."""
    return _eolre.sub(b'\r\n', s)
def _crlfwriter(fp):
    """Wrap ``fp`` so that written data has its line endings converted
    to CRLF on the way through."""
    return transformingwriter(fp, tocrlf)
if pycompat.oslinesep == b'\r\n':
    # CRLF platform: convert line endings at the boundary.
    tonativeeol = tocrlf
    fromnativeeol = tolf
    nativeeolwriter = _crlfwriter
else:
    # LF platform: nothing to convert, use identity passthroughs.
    tonativeeol = pycompat.identity
    fromnativeeol = pycompat.identity
    nativeeolwriter = pycompat.identity
if pyplatform.python_implementation() == b'CPython' and sys.version_info < (
    3,
    0,
):
    # Some CPython IO methods do not handle EINTR correctly.  The affected
    # versions/functions are (buggy: has the EINTR bug, okay: otherwise):
    #
    #             | < 2.7.4 | 2.7.4 to 2.7.12 | >= 3.0
    # --------------------------------------------------
    # fp.__iter__ | buggy   | buggy           | okay
    # fp.read*    | buggy   | okay [1]        | okay
    #
    # [1]: fixed by changeset 67dc99a989cd in the cpython hg repo.
    #
    # Only fp.__iter__ needs a workaround here, since we do not support
    # Python < 2.7.4.  The workaround (iter(fp.readline, '')) is about 4x
    # slower than "for x in fp" on CPython 2, which keeps an internal
    # readahead buffer for __iter__ only.  On modern systems the "read"
    # syscall cannot be interrupted for "fast" on-disk files, so regular
    # (S_ISREG) files are treated as fast and keep the unsafe-but-quick
    # path; pipes, sockets, ttys etc. take the safe path.

    def iterfile(fp):
        fastpath = True
        if type(fp) is file:
            fastpath = stat.S_ISREG(os.fstat(fp.fileno()).st_mode)
        if fastpath:
            return fp
        else:
            # fp.readline deals with EINTR correctly, use it as a workaround.
            return iter(fp.readline, b'')


else:
    # PyPy and CPython 3 do not have the EINTR issue thus no workaround needed.
    def iterfile(fp):
        return fp
def iterlines(iterator):
    # type: (Iterator[bytes]) -> Iterator[bytes]
    """Yield each line of every chunk produced by ``iterator``.

    Line terminators are not included (splitlines() semantics).
    """
    for chunk in iterator:
        for line in chunk.splitlines():
            yield line
def expandpath(path):
    # type: (bytes) -> bytes
    """Expand environment variables and a leading ``~user`` in ``path``.

    Environment variables are expanded before user-home expansion.
    """
    return os.path.expanduser(os.path.expandvars(path))
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.

    The caller's ``mapping`` is never modified.
    """
    fn = fn or (lambda s: s)
    patterns = b'|'.join(mapping.keys())
    if escape_prefix:
        patterns += b'|' + prefix
        if len(prefix) > 1:
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        # Fix: map the prefix character to itself in a *copy* so the
        # caller's mapping is not mutated as a hidden side effect.
        mapping = dict(mapping)
        mapping[prefix_char] = prefix_char
    r = remod.compile(br'%s(%s)' % (prefix, patterns))
    # group()[1:] strips the leading prefix character from the match.
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
def getport(*args, **kwargs):
    """Deprecated: use mercurial.utils.urlutil.getport()."""
    nouideprecwarn(
        b'getport(...) moved to mercurial.utils.urlutil', b'6.0', stacklevel=2
    )
    return urlutil.getport(*args, **kwargs)


def url(*args, **kwargs):
    """Deprecated: use mercurial.utils.urlutil.url()."""
    nouideprecwarn(
        b'url(...) moved to mercurial.utils.urlutil', b'6.0', stacklevel=2
    )
    return urlutil.url(*args, **kwargs)


def hasscheme(*args, **kwargs):
    """Deprecated: use mercurial.utils.urlutil.hasscheme()."""
    nouideprecwarn(
        b'hasscheme(...) moved to mercurial.utils.urlutil', b'6.0', stacklevel=2
    )
    return urlutil.hasscheme(*args, **kwargs)


def hasdriveletter(*args, **kwargs):
    """Deprecated: use mercurial.utils.urlutil.hasdriveletter()."""
    nouideprecwarn(
        b'hasdriveletter(...) moved to mercurial.utils.urlutil',
        b'6.0',
        stacklevel=2,
    )
    return urlutil.hasdriveletter(*args, **kwargs)


def urllocalpath(*args, **kwargs):
    """Deprecated: use mercurial.utils.urlutil.urllocalpath()."""
    nouideprecwarn(
        b'urllocalpath(...) moved to mercurial.utils.urlutil',
        b'6.0',
        stacklevel=2,
    )
    return urlutil.urllocalpath(*args, **kwargs)


def checksafessh(*args, **kwargs):
    """Deprecated: use mercurial.utils.urlutil.checksafessh()."""
    nouideprecwarn(
        b'checksafessh(...) moved to mercurial.utils.urlutil',
        b'6.0',
        stacklevel=2,
    )
    return urlutil.checksafessh(*args, **kwargs)


def hidepassword(*args, **kwargs):
    """Deprecated: use mercurial.utils.urlutil.hidepassword()."""
    nouideprecwarn(
        b'hidepassword(...) moved to mercurial.utils.urlutil',
        b'6.0',
        stacklevel=2,
    )
    return urlutil.hidepassword(*args, **kwargs)


def removeauth(*args, **kwargs):
    """Deprecated: use mercurial.utils.urlutil.removeauth()."""
    nouideprecwarn(
        b'removeauth(...) moved to mercurial.utils.urlutil',
        b'6.0',
        stacklevel=2,
    )
    return urlutil.removeauth(*args, **kwargs)
# Render a duration (in seconds) with an appropriate unit from s down to
# ns.  Rows are ordered from the largest threshold down; the final row is
# the fallback used by unitcountfn.
timecount = unitcountfn(
    (1, 1e3, _(b'%.0f s')),
    (100, 1, _(b'%.1f s')),
    (10, 1, _(b'%.2f s')),
    (1, 1, _(b'%.3f s')),
    (100, 0.001, _(b'%.1f ms')),
    (10, 0.001, _(b'%.2f ms')),
    (1, 0.001, _(b'%.3f ms')),
    (100, 0.000001, _(b'%.1f us')),
    (10, 0.000001, _(b'%.2f us')),
    (1, 0.000001, _(b'%.3f us')),
    (100, 0.000000001, _(b'%.1f ns')),
    (10, 0.000000001, _(b'%.2f ns')),
    (1, 0.000000001, _(b'%.3f ns')),
)
@attr.s
class timedcmstats(object):
    """Stats information produced by the timedcm context manager on entering."""

    # the starting value of the timer as a float (meaning and resolution
    # are platform dependent, see util.timer)
    start = attr.ib(default=attr.Factory(lambda: timer()))
    # elapsed seconds as a float; 0 until the context is exited
    elapsed = attr.ib(default=0)
    # nesting depth of timedcm context managers
    level = attr.ib(default=1)

    def __bytes__(self):
        # While still inside the context elapsed is 0, so render a
        # placeholder instead of a bogus duration.
        return timecount(self.elapsed) if self.elapsed else b'<unknown>'

    __str__ = encoding.strmethod(__bytes__)
@contextlib.contextmanager
def timedcm(whencefmt, *whenceargs):
    """A context manager that produces timing information for a given context.

    On entering a timedcmstats instance is produced.

    This context manager is reentrant.
    """
    # Track nesting depth so nested reports can be indented per level.
    timedcm._nested += 1
    stats = timedcmstats(level=timedcm._nested)
    try:
        with tracing.log(whencefmt, *whenceargs):
            yield stats
    finally:
        # Always record the elapsed time, even if the body raised.
        stats.elapsed = timer() - stats.start
        timedcm._nested -= 1


timedcm._nested = 0
def timed(func):
    """Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    """

    def wrapper(*args, **kwargs):
        with timedcm(pycompat.bytestr(func.__name__)) as stats:
            result = func(*args, **kwargs)
        # Indent by nesting level so nested timed calls read as a tree.
        indent = b' ' * stats.level * 2
        procutil.stderr.write(
            b'%s%s: %s\n' % (indent, pycompat.bytestr(func.__name__), stats)
        )
        return result

    return wrapper
3098 _sizeunits = (
3099 _sizeunits = (
3099 (b'm', 2 ** 20),
3100 (b'm', 2 ** 20),
3100 (b'k', 2 ** 10),
3101 (b'k', 2 ** 10),
3101 (b'g', 2 ** 30),
3102 (b'g', 2 ** 30),
3102 (b'kb', 2 ** 10),
3103 (b'kb', 2 ** 10),
3103 (b'mb', 2 ** 20),
3104 (b'mb', 2 ** 20),
3104 (b'gb', 2 ** 30),
3105 (b'gb', 2 ** 30),
3105 (b'b', 1),
3106 (b'b', 1),
3106 )
3107 )
3107
3108
3108
3109
def sizetoint(s):
    # type: (bytes) -> int
    """Convert a space specifier to a byte count.

    >>> sizetoint(b'30')
    30
    >>> sizetoint(b'2.2kb')
    2252
    >>> sizetoint(b'6M')
    6291456
    """
    spec = s.strip().lower()
    try:
        for suffix, mult in _sizeunits:
            if spec.endswith(suffix):
                return int(float(spec[: -len(suffix)]) * mult)
        # No suffix matched: plain integer byte count.
        return int(spec)
    except ValueError:
        raise error.ParseError(_(b"couldn't parse size: %s") % s)
class hooks(object):
    """A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources."""

    def __init__(self):
        # List of (source, callable) pairs; sorted lazily on invocation.
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        # Sort by source name so call order is deterministic.
        self._hooks.sort(key=lambda entry: entry[0])
        return [hook(*args) for _source, hook in self._hooks]
def getstackframes(skip=0, line=b'    %-*s in %s\n', fileline=b'%s:%d', depth=0):
    """Yields lines for a nicely formatted stacktrace.
    Skips the 'skip' last entries, then return the last 'depth' entries.
    Each file+linenumber is formatted according to fileline.
    Each line is formatted according to line.
    If line is None, it yields:
      length of longest filepath+line number,
      filepath+linenumber,
      function

    Not be used in production code but very convenient while developing.
    """
    # Drop this frame plus the 'skip' most recent callers, keep 'depth'
    # entries ([-0:] keeps everything when depth is 0).
    entries = [
        (fileline % (pycompat.sysbytes(fn), ln), pycompat.sysbytes(func))
        for fn, ln, func, _text in traceback.extract_stack()[: -skip - 1]
    ][-depth:]
    if entries:
        # Width of the widest file:line column, for alignment.
        fnmax = max(len(entry[0]) for entry in entries)
        for fnln, func in entries:
            if line is None:
                yield (fnmax, fnln, func)
            else:
                yield line % (fnmax, fnln, func)
def debugstacktrace(
    msg=b'stacktrace',
    skip=0,
    f=procutil.stderr,
    otherf=procutil.stdout,
    depth=0,
    prefix=b'',
):
    """Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' entries closest to the call, then show 'depth' entries.
    By default it will flush stdout first.
    It can be used everywhere and intentionally does not require an ui object.
    Not be used in production code but very convenient while developing.
    """
    # Flush the other stream first so interleaved output stays ordered.
    if otherf:
        otherf.flush()
    f.write(b'%s%s at:\n' % (prefix, msg.rstrip()))
    # skip + 1 also hides this function's own frame.
    for line in getstackframes(skip + 1, depth=depth):
        f.write(prefix + line)
    f.flush()


# convenient shortcut
dst = debugstacktrace
3200 def safename(f, tag, ctx, others=None):
3201 def safename(f, tag, ctx, others=None):
3201 """
3202 """
3202 Generate a name that it is safe to rename f to in the given context.
3203 Generate a name that it is safe to rename f to in the given context.
3203
3204
3204 f: filename to rename
3205 f: filename to rename
3205 tag: a string tag that will be included in the new name
3206 tag: a string tag that will be included in the new name
3206 ctx: a context, in which the new name must not exist
3207 ctx: a context, in which the new name must not exist
3207 others: a set of other filenames that the new name must not be in
3208 others: a set of other filenames that the new name must not be in
3208
3209
3209 Returns a file name of the form oldname~tag[~number] which does not exist
3210 Returns a file name of the form oldname~tag[~number] which does not exist
3210 in the provided context and is not in the set of other names.
3211 in the provided context and is not in the set of other names.
3211 """
3212 """
3212 if others is None:
3213 if others is None:
3213 others = set()
3214 others = set()
3214
3215
3215 fn = b'%s~%s' % (f, tag)
3216 fn = b'%s~%s' % (f, tag)
3216 if fn not in ctx and fn not in others:
3217 if fn not in ctx and fn not in others:
3217 return fn
3218 return fn
3218 for n in itertools.count(1):
3219 for n in itertools.count(1):
3219 fn = b'%s~%s~%s' % (f, tag, n)
3220 fn = b'%s~%s~%s' % (f, tag, n)
3220 if fn not in ctx and fn not in others:
3221 if fn not in ctx and fn not in others:
3221 return fn
3222 return fn
3222
3223
3223
3224
3224 def readexactly(stream, n):
3225 def readexactly(stream, n):
3225 '''read n bytes from stream.read and abort if less was available'''
3226 '''read n bytes from stream.read and abort if less was available'''
3226 s = stream.read(n)
3227 s = stream.read(n)
3227 if len(s) < n:
3228 if len(s) < n:
3228 raise error.Abort(
3229 raise error.Abort(
3229 _(b"stream ended unexpectedly (got %d bytes, expected %d)")
3230 _(b"stream ended unexpectedly (got %d bytes, expected %d)")
3230 % (len(s), n)
3231 % (len(s), n)
3231 )
3232 )
3232 return s
3233 return s
3233
3234
3234
3235
3235 def uvarintencode(value):
3236 def uvarintencode(value):
3236 """Encode an unsigned integer value to a varint.
3237 """Encode an unsigned integer value to a varint.
3237
3238
3238 A varint is a variable length integer of 1 or more bytes. Each byte
3239 A varint is a variable length integer of 1 or more bytes. Each byte
3239 except the last has the most significant bit set. The lower 7 bits of
3240 except the last has the most significant bit set. The lower 7 bits of
3240 each byte store the 2's complement representation, least significant group
3241 each byte store the 2's complement representation, least significant group
3241 first.
3242 first.
3242
3243
3243 >>> uvarintencode(0)
3244 >>> uvarintencode(0)
3244 '\\x00'
3245 '\\x00'
3245 >>> uvarintencode(1)
3246 >>> uvarintencode(1)
3246 '\\x01'
3247 '\\x01'
3247 >>> uvarintencode(127)
3248 >>> uvarintencode(127)
3248 '\\x7f'
3249 '\\x7f'
3249 >>> uvarintencode(1337)
3250 >>> uvarintencode(1337)
3250 '\\xb9\\n'
3251 '\\xb9\\n'
3251 >>> uvarintencode(65536)
3252 >>> uvarintencode(65536)
3252 '\\x80\\x80\\x04'
3253 '\\x80\\x80\\x04'
3253 >>> uvarintencode(-1)
3254 >>> uvarintencode(-1)
3254 Traceback (most recent call last):
3255 Traceback (most recent call last):
3255 ...
3256 ...
3256 ProgrammingError: negative value for uvarint: -1
3257 ProgrammingError: negative value for uvarint: -1
3257 """
3258 """
3258 if value < 0:
3259 if value < 0:
3259 raise error.ProgrammingError(b'negative value for uvarint: %d' % value)
3260 raise error.ProgrammingError(b'negative value for uvarint: %d' % value)
3260 bits = value & 0x7F
3261 bits = value & 0x7F
3261 value >>= 7
3262 value >>= 7
3262 bytes = []
3263 bytes = []
3263 while value:
3264 while value:
3264 bytes.append(pycompat.bytechr(0x80 | bits))
3265 bytes.append(pycompat.bytechr(0x80 | bits))
3265 bits = value & 0x7F
3266 bits = value & 0x7F
3266 value >>= 7
3267 value >>= 7
3267 bytes.append(pycompat.bytechr(bits))
3268 bytes.append(pycompat.bytechr(bits))
3268
3269
3269 return b''.join(bytes)
3270 return b''.join(bytes)
3270
3271
3271
3272
3272 def uvarintdecodestream(fh):
3273 def uvarintdecodestream(fh):
3273 """Decode an unsigned variable length integer from a stream.
3274 """Decode an unsigned variable length integer from a stream.
3274
3275
3275 The passed argument is anything that has a ``.read(N)`` method.
3276 The passed argument is anything that has a ``.read(N)`` method.
3276
3277
3277 >>> try:
3278 >>> try:
3278 ... from StringIO import StringIO as BytesIO
3279 ... from StringIO import StringIO as BytesIO
3279 ... except ImportError:
3280 ... except ImportError:
3280 ... from io import BytesIO
3281 ... from io import BytesIO
3281 >>> uvarintdecodestream(BytesIO(b'\\x00'))
3282 >>> uvarintdecodestream(BytesIO(b'\\x00'))
3282 0
3283 0
3283 >>> uvarintdecodestream(BytesIO(b'\\x01'))
3284 >>> uvarintdecodestream(BytesIO(b'\\x01'))
3284 1
3285 1
3285 >>> uvarintdecodestream(BytesIO(b'\\x7f'))
3286 >>> uvarintdecodestream(BytesIO(b'\\x7f'))
3286 127
3287 127
3287 >>> uvarintdecodestream(BytesIO(b'\\xb9\\n'))
3288 >>> uvarintdecodestream(BytesIO(b'\\xb9\\n'))
3288 1337
3289 1337
3289 >>> uvarintdecodestream(BytesIO(b'\\x80\\x80\\x04'))
3290 >>> uvarintdecodestream(BytesIO(b'\\x80\\x80\\x04'))
3290 65536
3291 65536
3291 >>> uvarintdecodestream(BytesIO(b'\\x80'))
3292 >>> uvarintdecodestream(BytesIO(b'\\x80'))
3292 Traceback (most recent call last):
3293 Traceback (most recent call last):
3293 ...
3294 ...
3294 Abort: stream ended unexpectedly (got 0 bytes, expected 1)
3295 Abort: stream ended unexpectedly (got 0 bytes, expected 1)
3295 """
3296 """
3296 result = 0
3297 result = 0
3297 shift = 0
3298 shift = 0
3298 while True:
3299 while True:
3299 byte = ord(readexactly(fh, 1))
3300 byte = ord(readexactly(fh, 1))
3300 result |= (byte & 0x7F) << shift
3301 result |= (byte & 0x7F) << shift
3301 if not (byte & 0x80):
3302 if not (byte & 0x80):
3302 return result
3303 return result
3303 shift += 7
3304 shift += 7
3304
3305
3305
3306
3306 # Passing the '' locale means that the locale should be set according to the
3307 # Passing the '' locale means that the locale should be set according to the
3307 # user settings (environment variables).
3308 # user settings (environment variables).
3308 # Python sometimes avoids setting the global locale settings. When interfacing
3309 # Python sometimes avoids setting the global locale settings. When interfacing
3309 # with C code (e.g. the curses module or the Subversion bindings), the global
3310 # with C code (e.g. the curses module or the Subversion bindings), the global
3310 # locale settings must be initialized correctly. Python 2 does not initialize
3311 # locale settings must be initialized correctly. Python 2 does not initialize
3311 # the global locale settings on interpreter startup. Python 3 sometimes
3312 # the global locale settings on interpreter startup. Python 3 sometimes
3312 # initializes LC_CTYPE, but not consistently at least on Windows. Therefore we
3313 # initializes LC_CTYPE, but not consistently at least on Windows. Therefore we
3313 # explicitly initialize it to get consistent behavior if it's not already
3314 # explicitly initialize it to get consistent behavior if it's not already
3314 # initialized. Since CPython commit 177d921c8c03d30daa32994362023f777624b10d,
3315 # initialized. Since CPython commit 177d921c8c03d30daa32994362023f777624b10d,
3315 # LC_CTYPE is always initialized. If we require Python 3.8+, we should re-check
3316 # LC_CTYPE is always initialized. If we require Python 3.8+, we should re-check
3316 # if we can remove this code.
3317 # if we can remove this code.
3317 @contextlib.contextmanager
3318 @contextlib.contextmanager
3318 def with_lc_ctype():
3319 def with_lc_ctype():
3319 oldloc = locale.setlocale(locale.LC_CTYPE, None)
3320 oldloc = locale.setlocale(locale.LC_CTYPE, None)
3320 if oldloc == 'C':
3321 if oldloc == 'C':
3321 try:
3322 try:
3322 try:
3323 try:
3323 locale.setlocale(locale.LC_CTYPE, '')
3324 locale.setlocale(locale.LC_CTYPE, '')
3324 except locale.Error:
3325 except locale.Error:
3325 # The likely case is that the locale from the environment
3326 # The likely case is that the locale from the environment
3326 # variables is unknown.
3327 # variables is unknown.
3327 pass
3328 pass
3328 yield
3329 yield
3329 finally:
3330 finally:
3330 locale.setlocale(locale.LC_CTYPE, oldloc)
3331 locale.setlocale(locale.LC_CTYPE, oldloc)
3331 else:
3332 else:
3332 yield
3333 yield
3333
3334
3334
3335
3335 def _estimatememory():
3336 def _estimatememory():
3336 # type: () -> Optional[int]
3337 # type: () -> Optional[int]
3337 """Provide an estimate for the available system memory in Bytes.
3338 """Provide an estimate for the available system memory in Bytes.
3338
3339
3339 If no estimate can be provided on the platform, returns None.
3340 If no estimate can be provided on the platform, returns None.
3340 """
3341 """
3341 if pycompat.sysplatform.startswith(b'win'):
3342 if pycompat.sysplatform.startswith(b'win'):
3342 # On Windows, use the GlobalMemoryStatusEx kernel function directly.
3343 # On Windows, use the GlobalMemoryStatusEx kernel function directly.
3343 from ctypes import c_long as DWORD, c_ulonglong as DWORDLONG
3344 from ctypes import c_long as DWORD, c_ulonglong as DWORDLONG
3344 from ctypes.wintypes import ( # pytype: disable=import-error
3345 from ctypes.wintypes import ( # pytype: disable=import-error
3345 Structure,
3346 Structure,
3346 byref,
3347 byref,
3347 sizeof,
3348 sizeof,
3348 windll,
3349 windll,
3349 )
3350 )
3350
3351
3351 class MEMORYSTATUSEX(Structure):
3352 class MEMORYSTATUSEX(Structure):
3352 _fields_ = [
3353 _fields_ = [
3353 ('dwLength', DWORD),
3354 ('dwLength', DWORD),
3354 ('dwMemoryLoad', DWORD),
3355 ('dwMemoryLoad', DWORD),
3355 ('ullTotalPhys', DWORDLONG),
3356 ('ullTotalPhys', DWORDLONG),
3356 ('ullAvailPhys', DWORDLONG),
3357 ('ullAvailPhys', DWORDLONG),
3357 ('ullTotalPageFile', DWORDLONG),
3358 ('ullTotalPageFile', DWORDLONG),
3358 ('ullAvailPageFile', DWORDLONG),
3359 ('ullAvailPageFile', DWORDLONG),
3359 ('ullTotalVirtual', DWORDLONG),
3360 ('ullTotalVirtual', DWORDLONG),
3360 ('ullAvailVirtual', DWORDLONG),
3361 ('ullAvailVirtual', DWORDLONG),
3361 ('ullExtendedVirtual', DWORDLONG),
3362 ('ullExtendedVirtual', DWORDLONG),
3362 ]
3363 ]
3363
3364
3364 x = MEMORYSTATUSEX()
3365 x = MEMORYSTATUSEX()
3365 x.dwLength = sizeof(x)
3366 x.dwLength = sizeof(x)
3366 windll.kernel32.GlobalMemoryStatusEx(byref(x))
3367 windll.kernel32.GlobalMemoryStatusEx(byref(x))
3367 return x.ullAvailPhys
3368 return x.ullAvailPhys
3368
3369
3369 # On newer Unix-like systems and Mac OSX, the sysconf interface
3370 # On newer Unix-like systems and Mac OSX, the sysconf interface
3370 # can be used. _SC_PAGE_SIZE is part of POSIX; _SC_PHYS_PAGES
3371 # can be used. _SC_PAGE_SIZE is part of POSIX; _SC_PHYS_PAGES
3371 # seems to be implemented on most systems.
3372 # seems to be implemented on most systems.
3372 try:
3373 try:
3373 pagesize = os.sysconf(os.sysconf_names['SC_PAGE_SIZE'])
3374 pagesize = os.sysconf(os.sysconf_names['SC_PAGE_SIZE'])
3374 pages = os.sysconf(os.sysconf_names['SC_PHYS_PAGES'])
3375 pages = os.sysconf(os.sysconf_names['SC_PHYS_PAGES'])
3375 return pagesize * pages
3376 return pagesize * pages
3376 except OSError: # sysconf can fail
3377 except OSError: # sysconf can fail
3377 pass
3378 pass
3378 except KeyError: # unknown parameter
3379 except KeyError: # unknown parameter
3379 pass
3380 pass
@@ -1,691 +1,713 b''
1 # windows.py - Windows utility function implementations for Mercurial
1 # windows.py - Windows utility function implementations for Mercurial
2 #
2 #
3 # Copyright 2005-2009 Olivia Mackall <olivia@selenic.com> and others
3 # Copyright 2005-2009 Olivia Mackall <olivia@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import getpass
11 import getpass
12 import msvcrt
12 import msvcrt
13 import os
13 import os
14 import re
14 import re
15 import stat
15 import stat
16 import string
16 import string
17 import sys
17 import sys
18
18
19 from .i18n import _
19 from .i18n import _
20 from .pycompat import getattr
20 from .pycompat import getattr
21 from . import (
21 from . import (
22 encoding,
22 encoding,
23 error,
23 error,
24 policy,
24 policy,
25 pycompat,
25 pycompat,
26 win32,
26 win32,
27 )
27 )
28
28
29 try:
29 try:
30 import _winreg as winreg # pytype: disable=import-error
30 import _winreg as winreg # pytype: disable=import-error
31
31
32 winreg.CloseKey
32 winreg.CloseKey
33 except ImportError:
33 except ImportError:
34 # py2 only
34 # py2 only
35 import winreg # pytype: disable=import-error
35 import winreg # pytype: disable=import-error
36
36
37 osutil = policy.importmod('osutil')
37 osutil = policy.importmod('osutil')
38
38
39 getfsmountpoint = win32.getvolumename
39 getfsmountpoint = win32.getvolumename
40 getfstype = win32.getfstype
40 getfstype = win32.getfstype
41 getuser = win32.getuser
41 getuser = win32.getuser
42 hidewindow = win32.hidewindow
42 hidewindow = win32.hidewindow
43 makedir = win32.makedir
43 makedir = win32.makedir
44 nlinks = win32.nlinks
44 nlinks = win32.nlinks
45 oslink = win32.oslink
45 oslink = win32.oslink
46 samedevice = win32.samedevice
46 samedevice = win32.samedevice
47 samefile = win32.samefile
47 samefile = win32.samefile
48 setsignalhandler = win32.setsignalhandler
48 setsignalhandler = win32.setsignalhandler
49 spawndetached = win32.spawndetached
49 spawndetached = win32.spawndetached
50 split = os.path.split
50 split = os.path.split
51 testpid = win32.testpid
51 testpid = win32.testpid
52 unlink = win32.unlink
52 unlink = win32.unlink
53
53
54 umask = 0o022
54 umask = 0o022
55
55
56
56
57 class mixedfilemodewrapper(object):
57 class mixedfilemodewrapper(object):
58 """Wraps a file handle when it is opened in read/write mode.
58 """Wraps a file handle when it is opened in read/write mode.
59
59
60 fopen() and fdopen() on Windows have a specific-to-Windows requirement
60 fopen() and fdopen() on Windows have a specific-to-Windows requirement
61 that files opened with mode r+, w+, or a+ make a call to a file positioning
61 that files opened with mode r+, w+, or a+ make a call to a file positioning
62 function when switching between reads and writes. Without this extra call,
62 function when switching between reads and writes. Without this extra call,
63 Python will raise a not very intuitive "IOError: [Errno 0] Error."
63 Python will raise a not very intuitive "IOError: [Errno 0] Error."
64
64
65 This class wraps posixfile instances when the file is opened in read/write
65 This class wraps posixfile instances when the file is opened in read/write
66 mode and automatically adds checks or inserts appropriate file positioning
66 mode and automatically adds checks or inserts appropriate file positioning
67 calls when necessary.
67 calls when necessary.
68 """
68 """
69
69
70 OPNONE = 0
70 OPNONE = 0
71 OPREAD = 1
71 OPREAD = 1
72 OPWRITE = 2
72 OPWRITE = 2
73
73
74 def __init__(self, fp):
74 def __init__(self, fp):
75 object.__setattr__(self, '_fp', fp)
75 object.__setattr__(self, '_fp', fp)
76 object.__setattr__(self, '_lastop', 0)
76 object.__setattr__(self, '_lastop', 0)
77
77
78 def __enter__(self):
78 def __enter__(self):
79 self._fp.__enter__()
79 self._fp.__enter__()
80 return self
80 return self
81
81
82 def __exit__(self, exc_type, exc_val, exc_tb):
82 def __exit__(self, exc_type, exc_val, exc_tb):
83 self._fp.__exit__(exc_type, exc_val, exc_tb)
83 self._fp.__exit__(exc_type, exc_val, exc_tb)
84
84
85 def __getattr__(self, name):
85 def __getattr__(self, name):
86 return getattr(self._fp, name)
86 return getattr(self._fp, name)
87
87
88 def __setattr__(self, name, value):
88 def __setattr__(self, name, value):
89 return self._fp.__setattr__(name, value)
89 return self._fp.__setattr__(name, value)
90
90
91 def _noopseek(self):
91 def _noopseek(self):
92 self._fp.seek(0, os.SEEK_CUR)
92 self._fp.seek(0, os.SEEK_CUR)
93
93
94 def seek(self, *args, **kwargs):
94 def seek(self, *args, **kwargs):
95 object.__setattr__(self, '_lastop', self.OPNONE)
95 object.__setattr__(self, '_lastop', self.OPNONE)
96 return self._fp.seek(*args, **kwargs)
96 return self._fp.seek(*args, **kwargs)
97
97
98 def write(self, d):
98 def write(self, d):
99 if self._lastop == self.OPREAD:
99 if self._lastop == self.OPREAD:
100 self._noopseek()
100 self._noopseek()
101
101
102 object.__setattr__(self, '_lastop', self.OPWRITE)
102 object.__setattr__(self, '_lastop', self.OPWRITE)
103 return self._fp.write(d)
103 return self._fp.write(d)
104
104
105 def writelines(self, *args, **kwargs):
105 def writelines(self, *args, **kwargs):
106 if self._lastop == self.OPREAD:
106 if self._lastop == self.OPREAD:
107 self._noopeseek()
107 self._noopeseek()
108
108
109 object.__setattr__(self, '_lastop', self.OPWRITE)
109 object.__setattr__(self, '_lastop', self.OPWRITE)
110 return self._fp.writelines(*args, **kwargs)
110 return self._fp.writelines(*args, **kwargs)
111
111
112 def read(self, *args, **kwargs):
112 def read(self, *args, **kwargs):
113 if self._lastop == self.OPWRITE:
113 if self._lastop == self.OPWRITE:
114 self._noopseek()
114 self._noopseek()
115
115
116 object.__setattr__(self, '_lastop', self.OPREAD)
116 object.__setattr__(self, '_lastop', self.OPREAD)
117 return self._fp.read(*args, **kwargs)
117 return self._fp.read(*args, **kwargs)
118
118
119 def readline(self, *args, **kwargs):
119 def readline(self, *args, **kwargs):
120 if self._lastop == self.OPWRITE:
120 if self._lastop == self.OPWRITE:
121 self._noopseek()
121 self._noopseek()
122
122
123 object.__setattr__(self, '_lastop', self.OPREAD)
123 object.__setattr__(self, '_lastop', self.OPREAD)
124 return self._fp.readline(*args, **kwargs)
124 return self._fp.readline(*args, **kwargs)
125
125
126 def readlines(self, *args, **kwargs):
126 def readlines(self, *args, **kwargs):
127 if self._lastop == self.OPWRITE:
127 if self._lastop == self.OPWRITE:
128 self._noopseek()
128 self._noopseek()
129
129
130 object.__setattr__(self, '_lastop', self.OPREAD)
130 object.__setattr__(self, '_lastop', self.OPREAD)
131 return self._fp.readlines(*args, **kwargs)
131 return self._fp.readlines(*args, **kwargs)
132
132
133
133
134 class fdproxy(object):
134 class fdproxy(object):
135 """Wraps osutil.posixfile() to override the name attribute to reflect the
135 """Wraps osutil.posixfile() to override the name attribute to reflect the
136 underlying file name.
136 underlying file name.
137 """
137 """
138
138
139 def __init__(self, name, fp):
139 def __init__(self, name, fp):
140 self.name = name
140 self.name = name
141 self._fp = fp
141 self._fp = fp
142
142
143 def __enter__(self):
143 def __enter__(self):
144 self._fp.__enter__()
144 self._fp.__enter__()
145 # Return this wrapper for the context manager so that the name is
145 # Return this wrapper for the context manager so that the name is
146 # still available.
146 # still available.
147 return self
147 return self
148
148
149 def __exit__(self, exc_type, exc_value, traceback):
149 def __exit__(self, exc_type, exc_value, traceback):
150 self._fp.__exit__(exc_type, exc_value, traceback)
150 self._fp.__exit__(exc_type, exc_value, traceback)
151
151
152 def __iter__(self):
152 def __iter__(self):
153 return iter(self._fp)
153 return iter(self._fp)
154
154
155 def __getattr__(self, name):
155 def __getattr__(self, name):
156 return getattr(self._fp, name)
156 return getattr(self._fp, name)
157
157
158
158
159 def posixfile(name, mode=b'r', buffering=-1):
159 def posixfile(name, mode=b'r', buffering=-1):
160 '''Open a file with even more POSIX-like semantics'''
160 '''Open a file with even more POSIX-like semantics'''
161 try:
161 try:
162 fp = osutil.posixfile(name, mode, buffering) # may raise WindowsError
162 fp = osutil.posixfile(name, mode, buffering) # may raise WindowsError
163
163
164 # PyFile_FromFd() ignores the name, and seems to report fp.name as the
164 # PyFile_FromFd() ignores the name, and seems to report fp.name as the
165 # underlying file descriptor.
165 # underlying file descriptor.
166 if pycompat.ispy3:
166 if pycompat.ispy3:
167 fp = fdproxy(name, fp)
167 fp = fdproxy(name, fp)
168
168
169 # The position when opening in append mode is implementation defined, so
169 # The position when opening in append mode is implementation defined, so
170 # make it consistent with other platforms, which position at EOF.
170 # make it consistent with other platforms, which position at EOF.
171 if b'a' in mode:
171 if b'a' in mode:
172 fp.seek(0, os.SEEK_END)
172 fp.seek(0, os.SEEK_END)
173
173
174 if b'+' in mode:
174 if b'+' in mode:
175 return mixedfilemodewrapper(fp)
175 return mixedfilemodewrapper(fp)
176
176
177 return fp
177 return fp
178 except WindowsError as err:
178 except WindowsError as err:
179 # convert to a friendlier exception
179 # convert to a friendlier exception
180 raise IOError(
180 raise IOError(
181 err.errno, '%s: %s' % (encoding.strfromlocal(name), err.strerror)
181 err.errno, '%s: %s' % (encoding.strfromlocal(name), err.strerror)
182 )
182 )
183
183
184
184
185 # may be wrapped by win32mbcs extension
185 # may be wrapped by win32mbcs extension
186 listdir = osutil.listdir
186 listdir = osutil.listdir
187
187
188
188
189 # copied from .utils.procutil, remove after Python 2 support was dropped
189 # copied from .utils.procutil, remove after Python 2 support was dropped
190 def _isatty(fp):
190 def _isatty(fp):
191 try:
191 try:
192 return fp.isatty()
192 return fp.isatty()
193 except AttributeError:
193 except AttributeError:
194 return False
194 return False
195
195
196
196
197 def get_password():
198 """Prompt for password with echo off, using Windows getch().
199
200 This shouldn't be called directly- use ``ui.getpass()`` instead, which
201 checks if the session is interactive first.
202 """
203 pw = ""
204 while True:
205 c = msvcrt.getwch()
206 if c == '\r' or c == '\n':
207 break
208 if c == '\003':
209 raise KeyboardInterrupt
210 if c == '\b':
211 pw = pw[:-1]
212 else:
213 pw = pw + c
214 msvcrt.putwch('\r')
215 msvcrt.putwch('\n')
216 return encoding.strtolocal(pw)
217
218
197 class winstdout(object):
219 class winstdout(object):
198 """Some files on Windows misbehave.
220 """Some files on Windows misbehave.
199
221
200 When writing to a broken pipe, EINVAL instead of EPIPE may be raised.
222 When writing to a broken pipe, EINVAL instead of EPIPE may be raised.
201
223
202 When writing too many bytes to a console at the same, a "Not enough space"
224 When writing too many bytes to a console at the same, a "Not enough space"
203 error may happen. Python 3 already works around that.
225 error may happen. Python 3 already works around that.
204 """
226 """
205
227
206 def __init__(self, fp):
228 def __init__(self, fp):
207 self.fp = fp
229 self.fp = fp
208 self.throttle = not pycompat.ispy3 and _isatty(fp)
230 self.throttle = not pycompat.ispy3 and _isatty(fp)
209
231
210 def __getattr__(self, key):
232 def __getattr__(self, key):
211 return getattr(self.fp, key)
233 return getattr(self.fp, key)
212
234
213 def close(self):
235 def close(self):
214 try:
236 try:
215 self.fp.close()
237 self.fp.close()
216 except IOError:
238 except IOError:
217 pass
239 pass
218
240
219 def write(self, s):
241 def write(self, s):
220 try:
242 try:
221 if not self.throttle:
243 if not self.throttle:
222 return self.fp.write(s)
244 return self.fp.write(s)
223 # This is workaround for "Not enough space" error on
245 # This is workaround for "Not enough space" error on
224 # writing large size of data to console.
246 # writing large size of data to console.
225 limit = 16000
247 limit = 16000
226 l = len(s)
248 l = len(s)
227 start = 0
249 start = 0
228 while start < l:
250 while start < l:
229 end = start + limit
251 end = start + limit
230 self.fp.write(s[start:end])
252 self.fp.write(s[start:end])
231 start = end
253 start = end
232 except IOError as inst:
254 except IOError as inst:
233 if inst.errno != 0 and not win32.lasterrorwaspipeerror(inst):
255 if inst.errno != 0 and not win32.lasterrorwaspipeerror(inst):
234 raise
256 raise
235 self.close()
257 self.close()
236 raise IOError(errno.EPIPE, 'Broken pipe')
258 raise IOError(errno.EPIPE, 'Broken pipe')
237
259
238 def flush(self):
260 def flush(self):
239 try:
261 try:
240 return self.fp.flush()
262 return self.fp.flush()
241 except IOError as inst:
263 except IOError as inst:
242 if not win32.lasterrorwaspipeerror(inst):
264 if not win32.lasterrorwaspipeerror(inst):
243 raise
265 raise
244 raise IOError(errno.EPIPE, 'Broken pipe')
266 raise IOError(errno.EPIPE, 'Broken pipe')
245
267
246
268
247 def openhardlinks():
269 def openhardlinks():
248 return True
270 return True
249
271
250
272
251 def parsepatchoutput(output_line):
273 def parsepatchoutput(output_line):
252 """parses the output produced by patch and returns the filename"""
274 """parses the output produced by patch and returns the filename"""
253 pf = output_line[14:]
275 pf = output_line[14:]
254 if pf[0] == b'`':
276 if pf[0] == b'`':
255 pf = pf[1:-1] # Remove the quotes
277 pf = pf[1:-1] # Remove the quotes
256 return pf
278 return pf
257
279
258
280
259 def sshargs(sshcmd, host, user, port):
281 def sshargs(sshcmd, host, user, port):
260 '''Build argument list for ssh or Plink'''
282 '''Build argument list for ssh or Plink'''
261 pflag = b'plink' in sshcmd.lower() and b'-P' or b'-p'
283 pflag = b'plink' in sshcmd.lower() and b'-P' or b'-p'
262 args = user and (b"%s@%s" % (user, host)) or host
284 args = user and (b"%s@%s" % (user, host)) or host
263 if args.startswith(b'-') or args.startswith(b'/'):
285 if args.startswith(b'-') or args.startswith(b'/'):
264 raise error.Abort(
286 raise error.Abort(
265 _(b'illegal ssh hostname or username starting with - or /: %s')
287 _(b'illegal ssh hostname or username starting with - or /: %s')
266 % args
288 % args
267 )
289 )
268 args = shellquote(args)
290 args = shellquote(args)
269 if port:
291 if port:
270 args = b'%s %s %s' % (pflag, shellquote(port), args)
292 args = b'%s %s %s' % (pflag, shellquote(port), args)
271 return args
293 return args
272
294
273
295
274 def setflags(f, l, x):
296 def setflags(f, l, x):
275 pass
297 pass
276
298
277
299
278 def copymode(src, dst, mode=None, enforcewritable=False):
300 def copymode(src, dst, mode=None, enforcewritable=False):
279 pass
301 pass
280
302
281
303
282 def checkexec(path):
304 def checkexec(path):
283 return False
305 return False
284
306
285
307
286 def checklink(path):
308 def checklink(path):
287 return False
309 return False
288
310
289
311
290 def setbinary(fd):
312 def setbinary(fd):
291 # When run without console, pipes may expose invalid
313 # When run without console, pipes may expose invalid
292 # fileno(), usually set to -1.
314 # fileno(), usually set to -1.
293 fno = getattr(fd, 'fileno', None)
315 fno = getattr(fd, 'fileno', None)
294 if fno is not None and fno() >= 0:
316 if fno is not None and fno() >= 0:
295 msvcrt.setmode(fno(), os.O_BINARY) # pytype: disable=module-attr
317 msvcrt.setmode(fno(), os.O_BINARY) # pytype: disable=module-attr
296
318
297
319
298 def pconvert(path):
320 def pconvert(path):
299 return path.replace(pycompat.ossep, b'/')
321 return path.replace(pycompat.ossep, b'/')
300
322
301
323
302 def localpath(path):
324 def localpath(path):
303 return path.replace(b'/', b'\\')
325 return path.replace(b'/', b'\\')
304
326
305
327
306 def normpath(path):
328 def normpath(path):
307 return pconvert(os.path.normpath(path))
329 return pconvert(os.path.normpath(path))
308
330
309
331
310 def normcase(path):
332 def normcase(path):
311 return encoding.upper(path) # NTFS compares via upper()
333 return encoding.upper(path) # NTFS compares via upper()
312
334
313
335
314 # see posix.py for definitions
336 # see posix.py for definitions
315 normcasespec = encoding.normcasespecs.upper
337 normcasespec = encoding.normcasespecs.upper
316 normcasefallback = encoding.upperfallback
338 normcasefallback = encoding.upperfallback
317
339
318
340
319 def samestat(s1, s2):
341 def samestat(s1, s2):
320 return False
342 return False
321
343
322
344
323 def shelltocmdexe(path, env):
345 def shelltocmdexe(path, env):
324 r"""Convert shell variables in the form $var and ${var} inside ``path``
346 r"""Convert shell variables in the form $var and ${var} inside ``path``
325 to %var% form. Existing Windows style variables are left unchanged.
347 to %var% form. Existing Windows style variables are left unchanged.
326
348
327 The variables are limited to the given environment. Unknown variables are
349 The variables are limited to the given environment. Unknown variables are
328 left unchanged.
350 left unchanged.
329
351
330 >>> e = {b'var1': b'v1', b'var2': b'v2', b'var3': b'v3'}
352 >>> e = {b'var1': b'v1', b'var2': b'v2', b'var3': b'v3'}
331 >>> # Only valid values are expanded
353 >>> # Only valid values are expanded
332 >>> shelltocmdexe(b'cmd $var1 ${var2} %var3% $missing ${missing} %missing%',
354 >>> shelltocmdexe(b'cmd $var1 ${var2} %var3% $missing ${missing} %missing%',
333 ... e)
355 ... e)
334 'cmd %var1% %var2% %var3% $missing ${missing} %missing%'
356 'cmd %var1% %var2% %var3% $missing ${missing} %missing%'
335 >>> # Single quote prevents expansion, as does \$ escaping
357 >>> # Single quote prevents expansion, as does \$ escaping
336 >>> shelltocmdexe(b"cmd '$var1 ${var2} %var3%' \$var1 \${var2} \\", e)
358 >>> shelltocmdexe(b"cmd '$var1 ${var2} %var3%' \$var1 \${var2} \\", e)
337 'cmd "$var1 ${var2} %var3%" $var1 ${var2} \\'
359 'cmd "$var1 ${var2} %var3%" $var1 ${var2} \\'
338 >>> # $$ is not special. %% is not special either, but can be the end and
360 >>> # $$ is not special. %% is not special either, but can be the end and
339 >>> # start of consecutive variables
361 >>> # start of consecutive variables
340 >>> shelltocmdexe(b"cmd $$ %% %var1%%var2%", e)
362 >>> shelltocmdexe(b"cmd $$ %% %var1%%var2%", e)
341 'cmd $$ %% %var1%%var2%'
363 'cmd $$ %% %var1%%var2%'
342 >>> # No double substitution
364 >>> # No double substitution
343 >>> shelltocmdexe(b"$var1 %var1%", {b'var1': b'%var2%', b'var2': b'boom'})
365 >>> shelltocmdexe(b"$var1 %var1%", {b'var1': b'%var2%', b'var2': b'boom'})
344 '%var1% %var1%'
366 '%var1% %var1%'
345 >>> # Tilde expansion
367 >>> # Tilde expansion
346 >>> shelltocmdexe(b"~/dir ~\dir2 ~tmpfile \~/", {})
368 >>> shelltocmdexe(b"~/dir ~\dir2 ~tmpfile \~/", {})
347 '%USERPROFILE%/dir %USERPROFILE%\\dir2 ~tmpfile ~/'
369 '%USERPROFILE%/dir %USERPROFILE%\\dir2 ~tmpfile ~/'
348 """
370 """
349 if not any(c in path for c in b"$'~"):
371 if not any(c in path for c in b"$'~"):
350 return path
372 return path
351
373
352 varchars = pycompat.sysbytes(string.ascii_letters + string.digits) + b'_-'
374 varchars = pycompat.sysbytes(string.ascii_letters + string.digits) + b'_-'
353
375
354 res = b''
376 res = b''
355 index = 0
377 index = 0
356 pathlen = len(path)
378 pathlen = len(path)
357 while index < pathlen:
379 while index < pathlen:
358 c = path[index : index + 1]
380 c = path[index : index + 1]
359 if c == b'\'': # no expansion within single quotes
381 if c == b'\'': # no expansion within single quotes
360 path = path[index + 1 :]
382 path = path[index + 1 :]
361 pathlen = len(path)
383 pathlen = len(path)
362 try:
384 try:
363 index = path.index(b'\'')
385 index = path.index(b'\'')
364 res += b'"' + path[:index] + b'"'
386 res += b'"' + path[:index] + b'"'
365 except ValueError:
387 except ValueError:
366 res += c + path
388 res += c + path
367 index = pathlen - 1
389 index = pathlen - 1
368 elif c == b'%': # variable
390 elif c == b'%': # variable
369 path = path[index + 1 :]
391 path = path[index + 1 :]
370 pathlen = len(path)
392 pathlen = len(path)
371 try:
393 try:
372 index = path.index(b'%')
394 index = path.index(b'%')
373 except ValueError:
395 except ValueError:
374 res += b'%' + path
396 res += b'%' + path
375 index = pathlen - 1
397 index = pathlen - 1
376 else:
398 else:
377 var = path[:index]
399 var = path[:index]
378 res += b'%' + var + b'%'
400 res += b'%' + var + b'%'
379 elif c == b'$': # variable
401 elif c == b'$': # variable
380 if path[index + 1 : index + 2] == b'{':
402 if path[index + 1 : index + 2] == b'{':
381 path = path[index + 2 :]
403 path = path[index + 2 :]
382 pathlen = len(path)
404 pathlen = len(path)
383 try:
405 try:
384 index = path.index(b'}')
406 index = path.index(b'}')
385 var = path[:index]
407 var = path[:index]
386
408
387 # See below for why empty variables are handled specially
409 # See below for why empty variables are handled specially
388 if env.get(var, b'') != b'':
410 if env.get(var, b'') != b'':
389 res += b'%' + var + b'%'
411 res += b'%' + var + b'%'
390 else:
412 else:
391 res += b'${' + var + b'}'
413 res += b'${' + var + b'}'
392 except ValueError:
414 except ValueError:
393 res += b'${' + path
415 res += b'${' + path
394 index = pathlen - 1
416 index = pathlen - 1
395 else:
417 else:
396 var = b''
418 var = b''
397 index += 1
419 index += 1
398 c = path[index : index + 1]
420 c = path[index : index + 1]
399 while c != b'' and c in varchars:
421 while c != b'' and c in varchars:
400 var += c
422 var += c
401 index += 1
423 index += 1
402 c = path[index : index + 1]
424 c = path[index : index + 1]
403 # Some variables (like HG_OLDNODE) may be defined, but have an
425 # Some variables (like HG_OLDNODE) may be defined, but have an
404 # empty value. Those need to be skipped because when spawning
426 # empty value. Those need to be skipped because when spawning
405 # cmd.exe to run the hook, it doesn't replace %VAR% for an empty
427 # cmd.exe to run the hook, it doesn't replace %VAR% for an empty
406 # VAR, and that really confuses things like revset expressions.
428 # VAR, and that really confuses things like revset expressions.
407 # OTOH, if it's left in Unix format and the hook runs sh.exe, it
429 # OTOH, if it's left in Unix format and the hook runs sh.exe, it
408 # will substitute to an empty string, and everything is happy.
430 # will substitute to an empty string, and everything is happy.
409 if env.get(var, b'') != b'':
431 if env.get(var, b'') != b'':
410 res += b'%' + var + b'%'
432 res += b'%' + var + b'%'
411 else:
433 else:
412 res += b'$' + var
434 res += b'$' + var
413
435
414 if c != b'':
436 if c != b'':
415 index -= 1
437 index -= 1
416 elif (
438 elif (
417 c == b'~'
439 c == b'~'
418 and index + 1 < pathlen
440 and index + 1 < pathlen
419 and path[index + 1 : index + 2] in (b'\\', b'/')
441 and path[index + 1 : index + 2] in (b'\\', b'/')
420 ):
442 ):
421 res += b"%USERPROFILE%"
443 res += b"%USERPROFILE%"
422 elif (
444 elif (
423 c == b'\\'
445 c == b'\\'
424 and index + 1 < pathlen
446 and index + 1 < pathlen
425 and path[index + 1 : index + 2] in (b'$', b'~')
447 and path[index + 1 : index + 2] in (b'$', b'~')
426 ):
448 ):
427 # Skip '\', but only if it is escaping $ or ~
449 # Skip '\', but only if it is escaping $ or ~
428 res += path[index + 1 : index + 2]
450 res += path[index + 1 : index + 2]
429 index += 1
451 index += 1
430 else:
452 else:
431 res += c
453 res += c
432
454
433 index += 1
455 index += 1
434 return res
456 return res
435
457
436
458
437 # A sequence of backslashes is special iff it precedes a double quote:
459 # A sequence of backslashes is special iff it precedes a double quote:
438 # - if there's an even number of backslashes, the double quote is not
460 # - if there's an even number of backslashes, the double quote is not
439 # quoted (i.e. it ends the quoted region)
461 # quoted (i.e. it ends the quoted region)
440 # - if there's an odd number of backslashes, the double quote is quoted
462 # - if there's an odd number of backslashes, the double quote is quoted
441 # - in both cases, every pair of backslashes is unquoted into a single
463 # - in both cases, every pair of backslashes is unquoted into a single
442 # backslash
464 # backslash
443 # (See http://msdn2.microsoft.com/en-us/library/a1y7w461.aspx )
465 # (See http://msdn2.microsoft.com/en-us/library/a1y7w461.aspx )
444 # So, to quote a string, we must surround it in double quotes, double
466 # So, to quote a string, we must surround it in double quotes, double
445 # the number of backslashes that precede double quotes and add another
467 # the number of backslashes that precede double quotes and add another
446 # backslash before every double quote (being careful with the double
468 # backslash before every double quote (being careful with the double
447 # quote we've appended to the end)
469 # quote we've appended to the end)
448 _quotere = None
470 _quotere = None
449 _needsshellquote = None
471 _needsshellquote = None
450
472
451
473
452 def shellquote(s):
474 def shellquote(s):
453 r"""
475 r"""
454 >>> shellquote(br'C:\Users\xyz')
476 >>> shellquote(br'C:\Users\xyz')
455 '"C:\\Users\\xyz"'
477 '"C:\\Users\\xyz"'
456 >>> shellquote(br'C:\Users\xyz/mixed')
478 >>> shellquote(br'C:\Users\xyz/mixed')
457 '"C:\\Users\\xyz/mixed"'
479 '"C:\\Users\\xyz/mixed"'
458 >>> # Would be safe not to quote too, since it is all double backslashes
480 >>> # Would be safe not to quote too, since it is all double backslashes
459 >>> shellquote(br'C:\\Users\\xyz')
481 >>> shellquote(br'C:\\Users\\xyz')
460 '"C:\\\\Users\\\\xyz"'
482 '"C:\\\\Users\\\\xyz"'
461 >>> # But this must be quoted
483 >>> # But this must be quoted
462 >>> shellquote(br'C:\\Users\\xyz/abc')
484 >>> shellquote(br'C:\\Users\\xyz/abc')
463 '"C:\\\\Users\\\\xyz/abc"'
485 '"C:\\\\Users\\\\xyz/abc"'
464 """
486 """
465 global _quotere
487 global _quotere
466 if _quotere is None:
488 if _quotere is None:
467 _quotere = re.compile(br'(\\*)("|\\$)')
489 _quotere = re.compile(br'(\\*)("|\\$)')
468 global _needsshellquote
490 global _needsshellquote
469 if _needsshellquote is None:
491 if _needsshellquote is None:
470 # ":" is also treated as "safe character", because it is used as a part
492 # ":" is also treated as "safe character", because it is used as a part
471 # of path name on Windows. "\" is also part of a path name, but isn't
493 # of path name on Windows. "\" is also part of a path name, but isn't
472 # safe because shlex.split() (kind of) treats it as an escape char and
494 # safe because shlex.split() (kind of) treats it as an escape char and
473 # drops it. It will leave the next character, even if it is another
495 # drops it. It will leave the next character, even if it is another
474 # "\".
496 # "\".
475 _needsshellquote = re.compile(br'[^a-zA-Z0-9._:/-]').search
497 _needsshellquote = re.compile(br'[^a-zA-Z0-9._:/-]').search
476 if s and not _needsshellquote(s) and not _quotere.search(s):
498 if s and not _needsshellquote(s) and not _quotere.search(s):
477 # "s" shouldn't have to be quoted
499 # "s" shouldn't have to be quoted
478 return s
500 return s
479 return b'"%s"' % _quotere.sub(br'\1\1\\\2', s)
501 return b'"%s"' % _quotere.sub(br'\1\1\\\2', s)
480
502
481
503
482 def _unquote(s):
504 def _unquote(s):
483 if s.startswith(b'"') and s.endswith(b'"'):
505 if s.startswith(b'"') and s.endswith(b'"'):
484 return s[1:-1]
506 return s[1:-1]
485 return s
507 return s
486
508
487
509
488 def shellsplit(s):
510 def shellsplit(s):
489 """Parse a command string in cmd.exe way (best-effort)"""
511 """Parse a command string in cmd.exe way (best-effort)"""
490 return pycompat.maplist(_unquote, pycompat.shlexsplit(s, posix=False))
512 return pycompat.maplist(_unquote, pycompat.shlexsplit(s, posix=False))
491
513
492
514
493 # if you change this stub into a real check, please try to implement the
515 # if you change this stub into a real check, please try to implement the
494 # username and groupname functions above, too.
516 # username and groupname functions above, too.
495 def isowner(st):
517 def isowner(st):
496 return True
518 return True
497
519
498
520
499 def findexe(command):
521 def findexe(command):
500 """Find executable for command searching like cmd.exe does.
522 """Find executable for command searching like cmd.exe does.
501 If command is a basename then PATH is searched for command.
523 If command is a basename then PATH is searched for command.
502 PATH isn't searched if command is an absolute or relative path.
524 PATH isn't searched if command is an absolute or relative path.
503 An extension from PATHEXT is found and added if not present.
525 An extension from PATHEXT is found and added if not present.
504 If command isn't found None is returned."""
526 If command isn't found None is returned."""
505 pathext = encoding.environ.get(b'PATHEXT', b'.COM;.EXE;.BAT;.CMD')
527 pathext = encoding.environ.get(b'PATHEXT', b'.COM;.EXE;.BAT;.CMD')
506 pathexts = [ext for ext in pathext.lower().split(pycompat.ospathsep)]
528 pathexts = [ext for ext in pathext.lower().split(pycompat.ospathsep)]
507 if os.path.splitext(command)[1].lower() in pathexts:
529 if os.path.splitext(command)[1].lower() in pathexts:
508 pathexts = [b'']
530 pathexts = [b'']
509
531
510 def findexisting(pathcommand):
532 def findexisting(pathcommand):
511 """Will append extension (if needed) and return existing file"""
533 """Will append extension (if needed) and return existing file"""
512 for ext in pathexts:
534 for ext in pathexts:
513 executable = pathcommand + ext
535 executable = pathcommand + ext
514 if os.path.exists(executable):
536 if os.path.exists(executable):
515 return executable
537 return executable
516 return None
538 return None
517
539
518 if pycompat.ossep in command:
540 if pycompat.ossep in command:
519 return findexisting(command)
541 return findexisting(command)
520
542
521 for path in encoding.environ.get(b'PATH', b'').split(pycompat.ospathsep):
543 for path in encoding.environ.get(b'PATH', b'').split(pycompat.ospathsep):
522 executable = findexisting(os.path.join(path, command))
544 executable = findexisting(os.path.join(path, command))
523 if executable is not None:
545 if executable is not None:
524 return executable
546 return executable
525 return findexisting(os.path.expanduser(os.path.expandvars(command)))
547 return findexisting(os.path.expanduser(os.path.expandvars(command)))
526
548
527
549
528 _wantedkinds = {stat.S_IFREG, stat.S_IFLNK}
550 _wantedkinds = {stat.S_IFREG, stat.S_IFLNK}
529
551
530
552
531 def statfiles(files):
553 def statfiles(files):
532 """Stat each file in files. Yield each stat, or None if a file
554 """Stat each file in files. Yield each stat, or None if a file
533 does not exist or has a type we don't care about.
555 does not exist or has a type we don't care about.
534
556
535 Cluster and cache stat per directory to minimize number of OS stat calls."""
557 Cluster and cache stat per directory to minimize number of OS stat calls."""
536 dircache = {} # dirname -> filename -> status | None if file does not exist
558 dircache = {} # dirname -> filename -> status | None if file does not exist
537 getkind = stat.S_IFMT
559 getkind = stat.S_IFMT
538 for nf in files:
560 for nf in files:
539 nf = normcase(nf)
561 nf = normcase(nf)
540 dir, base = os.path.split(nf)
562 dir, base = os.path.split(nf)
541 if not dir:
563 if not dir:
542 dir = b'.'
564 dir = b'.'
543 cache = dircache.get(dir, None)
565 cache = dircache.get(dir, None)
544 if cache is None:
566 if cache is None:
545 try:
567 try:
546 dmap = {
568 dmap = {
547 normcase(n): s
569 normcase(n): s
548 for n, k, s in listdir(dir, True)
570 for n, k, s in listdir(dir, True)
549 if getkind(s.st_mode) in _wantedkinds
571 if getkind(s.st_mode) in _wantedkinds
550 }
572 }
551 except OSError as err:
573 except OSError as err:
552 # Python >= 2.5 returns ENOENT and adds winerror field
574 # Python >= 2.5 returns ENOENT and adds winerror field
553 # EINVAL is raised if dir is not a directory.
575 # EINVAL is raised if dir is not a directory.
554 if err.errno not in (errno.ENOENT, errno.EINVAL, errno.ENOTDIR):
576 if err.errno not in (errno.ENOENT, errno.EINVAL, errno.ENOTDIR):
555 raise
577 raise
556 dmap = {}
578 dmap = {}
557 cache = dircache.setdefault(dir, dmap)
579 cache = dircache.setdefault(dir, dmap)
558 yield cache.get(base, None)
580 yield cache.get(base, None)
559
581
560
582
561 def username(uid=None):
583 def username(uid=None):
562 """Return the name of the user with the given uid.
584 """Return the name of the user with the given uid.
563
585
564 If uid is None, return the name of the current user."""
586 If uid is None, return the name of the current user."""
565 if not uid:
587 if not uid:
566 return pycompat.fsencode(getpass.getuser())
588 return pycompat.fsencode(getpass.getuser())
567 return None
589 return None
568
590
569
591
570 def groupname(gid=None):
592 def groupname(gid=None):
571 """Return the name of the group with the given gid.
593 """Return the name of the group with the given gid.
572
594
573 If gid is None, return the name of the current group."""
595 If gid is None, return the name of the current group."""
574 return None
596 return None
575
597
576
598
577 def readlink(pathname):
599 def readlink(pathname):
578 return pycompat.fsencode(os.readlink(pycompat.fsdecode(pathname)))
600 return pycompat.fsencode(os.readlink(pycompat.fsdecode(pathname)))
579
601
580
602
581 def removedirs(name):
603 def removedirs(name):
582 """special version of os.removedirs that does not remove symlinked
604 """special version of os.removedirs that does not remove symlinked
583 directories or junction points if they actually contain files"""
605 directories or junction points if they actually contain files"""
584 if listdir(name):
606 if listdir(name):
585 return
607 return
586 os.rmdir(name)
608 os.rmdir(name)
587 head, tail = os.path.split(name)
609 head, tail = os.path.split(name)
588 if not tail:
610 if not tail:
589 head, tail = os.path.split(head)
611 head, tail = os.path.split(head)
590 while head and tail:
612 while head and tail:
591 try:
613 try:
592 if listdir(head):
614 if listdir(head):
593 return
615 return
594 os.rmdir(head)
616 os.rmdir(head)
595 except (ValueError, OSError):
617 except (ValueError, OSError):
596 break
618 break
597 head, tail = os.path.split(head)
619 head, tail = os.path.split(head)
598
620
599
621
600 def rename(src, dst):
622 def rename(src, dst):
601 '''atomically rename file src to dst, replacing dst if it exists'''
623 '''atomically rename file src to dst, replacing dst if it exists'''
602 try:
624 try:
603 os.rename(src, dst)
625 os.rename(src, dst)
604 except OSError as e:
626 except OSError as e:
605 if e.errno != errno.EEXIST:
627 if e.errno != errno.EEXIST:
606 raise
628 raise
607 unlink(dst)
629 unlink(dst)
608 os.rename(src, dst)
630 os.rename(src, dst)
609
631
610
632
611 def gethgcmd():
633 def gethgcmd():
612 return [encoding.strtolocal(arg) for arg in [sys.executable] + sys.argv[:1]]
634 return [encoding.strtolocal(arg) for arg in [sys.executable] + sys.argv[:1]]
613
635
614
636
615 def groupmembers(name):
637 def groupmembers(name):
616 # Don't support groups on Windows for now
638 # Don't support groups on Windows for now
617 raise KeyError
639 raise KeyError
618
640
619
641
620 def isexec(f):
642 def isexec(f):
621 return False
643 return False
622
644
623
645
624 class cachestat(object):
646 class cachestat(object):
625 def __init__(self, path):
647 def __init__(self, path):
626 pass
648 pass
627
649
628 def cacheable(self):
650 def cacheable(self):
629 return False
651 return False
630
652
631
653
632 def lookupreg(key, valname=None, scope=None):
654 def lookupreg(key, valname=None, scope=None):
633 """Look up a key/value name in the Windows registry.
655 """Look up a key/value name in the Windows registry.
634
656
635 valname: value name. If unspecified, the default value for the key
657 valname: value name. If unspecified, the default value for the key
636 is used.
658 is used.
637 scope: optionally specify scope for registry lookup, this can be
659 scope: optionally specify scope for registry lookup, this can be
638 a sequence of scopes to look up in order. Default (CURRENT_USER,
660 a sequence of scopes to look up in order. Default (CURRENT_USER,
639 LOCAL_MACHINE).
661 LOCAL_MACHINE).
640 """
662 """
641 if scope is None:
663 if scope is None:
642 scope = (winreg.HKEY_CURRENT_USER, winreg.HKEY_LOCAL_MACHINE)
664 scope = (winreg.HKEY_CURRENT_USER, winreg.HKEY_LOCAL_MACHINE)
643 elif not isinstance(scope, (list, tuple)):
665 elif not isinstance(scope, (list, tuple)):
644 scope = (scope,)
666 scope = (scope,)
645 for s in scope:
667 for s in scope:
646 try:
668 try:
647 with winreg.OpenKey(s, encoding.strfromlocal(key)) as hkey:
669 with winreg.OpenKey(s, encoding.strfromlocal(key)) as hkey:
648 name = valname and encoding.strfromlocal(valname) or valname
670 name = valname and encoding.strfromlocal(valname) or valname
649 val = winreg.QueryValueEx(hkey, name)[0]
671 val = winreg.QueryValueEx(hkey, name)[0]
650 # never let a Unicode string escape into the wild
672 # never let a Unicode string escape into the wild
651 return encoding.unitolocal(val)
673 return encoding.unitolocal(val)
652 except EnvironmentError:
674 except EnvironmentError:
653 pass
675 pass
654
676
655
677
656 expandglobs = True
678 expandglobs = True
657
679
658
680
659 def statislink(st):
681 def statislink(st):
660 '''check whether a stat result is a symlink'''
682 '''check whether a stat result is a symlink'''
661 return False
683 return False
662
684
663
685
664 def statisexec(st):
686 def statisexec(st):
665 '''check whether a stat result is an executable file'''
687 '''check whether a stat result is an executable file'''
666 return False
688 return False
667
689
668
690
669 def poll(fds):
691 def poll(fds):
670 # see posix.py for description
692 # see posix.py for description
671 raise NotImplementedError()
693 raise NotImplementedError()
672
694
673
695
674 def readpipe(pipe):
696 def readpipe(pipe):
675 """Read all available data from a pipe."""
697 """Read all available data from a pipe."""
676 chunks = []
698 chunks = []
677 while True:
699 while True:
678 size = win32.peekpipe(pipe)
700 size = win32.peekpipe(pipe)
679 if not size:
701 if not size:
680 break
702 break
681
703
682 s = pipe.read(size)
704 s = pipe.read(size)
683 if not s:
705 if not s:
684 break
706 break
685 chunks.append(s)
707 chunks.append(s)
686
708
687 return b''.join(chunks)
709 return b''.join(chunks)
688
710
689
711
690 def bindunixsocket(sock, path):
712 def bindunixsocket(sock, path):
691 raise NotImplementedError('unsupported platform')
713 raise NotImplementedError('unsupported platform')
@@ -1,1120 +1,1123 b''
1 from __future__ import absolute_import, print_function
1 from __future__ import absolute_import, print_function
2
2
3 import distutils.version
3 import distutils.version
4 import os
4 import os
5 import re
5 import re
6 import socket
6 import socket
7 import stat
7 import stat
8 import subprocess
8 import subprocess
9 import sys
9 import sys
10 import tempfile
10 import tempfile
11
11
12 tempprefix = 'hg-hghave-'
12 tempprefix = 'hg-hghave-'
13
13
14 checks = {
14 checks = {
15 "true": (lambda: True, "yak shaving"),
15 "true": (lambda: True, "yak shaving"),
16 "false": (lambda: False, "nail clipper"),
16 "false": (lambda: False, "nail clipper"),
17 "known-bad-output": (lambda: True, "use for currently known bad output"),
17 "known-bad-output": (lambda: True, "use for currently known bad output"),
18 "missing-correct-output": (lambda: False, "use for missing good output"),
18 "missing-correct-output": (lambda: False, "use for missing good output"),
19 }
19 }
20
20
21 try:
21 try:
22 import msvcrt
22 import msvcrt
23
23
24 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
24 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
25 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
25 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
26 except ImportError:
26 except ImportError:
27 pass
27 pass
28
28
29 stdout = getattr(sys.stdout, 'buffer', sys.stdout)
29 stdout = getattr(sys.stdout, 'buffer', sys.stdout)
30 stderr = getattr(sys.stderr, 'buffer', sys.stderr)
30 stderr = getattr(sys.stderr, 'buffer', sys.stderr)
31
31
32 if sys.version_info[0] >= 3:
32 if sys.version_info[0] >= 3:
33
33
34 def _sys2bytes(p):
34 def _sys2bytes(p):
35 if p is None:
35 if p is None:
36 return p
36 return p
37 return p.encode('utf-8')
37 return p.encode('utf-8')
38
38
39 def _bytes2sys(p):
39 def _bytes2sys(p):
40 if p is None:
40 if p is None:
41 return p
41 return p
42 return p.decode('utf-8')
42 return p.decode('utf-8')
43
43
44
44
45 else:
45 else:
46
46
47 def _sys2bytes(p):
47 def _sys2bytes(p):
48 return p
48 return p
49
49
50 _bytes2sys = _sys2bytes
50 _bytes2sys = _sys2bytes
51
51
52
52
53 def check(name, desc):
53 def check(name, desc):
54 """Registers a check function for a feature."""
54 """Registers a check function for a feature."""
55
55
56 def decorator(func):
56 def decorator(func):
57 checks[name] = (func, desc)
57 checks[name] = (func, desc)
58 return func
58 return func
59
59
60 return decorator
60 return decorator
61
61
62
62
63 def checkvers(name, desc, vers):
63 def checkvers(name, desc, vers):
64 """Registers a check function for each of a series of versions.
64 """Registers a check function for each of a series of versions.
65
65
66 vers can be a list or an iterator.
66 vers can be a list or an iterator.
67
67
68 Produces a series of feature checks that have the form <name><vers> without
68 Produces a series of feature checks that have the form <name><vers> without
69 any punctuation (even if there's punctuation in 'vers'; i.e. this produces
69 any punctuation (even if there's punctuation in 'vers'; i.e. this produces
70 'py38', not 'py3.8' or 'py-38')."""
70 'py38', not 'py3.8' or 'py-38')."""
71
71
72 def decorator(func):
72 def decorator(func):
73 def funcv(v):
73 def funcv(v):
74 def f():
74 def f():
75 return func(v)
75 return func(v)
76
76
77 return f
77 return f
78
78
79 for v in vers:
79 for v in vers:
80 v = str(v)
80 v = str(v)
81 f = funcv(v)
81 f = funcv(v)
82 checks['%s%s' % (name, v.replace('.', ''))] = (f, desc % v)
82 checks['%s%s' % (name, v.replace('.', ''))] = (f, desc % v)
83 return func
83 return func
84
84
85 return decorator
85 return decorator
86
86
87
87
88 def checkfeatures(features):
88 def checkfeatures(features):
89 result = {
89 result = {
90 'error': [],
90 'error': [],
91 'missing': [],
91 'missing': [],
92 'skipped': [],
92 'skipped': [],
93 }
93 }
94
94
95 for feature in features:
95 for feature in features:
96 negate = feature.startswith('no-')
96 negate = feature.startswith('no-')
97 if negate:
97 if negate:
98 feature = feature[3:]
98 feature = feature[3:]
99
99
100 if feature not in checks:
100 if feature not in checks:
101 result['missing'].append(feature)
101 result['missing'].append(feature)
102 continue
102 continue
103
103
104 check, desc = checks[feature]
104 check, desc = checks[feature]
105 try:
105 try:
106 available = check()
106 available = check()
107 except Exception:
107 except Exception:
108 result['error'].append('hghave check failed: %s' % feature)
108 result['error'].append('hghave check failed: %s' % feature)
109 continue
109 continue
110
110
111 if not negate and not available:
111 if not negate and not available:
112 result['skipped'].append('missing feature: %s' % desc)
112 result['skipped'].append('missing feature: %s' % desc)
113 elif negate and available:
113 elif negate and available:
114 result['skipped'].append('system supports %s' % desc)
114 result['skipped'].append('system supports %s' % desc)
115
115
116 return result
116 return result
117
117
118
118
119 def require(features):
119 def require(features):
120 """Require that features are available, exiting if not."""
120 """Require that features are available, exiting if not."""
121 result = checkfeatures(features)
121 result = checkfeatures(features)
122
122
123 for missing in result['missing']:
123 for missing in result['missing']:
124 stderr.write(
124 stderr.write(
125 ('skipped: unknown feature: %s\n' % missing).encode('utf-8')
125 ('skipped: unknown feature: %s\n' % missing).encode('utf-8')
126 )
126 )
127 for msg in result['skipped']:
127 for msg in result['skipped']:
128 stderr.write(('skipped: %s\n' % msg).encode('utf-8'))
128 stderr.write(('skipped: %s\n' % msg).encode('utf-8'))
129 for msg in result['error']:
129 for msg in result['error']:
130 stderr.write(('%s\n' % msg).encode('utf-8'))
130 stderr.write(('%s\n' % msg).encode('utf-8'))
131
131
132 if result['missing']:
132 if result['missing']:
133 sys.exit(2)
133 sys.exit(2)
134
134
135 if result['skipped'] or result['error']:
135 if result['skipped'] or result['error']:
136 sys.exit(1)
136 sys.exit(1)
137
137
138
138
139 def matchoutput(cmd, regexp, ignorestatus=False):
139 def matchoutput(cmd, regexp, ignorestatus=False):
140 """Return the match object if cmd executes successfully and its output
140 """Return the match object if cmd executes successfully and its output
141 is matched by the supplied regular expression.
141 is matched by the supplied regular expression.
142 """
142 """
143
143
144 # Tests on Windows have to fake USERPROFILE to point to the test area so
144 # Tests on Windows have to fake USERPROFILE to point to the test area so
145 # that `~` is properly expanded on py3.8+. However, some tools like black
145 # that `~` is properly expanded on py3.8+. However, some tools like black
146 # make calls that need the real USERPROFILE in order to run `foo --version`.
146 # make calls that need the real USERPROFILE in order to run `foo --version`.
147 env = os.environ
147 env = os.environ
148 if os.name == 'nt':
148 if os.name == 'nt':
149 env = os.environ.copy()
149 env = os.environ.copy()
150 env['USERPROFILE'] = env['REALUSERPROFILE']
150 env['USERPROFILE'] = env['REALUSERPROFILE']
151
151
152 r = re.compile(regexp)
152 r = re.compile(regexp)
153 p = subprocess.Popen(
153 p = subprocess.Popen(
154 cmd,
154 cmd,
155 shell=True,
155 shell=True,
156 stdout=subprocess.PIPE,
156 stdout=subprocess.PIPE,
157 stderr=subprocess.STDOUT,
157 stderr=subprocess.STDOUT,
158 env=env,
158 env=env,
159 )
159 )
160 s = p.communicate()[0]
160 s = p.communicate()[0]
161 ret = p.returncode
161 ret = p.returncode
162 return (ignorestatus or not ret) and r.search(s)
162 return (ignorestatus or not ret) and r.search(s)
163
163
164
164
165 @check("baz", "GNU Arch baz client")
165 @check("baz", "GNU Arch baz client")
166 def has_baz():
166 def has_baz():
167 return matchoutput('baz --version 2>&1', br'baz Bazaar version')
167 return matchoutput('baz --version 2>&1', br'baz Bazaar version')
168
168
169
169
170 @check("bzr", "Canonical's Bazaar client")
170 @check("bzr", "Canonical's Bazaar client")
171 def has_bzr():
171 def has_bzr():
172 try:
172 try:
173 import bzrlib
173 import bzrlib
174 import bzrlib.bzrdir
174 import bzrlib.bzrdir
175 import bzrlib.errors
175 import bzrlib.errors
176 import bzrlib.revision
176 import bzrlib.revision
177 import bzrlib.revisionspec
177 import bzrlib.revisionspec
178
178
179 bzrlib.revisionspec.RevisionSpec
179 bzrlib.revisionspec.RevisionSpec
180 return bzrlib.__doc__ is not None
180 return bzrlib.__doc__ is not None
181 except (AttributeError, ImportError):
181 except (AttributeError, ImportError):
182 return False
182 return False
183
183
184
184
185 @checkvers("bzr", "Canonical's Bazaar client >= %s", (1.14,))
185 @checkvers("bzr", "Canonical's Bazaar client >= %s", (1.14,))
186 def has_bzr_range(v):
186 def has_bzr_range(v):
187 major, minor = v.split('rc')[0].split('.')[0:2]
187 major, minor = v.split('rc')[0].split('.')[0:2]
188 try:
188 try:
189 import bzrlib
189 import bzrlib
190
190
191 return bzrlib.__doc__ is not None and bzrlib.version_info[:2] >= (
191 return bzrlib.__doc__ is not None and bzrlib.version_info[:2] >= (
192 int(major),
192 int(major),
193 int(minor),
193 int(minor),
194 )
194 )
195 except ImportError:
195 except ImportError:
196 return False
196 return False
197
197
198
198
199 @check("chg", "running with chg")
199 @check("chg", "running with chg")
200 def has_chg():
200 def has_chg():
201 return 'CHGHG' in os.environ
201 return 'CHGHG' in os.environ
202
202
203
203
204 @check("rhg", "running with rhg as 'hg'")
204 @check("rhg", "running with rhg as 'hg'")
205 def has_rhg():
205 def has_rhg():
206 return 'RHG_INSTALLED_AS_HG' in os.environ
206 return 'RHG_INSTALLED_AS_HG' in os.environ
207
207
208
208
209 @check("cvs", "cvs client/server")
209 @check("cvs", "cvs client/server")
210 def has_cvs():
210 def has_cvs():
211 re = br'Concurrent Versions System.*?server'
211 re = br'Concurrent Versions System.*?server'
212 return matchoutput('cvs --version 2>&1', re) and not has_msys()
212 return matchoutput('cvs --version 2>&1', re) and not has_msys()
213
213
214
214
215 @check("cvs112", "cvs client/server 1.12.* (not cvsnt)")
215 @check("cvs112", "cvs client/server 1.12.* (not cvsnt)")
216 def has_cvs112():
216 def has_cvs112():
217 re = br'Concurrent Versions System \(CVS\) 1.12.*?server'
217 re = br'Concurrent Versions System \(CVS\) 1.12.*?server'
218 return matchoutput('cvs --version 2>&1', re) and not has_msys()
218 return matchoutput('cvs --version 2>&1', re) and not has_msys()
219
219
220
220
221 @check("cvsnt", "cvsnt client/server")
221 @check("cvsnt", "cvsnt client/server")
222 def has_cvsnt():
222 def has_cvsnt():
223 re = br'Concurrent Versions System \(CVSNT\) (\d+).(\d+).*\(client/server\)'
223 re = br'Concurrent Versions System \(CVSNT\) (\d+).(\d+).*\(client/server\)'
224 return matchoutput('cvsnt --version 2>&1', re)
224 return matchoutput('cvsnt --version 2>&1', re)
225
225
226
226
227 @check("darcs", "darcs client")
227 @check("darcs", "darcs client")
228 def has_darcs():
228 def has_darcs():
229 return matchoutput('darcs --version', br'\b2\.([2-9]|\d{2})', True)
229 return matchoutput('darcs --version', br'\b2\.([2-9]|\d{2})', True)
230
230
231
231
232 @check("mtn", "monotone client (>= 1.0)")
232 @check("mtn", "monotone client (>= 1.0)")
233 def has_mtn():
233 def has_mtn():
234 return matchoutput('mtn --version', br'monotone', True) and not matchoutput(
234 return matchoutput('mtn --version', br'monotone', True) and not matchoutput(
235 'mtn --version', br'monotone 0\.', True
235 'mtn --version', br'monotone 0\.', True
236 )
236 )
237
237
238
238
239 @check("eol-in-paths", "end-of-lines in paths")
239 @check("eol-in-paths", "end-of-lines in paths")
240 def has_eol_in_paths():
240 def has_eol_in_paths():
241 try:
241 try:
242 fd, path = tempfile.mkstemp(dir='.', prefix=tempprefix, suffix='\n\r')
242 fd, path = tempfile.mkstemp(dir='.', prefix=tempprefix, suffix='\n\r')
243 os.close(fd)
243 os.close(fd)
244 os.remove(path)
244 os.remove(path)
245 return True
245 return True
246 except (IOError, OSError):
246 except (IOError, OSError):
247 return False
247 return False
248
248
249
249
250 @check("execbit", "executable bit")
250 @check("execbit", "executable bit")
251 def has_executablebit():
251 def has_executablebit():
252 try:
252 try:
253 EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
253 EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
254 fh, fn = tempfile.mkstemp(dir='.', prefix=tempprefix)
254 fh, fn = tempfile.mkstemp(dir='.', prefix=tempprefix)
255 try:
255 try:
256 os.close(fh)
256 os.close(fh)
257 m = os.stat(fn).st_mode & 0o777
257 m = os.stat(fn).st_mode & 0o777
258 new_file_has_exec = m & EXECFLAGS
258 new_file_has_exec = m & EXECFLAGS
259 os.chmod(fn, m ^ EXECFLAGS)
259 os.chmod(fn, m ^ EXECFLAGS)
260 exec_flags_cannot_flip = (os.stat(fn).st_mode & 0o777) == m
260 exec_flags_cannot_flip = (os.stat(fn).st_mode & 0o777) == m
261 finally:
261 finally:
262 os.unlink(fn)
262 os.unlink(fn)
263 except (IOError, OSError):
263 except (IOError, OSError):
264 # we don't care, the user probably won't be able to commit anyway
264 # we don't care, the user probably won't be able to commit anyway
265 return False
265 return False
266 return not (new_file_has_exec or exec_flags_cannot_flip)
266 return not (new_file_has_exec or exec_flags_cannot_flip)
267
267
268
268
269 @check("icasefs", "case insensitive file system")
269 @check("icasefs", "case insensitive file system")
270 def has_icasefs():
270 def has_icasefs():
271 # Stolen from mercurial.util
271 # Stolen from mercurial.util
272 fd, path = tempfile.mkstemp(dir='.', prefix=tempprefix)
272 fd, path = tempfile.mkstemp(dir='.', prefix=tempprefix)
273 os.close(fd)
273 os.close(fd)
274 try:
274 try:
275 s1 = os.stat(path)
275 s1 = os.stat(path)
276 d, b = os.path.split(path)
276 d, b = os.path.split(path)
277 p2 = os.path.join(d, b.upper())
277 p2 = os.path.join(d, b.upper())
278 if path == p2:
278 if path == p2:
279 p2 = os.path.join(d, b.lower())
279 p2 = os.path.join(d, b.lower())
280 try:
280 try:
281 s2 = os.stat(p2)
281 s2 = os.stat(p2)
282 return s2 == s1
282 return s2 == s1
283 except OSError:
283 except OSError:
284 return False
284 return False
285 finally:
285 finally:
286 os.remove(path)
286 os.remove(path)
287
287
288
288
289 @check("fifo", "named pipes")
289 @check("fifo", "named pipes")
290 def has_fifo():
290 def has_fifo():
291 if getattr(os, "mkfifo", None) is None:
291 if getattr(os, "mkfifo", None) is None:
292 return False
292 return False
293 name = tempfile.mktemp(dir='.', prefix=tempprefix)
293 name = tempfile.mktemp(dir='.', prefix=tempprefix)
294 try:
294 try:
295 os.mkfifo(name)
295 os.mkfifo(name)
296 os.unlink(name)
296 os.unlink(name)
297 return True
297 return True
298 except OSError:
298 except OSError:
299 return False
299 return False
300
300
301
301
302 @check("killdaemons", 'killdaemons.py support')
302 @check("killdaemons", 'killdaemons.py support')
303 def has_killdaemons():
303 def has_killdaemons():
304 return True
304 return True
305
305
306
306
307 @check("cacheable", "cacheable filesystem")
307 @check("cacheable", "cacheable filesystem")
308 def has_cacheable_fs():
308 def has_cacheable_fs():
309 from mercurial import util
309 from mercurial import util
310
310
311 fd, path = tempfile.mkstemp(dir='.', prefix=tempprefix)
311 fd, path = tempfile.mkstemp(dir='.', prefix=tempprefix)
312 os.close(fd)
312 os.close(fd)
313 try:
313 try:
314 return util.cachestat(path).cacheable()
314 return util.cachestat(path).cacheable()
315 finally:
315 finally:
316 os.remove(path)
316 os.remove(path)
317
317
318
318
319 @check("lsprof", "python lsprof module")
319 @check("lsprof", "python lsprof module")
320 def has_lsprof():
320 def has_lsprof():
321 try:
321 try:
322 import _lsprof
322 import _lsprof
323
323
324 _lsprof.Profiler # silence unused import warning
324 _lsprof.Profiler # silence unused import warning
325 return True
325 return True
326 except ImportError:
326 except ImportError:
327 return False
327 return False
328
328
329
329
330 def _gethgversion():
330 def _gethgversion():
331 m = matchoutput('hg --version --quiet 2>&1', br'(\d+)\.(\d+)')
331 m = matchoutput('hg --version --quiet 2>&1', br'(\d+)\.(\d+)')
332 if not m:
332 if not m:
333 return (0, 0)
333 return (0, 0)
334 return (int(m.group(1)), int(m.group(2)))
334 return (int(m.group(1)), int(m.group(2)))
335
335
336
336
337 _hgversion = None
337 _hgversion = None
338
338
339
339
340 def gethgversion():
340 def gethgversion():
341 global _hgversion
341 global _hgversion
342 if _hgversion is None:
342 if _hgversion is None:
343 _hgversion = _gethgversion()
343 _hgversion = _gethgversion()
344 return _hgversion
344 return _hgversion
345
345
346
346
347 @checkvers(
347 @checkvers(
348 "hg", "Mercurial >= %s", list([(1.0 * x) / 10 for x in range(9, 99)])
348 "hg", "Mercurial >= %s", list([(1.0 * x) / 10 for x in range(9, 99)])
349 )
349 )
350 def has_hg_range(v):
350 def has_hg_range(v):
351 major, minor = v.split('.')[0:2]
351 major, minor = v.split('.')[0:2]
352 return gethgversion() >= (int(major), int(minor))
352 return gethgversion() >= (int(major), int(minor))
353
353
354
354
355 @check("rust", "Using the Rust extensions")
355 @check("rust", "Using the Rust extensions")
356 def has_rust():
356 def has_rust():
357 """Check is the mercurial currently running is using some rust code"""
357 """Check is the mercurial currently running is using some rust code"""
358 cmd = 'hg debuginstall --quiet 2>&1'
358 cmd = 'hg debuginstall --quiet 2>&1'
359 match = br'checking module policy \(([^)]+)\)'
359 match = br'checking module policy \(([^)]+)\)'
360 policy = matchoutput(cmd, match)
360 policy = matchoutput(cmd, match)
361 if not policy:
361 if not policy:
362 return False
362 return False
363 return b'rust' in policy.group(1)
363 return b'rust' in policy.group(1)
364
364
365
365
366 @check("hg08", "Mercurial >= 0.8")
366 @check("hg08", "Mercurial >= 0.8")
367 def has_hg08():
367 def has_hg08():
368 if checks["hg09"][0]():
368 if checks["hg09"][0]():
369 return True
369 return True
370 return matchoutput('hg help annotate 2>&1', '--date')
370 return matchoutput('hg help annotate 2>&1', '--date')
371
371
372
372
373 @check("hg07", "Mercurial >= 0.7")
373 @check("hg07", "Mercurial >= 0.7")
374 def has_hg07():
374 def has_hg07():
375 if checks["hg08"][0]():
375 if checks["hg08"][0]():
376 return True
376 return True
377 return matchoutput('hg --version --quiet 2>&1', 'Mercurial Distributed SCM')
377 return matchoutput('hg --version --quiet 2>&1', 'Mercurial Distributed SCM')
378
378
379
379
380 @check("hg06", "Mercurial >= 0.6")
380 @check("hg06", "Mercurial >= 0.6")
381 def has_hg06():
381 def has_hg06():
382 if checks["hg07"][0]():
382 if checks["hg07"][0]():
383 return True
383 return True
384 return matchoutput('hg --version --quiet 2>&1', 'Mercurial version')
384 return matchoutput('hg --version --quiet 2>&1', 'Mercurial version')
385
385
386
386
387 @check("gettext", "GNU Gettext (msgfmt)")
387 @check("gettext", "GNU Gettext (msgfmt)")
388 def has_gettext():
388 def has_gettext():
389 return matchoutput('msgfmt --version', br'GNU gettext-tools')
389 return matchoutput('msgfmt --version', br'GNU gettext-tools')
390
390
391
391
392 @check("git", "git command line client")
392 @check("git", "git command line client")
393 def has_git():
393 def has_git():
394 return matchoutput('git --version 2>&1', br'^git version')
394 return matchoutput('git --version 2>&1', br'^git version')
395
395
396
396
397 def getgitversion():
397 def getgitversion():
398 m = matchoutput('git --version 2>&1', br'git version (\d+)\.(\d+)')
398 m = matchoutput('git --version 2>&1', br'git version (\d+)\.(\d+)')
399 if not m:
399 if not m:
400 return (0, 0)
400 return (0, 0)
401 return (int(m.group(1)), int(m.group(2)))
401 return (int(m.group(1)), int(m.group(2)))
402
402
403
403
404 @check("pygit2", "pygit2 Python library")
404 @check("pygit2", "pygit2 Python library")
405 def has_git():
405 def has_git():
406 try:
406 try:
407 import pygit2
407 import pygit2
408
408
409 pygit2.Oid # silence unused import
409 pygit2.Oid # silence unused import
410 return True
410 return True
411 except ImportError:
411 except ImportError:
412 return False
412 return False
413
413
414
414
415 # https://github.com/git-lfs/lfs-test-server
415 # https://github.com/git-lfs/lfs-test-server
416 @check("lfs-test-server", "git-lfs test server")
416 @check("lfs-test-server", "git-lfs test server")
417 def has_lfsserver():
417 def has_lfsserver():
418 exe = 'lfs-test-server'
418 exe = 'lfs-test-server'
419 if has_windows():
419 if has_windows():
420 exe = 'lfs-test-server.exe'
420 exe = 'lfs-test-server.exe'
421 return any(
421 return any(
422 os.access(os.path.join(path, exe), os.X_OK)
422 os.access(os.path.join(path, exe), os.X_OK)
423 for path in os.environ["PATH"].split(os.pathsep)
423 for path in os.environ["PATH"].split(os.pathsep)
424 )
424 )
425
425
426
426
427 @checkvers("git", "git client (with ext::sh support) version >= %s", (1.9,))
427 @checkvers("git", "git client (with ext::sh support) version >= %s", (1.9,))
428 def has_git_range(v):
428 def has_git_range(v):
429 major, minor = v.split('.')[0:2]
429 major, minor = v.split('.')[0:2]
430 return getgitversion() >= (int(major), int(minor))
430 return getgitversion() >= (int(major), int(minor))
431
431
432
432
433 @check("docutils", "Docutils text processing library")
433 @check("docutils", "Docutils text processing library")
434 def has_docutils():
434 def has_docutils():
435 try:
435 try:
436 import docutils.core
436 import docutils.core
437
437
438 docutils.core.publish_cmdline # silence unused import
438 docutils.core.publish_cmdline # silence unused import
439 return True
439 return True
440 except ImportError:
440 except ImportError:
441 return False
441 return False
442
442
443
443
444 def getsvnversion():
444 def getsvnversion():
445 m = matchoutput('svn --version --quiet 2>&1', br'^(\d+)\.(\d+)')
445 m = matchoutput('svn --version --quiet 2>&1', br'^(\d+)\.(\d+)')
446 if not m:
446 if not m:
447 return (0, 0)
447 return (0, 0)
448 return (int(m.group(1)), int(m.group(2)))
448 return (int(m.group(1)), int(m.group(2)))
449
449
450
450
451 @checkvers("svn", "subversion client and admin tools >= %s", (1.3, 1.5))
451 @checkvers("svn", "subversion client and admin tools >= %s", (1.3, 1.5))
452 def has_svn_range(v):
452 def has_svn_range(v):
453 major, minor = v.split('.')[0:2]
453 major, minor = v.split('.')[0:2]
454 return getsvnversion() >= (int(major), int(minor))
454 return getsvnversion() >= (int(major), int(minor))
455
455
456
456
457 @check("svn", "subversion client and admin tools")
457 @check("svn", "subversion client and admin tools")
458 def has_svn():
458 def has_svn():
459 return matchoutput('svn --version 2>&1', br'^svn, version') and matchoutput(
459 return matchoutput('svn --version 2>&1', br'^svn, version') and matchoutput(
460 'svnadmin --version 2>&1', br'^svnadmin, version'
460 'svnadmin --version 2>&1', br'^svnadmin, version'
461 )
461 )
462
462
463
463
464 @check("svn-bindings", "subversion python bindings")
464 @check("svn-bindings", "subversion python bindings")
465 def has_svn_bindings():
465 def has_svn_bindings():
466 try:
466 try:
467 import svn.core
467 import svn.core
468
468
469 version = svn.core.SVN_VER_MAJOR, svn.core.SVN_VER_MINOR
469 version = svn.core.SVN_VER_MAJOR, svn.core.SVN_VER_MINOR
470 if version < (1, 4):
470 if version < (1, 4):
471 return False
471 return False
472 return True
472 return True
473 except ImportError:
473 except ImportError:
474 return False
474 return False
475
475
476
476
477 @check("p4", "Perforce server and client")
477 @check("p4", "Perforce server and client")
478 def has_p4():
478 def has_p4():
479 return matchoutput('p4 -V', br'Rev\. P4/') and matchoutput(
479 return matchoutput('p4 -V', br'Rev\. P4/') and matchoutput(
480 'p4d -V', br'Rev\. P4D/'
480 'p4d -V', br'Rev\. P4D/'
481 )
481 )
482
482
483
483
484 @check("symlink", "symbolic links")
484 @check("symlink", "symbolic links")
485 def has_symlink():
485 def has_symlink():
486 # mercurial.windows.checklink() is a hard 'no' at the moment
486 # mercurial.windows.checklink() is a hard 'no' at the moment
487 if os.name == 'nt' or getattr(os, "symlink", None) is None:
487 if os.name == 'nt' or getattr(os, "symlink", None) is None:
488 return False
488 return False
489 name = tempfile.mktemp(dir='.', prefix=tempprefix)
489 name = tempfile.mktemp(dir='.', prefix=tempprefix)
490 try:
490 try:
491 os.symlink(".", name)
491 os.symlink(".", name)
492 os.unlink(name)
492 os.unlink(name)
493 return True
493 return True
494 except (OSError, AttributeError):
494 except (OSError, AttributeError):
495 return False
495 return False
496
496
497
497
498 @check("hardlink", "hardlinks")
498 @check("hardlink", "hardlinks")
499 def has_hardlink():
499 def has_hardlink():
500 from mercurial import util
500 from mercurial import util
501
501
502 fh, fn = tempfile.mkstemp(dir='.', prefix=tempprefix)
502 fh, fn = tempfile.mkstemp(dir='.', prefix=tempprefix)
503 os.close(fh)
503 os.close(fh)
504 name = tempfile.mktemp(dir='.', prefix=tempprefix)
504 name = tempfile.mktemp(dir='.', prefix=tempprefix)
505 try:
505 try:
506 util.oslink(_sys2bytes(fn), _sys2bytes(name))
506 util.oslink(_sys2bytes(fn), _sys2bytes(name))
507 os.unlink(name)
507 os.unlink(name)
508 return True
508 return True
509 except OSError:
509 except OSError:
510 return False
510 return False
511 finally:
511 finally:
512 os.unlink(fn)
512 os.unlink(fn)
513
513
514
514
515 @check("hardlink-whitelisted", "hardlinks on whitelisted filesystems")
515 @check("hardlink-whitelisted", "hardlinks on whitelisted filesystems")
516 def has_hardlink_whitelisted():
516 def has_hardlink_whitelisted():
517 from mercurial import util
517 from mercurial import util
518
518
519 try:
519 try:
520 fstype = util.getfstype(b'.')
520 fstype = util.getfstype(b'.')
521 except OSError:
521 except OSError:
522 return False
522 return False
523 return fstype in util._hardlinkfswhitelist
523 return fstype in util._hardlinkfswhitelist
524
524
525
525
526 @check("rmcwd", "can remove current working directory")
526 @check("rmcwd", "can remove current working directory")
527 def has_rmcwd():
527 def has_rmcwd():
528 ocwd = os.getcwd()
528 ocwd = os.getcwd()
529 temp = tempfile.mkdtemp(dir='.', prefix=tempprefix)
529 temp = tempfile.mkdtemp(dir='.', prefix=tempprefix)
530 try:
530 try:
531 os.chdir(temp)
531 os.chdir(temp)
532 # On Linux, 'rmdir .' isn't allowed, but the other names are okay.
532 # On Linux, 'rmdir .' isn't allowed, but the other names are okay.
533 # On Solaris and Windows, the cwd can't be removed by any names.
533 # On Solaris and Windows, the cwd can't be removed by any names.
534 os.rmdir(os.getcwd())
534 os.rmdir(os.getcwd())
535 return True
535 return True
536 except OSError:
536 except OSError:
537 return False
537 return False
538 finally:
538 finally:
539 os.chdir(ocwd)
539 os.chdir(ocwd)
540 # clean up temp dir on platforms where cwd can't be removed
540 # clean up temp dir on platforms where cwd can't be removed
541 try:
541 try:
542 os.rmdir(temp)
542 os.rmdir(temp)
543 except OSError:
543 except OSError:
544 pass
544 pass
545
545
546
546
547 @check("tla", "GNU Arch tla client")
547 @check("tla", "GNU Arch tla client")
548 def has_tla():
548 def has_tla():
549 return matchoutput('tla --version 2>&1', br'The GNU Arch Revision')
549 return matchoutput('tla --version 2>&1', br'The GNU Arch Revision')
550
550
551
551
552 @check("gpg", "gpg client")
552 @check("gpg", "gpg client")
553 def has_gpg():
553 def has_gpg():
554 return matchoutput('gpg --version 2>&1', br'GnuPG')
554 return matchoutput('gpg --version 2>&1', br'GnuPG')
555
555
556
556
557 @check("gpg2", "gpg client v2")
557 @check("gpg2", "gpg client v2")
558 def has_gpg2():
558 def has_gpg2():
559 return matchoutput('gpg --version 2>&1', br'GnuPG[^0-9]+2\.')
559 return matchoutput('gpg --version 2>&1', br'GnuPG[^0-9]+2\.')
560
560
561
561
562 @check("gpg21", "gpg client v2.1+")
562 @check("gpg21", "gpg client v2.1+")
563 def has_gpg21():
563 def has_gpg21():
564 return matchoutput('gpg --version 2>&1', br'GnuPG[^0-9]+2\.(?!0)')
564 return matchoutput('gpg --version 2>&1', br'GnuPG[^0-9]+2\.(?!0)')
565
565
566
566
567 @check("unix-permissions", "unix-style permissions")
567 @check("unix-permissions", "unix-style permissions")
568 def has_unix_permissions():
568 def has_unix_permissions():
569 d = tempfile.mkdtemp(dir='.', prefix=tempprefix)
569 d = tempfile.mkdtemp(dir='.', prefix=tempprefix)
570 try:
570 try:
571 fname = os.path.join(d, 'foo')
571 fname = os.path.join(d, 'foo')
572 for umask in (0o77, 0o07, 0o22):
572 for umask in (0o77, 0o07, 0o22):
573 os.umask(umask)
573 os.umask(umask)
574 f = open(fname, 'w')
574 f = open(fname, 'w')
575 f.close()
575 f.close()
576 mode = os.stat(fname).st_mode
576 mode = os.stat(fname).st_mode
577 os.unlink(fname)
577 os.unlink(fname)
578 if mode & 0o777 != ~umask & 0o666:
578 if mode & 0o777 != ~umask & 0o666:
579 return False
579 return False
580 return True
580 return True
581 finally:
581 finally:
582 os.rmdir(d)
582 os.rmdir(d)
583
583
584
584
585 @check("unix-socket", "AF_UNIX socket family")
585 @check("unix-socket", "AF_UNIX socket family")
586 def has_unix_socket():
586 def has_unix_socket():
587 return getattr(socket, 'AF_UNIX', None) is not None
587 return getattr(socket, 'AF_UNIX', None) is not None
588
588
589
589
590 @check("root", "root permissions")
590 @check("root", "root permissions")
591 def has_root():
591 def has_root():
592 return getattr(os, 'geteuid', None) and os.geteuid() == 0
592 return getattr(os, 'geteuid', None) and os.geteuid() == 0
593
593
594
594
595 @check("pyflakes", "Pyflakes python linter")
595 @check("pyflakes", "Pyflakes python linter")
596 def has_pyflakes():
596 def has_pyflakes():
597 try:
597 try:
598 import pyflakes
598 import pyflakes
599
599
600 pyflakes.__version__
600 pyflakes.__version__
601 except ImportError:
601 except ImportError:
602 return False
602 return False
603 else:
603 else:
604 return True
604 return True
605
605
606
606
607 @check("pylint", "Pylint python linter")
607 @check("pylint", "Pylint python linter")
608 def has_pylint():
608 def has_pylint():
609 return matchoutput("pylint --help", br"Usage:[ ]+pylint", True)
609 return matchoutput("pylint --help", br"Usage:[ ]+pylint", True)
610
610
611
611
612 @check("clang-format", "clang-format C code formatter (>= 11)")
612 @check("clang-format", "clang-format C code formatter (>= 11)")
613 def has_clang_format():
613 def has_clang_format():
614 m = matchoutput('clang-format --version', br'clang-format version (\d+)')
614 m = matchoutput('clang-format --version', br'clang-format version (\d+)')
615 # style changed somewhere between 10.x and 11.x
615 # style changed somewhere between 10.x and 11.x
616 return m and int(m.group(1)) >= 11
616 return m and int(m.group(1)) >= 11
617
617
618
618
619 @check("jshint", "JSHint static code analysis tool")
619 @check("jshint", "JSHint static code analysis tool")
620 def has_jshint():
620 def has_jshint():
621 return matchoutput("jshint --version 2>&1", br"jshint v")
621 return matchoutput("jshint --version 2>&1", br"jshint v")
622
622
623
623
624 @check("pygments", "Pygments source highlighting library")
624 @check("pygments", "Pygments source highlighting library")
625 def has_pygments():
625 def has_pygments():
626 try:
626 try:
627 import pygments
627 import pygments
628
628
629 pygments.highlight # silence unused import warning
629 pygments.highlight # silence unused import warning
630 return True
630 return True
631 except ImportError:
631 except ImportError:
632 return False
632 return False
633
633
634
634
635 @check("pygments25", "Pygments version >= 2.5")
635 @check("pygments25", "Pygments version >= 2.5")
636 def pygments25():
636 def pygments25():
637 try:
637 try:
638 import pygments
638 import pygments
639
639
640 v = pygments.__version__
640 v = pygments.__version__
641 except ImportError:
641 except ImportError:
642 return False
642 return False
643
643
644 parts = v.split(".")
644 parts = v.split(".")
645 major = int(parts[0])
645 major = int(parts[0])
646 minor = int(parts[1])
646 minor = int(parts[1])
647
647
648 return (major, minor) >= (2, 5)
648 return (major, minor) >= (2, 5)
649
649
650
650
651 @check("outer-repo", "outer repo")
651 @check("outer-repo", "outer repo")
652 def has_outer_repo():
652 def has_outer_repo():
653 # failing for other reasons than 'no repo' imply that there is a repo
653 # failing for other reasons than 'no repo' imply that there is a repo
654 return not matchoutput('hg root 2>&1', br'abort: no repository found', True)
654 return not matchoutput('hg root 2>&1', br'abort: no repository found', True)
655
655
656
656
657 @check("ssl", "ssl module available")
657 @check("ssl", "ssl module available")
658 def has_ssl():
658 def has_ssl():
659 try:
659 try:
660 import ssl
660 import ssl
661
661
662 ssl.CERT_NONE
662 ssl.CERT_NONE
663 return True
663 return True
664 except ImportError:
664 except ImportError:
665 return False
665 return False
666
666
667
667
668 @check("defaultcacertsloaded", "detected presence of loaded system CA certs")
668 @check("defaultcacertsloaded", "detected presence of loaded system CA certs")
669 def has_defaultcacertsloaded():
669 def has_defaultcacertsloaded():
670 import ssl
670 import ssl
671 from mercurial import sslutil, ui as uimod
671 from mercurial import sslutil, ui as uimod
672
672
673 ui = uimod.ui.load()
673 ui = uimod.ui.load()
674 cafile = sslutil._defaultcacerts(ui)
674 cafile = sslutil._defaultcacerts(ui)
675 ctx = ssl.create_default_context()
675 ctx = ssl.create_default_context()
676 if cafile:
676 if cafile:
677 ctx.load_verify_locations(cafile=cafile)
677 ctx.load_verify_locations(cafile=cafile)
678 else:
678 else:
679 ctx.load_default_certs()
679 ctx.load_default_certs()
680
680
681 return len(ctx.get_ca_certs()) > 0
681 return len(ctx.get_ca_certs()) > 0
682
682
683
683
684 @check("tls1.2", "TLS 1.2 protocol support")
684 @check("tls1.2", "TLS 1.2 protocol support")
685 def has_tls1_2():
685 def has_tls1_2():
686 from mercurial import sslutil
686 from mercurial import sslutil
687
687
688 return b'tls1.2' in sslutil.supportedprotocols
688 return b'tls1.2' in sslutil.supportedprotocols
689
689
690
690
691 @check("windows", "Windows")
691 @check("windows", "Windows")
692 def has_windows():
692 def has_windows():
693 return os.name == 'nt'
693 return os.name == 'nt'
694
694
695
695
696 @check("system-sh", "system() uses sh")
696 @check("system-sh", "system() uses sh")
697 def has_system_sh():
697 def has_system_sh():
698 return os.name != 'nt'
698 return os.name != 'nt'
699
699
700
700
701 @check("serve", "platform and python can manage 'hg serve -d'")
701 @check("serve", "platform and python can manage 'hg serve -d'")
702 def has_serve():
702 def has_serve():
703 return True
703 return True
704
704
705
705
706 @check("setprocname", "whether osutil.setprocname is available or not")
706 @check("setprocname", "whether osutil.setprocname is available or not")
707 def has_setprocname():
707 def has_setprocname():
708 try:
708 try:
709 from mercurial.utils import procutil
709 from mercurial.utils import procutil
710
710
711 procutil.setprocname
711 procutil.setprocname
712 return True
712 return True
713 except AttributeError:
713 except AttributeError:
714 return False
714 return False
715
715
716
716
717 @check("test-repo", "running tests from repository")
717 @check("test-repo", "running tests from repository")
718 def has_test_repo():
718 def has_test_repo():
719 t = os.environ["TESTDIR"]
719 t = os.environ["TESTDIR"]
720 return os.path.isdir(os.path.join(t, "..", ".hg"))
720 return os.path.isdir(os.path.join(t, "..", ".hg"))
721
721
722
722
723 @check("network-io", "whether tests are allowed to access 3rd party services")
723 @check("network-io", "whether tests are allowed to access 3rd party services")
724 def has_test_repo():
724 def has_test_repo():
725 t = os.environ.get("HGTESTS_ALLOW_NETIO")
725 t = os.environ.get("HGTESTS_ALLOW_NETIO")
726 return t == "1"
726 return t == "1"
727
727
728
728
729 @check("curses", "terminfo compiler and curses module")
729 @check("curses", "terminfo compiler and curses module")
730 def has_curses():
730 def has_curses():
731 try:
731 try:
732 import curses
732 import curses
733
733
734 curses.COLOR_BLUE
734 curses.COLOR_BLUE
735
735
736 # Windows doesn't have a `tic` executable, but the windows_curses
736 # Windows doesn't have a `tic` executable, but the windows_curses
737 # package is sufficient to run the tests without it.
737 # package is sufficient to run the tests without it.
738 if os.name == 'nt':
738 if os.name == 'nt':
739 return True
739 return True
740
740
741 return has_tic()
741 return has_tic()
742
742
743 except (ImportError, AttributeError):
743 except (ImportError, AttributeError):
744 return False
744 return False
745
745
746
746
747 @check("tic", "terminfo compiler")
747 @check("tic", "terminfo compiler")
748 def has_tic():
748 def has_tic():
749 return matchoutput('test -x "`which tic`"', br'')
749 return matchoutput('test -x "`which tic`"', br'')
750
750
751
751
752 @check("xz", "xz compression utility")
752 @check("xz", "xz compression utility")
753 def has_xz():
753 def has_xz():
754 # When Windows invokes a subprocess in shell mode, it uses `cmd.exe`, which
754 # When Windows invokes a subprocess in shell mode, it uses `cmd.exe`, which
755 # only knows `where`, not `which`. So invoke MSYS shell explicitly.
755 # only knows `where`, not `which`. So invoke MSYS shell explicitly.
756 return matchoutput("sh -c 'test -x \"`which xz`\"'", b'')
756 return matchoutput("sh -c 'test -x \"`which xz`\"'", b'')
757
757
758
758
759 @check("msys", "Windows with MSYS")
759 @check("msys", "Windows with MSYS")
760 def has_msys():
760 def has_msys():
761 return os.getenv('MSYSTEM')
761 return os.getenv('MSYSTEM')
762
762
763
763
764 @check("aix", "AIX")
764 @check("aix", "AIX")
765 def has_aix():
765 def has_aix():
766 return sys.platform.startswith("aix")
766 return sys.platform.startswith("aix")
767
767
768
768
769 @check("osx", "OS X")
769 @check("osx", "OS X")
770 def has_osx():
770 def has_osx():
771 return sys.platform == 'darwin'
771 return sys.platform == 'darwin'
772
772
773
773
774 @check("osxpackaging", "OS X packaging tools")
774 @check("osxpackaging", "OS X packaging tools")
775 def has_osxpackaging():
775 def has_osxpackaging():
776 try:
776 try:
777 return (
777 return (
778 matchoutput('pkgbuild', br'Usage: pkgbuild ', ignorestatus=1)
778 matchoutput('pkgbuild', br'Usage: pkgbuild ', ignorestatus=1)
779 and matchoutput(
779 and matchoutput(
780 'productbuild', br'Usage: productbuild ', ignorestatus=1
780 'productbuild', br'Usage: productbuild ', ignorestatus=1
781 )
781 )
782 and matchoutput('lsbom', br'Usage: lsbom', ignorestatus=1)
782 and matchoutput('lsbom', br'Usage: lsbom', ignorestatus=1)
783 and matchoutput('xar --help', br'Usage: xar', ignorestatus=1)
783 and matchoutput('xar --help', br'Usage: xar', ignorestatus=1)
784 )
784 )
785 except ImportError:
785 except ImportError:
786 return False
786 return False
787
787
788
788
789 @check('linuxormacos', 'Linux or MacOS')
789 @check('linuxormacos', 'Linux or MacOS')
790 def has_linuxormacos():
790 def has_linuxormacos():
791 # This isn't a perfect test for MacOS. But it is sufficient for our needs.
791 # This isn't a perfect test for MacOS. But it is sufficient for our needs.
792 return sys.platform.startswith(('linux', 'darwin'))
792 return sys.platform.startswith(('linux', 'darwin'))
793
793
794
794
795 @check("docker", "docker support")
795 @check("docker", "docker support")
796 def has_docker():
796 def has_docker():
797 pat = br'A self-sufficient runtime for'
797 pat = br'A self-sufficient runtime for'
798 if matchoutput('docker --help', pat):
798 if matchoutput('docker --help', pat):
799 if 'linux' not in sys.platform:
799 if 'linux' not in sys.platform:
800 # TODO: in theory we should be able to test docker-based
800 # TODO: in theory we should be able to test docker-based
801 # package creation on non-linux using boot2docker, but in
801 # package creation on non-linux using boot2docker, but in
802 # practice that requires extra coordination to make sure
802 # practice that requires extra coordination to make sure
803 # $TESTTEMP is going to be visible at the same path to the
803 # $TESTTEMP is going to be visible at the same path to the
804 # boot2docker VM. If we figure out how to verify that, we
804 # boot2docker VM. If we figure out how to verify that, we
805 # can use the following instead of just saying False:
805 # can use the following instead of just saying False:
806 # return 'DOCKER_HOST' in os.environ
806 # return 'DOCKER_HOST' in os.environ
807 return False
807 return False
808
808
809 return True
809 return True
810 return False
810 return False
811
811
812
812
813 @check("debhelper", "debian packaging tools")
813 @check("debhelper", "debian packaging tools")
814 def has_debhelper():
814 def has_debhelper():
815 # Some versions of dpkg say `dpkg', some say 'dpkg' (` vs ' on the first
815 # Some versions of dpkg say `dpkg', some say 'dpkg' (` vs ' on the first
816 # quote), so just accept anything in that spot.
816 # quote), so just accept anything in that spot.
817 dpkg = matchoutput(
817 dpkg = matchoutput(
818 'dpkg --version', br"Debian .dpkg' package management program"
818 'dpkg --version', br"Debian .dpkg' package management program"
819 )
819 )
820 dh = matchoutput(
820 dh = matchoutput(
821 'dh --help', br'dh is a part of debhelper.', ignorestatus=True
821 'dh --help', br'dh is a part of debhelper.', ignorestatus=True
822 )
822 )
823 dh_py2 = matchoutput(
823 dh_py2 = matchoutput(
824 'dh_python2 --help', br'other supported Python versions'
824 'dh_python2 --help', br'other supported Python versions'
825 )
825 )
826 # debuild comes from the 'devscripts' package, though you might want
826 # debuild comes from the 'devscripts' package, though you might want
827 # the 'build-debs' package instead, which has a dependency on devscripts.
827 # the 'build-debs' package instead, which has a dependency on devscripts.
828 debuild = matchoutput(
828 debuild = matchoutput(
829 'debuild --help', br'to run debian/rules with given parameter'
829 'debuild --help', br'to run debian/rules with given parameter'
830 )
830 )
831 return dpkg and dh and dh_py2 and debuild
831 return dpkg and dh and dh_py2 and debuild
832
832
833
833
834 @check(
834 @check(
835 "debdeps", "debian build dependencies (run dpkg-checkbuilddeps in contrib/)"
835 "debdeps", "debian build dependencies (run dpkg-checkbuilddeps in contrib/)"
836 )
836 )
837 def has_debdeps():
837 def has_debdeps():
838 # just check exit status (ignoring output)
838 # just check exit status (ignoring output)
839 path = '%s/../contrib/packaging/debian/control' % os.environ['TESTDIR']
839 path = '%s/../contrib/packaging/debian/control' % os.environ['TESTDIR']
840 return matchoutput('dpkg-checkbuilddeps %s' % path, br'')
840 return matchoutput('dpkg-checkbuilddeps %s' % path, br'')
841
841
842
842
843 @check("demandimport", "demandimport enabled")
843 @check("demandimport", "demandimport enabled")
844 def has_demandimport():
844 def has_demandimport():
845 # chg disables demandimport intentionally for performance wins.
845 # chg disables demandimport intentionally for performance wins.
846 return (not has_chg()) and os.environ.get('HGDEMANDIMPORT') != 'disable'
846 return (not has_chg()) and os.environ.get('HGDEMANDIMPORT') != 'disable'
847
847
848
848
849 # Add "py27", "py35", ... as possible feature checks. Note that there's no
849 # Add "py27", "py35", ... as possible feature checks. Note that there's no
850 # punctuation here.
850 # punctuation here.
851 @checkvers("py", "Python >= %s", (2.7, 3.5, 3.6, 3.7, 3.8, 3.9))
851 @checkvers("py", "Python >= %s", (2.7, 3.5, 3.6, 3.7, 3.8, 3.9))
852 def has_python_range(v):
852 def has_python_range(v):
853 major, minor = v.split('.')[0:2]
853 major, minor = v.split('.')[0:2]
854 py_major, py_minor = sys.version_info.major, sys.version_info.minor
854 py_major, py_minor = sys.version_info.major, sys.version_info.minor
855
855
856 return (py_major, py_minor) >= (int(major), int(minor))
856 return (py_major, py_minor) >= (int(major), int(minor))
857
857
858
858
859 @check("py3", "running with Python 3.x")
859 @check("py3", "running with Python 3.x")
860 def has_py3():
860 def has_py3():
861 return 3 == sys.version_info[0]
861 return 3 == sys.version_info[0]
862
862
863
863
864 @check("py3exe", "a Python 3.x interpreter is available")
864 @check("py3exe", "a Python 3.x interpreter is available")
865 def has_python3exe():
865 def has_python3exe():
866 return matchoutput('python3 -V', br'^Python 3.(5|6|7|8|9)')
866 py = 'python3'
867 if os.name == 'nt':
868 py = 'py -3'
869 return matchoutput('%s -V' % py, br'^Python 3.(5|6|7|8|9)')
867
870
868
871
869 @check("pure", "running with pure Python code")
872 @check("pure", "running with pure Python code")
870 def has_pure():
873 def has_pure():
871 return any(
874 return any(
872 [
875 [
873 os.environ.get("HGMODULEPOLICY") == "py",
876 os.environ.get("HGMODULEPOLICY") == "py",
874 os.environ.get("HGTEST_RUN_TESTS_PURE") == "--pure",
877 os.environ.get("HGTEST_RUN_TESTS_PURE") == "--pure",
875 ]
878 ]
876 )
879 )
877
880
878
881
879 @check("slow", "allow slow tests (use --allow-slow-tests)")
882 @check("slow", "allow slow tests (use --allow-slow-tests)")
880 def has_slow():
883 def has_slow():
881 return os.environ.get('HGTEST_SLOW') == 'slow'
884 return os.environ.get('HGTEST_SLOW') == 'slow'
882
885
883
886
884 @check("hypothesis", "Hypothesis automated test generation")
887 @check("hypothesis", "Hypothesis automated test generation")
885 def has_hypothesis():
888 def has_hypothesis():
886 try:
889 try:
887 import hypothesis
890 import hypothesis
888
891
889 hypothesis.given
892 hypothesis.given
890 return True
893 return True
891 except ImportError:
894 except ImportError:
892 return False
895 return False
893
896
894
897
895 @check("unziplinks", "unzip(1) understands and extracts symlinks")
898 @check("unziplinks", "unzip(1) understands and extracts symlinks")
896 def unzip_understands_symlinks():
899 def unzip_understands_symlinks():
897 return matchoutput('unzip --help', br'Info-ZIP')
900 return matchoutput('unzip --help', br'Info-ZIP')
898
901
899
902
900 @check("zstd", "zstd Python module available")
903 @check("zstd", "zstd Python module available")
901 def has_zstd():
904 def has_zstd():
902 try:
905 try:
903 import mercurial.zstd
906 import mercurial.zstd
904
907
905 mercurial.zstd.__version__
908 mercurial.zstd.__version__
906 return True
909 return True
907 except ImportError:
910 except ImportError:
908 return False
911 return False
909
912
910
913
911 @check("devfull", "/dev/full special file")
914 @check("devfull", "/dev/full special file")
912 def has_dev_full():
915 def has_dev_full():
913 return os.path.exists('/dev/full')
916 return os.path.exists('/dev/full')
914
917
915
918
916 @check("ensurepip", "ensurepip module")
919 @check("ensurepip", "ensurepip module")
917 def has_ensurepip():
920 def has_ensurepip():
918 try:
921 try:
919 import ensurepip
922 import ensurepip
920
923
921 ensurepip.bootstrap
924 ensurepip.bootstrap
922 return True
925 return True
923 except ImportError:
926 except ImportError:
924 return False
927 return False
925
928
926
929
927 @check("virtualenv", "virtualenv support")
930 @check("virtualenv", "virtualenv support")
928 def has_virtualenv():
931 def has_virtualenv():
929 try:
932 try:
930 import virtualenv
933 import virtualenv
931
934
932 # --no-site-package became the default in 1.7 (Nov 2011), and the
935 # --no-site-package became the default in 1.7 (Nov 2011), and the
933 # argument was removed in 20.0 (Feb 2020). Rather than make the
936 # argument was removed in 20.0 (Feb 2020). Rather than make the
934 # script complicated, just ignore ancient versions.
937 # script complicated, just ignore ancient versions.
935 return int(virtualenv.__version__.split('.')[0]) > 1
938 return int(virtualenv.__version__.split('.')[0]) > 1
936 except (AttributeError, ImportError, IndexError):
939 except (AttributeError, ImportError, IndexError):
937 return False
940 return False
938
941
939
942
940 @check("fsmonitor", "running tests with fsmonitor")
943 @check("fsmonitor", "running tests with fsmonitor")
941 def has_fsmonitor():
944 def has_fsmonitor():
942 return 'HGFSMONITOR_TESTS' in os.environ
945 return 'HGFSMONITOR_TESTS' in os.environ
943
946
944
947
945 @check("fuzzywuzzy", "Fuzzy string matching library")
948 @check("fuzzywuzzy", "Fuzzy string matching library")
946 def has_fuzzywuzzy():
949 def has_fuzzywuzzy():
947 try:
950 try:
948 import fuzzywuzzy
951 import fuzzywuzzy
949
952
950 fuzzywuzzy.__version__
953 fuzzywuzzy.__version__
951 return True
954 return True
952 except ImportError:
955 except ImportError:
953 return False
956 return False
954
957
955
958
956 @check("clang-libfuzzer", "clang new enough to include libfuzzer")
959 @check("clang-libfuzzer", "clang new enough to include libfuzzer")
957 def has_clang_libfuzzer():
960 def has_clang_libfuzzer():
958 mat = matchoutput('clang --version', br'clang version (\d)')
961 mat = matchoutput('clang --version', br'clang version (\d)')
959 if mat:
962 if mat:
960 # libfuzzer is new in clang 6
963 # libfuzzer is new in clang 6
961 return int(mat.group(1)) > 5
964 return int(mat.group(1)) > 5
962 return False
965 return False
963
966
964
967
965 @check("clang-6.0", "clang 6.0 with version suffix (libfuzzer included)")
968 @check("clang-6.0", "clang 6.0 with version suffix (libfuzzer included)")
966 def has_clang60():
969 def has_clang60():
967 return matchoutput('clang-6.0 --version', br'clang version 6\.')
970 return matchoutput('clang-6.0 --version', br'clang version 6\.')
968
971
969
972
970 @check("xdiff", "xdiff algorithm")
973 @check("xdiff", "xdiff algorithm")
971 def has_xdiff():
974 def has_xdiff():
972 try:
975 try:
973 from mercurial import policy
976 from mercurial import policy
974
977
975 bdiff = policy.importmod('bdiff')
978 bdiff = policy.importmod('bdiff')
976 return bdiff.xdiffblocks(b'', b'') == [(0, 0, 0, 0)]
979 return bdiff.xdiffblocks(b'', b'') == [(0, 0, 0, 0)]
977 except (ImportError, AttributeError):
980 except (ImportError, AttributeError):
978 return False
981 return False
979
982
980
983
981 @check('extraextensions', 'whether tests are running with extra extensions')
984 @check('extraextensions', 'whether tests are running with extra extensions')
982 def has_extraextensions():
985 def has_extraextensions():
983 return 'HGTESTEXTRAEXTENSIONS' in os.environ
986 return 'HGTESTEXTRAEXTENSIONS' in os.environ
984
987
985
988
986 def getrepofeatures():
989 def getrepofeatures():
987 """Obtain set of repository features in use.
990 """Obtain set of repository features in use.
988
991
989 HGREPOFEATURES can be used to define or remove features. It contains
992 HGREPOFEATURES can be used to define or remove features. It contains
990 a space-delimited list of feature strings. Strings beginning with ``-``
993 a space-delimited list of feature strings. Strings beginning with ``-``
991 mean to remove.
994 mean to remove.
992 """
995 """
993 # Default list provided by core.
996 # Default list provided by core.
994 features = {
997 features = {
995 'bundlerepo',
998 'bundlerepo',
996 'revlogstore',
999 'revlogstore',
997 'fncache',
1000 'fncache',
998 }
1001 }
999
1002
1000 # Features that imply other features.
1003 # Features that imply other features.
1001 implies = {
1004 implies = {
1002 'simplestore': ['-revlogstore', '-bundlerepo', '-fncache'],
1005 'simplestore': ['-revlogstore', '-bundlerepo', '-fncache'],
1003 }
1006 }
1004
1007
1005 for override in os.environ.get('HGREPOFEATURES', '').split(' '):
1008 for override in os.environ.get('HGREPOFEATURES', '').split(' '):
1006 if not override:
1009 if not override:
1007 continue
1010 continue
1008
1011
1009 if override.startswith('-'):
1012 if override.startswith('-'):
1010 if override[1:] in features:
1013 if override[1:] in features:
1011 features.remove(override[1:])
1014 features.remove(override[1:])
1012 else:
1015 else:
1013 features.add(override)
1016 features.add(override)
1014
1017
1015 for imply in implies.get(override, []):
1018 for imply in implies.get(override, []):
1016 if imply.startswith('-'):
1019 if imply.startswith('-'):
1017 if imply[1:] in features:
1020 if imply[1:] in features:
1018 features.remove(imply[1:])
1021 features.remove(imply[1:])
1019 else:
1022 else:
1020 features.add(imply)
1023 features.add(imply)
1021
1024
1022 return features
1025 return features
1023
1026
1024
1027
1025 @check('reporevlogstore', 'repository using the default revlog store')
1028 @check('reporevlogstore', 'repository using the default revlog store')
1026 def has_reporevlogstore():
1029 def has_reporevlogstore():
1027 return 'revlogstore' in getrepofeatures()
1030 return 'revlogstore' in getrepofeatures()
1028
1031
1029
1032
1030 @check('reposimplestore', 'repository using simple storage extension')
1033 @check('reposimplestore', 'repository using simple storage extension')
1031 def has_reposimplestore():
1034 def has_reposimplestore():
1032 return 'simplestore' in getrepofeatures()
1035 return 'simplestore' in getrepofeatures()
1033
1036
1034
1037
1035 @check('repobundlerepo', 'whether we can open bundle files as repos')
1038 @check('repobundlerepo', 'whether we can open bundle files as repos')
1036 def has_repobundlerepo():
1039 def has_repobundlerepo():
1037 return 'bundlerepo' in getrepofeatures()
1040 return 'bundlerepo' in getrepofeatures()
1038
1041
1039
1042
1040 @check('repofncache', 'repository has an fncache')
1043 @check('repofncache', 'repository has an fncache')
1041 def has_repofncache():
1044 def has_repofncache():
1042 return 'fncache' in getrepofeatures()
1045 return 'fncache' in getrepofeatures()
1043
1046
1044
1047
1045 @check('sqlite', 'sqlite3 module and matching cli is available')
1048 @check('sqlite', 'sqlite3 module and matching cli is available')
1046 def has_sqlite():
1049 def has_sqlite():
1047 try:
1050 try:
1048 import sqlite3
1051 import sqlite3
1049
1052
1050 version = sqlite3.sqlite_version_info
1053 version = sqlite3.sqlite_version_info
1051 except ImportError:
1054 except ImportError:
1052 return False
1055 return False
1053
1056
1054 if version < (3, 8, 3):
1057 if version < (3, 8, 3):
1055 # WITH clause not supported
1058 # WITH clause not supported
1056 return False
1059 return False
1057
1060
1058 return matchoutput('sqlite3 -version', br'^3\.\d+')
1061 return matchoutput('sqlite3 -version', br'^3\.\d+')
1059
1062
1060
1063
1061 @check('vcr', 'vcr http mocking library (pytest-vcr)')
1064 @check('vcr', 'vcr http mocking library (pytest-vcr)')
1062 def has_vcr():
1065 def has_vcr():
1063 try:
1066 try:
1064 import vcr
1067 import vcr
1065
1068
1066 vcr.VCR
1069 vcr.VCR
1067 return True
1070 return True
1068 except (ImportError, AttributeError):
1071 except (ImportError, AttributeError):
1069 pass
1072 pass
1070 return False
1073 return False
1071
1074
1072
1075
1073 @check('emacs', 'GNU Emacs')
1076 @check('emacs', 'GNU Emacs')
1074 def has_emacs():
1077 def has_emacs():
1075 # Our emacs lisp uses `with-eval-after-load` which is new in emacs
1078 # Our emacs lisp uses `with-eval-after-load` which is new in emacs
1076 # 24.4, so we allow emacs 24.4, 24.5, and 25+ (24.5 was the last
1079 # 24.4, so we allow emacs 24.4, 24.5, and 25+ (24.5 was the last
1077 # 24 release)
1080 # 24 release)
1078 return matchoutput('emacs --version', b'GNU Emacs 2(4.4|4.5|5|6|7|8|9)')
1081 return matchoutput('emacs --version', b'GNU Emacs 2(4.4|4.5|5|6|7|8|9)')
1079
1082
1080
1083
1081 @check('black', 'the black formatter for python (>= 20.8b1)')
1084 @check('black', 'the black formatter for python (>= 20.8b1)')
1082 def has_black():
1085 def has_black():
1083 blackcmd = 'black --version'
1086 blackcmd = 'black --version'
1084 version_regex = b'black, version ([0-9a-b.]+)'
1087 version_regex = b'black, version ([0-9a-b.]+)'
1085 version = matchoutput(blackcmd, version_regex)
1088 version = matchoutput(blackcmd, version_regex)
1086 sv = distutils.version.StrictVersion
1089 sv = distutils.version.StrictVersion
1087 return version and sv(_bytes2sys(version.group(1))) >= sv('20.8b1')
1090 return version and sv(_bytes2sys(version.group(1))) >= sv('20.8b1')
1088
1091
1089
1092
1090 @check('pytype', 'the pytype type checker')
1093 @check('pytype', 'the pytype type checker')
1091 def has_pytype():
1094 def has_pytype():
1092 pytypecmd = 'pytype --version'
1095 pytypecmd = 'pytype --version'
1093 version = matchoutput(pytypecmd, b'[0-9a-b.]+')
1096 version = matchoutput(pytypecmd, b'[0-9a-b.]+')
1094 sv = distutils.version.StrictVersion
1097 sv = distutils.version.StrictVersion
1095 return version and sv(_bytes2sys(version.group(0))) >= sv('2019.10.17')
1098 return version and sv(_bytes2sys(version.group(0))) >= sv('2019.10.17')
1096
1099
1097
1100
1098 @check("rustfmt", "rustfmt tool at version nightly-2020-10-04")
1101 @check("rustfmt", "rustfmt tool at version nightly-2020-10-04")
1099 def has_rustfmt():
1102 def has_rustfmt():
1100 # We use Nightly's rustfmt due to current unstable config options.
1103 # We use Nightly's rustfmt due to current unstable config options.
1101 return matchoutput(
1104 return matchoutput(
1102 '`rustup which --toolchain nightly-2020-10-04 rustfmt` --version',
1105 '`rustup which --toolchain nightly-2020-10-04 rustfmt` --version',
1103 b'rustfmt',
1106 b'rustfmt',
1104 )
1107 )
1105
1108
1106
1109
1107 @check("cargo", "cargo tool")
1110 @check("cargo", "cargo tool")
1108 def has_cargo():
1111 def has_cargo():
1109 return matchoutput('`rustup which cargo` --version', b'cargo')
1112 return matchoutput('`rustup which cargo` --version', b'cargo')
1110
1113
1111
1114
1112 @check("lzma", "python lzma module")
1115 @check("lzma", "python lzma module")
1113 def has_lzma():
1116 def has_lzma():
1114 try:
1117 try:
1115 import _lzma
1118 import _lzma
1116
1119
1117 _lzma.FORMAT_XZ
1120 _lzma.FORMAT_XZ
1118 return True
1121 return True
1119 except ImportError:
1122 except ImportError:
1120 return False
1123 return False
@@ -1,3909 +1,3916 b''
1 #!/usr/bin/env python3
1 #!/usr/bin/env python3
2 #
2 #
3 # run-tests.py - Run a set of tests on Mercurial
3 # run-tests.py - Run a set of tests on Mercurial
4 #
4 #
5 # Copyright 2006 Olivia Mackall <olivia@selenic.com>
5 # Copyright 2006 Olivia Mackall <olivia@selenic.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 # Modifying this script is tricky because it has many modes:
10 # Modifying this script is tricky because it has many modes:
11 # - serial (default) vs parallel (-jN, N > 1)
11 # - serial (default) vs parallel (-jN, N > 1)
12 # - no coverage (default) vs coverage (-c, -C, -s)
12 # - no coverage (default) vs coverage (-c, -C, -s)
13 # - temp install (default) vs specific hg script (--with-hg, --local)
13 # - temp install (default) vs specific hg script (--with-hg, --local)
14 # - tests are a mix of shell scripts and Python scripts
14 # - tests are a mix of shell scripts and Python scripts
15 #
15 #
16 # If you change this script, it is recommended that you ensure you
16 # If you change this script, it is recommended that you ensure you
17 # haven't broken it by running it in various modes with a representative
17 # haven't broken it by running it in various modes with a representative
18 # sample of test scripts. For example:
18 # sample of test scripts. For example:
19 #
19 #
20 # 1) serial, no coverage, temp install:
20 # 1) serial, no coverage, temp install:
21 # ./run-tests.py test-s*
21 # ./run-tests.py test-s*
22 # 2) serial, no coverage, local hg:
22 # 2) serial, no coverage, local hg:
23 # ./run-tests.py --local test-s*
23 # ./run-tests.py --local test-s*
24 # 3) serial, coverage, temp install:
24 # 3) serial, coverage, temp install:
25 # ./run-tests.py -c test-s*
25 # ./run-tests.py -c test-s*
26 # 4) serial, coverage, local hg:
26 # 4) serial, coverage, local hg:
27 # ./run-tests.py -c --local test-s* # unsupported
27 # ./run-tests.py -c --local test-s* # unsupported
28 # 5) parallel, no coverage, temp install:
28 # 5) parallel, no coverage, temp install:
29 # ./run-tests.py -j2 test-s*
29 # ./run-tests.py -j2 test-s*
30 # 6) parallel, no coverage, local hg:
30 # 6) parallel, no coverage, local hg:
31 # ./run-tests.py -j2 --local test-s*
31 # ./run-tests.py -j2 --local test-s*
32 # 7) parallel, coverage, temp install:
32 # 7) parallel, coverage, temp install:
33 # ./run-tests.py -j2 -c test-s* # currently broken
33 # ./run-tests.py -j2 -c test-s* # currently broken
34 # 8) parallel, coverage, local install:
34 # 8) parallel, coverage, local install:
35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
36 # 9) parallel, custom tmp dir:
36 # 9) parallel, custom tmp dir:
37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
38 # 10) parallel, pure, tests that call run-tests:
38 # 10) parallel, pure, tests that call run-tests:
39 # ./run-tests.py --pure `grep -l run-tests.py *.t`
39 # ./run-tests.py --pure `grep -l run-tests.py *.t`
40 #
40 #
41 # (You could use any subset of the tests: test-s* happens to match
41 # (You could use any subset of the tests: test-s* happens to match
42 # enough that it's worth doing parallel runs, few enough that it
42 # enough that it's worth doing parallel runs, few enough that it
43 # completes fairly quickly, includes both shell and Python scripts, and
43 # completes fairly quickly, includes both shell and Python scripts, and
44 # includes some scripts that run daemon processes.)
44 # includes some scripts that run daemon processes.)
45
45
46 from __future__ import absolute_import, print_function
46 from __future__ import absolute_import, print_function
47
47
48 import argparse
48 import argparse
49 import collections
49 import collections
50 import contextlib
50 import contextlib
51 import difflib
51 import difflib
52 import distutils.version as version
52 import distutils.version as version
53 import errno
53 import errno
54 import json
54 import json
55 import multiprocessing
55 import multiprocessing
56 import os
56 import os
57 import platform
57 import platform
58 import random
58 import random
59 import re
59 import re
60 import shutil
60 import shutil
61 import signal
61 import signal
62 import socket
62 import socket
63 import subprocess
63 import subprocess
64 import sys
64 import sys
65 import sysconfig
65 import sysconfig
66 import tempfile
66 import tempfile
67 import threading
67 import threading
68 import time
68 import time
69 import unittest
69 import unittest
70 import uuid
70 import uuid
71 import xml.dom.minidom as minidom
71 import xml.dom.minidom as minidom
72
72
73 try:
73 try:
74 import Queue as queue
74 import Queue as queue
75 except ImportError:
75 except ImportError:
76 import queue
76 import queue
77
77
78 try:
78 try:
79 import shlex
79 import shlex
80
80
81 shellquote = shlex.quote
81 shellquote = shlex.quote
82 except (ImportError, AttributeError):
82 except (ImportError, AttributeError):
83 import pipes
83 import pipes
84
84
85 shellquote = pipes.quote
85 shellquote = pipes.quote
86
86
87 processlock = threading.Lock()
87 processlock = threading.Lock()
88
88
89 pygmentspresent = False
89 pygmentspresent = False
90 # ANSI color is unsupported prior to Windows 10
90 # ANSI color is unsupported prior to Windows 10
91 if os.name != 'nt':
91 if os.name != 'nt':
92 try: # is pygments installed
92 try: # is pygments installed
93 import pygments
93 import pygments
94 import pygments.lexers as lexers
94 import pygments.lexers as lexers
95 import pygments.lexer as lexer
95 import pygments.lexer as lexer
96 import pygments.formatters as formatters
96 import pygments.formatters as formatters
97 import pygments.token as token
97 import pygments.token as token
98 import pygments.style as style
98 import pygments.style as style
99
99
100 pygmentspresent = True
100 pygmentspresent = True
101 difflexer = lexers.DiffLexer()
101 difflexer = lexers.DiffLexer()
102 terminal256formatter = formatters.Terminal256Formatter()
102 terminal256formatter = formatters.Terminal256Formatter()
103 except ImportError:
103 except ImportError:
104 pass
104 pass
105
105
106 if pygmentspresent:
106 if pygmentspresent:
107
107
108 class TestRunnerStyle(style.Style):
108 class TestRunnerStyle(style.Style):
109 default_style = ""
109 default_style = ""
110 skipped = token.string_to_tokentype("Token.Generic.Skipped")
110 skipped = token.string_to_tokentype("Token.Generic.Skipped")
111 failed = token.string_to_tokentype("Token.Generic.Failed")
111 failed = token.string_to_tokentype("Token.Generic.Failed")
112 skippedname = token.string_to_tokentype("Token.Generic.SName")
112 skippedname = token.string_to_tokentype("Token.Generic.SName")
113 failedname = token.string_to_tokentype("Token.Generic.FName")
113 failedname = token.string_to_tokentype("Token.Generic.FName")
114 styles = {
114 styles = {
115 skipped: '#e5e5e5',
115 skipped: '#e5e5e5',
116 skippedname: '#00ffff',
116 skippedname: '#00ffff',
117 failed: '#7f0000',
117 failed: '#7f0000',
118 failedname: '#ff0000',
118 failedname: '#ff0000',
119 }
119 }
120
120
121 class TestRunnerLexer(lexer.RegexLexer):
121 class TestRunnerLexer(lexer.RegexLexer):
122 testpattern = r'[\w-]+\.(t|py)(#[a-zA-Z0-9_\-\.]+)?'
122 testpattern = r'[\w-]+\.(t|py)(#[a-zA-Z0-9_\-\.]+)?'
123 tokens = {
123 tokens = {
124 'root': [
124 'root': [
125 (r'^Skipped', token.Generic.Skipped, 'skipped'),
125 (r'^Skipped', token.Generic.Skipped, 'skipped'),
126 (r'^Failed ', token.Generic.Failed, 'failed'),
126 (r'^Failed ', token.Generic.Failed, 'failed'),
127 (r'^ERROR: ', token.Generic.Failed, 'failed'),
127 (r'^ERROR: ', token.Generic.Failed, 'failed'),
128 ],
128 ],
129 'skipped': [
129 'skipped': [
130 (testpattern, token.Generic.SName),
130 (testpattern, token.Generic.SName),
131 (r':.*', token.Generic.Skipped),
131 (r':.*', token.Generic.Skipped),
132 ],
132 ],
133 'failed': [
133 'failed': [
134 (testpattern, token.Generic.FName),
134 (testpattern, token.Generic.FName),
135 (r'(:| ).*', token.Generic.Failed),
135 (r'(:| ).*', token.Generic.Failed),
136 ],
136 ],
137 }
137 }
138
138
139 runnerformatter = formatters.Terminal256Formatter(style=TestRunnerStyle)
139 runnerformatter = formatters.Terminal256Formatter(style=TestRunnerStyle)
140 runnerlexer = TestRunnerLexer()
140 runnerlexer = TestRunnerLexer()
141
141
142 origenviron = os.environ.copy()
142 origenviron = os.environ.copy()
143
143
144 if sys.version_info > (3, 5, 0):
144 if sys.version_info > (3, 5, 0):
145 PYTHON3 = True
145 PYTHON3 = True
146 xrange = range # we use xrange in one place, and we'd rather not use range
146 xrange = range # we use xrange in one place, and we'd rather not use range
147
147
148 def _sys2bytes(p):
148 def _sys2bytes(p):
149 if p is None:
149 if p is None:
150 return p
150 return p
151 return p.encode('utf-8')
151 return p.encode('utf-8')
152
152
153 def _bytes2sys(p):
153 def _bytes2sys(p):
154 if p is None:
154 if p is None:
155 return p
155 return p
156 return p.decode('utf-8')
156 return p.decode('utf-8')
157
157
158 osenvironb = getattr(os, 'environb', None)
158 osenvironb = getattr(os, 'environb', None)
159 if osenvironb is None:
159 if osenvironb is None:
160 # Windows lacks os.environb, for instance. A proxy over the real thing
160 # Windows lacks os.environb, for instance. A proxy over the real thing
161 # instead of a copy allows the environment to be updated via bytes on
161 # instead of a copy allows the environment to be updated via bytes on
162 # all platforms.
162 # all platforms.
163 class environbytes(object):
163 class environbytes(object):
164 def __init__(self, strenv):
164 def __init__(self, strenv):
165 self.__len__ = strenv.__len__
165 self.__len__ = strenv.__len__
166 self.clear = strenv.clear
166 self.clear = strenv.clear
167 self._strenv = strenv
167 self._strenv = strenv
168
168
169 def __getitem__(self, k):
169 def __getitem__(self, k):
170 v = self._strenv.__getitem__(_bytes2sys(k))
170 v = self._strenv.__getitem__(_bytes2sys(k))
171 return _sys2bytes(v)
171 return _sys2bytes(v)
172
172
173 def __setitem__(self, k, v):
173 def __setitem__(self, k, v):
174 self._strenv.__setitem__(_bytes2sys(k), _bytes2sys(v))
174 self._strenv.__setitem__(_bytes2sys(k), _bytes2sys(v))
175
175
176 def __delitem__(self, k):
176 def __delitem__(self, k):
177 self._strenv.__delitem__(_bytes2sys(k))
177 self._strenv.__delitem__(_bytes2sys(k))
178
178
179 def __contains__(self, k):
179 def __contains__(self, k):
180 return self._strenv.__contains__(_bytes2sys(k))
180 return self._strenv.__contains__(_bytes2sys(k))
181
181
182 def __iter__(self):
182 def __iter__(self):
183 return iter([_sys2bytes(k) for k in iter(self._strenv)])
183 return iter([_sys2bytes(k) for k in iter(self._strenv)])
184
184
185 def get(self, k, default=None):
185 def get(self, k, default=None):
186 v = self._strenv.get(_bytes2sys(k), _bytes2sys(default))
186 v = self._strenv.get(_bytes2sys(k), _bytes2sys(default))
187 return _sys2bytes(v)
187 return _sys2bytes(v)
188
188
189 def pop(self, k, default=None):
189 def pop(self, k, default=None):
190 v = self._strenv.pop(_bytes2sys(k), _bytes2sys(default))
190 v = self._strenv.pop(_bytes2sys(k), _bytes2sys(default))
191 return _sys2bytes(v)
191 return _sys2bytes(v)
192
192
193 osenvironb = environbytes(os.environ)
193 osenvironb = environbytes(os.environ)
194
194
195 getcwdb = getattr(os, 'getcwdb')
195 getcwdb = getattr(os, 'getcwdb')
196 if not getcwdb or os.name == 'nt':
196 if not getcwdb or os.name == 'nt':
197 getcwdb = lambda: _sys2bytes(os.getcwd())
197 getcwdb = lambda: _sys2bytes(os.getcwd())
198
198
199 elif sys.version_info >= (3, 0, 0):
199 elif sys.version_info >= (3, 0, 0):
200 print(
200 print(
201 '%s is only supported on Python 3.5+ and 2.7, not %s'
201 '%s is only supported on Python 3.5+ and 2.7, not %s'
202 % (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3]))
202 % (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3]))
203 )
203 )
204 sys.exit(70) # EX_SOFTWARE from `man 3 sysexit`
204 sys.exit(70) # EX_SOFTWARE from `man 3 sysexit`
205 else:
205 else:
206 PYTHON3 = False
206 PYTHON3 = False
207
207
208 # In python 2.x, path operations are generally done using
208 # In python 2.x, path operations are generally done using
209 # bytestrings by default, so we don't have to do any extra
209 # bytestrings by default, so we don't have to do any extra
210 # fiddling there. We define the wrapper functions anyway just to
210 # fiddling there. We define the wrapper functions anyway just to
211 # help keep code consistent between platforms.
211 # help keep code consistent between platforms.
212 def _sys2bytes(p):
212 def _sys2bytes(p):
213 return p
213 return p
214
214
215 _bytes2sys = _sys2bytes
215 _bytes2sys = _sys2bytes
216 osenvironb = os.environ
216 osenvironb = os.environ
217 getcwdb = os.getcwd
217 getcwdb = os.getcwd
218
218
219 # For Windows support
219 # For Windows support
220 wifexited = getattr(os, "WIFEXITED", lambda x: False)
220 wifexited = getattr(os, "WIFEXITED", lambda x: False)
221
221
222 # Whether to use IPv6
222 # Whether to use IPv6
223 def checksocketfamily(name, port=20058):
223 def checksocketfamily(name, port=20058):
224 """return true if we can listen on localhost using family=name
224 """return true if we can listen on localhost using family=name
225
225
226 name should be either 'AF_INET', or 'AF_INET6'.
226 name should be either 'AF_INET', or 'AF_INET6'.
227 port being used is okay - EADDRINUSE is considered as successful.
227 port being used is okay - EADDRINUSE is considered as successful.
228 """
228 """
229 family = getattr(socket, name, None)
229 family = getattr(socket, name, None)
230 if family is None:
230 if family is None:
231 return False
231 return False
232 try:
232 try:
233 s = socket.socket(family, socket.SOCK_STREAM)
233 s = socket.socket(family, socket.SOCK_STREAM)
234 s.bind(('localhost', port))
234 s.bind(('localhost', port))
235 s.close()
235 s.close()
236 return True
236 return True
237 except socket.error as exc:
237 except socket.error as exc:
238 if exc.errno == errno.EADDRINUSE:
238 if exc.errno == errno.EADDRINUSE:
239 return True
239 return True
240 elif exc.errno in (errno.EADDRNOTAVAIL, errno.EPROTONOSUPPORT):
240 elif exc.errno in (errno.EADDRNOTAVAIL, errno.EPROTONOSUPPORT):
241 return False
241 return False
242 else:
242 else:
243 raise
243 raise
244 else:
244 else:
245 return False
245 return False
246
246
247
247
248 # useipv6 will be set by parseargs
248 # useipv6 will be set by parseargs
249 useipv6 = None
249 useipv6 = None
250
250
251
251
252 def checkportisavailable(port):
252 def checkportisavailable(port):
253 """return true if a port seems free to bind on localhost"""
253 """return true if a port seems free to bind on localhost"""
254 if useipv6:
254 if useipv6:
255 family = socket.AF_INET6
255 family = socket.AF_INET6
256 else:
256 else:
257 family = socket.AF_INET
257 family = socket.AF_INET
258 try:
258 try:
259 with contextlib.closing(socket.socket(family, socket.SOCK_STREAM)) as s:
259 with contextlib.closing(socket.socket(family, socket.SOCK_STREAM)) as s:
260 s.bind(('localhost', port))
260 s.bind(('localhost', port))
261 return True
261 return True
262 except socket.error as exc:
262 except socket.error as exc:
263 if os.name == 'nt' and exc.errno == errno.WSAEACCES:
263 if os.name == 'nt' and exc.errno == errno.WSAEACCES:
264 return False
264 return False
265 elif exc.errno not in (
265 elif PYTHON3:
266 # TODO: make a proper exception handler after dropping py2. This
267 # works because socket.error is an alias for OSError on py3,
268 # which is also the baseclass of PermissionError.
269 if isinstance(exc, PermissionError):
270 return False
271 if exc.errno not in (
266 errno.EADDRINUSE,
272 errno.EADDRINUSE,
267 errno.EADDRNOTAVAIL,
273 errno.EADDRNOTAVAIL,
268 errno.EPROTONOSUPPORT,
274 errno.EPROTONOSUPPORT,
269 ):
275 ):
270 raise
276 raise
271 return False
277 return False
272
278
273
279
274 closefds = os.name == 'posix'
280 closefds = os.name == 'posix'
275
281
276
282
277 def Popen4(cmd, wd, timeout, env=None):
283 def Popen4(cmd, wd, timeout, env=None):
278 processlock.acquire()
284 processlock.acquire()
279 p = subprocess.Popen(
285 p = subprocess.Popen(
280 _bytes2sys(cmd),
286 _bytes2sys(cmd),
281 shell=True,
287 shell=True,
282 bufsize=-1,
288 bufsize=-1,
283 cwd=_bytes2sys(wd),
289 cwd=_bytes2sys(wd),
284 env=env,
290 env=env,
285 close_fds=closefds,
291 close_fds=closefds,
286 stdin=subprocess.PIPE,
292 stdin=subprocess.PIPE,
287 stdout=subprocess.PIPE,
293 stdout=subprocess.PIPE,
288 stderr=subprocess.STDOUT,
294 stderr=subprocess.STDOUT,
289 )
295 )
290 processlock.release()
296 processlock.release()
291
297
292 p.fromchild = p.stdout
298 p.fromchild = p.stdout
293 p.tochild = p.stdin
299 p.tochild = p.stdin
294 p.childerr = p.stderr
300 p.childerr = p.stderr
295
301
296 p.timeout = False
302 p.timeout = False
297 if timeout:
303 if timeout:
298
304
299 def t():
305 def t():
300 start = time.time()
306 start = time.time()
301 while time.time() - start < timeout and p.returncode is None:
307 while time.time() - start < timeout and p.returncode is None:
302 time.sleep(0.1)
308 time.sleep(0.1)
303 p.timeout = True
309 p.timeout = True
304 vlog('# Timout reached for process %d' % p.pid)
310 vlog('# Timout reached for process %d' % p.pid)
305 if p.returncode is None:
311 if p.returncode is None:
306 terminate(p)
312 terminate(p)
307
313
308 threading.Thread(target=t).start()
314 threading.Thread(target=t).start()
309
315
310 return p
316 return p
311
317
312
318
313 if sys.executable:
319 if sys.executable:
314 sysexecutable = sys.executable
320 sysexecutable = sys.executable
315 elif os.environ.get('PYTHONEXECUTABLE'):
321 elif os.environ.get('PYTHONEXECUTABLE'):
316 sysexecutable = os.environ['PYTHONEXECUTABLE']
322 sysexecutable = os.environ['PYTHONEXECUTABLE']
317 elif os.environ.get('PYTHON'):
323 elif os.environ.get('PYTHON'):
318 sysexecutable = os.environ['PYTHON']
324 sysexecutable = os.environ['PYTHON']
319 else:
325 else:
320 raise AssertionError('Could not find Python interpreter')
326 raise AssertionError('Could not find Python interpreter')
321
327
322 PYTHON = _sys2bytes(sysexecutable.replace('\\', '/'))
328 PYTHON = _sys2bytes(sysexecutable.replace('\\', '/'))
323 IMPL_PATH = b'PYTHONPATH'
329 IMPL_PATH = b'PYTHONPATH'
324 if 'java' in sys.platform:
330 if 'java' in sys.platform:
325 IMPL_PATH = b'JYTHONPATH'
331 IMPL_PATH = b'JYTHONPATH'
326
332
327 default_defaults = {
333 default_defaults = {
328 'jobs': ('HGTEST_JOBS', multiprocessing.cpu_count()),
334 'jobs': ('HGTEST_JOBS', multiprocessing.cpu_count()),
329 'timeout': ('HGTEST_TIMEOUT', 360),
335 'timeout': ('HGTEST_TIMEOUT', 360),
330 'slowtimeout': ('HGTEST_SLOWTIMEOUT', 1500),
336 'slowtimeout': ('HGTEST_SLOWTIMEOUT', 1500),
331 'port': ('HGTEST_PORT', 20059),
337 'port': ('HGTEST_PORT', 20059),
332 'shell': ('HGTEST_SHELL', 'sh'),
338 'shell': ('HGTEST_SHELL', 'sh'),
333 }
339 }
334
340
335 defaults = default_defaults.copy()
341 defaults = default_defaults.copy()
336
342
337
343
338 def canonpath(path):
344 def canonpath(path):
339 return os.path.realpath(os.path.expanduser(path))
345 return os.path.realpath(os.path.expanduser(path))
340
346
341
347
342 def parselistfiles(files, listtype, warn=True):
348 def parselistfiles(files, listtype, warn=True):
343 entries = dict()
349 entries = dict()
344 for filename in files:
350 for filename in files:
345 try:
351 try:
346 path = os.path.expanduser(os.path.expandvars(filename))
352 path = os.path.expanduser(os.path.expandvars(filename))
347 f = open(path, "rb")
353 f = open(path, "rb")
348 except IOError as err:
354 except IOError as err:
349 if err.errno != errno.ENOENT:
355 if err.errno != errno.ENOENT:
350 raise
356 raise
351 if warn:
357 if warn:
352 print("warning: no such %s file: %s" % (listtype, filename))
358 print("warning: no such %s file: %s" % (listtype, filename))
353 continue
359 continue
354
360
355 for line in f.readlines():
361 for line in f.readlines():
356 line = line.split(b'#', 1)[0].strip()
362 line = line.split(b'#', 1)[0].strip()
357 if line:
363 if line:
358 entries[line] = filename
364 # Ensure path entries are compatible with os.path.relpath()
365 entries[os.path.normpath(line)] = filename
359
366
360 f.close()
367 f.close()
361 return entries
368 return entries
362
369
363
370
364 def parsettestcases(path):
371 def parsettestcases(path):
365 """read a .t test file, return a set of test case names
372 """read a .t test file, return a set of test case names
366
373
367 If path does not exist, return an empty set.
374 If path does not exist, return an empty set.
368 """
375 """
369 cases = []
376 cases = []
370 try:
377 try:
371 with open(path, 'rb') as f:
378 with open(path, 'rb') as f:
372 for l in f:
379 for l in f:
373 if l.startswith(b'#testcases '):
380 if l.startswith(b'#testcases '):
374 cases.append(sorted(l[11:].split()))
381 cases.append(sorted(l[11:].split()))
375 except IOError as ex:
382 except IOError as ex:
376 if ex.errno != errno.ENOENT:
383 if ex.errno != errno.ENOENT:
377 raise
384 raise
378 return cases
385 return cases
379
386
380
387
381 def getparser():
388 def getparser():
382 """Obtain the OptionParser used by the CLI."""
389 """Obtain the OptionParser used by the CLI."""
383 parser = argparse.ArgumentParser(usage='%(prog)s [options] [tests]')
390 parser = argparse.ArgumentParser(usage='%(prog)s [options] [tests]')
384
391
385 selection = parser.add_argument_group('Test Selection')
392 selection = parser.add_argument_group('Test Selection')
386 selection.add_argument(
393 selection.add_argument(
387 '--allow-slow-tests',
394 '--allow-slow-tests',
388 action='store_true',
395 action='store_true',
389 help='allow extremely slow tests',
396 help='allow extremely slow tests',
390 )
397 )
391 selection.add_argument(
398 selection.add_argument(
392 "--blacklist",
399 "--blacklist",
393 action="append",
400 action="append",
394 help="skip tests listed in the specified blacklist file",
401 help="skip tests listed in the specified blacklist file",
395 )
402 )
396 selection.add_argument(
403 selection.add_argument(
397 "--changed",
404 "--changed",
398 help="run tests that are changed in parent rev or working directory",
405 help="run tests that are changed in parent rev or working directory",
399 )
406 )
400 selection.add_argument(
407 selection.add_argument(
401 "-k", "--keywords", help="run tests matching keywords"
408 "-k", "--keywords", help="run tests matching keywords"
402 )
409 )
403 selection.add_argument(
410 selection.add_argument(
404 "-r", "--retest", action="store_true", help="retest failed tests"
411 "-r", "--retest", action="store_true", help="retest failed tests"
405 )
412 )
406 selection.add_argument(
413 selection.add_argument(
407 "--test-list",
414 "--test-list",
408 action="append",
415 action="append",
409 help="read tests to run from the specified file",
416 help="read tests to run from the specified file",
410 )
417 )
411 selection.add_argument(
418 selection.add_argument(
412 "--whitelist",
419 "--whitelist",
413 action="append",
420 action="append",
414 help="always run tests listed in the specified whitelist file",
421 help="always run tests listed in the specified whitelist file",
415 )
422 )
416 selection.add_argument(
423 selection.add_argument(
417 'tests', metavar='TESTS', nargs='*', help='Tests to run'
424 'tests', metavar='TESTS', nargs='*', help='Tests to run'
418 )
425 )
419
426
420 harness = parser.add_argument_group('Test Harness Behavior')
427 harness = parser.add_argument_group('Test Harness Behavior')
421 harness.add_argument(
428 harness.add_argument(
422 '--bisect-repo',
429 '--bisect-repo',
423 metavar='bisect_repo',
430 metavar='bisect_repo',
424 help=(
431 help=(
425 "Path of a repo to bisect. Use together with " "--known-good-rev"
432 "Path of a repo to bisect. Use together with " "--known-good-rev"
426 ),
433 ),
427 )
434 )
428 harness.add_argument(
435 harness.add_argument(
429 "-d",
436 "-d",
430 "--debug",
437 "--debug",
431 action="store_true",
438 action="store_true",
432 help="debug mode: write output of test scripts to console"
439 help="debug mode: write output of test scripts to console"
433 " rather than capturing and diffing it (disables timeout)",
440 " rather than capturing and diffing it (disables timeout)",
434 )
441 )
435 harness.add_argument(
442 harness.add_argument(
436 "-f",
443 "-f",
437 "--first",
444 "--first",
438 action="store_true",
445 action="store_true",
439 help="exit on the first test failure",
446 help="exit on the first test failure",
440 )
447 )
441 harness.add_argument(
448 harness.add_argument(
442 "-i",
449 "-i",
443 "--interactive",
450 "--interactive",
444 action="store_true",
451 action="store_true",
445 help="prompt to accept changed output",
452 help="prompt to accept changed output",
446 )
453 )
447 harness.add_argument(
454 harness.add_argument(
448 "-j",
455 "-j",
449 "--jobs",
456 "--jobs",
450 type=int,
457 type=int,
451 help="number of jobs to run in parallel"
458 help="number of jobs to run in parallel"
452 " (default: $%s or %d)" % defaults['jobs'],
459 " (default: $%s or %d)" % defaults['jobs'],
453 )
460 )
454 harness.add_argument(
461 harness.add_argument(
455 "--keep-tmpdir",
462 "--keep-tmpdir",
456 action="store_true",
463 action="store_true",
457 help="keep temporary directory after running tests",
464 help="keep temporary directory after running tests",
458 )
465 )
459 harness.add_argument(
466 harness.add_argument(
460 '--known-good-rev',
467 '--known-good-rev',
461 metavar="known_good_rev",
468 metavar="known_good_rev",
462 help=(
469 help=(
463 "Automatically bisect any failures using this "
470 "Automatically bisect any failures using this "
464 "revision as a known-good revision."
471 "revision as a known-good revision."
465 ),
472 ),
466 )
473 )
467 harness.add_argument(
474 harness.add_argument(
468 "--list-tests",
475 "--list-tests",
469 action="store_true",
476 action="store_true",
470 help="list tests instead of running them",
477 help="list tests instead of running them",
471 )
478 )
472 harness.add_argument(
479 harness.add_argument(
473 "--loop", action="store_true", help="loop tests repeatedly"
480 "--loop", action="store_true", help="loop tests repeatedly"
474 )
481 )
475 harness.add_argument(
482 harness.add_argument(
476 '--random', action="store_true", help='run tests in random order'
483 '--random', action="store_true", help='run tests in random order'
477 )
484 )
478 harness.add_argument(
485 harness.add_argument(
479 '--order-by-runtime',
486 '--order-by-runtime',
480 action="store_true",
487 action="store_true",
481 help='run slowest tests first, according to .testtimes',
488 help='run slowest tests first, according to .testtimes',
482 )
489 )
483 harness.add_argument(
490 harness.add_argument(
484 "-p",
491 "-p",
485 "--port",
492 "--port",
486 type=int,
493 type=int,
487 help="port on which servers should listen"
494 help="port on which servers should listen"
488 " (default: $%s or %d)" % defaults['port'],
495 " (default: $%s or %d)" % defaults['port'],
489 )
496 )
490 harness.add_argument(
497 harness.add_argument(
491 '--profile-runner',
498 '--profile-runner',
492 action='store_true',
499 action='store_true',
493 help='run statprof on run-tests',
500 help='run statprof on run-tests',
494 )
501 )
495 harness.add_argument(
502 harness.add_argument(
496 "-R", "--restart", action="store_true", help="restart at last error"
503 "-R", "--restart", action="store_true", help="restart at last error"
497 )
504 )
498 harness.add_argument(
505 harness.add_argument(
499 "--runs-per-test",
506 "--runs-per-test",
500 type=int,
507 type=int,
501 dest="runs_per_test",
508 dest="runs_per_test",
502 help="run each test N times (default=1)",
509 help="run each test N times (default=1)",
503 default=1,
510 default=1,
504 )
511 )
505 harness.add_argument(
512 harness.add_argument(
506 "--shell", help="shell to use (default: $%s or %s)" % defaults['shell']
513 "--shell", help="shell to use (default: $%s or %s)" % defaults['shell']
507 )
514 )
508 harness.add_argument(
515 harness.add_argument(
509 '--showchannels', action='store_true', help='show scheduling channels'
516 '--showchannels', action='store_true', help='show scheduling channels'
510 )
517 )
511 harness.add_argument(
518 harness.add_argument(
512 "--slowtimeout",
519 "--slowtimeout",
513 type=int,
520 type=int,
514 help="kill errant slow tests after SLOWTIMEOUT seconds"
521 help="kill errant slow tests after SLOWTIMEOUT seconds"
515 " (default: $%s or %d)" % defaults['slowtimeout'],
522 " (default: $%s or %d)" % defaults['slowtimeout'],
516 )
523 )
517 harness.add_argument(
524 harness.add_argument(
518 "-t",
525 "-t",
519 "--timeout",
526 "--timeout",
520 type=int,
527 type=int,
521 help="kill errant tests after TIMEOUT seconds"
528 help="kill errant tests after TIMEOUT seconds"
522 " (default: $%s or %d)" % defaults['timeout'],
529 " (default: $%s or %d)" % defaults['timeout'],
523 )
530 )
524 harness.add_argument(
531 harness.add_argument(
525 "--tmpdir",
532 "--tmpdir",
526 help="run tests in the given temporary directory"
533 help="run tests in the given temporary directory"
527 " (implies --keep-tmpdir)",
534 " (implies --keep-tmpdir)",
528 )
535 )
529 harness.add_argument(
536 harness.add_argument(
530 "-v", "--verbose", action="store_true", help="output verbose messages"
537 "-v", "--verbose", action="store_true", help="output verbose messages"
531 )
538 )
532
539
533 hgconf = parser.add_argument_group('Mercurial Configuration')
540 hgconf = parser.add_argument_group('Mercurial Configuration')
534 hgconf.add_argument(
541 hgconf.add_argument(
535 "--chg",
542 "--chg",
536 action="store_true",
543 action="store_true",
537 help="install and use chg wrapper in place of hg",
544 help="install and use chg wrapper in place of hg",
538 )
545 )
539 hgconf.add_argument(
546 hgconf.add_argument(
540 "--chg-debug",
547 "--chg-debug",
541 action="store_true",
548 action="store_true",
542 help="show chg debug logs",
549 help="show chg debug logs",
543 )
550 )
544 hgconf.add_argument(
551 hgconf.add_argument(
545 "--rhg",
552 "--rhg",
546 action="store_true",
553 action="store_true",
547 help="install and use rhg Rust implementation in place of hg",
554 help="install and use rhg Rust implementation in place of hg",
548 )
555 )
549 hgconf.add_argument("--compiler", help="compiler to build with")
556 hgconf.add_argument("--compiler", help="compiler to build with")
550 hgconf.add_argument(
557 hgconf.add_argument(
551 '--extra-config-opt',
558 '--extra-config-opt',
552 action="append",
559 action="append",
553 default=[],
560 default=[],
554 help='set the given config opt in the test hgrc',
561 help='set the given config opt in the test hgrc',
555 )
562 )
556 hgconf.add_argument(
563 hgconf.add_argument(
557 "-l",
564 "-l",
558 "--local",
565 "--local",
559 action="store_true",
566 action="store_true",
560 help="shortcut for --with-hg=<testdir>/../hg, "
567 help="shortcut for --with-hg=<testdir>/../hg, "
561 "--with-rhg=<testdir>/../rust/target/release/rhg if --rhg is set, "
568 "--with-rhg=<testdir>/../rust/target/release/rhg if --rhg is set, "
562 "and --with-chg=<testdir>/../contrib/chg/chg if --chg is set",
569 "and --with-chg=<testdir>/../contrib/chg/chg if --chg is set",
563 )
570 )
564 hgconf.add_argument(
571 hgconf.add_argument(
565 "--ipv6",
572 "--ipv6",
566 action="store_true",
573 action="store_true",
567 help="prefer IPv6 to IPv4 for network related tests",
574 help="prefer IPv6 to IPv4 for network related tests",
568 )
575 )
569 hgconf.add_argument(
576 hgconf.add_argument(
570 "--pure",
577 "--pure",
571 action="store_true",
578 action="store_true",
572 help="use pure Python code instead of C extensions",
579 help="use pure Python code instead of C extensions",
573 )
580 )
574 hgconf.add_argument(
581 hgconf.add_argument(
575 "--rust",
582 "--rust",
576 action="store_true",
583 action="store_true",
577 help="use Rust code alongside C extensions",
584 help="use Rust code alongside C extensions",
578 )
585 )
579 hgconf.add_argument(
586 hgconf.add_argument(
580 "--no-rust",
587 "--no-rust",
581 action="store_true",
588 action="store_true",
582 help="do not use Rust code even if compiled",
589 help="do not use Rust code even if compiled",
583 )
590 )
584 hgconf.add_argument(
591 hgconf.add_argument(
585 "--with-chg",
592 "--with-chg",
586 metavar="CHG",
593 metavar="CHG",
587 help="use specified chg wrapper in place of hg",
594 help="use specified chg wrapper in place of hg",
588 )
595 )
589 hgconf.add_argument(
596 hgconf.add_argument(
590 "--with-rhg",
597 "--with-rhg",
591 metavar="RHG",
598 metavar="RHG",
592 help="use specified rhg Rust implementation in place of hg",
599 help="use specified rhg Rust implementation in place of hg",
593 )
600 )
594 hgconf.add_argument(
601 hgconf.add_argument(
595 "--with-hg",
602 "--with-hg",
596 metavar="HG",
603 metavar="HG",
597 help="test using specified hg script rather than a "
604 help="test using specified hg script rather than a "
598 "temporary installation",
605 "temporary installation",
599 )
606 )
600
607
601 reporting = parser.add_argument_group('Results Reporting')
608 reporting = parser.add_argument_group('Results Reporting')
602 reporting.add_argument(
609 reporting.add_argument(
603 "-C",
610 "-C",
604 "--annotate",
611 "--annotate",
605 action="store_true",
612 action="store_true",
606 help="output files annotated with coverage",
613 help="output files annotated with coverage",
607 )
614 )
608 reporting.add_argument(
615 reporting.add_argument(
609 "--color",
616 "--color",
610 choices=["always", "auto", "never"],
617 choices=["always", "auto", "never"],
611 default=os.environ.get('HGRUNTESTSCOLOR', 'auto'),
618 default=os.environ.get('HGRUNTESTSCOLOR', 'auto'),
612 help="colorisation: always|auto|never (default: auto)",
619 help="colorisation: always|auto|never (default: auto)",
613 )
620 )
614 reporting.add_argument(
621 reporting.add_argument(
615 "-c",
622 "-c",
616 "--cover",
623 "--cover",
617 action="store_true",
624 action="store_true",
618 help="print a test coverage report",
625 help="print a test coverage report",
619 )
626 )
620 reporting.add_argument(
627 reporting.add_argument(
621 '--exceptions',
628 '--exceptions',
622 action='store_true',
629 action='store_true',
623 help='log all exceptions and generate an exception report',
630 help='log all exceptions and generate an exception report',
624 )
631 )
625 reporting.add_argument(
632 reporting.add_argument(
626 "-H",
633 "-H",
627 "--htmlcov",
634 "--htmlcov",
628 action="store_true",
635 action="store_true",
629 help="create an HTML report of the coverage of the files",
636 help="create an HTML report of the coverage of the files",
630 )
637 )
631 reporting.add_argument(
638 reporting.add_argument(
632 "--json",
639 "--json",
633 action="store_true",
640 action="store_true",
634 help="store test result data in 'report.json' file",
641 help="store test result data in 'report.json' file",
635 )
642 )
636 reporting.add_argument(
643 reporting.add_argument(
637 "--outputdir",
644 "--outputdir",
638 help="directory to write error logs to (default=test directory)",
645 help="directory to write error logs to (default=test directory)",
639 )
646 )
640 reporting.add_argument(
647 reporting.add_argument(
641 "-n", "--nodiff", action="store_true", help="skip showing test changes"
648 "-n", "--nodiff", action="store_true", help="skip showing test changes"
642 )
649 )
643 reporting.add_argument(
650 reporting.add_argument(
644 "-S",
651 "-S",
645 "--noskips",
652 "--noskips",
646 action="store_true",
653 action="store_true",
647 help="don't report skip tests verbosely",
654 help="don't report skip tests verbosely",
648 )
655 )
649 reporting.add_argument(
656 reporting.add_argument(
650 "--time", action="store_true", help="time how long each test takes"
657 "--time", action="store_true", help="time how long each test takes"
651 )
658 )
652 reporting.add_argument("--view", help="external diff viewer")
659 reporting.add_argument("--view", help="external diff viewer")
653 reporting.add_argument(
660 reporting.add_argument(
654 "--xunit", help="record xunit results at specified path"
661 "--xunit", help="record xunit results at specified path"
655 )
662 )
656
663
657 for option, (envvar, default) in defaults.items():
664 for option, (envvar, default) in defaults.items():
658 defaults[option] = type(default)(os.environ.get(envvar, default))
665 defaults[option] = type(default)(os.environ.get(envvar, default))
659 parser.set_defaults(**defaults)
666 parser.set_defaults(**defaults)
660
667
661 return parser
668 return parser
662
669
663
670
664 def parseargs(args, parser):
671 def parseargs(args, parser):
665 """Parse arguments with our OptionParser and validate results."""
672 """Parse arguments with our OptionParser and validate results."""
666 options = parser.parse_args(args)
673 options = parser.parse_args(args)
667
674
668 # jython is always pure
675 # jython is always pure
669 if 'java' in sys.platform or '__pypy__' in sys.modules:
676 if 'java' in sys.platform or '__pypy__' in sys.modules:
670 options.pure = True
677 options.pure = True
671
678
672 if platform.python_implementation() != 'CPython' and options.rust:
679 if platform.python_implementation() != 'CPython' and options.rust:
673 parser.error('Rust extensions are only available with CPython')
680 parser.error('Rust extensions are only available with CPython')
674
681
675 if options.pure and options.rust:
682 if options.pure and options.rust:
676 parser.error('--rust cannot be used with --pure')
683 parser.error('--rust cannot be used with --pure')
677
684
678 if options.rust and options.no_rust:
685 if options.rust and options.no_rust:
679 parser.error('--rust cannot be used with --no-rust')
686 parser.error('--rust cannot be used with --no-rust')
680
687
681 if options.local:
688 if options.local:
682 if options.with_hg or options.with_rhg or options.with_chg:
689 if options.with_hg or options.with_rhg or options.with_chg:
683 parser.error(
690 parser.error(
684 '--local cannot be used with --with-hg or --with-rhg or --with-chg'
691 '--local cannot be used with --with-hg or --with-rhg or --with-chg'
685 )
692 )
686 testdir = os.path.dirname(_sys2bytes(canonpath(sys.argv[0])))
693 testdir = os.path.dirname(_sys2bytes(canonpath(sys.argv[0])))
687 reporootdir = os.path.dirname(testdir)
694 reporootdir = os.path.dirname(testdir)
688 pathandattrs = [(b'hg', 'with_hg')]
695 pathandattrs = [(b'hg', 'with_hg')]
689 if options.chg:
696 if options.chg:
690 pathandattrs.append((b'contrib/chg/chg', 'with_chg'))
697 pathandattrs.append((b'contrib/chg/chg', 'with_chg'))
691 if options.rhg:
698 if options.rhg:
692 pathandattrs.append((b'rust/target/release/rhg', 'with_rhg'))
699 pathandattrs.append((b'rust/target/release/rhg', 'with_rhg'))
693 for relpath, attr in pathandattrs:
700 for relpath, attr in pathandattrs:
694 binpath = os.path.join(reporootdir, relpath)
701 binpath = os.path.join(reporootdir, relpath)
695 if os.name != 'nt' and not os.access(binpath, os.X_OK):
702 if os.name != 'nt' and not os.access(binpath, os.X_OK):
696 parser.error(
703 parser.error(
697 '--local specified, but %r not found or '
704 '--local specified, but %r not found or '
698 'not executable' % binpath
705 'not executable' % binpath
699 )
706 )
700 setattr(options, attr, _bytes2sys(binpath))
707 setattr(options, attr, _bytes2sys(binpath))
701
708
702 if options.with_hg:
709 if options.with_hg:
703 options.with_hg = canonpath(_sys2bytes(options.with_hg))
710 options.with_hg = canonpath(_sys2bytes(options.with_hg))
704 if not (
711 if not (
705 os.path.isfile(options.with_hg)
712 os.path.isfile(options.with_hg)
706 and os.access(options.with_hg, os.X_OK)
713 and os.access(options.with_hg, os.X_OK)
707 ):
714 ):
708 parser.error('--with-hg must specify an executable hg script')
715 parser.error('--with-hg must specify an executable hg script')
709 if os.path.basename(options.with_hg) not in [b'hg', b'hg.exe']:
716 if os.path.basename(options.with_hg) not in [b'hg', b'hg.exe']:
710 sys.stderr.write('warning: --with-hg should specify an hg script\n')
717 sys.stderr.write('warning: --with-hg should specify an hg script\n')
711 sys.stderr.flush()
718 sys.stderr.flush()
712
719
713 if (options.chg or options.with_chg) and os.name == 'nt':
720 if (options.chg or options.with_chg) and os.name == 'nt':
714 parser.error('chg does not work on %s' % os.name)
721 parser.error('chg does not work on %s' % os.name)
715 if (options.rhg or options.with_rhg) and os.name == 'nt':
722 if (options.rhg or options.with_rhg) and os.name == 'nt':
716 parser.error('rhg does not work on %s' % os.name)
723 parser.error('rhg does not work on %s' % os.name)
717 if options.with_chg:
724 if options.with_chg:
718 options.chg = False # no installation to temporary location
725 options.chg = False # no installation to temporary location
719 options.with_chg = canonpath(_sys2bytes(options.with_chg))
726 options.with_chg = canonpath(_sys2bytes(options.with_chg))
720 if not (
727 if not (
721 os.path.isfile(options.with_chg)
728 os.path.isfile(options.with_chg)
722 and os.access(options.with_chg, os.X_OK)
729 and os.access(options.with_chg, os.X_OK)
723 ):
730 ):
724 parser.error('--with-chg must specify a chg executable')
731 parser.error('--with-chg must specify a chg executable')
725 if options.with_rhg:
732 if options.with_rhg:
726 options.rhg = False # no installation to temporary location
733 options.rhg = False # no installation to temporary location
727 options.with_rhg = canonpath(_sys2bytes(options.with_rhg))
734 options.with_rhg = canonpath(_sys2bytes(options.with_rhg))
728 if not (
735 if not (
729 os.path.isfile(options.with_rhg)
736 os.path.isfile(options.with_rhg)
730 and os.access(options.with_rhg, os.X_OK)
737 and os.access(options.with_rhg, os.X_OK)
731 ):
738 ):
732 parser.error('--with-rhg must specify a rhg executable')
739 parser.error('--with-rhg must specify a rhg executable')
733 if options.chg and options.with_hg:
740 if options.chg and options.with_hg:
734 # chg shares installation location with hg
741 # chg shares installation location with hg
735 parser.error(
742 parser.error(
736 '--chg does not work when --with-hg is specified '
743 '--chg does not work when --with-hg is specified '
737 '(use --with-chg instead)'
744 '(use --with-chg instead)'
738 )
745 )
739 if options.rhg and options.with_hg:
746 if options.rhg and options.with_hg:
740 # rhg shares installation location with hg
747 # rhg shares installation location with hg
741 parser.error(
748 parser.error(
742 '--rhg does not work when --with-hg is specified '
749 '--rhg does not work when --with-hg is specified '
743 '(use --with-rhg instead)'
750 '(use --with-rhg instead)'
744 )
751 )
745 if options.rhg and options.chg:
752 if options.rhg and options.chg:
746 parser.error('--rhg and --chg do not work together')
753 parser.error('--rhg and --chg do not work together')
747
754
748 if options.color == 'always' and not pygmentspresent:
755 if options.color == 'always' and not pygmentspresent:
749 sys.stderr.write(
756 sys.stderr.write(
750 'warning: --color=always ignored because '
757 'warning: --color=always ignored because '
751 'pygments is not installed\n'
758 'pygments is not installed\n'
752 )
759 )
753
760
754 if options.bisect_repo and not options.known_good_rev:
761 if options.bisect_repo and not options.known_good_rev:
755 parser.error("--bisect-repo cannot be used without --known-good-rev")
762 parser.error("--bisect-repo cannot be used without --known-good-rev")
756
763
757 global useipv6
764 global useipv6
758 if options.ipv6:
765 if options.ipv6:
759 useipv6 = checksocketfamily('AF_INET6')
766 useipv6 = checksocketfamily('AF_INET6')
760 else:
767 else:
761 # only use IPv6 if IPv4 is unavailable and IPv6 is available
768 # only use IPv6 if IPv4 is unavailable and IPv6 is available
762 useipv6 = (not checksocketfamily('AF_INET')) and checksocketfamily(
769 useipv6 = (not checksocketfamily('AF_INET')) and checksocketfamily(
763 'AF_INET6'
770 'AF_INET6'
764 )
771 )
765
772
766 options.anycoverage = options.cover or options.annotate or options.htmlcov
773 options.anycoverage = options.cover or options.annotate or options.htmlcov
767 if options.anycoverage:
774 if options.anycoverage:
768 try:
775 try:
769 import coverage
776 import coverage
770
777
771 covver = version.StrictVersion(coverage.__version__).version
778 covver = version.StrictVersion(coverage.__version__).version
772 if covver < (3, 3):
779 if covver < (3, 3):
773 parser.error('coverage options require coverage 3.3 or later')
780 parser.error('coverage options require coverage 3.3 or later')
774 except ImportError:
781 except ImportError:
775 parser.error('coverage options now require the coverage package')
782 parser.error('coverage options now require the coverage package')
776
783
777 if options.anycoverage and options.local:
784 if options.anycoverage and options.local:
778 # this needs some path mangling somewhere, I guess
785 # this needs some path mangling somewhere, I guess
779 parser.error(
786 parser.error(
780 "sorry, coverage options do not work when --local " "is specified"
787 "sorry, coverage options do not work when --local " "is specified"
781 )
788 )
782
789
783 if options.anycoverage and options.with_hg:
790 if options.anycoverage and options.with_hg:
784 parser.error(
791 parser.error(
785 "sorry, coverage options do not work when --with-hg " "is specified"
792 "sorry, coverage options do not work when --with-hg " "is specified"
786 )
793 )
787
794
788 global verbose
795 global verbose
789 if options.verbose:
796 if options.verbose:
790 verbose = ''
797 verbose = ''
791
798
792 if options.tmpdir:
799 if options.tmpdir:
793 options.tmpdir = canonpath(options.tmpdir)
800 options.tmpdir = canonpath(options.tmpdir)
794
801
795 if options.jobs < 1:
802 if options.jobs < 1:
796 parser.error('--jobs must be positive')
803 parser.error('--jobs must be positive')
797 if options.interactive and options.debug:
804 if options.interactive and options.debug:
798 parser.error("-i/--interactive and -d/--debug are incompatible")
805 parser.error("-i/--interactive and -d/--debug are incompatible")
799 if options.debug:
806 if options.debug:
800 if options.timeout != defaults['timeout']:
807 if options.timeout != defaults['timeout']:
801 sys.stderr.write('warning: --timeout option ignored with --debug\n')
808 sys.stderr.write('warning: --timeout option ignored with --debug\n')
802 if options.slowtimeout != defaults['slowtimeout']:
809 if options.slowtimeout != defaults['slowtimeout']:
803 sys.stderr.write(
810 sys.stderr.write(
804 'warning: --slowtimeout option ignored with --debug\n'
811 'warning: --slowtimeout option ignored with --debug\n'
805 )
812 )
806 options.timeout = 0
813 options.timeout = 0
807 options.slowtimeout = 0
814 options.slowtimeout = 0
808
815
809 if options.blacklist:
816 if options.blacklist:
810 options.blacklist = parselistfiles(options.blacklist, 'blacklist')
817 options.blacklist = parselistfiles(options.blacklist, 'blacklist')
811 if options.whitelist:
818 if options.whitelist:
812 options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
819 options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
813 else:
820 else:
814 options.whitelisted = {}
821 options.whitelisted = {}
815
822
816 if options.showchannels:
823 if options.showchannels:
817 options.nodiff = True
824 options.nodiff = True
818
825
819 return options
826 return options
820
827
821
828
822 def rename(src, dst):
829 def rename(src, dst):
823 """Like os.rename(), trade atomicity and opened files friendliness
830 """Like os.rename(), trade atomicity and opened files friendliness
824 for existing destination support.
831 for existing destination support.
825 """
832 """
826 shutil.copy(src, dst)
833 shutil.copy(src, dst)
827 os.remove(src)
834 os.remove(src)
828
835
829
836
830 def makecleanable(path):
837 def makecleanable(path):
831 """Try to fix directory permission recursively so that the entire tree
838 """Try to fix directory permission recursively so that the entire tree
832 can be deleted"""
839 can be deleted"""
833 for dirpath, dirnames, _filenames in os.walk(path, topdown=True):
840 for dirpath, dirnames, _filenames in os.walk(path, topdown=True):
834 for d in dirnames:
841 for d in dirnames:
835 p = os.path.join(dirpath, d)
842 p = os.path.join(dirpath, d)
836 try:
843 try:
837 os.chmod(p, os.stat(p).st_mode & 0o777 | 0o700) # chmod u+rwx
844 os.chmod(p, os.stat(p).st_mode & 0o777 | 0o700) # chmod u+rwx
838 except OSError:
845 except OSError:
839 pass
846 pass
840
847
841
848
842 _unified_diff = difflib.unified_diff
849 _unified_diff = difflib.unified_diff
843 if PYTHON3:
850 if PYTHON3:
844 import functools
851 import functools
845
852
846 _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)
853 _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)
847
854
848
855
849 def getdiff(expected, output, ref, err):
856 def getdiff(expected, output, ref, err):
850 servefail = False
857 servefail = False
851 lines = []
858 lines = []
852 for line in _unified_diff(expected, output, ref, err):
859 for line in _unified_diff(expected, output, ref, err):
853 if line.startswith(b'+++') or line.startswith(b'---'):
860 if line.startswith(b'+++') or line.startswith(b'---'):
854 line = line.replace(b'\\', b'/')
861 line = line.replace(b'\\', b'/')
855 if line.endswith(b' \n'):
862 if line.endswith(b' \n'):
856 line = line[:-2] + b'\n'
863 line = line[:-2] + b'\n'
857 lines.append(line)
864 lines.append(line)
858 if not servefail and line.startswith(
865 if not servefail and line.startswith(
859 b'+ abort: child process failed to start'
866 b'+ abort: child process failed to start'
860 ):
867 ):
861 servefail = True
868 servefail = True
862
869
863 return servefail, lines
870 return servefail, lines
864
871
865
872
866 verbose = False
873 verbose = False
867
874
868
875
869 def vlog(*msg):
876 def vlog(*msg):
870 """Log only when in verbose mode."""
877 """Log only when in verbose mode."""
871 if verbose is False:
878 if verbose is False:
872 return
879 return
873
880
874 return log(*msg)
881 return log(*msg)
875
882
876
883
877 # Bytes that break XML even in a CDATA block: control characters 0-31
884 # Bytes that break XML even in a CDATA block: control characters 0-31
878 # sans \t, \n and \r
885 # sans \t, \n and \r
879 CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")
886 CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")
880
887
881 # Match feature conditionalized output lines in the form, capturing the feature
888 # Match feature conditionalized output lines in the form, capturing the feature
882 # list in group 2, and the preceeding line output in group 1:
889 # list in group 2, and the preceeding line output in group 1:
883 #
890 #
884 # output..output (feature !)\n
891 # output..output (feature !)\n
885 optline = re.compile(br'(.*) \((.+?) !\)\n$')
892 optline = re.compile(br'(.*) \((.+?) !\)\n$')
886
893
887
894
888 def cdatasafe(data):
895 def cdatasafe(data):
889 """Make a string safe to include in a CDATA block.
896 """Make a string safe to include in a CDATA block.
890
897
891 Certain control characters are illegal in a CDATA block, and
898 Certain control characters are illegal in a CDATA block, and
892 there's no way to include a ]]> in a CDATA either. This function
899 there's no way to include a ]]> in a CDATA either. This function
893 replaces illegal bytes with ? and adds a space between the ]] so
900 replaces illegal bytes with ? and adds a space between the ]] so
894 that it won't break the CDATA block.
901 that it won't break the CDATA block.
895 """
902 """
896 return CDATA_EVIL.sub(b'?', data).replace(b']]>', b'] ]>')
903 return CDATA_EVIL.sub(b'?', data).replace(b']]>', b'] ]>')
897
904
898
905
899 def log(*msg):
906 def log(*msg):
900 """Log something to stdout.
907 """Log something to stdout.
901
908
902 Arguments are strings to print.
909 Arguments are strings to print.
903 """
910 """
904 with iolock:
911 with iolock:
905 if verbose:
912 if verbose:
906 print(verbose, end=' ')
913 print(verbose, end=' ')
907 for m in msg:
914 for m in msg:
908 print(m, end=' ')
915 print(m, end=' ')
909 print()
916 print()
910 sys.stdout.flush()
917 sys.stdout.flush()
911
918
912
919
913 def highlightdiff(line, color):
920 def highlightdiff(line, color):
914 if not color:
921 if not color:
915 return line
922 return line
916 assert pygmentspresent
923 assert pygmentspresent
917 return pygments.highlight(
924 return pygments.highlight(
918 line.decode('latin1'), difflexer, terminal256formatter
925 line.decode('latin1'), difflexer, terminal256formatter
919 ).encode('latin1')
926 ).encode('latin1')
920
927
921
928
922 def highlightmsg(msg, color):
929 def highlightmsg(msg, color):
923 if not color:
930 if not color:
924 return msg
931 return msg
925 assert pygmentspresent
932 assert pygmentspresent
926 return pygments.highlight(msg, runnerlexer, runnerformatter)
933 return pygments.highlight(msg, runnerlexer, runnerformatter)
927
934
928
935
929 def terminate(proc):
936 def terminate(proc):
930 """Terminate subprocess"""
937 """Terminate subprocess"""
931 vlog('# Terminating process %d' % proc.pid)
938 vlog('# Terminating process %d' % proc.pid)
932 try:
939 try:
933 proc.terminate()
940 proc.terminate()
934 except OSError:
941 except OSError:
935 pass
942 pass
936
943
937
944
938 def killdaemons(pidfile):
945 def killdaemons(pidfile):
939 import killdaemons as killmod
946 import killdaemons as killmod
940
947
941 return killmod.killdaemons(pidfile, tryhard=False, remove=True, logfn=vlog)
948 return killmod.killdaemons(pidfile, tryhard=False, remove=True, logfn=vlog)
942
949
943
950
944 class Test(unittest.TestCase):
951 class Test(unittest.TestCase):
945 """Encapsulates a single, runnable test.
952 """Encapsulates a single, runnable test.
946
953
947 While this class conforms to the unittest.TestCase API, it differs in that
954 While this class conforms to the unittest.TestCase API, it differs in that
948 instances need to be instantiated manually. (Typically, unittest.TestCase
955 instances need to be instantiated manually. (Typically, unittest.TestCase
949 classes are instantiated automatically by scanning modules.)
956 classes are instantiated automatically by scanning modules.)
950 """
957 """
951
958
952 # Status code reserved for skipped tests (used by hghave).
959 # Status code reserved for skipped tests (used by hghave).
953 SKIPPED_STATUS = 80
960 SKIPPED_STATUS = 80
954
961
955 def __init__(
962 def __init__(
956 self,
963 self,
957 path,
964 path,
958 outputdir,
965 outputdir,
959 tmpdir,
966 tmpdir,
960 keeptmpdir=False,
967 keeptmpdir=False,
961 debug=False,
968 debug=False,
962 first=False,
969 first=False,
963 timeout=None,
970 timeout=None,
964 startport=None,
971 startport=None,
965 extraconfigopts=None,
972 extraconfigopts=None,
966 shell=None,
973 shell=None,
967 hgcommand=None,
974 hgcommand=None,
968 slowtimeout=None,
975 slowtimeout=None,
969 usechg=False,
976 usechg=False,
970 chgdebug=False,
977 chgdebug=False,
971 useipv6=False,
978 useipv6=False,
972 ):
979 ):
973 """Create a test from parameters.
980 """Create a test from parameters.
974
981
975 path is the full path to the file defining the test.
982 path is the full path to the file defining the test.
976
983
977 tmpdir is the main temporary directory to use for this test.
984 tmpdir is the main temporary directory to use for this test.
978
985
979 keeptmpdir determines whether to keep the test's temporary directory
986 keeptmpdir determines whether to keep the test's temporary directory
980 after execution. It defaults to removal (False).
987 after execution. It defaults to removal (False).
981
988
982 debug mode will make the test execute verbosely, with unfiltered
989 debug mode will make the test execute verbosely, with unfiltered
983 output.
990 output.
984
991
985 timeout controls the maximum run time of the test. It is ignored when
992 timeout controls the maximum run time of the test. It is ignored when
986 debug is True. See slowtimeout for tests with #require slow.
993 debug is True. See slowtimeout for tests with #require slow.
987
994
988 slowtimeout overrides timeout if the test has #require slow.
995 slowtimeout overrides timeout if the test has #require slow.
989
996
990 startport controls the starting port number to use for this test. Each
997 startport controls the starting port number to use for this test. Each
991 test will reserve 3 port numbers for execution. It is the caller's
998 test will reserve 3 port numbers for execution. It is the caller's
992 responsibility to allocate a non-overlapping port range to Test
999 responsibility to allocate a non-overlapping port range to Test
993 instances.
1000 instances.
994
1001
995 extraconfigopts is an iterable of extra hgrc config options. Values
1002 extraconfigopts is an iterable of extra hgrc config options. Values
996 must have the form "key=value" (something understood by hgrc). Values
1003 must have the form "key=value" (something understood by hgrc). Values
997 of the form "foo.key=value" will result in "[foo] key=value".
1004 of the form "foo.key=value" will result in "[foo] key=value".
998
1005
999 shell is the shell to execute tests in.
1006 shell is the shell to execute tests in.
1000 """
1007 """
1001 if timeout is None:
1008 if timeout is None:
1002 timeout = defaults['timeout']
1009 timeout = defaults['timeout']
1003 if startport is None:
1010 if startport is None:
1004 startport = defaults['port']
1011 startport = defaults['port']
1005 if slowtimeout is None:
1012 if slowtimeout is None:
1006 slowtimeout = defaults['slowtimeout']
1013 slowtimeout = defaults['slowtimeout']
1007 self.path = path
1014 self.path = path
1008 self.relpath = os.path.relpath(path)
1015 self.relpath = os.path.relpath(path)
1009 self.bname = os.path.basename(path)
1016 self.bname = os.path.basename(path)
1010 self.name = _bytes2sys(self.bname)
1017 self.name = _bytes2sys(self.bname)
1011 self._testdir = os.path.dirname(path)
1018 self._testdir = os.path.dirname(path)
1012 self._outputdir = outputdir
1019 self._outputdir = outputdir
1013 self._tmpname = os.path.basename(path)
1020 self._tmpname = os.path.basename(path)
1014 self.errpath = os.path.join(self._outputdir, b'%s.err' % self.bname)
1021 self.errpath = os.path.join(self._outputdir, b'%s.err' % self.bname)
1015
1022
1016 self._threadtmp = tmpdir
1023 self._threadtmp = tmpdir
1017 self._keeptmpdir = keeptmpdir
1024 self._keeptmpdir = keeptmpdir
1018 self._debug = debug
1025 self._debug = debug
1019 self._first = first
1026 self._first = first
1020 self._timeout = timeout
1027 self._timeout = timeout
1021 self._slowtimeout = slowtimeout
1028 self._slowtimeout = slowtimeout
1022 self._startport = startport
1029 self._startport = startport
1023 self._extraconfigopts = extraconfigopts or []
1030 self._extraconfigopts = extraconfigopts or []
1024 self._shell = _sys2bytes(shell)
1031 self._shell = _sys2bytes(shell)
1025 self._hgcommand = hgcommand or b'hg'
1032 self._hgcommand = hgcommand or b'hg'
1026 self._usechg = usechg
1033 self._usechg = usechg
1027 self._chgdebug = chgdebug
1034 self._chgdebug = chgdebug
1028 self._useipv6 = useipv6
1035 self._useipv6 = useipv6
1029
1036
1030 self._aborted = False
1037 self._aborted = False
1031 self._daemonpids = []
1038 self._daemonpids = []
1032 self._finished = None
1039 self._finished = None
1033 self._ret = None
1040 self._ret = None
1034 self._out = None
1041 self._out = None
1035 self._skipped = None
1042 self._skipped = None
1036 self._testtmp = None
1043 self._testtmp = None
1037 self._chgsockdir = None
1044 self._chgsockdir = None
1038
1045
1039 self._refout = self.readrefout()
1046 self._refout = self.readrefout()
1040
1047
1041 def readrefout(self):
1048 def readrefout(self):
1042 """read reference output"""
1049 """read reference output"""
1043 # If we're not in --debug mode and reference output file exists,
1050 # If we're not in --debug mode and reference output file exists,
1044 # check test output against it.
1051 # check test output against it.
1045 if self._debug:
1052 if self._debug:
1046 return None # to match "out is None"
1053 return None # to match "out is None"
1047 elif os.path.exists(self.refpath):
1054 elif os.path.exists(self.refpath):
1048 with open(self.refpath, 'rb') as f:
1055 with open(self.refpath, 'rb') as f:
1049 return f.read().splitlines(True)
1056 return f.read().splitlines(True)
1050 else:
1057 else:
1051 return []
1058 return []
1052
1059
1053 # needed to get base class __repr__ running
1060 # needed to get base class __repr__ running
1054 @property
1061 @property
1055 def _testMethodName(self):
1062 def _testMethodName(self):
1056 return self.name
1063 return self.name
1057
1064
1058 def __str__(self):
1065 def __str__(self):
1059 return self.name
1066 return self.name
1060
1067
1061 def shortDescription(self):
1068 def shortDescription(self):
1062 return self.name
1069 return self.name
1063
1070
1064 def setUp(self):
1071 def setUp(self):
1065 """Tasks to perform before run()."""
1072 """Tasks to perform before run()."""
1066 self._finished = False
1073 self._finished = False
1067 self._ret = None
1074 self._ret = None
1068 self._out = None
1075 self._out = None
1069 self._skipped = None
1076 self._skipped = None
1070
1077
1071 try:
1078 try:
1072 os.mkdir(self._threadtmp)
1079 os.mkdir(self._threadtmp)
1073 except OSError as e:
1080 except OSError as e:
1074 if e.errno != errno.EEXIST:
1081 if e.errno != errno.EEXIST:
1075 raise
1082 raise
1076
1083
1077 name = self._tmpname
1084 name = self._tmpname
1078 self._testtmp = os.path.join(self._threadtmp, name)
1085 self._testtmp = os.path.join(self._threadtmp, name)
1079 os.mkdir(self._testtmp)
1086 os.mkdir(self._testtmp)
1080
1087
1081 # Remove any previous output files.
1088 # Remove any previous output files.
1082 if os.path.exists(self.errpath):
1089 if os.path.exists(self.errpath):
1083 try:
1090 try:
1084 os.remove(self.errpath)
1091 os.remove(self.errpath)
1085 except OSError as e:
1092 except OSError as e:
1086 # We might have raced another test to clean up a .err
1093 # We might have raced another test to clean up a .err
1087 # file, so ignore ENOENT when removing a previous .err
1094 # file, so ignore ENOENT when removing a previous .err
1088 # file.
1095 # file.
1089 if e.errno != errno.ENOENT:
1096 if e.errno != errno.ENOENT:
1090 raise
1097 raise
1091
1098
1092 if self._usechg:
1099 if self._usechg:
1093 self._chgsockdir = os.path.join(
1100 self._chgsockdir = os.path.join(
1094 self._threadtmp, b'%s.chgsock' % name
1101 self._threadtmp, b'%s.chgsock' % name
1095 )
1102 )
1096 os.mkdir(self._chgsockdir)
1103 os.mkdir(self._chgsockdir)
1097
1104
1098 def run(self, result):
1105 def run(self, result):
1099 """Run this test and report results against a TestResult instance."""
1106 """Run this test and report results against a TestResult instance."""
1100 # This function is extremely similar to unittest.TestCase.run(). Once
1107 # This function is extremely similar to unittest.TestCase.run(). Once
1101 # we require Python 2.7 (or at least its version of unittest), this
1108 # we require Python 2.7 (or at least its version of unittest), this
1102 # function can largely go away.
1109 # function can largely go away.
1103 self._result = result
1110 self._result = result
1104 result.startTest(self)
1111 result.startTest(self)
1105 try:
1112 try:
1106 try:
1113 try:
1107 self.setUp()
1114 self.setUp()
1108 except (KeyboardInterrupt, SystemExit):
1115 except (KeyboardInterrupt, SystemExit):
1109 self._aborted = True
1116 self._aborted = True
1110 raise
1117 raise
1111 except Exception:
1118 except Exception:
1112 result.addError(self, sys.exc_info())
1119 result.addError(self, sys.exc_info())
1113 return
1120 return
1114
1121
1115 success = False
1122 success = False
1116 try:
1123 try:
1117 self.runTest()
1124 self.runTest()
1118 except KeyboardInterrupt:
1125 except KeyboardInterrupt:
1119 self._aborted = True
1126 self._aborted = True
1120 raise
1127 raise
1121 except unittest.SkipTest as e:
1128 except unittest.SkipTest as e:
1122 result.addSkip(self, str(e))
1129 result.addSkip(self, str(e))
1123 # The base class will have already counted this as a
1130 # The base class will have already counted this as a
1124 # test we "ran", but we want to exclude skipped tests
1131 # test we "ran", but we want to exclude skipped tests
1125 # from those we count towards those run.
1132 # from those we count towards those run.
1126 result.testsRun -= 1
1133 result.testsRun -= 1
1127 except self.failureException as e:
1134 except self.failureException as e:
1128 # This differs from unittest in that we don't capture
1135 # This differs from unittest in that we don't capture
1129 # the stack trace. This is for historical reasons and
1136 # the stack trace. This is for historical reasons and
1130 # this decision could be revisited in the future,
1137 # this decision could be revisited in the future,
1131 # especially for PythonTest instances.
1138 # especially for PythonTest instances.
1132 if result.addFailure(self, str(e)):
1139 if result.addFailure(self, str(e)):
1133 success = True
1140 success = True
1134 except Exception:
1141 except Exception:
1135 result.addError(self, sys.exc_info())
1142 result.addError(self, sys.exc_info())
1136 else:
1143 else:
1137 success = True
1144 success = True
1138
1145
1139 try:
1146 try:
1140 self.tearDown()
1147 self.tearDown()
1141 except (KeyboardInterrupt, SystemExit):
1148 except (KeyboardInterrupt, SystemExit):
1142 self._aborted = True
1149 self._aborted = True
1143 raise
1150 raise
1144 except Exception:
1151 except Exception:
1145 result.addError(self, sys.exc_info())
1152 result.addError(self, sys.exc_info())
1146 success = False
1153 success = False
1147
1154
1148 if success:
1155 if success:
1149 result.addSuccess(self)
1156 result.addSuccess(self)
1150 finally:
1157 finally:
1151 result.stopTest(self, interrupted=self._aborted)
1158 result.stopTest(self, interrupted=self._aborted)
1152
1159
1153 def runTest(self):
1160 def runTest(self):
1154 """Run this test instance.
1161 """Run this test instance.
1155
1162
1156 This will return a tuple describing the result of the test.
1163 This will return a tuple describing the result of the test.
1157 """
1164 """
1158 env = self._getenv()
1165 env = self._getenv()
1159 self._genrestoreenv(env)
1166 self._genrestoreenv(env)
1160 self._daemonpids.append(env['DAEMON_PIDS'])
1167 self._daemonpids.append(env['DAEMON_PIDS'])
1161 self._createhgrc(env['HGRCPATH'])
1168 self._createhgrc(env['HGRCPATH'])
1162
1169
1163 vlog('# Test', self.name)
1170 vlog('# Test', self.name)
1164
1171
1165 ret, out = self._run(env)
1172 ret, out = self._run(env)
1166 self._finished = True
1173 self._finished = True
1167 self._ret = ret
1174 self._ret = ret
1168 self._out = out
1175 self._out = out
1169
1176
1170 def describe(ret):
1177 def describe(ret):
1171 if ret < 0:
1178 if ret < 0:
1172 return 'killed by signal: %d' % -ret
1179 return 'killed by signal: %d' % -ret
1173 return 'returned error code %d' % ret
1180 return 'returned error code %d' % ret
1174
1181
1175 self._skipped = False
1182 self._skipped = False
1176
1183
1177 if ret == self.SKIPPED_STATUS:
1184 if ret == self.SKIPPED_STATUS:
1178 if out is None: # Debug mode, nothing to parse.
1185 if out is None: # Debug mode, nothing to parse.
1179 missing = ['unknown']
1186 missing = ['unknown']
1180 failed = None
1187 failed = None
1181 else:
1188 else:
1182 missing, failed = TTest.parsehghaveoutput(out)
1189 missing, failed = TTest.parsehghaveoutput(out)
1183
1190
1184 if not missing:
1191 if not missing:
1185 missing = ['skipped']
1192 missing = ['skipped']
1186
1193
1187 if failed:
1194 if failed:
1188 self.fail('hg have failed checking for %s' % failed[-1])
1195 self.fail('hg have failed checking for %s' % failed[-1])
1189 else:
1196 else:
1190 self._skipped = True
1197 self._skipped = True
1191 raise unittest.SkipTest(missing[-1])
1198 raise unittest.SkipTest(missing[-1])
1192 elif ret == 'timeout':
1199 elif ret == 'timeout':
1193 self.fail('timed out')
1200 self.fail('timed out')
1194 elif ret is False:
1201 elif ret is False:
1195 self.fail('no result code from test')
1202 self.fail('no result code from test')
1196 elif out != self._refout:
1203 elif out != self._refout:
1197 # Diff generation may rely on written .err file.
1204 # Diff generation may rely on written .err file.
1198 if (
1205 if (
1199 (ret != 0 or out != self._refout)
1206 (ret != 0 or out != self._refout)
1200 and not self._skipped
1207 and not self._skipped
1201 and not self._debug
1208 and not self._debug
1202 ):
1209 ):
1203 with open(self.errpath, 'wb') as f:
1210 with open(self.errpath, 'wb') as f:
1204 for line in out:
1211 for line in out:
1205 f.write(line)
1212 f.write(line)
1206
1213
1207 # The result object handles diff calculation for us.
1214 # The result object handles diff calculation for us.
1208 with firstlock:
1215 with firstlock:
1209 if self._result.addOutputMismatch(self, ret, out, self._refout):
1216 if self._result.addOutputMismatch(self, ret, out, self._refout):
1210 # change was accepted, skip failing
1217 # change was accepted, skip failing
1211 return
1218 return
1212 if self._first:
1219 if self._first:
1213 global firsterror
1220 global firsterror
1214 firsterror = True
1221 firsterror = True
1215
1222
1216 if ret:
1223 if ret:
1217 msg = 'output changed and ' + describe(ret)
1224 msg = 'output changed and ' + describe(ret)
1218 else:
1225 else:
1219 msg = 'output changed'
1226 msg = 'output changed'
1220
1227
1221 self.fail(msg)
1228 self.fail(msg)
1222 elif ret:
1229 elif ret:
1223 self.fail(describe(ret))
1230 self.fail(describe(ret))
1224
1231
1225 def tearDown(self):
1232 def tearDown(self):
1226 """Tasks to perform after run()."""
1233 """Tasks to perform after run()."""
1227 for entry in self._daemonpids:
1234 for entry in self._daemonpids:
1228 killdaemons(entry)
1235 killdaemons(entry)
1229 self._daemonpids = []
1236 self._daemonpids = []
1230
1237
1231 if self._keeptmpdir:
1238 if self._keeptmpdir:
1232 log(
1239 log(
1233 '\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s'
1240 '\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s'
1234 % (
1241 % (
1235 _bytes2sys(self._testtmp),
1242 _bytes2sys(self._testtmp),
1236 _bytes2sys(self._threadtmp),
1243 _bytes2sys(self._threadtmp),
1237 )
1244 )
1238 )
1245 )
1239 else:
1246 else:
1240 try:
1247 try:
1241 shutil.rmtree(self._testtmp)
1248 shutil.rmtree(self._testtmp)
1242 except OSError:
1249 except OSError:
1243 # unreadable directory may be left in $TESTTMP; fix permission
1250 # unreadable directory may be left in $TESTTMP; fix permission
1244 # and try again
1251 # and try again
1245 makecleanable(self._testtmp)
1252 makecleanable(self._testtmp)
1246 shutil.rmtree(self._testtmp, True)
1253 shutil.rmtree(self._testtmp, True)
1247 shutil.rmtree(self._threadtmp, True)
1254 shutil.rmtree(self._threadtmp, True)
1248
1255
1249 if self._usechg:
1256 if self._usechg:
1250 # chgservers will stop automatically after they find the socket
1257 # chgservers will stop automatically after they find the socket
1251 # files are deleted
1258 # files are deleted
1252 shutil.rmtree(self._chgsockdir, True)
1259 shutil.rmtree(self._chgsockdir, True)
1253
1260
1254 if (
1261 if (
1255 (self._ret != 0 or self._out != self._refout)
1262 (self._ret != 0 or self._out != self._refout)
1256 and not self._skipped
1263 and not self._skipped
1257 and not self._debug
1264 and not self._debug
1258 and self._out
1265 and self._out
1259 ):
1266 ):
1260 with open(self.errpath, 'wb') as f:
1267 with open(self.errpath, 'wb') as f:
1261 for line in self._out:
1268 for line in self._out:
1262 f.write(line)
1269 f.write(line)
1263
1270
1264 vlog("# Ret was:", self._ret, '(%s)' % self.name)
1271 vlog("# Ret was:", self._ret, '(%s)' % self.name)
1265
1272
1266 def _run(self, env):
1273 def _run(self, env):
1267 # This should be implemented in child classes to run tests.
1274 # This should be implemented in child classes to run tests.
1268 raise unittest.SkipTest('unknown test type')
1275 raise unittest.SkipTest('unknown test type')
1269
1276
1270 def abort(self):
1277 def abort(self):
1271 """Terminate execution of this test."""
1278 """Terminate execution of this test."""
1272 self._aborted = True
1279 self._aborted = True
1273
1280
1274 def _portmap(self, i):
1281 def _portmap(self, i):
1275 offset = b'' if i == 0 else b'%d' % i
1282 offset = b'' if i == 0 else b'%d' % i
1276 return (br':%d\b' % (self._startport + i), b':$HGPORT%s' % offset)
1283 return (br':%d\b' % (self._startport + i), b':$HGPORT%s' % offset)
1277
1284
1278 def _getreplacements(self):
1285 def _getreplacements(self):
1279 """Obtain a mapping of text replacements to apply to test output.
1286 """Obtain a mapping of text replacements to apply to test output.
1280
1287
1281 Test output needs to be normalized so it can be compared to expected
1288 Test output needs to be normalized so it can be compared to expected
1282 output. This function defines how some of that normalization will
1289 output. This function defines how some of that normalization will
1283 occur.
1290 occur.
1284 """
1291 """
1285 r = [
1292 r = [
1286 # This list should be parallel to defineport in _getenv
1293 # This list should be parallel to defineport in _getenv
1287 self._portmap(0),
1294 self._portmap(0),
1288 self._portmap(1),
1295 self._portmap(1),
1289 self._portmap(2),
1296 self._portmap(2),
1290 (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'),
1297 (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'),
1291 (br'\bHG_TXNID=TXN:[a-f0-9]{40}\b', br'HG_TXNID=TXN:$ID$'),
1298 (br'\bHG_TXNID=TXN:[a-f0-9]{40}\b', br'HG_TXNID=TXN:$ID$'),
1292 ]
1299 ]
1293 r.append((self._escapepath(self._testtmp), b'$TESTTMP'))
1300 r.append((self._escapepath(self._testtmp), b'$TESTTMP'))
1294
1301
1295 replacementfile = os.path.join(self._testdir, b'common-pattern.py')
1302 replacementfile = os.path.join(self._testdir, b'common-pattern.py')
1296
1303
1297 if os.path.exists(replacementfile):
1304 if os.path.exists(replacementfile):
1298 data = {}
1305 data = {}
1299 with open(replacementfile, mode='rb') as source:
1306 with open(replacementfile, mode='rb') as source:
1300 # the intermediate 'compile' step help with debugging
1307 # the intermediate 'compile' step help with debugging
1301 code = compile(source.read(), replacementfile, 'exec')
1308 code = compile(source.read(), replacementfile, 'exec')
1302 exec(code, data)
1309 exec(code, data)
1303 for value in data.get('substitutions', ()):
1310 for value in data.get('substitutions', ()):
1304 if len(value) != 2:
1311 if len(value) != 2:
1305 msg = 'malformatted substitution in %s: %r'
1312 msg = 'malformatted substitution in %s: %r'
1306 msg %= (replacementfile, value)
1313 msg %= (replacementfile, value)
1307 raise ValueError(msg)
1314 raise ValueError(msg)
1308 r.append(value)
1315 r.append(value)
1309 return r
1316 return r
1310
1317
1311 def _escapepath(self, p):
1318 def _escapepath(self, p):
1312 if os.name == 'nt':
1319 if os.name == 'nt':
1313 return b''.join(
1320 return b''.join(
1314 c.isalpha()
1321 c.isalpha()
1315 and b'[%s%s]' % (c.lower(), c.upper())
1322 and b'[%s%s]' % (c.lower(), c.upper())
1316 or c in b'/\\'
1323 or c in b'/\\'
1317 and br'[/\\]'
1324 and br'[/\\]'
1318 or c.isdigit()
1325 or c.isdigit()
1319 and c
1326 and c
1320 or b'\\' + c
1327 or b'\\' + c
1321 for c in [p[i : i + 1] for i in range(len(p))]
1328 for c in [p[i : i + 1] for i in range(len(p))]
1322 )
1329 )
1323 else:
1330 else:
1324 return re.escape(p)
1331 return re.escape(p)
1325
1332
1326 def _localip(self):
1333 def _localip(self):
1327 if self._useipv6:
1334 if self._useipv6:
1328 return b'::1'
1335 return b'::1'
1329 else:
1336 else:
1330 return b'127.0.0.1'
1337 return b'127.0.0.1'
1331
1338
1332 def _genrestoreenv(self, testenv):
1339 def _genrestoreenv(self, testenv):
1333 """Generate a script that can be used by tests to restore the original
1340 """Generate a script that can be used by tests to restore the original
1334 environment."""
1341 environment."""
1335 # Put the restoreenv script inside self._threadtmp
1342 # Put the restoreenv script inside self._threadtmp
1336 scriptpath = os.path.join(self._threadtmp, b'restoreenv.sh')
1343 scriptpath = os.path.join(self._threadtmp, b'restoreenv.sh')
1337 testenv['HGTEST_RESTOREENV'] = _bytes2sys(scriptpath)
1344 testenv['HGTEST_RESTOREENV'] = _bytes2sys(scriptpath)
1338
1345
1339 # Only restore environment variable names that the shell allows
1346 # Only restore environment variable names that the shell allows
1340 # us to export.
1347 # us to export.
1341 name_regex = re.compile('^[a-zA-Z][a-zA-Z0-9_]*$')
1348 name_regex = re.compile('^[a-zA-Z][a-zA-Z0-9_]*$')
1342
1349
1343 # Do not restore these variables; otherwise tests would fail.
1350 # Do not restore these variables; otherwise tests would fail.
1344 reqnames = {'PYTHON', 'TESTDIR', 'TESTTMP'}
1351 reqnames = {'PYTHON', 'TESTDIR', 'TESTTMP'}
1345
1352
1346 with open(scriptpath, 'w') as envf:
1353 with open(scriptpath, 'w') as envf:
1347 for name, value in origenviron.items():
1354 for name, value in origenviron.items():
1348 if not name_regex.match(name):
1355 if not name_regex.match(name):
1349 # Skip environment variables with unusual names not
1356 # Skip environment variables with unusual names not
1350 # allowed by most shells.
1357 # allowed by most shells.
1351 continue
1358 continue
1352 if name in reqnames:
1359 if name in reqnames:
1353 continue
1360 continue
1354 envf.write('%s=%s\n' % (name, shellquote(value)))
1361 envf.write('%s=%s\n' % (name, shellquote(value)))
1355
1362
1356 for name in testenv:
1363 for name in testenv:
1357 if name in origenviron or name in reqnames:
1364 if name in origenviron or name in reqnames:
1358 continue
1365 continue
1359 envf.write('unset %s\n' % (name,))
1366 envf.write('unset %s\n' % (name,))
1360
1367
1361 def _getenv(self):
1368 def _getenv(self):
1362 """Obtain environment variables to use during test execution."""
1369 """Obtain environment variables to use during test execution."""
1363
1370
1364 def defineport(i):
1371 def defineport(i):
1365 offset = '' if i == 0 else '%s' % i
1372 offset = '' if i == 0 else '%s' % i
1366 env["HGPORT%s" % offset] = '%s' % (self._startport + i)
1373 env["HGPORT%s" % offset] = '%s' % (self._startport + i)
1367
1374
1368 env = os.environ.copy()
1375 env = os.environ.copy()
1369 env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase') or ''
1376 env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase') or ''
1370 env['HGEMITWARNINGS'] = '1'
1377 env['HGEMITWARNINGS'] = '1'
1371 env['TESTTMP'] = _bytes2sys(self._testtmp)
1378 env['TESTTMP'] = _bytes2sys(self._testtmp)
1372 env['TESTNAME'] = self.name
1379 env['TESTNAME'] = self.name
1373 env['HOME'] = _bytes2sys(self._testtmp)
1380 env['HOME'] = _bytes2sys(self._testtmp)
1374 if os.name == 'nt':
1381 if os.name == 'nt':
1375 env['REALUSERPROFILE'] = env['USERPROFILE']
1382 env['REALUSERPROFILE'] = env['USERPROFILE']
1376 # py3.8+ ignores HOME: https://bugs.python.org/issue36264
1383 # py3.8+ ignores HOME: https://bugs.python.org/issue36264
1377 env['USERPROFILE'] = env['HOME']
1384 env['USERPROFILE'] = env['HOME']
1378 formated_timeout = _bytes2sys(b"%d" % default_defaults['timeout'][1])
1385 formated_timeout = _bytes2sys(b"%d" % default_defaults['timeout'][1])
1379 env['HGTEST_TIMEOUT_DEFAULT'] = formated_timeout
1386 env['HGTEST_TIMEOUT_DEFAULT'] = formated_timeout
1380 env['HGTEST_TIMEOUT'] = _bytes2sys(b"%d" % self._timeout)
1387 env['HGTEST_TIMEOUT'] = _bytes2sys(b"%d" % self._timeout)
1381 # This number should match portneeded in _getport
1388 # This number should match portneeded in _getport
1382 for port in xrange(3):
1389 for port in xrange(3):
1383 # This list should be parallel to _portmap in _getreplacements
1390 # This list should be parallel to _portmap in _getreplacements
1384 defineport(port)
1391 defineport(port)
1385 env["HGRCPATH"] = _bytes2sys(os.path.join(self._threadtmp, b'.hgrc'))
1392 env["HGRCPATH"] = _bytes2sys(os.path.join(self._threadtmp, b'.hgrc'))
1386 env["DAEMON_PIDS"] = _bytes2sys(
1393 env["DAEMON_PIDS"] = _bytes2sys(
1387 os.path.join(self._threadtmp, b'daemon.pids')
1394 os.path.join(self._threadtmp, b'daemon.pids')
1388 )
1395 )
1389 env["HGEDITOR"] = (
1396 env["HGEDITOR"] = (
1390 '"' + sysexecutable + '"' + ' -c "import sys; sys.exit(0)"'
1397 '"' + sysexecutable + '"' + ' -c "import sys; sys.exit(0)"'
1391 )
1398 )
1392 env["HGUSER"] = "test"
1399 env["HGUSER"] = "test"
1393 env["HGENCODING"] = "ascii"
1400 env["HGENCODING"] = "ascii"
1394 env["HGENCODINGMODE"] = "strict"
1401 env["HGENCODINGMODE"] = "strict"
1395 env["HGHOSTNAME"] = "test-hostname"
1402 env["HGHOSTNAME"] = "test-hostname"
1396 env['HGIPV6'] = str(int(self._useipv6))
1403 env['HGIPV6'] = str(int(self._useipv6))
1397 # See contrib/catapipe.py for how to use this functionality.
1404 # See contrib/catapipe.py for how to use this functionality.
1398 if 'HGTESTCATAPULTSERVERPIPE' not in env:
1405 if 'HGTESTCATAPULTSERVERPIPE' not in env:
1399 # If we don't have HGTESTCATAPULTSERVERPIPE explicitly set, pull the
1406 # If we don't have HGTESTCATAPULTSERVERPIPE explicitly set, pull the
1400 # non-test one in as a default, otherwise set to devnull
1407 # non-test one in as a default, otherwise set to devnull
1401 env['HGTESTCATAPULTSERVERPIPE'] = env.get(
1408 env['HGTESTCATAPULTSERVERPIPE'] = env.get(
1402 'HGCATAPULTSERVERPIPE', os.devnull
1409 'HGCATAPULTSERVERPIPE', os.devnull
1403 )
1410 )
1404
1411
1405 extraextensions = []
1412 extraextensions = []
1406 for opt in self._extraconfigopts:
1413 for opt in self._extraconfigopts:
1407 section, key = opt.split('.', 1)
1414 section, key = opt.split('.', 1)
1408 if section != 'extensions':
1415 if section != 'extensions':
1409 continue
1416 continue
1410 name = key.split('=', 1)[0]
1417 name = key.split('=', 1)[0]
1411 extraextensions.append(name)
1418 extraextensions.append(name)
1412
1419
1413 if extraextensions:
1420 if extraextensions:
1414 env['HGTESTEXTRAEXTENSIONS'] = ' '.join(extraextensions)
1421 env['HGTESTEXTRAEXTENSIONS'] = ' '.join(extraextensions)
1415
1422
1416 # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw
1423 # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw
1417 # IP addresses.
1424 # IP addresses.
1418 env['LOCALIP'] = _bytes2sys(self._localip())
1425 env['LOCALIP'] = _bytes2sys(self._localip())
1419
1426
1420 # This has the same effect as Py_LegacyWindowsStdioFlag in exewrapper.c,
1427 # This has the same effect as Py_LegacyWindowsStdioFlag in exewrapper.c,
1421 # but this is needed for testing python instances like dummyssh,
1428 # but this is needed for testing python instances like dummyssh,
1422 # dummysmtpd.py, and dumbhttp.py.
1429 # dummysmtpd.py, and dumbhttp.py.
1423 if PYTHON3 and os.name == 'nt':
1430 if PYTHON3 and os.name == 'nt':
1424 env['PYTHONLEGACYWINDOWSSTDIO'] = '1'
1431 env['PYTHONLEGACYWINDOWSSTDIO'] = '1'
1425
1432
1426 # Modified HOME in test environment can confuse Rust tools. So set
1433 # Modified HOME in test environment can confuse Rust tools. So set
1427 # CARGO_HOME and RUSTUP_HOME automatically if a Rust toolchain is
1434 # CARGO_HOME and RUSTUP_HOME automatically if a Rust toolchain is
1428 # present and these variables aren't already defined.
1435 # present and these variables aren't already defined.
1429 cargo_home_path = os.path.expanduser('~/.cargo')
1436 cargo_home_path = os.path.expanduser('~/.cargo')
1430 rustup_home_path = os.path.expanduser('~/.rustup')
1437 rustup_home_path = os.path.expanduser('~/.rustup')
1431
1438
1432 if os.path.exists(cargo_home_path) and b'CARGO_HOME' not in osenvironb:
1439 if os.path.exists(cargo_home_path) and b'CARGO_HOME' not in osenvironb:
1433 env['CARGO_HOME'] = cargo_home_path
1440 env['CARGO_HOME'] = cargo_home_path
1434 if (
1441 if (
1435 os.path.exists(rustup_home_path)
1442 os.path.exists(rustup_home_path)
1436 and b'RUSTUP_HOME' not in osenvironb
1443 and b'RUSTUP_HOME' not in osenvironb
1437 ):
1444 ):
1438 env['RUSTUP_HOME'] = rustup_home_path
1445 env['RUSTUP_HOME'] = rustup_home_path
1439
1446
1440 # Reset some environment variables to well-known values so that
1447 # Reset some environment variables to well-known values so that
1441 # the tests produce repeatable output.
1448 # the tests produce repeatable output.
1442 env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
1449 env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
1443 env['TZ'] = 'GMT'
1450 env['TZ'] = 'GMT'
1444 env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
1451 env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
1445 env['COLUMNS'] = '80'
1452 env['COLUMNS'] = '80'
1446 env['TERM'] = 'xterm'
1453 env['TERM'] = 'xterm'
1447
1454
1448 dropped = [
1455 dropped = [
1449 'CDPATH',
1456 'CDPATH',
1450 'CHGDEBUG',
1457 'CHGDEBUG',
1451 'EDITOR',
1458 'EDITOR',
1452 'GREP_OPTIONS',
1459 'GREP_OPTIONS',
1453 'HG',
1460 'HG',
1454 'HGMERGE',
1461 'HGMERGE',
1455 'HGPLAIN',
1462 'HGPLAIN',
1456 'HGPLAINEXCEPT',
1463 'HGPLAINEXCEPT',
1457 'HGPROF',
1464 'HGPROF',
1458 'http_proxy',
1465 'http_proxy',
1459 'no_proxy',
1466 'no_proxy',
1460 'NO_PROXY',
1467 'NO_PROXY',
1461 'PAGER',
1468 'PAGER',
1462 'VISUAL',
1469 'VISUAL',
1463 ]
1470 ]
1464
1471
1465 for k in dropped:
1472 for k in dropped:
1466 if k in env:
1473 if k in env:
1467 del env[k]
1474 del env[k]
1468
1475
1469 # unset env related to hooks
1476 # unset env related to hooks
1470 for k in list(env):
1477 for k in list(env):
1471 if k.startswith('HG_'):
1478 if k.startswith('HG_'):
1472 del env[k]
1479 del env[k]
1473
1480
1474 if self._usechg:
1481 if self._usechg:
1475 env['CHGSOCKNAME'] = os.path.join(self._chgsockdir, b'server')
1482 env['CHGSOCKNAME'] = os.path.join(self._chgsockdir, b'server')
1476 if self._chgdebug:
1483 if self._chgdebug:
1477 env['CHGDEBUG'] = 'true'
1484 env['CHGDEBUG'] = 'true'
1478
1485
1479 return env
1486 return env
1480
1487
1481 def _createhgrc(self, path):
1488 def _createhgrc(self, path):
1482 """Create an hgrc file for this test."""
1489 """Create an hgrc file for this test."""
1483 with open(path, 'wb') as hgrc:
1490 with open(path, 'wb') as hgrc:
1484 hgrc.write(b'[ui]\n')
1491 hgrc.write(b'[ui]\n')
1485 hgrc.write(b'slash = True\n')
1492 hgrc.write(b'slash = True\n')
1486 hgrc.write(b'interactive = False\n')
1493 hgrc.write(b'interactive = False\n')
1487 hgrc.write(b'detailed-exit-code = True\n')
1494 hgrc.write(b'detailed-exit-code = True\n')
1488 hgrc.write(b'merge = internal:merge\n')
1495 hgrc.write(b'merge = internal:merge\n')
1489 hgrc.write(b'mergemarkers = detailed\n')
1496 hgrc.write(b'mergemarkers = detailed\n')
1490 hgrc.write(b'promptecho = True\n')
1497 hgrc.write(b'promptecho = True\n')
1491 hgrc.write(b'timeout.warn=15\n')
1498 hgrc.write(b'timeout.warn=15\n')
1492 hgrc.write(b'[defaults]\n')
1499 hgrc.write(b'[defaults]\n')
1493 hgrc.write(b'[devel]\n')
1500 hgrc.write(b'[devel]\n')
1494 hgrc.write(b'all-warnings = true\n')
1501 hgrc.write(b'all-warnings = true\n')
1495 hgrc.write(b'default-date = 0 0\n')
1502 hgrc.write(b'default-date = 0 0\n')
1496 hgrc.write(b'[largefiles]\n')
1503 hgrc.write(b'[largefiles]\n')
1497 hgrc.write(
1504 hgrc.write(
1498 b'usercache = %s\n'
1505 b'usercache = %s\n'
1499 % (os.path.join(self._testtmp, b'.cache/largefiles'))
1506 % (os.path.join(self._testtmp, b'.cache/largefiles'))
1500 )
1507 )
1501 hgrc.write(b'[lfs]\n')
1508 hgrc.write(b'[lfs]\n')
1502 hgrc.write(
1509 hgrc.write(
1503 b'usercache = %s\n'
1510 b'usercache = %s\n'
1504 % (os.path.join(self._testtmp, b'.cache/lfs'))
1511 % (os.path.join(self._testtmp, b'.cache/lfs'))
1505 )
1512 )
1506 hgrc.write(b'[web]\n')
1513 hgrc.write(b'[web]\n')
1507 hgrc.write(b'address = localhost\n')
1514 hgrc.write(b'address = localhost\n')
1508 hgrc.write(b'ipv6 = %r\n' % self._useipv6)
1515 hgrc.write(b'ipv6 = %r\n' % self._useipv6)
1509 hgrc.write(b'server-header = testing stub value\n')
1516 hgrc.write(b'server-header = testing stub value\n')
1510
1517
1511 for opt in self._extraconfigopts:
1518 for opt in self._extraconfigopts:
1512 section, key = _sys2bytes(opt).split(b'.', 1)
1519 section, key = _sys2bytes(opt).split(b'.', 1)
1513 assert b'=' in key, (
1520 assert b'=' in key, (
1514 'extra config opt %s must ' 'have an = for assignment' % opt
1521 'extra config opt %s must ' 'have an = for assignment' % opt
1515 )
1522 )
1516 hgrc.write(b'[%s]\n%s\n' % (section, key))
1523 hgrc.write(b'[%s]\n%s\n' % (section, key))
1517
1524
1518 def fail(self, msg):
1525 def fail(self, msg):
1519 # unittest differentiates between errored and failed.
1526 # unittest differentiates between errored and failed.
1520 # Failed is denoted by AssertionError (by default at least).
1527 # Failed is denoted by AssertionError (by default at least).
1521 raise AssertionError(msg)
1528 raise AssertionError(msg)
1522
1529
1523 def _runcommand(self, cmd, env, normalizenewlines=False):
1530 def _runcommand(self, cmd, env, normalizenewlines=False):
1524 """Run command in a sub-process, capturing the output (stdout and
1531 """Run command in a sub-process, capturing the output (stdout and
1525 stderr).
1532 stderr).
1526
1533
1527 Return a tuple (exitcode, output). output is None in debug mode.
1534 Return a tuple (exitcode, output). output is None in debug mode.
1528 """
1535 """
1529 if self._debug:
1536 if self._debug:
1530 proc = subprocess.Popen(
1537 proc = subprocess.Popen(
1531 _bytes2sys(cmd),
1538 _bytes2sys(cmd),
1532 shell=True,
1539 shell=True,
1533 cwd=_bytes2sys(self._testtmp),
1540 cwd=_bytes2sys(self._testtmp),
1534 env=env,
1541 env=env,
1535 )
1542 )
1536 ret = proc.wait()
1543 ret = proc.wait()
1537 return (ret, None)
1544 return (ret, None)
1538
1545
1539 proc = Popen4(cmd, self._testtmp, self._timeout, env)
1546 proc = Popen4(cmd, self._testtmp, self._timeout, env)
1540
1547
1541 def cleanup():
1548 def cleanup():
1542 terminate(proc)
1549 terminate(proc)
1543 ret = proc.wait()
1550 ret = proc.wait()
1544 if ret == 0:
1551 if ret == 0:
1545 ret = signal.SIGTERM << 8
1552 ret = signal.SIGTERM << 8
1546 killdaemons(env['DAEMON_PIDS'])
1553 killdaemons(env['DAEMON_PIDS'])
1547 return ret
1554 return ret
1548
1555
1549 proc.tochild.close()
1556 proc.tochild.close()
1550
1557
1551 try:
1558 try:
1552 output = proc.fromchild.read()
1559 output = proc.fromchild.read()
1553 except KeyboardInterrupt:
1560 except KeyboardInterrupt:
1554 vlog('# Handling keyboard interrupt')
1561 vlog('# Handling keyboard interrupt')
1555 cleanup()
1562 cleanup()
1556 raise
1563 raise
1557
1564
1558 ret = proc.wait()
1565 ret = proc.wait()
1559 if wifexited(ret):
1566 if wifexited(ret):
1560 ret = os.WEXITSTATUS(ret)
1567 ret = os.WEXITSTATUS(ret)
1561
1568
1562 if proc.timeout:
1569 if proc.timeout:
1563 ret = 'timeout'
1570 ret = 'timeout'
1564
1571
1565 if ret:
1572 if ret:
1566 killdaemons(env['DAEMON_PIDS'])
1573 killdaemons(env['DAEMON_PIDS'])
1567
1574
1568 for s, r in self._getreplacements():
1575 for s, r in self._getreplacements():
1569 output = re.sub(s, r, output)
1576 output = re.sub(s, r, output)
1570
1577
1571 if normalizenewlines:
1578 if normalizenewlines:
1572 output = output.replace(b'\r\n', b'\n')
1579 output = output.replace(b'\r\n', b'\n')
1573
1580
1574 return ret, output.splitlines(True)
1581 return ret, output.splitlines(True)
1575
1582
1576
1583
1577 class PythonTest(Test):
1584 class PythonTest(Test):
1578 """A Python-based test."""
1585 """A Python-based test."""
1579
1586
1580 @property
1587 @property
1581 def refpath(self):
1588 def refpath(self):
1582 return os.path.join(self._testdir, b'%s.out' % self.bname)
1589 return os.path.join(self._testdir, b'%s.out' % self.bname)
1583
1590
1584 def _run(self, env):
1591 def _run(self, env):
1585 # Quote the python(3) executable for Windows
1592 # Quote the python(3) executable for Windows
1586 cmd = b'"%s" "%s"' % (PYTHON, self.path)
1593 cmd = b'"%s" "%s"' % (PYTHON, self.path)
1587 vlog("# Running", cmd.decode("utf-8"))
1594 vlog("# Running", cmd.decode("utf-8"))
1588 normalizenewlines = os.name == 'nt'
1595 normalizenewlines = os.name == 'nt'
1589 result = self._runcommand(cmd, env, normalizenewlines=normalizenewlines)
1596 result = self._runcommand(cmd, env, normalizenewlines=normalizenewlines)
1590 if self._aborted:
1597 if self._aborted:
1591 raise KeyboardInterrupt()
1598 raise KeyboardInterrupt()
1592
1599
1593 return result
1600 return result
1594
1601
1595
1602
1596 # Some glob patterns apply only in some circumstances, so the script
1603 # Some glob patterns apply only in some circumstances, so the script
1597 # might want to remove (glob) annotations that otherwise should be
1604 # might want to remove (glob) annotations that otherwise should be
1598 # retained.
1605 # retained.
1599 checkcodeglobpats = [
1606 checkcodeglobpats = [
1600 # On Windows it looks like \ doesn't require a (glob), but we know
1607 # On Windows it looks like \ doesn't require a (glob), but we know
1601 # better.
1608 # better.
1602 re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
1609 re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
1603 re.compile(br'^moving \S+/.*[^)]$'),
1610 re.compile(br'^moving \S+/.*[^)]$'),
1604 re.compile(br'^pulling from \$TESTTMP/.*[^)]$'),
1611 re.compile(br'^pulling from \$TESTTMP/.*[^)]$'),
1605 # Not all platforms have 127.0.0.1 as loopback (though most do),
1612 # Not all platforms have 127.0.0.1 as loopback (though most do),
1606 # so we always glob that too.
1613 # so we always glob that too.
1607 re.compile(br'.*\$LOCALIP.*$'),
1614 re.compile(br'.*\$LOCALIP.*$'),
1608 ]
1615 ]
1609
1616
1610 bchr = chr
1617 bchr = chr
1611 if PYTHON3:
1618 if PYTHON3:
1612 bchr = lambda x: bytes([x])
1619 bchr = lambda x: bytes([x])
1613
1620
1614 WARN_UNDEFINED = 1
1621 WARN_UNDEFINED = 1
1615 WARN_YES = 2
1622 WARN_YES = 2
1616 WARN_NO = 3
1623 WARN_NO = 3
1617
1624
1618 MARK_OPTIONAL = b" (?)\n"
1625 MARK_OPTIONAL = b" (?)\n"
1619
1626
1620
1627
1621 def isoptional(line):
1628 def isoptional(line):
1622 return line.endswith(MARK_OPTIONAL)
1629 return line.endswith(MARK_OPTIONAL)
1623
1630
1624
1631
1625 class TTest(Test):
1632 class TTest(Test):
1626 """A "t test" is a test backed by a .t file."""
1633 """A "t test" is a test backed by a .t file."""
1627
1634
1628 SKIPPED_PREFIX = b'skipped: '
1635 SKIPPED_PREFIX = b'skipped: '
1629 FAILED_PREFIX = b'hghave check failed: '
1636 FAILED_PREFIX = b'hghave check failed: '
1630 NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search
1637 NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search
1631
1638
1632 ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
1639 ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
1633 ESCAPEMAP = {bchr(i): br'\x%02x' % i for i in range(256)}
1640 ESCAPEMAP = {bchr(i): br'\x%02x' % i for i in range(256)}
1634 ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})
1641 ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})
1635
1642
1636 def __init__(self, path, *args, **kwds):
1643 def __init__(self, path, *args, **kwds):
1637 # accept an extra "case" parameter
1644 # accept an extra "case" parameter
1638 case = kwds.pop('case', [])
1645 case = kwds.pop('case', [])
1639 self._case = case
1646 self._case = case
1640 self._allcases = {x for y in parsettestcases(path) for x in y}
1647 self._allcases = {x for y in parsettestcases(path) for x in y}
1641 super(TTest, self).__init__(path, *args, **kwds)
1648 super(TTest, self).__init__(path, *args, **kwds)
1642 if case:
1649 if case:
1643 casepath = b'#'.join(case)
1650 casepath = b'#'.join(case)
1644 self.name = '%s#%s' % (self.name, _bytes2sys(casepath))
1651 self.name = '%s#%s' % (self.name, _bytes2sys(casepath))
1645 self.errpath = b'%s#%s.err' % (self.errpath[:-4], casepath)
1652 self.errpath = b'%s#%s.err' % (self.errpath[:-4], casepath)
1646 self._tmpname += b'-%s' % casepath.replace(b'#', b'-')
1653 self._tmpname += b'-%s' % casepath.replace(b'#', b'-')
1647 self._have = {}
1654 self._have = {}
1648
1655
1649 @property
1656 @property
1650 def refpath(self):
1657 def refpath(self):
1651 return os.path.join(self._testdir, self.bname)
1658 return os.path.join(self._testdir, self.bname)
1652
1659
1653 def _run(self, env):
1660 def _run(self, env):
1654 with open(self.path, 'rb') as f:
1661 with open(self.path, 'rb') as f:
1655 lines = f.readlines()
1662 lines = f.readlines()
1656
1663
1657 # .t file is both reference output and the test input, keep reference
1664 # .t file is both reference output and the test input, keep reference
1658 # output updated with the the test input. This avoids some race
1665 # output updated with the the test input. This avoids some race
1659 # conditions where the reference output does not match the actual test.
1666 # conditions where the reference output does not match the actual test.
1660 if self._refout is not None:
1667 if self._refout is not None:
1661 self._refout = lines
1668 self._refout = lines
1662
1669
1663 salt, script, after, expected = self._parsetest(lines)
1670 salt, script, after, expected = self._parsetest(lines)
1664
1671
1665 # Write out the generated script.
1672 # Write out the generated script.
1666 fname = b'%s.sh' % self._testtmp
1673 fname = b'%s.sh' % self._testtmp
1667 with open(fname, 'wb') as f:
1674 with open(fname, 'wb') as f:
1668 for l in script:
1675 for l in script:
1669 f.write(l)
1676 f.write(l)
1670
1677
1671 cmd = b'%s "%s"' % (self._shell, fname)
1678 cmd = b'%s "%s"' % (self._shell, fname)
1672 vlog("# Running", cmd.decode("utf-8"))
1679 vlog("# Running", cmd.decode("utf-8"))
1673
1680
1674 exitcode, output = self._runcommand(cmd, env)
1681 exitcode, output = self._runcommand(cmd, env)
1675
1682
1676 if self._aborted:
1683 if self._aborted:
1677 raise KeyboardInterrupt()
1684 raise KeyboardInterrupt()
1678
1685
1679 # Do not merge output if skipped. Return hghave message instead.
1686 # Do not merge output if skipped. Return hghave message instead.
1680 # Similarly, with --debug, output is None.
1687 # Similarly, with --debug, output is None.
1681 if exitcode == self.SKIPPED_STATUS or output is None:
1688 if exitcode == self.SKIPPED_STATUS or output is None:
1682 return exitcode, output
1689 return exitcode, output
1683
1690
1684 return self._processoutput(exitcode, output, salt, after, expected)
1691 return self._processoutput(exitcode, output, salt, after, expected)
1685
1692
1686 def _hghave(self, reqs):
1693 def _hghave(self, reqs):
1687 allreqs = b' '.join(reqs)
1694 allreqs = b' '.join(reqs)
1688
1695
1689 self._detectslow(reqs)
1696 self._detectslow(reqs)
1690
1697
1691 if allreqs in self._have:
1698 if allreqs in self._have:
1692 return self._have.get(allreqs)
1699 return self._have.get(allreqs)
1693
1700
1694 # TODO do something smarter when all other uses of hghave are gone.
1701 # TODO do something smarter when all other uses of hghave are gone.
1695 runtestdir = osenvironb[b'RUNTESTDIR']
1702 runtestdir = osenvironb[b'RUNTESTDIR']
1696 tdir = runtestdir.replace(b'\\', b'/')
1703 tdir = runtestdir.replace(b'\\', b'/')
1697 proc = Popen4(
1704 proc = Popen4(
1698 b'%s -c "%s/hghave %s"' % (self._shell, tdir, allreqs),
1705 b'%s -c "%s/hghave %s"' % (self._shell, tdir, allreqs),
1699 self._testtmp,
1706 self._testtmp,
1700 0,
1707 0,
1701 self._getenv(),
1708 self._getenv(),
1702 )
1709 )
1703 stdout, stderr = proc.communicate()
1710 stdout, stderr = proc.communicate()
1704 ret = proc.wait()
1711 ret = proc.wait()
1705 if wifexited(ret):
1712 if wifexited(ret):
1706 ret = os.WEXITSTATUS(ret)
1713 ret = os.WEXITSTATUS(ret)
1707 if ret == 2:
1714 if ret == 2:
1708 print(stdout.decode('utf-8'))
1715 print(stdout.decode('utf-8'))
1709 sys.exit(1)
1716 sys.exit(1)
1710
1717
1711 if ret != 0:
1718 if ret != 0:
1712 self._have[allreqs] = (False, stdout)
1719 self._have[allreqs] = (False, stdout)
1713 return False, stdout
1720 return False, stdout
1714
1721
1715 self._have[allreqs] = (True, None)
1722 self._have[allreqs] = (True, None)
1716 return True, None
1723 return True, None
1717
1724
1718 def _detectslow(self, reqs):
1725 def _detectslow(self, reqs):
1719 """update the timeout of slow test when appropriate"""
1726 """update the timeout of slow test when appropriate"""
1720 if b'slow' in reqs:
1727 if b'slow' in reqs:
1721 self._timeout = self._slowtimeout
1728 self._timeout = self._slowtimeout
1722
1729
1723 def _iftest(self, args):
1730 def _iftest(self, args):
1724 # implements "#if"
1731 # implements "#if"
1725 reqs = []
1732 reqs = []
1726 for arg in args:
1733 for arg in args:
1727 if arg.startswith(b'no-') and arg[3:] in self._allcases:
1734 if arg.startswith(b'no-') and arg[3:] in self._allcases:
1728 if arg[3:] in self._case:
1735 if arg[3:] in self._case:
1729 return False
1736 return False
1730 elif arg in self._allcases:
1737 elif arg in self._allcases:
1731 if arg not in self._case:
1738 if arg not in self._case:
1732 return False
1739 return False
1733 else:
1740 else:
1734 reqs.append(arg)
1741 reqs.append(arg)
1735 self._detectslow(reqs)
1742 self._detectslow(reqs)
1736 return self._hghave(reqs)[0]
1743 return self._hghave(reqs)[0]
1737
1744
1738 def _parsetest(self, lines):
1745 def _parsetest(self, lines):
1739 # We generate a shell script which outputs unique markers to line
1746 # We generate a shell script which outputs unique markers to line
1740 # up script results with our source. These markers include input
1747 # up script results with our source. These markers include input
1741 # line number and the last return code.
1748 # line number and the last return code.
1742 salt = b"SALT%d" % time.time()
1749 salt = b"SALT%d" % time.time()
1743
1750
1744 def addsalt(line, inpython):
1751 def addsalt(line, inpython):
1745 if inpython:
1752 if inpython:
1746 script.append(b'%s %d 0\n' % (salt, line))
1753 script.append(b'%s %d 0\n' % (salt, line))
1747 else:
1754 else:
1748 script.append(b'echo %s %d $?\n' % (salt, line))
1755 script.append(b'echo %s %d $?\n' % (salt, line))
1749
1756
1750 activetrace = []
1757 activetrace = []
1751 session = str(uuid.uuid4())
1758 session = str(uuid.uuid4())
1752 if PYTHON3:
1759 if PYTHON3:
1753 session = session.encode('ascii')
1760 session = session.encode('ascii')
1754 hgcatapult = os.getenv('HGTESTCATAPULTSERVERPIPE') or os.getenv(
1761 hgcatapult = os.getenv('HGTESTCATAPULTSERVERPIPE') or os.getenv(
1755 'HGCATAPULTSERVERPIPE'
1762 'HGCATAPULTSERVERPIPE'
1756 )
1763 )
1757
1764
1758 def toggletrace(cmd=None):
1765 def toggletrace(cmd=None):
1759 if not hgcatapult or hgcatapult == os.devnull:
1766 if not hgcatapult or hgcatapult == os.devnull:
1760 return
1767 return
1761
1768
1762 if activetrace:
1769 if activetrace:
1763 script.append(
1770 script.append(
1764 b'echo END %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n'
1771 b'echo END %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n'
1765 % (session, activetrace[0])
1772 % (session, activetrace[0])
1766 )
1773 )
1767 if cmd is None:
1774 if cmd is None:
1768 return
1775 return
1769
1776
1770 if isinstance(cmd, str):
1777 if isinstance(cmd, str):
1771 quoted = shellquote(cmd.strip())
1778 quoted = shellquote(cmd.strip())
1772 else:
1779 else:
1773 quoted = shellquote(cmd.strip().decode('utf8')).encode('utf8')
1780 quoted = shellquote(cmd.strip().decode('utf8')).encode('utf8')
1774 quoted = quoted.replace(b'\\', b'\\\\')
1781 quoted = quoted.replace(b'\\', b'\\\\')
1775 script.append(
1782 script.append(
1776 b'echo START %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n'
1783 b'echo START %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n'
1777 % (session, quoted)
1784 % (session, quoted)
1778 )
1785 )
1779 activetrace[0:] = [quoted]
1786 activetrace[0:] = [quoted]
1780
1787
1781 script = []
1788 script = []
1782
1789
1783 # After we run the shell script, we re-unify the script output
1790 # After we run the shell script, we re-unify the script output
1784 # with non-active parts of the source, with synchronization by our
1791 # with non-active parts of the source, with synchronization by our
1785 # SALT line number markers. The after table contains the non-active
1792 # SALT line number markers. The after table contains the non-active
1786 # components, ordered by line number.
1793 # components, ordered by line number.
1787 after = {}
1794 after = {}
1788
1795
1789 # Expected shell script output.
1796 # Expected shell script output.
1790 expected = {}
1797 expected = {}
1791
1798
1792 pos = prepos = -1
1799 pos = prepos = -1
1793
1800
1794 # True or False when in a true or false conditional section
1801 # True or False when in a true or false conditional section
1795 skipping = None
1802 skipping = None
1796
1803
1797 # We keep track of whether or not we're in a Python block so we
1804 # We keep track of whether or not we're in a Python block so we
1798 # can generate the surrounding doctest magic.
1805 # can generate the surrounding doctest magic.
1799 inpython = False
1806 inpython = False
1800
1807
1801 if self._debug:
1808 if self._debug:
1802 script.append(b'set -x\n')
1809 script.append(b'set -x\n')
1803 if self._hgcommand != b'hg':
1810 if self._hgcommand != b'hg':
1804 script.append(b'alias hg="%s"\n' % self._hgcommand)
1811 script.append(b'alias hg="%s"\n' % self._hgcommand)
1805 if os.getenv('MSYSTEM'):
1812 if os.getenv('MSYSTEM'):
1806 script.append(b'alias pwd="pwd -W"\n')
1813 script.append(b'alias pwd="pwd -W"\n')
1807
1814
1808 if hgcatapult and hgcatapult != os.devnull:
1815 if hgcatapult and hgcatapult != os.devnull:
1809 if PYTHON3:
1816 if PYTHON3:
1810 hgcatapult = hgcatapult.encode('utf8')
1817 hgcatapult = hgcatapult.encode('utf8')
1811 cataname = self.name.encode('utf8')
1818 cataname = self.name.encode('utf8')
1812 else:
1819 else:
1813 cataname = self.name
1820 cataname = self.name
1814
1821
1815 # Kludge: use a while loop to keep the pipe from getting
1822 # Kludge: use a while loop to keep the pipe from getting
1816 # closed by our echo commands. The still-running file gets
1823 # closed by our echo commands. The still-running file gets
1817 # reaped at the end of the script, which causes the while
1824 # reaped at the end of the script, which causes the while
1818 # loop to exit and closes the pipe. Sigh.
1825 # loop to exit and closes the pipe. Sigh.
1819 script.append(
1826 script.append(
1820 b'rtendtracing() {\n'
1827 b'rtendtracing() {\n'
1821 b' echo END %(session)s %(name)s >> %(catapult)s\n'
1828 b' echo END %(session)s %(name)s >> %(catapult)s\n'
1822 b' rm -f "$TESTTMP/.still-running"\n'
1829 b' rm -f "$TESTTMP/.still-running"\n'
1823 b'}\n'
1830 b'}\n'
1824 b'trap "rtendtracing" 0\n'
1831 b'trap "rtendtracing" 0\n'
1825 b'touch "$TESTTMP/.still-running"\n'
1832 b'touch "$TESTTMP/.still-running"\n'
1826 b'while [ -f "$TESTTMP/.still-running" ]; do sleep 1; done '
1833 b'while [ -f "$TESTTMP/.still-running" ]; do sleep 1; done '
1827 b'> %(catapult)s &\n'
1834 b'> %(catapult)s &\n'
1828 b'HGCATAPULTSESSION=%(session)s ; export HGCATAPULTSESSION\n'
1835 b'HGCATAPULTSESSION=%(session)s ; export HGCATAPULTSESSION\n'
1829 b'echo START %(session)s %(name)s >> %(catapult)s\n'
1836 b'echo START %(session)s %(name)s >> %(catapult)s\n'
1830 % {
1837 % {
1831 b'name': cataname,
1838 b'name': cataname,
1832 b'session': session,
1839 b'session': session,
1833 b'catapult': hgcatapult,
1840 b'catapult': hgcatapult,
1834 }
1841 }
1835 )
1842 )
1836
1843
1837 if self._case:
1844 if self._case:
1838 casestr = b'#'.join(self._case)
1845 casestr = b'#'.join(self._case)
1839 if isinstance(casestr, str):
1846 if isinstance(casestr, str):
1840 quoted = shellquote(casestr)
1847 quoted = shellquote(casestr)
1841 else:
1848 else:
1842 quoted = shellquote(casestr.decode('utf8')).encode('utf8')
1849 quoted = shellquote(casestr.decode('utf8')).encode('utf8')
1843 script.append(b'TESTCASE=%s\n' % quoted)
1850 script.append(b'TESTCASE=%s\n' % quoted)
1844 script.append(b'export TESTCASE\n')
1851 script.append(b'export TESTCASE\n')
1845
1852
1846 n = 0
1853 n = 0
1847 for n, l in enumerate(lines):
1854 for n, l in enumerate(lines):
1848 if not l.endswith(b'\n'):
1855 if not l.endswith(b'\n'):
1849 l += b'\n'
1856 l += b'\n'
1850 if l.startswith(b'#require'):
1857 if l.startswith(b'#require'):
1851 lsplit = l.split()
1858 lsplit = l.split()
1852 if len(lsplit) < 2 or lsplit[0] != b'#require':
1859 if len(lsplit) < 2 or lsplit[0] != b'#require':
1853 after.setdefault(pos, []).append(
1860 after.setdefault(pos, []).append(
1854 b' !!! invalid #require\n'
1861 b' !!! invalid #require\n'
1855 )
1862 )
1856 if not skipping:
1863 if not skipping:
1857 haveresult, message = self._hghave(lsplit[1:])
1864 haveresult, message = self._hghave(lsplit[1:])
1858 if not haveresult:
1865 if not haveresult:
1859 script = [b'echo "%s"\nexit 80\n' % message]
1866 script = [b'echo "%s"\nexit 80\n' % message]
1860 break
1867 break
1861 after.setdefault(pos, []).append(l)
1868 after.setdefault(pos, []).append(l)
1862 elif l.startswith(b'#if'):
1869 elif l.startswith(b'#if'):
1863 lsplit = l.split()
1870 lsplit = l.split()
1864 if len(lsplit) < 2 or lsplit[0] != b'#if':
1871 if len(lsplit) < 2 or lsplit[0] != b'#if':
1865 after.setdefault(pos, []).append(b' !!! invalid #if\n')
1872 after.setdefault(pos, []).append(b' !!! invalid #if\n')
1866 if skipping is not None:
1873 if skipping is not None:
1867 after.setdefault(pos, []).append(b' !!! nested #if\n')
1874 after.setdefault(pos, []).append(b' !!! nested #if\n')
1868 skipping = not self._iftest(lsplit[1:])
1875 skipping = not self._iftest(lsplit[1:])
1869 after.setdefault(pos, []).append(l)
1876 after.setdefault(pos, []).append(l)
1870 elif l.startswith(b'#else'):
1877 elif l.startswith(b'#else'):
1871 if skipping is None:
1878 if skipping is None:
1872 after.setdefault(pos, []).append(b' !!! missing #if\n')
1879 after.setdefault(pos, []).append(b' !!! missing #if\n')
1873 skipping = not skipping
1880 skipping = not skipping
1874 after.setdefault(pos, []).append(l)
1881 after.setdefault(pos, []).append(l)
1875 elif l.startswith(b'#endif'):
1882 elif l.startswith(b'#endif'):
1876 if skipping is None:
1883 if skipping is None:
1877 after.setdefault(pos, []).append(b' !!! missing #if\n')
1884 after.setdefault(pos, []).append(b' !!! missing #if\n')
1878 skipping = None
1885 skipping = None
1879 after.setdefault(pos, []).append(l)
1886 after.setdefault(pos, []).append(l)
1880 elif skipping:
1887 elif skipping:
1881 after.setdefault(pos, []).append(l)
1888 after.setdefault(pos, []).append(l)
1882 elif l.startswith(b' >>> '): # python inlines
1889 elif l.startswith(b' >>> '): # python inlines
1883 after.setdefault(pos, []).append(l)
1890 after.setdefault(pos, []).append(l)
1884 prepos = pos
1891 prepos = pos
1885 pos = n
1892 pos = n
1886 if not inpython:
1893 if not inpython:
1887 # We've just entered a Python block. Add the header.
1894 # We've just entered a Python block. Add the header.
1888 inpython = True
1895 inpython = True
1889 addsalt(prepos, False) # Make sure we report the exit code.
1896 addsalt(prepos, False) # Make sure we report the exit code.
1890 script.append(b'"%s" -m heredoctest <<EOF\n' % PYTHON)
1897 script.append(b'"%s" -m heredoctest <<EOF\n' % PYTHON)
1891 addsalt(n, True)
1898 addsalt(n, True)
1892 script.append(l[2:])
1899 script.append(l[2:])
1893 elif l.startswith(b' ... '): # python inlines
1900 elif l.startswith(b' ... '): # python inlines
1894 after.setdefault(prepos, []).append(l)
1901 after.setdefault(prepos, []).append(l)
1895 script.append(l[2:])
1902 script.append(l[2:])
1896 elif l.startswith(b' $ '): # commands
1903 elif l.startswith(b' $ '): # commands
1897 if inpython:
1904 if inpython:
1898 script.append(b'EOF\n')
1905 script.append(b'EOF\n')
1899 inpython = False
1906 inpython = False
1900 after.setdefault(pos, []).append(l)
1907 after.setdefault(pos, []).append(l)
1901 prepos = pos
1908 prepos = pos
1902 pos = n
1909 pos = n
1903 addsalt(n, False)
1910 addsalt(n, False)
1904 rawcmd = l[4:]
1911 rawcmd = l[4:]
1905 cmd = rawcmd.split()
1912 cmd = rawcmd.split()
1906 toggletrace(rawcmd)
1913 toggletrace(rawcmd)
1907 if len(cmd) == 2 and cmd[0] == b'cd':
1914 if len(cmd) == 2 and cmd[0] == b'cd':
1908 rawcmd = b'cd %s || exit 1\n' % cmd[1]
1915 rawcmd = b'cd %s || exit 1\n' % cmd[1]
1909 script.append(rawcmd)
1916 script.append(rawcmd)
1910 elif l.startswith(b' > '): # continuations
1917 elif l.startswith(b' > '): # continuations
1911 after.setdefault(prepos, []).append(l)
1918 after.setdefault(prepos, []).append(l)
1912 script.append(l[4:])
1919 script.append(l[4:])
1913 elif l.startswith(b' '): # results
1920 elif l.startswith(b' '): # results
1914 # Queue up a list of expected results.
1921 # Queue up a list of expected results.
1915 expected.setdefault(pos, []).append(l[2:])
1922 expected.setdefault(pos, []).append(l[2:])
1916 else:
1923 else:
1917 if inpython:
1924 if inpython:
1918 script.append(b'EOF\n')
1925 script.append(b'EOF\n')
1919 inpython = False
1926 inpython = False
1920 # Non-command/result. Queue up for merged output.
1927 # Non-command/result. Queue up for merged output.
1921 after.setdefault(pos, []).append(l)
1928 after.setdefault(pos, []).append(l)
1922
1929
1923 if inpython:
1930 if inpython:
1924 script.append(b'EOF\n')
1931 script.append(b'EOF\n')
1925 if skipping is not None:
1932 if skipping is not None:
1926 after.setdefault(pos, []).append(b' !!! missing #endif\n')
1933 after.setdefault(pos, []).append(b' !!! missing #endif\n')
1927 addsalt(n + 1, False)
1934 addsalt(n + 1, False)
1928 # Need to end any current per-command trace
1935 # Need to end any current per-command trace
1929 if activetrace:
1936 if activetrace:
1930 toggletrace()
1937 toggletrace()
1931 return salt, script, after, expected
1938 return salt, script, after, expected
1932
1939
1933 def _processoutput(self, exitcode, output, salt, after, expected):
1940 def _processoutput(self, exitcode, output, salt, after, expected):
1934 # Merge the script output back into a unified test.
1941 # Merge the script output back into a unified test.
1935 warnonly = WARN_UNDEFINED # 1: not yet; 2: yes; 3: for sure not
1942 warnonly = WARN_UNDEFINED # 1: not yet; 2: yes; 3: for sure not
1936 if exitcode != 0:
1943 if exitcode != 0:
1937 warnonly = WARN_NO
1944 warnonly = WARN_NO
1938
1945
1939 pos = -1
1946 pos = -1
1940 postout = []
1947 postout = []
1941 for out_rawline in output:
1948 for out_rawline in output:
1942 out_line, cmd_line = out_rawline, None
1949 out_line, cmd_line = out_rawline, None
1943 if salt in out_rawline:
1950 if salt in out_rawline:
1944 out_line, cmd_line = out_rawline.split(salt, 1)
1951 out_line, cmd_line = out_rawline.split(salt, 1)
1945
1952
1946 pos, postout, warnonly = self._process_out_line(
1953 pos, postout, warnonly = self._process_out_line(
1947 out_line, pos, postout, expected, warnonly
1954 out_line, pos, postout, expected, warnonly
1948 )
1955 )
1949 pos, postout = self._process_cmd_line(cmd_line, pos, postout, after)
1956 pos, postout = self._process_cmd_line(cmd_line, pos, postout, after)
1950
1957
1951 if pos in after:
1958 if pos in after:
1952 postout += after.pop(pos)
1959 postout += after.pop(pos)
1953
1960
1954 if warnonly == WARN_YES:
1961 if warnonly == WARN_YES:
1955 exitcode = False # Set exitcode to warned.
1962 exitcode = False # Set exitcode to warned.
1956
1963
1957 return exitcode, postout
1964 return exitcode, postout
1958
1965
1959 def _process_out_line(self, out_line, pos, postout, expected, warnonly):
1966 def _process_out_line(self, out_line, pos, postout, expected, warnonly):
1960 while out_line:
1967 while out_line:
1961 if not out_line.endswith(b'\n'):
1968 if not out_line.endswith(b'\n'):
1962 out_line += b' (no-eol)\n'
1969 out_line += b' (no-eol)\n'
1963
1970
1964 # Find the expected output at the current position.
1971 # Find the expected output at the current position.
1965 els = [None]
1972 els = [None]
1966 if expected.get(pos, None):
1973 if expected.get(pos, None):
1967 els = expected[pos]
1974 els = expected[pos]
1968
1975
1969 optional = []
1976 optional = []
1970 for i, el in enumerate(els):
1977 for i, el in enumerate(els):
1971 r = False
1978 r = False
1972 if el:
1979 if el:
1973 r, exact = self.linematch(el, out_line)
1980 r, exact = self.linematch(el, out_line)
1974 if isinstance(r, str):
1981 if isinstance(r, str):
1975 if r == '-glob':
1982 if r == '-glob':
1976 out_line = ''.join(el.rsplit(' (glob)', 1))
1983 out_line = ''.join(el.rsplit(' (glob)', 1))
1977 r = '' # Warn only this line.
1984 r = '' # Warn only this line.
1978 elif r == "retry":
1985 elif r == "retry":
1979 postout.append(b' ' + el)
1986 postout.append(b' ' + el)
1980 else:
1987 else:
1981 log('\ninfo, unknown linematch result: %r\n' % r)
1988 log('\ninfo, unknown linematch result: %r\n' % r)
1982 r = False
1989 r = False
1983 if r:
1990 if r:
1984 els.pop(i)
1991 els.pop(i)
1985 break
1992 break
1986 if el:
1993 if el:
1987 if isoptional(el):
1994 if isoptional(el):
1988 optional.append(i)
1995 optional.append(i)
1989 else:
1996 else:
1990 m = optline.match(el)
1997 m = optline.match(el)
1991 if m:
1998 if m:
1992 conditions = [c for c in m.group(2).split(b' ')]
1999 conditions = [c for c in m.group(2).split(b' ')]
1993
2000
1994 if not self._iftest(conditions):
2001 if not self._iftest(conditions):
1995 optional.append(i)
2002 optional.append(i)
1996 if exact:
2003 if exact:
1997 # Don't allow line to be matches against a later
2004 # Don't allow line to be matches against a later
1998 # line in the output
2005 # line in the output
1999 els.pop(i)
2006 els.pop(i)
2000 break
2007 break
2001
2008
2002 if r:
2009 if r:
2003 if r == "retry":
2010 if r == "retry":
2004 continue
2011 continue
2005 # clean up any optional leftovers
2012 # clean up any optional leftovers
2006 for i in optional:
2013 for i in optional:
2007 postout.append(b' ' + els[i])
2014 postout.append(b' ' + els[i])
2008 for i in reversed(optional):
2015 for i in reversed(optional):
2009 del els[i]
2016 del els[i]
2010 postout.append(b' ' + el)
2017 postout.append(b' ' + el)
2011 else:
2018 else:
2012 if self.NEEDESCAPE(out_line):
2019 if self.NEEDESCAPE(out_line):
2013 out_line = TTest._stringescape(
2020 out_line = TTest._stringescape(
2014 b'%s (esc)\n' % out_line.rstrip(b'\n')
2021 b'%s (esc)\n' % out_line.rstrip(b'\n')
2015 )
2022 )
2016 postout.append(b' ' + out_line) # Let diff deal with it.
2023 postout.append(b' ' + out_line) # Let diff deal with it.
2017 if r != '': # If line failed.
2024 if r != '': # If line failed.
2018 warnonly = WARN_NO
2025 warnonly = WARN_NO
2019 elif warnonly == WARN_UNDEFINED:
2026 elif warnonly == WARN_UNDEFINED:
2020 warnonly = WARN_YES
2027 warnonly = WARN_YES
2021 break
2028 break
2022 else:
2029 else:
2023 # clean up any optional leftovers
2030 # clean up any optional leftovers
2024 while expected.get(pos, None):
2031 while expected.get(pos, None):
2025 el = expected[pos].pop(0)
2032 el = expected[pos].pop(0)
2026 if el:
2033 if el:
2027 if not isoptional(el):
2034 if not isoptional(el):
2028 m = optline.match(el)
2035 m = optline.match(el)
2029 if m:
2036 if m:
2030 conditions = [c for c in m.group(2).split(b' ')]
2037 conditions = [c for c in m.group(2).split(b' ')]
2031
2038
2032 if self._iftest(conditions):
2039 if self._iftest(conditions):
2033 # Don't append as optional line
2040 # Don't append as optional line
2034 continue
2041 continue
2035 else:
2042 else:
2036 continue
2043 continue
2037 postout.append(b' ' + el)
2044 postout.append(b' ' + el)
2038 return pos, postout, warnonly
2045 return pos, postout, warnonly
2039
2046
2040 def _process_cmd_line(self, cmd_line, pos, postout, after):
2047 def _process_cmd_line(self, cmd_line, pos, postout, after):
2041 """process a "command" part of a line from unified test output"""
2048 """process a "command" part of a line from unified test output"""
2042 if cmd_line:
2049 if cmd_line:
2043 # Add on last return code.
2050 # Add on last return code.
2044 ret = int(cmd_line.split()[1])
2051 ret = int(cmd_line.split()[1])
2045 if ret != 0:
2052 if ret != 0:
2046 postout.append(b' [%d]\n' % ret)
2053 postout.append(b' [%d]\n' % ret)
2047 if pos in after:
2054 if pos in after:
2048 # Merge in non-active test bits.
2055 # Merge in non-active test bits.
2049 postout += after.pop(pos)
2056 postout += after.pop(pos)
2050 pos = int(cmd_line.split()[0])
2057 pos = int(cmd_line.split()[0])
2051 return pos, postout
2058 return pos, postout
2052
2059
2053 @staticmethod
2060 @staticmethod
2054 def rematch(el, l):
2061 def rematch(el, l):
2055 try:
2062 try:
2056 # parse any flags at the beginning of the regex. Only 'i' is
2063 # parse any flags at the beginning of the regex. Only 'i' is
2057 # supported right now, but this should be easy to extend.
2064 # supported right now, but this should be easy to extend.
2058 flags, el = re.match(br'^(\(\?i\))?(.*)', el).groups()[0:2]
2065 flags, el = re.match(br'^(\(\?i\))?(.*)', el).groups()[0:2]
2059 flags = flags or b''
2066 flags = flags or b''
2060 el = flags + b'(?:' + el + b')'
2067 el = flags + b'(?:' + el + b')'
2061 # use \Z to ensure that the regex matches to the end of the string
2068 # use \Z to ensure that the regex matches to the end of the string
2062 if os.name == 'nt':
2069 if os.name == 'nt':
2063 return re.match(el + br'\r?\n\Z', l)
2070 return re.match(el + br'\r?\n\Z', l)
2064 return re.match(el + br'\n\Z', l)
2071 return re.match(el + br'\n\Z', l)
2065 except re.error:
2072 except re.error:
2066 # el is an invalid regex
2073 # el is an invalid regex
2067 return False
2074 return False
2068
2075
2069 @staticmethod
2076 @staticmethod
2070 def globmatch(el, l):
2077 def globmatch(el, l):
2071 # The only supported special characters are * and ? plus / which also
2078 # The only supported special characters are * and ? plus / which also
2072 # matches \ on windows. Escaping of these characters is supported.
2079 # matches \ on windows. Escaping of these characters is supported.
2073 if el + b'\n' == l:
2080 if el + b'\n' == l:
2074 if os.altsep:
2081 if os.altsep:
2075 # matching on "/" is not needed for this line
2082 # matching on "/" is not needed for this line
2076 for pat in checkcodeglobpats:
2083 for pat in checkcodeglobpats:
2077 if pat.match(el):
2084 if pat.match(el):
2078 return True
2085 return True
2079 return b'-glob'
2086 return b'-glob'
2080 return True
2087 return True
2081 el = el.replace(b'$LOCALIP', b'*')
2088 el = el.replace(b'$LOCALIP', b'*')
2082 i, n = 0, len(el)
2089 i, n = 0, len(el)
2083 res = b''
2090 res = b''
2084 while i < n:
2091 while i < n:
2085 c = el[i : i + 1]
2092 c = el[i : i + 1]
2086 i += 1
2093 i += 1
2087 if c == b'\\' and i < n and el[i : i + 1] in b'*?\\/':
2094 if c == b'\\' and i < n and el[i : i + 1] in b'*?\\/':
2088 res += el[i - 1 : i + 1]
2095 res += el[i - 1 : i + 1]
2089 i += 1
2096 i += 1
2090 elif c == b'*':
2097 elif c == b'*':
2091 res += b'.*'
2098 res += b'.*'
2092 elif c == b'?':
2099 elif c == b'?':
2093 res += b'.'
2100 res += b'.'
2094 elif c == b'/' and os.altsep:
2101 elif c == b'/' and os.altsep:
2095 res += b'[/\\\\]'
2102 res += b'[/\\\\]'
2096 else:
2103 else:
2097 res += re.escape(c)
2104 res += re.escape(c)
2098 return TTest.rematch(res, l)
2105 return TTest.rematch(res, l)
2099
2106
2100 def linematch(self, el, l):
2107 def linematch(self, el, l):
2101 if el == l: # perfect match (fast)
2108 if el == l: # perfect match (fast)
2102 return True, True
2109 return True, True
2103 retry = False
2110 retry = False
2104 if isoptional(el):
2111 if isoptional(el):
2105 retry = "retry"
2112 retry = "retry"
2106 el = el[: -len(MARK_OPTIONAL)] + b"\n"
2113 el = el[: -len(MARK_OPTIONAL)] + b"\n"
2107 else:
2114 else:
2108 m = optline.match(el)
2115 m = optline.match(el)
2109 if m:
2116 if m:
2110 conditions = [c for c in m.group(2).split(b' ')]
2117 conditions = [c for c in m.group(2).split(b' ')]
2111
2118
2112 el = m.group(1) + b"\n"
2119 el = m.group(1) + b"\n"
2113 if not self._iftest(conditions):
2120 if not self._iftest(conditions):
2114 # listed feature missing, should not match
2121 # listed feature missing, should not match
2115 return "retry", False
2122 return "retry", False
2116
2123
2117 if el.endswith(b" (esc)\n"):
2124 if el.endswith(b" (esc)\n"):
2118 if PYTHON3:
2125 if PYTHON3:
2119 el = el[:-7].decode('unicode_escape') + '\n'
2126 el = el[:-7].decode('unicode_escape') + '\n'
2120 el = el.encode('latin-1')
2127 el = el.encode('latin-1')
2121 else:
2128 else:
2122 el = el[:-7].decode('string-escape') + '\n'
2129 el = el[:-7].decode('string-escape') + '\n'
2123 if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
2130 if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
2124 return True, True
2131 return True, True
2125 if el.endswith(b" (re)\n"):
2132 if el.endswith(b" (re)\n"):
2126 return (TTest.rematch(el[:-6], l) or retry), False
2133 return (TTest.rematch(el[:-6], l) or retry), False
2127 if el.endswith(b" (glob)\n"):
2134 if el.endswith(b" (glob)\n"):
2128 # ignore '(glob)' added to l by 'replacements'
2135 # ignore '(glob)' added to l by 'replacements'
2129 if l.endswith(b" (glob)\n"):
2136 if l.endswith(b" (glob)\n"):
2130 l = l[:-8] + b"\n"
2137 l = l[:-8] + b"\n"
2131 return (TTest.globmatch(el[:-8], l) or retry), False
2138 return (TTest.globmatch(el[:-8], l) or retry), False
2132 if os.altsep:
2139 if os.altsep:
2133 _l = l.replace(b'\\', b'/')
2140 _l = l.replace(b'\\', b'/')
2134 if el == _l or os.name == 'nt' and el[:-1] + b'\r\n' == _l:
2141 if el == _l or os.name == 'nt' and el[:-1] + b'\r\n' == _l:
2135 return True, True
2142 return True, True
2136 return retry, True
2143 return retry, True
2137
2144
2138 @staticmethod
2145 @staticmethod
2139 def parsehghaveoutput(lines):
2146 def parsehghaveoutput(lines):
2140 """Parse hghave log lines.
2147 """Parse hghave log lines.
2141
2148
2142 Return tuple of lists (missing, failed):
2149 Return tuple of lists (missing, failed):
2143 * the missing/unknown features
2150 * the missing/unknown features
2144 * the features for which existence check failed"""
2151 * the features for which existence check failed"""
2145 missing = []
2152 missing = []
2146 failed = []
2153 failed = []
2147 for line in lines:
2154 for line in lines:
2148 if line.startswith(TTest.SKIPPED_PREFIX):
2155 if line.startswith(TTest.SKIPPED_PREFIX):
2149 line = line.splitlines()[0]
2156 line = line.splitlines()[0]
2150 missing.append(_bytes2sys(line[len(TTest.SKIPPED_PREFIX) :]))
2157 missing.append(_bytes2sys(line[len(TTest.SKIPPED_PREFIX) :]))
2151 elif line.startswith(TTest.FAILED_PREFIX):
2158 elif line.startswith(TTest.FAILED_PREFIX):
2152 line = line.splitlines()[0]
2159 line = line.splitlines()[0]
2153 failed.append(_bytes2sys(line[len(TTest.FAILED_PREFIX) :]))
2160 failed.append(_bytes2sys(line[len(TTest.FAILED_PREFIX) :]))
2154
2161
2155 return missing, failed
2162 return missing, failed
2156
2163
2157 @staticmethod
2164 @staticmethod
2158 def _escapef(m):
2165 def _escapef(m):
2159 return TTest.ESCAPEMAP[m.group(0)]
2166 return TTest.ESCAPEMAP[m.group(0)]
2160
2167
2161 @staticmethod
2168 @staticmethod
2162 def _stringescape(s):
2169 def _stringescape(s):
2163 return TTest.ESCAPESUB(TTest._escapef, s)
2170 return TTest.ESCAPESUB(TTest._escapef, s)
2164
2171
2165
2172
2166 iolock = threading.RLock()
2173 iolock = threading.RLock()
2167 firstlock = threading.RLock()
2174 firstlock = threading.RLock()
2168 firsterror = False
2175 firsterror = False
2169
2176
2170
2177
2171 class TestResult(unittest._TextTestResult):
2178 class TestResult(unittest._TextTestResult):
2172 """Holds results when executing via unittest."""
2179 """Holds results when executing via unittest."""
2173
2180
2174 # Don't worry too much about accessing the non-public _TextTestResult.
2181 # Don't worry too much about accessing the non-public _TextTestResult.
2175 # It is relatively common in Python testing tools.
2182 # It is relatively common in Python testing tools.
2176 def __init__(self, options, *args, **kwargs):
2183 def __init__(self, options, *args, **kwargs):
2177 super(TestResult, self).__init__(*args, **kwargs)
2184 super(TestResult, self).__init__(*args, **kwargs)
2178
2185
2179 self._options = options
2186 self._options = options
2180
2187
2181 # unittest.TestResult didn't have skipped until 2.7. We need to
2188 # unittest.TestResult didn't have skipped until 2.7. We need to
2182 # polyfill it.
2189 # polyfill it.
2183 self.skipped = []
2190 self.skipped = []
2184
2191
2185 # We have a custom "ignored" result that isn't present in any Python
2192 # We have a custom "ignored" result that isn't present in any Python
2186 # unittest implementation. It is very similar to skipped. It may make
2193 # unittest implementation. It is very similar to skipped. It may make
2187 # sense to map it into skip some day.
2194 # sense to map it into skip some day.
2188 self.ignored = []
2195 self.ignored = []
2189
2196
2190 self.times = []
2197 self.times = []
2191 self._firststarttime = None
2198 self._firststarttime = None
2192 # Data stored for the benefit of generating xunit reports.
2199 # Data stored for the benefit of generating xunit reports.
2193 self.successes = []
2200 self.successes = []
2194 self.faildata = {}
2201 self.faildata = {}
2195
2202
2196 if options.color == 'auto':
2203 if options.color == 'auto':
2197 self.color = pygmentspresent and self.stream.isatty()
2204 self.color = pygmentspresent and self.stream.isatty()
2198 elif options.color == 'never':
2205 elif options.color == 'never':
2199 self.color = False
2206 self.color = False
2200 else: # 'always', for testing purposes
2207 else: # 'always', for testing purposes
2201 self.color = pygmentspresent
2208 self.color = pygmentspresent
2202
2209
2203 def onStart(self, test):
2210 def onStart(self, test):
2204 """Can be overriden by custom TestResult"""
2211 """Can be overriden by custom TestResult"""
2205
2212
2206 def onEnd(self):
2213 def onEnd(self):
2207 """Can be overriden by custom TestResult"""
2214 """Can be overriden by custom TestResult"""
2208
2215
2209 def addFailure(self, test, reason):
2216 def addFailure(self, test, reason):
2210 self.failures.append((test, reason))
2217 self.failures.append((test, reason))
2211
2218
2212 if self._options.first:
2219 if self._options.first:
2213 self.stop()
2220 self.stop()
2214 else:
2221 else:
2215 with iolock:
2222 with iolock:
2216 if reason == "timed out":
2223 if reason == "timed out":
2217 self.stream.write('t')
2224 self.stream.write('t')
2218 else:
2225 else:
2219 if not self._options.nodiff:
2226 if not self._options.nodiff:
2220 self.stream.write('\n')
2227 self.stream.write('\n')
2221 # Exclude the '\n' from highlighting to lex correctly
2228 # Exclude the '\n' from highlighting to lex correctly
2222 formatted = 'ERROR: %s output changed\n' % test
2229 formatted = 'ERROR: %s output changed\n' % test
2223 self.stream.write(highlightmsg(formatted, self.color))
2230 self.stream.write(highlightmsg(formatted, self.color))
2224 self.stream.write('!')
2231 self.stream.write('!')
2225
2232
2226 self.stream.flush()
2233 self.stream.flush()
2227
2234
2228 def addSuccess(self, test):
2235 def addSuccess(self, test):
2229 with iolock:
2236 with iolock:
2230 super(TestResult, self).addSuccess(test)
2237 super(TestResult, self).addSuccess(test)
2231 self.successes.append(test)
2238 self.successes.append(test)
2232
2239
2233 def addError(self, test, err):
2240 def addError(self, test, err):
2234 super(TestResult, self).addError(test, err)
2241 super(TestResult, self).addError(test, err)
2235 if self._options.first:
2242 if self._options.first:
2236 self.stop()
2243 self.stop()
2237
2244
2238 # Polyfill.
2245 # Polyfill.
2239 def addSkip(self, test, reason):
2246 def addSkip(self, test, reason):
2240 self.skipped.append((test, reason))
2247 self.skipped.append((test, reason))
2241 with iolock:
2248 with iolock:
2242 if self.showAll:
2249 if self.showAll:
2243 self.stream.writeln('skipped %s' % reason)
2250 self.stream.writeln('skipped %s' % reason)
2244 else:
2251 else:
2245 self.stream.write('s')
2252 self.stream.write('s')
2246 self.stream.flush()
2253 self.stream.flush()
2247
2254
2248 def addIgnore(self, test, reason):
2255 def addIgnore(self, test, reason):
2249 self.ignored.append((test, reason))
2256 self.ignored.append((test, reason))
2250 with iolock:
2257 with iolock:
2251 if self.showAll:
2258 if self.showAll:
2252 self.stream.writeln('ignored %s' % reason)
2259 self.stream.writeln('ignored %s' % reason)
2253 else:
2260 else:
2254 if reason not in ('not retesting', "doesn't match keyword"):
2261 if reason not in ('not retesting', "doesn't match keyword"):
2255 self.stream.write('i')
2262 self.stream.write('i')
2256 else:
2263 else:
2257 self.testsRun += 1
2264 self.testsRun += 1
2258 self.stream.flush()
2265 self.stream.flush()
2259
2266
2260 def addOutputMismatch(self, test, ret, got, expected):
2267 def addOutputMismatch(self, test, ret, got, expected):
2261 """Record a mismatch in test output for a particular test."""
2268 """Record a mismatch in test output for a particular test."""
2262 if self.shouldStop or firsterror:
2269 if self.shouldStop or firsterror:
2263 # don't print, some other test case already failed and
2270 # don't print, some other test case already failed and
2264 # printed, we're just stale and probably failed due to our
2271 # printed, we're just stale and probably failed due to our
2265 # temp dir getting cleaned up.
2272 # temp dir getting cleaned up.
2266 return
2273 return
2267
2274
2268 accepted = False
2275 accepted = False
2269 lines = []
2276 lines = []
2270
2277
2271 with iolock:
2278 with iolock:
2272 if self._options.nodiff:
2279 if self._options.nodiff:
2273 pass
2280 pass
2274 elif self._options.view:
2281 elif self._options.view:
2275 v = self._options.view
2282 v = self._options.view
2276 subprocess.call(
2283 subprocess.call(
2277 r'"%s" "%s" "%s"'
2284 r'"%s" "%s" "%s"'
2278 % (v, _bytes2sys(test.refpath), _bytes2sys(test.errpath)),
2285 % (v, _bytes2sys(test.refpath), _bytes2sys(test.errpath)),
2279 shell=True,
2286 shell=True,
2280 )
2287 )
2281 else:
2288 else:
2282 servefail, lines = getdiff(
2289 servefail, lines = getdiff(
2283 expected, got, test.refpath, test.errpath
2290 expected, got, test.refpath, test.errpath
2284 )
2291 )
2285 self.stream.write('\n')
2292 self.stream.write('\n')
2286 for line in lines:
2293 for line in lines:
2287 line = highlightdiff(line, self.color)
2294 line = highlightdiff(line, self.color)
2288 if PYTHON3:
2295 if PYTHON3:
2289 self.stream.flush()
2296 self.stream.flush()
2290 self.stream.buffer.write(line)
2297 self.stream.buffer.write(line)
2291 self.stream.buffer.flush()
2298 self.stream.buffer.flush()
2292 else:
2299 else:
2293 self.stream.write(line)
2300 self.stream.write(line)
2294 self.stream.flush()
2301 self.stream.flush()
2295
2302
2296 if servefail:
2303 if servefail:
2297 raise test.failureException(
2304 raise test.failureException(
2298 'server failed to start (HGPORT=%s)' % test._startport
2305 'server failed to start (HGPORT=%s)' % test._startport
2299 )
2306 )
2300
2307
2301 # handle interactive prompt without releasing iolock
2308 # handle interactive prompt without releasing iolock
2302 if self._options.interactive:
2309 if self._options.interactive:
2303 if test.readrefout() != expected:
2310 if test.readrefout() != expected:
2304 self.stream.write(
2311 self.stream.write(
2305 'Reference output has changed (run again to prompt '
2312 'Reference output has changed (run again to prompt '
2306 'changes)'
2313 'changes)'
2307 )
2314 )
2308 else:
2315 else:
2309 self.stream.write('Accept this change? [y/N] ')
2316 self.stream.write('Accept this change? [y/N] ')
2310 self.stream.flush()
2317 self.stream.flush()
2311 answer = sys.stdin.readline().strip()
2318 answer = sys.stdin.readline().strip()
2312 if answer.lower() in ('y', 'yes'):
2319 if answer.lower() in ('y', 'yes'):
2313 if test.path.endswith(b'.t'):
2320 if test.path.endswith(b'.t'):
2314 rename(test.errpath, test.path)
2321 rename(test.errpath, test.path)
2315 else:
2322 else:
2316 rename(test.errpath, b'%s.out' % test.path)
2323 rename(test.errpath, b'%s.out' % test.path)
2317 accepted = True
2324 accepted = True
2318 if not accepted:
2325 if not accepted:
2319 self.faildata[test.name] = b''.join(lines)
2326 self.faildata[test.name] = b''.join(lines)
2320
2327
2321 return accepted
2328 return accepted
2322
2329
2323 def startTest(self, test):
2330 def startTest(self, test):
2324 super(TestResult, self).startTest(test)
2331 super(TestResult, self).startTest(test)
2325
2332
2326 # os.times module computes the user time and system time spent by
2333 # os.times module computes the user time and system time spent by
2327 # child's processes along with real elapsed time taken by a process.
2334 # child's processes along with real elapsed time taken by a process.
2328 # This module has one limitation. It can only work for Linux user
2335 # This module has one limitation. It can only work for Linux user
2329 # and not for Windows. Hence why we fall back to another function
2336 # and not for Windows. Hence why we fall back to another function
2330 # for wall time calculations.
2337 # for wall time calculations.
2331 test.started_times = os.times()
2338 test.started_times = os.times()
2332 # TODO use a monotonic clock once support for Python 2.7 is dropped.
2339 # TODO use a monotonic clock once support for Python 2.7 is dropped.
2333 test.started_time = time.time()
2340 test.started_time = time.time()
2334 if self._firststarttime is None: # thread racy but irrelevant
2341 if self._firststarttime is None: # thread racy but irrelevant
2335 self._firststarttime = test.started_time
2342 self._firststarttime = test.started_time
2336
2343
2337 def stopTest(self, test, interrupted=False):
2344 def stopTest(self, test, interrupted=False):
2338 super(TestResult, self).stopTest(test)
2345 super(TestResult, self).stopTest(test)
2339
2346
2340 test.stopped_times = os.times()
2347 test.stopped_times = os.times()
2341 stopped_time = time.time()
2348 stopped_time = time.time()
2342
2349
2343 starttime = test.started_times
2350 starttime = test.started_times
2344 endtime = test.stopped_times
2351 endtime = test.stopped_times
2345 origin = self._firststarttime
2352 origin = self._firststarttime
2346 self.times.append(
2353 self.times.append(
2347 (
2354 (
2348 test.name,
2355 test.name,
2349 endtime[2] - starttime[2], # user space CPU time
2356 endtime[2] - starttime[2], # user space CPU time
2350 endtime[3] - starttime[3], # sys space CPU time
2357 endtime[3] - starttime[3], # sys space CPU time
2351 stopped_time - test.started_time, # real time
2358 stopped_time - test.started_time, # real time
2352 test.started_time - origin, # start date in run context
2359 test.started_time - origin, # start date in run context
2353 stopped_time - origin, # end date in run context
2360 stopped_time - origin, # end date in run context
2354 )
2361 )
2355 )
2362 )
2356
2363
2357 if interrupted:
2364 if interrupted:
2358 with iolock:
2365 with iolock:
2359 self.stream.writeln(
2366 self.stream.writeln(
2360 'INTERRUPTED: %s (after %d seconds)'
2367 'INTERRUPTED: %s (after %d seconds)'
2361 % (test.name, self.times[-1][3])
2368 % (test.name, self.times[-1][3])
2362 )
2369 )
2363
2370
2364
2371
2365 def getTestResult():
2372 def getTestResult():
2366 """
2373 """
2367 Returns the relevant test result
2374 Returns the relevant test result
2368 """
2375 """
2369 if "CUSTOM_TEST_RESULT" in os.environ:
2376 if "CUSTOM_TEST_RESULT" in os.environ:
2370 testresultmodule = __import__(os.environ["CUSTOM_TEST_RESULT"])
2377 testresultmodule = __import__(os.environ["CUSTOM_TEST_RESULT"])
2371 return testresultmodule.TestResult
2378 return testresultmodule.TestResult
2372 else:
2379 else:
2373 return TestResult
2380 return TestResult
2374
2381
2375
2382
2376 class TestSuite(unittest.TestSuite):
2383 class TestSuite(unittest.TestSuite):
2377 """Custom unittest TestSuite that knows how to execute Mercurial tests."""
2384 """Custom unittest TestSuite that knows how to execute Mercurial tests."""
2378
2385
2379 def __init__(
2386 def __init__(
2380 self,
2387 self,
2381 testdir,
2388 testdir,
2382 jobs=1,
2389 jobs=1,
2383 whitelist=None,
2390 whitelist=None,
2384 blacklist=None,
2391 blacklist=None,
2385 keywords=None,
2392 keywords=None,
2386 loop=False,
2393 loop=False,
2387 runs_per_test=1,
2394 runs_per_test=1,
2388 loadtest=None,
2395 loadtest=None,
2389 showchannels=False,
2396 showchannels=False,
2390 *args,
2397 *args,
2391 **kwargs
2398 **kwargs
2392 ):
2399 ):
2393 """Create a new instance that can run tests with a configuration.
2400 """Create a new instance that can run tests with a configuration.
2394
2401
2395 testdir specifies the directory where tests are executed from. This
2402 testdir specifies the directory where tests are executed from. This
2396 is typically the ``tests`` directory from Mercurial's source
2403 is typically the ``tests`` directory from Mercurial's source
2397 repository.
2404 repository.
2398
2405
2399 jobs specifies the number of jobs to run concurrently. Each test
2406 jobs specifies the number of jobs to run concurrently. Each test
2400 executes on its own thread. Tests actually spawn new processes, so
2407 executes on its own thread. Tests actually spawn new processes, so
2401 state mutation should not be an issue.
2408 state mutation should not be an issue.
2402
2409
2403 If there is only one job, it will use the main thread.
2410 If there is only one job, it will use the main thread.
2404
2411
2405 whitelist and blacklist denote tests that have been whitelisted and
2412 whitelist and blacklist denote tests that have been whitelisted and
2406 blacklisted, respectively. These arguments don't belong in TestSuite.
2413 blacklisted, respectively. These arguments don't belong in TestSuite.
2407 Instead, whitelist and blacklist should be handled by the thing that
2414 Instead, whitelist and blacklist should be handled by the thing that
2408 populates the TestSuite with tests. They are present to preserve
2415 populates the TestSuite with tests. They are present to preserve
2409 backwards compatible behavior which reports skipped tests as part
2416 backwards compatible behavior which reports skipped tests as part
2410 of the results.
2417 of the results.
2411
2418
2412 keywords denotes key words that will be used to filter which tests
2419 keywords denotes key words that will be used to filter which tests
2413 to execute. This arguably belongs outside of TestSuite.
2420 to execute. This arguably belongs outside of TestSuite.
2414
2421
2415 loop denotes whether to loop over tests forever.
2422 loop denotes whether to loop over tests forever.
2416 """
2423 """
2417 super(TestSuite, self).__init__(*args, **kwargs)
2424 super(TestSuite, self).__init__(*args, **kwargs)
2418
2425
2419 self._jobs = jobs
2426 self._jobs = jobs
2420 self._whitelist = whitelist
2427 self._whitelist = whitelist
2421 self._blacklist = blacklist
2428 self._blacklist = blacklist
2422 self._keywords = keywords
2429 self._keywords = keywords
2423 self._loop = loop
2430 self._loop = loop
2424 self._runs_per_test = runs_per_test
2431 self._runs_per_test = runs_per_test
2425 self._loadtest = loadtest
2432 self._loadtest = loadtest
2426 self._showchannels = showchannels
2433 self._showchannels = showchannels
2427
2434
2428 def run(self, result):
2435 def run(self, result):
2429 # We have a number of filters that need to be applied. We do this
2436 # We have a number of filters that need to be applied. We do this
2430 # here instead of inside Test because it makes the running logic for
2437 # here instead of inside Test because it makes the running logic for
2431 # Test simpler.
2438 # Test simpler.
2432 tests = []
2439 tests = []
2433 num_tests = [0]
2440 num_tests = [0]
2434 for test in self._tests:
2441 for test in self._tests:
2435
2442
2436 def get():
2443 def get():
2437 num_tests[0] += 1
2444 num_tests[0] += 1
2438 if getattr(test, 'should_reload', False):
2445 if getattr(test, 'should_reload', False):
2439 return self._loadtest(test, num_tests[0])
2446 return self._loadtest(test, num_tests[0])
2440 return test
2447 return test
2441
2448
2442 if not os.path.exists(test.path):
2449 if not os.path.exists(test.path):
2443 result.addSkip(test, "Doesn't exist")
2450 result.addSkip(test, "Doesn't exist")
2444 continue
2451 continue
2445
2452
2446 is_whitelisted = self._whitelist and (
2453 is_whitelisted = self._whitelist and (
2447 test.relpath in self._whitelist or test.bname in self._whitelist
2454 test.relpath in self._whitelist or test.bname in self._whitelist
2448 )
2455 )
2449 if not is_whitelisted:
2456 if not is_whitelisted:
2450 is_blacklisted = self._blacklist and (
2457 is_blacklisted = self._blacklist and (
2451 test.relpath in self._blacklist
2458 test.relpath in self._blacklist
2452 or test.bname in self._blacklist
2459 or test.bname in self._blacklist
2453 )
2460 )
2454 if is_blacklisted:
2461 if is_blacklisted:
2455 result.addSkip(test, 'blacklisted')
2462 result.addSkip(test, 'blacklisted')
2456 continue
2463 continue
2457 if self._keywords:
2464 if self._keywords:
2458 with open(test.path, 'rb') as f:
2465 with open(test.path, 'rb') as f:
2459 t = f.read().lower() + test.bname.lower()
2466 t = f.read().lower() + test.bname.lower()
2460 ignored = False
2467 ignored = False
2461 for k in self._keywords.lower().split():
2468 for k in self._keywords.lower().split():
2462 if k not in t:
2469 if k not in t:
2463 result.addIgnore(test, "doesn't match keyword")
2470 result.addIgnore(test, "doesn't match keyword")
2464 ignored = True
2471 ignored = True
2465 break
2472 break
2466
2473
2467 if ignored:
2474 if ignored:
2468 continue
2475 continue
2469 for _ in xrange(self._runs_per_test):
2476 for _ in xrange(self._runs_per_test):
2470 tests.append(get())
2477 tests.append(get())
2471
2478
2472 runtests = list(tests)
2479 runtests = list(tests)
2473 done = queue.Queue()
2480 done = queue.Queue()
2474 running = 0
2481 running = 0
2475
2482
2476 channels = [""] * self._jobs
2483 channels = [""] * self._jobs
2477
2484
2478 def job(test, result):
2485 def job(test, result):
2479 for n, v in enumerate(channels):
2486 for n, v in enumerate(channels):
2480 if not v:
2487 if not v:
2481 channel = n
2488 channel = n
2482 break
2489 break
2483 else:
2490 else:
2484 raise ValueError('Could not find output channel')
2491 raise ValueError('Could not find output channel')
2485 channels[channel] = "=" + test.name[5:].split(".")[0]
2492 channels[channel] = "=" + test.name[5:].split(".")[0]
2486 try:
2493 try:
2487 test(result)
2494 test(result)
2488 done.put(None)
2495 done.put(None)
2489 except KeyboardInterrupt:
2496 except KeyboardInterrupt:
2490 pass
2497 pass
2491 except: # re-raises
2498 except: # re-raises
2492 done.put(('!', test, 'run-test raised an error, see traceback'))
2499 done.put(('!', test, 'run-test raised an error, see traceback'))
2493 raise
2500 raise
2494 finally:
2501 finally:
2495 try:
2502 try:
2496 channels[channel] = ''
2503 channels[channel] = ''
2497 except IndexError:
2504 except IndexError:
2498 pass
2505 pass
2499
2506
2500 def stat():
2507 def stat():
2501 count = 0
2508 count = 0
2502 while channels:
2509 while channels:
2503 d = '\n%03s ' % count
2510 d = '\n%03s ' % count
2504 for n, v in enumerate(channels):
2511 for n, v in enumerate(channels):
2505 if v:
2512 if v:
2506 d += v[0]
2513 d += v[0]
2507 channels[n] = v[1:] or '.'
2514 channels[n] = v[1:] or '.'
2508 else:
2515 else:
2509 d += ' '
2516 d += ' '
2510 d += ' '
2517 d += ' '
2511 with iolock:
2518 with iolock:
2512 sys.stdout.write(d + ' ')
2519 sys.stdout.write(d + ' ')
2513 sys.stdout.flush()
2520 sys.stdout.flush()
2514 for x in xrange(10):
2521 for x in xrange(10):
2515 if channels:
2522 if channels:
2516 time.sleep(0.1)
2523 time.sleep(0.1)
2517 count += 1
2524 count += 1
2518
2525
2519 stoppedearly = False
2526 stoppedearly = False
2520
2527
2521 if self._showchannels:
2528 if self._showchannels:
2522 statthread = threading.Thread(target=stat, name="stat")
2529 statthread = threading.Thread(target=stat, name="stat")
2523 statthread.start()
2530 statthread.start()
2524
2531
2525 try:
2532 try:
2526 while tests or running:
2533 while tests or running:
2527 if not done.empty() or running == self._jobs or not tests:
2534 if not done.empty() or running == self._jobs or not tests:
2528 try:
2535 try:
2529 done.get(True, 1)
2536 done.get(True, 1)
2530 running -= 1
2537 running -= 1
2531 if result and result.shouldStop:
2538 if result and result.shouldStop:
2532 stoppedearly = True
2539 stoppedearly = True
2533 break
2540 break
2534 except queue.Empty:
2541 except queue.Empty:
2535 continue
2542 continue
2536 if tests and not running == self._jobs:
2543 if tests and not running == self._jobs:
2537 test = tests.pop(0)
2544 test = tests.pop(0)
2538 if self._loop:
2545 if self._loop:
2539 if getattr(test, 'should_reload', False):
2546 if getattr(test, 'should_reload', False):
2540 num_tests[0] += 1
2547 num_tests[0] += 1
2541 tests.append(self._loadtest(test, num_tests[0]))
2548 tests.append(self._loadtest(test, num_tests[0]))
2542 else:
2549 else:
2543 tests.append(test)
2550 tests.append(test)
2544 if self._jobs == 1:
2551 if self._jobs == 1:
2545 job(test, result)
2552 job(test, result)
2546 else:
2553 else:
2547 t = threading.Thread(
2554 t = threading.Thread(
2548 target=job, name=test.name, args=(test, result)
2555 target=job, name=test.name, args=(test, result)
2549 )
2556 )
2550 t.start()
2557 t.start()
2551 running += 1
2558 running += 1
2552
2559
2553 # If we stop early we still need to wait on started tests to
2560 # If we stop early we still need to wait on started tests to
2554 # finish. Otherwise, there is a race between the test completing
2561 # finish. Otherwise, there is a race between the test completing
2555 # and the test's cleanup code running. This could result in the
2562 # and the test's cleanup code running. This could result in the
2556 # test reporting incorrect.
2563 # test reporting incorrect.
2557 if stoppedearly:
2564 if stoppedearly:
2558 while running:
2565 while running:
2559 try:
2566 try:
2560 done.get(True, 1)
2567 done.get(True, 1)
2561 running -= 1
2568 running -= 1
2562 except queue.Empty:
2569 except queue.Empty:
2563 continue
2570 continue
2564 except KeyboardInterrupt:
2571 except KeyboardInterrupt:
2565 for test in runtests:
2572 for test in runtests:
2566 test.abort()
2573 test.abort()
2567
2574
2568 channels = []
2575 channels = []
2569
2576
2570 return result
2577 return result
2571
2578
2572
2579
2573 # Save the most recent 5 wall-clock runtimes of each test to a
2580 # Save the most recent 5 wall-clock runtimes of each test to a
2574 # human-readable text file named .testtimes. Tests are sorted
2581 # human-readable text file named .testtimes. Tests are sorted
2575 # alphabetically, while times for each test are listed from oldest to
2582 # alphabetically, while times for each test are listed from oldest to
2576 # newest.
2583 # newest.
2577
2584
2578
2585
2579 def loadtimes(outputdir):
2586 def loadtimes(outputdir):
2580 times = []
2587 times = []
2581 try:
2588 try:
2582 with open(os.path.join(outputdir, b'.testtimes')) as fp:
2589 with open(os.path.join(outputdir, b'.testtimes')) as fp:
2583 for line in fp:
2590 for line in fp:
2584 m = re.match('(.*?) ([0-9. ]+)', line)
2591 m = re.match('(.*?) ([0-9. ]+)', line)
2585 times.append(
2592 times.append(
2586 (m.group(1), [float(t) for t in m.group(2).split()])
2593 (m.group(1), [float(t) for t in m.group(2).split()])
2587 )
2594 )
2588 except IOError as err:
2595 except IOError as err:
2589 if err.errno != errno.ENOENT:
2596 if err.errno != errno.ENOENT:
2590 raise
2597 raise
2591 return times
2598 return times
2592
2599
2593
2600
2594 def savetimes(outputdir, result):
2601 def savetimes(outputdir, result):
2595 saved = dict(loadtimes(outputdir))
2602 saved = dict(loadtimes(outputdir))
2596 maxruns = 5
2603 maxruns = 5
2597 skipped = {str(t[0]) for t in result.skipped}
2604 skipped = {str(t[0]) for t in result.skipped}
2598 for tdata in result.times:
2605 for tdata in result.times:
2599 test, real = tdata[0], tdata[3]
2606 test, real = tdata[0], tdata[3]
2600 if test not in skipped:
2607 if test not in skipped:
2601 ts = saved.setdefault(test, [])
2608 ts = saved.setdefault(test, [])
2602 ts.append(real)
2609 ts.append(real)
2603 ts[:] = ts[-maxruns:]
2610 ts[:] = ts[-maxruns:]
2604
2611
2605 fd, tmpname = tempfile.mkstemp(
2612 fd, tmpname = tempfile.mkstemp(
2606 prefix=b'.testtimes', dir=outputdir, text=True
2613 prefix=b'.testtimes', dir=outputdir, text=True
2607 )
2614 )
2608 with os.fdopen(fd, 'w') as fp:
2615 with os.fdopen(fd, 'w') as fp:
2609 for name, ts in sorted(saved.items()):
2616 for name, ts in sorted(saved.items()):
2610 fp.write('%s %s\n' % (name, ' '.join(['%.3f' % (t,) for t in ts])))
2617 fp.write('%s %s\n' % (name, ' '.join(['%.3f' % (t,) for t in ts])))
2611 timepath = os.path.join(outputdir, b'.testtimes')
2618 timepath = os.path.join(outputdir, b'.testtimes')
2612 try:
2619 try:
2613 os.unlink(timepath)
2620 os.unlink(timepath)
2614 except OSError:
2621 except OSError:
2615 pass
2622 pass
2616 try:
2623 try:
2617 os.rename(tmpname, timepath)
2624 os.rename(tmpname, timepath)
2618 except OSError:
2625 except OSError:
2619 pass
2626 pass
2620
2627
2621
2628
2622 class TextTestRunner(unittest.TextTestRunner):
2629 class TextTestRunner(unittest.TextTestRunner):
2623 """Custom unittest test runner that uses appropriate settings."""
2630 """Custom unittest test runner that uses appropriate settings."""
2624
2631
2625 def __init__(self, runner, *args, **kwargs):
2632 def __init__(self, runner, *args, **kwargs):
2626 super(TextTestRunner, self).__init__(*args, **kwargs)
2633 super(TextTestRunner, self).__init__(*args, **kwargs)
2627
2634
2628 self._runner = runner
2635 self._runner = runner
2629
2636
2630 self._result = getTestResult()(
2637 self._result = getTestResult()(
2631 self._runner.options, self.stream, self.descriptions, self.verbosity
2638 self._runner.options, self.stream, self.descriptions, self.verbosity
2632 )
2639 )
2633
2640
2634 def listtests(self, test):
2641 def listtests(self, test):
2635 test = sorted(test, key=lambda t: t.name)
2642 test = sorted(test, key=lambda t: t.name)
2636
2643
2637 self._result.onStart(test)
2644 self._result.onStart(test)
2638
2645
2639 for t in test:
2646 for t in test:
2640 print(t.name)
2647 print(t.name)
2641 self._result.addSuccess(t)
2648 self._result.addSuccess(t)
2642
2649
2643 if self._runner.options.xunit:
2650 if self._runner.options.xunit:
2644 with open(self._runner.options.xunit, "wb") as xuf:
2651 with open(self._runner.options.xunit, "wb") as xuf:
2645 self._writexunit(self._result, xuf)
2652 self._writexunit(self._result, xuf)
2646
2653
2647 if self._runner.options.json:
2654 if self._runner.options.json:
2648 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
2655 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
2649 with open(jsonpath, 'w') as fp:
2656 with open(jsonpath, 'w') as fp:
2650 self._writejson(self._result, fp)
2657 self._writejson(self._result, fp)
2651
2658
2652 return self._result
2659 return self._result
2653
2660
2654 def run(self, test):
2661 def run(self, test):
2655 self._result.onStart(test)
2662 self._result.onStart(test)
2656 test(self._result)
2663 test(self._result)
2657
2664
2658 failed = len(self._result.failures)
2665 failed = len(self._result.failures)
2659 skipped = len(self._result.skipped)
2666 skipped = len(self._result.skipped)
2660 ignored = len(self._result.ignored)
2667 ignored = len(self._result.ignored)
2661
2668
2662 with iolock:
2669 with iolock:
2663 self.stream.writeln('')
2670 self.stream.writeln('')
2664
2671
2665 if not self._runner.options.noskips:
2672 if not self._runner.options.noskips:
2666 for test, msg in sorted(
2673 for test, msg in sorted(
2667 self._result.skipped, key=lambda s: s[0].name
2674 self._result.skipped, key=lambda s: s[0].name
2668 ):
2675 ):
2669 formatted = 'Skipped %s: %s\n' % (test.name, msg)
2676 formatted = 'Skipped %s: %s\n' % (test.name, msg)
2670 msg = highlightmsg(formatted, self._result.color)
2677 msg = highlightmsg(formatted, self._result.color)
2671 self.stream.write(msg)
2678 self.stream.write(msg)
2672 for test, msg in sorted(
2679 for test, msg in sorted(
2673 self._result.failures, key=lambda f: f[0].name
2680 self._result.failures, key=lambda f: f[0].name
2674 ):
2681 ):
2675 formatted = 'Failed %s: %s\n' % (test.name, msg)
2682 formatted = 'Failed %s: %s\n' % (test.name, msg)
2676 self.stream.write(highlightmsg(formatted, self._result.color))
2683 self.stream.write(highlightmsg(formatted, self._result.color))
2677 for test, msg in sorted(
2684 for test, msg in sorted(
2678 self._result.errors, key=lambda e: e[0].name
2685 self._result.errors, key=lambda e: e[0].name
2679 ):
2686 ):
2680 self.stream.writeln('Errored %s: %s' % (test.name, msg))
2687 self.stream.writeln('Errored %s: %s' % (test.name, msg))
2681
2688
2682 if self._runner.options.xunit:
2689 if self._runner.options.xunit:
2683 with open(self._runner.options.xunit, "wb") as xuf:
2690 with open(self._runner.options.xunit, "wb") as xuf:
2684 self._writexunit(self._result, xuf)
2691 self._writexunit(self._result, xuf)
2685
2692
2686 if self._runner.options.json:
2693 if self._runner.options.json:
2687 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
2694 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
2688 with open(jsonpath, 'w') as fp:
2695 with open(jsonpath, 'w') as fp:
2689 self._writejson(self._result, fp)
2696 self._writejson(self._result, fp)
2690
2697
2691 self._runner._checkhglib('Tested')
2698 self._runner._checkhglib('Tested')
2692
2699
2693 savetimes(self._runner._outputdir, self._result)
2700 savetimes(self._runner._outputdir, self._result)
2694
2701
2695 if failed and self._runner.options.known_good_rev:
2702 if failed and self._runner.options.known_good_rev:
2696 self._bisecttests(t for t, m in self._result.failures)
2703 self._bisecttests(t for t, m in self._result.failures)
2697 self.stream.writeln(
2704 self.stream.writeln(
2698 '# Ran %d tests, %d skipped, %d failed.'
2705 '# Ran %d tests, %d skipped, %d failed.'
2699 % (self._result.testsRun, skipped + ignored, failed)
2706 % (self._result.testsRun, skipped + ignored, failed)
2700 )
2707 )
2701 if failed:
2708 if failed:
2702 self.stream.writeln(
2709 self.stream.writeln(
2703 'python hash seed: %s' % os.environ['PYTHONHASHSEED']
2710 'python hash seed: %s' % os.environ['PYTHONHASHSEED']
2704 )
2711 )
2705 if self._runner.options.time:
2712 if self._runner.options.time:
2706 self.printtimes(self._result.times)
2713 self.printtimes(self._result.times)
2707
2714
2708 if self._runner.options.exceptions:
2715 if self._runner.options.exceptions:
2709 exceptions = aggregateexceptions(
2716 exceptions = aggregateexceptions(
2710 os.path.join(self._runner._outputdir, b'exceptions')
2717 os.path.join(self._runner._outputdir, b'exceptions')
2711 )
2718 )
2712
2719
2713 self.stream.writeln('Exceptions Report:')
2720 self.stream.writeln('Exceptions Report:')
2714 self.stream.writeln(
2721 self.stream.writeln(
2715 '%d total from %d frames'
2722 '%d total from %d frames'
2716 % (exceptions['total'], len(exceptions['exceptioncounts']))
2723 % (exceptions['total'], len(exceptions['exceptioncounts']))
2717 )
2724 )
2718 combined = exceptions['combined']
2725 combined = exceptions['combined']
2719 for key in sorted(combined, key=combined.get, reverse=True):
2726 for key in sorted(combined, key=combined.get, reverse=True):
2720 frame, line, exc = key
2727 frame, line, exc = key
2721 totalcount, testcount, leastcount, leasttest = combined[key]
2728 totalcount, testcount, leastcount, leasttest = combined[key]
2722
2729
2723 self.stream.writeln(
2730 self.stream.writeln(
2724 '%d (%d tests)\t%s: %s (%s - %d total)'
2731 '%d (%d tests)\t%s: %s (%s - %d total)'
2725 % (
2732 % (
2726 totalcount,
2733 totalcount,
2727 testcount,
2734 testcount,
2728 frame,
2735 frame,
2729 exc,
2736 exc,
2730 leasttest,
2737 leasttest,
2731 leastcount,
2738 leastcount,
2732 )
2739 )
2733 )
2740 )
2734
2741
2735 self.stream.flush()
2742 self.stream.flush()
2736
2743
2737 return self._result
2744 return self._result
2738
2745
2739 def _bisecttests(self, tests):
2746 def _bisecttests(self, tests):
2740 bisectcmd = ['hg', 'bisect']
2747 bisectcmd = ['hg', 'bisect']
2741 bisectrepo = self._runner.options.bisect_repo
2748 bisectrepo = self._runner.options.bisect_repo
2742 if bisectrepo:
2749 if bisectrepo:
2743 bisectcmd.extend(['-R', os.path.abspath(bisectrepo)])
2750 bisectcmd.extend(['-R', os.path.abspath(bisectrepo)])
2744
2751
2745 def pread(args):
2752 def pread(args):
2746 env = os.environ.copy()
2753 env = os.environ.copy()
2747 env['HGPLAIN'] = '1'
2754 env['HGPLAIN'] = '1'
2748 p = subprocess.Popen(
2755 p = subprocess.Popen(
2749 args, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, env=env
2756 args, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, env=env
2750 )
2757 )
2751 data = p.stdout.read()
2758 data = p.stdout.read()
2752 p.wait()
2759 p.wait()
2753 return data
2760 return data
2754
2761
2755 for test in tests:
2762 for test in tests:
2756 pread(bisectcmd + ['--reset']),
2763 pread(bisectcmd + ['--reset']),
2757 pread(bisectcmd + ['--bad', '.'])
2764 pread(bisectcmd + ['--bad', '.'])
2758 pread(bisectcmd + ['--good', self._runner.options.known_good_rev])
2765 pread(bisectcmd + ['--good', self._runner.options.known_good_rev])
2759 # TODO: we probably need to forward more options
2766 # TODO: we probably need to forward more options
2760 # that alter hg's behavior inside the tests.
2767 # that alter hg's behavior inside the tests.
2761 opts = ''
2768 opts = ''
2762 withhg = self._runner.options.with_hg
2769 withhg = self._runner.options.with_hg
2763 if withhg:
2770 if withhg:
2764 opts += ' --with-hg=%s ' % shellquote(_bytes2sys(withhg))
2771 opts += ' --with-hg=%s ' % shellquote(_bytes2sys(withhg))
2765 rtc = '%s %s %s %s' % (sysexecutable, sys.argv[0], opts, test)
2772 rtc = '%s %s %s %s' % (sysexecutable, sys.argv[0], opts, test)
2766 data = pread(bisectcmd + ['--command', rtc])
2773 data = pread(bisectcmd + ['--command', rtc])
2767 m = re.search(
2774 m = re.search(
2768 (
2775 (
2769 br'\nThe first (?P<goodbad>bad|good) revision '
2776 br'\nThe first (?P<goodbad>bad|good) revision '
2770 br'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n'
2777 br'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n'
2771 br'summary: +(?P<summary>[^\n]+)\n'
2778 br'summary: +(?P<summary>[^\n]+)\n'
2772 ),
2779 ),
2773 data,
2780 data,
2774 (re.MULTILINE | re.DOTALL),
2781 (re.MULTILINE | re.DOTALL),
2775 )
2782 )
2776 if m is None:
2783 if m is None:
2777 self.stream.writeln(
2784 self.stream.writeln(
2778 'Failed to identify failure point for %s' % test
2785 'Failed to identify failure point for %s' % test
2779 )
2786 )
2780 continue
2787 continue
2781 dat = m.groupdict()
2788 dat = m.groupdict()
2782 verb = 'broken' if dat['goodbad'] == b'bad' else 'fixed'
2789 verb = 'broken' if dat['goodbad'] == b'bad' else 'fixed'
2783 self.stream.writeln(
2790 self.stream.writeln(
2784 '%s %s by %s (%s)'
2791 '%s %s by %s (%s)'
2785 % (
2792 % (
2786 test,
2793 test,
2787 verb,
2794 verb,
2788 dat['node'].decode('ascii'),
2795 dat['node'].decode('ascii'),
2789 dat['summary'].decode('utf8', 'ignore'),
2796 dat['summary'].decode('utf8', 'ignore'),
2790 )
2797 )
2791 )
2798 )
2792
2799
2793 def printtimes(self, times):
2800 def printtimes(self, times):
2794 # iolock held by run
2801 # iolock held by run
2795 self.stream.writeln('# Producing time report')
2802 self.stream.writeln('# Producing time report')
2796 times.sort(key=lambda t: (t[3]))
2803 times.sort(key=lambda t: (t[3]))
2797 cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s'
2804 cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s'
2798 self.stream.writeln(
2805 self.stream.writeln(
2799 '%-7s %-7s %-7s %-7s %-7s %s'
2806 '%-7s %-7s %-7s %-7s %-7s %s'
2800 % ('start', 'end', 'cuser', 'csys', 'real', 'Test')
2807 % ('start', 'end', 'cuser', 'csys', 'real', 'Test')
2801 )
2808 )
2802 for tdata in times:
2809 for tdata in times:
2803 test = tdata[0]
2810 test = tdata[0]
2804 cuser, csys, real, start, end = tdata[1:6]
2811 cuser, csys, real, start, end = tdata[1:6]
2805 self.stream.writeln(cols % (start, end, cuser, csys, real, test))
2812 self.stream.writeln(cols % (start, end, cuser, csys, real, test))
2806
2813
2807 @staticmethod
2814 @staticmethod
2808 def _writexunit(result, outf):
2815 def _writexunit(result, outf):
2809 # See http://llg.cubic.org/docs/junit/ for a reference.
2816 # See http://llg.cubic.org/docs/junit/ for a reference.
2810 timesd = {t[0]: t[3] for t in result.times}
2817 timesd = {t[0]: t[3] for t in result.times}
2811 doc = minidom.Document()
2818 doc = minidom.Document()
2812 s = doc.createElement('testsuite')
2819 s = doc.createElement('testsuite')
2813 s.setAttribute('errors', "0") # TODO
2820 s.setAttribute('errors', "0") # TODO
2814 s.setAttribute('failures', str(len(result.failures)))
2821 s.setAttribute('failures', str(len(result.failures)))
2815 s.setAttribute('name', 'run-tests')
2822 s.setAttribute('name', 'run-tests')
2816 s.setAttribute(
2823 s.setAttribute(
2817 'skipped', str(len(result.skipped) + len(result.ignored))
2824 'skipped', str(len(result.skipped) + len(result.ignored))
2818 )
2825 )
2819 s.setAttribute('tests', str(result.testsRun))
2826 s.setAttribute('tests', str(result.testsRun))
2820 doc.appendChild(s)
2827 doc.appendChild(s)
2821 for tc in result.successes:
2828 for tc in result.successes:
2822 t = doc.createElement('testcase')
2829 t = doc.createElement('testcase')
2823 t.setAttribute('name', tc.name)
2830 t.setAttribute('name', tc.name)
2824 tctime = timesd.get(tc.name)
2831 tctime = timesd.get(tc.name)
2825 if tctime is not None:
2832 if tctime is not None:
2826 t.setAttribute('time', '%.3f' % tctime)
2833 t.setAttribute('time', '%.3f' % tctime)
2827 s.appendChild(t)
2834 s.appendChild(t)
2828 for tc, err in sorted(result.faildata.items()):
2835 for tc, err in sorted(result.faildata.items()):
2829 t = doc.createElement('testcase')
2836 t = doc.createElement('testcase')
2830 t.setAttribute('name', tc)
2837 t.setAttribute('name', tc)
2831 tctime = timesd.get(tc)
2838 tctime = timesd.get(tc)
2832 if tctime is not None:
2839 if tctime is not None:
2833 t.setAttribute('time', '%.3f' % tctime)
2840 t.setAttribute('time', '%.3f' % tctime)
2834 # createCDATASection expects a unicode or it will
2841 # createCDATASection expects a unicode or it will
2835 # convert using default conversion rules, which will
2842 # convert using default conversion rules, which will
2836 # fail if string isn't ASCII.
2843 # fail if string isn't ASCII.
2837 err = cdatasafe(err).decode('utf-8', 'replace')
2844 err = cdatasafe(err).decode('utf-8', 'replace')
2838 cd = doc.createCDATASection(err)
2845 cd = doc.createCDATASection(err)
2839 # Use 'failure' here instead of 'error' to match errors = 0,
2846 # Use 'failure' here instead of 'error' to match errors = 0,
2840 # failures = len(result.failures) in the testsuite element.
2847 # failures = len(result.failures) in the testsuite element.
2841 failelem = doc.createElement('failure')
2848 failelem = doc.createElement('failure')
2842 failelem.setAttribute('message', 'output changed')
2849 failelem.setAttribute('message', 'output changed')
2843 failelem.setAttribute('type', 'output-mismatch')
2850 failelem.setAttribute('type', 'output-mismatch')
2844 failelem.appendChild(cd)
2851 failelem.appendChild(cd)
2845 t.appendChild(failelem)
2852 t.appendChild(failelem)
2846 s.appendChild(t)
2853 s.appendChild(t)
2847 for tc, message in result.skipped:
2854 for tc, message in result.skipped:
2848 # According to the schema, 'skipped' has no attributes. So store
2855 # According to the schema, 'skipped' has no attributes. So store
2849 # the skip message as a text node instead.
2856 # the skip message as a text node instead.
2850 t = doc.createElement('testcase')
2857 t = doc.createElement('testcase')
2851 t.setAttribute('name', tc.name)
2858 t.setAttribute('name', tc.name)
2852 binmessage = message.encode('utf-8')
2859 binmessage = message.encode('utf-8')
2853 message = cdatasafe(binmessage).decode('utf-8', 'replace')
2860 message = cdatasafe(binmessage).decode('utf-8', 'replace')
2854 cd = doc.createCDATASection(message)
2861 cd = doc.createCDATASection(message)
2855 skipelem = doc.createElement('skipped')
2862 skipelem = doc.createElement('skipped')
2856 skipelem.appendChild(cd)
2863 skipelem.appendChild(cd)
2857 t.appendChild(skipelem)
2864 t.appendChild(skipelem)
2858 s.appendChild(t)
2865 s.appendChild(t)
2859 outf.write(doc.toprettyxml(indent=' ', encoding='utf-8'))
2866 outf.write(doc.toprettyxml(indent=' ', encoding='utf-8'))
2860
2867
2861 @staticmethod
2868 @staticmethod
2862 def _writejson(result, outf):
2869 def _writejson(result, outf):
2863 timesd = {}
2870 timesd = {}
2864 for tdata in result.times:
2871 for tdata in result.times:
2865 test = tdata[0]
2872 test = tdata[0]
2866 timesd[test] = tdata[1:]
2873 timesd[test] = tdata[1:]
2867
2874
2868 outcome = {}
2875 outcome = {}
2869 groups = [
2876 groups = [
2870 ('success', ((tc, None) for tc in result.successes)),
2877 ('success', ((tc, None) for tc in result.successes)),
2871 ('failure', result.failures),
2878 ('failure', result.failures),
2872 ('skip', result.skipped),
2879 ('skip', result.skipped),
2873 ]
2880 ]
2874 for res, testcases in groups:
2881 for res, testcases in groups:
2875 for tc, __ in testcases:
2882 for tc, __ in testcases:
2876 if tc.name in timesd:
2883 if tc.name in timesd:
2877 diff = result.faildata.get(tc.name, b'')
2884 diff = result.faildata.get(tc.name, b'')
2878 try:
2885 try:
2879 diff = diff.decode('unicode_escape')
2886 diff = diff.decode('unicode_escape')
2880 except UnicodeDecodeError as e:
2887 except UnicodeDecodeError as e:
2881 diff = '%r decoding diff, sorry' % e
2888 diff = '%r decoding diff, sorry' % e
2882 tres = {
2889 tres = {
2883 'result': res,
2890 'result': res,
2884 'time': ('%0.3f' % timesd[tc.name][2]),
2891 'time': ('%0.3f' % timesd[tc.name][2]),
2885 'cuser': ('%0.3f' % timesd[tc.name][0]),
2892 'cuser': ('%0.3f' % timesd[tc.name][0]),
2886 'csys': ('%0.3f' % timesd[tc.name][1]),
2893 'csys': ('%0.3f' % timesd[tc.name][1]),
2887 'start': ('%0.3f' % timesd[tc.name][3]),
2894 'start': ('%0.3f' % timesd[tc.name][3]),
2888 'end': ('%0.3f' % timesd[tc.name][4]),
2895 'end': ('%0.3f' % timesd[tc.name][4]),
2889 'diff': diff,
2896 'diff': diff,
2890 }
2897 }
2891 else:
2898 else:
2892 # blacklisted test
2899 # blacklisted test
2893 tres = {'result': res}
2900 tres = {'result': res}
2894
2901
2895 outcome[tc.name] = tres
2902 outcome[tc.name] = tres
2896 jsonout = json.dumps(
2903 jsonout = json.dumps(
2897 outcome, sort_keys=True, indent=4, separators=(',', ': ')
2904 outcome, sort_keys=True, indent=4, separators=(',', ': ')
2898 )
2905 )
2899 outf.writelines(("testreport =", jsonout))
2906 outf.writelines(("testreport =", jsonout))
2900
2907
2901
2908
2902 def sorttests(testdescs, previoustimes, shuffle=False):
2909 def sorttests(testdescs, previoustimes, shuffle=False):
2903 """Do an in-place sort of tests."""
2910 """Do an in-place sort of tests."""
2904 if shuffle:
2911 if shuffle:
2905 random.shuffle(testdescs)
2912 random.shuffle(testdescs)
2906 return
2913 return
2907
2914
2908 if previoustimes:
2915 if previoustimes:
2909
2916
2910 def sortkey(f):
2917 def sortkey(f):
2911 f = f['path']
2918 f = f['path']
2912 if f in previoustimes:
2919 if f in previoustimes:
2913 # Use most recent time as estimate
2920 # Use most recent time as estimate
2914 return -(previoustimes[f][-1])
2921 return -(previoustimes[f][-1])
2915 else:
2922 else:
2916 # Default to a rather arbitrary value of 1 second for new tests
2923 # Default to a rather arbitrary value of 1 second for new tests
2917 return -1.0
2924 return -1.0
2918
2925
2919 else:
2926 else:
2920 # keywords for slow tests
2927 # keywords for slow tests
2921 slow = {
2928 slow = {
2922 b'svn': 10,
2929 b'svn': 10,
2923 b'cvs': 10,
2930 b'cvs': 10,
2924 b'hghave': 10,
2931 b'hghave': 10,
2925 b'largefiles-update': 10,
2932 b'largefiles-update': 10,
2926 b'run-tests': 10,
2933 b'run-tests': 10,
2927 b'corruption': 10,
2934 b'corruption': 10,
2928 b'race': 10,
2935 b'race': 10,
2929 b'i18n': 10,
2936 b'i18n': 10,
2930 b'check': 100,
2937 b'check': 100,
2931 b'gendoc': 100,
2938 b'gendoc': 100,
2932 b'contrib-perf': 200,
2939 b'contrib-perf': 200,
2933 b'merge-combination': 100,
2940 b'merge-combination': 100,
2934 }
2941 }
2935 perf = {}
2942 perf = {}
2936
2943
2937 def sortkey(f):
2944 def sortkey(f):
2938 # run largest tests first, as they tend to take the longest
2945 # run largest tests first, as they tend to take the longest
2939 f = f['path']
2946 f = f['path']
2940 try:
2947 try:
2941 return perf[f]
2948 return perf[f]
2942 except KeyError:
2949 except KeyError:
2943 try:
2950 try:
2944 val = -os.stat(f).st_size
2951 val = -os.stat(f).st_size
2945 except OSError as e:
2952 except OSError as e:
2946 if e.errno != errno.ENOENT:
2953 if e.errno != errno.ENOENT:
2947 raise
2954 raise
2948 perf[f] = -1e9 # file does not exist, tell early
2955 perf[f] = -1e9 # file does not exist, tell early
2949 return -1e9
2956 return -1e9
2950 for kw, mul in slow.items():
2957 for kw, mul in slow.items():
2951 if kw in f:
2958 if kw in f:
2952 val *= mul
2959 val *= mul
2953 if f.endswith(b'.py'):
2960 if f.endswith(b'.py'):
2954 val /= 10.0
2961 val /= 10.0
2955 perf[f] = val / 1000.0
2962 perf[f] = val / 1000.0
2956 return perf[f]
2963 return perf[f]
2957
2964
2958 testdescs.sort(key=sortkey)
2965 testdescs.sort(key=sortkey)
2959
2966
2960
2967
2961 class TestRunner(object):
2968 class TestRunner(object):
2962 """Holds context for executing tests.
2969 """Holds context for executing tests.
2963
2970
2964 Tests rely on a lot of state. This object holds it for them.
2971 Tests rely on a lot of state. This object holds it for them.
2965 """
2972 """
2966
2973
2967 # Programs required to run tests.
2974 # Programs required to run tests.
2968 REQUIREDTOOLS = [
2975 REQUIREDTOOLS = [
2969 b'diff',
2976 b'diff',
2970 b'grep',
2977 b'grep',
2971 b'unzip',
2978 b'unzip',
2972 b'gunzip',
2979 b'gunzip',
2973 b'bunzip2',
2980 b'bunzip2',
2974 b'sed',
2981 b'sed',
2975 ]
2982 ]
2976
2983
2977 # Maps file extensions to test class.
2984 # Maps file extensions to test class.
2978 TESTTYPES = [
2985 TESTTYPES = [
2979 (b'.py', PythonTest),
2986 (b'.py', PythonTest),
2980 (b'.t', TTest),
2987 (b'.t', TTest),
2981 ]
2988 ]
2982
2989
2983 def __init__(self):
2990 def __init__(self):
2984 self.options = None
2991 self.options = None
2985 self._hgroot = None
2992 self._hgroot = None
2986 self._testdir = None
2993 self._testdir = None
2987 self._outputdir = None
2994 self._outputdir = None
2988 self._hgtmp = None
2995 self._hgtmp = None
2989 self._installdir = None
2996 self._installdir = None
2990 self._bindir = None
2997 self._bindir = None
2991 self._tmpbindir = None
2998 self._tmpbindir = None
2992 self._pythondir = None
2999 self._pythondir = None
2993 self._coveragefile = None
3000 self._coveragefile = None
2994 self._createdfiles = []
3001 self._createdfiles = []
2995 self._hgcommand = None
3002 self._hgcommand = None
2996 self._hgpath = None
3003 self._hgpath = None
2997 self._portoffset = 0
3004 self._portoffset = 0
2998 self._ports = {}
3005 self._ports = {}
2999
3006
3000 def run(self, args, parser=None):
3007 def run(self, args, parser=None):
3001 """Run the test suite."""
3008 """Run the test suite."""
3002 oldmask = os.umask(0o22)
3009 oldmask = os.umask(0o22)
3003 try:
3010 try:
3004 parser = parser or getparser()
3011 parser = parser or getparser()
3005 options = parseargs(args, parser)
3012 options = parseargs(args, parser)
3006 tests = [_sys2bytes(a) for a in options.tests]
3013 tests = [_sys2bytes(a) for a in options.tests]
3007 if options.test_list is not None:
3014 if options.test_list is not None:
3008 for listfile in options.test_list:
3015 for listfile in options.test_list:
3009 with open(listfile, 'rb') as f:
3016 with open(listfile, 'rb') as f:
3010 tests.extend(t for t in f.read().splitlines() if t)
3017 tests.extend(t for t in f.read().splitlines() if t)
3011 self.options = options
3018 self.options = options
3012
3019
3013 self._checktools()
3020 self._checktools()
3014 testdescs = self.findtests(tests)
3021 testdescs = self.findtests(tests)
3015 if options.profile_runner:
3022 if options.profile_runner:
3016 import statprof
3023 import statprof
3017
3024
3018 statprof.start()
3025 statprof.start()
3019 result = self._run(testdescs)
3026 result = self._run(testdescs)
3020 if options.profile_runner:
3027 if options.profile_runner:
3021 statprof.stop()
3028 statprof.stop()
3022 statprof.display()
3029 statprof.display()
3023 return result
3030 return result
3024
3031
3025 finally:
3032 finally:
3026 os.umask(oldmask)
3033 os.umask(oldmask)
3027
3034
3028 def _run(self, testdescs):
3035 def _run(self, testdescs):
3029 testdir = getcwdb()
3036 testdir = getcwdb()
3030 self._testdir = osenvironb[b'TESTDIR'] = getcwdb()
3037 self._testdir = osenvironb[b'TESTDIR'] = getcwdb()
3031 # assume all tests in same folder for now
3038 # assume all tests in same folder for now
3032 if testdescs:
3039 if testdescs:
3033 pathname = os.path.dirname(testdescs[0]['path'])
3040 pathname = os.path.dirname(testdescs[0]['path'])
3034 if pathname:
3041 if pathname:
3035 testdir = os.path.join(testdir, pathname)
3042 testdir = os.path.join(testdir, pathname)
3036 self._testdir = osenvironb[b'TESTDIR'] = testdir
3043 self._testdir = osenvironb[b'TESTDIR'] = testdir
3037 if self.options.outputdir:
3044 if self.options.outputdir:
3038 self._outputdir = canonpath(_sys2bytes(self.options.outputdir))
3045 self._outputdir = canonpath(_sys2bytes(self.options.outputdir))
3039 else:
3046 else:
3040 self._outputdir = getcwdb()
3047 self._outputdir = getcwdb()
3041 if testdescs and pathname:
3048 if testdescs and pathname:
3042 self._outputdir = os.path.join(self._outputdir, pathname)
3049 self._outputdir = os.path.join(self._outputdir, pathname)
3043 previoustimes = {}
3050 previoustimes = {}
3044 if self.options.order_by_runtime:
3051 if self.options.order_by_runtime:
3045 previoustimes = dict(loadtimes(self._outputdir))
3052 previoustimes = dict(loadtimes(self._outputdir))
3046 sorttests(testdescs, previoustimes, shuffle=self.options.random)
3053 sorttests(testdescs, previoustimes, shuffle=self.options.random)
3047
3054
3048 if 'PYTHONHASHSEED' not in os.environ:
3055 if 'PYTHONHASHSEED' not in os.environ:
3049 # use a random python hash seed all the time
3056 # use a random python hash seed all the time
3050 # we do the randomness ourself to know what seed is used
3057 # we do the randomness ourself to know what seed is used
3051 os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))
3058 os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))
3052
3059
3053 # Rayon (Rust crate for multi-threading) will use all logical CPU cores
3060 # Rayon (Rust crate for multi-threading) will use all logical CPU cores
3054 # by default, causing thrashing on high-cpu-count systems.
3061 # by default, causing thrashing on high-cpu-count systems.
3055 # Setting its limit to 3 during tests should still let us uncover
3062 # Setting its limit to 3 during tests should still let us uncover
3056 # multi-threading bugs while keeping the thrashing reasonable.
3063 # multi-threading bugs while keeping the thrashing reasonable.
3057 os.environ.setdefault("RAYON_NUM_THREADS", "3")
3064 os.environ.setdefault("RAYON_NUM_THREADS", "3")
3058
3065
3059 if self.options.tmpdir:
3066 if self.options.tmpdir:
3060 self.options.keep_tmpdir = True
3067 self.options.keep_tmpdir = True
3061 tmpdir = _sys2bytes(self.options.tmpdir)
3068 tmpdir = _sys2bytes(self.options.tmpdir)
3062 if os.path.exists(tmpdir):
3069 if os.path.exists(tmpdir):
3063 # Meaning of tmpdir has changed since 1.3: we used to create
3070 # Meaning of tmpdir has changed since 1.3: we used to create
3064 # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
3071 # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
3065 # tmpdir already exists.
3072 # tmpdir already exists.
3066 print("error: temp dir %r already exists" % tmpdir)
3073 print("error: temp dir %r already exists" % tmpdir)
3067 return 1
3074 return 1
3068
3075
3069 os.makedirs(tmpdir)
3076 os.makedirs(tmpdir)
3070 else:
3077 else:
3071 d = None
3078 d = None
3072 if os.name == 'nt':
3079 if os.name == 'nt':
3073 # without this, we get the default temp dir location, but
3080 # without this, we get the default temp dir location, but
3074 # in all lowercase, which causes troubles with paths (issue3490)
3081 # in all lowercase, which causes troubles with paths (issue3490)
3075 d = osenvironb.get(b'TMP', None)
3082 d = osenvironb.get(b'TMP', None)
3076 tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)
3083 tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)
3077
3084
3078 self._hgtmp = osenvironb[b'HGTMP'] = os.path.realpath(tmpdir)
3085 self._hgtmp = osenvironb[b'HGTMP'] = os.path.realpath(tmpdir)
3079
3086
3080 if self.options.with_hg:
3087 if self.options.with_hg:
3081 self._installdir = None
3088 self._installdir = None
3082 whg = self.options.with_hg
3089 whg = self.options.with_hg
3083 self._bindir = os.path.dirname(os.path.realpath(whg))
3090 self._bindir = os.path.dirname(os.path.realpath(whg))
3084 assert isinstance(self._bindir, bytes)
3091 assert isinstance(self._bindir, bytes)
3085 self._hgcommand = os.path.basename(whg)
3092 self._hgcommand = os.path.basename(whg)
3086 self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
3093 self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
3087 os.makedirs(self._tmpbindir)
3094 os.makedirs(self._tmpbindir)
3088
3095
3089 normbin = os.path.normpath(os.path.abspath(whg))
3096 normbin = os.path.normpath(os.path.abspath(whg))
3090 normbin = normbin.replace(_sys2bytes(os.sep), b'/')
3097 normbin = normbin.replace(_sys2bytes(os.sep), b'/')
3091
3098
3092 # Other Python scripts in the test harness need to
3099 # Other Python scripts in the test harness need to
3093 # `import mercurial`. If `hg` is a Python script, we assume
3100 # `import mercurial`. If `hg` is a Python script, we assume
3094 # the Mercurial modules are relative to its path and tell the tests
3101 # the Mercurial modules are relative to its path and tell the tests
3095 # to load Python modules from its directory.
3102 # to load Python modules from its directory.
3096 with open(whg, 'rb') as fh:
3103 with open(whg, 'rb') as fh:
3097 initial = fh.read(1024)
3104 initial = fh.read(1024)
3098
3105
3099 if re.match(b'#!.*python', initial):
3106 if re.match(b'#!.*python', initial):
3100 self._pythondir = self._bindir
3107 self._pythondir = self._bindir
3101 # If it looks like our in-repo Rust binary, use the source root.
3108 # If it looks like our in-repo Rust binary, use the source root.
3102 # This is a bit hacky. But rhg is still not supported outside the
3109 # This is a bit hacky. But rhg is still not supported outside the
3103 # source directory. So until it is, do the simple thing.
3110 # source directory. So until it is, do the simple thing.
3104 elif re.search(b'/rust/target/[^/]+/hg', normbin):
3111 elif re.search(b'/rust/target/[^/]+/hg', normbin):
3105 self._pythondir = os.path.dirname(self._testdir)
3112 self._pythondir = os.path.dirname(self._testdir)
3106 # Fall back to the legacy behavior.
3113 # Fall back to the legacy behavior.
3107 else:
3114 else:
3108 self._pythondir = self._bindir
3115 self._pythondir = self._bindir
3109
3116
3110 else:
3117 else:
3111 self._installdir = os.path.join(self._hgtmp, b"install")
3118 self._installdir = os.path.join(self._hgtmp, b"install")
3112 self._bindir = os.path.join(self._installdir, b"bin")
3119 self._bindir = os.path.join(self._installdir, b"bin")
3113 self._hgcommand = b'hg'
3120 self._hgcommand = b'hg'
3114 self._tmpbindir = self._bindir
3121 self._tmpbindir = self._bindir
3115 self._pythondir = os.path.join(self._installdir, b"lib", b"python")
3122 self._pythondir = os.path.join(self._installdir, b"lib", b"python")
3116
3123
3117 # Force the use of hg.exe instead of relying on MSYS to recognize hg is
3124 # Force the use of hg.exe instead of relying on MSYS to recognize hg is
3118 # a python script and feed it to python.exe. Legacy stdio is force
3125 # a python script and feed it to python.exe. Legacy stdio is force
3119 # enabled by hg.exe, and this is a more realistic way to launch hg
3126 # enabled by hg.exe, and this is a more realistic way to launch hg
3120 # anyway.
3127 # anyway.
3121 if os.name == 'nt' and not self._hgcommand.endswith(b'.exe'):
3128 if os.name == 'nt' and not self._hgcommand.endswith(b'.exe'):
3122 self._hgcommand += b'.exe'
3129 self._hgcommand += b'.exe'
3123
3130
3124 # set CHGHG, then replace "hg" command by "chg"
3131 # set CHGHG, then replace "hg" command by "chg"
3125 chgbindir = self._bindir
3132 chgbindir = self._bindir
3126 if self.options.chg or self.options.with_chg:
3133 if self.options.chg or self.options.with_chg:
3127 osenvironb[b'CHGHG'] = os.path.join(self._bindir, self._hgcommand)
3134 osenvironb[b'CHGHG'] = os.path.join(self._bindir, self._hgcommand)
3128 else:
3135 else:
3129 osenvironb.pop(b'CHGHG', None) # drop flag for hghave
3136 osenvironb.pop(b'CHGHG', None) # drop flag for hghave
3130 if self.options.chg:
3137 if self.options.chg:
3131 self._hgcommand = b'chg'
3138 self._hgcommand = b'chg'
3132 elif self.options.with_chg:
3139 elif self.options.with_chg:
3133 chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg))
3140 chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg))
3134 self._hgcommand = os.path.basename(self.options.with_chg)
3141 self._hgcommand = os.path.basename(self.options.with_chg)
3135
3142
3136 # configure fallback and replace "hg" command by "rhg"
3143 # configure fallback and replace "hg" command by "rhg"
3137 rhgbindir = self._bindir
3144 rhgbindir = self._bindir
3138 if self.options.rhg or self.options.with_rhg:
3145 if self.options.rhg or self.options.with_rhg:
3139 # Affects hghave.py
3146 # Affects hghave.py
3140 osenvironb[b'RHG_INSTALLED_AS_HG'] = b'1'
3147 osenvironb[b'RHG_INSTALLED_AS_HG'] = b'1'
3141 # Affects configuration. Alternatives would be setting configuration through
3148 # Affects configuration. Alternatives would be setting configuration through
3142 # `$HGRCPATH` but some tests override that, or changing `_hgcommand` to include
3149 # `$HGRCPATH` but some tests override that, or changing `_hgcommand` to include
3143 # `--config` but that disrupts tests that print command lines and check expected
3150 # `--config` but that disrupts tests that print command lines and check expected
3144 # output.
3151 # output.
3145 osenvironb[b'RHG_ON_UNSUPPORTED'] = b'fallback'
3152 osenvironb[b'RHG_ON_UNSUPPORTED'] = b'fallback'
3146 osenvironb[b'RHG_FALLBACK_EXECUTABLE'] = os.path.join(
3153 osenvironb[b'RHG_FALLBACK_EXECUTABLE'] = os.path.join(
3147 self._bindir, self._hgcommand
3154 self._bindir, self._hgcommand
3148 )
3155 )
3149 if self.options.rhg:
3156 if self.options.rhg:
3150 self._hgcommand = b'rhg'
3157 self._hgcommand = b'rhg'
3151 elif self.options.with_rhg:
3158 elif self.options.with_rhg:
3152 rhgbindir = os.path.dirname(os.path.realpath(self.options.with_rhg))
3159 rhgbindir = os.path.dirname(os.path.realpath(self.options.with_rhg))
3153 self._hgcommand = os.path.basename(self.options.with_rhg)
3160 self._hgcommand = os.path.basename(self.options.with_rhg)
3154
3161
3155 osenvironb[b"BINDIR"] = self._bindir
3162 osenvironb[b"BINDIR"] = self._bindir
3156 osenvironb[b"PYTHON"] = PYTHON
3163 osenvironb[b"PYTHON"] = PYTHON
3157
3164
3158 fileb = _sys2bytes(__file__)
3165 fileb = _sys2bytes(__file__)
3159 runtestdir = os.path.abspath(os.path.dirname(fileb))
3166 runtestdir = os.path.abspath(os.path.dirname(fileb))
3160 osenvironb[b'RUNTESTDIR'] = runtestdir
3167 osenvironb[b'RUNTESTDIR'] = runtestdir
3161 if PYTHON3:
3168 if PYTHON3:
3162 sepb = _sys2bytes(os.pathsep)
3169 sepb = _sys2bytes(os.pathsep)
3163 else:
3170 else:
3164 sepb = os.pathsep
3171 sepb = os.pathsep
3165 path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
3172 path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
3166 if os.path.islink(__file__):
3173 if os.path.islink(__file__):
3167 # test helper will likely be at the end of the symlink
3174 # test helper will likely be at the end of the symlink
3168 realfile = os.path.realpath(fileb)
3175 realfile = os.path.realpath(fileb)
3169 realdir = os.path.abspath(os.path.dirname(realfile))
3176 realdir = os.path.abspath(os.path.dirname(realfile))
3170 path.insert(2, realdir)
3177 path.insert(2, realdir)
3171 if chgbindir != self._bindir:
3178 if chgbindir != self._bindir:
3172 path.insert(1, chgbindir)
3179 path.insert(1, chgbindir)
3173 if rhgbindir != self._bindir:
3180 if rhgbindir != self._bindir:
3174 path.insert(1, rhgbindir)
3181 path.insert(1, rhgbindir)
3175 if self._testdir != runtestdir:
3182 if self._testdir != runtestdir:
3176 path = [self._testdir] + path
3183 path = [self._testdir] + path
3177 if self._tmpbindir != self._bindir:
3184 if self._tmpbindir != self._bindir:
3178 path = [self._tmpbindir] + path
3185 path = [self._tmpbindir] + path
3179 osenvironb[b"PATH"] = sepb.join(path)
3186 osenvironb[b"PATH"] = sepb.join(path)
3180
3187
3181 # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
3188 # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
3182 # can run .../tests/run-tests.py test-foo where test-foo
3189 # can run .../tests/run-tests.py test-foo where test-foo
3183 # adds an extension to HGRC. Also include run-test.py directory to
3190 # adds an extension to HGRC. Also include run-test.py directory to
3184 # import modules like heredoctest.
3191 # import modules like heredoctest.
3185 pypath = [self._pythondir, self._testdir, runtestdir]
3192 pypath = [self._pythondir, self._testdir, runtestdir]
3186 # We have to augment PYTHONPATH, rather than simply replacing
3193 # We have to augment PYTHONPATH, rather than simply replacing
3187 # it, in case external libraries are only available via current
3194 # it, in case external libraries are only available via current
3188 # PYTHONPATH. (In particular, the Subversion bindings on OS X
3195 # PYTHONPATH. (In particular, the Subversion bindings on OS X
3189 # are in /opt/subversion.)
3196 # are in /opt/subversion.)
3190 oldpypath = osenvironb.get(IMPL_PATH)
3197 oldpypath = osenvironb.get(IMPL_PATH)
3191 if oldpypath:
3198 if oldpypath:
3192 pypath.append(oldpypath)
3199 pypath.append(oldpypath)
3193 osenvironb[IMPL_PATH] = sepb.join(pypath)
3200 osenvironb[IMPL_PATH] = sepb.join(pypath)
3194
3201
3195 if self.options.pure:
3202 if self.options.pure:
3196 os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
3203 os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
3197 os.environ["HGMODULEPOLICY"] = "py"
3204 os.environ["HGMODULEPOLICY"] = "py"
3198 if self.options.rust:
3205 if self.options.rust:
3199 os.environ["HGMODULEPOLICY"] = "rust+c"
3206 os.environ["HGMODULEPOLICY"] = "rust+c"
3200 if self.options.no_rust:
3207 if self.options.no_rust:
3201 current_policy = os.environ.get("HGMODULEPOLICY", "")
3208 current_policy = os.environ.get("HGMODULEPOLICY", "")
3202 if current_policy.startswith("rust+"):
3209 if current_policy.startswith("rust+"):
3203 os.environ["HGMODULEPOLICY"] = current_policy[len("rust+") :]
3210 os.environ["HGMODULEPOLICY"] = current_policy[len("rust+") :]
3204 os.environ.pop("HGWITHRUSTEXT", None)
3211 os.environ.pop("HGWITHRUSTEXT", None)
3205
3212
3206 if self.options.allow_slow_tests:
3213 if self.options.allow_slow_tests:
3207 os.environ["HGTEST_SLOW"] = "slow"
3214 os.environ["HGTEST_SLOW"] = "slow"
3208 elif 'HGTEST_SLOW' in os.environ:
3215 elif 'HGTEST_SLOW' in os.environ:
3209 del os.environ['HGTEST_SLOW']
3216 del os.environ['HGTEST_SLOW']
3210
3217
3211 self._coveragefile = os.path.join(self._testdir, b'.coverage')
3218 self._coveragefile = os.path.join(self._testdir, b'.coverage')
3212
3219
3213 if self.options.exceptions:
3220 if self.options.exceptions:
3214 exceptionsdir = os.path.join(self._outputdir, b'exceptions')
3221 exceptionsdir = os.path.join(self._outputdir, b'exceptions')
3215 try:
3222 try:
3216 os.makedirs(exceptionsdir)
3223 os.makedirs(exceptionsdir)
3217 except OSError as e:
3224 except OSError as e:
3218 if e.errno != errno.EEXIST:
3225 if e.errno != errno.EEXIST:
3219 raise
3226 raise
3220
3227
3221 # Remove all existing exception reports.
3228 # Remove all existing exception reports.
3222 for f in os.listdir(exceptionsdir):
3229 for f in os.listdir(exceptionsdir):
3223 os.unlink(os.path.join(exceptionsdir, f))
3230 os.unlink(os.path.join(exceptionsdir, f))
3224
3231
3225 osenvironb[b'HGEXCEPTIONSDIR'] = exceptionsdir
3232 osenvironb[b'HGEXCEPTIONSDIR'] = exceptionsdir
3226 logexceptions = os.path.join(self._testdir, b'logexceptions.py')
3233 logexceptions = os.path.join(self._testdir, b'logexceptions.py')
3227 self.options.extra_config_opt.append(
3234 self.options.extra_config_opt.append(
3228 'extensions.logexceptions=%s' % logexceptions.decode('utf-8')
3235 'extensions.logexceptions=%s' % logexceptions.decode('utf-8')
3229 )
3236 )
3230
3237
3231 vlog("# Using TESTDIR", _bytes2sys(self._testdir))
3238 vlog("# Using TESTDIR", _bytes2sys(self._testdir))
3232 vlog("# Using RUNTESTDIR", _bytes2sys(osenvironb[b'RUNTESTDIR']))
3239 vlog("# Using RUNTESTDIR", _bytes2sys(osenvironb[b'RUNTESTDIR']))
3233 vlog("# Using HGTMP", _bytes2sys(self._hgtmp))
3240 vlog("# Using HGTMP", _bytes2sys(self._hgtmp))
3234 vlog("# Using PATH", os.environ["PATH"])
3241 vlog("# Using PATH", os.environ["PATH"])
3235 vlog(
3242 vlog(
3236 "# Using",
3243 "# Using",
3237 _bytes2sys(IMPL_PATH),
3244 _bytes2sys(IMPL_PATH),
3238 _bytes2sys(osenvironb[IMPL_PATH]),
3245 _bytes2sys(osenvironb[IMPL_PATH]),
3239 )
3246 )
3240 vlog("# Writing to directory", _bytes2sys(self._outputdir))
3247 vlog("# Writing to directory", _bytes2sys(self._outputdir))
3241
3248
3242 try:
3249 try:
3243 return self._runtests(testdescs) or 0
3250 return self._runtests(testdescs) or 0
3244 finally:
3251 finally:
3245 time.sleep(0.1)
3252 time.sleep(0.1)
3246 self._cleanup()
3253 self._cleanup()
3247
3254
3248 def findtests(self, args):
3255 def findtests(self, args):
3249 """Finds possible test files from arguments.
3256 """Finds possible test files from arguments.
3250
3257
3251 If you wish to inject custom tests into the test harness, this would
3258 If you wish to inject custom tests into the test harness, this would
3252 be a good function to monkeypatch or override in a derived class.
3259 be a good function to monkeypatch or override in a derived class.
3253 """
3260 """
3254 if not args:
3261 if not args:
3255 if self.options.changed:
3262 if self.options.changed:
3256 proc = Popen4(
3263 proc = Popen4(
3257 b'hg st --rev "%s" -man0 .'
3264 b'hg st --rev "%s" -man0 .'
3258 % _sys2bytes(self.options.changed),
3265 % _sys2bytes(self.options.changed),
3259 None,
3266 None,
3260 0,
3267 0,
3261 )
3268 )
3262 stdout, stderr = proc.communicate()
3269 stdout, stderr = proc.communicate()
3263 args = stdout.strip(b'\0').split(b'\0')
3270 args = stdout.strip(b'\0').split(b'\0')
3264 else:
3271 else:
3265 args = os.listdir(b'.')
3272 args = os.listdir(b'.')
3266
3273
3267 expanded_args = []
3274 expanded_args = []
3268 for arg in args:
3275 for arg in args:
3269 if os.path.isdir(arg):
3276 if os.path.isdir(arg):
3270 if not arg.endswith(b'/'):
3277 if not arg.endswith(b'/'):
3271 arg += b'/'
3278 arg += b'/'
3272 expanded_args.extend([arg + a for a in os.listdir(arg)])
3279 expanded_args.extend([arg + a for a in os.listdir(arg)])
3273 else:
3280 else:
3274 expanded_args.append(arg)
3281 expanded_args.append(arg)
3275 args = expanded_args
3282 args = expanded_args
3276
3283
3277 testcasepattern = re.compile(br'([\w-]+\.t|py)(?:#([a-zA-Z0-9_\-.#]+))')
3284 testcasepattern = re.compile(br'([\w-]+\.t|py)(?:#([a-zA-Z0-9_\-.#]+))')
3278 tests = []
3285 tests = []
3279 for t in args:
3286 for t in args:
3280 case = []
3287 case = []
3281
3288
3282 if not (
3289 if not (
3283 os.path.basename(t).startswith(b'test-')
3290 os.path.basename(t).startswith(b'test-')
3284 and (t.endswith(b'.py') or t.endswith(b'.t'))
3291 and (t.endswith(b'.py') or t.endswith(b'.t'))
3285 ):
3292 ):
3286
3293
3287 m = testcasepattern.match(os.path.basename(t))
3294 m = testcasepattern.match(os.path.basename(t))
3288 if m is not None:
3295 if m is not None:
3289 t_basename, casestr = m.groups()
3296 t_basename, casestr = m.groups()
3290 t = os.path.join(os.path.dirname(t), t_basename)
3297 t = os.path.join(os.path.dirname(t), t_basename)
3291 if casestr:
3298 if casestr:
3292 case = casestr.split(b'#')
3299 case = casestr.split(b'#')
3293 else:
3300 else:
3294 continue
3301 continue
3295
3302
3296 if t.endswith(b'.t'):
3303 if t.endswith(b'.t'):
3297 # .t file may contain multiple test cases
3304 # .t file may contain multiple test cases
3298 casedimensions = parsettestcases(t)
3305 casedimensions = parsettestcases(t)
3299 if casedimensions:
3306 if casedimensions:
3300 cases = []
3307 cases = []
3301
3308
3302 def addcases(case, casedimensions):
3309 def addcases(case, casedimensions):
3303 if not casedimensions:
3310 if not casedimensions:
3304 cases.append(case)
3311 cases.append(case)
3305 else:
3312 else:
3306 for c in casedimensions[0]:
3313 for c in casedimensions[0]:
3307 addcases(case + [c], casedimensions[1:])
3314 addcases(case + [c], casedimensions[1:])
3308
3315
3309 addcases([], casedimensions)
3316 addcases([], casedimensions)
3310 if case and case in cases:
3317 if case and case in cases:
3311 cases = [case]
3318 cases = [case]
3312 elif case:
3319 elif case:
3313 # Ignore invalid cases
3320 # Ignore invalid cases
3314 cases = []
3321 cases = []
3315 else:
3322 else:
3316 pass
3323 pass
3317 tests += [{'path': t, 'case': c} for c in sorted(cases)]
3324 tests += [{'path': t, 'case': c} for c in sorted(cases)]
3318 else:
3325 else:
3319 tests.append({'path': t})
3326 tests.append({'path': t})
3320 else:
3327 else:
3321 tests.append({'path': t})
3328 tests.append({'path': t})
3322
3329
3323 if self.options.retest:
3330 if self.options.retest:
3324 retest_args = []
3331 retest_args = []
3325 for test in tests:
3332 for test in tests:
3326 errpath = self._geterrpath(test)
3333 errpath = self._geterrpath(test)
3327 if os.path.exists(errpath):
3334 if os.path.exists(errpath):
3328 retest_args.append(test)
3335 retest_args.append(test)
3329 tests = retest_args
3336 tests = retest_args
3330 return tests
3337 return tests
3331
3338
3332 def _runtests(self, testdescs):
3339 def _runtests(self, testdescs):
3333 def _reloadtest(test, i):
3340 def _reloadtest(test, i):
3334 # convert a test back to its description dict
3341 # convert a test back to its description dict
3335 desc = {'path': test.path}
3342 desc = {'path': test.path}
3336 case = getattr(test, '_case', [])
3343 case = getattr(test, '_case', [])
3337 if case:
3344 if case:
3338 desc['case'] = case
3345 desc['case'] = case
3339 return self._gettest(desc, i)
3346 return self._gettest(desc, i)
3340
3347
3341 try:
3348 try:
3342 if self.options.restart:
3349 if self.options.restart:
3343 orig = list(testdescs)
3350 orig = list(testdescs)
3344 while testdescs:
3351 while testdescs:
3345 desc = testdescs[0]
3352 desc = testdescs[0]
3346 errpath = self._geterrpath(desc)
3353 errpath = self._geterrpath(desc)
3347 if os.path.exists(errpath):
3354 if os.path.exists(errpath):
3348 break
3355 break
3349 testdescs.pop(0)
3356 testdescs.pop(0)
3350 if not testdescs:
3357 if not testdescs:
3351 print("running all tests")
3358 print("running all tests")
3352 testdescs = orig
3359 testdescs = orig
3353
3360
3354 tests = [self._gettest(d, i) for i, d in enumerate(testdescs)]
3361 tests = [self._gettest(d, i) for i, d in enumerate(testdescs)]
3355 num_tests = len(tests) * self.options.runs_per_test
3362 num_tests = len(tests) * self.options.runs_per_test
3356
3363
3357 jobs = min(num_tests, self.options.jobs)
3364 jobs = min(num_tests, self.options.jobs)
3358
3365
3359 failed = False
3366 failed = False
3360 kws = self.options.keywords
3367 kws = self.options.keywords
3361 if kws is not None and PYTHON3:
3368 if kws is not None and PYTHON3:
3362 kws = kws.encode('utf-8')
3369 kws = kws.encode('utf-8')
3363
3370
3364 suite = TestSuite(
3371 suite = TestSuite(
3365 self._testdir,
3372 self._testdir,
3366 jobs=jobs,
3373 jobs=jobs,
3367 whitelist=self.options.whitelisted,
3374 whitelist=self.options.whitelisted,
3368 blacklist=self.options.blacklist,
3375 blacklist=self.options.blacklist,
3369 keywords=kws,
3376 keywords=kws,
3370 loop=self.options.loop,
3377 loop=self.options.loop,
3371 runs_per_test=self.options.runs_per_test,
3378 runs_per_test=self.options.runs_per_test,
3372 showchannels=self.options.showchannels,
3379 showchannels=self.options.showchannels,
3373 tests=tests,
3380 tests=tests,
3374 loadtest=_reloadtest,
3381 loadtest=_reloadtest,
3375 )
3382 )
3376 verbosity = 1
3383 verbosity = 1
3377 if self.options.list_tests:
3384 if self.options.list_tests:
3378 verbosity = 0
3385 verbosity = 0
3379 elif self.options.verbose:
3386 elif self.options.verbose:
3380 verbosity = 2
3387 verbosity = 2
3381 runner = TextTestRunner(self, verbosity=verbosity)
3388 runner = TextTestRunner(self, verbosity=verbosity)
3382
3389
3383 if self.options.list_tests:
3390 if self.options.list_tests:
3384 result = runner.listtests(suite)
3391 result = runner.listtests(suite)
3385 else:
3392 else:
3386 if self._installdir:
3393 if self._installdir:
3387 self._installhg()
3394 self._installhg()
3388 self._checkhglib("Testing")
3395 self._checkhglib("Testing")
3389 else:
3396 else:
3390 self._usecorrectpython()
3397 self._usecorrectpython()
3391 if self.options.chg:
3398 if self.options.chg:
3392 assert self._installdir
3399 assert self._installdir
3393 self._installchg()
3400 self._installchg()
3394 if self.options.rhg:
3401 if self.options.rhg:
3395 assert self._installdir
3402 assert self._installdir
3396 self._installrhg()
3403 self._installrhg()
3397
3404
3398 log(
3405 log(
3399 'running %d tests using %d parallel processes'
3406 'running %d tests using %d parallel processes'
3400 % (num_tests, jobs)
3407 % (num_tests, jobs)
3401 )
3408 )
3402
3409
3403 result = runner.run(suite)
3410 result = runner.run(suite)
3404
3411
3405 if result.failures or result.errors:
3412 if result.failures or result.errors:
3406 failed = True
3413 failed = True
3407
3414
3408 result.onEnd()
3415 result.onEnd()
3409
3416
3410 if self.options.anycoverage:
3417 if self.options.anycoverage:
3411 self._outputcoverage()
3418 self._outputcoverage()
3412 except KeyboardInterrupt:
3419 except KeyboardInterrupt:
3413 failed = True
3420 failed = True
3414 print("\ninterrupted!")
3421 print("\ninterrupted!")
3415
3422
3416 if failed:
3423 if failed:
3417 return 1
3424 return 1
3418
3425
3419 def _geterrpath(self, test):
3426 def _geterrpath(self, test):
3420 # test['path'] is a relative path
3427 # test['path'] is a relative path
3421 if 'case' in test:
3428 if 'case' in test:
3422 # for multiple dimensions test cases
3429 # for multiple dimensions test cases
3423 casestr = b'#'.join(test['case'])
3430 casestr = b'#'.join(test['case'])
3424 errpath = b'%s#%s.err' % (test['path'], casestr)
3431 errpath = b'%s#%s.err' % (test['path'], casestr)
3425 else:
3432 else:
3426 errpath = b'%s.err' % test['path']
3433 errpath = b'%s.err' % test['path']
3427 if self.options.outputdir:
3434 if self.options.outputdir:
3428 self._outputdir = canonpath(_sys2bytes(self.options.outputdir))
3435 self._outputdir = canonpath(_sys2bytes(self.options.outputdir))
3429 errpath = os.path.join(self._outputdir, errpath)
3436 errpath = os.path.join(self._outputdir, errpath)
3430 return errpath
3437 return errpath
3431
3438
3432 def _getport(self, count):
3439 def _getport(self, count):
3433 port = self._ports.get(count) # do we have a cached entry?
3440 port = self._ports.get(count) # do we have a cached entry?
3434 if port is None:
3441 if port is None:
3435 portneeded = 3
3442 portneeded = 3
3436 # above 100 tries we just give up and let test reports failure
3443 # above 100 tries we just give up and let test reports failure
3437 for tries in xrange(100):
3444 for tries in xrange(100):
3438 allfree = True
3445 allfree = True
3439 port = self.options.port + self._portoffset
3446 port = self.options.port + self._portoffset
3440 for idx in xrange(portneeded):
3447 for idx in xrange(portneeded):
3441 if not checkportisavailable(port + idx):
3448 if not checkportisavailable(port + idx):
3442 allfree = False
3449 allfree = False
3443 break
3450 break
3444 self._portoffset += portneeded
3451 self._portoffset += portneeded
3445 if allfree:
3452 if allfree:
3446 break
3453 break
3447 self._ports[count] = port
3454 self._ports[count] = port
3448 return port
3455 return port
3449
3456
3450 def _gettest(self, testdesc, count):
3457 def _gettest(self, testdesc, count):
3451 """Obtain a Test by looking at its filename.
3458 """Obtain a Test by looking at its filename.
3452
3459
3453 Returns a Test instance. The Test may not be runnable if it doesn't
3460 Returns a Test instance. The Test may not be runnable if it doesn't
3454 map to a known type.
3461 map to a known type.
3455 """
3462 """
3456 path = testdesc['path']
3463 path = testdesc['path']
3457 lctest = path.lower()
3464 lctest = path.lower()
3458 testcls = Test
3465 testcls = Test
3459
3466
3460 for ext, cls in self.TESTTYPES:
3467 for ext, cls in self.TESTTYPES:
3461 if lctest.endswith(ext):
3468 if lctest.endswith(ext):
3462 testcls = cls
3469 testcls = cls
3463 break
3470 break
3464
3471
3465 refpath = os.path.join(getcwdb(), path)
3472 refpath = os.path.join(getcwdb(), path)
3466 tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
3473 tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
3467
3474
3468 # extra keyword parameters. 'case' is used by .t tests
3475 # extra keyword parameters. 'case' is used by .t tests
3469 kwds = {k: testdesc[k] for k in ['case'] if k in testdesc}
3476 kwds = {k: testdesc[k] for k in ['case'] if k in testdesc}
3470
3477
3471 t = testcls(
3478 t = testcls(
3472 refpath,
3479 refpath,
3473 self._outputdir,
3480 self._outputdir,
3474 tmpdir,
3481 tmpdir,
3475 keeptmpdir=self.options.keep_tmpdir,
3482 keeptmpdir=self.options.keep_tmpdir,
3476 debug=self.options.debug,
3483 debug=self.options.debug,
3477 first=self.options.first,
3484 first=self.options.first,
3478 timeout=self.options.timeout,
3485 timeout=self.options.timeout,
3479 startport=self._getport(count),
3486 startport=self._getport(count),
3480 extraconfigopts=self.options.extra_config_opt,
3487 extraconfigopts=self.options.extra_config_opt,
3481 shell=self.options.shell,
3488 shell=self.options.shell,
3482 hgcommand=self._hgcommand,
3489 hgcommand=self._hgcommand,
3483 usechg=bool(self.options.with_chg or self.options.chg),
3490 usechg=bool(self.options.with_chg or self.options.chg),
3484 chgdebug=self.options.chg_debug,
3491 chgdebug=self.options.chg_debug,
3485 useipv6=useipv6,
3492 useipv6=useipv6,
3486 **kwds
3493 **kwds
3487 )
3494 )
3488 t.should_reload = True
3495 t.should_reload = True
3489 return t
3496 return t
3490
3497
3491 def _cleanup(self):
3498 def _cleanup(self):
3492 """Clean up state from this test invocation."""
3499 """Clean up state from this test invocation."""
3493 if self.options.keep_tmpdir:
3500 if self.options.keep_tmpdir:
3494 return
3501 return
3495
3502
3496 vlog("# Cleaning up HGTMP", _bytes2sys(self._hgtmp))
3503 vlog("# Cleaning up HGTMP", _bytes2sys(self._hgtmp))
3497 shutil.rmtree(self._hgtmp, True)
3504 shutil.rmtree(self._hgtmp, True)
3498 for f in self._createdfiles:
3505 for f in self._createdfiles:
3499 try:
3506 try:
3500 os.remove(f)
3507 os.remove(f)
3501 except OSError:
3508 except OSError:
3502 pass
3509 pass
3503
3510
3504 def _usecorrectpython(self):
3511 def _usecorrectpython(self):
3505 """Configure the environment to use the appropriate Python in tests."""
3512 """Configure the environment to use the appropriate Python in tests."""
3506 # Tests must use the same interpreter as us or bad things will happen.
3513 # Tests must use the same interpreter as us or bad things will happen.
3507 pyexename = sys.platform == 'win32' and b'python.exe' or b'python3'
3514 pyexename = sys.platform == 'win32' and b'python.exe' or b'python3'
3508
3515
3509 # os.symlink() is a thing with py3 on Windows, but it requires
3516 # os.symlink() is a thing with py3 on Windows, but it requires
3510 # Administrator rights.
3517 # Administrator rights.
3511 if getattr(os, 'symlink', None) and os.name != 'nt':
3518 if getattr(os, 'symlink', None) and os.name != 'nt':
3512 vlog(
3519 vlog(
3513 "# Making python executable in test path a symlink to '%s'"
3520 "# Making python executable in test path a symlink to '%s'"
3514 % sysexecutable
3521 % sysexecutable
3515 )
3522 )
3516 mypython = os.path.join(self._tmpbindir, pyexename)
3523 mypython = os.path.join(self._tmpbindir, pyexename)
3517 try:
3524 try:
3518 if os.readlink(mypython) == sysexecutable:
3525 if os.readlink(mypython) == sysexecutable:
3519 return
3526 return
3520 os.unlink(mypython)
3527 os.unlink(mypython)
3521 except OSError as err:
3528 except OSError as err:
3522 if err.errno != errno.ENOENT:
3529 if err.errno != errno.ENOENT:
3523 raise
3530 raise
3524 if self._findprogram(pyexename) != sysexecutable:
3531 if self._findprogram(pyexename) != sysexecutable:
3525 try:
3532 try:
3526 os.symlink(sysexecutable, mypython)
3533 os.symlink(sysexecutable, mypython)
3527 self._createdfiles.append(mypython)
3534 self._createdfiles.append(mypython)
3528 except OSError as err:
3535 except OSError as err:
3529 # child processes may race, which is harmless
3536 # child processes may race, which is harmless
3530 if err.errno != errno.EEXIST:
3537 if err.errno != errno.EEXIST:
3531 raise
3538 raise
3532 else:
3539 else:
3533 # Windows doesn't have `python3.exe`, and MSYS cannot understand the
3540 # Windows doesn't have `python3.exe`, and MSYS cannot understand the
3534 # reparse point with that name provided by Microsoft. Create a
3541 # reparse point with that name provided by Microsoft. Create a
3535 # simple script on PATH with that name that delegates to the py3
3542 # simple script on PATH with that name that delegates to the py3
3536 # launcher so the shebang lines work.
3543 # launcher so the shebang lines work.
3537 if os.getenv('MSYSTEM'):
3544 if os.getenv('MSYSTEM'):
3538 with open(osenvironb[b'RUNTESTDIR'] + b'/python3', 'wb') as f:
3545 with open(osenvironb[b'RUNTESTDIR'] + b'/python3', 'wb') as f:
3539 f.write(b'#!/bin/sh\n')
3546 f.write(b'#!/bin/sh\n')
3540 f.write(b'py -3.%d "$@"\n' % sys.version_info[1])
3547 f.write(b'py -3.%d "$@"\n' % sys.version_info[1])
3541
3548
3542 exedir, exename = os.path.split(sysexecutable)
3549 exedir, exename = os.path.split(sysexecutable)
3543 vlog(
3550 vlog(
3544 "# Modifying search path to find %s as %s in '%s'"
3551 "# Modifying search path to find %s as %s in '%s'"
3545 % (exename, pyexename, exedir)
3552 % (exename, pyexename, exedir)
3546 )
3553 )
3547 path = os.environ['PATH'].split(os.pathsep)
3554 path = os.environ['PATH'].split(os.pathsep)
3548 while exedir in path:
3555 while exedir in path:
3549 path.remove(exedir)
3556 path.remove(exedir)
3550
3557
3551 # Binaries installed by pip into the user area like pylint.exe may
3558 # Binaries installed by pip into the user area like pylint.exe may
3552 # not be in PATH by default.
3559 # not be in PATH by default.
3553 extra_paths = [exedir]
3560 extra_paths = [exedir]
3554 vi = sys.version_info
3561 vi = sys.version_info
3555 if 'APPDATA' in os.environ:
3562 if 'APPDATA' in os.environ:
3556 scripts_dir = os.path.join(
3563 scripts_dir = os.path.join(
3557 os.environ['APPDATA'],
3564 os.environ['APPDATA'],
3558 'Python',
3565 'Python',
3559 'Python%d%d' % (vi[0], vi[1]),
3566 'Python%d%d' % (vi[0], vi[1]),
3560 'Scripts',
3567 'Scripts',
3561 )
3568 )
3562
3569
3563 if vi.major == 2:
3570 if vi.major == 2:
3564 scripts_dir = os.path.join(
3571 scripts_dir = os.path.join(
3565 os.environ['APPDATA'],
3572 os.environ['APPDATA'],
3566 'Python',
3573 'Python',
3567 'Scripts',
3574 'Scripts',
3568 )
3575 )
3569
3576
3570 extra_paths.append(scripts_dir)
3577 extra_paths.append(scripts_dir)
3571
3578
3572 os.environ['PATH'] = os.pathsep.join(extra_paths + path)
3579 os.environ['PATH'] = os.pathsep.join(extra_paths + path)
3573 if not self._findprogram(pyexename):
3580 if not self._findprogram(pyexename):
3574 print("WARNING: Cannot find %s in search path" % pyexename)
3581 print("WARNING: Cannot find %s in search path" % pyexename)
3575
3582
3576 def _installhg(self):
3583 def _installhg(self):
3577 """Install hg into the test environment.
3584 """Install hg into the test environment.
3578
3585
3579 This will also configure hg with the appropriate testing settings.
3586 This will also configure hg with the appropriate testing settings.
3580 """
3587 """
3581 vlog("# Performing temporary installation of HG")
3588 vlog("# Performing temporary installation of HG")
3582 installerrs = os.path.join(self._hgtmp, b"install.err")
3589 installerrs = os.path.join(self._hgtmp, b"install.err")
3583 compiler = ''
3590 compiler = ''
3584 if self.options.compiler:
3591 if self.options.compiler:
3585 compiler = '--compiler ' + self.options.compiler
3592 compiler = '--compiler ' + self.options.compiler
3586 setup_opts = b""
3593 setup_opts = b""
3587 if self.options.pure:
3594 if self.options.pure:
3588 setup_opts = b"--pure"
3595 setup_opts = b"--pure"
3589 elif self.options.rust:
3596 elif self.options.rust:
3590 setup_opts = b"--rust"
3597 setup_opts = b"--rust"
3591 elif self.options.no_rust:
3598 elif self.options.no_rust:
3592 setup_opts = b"--no-rust"
3599 setup_opts = b"--no-rust"
3593
3600
3594 # Run installer in hg root
3601 # Run installer in hg root
3595 script = os.path.realpath(sys.argv[0])
3602 script = os.path.realpath(sys.argv[0])
3596 exe = sysexecutable
3603 exe = sysexecutable
3597 if PYTHON3:
3604 if PYTHON3:
3598 compiler = _sys2bytes(compiler)
3605 compiler = _sys2bytes(compiler)
3599 script = _sys2bytes(script)
3606 script = _sys2bytes(script)
3600 exe = _sys2bytes(exe)
3607 exe = _sys2bytes(exe)
3601 hgroot = os.path.dirname(os.path.dirname(script))
3608 hgroot = os.path.dirname(os.path.dirname(script))
3602 self._hgroot = hgroot
3609 self._hgroot = hgroot
3603 os.chdir(hgroot)
3610 os.chdir(hgroot)
3604 nohome = b'--home=""'
3611 nohome = b'--home=""'
3605 if os.name == 'nt':
3612 if os.name == 'nt':
3606 # The --home="" trick works only on OS where os.sep == '/'
3613 # The --home="" trick works only on OS where os.sep == '/'
3607 # because of a distutils convert_path() fast-path. Avoid it at
3614 # because of a distutils convert_path() fast-path. Avoid it at
3608 # least on Windows for now, deal with .pydistutils.cfg bugs
3615 # least on Windows for now, deal with .pydistutils.cfg bugs
3609 # when they happen.
3616 # when they happen.
3610 nohome = b''
3617 nohome = b''
3611 cmd = (
3618 cmd = (
3612 b'"%(exe)s" setup.py %(setup_opts)s clean --all'
3619 b'"%(exe)s" setup.py %(setup_opts)s clean --all'
3613 b' build %(compiler)s --build-base="%(base)s"'
3620 b' build %(compiler)s --build-base="%(base)s"'
3614 b' install --force --prefix="%(prefix)s"'
3621 b' install --force --prefix="%(prefix)s"'
3615 b' --install-lib="%(libdir)s"'
3622 b' --install-lib="%(libdir)s"'
3616 b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
3623 b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
3617 % {
3624 % {
3618 b'exe': exe,
3625 b'exe': exe,
3619 b'setup_opts': setup_opts,
3626 b'setup_opts': setup_opts,
3620 b'compiler': compiler,
3627 b'compiler': compiler,
3621 b'base': os.path.join(self._hgtmp, b"build"),
3628 b'base': os.path.join(self._hgtmp, b"build"),
3622 b'prefix': self._installdir,
3629 b'prefix': self._installdir,
3623 b'libdir': self._pythondir,
3630 b'libdir': self._pythondir,
3624 b'bindir': self._bindir,
3631 b'bindir': self._bindir,
3625 b'nohome': nohome,
3632 b'nohome': nohome,
3626 b'logfile': installerrs,
3633 b'logfile': installerrs,
3627 }
3634 }
3628 )
3635 )
3629
3636
3630 # setuptools requires install directories to exist.
3637 # setuptools requires install directories to exist.
3631 def makedirs(p):
3638 def makedirs(p):
3632 try:
3639 try:
3633 os.makedirs(p)
3640 os.makedirs(p)
3634 except OSError as e:
3641 except OSError as e:
3635 if e.errno != errno.EEXIST:
3642 if e.errno != errno.EEXIST:
3636 raise
3643 raise
3637
3644
3638 makedirs(self._pythondir)
3645 makedirs(self._pythondir)
3639 makedirs(self._bindir)
3646 makedirs(self._bindir)
3640
3647
3641 vlog("# Running", cmd.decode("utf-8"))
3648 vlog("# Running", cmd.decode("utf-8"))
3642 if subprocess.call(_bytes2sys(cmd), shell=True) == 0:
3649 if subprocess.call(_bytes2sys(cmd), shell=True) == 0:
3643 if not self.options.verbose:
3650 if not self.options.verbose:
3644 try:
3651 try:
3645 os.remove(installerrs)
3652 os.remove(installerrs)
3646 except OSError as e:
3653 except OSError as e:
3647 if e.errno != errno.ENOENT:
3654 if e.errno != errno.ENOENT:
3648 raise
3655 raise
3649 else:
3656 else:
3650 with open(installerrs, 'rb') as f:
3657 with open(installerrs, 'rb') as f:
3651 for line in f:
3658 for line in f:
3652 if PYTHON3:
3659 if PYTHON3:
3653 sys.stdout.buffer.write(line)
3660 sys.stdout.buffer.write(line)
3654 else:
3661 else:
3655 sys.stdout.write(line)
3662 sys.stdout.write(line)
3656 sys.exit(1)
3663 sys.exit(1)
3657 os.chdir(self._testdir)
3664 os.chdir(self._testdir)
3658
3665
3659 self._usecorrectpython()
3666 self._usecorrectpython()
3660
3667
3661 hgbat = os.path.join(self._bindir, b'hg.bat')
3668 hgbat = os.path.join(self._bindir, b'hg.bat')
3662 if os.path.isfile(hgbat):
3669 if os.path.isfile(hgbat):
3663 # hg.bat expects to be put in bin/scripts while run-tests.py
3670 # hg.bat expects to be put in bin/scripts while run-tests.py
3664 # installation layout put it in bin/ directly. Fix it
3671 # installation layout put it in bin/ directly. Fix it
3665 with open(hgbat, 'rb') as f:
3672 with open(hgbat, 'rb') as f:
3666 data = f.read()
3673 data = f.read()
3667 if br'"%~dp0..\python" "%~dp0hg" %*' in data:
3674 if br'"%~dp0..\python" "%~dp0hg" %*' in data:
3668 data = data.replace(
3675 data = data.replace(
3669 br'"%~dp0..\python" "%~dp0hg" %*',
3676 br'"%~dp0..\python" "%~dp0hg" %*',
3670 b'"%~dp0python" "%~dp0hg" %*',
3677 b'"%~dp0python" "%~dp0hg" %*',
3671 )
3678 )
3672 with open(hgbat, 'wb') as f:
3679 with open(hgbat, 'wb') as f:
3673 f.write(data)
3680 f.write(data)
3674 else:
3681 else:
3675 print('WARNING: cannot fix hg.bat reference to python.exe')
3682 print('WARNING: cannot fix hg.bat reference to python.exe')
3676
3683
3677 if self.options.anycoverage:
3684 if self.options.anycoverage:
3678 custom = os.path.join(
3685 custom = os.path.join(
3679 osenvironb[b'RUNTESTDIR'], b'sitecustomize.py'
3686 osenvironb[b'RUNTESTDIR'], b'sitecustomize.py'
3680 )
3687 )
3681 target = os.path.join(self._pythondir, b'sitecustomize.py')
3688 target = os.path.join(self._pythondir, b'sitecustomize.py')
3682 vlog('# Installing coverage trigger to %s' % target)
3689 vlog('# Installing coverage trigger to %s' % target)
3683 shutil.copyfile(custom, target)
3690 shutil.copyfile(custom, target)
3684 rc = os.path.join(self._testdir, b'.coveragerc')
3691 rc = os.path.join(self._testdir, b'.coveragerc')
3685 vlog('# Installing coverage rc to %s' % rc)
3692 vlog('# Installing coverage rc to %s' % rc)
3686 osenvironb[b'COVERAGE_PROCESS_START'] = rc
3693 osenvironb[b'COVERAGE_PROCESS_START'] = rc
3687 covdir = os.path.join(self._installdir, b'..', b'coverage')
3694 covdir = os.path.join(self._installdir, b'..', b'coverage')
3688 try:
3695 try:
3689 os.mkdir(covdir)
3696 os.mkdir(covdir)
3690 except OSError as e:
3697 except OSError as e:
3691 if e.errno != errno.EEXIST:
3698 if e.errno != errno.EEXIST:
3692 raise
3699 raise
3693
3700
3694 osenvironb[b'COVERAGE_DIR'] = covdir
3701 osenvironb[b'COVERAGE_DIR'] = covdir
3695
3702
3696 def _checkhglib(self, verb):
3703 def _checkhglib(self, verb):
3697 """Ensure that the 'mercurial' package imported by python is
3704 """Ensure that the 'mercurial' package imported by python is
3698 the one we expect it to be. If not, print a warning to stderr."""
3705 the one we expect it to be. If not, print a warning to stderr."""
3699 if (self._bindir == self._pythondir) and (
3706 if (self._bindir == self._pythondir) and (
3700 self._bindir != self._tmpbindir
3707 self._bindir != self._tmpbindir
3701 ):
3708 ):
3702 # The pythondir has been inferred from --with-hg flag.
3709 # The pythondir has been inferred from --with-hg flag.
3703 # We cannot expect anything sensible here.
3710 # We cannot expect anything sensible here.
3704 return
3711 return
3705 expecthg = os.path.join(self._pythondir, b'mercurial')
3712 expecthg = os.path.join(self._pythondir, b'mercurial')
3706 actualhg = self._gethgpath()
3713 actualhg = self._gethgpath()
3707 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
3714 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
3708 sys.stderr.write(
3715 sys.stderr.write(
3709 'warning: %s with unexpected mercurial lib: %s\n'
3716 'warning: %s with unexpected mercurial lib: %s\n'
3710 ' (expected %s)\n' % (verb, actualhg, expecthg)
3717 ' (expected %s)\n' % (verb, actualhg, expecthg)
3711 )
3718 )
3712
3719
3713 def _gethgpath(self):
3720 def _gethgpath(self):
3714 """Return the path to the mercurial package that is actually found by
3721 """Return the path to the mercurial package that is actually found by
3715 the current Python interpreter."""
3722 the current Python interpreter."""
3716 if self._hgpath is not None:
3723 if self._hgpath is not None:
3717 return self._hgpath
3724 return self._hgpath
3718
3725
3719 cmd = b'"%s" -c "import mercurial; print (mercurial.__path__[0])"'
3726 cmd = b'"%s" -c "import mercurial; print (mercurial.__path__[0])"'
3720 cmd = cmd % PYTHON
3727 cmd = cmd % PYTHON
3721 if PYTHON3:
3728 if PYTHON3:
3722 cmd = _bytes2sys(cmd)
3729 cmd = _bytes2sys(cmd)
3723
3730
3724 p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
3731 p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
3725 out, err = p.communicate()
3732 out, err = p.communicate()
3726
3733
3727 self._hgpath = out.strip()
3734 self._hgpath = out.strip()
3728
3735
3729 return self._hgpath
3736 return self._hgpath
3730
3737
3731 def _installchg(self):
3738 def _installchg(self):
3732 """Install chg into the test environment"""
3739 """Install chg into the test environment"""
3733 vlog('# Performing temporary installation of CHG')
3740 vlog('# Performing temporary installation of CHG')
3734 assert os.path.dirname(self._bindir) == self._installdir
3741 assert os.path.dirname(self._bindir) == self._installdir
3735 assert self._hgroot, 'must be called after _installhg()'
3742 assert self._hgroot, 'must be called after _installhg()'
3736 cmd = b'"%(make)s" clean install PREFIX="%(prefix)s"' % {
3743 cmd = b'"%(make)s" clean install PREFIX="%(prefix)s"' % {
3737 b'make': b'make', # TODO: switch by option or environment?
3744 b'make': b'make', # TODO: switch by option or environment?
3738 b'prefix': self._installdir,
3745 b'prefix': self._installdir,
3739 }
3746 }
3740 cwd = os.path.join(self._hgroot, b'contrib', b'chg')
3747 cwd = os.path.join(self._hgroot, b'contrib', b'chg')
3741 vlog("# Running", cmd)
3748 vlog("# Running", cmd)
3742 proc = subprocess.Popen(
3749 proc = subprocess.Popen(
3743 cmd,
3750 cmd,
3744 shell=True,
3751 shell=True,
3745 cwd=cwd,
3752 cwd=cwd,
3746 stdin=subprocess.PIPE,
3753 stdin=subprocess.PIPE,
3747 stdout=subprocess.PIPE,
3754 stdout=subprocess.PIPE,
3748 stderr=subprocess.STDOUT,
3755 stderr=subprocess.STDOUT,
3749 )
3756 )
3750 out, _err = proc.communicate()
3757 out, _err = proc.communicate()
3751 if proc.returncode != 0:
3758 if proc.returncode != 0:
3752 if PYTHON3:
3759 if PYTHON3:
3753 sys.stdout.buffer.write(out)
3760 sys.stdout.buffer.write(out)
3754 else:
3761 else:
3755 sys.stdout.write(out)
3762 sys.stdout.write(out)
3756 sys.exit(1)
3763 sys.exit(1)
3757
3764
3758 def _installrhg(self):
3765 def _installrhg(self):
3759 """Install rhg into the test environment"""
3766 """Install rhg into the test environment"""
3760 vlog('# Performing temporary installation of rhg')
3767 vlog('# Performing temporary installation of rhg')
3761 assert os.path.dirname(self._bindir) == self._installdir
3768 assert os.path.dirname(self._bindir) == self._installdir
3762 assert self._hgroot, 'must be called after _installhg()'
3769 assert self._hgroot, 'must be called after _installhg()'
3763 cmd = b'"%(make)s" install-rhg PREFIX="%(prefix)s"' % {
3770 cmd = b'"%(make)s" install-rhg PREFIX="%(prefix)s"' % {
3764 b'make': b'make', # TODO: switch by option or environment?
3771 b'make': b'make', # TODO: switch by option or environment?
3765 b'prefix': self._installdir,
3772 b'prefix': self._installdir,
3766 }
3773 }
3767 cwd = self._hgroot
3774 cwd = self._hgroot
3768 vlog("# Running", cmd)
3775 vlog("# Running", cmd)
3769 proc = subprocess.Popen(
3776 proc = subprocess.Popen(
3770 cmd,
3777 cmd,
3771 shell=True,
3778 shell=True,
3772 cwd=cwd,
3779 cwd=cwd,
3773 stdin=subprocess.PIPE,
3780 stdin=subprocess.PIPE,
3774 stdout=subprocess.PIPE,
3781 stdout=subprocess.PIPE,
3775 stderr=subprocess.STDOUT,
3782 stderr=subprocess.STDOUT,
3776 )
3783 )
3777 out, _err = proc.communicate()
3784 out, _err = proc.communicate()
3778 if proc.returncode != 0:
3785 if proc.returncode != 0:
3779 if PYTHON3:
3786 if PYTHON3:
3780 sys.stdout.buffer.write(out)
3787 sys.stdout.buffer.write(out)
3781 else:
3788 else:
3782 sys.stdout.write(out)
3789 sys.stdout.write(out)
3783 sys.exit(1)
3790 sys.exit(1)
3784
3791
3785 def _outputcoverage(self):
3792 def _outputcoverage(self):
3786 """Produce code coverage output."""
3793 """Produce code coverage output."""
3787 import coverage
3794 import coverage
3788
3795
3789 coverage = coverage.coverage
3796 coverage = coverage.coverage
3790
3797
3791 vlog('# Producing coverage report')
3798 vlog('# Producing coverage report')
3792 # chdir is the easiest way to get short, relative paths in the
3799 # chdir is the easiest way to get short, relative paths in the
3793 # output.
3800 # output.
3794 os.chdir(self._hgroot)
3801 os.chdir(self._hgroot)
3795 covdir = os.path.join(_bytes2sys(self._installdir), '..', 'coverage')
3802 covdir = os.path.join(_bytes2sys(self._installdir), '..', 'coverage')
3796 cov = coverage(data_file=os.path.join(covdir, 'cov'))
3803 cov = coverage(data_file=os.path.join(covdir, 'cov'))
3797
3804
3798 # Map install directory paths back to source directory.
3805 # Map install directory paths back to source directory.
3799 cov.config.paths['srcdir'] = ['.', _bytes2sys(self._pythondir)]
3806 cov.config.paths['srcdir'] = ['.', _bytes2sys(self._pythondir)]
3800
3807
3801 cov.combine()
3808 cov.combine()
3802
3809
3803 omit = [
3810 omit = [
3804 _bytes2sys(os.path.join(x, b'*'))
3811 _bytes2sys(os.path.join(x, b'*'))
3805 for x in [self._bindir, self._testdir]
3812 for x in [self._bindir, self._testdir]
3806 ]
3813 ]
3807 cov.report(ignore_errors=True, omit=omit)
3814 cov.report(ignore_errors=True, omit=omit)
3808
3815
3809 if self.options.htmlcov:
3816 if self.options.htmlcov:
3810 htmldir = os.path.join(_bytes2sys(self._outputdir), 'htmlcov')
3817 htmldir = os.path.join(_bytes2sys(self._outputdir), 'htmlcov')
3811 cov.html_report(directory=htmldir, omit=omit)
3818 cov.html_report(directory=htmldir, omit=omit)
3812 if self.options.annotate:
3819 if self.options.annotate:
3813 adir = os.path.join(_bytes2sys(self._outputdir), 'annotated')
3820 adir = os.path.join(_bytes2sys(self._outputdir), 'annotated')
3814 if not os.path.isdir(adir):
3821 if not os.path.isdir(adir):
3815 os.mkdir(adir)
3822 os.mkdir(adir)
3816 cov.annotate(directory=adir, omit=omit)
3823 cov.annotate(directory=adir, omit=omit)
3817
3824
3818 def _findprogram(self, program):
3825 def _findprogram(self, program):
3819 """Search PATH for a executable program"""
3826 """Search PATH for a executable program"""
3820 dpb = _sys2bytes(os.defpath)
3827 dpb = _sys2bytes(os.defpath)
3821 sepb = _sys2bytes(os.pathsep)
3828 sepb = _sys2bytes(os.pathsep)
3822 for p in osenvironb.get(b'PATH', dpb).split(sepb):
3829 for p in osenvironb.get(b'PATH', dpb).split(sepb):
3823 name = os.path.join(p, program)
3830 name = os.path.join(p, program)
3824 if os.name == 'nt' or os.access(name, os.X_OK):
3831 if os.name == 'nt' or os.access(name, os.X_OK):
3825 return _bytes2sys(name)
3832 return _bytes2sys(name)
3826 return None
3833 return None
3827
3834
3828 def _checktools(self):
3835 def _checktools(self):
3829 """Ensure tools required to run tests are present."""
3836 """Ensure tools required to run tests are present."""
3830 for p in self.REQUIREDTOOLS:
3837 for p in self.REQUIREDTOOLS:
3831 if os.name == 'nt' and not p.endswith(b'.exe'):
3838 if os.name == 'nt' and not p.endswith(b'.exe'):
3832 p += b'.exe'
3839 p += b'.exe'
3833 found = self._findprogram(p)
3840 found = self._findprogram(p)
3834 p = p.decode("utf-8")
3841 p = p.decode("utf-8")
3835 if found:
3842 if found:
3836 vlog("# Found prerequisite", p, "at", found)
3843 vlog("# Found prerequisite", p, "at", found)
3837 else:
3844 else:
3838 print("WARNING: Did not find prerequisite tool: %s " % p)
3845 print("WARNING: Did not find prerequisite tool: %s " % p)
3839
3846
3840
3847
3841 def aggregateexceptions(path):
3848 def aggregateexceptions(path):
3842 exceptioncounts = collections.Counter()
3849 exceptioncounts = collections.Counter()
3843 testsbyfailure = collections.defaultdict(set)
3850 testsbyfailure = collections.defaultdict(set)
3844 failuresbytest = collections.defaultdict(set)
3851 failuresbytest = collections.defaultdict(set)
3845
3852
3846 for f in os.listdir(path):
3853 for f in os.listdir(path):
3847 with open(os.path.join(path, f), 'rb') as fh:
3854 with open(os.path.join(path, f), 'rb') as fh:
3848 data = fh.read().split(b'\0')
3855 data = fh.read().split(b'\0')
3849 if len(data) != 5:
3856 if len(data) != 5:
3850 continue
3857 continue
3851
3858
3852 exc, mainframe, hgframe, hgline, testname = data
3859 exc, mainframe, hgframe, hgline, testname = data
3853 exc = exc.decode('utf-8')
3860 exc = exc.decode('utf-8')
3854 mainframe = mainframe.decode('utf-8')
3861 mainframe = mainframe.decode('utf-8')
3855 hgframe = hgframe.decode('utf-8')
3862 hgframe = hgframe.decode('utf-8')
3856 hgline = hgline.decode('utf-8')
3863 hgline = hgline.decode('utf-8')
3857 testname = testname.decode('utf-8')
3864 testname = testname.decode('utf-8')
3858
3865
3859 key = (hgframe, hgline, exc)
3866 key = (hgframe, hgline, exc)
3860 exceptioncounts[key] += 1
3867 exceptioncounts[key] += 1
3861 testsbyfailure[key].add(testname)
3868 testsbyfailure[key].add(testname)
3862 failuresbytest[testname].add(key)
3869 failuresbytest[testname].add(key)
3863
3870
3864 # Find test having fewest failures for each failure.
3871 # Find test having fewest failures for each failure.
3865 leastfailing = {}
3872 leastfailing = {}
3866 for key, tests in testsbyfailure.items():
3873 for key, tests in testsbyfailure.items():
3867 fewesttest = None
3874 fewesttest = None
3868 fewestcount = 99999999
3875 fewestcount = 99999999
3869 for test in sorted(tests):
3876 for test in sorted(tests):
3870 if len(failuresbytest[test]) < fewestcount:
3877 if len(failuresbytest[test]) < fewestcount:
3871 fewesttest = test
3878 fewesttest = test
3872 fewestcount = len(failuresbytest[test])
3879 fewestcount = len(failuresbytest[test])
3873
3880
3874 leastfailing[key] = (fewestcount, fewesttest)
3881 leastfailing[key] = (fewestcount, fewesttest)
3875
3882
3876 # Create a combined counter so we can sort by total occurrences and
3883 # Create a combined counter so we can sort by total occurrences and
3877 # impacted tests.
3884 # impacted tests.
3878 combined = {}
3885 combined = {}
3879 for key in exceptioncounts:
3886 for key in exceptioncounts:
3880 combined[key] = (
3887 combined[key] = (
3881 exceptioncounts[key],
3888 exceptioncounts[key],
3882 len(testsbyfailure[key]),
3889 len(testsbyfailure[key]),
3883 leastfailing[key][0],
3890 leastfailing[key][0],
3884 leastfailing[key][1],
3891 leastfailing[key][1],
3885 )
3892 )
3886
3893
3887 return {
3894 return {
3888 'exceptioncounts': exceptioncounts,
3895 'exceptioncounts': exceptioncounts,
3889 'total': sum(exceptioncounts.values()),
3896 'total': sum(exceptioncounts.values()),
3890 'combined': combined,
3897 'combined': combined,
3891 'leastfailing': leastfailing,
3898 'leastfailing': leastfailing,
3892 'byfailure': testsbyfailure,
3899 'byfailure': testsbyfailure,
3893 'bytest': failuresbytest,
3900 'bytest': failuresbytest,
3894 }
3901 }
3895
3902
3896
3903
3897 if __name__ == '__main__':
3904 if __name__ == '__main__':
3898 runner = TestRunner()
3905 runner = TestRunner()
3899
3906
3900 try:
3907 try:
3901 import msvcrt
3908 import msvcrt
3902
3909
3903 msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
3910 msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
3904 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
3911 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
3905 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
3912 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
3906 except ImportError:
3913 except ImportError:
3907 pass
3914 pass
3908
3915
3909 sys.exit(runner.run(sys.argv[1:]))
3916 sys.exit(runner.run(sys.argv[1:]))
@@ -1,26 +1,27 b''
1 #require test-repo pyflakes hg10
1 #require test-repo pyflakes hg10
2
2
3 $ . "$TESTDIR/helpers-testrepo.sh"
3 $ . "$TESTDIR/helpers-testrepo.sh"
4
4
5 run pyflakes on all tracked files ending in .py or without a file ending
5 run pyflakes on all tracked files ending in .py or without a file ending
6 (skipping binary file random-seed)
6 (skipping binary file random-seed)
7
7
8 $ cat > test.py <<EOF
8 $ cat > test.py <<EOF
9 > print(undefinedname)
9 > print(undefinedname)
10 > EOF
10 > EOF
11 $ "$PYTHON" -m pyflakes test.py 2>/dev/null | "$TESTDIR/filterpyflakes.py"
11 $ "$PYTHON" -m pyflakes test.py 2>/dev/null | "$TESTDIR/filterpyflakes.py"
12 test.py:1:* undefined name 'undefinedname' (glob)
12 test.py:1:* undefined name 'undefinedname' (glob)
13
13
14 $ cd "`dirname "$TESTDIR"`"
14 $ cd "`dirname "$TESTDIR"`"
15
15
16 $ testrepohg locate 'set:**.py or grep("^#!.*python")' \
16 $ testrepohg locate 'set:**.py or grep("^#!.*python")' \
17 > -X hgext/fsmonitor/pywatchman \
17 > -X hgext/fsmonitor/pywatchman \
18 > -X mercurial/pycompat.py -X contrib/python-zstandard \
18 > -X mercurial/pycompat.py -X contrib/python-zstandard \
19 > -X mercurial/thirdparty \
19 > -X mercurial/thirdparty \
20 > 2>/dev/null \
20 > 2>/dev/null \
21 > | xargs "$PYTHON" -m pyflakes 2>/dev/null | "$TESTDIR/filterpyflakes.py"
21 > | xargs "$PYTHON" -m pyflakes 2>/dev/null | "$TESTDIR/filterpyflakes.py"
22 contrib/perf.py:*:* undefined name 'xrange' (glob) (?)
22 contrib/perf.py:*:* undefined name 'xrange' (glob) (?)
23 mercurial/hgweb/server.py:*:* undefined name 'reload' (glob) (?)
23 mercurial/hgweb/server.py:*:* undefined name 'reload' (glob) (?)
24 mercurial/util.py:*:* undefined name 'file' (glob) (?)
24 mercurial/util.py:*:* undefined name 'file' (glob) (?)
25 mercurial/encoding.py:*:* undefined name 'localstr' (glob) (?)
25 mercurial/encoding.py:*:* undefined name 'localstr' (glob) (?)
26 tests/run-tests.py:*:* undefined name 'PermissionError' (glob) (?)
26
27
@@ -1,671 +1,670 b''
1 #require serve no-reposimplestore no-chg
1 #require serve no-reposimplestore no-chg
2
2
3 #testcases stream-legacy stream-bundle2
3 #testcases stream-legacy stream-bundle2
4
4
5 #if stream-legacy
5 #if stream-legacy
6 $ cat << EOF >> $HGRCPATH
6 $ cat << EOF >> $HGRCPATH
7 > [server]
7 > [server]
8 > bundle2.stream = no
8 > bundle2.stream = no
9 > EOF
9 > EOF
10 #endif
10 #endif
11
11
12 Initialize repository
12 Initialize repository
13 the status call is to check for issue5130
13 the status call is to check for issue5130
14
14
15 $ hg init server
15 $ hg init server
16 $ cd server
16 $ cd server
17 $ touch foo
17 $ touch foo
18 $ hg -q commit -A -m initial
18 $ hg -q commit -A -m initial
19 >>> for i in range(1024):
19 >>> for i in range(1024):
20 ... with open(str(i), 'wb') as fh:
20 ... with open(str(i), 'wb') as fh:
21 ... fh.write(b"%d" % i) and None
21 ... fh.write(b"%d" % i) and None
22 $ hg -q commit -A -m 'add a lot of files'
22 $ hg -q commit -A -m 'add a lot of files'
23 $ hg st
23 $ hg st
24 $ hg --config server.uncompressed=false serve -p $HGPORT -d --pid-file=hg.pid
24 $ hg --config server.uncompressed=false serve -p $HGPORT -d --pid-file=hg.pid
25 $ cat hg.pid > $DAEMON_PIDS
25 $ cat hg.pid > $DAEMON_PIDS
26 $ cd ..
26 $ cd ..
27
27
28 Cannot stream clone when server.uncompressed is set
28 Cannot stream clone when server.uncompressed is set
29
29
30 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=stream_out'
30 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=stream_out'
31 200 Script output follows
31 200 Script output follows
32
32
33 1
33 1
34
34
35 #if stream-legacy
35 #if stream-legacy
36 $ hg debugcapabilities http://localhost:$HGPORT
36 $ hg debugcapabilities http://localhost:$HGPORT
37 Main capabilities:
37 Main capabilities:
38 batch
38 batch
39 branchmap
39 branchmap
40 $USUAL_BUNDLE2_CAPS_SERVER$
40 $USUAL_BUNDLE2_CAPS_SERVER$
41 changegroupsubset
41 changegroupsubset
42 compression=$BUNDLE2_COMPRESSIONS$
42 compression=$BUNDLE2_COMPRESSIONS$
43 getbundle
43 getbundle
44 httpheader=1024
44 httpheader=1024
45 httpmediatype=0.1rx,0.1tx,0.2tx
45 httpmediatype=0.1rx,0.1tx,0.2tx
46 known
46 known
47 lookup
47 lookup
48 pushkey
48 pushkey
49 unbundle=HG10GZ,HG10BZ,HG10UN
49 unbundle=HG10GZ,HG10BZ,HG10UN
50 unbundlehash
50 unbundlehash
51 Bundle2 capabilities:
51 Bundle2 capabilities:
52 HG20
52 HG20
53 bookmarks
53 bookmarks
54 changegroup
54 changegroup
55 01
55 01
56 02
56 02
57 checkheads
57 checkheads
58 related
58 related
59 digests
59 digests
60 md5
60 md5
61 sha1
61 sha1
62 sha512
62 sha512
63 error
63 error
64 abort
64 abort
65 unsupportedcontent
65 unsupportedcontent
66 pushraced
66 pushraced
67 pushkey
67 pushkey
68 hgtagsfnodes
68 hgtagsfnodes
69 listkeys
69 listkeys
70 phases
70 phases
71 heads
71 heads
72 pushkey
72 pushkey
73 remote-changegroup
73 remote-changegroup
74 http
74 http
75 https
75 https
76
76
77 $ hg clone --stream -U http://localhost:$HGPORT server-disabled
77 $ hg clone --stream -U http://localhost:$HGPORT server-disabled
78 warning: stream clone requested but server has them disabled
78 warning: stream clone requested but server has them disabled
79 requesting all changes
79 requesting all changes
80 adding changesets
80 adding changesets
81 adding manifests
81 adding manifests
82 adding file changes
82 adding file changes
83 added 2 changesets with 1025 changes to 1025 files
83 added 2 changesets with 1025 changes to 1025 files
84 new changesets 96ee1d7354c4:c17445101a72
84 new changesets 96ee1d7354c4:c17445101a72
85
85
86 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
86 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
87 200 Script output follows
87 200 Script output follows
88 content-type: application/mercurial-0.2
88 content-type: application/mercurial-0.2
89
89
90
90
91 $ f --size body --hexdump --bytes 100
91 $ f --size body --hexdump --bytes 100
92 body: size=232
92 body: size=232
93 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
93 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
94 0010: cf 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |..ERROR:ABORT...|
94 0010: cf 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |..ERROR:ABORT...|
95 0020: 00 01 01 07 3c 04 72 6d 65 73 73 61 67 65 73 74 |....<.rmessagest|
95 0020: 00 01 01 07 3c 04 72 6d 65 73 73 61 67 65 73 74 |....<.rmessagest|
96 0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques|
96 0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques|
97 0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d|
97 0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d|
98 0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th|
98 0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th|
99 0060: 69 73 20 66 |is f|
99 0060: 69 73 20 66 |is f|
100
100
101 #endif
101 #endif
102 #if stream-bundle2
102 #if stream-bundle2
103 $ hg debugcapabilities http://localhost:$HGPORT
103 $ hg debugcapabilities http://localhost:$HGPORT
104 Main capabilities:
104 Main capabilities:
105 batch
105 batch
106 branchmap
106 branchmap
107 $USUAL_BUNDLE2_CAPS_SERVER$
107 $USUAL_BUNDLE2_CAPS_SERVER$
108 changegroupsubset
108 changegroupsubset
109 compression=$BUNDLE2_COMPRESSIONS$
109 compression=$BUNDLE2_COMPRESSIONS$
110 getbundle
110 getbundle
111 httpheader=1024
111 httpheader=1024
112 httpmediatype=0.1rx,0.1tx,0.2tx
112 httpmediatype=0.1rx,0.1tx,0.2tx
113 known
113 known
114 lookup
114 lookup
115 pushkey
115 pushkey
116 unbundle=HG10GZ,HG10BZ,HG10UN
116 unbundle=HG10GZ,HG10BZ,HG10UN
117 unbundlehash
117 unbundlehash
118 Bundle2 capabilities:
118 Bundle2 capabilities:
119 HG20
119 HG20
120 bookmarks
120 bookmarks
121 changegroup
121 changegroup
122 01
122 01
123 02
123 02
124 checkheads
124 checkheads
125 related
125 related
126 digests
126 digests
127 md5
127 md5
128 sha1
128 sha1
129 sha512
129 sha512
130 error
130 error
131 abort
131 abort
132 unsupportedcontent
132 unsupportedcontent
133 pushraced
133 pushraced
134 pushkey
134 pushkey
135 hgtagsfnodes
135 hgtagsfnodes
136 listkeys
136 listkeys
137 phases
137 phases
138 heads
138 heads
139 pushkey
139 pushkey
140 remote-changegroup
140 remote-changegroup
141 http
141 http
142 https
142 https
143
143
144 $ hg clone --stream -U http://localhost:$HGPORT server-disabled
144 $ hg clone --stream -U http://localhost:$HGPORT server-disabled
145 warning: stream clone requested but server has them disabled
145 warning: stream clone requested but server has them disabled
146 requesting all changes
146 requesting all changes
147 adding changesets
147 adding changesets
148 adding manifests
148 adding manifests
149 adding file changes
149 adding file changes
150 added 2 changesets with 1025 changes to 1025 files
150 added 2 changesets with 1025 changes to 1025 files
151 new changesets 96ee1d7354c4:c17445101a72
151 new changesets 96ee1d7354c4:c17445101a72
152
152
153 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
153 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
154 200 Script output follows
154 200 Script output follows
155 content-type: application/mercurial-0.2
155 content-type: application/mercurial-0.2
156
156
157
157
158 $ f --size body --hexdump --bytes 100
158 $ f --size body --hexdump --bytes 100
159 body: size=232
159 body: size=232
160 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
160 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
161 0010: cf 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |..ERROR:ABORT...|
161 0010: cf 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |..ERROR:ABORT...|
162 0020: 00 01 01 07 3c 04 72 6d 65 73 73 61 67 65 73 74 |....<.rmessagest|
162 0020: 00 01 01 07 3c 04 72 6d 65 73 73 61 67 65 73 74 |....<.rmessagest|
163 0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques|
163 0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques|
164 0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d|
164 0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d|
165 0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th|
165 0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th|
166 0060: 69 73 20 66 |is f|
166 0060: 69 73 20 66 |is f|
167
167
168 #endif
168 #endif
169
169
170 $ killdaemons.py
170 $ killdaemons.py
171 $ cd server
171 $ cd server
172 $ hg serve -p $HGPORT -d --pid-file=hg.pid --error errors.txt
172 $ hg serve -p $HGPORT -d --pid-file=hg.pid --error errors.txt
173 $ cat hg.pid > $DAEMON_PIDS
173 $ cat hg.pid > $DAEMON_PIDS
174 $ cd ..
174 $ cd ..
175
175
176 Basic clone
176 Basic clone
177
177
178 #if stream-legacy
178 #if stream-legacy
179 $ hg clone --stream -U http://localhost:$HGPORT clone1
179 $ hg clone --stream -U http://localhost:$HGPORT clone1
180 streaming all changes
180 streaming all changes
181 1027 files to transfer, 96.3 KB of data (no-zstd !)
181 1027 files to transfer, 96.3 KB of data (no-zstd !)
182 transferred 96.3 KB in * seconds (*/sec) (glob) (no-zstd !)
182 transferred 96.3 KB in * seconds (*/sec) (glob) (no-zstd !)
183 1027 files to transfer, 93.5 KB of data (zstd !)
183 1027 files to transfer, 93.5 KB of data (zstd !)
184 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
184 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
185 searching for changes
185 searching for changes
186 no changes found
186 no changes found
187 $ cat server/errors.txt
187 $ cat server/errors.txt
188 #endif
188 #endif
189 #if stream-bundle2
189 #if stream-bundle2
190 $ hg clone --stream -U http://localhost:$HGPORT clone1
190 $ hg clone --stream -U http://localhost:$HGPORT clone1
191 streaming all changes
191 streaming all changes
192 1030 files to transfer, 96.5 KB of data (no-zstd !)
192 1030 files to transfer, 96.5 KB of data (no-zstd !)
193 transferred 96.5 KB in * seconds (*/sec) (glob) (no-zstd !)
193 transferred 96.5 KB in * seconds (*/sec) (glob) (no-zstd !)
194 1030 files to transfer, 93.6 KB of data (zstd !)
194 1030 files to transfer, 93.6 KB of data (zstd !)
195 transferred 93.6 KB in * seconds (* */sec) (glob) (zstd !)
195 transferred 93.6 KB in * seconds (* */sec) (glob) (zstd !)
196
196
197 $ ls -1 clone1/.hg/cache
197 $ ls -1 clone1/.hg/cache
198 branch2-base
198 branch2-base
199 branch2-immutable
199 branch2-immutable
200 branch2-served
200 branch2-served
201 branch2-served.hidden
201 branch2-served.hidden
202 branch2-visible
202 branch2-visible
203 branch2-visible-hidden
203 branch2-visible-hidden
204 hgtagsfnodes1
205 rbc-names-v1
204 rbc-names-v1
206 rbc-revs-v1
205 rbc-revs-v1
207 tags2
206 tags2
208 tags2-served
207 tags2-served
209 $ cat server/errors.txt
208 $ cat server/errors.txt
210 #endif
209 #endif
211
210
212 getbundle requests with stream=1 are uncompressed
211 getbundle requests with stream=1 are uncompressed
213
212
214 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto '0.1 0.2 comp=zlib,none' --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
213 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto '0.1 0.2 comp=zlib,none' --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
215 200 Script output follows
214 200 Script output follows
216 content-type: application/mercurial-0.2
215 content-type: application/mercurial-0.2
217
216
218
217
219 $ f --size --hex --bytes 256 body
218 $ f --size --hex --bytes 256 body
220 body: size=112262 (no-zstd !)
219 body: size=112262 (no-zstd !)
221 body: size=109410 (zstd no-rust !)
220 body: size=109410 (zstd no-rust !)
222 body: size=109431 (rust !)
221 body: size=109431 (rust !)
223 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
222 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
224 0010: 7f 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| (no-zstd !)
223 0010: 7f 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| (no-zstd !)
225 0020: 05 09 04 0c 44 62 79 74 65 63 6f 75 6e 74 39 38 |....Dbytecount98| (no-zstd !)
224 0020: 05 09 04 0c 44 62 79 74 65 63 6f 75 6e 74 39 38 |....Dbytecount98| (no-zstd !)
226 0030: 37 37 35 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |775filecount1030| (no-zstd !)
225 0030: 37 37 35 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |775filecount1030| (no-zstd !)
227 0010: 99 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| (zstd no-rust !)
226 0010: 99 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| (zstd no-rust !)
228 0010: ae 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| (rust !)
227 0010: ae 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| (rust !)
229 0020: 05 09 04 0c 5e 62 79 74 65 63 6f 75 6e 74 39 35 |....^bytecount95| (zstd no-rust !)
228 0020: 05 09 04 0c 5e 62 79 74 65 63 6f 75 6e 74 39 35 |....^bytecount95| (zstd no-rust !)
230 0020: 05 09 04 0c 73 62 79 74 65 63 6f 75 6e 74 39 35 |....sbytecount95| (rust !)
229 0020: 05 09 04 0c 73 62 79 74 65 63 6f 75 6e 74 39 35 |....sbytecount95| (rust !)
231 0030: 38 39 37 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |897filecount1030| (zstd !)
230 0030: 38 39 37 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |897filecount1030| (zstd !)
232 0040: 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 65 |requirementsdote|
231 0040: 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 65 |requirementsdote|
233 0050: 6e 63 6f 64 65 25 32 43 66 6e 63 61 63 68 65 25 |ncode%2Cfncache%|
232 0050: 6e 63 6f 64 65 25 32 43 66 6e 63 61 63 68 65 25 |ncode%2Cfncache%|
234 0060: 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 25 32 |2Cgeneraldelta%2|
233 0060: 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 25 32 |2Cgeneraldelta%2|
235 0070: 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 72 |Crevlogv1%2Cspar| (no-zstd !)
234 0070: 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 72 |Crevlogv1%2Cspar| (no-zstd !)
236 0080: 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 65 |serevlog%2Cstore| (no-zstd !)
235 0080: 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 65 |serevlog%2Cstore| (no-zstd !)
237 0090: 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 00 |....s.Bdata/0.i.| (no-zstd !)
236 0090: 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 00 |....s.Bdata/0.i.| (no-zstd !)
238 00a0: 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 00 |................| (no-zstd !)
237 00a0: 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 00 |................| (no-zstd !)
239 00b0: 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff 80 |................| (no-zstd !)
238 00b0: 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff 80 |................| (no-zstd !)
240 00c0: 29 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 |)c.I.#....Vg.g,i| (no-zstd !)
239 00c0: 29 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 |)c.I.#....Vg.g,i| (no-zstd !)
241 00d0: d1 ec 39 00 00 00 00 00 00 00 00 00 00 00 00 75 |..9............u| (no-zstd !)
240 00d0: d1 ec 39 00 00 00 00 00 00 00 00 00 00 00 00 75 |..9............u| (no-zstd !)
242 00e0: 30 73 08 42 64 61 74 61 2f 31 2e 69 00 03 00 01 |0s.Bdata/1.i....| (no-zstd !)
241 00e0: 30 73 08 42 64 61 74 61 2f 31 2e 69 00 03 00 01 |0s.Bdata/1.i....| (no-zstd !)
243 00f0: 00 00 00 00 00 00 00 02 00 00 00 01 00 00 00 00 |................| (no-zstd !)
242 00f0: 00 00 00 00 00 00 00 02 00 00 00 01 00 00 00 00 |................| (no-zstd !)
244 0070: 43 72 65 76 6c 6f 67 2d 63 6f 6d 70 72 65 73 73 |Crevlog-compress| (zstd no-rust !)
243 0070: 43 72 65 76 6c 6f 67 2d 63 6f 6d 70 72 65 73 73 |Crevlog-compress| (zstd no-rust !)
245 0070: 43 70 65 72 73 69 73 74 65 6e 74 2d 6e 6f 64 65 |Cpersistent-node| (rust !)
244 0070: 43 70 65 72 73 69 73 74 65 6e 74 2d 6e 6f 64 65 |Cpersistent-node| (rust !)
246 0080: 69 6f 6e 2d 7a 73 74 64 25 32 43 72 65 76 6c 6f |ion-zstd%2Crevlo| (zstd no-rust !)
245 0080: 69 6f 6e 2d 7a 73 74 64 25 32 43 72 65 76 6c 6f |ion-zstd%2Crevlo| (zstd no-rust !)
247 0080: 6d 61 70 25 32 43 72 65 76 6c 6f 67 2d 63 6f 6d |map%2Crevlog-com| (rust !)
246 0080: 6d 61 70 25 32 43 72 65 76 6c 6f 67 2d 63 6f 6d |map%2Crevlog-com| (rust !)
248 0090: 67 76 31 25 32 43 73 70 61 72 73 65 72 65 76 6c |gv1%2Csparserevl| (zstd no-rust !)
247 0090: 67 76 31 25 32 43 73 70 61 72 73 65 72 65 76 6c |gv1%2Csparserevl| (zstd no-rust !)
249 0090: 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 25 32 43 |pression-zstd%2C| (rust !)
248 0090: 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 25 32 43 |pression-zstd%2C| (rust !)
250 00a0: 6f 67 25 32 43 73 74 6f 72 65 00 00 80 00 73 08 |og%2Cstore....s.| (zstd no-rust !)
249 00a0: 6f 67 25 32 43 73 74 6f 72 65 00 00 80 00 73 08 |og%2Cstore....s.| (zstd no-rust !)
251 00a0: 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 72 73 |revlogv1%2Cspars| (rust !)
250 00a0: 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 72 73 |revlogv1%2Cspars| (rust !)
252 00b0: 42 64 61 74 61 2f 30 2e 69 00 03 00 01 00 00 00 |Bdata/0.i.......| (zstd no-rust !)
251 00b0: 42 64 61 74 61 2f 30 2e 69 00 03 00 01 00 00 00 |Bdata/0.i.......| (zstd no-rust !)
253 00b0: 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 65 00 |erevlog%2Cstore.| (rust !)
252 00b0: 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 65 00 |erevlog%2Cstore.| (rust !)
254 00c0: 00 00 00 00 02 00 00 00 01 00 00 00 00 00 00 00 |................| (zstd no-rust !)
253 00c0: 00 00 00 00 02 00 00 00 01 00 00 00 00 00 00 00 |................| (zstd no-rust !)
255 00c0: 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 00 03 |...s.Bdata/0.i..| (rust !)
254 00c0: 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 00 03 |...s.Bdata/0.i..| (rust !)
256 00d0: 01 ff ff ff ff ff ff ff ff 80 29 63 a0 49 d3 23 |..........)c.I.#| (zstd no-rust !)
255 00d0: 01 ff ff ff ff ff ff ff ff 80 29 63 a0 49 d3 23 |..........)c.I.#| (zstd no-rust !)
257 00d0: 00 01 00 00 00 00 00 00 00 02 00 00 00 01 00 00 |................| (rust !)
256 00d0: 00 01 00 00 00 00 00 00 00 02 00 00 00 01 00 00 |................| (rust !)
258 00e0: 87 bf ce fe 56 67 92 67 2c 69 d1 ec 39 00 00 00 |....Vg.g,i..9...| (zstd no-rust !)
257 00e0: 87 bf ce fe 56 67 92 67 2c 69 d1 ec 39 00 00 00 |....Vg.g,i..9...| (zstd no-rust !)
259 00e0: 00 00 00 00 00 01 ff ff ff ff ff ff ff ff 80 29 |...............)| (rust !)
258 00e0: 00 00 00 00 00 01 ff ff ff ff ff ff ff ff 80 29 |...............)| (rust !)
260 00f0: 00 00 00 00 00 00 00 00 00 75 30 73 08 42 64 61 |.........u0s.Bda| (zstd no-rust !)
259 00f0: 00 00 00 00 00 00 00 00 00 75 30 73 08 42 64 61 |.........u0s.Bda| (zstd no-rust !)
261 00f0: 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 d1 |c.I.#....Vg.g,i.| (rust !)
260 00f0: 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 d1 |c.I.#....Vg.g,i.| (rust !)
262
261
263 --uncompressed is an alias to --stream
262 --uncompressed is an alias to --stream
264
263
265 #if stream-legacy
264 #if stream-legacy
266 $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
265 $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
267 streaming all changes
266 streaming all changes
268 1027 files to transfer, 96.3 KB of data (no-zstd !)
267 1027 files to transfer, 96.3 KB of data (no-zstd !)
269 transferred 96.3 KB in * seconds (*/sec) (glob) (no-zstd !)
268 transferred 96.3 KB in * seconds (*/sec) (glob) (no-zstd !)
270 1027 files to transfer, 93.5 KB of data (zstd !)
269 1027 files to transfer, 93.5 KB of data (zstd !)
271 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
270 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
272 searching for changes
271 searching for changes
273 no changes found
272 no changes found
274 #endif
273 #endif
275 #if stream-bundle2
274 #if stream-bundle2
276 $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
275 $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
277 streaming all changes
276 streaming all changes
278 1030 files to transfer, 96.5 KB of data (no-zstd !)
277 1030 files to transfer, 96.5 KB of data (no-zstd !)
279 transferred 96.5 KB in * seconds (* */sec) (glob) (no-zstd !)
278 transferred 96.5 KB in * seconds (* */sec) (glob) (no-zstd !)
280 1030 files to transfer, 93.6 KB of data (zstd !)
279 1030 files to transfer, 93.6 KB of data (zstd !)
281 transferred 93.6 KB in * seconds (* */sec) (glob) (zstd !)
280 transferred 93.6 KB in * seconds (* */sec) (glob) (zstd !)
282 #endif
281 #endif
283
282
284 Clone with background file closing enabled
283 Clone with background file closing enabled
285
284
286 #if stream-legacy
285 #if stream-legacy
287 $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
286 $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
288 using http://localhost:$HGPORT/
287 using http://localhost:$HGPORT/
289 sending capabilities command
288 sending capabilities command
290 sending branchmap command
289 sending branchmap command
291 streaming all changes
290 streaming all changes
292 sending stream_out command
291 sending stream_out command
293 1027 files to transfer, 96.3 KB of data (no-zstd !)
292 1027 files to transfer, 96.3 KB of data (no-zstd !)
294 1027 files to transfer, 93.5 KB of data (zstd !)
293 1027 files to transfer, 93.5 KB of data (zstd !)
295 starting 4 threads for background file closing
294 starting 4 threads for background file closing
296 updating the branch cache
295 updating the branch cache
297 transferred 96.3 KB in * seconds (*/sec) (glob) (no-zstd !)
296 transferred 96.3 KB in * seconds (*/sec) (glob) (no-zstd !)
298 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
297 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
299 query 1; heads
298 query 1; heads
300 sending batch command
299 sending batch command
301 searching for changes
300 searching for changes
302 all remote heads known locally
301 all remote heads known locally
303 no changes found
302 no changes found
304 sending getbundle command
303 sending getbundle command
305 bundle2-input-bundle: with-transaction
304 bundle2-input-bundle: with-transaction
306 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
305 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
307 bundle2-input-part: "phase-heads" supported
306 bundle2-input-part: "phase-heads" supported
308 bundle2-input-part: total payload size 24
307 bundle2-input-part: total payload size 24
309 bundle2-input-bundle: 2 parts total
308 bundle2-input-bundle: 2 parts total
310 checking for updated bookmarks
309 checking for updated bookmarks
311 updating the branch cache
310 updating the branch cache
312 (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
311 (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
313 #endif
312 #endif
314 #if stream-bundle2
313 #if stream-bundle2
315 $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
314 $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
316 using http://localhost:$HGPORT/
315 using http://localhost:$HGPORT/
317 sending capabilities command
316 sending capabilities command
318 query 1; heads
317 query 1; heads
319 sending batch command
318 sending batch command
320 streaming all changes
319 streaming all changes
321 sending getbundle command
320 sending getbundle command
322 bundle2-input-bundle: with-transaction
321 bundle2-input-bundle: with-transaction
323 bundle2-input-part: "stream2" (params: 3 mandatory) supported
322 bundle2-input-part: "stream2" (params: 3 mandatory) supported
324 applying stream bundle
323 applying stream bundle
325 1030 files to transfer, 96.5 KB of data (no-zstd !)
324 1030 files to transfer, 96.5 KB of data (no-zstd !)
326 1030 files to transfer, 93.6 KB of data (zstd !)
325 1030 files to transfer, 93.6 KB of data (zstd !)
327 starting 4 threads for background file closing
326 starting 4 threads for background file closing
328 starting 4 threads for background file closing
327 starting 4 threads for background file closing
329 updating the branch cache
328 updating the branch cache
330 transferred 96.5 KB in * seconds (* */sec) (glob) (no-zstd !)
329 transferred 96.5 KB in * seconds (* */sec) (glob) (no-zstd !)
331 bundle2-input-part: total payload size 112094 (no-zstd !)
330 bundle2-input-part: total payload size 112094 (no-zstd !)
332 transferred 93.6 KB in * seconds (* */sec) (glob) (zstd !)
331 transferred 93.6 KB in * seconds (* */sec) (glob) (zstd !)
333 bundle2-input-part: total payload size 109216 (zstd !)
332 bundle2-input-part: total payload size 109216 (zstd !)
334 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
333 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
335 bundle2-input-bundle: 2 parts total
334 bundle2-input-bundle: 2 parts total
336 checking for updated bookmarks
335 checking for updated bookmarks
337 updating the branch cache
336 updating the branch cache
338 (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
337 (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
339 #endif
338 #endif
340
339
341 Cannot stream clone when there are secret changesets
340 Cannot stream clone when there are secret changesets
342
341
343 $ hg -R server phase --force --secret -r tip
342 $ hg -R server phase --force --secret -r tip
344 $ hg clone --stream -U http://localhost:$HGPORT secret-denied
343 $ hg clone --stream -U http://localhost:$HGPORT secret-denied
345 warning: stream clone requested but server has them disabled
344 warning: stream clone requested but server has them disabled
346 requesting all changes
345 requesting all changes
347 adding changesets
346 adding changesets
348 adding manifests
347 adding manifests
349 adding file changes
348 adding file changes
350 added 1 changesets with 1 changes to 1 files
349 added 1 changesets with 1 changes to 1 files
351 new changesets 96ee1d7354c4
350 new changesets 96ee1d7354c4
352
351
353 $ killdaemons.py
352 $ killdaemons.py
354
353
355 Streaming of secrets can be overridden by server config
354 Streaming of secrets can be overridden by server config
356
355
357 $ cd server
356 $ cd server
358 $ hg serve --config server.uncompressedallowsecret=true -p $HGPORT -d --pid-file=hg.pid
357 $ hg serve --config server.uncompressedallowsecret=true -p $HGPORT -d --pid-file=hg.pid
359 $ cat hg.pid > $DAEMON_PIDS
358 $ cat hg.pid > $DAEMON_PIDS
360 $ cd ..
359 $ cd ..
361
360
362 #if stream-legacy
361 #if stream-legacy
363 $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
362 $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
364 streaming all changes
363 streaming all changes
365 1027 files to transfer, 96.3 KB of data (no-zstd !)
364 1027 files to transfer, 96.3 KB of data (no-zstd !)
366 transferred 96.3 KB in * seconds (*/sec) (glob) (no-zstd !)
365 transferred 96.3 KB in * seconds (*/sec) (glob) (no-zstd !)
367 1027 files to transfer, 93.5 KB of data (zstd !)
366 1027 files to transfer, 93.5 KB of data (zstd !)
368 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
367 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
369 searching for changes
368 searching for changes
370 no changes found
369 no changes found
371 #endif
370 #endif
372 #if stream-bundle2
371 #if stream-bundle2
373 $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
372 $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
374 streaming all changes
373 streaming all changes
375 1030 files to transfer, 96.5 KB of data (no-zstd !)
374 1030 files to transfer, 96.5 KB of data (no-zstd !)
376 transferred 96.5 KB in * seconds (* */sec) (glob) (no-zstd !)
375 transferred 96.5 KB in * seconds (* */sec) (glob) (no-zstd !)
377 1030 files to transfer, 93.6 KB of data (zstd !)
376 1030 files to transfer, 93.6 KB of data (zstd !)
378 transferred 93.6 KB in * seconds (* */sec) (glob) (zstd !)
377 transferred 93.6 KB in * seconds (* */sec) (glob) (zstd !)
379 #endif
378 #endif
380
379
381 $ killdaemons.py
380 $ killdaemons.py
382
381
383 Verify interaction between preferuncompressed and secret presence
382 Verify interaction between preferuncompressed and secret presence
384
383
385 $ cd server
384 $ cd server
386 $ hg serve --config server.preferuncompressed=true -p $HGPORT -d --pid-file=hg.pid
385 $ hg serve --config server.preferuncompressed=true -p $HGPORT -d --pid-file=hg.pid
387 $ cat hg.pid > $DAEMON_PIDS
386 $ cat hg.pid > $DAEMON_PIDS
388 $ cd ..
387 $ cd ..
389
388
390 $ hg clone -U http://localhost:$HGPORT preferuncompressed-secret
389 $ hg clone -U http://localhost:$HGPORT preferuncompressed-secret
391 requesting all changes
390 requesting all changes
392 adding changesets
391 adding changesets
393 adding manifests
392 adding manifests
394 adding file changes
393 adding file changes
395 added 1 changesets with 1 changes to 1 files
394 added 1 changesets with 1 changes to 1 files
396 new changesets 96ee1d7354c4
395 new changesets 96ee1d7354c4
397
396
398 $ killdaemons.py
397 $ killdaemons.py
399
398
400 Clone not allowed when full bundles disabled and can't serve secrets
399 Clone not allowed when full bundles disabled and can't serve secrets
401
400
402 $ cd server
401 $ cd server
403 $ hg serve --config server.disablefullbundle=true -p $HGPORT -d --pid-file=hg.pid
402 $ hg serve --config server.disablefullbundle=true -p $HGPORT -d --pid-file=hg.pid
404 $ cat hg.pid > $DAEMON_PIDS
403 $ cat hg.pid > $DAEMON_PIDS
405 $ cd ..
404 $ cd ..
406
405
407 $ hg clone --stream http://localhost:$HGPORT secret-full-disabled
406 $ hg clone --stream http://localhost:$HGPORT secret-full-disabled
408 warning: stream clone requested but server has them disabled
407 warning: stream clone requested but server has them disabled
409 requesting all changes
408 requesting all changes
410 remote: abort: server has pull-based clones disabled
409 remote: abort: server has pull-based clones disabled
411 abort: pull failed on remote
410 abort: pull failed on remote
412 (remove --pull if specified or upgrade Mercurial)
411 (remove --pull if specified or upgrade Mercurial)
413 [100]
412 [100]
414
413
415 Local stream clone with secrets involved
414 Local stream clone with secrets involved
416 (This is just a test over behavior: if you have access to the repo's files,
415 (This is just a test over behavior: if you have access to the repo's files,
417 there is no security so it isn't important to prevent a clone here.)
416 there is no security so it isn't important to prevent a clone here.)
418
417
419 $ hg clone -U --stream server local-secret
418 $ hg clone -U --stream server local-secret
420 warning: stream clone requested but server has them disabled
419 warning: stream clone requested but server has them disabled
421 requesting all changes
420 requesting all changes
422 adding changesets
421 adding changesets
423 adding manifests
422 adding manifests
424 adding file changes
423 adding file changes
425 added 1 changesets with 1 changes to 1 files
424 added 1 changesets with 1 changes to 1 files
426 new changesets 96ee1d7354c4
425 new changesets 96ee1d7354c4
427
426
428 Stream clone while repo is changing:
427 Stream clone while repo is changing:
429
428
430 $ mkdir changing
429 $ mkdir changing
431 $ cd changing
430 $ cd changing
432
431
433 extension for delaying the server process so we reliably can modify the repo
432 extension for delaying the server process so we reliably can modify the repo
434 while cloning
433 while cloning
435
434
436 $ cat > stream_steps.py <<EOF
435 $ cat > stream_steps.py <<EOF
437 > import os
436 > import os
438 > import sys
437 > import sys
439 > from mercurial import (
438 > from mercurial import (
440 > encoding,
439 > encoding,
441 > extensions,
440 > extensions,
442 > streamclone,
441 > streamclone,
443 > testing,
442 > testing,
444 > )
443 > )
445 > WALKED_FILE_1 = encoding.environ[b'HG_TEST_STREAM_WALKED_FILE_1']
444 > WALKED_FILE_1 = encoding.environ[b'HG_TEST_STREAM_WALKED_FILE_1']
446 > WALKED_FILE_2 = encoding.environ[b'HG_TEST_STREAM_WALKED_FILE_2']
445 > WALKED_FILE_2 = encoding.environ[b'HG_TEST_STREAM_WALKED_FILE_2']
447 >
446 >
448 > def _test_sync_point_walk_1(orig, repo):
447 > def _test_sync_point_walk_1(orig, repo):
449 > testing.write_file(WALKED_FILE_1)
448 > testing.write_file(WALKED_FILE_1)
450 >
449 >
451 > def _test_sync_point_walk_2(orig, repo):
450 > def _test_sync_point_walk_2(orig, repo):
452 > assert repo._currentlock(repo._lockref) is None
451 > assert repo._currentlock(repo._lockref) is None
453 > testing.wait_file(WALKED_FILE_2)
452 > testing.wait_file(WALKED_FILE_2)
454 >
453 >
455 > extensions.wrapfunction(
454 > extensions.wrapfunction(
456 > streamclone,
455 > streamclone,
457 > '_test_sync_point_walk_1',
456 > '_test_sync_point_walk_1',
458 > _test_sync_point_walk_1
457 > _test_sync_point_walk_1
459 > )
458 > )
460 > extensions.wrapfunction(
459 > extensions.wrapfunction(
461 > streamclone,
460 > streamclone,
462 > '_test_sync_point_walk_2',
461 > '_test_sync_point_walk_2',
463 > _test_sync_point_walk_2
462 > _test_sync_point_walk_2
464 > )
463 > )
465 > EOF
464 > EOF
466
465
467 prepare repo with small and big file to cover both code paths in emitrevlogdata
466 prepare repo with small and big file to cover both code paths in emitrevlogdata
468
467
469 $ hg init repo
468 $ hg init repo
470 $ touch repo/f1
469 $ touch repo/f1
471 $ $TESTDIR/seq.py 50000 > repo/f2
470 $ $TESTDIR/seq.py 50000 > repo/f2
472 $ hg -R repo ci -Aqm "0"
471 $ hg -R repo ci -Aqm "0"
473 $ HG_TEST_STREAM_WALKED_FILE_1="$TESTTMP/sync_file_walked_1"
472 $ HG_TEST_STREAM_WALKED_FILE_1="$TESTTMP/sync_file_walked_1"
474 $ export HG_TEST_STREAM_WALKED_FILE_1
473 $ export HG_TEST_STREAM_WALKED_FILE_1
475 $ HG_TEST_STREAM_WALKED_FILE_2="$TESTTMP/sync_file_walked_2"
474 $ HG_TEST_STREAM_WALKED_FILE_2="$TESTTMP/sync_file_walked_2"
476 $ export HG_TEST_STREAM_WALKED_FILE_2
475 $ export HG_TEST_STREAM_WALKED_FILE_2
477 $ HG_TEST_STREAM_WALKED_FILE_3="$TESTTMP/sync_file_walked_3"
476 $ HG_TEST_STREAM_WALKED_FILE_3="$TESTTMP/sync_file_walked_3"
478 $ export HG_TEST_STREAM_WALKED_FILE_3
477 $ export HG_TEST_STREAM_WALKED_FILE_3
479 # $ cat << EOF >> $HGRCPATH
478 # $ cat << EOF >> $HGRCPATH
480 # > [hooks]
479 # > [hooks]
481 # > pre-clone=rm -f "$TESTTMP/sync_file_walked_*"
480 # > pre-clone=rm -f "$TESTTMP/sync_file_walked_*"
482 # > EOF
481 # > EOF
483 $ hg serve -R repo -p $HGPORT1 -d --error errors.log --pid-file=hg.pid --config extensions.stream_steps="$RUNTESTDIR/testlib/ext-stream-clone-steps.py"
482 $ hg serve -R repo -p $HGPORT1 -d --error errors.log --pid-file=hg.pid --config extensions.stream_steps="$RUNTESTDIR/testlib/ext-stream-clone-steps.py"
484 $ cat hg.pid >> $DAEMON_PIDS
483 $ cat hg.pid >> $DAEMON_PIDS
485
484
486 clone while modifying the repo between stating file with write lock and
485 clone while modifying the repo between stating file with write lock and
487 actually serving file content
486 actually serving file content
488
487
489 $ (hg clone -q --stream -U http://localhost:$HGPORT1 clone; touch "$HG_TEST_STREAM_WALKED_FILE_3") &
488 $ (hg clone -q --stream -U http://localhost:$HGPORT1 clone; touch "$HG_TEST_STREAM_WALKED_FILE_3") &
490 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1
489 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1
491 $ echo >> repo/f1
490 $ echo >> repo/f1
492 $ echo >> repo/f2
491 $ echo >> repo/f2
493 $ hg -R repo ci -m "1" --config ui.timeout.warn=-1
492 $ hg -R repo ci -m "1" --config ui.timeout.warn=-1
494 $ touch $HG_TEST_STREAM_WALKED_FILE_2
493 $ touch $HG_TEST_STREAM_WALKED_FILE_2
495 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_3
494 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_3
496 $ hg -R clone id
495 $ hg -R clone id
497 000000000000
496 000000000000
498 $ cat errors.log
497 $ cat errors.log
499 $ cd ..
498 $ cd ..
500
499
501 Stream repository with bookmarks
500 Stream repository with bookmarks
502 --------------------------------
501 --------------------------------
503
502
504 (revert introduction of secret changeset)
503 (revert introduction of secret changeset)
505
504
506 $ hg -R server phase --draft 'secret()'
505 $ hg -R server phase --draft 'secret()'
507
506
508 add a bookmark
507 add a bookmark
509
508
510 $ hg -R server bookmark -r tip some-bookmark
509 $ hg -R server bookmark -r tip some-bookmark
511
510
512 clone it
511 clone it
513
512
514 #if stream-legacy
513 #if stream-legacy
515 $ hg clone --stream http://localhost:$HGPORT with-bookmarks
514 $ hg clone --stream http://localhost:$HGPORT with-bookmarks
516 streaming all changes
515 streaming all changes
517 1027 files to transfer, 96.3 KB of data (no-zstd !)
516 1027 files to transfer, 96.3 KB of data (no-zstd !)
518 transferred 96.3 KB in * seconds (*) (glob) (no-zstd !)
517 transferred 96.3 KB in * seconds (*) (glob) (no-zstd !)
519 1027 files to transfer, 93.5 KB of data (zstd !)
518 1027 files to transfer, 93.5 KB of data (zstd !)
520 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
519 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
521 searching for changes
520 searching for changes
522 no changes found
521 no changes found
523 updating to branch default
522 updating to branch default
524 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
523 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
525 #endif
524 #endif
526 #if stream-bundle2
525 #if stream-bundle2
527 $ hg clone --stream http://localhost:$HGPORT with-bookmarks
526 $ hg clone --stream http://localhost:$HGPORT with-bookmarks
528 streaming all changes
527 streaming all changes
529 1033 files to transfer, 96.6 KB of data (no-zstd !)
528 1033 files to transfer, 96.6 KB of data (no-zstd !)
530 transferred 96.6 KB in * seconds (* */sec) (glob) (no-zstd !)
529 transferred 96.6 KB in * seconds (* */sec) (glob) (no-zstd !)
531 1033 files to transfer, 93.8 KB of data (zstd !)
530 1033 files to transfer, 93.8 KB of data (zstd !)
532 transferred 93.8 KB in * seconds (* */sec) (glob) (zstd !)
531 transferred 93.8 KB in * seconds (* */sec) (glob) (zstd !)
533 updating to branch default
532 updating to branch default
534 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
533 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
535 #endif
534 #endif
536 $ hg -R with-bookmarks bookmarks
535 $ hg -R with-bookmarks bookmarks
537 some-bookmark 1:c17445101a72
536 some-bookmark 1:c17445101a72
538
537
539 Stream repository with phases
538 Stream repository with phases
540 -----------------------------
539 -----------------------------
541
540
542 Clone as publishing
541 Clone as publishing
543
542
544 $ hg -R server phase -r 'all()'
543 $ hg -R server phase -r 'all()'
545 0: draft
544 0: draft
546 1: draft
545 1: draft
547
546
548 #if stream-legacy
547 #if stream-legacy
549 $ hg clone --stream http://localhost:$HGPORT phase-publish
548 $ hg clone --stream http://localhost:$HGPORT phase-publish
550 streaming all changes
549 streaming all changes
551 1027 files to transfer, 96.3 KB of data (no-zstd !)
550 1027 files to transfer, 96.3 KB of data (no-zstd !)
552 transferred 96.3 KB in * seconds (*) (glob) (no-zstd !)
551 transferred 96.3 KB in * seconds (*) (glob) (no-zstd !)
553 1027 files to transfer, 93.5 KB of data (zstd !)
552 1027 files to transfer, 93.5 KB of data (zstd !)
554 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
553 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
555 searching for changes
554 searching for changes
556 no changes found
555 no changes found
557 updating to branch default
556 updating to branch default
558 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
557 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
559 #endif
558 #endif
560 #if stream-bundle2
559 #if stream-bundle2
561 $ hg clone --stream http://localhost:$HGPORT phase-publish
560 $ hg clone --stream http://localhost:$HGPORT phase-publish
562 streaming all changes
561 streaming all changes
563 1033 files to transfer, 96.6 KB of data (no-zstd !)
562 1033 files to transfer, 96.6 KB of data (no-zstd !)
564 transferred 96.6 KB in * seconds (* */sec) (glob) (no-zstd !)
563 transferred 96.6 KB in * seconds (* */sec) (glob) (no-zstd !)
565 1033 files to transfer, 93.8 KB of data (zstd !)
564 1033 files to transfer, 93.8 KB of data (zstd !)
566 transferred 93.8 KB in * seconds (* */sec) (glob) (zstd !)
565 transferred 93.8 KB in * seconds (* */sec) (glob) (zstd !)
567 updating to branch default
566 updating to branch default
568 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
567 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
569 #endif
568 #endif
570 $ hg -R phase-publish phase -r 'all()'
569 $ hg -R phase-publish phase -r 'all()'
571 0: public
570 0: public
572 1: public
571 1: public
573
572
574 Clone as non publishing
573 Clone as non publishing
575
574
576 $ cat << EOF >> server/.hg/hgrc
575 $ cat << EOF >> server/.hg/hgrc
577 > [phases]
576 > [phases]
578 > publish = False
577 > publish = False
579 > EOF
578 > EOF
580 $ killdaemons.py
579 $ killdaemons.py
581 $ hg -R server serve -p $HGPORT -d --pid-file=hg.pid
580 $ hg -R server serve -p $HGPORT -d --pid-file=hg.pid
582 $ cat hg.pid > $DAEMON_PIDS
581 $ cat hg.pid > $DAEMON_PIDS
583
582
584 #if stream-legacy
583 #if stream-legacy
585
584
586 With v1 of the stream protocol, changeset are always cloned as public. It make
585 With v1 of the stream protocol, changeset are always cloned as public. It make
587 stream v1 unsuitable for non-publishing repository.
586 stream v1 unsuitable for non-publishing repository.
588
587
589 $ hg clone --stream http://localhost:$HGPORT phase-no-publish
588 $ hg clone --stream http://localhost:$HGPORT phase-no-publish
590 streaming all changes
589 streaming all changes
591 1027 files to transfer, 96.3 KB of data (no-zstd !)
590 1027 files to transfer, 96.3 KB of data (no-zstd !)
592 transferred 96.3 KB in * seconds (* */sec) (glob) (no-zstd !)
591 transferred 96.3 KB in * seconds (* */sec) (glob) (no-zstd !)
593 1027 files to transfer, 93.5 KB of data (zstd !)
592 1027 files to transfer, 93.5 KB of data (zstd !)
594 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
593 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
595 searching for changes
594 searching for changes
596 no changes found
595 no changes found
597 updating to branch default
596 updating to branch default
598 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
597 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
599 $ hg -R phase-no-publish phase -r 'all()'
598 $ hg -R phase-no-publish phase -r 'all()'
600 0: public
599 0: public
601 1: public
600 1: public
602 #endif
601 #endif
603 #if stream-bundle2
602 #if stream-bundle2
604 $ hg clone --stream http://localhost:$HGPORT phase-no-publish
603 $ hg clone --stream http://localhost:$HGPORT phase-no-publish
605 streaming all changes
604 streaming all changes
606 1034 files to transfer, 96.7 KB of data (no-zstd !)
605 1034 files to transfer, 96.7 KB of data (no-zstd !)
607 transferred 96.7 KB in * seconds (* */sec) (glob) (no-zstd !)
606 transferred 96.7 KB in * seconds (* */sec) (glob) (no-zstd !)
608 1034 files to transfer, 93.9 KB of data (zstd !)
607 1034 files to transfer, 93.9 KB of data (zstd !)
609 transferred 93.9 KB in * seconds (* */sec) (glob) (zstd !)
608 transferred 93.9 KB in * seconds (* */sec) (glob) (zstd !)
610 updating to branch default
609 updating to branch default
611 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
610 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
612 $ hg -R phase-no-publish phase -r 'all()'
611 $ hg -R phase-no-publish phase -r 'all()'
613 0: draft
612 0: draft
614 1: draft
613 1: draft
615 #endif
614 #endif
616
615
617 $ killdaemons.py
616 $ killdaemons.py
618
617
619 #if stream-legacy
618 #if stream-legacy
620
619
621 With v1 of the stream protocol, changeset are always cloned as public. There's
620 With v1 of the stream protocol, changeset are always cloned as public. There's
622 no obsolescence markers exchange in stream v1.
621 no obsolescence markers exchange in stream v1.
623
622
624 #endif
623 #endif
625 #if stream-bundle2
624 #if stream-bundle2
626
625
627 Stream repository with obsolescence
626 Stream repository with obsolescence
628 -----------------------------------
627 -----------------------------------
629
628
630 Clone non-publishing with obsolescence
629 Clone non-publishing with obsolescence
631
630
632 $ cat >> $HGRCPATH << EOF
631 $ cat >> $HGRCPATH << EOF
633 > [experimental]
632 > [experimental]
634 > evolution=all
633 > evolution=all
635 > EOF
634 > EOF
636
635
637 $ cd server
636 $ cd server
638 $ echo foo > foo
637 $ echo foo > foo
639 $ hg -q commit -m 'about to be pruned'
638 $ hg -q commit -m 'about to be pruned'
640 $ hg debugobsolete `hg log -r . -T '{node}'` -d '0 0' -u test --record-parents
639 $ hg debugobsolete `hg log -r . -T '{node}'` -d '0 0' -u test --record-parents
641 1 new obsolescence markers
640 1 new obsolescence markers
642 obsoleted 1 changesets
641 obsoleted 1 changesets
643 $ hg up null -q
642 $ hg up null -q
644 $ hg log -T '{rev}: {phase}\n'
643 $ hg log -T '{rev}: {phase}\n'
645 1: draft
644 1: draft
646 0: draft
645 0: draft
647 $ hg serve -p $HGPORT -d --pid-file=hg.pid
646 $ hg serve -p $HGPORT -d --pid-file=hg.pid
648 $ cat hg.pid > $DAEMON_PIDS
647 $ cat hg.pid > $DAEMON_PIDS
649 $ cd ..
648 $ cd ..
650
649
651 $ hg clone -U --stream http://localhost:$HGPORT with-obsolescence
650 $ hg clone -U --stream http://localhost:$HGPORT with-obsolescence
652 streaming all changes
651 streaming all changes
653 1035 files to transfer, 97.1 KB of data (no-zstd !)
652 1035 files to transfer, 97.1 KB of data (no-zstd !)
654 transferred 97.1 KB in * seconds (* */sec) (glob) (no-zstd !)
653 transferred 97.1 KB in * seconds (* */sec) (glob) (no-zstd !)
655 1035 files to transfer, 94.3 KB of data (zstd !)
654 1035 files to transfer, 94.3 KB of data (zstd !)
656 transferred 94.3 KB in * seconds (* */sec) (glob) (zstd !)
655 transferred 94.3 KB in * seconds (* */sec) (glob) (zstd !)
657 $ hg -R with-obsolescence log -T '{rev}: {phase}\n'
656 $ hg -R with-obsolescence log -T '{rev}: {phase}\n'
658 1: draft
657 1: draft
659 0: draft
658 0: draft
660 $ hg debugobsolete -R with-obsolescence
659 $ hg debugobsolete -R with-obsolescence
661 50382b884f66690b7045cac93a540cba4d4c906f 0 {c17445101a72edac06facd130d14808dfbd5c7c2} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
660 50382b884f66690b7045cac93a540cba4d4c906f 0 {c17445101a72edac06facd130d14808dfbd5c7c2} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
662
661
663 $ hg clone -U --stream --config experimental.evolution=0 http://localhost:$HGPORT with-obsolescence-no-evolution
662 $ hg clone -U --stream --config experimental.evolution=0 http://localhost:$HGPORT with-obsolescence-no-evolution
664 streaming all changes
663 streaming all changes
665 remote: abort: server has obsolescence markers, but client cannot receive them via stream clone
664 remote: abort: server has obsolescence markers, but client cannot receive them via stream clone
666 abort: pull failed on remote
665 abort: pull failed on remote
667 [100]
666 [100]
668
667
669 $ killdaemons.py
668 $ killdaemons.py
670
669
671 #endif
670 #endif
@@ -1,1326 +1,1324 b''
1 #testcases sshv1 sshv2
1 #testcases sshv1 sshv2
2
2
3 #if sshv2
3 #if sshv2
4 $ cat >> $HGRCPATH << EOF
4 $ cat >> $HGRCPATH << EOF
5 > [experimental]
5 > [experimental]
6 > sshpeer.advertise-v2 = true
6 > sshpeer.advertise-v2 = true
7 > sshserver.support-v2 = true
7 > sshserver.support-v2 = true
8 > EOF
8 > EOF
9 #endif
9 #endif
10
10
11 Prepare repo a:
11 Prepare repo a:
12
12
13 $ hg init a
13 $ hg init a
14 $ cd a
14 $ cd a
15 $ echo a > a
15 $ echo a > a
16 $ hg add a
16 $ hg add a
17 $ hg commit -m test
17 $ hg commit -m test
18 $ echo first line > b
18 $ echo first line > b
19 $ hg add b
19 $ hg add b
20
20
21 Create a non-inlined filelog:
21 Create a non-inlined filelog:
22
22
23 $ "$PYTHON" -c 'open("data1", "wb").write(b"".join(b"%d\n" % x for x in range(10000)))'
23 $ "$PYTHON" -c 'open("data1", "wb").write(b"".join(b"%d\n" % x for x in range(10000)))'
24 $ for j in 0 1 2 3 4 5 6 7 8 9; do
24 $ for j in 0 1 2 3 4 5 6 7 8 9; do
25 > cat data1 >> b
25 > cat data1 >> b
26 > hg commit -m test
26 > hg commit -m test
27 > done
27 > done
28
28
29 List files in store/data (should show a 'b.d'):
29 List files in store/data (should show a 'b.d'):
30
30
31 #if reporevlogstore
31 #if reporevlogstore
32 $ for i in .hg/store/data/*; do
32 $ for i in .hg/store/data/*; do
33 > echo $i
33 > echo $i
34 > done
34 > done
35 .hg/store/data/a.i
35 .hg/store/data/a.i
36 .hg/store/data/b.d
36 .hg/store/data/b.d
37 .hg/store/data/b.i
37 .hg/store/data/b.i
38 #endif
38 #endif
39
39
40 Trigger branchcache creation:
40 Trigger branchcache creation:
41
41
42 $ hg branches
42 $ hg branches
43 default 10:a7949464abda
43 default 10:a7949464abda
44 $ ls .hg/cache
44 $ ls .hg/cache
45 branch2-served
45 branch2-served
46 rbc-names-v1
46 rbc-names-v1
47 rbc-revs-v1
47 rbc-revs-v1
48
48
49 Default operation:
49 Default operation:
50
50
51 $ hg clone . ../b
51 $ hg clone . ../b
52 updating to branch default
52 updating to branch default
53 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
53 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
54 $ cd ../b
54 $ cd ../b
55
55
56 Ensure branchcache got copied over:
56 Ensure branchcache got copied over:
57
57
58 $ ls .hg/cache
58 $ ls .hg/cache
59 branch2-base
59 branch2-base
60 branch2-immutable
60 branch2-immutable
61 branch2-served
61 branch2-served
62 branch2-served.hidden
62 branch2-served.hidden
63 branch2-visible
63 branch2-visible
64 branch2-visible-hidden
64 branch2-visible-hidden
65 hgtagsfnodes1
66 rbc-names-v1
65 rbc-names-v1
67 rbc-revs-v1
66 rbc-revs-v1
68 tags2
67 tags2
69 tags2-served
68 tags2-served
70
69
71 $ cat a
70 $ cat a
72 a
71 a
73 $ hg verify
72 $ hg verify
74 checking changesets
73 checking changesets
75 checking manifests
74 checking manifests
76 crosschecking files in changesets and manifests
75 crosschecking files in changesets and manifests
77 checking files
76 checking files
78 checked 11 changesets with 11 changes to 2 files
77 checked 11 changesets with 11 changes to 2 files
79
78
80 Invalid dest '' must abort:
79 Invalid dest '' must abort:
81
80
82 $ hg clone . ''
81 $ hg clone . ''
83 abort: empty destination path is not valid
82 abort: empty destination path is not valid
84 [10]
83 [10]
85
84
86 No update, with debug option:
85 No update, with debug option:
87
86
88 #if hardlink
87 #if hardlink
89 $ hg --debug clone -U . ../c --config progress.debug=true
88 $ hg --debug clone -U . ../c --config progress.debug=true
90 linking: 1 files
89 linking: 1 files
91 linking: 2 files
90 linking: 2 files
92 linking: 3 files
91 linking: 3 files
93 linking: 4 files
92 linking: 4 files
94 linking: 5 files
93 linking: 5 files
95 linking: 6 files
94 linking: 6 files
96 linking: 7 files
95 linking: 7 files
97 linking: 8 files
96 linking: 8 files
98 linked 8 files (reporevlogstore !)
97 linked 8 files (reporevlogstore !)
99 linking: 9 files (reposimplestore !)
98 linking: 9 files (reposimplestore !)
100 linking: 10 files (reposimplestore !)
99 linking: 10 files (reposimplestore !)
101 linking: 11 files (reposimplestore !)
100 linking: 11 files (reposimplestore !)
102 linking: 12 files (reposimplestore !)
101 linking: 12 files (reposimplestore !)
103 linking: 13 files (reposimplestore !)
102 linking: 13 files (reposimplestore !)
104 linking: 14 files (reposimplestore !)
103 linking: 14 files (reposimplestore !)
105 linking: 15 files (reposimplestore !)
104 linking: 15 files (reposimplestore !)
106 linking: 16 files (reposimplestore !)
105 linking: 16 files (reposimplestore !)
107 linking: 17 files (reposimplestore !)
106 linking: 17 files (reposimplestore !)
108 linking: 18 files (reposimplestore !)
107 linking: 18 files (reposimplestore !)
109 linked 18 files (reposimplestore !)
108 linked 18 files (reposimplestore !)
110 updating the branch cache
109 updating the branch cache
111 #else
110 #else
112 $ hg --debug clone -U . ../c --config progress.debug=true
111 $ hg --debug clone -U . ../c --config progress.debug=true
113 linking: 1 files
112 linking: 1 files
114 copying: 2 files
113 copying: 2 files
115 copying: 3 files
114 copying: 3 files
116 copying: 4 files
115 copying: 4 files
117 copying: 5 files
116 copying: 5 files
118 copying: 6 files
117 copying: 6 files
119 copying: 7 files
118 copying: 7 files
120 copying: 8 files
119 copying: 8 files
121 copied 8 files (reporevlogstore !)
120 copied 8 files (reporevlogstore !)
122 copying: 9 files (reposimplestore !)
121 copying: 9 files (reposimplestore !)
123 copying: 10 files (reposimplestore !)
122 copying: 10 files (reposimplestore !)
124 copying: 11 files (reposimplestore !)
123 copying: 11 files (reposimplestore !)
125 copying: 12 files (reposimplestore !)
124 copying: 12 files (reposimplestore !)
126 copying: 13 files (reposimplestore !)
125 copying: 13 files (reposimplestore !)
127 copying: 14 files (reposimplestore !)
126 copying: 14 files (reposimplestore !)
128 copying: 15 files (reposimplestore !)
127 copying: 15 files (reposimplestore !)
129 copying: 16 files (reposimplestore !)
128 copying: 16 files (reposimplestore !)
130 copying: 17 files (reposimplestore !)
129 copying: 17 files (reposimplestore !)
131 copying: 18 files (reposimplestore !)
130 copying: 18 files (reposimplestore !)
132 copied 18 files (reposimplestore !)
131 copied 18 files (reposimplestore !)
133 #endif
132 #endif
134 $ cd ../c
133 $ cd ../c
135
134
136 Ensure branchcache got copied over:
135 Ensure branchcache got copied over:
137
136
138 $ ls .hg/cache
137 $ ls .hg/cache
139 branch2-base
138 branch2-base
140 branch2-immutable
139 branch2-immutable
141 branch2-served
140 branch2-served
142 branch2-served.hidden
141 branch2-served.hidden
143 branch2-visible
142 branch2-visible
144 branch2-visible-hidden
143 branch2-visible-hidden
145 hgtagsfnodes1
146 rbc-names-v1
144 rbc-names-v1
147 rbc-revs-v1
145 rbc-revs-v1
148 tags2
146 tags2
149 tags2-served
147 tags2-served
150
148
151 $ cat a 2>/dev/null || echo "a not present"
149 $ cat a 2>/dev/null || echo "a not present"
152 a not present
150 a not present
153 $ hg verify
151 $ hg verify
154 checking changesets
152 checking changesets
155 checking manifests
153 checking manifests
156 crosschecking files in changesets and manifests
154 crosschecking files in changesets and manifests
157 checking files
155 checking files
158 checked 11 changesets with 11 changes to 2 files
156 checked 11 changesets with 11 changes to 2 files
159
157
160 Default destination:
158 Default destination:
161
159
162 $ mkdir ../d
160 $ mkdir ../d
163 $ cd ../d
161 $ cd ../d
164 $ hg clone ../a
162 $ hg clone ../a
165 destination directory: a
163 destination directory: a
166 updating to branch default
164 updating to branch default
167 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
165 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
168 $ cd a
166 $ cd a
169 $ hg cat a
167 $ hg cat a
170 a
168 a
171 $ cd ../..
169 $ cd ../..
172
170
173 Check that we drop the 'file:' from the path before writing the .hgrc:
171 Check that we drop the 'file:' from the path before writing the .hgrc:
174
172
175 $ hg clone file:a e
173 $ hg clone file:a e
176 updating to branch default
174 updating to branch default
177 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
175 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
178 $ grep 'file:' e/.hg/hgrc
176 $ grep 'file:' e/.hg/hgrc
179 [1]
177 [1]
180
178
181 Check that path aliases are expanded:
179 Check that path aliases are expanded:
182
180
183 $ hg clone -q -U --config 'paths.foobar=a#0' foobar f
181 $ hg clone -q -U --config 'paths.foobar=a#0' foobar f
184 $ hg -R f showconfig paths.default
182 $ hg -R f showconfig paths.default
185 $TESTTMP/a#0
183 $TESTTMP/a#0
186
184
187 Use --pull:
185 Use --pull:
188
186
189 $ hg clone --pull a g
187 $ hg clone --pull a g
190 requesting all changes
188 requesting all changes
191 adding changesets
189 adding changesets
192 adding manifests
190 adding manifests
193 adding file changes
191 adding file changes
194 added 11 changesets with 11 changes to 2 files
192 added 11 changesets with 11 changes to 2 files
195 new changesets acb14030fe0a:a7949464abda
193 new changesets acb14030fe0a:a7949464abda
196 updating to branch default
194 updating to branch default
197 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
195 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
198 $ hg -R g verify
196 $ hg -R g verify
199 checking changesets
197 checking changesets
200 checking manifests
198 checking manifests
201 crosschecking files in changesets and manifests
199 crosschecking files in changesets and manifests
202 checking files
200 checking files
203 checked 11 changesets with 11 changes to 2 files
201 checked 11 changesets with 11 changes to 2 files
204
202
205 Invalid dest '' with --pull must abort (issue2528):
203 Invalid dest '' with --pull must abort (issue2528):
206
204
207 $ hg clone --pull a ''
205 $ hg clone --pull a ''
208 abort: empty destination path is not valid
206 abort: empty destination path is not valid
209 [10]
207 [10]
210
208
211 Clone to '.':
209 Clone to '.':
212
210
213 $ mkdir h
211 $ mkdir h
214 $ cd h
212 $ cd h
215 $ hg clone ../a .
213 $ hg clone ../a .
216 updating to branch default
214 updating to branch default
217 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
215 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
218 $ cd ..
216 $ cd ..
219
217
220
218
221 *** Tests for option -u ***
219 *** Tests for option -u ***
222
220
223 Adding some more history to repo a:
221 Adding some more history to repo a:
224
222
225 $ cd a
223 $ cd a
226 $ hg tag ref1
224 $ hg tag ref1
227 $ echo the quick brown fox >a
225 $ echo the quick brown fox >a
228 $ hg ci -m "hacked default"
226 $ hg ci -m "hacked default"
229 $ hg up ref1
227 $ hg up ref1
230 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
228 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
231 $ hg branch stable
229 $ hg branch stable
232 marked working directory as branch stable
230 marked working directory as branch stable
233 (branches are permanent and global, did you want a bookmark?)
231 (branches are permanent and global, did you want a bookmark?)
234 $ echo some text >a
232 $ echo some text >a
235 $ hg ci -m "starting branch stable"
233 $ hg ci -m "starting branch stable"
236 $ hg tag ref2
234 $ hg tag ref2
237 $ echo some more text >a
235 $ echo some more text >a
238 $ hg ci -m "another change for branch stable"
236 $ hg ci -m "another change for branch stable"
239 $ hg up ref2
237 $ hg up ref2
240 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
238 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
241 $ hg parents
239 $ hg parents
242 changeset: 13:e8ece76546a6
240 changeset: 13:e8ece76546a6
243 branch: stable
241 branch: stable
244 tag: ref2
242 tag: ref2
245 parent: 10:a7949464abda
243 parent: 10:a7949464abda
246 user: test
244 user: test
247 date: Thu Jan 01 00:00:00 1970 +0000
245 date: Thu Jan 01 00:00:00 1970 +0000
248 summary: starting branch stable
246 summary: starting branch stable
249
247
250
248
251 Repo a has two heads:
249 Repo a has two heads:
252
250
253 $ hg heads
251 $ hg heads
254 changeset: 15:0aae7cf88f0d
252 changeset: 15:0aae7cf88f0d
255 branch: stable
253 branch: stable
256 tag: tip
254 tag: tip
257 user: test
255 user: test
258 date: Thu Jan 01 00:00:00 1970 +0000
256 date: Thu Jan 01 00:00:00 1970 +0000
259 summary: another change for branch stable
257 summary: another change for branch stable
260
258
261 changeset: 12:f21241060d6a
259 changeset: 12:f21241060d6a
262 user: test
260 user: test
263 date: Thu Jan 01 00:00:00 1970 +0000
261 date: Thu Jan 01 00:00:00 1970 +0000
264 summary: hacked default
262 summary: hacked default
265
263
266
264
267 $ cd ..
265 $ cd ..
268
266
269
267
270 Testing --noupdate with --updaterev (must abort):
268 Testing --noupdate with --updaterev (must abort):
271
269
272 $ hg clone --noupdate --updaterev 1 a ua
270 $ hg clone --noupdate --updaterev 1 a ua
273 abort: cannot specify both --noupdate and --updaterev
271 abort: cannot specify both --noupdate and --updaterev
274 [10]
272 [10]
275
273
276
274
277 Testing clone -u:
275 Testing clone -u:
278
276
279 $ hg clone -u . a ua
277 $ hg clone -u . a ua
280 updating to branch stable
278 updating to branch stable
281 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
279 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
282
280
283 Repo ua has both heads:
281 Repo ua has both heads:
284
282
285 $ hg -R ua heads
283 $ hg -R ua heads
286 changeset: 15:0aae7cf88f0d
284 changeset: 15:0aae7cf88f0d
287 branch: stable
285 branch: stable
288 tag: tip
286 tag: tip
289 user: test
287 user: test
290 date: Thu Jan 01 00:00:00 1970 +0000
288 date: Thu Jan 01 00:00:00 1970 +0000
291 summary: another change for branch stable
289 summary: another change for branch stable
292
290
293 changeset: 12:f21241060d6a
291 changeset: 12:f21241060d6a
294 user: test
292 user: test
295 date: Thu Jan 01 00:00:00 1970 +0000
293 date: Thu Jan 01 00:00:00 1970 +0000
296 summary: hacked default
294 summary: hacked default
297
295
298
296
299 Same revision checked out in repo a and ua:
297 Same revision checked out in repo a and ua:
300
298
301 $ hg -R a parents --template "{node|short}\n"
299 $ hg -R a parents --template "{node|short}\n"
302 e8ece76546a6
300 e8ece76546a6
303 $ hg -R ua parents --template "{node|short}\n"
301 $ hg -R ua parents --template "{node|short}\n"
304 e8ece76546a6
302 e8ece76546a6
305
303
306 $ rm -r ua
304 $ rm -r ua
307
305
308
306
309 Testing clone --pull -u:
307 Testing clone --pull -u:
310
308
311 $ hg clone --pull -u . a ua
309 $ hg clone --pull -u . a ua
312 requesting all changes
310 requesting all changes
313 adding changesets
311 adding changesets
314 adding manifests
312 adding manifests
315 adding file changes
313 adding file changes
316 added 16 changesets with 16 changes to 3 files (+1 heads)
314 added 16 changesets with 16 changes to 3 files (+1 heads)
317 new changesets acb14030fe0a:0aae7cf88f0d
315 new changesets acb14030fe0a:0aae7cf88f0d
318 updating to branch stable
316 updating to branch stable
319 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
317 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
320
318
321 Repo ua has both heads:
319 Repo ua has both heads:
322
320
323 $ hg -R ua heads
321 $ hg -R ua heads
324 changeset: 15:0aae7cf88f0d
322 changeset: 15:0aae7cf88f0d
325 branch: stable
323 branch: stable
326 tag: tip
324 tag: tip
327 user: test
325 user: test
328 date: Thu Jan 01 00:00:00 1970 +0000
326 date: Thu Jan 01 00:00:00 1970 +0000
329 summary: another change for branch stable
327 summary: another change for branch stable
330
328
331 changeset: 12:f21241060d6a
329 changeset: 12:f21241060d6a
332 user: test
330 user: test
333 date: Thu Jan 01 00:00:00 1970 +0000
331 date: Thu Jan 01 00:00:00 1970 +0000
334 summary: hacked default
332 summary: hacked default
335
333
336
334
337 Same revision checked out in repo a and ua:
335 Same revision checked out in repo a and ua:
338
336
339 $ hg -R a parents --template "{node|short}\n"
337 $ hg -R a parents --template "{node|short}\n"
340 e8ece76546a6
338 e8ece76546a6
341 $ hg -R ua parents --template "{node|short}\n"
339 $ hg -R ua parents --template "{node|short}\n"
342 e8ece76546a6
340 e8ece76546a6
343
341
344 $ rm -r ua
342 $ rm -r ua
345
343
346
344
347 Testing clone -u <branch>:
345 Testing clone -u <branch>:
348
346
349 $ hg clone -u stable a ua
347 $ hg clone -u stable a ua
350 updating to branch stable
348 updating to branch stable
351 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
349 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
352
350
353 Repo ua has both heads:
351 Repo ua has both heads:
354
352
355 $ hg -R ua heads
353 $ hg -R ua heads
356 changeset: 15:0aae7cf88f0d
354 changeset: 15:0aae7cf88f0d
357 branch: stable
355 branch: stable
358 tag: tip
356 tag: tip
359 user: test
357 user: test
360 date: Thu Jan 01 00:00:00 1970 +0000
358 date: Thu Jan 01 00:00:00 1970 +0000
361 summary: another change for branch stable
359 summary: another change for branch stable
362
360
363 changeset: 12:f21241060d6a
361 changeset: 12:f21241060d6a
364 user: test
362 user: test
365 date: Thu Jan 01 00:00:00 1970 +0000
363 date: Thu Jan 01 00:00:00 1970 +0000
366 summary: hacked default
364 summary: hacked default
367
365
368
366
369 Branch 'stable' is checked out:
367 Branch 'stable' is checked out:
370
368
371 $ hg -R ua parents
369 $ hg -R ua parents
372 changeset: 15:0aae7cf88f0d
370 changeset: 15:0aae7cf88f0d
373 branch: stable
371 branch: stable
374 tag: tip
372 tag: tip
375 user: test
373 user: test
376 date: Thu Jan 01 00:00:00 1970 +0000
374 date: Thu Jan 01 00:00:00 1970 +0000
377 summary: another change for branch stable
375 summary: another change for branch stable
378
376
379
377
380 $ rm -r ua
378 $ rm -r ua
381
379
382
380
383 Testing default checkout:
381 Testing default checkout:
384
382
385 $ hg clone a ua
383 $ hg clone a ua
386 updating to branch default
384 updating to branch default
387 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
385 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
388
386
389 Repo ua has both heads:
387 Repo ua has both heads:
390
388
391 $ hg -R ua heads
389 $ hg -R ua heads
392 changeset: 15:0aae7cf88f0d
390 changeset: 15:0aae7cf88f0d
393 branch: stable
391 branch: stable
394 tag: tip
392 tag: tip
395 user: test
393 user: test
396 date: Thu Jan 01 00:00:00 1970 +0000
394 date: Thu Jan 01 00:00:00 1970 +0000
397 summary: another change for branch stable
395 summary: another change for branch stable
398
396
399 changeset: 12:f21241060d6a
397 changeset: 12:f21241060d6a
400 user: test
398 user: test
401 date: Thu Jan 01 00:00:00 1970 +0000
399 date: Thu Jan 01 00:00:00 1970 +0000
402 summary: hacked default
400 summary: hacked default
403
401
404
402
405 Branch 'default' is checked out:
403 Branch 'default' is checked out:
406
404
407 $ hg -R ua parents
405 $ hg -R ua parents
408 changeset: 12:f21241060d6a
406 changeset: 12:f21241060d6a
409 user: test
407 user: test
410 date: Thu Jan 01 00:00:00 1970 +0000
408 date: Thu Jan 01 00:00:00 1970 +0000
411 summary: hacked default
409 summary: hacked default
412
410
413 Test clone with a branch named "@" (issue3677)
411 Test clone with a branch named "@" (issue3677)
414
412
415 $ hg -R ua branch @
413 $ hg -R ua branch @
416 marked working directory as branch @
414 marked working directory as branch @
417 $ hg -R ua commit -m 'created branch @'
415 $ hg -R ua commit -m 'created branch @'
418 $ hg clone ua atbranch
416 $ hg clone ua atbranch
419 updating to branch default
417 updating to branch default
420 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
418 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
421 $ hg -R atbranch heads
419 $ hg -R atbranch heads
422 changeset: 16:798b6d97153e
420 changeset: 16:798b6d97153e
423 branch: @
421 branch: @
424 tag: tip
422 tag: tip
425 parent: 12:f21241060d6a
423 parent: 12:f21241060d6a
426 user: test
424 user: test
427 date: Thu Jan 01 00:00:00 1970 +0000
425 date: Thu Jan 01 00:00:00 1970 +0000
428 summary: created branch @
426 summary: created branch @
429
427
430 changeset: 15:0aae7cf88f0d
428 changeset: 15:0aae7cf88f0d
431 branch: stable
429 branch: stable
432 user: test
430 user: test
433 date: Thu Jan 01 00:00:00 1970 +0000
431 date: Thu Jan 01 00:00:00 1970 +0000
434 summary: another change for branch stable
432 summary: another change for branch stable
435
433
436 changeset: 12:f21241060d6a
434 changeset: 12:f21241060d6a
437 user: test
435 user: test
438 date: Thu Jan 01 00:00:00 1970 +0000
436 date: Thu Jan 01 00:00:00 1970 +0000
439 summary: hacked default
437 summary: hacked default
440
438
441 $ hg -R atbranch parents
439 $ hg -R atbranch parents
442 changeset: 12:f21241060d6a
440 changeset: 12:f21241060d6a
443 user: test
441 user: test
444 date: Thu Jan 01 00:00:00 1970 +0000
442 date: Thu Jan 01 00:00:00 1970 +0000
445 summary: hacked default
443 summary: hacked default
446
444
447
445
448 $ rm -r ua atbranch
446 $ rm -r ua atbranch
449
447
450
448
451 Testing #<branch>:
449 Testing #<branch>:
452
450
453 $ hg clone -u . a#stable ua
451 $ hg clone -u . a#stable ua
454 adding changesets
452 adding changesets
455 adding manifests
453 adding manifests
456 adding file changes
454 adding file changes
457 added 14 changesets with 14 changes to 3 files
455 added 14 changesets with 14 changes to 3 files
458 new changesets acb14030fe0a:0aae7cf88f0d
456 new changesets acb14030fe0a:0aae7cf88f0d
459 updating to branch stable
457 updating to branch stable
460 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
458 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
461
459
462 Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):
460 Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):
463
461
464 $ hg -R ua heads
462 $ hg -R ua heads
465 changeset: 13:0aae7cf88f0d
463 changeset: 13:0aae7cf88f0d
466 branch: stable
464 branch: stable
467 tag: tip
465 tag: tip
468 user: test
466 user: test
469 date: Thu Jan 01 00:00:00 1970 +0000
467 date: Thu Jan 01 00:00:00 1970 +0000
470 summary: another change for branch stable
468 summary: another change for branch stable
471
469
472 changeset: 10:a7949464abda
470 changeset: 10:a7949464abda
473 user: test
471 user: test
474 date: Thu Jan 01 00:00:00 1970 +0000
472 date: Thu Jan 01 00:00:00 1970 +0000
475 summary: test
473 summary: test
476
474
477
475
478 Same revision checked out in repo a and ua:
476 Same revision checked out in repo a and ua:
479
477
480 $ hg -R a parents --template "{node|short}\n"
478 $ hg -R a parents --template "{node|short}\n"
481 e8ece76546a6
479 e8ece76546a6
482 $ hg -R ua parents --template "{node|short}\n"
480 $ hg -R ua parents --template "{node|short}\n"
483 e8ece76546a6
481 e8ece76546a6
484
482
485 $ rm -r ua
483 $ rm -r ua
486
484
487
485
488 Testing -u -r <branch>:
486 Testing -u -r <branch>:
489
487
490 $ hg clone -u . -r stable a ua
488 $ hg clone -u . -r stable a ua
491 adding changesets
489 adding changesets
492 adding manifests
490 adding manifests
493 adding file changes
491 adding file changes
494 added 14 changesets with 14 changes to 3 files
492 added 14 changesets with 14 changes to 3 files
495 new changesets acb14030fe0a:0aae7cf88f0d
493 new changesets acb14030fe0a:0aae7cf88f0d
496 updating to branch stable
494 updating to branch stable
497 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
495 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
498
496
499 Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):
497 Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):
500
498
501 $ hg -R ua heads
499 $ hg -R ua heads
502 changeset: 13:0aae7cf88f0d
500 changeset: 13:0aae7cf88f0d
503 branch: stable
501 branch: stable
504 tag: tip
502 tag: tip
505 user: test
503 user: test
506 date: Thu Jan 01 00:00:00 1970 +0000
504 date: Thu Jan 01 00:00:00 1970 +0000
507 summary: another change for branch stable
505 summary: another change for branch stable
508
506
509 changeset: 10:a7949464abda
507 changeset: 10:a7949464abda
510 user: test
508 user: test
511 date: Thu Jan 01 00:00:00 1970 +0000
509 date: Thu Jan 01 00:00:00 1970 +0000
512 summary: test
510 summary: test
513
511
514
512
515 Same revision checked out in repo a and ua:
513 Same revision checked out in repo a and ua:
516
514
517 $ hg -R a parents --template "{node|short}\n"
515 $ hg -R a parents --template "{node|short}\n"
518 e8ece76546a6
516 e8ece76546a6
519 $ hg -R ua parents --template "{node|short}\n"
517 $ hg -R ua parents --template "{node|short}\n"
520 e8ece76546a6
518 e8ece76546a6
521
519
522 $ rm -r ua
520 $ rm -r ua
523
521
524
522
525 Testing -r <branch>:
523 Testing -r <branch>:
526
524
527 $ hg clone -r stable a ua
525 $ hg clone -r stable a ua
528 adding changesets
526 adding changesets
529 adding manifests
527 adding manifests
530 adding file changes
528 adding file changes
531 added 14 changesets with 14 changes to 3 files
529 added 14 changesets with 14 changes to 3 files
532 new changesets acb14030fe0a:0aae7cf88f0d
530 new changesets acb14030fe0a:0aae7cf88f0d
533 updating to branch stable
531 updating to branch stable
534 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
532 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
535
533
536 Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):
534 Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):
537
535
538 $ hg -R ua heads
536 $ hg -R ua heads
539 changeset: 13:0aae7cf88f0d
537 changeset: 13:0aae7cf88f0d
540 branch: stable
538 branch: stable
541 tag: tip
539 tag: tip
542 user: test
540 user: test
543 date: Thu Jan 01 00:00:00 1970 +0000
541 date: Thu Jan 01 00:00:00 1970 +0000
544 summary: another change for branch stable
542 summary: another change for branch stable
545
543
546 changeset: 10:a7949464abda
544 changeset: 10:a7949464abda
547 user: test
545 user: test
548 date: Thu Jan 01 00:00:00 1970 +0000
546 date: Thu Jan 01 00:00:00 1970 +0000
549 summary: test
547 summary: test
550
548
551
549
552 Branch 'stable' is checked out:
550 Branch 'stable' is checked out:
553
551
554 $ hg -R ua parents
552 $ hg -R ua parents
555 changeset: 13:0aae7cf88f0d
553 changeset: 13:0aae7cf88f0d
556 branch: stable
554 branch: stable
557 tag: tip
555 tag: tip
558 user: test
556 user: test
559 date: Thu Jan 01 00:00:00 1970 +0000
557 date: Thu Jan 01 00:00:00 1970 +0000
560 summary: another change for branch stable
558 summary: another change for branch stable
561
559
562
560
563 $ rm -r ua
561 $ rm -r ua
564
562
565
563
566 Issue2267: Error in 1.6 hg.py: TypeError: 'NoneType' object is not
564 Issue2267: Error in 1.6 hg.py: TypeError: 'NoneType' object is not
567 iterable in addbranchrevs()
565 iterable in addbranchrevs()
568
566
569 $ cat <<EOF > simpleclone.py
567 $ cat <<EOF > simpleclone.py
570 > from mercurial import hg, ui as uimod
568 > from mercurial import hg, ui as uimod
571 > myui = uimod.ui.load()
569 > myui = uimod.ui.load()
572 > repo = hg.repository(myui, b'a')
570 > repo = hg.repository(myui, b'a')
573 > hg.clone(myui, {}, repo, dest=b"ua")
571 > hg.clone(myui, {}, repo, dest=b"ua")
574 > EOF
572 > EOF
575
573
576 $ "$PYTHON" simpleclone.py
574 $ "$PYTHON" simpleclone.py
577 updating to branch default
575 updating to branch default
578 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
576 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
579
577
580 $ rm -r ua
578 $ rm -r ua
581
579
582 $ cat <<EOF > branchclone.py
580 $ cat <<EOF > branchclone.py
583 > from mercurial import extensions, hg, ui as uimod
581 > from mercurial import extensions, hg, ui as uimod
584 > myui = uimod.ui.load()
582 > myui = uimod.ui.load()
585 > extensions.loadall(myui)
583 > extensions.loadall(myui)
586 > extensions.populateui(myui)
584 > extensions.populateui(myui)
587 > repo = hg.repository(myui, b'a')
585 > repo = hg.repository(myui, b'a')
588 > hg.clone(myui, {}, repo, dest=b"ua", branch=[b"stable"])
586 > hg.clone(myui, {}, repo, dest=b"ua", branch=[b"stable"])
589 > EOF
587 > EOF
590
588
591 $ "$PYTHON" branchclone.py
589 $ "$PYTHON" branchclone.py
592 adding changesets
590 adding changesets
593 adding manifests
591 adding manifests
594 adding file changes
592 adding file changes
595 added 14 changesets with 14 changes to 3 files
593 added 14 changesets with 14 changes to 3 files
596 new changesets acb14030fe0a:0aae7cf88f0d
594 new changesets acb14030fe0a:0aae7cf88f0d
597 updating to branch stable
595 updating to branch stable
598 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
596 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
599 $ rm -r ua
597 $ rm -r ua
600
598
601
599
602 Test clone with special '@' bookmark:
600 Test clone with special '@' bookmark:
603 $ cd a
601 $ cd a
604 $ hg bookmark -r a7949464abda @ # branch point of stable from default
602 $ hg bookmark -r a7949464abda @ # branch point of stable from default
605 $ hg clone . ../i
603 $ hg clone . ../i
606 updating to bookmark @
604 updating to bookmark @
607 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
605 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
608 $ hg id -i ../i
606 $ hg id -i ../i
609 a7949464abda
607 a7949464abda
610 $ rm -r ../i
608 $ rm -r ../i
611
609
612 $ hg bookmark -f -r stable @
610 $ hg bookmark -f -r stable @
613 $ hg bookmarks
611 $ hg bookmarks
614 @ 15:0aae7cf88f0d
612 @ 15:0aae7cf88f0d
615 $ hg clone . ../i
613 $ hg clone . ../i
616 updating to bookmark @ on branch stable
614 updating to bookmark @ on branch stable
617 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
615 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
618 $ hg id -i ../i
616 $ hg id -i ../i
619 0aae7cf88f0d
617 0aae7cf88f0d
620 $ cd "$TESTTMP"
618 $ cd "$TESTTMP"
621
619
622
620
623 Testing failures:
621 Testing failures:
624
622
625 $ mkdir fail
623 $ mkdir fail
626 $ cd fail
624 $ cd fail
627
625
628 No local source
626 No local source
629
627
630 $ hg clone a b
628 $ hg clone a b
631 abort: repository a not found
629 abort: repository a not found
632 [255]
630 [255]
633
631
634 Invalid URL
632 Invalid URL
635
633
636 $ hg clone http://invalid:url/a b
634 $ hg clone http://invalid:url/a b
637 abort: error: nonnumeric port: 'url'
635 abort: error: nonnumeric port: 'url'
638 [100]
636 [100]
639
637
640 No remote source
638 No remote source
641
639
642 #if windows
640 #if windows
643 $ hg clone http://$LOCALIP:3121/a b
641 $ hg clone http://$LOCALIP:3121/a b
644 abort: error: * (glob)
642 abort: error: * (glob)
645 [100]
643 [100]
646 #else
644 #else
647 $ hg clone http://$LOCALIP:3121/a b
645 $ hg clone http://$LOCALIP:3121/a b
648 abort: error: *refused* (glob)
646 abort: error: *refused* (glob)
649 [100]
647 [100]
650 #endif
648 #endif
651 $ rm -rf b # work around bug with http clone
649 $ rm -rf b # work around bug with http clone
652
650
653
651
654 #if unix-permissions no-root
652 #if unix-permissions no-root
655
653
656 Inaccessible source
654 Inaccessible source
657
655
658 $ mkdir a
656 $ mkdir a
659 $ chmod 000 a
657 $ chmod 000 a
660 $ hg clone a b
658 $ hg clone a b
661 abort: Permission denied: *$TESTTMP/fail/a/.hg* (glob)
659 abort: Permission denied: *$TESTTMP/fail/a/.hg* (glob)
662 [255]
660 [255]
663
661
664 Inaccessible destination
662 Inaccessible destination
665
663
666 $ hg init b
664 $ hg init b
667 $ cd b
665 $ cd b
668 $ hg clone . ../a
666 $ hg clone . ../a
669 abort: Permission denied: *../a* (glob)
667 abort: Permission denied: *../a* (glob)
670 [255]
668 [255]
671 $ cd ..
669 $ cd ..
672 $ chmod 700 a
670 $ chmod 700 a
673 $ rm -r a b
671 $ rm -r a b
674
672
675 #endif
673 #endif
676
674
677
675
678 #if fifo
676 #if fifo
679
677
680 Source of wrong type
678 Source of wrong type
681
679
682 $ mkfifo a
680 $ mkfifo a
683 $ hg clone a b
681 $ hg clone a b
684 abort: $ENOTDIR$: *$TESTTMP/fail/a/.hg* (glob)
682 abort: $ENOTDIR$: *$TESTTMP/fail/a/.hg* (glob)
685 [255]
683 [255]
686 $ rm a
684 $ rm a
687
685
688 #endif
686 #endif
689
687
690 Default destination, same directory
688 Default destination, same directory
691
689
692 $ hg init q
690 $ hg init q
693 $ hg clone q
691 $ hg clone q
694 destination directory: q
692 destination directory: q
695 abort: destination 'q' is not empty
693 abort: destination 'q' is not empty
696 [10]
694 [10]
697
695
698 destination directory not empty
696 destination directory not empty
699
697
700 $ mkdir a
698 $ mkdir a
701 $ echo stuff > a/a
699 $ echo stuff > a/a
702 $ hg clone q a
700 $ hg clone q a
703 abort: destination 'a' is not empty
701 abort: destination 'a' is not empty
704 [10]
702 [10]
705
703
706
704
707 #if unix-permissions no-root
705 #if unix-permissions no-root
708
706
709 leave existing directory in place after clone failure
707 leave existing directory in place after clone failure
710
708
711 $ hg init c
709 $ hg init c
712 $ cd c
710 $ cd c
713 $ echo c > c
711 $ echo c > c
714 $ hg commit -A -m test
712 $ hg commit -A -m test
715 adding c
713 adding c
716 $ chmod -rx .hg/store/data
714 $ chmod -rx .hg/store/data
717 $ cd ..
715 $ cd ..
718 $ mkdir d
716 $ mkdir d
719 $ hg clone c d 2> err
717 $ hg clone c d 2> err
720 [255]
718 [255]
721 $ test -d d
719 $ test -d d
722 $ test -d d/.hg
720 $ test -d d/.hg
723 [1]
721 [1]
724
722
725 re-enable perm to allow deletion
723 re-enable perm to allow deletion
726
724
727 $ chmod +rx c/.hg/store/data
725 $ chmod +rx c/.hg/store/data
728
726
729 #endif
727 #endif
730
728
731 $ cd ..
729 $ cd ..
732
730
733 Test clone from the repository in (emulated) revlog format 0 (issue4203):
731 Test clone from the repository in (emulated) revlog format 0 (issue4203):
734
732
735 $ mkdir issue4203
733 $ mkdir issue4203
736 $ mkdir -p src/.hg
734 $ mkdir -p src/.hg
737 $ echo foo > src/foo
735 $ echo foo > src/foo
738 $ hg -R src add src/foo
736 $ hg -R src add src/foo
739 $ hg -R src commit -m '#0'
737 $ hg -R src commit -m '#0'
740 $ hg -R src log -q
738 $ hg -R src log -q
741 0:e1bab28bca43
739 0:e1bab28bca43
742 $ hg -R src debugrevlog -c | egrep 'format|flags'
740 $ hg -R src debugrevlog -c | egrep 'format|flags'
743 format : 0
741 format : 0
744 flags : (none)
742 flags : (none)
745 $ hg root -R src -T json | sed 's|\\\\|\\|g'
743 $ hg root -R src -T json | sed 's|\\\\|\\|g'
746 [
744 [
747 {
745 {
748 "hgpath": "$TESTTMP/src/.hg",
746 "hgpath": "$TESTTMP/src/.hg",
749 "reporoot": "$TESTTMP/src",
747 "reporoot": "$TESTTMP/src",
750 "storepath": "$TESTTMP/src/.hg"
748 "storepath": "$TESTTMP/src/.hg"
751 }
749 }
752 ]
750 ]
753 $ hg clone -U -q src dst
751 $ hg clone -U -q src dst
754 $ hg -R dst log -q
752 $ hg -R dst log -q
755 0:e1bab28bca43
753 0:e1bab28bca43
756
754
757 Create repositories to test auto sharing functionality
755 Create repositories to test auto sharing functionality
758
756
759 $ cat >> $HGRCPATH << EOF
757 $ cat >> $HGRCPATH << EOF
760 > [extensions]
758 > [extensions]
761 > share=
759 > share=
762 > EOF
760 > EOF
763
761
764 $ hg init empty
762 $ hg init empty
765 $ hg init source1a
763 $ hg init source1a
766 $ cd source1a
764 $ cd source1a
767 $ echo initial1 > foo
765 $ echo initial1 > foo
768 $ hg -q commit -A -m initial
766 $ hg -q commit -A -m initial
769 $ echo second > foo
767 $ echo second > foo
770 $ hg commit -m second
768 $ hg commit -m second
771 $ cd ..
769 $ cd ..
772
770
773 $ hg init filteredrev0
771 $ hg init filteredrev0
774 $ cd filteredrev0
772 $ cd filteredrev0
775 $ cat >> .hg/hgrc << EOF
773 $ cat >> .hg/hgrc << EOF
776 > [experimental]
774 > [experimental]
777 > evolution.createmarkers=True
775 > evolution.createmarkers=True
778 > EOF
776 > EOF
779 $ echo initial1 > foo
777 $ echo initial1 > foo
780 $ hg -q commit -A -m initial0
778 $ hg -q commit -A -m initial0
781 $ hg -q up -r null
779 $ hg -q up -r null
782 $ echo initial2 > foo
780 $ echo initial2 > foo
783 $ hg -q commit -A -m initial1
781 $ hg -q commit -A -m initial1
784 $ hg debugobsolete c05d5c47a5cf81401869999f3d05f7d699d2b29a e082c1832e09a7d1e78b7fd49a592d372de854c8
782 $ hg debugobsolete c05d5c47a5cf81401869999f3d05f7d699d2b29a e082c1832e09a7d1e78b7fd49a592d372de854c8
785 1 new obsolescence markers
783 1 new obsolescence markers
786 obsoleted 1 changesets
784 obsoleted 1 changesets
787 $ cd ..
785 $ cd ..
788
786
789 $ hg -q clone --pull source1a source1b
787 $ hg -q clone --pull source1a source1b
790 $ cd source1a
788 $ cd source1a
791 $ hg bookmark bookA
789 $ hg bookmark bookA
792 $ echo 1a > foo
790 $ echo 1a > foo
793 $ hg commit -m 1a
791 $ hg commit -m 1a
794 $ cd ../source1b
792 $ cd ../source1b
795 $ hg -q up -r 0
793 $ hg -q up -r 0
796 $ echo head1 > foo
794 $ echo head1 > foo
797 $ hg commit -m head1
795 $ hg commit -m head1
798 created new head
796 created new head
799 $ hg bookmark head1
797 $ hg bookmark head1
800 $ hg -q up -r 0
798 $ hg -q up -r 0
801 $ echo head2 > foo
799 $ echo head2 > foo
802 $ hg commit -m head2
800 $ hg commit -m head2
803 created new head
801 created new head
804 $ hg bookmark head2
802 $ hg bookmark head2
805 $ hg -q up -r 0
803 $ hg -q up -r 0
806 $ hg branch branch1
804 $ hg branch branch1
807 marked working directory as branch branch1
805 marked working directory as branch branch1
808 (branches are permanent and global, did you want a bookmark?)
806 (branches are permanent and global, did you want a bookmark?)
809 $ echo branch1 > foo
807 $ echo branch1 > foo
810 $ hg commit -m branch1
808 $ hg commit -m branch1
811 $ hg -q up -r 0
809 $ hg -q up -r 0
812 $ hg branch branch2
810 $ hg branch branch2
813 marked working directory as branch branch2
811 marked working directory as branch branch2
814 $ echo branch2 > foo
812 $ echo branch2 > foo
815 $ hg commit -m branch2
813 $ hg commit -m branch2
816 $ cd ..
814 $ cd ..
817 $ hg init source2
815 $ hg init source2
818 $ cd source2
816 $ cd source2
819 $ echo initial2 > foo
817 $ echo initial2 > foo
820 $ hg -q commit -A -m initial2
818 $ hg -q commit -A -m initial2
821 $ echo second > foo
819 $ echo second > foo
822 $ hg commit -m second
820 $ hg commit -m second
823 $ cd ..
821 $ cd ..
824
822
825 Clone with auto share from an empty repo should not result in share
823 Clone with auto share from an empty repo should not result in share
826
824
827 $ mkdir share
825 $ mkdir share
828 $ hg --config share.pool=share clone empty share-empty
826 $ hg --config share.pool=share clone empty share-empty
829 (not using pooled storage: remote appears to be empty)
827 (not using pooled storage: remote appears to be empty)
830 updating to branch default
828 updating to branch default
831 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
829 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
832 $ ls share
830 $ ls share
833 $ test -d share-empty/.hg/store
831 $ test -d share-empty/.hg/store
834 $ test -f share-empty/.hg/sharedpath
832 $ test -f share-empty/.hg/sharedpath
835 [1]
833 [1]
836
834
837 Clone with auto share from a repo with filtered revision 0 should not result in share
835 Clone with auto share from a repo with filtered revision 0 should not result in share
838
836
839 $ hg --config share.pool=share clone filteredrev0 share-filtered
837 $ hg --config share.pool=share clone filteredrev0 share-filtered
840 (not using pooled storage: unable to resolve identity of remote)
838 (not using pooled storage: unable to resolve identity of remote)
841 requesting all changes
839 requesting all changes
842 adding changesets
840 adding changesets
843 adding manifests
841 adding manifests
844 adding file changes
842 adding file changes
845 added 1 changesets with 1 changes to 1 files
843 added 1 changesets with 1 changes to 1 files
846 new changesets e082c1832e09
844 new changesets e082c1832e09
847 updating to branch default
845 updating to branch default
848 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
846 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
849
847
850 Clone from repo with content should result in shared store being created
848 Clone from repo with content should result in shared store being created
851
849
852 $ hg --config share.pool=share clone source1a share-dest1a
850 $ hg --config share.pool=share clone source1a share-dest1a
853 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
851 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
854 requesting all changes
852 requesting all changes
855 adding changesets
853 adding changesets
856 adding manifests
854 adding manifests
857 adding file changes
855 adding file changes
858 added 3 changesets with 3 changes to 1 files
856 added 3 changesets with 3 changes to 1 files
859 new changesets b5f04eac9d8f:e5bfe23c0b47
857 new changesets b5f04eac9d8f:e5bfe23c0b47
860 searching for changes
858 searching for changes
861 no changes found
859 no changes found
862 adding remote bookmark bookA
860 adding remote bookmark bookA
863 updating working directory
861 updating working directory
864 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
862 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
865
863
866 The shared repo should have been created
864 The shared repo should have been created
867
865
868 $ ls share
866 $ ls share
869 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
867 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
870
868
871 The destination should point to it
869 The destination should point to it
872
870
873 $ cat share-dest1a/.hg/sharedpath; echo
871 $ cat share-dest1a/.hg/sharedpath; echo
874 $TESTTMP/share/b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1/.hg
872 $TESTTMP/share/b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1/.hg
875
873
876 The destination should have bookmarks
874 The destination should have bookmarks
877
875
878 $ hg -R share-dest1a bookmarks
876 $ hg -R share-dest1a bookmarks
879 bookA 2:e5bfe23c0b47
877 bookA 2:e5bfe23c0b47
880
878
881 The default path should be the remote, not the share
879 The default path should be the remote, not the share
882
880
883 $ hg -R share-dest1a config paths.default
881 $ hg -R share-dest1a config paths.default
884 $TESTTMP/source1a
882 $TESTTMP/source1a
885
883
886 Clone with existing share dir should result in pull + share
884 Clone with existing share dir should result in pull + share
887
885
888 $ hg --config share.pool=share clone source1b share-dest1b
886 $ hg --config share.pool=share clone source1b share-dest1b
889 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
887 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
890 searching for changes
888 searching for changes
891 adding changesets
889 adding changesets
892 adding manifests
890 adding manifests
893 adding file changes
891 adding file changes
894 adding remote bookmark head1
892 adding remote bookmark head1
895 adding remote bookmark head2
893 adding remote bookmark head2
896 added 4 changesets with 4 changes to 1 files (+4 heads)
894 added 4 changesets with 4 changes to 1 files (+4 heads)
897 new changesets 4a8dc1ab4c13:6bacf4683960
895 new changesets 4a8dc1ab4c13:6bacf4683960
898 updating working directory
896 updating working directory
899 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
897 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
900
898
901 $ ls share
899 $ ls share
902 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
900 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
903
901
904 $ cat share-dest1b/.hg/sharedpath; echo
902 $ cat share-dest1b/.hg/sharedpath; echo
905 $TESTTMP/share/b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1/.hg
903 $TESTTMP/share/b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1/.hg
906
904
907 We only get bookmarks from the remote, not everything in the share
905 We only get bookmarks from the remote, not everything in the share
908
906
909 $ hg -R share-dest1b bookmarks
907 $ hg -R share-dest1b bookmarks
910 head1 3:4a8dc1ab4c13
908 head1 3:4a8dc1ab4c13
911 head2 4:99f71071f117
909 head2 4:99f71071f117
912
910
913 Default path should be source, not share.
911 Default path should be source, not share.
914
912
915 $ hg -R share-dest1b config paths.default
913 $ hg -R share-dest1b config paths.default
916 $TESTTMP/source1b
914 $TESTTMP/source1b
917
915
918 Checked out revision should be head of default branch
916 Checked out revision should be head of default branch
919
917
920 $ hg -R share-dest1b log -r .
918 $ hg -R share-dest1b log -r .
921 changeset: 4:99f71071f117
919 changeset: 4:99f71071f117
922 bookmark: head2
920 bookmark: head2
923 parent: 0:b5f04eac9d8f
921 parent: 0:b5f04eac9d8f
924 user: test
922 user: test
925 date: Thu Jan 01 00:00:00 1970 +0000
923 date: Thu Jan 01 00:00:00 1970 +0000
926 summary: head2
924 summary: head2
927
925
928
926
929 Clone from unrelated repo should result in new share
927 Clone from unrelated repo should result in new share
930
928
931 $ hg --config share.pool=share clone source2 share-dest2
929 $ hg --config share.pool=share clone source2 share-dest2
932 (sharing from new pooled repository 22aeff664783fd44c6d9b435618173c118c3448e)
930 (sharing from new pooled repository 22aeff664783fd44c6d9b435618173c118c3448e)
933 requesting all changes
931 requesting all changes
934 adding changesets
932 adding changesets
935 adding manifests
933 adding manifests
936 adding file changes
934 adding file changes
937 added 2 changesets with 2 changes to 1 files
935 added 2 changesets with 2 changes to 1 files
938 new changesets 22aeff664783:63cf6c3dba4a
936 new changesets 22aeff664783:63cf6c3dba4a
939 searching for changes
937 searching for changes
940 no changes found
938 no changes found
941 updating working directory
939 updating working directory
942 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
940 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
943
941
944 $ ls share
942 $ ls share
945 22aeff664783fd44c6d9b435618173c118c3448e
943 22aeff664783fd44c6d9b435618173c118c3448e
946 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
944 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
947
945
948 remote naming mode works as advertised
946 remote naming mode works as advertised
949
947
950 $ hg --config share.pool=shareremote --config share.poolnaming=remote clone source1a share-remote1a
948 $ hg --config share.pool=shareremote --config share.poolnaming=remote clone source1a share-remote1a
951 (sharing from new pooled repository 195bb1fcdb595c14a6c13e0269129ed78f6debde)
949 (sharing from new pooled repository 195bb1fcdb595c14a6c13e0269129ed78f6debde)
952 requesting all changes
950 requesting all changes
953 adding changesets
951 adding changesets
954 adding manifests
952 adding manifests
955 adding file changes
953 adding file changes
956 added 3 changesets with 3 changes to 1 files
954 added 3 changesets with 3 changes to 1 files
957 new changesets b5f04eac9d8f:e5bfe23c0b47
955 new changesets b5f04eac9d8f:e5bfe23c0b47
958 searching for changes
956 searching for changes
959 no changes found
957 no changes found
960 adding remote bookmark bookA
958 adding remote bookmark bookA
961 updating working directory
959 updating working directory
962 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
960 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
963
961
964 $ ls shareremote
962 $ ls shareremote
965 195bb1fcdb595c14a6c13e0269129ed78f6debde
963 195bb1fcdb595c14a6c13e0269129ed78f6debde
966
964
967 $ hg --config share.pool=shareremote --config share.poolnaming=remote clone source1b share-remote1b
965 $ hg --config share.pool=shareremote --config share.poolnaming=remote clone source1b share-remote1b
968 (sharing from new pooled repository c0d4f83847ca2a873741feb7048a45085fd47c46)
966 (sharing from new pooled repository c0d4f83847ca2a873741feb7048a45085fd47c46)
969 requesting all changes
967 requesting all changes
970 adding changesets
968 adding changesets
971 adding manifests
969 adding manifests
972 adding file changes
970 adding file changes
973 added 6 changesets with 6 changes to 1 files (+4 heads)
971 added 6 changesets with 6 changes to 1 files (+4 heads)
974 new changesets b5f04eac9d8f:6bacf4683960
972 new changesets b5f04eac9d8f:6bacf4683960
975 searching for changes
973 searching for changes
976 no changes found
974 no changes found
977 adding remote bookmark head1
975 adding remote bookmark head1
978 adding remote bookmark head2
976 adding remote bookmark head2
979 updating working directory
977 updating working directory
980 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
978 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
981
979
982 $ ls shareremote
980 $ ls shareremote
983 195bb1fcdb595c14a6c13e0269129ed78f6debde
981 195bb1fcdb595c14a6c13e0269129ed78f6debde
984 c0d4f83847ca2a873741feb7048a45085fd47c46
982 c0d4f83847ca2a873741feb7048a45085fd47c46
985
983
986 request to clone a single revision is respected in sharing mode
984 request to clone a single revision is respected in sharing mode
987
985
988 $ hg --config share.pool=sharerevs clone -r 4a8dc1ab4c13 source1b share-1arev
986 $ hg --config share.pool=sharerevs clone -r 4a8dc1ab4c13 source1b share-1arev
989 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
987 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
990 adding changesets
988 adding changesets
991 adding manifests
989 adding manifests
992 adding file changes
990 adding file changes
993 added 2 changesets with 2 changes to 1 files
991 added 2 changesets with 2 changes to 1 files
994 new changesets b5f04eac9d8f:4a8dc1ab4c13
992 new changesets b5f04eac9d8f:4a8dc1ab4c13
995 no changes found
993 no changes found
996 adding remote bookmark head1
994 adding remote bookmark head1
997 updating working directory
995 updating working directory
998 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
996 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
999
997
1000 $ hg -R share-1arev log -G
998 $ hg -R share-1arev log -G
1001 @ changeset: 1:4a8dc1ab4c13
999 @ changeset: 1:4a8dc1ab4c13
1002 | bookmark: head1
1000 | bookmark: head1
1003 | tag: tip
1001 | tag: tip
1004 | user: test
1002 | user: test
1005 | date: Thu Jan 01 00:00:00 1970 +0000
1003 | date: Thu Jan 01 00:00:00 1970 +0000
1006 | summary: head1
1004 | summary: head1
1007 |
1005 |
1008 o changeset: 0:b5f04eac9d8f
1006 o changeset: 0:b5f04eac9d8f
1009 user: test
1007 user: test
1010 date: Thu Jan 01 00:00:00 1970 +0000
1008 date: Thu Jan 01 00:00:00 1970 +0000
1011 summary: initial
1009 summary: initial
1012
1010
1013
1011
1014 making another clone should only pull down requested rev
1012 making another clone should only pull down requested rev
1015
1013
1016 $ hg --config share.pool=sharerevs clone -r 99f71071f117 source1b share-1brev
1014 $ hg --config share.pool=sharerevs clone -r 99f71071f117 source1b share-1brev
1017 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1015 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1018 searching for changes
1016 searching for changes
1019 adding changesets
1017 adding changesets
1020 adding manifests
1018 adding manifests
1021 adding file changes
1019 adding file changes
1022 adding remote bookmark head1
1020 adding remote bookmark head1
1023 adding remote bookmark head2
1021 adding remote bookmark head2
1024 added 1 changesets with 1 changes to 1 files (+1 heads)
1022 added 1 changesets with 1 changes to 1 files (+1 heads)
1025 new changesets 99f71071f117
1023 new changesets 99f71071f117
1026 updating working directory
1024 updating working directory
1027 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1025 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1028
1026
1029 $ hg -R share-1brev log -G
1027 $ hg -R share-1brev log -G
1030 @ changeset: 2:99f71071f117
1028 @ changeset: 2:99f71071f117
1031 | bookmark: head2
1029 | bookmark: head2
1032 | tag: tip
1030 | tag: tip
1033 | parent: 0:b5f04eac9d8f
1031 | parent: 0:b5f04eac9d8f
1034 | user: test
1032 | user: test
1035 | date: Thu Jan 01 00:00:00 1970 +0000
1033 | date: Thu Jan 01 00:00:00 1970 +0000
1036 | summary: head2
1034 | summary: head2
1037 |
1035 |
1038 | o changeset: 1:4a8dc1ab4c13
1036 | o changeset: 1:4a8dc1ab4c13
1039 |/ bookmark: head1
1037 |/ bookmark: head1
1040 | user: test
1038 | user: test
1041 | date: Thu Jan 01 00:00:00 1970 +0000
1039 | date: Thu Jan 01 00:00:00 1970 +0000
1042 | summary: head1
1040 | summary: head1
1043 |
1041 |
1044 o changeset: 0:b5f04eac9d8f
1042 o changeset: 0:b5f04eac9d8f
1045 user: test
1043 user: test
1046 date: Thu Jan 01 00:00:00 1970 +0000
1044 date: Thu Jan 01 00:00:00 1970 +0000
1047 summary: initial
1045 summary: initial
1048
1046
1049
1047
1050 Request to clone a single branch is respected in sharing mode
1048 Request to clone a single branch is respected in sharing mode
1051
1049
1052 $ hg --config share.pool=sharebranch clone -b branch1 source1b share-1bbranch1
1050 $ hg --config share.pool=sharebranch clone -b branch1 source1b share-1bbranch1
1053 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1051 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1054 adding changesets
1052 adding changesets
1055 adding manifests
1053 adding manifests
1056 adding file changes
1054 adding file changes
1057 added 2 changesets with 2 changes to 1 files
1055 added 2 changesets with 2 changes to 1 files
1058 new changesets b5f04eac9d8f:5f92a6c1a1b1
1056 new changesets b5f04eac9d8f:5f92a6c1a1b1
1059 no changes found
1057 no changes found
1060 updating working directory
1058 updating working directory
1061 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1059 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1062
1060
1063 $ hg -R share-1bbranch1 log -G
1061 $ hg -R share-1bbranch1 log -G
1064 o changeset: 1:5f92a6c1a1b1
1062 o changeset: 1:5f92a6c1a1b1
1065 | branch: branch1
1063 | branch: branch1
1066 | tag: tip
1064 | tag: tip
1067 | user: test
1065 | user: test
1068 | date: Thu Jan 01 00:00:00 1970 +0000
1066 | date: Thu Jan 01 00:00:00 1970 +0000
1069 | summary: branch1
1067 | summary: branch1
1070 |
1068 |
1071 @ changeset: 0:b5f04eac9d8f
1069 @ changeset: 0:b5f04eac9d8f
1072 user: test
1070 user: test
1073 date: Thu Jan 01 00:00:00 1970 +0000
1071 date: Thu Jan 01 00:00:00 1970 +0000
1074 summary: initial
1072 summary: initial
1075
1073
1076
1074
1077 $ hg --config share.pool=sharebranch clone -b branch2 source1b share-1bbranch2
1075 $ hg --config share.pool=sharebranch clone -b branch2 source1b share-1bbranch2
1078 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1076 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1079 searching for changes
1077 searching for changes
1080 adding changesets
1078 adding changesets
1081 adding manifests
1079 adding manifests
1082 adding file changes
1080 adding file changes
1083 added 1 changesets with 1 changes to 1 files (+1 heads)
1081 added 1 changesets with 1 changes to 1 files (+1 heads)
1084 new changesets 6bacf4683960
1082 new changesets 6bacf4683960
1085 updating working directory
1083 updating working directory
1086 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1084 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1087
1085
1088 $ hg -R share-1bbranch2 log -G
1086 $ hg -R share-1bbranch2 log -G
1089 o changeset: 2:6bacf4683960
1087 o changeset: 2:6bacf4683960
1090 | branch: branch2
1088 | branch: branch2
1091 | tag: tip
1089 | tag: tip
1092 | parent: 0:b5f04eac9d8f
1090 | parent: 0:b5f04eac9d8f
1093 | user: test
1091 | user: test
1094 | date: Thu Jan 01 00:00:00 1970 +0000
1092 | date: Thu Jan 01 00:00:00 1970 +0000
1095 | summary: branch2
1093 | summary: branch2
1096 |
1094 |
1097 | o changeset: 1:5f92a6c1a1b1
1095 | o changeset: 1:5f92a6c1a1b1
1098 |/ branch: branch1
1096 |/ branch: branch1
1099 | user: test
1097 | user: test
1100 | date: Thu Jan 01 00:00:00 1970 +0000
1098 | date: Thu Jan 01 00:00:00 1970 +0000
1101 | summary: branch1
1099 | summary: branch1
1102 |
1100 |
1103 @ changeset: 0:b5f04eac9d8f
1101 @ changeset: 0:b5f04eac9d8f
1104 user: test
1102 user: test
1105 date: Thu Jan 01 00:00:00 1970 +0000
1103 date: Thu Jan 01 00:00:00 1970 +0000
1106 summary: initial
1104 summary: initial
1107
1105
1108
1106
1109 -U is respected in share clone mode
1107 -U is respected in share clone mode
1110
1108
1111 $ hg --config share.pool=share clone -U source1a share-1anowc
1109 $ hg --config share.pool=share clone -U source1a share-1anowc
1112 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1110 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1113 searching for changes
1111 searching for changes
1114 no changes found
1112 no changes found
1115 adding remote bookmark bookA
1113 adding remote bookmark bookA
1116
1114
1117 $ ls -A share-1anowc
1115 $ ls -A share-1anowc
1118 .hg
1116 .hg
1119
1117
1120 Test that auto sharing doesn't cause failure of "hg clone local remote"
1118 Test that auto sharing doesn't cause failure of "hg clone local remote"
1121
1119
1122 $ cd $TESTTMP
1120 $ cd $TESTTMP
1123 $ hg -R a id -r 0
1121 $ hg -R a id -r 0
1124 acb14030fe0a
1122 acb14030fe0a
1125 $ hg id -R remote -r 0
1123 $ hg id -R remote -r 0
1126 abort: repository remote not found
1124 abort: repository remote not found
1127 [255]
1125 [255]
1128 $ hg --config share.pool=share -q clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" a ssh://user@dummy/remote
1126 $ hg --config share.pool=share -q clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" a ssh://user@dummy/remote
1129 $ hg -R remote id -r 0
1127 $ hg -R remote id -r 0
1130 acb14030fe0a
1128 acb14030fe0a
1131
1129
1132 Cloning into pooled storage doesn't race (issue5104)
1130 Cloning into pooled storage doesn't race (issue5104)
1133
1131
1134 $ HGPOSTLOCKDELAY=2.0 hg --config share.pool=racepool --config extensions.lockdelay=$TESTDIR/lockdelay.py clone source1a share-destrace1 > race1.log 2>&1 &
1132 $ HGPOSTLOCKDELAY=2.0 hg --config share.pool=racepool --config extensions.lockdelay=$TESTDIR/lockdelay.py clone source1a share-destrace1 > race1.log 2>&1 &
1135 $ HGPRELOCKDELAY=1.0 hg --config share.pool=racepool --config extensions.lockdelay=$TESTDIR/lockdelay.py clone source1a share-destrace2 > race2.log 2>&1
1133 $ HGPRELOCKDELAY=1.0 hg --config share.pool=racepool --config extensions.lockdelay=$TESTDIR/lockdelay.py clone source1a share-destrace2 > race2.log 2>&1
1136 $ wait
1134 $ wait
1137
1135
1138 $ hg -R share-destrace1 log -r tip
1136 $ hg -R share-destrace1 log -r tip
1139 changeset: 2:e5bfe23c0b47
1137 changeset: 2:e5bfe23c0b47
1140 bookmark: bookA
1138 bookmark: bookA
1141 tag: tip
1139 tag: tip
1142 user: test
1140 user: test
1143 date: Thu Jan 01 00:00:00 1970 +0000
1141 date: Thu Jan 01 00:00:00 1970 +0000
1144 summary: 1a
1142 summary: 1a
1145
1143
1146
1144
1147 $ hg -R share-destrace2 log -r tip
1145 $ hg -R share-destrace2 log -r tip
1148 changeset: 2:e5bfe23c0b47
1146 changeset: 2:e5bfe23c0b47
1149 bookmark: bookA
1147 bookmark: bookA
1150 tag: tip
1148 tag: tip
1151 user: test
1149 user: test
1152 date: Thu Jan 01 00:00:00 1970 +0000
1150 date: Thu Jan 01 00:00:00 1970 +0000
1153 summary: 1a
1151 summary: 1a
1154
1152
1155 One repo should be new, the other should be shared from the pool. We
1153 One repo should be new, the other should be shared from the pool. We
1156 don't care which is which, so we just make sure we always print the
1154 don't care which is which, so we just make sure we always print the
1157 one containing "new pooled" first, then one one containing "existing
1155 one containing "new pooled" first, then one one containing "existing
1158 pooled".
1156 pooled".
1159
1157
1160 $ (grep 'new pooled' race1.log > /dev/null && cat race1.log || cat race2.log) | grep -v lock
1158 $ (grep 'new pooled' race1.log > /dev/null && cat race1.log || cat race2.log) | grep -v lock
1161 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1159 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1162 requesting all changes
1160 requesting all changes
1163 adding changesets
1161 adding changesets
1164 adding manifests
1162 adding manifests
1165 adding file changes
1163 adding file changes
1166 added 3 changesets with 3 changes to 1 files
1164 added 3 changesets with 3 changes to 1 files
1167 new changesets b5f04eac9d8f:e5bfe23c0b47
1165 new changesets b5f04eac9d8f:e5bfe23c0b47
1168 searching for changes
1166 searching for changes
1169 no changes found
1167 no changes found
1170 adding remote bookmark bookA
1168 adding remote bookmark bookA
1171 updating working directory
1169 updating working directory
1172 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1170 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1173
1171
1174 $ (grep 'existing pooled' race1.log > /dev/null && cat race1.log || cat race2.log) | grep -v lock
1172 $ (grep 'existing pooled' race1.log > /dev/null && cat race1.log || cat race2.log) | grep -v lock
1175 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1173 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1176 searching for changes
1174 searching for changes
1177 no changes found
1175 no changes found
1178 adding remote bookmark bookA
1176 adding remote bookmark bookA
1179 updating working directory
1177 updating working directory
1180 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1178 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1181
1179
1182 SEC: check for unsafe ssh url
1180 SEC: check for unsafe ssh url
1183
1181
1184 $ cat >> $HGRCPATH << EOF
1182 $ cat >> $HGRCPATH << EOF
1185 > [ui]
1183 > [ui]
1186 > ssh = sh -c "read l; read l; read l"
1184 > ssh = sh -c "read l; read l; read l"
1187 > EOF
1185 > EOF
1188
1186
1189 $ hg clone 'ssh://-oProxyCommand=touch${IFS}owned/path'
1187 $ hg clone 'ssh://-oProxyCommand=touch${IFS}owned/path'
1190 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path'
1188 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path'
1191 [255]
1189 [255]
1192 $ hg clone 'ssh://%2DoProxyCommand=touch${IFS}owned/path'
1190 $ hg clone 'ssh://%2DoProxyCommand=touch${IFS}owned/path'
1193 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path'
1191 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path'
1194 [255]
1192 [255]
1195 $ hg clone 'ssh://fakehost|touch%20owned/path'
1193 $ hg clone 'ssh://fakehost|touch%20owned/path'
1196 abort: no suitable response from remote hg
1194 abort: no suitable response from remote hg
1197 [255]
1195 [255]
1198 $ hg clone 'ssh://fakehost%7Ctouch%20owned/path'
1196 $ hg clone 'ssh://fakehost%7Ctouch%20owned/path'
1199 abort: no suitable response from remote hg
1197 abort: no suitable response from remote hg
1200 [255]
1198 [255]
1201
1199
1202 $ hg clone 'ssh://-oProxyCommand=touch owned%20foo@example.com/nonexistent/path'
1200 $ hg clone 'ssh://-oProxyCommand=touch owned%20foo@example.com/nonexistent/path'
1203 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch owned foo@example.com/nonexistent/path'
1201 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch owned foo@example.com/nonexistent/path'
1204 [255]
1202 [255]
1205
1203
1206 #if windows
1204 #if windows
1207 $ hg clone "ssh://%26touch%20owned%20/" --debug
1205 $ hg clone "ssh://%26touch%20owned%20/" --debug
1208 running sh -c "read l; read l; read l" "&touch owned " "hg -R . serve --stdio"
1206 running sh -c "read l; read l; read l" "&touch owned " "hg -R . serve --stdio"
1209 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1207 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1210 sending hello command
1208 sending hello command
1211 sending between command
1209 sending between command
1212 abort: no suitable response from remote hg
1210 abort: no suitable response from remote hg
1213 [255]
1211 [255]
1214 $ hg clone "ssh://example.com:%26touch%20owned%20/" --debug
1212 $ hg clone "ssh://example.com:%26touch%20owned%20/" --debug
1215 running sh -c "read l; read l; read l" -p "&touch owned " example.com "hg -R . serve --stdio"
1213 running sh -c "read l; read l; read l" -p "&touch owned " example.com "hg -R . serve --stdio"
1216 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1214 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1217 sending hello command
1215 sending hello command
1218 sending between command
1216 sending between command
1219 abort: no suitable response from remote hg
1217 abort: no suitable response from remote hg
1220 [255]
1218 [255]
1221 #else
1219 #else
1222 $ hg clone "ssh://%3btouch%20owned%20/" --debug
1220 $ hg clone "ssh://%3btouch%20owned%20/" --debug
1223 running sh -c "read l; read l; read l" ';touch owned ' 'hg -R . serve --stdio'
1221 running sh -c "read l; read l; read l" ';touch owned ' 'hg -R . serve --stdio'
1224 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1222 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1225 sending hello command
1223 sending hello command
1226 sending between command
1224 sending between command
1227 abort: no suitable response from remote hg
1225 abort: no suitable response from remote hg
1228 [255]
1226 [255]
1229 $ hg clone "ssh://example.com:%3btouch%20owned%20/" --debug
1227 $ hg clone "ssh://example.com:%3btouch%20owned%20/" --debug
1230 running sh -c "read l; read l; read l" -p ';touch owned ' example.com 'hg -R . serve --stdio'
1228 running sh -c "read l; read l; read l" -p ';touch owned ' example.com 'hg -R . serve --stdio'
1231 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1229 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1232 sending hello command
1230 sending hello command
1233 sending between command
1231 sending between command
1234 abort: no suitable response from remote hg
1232 abort: no suitable response from remote hg
1235 [255]
1233 [255]
1236 #endif
1234 #endif
1237
1235
1238 $ hg clone "ssh://v-alid.example.com/" --debug
1236 $ hg clone "ssh://v-alid.example.com/" --debug
1239 running sh -c "read l; read l; read l" v-alid\.example\.com ['"]hg -R \. serve --stdio['"] (re)
1237 running sh -c "read l; read l; read l" v-alid\.example\.com ['"]hg -R \. serve --stdio['"] (re)
1240 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1238 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1241 sending hello command
1239 sending hello command
1242 sending between command
1240 sending between command
1243 abort: no suitable response from remote hg
1241 abort: no suitable response from remote hg
1244 [255]
1242 [255]
1245
1243
1246 We should not have created a file named owned - if it exists, the
1244 We should not have created a file named owned - if it exists, the
1247 attack succeeded.
1245 attack succeeded.
1248 $ if test -f owned; then echo 'you got owned'; fi
1246 $ if test -f owned; then echo 'you got owned'; fi
1249
1247
1250 Cloning without fsmonitor enabled does not print a warning for small repos
1248 Cloning without fsmonitor enabled does not print a warning for small repos
1251
1249
1252 $ hg clone a fsmonitor-default
1250 $ hg clone a fsmonitor-default
1253 updating to bookmark @ on branch stable
1251 updating to bookmark @ on branch stable
1254 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1252 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1255
1253
1256 Lower the warning threshold to simulate a large repo
1254 Lower the warning threshold to simulate a large repo
1257
1255
1258 $ cat >> $HGRCPATH << EOF
1256 $ cat >> $HGRCPATH << EOF
1259 > [fsmonitor]
1257 > [fsmonitor]
1260 > warn_update_file_count = 2
1258 > warn_update_file_count = 2
1261 > warn_update_file_count_rust = 2
1259 > warn_update_file_count_rust = 2
1262 > EOF
1260 > EOF
1263
1261
1264 We should see a warning about no fsmonitor on supported platforms
1262 We should see a warning about no fsmonitor on supported platforms
1265
1263
1266 #if linuxormacos no-fsmonitor
1264 #if linuxormacos no-fsmonitor
1267 $ hg clone a nofsmonitor
1265 $ hg clone a nofsmonitor
1268 updating to bookmark @ on branch stable
1266 updating to bookmark @ on branch stable
1269 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor")
1267 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor")
1270 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1268 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1271 #else
1269 #else
1272 $ hg clone a nofsmonitor
1270 $ hg clone a nofsmonitor
1273 updating to bookmark @ on branch stable
1271 updating to bookmark @ on branch stable
1274 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1272 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1275 #endif
1273 #endif
1276
1274
1277 We should not see warning about fsmonitor when it is enabled
1275 We should not see warning about fsmonitor when it is enabled
1278
1276
1279 #if fsmonitor
1277 #if fsmonitor
1280 $ hg clone a fsmonitor-enabled
1278 $ hg clone a fsmonitor-enabled
1281 updating to bookmark @ on branch stable
1279 updating to bookmark @ on branch stable
1282 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1280 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1283 #endif
1281 #endif
1284
1282
1285 We can disable the fsmonitor warning
1283 We can disable the fsmonitor warning
1286
1284
1287 $ hg --config fsmonitor.warn_when_unused=false clone a fsmonitor-disable-warning
1285 $ hg --config fsmonitor.warn_when_unused=false clone a fsmonitor-disable-warning
1288 updating to bookmark @ on branch stable
1286 updating to bookmark @ on branch stable
1289 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1287 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1290
1288
1291 Loaded fsmonitor but disabled in config should still print warning
1289 Loaded fsmonitor but disabled in config should still print warning
1292
1290
1293 #if linuxormacos fsmonitor
1291 #if linuxormacos fsmonitor
1294 $ hg --config fsmonitor.mode=off clone a fsmonitor-mode-off
1292 $ hg --config fsmonitor.mode=off clone a fsmonitor-mode-off
1295 updating to bookmark @ on branch stable
1293 updating to bookmark @ on branch stable
1296 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor") (fsmonitor !)
1294 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor") (fsmonitor !)
1297 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1295 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1298 #endif
1296 #endif
1299
1297
1300 Warning not printed if working directory isn't empty
1298 Warning not printed if working directory isn't empty
1301
1299
1302 $ hg -q clone a fsmonitor-update
1300 $ hg -q clone a fsmonitor-update
1303 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor") (?)
1301 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor") (?)
1304 $ cd fsmonitor-update
1302 $ cd fsmonitor-update
1305 $ hg up acb14030fe0a
1303 $ hg up acb14030fe0a
1306 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
1304 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
1307 (leaving bookmark @)
1305 (leaving bookmark @)
1308 $ hg up cf0fe1914066
1306 $ hg up cf0fe1914066
1309 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1307 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1310
1308
1311 `hg update` from null revision also prints
1309 `hg update` from null revision also prints
1312
1310
1313 $ hg up null
1311 $ hg up null
1314 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
1312 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
1315
1313
1316 #if linuxormacos no-fsmonitor
1314 #if linuxormacos no-fsmonitor
1317 $ hg up cf0fe1914066
1315 $ hg up cf0fe1914066
1318 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor")
1316 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor")
1319 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1317 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1320 #else
1318 #else
1321 $ hg up cf0fe1914066
1319 $ hg up cf0fe1914066
1322 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1320 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1323 #endif
1321 #endif
1324
1322
1325 $ cd ..
1323 $ cd ..
1326
1324
@@ -1,447 +1,445 b''
1 #require hardlink reporevlogstore
1 #require hardlink reporevlogstore
2
2
3 $ cat > nlinks.py <<EOF
3 $ cat > nlinks.py <<EOF
4 > from __future__ import print_function
4 > from __future__ import print_function
5 > import sys
5 > import sys
6 > from mercurial import pycompat, util
6 > from mercurial import pycompat, util
7 > for f in sorted(sys.stdin.readlines()):
7 > for f in sorted(sys.stdin.readlines()):
8 > f = f[:-1]
8 > f = f[:-1]
9 > print(util.nlinks(pycompat.fsencode(f)), f)
9 > print(util.nlinks(pycompat.fsencode(f)), f)
10 > EOF
10 > EOF
11
11
12 $ nlinksdir()
12 $ nlinksdir()
13 > {
13 > {
14 > find "$@" -type f | "$PYTHON" $TESTTMP/nlinks.py
14 > find "$@" -type f | "$PYTHON" $TESTTMP/nlinks.py
15 > }
15 > }
16
16
17 Some implementations of cp can't create hardlinks (replaces 'cp -al' on Linux):
17 Some implementations of cp can't create hardlinks (replaces 'cp -al' on Linux):
18
18
19 $ cat > linkcp.py <<EOF
19 $ cat > linkcp.py <<EOF
20 > from __future__ import absolute_import
20 > from __future__ import absolute_import
21 > import sys
21 > import sys
22 > from mercurial import pycompat, util
22 > from mercurial import pycompat, util
23 > util.copyfiles(pycompat.fsencode(sys.argv[1]),
23 > util.copyfiles(pycompat.fsencode(sys.argv[1]),
24 > pycompat.fsencode(sys.argv[2]), hardlink=True)
24 > pycompat.fsencode(sys.argv[2]), hardlink=True)
25 > EOF
25 > EOF
26
26
27 $ linkcp()
27 $ linkcp()
28 > {
28 > {
29 > "$PYTHON" $TESTTMP/linkcp.py $1 $2
29 > "$PYTHON" $TESTTMP/linkcp.py $1 $2
30 > }
30 > }
31
31
32 Prepare repo r1:
32 Prepare repo r1:
33
33
34 $ hg init r1
34 $ hg init r1
35 $ cd r1
35 $ cd r1
36
36
37 $ echo c1 > f1
37 $ echo c1 > f1
38 $ hg add f1
38 $ hg add f1
39 $ hg ci -m0
39 $ hg ci -m0
40
40
41 $ mkdir d1
41 $ mkdir d1
42 $ cd d1
42 $ cd d1
43 $ echo c2 > f2
43 $ echo c2 > f2
44 $ hg add f2
44 $ hg add f2
45 $ hg ci -m1
45 $ hg ci -m1
46 $ cd ../..
46 $ cd ../..
47
47
48 $ nlinksdir r1/.hg/store
48 $ nlinksdir r1/.hg/store
49 1 r1/.hg/store/00changelog.i
49 1 r1/.hg/store/00changelog.i
50 1 r1/.hg/store/00manifest.i
50 1 r1/.hg/store/00manifest.i
51 1 r1/.hg/store/data/d1/f2.i
51 1 r1/.hg/store/data/d1/f2.i
52 1 r1/.hg/store/data/f1.i
52 1 r1/.hg/store/data/f1.i
53 1 r1/.hg/store/fncache (repofncache !)
53 1 r1/.hg/store/fncache (repofncache !)
54 1 r1/.hg/store/phaseroots
54 1 r1/.hg/store/phaseroots
55 1 r1/.hg/store/undo
55 1 r1/.hg/store/undo
56 1 r1/.hg/store/undo.backup.fncache (repofncache !)
56 1 r1/.hg/store/undo.backup.fncache (repofncache !)
57 1 r1/.hg/store/undo.backupfiles
57 1 r1/.hg/store/undo.backupfiles
58 1 r1/.hg/store/undo.phaseroots
58 1 r1/.hg/store/undo.phaseroots
59
59
60
60
61 Create hardlinked clone r2:
61 Create hardlinked clone r2:
62
62
63 $ hg clone -U --debug r1 r2 --config progress.debug=true
63 $ hg clone -U --debug r1 r2 --config progress.debug=true
64 linking: 1 files
64 linking: 1 files
65 linking: 2 files
65 linking: 2 files
66 linking: 3 files
66 linking: 3 files
67 linking: 4 files
67 linking: 4 files
68 linking: 5 files
68 linking: 5 files
69 linking: 6 files
69 linking: 6 files
70 linking: 7 files
70 linking: 7 files
71 linked 7 files
71 linked 7 files
72 updating the branch cache
72 updating the branch cache
73
73
74 Create non-hardlinked clone r3:
74 Create non-hardlinked clone r3:
75
75
76 $ hg clone --pull r1 r3
76 $ hg clone --pull r1 r3
77 requesting all changes
77 requesting all changes
78 adding changesets
78 adding changesets
79 adding manifests
79 adding manifests
80 adding file changes
80 adding file changes
81 added 2 changesets with 2 changes to 2 files
81 added 2 changesets with 2 changes to 2 files
82 new changesets 40d85e9847f2:7069c422939c
82 new changesets 40d85e9847f2:7069c422939c
83 updating to branch default
83 updating to branch default
84 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
84 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
85
85
86
86
87 Repos r1 and r2 should now contain hardlinked files:
87 Repos r1 and r2 should now contain hardlinked files:
88
88
89 $ nlinksdir r1/.hg/store
89 $ nlinksdir r1/.hg/store
90 2 r1/.hg/store/00changelog.i
90 2 r1/.hg/store/00changelog.i
91 2 r1/.hg/store/00manifest.i
91 2 r1/.hg/store/00manifest.i
92 2 r1/.hg/store/data/d1/f2.i
92 2 r1/.hg/store/data/d1/f2.i
93 2 r1/.hg/store/data/f1.i
93 2 r1/.hg/store/data/f1.i
94 2 r1/.hg/store/fncache (repofncache !)
94 2 r1/.hg/store/fncache (repofncache !)
95 1 r1/.hg/store/phaseroots
95 1 r1/.hg/store/phaseroots
96 1 r1/.hg/store/undo
96 1 r1/.hg/store/undo
97 1 r1/.hg/store/undo.backup.fncache (repofncache !)
97 1 r1/.hg/store/undo.backup.fncache (repofncache !)
98 1 r1/.hg/store/undo.backupfiles
98 1 r1/.hg/store/undo.backupfiles
99 1 r1/.hg/store/undo.phaseroots
99 1 r1/.hg/store/undo.phaseroots
100
100
101 $ nlinksdir r2/.hg/store
101 $ nlinksdir r2/.hg/store
102 2 r2/.hg/store/00changelog.i
102 2 r2/.hg/store/00changelog.i
103 2 r2/.hg/store/00manifest.i
103 2 r2/.hg/store/00manifest.i
104 2 r2/.hg/store/data/d1/f2.i
104 2 r2/.hg/store/data/d1/f2.i
105 2 r2/.hg/store/data/f1.i
105 2 r2/.hg/store/data/f1.i
106 2 r2/.hg/store/fncache (repofncache !)
106 2 r2/.hg/store/fncache (repofncache !)
107
107
108 Repo r3 should not be hardlinked:
108 Repo r3 should not be hardlinked:
109
109
110 $ nlinksdir r3/.hg/store
110 $ nlinksdir r3/.hg/store
111 1 r3/.hg/store/00changelog.i
111 1 r3/.hg/store/00changelog.i
112 1 r3/.hg/store/00manifest.i
112 1 r3/.hg/store/00manifest.i
113 1 r3/.hg/store/data/d1/f2.i
113 1 r3/.hg/store/data/d1/f2.i
114 1 r3/.hg/store/data/f1.i
114 1 r3/.hg/store/data/f1.i
115 1 r3/.hg/store/fncache (repofncache !)
115 1 r3/.hg/store/fncache (repofncache !)
116 1 r3/.hg/store/phaseroots
116 1 r3/.hg/store/phaseroots
117 1 r3/.hg/store/undo
117 1 r3/.hg/store/undo
118 1 r3/.hg/store/undo.backupfiles
118 1 r3/.hg/store/undo.backupfiles
119 1 r3/.hg/store/undo.phaseroots
119 1 r3/.hg/store/undo.phaseroots
120
120
121
121
122 Create a non-inlined filelog in r3:
122 Create a non-inlined filelog in r3:
123
123
124 $ cd r3/d1
124 $ cd r3/d1
125 >>> f = open('data1', 'wb')
125 >>> f = open('data1', 'wb')
126 >>> for x in range(10000):
126 >>> for x in range(10000):
127 ... f.write(b"%d\n" % x) and None
127 ... f.write(b"%d\n" % x) and None
128 >>> f.close()
128 >>> f.close()
129 $ for j in 0 1 2 3 4 5 6 7 8 9; do
129 $ for j in 0 1 2 3 4 5 6 7 8 9; do
130 > cat data1 >> f2
130 > cat data1 >> f2
131 > hg commit -m$j
131 > hg commit -m$j
132 > done
132 > done
133 $ cd ../..
133 $ cd ../..
134
134
135 $ nlinksdir r3/.hg/store
135 $ nlinksdir r3/.hg/store
136 1 r3/.hg/store/00changelog.i
136 1 r3/.hg/store/00changelog.i
137 1 r3/.hg/store/00manifest.i
137 1 r3/.hg/store/00manifest.i
138 1 r3/.hg/store/data/d1/f2.d
138 1 r3/.hg/store/data/d1/f2.d
139 1 r3/.hg/store/data/d1/f2.i
139 1 r3/.hg/store/data/d1/f2.i
140 1 r3/.hg/store/data/f1.i
140 1 r3/.hg/store/data/f1.i
141 1 r3/.hg/store/fncache (repofncache !)
141 1 r3/.hg/store/fncache (repofncache !)
142 1 r3/.hg/store/phaseroots
142 1 r3/.hg/store/phaseroots
143 1 r3/.hg/store/undo
143 1 r3/.hg/store/undo
144 1 r3/.hg/store/undo.backup.fncache (repofncache !)
144 1 r3/.hg/store/undo.backup.fncache (repofncache !)
145 1 r3/.hg/store/undo.backup.phaseroots
145 1 r3/.hg/store/undo.backup.phaseroots
146 1 r3/.hg/store/undo.backupfiles
146 1 r3/.hg/store/undo.backupfiles
147 1 r3/.hg/store/undo.phaseroots
147 1 r3/.hg/store/undo.phaseroots
148
148
149 Push to repo r1 should break up most hardlinks in r2:
149 Push to repo r1 should break up most hardlinks in r2:
150
150
151 $ hg -R r2 verify
151 $ hg -R r2 verify
152 checking changesets
152 checking changesets
153 checking manifests
153 checking manifests
154 crosschecking files in changesets and manifests
154 crosschecking files in changesets and manifests
155 checking files
155 checking files
156 checked 2 changesets with 2 changes to 2 files
156 checked 2 changesets with 2 changes to 2 files
157
157
158 $ cd r3
158 $ cd r3
159 $ hg push
159 $ hg push
160 pushing to $TESTTMP/r1
160 pushing to $TESTTMP/r1
161 searching for changes
161 searching for changes
162 adding changesets
162 adding changesets
163 adding manifests
163 adding manifests
164 adding file changes
164 adding file changes
165 added 10 changesets with 10 changes to 1 files
165 added 10 changesets with 10 changes to 1 files
166
166
167 $ cd ..
167 $ cd ..
168
168
169 $ nlinksdir r2/.hg/store
169 $ nlinksdir r2/.hg/store
170 1 r2/.hg/store/00changelog.i
170 1 r2/.hg/store/00changelog.i
171 1 r2/.hg/store/00manifest.i
171 1 r2/.hg/store/00manifest.i
172 1 r2/.hg/store/data/d1/f2.i
172 1 r2/.hg/store/data/d1/f2.i
173 2 r2/.hg/store/data/f1.i
173 2 r2/.hg/store/data/f1.i
174 [12] r2/\.hg/store/fncache (re) (repofncache !)
174 [12] r2/\.hg/store/fncache (re) (repofncache !)
175
175
176 #if hardlink-whitelisted repofncache
176 #if hardlink-whitelisted repofncache
177 $ nlinksdir r2/.hg/store/fncache
177 $ nlinksdir r2/.hg/store/fncache
178 2 r2/.hg/store/fncache
178 2 r2/.hg/store/fncache
179 #endif
179 #endif
180
180
181 $ hg -R r2 verify
181 $ hg -R r2 verify
182 checking changesets
182 checking changesets
183 checking manifests
183 checking manifests
184 crosschecking files in changesets and manifests
184 crosschecking files in changesets and manifests
185 checking files
185 checking files
186 checked 2 changesets with 2 changes to 2 files
186 checked 2 changesets with 2 changes to 2 files
187
187
188
188
189 $ cd r1
189 $ cd r1
190 $ hg up
190 $ hg up
191 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
191 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
192
192
193 Committing a change to f1 in r1 must break up hardlink f1.i in r2:
193 Committing a change to f1 in r1 must break up hardlink f1.i in r2:
194
194
195 $ echo c1c1 >> f1
195 $ echo c1c1 >> f1
196 $ hg ci -m00
196 $ hg ci -m00
197 $ cd ..
197 $ cd ..
198
198
199 $ nlinksdir r2/.hg/store
199 $ nlinksdir r2/.hg/store
200 1 r2/.hg/store/00changelog.i
200 1 r2/.hg/store/00changelog.i
201 1 r2/.hg/store/00manifest.i
201 1 r2/.hg/store/00manifest.i
202 1 r2/.hg/store/data/d1/f2.i
202 1 r2/.hg/store/data/d1/f2.i
203 1 r2/.hg/store/data/f1.i
203 1 r2/.hg/store/data/f1.i
204 [12] r2/\.hg/store/fncache (re) (repofncache !)
204 [12] r2/\.hg/store/fncache (re) (repofncache !)
205
205
206 #if hardlink-whitelisted repofncache
206 #if hardlink-whitelisted repofncache
207 $ nlinksdir r2/.hg/store/fncache
207 $ nlinksdir r2/.hg/store/fncache
208 2 r2/.hg/store/fncache
208 2 r2/.hg/store/fncache
209 #endif
209 #endif
210
210
211 Create a file which exec permissions we will change
211 Create a file which exec permissions we will change
212 $ cd r3
212 $ cd r3
213 $ echo "echo hello world" > f3
213 $ echo "echo hello world" > f3
214 $ hg add f3
214 $ hg add f3
215 $ hg ci -mf3
215 $ hg ci -mf3
216 $ cd ..
216 $ cd ..
217
217
218 $ cd r3
218 $ cd r3
219 $ hg tip --template '{rev}:{node|short}\n'
219 $ hg tip --template '{rev}:{node|short}\n'
220 12:d3b77733a28a
220 12:d3b77733a28a
221 $ echo bla > f1
221 $ echo bla > f1
222 $ chmod +x f3
222 $ chmod +x f3
223 $ hg ci -m1
223 $ hg ci -m1
224 $ cd ..
224 $ cd ..
225
225
226 Create hardlinked copy r4 of r3 (on Linux, we would call 'cp -al'):
226 Create hardlinked copy r4 of r3 (on Linux, we would call 'cp -al'):
227
227
228 $ linkcp r3 r4
228 $ linkcp r3 r4
229
229
230 'checklink' is produced by hardlinking a symlink, which is undefined whether
230 'checklink' is produced by hardlinking a symlink, which is undefined whether
231 the symlink should be followed or not. It does behave differently on Linux and
231 the symlink should be followed or not. It does behave differently on Linux and
232 BSD. Just remove it so the test pass on both platforms.
232 BSD. Just remove it so the test pass on both platforms.
233
233
234 $ rm -f r4/.hg/wcache/checklink
234 $ rm -f r4/.hg/wcache/checklink
235
235
236 r4 has hardlinks in the working dir (not just inside .hg):
236 r4 has hardlinks in the working dir (not just inside .hg):
237
237
238 $ nlinksdir r4
238 $ nlinksdir r4
239 2 r4/.hg/00changelog.i
239 2 r4/.hg/00changelog.i
240 2 r4/.hg/branch
240 2 r4/.hg/branch
241 2 r4/.hg/cache/branch2-base
241 2 r4/.hg/cache/branch2-base
242 2 r4/.hg/cache/branch2-immutable
242 2 r4/.hg/cache/branch2-immutable
243 2 r4/.hg/cache/branch2-served
243 2 r4/.hg/cache/branch2-served
244 2 r4/.hg/cache/branch2-served.hidden
244 2 r4/.hg/cache/branch2-served.hidden
245 2 r4/.hg/cache/branch2-visible
245 2 r4/.hg/cache/branch2-visible
246 2 r4/.hg/cache/branch2-visible-hidden
246 2 r4/.hg/cache/branch2-visible-hidden
247 2 r4/.hg/cache/hgtagsfnodes1
248 2 r4/.hg/cache/rbc-names-v1
247 2 r4/.hg/cache/rbc-names-v1
249 2 r4/.hg/cache/rbc-revs-v1
248 2 r4/.hg/cache/rbc-revs-v1
250 2 r4/.hg/cache/tags2
249 2 r4/.hg/cache/tags2
251 2 r4/.hg/cache/tags2-served
250 2 r4/.hg/cache/tags2-served
252 2 r4/.hg/dirstate
251 2 r4/.hg/dirstate
253 2 r4/.hg/fsmonitor.state (fsmonitor !)
252 2 r4/.hg/fsmonitor.state (fsmonitor !)
254 2 r4/.hg/hgrc
253 2 r4/.hg/hgrc
255 2 r4/.hg/last-message.txt
254 2 r4/.hg/last-message.txt
256 2 r4/.hg/requires
255 2 r4/.hg/requires
257 2 r4/.hg/store/00changelog.i
256 2 r4/.hg/store/00changelog.i
258 2 r4/.hg/store/00manifest.i
257 2 r4/.hg/store/00manifest.i
259 2 r4/.hg/store/data/d1/f2.d
258 2 r4/.hg/store/data/d1/f2.d
260 2 r4/.hg/store/data/d1/f2.i
259 2 r4/.hg/store/data/d1/f2.i
261 2 r4/.hg/store/data/f1.i
260 2 r4/.hg/store/data/f1.i
262 2 r4/.hg/store/data/f3.i
261 2 r4/.hg/store/data/f3.i
263 2 r4/.hg/store/fncache (repofncache !)
262 2 r4/.hg/store/fncache (repofncache !)
264 2 r4/.hg/store/phaseroots
263 2 r4/.hg/store/phaseroots
265 2 r4/.hg/store/undo
264 2 r4/.hg/store/undo
266 2 r4/.hg/store/undo.backup.fncache (repofncache !)
265 2 r4/.hg/store/undo.backup.fncache (repofncache !)
267 2 r4/.hg/store/undo.backup.phaseroots
266 2 r4/.hg/store/undo.backup.phaseroots
268 2 r4/.hg/store/undo.backupfiles
267 2 r4/.hg/store/undo.backupfiles
269 2 r4/.hg/store/undo.phaseroots
268 2 r4/.hg/store/undo.phaseroots
270 [24] r4/\.hg/undo\.backup\.dirstate (re)
269 [24] r4/\.hg/undo\.backup\.dirstate (re)
271 2 r4/.hg/undo.bookmarks
270 2 r4/.hg/undo.bookmarks
272 2 r4/.hg/undo.branch
271 2 r4/.hg/undo.branch
273 2 r4/.hg/undo.desc
272 2 r4/.hg/undo.desc
274 [24] r4/\.hg/undo\.dirstate (re)
273 [24] r4/\.hg/undo\.dirstate (re)
275 2 r4/.hg/wcache/checkisexec (execbit !)
274 2 r4/.hg/wcache/checkisexec (execbit !)
276 2 r4/.hg/wcache/checklink-target (symlink !)
275 2 r4/.hg/wcache/checklink-target (symlink !)
277 2 r4/.hg/wcache/checknoexec (execbit !)
276 2 r4/.hg/wcache/checknoexec (execbit !)
278 2 r4/.hg/wcache/manifestfulltextcache (reporevlogstore !)
277 2 r4/.hg/wcache/manifestfulltextcache (reporevlogstore !)
279 2 r4/d1/data1
278 2 r4/d1/data1
280 2 r4/d1/f2
279 2 r4/d1/f2
281 2 r4/f1
280 2 r4/f1
282 2 r4/f3
281 2 r4/f3
283
282
284 Update back to revision 12 in r4 should break hardlink of file f1 and f3:
283 Update back to revision 12 in r4 should break hardlink of file f1 and f3:
285 #if hardlink-whitelisted
284 #if hardlink-whitelisted
286 $ nlinksdir r4/.hg/undo.backup.dirstate r4/.hg/undo.dirstate
285 $ nlinksdir r4/.hg/undo.backup.dirstate r4/.hg/undo.dirstate
287 4 r4/.hg/undo.backup.dirstate
286 4 r4/.hg/undo.backup.dirstate
288 4 r4/.hg/undo.dirstate
287 4 r4/.hg/undo.dirstate
289 #endif
288 #endif
290
289
291
290
292 $ hg -R r4 up 12
291 $ hg -R r4 up 12
293 2 files updated, 0 files merged, 0 files removed, 0 files unresolved (execbit !)
292 2 files updated, 0 files merged, 0 files removed, 0 files unresolved (execbit !)
294 1 files updated, 0 files merged, 0 files removed, 0 files unresolved (no-execbit !)
293 1 files updated, 0 files merged, 0 files removed, 0 files unresolved (no-execbit !)
295
294
296 $ nlinksdir r4
295 $ nlinksdir r4
297 2 r4/.hg/00changelog.i
296 2 r4/.hg/00changelog.i
298 1 r4/.hg/branch
297 1 r4/.hg/branch
299 2 r4/.hg/cache/branch2-base
298 2 r4/.hg/cache/branch2-base
300 2 r4/.hg/cache/branch2-immutable
299 2 r4/.hg/cache/branch2-immutable
301 2 r4/.hg/cache/branch2-served
300 2 r4/.hg/cache/branch2-served
302 2 r4/.hg/cache/branch2-served.hidden
301 2 r4/.hg/cache/branch2-served.hidden
303 2 r4/.hg/cache/branch2-visible
302 2 r4/.hg/cache/branch2-visible
304 2 r4/.hg/cache/branch2-visible-hidden
303 2 r4/.hg/cache/branch2-visible-hidden
305 2 r4/.hg/cache/hgtagsfnodes1
306 2 r4/.hg/cache/rbc-names-v1
304 2 r4/.hg/cache/rbc-names-v1
307 2 r4/.hg/cache/rbc-revs-v1
305 2 r4/.hg/cache/rbc-revs-v1
308 2 r4/.hg/cache/tags2
306 2 r4/.hg/cache/tags2
309 2 r4/.hg/cache/tags2-served
307 2 r4/.hg/cache/tags2-served
310 1 r4/.hg/dirstate
308 1 r4/.hg/dirstate
311 1 r4/.hg/fsmonitor.state (fsmonitor !)
309 1 r4/.hg/fsmonitor.state (fsmonitor !)
312 2 r4/.hg/hgrc
310 2 r4/.hg/hgrc
313 2 r4/.hg/last-message.txt
311 2 r4/.hg/last-message.txt
314 2 r4/.hg/requires
312 2 r4/.hg/requires
315 2 r4/.hg/store/00changelog.i
313 2 r4/.hg/store/00changelog.i
316 2 r4/.hg/store/00manifest.i
314 2 r4/.hg/store/00manifest.i
317 2 r4/.hg/store/data/d1/f2.d
315 2 r4/.hg/store/data/d1/f2.d
318 2 r4/.hg/store/data/d1/f2.i
316 2 r4/.hg/store/data/d1/f2.i
319 2 r4/.hg/store/data/f1.i
317 2 r4/.hg/store/data/f1.i
320 2 r4/.hg/store/data/f3.i
318 2 r4/.hg/store/data/f3.i
321 2 r4/.hg/store/fncache
319 2 r4/.hg/store/fncache
322 2 r4/.hg/store/phaseroots
320 2 r4/.hg/store/phaseroots
323 2 r4/.hg/store/undo
321 2 r4/.hg/store/undo
324 2 r4/.hg/store/undo.backup.fncache (repofncache !)
322 2 r4/.hg/store/undo.backup.fncache (repofncache !)
325 2 r4/.hg/store/undo.backup.phaseroots
323 2 r4/.hg/store/undo.backup.phaseroots
326 2 r4/.hg/store/undo.backupfiles
324 2 r4/.hg/store/undo.backupfiles
327 2 r4/.hg/store/undo.phaseroots
325 2 r4/.hg/store/undo.phaseroots
328 [24] r4/\.hg/undo\.backup\.dirstate (re)
326 [24] r4/\.hg/undo\.backup\.dirstate (re)
329 2 r4/.hg/undo.bookmarks
327 2 r4/.hg/undo.bookmarks
330 2 r4/.hg/undo.branch
328 2 r4/.hg/undo.branch
331 2 r4/.hg/undo.desc
329 2 r4/.hg/undo.desc
332 [24] r4/\.hg/undo\.dirstate (re)
330 [24] r4/\.hg/undo\.dirstate (re)
333 2 r4/.hg/wcache/checkisexec (execbit !)
331 2 r4/.hg/wcache/checkisexec (execbit !)
334 2 r4/.hg/wcache/checklink-target (symlink !)
332 2 r4/.hg/wcache/checklink-target (symlink !)
335 2 r4/.hg/wcache/checknoexec (execbit !)
333 2 r4/.hg/wcache/checknoexec (execbit !)
336 1 r4/.hg/wcache/manifestfulltextcache (reporevlogstore !)
334 1 r4/.hg/wcache/manifestfulltextcache (reporevlogstore !)
337 2 r4/d1/data1
335 2 r4/d1/data1
338 2 r4/d1/f2
336 2 r4/d1/f2
339 1 r4/f1
337 1 r4/f1
340 1 r4/f3 (execbit !)
338 1 r4/f3 (execbit !)
341 2 r4/f3 (no-execbit !)
339 2 r4/f3 (no-execbit !)
342
340
343 #if hardlink-whitelisted
341 #if hardlink-whitelisted
344 $ nlinksdir r4/.hg/undo.backup.dirstate r4/.hg/undo.dirstate
342 $ nlinksdir r4/.hg/undo.backup.dirstate r4/.hg/undo.dirstate
345 4 r4/.hg/undo.backup.dirstate
343 4 r4/.hg/undo.backup.dirstate
346 4 r4/.hg/undo.dirstate
344 4 r4/.hg/undo.dirstate
347 #endif
345 #endif
348
346
349 Test hardlinking outside hg:
347 Test hardlinking outside hg:
350
348
351 $ mkdir x
349 $ mkdir x
352 $ echo foo > x/a
350 $ echo foo > x/a
353
351
354 $ linkcp x y
352 $ linkcp x y
355 $ echo bar >> y/a
353 $ echo bar >> y/a
356
354
357 No diff if hardlink:
355 No diff if hardlink:
358
356
359 $ diff x/a y/a
357 $ diff x/a y/a
360
358
361 Test mq hardlinking:
359 Test mq hardlinking:
362
360
363 $ echo "[extensions]" >> $HGRCPATH
361 $ echo "[extensions]" >> $HGRCPATH
364 $ echo "mq=" >> $HGRCPATH
362 $ echo "mq=" >> $HGRCPATH
365
363
366 $ hg init a
364 $ hg init a
367 $ cd a
365 $ cd a
368
366
369 $ hg qimport -n foo - << EOF
367 $ hg qimport -n foo - << EOF
370 > # HG changeset patch
368 > # HG changeset patch
371 > # Date 1 0
369 > # Date 1 0
372 > diff -r 2588a8b53d66 a
370 > diff -r 2588a8b53d66 a
373 > --- /dev/null Thu Jan 01 00:00:00 1970 +0000
371 > --- /dev/null Thu Jan 01 00:00:00 1970 +0000
374 > +++ b/a Wed Jul 23 15:54:29 2008 +0200
372 > +++ b/a Wed Jul 23 15:54:29 2008 +0200
375 > @@ -0,0 +1,1 @@
373 > @@ -0,0 +1,1 @@
376 > +a
374 > +a
377 > EOF
375 > EOF
378 adding foo to series file
376 adding foo to series file
379
377
380 $ hg qpush
378 $ hg qpush
381 applying foo
379 applying foo
382 now at: foo
380 now at: foo
383
381
384 $ cd ..
382 $ cd ..
385 $ linkcp a b
383 $ linkcp a b
386 $ cd b
384 $ cd b
387
385
388 $ hg qimport -n bar - << EOF
386 $ hg qimport -n bar - << EOF
389 > # HG changeset patch
387 > # HG changeset patch
390 > # Date 2 0
388 > # Date 2 0
391 > diff -r 2588a8b53d66 a
389 > diff -r 2588a8b53d66 a
392 > --- /dev/null Thu Jan 01 00:00:00 1970 +0000
390 > --- /dev/null Thu Jan 01 00:00:00 1970 +0000
393 > +++ b/b Wed Jul 23 15:54:29 2008 +0200
391 > +++ b/b Wed Jul 23 15:54:29 2008 +0200
394 > @@ -0,0 +1,1 @@
392 > @@ -0,0 +1,1 @@
395 > +b
393 > +b
396 > EOF
394 > EOF
397 adding bar to series file
395 adding bar to series file
398
396
399 $ hg qpush
397 $ hg qpush
400 applying bar
398 applying bar
401 now at: bar
399 now at: bar
402
400
403 $ cat .hg/patches/status
401 $ cat .hg/patches/status
404 430ed4828a74fa4047bc816a25500f7472ab4bfe:foo
402 430ed4828a74fa4047bc816a25500f7472ab4bfe:foo
405 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c:bar
403 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c:bar
406
404
407 $ cat .hg/patches/series
405 $ cat .hg/patches/series
408 foo
406 foo
409 bar
407 bar
410
408
411 $ cat ../a/.hg/patches/status
409 $ cat ../a/.hg/patches/status
412 430ed4828a74fa4047bc816a25500f7472ab4bfe:foo
410 430ed4828a74fa4047bc816a25500f7472ab4bfe:foo
413
411
414 $ cat ../a/.hg/patches/series
412 $ cat ../a/.hg/patches/series
415 foo
413 foo
416
414
417 Test tags hardlinking:
415 Test tags hardlinking:
418
416
419 $ hg qdel -r qbase:qtip
417 $ hg qdel -r qbase:qtip
420 patch foo finalized without changeset message
418 patch foo finalized without changeset message
421 patch bar finalized without changeset message
419 patch bar finalized without changeset message
422
420
423 $ hg tag -l lfoo
421 $ hg tag -l lfoo
424 $ hg tag foo
422 $ hg tag foo
425
423
426 $ cd ..
424 $ cd ..
427 $ linkcp b c
425 $ linkcp b c
428 $ cd c
426 $ cd c
429
427
430 $ hg tag -l -r 0 lbar
428 $ hg tag -l -r 0 lbar
431 $ hg tag -r 0 bar
429 $ hg tag -r 0 bar
432
430
433 $ cat .hgtags
431 $ cat .hgtags
434 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c foo
432 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c foo
435 430ed4828a74fa4047bc816a25500f7472ab4bfe bar
433 430ed4828a74fa4047bc816a25500f7472ab4bfe bar
436
434
437 $ cat .hg/localtags
435 $ cat .hg/localtags
438 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c lfoo
436 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c lfoo
439 430ed4828a74fa4047bc816a25500f7472ab4bfe lbar
437 430ed4828a74fa4047bc816a25500f7472ab4bfe lbar
440
438
441 $ cat ../b/.hgtags
439 $ cat ../b/.hgtags
442 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c foo
440 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c foo
443
441
444 $ cat ../b/.hg/localtags
442 $ cat ../b/.hg/localtags
445 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c lfoo
443 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c lfoo
446
444
447 $ cd ..
445 $ cd ..
@@ -1,2034 +1,2038 b''
1 This file tests the behavior of run-tests.py itself.
1 This file tests the behavior of run-tests.py itself.
2
2
3 Avoid interference from actual test env:
3 Avoid interference from actual test env:
4
4
5 $ . "$TESTDIR/helper-runtests.sh"
5 $ . "$TESTDIR/helper-runtests.sh"
6
6
7 Smoke test with install
7 Smoke test with install
8 ============
8 ============
9 $ "$PYTHON" $TESTDIR/run-tests.py $HGTEST_RUN_TESTS_PURE -l
9 $ "$PYTHON" $TESTDIR/run-tests.py $HGTEST_RUN_TESTS_PURE -l
10 running 0 tests using 0 parallel processes
10 running 0 tests using 0 parallel processes
11
11
12 # Ran 0 tests, 0 skipped, 0 failed.
12 # Ran 0 tests, 0 skipped, 0 failed.
13
13
14 Define a helper to avoid the install step
14 Define a helper to avoid the install step
15 =============
15 =============
16 $ rt()
16 $ rt()
17 > {
17 > {
18 > "$PYTHON" $TESTDIR/run-tests.py --with-hg=`which hg` -j1 "$@"
18 > "$PYTHON" $TESTDIR/run-tests.py --with-hg=`which hg` -j1 "$@"
19 > }
19 > }
20
20
21 error paths
21 error paths
22
22
23 #if symlink
23 #if symlink
24 $ ln -s `which true` hg
24 $ ln -s `which true` hg
25 $ "$PYTHON" $TESTDIR/run-tests.py --with-hg=./hg
25 $ "$PYTHON" $TESTDIR/run-tests.py --with-hg=./hg
26 warning: --with-hg should specify an hg script
26 warning: --with-hg should specify an hg script
27 running 0 tests using 0 parallel processes
27 running 0 tests using 0 parallel processes
28
28
29 # Ran 0 tests, 0 skipped, 0 failed.
29 # Ran 0 tests, 0 skipped, 0 failed.
30 $ rm hg
30 $ rm hg
31 #endif
31 #endif
32
32
33 #if execbit
33 #if execbit
34 $ touch hg
34 $ touch hg
35 $ "$PYTHON" $TESTDIR/run-tests.py --with-hg=./hg
35 $ "$PYTHON" $TESTDIR/run-tests.py --with-hg=./hg
36 usage: run-tests.py [options] [tests]
36 usage: run-tests.py [options] [tests]
37 run-tests.py: error: --with-hg must specify an executable hg script
37 run-tests.py: error: --with-hg must specify an executable hg script
38 [2]
38 [2]
39 $ rm hg
39 $ rm hg
40 #endif
40 #endif
41
41
42 Features for testing optional lines
42 Features for testing optional lines
43 ===================================
43 ===================================
44
44
45 $ cat > hghaveaddon.py <<EOF
45 $ cat > hghaveaddon.py <<EOF
46 > import hghave
46 > import hghave
47 > @hghave.check("custom", "custom hghave feature")
47 > @hghave.check("custom", "custom hghave feature")
48 > def has_custom():
48 > def has_custom():
49 > return True
49 > return True
50 > @hghave.check("missing", "missing hghave feature")
50 > @hghave.check("missing", "missing hghave feature")
51 > def has_missing():
51 > def has_missing():
52 > return False
52 > return False
53 > EOF
53 > EOF
54
54
55 an empty test
55 an empty test
56 =======================
56 =======================
57
57
58 $ touch test-empty.t
58 $ touch test-empty.t
59 $ rt
59 $ rt
60 running 1 tests using 1 parallel processes
60 running 1 tests using 1 parallel processes
61 .
61 .
62 # Ran 1 tests, 0 skipped, 0 failed.
62 # Ran 1 tests, 0 skipped, 0 failed.
63 $ rm test-empty.t
63 $ rm test-empty.t
64
64
65 a succesful test
65 a succesful test
66 =======================
66 =======================
67
67
68 $ cat > test-success.t << EOF
68 $ cat > test-success.t << EOF
69 > $ echo babar
69 > $ echo babar
70 > babar
70 > babar
71 > $ echo xyzzy
71 > $ echo xyzzy
72 > dont_print (?)
72 > dont_print (?)
73 > nothing[42]line (re) (?)
73 > nothing[42]line (re) (?)
74 > never*happens (glob) (?)
74 > never*happens (glob) (?)
75 > more_nothing (?)
75 > more_nothing (?)
76 > xyzzy
76 > xyzzy
77 > nor this (?)
77 > nor this (?)
78 > $ printf 'abc\ndef\nxyz\n'
78 > $ printf 'abc\ndef\nxyz\n'
79 > 123 (?)
79 > 123 (?)
80 > abc
80 > abc
81 > def (?)
81 > def (?)
82 > 456 (?)
82 > 456 (?)
83 > xyz
83 > xyz
84 > $ printf 'zyx\nwvu\ntsr\n'
84 > $ printf 'zyx\nwvu\ntsr\n'
85 > abc (?)
85 > abc (?)
86 > zyx (custom !)
86 > zyx (custom !)
87 > wvu
87 > wvu
88 > no_print (no-custom !)
88 > no_print (no-custom !)
89 > tsr (no-missing !)
89 > tsr (no-missing !)
90 > missing (missing !)
90 > missing (missing !)
91 > EOF
91 > EOF
92
92
93 $ rt
93 $ rt
94 running 1 tests using 1 parallel processes
94 running 1 tests using 1 parallel processes
95 .
95 .
96 # Ran 1 tests, 0 skipped, 0 failed.
96 # Ran 1 tests, 0 skipped, 0 failed.
97
97
98 failing test
98 failing test
99 ==================
99 ==================
100
100
101 test churn with globs
101 test churn with globs
102 $ cat > test-failure.t <<EOF
102 $ cat > test-failure.t <<EOF
103 > $ echo "bar-baz"; echo "bar-bad"; echo foo
103 > $ echo "bar-baz"; echo "bar-bad"; echo foo
104 > bar*bad (glob)
104 > bar*bad (glob)
105 > bar*baz (glob)
105 > bar*baz (glob)
106 > | fo (re)
106 > | fo (re)
107 > EOF
107 > EOF
108 $ rt test-failure.t
108 $ rt test-failure.t
109 running 1 tests using 1 parallel processes
109 running 1 tests using 1 parallel processes
110
110
111 --- $TESTTMP/test-failure.t
111 --- $TESTTMP/test-failure.t
112 +++ $TESTTMP/test-failure.t.err
112 +++ $TESTTMP/test-failure.t.err
113 @@ -1,4 +1,4 @@
113 @@ -1,4 +1,4 @@
114 $ echo "bar-baz"; echo "bar-bad"; echo foo
114 $ echo "bar-baz"; echo "bar-bad"; echo foo
115 + bar*baz (glob)
115 + bar*baz (glob)
116 bar*bad (glob)
116 bar*bad (glob)
117 - bar*baz (glob)
117 - bar*baz (glob)
118 - | fo (re)
118 - | fo (re)
119 + foo
119 + foo
120
120
121 ERROR: test-failure.t output changed
121 ERROR: test-failure.t output changed
122 !
122 !
123 Failed test-failure.t: output changed
123 Failed test-failure.t: output changed
124 # Ran 1 tests, 0 skipped, 1 failed.
124 # Ran 1 tests, 0 skipped, 1 failed.
125 python hash seed: * (glob)
125 python hash seed: * (glob)
126 [1]
126 [1]
127
127
128 test how multiple globs gets matched with lines in output
128 test how multiple globs gets matched with lines in output
129 $ cat > test-failure-globs.t <<EOF
129 $ cat > test-failure-globs.t <<EOF
130 > $ echo "context"; echo "context"; \
130 > $ echo "context"; echo "context"; \
131 > echo "key: 1"; echo "value: not a"; \
131 > echo "key: 1"; echo "value: not a"; \
132 > echo "key: 2"; echo "value: not b"; \
132 > echo "key: 2"; echo "value: not b"; \
133 > echo "key: 3"; echo "value: c"; \
133 > echo "key: 3"; echo "value: c"; \
134 > echo "key: 4"; echo "value: d"
134 > echo "key: 4"; echo "value: d"
135 > context
135 > context
136 > context
136 > context
137 > key: 1
137 > key: 1
138 > value: a
138 > value: a
139 > key: 2
139 > key: 2
140 > value: b
140 > value: b
141 > key: 3
141 > key: 3
142 > value: * (glob)
142 > value: * (glob)
143 > key: 4
143 > key: 4
144 > value: * (glob)
144 > value: * (glob)
145 > EOF
145 > EOF
146 $ rt test-failure-globs.t
146 $ rt test-failure-globs.t
147 running 1 tests using 1 parallel processes
147 running 1 tests using 1 parallel processes
148
148
149 --- $TESTTMP/test-failure-globs.t
149 --- $TESTTMP/test-failure-globs.t
150 +++ $TESTTMP/test-failure-globs.t.err
150 +++ $TESTTMP/test-failure-globs.t.err
151 @@ -2,9 +2,9 @@
151 @@ -2,9 +2,9 @@
152 context
152 context
153 context
153 context
154 key: 1
154 key: 1
155 - value: a
155 - value: a
156 + value: not a
156 + value: not a
157 key: 2
157 key: 2
158 - value: b
158 - value: b
159 + value: not b
159 + value: not b
160 key: 3
160 key: 3
161 value: * (glob)
161 value: * (glob)
162 key: 4
162 key: 4
163
163
164 ERROR: test-failure-globs.t output changed
164 ERROR: test-failure-globs.t output changed
165 !
165 !
166 Failed test-failure-globs.t: output changed
166 Failed test-failure-globs.t: output changed
167 # Ran 1 tests, 0 skipped, 1 failed.
167 # Ran 1 tests, 0 skipped, 1 failed.
168 python hash seed: * (glob)
168 python hash seed: * (glob)
169 [1]
169 [1]
170 $ rm test-failure-globs.t
170 $ rm test-failure-globs.t
171
171
172 test diff colorisation
172 test diff colorisation
173
173
174 #if no-windows pygments
174 #if no-windows pygments
175 $ rt test-failure.t --color always
175 $ rt test-failure.t --color always
176 running 1 tests using 1 parallel processes
176 running 1 tests using 1 parallel processes
177
177
178 \x1b[38;5;124m--- $TESTTMP/test-failure.t\x1b[39m (esc)
178 \x1b[38;5;124m--- $TESTTMP/test-failure.t\x1b[39m (esc)
179 \x1b[38;5;34m+++ $TESTTMP/test-failure.t.err\x1b[39m (esc)
179 \x1b[38;5;34m+++ $TESTTMP/test-failure.t.err\x1b[39m (esc)
180 \x1b[38;5;90;01m@@ -1,4 +1,4 @@\x1b[39;00m (esc)
180 \x1b[38;5;90;01m@@ -1,4 +1,4 @@\x1b[39;00m (esc)
181 $ echo "bar-baz"; echo "bar-bad"; echo foo
181 $ echo "bar-baz"; echo "bar-bad"; echo foo
182 \x1b[38;5;34m+ bar*baz (glob)\x1b[39m (esc)
182 \x1b[38;5;34m+ bar*baz (glob)\x1b[39m (esc)
183 bar*bad (glob)
183 bar*bad (glob)
184 \x1b[38;5;124m- bar*baz (glob)\x1b[39m (esc)
184 \x1b[38;5;124m- bar*baz (glob)\x1b[39m (esc)
185 \x1b[38;5;124m- | fo (re)\x1b[39m (esc)
185 \x1b[38;5;124m- | fo (re)\x1b[39m (esc)
186 \x1b[38;5;34m+ foo\x1b[39m (esc)
186 \x1b[38;5;34m+ foo\x1b[39m (esc)
187
187
188 \x1b[38;5;88mERROR: \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m output changed\x1b[39m (esc)
188 \x1b[38;5;88mERROR: \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m output changed\x1b[39m (esc)
189 !
189 !
190 \x1b[38;5;88mFailed \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m: output changed\x1b[39m (esc)
190 \x1b[38;5;88mFailed \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m: output changed\x1b[39m (esc)
191 # Ran 1 tests, 0 skipped, 1 failed.
191 # Ran 1 tests, 0 skipped, 1 failed.
192 python hash seed: * (glob)
192 python hash seed: * (glob)
193 [1]
193 [1]
194
194
195 $ rt test-failure.t 2> tmp.log
195 $ rt test-failure.t 2> tmp.log
196 running 1 tests using 1 parallel processes
196 running 1 tests using 1 parallel processes
197 [1]
197 [1]
198 $ cat tmp.log
198 $ cat tmp.log
199
199
200 --- $TESTTMP/test-failure.t
200 --- $TESTTMP/test-failure.t
201 +++ $TESTTMP/test-failure.t.err
201 +++ $TESTTMP/test-failure.t.err
202 @@ -1,4 +1,4 @@
202 @@ -1,4 +1,4 @@
203 $ echo "bar-baz"; echo "bar-bad"; echo foo
203 $ echo "bar-baz"; echo "bar-bad"; echo foo
204 + bar*baz (glob)
204 + bar*baz (glob)
205 bar*bad (glob)
205 bar*bad (glob)
206 - bar*baz (glob)
206 - bar*baz (glob)
207 - | fo (re)
207 - | fo (re)
208 + foo
208 + foo
209
209
210 ERROR: test-failure.t output changed
210 ERROR: test-failure.t output changed
211 !
211 !
212 Failed test-failure.t: output changed
212 Failed test-failure.t: output changed
213 # Ran 1 tests, 0 skipped, 1 failed.
213 # Ran 1 tests, 0 skipped, 1 failed.
214 python hash seed: * (glob)
214 python hash seed: * (glob)
215 #endif
215 #endif
216
216
217 $ cat > test-failure.t << EOF
217 $ cat > test-failure.t << EOF
218 > $ true
218 > $ true
219 > should go away (true !)
219 > should go away (true !)
220 > $ true
220 > $ true
221 > should stay (false !)
221 > should stay (false !)
222 >
222 >
223 > Should remove first line, not second or third
223 > Should remove first line, not second or third
224 > $ echo 'testing'
224 > $ echo 'testing'
225 > baz*foo (glob) (true !)
225 > baz*foo (glob) (true !)
226 > foobar*foo (glob) (false !)
226 > foobar*foo (glob) (false !)
227 > te*ting (glob) (true !)
227 > te*ting (glob) (true !)
228 >
228 >
229 > Should keep first two lines, remove third and last
229 > Should keep first two lines, remove third and last
230 > $ echo 'testing'
230 > $ echo 'testing'
231 > test.ng (re) (true !)
231 > test.ng (re) (true !)
232 > foo.ar (re) (false !)
232 > foo.ar (re) (false !)
233 > b.r (re) (true !)
233 > b.r (re) (true !)
234 > missing (?)
234 > missing (?)
235 > awol (true !)
235 > awol (true !)
236 >
236 >
237 > The "missing" line should stay, even though awol is dropped
237 > The "missing" line should stay, even though awol is dropped
238 > $ echo 'testing'
238 > $ echo 'testing'
239 > test.ng (re) (true !)
239 > test.ng (re) (true !)
240 > foo.ar (?)
240 > foo.ar (?)
241 > awol
241 > awol
242 > missing (?)
242 > missing (?)
243 > EOF
243 > EOF
244 $ rt test-failure.t
244 $ rt test-failure.t
245 running 1 tests using 1 parallel processes
245 running 1 tests using 1 parallel processes
246
246
247 --- $TESTTMP/test-failure.t
247 --- $TESTTMP/test-failure.t
248 +++ $TESTTMP/test-failure.t.err
248 +++ $TESTTMP/test-failure.t.err
249 @@ -1,11 +1,9 @@
249 @@ -1,11 +1,9 @@
250 $ true
250 $ true
251 - should go away (true !)
251 - should go away (true !)
252 $ true
252 $ true
253 should stay (false !)
253 should stay (false !)
254
254
255 Should remove first line, not second or third
255 Should remove first line, not second or third
256 $ echo 'testing'
256 $ echo 'testing'
257 - baz*foo (glob) (true !)
257 - baz*foo (glob) (true !)
258 foobar*foo (glob) (false !)
258 foobar*foo (glob) (false !)
259 te*ting (glob) (true !)
259 te*ting (glob) (true !)
260
260
261 foo.ar (re) (false !)
261 foo.ar (re) (false !)
262 missing (?)
262 missing (?)
263 @@ -13,13 +11,10 @@
263 @@ -13,13 +11,10 @@
264 $ echo 'testing'
264 $ echo 'testing'
265 test.ng (re) (true !)
265 test.ng (re) (true !)
266 foo.ar (re) (false !)
266 foo.ar (re) (false !)
267 - b.r (re) (true !)
267 - b.r (re) (true !)
268 missing (?)
268 missing (?)
269 - awol (true !)
269 - awol (true !)
270
270
271 The "missing" line should stay, even though awol is dropped
271 The "missing" line should stay, even though awol is dropped
272 $ echo 'testing'
272 $ echo 'testing'
273 test.ng (re) (true !)
273 test.ng (re) (true !)
274 foo.ar (?)
274 foo.ar (?)
275 - awol
275 - awol
276 missing (?)
276 missing (?)
277
277
278 ERROR: test-failure.t output changed
278 ERROR: test-failure.t output changed
279 !
279 !
280 Failed test-failure.t: output changed
280 Failed test-failure.t: output changed
281 # Ran 1 tests, 0 skipped, 1 failed.
281 # Ran 1 tests, 0 skipped, 1 failed.
282 python hash seed: * (glob)
282 python hash seed: * (glob)
283 [1]
283 [1]
284
284
285 basic failing test
285 basic failing test
286 $ cat > test-failure.t << EOF
286 $ cat > test-failure.t << EOF
287 > $ echo babar
287 > $ echo babar
288 > rataxes
288 > rataxes
289 > This is a noop statement so that
289 > This is a noop statement so that
290 > this test is still more bytes than success.
290 > this test is still more bytes than success.
291 > pad pad pad pad............................................................
291 > pad pad pad pad............................................................
292 > pad pad pad pad............................................................
292 > pad pad pad pad............................................................
293 > pad pad pad pad............................................................
293 > pad pad pad pad............................................................
294 > pad pad pad pad............................................................
294 > pad pad pad pad............................................................
295 > pad pad pad pad............................................................
295 > pad pad pad pad............................................................
296 > pad pad pad pad............................................................
296 > pad pad pad pad............................................................
297 > EOF
297 > EOF
298
298
299 >>> fh = open('test-failure-unicode.t', 'wb')
299 >>> fh = open('test-failure-unicode.t', 'wb')
300 >>> fh.write(u' $ echo babar\u03b1\n'.encode('utf-8')) and None
300 >>> fh.write(u' $ echo babar\u03b1\n'.encode('utf-8')) and None
301 >>> fh.write(u' l\u03b5\u03b5t\n'.encode('utf-8')) and None
301 >>> fh.write(u' l\u03b5\u03b5t\n'.encode('utf-8')) and None
302
302
303 $ rt
303 $ rt
304 running 3 tests using 1 parallel processes
304 running 3 tests using 1 parallel processes
305
305
306 --- $TESTTMP/test-failure.t
306 --- $TESTTMP/test-failure.t
307 +++ $TESTTMP/test-failure.t.err
307 +++ $TESTTMP/test-failure.t.err
308 @@ -1,5 +1,5 @@
308 @@ -1,5 +1,5 @@
309 $ echo babar
309 $ echo babar
310 - rataxes
310 - rataxes
311 + babar
311 + babar
312 This is a noop statement so that
312 This is a noop statement so that
313 this test is still more bytes than success.
313 this test is still more bytes than success.
314 pad pad pad pad............................................................
314 pad pad pad pad............................................................
315
315
316 ERROR: test-failure.t output changed
316 ERROR: test-failure.t output changed
317 !.
317 !.
318 --- $TESTTMP/test-failure-unicode.t
318 --- $TESTTMP/test-failure-unicode.t
319 +++ $TESTTMP/test-failure-unicode.t.err
319 +++ $TESTTMP/test-failure-unicode.t.err
320 @@ -1,2 +1,2 @@
320 @@ -1,2 +1,2 @@
321 $ echo babar\xce\xb1 (esc)
321 $ echo babar\xce\xb1 (esc)
322 - l\xce\xb5\xce\xb5t (esc)
322 - l\xce\xb5\xce\xb5t (esc)
323 + babar\xce\xb1 (esc)
323 + babar\xce\xb1 (esc)
324
324
325 ERROR: test-failure-unicode.t output changed
325 ERROR: test-failure-unicode.t output changed
326 !
326 !
327 Failed test-failure-unicode.t: output changed
327 Failed test-failure-unicode.t: output changed
328 Failed test-failure.t: output changed
328 Failed test-failure.t: output changed
329 # Ran 3 tests, 0 skipped, 2 failed.
329 # Ran 3 tests, 0 skipped, 2 failed.
330 python hash seed: * (glob)
330 python hash seed: * (glob)
331 [1]
331 [1]
332
332
333 test --outputdir
333 test --outputdir
334 $ mkdir output
334 $ mkdir output
335 $ rt --outputdir output
335 $ rt --outputdir output
336 running 3 tests using 1 parallel processes
336 running 3 tests using 1 parallel processes
337
337
338 --- $TESTTMP/test-failure.t
338 --- $TESTTMP/test-failure.t
339 +++ $TESTTMP/output/test-failure.t.err
339 +++ $TESTTMP/output/test-failure.t.err
340 @@ -1,5 +1,5 @@
340 @@ -1,5 +1,5 @@
341 $ echo babar
341 $ echo babar
342 - rataxes
342 - rataxes
343 + babar
343 + babar
344 This is a noop statement so that
344 This is a noop statement so that
345 this test is still more bytes than success.
345 this test is still more bytes than success.
346 pad pad pad pad............................................................
346 pad pad pad pad............................................................
347
347
348 ERROR: test-failure.t output changed
348 ERROR: test-failure.t output changed
349 !.
349 !.
350 --- $TESTTMP/test-failure-unicode.t
350 --- $TESTTMP/test-failure-unicode.t
351 +++ $TESTTMP/output/test-failure-unicode.t.err
351 +++ $TESTTMP/output/test-failure-unicode.t.err
352 @@ -1,2 +1,2 @@
352 @@ -1,2 +1,2 @@
353 $ echo babar\xce\xb1 (esc)
353 $ echo babar\xce\xb1 (esc)
354 - l\xce\xb5\xce\xb5t (esc)
354 - l\xce\xb5\xce\xb5t (esc)
355 + babar\xce\xb1 (esc)
355 + babar\xce\xb1 (esc)
356
356
357 ERROR: test-failure-unicode.t output changed
357 ERROR: test-failure-unicode.t output changed
358 !
358 !
359 Failed test-failure-unicode.t: output changed
359 Failed test-failure-unicode.t: output changed
360 Failed test-failure.t: output changed
360 Failed test-failure.t: output changed
361 # Ran 3 tests, 0 skipped, 2 failed.
361 # Ran 3 tests, 0 skipped, 2 failed.
362 python hash seed: * (glob)
362 python hash seed: * (glob)
363 [1]
363 [1]
364 $ ls -a output
364 $ ls -a output
365 .
365 .
366 ..
366 ..
367 .testtimes
367 .testtimes
368 test-failure-unicode.t.err
368 test-failure-unicode.t.err
369 test-failure.t.err
369 test-failure.t.err
370
370
371 test --xunit support
371 test --xunit support
372 $ rt --xunit=xunit.xml
372 $ rt --xunit=xunit.xml
373 running 3 tests using 1 parallel processes
373 running 3 tests using 1 parallel processes
374
374
375 --- $TESTTMP/test-failure.t
375 --- $TESTTMP/test-failure.t
376 +++ $TESTTMP/test-failure.t.err
376 +++ $TESTTMP/test-failure.t.err
377 @@ -1,5 +1,5 @@
377 @@ -1,5 +1,5 @@
378 $ echo babar
378 $ echo babar
379 - rataxes
379 - rataxes
380 + babar
380 + babar
381 This is a noop statement so that
381 This is a noop statement so that
382 this test is still more bytes than success.
382 this test is still more bytes than success.
383 pad pad pad pad............................................................
383 pad pad pad pad............................................................
384
384
385 ERROR: test-failure.t output changed
385 ERROR: test-failure.t output changed
386 !.
386 !.
387 --- $TESTTMP/test-failure-unicode.t
387 --- $TESTTMP/test-failure-unicode.t
388 +++ $TESTTMP/test-failure-unicode.t.err
388 +++ $TESTTMP/test-failure-unicode.t.err
389 @@ -1,2 +1,2 @@
389 @@ -1,2 +1,2 @@
390 $ echo babar\xce\xb1 (esc)
390 $ echo babar\xce\xb1 (esc)
391 - l\xce\xb5\xce\xb5t (esc)
391 - l\xce\xb5\xce\xb5t (esc)
392 + babar\xce\xb1 (esc)
392 + babar\xce\xb1 (esc)
393
393
394 ERROR: test-failure-unicode.t output changed
394 ERROR: test-failure-unicode.t output changed
395 !
395 !
396 Failed test-failure-unicode.t: output changed
396 Failed test-failure-unicode.t: output changed
397 Failed test-failure.t: output changed
397 Failed test-failure.t: output changed
398 # Ran 3 tests, 0 skipped, 2 failed.
398 # Ran 3 tests, 0 skipped, 2 failed.
399 python hash seed: * (glob)
399 python hash seed: * (glob)
400 [1]
400 [1]
401 $ cat xunit.xml
401 $ cat xunit.xml
402 <?xml version="1.0" encoding="utf-8"?>
402 <?xml version="1.0" encoding="utf-8"?>
403 <testsuite errors="0" failures="2" name="run-tests" skipped="0" tests="3">
403 <testsuite errors="0" failures="2" name="run-tests" skipped="0" tests="3">
404 <testcase name="test-success.t" time="*"/> (glob)
404 <testcase name="test-success.t" time="*"/> (glob)
405 <testcase name="test-failure-unicode.t" time="*"> (glob)
405 <testcase name="test-failure-unicode.t" time="*"> (glob)
406 <failure message="output changed" type="output-mismatch"><![CDATA[--- $TESTTMP/test-failure-unicode.t (py38 !)
406 <failure message="output changed" type="output-mismatch"><![CDATA[--- $TESTTMP/test-failure-unicode.t (py38 !)
407 <failure message="output changed" type="output-mismatch"> (no-py38 !)
407 <failure message="output changed" type="output-mismatch"> (no-py38 !)
408 <![CDATA[--- $TESTTMP/test-failure-unicode.t (no-py38 !)
408 <![CDATA[--- $TESTTMP/test-failure-unicode.t (no-py38 !)
409 +++ $TESTTMP/test-failure-unicode.t.err
409 +++ $TESTTMP/test-failure-unicode.t.err
410 @@ -1,2 +1,2 @@
410 @@ -1,2 +1,2 @@
411 $ echo babar\xce\xb1 (esc)
411 $ echo babar\xce\xb1 (esc)
412 - l\xce\xb5\xce\xb5t (esc)
412 - l\xce\xb5\xce\xb5t (esc)
413 + babar\xce\xb1 (esc)
413 + babar\xce\xb1 (esc)
414 ]]></failure> (py38 !)
414 ]]></failure> (py38 !)
415 ]]> </failure> (no-py38 !)
415 ]]> </failure> (no-py38 !)
416 </testcase>
416 </testcase>
417 <testcase name="test-failure.t" time="*"> (glob)
417 <testcase name="test-failure.t" time="*"> (glob)
418 <failure message="output changed" type="output-mismatch"><![CDATA[--- $TESTTMP/test-failure.t (py38 !)
418 <failure message="output changed" type="output-mismatch"><![CDATA[--- $TESTTMP/test-failure.t (py38 !)
419 <failure message="output changed" type="output-mismatch"> (no-py38 !)
419 <failure message="output changed" type="output-mismatch"> (no-py38 !)
420 <![CDATA[--- $TESTTMP/test-failure.t (no-py38 !)
420 <![CDATA[--- $TESTTMP/test-failure.t (no-py38 !)
421 +++ $TESTTMP/test-failure.t.err
421 +++ $TESTTMP/test-failure.t.err
422 @@ -1,5 +1,5 @@
422 @@ -1,5 +1,5 @@
423 $ echo babar
423 $ echo babar
424 - rataxes
424 - rataxes
425 + babar
425 + babar
426 This is a noop statement so that
426 This is a noop statement so that
427 this test is still more bytes than success.
427 this test is still more bytes than success.
428 pad pad pad pad............................................................
428 pad pad pad pad............................................................
429 ]]></failure> (py38 !)
429 ]]></failure> (py38 !)
430 ]]> </failure> (no-py38 !)
430 ]]> </failure> (no-py38 !)
431 </testcase>
431 </testcase>
432 </testsuite>
432 </testsuite>
433
433
434 $ cat .testtimes
434 $ cat .testtimes
435 test-empty.t * (glob)
435 test-empty.t * (glob)
436 test-failure-globs.t * (glob)
436 test-failure-globs.t * (glob)
437 test-failure-unicode.t * (glob)
437 test-failure-unicode.t * (glob)
438 test-failure.t * (glob)
438 test-failure.t * (glob)
439 test-success.t * (glob)
439 test-success.t * (glob)
440
440
441 $ rt --list-tests
441 $ rt --list-tests
442 test-failure-unicode.t
442 test-failure-unicode.t
443 test-failure.t
443 test-failure.t
444 test-success.t
444 test-success.t
445
445
446 $ rt --list-tests --json
446 $ rt --list-tests --json
447 test-failure-unicode.t
447 test-failure-unicode.t
448 test-failure.t
448 test-failure.t
449 test-success.t
449 test-success.t
450 $ cat report.json
450 $ cat report.json
451 testreport ={
451 testreport ={
452 "test-failure-unicode.t": {
452 "test-failure-unicode.t": {
453 "result": "success"
453 "result": "success"
454 },
454 },
455 "test-failure.t": {
455 "test-failure.t": {
456 "result": "success"
456 "result": "success"
457 },
457 },
458 "test-success.t": {
458 "test-success.t": {
459 "result": "success"
459 "result": "success"
460 }
460 }
461 } (no-eol)
461 } (no-eol)
462
462
463 $ rt --list-tests --xunit=xunit.xml
463 $ rt --list-tests --xunit=xunit.xml
464 test-failure-unicode.t
464 test-failure-unicode.t
465 test-failure.t
465 test-failure.t
466 test-success.t
466 test-success.t
467 $ cat xunit.xml
467 $ cat xunit.xml
468 <?xml version="1.0" encoding="utf-8"?>
468 <?xml version="1.0" encoding="utf-8"?>
469 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
469 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
470 <testcase name="test-failure-unicode.t"/>
470 <testcase name="test-failure-unicode.t"/>
471 <testcase name="test-failure.t"/>
471 <testcase name="test-failure.t"/>
472 <testcase name="test-success.t"/>
472 <testcase name="test-success.t"/>
473 </testsuite>
473 </testsuite>
474
474
475 $ rt --list-tests test-failure* --json --xunit=xunit.xml --outputdir output
475 $ rt --list-tests test-failure* --json --xunit=xunit.xml --outputdir output
476 test-failure-unicode.t
476 test-failure-unicode.t
477 test-failure.t
477 test-failure.t
478 $ cat output/report.json
478 $ cat output/report.json
479 testreport ={
479 testreport ={
480 "test-failure-unicode.t": {
480 "test-failure-unicode.t": {
481 "result": "success"
481 "result": "success"
482 },
482 },
483 "test-failure.t": {
483 "test-failure.t": {
484 "result": "success"
484 "result": "success"
485 }
485 }
486 } (no-eol)
486 } (no-eol)
487 $ cat xunit.xml
487 $ cat xunit.xml
488 <?xml version="1.0" encoding="utf-8"?>
488 <?xml version="1.0" encoding="utf-8"?>
489 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
489 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
490 <testcase name="test-failure-unicode.t"/>
490 <testcase name="test-failure-unicode.t"/>
491 <testcase name="test-failure.t"/>
491 <testcase name="test-failure.t"/>
492 </testsuite>
492 </testsuite>
493
493
494 $ rm test-failure-unicode.t
494 $ rm test-failure-unicode.t
495
495
496 test for --retest
496 test for --retest
497 ====================
497 ====================
498
498
499 $ rt --retest
499 $ rt --retest
500 running 1 tests using 1 parallel processes
500 running 1 tests using 1 parallel processes
501
501
502 --- $TESTTMP/test-failure.t
502 --- $TESTTMP/test-failure.t
503 +++ $TESTTMP/test-failure.t.err
503 +++ $TESTTMP/test-failure.t.err
504 @@ -1,5 +1,5 @@
504 @@ -1,5 +1,5 @@
505 $ echo babar
505 $ echo babar
506 - rataxes
506 - rataxes
507 + babar
507 + babar
508 This is a noop statement so that
508 This is a noop statement so that
509 this test is still more bytes than success.
509 this test is still more bytes than success.
510 pad pad pad pad............................................................
510 pad pad pad pad............................................................
511
511
512 ERROR: test-failure.t output changed
512 ERROR: test-failure.t output changed
513 !
513 !
514 Failed test-failure.t: output changed
514 Failed test-failure.t: output changed
515 # Ran 1 tests, 0 skipped, 1 failed.
515 # Ran 1 tests, 0 skipped, 1 failed.
516 python hash seed: * (glob)
516 python hash seed: * (glob)
517 [1]
517 [1]
518
518
519 --retest works with --outputdir
519 --retest works with --outputdir
520 $ rm -r output
520 $ rm -r output
521 $ mkdir output
521 $ mkdir output
522 $ mv test-failure.t.err output
522 $ mv test-failure.t.err output
523 $ rt --retest --outputdir output
523 $ rt --retest --outputdir output
524 running 1 tests using 1 parallel processes
524 running 1 tests using 1 parallel processes
525
525
526 --- $TESTTMP/test-failure.t
526 --- $TESTTMP/test-failure.t
527 +++ $TESTTMP/output/test-failure.t.err
527 +++ $TESTTMP/output/test-failure.t.err
528 @@ -1,5 +1,5 @@
528 @@ -1,5 +1,5 @@
529 $ echo babar
529 $ echo babar
530 - rataxes
530 - rataxes
531 + babar
531 + babar
532 This is a noop statement so that
532 This is a noop statement so that
533 this test is still more bytes than success.
533 this test is still more bytes than success.
534 pad pad pad pad............................................................
534 pad pad pad pad............................................................
535
535
536 ERROR: test-failure.t output changed
536 ERROR: test-failure.t output changed
537 !
537 !
538 Failed test-failure.t: output changed
538 Failed test-failure.t: output changed
539 # Ran 1 tests, 0 skipped, 1 failed.
539 # Ran 1 tests, 0 skipped, 1 failed.
540 python hash seed: * (glob)
540 python hash seed: * (glob)
541 [1]
541 [1]
542
542
543 Selecting Tests To Run
543 Selecting Tests To Run
544 ======================
544 ======================
545
545
546 successful
546 successful
547
547
548 $ rt test-success.t
548 $ rt test-success.t
549 running 1 tests using 1 parallel processes
549 running 1 tests using 1 parallel processes
550 .
550 .
551 # Ran 1 tests, 0 skipped, 0 failed.
551 # Ran 1 tests, 0 skipped, 0 failed.
552
552
553 success w/ keyword
553 success w/ keyword
554 $ rt -k xyzzy
554 $ rt -k xyzzy
555 running 2 tests using 1 parallel processes
555 running 2 tests using 1 parallel processes
556 .
556 .
557 # Ran 2 tests, 1 skipped, 0 failed.
557 # Ran 2 tests, 1 skipped, 0 failed.
558
558
559 failed
559 failed
560
560
561 $ rt test-failure.t
561 $ rt test-failure.t
562 running 1 tests using 1 parallel processes
562 running 1 tests using 1 parallel processes
563
563
564 --- $TESTTMP/test-failure.t
564 --- $TESTTMP/test-failure.t
565 +++ $TESTTMP/test-failure.t.err
565 +++ $TESTTMP/test-failure.t.err
566 @@ -1,5 +1,5 @@
566 @@ -1,5 +1,5 @@
567 $ echo babar
567 $ echo babar
568 - rataxes
568 - rataxes
569 + babar
569 + babar
570 This is a noop statement so that
570 This is a noop statement so that
571 this test is still more bytes than success.
571 this test is still more bytes than success.
572 pad pad pad pad............................................................
572 pad pad pad pad............................................................
573
573
574 ERROR: test-failure.t output changed
574 ERROR: test-failure.t output changed
575 !
575 !
576 Failed test-failure.t: output changed
576 Failed test-failure.t: output changed
577 # Ran 1 tests, 0 skipped, 1 failed.
577 # Ran 1 tests, 0 skipped, 1 failed.
578 python hash seed: * (glob)
578 python hash seed: * (glob)
579 [1]
579 [1]
580
580
581 failure w/ keyword
581 failure w/ keyword
582 $ rt -k rataxes
582 $ rt -k rataxes
583 running 2 tests using 1 parallel processes
583 running 2 tests using 1 parallel processes
584
584
585 --- $TESTTMP/test-failure.t
585 --- $TESTTMP/test-failure.t
586 +++ $TESTTMP/test-failure.t.err
586 +++ $TESTTMP/test-failure.t.err
587 @@ -1,5 +1,5 @@
587 @@ -1,5 +1,5 @@
588 $ echo babar
588 $ echo babar
589 - rataxes
589 - rataxes
590 + babar
590 + babar
591 This is a noop statement so that
591 This is a noop statement so that
592 this test is still more bytes than success.
592 this test is still more bytes than success.
593 pad pad pad pad............................................................
593 pad pad pad pad............................................................
594
594
595 ERROR: test-failure.t output changed
595 ERROR: test-failure.t output changed
596 !
596 !
597 Failed test-failure.t: output changed
597 Failed test-failure.t: output changed
598 # Ran 2 tests, 1 skipped, 1 failed.
598 # Ran 2 tests, 1 skipped, 1 failed.
599 python hash seed: * (glob)
599 python hash seed: * (glob)
600 [1]
600 [1]
601
601
602 Verify that when a process fails to start we show a useful message
602 Verify that when a process fails to start we show a useful message
603 ==================================================================
603 ==================================================================
604
604
605 $ cat > test-serve-fail.t <<EOF
605 $ cat > test-serve-fail.t <<EOF
606 > $ echo 'abort: child process failed to start blah'
606 > $ echo 'abort: child process failed to start blah'
607 > EOF
607 > EOF
608 $ rt test-serve-fail.t
608 $ rt test-serve-fail.t
609 running 1 tests using 1 parallel processes
609 running 1 tests using 1 parallel processes
610
610
611 --- $TESTTMP/test-serve-fail.t
611 --- $TESTTMP/test-serve-fail.t
612 +++ $TESTTMP/test-serve-fail.t.err
612 +++ $TESTTMP/test-serve-fail.t.err
613 @@ -1* +1,2 @@ (glob)
613 @@ -1* +1,2 @@ (glob)
614 $ echo 'abort: child process failed to start blah'
614 $ echo 'abort: child process failed to start blah'
615 + abort: child process failed to start blah
615 + abort: child process failed to start blah
616
616
617 ERROR: test-serve-fail.t output changed
617 ERROR: test-serve-fail.t output changed
618 !
618 !
619 Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob)
619 Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob)
620 # Ran 1 tests, 0 skipped, 1 failed.
620 # Ran 1 tests, 0 skipped, 1 failed.
621 python hash seed: * (glob)
621 python hash seed: * (glob)
622 [1]
622 [1]
623 $ rm test-serve-fail.t
623 $ rm test-serve-fail.t
624
624
625 Verify that we can try other ports
625 Verify that we can try other ports
626 ===================================
626 ===================================
627
627
628 Extensions aren't inherited by the invoked run-tests.py. An extension
628 Extensions aren't inherited by the invoked run-tests.py. An extension
629 introducing a repository requirement could cause this to fail. So we force
629 introducing a repository requirement could cause this to fail. So we force
630 HGRCPATH to get a clean environment.
630 HGRCPATH to get a clean environment.
631
631
632 $ HGRCPATH= hg init inuse
632 $ HGRCPATH= hg init inuse
633 $ hg serve -R inuse -p $HGPORT -d --pid-file=blocks.pid
633 $ hg serve -R inuse -p $HGPORT -d --pid-file=blocks.pid
634 $ cat blocks.pid >> $DAEMON_PIDS
634 $ cat blocks.pid >> $DAEMON_PIDS
635 $ cat > test-serve-inuse.t <<EOF
635 $ cat > test-serve-inuse.t <<EOF
636 > $ hg serve -R `pwd`/inuse -p \$HGPORT -d --pid-file=hg.pid
636 > $ hg serve -R `pwd`/inuse -p \$HGPORT -d --pid-file=hg.pid
637 > $ cat hg.pid >> \$DAEMON_PIDS
637 > $ cat hg.pid >> \$DAEMON_PIDS
638 > EOF
638 > EOF
639 $ rt test-serve-inuse.t
639 $ rt test-serve-inuse.t
640 running 1 tests using 1 parallel processes
640 running 1 tests using 1 parallel processes
641 .
641 .
642 # Ran 1 tests, 0 skipped, 0 failed.
642 # Ran 1 tests, 0 skipped, 0 failed.
643 $ rm test-serve-inuse.t
643 $ rm test-serve-inuse.t
644 $ killdaemons.py $DAEMON_PIDS
644 $ killdaemons.py $DAEMON_PIDS
645
645
646 Running In Debug Mode
646 Running In Debug Mode
647 ======================
647 ======================
648
648
649 $ rt --debug 2>&1 | grep -v pwd
649 $ rt --debug 2>&1 | grep -v pwd
650 running 2 tests using 1 parallel processes
650 running 2 tests using 1 parallel processes
651 + alias hg=hg.exe (windows !)
651 + alias hg=hg.exe (windows !)
652 + echo *SALT* 0 0 (glob)
652 + echo *SALT* 0 0 (glob)
653 *SALT* 0 0 (glob)
653 *SALT* 0 0 (glob)
654 + echo babar
654 + echo babar
655 babar
655 babar
656 + echo *SALT* 10 0 (glob)
656 + echo *SALT* 10 0 (glob)
657 *SALT* 10 0 (glob)
657 *SALT* 10 0 (glob)
658 .+ alias hg=hg.exe (windows !)
658 .+ alias hg=hg.exe (windows !)
659 *+ echo *SALT* 0 0 (glob)
659 *+ echo *SALT* 0 0 (glob)
660 *SALT* 0 0 (glob)
660 *SALT* 0 0 (glob)
661 + echo babar
661 + echo babar
662 babar
662 babar
663 + echo *SALT* 2 0 (glob)
663 + echo *SALT* 2 0 (glob)
664 *SALT* 2 0 (glob)
664 *SALT* 2 0 (glob)
665 + echo xyzzy
665 + echo xyzzy
666 xyzzy
666 xyzzy
667 + echo *SALT* 9 0 (glob)
667 + echo *SALT* 9 0 (glob)
668 *SALT* 9 0 (glob)
668 *SALT* 9 0 (glob)
669 + printf *abc\ndef\nxyz\n* (glob)
669 + printf *abc\ndef\nxyz\n* (glob)
670 abc
670 abc
671 def
671 def
672 xyz
672 xyz
673 + echo *SALT* 15 0 (glob)
673 + echo *SALT* 15 0 (glob)
674 *SALT* 15 0 (glob)
674 *SALT* 15 0 (glob)
675 + printf *zyx\nwvu\ntsr\n* (glob)
675 + printf *zyx\nwvu\ntsr\n* (glob)
676 zyx
676 zyx
677 wvu
677 wvu
678 tsr
678 tsr
679 + echo *SALT* 22 0 (glob)
679 + echo *SALT* 22 0 (glob)
680 *SALT* 22 0 (glob)
680 *SALT* 22 0 (glob)
681 .
681 .
682 # Ran 2 tests, 0 skipped, 0 failed.
682 # Ran 2 tests, 0 skipped, 0 failed.
683
683
684 Parallel runs
684 Parallel runs
685 ==============
685 ==============
686
686
687 (duplicate the failing test to get predictable output)
687 (duplicate the failing test to get predictable output)
688 $ cp test-failure.t test-failure-copy.t
688 $ cp test-failure.t test-failure-copy.t
689
689
690 $ rt --jobs 2 test-failure*.t -n
690 $ rt --jobs 2 test-failure*.t -n
691 running 2 tests using 2 parallel processes
691 running 2 tests using 2 parallel processes
692 !!
692 !!
693 Failed test-failure*.t: output changed (glob)
693 Failed test-failure*.t: output changed (glob)
694 Failed test-failure*.t: output changed (glob)
694 Failed test-failure*.t: output changed (glob)
695 # Ran 2 tests, 0 skipped, 2 failed.
695 # Ran 2 tests, 0 skipped, 2 failed.
696 python hash seed: * (glob)
696 python hash seed: * (glob)
697 [1]
697 [1]
698
698
699 failures in parallel with --first should only print one failure
699 failures in parallel with --first should only print one failure
700 $ rt --jobs 2 --first test-failure*.t
700 $ rt --jobs 2 --first test-failure*.t
701 running 2 tests using 2 parallel processes
701 running 2 tests using 2 parallel processes
702
702
703 --- $TESTTMP/test-failure*.t (glob)
703 --- $TESTTMP/test-failure*.t (glob)
704 +++ $TESTTMP/test-failure*.t.err (glob)
704 +++ $TESTTMP/test-failure*.t.err (glob)
705 @@ -1,5 +1,5 @@
705 @@ -1,5 +1,5 @@
706 $ echo babar
706 $ echo babar
707 - rataxes
707 - rataxes
708 + babar
708 + babar
709 This is a noop statement so that
709 This is a noop statement so that
710 this test is still more bytes than success.
710 this test is still more bytes than success.
711 pad pad pad pad............................................................
711 pad pad pad pad............................................................
712
712
713 Failed test-failure*.t: output changed (glob)
713 Failed test-failure*.t: output changed (glob)
714 Failed test-failure*.t: output changed (glob)
714 Failed test-failure*.t: output changed (glob)
715 # Ran 2 tests, 0 skipped, 2 failed.
715 # Ran 2 tests, 0 skipped, 2 failed.
716 python hash seed: * (glob)
716 python hash seed: * (glob)
717 [1]
717 [1]
718
718
719
719
720 (delete the duplicated test file)
720 (delete the duplicated test file)
721 $ rm test-failure-copy.t
721 $ rm test-failure-copy.t
722
722
723 multiple runs per test should be parallelized
723 multiple runs per test should be parallelized
724
724
725 $ rt --jobs 2 --runs-per-test 2 test-success.t
725 $ rt --jobs 2 --runs-per-test 2 test-success.t
726 running 2 tests using 2 parallel processes
726 running 2 tests using 2 parallel processes
727 ..
727 ..
728 # Ran 2 tests, 0 skipped, 0 failed.
728 # Ran 2 tests, 0 skipped, 0 failed.
729
729
730 Interactive run
730 Interactive run
731 ===============
731 ===============
732
732
733 (backup the failing test)
733 (backup the failing test)
734 $ cp test-failure.t backup
734 $ cp test-failure.t backup
735
735
736 Refuse the fix
736 Refuse the fix
737
737
738 $ echo 'n' | rt -i
738 $ echo 'n' | rt -i
739 running 2 tests using 1 parallel processes
739 running 2 tests using 1 parallel processes
740
740
741 --- $TESTTMP/test-failure.t
741 --- $TESTTMP/test-failure.t
742 +++ $TESTTMP/test-failure.t.err
742 +++ $TESTTMP/test-failure.t.err
743 @@ -1,5 +1,5 @@
743 @@ -1,5 +1,5 @@
744 $ echo babar
744 $ echo babar
745 - rataxes
745 - rataxes
746 + babar
746 + babar
747 This is a noop statement so that
747 This is a noop statement so that
748 this test is still more bytes than success.
748 this test is still more bytes than success.
749 pad pad pad pad............................................................
749 pad pad pad pad............................................................
750 Accept this change? [y/N]
750 Accept this change? [y/N]
751 ERROR: test-failure.t output changed
751 ERROR: test-failure.t output changed
752 !.
752 !.
753 Failed test-failure.t: output changed
753 Failed test-failure.t: output changed
754 # Ran 2 tests, 0 skipped, 1 failed.
754 # Ran 2 tests, 0 skipped, 1 failed.
755 python hash seed: * (glob)
755 python hash seed: * (glob)
756 [1]
756 [1]
757
757
758 $ cat test-failure.t
758 $ cat test-failure.t
759 $ echo babar
759 $ echo babar
760 rataxes
760 rataxes
761 This is a noop statement so that
761 This is a noop statement so that
762 this test is still more bytes than success.
762 this test is still more bytes than success.
763 pad pad pad pad............................................................
763 pad pad pad pad............................................................
764 pad pad pad pad............................................................
764 pad pad pad pad............................................................
765 pad pad pad pad............................................................
765 pad pad pad pad............................................................
766 pad pad pad pad............................................................
766 pad pad pad pad............................................................
767 pad pad pad pad............................................................
767 pad pad pad pad............................................................
768 pad pad pad pad............................................................
768 pad pad pad pad............................................................
769
769
770 Interactive with custom view
770 Interactive with custom view
771
771
772 $ echo 'n' | rt -i --view echo
772 $ echo 'n' | rt -i --view echo
773 running 2 tests using 1 parallel processes
773 running 2 tests using 1 parallel processes
774 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
774 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
775 Accept this change? [y/N]* (glob)
775 Accept this change? [y/N]* (glob)
776 ERROR: test-failure.t output changed
776 ERROR: test-failure.t output changed
777 !.
777 !.
778 Failed test-failure.t: output changed
778 Failed test-failure.t: output changed
779 # Ran 2 tests, 0 skipped, 1 failed.
779 # Ran 2 tests, 0 skipped, 1 failed.
780 python hash seed: * (glob)
780 python hash seed: * (glob)
781 [1]
781 [1]
782
782
783 View the fix
783 View the fix
784
784
785 $ echo 'y' | rt --view echo
785 $ echo 'y' | rt --view echo
786 running 2 tests using 1 parallel processes
786 running 2 tests using 1 parallel processes
787 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
787 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
788
788
789 ERROR: test-failure.t output changed
789 ERROR: test-failure.t output changed
790 !.
790 !.
791 Failed test-failure.t: output changed
791 Failed test-failure.t: output changed
792 # Ran 2 tests, 0 skipped, 1 failed.
792 # Ran 2 tests, 0 skipped, 1 failed.
793 python hash seed: * (glob)
793 python hash seed: * (glob)
794 [1]
794 [1]
795
795
796 Accept the fix
796 Accept the fix
797
797
798 $ cat >> test-failure.t <<EOF
798 $ cat >> test-failure.t <<EOF
799 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
799 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
800 > saved backup bundle to \$TESTTMP/foo.hg
800 > saved backup bundle to \$TESTTMP/foo.hg
801 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
801 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
802 > saved backup bundle to $TESTTMP\\foo.hg
802 > saved backup bundle to $TESTTMP\\foo.hg
803 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
803 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
804 > saved backup bundle to \$TESTTMP/*.hg (glob)
804 > saved backup bundle to \$TESTTMP/*.hg (glob)
805 > EOF
805 > EOF
806 $ echo 'y' | rt -i 2>&1
806 $ echo 'y' | rt -i 2>&1
807 running 2 tests using 1 parallel processes
807 running 2 tests using 1 parallel processes
808
808
809 --- $TESTTMP/test-failure.t
809 --- $TESTTMP/test-failure.t
810 +++ $TESTTMP/test-failure.t.err
810 +++ $TESTTMP/test-failure.t.err
811 @@ -1,5 +1,5 @@
811 @@ -1,5 +1,5 @@
812 $ echo babar
812 $ echo babar
813 - rataxes
813 - rataxes
814 + babar
814 + babar
815 This is a noop statement so that
815 This is a noop statement so that
816 this test is still more bytes than success.
816 this test is still more bytes than success.
817 pad pad pad pad............................................................
817 pad pad pad pad............................................................
818 @@ -11,6 +11,6 @@
818 @@ -11,6 +11,6 @@
819 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
819 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
820 saved backup bundle to $TESTTMP/foo.hg
820 saved backup bundle to $TESTTMP/foo.hg
821 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
821 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
822 - saved backup bundle to $TESTTMP\foo.hg
822 - saved backup bundle to $TESTTMP\foo.hg
823 + saved backup bundle to $TESTTMP/foo.hg
823 + saved backup bundle to $TESTTMP/foo.hg
824 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
824 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
825 saved backup bundle to $TESTTMP/*.hg (glob)
825 saved backup bundle to $TESTTMP/*.hg (glob)
826 Accept this change? [y/N] ..
826 Accept this change? [y/N] ..
827 # Ran 2 tests, 0 skipped, 0 failed.
827 # Ran 2 tests, 0 skipped, 0 failed.
828
828
829 $ sed -e 's,(glob)$,&<,g' test-failure.t
829 $ sed -e 's,(glob)$,&<,g' test-failure.t
830 $ echo babar
830 $ echo babar
831 babar
831 babar
832 This is a noop statement so that
832 This is a noop statement so that
833 this test is still more bytes than success.
833 this test is still more bytes than success.
834 pad pad pad pad............................................................
834 pad pad pad pad............................................................
835 pad pad pad pad............................................................
835 pad pad pad pad............................................................
836 pad pad pad pad............................................................
836 pad pad pad pad............................................................
837 pad pad pad pad............................................................
837 pad pad pad pad............................................................
838 pad pad pad pad............................................................
838 pad pad pad pad............................................................
839 pad pad pad pad............................................................
839 pad pad pad pad............................................................
840 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
840 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
841 saved backup bundle to $TESTTMP/foo.hg
841 saved backup bundle to $TESTTMP/foo.hg
842 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
842 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
843 saved backup bundle to $TESTTMP/foo.hg
843 saved backup bundle to $TESTTMP/foo.hg
844 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
844 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
845 saved backup bundle to $TESTTMP/*.hg (glob)<
845 saved backup bundle to $TESTTMP/*.hg (glob)<
846
846
847 $ rm test-failure.t
847 $ rm test-failure.t
848
848
849 Race condition - test file was modified when test is running
849 Race condition - test file was modified when test is running
850
850
851 $ TESTRACEDIR=`pwd`
851 $ TESTRACEDIR=`pwd`
852 $ export TESTRACEDIR
852 $ export TESTRACEDIR
853 $ cat > test-race.t <<EOF
853 $ cat > test-race.t <<EOF
854 > $ echo 1
854 > $ echo 1
855 > $ echo "# a new line" >> $TESTRACEDIR/test-race.t
855 > $ echo "# a new line" >> $TESTRACEDIR/test-race.t
856 > EOF
856 > EOF
857
857
858 $ rt -i test-race.t
858 $ rt -i test-race.t
859 running 1 tests using 1 parallel processes
859 running 1 tests using 1 parallel processes
860
860
861 --- $TESTTMP/test-race.t
861 --- $TESTTMP/test-race.t
862 +++ $TESTTMP/test-race.t.err
862 +++ $TESTTMP/test-race.t.err
863 @@ -1,2 +1,3 @@
863 @@ -1,2 +1,3 @@
864 $ echo 1
864 $ echo 1
865 + 1
865 + 1
866 $ echo "# a new line" >> $TESTTMP/test-race.t
866 $ echo "# a new line" >> $TESTTMP/test-race.t
867 Reference output has changed (run again to prompt changes)
867 Reference output has changed (run again to prompt changes)
868 ERROR: test-race.t output changed
868 ERROR: test-race.t output changed
869 !
869 !
870 Failed test-race.t: output changed
870 Failed test-race.t: output changed
871 # Ran 1 tests, 0 skipped, 1 failed.
871 # Ran 1 tests, 0 skipped, 1 failed.
872 python hash seed: * (glob)
872 python hash seed: * (glob)
873 [1]
873 [1]
874
874
875 $ rm test-race.t
875 $ rm test-race.t
876
876
877 When "#testcases" is used in .t files
877 When "#testcases" is used in .t files
878
878
879 $ cat >> test-cases.t <<EOF
879 $ cat >> test-cases.t <<EOF
880 > #testcases a b
880 > #testcases a b
881 > #if a
881 > #if a
882 > $ echo 1
882 > $ echo 1
883 > #endif
883 > #endif
884 > #if b
884 > #if b
885 > $ echo 2
885 > $ echo 2
886 > #endif
886 > #endif
887 > EOF
887 > EOF
888
888
889 $ cat <<EOF | rt -i test-cases.t 2>&1
889 $ cat <<EOF | rt -i test-cases.t 2>&1
890 > y
890 > y
891 > y
891 > y
892 > EOF
892 > EOF
893 running 2 tests using 1 parallel processes
893 running 2 tests using 1 parallel processes
894
894
895 --- $TESTTMP/test-cases.t
895 --- $TESTTMP/test-cases.t
896 +++ $TESTTMP/test-cases.t#a.err
896 +++ $TESTTMP/test-cases.t#a.err
897 @@ -1,6 +1,7 @@
897 @@ -1,6 +1,7 @@
898 #testcases a b
898 #testcases a b
899 #if a
899 #if a
900 $ echo 1
900 $ echo 1
901 + 1
901 + 1
902 #endif
902 #endif
903 #if b
903 #if b
904 $ echo 2
904 $ echo 2
905 Accept this change? [y/N] .
905 Accept this change? [y/N] .
906 --- $TESTTMP/test-cases.t
906 --- $TESTTMP/test-cases.t
907 +++ $TESTTMP/test-cases.t#b.err
907 +++ $TESTTMP/test-cases.t#b.err
908 @@ -5,4 +5,5 @@
908 @@ -5,4 +5,5 @@
909 #endif
909 #endif
910 #if b
910 #if b
911 $ echo 2
911 $ echo 2
912 + 2
912 + 2
913 #endif
913 #endif
914 Accept this change? [y/N] .
914 Accept this change? [y/N] .
915 # Ran 2 tests, 0 skipped, 0 failed.
915 # Ran 2 tests, 0 skipped, 0 failed.
916
916
917 $ cat test-cases.t
917 $ cat test-cases.t
918 #testcases a b
918 #testcases a b
919 #if a
919 #if a
920 $ echo 1
920 $ echo 1
921 1
921 1
922 #endif
922 #endif
923 #if b
923 #if b
924 $ echo 2
924 $ echo 2
925 2
925 2
926 #endif
926 #endif
927
927
928 $ cat >> test-cases.t <<'EOF'
928 $ cat >> test-cases.t <<'EOF'
929 > #if a
929 > #if a
930 > $ NAME=A
930 > $ NAME=A
931 > #else
931 > #else
932 > $ NAME=B
932 > $ NAME=B
933 > #endif
933 > #endif
934 > $ echo $NAME
934 > $ echo $NAME
935 > A (a !)
935 > A (a !)
936 > B (b !)
936 > B (b !)
937 > EOF
937 > EOF
938 $ rt test-cases.t
938 $ rt test-cases.t
939 running 2 tests using 1 parallel processes
939 running 2 tests using 1 parallel processes
940 ..
940 ..
941 # Ran 2 tests, 0 skipped, 0 failed.
941 # Ran 2 tests, 0 skipped, 0 failed.
942
942
943 When using multiple dimensions of "#testcases" in .t files
943 When using multiple dimensions of "#testcases" in .t files
944
944
945 $ cat > test-cases.t <<'EOF'
945 $ cat > test-cases.t <<'EOF'
946 > #testcases a b
946 > #testcases a b
947 > #testcases c d
947 > #testcases c d
948 > #if a d
948 > #if a d
949 > $ echo $TESTCASE
949 > $ echo $TESTCASE
950 > a#d
950 > a#d
951 > #endif
951 > #endif
952 > #if b c
952 > #if b c
953 > $ echo yes
953 > $ echo yes
954 > no
954 > no
955 > #endif
955 > #endif
956 > EOF
956 > EOF
957 $ rt test-cases.t
957 $ rt test-cases.t
958 running 4 tests using 1 parallel processes
958 running 4 tests using 1 parallel processes
959 ..
959 ..
960 --- $TESTTMP/test-cases.t
960 --- $TESTTMP/test-cases.t
961 +++ $TESTTMP/test-cases.t#b#c.err
961 +++ $TESTTMP/test-cases.t#b#c.err
962 @@ -6,5 +6,5 @@
962 @@ -6,5 +6,5 @@
963 #endif
963 #endif
964 #if b c
964 #if b c
965 $ echo yes
965 $ echo yes
966 - no
966 - no
967 + yes
967 + yes
968 #endif
968 #endif
969
969
970 ERROR: test-cases.t#b#c output changed
970 ERROR: test-cases.t#b#c output changed
971 !.
971 !.
972 Failed test-cases.t#b#c: output changed
972 Failed test-cases.t#b#c: output changed
973 # Ran 4 tests, 0 skipped, 1 failed.
973 # Ran 4 tests, 0 skipped, 1 failed.
974 python hash seed: * (glob)
974 python hash seed: * (glob)
975 [1]
975 [1]
976
976
977 $ rt --retest
977 $ rt --retest
978 running 1 tests using 1 parallel processes
978 running 1 tests using 1 parallel processes
979
979
980 --- $TESTTMP/test-cases.t
980 --- $TESTTMP/test-cases.t
981 +++ $TESTTMP/test-cases.t#b#c.err
981 +++ $TESTTMP/test-cases.t#b#c.err
982 @@ -6,5 +6,5 @@
982 @@ -6,5 +6,5 @@
983 #endif
983 #endif
984 #if b c
984 #if b c
985 $ echo yes
985 $ echo yes
986 - no
986 - no
987 + yes
987 + yes
988 #endif
988 #endif
989
989
990 ERROR: test-cases.t#b#c output changed
990 ERROR: test-cases.t#b#c output changed
991 !
991 !
992 Failed test-cases.t#b#c: output changed
992 Failed test-cases.t#b#c: output changed
993 # Ran 1 tests, 0 skipped, 1 failed.
993 # Ran 1 tests, 0 skipped, 1 failed.
994 python hash seed: * (glob)
994 python hash seed: * (glob)
995 [1]
995 [1]
996 $ rm test-cases.t#b#c.err
996 $ rm test-cases.t#b#c.err
997 $ rm test-cases.t
997 $ rm test-cases.t
998
998
999 (reinstall)
999 (reinstall)
1000 $ mv backup test-failure.t
1000 $ mv backup test-failure.t
1001
1001
1002 No Diff
1002 No Diff
1003 ===============
1003 ===============
1004
1004
1005 $ rt --nodiff
1005 $ rt --nodiff
1006 running 2 tests using 1 parallel processes
1006 running 2 tests using 1 parallel processes
1007 !.
1007 !.
1008 Failed test-failure.t: output changed
1008 Failed test-failure.t: output changed
1009 # Ran 2 tests, 0 skipped, 1 failed.
1009 # Ran 2 tests, 0 skipped, 1 failed.
1010 python hash seed: * (glob)
1010 python hash seed: * (glob)
1011 [1]
1011 [1]
1012
1012
1013 test --tmpdir support
1013 test --tmpdir support
1014 $ rt --tmpdir=$TESTTMP/keep test-success.t
1014 $ rt --tmpdir=$TESTTMP/keep test-success.t
1015 running 1 tests using 1 parallel processes
1015 running 1 tests using 1 parallel processes
1016
1016
1017 Keeping testtmp dir: $TESTTMP/keep/child1/test-success.t
1017 Keeping testtmp dir: $TESTTMP/keep/child1/test-success.t
1018 Keeping threadtmp dir: $TESTTMP/keep/child1
1018 Keeping threadtmp dir: $TESTTMP/keep/child1
1019 .
1019 .
1020 # Ran 1 tests, 0 skipped, 0 failed.
1020 # Ran 1 tests, 0 skipped, 0 failed.
1021
1021
1022 timeouts
1022 timeouts
1023 ========
1023 ========
1024 $ cat > test-timeout.t <<EOF
1024 $ cat > test-timeout.t <<EOF
1025 > $ sleep 2
1025 > $ sleep 2
1026 > $ echo pass
1026 > $ echo pass
1027 > pass
1027 > pass
1028 > EOF
1028 > EOF
1029 > echo '#require slow' > test-slow-timeout.t
1029 > echo '#require slow' > test-slow-timeout.t
1030 > cat test-timeout.t >> test-slow-timeout.t
1030 > cat test-timeout.t >> test-slow-timeout.t
1031 $ rt --timeout=1 --slowtimeout=3 test-timeout.t test-slow-timeout.t
1031 $ rt --timeout=1 --slowtimeout=3 test-timeout.t test-slow-timeout.t
1032 running 2 tests using 1 parallel processes
1032 running 2 tests using 1 parallel processes
1033 st
1033 st
1034 Skipped test-slow-timeout.t: missing feature: allow slow tests (use --allow-slow-tests)
1034 Skipped test-slow-timeout.t: missing feature: allow slow tests (use --allow-slow-tests)
1035 Failed test-timeout.t: timed out
1035 Failed test-timeout.t: timed out
1036 # Ran 1 tests, 1 skipped, 1 failed.
1036 # Ran 1 tests, 1 skipped, 1 failed.
1037 python hash seed: * (glob)
1037 python hash seed: * (glob)
1038 [1]
1038 [1]
1039 $ rt --timeout=1 --slowtimeout=3 \
1039 $ rt --timeout=1 --slowtimeout=3 \
1040 > test-timeout.t test-slow-timeout.t --allow-slow-tests
1040 > test-timeout.t test-slow-timeout.t --allow-slow-tests
1041 running 2 tests using 1 parallel processes
1041 running 2 tests using 1 parallel processes
1042 .t
1042 .t
1043 Failed test-timeout.t: timed out
1043 Failed test-timeout.t: timed out
1044 # Ran 2 tests, 0 skipped, 1 failed.
1044 # Ran 2 tests, 0 skipped, 1 failed.
1045 python hash seed: * (glob)
1045 python hash seed: * (glob)
1046 [1]
1046 [1]
1047 $ rm test-timeout.t test-slow-timeout.t
1047 $ rm test-timeout.t test-slow-timeout.t
1048
1048
1049 test for --time
1049 test for --time
1050 ==================
1050 ==================
1051
1051
1052 $ rt test-success.t --time
1052 $ rt test-success.t --time
1053 running 1 tests using 1 parallel processes
1053 running 1 tests using 1 parallel processes
1054 .
1054 .
1055 # Ran 1 tests, 0 skipped, 0 failed.
1055 # Ran 1 tests, 0 skipped, 0 failed.
1056 # Producing time report
1056 # Producing time report
1057 start end cuser csys real Test
1057 start end cuser csys real Test
1058 \s*[\d\.]{5,8} \s*[\d\.]{5,8} \s*[\d\.]{5,8} \s*[\d\.]{5,8} \s*[\d\.]{5,8} test-success.t (re)
1058 \s*[\d\.]{5,8} \s*[\d\.]{5,8} \s*[\d\.]{5,8} \s*[\d\.]{5,8} \s*[\d\.]{5,8} test-success.t (re)
1059
1059
1060 test for --time with --job enabled
1060 test for --time with --job enabled
1061 ====================================
1061 ====================================
1062
1062
1063 $ rt test-success.t --time --jobs 2
1063 $ rt test-success.t --time --jobs 2
1064 running 1 tests using 1 parallel processes
1064 running 1 tests using 1 parallel processes
1065 .
1065 .
1066 # Ran 1 tests, 0 skipped, 0 failed.
1066 # Ran 1 tests, 0 skipped, 0 failed.
1067 # Producing time report
1067 # Producing time report
1068 start end cuser csys real Test
1068 start end cuser csys real Test
1069 \s*[\d\.]{5,8} \s*[\d\.]{5,8} \s*[\d\.]{5,8} \s*[\d\.]{5,8} \s*[\d\.]{5,8} test-success.t (re)
1069 \s*[\d\.]{5,8} \s*[\d\.]{5,8} \s*[\d\.]{5,8} \s*[\d\.]{5,8} \s*[\d\.]{5,8} test-success.t (re)
1070
1070
1071 Skips
1071 Skips
1072 ================
1072 ================
1073 $ cat > test-skip.t <<EOF
1073 $ cat > test-skip.t <<EOF
1074 > $ echo xyzzy
1074 > $ echo xyzzy
1075 > #if true
1075 > #if true
1076 > #require false
1076 > #require false
1077 > #end
1077 > #end
1078 > EOF
1078 > EOF
1079 $ cat > test-noskip.t <<EOF
1079 $ cat > test-noskip.t <<EOF
1080 > #if false
1080 > #if false
1081 > #require false
1081 > #require false
1082 > #endif
1082 > #endif
1083 > EOF
1083 > EOF
1084 $ rt --nodiff
1084 $ rt --nodiff
1085 running 4 tests using 1 parallel processes
1085 running 4 tests using 1 parallel processes
1086 !.s.
1086 !.s.
1087 Skipped test-skip.t: missing feature: nail clipper
1087 Skipped test-skip.t: missing feature: nail clipper
1088 Failed test-failure.t: output changed
1088 Failed test-failure.t: output changed
1089 # Ran 3 tests, 1 skipped, 1 failed.
1089 # Ran 3 tests, 1 skipped, 1 failed.
1090 python hash seed: * (glob)
1090 python hash seed: * (glob)
1091 [1]
1091 [1]
1092
1092
1093 $ rm test-noskip.t
1093 $ rm test-noskip.t
1094 $ rt --keyword xyzzy
1094 $ rt --keyword xyzzy
1095 running 3 tests using 1 parallel processes
1095 running 3 tests using 1 parallel processes
1096 .s
1096 .s
1097 Skipped test-skip.t: missing feature: nail clipper
1097 Skipped test-skip.t: missing feature: nail clipper
1098 # Ran 2 tests, 2 skipped, 0 failed.
1098 # Ran 2 tests, 2 skipped, 0 failed.
1099
1099
1100 Skips with xml
1100 Skips with xml
1101 $ rt --keyword xyzzy \
1101 $ rt --keyword xyzzy \
1102 > --xunit=xunit.xml
1102 > --xunit=xunit.xml
1103 running 3 tests using 1 parallel processes
1103 running 3 tests using 1 parallel processes
1104 .s
1104 .s
1105 Skipped test-skip.t: missing feature: nail clipper
1105 Skipped test-skip.t: missing feature: nail clipper
1106 # Ran 2 tests, 2 skipped, 0 failed.
1106 # Ran 2 tests, 2 skipped, 0 failed.
1107 $ cat xunit.xml
1107 $ cat xunit.xml
1108 <?xml version="1.0" encoding="utf-8"?>
1108 <?xml version="1.0" encoding="utf-8"?>
1109 <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="2">
1109 <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="2">
1110 <testcase name="test-success.t" time="*"/> (glob)
1110 <testcase name="test-success.t" time="*"/> (glob)
1111 <testcase name="test-skip.t">
1111 <testcase name="test-skip.t">
1112 <skipped><![CDATA[missing feature: nail clipper]]></skipped> (py38 !)
1112 <skipped><![CDATA[missing feature: nail clipper]]></skipped> (py38 !)
1113 <skipped> (no-py38 !)
1113 <skipped> (no-py38 !)
1114 <![CDATA[missing feature: nail clipper]]> </skipped> (no-py38 !)
1114 <![CDATA[missing feature: nail clipper]]> </skipped> (no-py38 !)
1115 </testcase>
1115 </testcase>
1116 </testsuite>
1116 </testsuite>
1117
1117
1118 Missing skips or blacklisted skips don't count as executed:
1118 Missing skips or blacklisted skips don't count as executed:
1119 $ echo test-failure.t > blacklist
1119 $ mkdir tests
1120 $ echo tests/test-failure.t > blacklist
1121 $ cp test-failure.t tests
1120 $ rt --blacklist=blacklist --json\
1122 $ rt --blacklist=blacklist --json\
1121 > test-failure.t test-bogus.t
1123 > tests/test-failure.t tests/test-bogus.t
1122 running 2 tests using 1 parallel processes
1124 running 2 tests using 1 parallel processes
1123 ss
1125 ss
1124 Skipped test-bogus.t: Doesn't exist
1126 Skipped test-bogus.t: Doesn't exist
1125 Skipped test-failure.t: blacklisted
1127 Skipped test-failure.t: blacklisted
1126 # Ran 0 tests, 2 skipped, 0 failed.
1128 # Ran 0 tests, 2 skipped, 0 failed.
1127 $ cat report.json
1129 $ cat tests/report.json
1128 testreport ={
1130 testreport ={
1129 "test-bogus.t": {
1131 "test-bogus.t": {
1130 "result": "skip"
1132 "result": "skip"
1131 },
1133 },
1132 "test-failure.t": {
1134 "test-failure.t": {
1133 "result": "skip"
1135 "result": "skip"
1134 }
1136 }
1135 } (no-eol)
1137 } (no-eol)
1138 $ rm -r tests
1139 $ echo test-failure.t > blacklist
1136
1140
1137 Whitelist trumps blacklist
1141 Whitelist trumps blacklist
1138 $ echo test-failure.t > whitelist
1142 $ echo test-failure.t > whitelist
1139 $ rt --blacklist=blacklist --whitelist=whitelist --json\
1143 $ rt --blacklist=blacklist --whitelist=whitelist --json\
1140 > test-failure.t test-bogus.t
1144 > test-failure.t test-bogus.t
1141 running 2 tests using 1 parallel processes
1145 running 2 tests using 1 parallel processes
1142 s
1146 s
1143 --- $TESTTMP/test-failure.t
1147 --- $TESTTMP/test-failure.t
1144 +++ $TESTTMP/test-failure.t.err
1148 +++ $TESTTMP/test-failure.t.err
1145 @@ -1,5 +1,5 @@
1149 @@ -1,5 +1,5 @@
1146 $ echo babar
1150 $ echo babar
1147 - rataxes
1151 - rataxes
1148 + babar
1152 + babar
1149 This is a noop statement so that
1153 This is a noop statement so that
1150 this test is still more bytes than success.
1154 this test is still more bytes than success.
1151 pad pad pad pad............................................................
1155 pad pad pad pad............................................................
1152
1156
1153 ERROR: test-failure.t output changed
1157 ERROR: test-failure.t output changed
1154 !
1158 !
1155 Skipped test-bogus.t: Doesn't exist
1159 Skipped test-bogus.t: Doesn't exist
1156 Failed test-failure.t: output changed
1160 Failed test-failure.t: output changed
1157 # Ran 1 tests, 1 skipped, 1 failed.
1161 # Ran 1 tests, 1 skipped, 1 failed.
1158 python hash seed: * (glob)
1162 python hash seed: * (glob)
1159 [1]
1163 [1]
1160
1164
1161 Ensure that --test-list causes only the tests listed in that file to
1165 Ensure that --test-list causes only the tests listed in that file to
1162 be executed.
1166 be executed.
1163 $ echo test-success.t >> onlytest
1167 $ echo test-success.t >> onlytest
1164 $ rt --test-list=onlytest
1168 $ rt --test-list=onlytest
1165 running 1 tests using 1 parallel processes
1169 running 1 tests using 1 parallel processes
1166 .
1170 .
1167 # Ran 1 tests, 0 skipped, 0 failed.
1171 # Ran 1 tests, 0 skipped, 0 failed.
1168 $ echo test-bogus.t >> anothertest
1172 $ echo test-bogus.t >> anothertest
1169 $ rt --test-list=onlytest --test-list=anothertest
1173 $ rt --test-list=onlytest --test-list=anothertest
1170 running 2 tests using 1 parallel processes
1174 running 2 tests using 1 parallel processes
1171 s.
1175 s.
1172 Skipped test-bogus.t: Doesn't exist
1176 Skipped test-bogus.t: Doesn't exist
1173 # Ran 1 tests, 1 skipped, 0 failed.
1177 # Ran 1 tests, 1 skipped, 0 failed.
1174 $ rm onlytest anothertest
1178 $ rm onlytest anothertest
1175
1179
1176 test for --json
1180 test for --json
1177 ==================
1181 ==================
1178
1182
1179 $ rt --json
1183 $ rt --json
1180 running 3 tests using 1 parallel processes
1184 running 3 tests using 1 parallel processes
1181
1185
1182 --- $TESTTMP/test-failure.t
1186 --- $TESTTMP/test-failure.t
1183 +++ $TESTTMP/test-failure.t.err
1187 +++ $TESTTMP/test-failure.t.err
1184 @@ -1,5 +1,5 @@
1188 @@ -1,5 +1,5 @@
1185 $ echo babar
1189 $ echo babar
1186 - rataxes
1190 - rataxes
1187 + babar
1191 + babar
1188 This is a noop statement so that
1192 This is a noop statement so that
1189 this test is still more bytes than success.
1193 this test is still more bytes than success.
1190 pad pad pad pad............................................................
1194 pad pad pad pad............................................................
1191
1195
1192 ERROR: test-failure.t output changed
1196 ERROR: test-failure.t output changed
1193 !.s
1197 !.s
1194 Skipped test-skip.t: missing feature: nail clipper
1198 Skipped test-skip.t: missing feature: nail clipper
1195 Failed test-failure.t: output changed
1199 Failed test-failure.t: output changed
1196 # Ran 2 tests, 1 skipped, 1 failed.
1200 # Ran 2 tests, 1 skipped, 1 failed.
1197 python hash seed: * (glob)
1201 python hash seed: * (glob)
1198 [1]
1202 [1]
1199
1203
1200 $ cat report.json
1204 $ cat report.json
1201 testreport ={
1205 testreport ={
1202 "test-failure.t": [\{] (re)
1206 "test-failure.t": [\{] (re)
1203 "csys": "\s*\d+\.\d{3,4}", ? (re)
1207 "csys": "\s*\d+\.\d{3,4}", ? (re)
1204 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1208 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1205 "diff": "---.+\+\+\+.+", ? (re)
1209 "diff": "---.+\+\+\+.+", ? (re)
1206 "end": "\s*\d+\.\d{3,4}", ? (re)
1210 "end": "\s*\d+\.\d{3,4}", ? (re)
1207 "result": "failure", ? (re)
1211 "result": "failure", ? (re)
1208 "start": "\s*\d+\.\d{3,4}", ? (re)
1212 "start": "\s*\d+\.\d{3,4}", ? (re)
1209 "time": "\s*\d+\.\d{3,4}" (re)
1213 "time": "\s*\d+\.\d{3,4}" (re)
1210 }, ? (re)
1214 }, ? (re)
1211 "test-skip.t": {
1215 "test-skip.t": {
1212 "csys": "\s*\d+\.\d{3,4}", ? (re)
1216 "csys": "\s*\d+\.\d{3,4}", ? (re)
1213 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1217 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1214 "diff": "", ? (re)
1218 "diff": "", ? (re)
1215 "end": "\s*\d+\.\d{3,4}", ? (re)
1219 "end": "\s*\d+\.\d{3,4}", ? (re)
1216 "result": "skip", ? (re)
1220 "result": "skip", ? (re)
1217 "start": "\s*\d+\.\d{3,4}", ? (re)
1221 "start": "\s*\d+\.\d{3,4}", ? (re)
1218 "time": "\s*\d+\.\d{3,4}" (re)
1222 "time": "\s*\d+\.\d{3,4}" (re)
1219 }, ? (re)
1223 }, ? (re)
1220 "test-success.t": [\{] (re)
1224 "test-success.t": [\{] (re)
1221 "csys": "\s*\d+\.\d{3,4}", ? (re)
1225 "csys": "\s*\d+\.\d{3,4}", ? (re)
1222 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1226 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1223 "diff": "", ? (re)
1227 "diff": "", ? (re)
1224 "end": "\s*\d+\.\d{3,4}", ? (re)
1228 "end": "\s*\d+\.\d{3,4}", ? (re)
1225 "result": "success", ? (re)
1229 "result": "success", ? (re)
1226 "start": "\s*\d+\.\d{3,4}", ? (re)
1230 "start": "\s*\d+\.\d{3,4}", ? (re)
1227 "time": "\s*\d+\.\d{3,4}" (re)
1231 "time": "\s*\d+\.\d{3,4}" (re)
1228 }
1232 }
1229 } (no-eol)
1233 } (no-eol)
1230 --json with --outputdir
1234 --json with --outputdir
1231
1235
1232 $ rm report.json
1236 $ rm report.json
1233 $ rm -r output
1237 $ rm -r output
1234 $ mkdir output
1238 $ mkdir output
1235 $ rt --json --outputdir output
1239 $ rt --json --outputdir output
1236 running 3 tests using 1 parallel processes
1240 running 3 tests using 1 parallel processes
1237
1241
1238 --- $TESTTMP/test-failure.t
1242 --- $TESTTMP/test-failure.t
1239 +++ $TESTTMP/output/test-failure.t.err
1243 +++ $TESTTMP/output/test-failure.t.err
1240 @@ -1,5 +1,5 @@
1244 @@ -1,5 +1,5 @@
1241 $ echo babar
1245 $ echo babar
1242 - rataxes
1246 - rataxes
1243 + babar
1247 + babar
1244 This is a noop statement so that
1248 This is a noop statement so that
1245 this test is still more bytes than success.
1249 this test is still more bytes than success.
1246 pad pad pad pad............................................................
1250 pad pad pad pad............................................................
1247
1251
1248 ERROR: test-failure.t output changed
1252 ERROR: test-failure.t output changed
1249 !.s
1253 !.s
1250 Skipped test-skip.t: missing feature: nail clipper
1254 Skipped test-skip.t: missing feature: nail clipper
1251 Failed test-failure.t: output changed
1255 Failed test-failure.t: output changed
1252 # Ran 2 tests, 1 skipped, 1 failed.
1256 # Ran 2 tests, 1 skipped, 1 failed.
1253 python hash seed: * (glob)
1257 python hash seed: * (glob)
1254 [1]
1258 [1]
1255 $ f report.json
1259 $ f report.json
1256 report.json: file not found
1260 report.json: file not found
1257 $ cat output/report.json
1261 $ cat output/report.json
1258 testreport ={
1262 testreport ={
1259 "test-failure.t": [\{] (re)
1263 "test-failure.t": [\{] (re)
1260 "csys": "\s*\d+\.\d{3,4}", ? (re)
1264 "csys": "\s*\d+\.\d{3,4}", ? (re)
1261 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1265 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1262 "diff": "---.+\+\+\+.+", ? (re)
1266 "diff": "---.+\+\+\+.+", ? (re)
1263 "end": "\s*\d+\.\d{3,4}", ? (re)
1267 "end": "\s*\d+\.\d{3,4}", ? (re)
1264 "result": "failure", ? (re)
1268 "result": "failure", ? (re)
1265 "start": "\s*\d+\.\d{3,4}", ? (re)
1269 "start": "\s*\d+\.\d{3,4}", ? (re)
1266 "time": "\s*\d+\.\d{3,4}" (re)
1270 "time": "\s*\d+\.\d{3,4}" (re)
1267 }, ? (re)
1271 }, ? (re)
1268 "test-skip.t": {
1272 "test-skip.t": {
1269 "csys": "\s*\d+\.\d{3,4}", ? (re)
1273 "csys": "\s*\d+\.\d{3,4}", ? (re)
1270 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1274 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1271 "diff": "", ? (re)
1275 "diff": "", ? (re)
1272 "end": "\s*\d+\.\d{3,4}", ? (re)
1276 "end": "\s*\d+\.\d{3,4}", ? (re)
1273 "result": "skip", ? (re)
1277 "result": "skip", ? (re)
1274 "start": "\s*\d+\.\d{3,4}", ? (re)
1278 "start": "\s*\d+\.\d{3,4}", ? (re)
1275 "time": "\s*\d+\.\d{3,4}" (re)
1279 "time": "\s*\d+\.\d{3,4}" (re)
1276 }, ? (re)
1280 }, ? (re)
1277 "test-success.t": [\{] (re)
1281 "test-success.t": [\{] (re)
1278 "csys": "\s*\d+\.\d{3,4}", ? (re)
1282 "csys": "\s*\d+\.\d{3,4}", ? (re)
1279 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1283 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1280 "diff": "", ? (re)
1284 "diff": "", ? (re)
1281 "end": "\s*\d+\.\d{3,4}", ? (re)
1285 "end": "\s*\d+\.\d{3,4}", ? (re)
1282 "result": "success", ? (re)
1286 "result": "success", ? (re)
1283 "start": "\s*\d+\.\d{3,4}", ? (re)
1287 "start": "\s*\d+\.\d{3,4}", ? (re)
1284 "time": "\s*\d+\.\d{3,4}" (re)
1288 "time": "\s*\d+\.\d{3,4}" (re)
1285 }
1289 }
1286 } (no-eol)
1290 } (no-eol)
1287 $ ls -a output
1291 $ ls -a output
1288 .
1292 .
1289 ..
1293 ..
1290 .testtimes
1294 .testtimes
1291 report.json
1295 report.json
1292 test-failure.t.err
1296 test-failure.t.err
1293
1297
1294 Test that failed test accepted through interactive are properly reported:
1298 Test that failed test accepted through interactive are properly reported:
1295
1299
1296 $ cp test-failure.t backup
1300 $ cp test-failure.t backup
1297 $ echo y | rt --json -i
1301 $ echo y | rt --json -i
1298 running 3 tests using 1 parallel processes
1302 running 3 tests using 1 parallel processes
1299
1303
1300 --- $TESTTMP/test-failure.t
1304 --- $TESTTMP/test-failure.t
1301 +++ $TESTTMP/test-failure.t.err
1305 +++ $TESTTMP/test-failure.t.err
1302 @@ -1,5 +1,5 @@
1306 @@ -1,5 +1,5 @@
1303 $ echo babar
1307 $ echo babar
1304 - rataxes
1308 - rataxes
1305 + babar
1309 + babar
1306 This is a noop statement so that
1310 This is a noop statement so that
1307 this test is still more bytes than success.
1311 this test is still more bytes than success.
1308 pad pad pad pad............................................................
1312 pad pad pad pad............................................................
1309 Accept this change? [y/N] ..s
1313 Accept this change? [y/N] ..s
1310 Skipped test-skip.t: missing feature: nail clipper
1314 Skipped test-skip.t: missing feature: nail clipper
1311 # Ran 2 tests, 1 skipped, 0 failed.
1315 # Ran 2 tests, 1 skipped, 0 failed.
1312
1316
1313 $ cat report.json
1317 $ cat report.json
1314 testreport ={
1318 testreport ={
1315 "test-failure.t": [\{] (re)
1319 "test-failure.t": [\{] (re)
1316 "csys": "\s*\d+\.\d{3,4}", ? (re)
1320 "csys": "\s*\d+\.\d{3,4}", ? (re)
1317 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1321 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1318 "diff": "", ? (re)
1322 "diff": "", ? (re)
1319 "end": "\s*\d+\.\d{3,4}", ? (re)
1323 "end": "\s*\d+\.\d{3,4}", ? (re)
1320 "result": "success", ? (re)
1324 "result": "success", ? (re)
1321 "start": "\s*\d+\.\d{3,4}", ? (re)
1325 "start": "\s*\d+\.\d{3,4}", ? (re)
1322 "time": "\s*\d+\.\d{3,4}" (re)
1326 "time": "\s*\d+\.\d{3,4}" (re)
1323 }, ? (re)
1327 }, ? (re)
1324 "test-skip.t": {
1328 "test-skip.t": {
1325 "csys": "\s*\d+\.\d{3,4}", ? (re)
1329 "csys": "\s*\d+\.\d{3,4}", ? (re)
1326 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1330 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1327 "diff": "", ? (re)
1331 "diff": "", ? (re)
1328 "end": "\s*\d+\.\d{3,4}", ? (re)
1332 "end": "\s*\d+\.\d{3,4}", ? (re)
1329 "result": "skip", ? (re)
1333 "result": "skip", ? (re)
1330 "start": "\s*\d+\.\d{3,4}", ? (re)
1334 "start": "\s*\d+\.\d{3,4}", ? (re)
1331 "time": "\s*\d+\.\d{3,4}" (re)
1335 "time": "\s*\d+\.\d{3,4}" (re)
1332 }, ? (re)
1336 }, ? (re)
1333 "test-success.t": [\{] (re)
1337 "test-success.t": [\{] (re)
1334 "csys": "\s*\d+\.\d{3,4}", ? (re)
1338 "csys": "\s*\d+\.\d{3,4}", ? (re)
1335 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1339 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1336 "diff": "", ? (re)
1340 "diff": "", ? (re)
1337 "end": "\s*\d+\.\d{3,4}", ? (re)
1341 "end": "\s*\d+\.\d{3,4}", ? (re)
1338 "result": "success", ? (re)
1342 "result": "success", ? (re)
1339 "start": "\s*\d+\.\d{3,4}", ? (re)
1343 "start": "\s*\d+\.\d{3,4}", ? (re)
1340 "time": "\s*\d+\.\d{3,4}" (re)
1344 "time": "\s*\d+\.\d{3,4}" (re)
1341 }
1345 }
1342 } (no-eol)
1346 } (no-eol)
1343 $ mv backup test-failure.t
1347 $ mv backup test-failure.t
1344
1348
1345 backslash on end of line with glob matching is handled properly
1349 backslash on end of line with glob matching is handled properly
1346
1350
1347 $ cat > test-glob-backslash.t << EOF
1351 $ cat > test-glob-backslash.t << EOF
1348 > $ echo 'foo bar \\'
1352 > $ echo 'foo bar \\'
1349 > foo * \ (glob)
1353 > foo * \ (glob)
1350 > EOF
1354 > EOF
1351
1355
1352 $ rt test-glob-backslash.t
1356 $ rt test-glob-backslash.t
1353 running 1 tests using 1 parallel processes
1357 running 1 tests using 1 parallel processes
1354 .
1358 .
1355 # Ran 1 tests, 0 skipped, 0 failed.
1359 # Ran 1 tests, 0 skipped, 0 failed.
1356
1360
1357 $ rm -f test-glob-backslash.t
1361 $ rm -f test-glob-backslash.t
1358
1362
1359 Test globbing of local IP addresses
1363 Test globbing of local IP addresses
1360 $ echo 172.16.18.1
1364 $ echo 172.16.18.1
1361 $LOCALIP (glob)
1365 $LOCALIP (glob)
1362 $ echo dead:beef::1
1366 $ echo dead:beef::1
1363 $LOCALIP (glob)
1367 $LOCALIP (glob)
1364
1368
1365 Add support for external test formatter
1369 Add support for external test formatter
1366 =======================================
1370 =======================================
1367
1371
1368 $ CUSTOM_TEST_RESULT=basic_test_result "$PYTHON" $TESTDIR/run-tests.py --with-hg=`which hg` -j1 "$@" test-success.t test-failure.t
1372 $ CUSTOM_TEST_RESULT=basic_test_result "$PYTHON" $TESTDIR/run-tests.py --with-hg=`which hg` -j1 "$@" test-success.t test-failure.t
1369 running 2 tests using 1 parallel processes
1373 running 2 tests using 1 parallel processes
1370
1374
1371 # Ran 2 tests, 0 skipped, 0 failed.
1375 # Ran 2 tests, 0 skipped, 0 failed.
1372 ON_START! <__main__.TestSuite tests=[<__main__.TTest testMethod=test-failure.t>, <__main__.TTest testMethod=test-success.t>]>
1376 ON_START! <__main__.TestSuite tests=[<__main__.TTest testMethod=test-failure.t>, <__main__.TTest testMethod=test-success.t>]>
1373 FAILURE! test-failure.t output changed
1377 FAILURE! test-failure.t output changed
1374 SUCCESS! test-success.t
1378 SUCCESS! test-success.t
1375 ON_END!
1379 ON_END!
1376
1380
1377 Test reusability for third party tools
1381 Test reusability for third party tools
1378 ======================================
1382 ======================================
1379
1383
1380 $ mkdir "$TESTTMP"/anothertests
1384 $ mkdir "$TESTTMP"/anothertests
1381 $ cd "$TESTTMP"/anothertests
1385 $ cd "$TESTTMP"/anothertests
1382
1386
1383 test that `run-tests.py` can execute hghave, even if it runs not in
1387 test that `run-tests.py` can execute hghave, even if it runs not in
1384 Mercurial source tree.
1388 Mercurial source tree.
1385
1389
1386 $ cat > test-hghave.t <<EOF
1390 $ cat > test-hghave.t <<EOF
1387 > #require true
1391 > #require true
1388 > $ echo foo
1392 > $ echo foo
1389 > foo
1393 > foo
1390 > EOF
1394 > EOF
1391 $ rt test-hghave.t
1395 $ rt test-hghave.t
1392 running 1 tests using 1 parallel processes
1396 running 1 tests using 1 parallel processes
1393 .
1397 .
1394 # Ran 1 tests, 0 skipped, 0 failed.
1398 # Ran 1 tests, 0 skipped, 0 failed.
1395
1399
1396 test that RUNTESTDIR refers the directory, in which `run-tests.py` now
1400 test that RUNTESTDIR refers the directory, in which `run-tests.py` now
1397 running is placed.
1401 running is placed.
1398
1402
1399 $ cat > test-runtestdir.t <<EOF
1403 $ cat > test-runtestdir.t <<EOF
1400 > - $TESTDIR, in which test-run-tests.t is placed
1404 > - $TESTDIR, in which test-run-tests.t is placed
1401 > - \$TESTDIR, in which test-runtestdir.t is placed (expanded at runtime)
1405 > - \$TESTDIR, in which test-runtestdir.t is placed (expanded at runtime)
1402 > - \$RUNTESTDIR, in which run-tests.py is placed (expanded at runtime)
1406 > - \$RUNTESTDIR, in which run-tests.py is placed (expanded at runtime)
1403 >
1407 >
1404 > #if windows
1408 > #if windows
1405 > $ test "\$TESTDIR" = "$TESTTMP\anothertests"
1409 > $ test "\$TESTDIR" = "$TESTTMP\anothertests"
1406 > #else
1410 > #else
1407 > $ test "\$TESTDIR" = "$TESTTMP"/anothertests
1411 > $ test "\$TESTDIR" = "$TESTTMP"/anothertests
1408 > #endif
1412 > #endif
1409 > If this prints a path, that means RUNTESTDIR didn't equal
1413 > If this prints a path, that means RUNTESTDIR didn't equal
1410 > TESTDIR as it should have.
1414 > TESTDIR as it should have.
1411 > $ test "\$RUNTESTDIR" = "$TESTDIR" || echo "\$RUNTESTDIR"
1415 > $ test "\$RUNTESTDIR" = "$TESTDIR" || echo "\$RUNTESTDIR"
1412 > This should print the start of check-code. If this passes but the
1416 > This should print the start of check-code. If this passes but the
1413 > previous check failed, that means we found a copy of check-code at whatever
1417 > previous check failed, that means we found a copy of check-code at whatever
1414 > RUNTESTSDIR ended up containing, even though it doesn't match TESTDIR.
1418 > RUNTESTSDIR ended up containing, even though it doesn't match TESTDIR.
1415 > $ head -n 3 "\$RUNTESTDIR"/../contrib/check-code.py | sed 's@.!.*python3@#!USRBINENVPY@'
1419 > $ head -n 3 "\$RUNTESTDIR"/../contrib/check-code.py | sed 's@.!.*python3@#!USRBINENVPY@'
1416 > #!USRBINENVPY
1420 > #!USRBINENVPY
1417 > #
1421 > #
1418 > # check-code - a style and portability checker for Mercurial
1422 > # check-code - a style and portability checker for Mercurial
1419 > EOF
1423 > EOF
1420 $ rt test-runtestdir.t
1424 $ rt test-runtestdir.t
1421 running 1 tests using 1 parallel processes
1425 running 1 tests using 1 parallel processes
1422 .
1426 .
1423 # Ran 1 tests, 0 skipped, 0 failed.
1427 # Ran 1 tests, 0 skipped, 0 failed.
1424
1428
1425 #if execbit
1429 #if execbit
1426
1430
1427 test that TESTDIR is referred in PATH
1431 test that TESTDIR is referred in PATH
1428
1432
1429 $ cat > custom-command.sh <<EOF
1433 $ cat > custom-command.sh <<EOF
1430 > #!/bin/sh
1434 > #!/bin/sh
1431 > echo "hello world"
1435 > echo "hello world"
1432 > EOF
1436 > EOF
1433 $ chmod +x custom-command.sh
1437 $ chmod +x custom-command.sh
1434 $ cat > test-testdir-path.t <<EOF
1438 $ cat > test-testdir-path.t <<EOF
1435 > $ custom-command.sh
1439 > $ custom-command.sh
1436 > hello world
1440 > hello world
1437 > EOF
1441 > EOF
1438 $ rt test-testdir-path.t
1442 $ rt test-testdir-path.t
1439 running 1 tests using 1 parallel processes
1443 running 1 tests using 1 parallel processes
1440 .
1444 .
1441 # Ran 1 tests, 0 skipped, 0 failed.
1445 # Ran 1 tests, 0 skipped, 0 failed.
1442
1446
1443 #endif
1447 #endif
1444
1448
1445 test support for --allow-slow-tests
1449 test support for --allow-slow-tests
1446 $ cat > test-very-slow-test.t <<EOF
1450 $ cat > test-very-slow-test.t <<EOF
1447 > #require slow
1451 > #require slow
1448 > $ echo pass
1452 > $ echo pass
1449 > pass
1453 > pass
1450 > EOF
1454 > EOF
1451 $ rt test-very-slow-test.t
1455 $ rt test-very-slow-test.t
1452 running 1 tests using 1 parallel processes
1456 running 1 tests using 1 parallel processes
1453 s
1457 s
1454 Skipped test-very-slow-test.t: missing feature: allow slow tests (use --allow-slow-tests)
1458 Skipped test-very-slow-test.t: missing feature: allow slow tests (use --allow-slow-tests)
1455 # Ran 0 tests, 1 skipped, 0 failed.
1459 # Ran 0 tests, 1 skipped, 0 failed.
1456 $ rt $HGTEST_RUN_TESTS_PURE --allow-slow-tests test-very-slow-test.t
1460 $ rt $HGTEST_RUN_TESTS_PURE --allow-slow-tests test-very-slow-test.t
1457 running 1 tests using 1 parallel processes
1461 running 1 tests using 1 parallel processes
1458 .
1462 .
1459 # Ran 1 tests, 0 skipped, 0 failed.
1463 # Ran 1 tests, 0 skipped, 0 failed.
1460
1464
1461 support for running a test outside the current directory
1465 support for running a test outside the current directory
1462 $ mkdir nonlocal
1466 $ mkdir nonlocal
1463 $ cat > nonlocal/test-is-not-here.t << EOF
1467 $ cat > nonlocal/test-is-not-here.t << EOF
1464 > $ echo pass
1468 > $ echo pass
1465 > pass
1469 > pass
1466 > EOF
1470 > EOF
1467 $ rt nonlocal/test-is-not-here.t
1471 $ rt nonlocal/test-is-not-here.t
1468 running 1 tests using 1 parallel processes
1472 running 1 tests using 1 parallel processes
1469 .
1473 .
1470 # Ran 1 tests, 0 skipped, 0 failed.
1474 # Ran 1 tests, 0 skipped, 0 failed.
1471
1475
1472 support for automatically discovering test if arg is a folder
1476 support for automatically discovering test if arg is a folder
1473 $ mkdir tmp && cd tmp
1477 $ mkdir tmp && cd tmp
1474
1478
1475 $ cat > test-uno.t << EOF
1479 $ cat > test-uno.t << EOF
1476 > $ echo line
1480 > $ echo line
1477 > line
1481 > line
1478 > EOF
1482 > EOF
1479
1483
1480 $ cp test-uno.t test-dos.t
1484 $ cp test-uno.t test-dos.t
1481 $ cd ..
1485 $ cd ..
1482 $ cp -R tmp tmpp
1486 $ cp -R tmp tmpp
1483 $ cp tmp/test-uno.t test-solo.t
1487 $ cp tmp/test-uno.t test-solo.t
1484
1488
1485 $ rt tmp/ test-solo.t tmpp
1489 $ rt tmp/ test-solo.t tmpp
1486 running 5 tests using 1 parallel processes
1490 running 5 tests using 1 parallel processes
1487 .....
1491 .....
1488 # Ran 5 tests, 0 skipped, 0 failed.
1492 # Ran 5 tests, 0 skipped, 0 failed.
1489 $ rm -rf tmp tmpp
1493 $ rm -rf tmp tmpp
1490
1494
1491 support for running run-tests.py from another directory
1495 support for running run-tests.py from another directory
1492 $ mkdir tmp && cd tmp
1496 $ mkdir tmp && cd tmp
1493
1497
1494 $ cat > useful-file.sh << EOF
1498 $ cat > useful-file.sh << EOF
1495 > important command
1499 > important command
1496 > EOF
1500 > EOF
1497
1501
1498 $ cat > test-folder.t << EOF
1502 $ cat > test-folder.t << EOF
1499 > $ cat \$TESTDIR/useful-file.sh
1503 > $ cat \$TESTDIR/useful-file.sh
1500 > important command
1504 > important command
1501 > EOF
1505 > EOF
1502
1506
1503 $ cat > test-folder-fail.t << EOF
1507 $ cat > test-folder-fail.t << EOF
1504 > $ cat \$TESTDIR/useful-file.sh
1508 > $ cat \$TESTDIR/useful-file.sh
1505 > important commando
1509 > important commando
1506 > EOF
1510 > EOF
1507
1511
1508 $ cd ..
1512 $ cd ..
1509 $ rt tmp/test-*.t
1513 $ rt tmp/test-*.t
1510 running 2 tests using 1 parallel processes
1514 running 2 tests using 1 parallel processes
1511
1515
1512 --- $TESTTMP/anothertests/tmp/test-folder-fail.t
1516 --- $TESTTMP/anothertests/tmp/test-folder-fail.t
1513 +++ $TESTTMP/anothertests/tmp/test-folder-fail.t.err
1517 +++ $TESTTMP/anothertests/tmp/test-folder-fail.t.err
1514 @@ -1,2 +1,2 @@
1518 @@ -1,2 +1,2 @@
1515 $ cat $TESTDIR/useful-file.sh
1519 $ cat $TESTDIR/useful-file.sh
1516 - important commando
1520 - important commando
1517 + important command
1521 + important command
1518
1522
1519 ERROR: test-folder-fail.t output changed
1523 ERROR: test-folder-fail.t output changed
1520 !.
1524 !.
1521 Failed test-folder-fail.t: output changed
1525 Failed test-folder-fail.t: output changed
1522 # Ran 2 tests, 0 skipped, 1 failed.
1526 # Ran 2 tests, 0 skipped, 1 failed.
1523 python hash seed: * (glob)
1527 python hash seed: * (glob)
1524 [1]
1528 [1]
1525
1529
1526 support for bisecting failed tests automatically
1530 support for bisecting failed tests automatically
1527 $ hg init bisect
1531 $ hg init bisect
1528 $ cd bisect
1532 $ cd bisect
1529 $ cat >> test-bisect.t <<EOF
1533 $ cat >> test-bisect.t <<EOF
1530 > $ echo pass
1534 > $ echo pass
1531 > pass
1535 > pass
1532 > EOF
1536 > EOF
1533 $ hg add test-bisect.t
1537 $ hg add test-bisect.t
1534 $ hg ci -m 'good'
1538 $ hg ci -m 'good'
1535 $ cat >> test-bisect.t <<EOF
1539 $ cat >> test-bisect.t <<EOF
1536 > $ echo pass
1540 > $ echo pass
1537 > fail
1541 > fail
1538 > EOF
1542 > EOF
1539 $ hg ci -m 'bad'
1543 $ hg ci -m 'bad'
1540 $ rt --known-good-rev=0 test-bisect.t
1544 $ rt --known-good-rev=0 test-bisect.t
1541 running 1 tests using 1 parallel processes
1545 running 1 tests using 1 parallel processes
1542
1546
1543 --- $TESTTMP/anothertests/bisect/test-bisect.t
1547 --- $TESTTMP/anothertests/bisect/test-bisect.t
1544 +++ $TESTTMP/anothertests/bisect/test-bisect.t.err
1548 +++ $TESTTMP/anothertests/bisect/test-bisect.t.err
1545 @@ -1,4 +1,4 @@
1549 @@ -1,4 +1,4 @@
1546 $ echo pass
1550 $ echo pass
1547 pass
1551 pass
1548 $ echo pass
1552 $ echo pass
1549 - fail
1553 - fail
1550 + pass
1554 + pass
1551
1555
1552 ERROR: test-bisect.t output changed
1556 ERROR: test-bisect.t output changed
1553 !
1557 !
1554 Failed test-bisect.t: output changed
1558 Failed test-bisect.t: output changed
1555 test-bisect.t broken by 72cbf122d116 (bad)
1559 test-bisect.t broken by 72cbf122d116 (bad)
1556 # Ran 1 tests, 0 skipped, 1 failed.
1560 # Ran 1 tests, 0 skipped, 1 failed.
1557 python hash seed: * (glob)
1561 python hash seed: * (glob)
1558 [1]
1562 [1]
1559
1563
1560 $ cd ..
1564 $ cd ..
1561
1565
1562 support bisecting a separate repo
1566 support bisecting a separate repo
1563
1567
1564 $ hg init bisect-dependent
1568 $ hg init bisect-dependent
1565 $ cd bisect-dependent
1569 $ cd bisect-dependent
1566 $ cat > test-bisect-dependent.t <<EOF
1570 $ cat > test-bisect-dependent.t <<EOF
1567 > $ tail -1 \$TESTDIR/../bisect/test-bisect.t
1571 > $ tail -1 \$TESTDIR/../bisect/test-bisect.t
1568 > pass
1572 > pass
1569 > EOF
1573 > EOF
1570 $ hg commit -Am dependent test-bisect-dependent.t
1574 $ hg commit -Am dependent test-bisect-dependent.t
1571
1575
1572 $ rt --known-good-rev=0 test-bisect-dependent.t
1576 $ rt --known-good-rev=0 test-bisect-dependent.t
1573 running 1 tests using 1 parallel processes
1577 running 1 tests using 1 parallel processes
1574
1578
1575 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1579 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1576 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1580 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1577 @@ -1,2 +1,2 @@
1581 @@ -1,2 +1,2 @@
1578 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1582 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1579 - pass
1583 - pass
1580 + fail
1584 + fail
1581
1585
1582 ERROR: test-bisect-dependent.t output changed
1586 ERROR: test-bisect-dependent.t output changed
1583 !
1587 !
1584 Failed test-bisect-dependent.t: output changed
1588 Failed test-bisect-dependent.t: output changed
1585 Failed to identify failure point for test-bisect-dependent.t
1589 Failed to identify failure point for test-bisect-dependent.t
1586 # Ran 1 tests, 0 skipped, 1 failed.
1590 # Ran 1 tests, 0 skipped, 1 failed.
1587 python hash seed: * (glob)
1591 python hash seed: * (glob)
1588 [1]
1592 [1]
1589
1593
1590 $ rt --bisect-repo=../test-bisect test-bisect-dependent.t
1594 $ rt --bisect-repo=../test-bisect test-bisect-dependent.t
1591 usage: run-tests.py [options] [tests]
1595 usage: run-tests.py [options] [tests]
1592 run-tests.py: error: --bisect-repo cannot be used without --known-good-rev
1596 run-tests.py: error: --bisect-repo cannot be used without --known-good-rev
1593 [2]
1597 [2]
1594
1598
1595 $ rt --known-good-rev=0 --bisect-repo=../bisect test-bisect-dependent.t
1599 $ rt --known-good-rev=0 --bisect-repo=../bisect test-bisect-dependent.t
1596 running 1 tests using 1 parallel processes
1600 running 1 tests using 1 parallel processes
1597
1601
1598 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1602 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1599 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1603 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1600 @@ -1,2 +1,2 @@
1604 @@ -1,2 +1,2 @@
1601 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1605 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1602 - pass
1606 - pass
1603 + fail
1607 + fail
1604
1608
1605 ERROR: test-bisect-dependent.t output changed
1609 ERROR: test-bisect-dependent.t output changed
1606 !
1610 !
1607 Failed test-bisect-dependent.t: output changed
1611 Failed test-bisect-dependent.t: output changed
1608 test-bisect-dependent.t broken by 72cbf122d116 (bad)
1612 test-bisect-dependent.t broken by 72cbf122d116 (bad)
1609 # Ran 1 tests, 0 skipped, 1 failed.
1613 # Ran 1 tests, 0 skipped, 1 failed.
1610 python hash seed: * (glob)
1614 python hash seed: * (glob)
1611 [1]
1615 [1]
1612
1616
1613 $ cd ..
1617 $ cd ..
1614
1618
1615 Test a broken #if statement doesn't break run-tests threading.
1619 Test a broken #if statement doesn't break run-tests threading.
1616 ==============================================================
1620 ==============================================================
1617 $ mkdir broken
1621 $ mkdir broken
1618 $ cd broken
1622 $ cd broken
1619 $ cat > test-broken.t <<EOF
1623 $ cat > test-broken.t <<EOF
1620 > true
1624 > true
1621 > #if notarealhghavefeature
1625 > #if notarealhghavefeature
1622 > $ false
1626 > $ false
1623 > #endif
1627 > #endif
1624 > EOF
1628 > EOF
1625 $ for f in 1 2 3 4 ; do
1629 $ for f in 1 2 3 4 ; do
1626 > cat > test-works-$f.t <<EOF
1630 > cat > test-works-$f.t <<EOF
1627 > This is test case $f
1631 > This is test case $f
1628 > $ sleep 1
1632 > $ sleep 1
1629 > EOF
1633 > EOF
1630 > done
1634 > done
1631 $ rt -j 2
1635 $ rt -j 2
1632 running 5 tests using 2 parallel processes
1636 running 5 tests using 2 parallel processes
1633 ....
1637 ....
1634 # Ran 5 tests, 0 skipped, 0 failed.
1638 # Ran 5 tests, 0 skipped, 0 failed.
1635 skipped: unknown feature: notarealhghavefeature
1639 skipped: unknown feature: notarealhghavefeature
1636
1640
1637 $ cd ..
1641 $ cd ..
1638 $ rm -rf broken
1642 $ rm -rf broken
1639
1643
1640 Test cases in .t files
1644 Test cases in .t files
1641 ======================
1645 ======================
1642 $ mkdir cases
1646 $ mkdir cases
1643 $ cd cases
1647 $ cd cases
1644 $ cat > test-cases-abc.t <<'EOF'
1648 $ cat > test-cases-abc.t <<'EOF'
1645 > #testcases A B C
1649 > #testcases A B C
1646 > $ V=B
1650 > $ V=B
1647 > #if A
1651 > #if A
1648 > $ V=A
1652 > $ V=A
1649 > #endif
1653 > #endif
1650 > #if C
1654 > #if C
1651 > $ V=C
1655 > $ V=C
1652 > #endif
1656 > #endif
1653 > $ echo $V | sed 's/A/C/'
1657 > $ echo $V | sed 's/A/C/'
1654 > C
1658 > C
1655 > #if C
1659 > #if C
1656 > $ [ $V = C ]
1660 > $ [ $V = C ]
1657 > #endif
1661 > #endif
1658 > #if A
1662 > #if A
1659 > $ [ $V = C ]
1663 > $ [ $V = C ]
1660 > [1]
1664 > [1]
1661 > #endif
1665 > #endif
1662 > #if no-C
1666 > #if no-C
1663 > $ [ $V = C ]
1667 > $ [ $V = C ]
1664 > [1]
1668 > [1]
1665 > #endif
1669 > #endif
1666 > $ [ $V = D ]
1670 > $ [ $V = D ]
1667 > [1]
1671 > [1]
1668 > EOF
1672 > EOF
1669 $ rt
1673 $ rt
1670 running 3 tests using 1 parallel processes
1674 running 3 tests using 1 parallel processes
1671 .
1675 .
1672 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1676 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1673 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1677 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1674 @@ -7,7 +7,7 @@
1678 @@ -7,7 +7,7 @@
1675 $ V=C
1679 $ V=C
1676 #endif
1680 #endif
1677 $ echo $V | sed 's/A/C/'
1681 $ echo $V | sed 's/A/C/'
1678 - C
1682 - C
1679 + B
1683 + B
1680 #if C
1684 #if C
1681 $ [ $V = C ]
1685 $ [ $V = C ]
1682 #endif
1686 #endif
1683
1687
1684 ERROR: test-cases-abc.t#B output changed
1688 ERROR: test-cases-abc.t#B output changed
1685 !.
1689 !.
1686 Failed test-cases-abc.t#B: output changed
1690 Failed test-cases-abc.t#B: output changed
1687 # Ran 3 tests, 0 skipped, 1 failed.
1691 # Ran 3 tests, 0 skipped, 1 failed.
1688 python hash seed: * (glob)
1692 python hash seed: * (glob)
1689 [1]
1693 [1]
1690
1694
1691 --restart works
1695 --restart works
1692
1696
1693 $ rt --restart
1697 $ rt --restart
1694 running 2 tests using 1 parallel processes
1698 running 2 tests using 1 parallel processes
1695
1699
1696 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1700 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1697 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1701 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1698 @@ -7,7 +7,7 @@
1702 @@ -7,7 +7,7 @@
1699 $ V=C
1703 $ V=C
1700 #endif
1704 #endif
1701 $ echo $V | sed 's/A/C/'
1705 $ echo $V | sed 's/A/C/'
1702 - C
1706 - C
1703 + B
1707 + B
1704 #if C
1708 #if C
1705 $ [ $V = C ]
1709 $ [ $V = C ]
1706 #endif
1710 #endif
1707
1711
1708 ERROR: test-cases-abc.t#B output changed
1712 ERROR: test-cases-abc.t#B output changed
1709 !.
1713 !.
1710 Failed test-cases-abc.t#B: output changed
1714 Failed test-cases-abc.t#B: output changed
1711 # Ran 2 tests, 0 skipped, 1 failed.
1715 # Ran 2 tests, 0 skipped, 1 failed.
1712 python hash seed: * (glob)
1716 python hash seed: * (glob)
1713 [1]
1717 [1]
1714
1718
1715 --restart works with outputdir
1719 --restart works with outputdir
1716
1720
1717 $ mkdir output
1721 $ mkdir output
1718 $ mv test-cases-abc.t#B.err output
1722 $ mv test-cases-abc.t#B.err output
1719 $ rt --restart --outputdir output
1723 $ rt --restart --outputdir output
1720 running 2 tests using 1 parallel processes
1724 running 2 tests using 1 parallel processes
1721
1725
1722 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1726 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1723 +++ $TESTTMP/anothertests/cases/output/test-cases-abc.t#B.err
1727 +++ $TESTTMP/anothertests/cases/output/test-cases-abc.t#B.err
1724 @@ -7,7 +7,7 @@
1728 @@ -7,7 +7,7 @@
1725 $ V=C
1729 $ V=C
1726 #endif
1730 #endif
1727 $ echo $V | sed 's/A/C/'
1731 $ echo $V | sed 's/A/C/'
1728 - C
1732 - C
1729 + B
1733 + B
1730 #if C
1734 #if C
1731 $ [ $V = C ]
1735 $ [ $V = C ]
1732 #endif
1736 #endif
1733
1737
1734 ERROR: test-cases-abc.t#B output changed
1738 ERROR: test-cases-abc.t#B output changed
1735 !.
1739 !.
1736 Failed test-cases-abc.t#B: output changed
1740 Failed test-cases-abc.t#B: output changed
1737 # Ran 2 tests, 0 skipped, 1 failed.
1741 # Ran 2 tests, 0 skipped, 1 failed.
1738 python hash seed: * (glob)
1742 python hash seed: * (glob)
1739 [1]
1743 [1]
1740
1744
1741 Test TESTCASE variable
1745 Test TESTCASE variable
1742
1746
1743 $ cat > test-cases-ab.t <<'EOF'
1747 $ cat > test-cases-ab.t <<'EOF'
1744 > $ dostuff() {
1748 > $ dostuff() {
1745 > > echo "In case $TESTCASE"
1749 > > echo "In case $TESTCASE"
1746 > > }
1750 > > }
1747 > #testcases A B
1751 > #testcases A B
1748 > #if A
1752 > #if A
1749 > $ dostuff
1753 > $ dostuff
1750 > In case A
1754 > In case A
1751 > #endif
1755 > #endif
1752 > #if B
1756 > #if B
1753 > $ dostuff
1757 > $ dostuff
1754 > In case B
1758 > In case B
1755 > #endif
1759 > #endif
1756 > EOF
1760 > EOF
1757 $ rt test-cases-ab.t
1761 $ rt test-cases-ab.t
1758 running 2 tests using 1 parallel processes
1762 running 2 tests using 1 parallel processes
1759 ..
1763 ..
1760 # Ran 2 tests, 0 skipped, 0 failed.
1764 # Ran 2 tests, 0 skipped, 0 failed.
1761
1765
1762 Support running a specific test case
1766 Support running a specific test case
1763
1767
1764 $ rt "test-cases-abc.t#B"
1768 $ rt "test-cases-abc.t#B"
1765 running 1 tests using 1 parallel processes
1769 running 1 tests using 1 parallel processes
1766
1770
1767 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1771 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1768 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1772 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1769 @@ -7,7 +7,7 @@
1773 @@ -7,7 +7,7 @@
1770 $ V=C
1774 $ V=C
1771 #endif
1775 #endif
1772 $ echo $V | sed 's/A/C/'
1776 $ echo $V | sed 's/A/C/'
1773 - C
1777 - C
1774 + B
1778 + B
1775 #if C
1779 #if C
1776 $ [ $V = C ]
1780 $ [ $V = C ]
1777 #endif
1781 #endif
1778
1782
1779 ERROR: test-cases-abc.t#B output changed
1783 ERROR: test-cases-abc.t#B output changed
1780 !
1784 !
1781 Failed test-cases-abc.t#B: output changed
1785 Failed test-cases-abc.t#B: output changed
1782 # Ran 1 tests, 0 skipped, 1 failed.
1786 # Ran 1 tests, 0 skipped, 1 failed.
1783 python hash seed: * (glob)
1787 python hash seed: * (glob)
1784 [1]
1788 [1]
1785
1789
1786 Support running multiple test cases in the same file
1790 Support running multiple test cases in the same file
1787
1791
1788 $ rt test-cases-abc.t#B test-cases-abc.t#C
1792 $ rt test-cases-abc.t#B test-cases-abc.t#C
1789 running 2 tests using 1 parallel processes
1793 running 2 tests using 1 parallel processes
1790
1794
1791 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1795 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1792 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1796 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1793 @@ -7,7 +7,7 @@
1797 @@ -7,7 +7,7 @@
1794 $ V=C
1798 $ V=C
1795 #endif
1799 #endif
1796 $ echo $V | sed 's/A/C/'
1800 $ echo $V | sed 's/A/C/'
1797 - C
1801 - C
1798 + B
1802 + B
1799 #if C
1803 #if C
1800 $ [ $V = C ]
1804 $ [ $V = C ]
1801 #endif
1805 #endif
1802
1806
1803 ERROR: test-cases-abc.t#B output changed
1807 ERROR: test-cases-abc.t#B output changed
1804 !.
1808 !.
1805 Failed test-cases-abc.t#B: output changed
1809 Failed test-cases-abc.t#B: output changed
1806 # Ran 2 tests, 0 skipped, 1 failed.
1810 # Ran 2 tests, 0 skipped, 1 failed.
1807 python hash seed: * (glob)
1811 python hash seed: * (glob)
1808 [1]
1812 [1]
1809
1813
1810 Support ignoring invalid test cases
1814 Support ignoring invalid test cases
1811
1815
1812 $ rt test-cases-abc.t#B test-cases-abc.t#D
1816 $ rt test-cases-abc.t#B test-cases-abc.t#D
1813 running 1 tests using 1 parallel processes
1817 running 1 tests using 1 parallel processes
1814
1818
1815 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1819 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1816 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1820 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1817 @@ -7,7 +7,7 @@
1821 @@ -7,7 +7,7 @@
1818 $ V=C
1822 $ V=C
1819 #endif
1823 #endif
1820 $ echo $V | sed 's/A/C/'
1824 $ echo $V | sed 's/A/C/'
1821 - C
1825 - C
1822 + B
1826 + B
1823 #if C
1827 #if C
1824 $ [ $V = C ]
1828 $ [ $V = C ]
1825 #endif
1829 #endif
1826
1830
1827 ERROR: test-cases-abc.t#B output changed
1831 ERROR: test-cases-abc.t#B output changed
1828 !
1832 !
1829 Failed test-cases-abc.t#B: output changed
1833 Failed test-cases-abc.t#B: output changed
1830 # Ran 1 tests, 0 skipped, 1 failed.
1834 # Ran 1 tests, 0 skipped, 1 failed.
1831 python hash seed: * (glob)
1835 python hash seed: * (glob)
1832 [1]
1836 [1]
1833
1837
1834 Support running complex test cases names
1838 Support running complex test cases names
1835
1839
1836 $ cat > test-cases-advanced-cases.t <<'EOF'
1840 $ cat > test-cases-advanced-cases.t <<'EOF'
1837 > #testcases simple case-with-dashes casewith_-.chars
1841 > #testcases simple case-with-dashes casewith_-.chars
1838 > $ echo $TESTCASE
1842 > $ echo $TESTCASE
1839 > simple
1843 > simple
1840 > EOF
1844 > EOF
1841
1845
1842 $ cat test-cases-advanced-cases.t
1846 $ cat test-cases-advanced-cases.t
1843 #testcases simple case-with-dashes casewith_-.chars
1847 #testcases simple case-with-dashes casewith_-.chars
1844 $ echo $TESTCASE
1848 $ echo $TESTCASE
1845 simple
1849 simple
1846
1850
1847 $ rt test-cases-advanced-cases.t
1851 $ rt test-cases-advanced-cases.t
1848 running 3 tests using 1 parallel processes
1852 running 3 tests using 1 parallel processes
1849
1853
1850 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1854 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1851 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#case-with-dashes.err
1855 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#case-with-dashes.err
1852 @@ -1,3 +1,3 @@
1856 @@ -1,3 +1,3 @@
1853 #testcases simple case-with-dashes casewith_-.chars
1857 #testcases simple case-with-dashes casewith_-.chars
1854 $ echo $TESTCASE
1858 $ echo $TESTCASE
1855 - simple
1859 - simple
1856 + case-with-dashes
1860 + case-with-dashes
1857
1861
1858 ERROR: test-cases-advanced-cases.t#case-with-dashes output changed
1862 ERROR: test-cases-advanced-cases.t#case-with-dashes output changed
1859 !
1863 !
1860 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1864 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1861 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#casewith_-.chars.err
1865 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#casewith_-.chars.err
1862 @@ -1,3 +1,3 @@
1866 @@ -1,3 +1,3 @@
1863 #testcases simple case-with-dashes casewith_-.chars
1867 #testcases simple case-with-dashes casewith_-.chars
1864 $ echo $TESTCASE
1868 $ echo $TESTCASE
1865 - simple
1869 - simple
1866 + casewith_-.chars
1870 + casewith_-.chars
1867
1871
1868 ERROR: test-cases-advanced-cases.t#casewith_-.chars output changed
1872 ERROR: test-cases-advanced-cases.t#casewith_-.chars output changed
1869 !.
1873 !.
1870 Failed test-cases-advanced-cases.t#case-with-dashes: output changed
1874 Failed test-cases-advanced-cases.t#case-with-dashes: output changed
1871 Failed test-cases-advanced-cases.t#casewith_-.chars: output changed
1875 Failed test-cases-advanced-cases.t#casewith_-.chars: output changed
1872 # Ran 3 tests, 0 skipped, 2 failed.
1876 # Ran 3 tests, 0 skipped, 2 failed.
1873 python hash seed: * (glob)
1877 python hash seed: * (glob)
1874 [1]
1878 [1]
1875
1879
1876 $ rt "test-cases-advanced-cases.t#case-with-dashes"
1880 $ rt "test-cases-advanced-cases.t#case-with-dashes"
1877 running 1 tests using 1 parallel processes
1881 running 1 tests using 1 parallel processes
1878
1882
1879 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1883 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1880 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#case-with-dashes.err
1884 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#case-with-dashes.err
1881 @@ -1,3 +1,3 @@
1885 @@ -1,3 +1,3 @@
1882 #testcases simple case-with-dashes casewith_-.chars
1886 #testcases simple case-with-dashes casewith_-.chars
1883 $ echo $TESTCASE
1887 $ echo $TESTCASE
1884 - simple
1888 - simple
1885 + case-with-dashes
1889 + case-with-dashes
1886
1890
1887 ERROR: test-cases-advanced-cases.t#case-with-dashes output changed
1891 ERROR: test-cases-advanced-cases.t#case-with-dashes output changed
1888 !
1892 !
1889 Failed test-cases-advanced-cases.t#case-with-dashes: output changed
1893 Failed test-cases-advanced-cases.t#case-with-dashes: output changed
1890 # Ran 1 tests, 0 skipped, 1 failed.
1894 # Ran 1 tests, 0 skipped, 1 failed.
1891 python hash seed: * (glob)
1895 python hash seed: * (glob)
1892 [1]
1896 [1]
1893
1897
1894 $ rt "test-cases-advanced-cases.t#casewith_-.chars"
1898 $ rt "test-cases-advanced-cases.t#casewith_-.chars"
1895 running 1 tests using 1 parallel processes
1899 running 1 tests using 1 parallel processes
1896
1900
1897 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1901 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1898 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#casewith_-.chars.err
1902 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#casewith_-.chars.err
1899 @@ -1,3 +1,3 @@
1903 @@ -1,3 +1,3 @@
1900 #testcases simple case-with-dashes casewith_-.chars
1904 #testcases simple case-with-dashes casewith_-.chars
1901 $ echo $TESTCASE
1905 $ echo $TESTCASE
1902 - simple
1906 - simple
1903 + casewith_-.chars
1907 + casewith_-.chars
1904
1908
1905 ERROR: test-cases-advanced-cases.t#casewith_-.chars output changed
1909 ERROR: test-cases-advanced-cases.t#casewith_-.chars output changed
1906 !
1910 !
1907 Failed test-cases-advanced-cases.t#casewith_-.chars: output changed
1911 Failed test-cases-advanced-cases.t#casewith_-.chars: output changed
1908 # Ran 1 tests, 0 skipped, 1 failed.
1912 # Ran 1 tests, 0 skipped, 1 failed.
1909 python hash seed: * (glob)
1913 python hash seed: * (glob)
1910 [1]
1914 [1]
1911
1915
1912 Test automatic pattern replacement
1916 Test automatic pattern replacement
1913 ==================================
1917 ==================================
1914
1918
1915 $ cat << EOF >> common-pattern.py
1919 $ cat << EOF >> common-pattern.py
1916 > substitutions = [
1920 > substitutions = [
1917 > (br'foo-(.*)\\b',
1921 > (br'foo-(.*)\\b',
1918 > br'\$XXX=\\1\$'),
1922 > br'\$XXX=\\1\$'),
1919 > (br'bar\\n',
1923 > (br'bar\\n',
1920 > br'\$YYY$\\n'),
1924 > br'\$YYY$\\n'),
1921 > ]
1925 > ]
1922 > EOF
1926 > EOF
1923
1927
1924 $ cat << EOF >> test-substitution.t
1928 $ cat << EOF >> test-substitution.t
1925 > $ echo foo-12
1929 > $ echo foo-12
1926 > \$XXX=12$
1930 > \$XXX=12$
1927 > $ echo foo-42
1931 > $ echo foo-42
1928 > \$XXX=42$
1932 > \$XXX=42$
1929 > $ echo bar prior
1933 > $ echo bar prior
1930 > bar prior
1934 > bar prior
1931 > $ echo lastbar
1935 > $ echo lastbar
1932 > last\$YYY$
1936 > last\$YYY$
1933 > $ echo foo-bar foo-baz
1937 > $ echo foo-bar foo-baz
1934 > EOF
1938 > EOF
1935
1939
1936 $ rt test-substitution.t
1940 $ rt test-substitution.t
1937 running 1 tests using 1 parallel processes
1941 running 1 tests using 1 parallel processes
1938
1942
1939 --- $TESTTMP/anothertests/cases/test-substitution.t
1943 --- $TESTTMP/anothertests/cases/test-substitution.t
1940 +++ $TESTTMP/anothertests/cases/test-substitution.t.err
1944 +++ $TESTTMP/anothertests/cases/test-substitution.t.err
1941 @@ -7,3 +7,4 @@
1945 @@ -7,3 +7,4 @@
1942 $ echo lastbar
1946 $ echo lastbar
1943 last$YYY$
1947 last$YYY$
1944 $ echo foo-bar foo-baz
1948 $ echo foo-bar foo-baz
1945 + $XXX=bar foo-baz$
1949 + $XXX=bar foo-baz$
1946
1950
1947 ERROR: test-substitution.t output changed
1951 ERROR: test-substitution.t output changed
1948 !
1952 !
1949 Failed test-substitution.t: output changed
1953 Failed test-substitution.t: output changed
1950 # Ran 1 tests, 0 skipped, 1 failed.
1954 # Ran 1 tests, 0 skipped, 1 failed.
1951 python hash seed: * (glob)
1955 python hash seed: * (glob)
1952 [1]
1956 [1]
1953
1957
1954 --extra-config-opt works
1958 --extra-config-opt works
1955
1959
1956 $ cat << EOF >> test-config-opt.t
1960 $ cat << EOF >> test-config-opt.t
1957 > $ hg init test-config-opt
1961 > $ hg init test-config-opt
1958 > $ hg -R test-config-opt purge
1962 > $ hg -R test-config-opt purge
1959 > $ echo "HGTESTEXTRAEXTENSIONS: \$HGTESTEXTRAEXTENSIONS"
1963 > $ echo "HGTESTEXTRAEXTENSIONS: \$HGTESTEXTRAEXTENSIONS"
1960 > HGTESTEXTRAEXTENSIONS: purge
1964 > HGTESTEXTRAEXTENSIONS: purge
1961 > EOF
1965 > EOF
1962
1966
1963 $ rt --extra-config-opt extensions.purge= \
1967 $ rt --extra-config-opt extensions.purge= \
1964 > --extra-config-opt not.an.extension=True test-config-opt.t
1968 > --extra-config-opt not.an.extension=True test-config-opt.t
1965 running 1 tests using 1 parallel processes
1969 running 1 tests using 1 parallel processes
1966 .
1970 .
1967 # Ran 1 tests, 0 skipped, 0 failed.
1971 # Ran 1 tests, 0 skipped, 0 failed.
1968
1972
1969 Test conditional output matching
1973 Test conditional output matching
1970 ================================
1974 ================================
1971
1975
1972 $ cat << EOF >> test-conditional-matching.t
1976 $ cat << EOF >> test-conditional-matching.t
1973 > #testcases foo bar
1977 > #testcases foo bar
1974 > $ echo richtig
1978 > $ echo richtig
1975 > richtig (true !)
1979 > richtig (true !)
1976 > $ echo falsch
1980 > $ echo falsch
1977 > falsch (false !)
1981 > falsch (false !)
1978 > #if foo
1982 > #if foo
1979 > $ echo arthur
1983 > $ echo arthur
1980 > arthur (bar !)
1984 > arthur (bar !)
1981 > #endif
1985 > #endif
1982 > $ echo celeste
1986 > $ echo celeste
1983 > celeste (foo !)
1987 > celeste (foo !)
1984 > $ echo zephir
1988 > $ echo zephir
1985 > zephir (bar !)
1989 > zephir (bar !)
1986 > EOF
1990 > EOF
1987
1991
1988 $ rt test-conditional-matching.t
1992 $ rt test-conditional-matching.t
1989 running 2 tests using 1 parallel processes
1993 running 2 tests using 1 parallel processes
1990
1994
1991 --- $TESTTMP/anothertests/cases/test-conditional-matching.t
1995 --- $TESTTMP/anothertests/cases/test-conditional-matching.t
1992 +++ $TESTTMP/anothertests/cases/test-conditional-matching.t#bar.err
1996 +++ $TESTTMP/anothertests/cases/test-conditional-matching.t#bar.err
1993 @@ -3,11 +3,13 @@
1997 @@ -3,11 +3,13 @@
1994 richtig (true !)
1998 richtig (true !)
1995 $ echo falsch
1999 $ echo falsch
1996 falsch (false !)
2000 falsch (false !)
1997 + falsch
2001 + falsch
1998 #if foo
2002 #if foo
1999 $ echo arthur
2003 $ echo arthur
2000 arthur \(bar !\) (re)
2004 arthur \(bar !\) (re)
2001 #endif
2005 #endif
2002 $ echo celeste
2006 $ echo celeste
2003 celeste \(foo !\) (re)
2007 celeste \(foo !\) (re)
2004 + celeste
2008 + celeste
2005 $ echo zephir
2009 $ echo zephir
2006 zephir \(bar !\) (re)
2010 zephir \(bar !\) (re)
2007
2011
2008 ERROR: test-conditional-matching.t#bar output changed
2012 ERROR: test-conditional-matching.t#bar output changed
2009 !
2013 !
2010 --- $TESTTMP/anothertests/cases/test-conditional-matching.t
2014 --- $TESTTMP/anothertests/cases/test-conditional-matching.t
2011 +++ $TESTTMP/anothertests/cases/test-conditional-matching.t#foo.err
2015 +++ $TESTTMP/anothertests/cases/test-conditional-matching.t#foo.err
2012 @@ -3,11 +3,14 @@
2016 @@ -3,11 +3,14 @@
2013 richtig (true !)
2017 richtig (true !)
2014 $ echo falsch
2018 $ echo falsch
2015 falsch (false !)
2019 falsch (false !)
2016 + falsch
2020 + falsch
2017 #if foo
2021 #if foo
2018 $ echo arthur
2022 $ echo arthur
2019 arthur \(bar !\) (re)
2023 arthur \(bar !\) (re)
2020 + arthur
2024 + arthur
2021 #endif
2025 #endif
2022 $ echo celeste
2026 $ echo celeste
2023 celeste \(foo !\) (re)
2027 celeste \(foo !\) (re)
2024 $ echo zephir
2028 $ echo zephir
2025 zephir \(bar !\) (re)
2029 zephir \(bar !\) (re)
2026 + zephir
2030 + zephir
2027
2031
2028 ERROR: test-conditional-matching.t#foo output changed
2032 ERROR: test-conditional-matching.t#foo output changed
2029 !
2033 !
2030 Failed test-conditional-matching.t#bar: output changed
2034 Failed test-conditional-matching.t#bar: output changed
2031 Failed test-conditional-matching.t#foo: output changed
2035 Failed test-conditional-matching.t#foo: output changed
2032 # Ran 2 tests, 0 skipped, 2 failed.
2036 # Ran 2 tests, 0 skipped, 2 failed.
2033 python hash seed: * (glob)
2037 python hash seed: * (glob)
2034 [1]
2038 [1]
@@ -1,287 +1,286 b''
1 #testcases safe normal
1 #testcases safe normal
2
2
3 #if safe
3 #if safe
4 $ echo "[format]" >> $HGRCPATH
4 $ echo "[format]" >> $HGRCPATH
5 $ echo "use-share-safe = True" >> $HGRCPATH
5 $ echo "use-share-safe = True" >> $HGRCPATH
6 #endif
6 #endif
7
7
8 $ echo "[extensions]" >> $HGRCPATH
8 $ echo "[extensions]" >> $HGRCPATH
9 $ echo "share = " >> $HGRCPATH
9 $ echo "share = " >> $HGRCPATH
10
10
11 prepare repo1
11 prepare repo1
12
12
13 $ hg init repo1
13 $ hg init repo1
14 $ cd repo1
14 $ cd repo1
15 $ echo a > a
15 $ echo a > a
16 $ hg commit -A -m'init'
16 $ hg commit -A -m'init'
17 adding a
17 adding a
18
18
19 share it
19 share it
20
20
21 $ cd ..
21 $ cd ..
22 $ hg share repo1 repo2
22 $ hg share repo1 repo2
23 updating working directory
23 updating working directory
24 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
25
25
26 share shouldn't have a store dir
26 share shouldn't have a store dir
27
27
28 $ cd repo2
28 $ cd repo2
29 $ test -d .hg/store
29 $ test -d .hg/store
30 [1]
30 [1]
31 $ hg root -Tjson | sed 's|\\\\|\\|g'
31 $ hg root -Tjson | sed 's|\\\\|\\|g'
32 [
32 [
33 {
33 {
34 "hgpath": "$TESTTMP/repo2/.hg",
34 "hgpath": "$TESTTMP/repo2/.hg",
35 "reporoot": "$TESTTMP/repo2",
35 "reporoot": "$TESTTMP/repo2",
36 "storepath": "$TESTTMP/repo1/.hg/store"
36 "storepath": "$TESTTMP/repo1/.hg/store"
37 }
37 }
38 ]
38 ]
39
39
40 share shouldn't have a full cache dir, original repo should
40 share shouldn't have a full cache dir, original repo should
41
41
42 $ hg branches
42 $ hg branches
43 default 0:d3873e73d99e
43 default 0:d3873e73d99e
44 $ hg tags
44 $ hg tags
45 tip 0:d3873e73d99e
45 tip 0:d3873e73d99e
46 $ test -d .hg/cache
46 $ test -d .hg/cache
47 [1]
47 [1]
48 $ ls -1 .hg/wcache || true
48 $ ls -1 .hg/wcache || true
49 checkisexec (execbit !)
49 checkisexec (execbit !)
50 checklink (symlink !)
50 checklink (symlink !)
51 checklink-target (symlink !)
51 checklink-target (symlink !)
52 manifestfulltextcache (reporevlogstore !)
52 manifestfulltextcache (reporevlogstore !)
53 $ ls -1 ../repo1/.hg/cache
53 $ ls -1 ../repo1/.hg/cache
54 branch2-served
54 branch2-served
55 rbc-names-v1
55 rbc-names-v1
56 rbc-revs-v1
56 rbc-revs-v1
57 tags2-visible
57 tags2-visible
58
58
59 Cloning a shared repo should pick up the full cache dir on the other hand.
59 Cloning a shared repo should pick up the full cache dir on the other hand.
60
60
61 $ hg clone . ../repo2-clone
61 $ hg clone . ../repo2-clone
62 updating to branch default
62 updating to branch default
63 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
63 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
64 $ ls -1 ../repo2-clone/.hg/cache
64 $ ls -1 ../repo2-clone/.hg/cache
65 branch2-base
65 branch2-base
66 branch2-immutable
66 branch2-immutable
67 branch2-served
67 branch2-served
68 branch2-served.hidden
68 branch2-served.hidden
69 branch2-visible
69 branch2-visible
70 branch2-visible-hidden
70 branch2-visible-hidden
71 hgtagsfnodes1
72 rbc-names-v1
71 rbc-names-v1
73 rbc-revs-v1
72 rbc-revs-v1
74 tags2
73 tags2
75 tags2-served
74 tags2-served
76 tags2-visible
75 tags2-visible
77
76
78 Some sed versions appends newline, some don't, and some just fails
77 Some sed versions appends newline, some don't, and some just fails
79
78
80 $ cat .hg/sharedpath; echo
79 $ cat .hg/sharedpath; echo
81 $TESTTMP/repo1/.hg
80 $TESTTMP/repo1/.hg
82
81
83 trailing newline on .hg/sharedpath is ok
82 trailing newline on .hg/sharedpath is ok
84 $ hg tip -q
83 $ hg tip -q
85 0:d3873e73d99e
84 0:d3873e73d99e
86 $ echo '' >> .hg/sharedpath
85 $ echo '' >> .hg/sharedpath
87 $ cat .hg/sharedpath
86 $ cat .hg/sharedpath
88 $TESTTMP/repo1/.hg
87 $TESTTMP/repo1/.hg
89 $ hg tip -q
88 $ hg tip -q
90 0:d3873e73d99e
89 0:d3873e73d99e
91
90
92 commit in shared clone
91 commit in shared clone
93
92
94 $ echo a >> a
93 $ echo a >> a
95 $ hg commit -m'change in shared clone'
94 $ hg commit -m'change in shared clone'
96
95
97 check original
96 check original
98
97
99 $ cd ../repo1
98 $ cd ../repo1
100 $ hg log
99 $ hg log
101 changeset: 1:8af4dc49db9e
100 changeset: 1:8af4dc49db9e
102 tag: tip
101 tag: tip
103 user: test
102 user: test
104 date: Thu Jan 01 00:00:00 1970 +0000
103 date: Thu Jan 01 00:00:00 1970 +0000
105 summary: change in shared clone
104 summary: change in shared clone
106
105
107 changeset: 0:d3873e73d99e
106 changeset: 0:d3873e73d99e
108 user: test
107 user: test
109 date: Thu Jan 01 00:00:00 1970 +0000
108 date: Thu Jan 01 00:00:00 1970 +0000
110 summary: init
109 summary: init
111
110
112 $ hg update
111 $ hg update
113 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
112 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
114 $ cat a # should be two lines of "a"
113 $ cat a # should be two lines of "a"
115 a
114 a
116 a
115 a
117
116
118 commit in original
117 commit in original
119
118
120 $ echo b > b
119 $ echo b > b
121 $ hg commit -A -m'another file'
120 $ hg commit -A -m'another file'
122 adding b
121 adding b
123
122
124 check in shared clone
123 check in shared clone
125
124
126 $ cd ../repo2
125 $ cd ../repo2
127 $ hg log
126 $ hg log
128 changeset: 2:c2e0ac586386
127 changeset: 2:c2e0ac586386
129 tag: tip
128 tag: tip
130 user: test
129 user: test
131 date: Thu Jan 01 00:00:00 1970 +0000
130 date: Thu Jan 01 00:00:00 1970 +0000
132 summary: another file
131 summary: another file
133
132
134 changeset: 1:8af4dc49db9e
133 changeset: 1:8af4dc49db9e
135 user: test
134 user: test
136 date: Thu Jan 01 00:00:00 1970 +0000
135 date: Thu Jan 01 00:00:00 1970 +0000
137 summary: change in shared clone
136 summary: change in shared clone
138
137
139 changeset: 0:d3873e73d99e
138 changeset: 0:d3873e73d99e
140 user: test
139 user: test
141 date: Thu Jan 01 00:00:00 1970 +0000
140 date: Thu Jan 01 00:00:00 1970 +0000
142 summary: init
141 summary: init
143
142
144 $ hg update
143 $ hg update
145 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
144 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
146 $ cat b # should exist with one "b"
145 $ cat b # should exist with one "b"
147 b
146 b
148
147
149 hg serve shared clone
148 hg serve shared clone
150
149
151 $ hg serve -n test -p $HGPORT -d --pid-file=hg.pid
150 $ hg serve -n test -p $HGPORT -d --pid-file=hg.pid
152 $ cat hg.pid >> $DAEMON_PIDS
151 $ cat hg.pid >> $DAEMON_PIDS
153 $ get-with-headers.py localhost:$HGPORT 'raw-file/'
152 $ get-with-headers.py localhost:$HGPORT 'raw-file/'
154 200 Script output follows
153 200 Script output follows
155
154
156
155
157 -rw-r--r-- 4 a
156 -rw-r--r-- 4 a
158 -rw-r--r-- 2 b
157 -rw-r--r-- 2 b
159
158
160
159
161 Cloning a shared repo via bundle2 results in a non-shared clone
160 Cloning a shared repo via bundle2 results in a non-shared clone
162
161
163 $ cd ..
162 $ cd ..
164 $ hg clone -q --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/`pwd`/repo2 cloned-via-bundle2
163 $ hg clone -q --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/`pwd`/repo2 cloned-via-bundle2
165 $ cat ./cloned-via-bundle2/.hg/requires | grep "shared"
164 $ cat ./cloned-via-bundle2/.hg/requires | grep "shared"
166 [1]
165 [1]
167 $ hg id --cwd cloned-via-bundle2 -r tip
166 $ hg id --cwd cloned-via-bundle2 -r tip
168 c2e0ac586386 tip
167 c2e0ac586386 tip
169 $ cd repo2
168 $ cd repo2
170
169
171 test unshare command
170 test unshare command
172
171
173 $ hg unshare
172 $ hg unshare
174 $ test -d .hg/store
173 $ test -d .hg/store
175 $ test -f .hg/sharedpath
174 $ test -f .hg/sharedpath
176 [1]
175 [1]
177 $ grep shared .hg/requires
176 $ grep shared .hg/requires
178 [1]
177 [1]
179 $ hg unshare
178 $ hg unshare
180 abort: this is not a shared repo
179 abort: this is not a shared repo
181 [255]
180 [255]
182
181
183 check that a change does not propagate
182 check that a change does not propagate
184
183
185 $ echo b >> b
184 $ echo b >> b
186 $ hg commit -m'change in unshared'
185 $ hg commit -m'change in unshared'
187 $ cd ../repo1
186 $ cd ../repo1
188 $ hg id -r tip
187 $ hg id -r tip
189 c2e0ac586386 tip
188 c2e0ac586386 tip
190
189
191 $ cd ..
190 $ cd ..
192
191
193
192
194 non largefiles repos won't enable largefiles
193 non largefiles repos won't enable largefiles
195
194
196 $ hg share --config extensions.largefiles= repo2 sharedrepo
195 $ hg share --config extensions.largefiles= repo2 sharedrepo
197 The fsmonitor extension is incompatible with the largefiles extension and has been disabled. (fsmonitor !)
196 The fsmonitor extension is incompatible with the largefiles extension and has been disabled. (fsmonitor !)
198 The fsmonitor extension is incompatible with the largefiles extension and has been disabled. (fsmonitor !)
197 The fsmonitor extension is incompatible with the largefiles extension and has been disabled. (fsmonitor !)
199 updating working directory
198 updating working directory
200 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
199 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
201 $ [ -f sharedrepo/.hg/hgrc ]
200 $ [ -f sharedrepo/.hg/hgrc ]
202 [1]
201 [1]
203
202
204 test shared clones using relative paths work
203 test shared clones using relative paths work
205
204
206 $ mkdir thisdir
205 $ mkdir thisdir
207 $ hg init thisdir/orig
206 $ hg init thisdir/orig
208 $ hg share -U thisdir/orig thisdir/abs
207 $ hg share -U thisdir/orig thisdir/abs
209 $ hg share -U --relative thisdir/abs thisdir/rel
208 $ hg share -U --relative thisdir/abs thisdir/rel
210 $ cat thisdir/rel/.hg/sharedpath
209 $ cat thisdir/rel/.hg/sharedpath
211 ../../orig/.hg (no-eol)
210 ../../orig/.hg (no-eol)
212 $ grep shared thisdir/*/.hg/requires
211 $ grep shared thisdir/*/.hg/requires
213 thisdir/abs/.hg/requires:shared
212 thisdir/abs/.hg/requires:shared
214 thisdir/rel/.hg/requires:relshared
213 thisdir/rel/.hg/requires:relshared
215 thisdir/rel/.hg/requires:shared
214 thisdir/rel/.hg/requires:shared
216
215
217 test that relative shared paths aren't relative to $PWD
216 test that relative shared paths aren't relative to $PWD
218
217
219 $ cd thisdir
218 $ cd thisdir
220 $ hg -R rel root
219 $ hg -R rel root
221 $TESTTMP/thisdir/rel
220 $TESTTMP/thisdir/rel
222 $ cd ..
221 $ cd ..
223
222
224 now test that relative paths really are relative, survive across
223 now test that relative paths really are relative, survive across
225 renames and changes of PWD
224 renames and changes of PWD
226
225
227 $ hg -R thisdir/abs root
226 $ hg -R thisdir/abs root
228 $TESTTMP/thisdir/abs
227 $TESTTMP/thisdir/abs
229 $ hg -R thisdir/rel root
228 $ hg -R thisdir/rel root
230 $TESTTMP/thisdir/rel
229 $TESTTMP/thisdir/rel
231 $ mv thisdir thatdir
230 $ mv thisdir thatdir
232 $ hg -R thatdir/abs root
231 $ hg -R thatdir/abs root
233 abort: .hg/sharedpath points to nonexistent directory $TESTTMP/thisdir/orig/.hg
232 abort: .hg/sharedpath points to nonexistent directory $TESTTMP/thisdir/orig/.hg
234 [255]
233 [255]
235 $ hg -R thatdir/rel root
234 $ hg -R thatdir/rel root
236 $TESTTMP/thatdir/rel
235 $TESTTMP/thatdir/rel
237
236
238 test unshare relshared repo
237 test unshare relshared repo
239
238
240 $ cd thatdir/rel
239 $ cd thatdir/rel
241 $ hg unshare
240 $ hg unshare
242 $ test -d .hg/store
241 $ test -d .hg/store
243 $ test -f .hg/sharedpath
242 $ test -f .hg/sharedpath
244 [1]
243 [1]
245 $ grep shared .hg/requires
244 $ grep shared .hg/requires
246 [1]
245 [1]
247 $ hg unshare
246 $ hg unshare
248 abort: this is not a shared repo
247 abort: this is not a shared repo
249 [255]
248 [255]
250 $ cd ../..
249 $ cd ../..
251
250
252 $ rm -r thatdir
251 $ rm -r thatdir
253
252
254 Demonstrate buggy behavior around requirements validation
253 Demonstrate buggy behavior around requirements validation
255 See comment in localrepo.py:makelocalrepository() for more.
254 See comment in localrepo.py:makelocalrepository() for more.
256
255
257 $ hg init sharenewrequires
256 $ hg init sharenewrequires
258 $ hg share sharenewrequires shareoldrequires
257 $ hg share sharenewrequires shareoldrequires
259 updating working directory
258 updating working directory
260 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
259 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
261
260
262 $ cat >> sharenewrequires/.hg/requires << EOF
261 $ cat >> sharenewrequires/.hg/requires << EOF
263 > missing-requirement
262 > missing-requirement
264 > EOF
263 > EOF
265
264
266 We cannot open the repo with the unknown requirement
265 We cannot open the repo with the unknown requirement
267
266
268 $ hg -R sharenewrequires status
267 $ hg -R sharenewrequires status
269 abort: repository requires features unknown to this Mercurial: missing-requirement
268 abort: repository requires features unknown to this Mercurial: missing-requirement
270 (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
269 (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
271 [255]
270 [255]
272
271
273 BUG: we don't get the same error when opening the shared repo pointing to it
272 BUG: we don't get the same error when opening the shared repo pointing to it
274
273
275 $ hg -R shareoldrequires status
274 $ hg -R shareoldrequires status
276
275
277 Explicitly kill daemons to let the test exit on Windows
276 Explicitly kill daemons to let the test exit on Windows
278
277
279 $ killdaemons.py
278 $ killdaemons.py
280
279
281 Test sharing a repository which was created with store requirement disable
280 Test sharing a repository which was created with store requirement disable
282
281
283 $ hg init nostore --config format.usestore=false
282 $ hg init nostore --config format.usestore=false
284 ignoring enabled 'format.use-share-safe' config because it is incompatible with disabled 'format.usestore' config (safe !)
283 ignoring enabled 'format.use-share-safe' config because it is incompatible with disabled 'format.usestore' config (safe !)
285 $ hg share nostore sharednostore
284 $ hg share nostore sharednostore
286 abort: cannot create shared repository as source was created with 'format.usestore' config disabled
285 abort: cannot create shared repository as source was created with 'format.usestore' config disabled
287 [255]
286 [255]
@@ -1,733 +1,733 b''
1 #testcases sshv1 sshv2
1 #testcases sshv1 sshv2
2
2
3 #if sshv2
3 #if sshv2
4 $ cat >> $HGRCPATH << EOF
4 $ cat >> $HGRCPATH << EOF
5 > [experimental]
5 > [experimental]
6 > sshpeer.advertise-v2 = true
6 > sshpeer.advertise-v2 = true
7 > sshserver.support-v2 = true
7 > sshserver.support-v2 = true
8 > EOF
8 > EOF
9 #endif
9 #endif
10
10
11 This test tries to exercise the ssh functionality with a dummy script
11 This test tries to exercise the ssh functionality with a dummy script
12
12
13 creating 'remote' repo
13 creating 'remote' repo
14
14
15 $ hg init remote
15 $ hg init remote
16 $ cd remote
16 $ cd remote
17 $ echo this > foo
17 $ echo this > foo
18 $ echo this > fooO
18 $ echo this > fooO
19 $ hg ci -A -m "init" foo fooO
19 $ hg ci -A -m "init" foo fooO
20
20
21 insert a closed branch (issue4428)
21 insert a closed branch (issue4428)
22
22
23 $ hg up null
23 $ hg up null
24 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
24 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
25 $ hg branch closed
25 $ hg branch closed
26 marked working directory as branch closed
26 marked working directory as branch closed
27 (branches are permanent and global, did you want a bookmark?)
27 (branches are permanent and global, did you want a bookmark?)
28 $ hg ci -mc0
28 $ hg ci -mc0
29 $ hg ci --close-branch -mc1
29 $ hg ci --close-branch -mc1
30 $ hg up -q default
30 $ hg up -q default
31
31
32 configure for serving
32 configure for serving
33
33
34 $ cat <<EOF > .hg/hgrc
34 $ cat <<EOF > .hg/hgrc
35 > [server]
35 > [server]
36 > uncompressed = True
36 > uncompressed = True
37 >
37 >
38 > [hooks]
38 > [hooks]
39 > changegroup = sh -c "printenv.py --line changegroup-in-remote 0 ../dummylog"
39 > changegroup = sh -c "printenv.py --line changegroup-in-remote 0 ../dummylog"
40 > EOF
40 > EOF
41 $ cd $TESTTMP
41 $ cd $TESTTMP
42
42
43 repo not found error
43 repo not found error
44
44
45 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/nonexistent local
45 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/nonexistent local
46 remote: abort: repository nonexistent not found
46 remote: abort: repository nonexistent not found
47 abort: no suitable response from remote hg
47 abort: no suitable response from remote hg
48 [255]
48 [255]
49 $ hg clone -q -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/nonexistent local
49 $ hg clone -q -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/nonexistent local
50 remote: abort: repository nonexistent not found
50 remote: abort: repository nonexistent not found
51 abort: no suitable response from remote hg
51 abort: no suitable response from remote hg
52 [255]
52 [255]
53
53
54 non-existent absolute path
54 non-existent absolute path
55
55
56 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/`pwd`/nonexistent local
56 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/`pwd`/nonexistent local
57 remote: abort: repository $TESTTMP/nonexistent not found
57 remote: abort: repository $TESTTMP/nonexistent not found
58 abort: no suitable response from remote hg
58 abort: no suitable response from remote hg
59 [255]
59 [255]
60
60
61 clone remote via stream
61 clone remote via stream
62
62
63 #if no-reposimplestore
63 #if no-reposimplestore
64
64
65 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --stream ssh://user@dummy/remote local-stream
65 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --stream ssh://user@dummy/remote local-stream
66 streaming all changes
66 streaming all changes
67 8 files to transfer, 827 bytes of data (no-zstd !)
67 8 files to transfer, 827 bytes of data (no-zstd !)
68 transferred 827 bytes in * seconds (*) (glob) (no-zstd !)
68 transferred 827 bytes in * seconds (*) (glob) (no-zstd !)
69 8 files to transfer, 846 bytes of data (zstd !)
69 8 files to transfer, 846 bytes of data (zstd !)
70 transferred * bytes in * seconds (* */sec) (glob) (zstd !)
70 transferred * bytes in * seconds (* */sec) (glob) (zstd !)
71 updating to branch default
71 updating to branch default
72 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
72 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
73 $ cd local-stream
73 $ cd local-stream
74 $ hg verify
74 $ hg verify
75 checking changesets
75 checking changesets
76 checking manifests
76 checking manifests
77 crosschecking files in changesets and manifests
77 crosschecking files in changesets and manifests
78 checking files
78 checking files
79 checked 3 changesets with 2 changes to 2 files
79 checked 3 changesets with 2 changes to 2 files
80 $ hg branches
80 $ hg branches
81 default 0:1160648e36ce
81 default 0:1160648e36ce
82 $ cd $TESTTMP
82 $ cd $TESTTMP
83
83
84 clone bookmarks via stream
84 clone bookmarks via stream
85
85
86 $ hg -R local-stream book mybook
86 $ hg -R local-stream book mybook
87 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --stream ssh://user@dummy/local-stream stream2
87 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --stream ssh://user@dummy/local-stream stream2
88 streaming all changes
88 streaming all changes
89 16 files to transfer, * of data (glob)
89 15 files to transfer, * of data (glob)
90 transferred * in * seconds (*) (glob)
90 transferred * in * seconds (*) (glob)
91 updating to branch default
91 updating to branch default
92 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
92 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
93 $ cd stream2
93 $ cd stream2
94 $ hg book
94 $ hg book
95 mybook 0:1160648e36ce
95 mybook 0:1160648e36ce
96 $ cd $TESTTMP
96 $ cd $TESTTMP
97 $ rm -rf local-stream stream2
97 $ rm -rf local-stream stream2
98
98
99 #endif
99 #endif
100
100
101 clone remote via pull
101 clone remote via pull
102
102
103 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote local
103 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote local
104 requesting all changes
104 requesting all changes
105 adding changesets
105 adding changesets
106 adding manifests
106 adding manifests
107 adding file changes
107 adding file changes
108 added 3 changesets with 2 changes to 2 files
108 added 3 changesets with 2 changes to 2 files
109 new changesets 1160648e36ce:ad076bfb429d
109 new changesets 1160648e36ce:ad076bfb429d
110 updating to branch default
110 updating to branch default
111 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
111 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
112
112
113 verify
113 verify
114
114
115 $ cd local
115 $ cd local
116 $ hg verify
116 $ hg verify
117 checking changesets
117 checking changesets
118 checking manifests
118 checking manifests
119 crosschecking files in changesets and manifests
119 crosschecking files in changesets and manifests
120 checking files
120 checking files
121 checked 3 changesets with 2 changes to 2 files
121 checked 3 changesets with 2 changes to 2 files
122 $ cat >> .hg/hgrc <<EOF
122 $ cat >> .hg/hgrc <<EOF
123 > [hooks]
123 > [hooks]
124 > changegroup = sh -c "printenv.py changegroup-in-local 0 ../dummylog"
124 > changegroup = sh -c "printenv.py changegroup-in-local 0 ../dummylog"
125 > EOF
125 > EOF
126
126
127 empty default pull
127 empty default pull
128
128
129 $ hg paths
129 $ hg paths
130 default = ssh://user@dummy/remote
130 default = ssh://user@dummy/remote
131 $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\""
131 $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\""
132 pulling from ssh://user@dummy/remote
132 pulling from ssh://user@dummy/remote
133 searching for changes
133 searching for changes
134 no changes found
134 no changes found
135
135
136 pull from wrong ssh URL
136 pull from wrong ssh URL
137
137
138 $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/doesnotexist
138 $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/doesnotexist
139 pulling from ssh://user@dummy/doesnotexist
139 pulling from ssh://user@dummy/doesnotexist
140 remote: abort: repository doesnotexist not found
140 remote: abort: repository doesnotexist not found
141 abort: no suitable response from remote hg
141 abort: no suitable response from remote hg
142 [255]
142 [255]
143
143
144 local change
144 local change
145
145
146 $ echo bleah > foo
146 $ echo bleah > foo
147 $ hg ci -m "add"
147 $ hg ci -m "add"
148
148
149 updating rc
149 updating rc
150
150
151 $ echo "default-push = ssh://user@dummy/remote" >> .hg/hgrc
151 $ echo "default-push = ssh://user@dummy/remote" >> .hg/hgrc
152 $ echo "[ui]" >> .hg/hgrc
152 $ echo "[ui]" >> .hg/hgrc
153 $ echo "ssh = \"$PYTHON\" \"$TESTDIR/dummyssh\"" >> .hg/hgrc
153 $ echo "ssh = \"$PYTHON\" \"$TESTDIR/dummyssh\"" >> .hg/hgrc
154
154
155 find outgoing
155 find outgoing
156
156
157 $ hg out ssh://user@dummy/remote
157 $ hg out ssh://user@dummy/remote
158 comparing with ssh://user@dummy/remote
158 comparing with ssh://user@dummy/remote
159 searching for changes
159 searching for changes
160 changeset: 3:a28a9d1a809c
160 changeset: 3:a28a9d1a809c
161 tag: tip
161 tag: tip
162 parent: 0:1160648e36ce
162 parent: 0:1160648e36ce
163 user: test
163 user: test
164 date: Thu Jan 01 00:00:00 1970 +0000
164 date: Thu Jan 01 00:00:00 1970 +0000
165 summary: add
165 summary: add
166
166
167
167
168 find incoming on the remote side
168 find incoming on the remote side
169
169
170 $ hg incoming -R ../remote -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/local
170 $ hg incoming -R ../remote -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/local
171 comparing with ssh://user@dummy/local
171 comparing with ssh://user@dummy/local
172 searching for changes
172 searching for changes
173 changeset: 3:a28a9d1a809c
173 changeset: 3:a28a9d1a809c
174 tag: tip
174 tag: tip
175 parent: 0:1160648e36ce
175 parent: 0:1160648e36ce
176 user: test
176 user: test
177 date: Thu Jan 01 00:00:00 1970 +0000
177 date: Thu Jan 01 00:00:00 1970 +0000
178 summary: add
178 summary: add
179
179
180
180
181 find incoming on the remote side (using absolute path)
181 find incoming on the remote side (using absolute path)
182
182
183 $ hg incoming -R ../remote -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/`pwd`"
183 $ hg incoming -R ../remote -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/`pwd`"
184 comparing with ssh://user@dummy/$TESTTMP/local
184 comparing with ssh://user@dummy/$TESTTMP/local
185 searching for changes
185 searching for changes
186 changeset: 3:a28a9d1a809c
186 changeset: 3:a28a9d1a809c
187 tag: tip
187 tag: tip
188 parent: 0:1160648e36ce
188 parent: 0:1160648e36ce
189 user: test
189 user: test
190 date: Thu Jan 01 00:00:00 1970 +0000
190 date: Thu Jan 01 00:00:00 1970 +0000
191 summary: add
191 summary: add
192
192
193
193
194 push
194 push
195
195
196 $ hg push
196 $ hg push
197 pushing to ssh://user@dummy/remote
197 pushing to ssh://user@dummy/remote
198 searching for changes
198 searching for changes
199 remote: adding changesets
199 remote: adding changesets
200 remote: adding manifests
200 remote: adding manifests
201 remote: adding file changes
201 remote: adding file changes
202 remote: added 1 changesets with 1 changes to 1 files
202 remote: added 1 changesets with 1 changes to 1 files
203 $ cd $TESTTMP/remote
203 $ cd $TESTTMP/remote
204
204
205 check remote tip
205 check remote tip
206
206
207 $ hg tip
207 $ hg tip
208 changeset: 3:a28a9d1a809c
208 changeset: 3:a28a9d1a809c
209 tag: tip
209 tag: tip
210 parent: 0:1160648e36ce
210 parent: 0:1160648e36ce
211 user: test
211 user: test
212 date: Thu Jan 01 00:00:00 1970 +0000
212 date: Thu Jan 01 00:00:00 1970 +0000
213 summary: add
213 summary: add
214
214
215 $ hg verify
215 $ hg verify
216 checking changesets
216 checking changesets
217 checking manifests
217 checking manifests
218 crosschecking files in changesets and manifests
218 crosschecking files in changesets and manifests
219 checking files
219 checking files
220 checked 4 changesets with 3 changes to 2 files
220 checked 4 changesets with 3 changes to 2 files
221 $ hg cat -r tip foo
221 $ hg cat -r tip foo
222 bleah
222 bleah
223 $ echo z > z
223 $ echo z > z
224 $ hg ci -A -m z z
224 $ hg ci -A -m z z
225 created new head
225 created new head
226
226
227 test pushkeys and bookmarks
227 test pushkeys and bookmarks
228
228
229 $ cd $TESTTMP/local
229 $ cd $TESTTMP/local
230 $ hg debugpushkey --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote namespaces
230 $ hg debugpushkey --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote namespaces
231 bookmarks
231 bookmarks
232 namespaces
232 namespaces
233 phases
233 phases
234 $ hg book foo -r 0
234 $ hg book foo -r 0
235 $ hg out -B --config paths.default=bogus://invalid --config paths.default:pushurl=`hg paths default`
235 $ hg out -B --config paths.default=bogus://invalid --config paths.default:pushurl=`hg paths default`
236 comparing with ssh://user@dummy/remote
236 comparing with ssh://user@dummy/remote
237 searching for changed bookmarks
237 searching for changed bookmarks
238 foo 1160648e36ce
238 foo 1160648e36ce
239 $ hg push -B foo
239 $ hg push -B foo
240 pushing to ssh://user@dummy/remote
240 pushing to ssh://user@dummy/remote
241 searching for changes
241 searching for changes
242 no changes found
242 no changes found
243 exporting bookmark foo
243 exporting bookmark foo
244 [1]
244 [1]
245 $ hg debugpushkey --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote bookmarks
245 $ hg debugpushkey --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote bookmarks
246 foo 1160648e36cec0054048a7edc4110c6f84fde594
246 foo 1160648e36cec0054048a7edc4110c6f84fde594
247 $ hg book -f foo
247 $ hg book -f foo
248 $ hg push --traceback
248 $ hg push --traceback
249 pushing to ssh://user@dummy/remote
249 pushing to ssh://user@dummy/remote
250 searching for changes
250 searching for changes
251 no changes found
251 no changes found
252 updating bookmark foo
252 updating bookmark foo
253 [1]
253 [1]
254 $ hg book -d foo
254 $ hg book -d foo
255 $ hg in -B
255 $ hg in -B
256 comparing with ssh://user@dummy/remote
256 comparing with ssh://user@dummy/remote
257 searching for changed bookmarks
257 searching for changed bookmarks
258 foo a28a9d1a809c
258 foo a28a9d1a809c
259 $ hg book -f -r 0 foo
259 $ hg book -f -r 0 foo
260 $ hg pull -B foo
260 $ hg pull -B foo
261 pulling from ssh://user@dummy/remote
261 pulling from ssh://user@dummy/remote
262 no changes found
262 no changes found
263 updating bookmark foo
263 updating bookmark foo
264 $ hg book -d foo
264 $ hg book -d foo
265 $ hg push -B foo
265 $ hg push -B foo
266 pushing to ssh://user@dummy/remote
266 pushing to ssh://user@dummy/remote
267 searching for changes
267 searching for changes
268 no changes found
268 no changes found
269 deleting remote bookmark foo
269 deleting remote bookmark foo
270 [1]
270 [1]
271
271
272 a bad, evil hook that prints to stdout
272 a bad, evil hook that prints to stdout
273
273
274 $ cat <<EOF > $TESTTMP/badhook
274 $ cat <<EOF > $TESTTMP/badhook
275 > import sys
275 > import sys
276 > sys.stdout.write("KABOOM\n")
276 > sys.stdout.write("KABOOM\n")
277 > sys.stdout.flush()
277 > sys.stdout.flush()
278 > EOF
278 > EOF
279
279
280 $ cat <<EOF > $TESTTMP/badpyhook.py
280 $ cat <<EOF > $TESTTMP/badpyhook.py
281 > import sys
281 > import sys
282 > def hook(ui, repo, hooktype, **kwargs):
282 > def hook(ui, repo, hooktype, **kwargs):
283 > sys.stdout.write("KABOOM IN PROCESS\n")
283 > sys.stdout.write("KABOOM IN PROCESS\n")
284 > sys.stdout.flush()
284 > sys.stdout.flush()
285 > EOF
285 > EOF
286
286
287 $ cat <<EOF >> ../remote/.hg/hgrc
287 $ cat <<EOF >> ../remote/.hg/hgrc
288 > [hooks]
288 > [hooks]
289 > changegroup.stdout = "$PYTHON" $TESTTMP/badhook
289 > changegroup.stdout = "$PYTHON" $TESTTMP/badhook
290 > changegroup.pystdout = python:$TESTTMP/badpyhook.py:hook
290 > changegroup.pystdout = python:$TESTTMP/badpyhook.py:hook
291 > EOF
291 > EOF
292 $ echo r > r
292 $ echo r > r
293 $ hg ci -A -m z r
293 $ hg ci -A -m z r
294
294
295 push should succeed even though it has an unexpected response
295 push should succeed even though it has an unexpected response
296
296
297 $ hg push
297 $ hg push
298 pushing to ssh://user@dummy/remote
298 pushing to ssh://user@dummy/remote
299 searching for changes
299 searching for changes
300 remote has heads on branch 'default' that are not known locally: 6c0482d977a3
300 remote has heads on branch 'default' that are not known locally: 6c0482d977a3
301 remote: adding changesets
301 remote: adding changesets
302 remote: adding manifests
302 remote: adding manifests
303 remote: adding file changes
303 remote: adding file changes
304 remote: added 1 changesets with 1 changes to 1 files
304 remote: added 1 changesets with 1 changes to 1 files
305 remote: KABOOM
305 remote: KABOOM
306 remote: KABOOM IN PROCESS
306 remote: KABOOM IN PROCESS
307 $ hg -R ../remote heads
307 $ hg -R ../remote heads
308 changeset: 5:1383141674ec
308 changeset: 5:1383141674ec
309 tag: tip
309 tag: tip
310 parent: 3:a28a9d1a809c
310 parent: 3:a28a9d1a809c
311 user: test
311 user: test
312 date: Thu Jan 01 00:00:00 1970 +0000
312 date: Thu Jan 01 00:00:00 1970 +0000
313 summary: z
313 summary: z
314
314
315 changeset: 4:6c0482d977a3
315 changeset: 4:6c0482d977a3
316 parent: 0:1160648e36ce
316 parent: 0:1160648e36ce
317 user: test
317 user: test
318 date: Thu Jan 01 00:00:00 1970 +0000
318 date: Thu Jan 01 00:00:00 1970 +0000
319 summary: z
319 summary: z
320
320
321
321
322 #if chg
322 #if chg
323
323
324 try again with remote chg, which should succeed as well
324 try again with remote chg, which should succeed as well
325
325
326 $ hg rollback -R ../remote
326 $ hg rollback -R ../remote
327 repository tip rolled back to revision 4 (undo serve)
327 repository tip rolled back to revision 4 (undo serve)
328
328
329 $ hg push --config ui.remotecmd=chg
329 $ hg push --config ui.remotecmd=chg
330 pushing to ssh://user@dummy/remote
330 pushing to ssh://user@dummy/remote
331 searching for changes
331 searching for changes
332 remote has heads on branch 'default' that are not known locally: 6c0482d977a3
332 remote has heads on branch 'default' that are not known locally: 6c0482d977a3
333 remote: adding changesets
333 remote: adding changesets
334 remote: adding manifests
334 remote: adding manifests
335 remote: adding file changes
335 remote: adding file changes
336 remote: added 1 changesets with 1 changes to 1 files (py3 !)
336 remote: added 1 changesets with 1 changes to 1 files (py3 !)
337 remote: KABOOM
337 remote: KABOOM
338 remote: KABOOM IN PROCESS
338 remote: KABOOM IN PROCESS
339 remote: added 1 changesets with 1 changes to 1 files (no-py3 !)
339 remote: added 1 changesets with 1 changes to 1 files (no-py3 !)
340
340
341 #endif
341 #endif
342
342
343 clone bookmarks
343 clone bookmarks
344
344
345 $ hg -R ../remote bookmark test
345 $ hg -R ../remote bookmark test
346 $ hg -R ../remote bookmarks
346 $ hg -R ../remote bookmarks
347 * test 4:6c0482d977a3
347 * test 4:6c0482d977a3
348 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote local-bookmarks
348 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote local-bookmarks
349 requesting all changes
349 requesting all changes
350 adding changesets
350 adding changesets
351 adding manifests
351 adding manifests
352 adding file changes
352 adding file changes
353 added 6 changesets with 5 changes to 4 files (+1 heads)
353 added 6 changesets with 5 changes to 4 files (+1 heads)
354 new changesets 1160648e36ce:1383141674ec
354 new changesets 1160648e36ce:1383141674ec
355 updating to branch default
355 updating to branch default
356 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
356 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
357 $ hg -R local-bookmarks bookmarks
357 $ hg -R local-bookmarks bookmarks
358 test 4:6c0482d977a3
358 test 4:6c0482d977a3
359
359
360 passwords in ssh urls are not supported
360 passwords in ssh urls are not supported
361 (we use a glob here because different Python versions give different
361 (we use a glob here because different Python versions give different
362 results here)
362 results here)
363
363
364 $ hg push ssh://user:erroneouspwd@dummy/remote
364 $ hg push ssh://user:erroneouspwd@dummy/remote
365 pushing to ssh://user:*@dummy/remote (glob)
365 pushing to ssh://user:*@dummy/remote (glob)
366 abort: password in URL not supported
366 abort: password in URL not supported
367 [255]
367 [255]
368
368
369 $ cd $TESTTMP
369 $ cd $TESTTMP
370
370
371 hide outer repo
371 hide outer repo
372 $ hg init
372 $ hg init
373
373
374 Test remote paths with spaces (issue2983):
374 Test remote paths with spaces (issue2983):
375
375
376 $ hg init --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo"
376 $ hg init --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo"
377 $ touch "$TESTTMP/a repo/test"
377 $ touch "$TESTTMP/a repo/test"
378 $ hg -R 'a repo' commit -A -m "test"
378 $ hg -R 'a repo' commit -A -m "test"
379 adding test
379 adding test
380 $ hg -R 'a repo' tag tag
380 $ hg -R 'a repo' tag tag
381 $ hg id --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo"
381 $ hg id --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo"
382 73649e48688a
382 73649e48688a
383
383
384 $ hg id --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo#noNoNO"
384 $ hg id --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo#noNoNO"
385 abort: unknown revision 'noNoNO'
385 abort: unknown revision 'noNoNO'
386 [255]
386 [255]
387
387
388 Test (non-)escaping of remote paths with spaces when cloning (issue3145):
388 Test (non-)escaping of remote paths with spaces when cloning (issue3145):
389
389
390 $ hg clone --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo"
390 $ hg clone --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo"
391 destination directory: a repo
391 destination directory: a repo
392 abort: destination 'a repo' is not empty
392 abort: destination 'a repo' is not empty
393 [10]
393 [10]
394
394
395 #if no-rhg
395 #if no-rhg
396 Make sure hg is really paranoid in serve --stdio mode. It used to be
396 Make sure hg is really paranoid in serve --stdio mode. It used to be
397 possible to get a debugger REPL by specifying a repo named --debugger.
397 possible to get a debugger REPL by specifying a repo named --debugger.
398 $ hg -R --debugger serve --stdio
398 $ hg -R --debugger serve --stdio
399 abort: potentially unsafe serve --stdio invocation: ['-R', '--debugger', 'serve', '--stdio']
399 abort: potentially unsafe serve --stdio invocation: ['-R', '--debugger', 'serve', '--stdio']
400 [255]
400 [255]
401 $ hg -R --config=ui.debugger=yes serve --stdio
401 $ hg -R --config=ui.debugger=yes serve --stdio
402 abort: potentially unsafe serve --stdio invocation: ['-R', '--config=ui.debugger=yes', 'serve', '--stdio']
402 abort: potentially unsafe serve --stdio invocation: ['-R', '--config=ui.debugger=yes', 'serve', '--stdio']
403 [255]
403 [255]
404 Abbreviations of 'serve' also don't work, to avoid shenanigans.
404 Abbreviations of 'serve' also don't work, to avoid shenanigans.
405 $ hg -R narf serv --stdio
405 $ hg -R narf serv --stdio
406 abort: potentially unsafe serve --stdio invocation: ['-R', 'narf', 'serv', '--stdio']
406 abort: potentially unsafe serve --stdio invocation: ['-R', 'narf', 'serv', '--stdio']
407 [255]
407 [255]
408 #else
408 #else
409 rhg aborts early on -R without a repository at that path
409 rhg aborts early on -R without a repository at that path
410 $ hg -R --debugger serve --stdio
410 $ hg -R --debugger serve --stdio
411 abort: potentially unsafe serve --stdio invocation: ['-R', '--debugger', 'serve', '--stdio'] (missing-correct-output !)
411 abort: potentially unsafe serve --stdio invocation: ['-R', '--debugger', 'serve', '--stdio'] (missing-correct-output !)
412 abort: repository --debugger not found (known-bad-output !)
412 abort: repository --debugger not found (known-bad-output !)
413 [255]
413 [255]
414 $ hg -R --config=ui.debugger=yes serve --stdio
414 $ hg -R --config=ui.debugger=yes serve --stdio
415 abort: potentially unsafe serve --stdio invocation: ['-R', '--config=ui.debugger=yes', 'serve', '--stdio'] (missing-correct-output !)
415 abort: potentially unsafe serve --stdio invocation: ['-R', '--config=ui.debugger=yes', 'serve', '--stdio'] (missing-correct-output !)
416 abort: repository --config=ui.debugger=yes not found (known-bad-output !)
416 abort: repository --config=ui.debugger=yes not found (known-bad-output !)
417 [255]
417 [255]
418 $ hg -R narf serv --stdio
418 $ hg -R narf serv --stdio
419 abort: potentially unsafe serve --stdio invocation: ['-R', 'narf', 'serv', '--stdio'] (missing-correct-output !)
419 abort: potentially unsafe serve --stdio invocation: ['-R', 'narf', 'serv', '--stdio'] (missing-correct-output !)
420 abort: repository narf not found (known-bad-output !)
420 abort: repository narf not found (known-bad-output !)
421 [255]
421 [255]
422 If the repo does exist, rhg finds an unsupported command and falls back to Python
422 If the repo does exist, rhg finds an unsupported command and falls back to Python
423 which still does the right thing
423 which still does the right thing
424 $ hg init narf
424 $ hg init narf
425 $ hg -R narf serv --stdio
425 $ hg -R narf serv --stdio
426 abort: potentially unsafe serve --stdio invocation: ['-R', 'narf', 'serv', '--stdio']
426 abort: potentially unsafe serve --stdio invocation: ['-R', 'narf', 'serv', '--stdio']
427 [255]
427 [255]
428 #endif
428 #endif
429
429
430 Test hg-ssh using a helper script that will restore PYTHONPATH (which might
430 Test hg-ssh using a helper script that will restore PYTHONPATH (which might
431 have been cleared by a hg.exe wrapper) and invoke hg-ssh with the right
431 have been cleared by a hg.exe wrapper) and invoke hg-ssh with the right
432 parameters:
432 parameters:
433
433
434 $ cat > ssh.sh << EOF
434 $ cat > ssh.sh << EOF
435 > userhost="\$1"
435 > userhost="\$1"
436 > SSH_ORIGINAL_COMMAND="\$2"
436 > SSH_ORIGINAL_COMMAND="\$2"
437 > export SSH_ORIGINAL_COMMAND
437 > export SSH_ORIGINAL_COMMAND
438 > PYTHONPATH="$PYTHONPATH"
438 > PYTHONPATH="$PYTHONPATH"
439 > export PYTHONPATH
439 > export PYTHONPATH
440 > "$PYTHON" "$TESTDIR/../contrib/hg-ssh" "$TESTTMP/a repo"
440 > "$PYTHON" "$TESTDIR/../contrib/hg-ssh" "$TESTTMP/a repo"
441 > EOF
441 > EOF
442
442
443 $ hg id --ssh "sh ssh.sh" "ssh://user@dummy/a repo"
443 $ hg id --ssh "sh ssh.sh" "ssh://user@dummy/a repo"
444 73649e48688a
444 73649e48688a
445
445
446 $ hg id --ssh "sh ssh.sh" "ssh://user@dummy/a'repo"
446 $ hg id --ssh "sh ssh.sh" "ssh://user@dummy/a'repo"
447 remote: Illegal repository "$TESTTMP/a'repo"
447 remote: Illegal repository "$TESTTMP/a'repo"
448 abort: no suitable response from remote hg
448 abort: no suitable response from remote hg
449 [255]
449 [255]
450
450
451 $ hg id --ssh "sh ssh.sh" --remotecmd hacking "ssh://user@dummy/a'repo"
451 $ hg id --ssh "sh ssh.sh" --remotecmd hacking "ssh://user@dummy/a'repo"
452 remote: Illegal command "hacking -R 'a'\''repo' serve --stdio"
452 remote: Illegal command "hacking -R 'a'\''repo' serve --stdio"
453 abort: no suitable response from remote hg
453 abort: no suitable response from remote hg
454 [255]
454 [255]
455
455
456 $ SSH_ORIGINAL_COMMAND="'hg' -R 'a'repo' serve --stdio" "$PYTHON" "$TESTDIR/../contrib/hg-ssh"
456 $ SSH_ORIGINAL_COMMAND="'hg' -R 'a'repo' serve --stdio" "$PYTHON" "$TESTDIR/../contrib/hg-ssh"
457 Illegal command "'hg' -R 'a'repo' serve --stdio": No closing quotation
457 Illegal command "'hg' -R 'a'repo' serve --stdio": No closing quotation
458 [255]
458 [255]
459
459
460 Test hg-ssh in read-only mode:
460 Test hg-ssh in read-only mode:
461
461
462 $ cat > ssh.sh << EOF
462 $ cat > ssh.sh << EOF
463 > userhost="\$1"
463 > userhost="\$1"
464 > SSH_ORIGINAL_COMMAND="\$2"
464 > SSH_ORIGINAL_COMMAND="\$2"
465 > export SSH_ORIGINAL_COMMAND
465 > export SSH_ORIGINAL_COMMAND
466 > PYTHONPATH="$PYTHONPATH"
466 > PYTHONPATH="$PYTHONPATH"
467 > export PYTHONPATH
467 > export PYTHONPATH
468 > "$PYTHON" "$TESTDIR/../contrib/hg-ssh" --read-only "$TESTTMP/remote"
468 > "$PYTHON" "$TESTDIR/../contrib/hg-ssh" --read-only "$TESTTMP/remote"
469 > EOF
469 > EOF
470
470
471 $ hg clone --ssh "sh ssh.sh" "ssh://user@dummy/$TESTTMP/remote" read-only-local
471 $ hg clone --ssh "sh ssh.sh" "ssh://user@dummy/$TESTTMP/remote" read-only-local
472 requesting all changes
472 requesting all changes
473 adding changesets
473 adding changesets
474 adding manifests
474 adding manifests
475 adding file changes
475 adding file changes
476 added 6 changesets with 5 changes to 4 files (+1 heads)
476 added 6 changesets with 5 changes to 4 files (+1 heads)
477 new changesets 1160648e36ce:1383141674ec
477 new changesets 1160648e36ce:1383141674ec
478 updating to branch default
478 updating to branch default
479 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
479 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
480
480
481 $ cd read-only-local
481 $ cd read-only-local
482 $ echo "baz" > bar
482 $ echo "baz" > bar
483 $ hg ci -A -m "unpushable commit" bar
483 $ hg ci -A -m "unpushable commit" bar
484 $ hg push --ssh "sh ../ssh.sh"
484 $ hg push --ssh "sh ../ssh.sh"
485 pushing to ssh://user@dummy/*/remote (glob)
485 pushing to ssh://user@dummy/*/remote (glob)
486 searching for changes
486 searching for changes
487 remote: Permission denied
487 remote: Permission denied
488 remote: pretxnopen.hg-ssh hook failed
488 remote: pretxnopen.hg-ssh hook failed
489 abort: push failed on remote
489 abort: push failed on remote
490 [100]
490 [100]
491
491
492 $ cd $TESTTMP
492 $ cd $TESTTMP
493
493
494 stderr from remote commands should be printed before stdout from local code (issue4336)
494 stderr from remote commands should be printed before stdout from local code (issue4336)
495
495
496 $ hg clone remote stderr-ordering
496 $ hg clone remote stderr-ordering
497 updating to branch default
497 updating to branch default
498 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
498 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
499 $ cd stderr-ordering
499 $ cd stderr-ordering
500 $ cat >> localwrite.py << EOF
500 $ cat >> localwrite.py << EOF
501 > from mercurial import exchange, extensions
501 > from mercurial import exchange, extensions
502 >
502 >
503 > def wrappedpush(orig, repo, *args, **kwargs):
503 > def wrappedpush(orig, repo, *args, **kwargs):
504 > res = orig(repo, *args, **kwargs)
504 > res = orig(repo, *args, **kwargs)
505 > repo.ui.write(b'local stdout\n')
505 > repo.ui.write(b'local stdout\n')
506 > repo.ui.flush()
506 > repo.ui.flush()
507 > return res
507 > return res
508 >
508 >
509 > def extsetup(ui):
509 > def extsetup(ui):
510 > extensions.wrapfunction(exchange, b'push', wrappedpush)
510 > extensions.wrapfunction(exchange, b'push', wrappedpush)
511 > EOF
511 > EOF
512
512
513 $ cat >> .hg/hgrc << EOF
513 $ cat >> .hg/hgrc << EOF
514 > [paths]
514 > [paths]
515 > default-push = ssh://user@dummy/remote
515 > default-push = ssh://user@dummy/remote
516 > [ui]
516 > [ui]
517 > ssh = "$PYTHON" "$TESTDIR/dummyssh"
517 > ssh = "$PYTHON" "$TESTDIR/dummyssh"
518 > [extensions]
518 > [extensions]
519 > localwrite = localwrite.py
519 > localwrite = localwrite.py
520 > EOF
520 > EOF
521
521
522 $ echo localwrite > foo
522 $ echo localwrite > foo
523 $ hg commit -m 'testing localwrite'
523 $ hg commit -m 'testing localwrite'
524 $ hg push
524 $ hg push
525 pushing to ssh://user@dummy/remote
525 pushing to ssh://user@dummy/remote
526 searching for changes
526 searching for changes
527 remote: adding changesets
527 remote: adding changesets
528 remote: adding manifests
528 remote: adding manifests
529 remote: adding file changes
529 remote: adding file changes
530 remote: added 1 changesets with 1 changes to 1 files
530 remote: added 1 changesets with 1 changes to 1 files
531 remote: KABOOM
531 remote: KABOOM
532 remote: KABOOM IN PROCESS
532 remote: KABOOM IN PROCESS
533 local stdout
533 local stdout
534
534
535 debug output
535 debug output
536
536
537 $ hg pull --debug ssh://user@dummy/remote --config devel.debug.peer-request=yes
537 $ hg pull --debug ssh://user@dummy/remote --config devel.debug.peer-request=yes
538 pulling from ssh://user@dummy/remote
538 pulling from ssh://user@dummy/remote
539 running .* ".*/dummyssh" ['"]user@dummy['"] ('|")hg -R remote serve --stdio('|") (re)
539 running .* ".*/dummyssh" ['"]user@dummy['"] ('|")hg -R remote serve --stdio('|") (re)
540 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
540 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
541 devel-peer-request: hello+between
541 devel-peer-request: hello+between
542 devel-peer-request: pairs: 81 bytes
542 devel-peer-request: pairs: 81 bytes
543 sending hello command
543 sending hello command
544 sending between command
544 sending between command
545 remote: 444 (sshv1 no-rust !)
545 remote: 444 (sshv1 no-rust !)
546 remote: 463 (sshv1 rust !)
546 remote: 463 (sshv1 rust !)
547 protocol upgraded to exp-ssh-v2-0003 (sshv2 !)
547 protocol upgraded to exp-ssh-v2-0003 (sshv2 !)
548 remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-rust !)
548 remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-rust !)
549 remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,persistent-nodemap,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (rust !)
549 remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,persistent-nodemap,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (rust !)
550 remote: 1 (sshv1 !)
550 remote: 1 (sshv1 !)
551 devel-peer-request: protocaps
551 devel-peer-request: protocaps
552 devel-peer-request: caps: * bytes (glob)
552 devel-peer-request: caps: * bytes (glob)
553 sending protocaps command
553 sending protocaps command
554 query 1; heads
554 query 1; heads
555 devel-peer-request: batched-content
555 devel-peer-request: batched-content
556 devel-peer-request: - heads (0 arguments)
556 devel-peer-request: - heads (0 arguments)
557 devel-peer-request: - known (1 arguments)
557 devel-peer-request: - known (1 arguments)
558 devel-peer-request: batch
558 devel-peer-request: batch
559 devel-peer-request: cmds: 141 bytes
559 devel-peer-request: cmds: 141 bytes
560 sending batch command
560 sending batch command
561 searching for changes
561 searching for changes
562 all remote heads known locally
562 all remote heads known locally
563 no changes found
563 no changes found
564 devel-peer-request: getbundle
564 devel-peer-request: getbundle
565 devel-peer-request: bookmarks: 1 bytes
565 devel-peer-request: bookmarks: 1 bytes
566 devel-peer-request: bundlecaps: 270 bytes
566 devel-peer-request: bundlecaps: 270 bytes
567 devel-peer-request: cg: 1 bytes
567 devel-peer-request: cg: 1 bytes
568 devel-peer-request: common: 122 bytes
568 devel-peer-request: common: 122 bytes
569 devel-peer-request: heads: 122 bytes
569 devel-peer-request: heads: 122 bytes
570 devel-peer-request: listkeys: 9 bytes
570 devel-peer-request: listkeys: 9 bytes
571 devel-peer-request: phases: 1 bytes
571 devel-peer-request: phases: 1 bytes
572 sending getbundle command
572 sending getbundle command
573 bundle2-input-bundle: with-transaction
573 bundle2-input-bundle: with-transaction
574 bundle2-input-part: "bookmarks" supported
574 bundle2-input-part: "bookmarks" supported
575 bundle2-input-part: total payload size 26
575 bundle2-input-part: total payload size 26
576 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
576 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
577 bundle2-input-part: total payload size 45
577 bundle2-input-part: total payload size 45
578 bundle2-input-part: "phase-heads" supported
578 bundle2-input-part: "phase-heads" supported
579 bundle2-input-part: total payload size 72
579 bundle2-input-part: total payload size 72
580 bundle2-input-bundle: 3 parts total
580 bundle2-input-bundle: 3 parts total
581 checking for updated bookmarks
581 checking for updated bookmarks
582
582
583 $ cd $TESTTMP
583 $ cd $TESTTMP
584
584
585 $ cat dummylog
585 $ cat dummylog
586 Got arguments 1:user@dummy 2:hg -R nonexistent serve --stdio
586 Got arguments 1:user@dummy 2:hg -R nonexistent serve --stdio
587 Got arguments 1:user@dummy 2:hg -R nonexistent serve --stdio
587 Got arguments 1:user@dummy 2:hg -R nonexistent serve --stdio
588 Got arguments 1:user@dummy 2:hg -R $TESTTMP/nonexistent serve --stdio
588 Got arguments 1:user@dummy 2:hg -R $TESTTMP/nonexistent serve --stdio
589 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
589 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
590 Got arguments 1:user@dummy 2:hg -R local-stream serve --stdio (no-reposimplestore !)
590 Got arguments 1:user@dummy 2:hg -R local-stream serve --stdio (no-reposimplestore !)
591 Got arguments 1:user@dummy 2:hg -R remote serve --stdio (no-reposimplestore !)
591 Got arguments 1:user@dummy 2:hg -R remote serve --stdio (no-reposimplestore !)
592 Got arguments 1:user@dummy 2:hg -R remote serve --stdio (no-reposimplestore !)
592 Got arguments 1:user@dummy 2:hg -R remote serve --stdio (no-reposimplestore !)
593 Got arguments 1:user@dummy 2:hg -R doesnotexist serve --stdio
593 Got arguments 1:user@dummy 2:hg -R doesnotexist serve --stdio
594 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
594 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
595 Got arguments 1:user@dummy 2:hg -R local serve --stdio
595 Got arguments 1:user@dummy 2:hg -R local serve --stdio
596 Got arguments 1:user@dummy 2:hg -R $TESTTMP/local serve --stdio
596 Got arguments 1:user@dummy 2:hg -R $TESTTMP/local serve --stdio
597 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
597 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
598 changegroup-in-remote hook: HG_BUNDLE2=1
598 changegroup-in-remote hook: HG_BUNDLE2=1
599 HG_HOOKNAME=changegroup
599 HG_HOOKNAME=changegroup
600 HG_HOOKTYPE=changegroup
600 HG_HOOKTYPE=changegroup
601 HG_NODE=a28a9d1a809cab7d4e2fde4bee738a9ede948b60
601 HG_NODE=a28a9d1a809cab7d4e2fde4bee738a9ede948b60
602 HG_NODE_LAST=a28a9d1a809cab7d4e2fde4bee738a9ede948b60
602 HG_NODE_LAST=a28a9d1a809cab7d4e2fde4bee738a9ede948b60
603 HG_SOURCE=serve
603 HG_SOURCE=serve
604 HG_TXNID=TXN:$ID$
604 HG_TXNID=TXN:$ID$
605 HG_TXNNAME=serve
605 HG_TXNNAME=serve
606 HG_URL=remote:ssh:$LOCALIP
606 HG_URL=remote:ssh:$LOCALIP
607
607
608 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
608 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
609 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
609 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
610 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
610 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
611 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
611 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
612 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
612 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
613 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
613 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
614 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
614 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
615 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
615 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
616 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
616 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
617 changegroup-in-remote hook: HG_BUNDLE2=1
617 changegroup-in-remote hook: HG_BUNDLE2=1
618 HG_HOOKNAME=changegroup
618 HG_HOOKNAME=changegroup
619 HG_HOOKTYPE=changegroup
619 HG_HOOKTYPE=changegroup
620 HG_NODE=1383141674ec756a6056f6a9097618482fe0f4a6
620 HG_NODE=1383141674ec756a6056f6a9097618482fe0f4a6
621 HG_NODE_LAST=1383141674ec756a6056f6a9097618482fe0f4a6
621 HG_NODE_LAST=1383141674ec756a6056f6a9097618482fe0f4a6
622 HG_SOURCE=serve
622 HG_SOURCE=serve
623 HG_TXNID=TXN:$ID$
623 HG_TXNID=TXN:$ID$
624 HG_TXNNAME=serve
624 HG_TXNNAME=serve
625 HG_URL=remote:ssh:$LOCALIP
625 HG_URL=remote:ssh:$LOCALIP
626
626
627 Got arguments 1:user@dummy 2:chg -R remote serve --stdio (chg !)
627 Got arguments 1:user@dummy 2:chg -R remote serve --stdio (chg !)
628 changegroup-in-remote hook: HG_BUNDLE2=1 (chg !)
628 changegroup-in-remote hook: HG_BUNDLE2=1 (chg !)
629 HG_HOOKNAME=changegroup (chg !)
629 HG_HOOKNAME=changegroup (chg !)
630 HG_HOOKTYPE=changegroup (chg !)
630 HG_HOOKTYPE=changegroup (chg !)
631 HG_NODE=1383141674ec756a6056f6a9097618482fe0f4a6 (chg !)
631 HG_NODE=1383141674ec756a6056f6a9097618482fe0f4a6 (chg !)
632 HG_NODE_LAST=1383141674ec756a6056f6a9097618482fe0f4a6 (chg !)
632 HG_NODE_LAST=1383141674ec756a6056f6a9097618482fe0f4a6 (chg !)
633 HG_SOURCE=serve (chg !)
633 HG_SOURCE=serve (chg !)
634 HG_TXNID=TXN:$ID$ (chg !)
634 HG_TXNID=TXN:$ID$ (chg !)
635 HG_TXNNAME=serve (chg !)
635 HG_TXNNAME=serve (chg !)
636 HG_URL=remote:ssh:$LOCALIP (chg !)
636 HG_URL=remote:ssh:$LOCALIP (chg !)
637 (chg !)
637 (chg !)
638 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
638 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
639 Got arguments 1:user@dummy 2:hg init 'a repo'
639 Got arguments 1:user@dummy 2:hg init 'a repo'
640 Got arguments 1:user@dummy 2:hg -R 'a repo' serve --stdio
640 Got arguments 1:user@dummy 2:hg -R 'a repo' serve --stdio
641 Got arguments 1:user@dummy 2:hg -R 'a repo' serve --stdio
641 Got arguments 1:user@dummy 2:hg -R 'a repo' serve --stdio
642 Got arguments 1:user@dummy 2:hg -R 'a repo' serve --stdio
642 Got arguments 1:user@dummy 2:hg -R 'a repo' serve --stdio
643 Got arguments 1:user@dummy 2:hg -R 'a repo' serve --stdio
643 Got arguments 1:user@dummy 2:hg -R 'a repo' serve --stdio
644 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
644 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
645 changegroup-in-remote hook: HG_BUNDLE2=1
645 changegroup-in-remote hook: HG_BUNDLE2=1
646 HG_HOOKNAME=changegroup
646 HG_HOOKNAME=changegroup
647 HG_HOOKTYPE=changegroup
647 HG_HOOKTYPE=changegroup
648 HG_NODE=65c38f4125f9602c8db4af56530cc221d93b8ef8
648 HG_NODE=65c38f4125f9602c8db4af56530cc221d93b8ef8
649 HG_NODE_LAST=65c38f4125f9602c8db4af56530cc221d93b8ef8
649 HG_NODE_LAST=65c38f4125f9602c8db4af56530cc221d93b8ef8
650 HG_SOURCE=serve
650 HG_SOURCE=serve
651 HG_TXNID=TXN:$ID$
651 HG_TXNID=TXN:$ID$
652 HG_TXNNAME=serve
652 HG_TXNNAME=serve
653 HG_URL=remote:ssh:$LOCALIP
653 HG_URL=remote:ssh:$LOCALIP
654
654
655 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
655 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
656
656
657
657
658 remote hook failure is attributed to remote
658 remote hook failure is attributed to remote
659
659
660 $ cat > $TESTTMP/failhook << EOF
660 $ cat > $TESTTMP/failhook << EOF
661 > def hook(ui, repo, **kwargs):
661 > def hook(ui, repo, **kwargs):
662 > ui.write(b'hook failure!\n')
662 > ui.write(b'hook failure!\n')
663 > ui.flush()
663 > ui.flush()
664 > return 1
664 > return 1
665 > EOF
665 > EOF
666
666
667 $ echo "pretxnchangegroup.fail = python:$TESTTMP/failhook:hook" >> remote/.hg/hgrc
667 $ echo "pretxnchangegroup.fail = python:$TESTTMP/failhook:hook" >> remote/.hg/hgrc
668
668
669 $ hg -q --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" clone ssh://user@dummy/remote hookout
669 $ hg -q --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" clone ssh://user@dummy/remote hookout
670 $ cd hookout
670 $ cd hookout
671 $ touch hookfailure
671 $ touch hookfailure
672 $ hg -q commit -A -m 'remote hook failure'
672 $ hg -q commit -A -m 'remote hook failure'
673 $ hg --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" push
673 $ hg --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" push
674 pushing to ssh://user@dummy/remote
674 pushing to ssh://user@dummy/remote
675 searching for changes
675 searching for changes
676 remote: adding changesets
676 remote: adding changesets
677 remote: adding manifests
677 remote: adding manifests
678 remote: adding file changes
678 remote: adding file changes
679 remote: hook failure!
679 remote: hook failure!
680 remote: transaction abort!
680 remote: transaction abort!
681 remote: rollback completed
681 remote: rollback completed
682 remote: pretxnchangegroup.fail hook failed
682 remote: pretxnchangegroup.fail hook failed
683 abort: push failed on remote
683 abort: push failed on remote
684 [100]
684 [100]
685
685
686 abort during pull is properly reported as such
686 abort during pull is properly reported as such
687
687
688 $ echo morefoo >> ../remote/foo
688 $ echo morefoo >> ../remote/foo
689 $ hg -R ../remote commit --message "more foo to be pulled"
689 $ hg -R ../remote commit --message "more foo to be pulled"
690 $ cat >> ../remote/.hg/hgrc << EOF
690 $ cat >> ../remote/.hg/hgrc << EOF
691 > [extensions]
691 > [extensions]
692 > crash = ${TESTDIR}/crashgetbundler.py
692 > crash = ${TESTDIR}/crashgetbundler.py
693 > EOF
693 > EOF
694 $ hg --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" pull
694 $ hg --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" pull
695 pulling from ssh://user@dummy/remote
695 pulling from ssh://user@dummy/remote
696 searching for changes
696 searching for changes
697 remote: abort: this is an exercise
697 remote: abort: this is an exercise
698 abort: pull failed on remote
698 abort: pull failed on remote
699 [100]
699 [100]
700
700
701 abort with no error hint when there is a ssh problem when pulling
701 abort with no error hint when there is a ssh problem when pulling
702
702
703 $ hg pull ssh://brokenrepository -e "\"$PYTHON\" \"$TESTDIR/dummyssh\""
703 $ hg pull ssh://brokenrepository -e "\"$PYTHON\" \"$TESTDIR/dummyssh\""
704 pulling from ssh://brokenrepository/
704 pulling from ssh://brokenrepository/
705 abort: no suitable response from remote hg
705 abort: no suitable response from remote hg
706 [255]
706 [255]
707
707
708 abort with configured error hint when there is a ssh problem when pulling
708 abort with configured error hint when there is a ssh problem when pulling
709
709
710 $ hg pull ssh://brokenrepository -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" \
710 $ hg pull ssh://brokenrepository -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" \
711 > --config ui.ssherrorhint="Please see http://company/internalwiki/ssh.html"
711 > --config ui.ssherrorhint="Please see http://company/internalwiki/ssh.html"
712 pulling from ssh://brokenrepository/
712 pulling from ssh://brokenrepository/
713 abort: no suitable response from remote hg
713 abort: no suitable response from remote hg
714 (Please see http://company/internalwiki/ssh.html)
714 (Please see http://company/internalwiki/ssh.html)
715 [255]
715 [255]
716
716
717 test that custom environment is passed down to ssh executable
717 test that custom environment is passed down to ssh executable
718 $ cat >>dumpenv <<EOF
718 $ cat >>dumpenv <<EOF
719 > #! /bin/sh
719 > #! /bin/sh
720 > echo \$VAR >&2
720 > echo \$VAR >&2
721 > EOF
721 > EOF
722 $ chmod +x dumpenv
722 $ chmod +x dumpenv
723 $ hg pull ssh://something --config ui.ssh="sh dumpenv"
723 $ hg pull ssh://something --config ui.ssh="sh dumpenv"
724 pulling from ssh://something/
724 pulling from ssh://something/
725 remote:
725 remote:
726 abort: no suitable response from remote hg
726 abort: no suitable response from remote hg
727 [255]
727 [255]
728 $ hg pull ssh://something --config ui.ssh="sh dumpenv" --config sshenv.VAR=17
728 $ hg pull ssh://something --config ui.ssh="sh dumpenv" --config sshenv.VAR=17
729 pulling from ssh://something/
729 pulling from ssh://something/
730 remote: 17
730 remote: 17
731 abort: no suitable response from remote hg
731 abort: no suitable response from remote hg
732 [255]
732 [255]
733
733
@@ -1,935 +1,935 b''
1 setup
1 setup
2
2
3 $ cat >> $HGRCPATH << EOF
3 $ cat >> $HGRCPATH << EOF
4 > [extensions]
4 > [extensions]
5 > blackbox=
5 > blackbox=
6 > mock=$TESTDIR/mockblackbox.py
6 > mock=$TESTDIR/mockblackbox.py
7 > [blackbox]
7 > [blackbox]
8 > track = command, commandfinish, tagscache
8 > track = command, commandfinish, tagscache
9 > EOF
9 > EOF
10
10
11 Helper functions:
11 Helper functions:
12
12
13 $ cacheexists() {
13 $ cacheexists() {
14 > [ -f .hg/cache/tags2-visible ] && echo "tag cache exists" || echo "no tag cache"
14 > [ -f .hg/cache/tags2-visible ] && echo "tag cache exists" || echo "no tag cache"
15 > }
15 > }
16
16
17 $ fnodescacheexists() {
17 $ fnodescacheexists() {
18 > [ -f .hg/cache/hgtagsfnodes1 ] && echo "fnodes cache exists" || echo "no fnodes cache"
18 > [ -f .hg/cache/hgtagsfnodes1 ] && echo "fnodes cache exists" || echo "no fnodes cache"
19 > }
19 > }
20
20
21 $ dumptags() {
21 $ dumptags() {
22 > rev=$1
22 > rev=$1
23 > echo "rev $rev: .hgtags:"
23 > echo "rev $rev: .hgtags:"
24 > hg cat -r$rev .hgtags
24 > hg cat -r$rev .hgtags
25 > }
25 > }
26
26
27 # XXX need to test that the tag cache works when we strip an old head
27 # XXX need to test that the tag cache works when we strip an old head
28 # and add a new one rooted off non-tip: i.e. node and rev of tip are the
28 # and add a new one rooted off non-tip: i.e. node and rev of tip are the
29 # same, but stuff has changed behind tip.
29 # same, but stuff has changed behind tip.
30
30
31 Setup:
31 Setup:
32
32
33 $ hg init t
33 $ hg init t
34 $ cd t
34 $ cd t
35 $ cacheexists
35 $ cacheexists
36 no tag cache
36 no tag cache
37 $ fnodescacheexists
37 $ fnodescacheexists
38 no fnodes cache
38 no fnodes cache
39 $ hg id
39 $ hg id
40 000000000000 tip
40 000000000000 tip
41 $ cacheexists
41 $ cacheexists
42 no tag cache
42 no tag cache
43 $ fnodescacheexists
43 $ fnodescacheexists
44 no fnodes cache
44 no fnodes cache
45 $ echo a > a
45 $ echo a > a
46 $ hg add a
46 $ hg add a
47 $ hg commit -m "test"
47 $ hg commit -m "test"
48 $ hg co
48 $ hg co
49 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
49 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
50 $ hg identify
50 $ hg identify
51 acb14030fe0a tip
51 acb14030fe0a tip
52 $ hg identify -r 'wdir()'
52 $ hg identify -r 'wdir()'
53 acb14030fe0a tip
53 acb14030fe0a tip
54 $ cacheexists
54 $ cacheexists
55 tag cache exists
55 tag cache exists
56 No fnodes cache because .hgtags file doesn't exist
56 No fnodes cache because .hgtags file doesn't exist
57 (this is an implementation detail)
57 (this is an implementation detail)
58 $ fnodescacheexists
58 $ fnodescacheexists
59 no fnodes cache
59 no fnodes cache
60
60
61 Try corrupting the cache
61 Try corrupting the cache
62
62
63 $ printf 'a b' > .hg/cache/tags2-visible
63 $ printf 'a b' > .hg/cache/tags2-visible
64 $ hg identify
64 $ hg identify
65 acb14030fe0a tip
65 acb14030fe0a tip
66 $ cacheexists
66 $ cacheexists
67 tag cache exists
67 tag cache exists
68 $ fnodescacheexists
68 $ fnodescacheexists
69 no fnodes cache
69 no fnodes cache
70 $ hg identify
70 $ hg identify
71 acb14030fe0a tip
71 acb14030fe0a tip
72
72
73 Create local tag with long name:
73 Create local tag with long name:
74
74
75 $ T=`hg identify --debug --id`
75 $ T=`hg identify --debug --id`
76 $ hg tag -l "This is a local tag with a really long name!"
76 $ hg tag -l "This is a local tag with a really long name!"
77 $ hg tags
77 $ hg tags
78 tip 0:acb14030fe0a
78 tip 0:acb14030fe0a
79 This is a local tag with a really long name! 0:acb14030fe0a
79 This is a local tag with a really long name! 0:acb14030fe0a
80 $ rm .hg/localtags
80 $ rm .hg/localtags
81
81
82 Create a tag behind hg's back:
82 Create a tag behind hg's back:
83
83
84 $ echo "$T first" > .hgtags
84 $ echo "$T first" > .hgtags
85 $ cat .hgtags
85 $ cat .hgtags
86 acb14030fe0a21b60322c440ad2d20cf7685a376 first
86 acb14030fe0a21b60322c440ad2d20cf7685a376 first
87 $ hg add .hgtags
87 $ hg add .hgtags
88 $ hg commit -m "add tags"
88 $ hg commit -m "add tags"
89 $ hg tags
89 $ hg tags
90 tip 1:b9154636be93
90 tip 1:b9154636be93
91 first 0:acb14030fe0a
91 first 0:acb14030fe0a
92 $ hg identify
92 $ hg identify
93 b9154636be93 tip
93 b9154636be93 tip
94
94
95 We should have a fnodes cache now that we have a real tag
95 We should have a fnodes cache now that we have a real tag
96 The cache should have an empty entry for rev 0 and a valid entry for rev 1.
96 The cache should have an empty entry for rev 0 and a valid entry for rev 1.
97
97
98
98
99 $ fnodescacheexists
99 $ fnodescacheexists
100 fnodes cache exists
100 fnodes cache exists
101 $ f --size --hexdump .hg/cache/hgtagsfnodes1
101 $ f --size --hexdump .hg/cache/hgtagsfnodes1
102 .hg/cache/hgtagsfnodes1: size=48
102 .hg/cache/hgtagsfnodes1: size=48
103 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
103 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
104 0010: ff ff ff ff ff ff ff ff b9 15 46 36 26 b7 b4 a7 |..........F6&...|
104 0010: ff ff ff ff ff ff ff ff b9 15 46 36 26 b7 b4 a7 |..........F6&...|
105 0020: 73 e0 9e e3 c5 2f 51 0e 19 e0 5e 1f f9 66 d8 59 |s..../Q...^..f.Y|
105 0020: 73 e0 9e e3 c5 2f 51 0e 19 e0 5e 1f f9 66 d8 59 |s..../Q...^..f.Y|
106 $ hg debugtagscache
106 $ hg debugtagscache
107 0 acb14030fe0a21b60322c440ad2d20cf7685a376 missing
107 0 acb14030fe0a21b60322c440ad2d20cf7685a376 missing
108 1 b9154636be938d3d431e75a7c906504a079bfe07 26b7b4a773e09ee3c52f510e19e05e1ff966d859
108 1 b9154636be938d3d431e75a7c906504a079bfe07 26b7b4a773e09ee3c52f510e19e05e1ff966d859
109
109
110 Repeat with cold tag cache:
110 Repeat with cold tag cache:
111
111
112 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
112 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
113 $ hg identify
113 $ hg identify
114 b9154636be93 tip
114 b9154636be93 tip
115
115
116 $ fnodescacheexists
116 $ fnodescacheexists
117 fnodes cache exists
117 fnodes cache exists
118 $ f --size --hexdump .hg/cache/hgtagsfnodes1
118 $ f --size --hexdump .hg/cache/hgtagsfnodes1
119 .hg/cache/hgtagsfnodes1: size=48
119 .hg/cache/hgtagsfnodes1: size=48
120 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
120 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
121 0010: ff ff ff ff ff ff ff ff b9 15 46 36 26 b7 b4 a7 |..........F6&...|
121 0010: ff ff ff ff ff ff ff ff b9 15 46 36 26 b7 b4 a7 |..........F6&...|
122 0020: 73 e0 9e e3 c5 2f 51 0e 19 e0 5e 1f f9 66 d8 59 |s..../Q...^..f.Y|
122 0020: 73 e0 9e e3 c5 2f 51 0e 19 e0 5e 1f f9 66 d8 59 |s..../Q...^..f.Y|
123
123
124 And again, but now unable to write tag cache or lock file:
124 And again, but now unable to write tag cache or lock file:
125
125
126 #if unix-permissions no-fsmonitor
126 #if unix-permissions no-fsmonitor
127
127
128 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
128 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
129 $ chmod 555 .hg/cache
129 $ chmod 555 .hg/cache
130 $ hg identify
130 $ hg identify
131 b9154636be93 tip
131 b9154636be93 tip
132 $ chmod 755 .hg/cache
132 $ chmod 755 .hg/cache
133
133
134 (this block should be protected by no-fsmonitor, because "chmod 555 .hg"
134 (this block should be protected by no-fsmonitor, because "chmod 555 .hg"
135 makes watchman fail at accessing to files under .hg)
135 makes watchman fail at accessing to files under .hg)
136
136
137 $ chmod 555 .hg
137 $ chmod 555 .hg
138 $ hg identify
138 $ hg identify
139 b9154636be93 tip
139 b9154636be93 tip
140 $ chmod 755 .hg
140 $ chmod 755 .hg
141 #endif
141 #endif
142
142
143 Tag cache debug info written to blackbox log
143 Tag cache debug info written to blackbox log
144
144
145 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
145 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
146 $ hg identify
146 $ hg identify
147 b9154636be93 tip
147 b9154636be93 tip
148 $ hg blackbox -l 6
148 $ hg blackbox -l 6
149 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify
149 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify
150 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> writing 48 bytes to cache/hgtagsfnodes1
150 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> writing 48 bytes to cache/hgtagsfnodes1
151 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> 0/2 cache hits/lookups in * seconds (glob)
151 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> 0/2 cache hits/lookups in * seconds (glob)
152 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> writing .hg/cache/tags2-visible with 1 tags
152 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> writing .hg/cache/tags2-visible with 1 tags
153 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify exited 0 after * seconds (glob)
153 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify exited 0 after * seconds (glob)
154 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> blackbox -l 6
154 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> blackbox -l 6
155
155
156 Failure to acquire lock results in no write
156 Failure to acquire lock results in no write
157
157
158 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
158 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
159 $ echo 'foo:1' > .hg/store/lock
159 $ echo 'foo:1' > .hg/store/lock
160 $ hg identify
160 $ hg identify
161 b9154636be93 tip
161 b9154636be93 tip
162 $ hg blackbox -l 6
162 $ hg blackbox -l 6
163 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify
163 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify
164 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> not writing .hg/cache/hgtagsfnodes1 because lock cannot be acquired
164 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> not writing .hg/cache/hgtagsfnodes1 because lock cannot be acquired
165 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> 0/2 cache hits/lookups in * seconds (glob)
165 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> 0/2 cache hits/lookups in * seconds (glob)
166 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> writing .hg/cache/tags2-visible with 1 tags
166 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> writing .hg/cache/tags2-visible with 1 tags
167 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify exited 0 after * seconds (glob)
167 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify exited 0 after * seconds (glob)
168 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> blackbox -l 6
168 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> blackbox -l 6
169
169
170 $ fnodescacheexists
170 $ fnodescacheexists
171 no fnodes cache
171 no fnodes cache
172
172
173 $ rm .hg/store/lock
173 $ rm .hg/store/lock
174
174
175 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
175 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
176 $ hg identify
176 $ hg identify
177 b9154636be93 tip
177 b9154636be93 tip
178
178
179 Create a branch:
179 Create a branch:
180
180
181 $ echo bb > a
181 $ echo bb > a
182 $ hg status
182 $ hg status
183 M a
183 M a
184 $ hg identify
184 $ hg identify
185 b9154636be93+ tip
185 b9154636be93+ tip
186 $ hg co first
186 $ hg co first
187 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
187 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
188 $ hg id
188 $ hg id
189 acb14030fe0a+ first
189 acb14030fe0a+ first
190 $ hg id -r 'wdir()'
190 $ hg id -r 'wdir()'
191 acb14030fe0a+ first
191 acb14030fe0a+ first
192 $ hg -v id
192 $ hg -v id
193 acb14030fe0a+ first
193 acb14030fe0a+ first
194 $ hg status
194 $ hg status
195 M a
195 M a
196 $ echo 1 > b
196 $ echo 1 > b
197 $ hg add b
197 $ hg add b
198 $ hg commit -m "branch"
198 $ hg commit -m "branch"
199 created new head
199 created new head
200
200
201 Creating a new commit shouldn't append the .hgtags fnodes cache until
201 Creating a new commit shouldn't append the .hgtags fnodes cache until
202 tags info is accessed
202 tags info is accessed
203
203
204 $ f --size --hexdump .hg/cache/hgtagsfnodes1
204 $ f --size --hexdump .hg/cache/hgtagsfnodes1
205 .hg/cache/hgtagsfnodes1: size=48
205 .hg/cache/hgtagsfnodes1: size=48
206 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
206 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
207 0010: ff ff ff ff ff ff ff ff b9 15 46 36 26 b7 b4 a7 |..........F6&...|
207 0010: ff ff ff ff ff ff ff ff b9 15 46 36 26 b7 b4 a7 |..........F6&...|
208 0020: 73 e0 9e e3 c5 2f 51 0e 19 e0 5e 1f f9 66 d8 59 |s..../Q...^..f.Y|
208 0020: 73 e0 9e e3 c5 2f 51 0e 19 e0 5e 1f f9 66 d8 59 |s..../Q...^..f.Y|
209
209
210 $ hg id
210 $ hg id
211 c8edf04160c7 tip
211 c8edf04160c7 tip
212
212
213 First 4 bytes of record 3 are changeset fragment
213 First 4 bytes of record 3 are changeset fragment
214
214
215 $ f --size --hexdump .hg/cache/hgtagsfnodes1
215 $ f --size --hexdump .hg/cache/hgtagsfnodes1
216 .hg/cache/hgtagsfnodes1: size=72
216 .hg/cache/hgtagsfnodes1: size=72
217 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
217 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
218 0010: ff ff ff ff ff ff ff ff b9 15 46 36 26 b7 b4 a7 |..........F6&...|
218 0010: ff ff ff ff ff ff ff ff b9 15 46 36 26 b7 b4 a7 |..........F6&...|
219 0020: 73 e0 9e e3 c5 2f 51 0e 19 e0 5e 1f f9 66 d8 59 |s..../Q...^..f.Y|
219 0020: 73 e0 9e e3 c5 2f 51 0e 19 e0 5e 1f f9 66 d8 59 |s..../Q...^..f.Y|
220 0030: c8 ed f0 41 00 00 00 00 00 00 00 00 00 00 00 00 |...A............|
220 0030: c8 ed f0 41 00 00 00 00 00 00 00 00 00 00 00 00 |...A............|
221 0040: 00 00 00 00 00 00 00 00 |........|
221 0040: 00 00 00 00 00 00 00 00 |........|
222
222
223 Merge the two heads:
223 Merge the two heads:
224
224
225 $ hg merge 1
225 $ hg merge 1
226 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
226 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
227 (branch merge, don't forget to commit)
227 (branch merge, don't forget to commit)
228 $ hg blackbox -l3
228 $ hg blackbox -l3
229 1970/01/01 00:00:00 bob @c8edf04160c7f731e4589d66ab3ab3486a64ac28 (5000)> merge 1
229 1970/01/01 00:00:00 bob @c8edf04160c7f731e4589d66ab3ab3486a64ac28 (5000)> merge 1
230 1970/01/01 00:00:00 bob @c8edf04160c7f731e4589d66ab3ab3486a64ac28+b9154636be938d3d431e75a7c906504a079bfe07 (5000)> merge 1 exited 0 after * seconds (glob)
230 1970/01/01 00:00:00 bob @c8edf04160c7f731e4589d66ab3ab3486a64ac28+b9154636be938d3d431e75a7c906504a079bfe07 (5000)> merge 1 exited 0 after * seconds (glob)
231 1970/01/01 00:00:00 bob @c8edf04160c7f731e4589d66ab3ab3486a64ac28+b9154636be938d3d431e75a7c906504a079bfe07 (5000)> blackbox -l3
231 1970/01/01 00:00:00 bob @c8edf04160c7f731e4589d66ab3ab3486a64ac28+b9154636be938d3d431e75a7c906504a079bfe07 (5000)> blackbox -l3
232 $ hg id
232 $ hg id
233 c8edf04160c7+b9154636be93+ tip
233 c8edf04160c7+b9154636be93+ tip
234 $ hg status
234 $ hg status
235 M .hgtags
235 M .hgtags
236 $ hg commit -m "merge"
236 $ hg commit -m "merge"
237
237
238 Create a fake head, make sure tag not visible afterwards:
238 Create a fake head, make sure tag not visible afterwards:
239
239
240 $ cp .hgtags tags
240 $ cp .hgtags tags
241 $ hg tag last
241 $ hg tag last
242 $ hg rm .hgtags
242 $ hg rm .hgtags
243 $ hg commit -m "remove"
243 $ hg commit -m "remove"
244
244
245 $ mv tags .hgtags
245 $ mv tags .hgtags
246 $ hg add .hgtags
246 $ hg add .hgtags
247 $ hg commit -m "readd"
247 $ hg commit -m "readd"
248 $
248 $
249 $ hg tags
249 $ hg tags
250 tip 6:35ff301afafe
250 tip 6:35ff301afafe
251 first 0:acb14030fe0a
251 first 0:acb14030fe0a
252
252
253 Add invalid tags:
253 Add invalid tags:
254
254
255 $ echo "spam" >> .hgtags
255 $ echo "spam" >> .hgtags
256 $ echo >> .hgtags
256 $ echo >> .hgtags
257 $ echo "foo bar" >> .hgtags
257 $ echo "foo bar" >> .hgtags
258 $ echo "a5a5 invalid" >> .hg/localtags
258 $ echo "a5a5 invalid" >> .hg/localtags
259 $ cat .hgtags
259 $ cat .hgtags
260 acb14030fe0a21b60322c440ad2d20cf7685a376 first
260 acb14030fe0a21b60322c440ad2d20cf7685a376 first
261 spam
261 spam
262
262
263 foo bar
263 foo bar
264 $ hg commit -m "tags"
264 $ hg commit -m "tags"
265
265
266 Report tag parse error on other head:
266 Report tag parse error on other head:
267
267
268 $ hg up 3
268 $ hg up 3
269 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
269 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
270 $ echo 'x y' >> .hgtags
270 $ echo 'x y' >> .hgtags
271 $ hg commit -m "head"
271 $ hg commit -m "head"
272 created new head
272 created new head
273
273
274 $ hg tags --debug
274 $ hg tags --debug
275 .hgtags@75d9f02dfe28, line 2: cannot parse entry
275 .hgtags@75d9f02dfe28, line 2: cannot parse entry
276 .hgtags@75d9f02dfe28, line 4: node 'foo' is not well formed
276 .hgtags@75d9f02dfe28, line 4: node 'foo' is not well formed
277 .hgtags@c4be69a18c11, line 2: node 'x' is not well formed
277 .hgtags@c4be69a18c11, line 2: node 'x' is not well formed
278 tip 8:c4be69a18c11e8bc3a5fdbb576017c25f7d84663
278 tip 8:c4be69a18c11e8bc3a5fdbb576017c25f7d84663
279 first 0:acb14030fe0a21b60322c440ad2d20cf7685a376
279 first 0:acb14030fe0a21b60322c440ad2d20cf7685a376
280 $ hg tip
280 $ hg tip
281 changeset: 8:c4be69a18c11
281 changeset: 8:c4be69a18c11
282 tag: tip
282 tag: tip
283 parent: 3:ac5e980c4dc0
283 parent: 3:ac5e980c4dc0
284 user: test
284 user: test
285 date: Thu Jan 01 00:00:00 1970 +0000
285 date: Thu Jan 01 00:00:00 1970 +0000
286 summary: head
286 summary: head
287
287
288
288
289 Test tag precedence rules:
289 Test tag precedence rules:
290
290
291 $ cd ..
291 $ cd ..
292 $ hg init t2
292 $ hg init t2
293 $ cd t2
293 $ cd t2
294 $ echo foo > foo
294 $ echo foo > foo
295 $ hg add foo
295 $ hg add foo
296 $ hg ci -m 'add foo' # rev 0
296 $ hg ci -m 'add foo' # rev 0
297 $ hg tag bar # rev 1
297 $ hg tag bar # rev 1
298 $ echo >> foo
298 $ echo >> foo
299 $ hg ci -m 'change foo 1' # rev 2
299 $ hg ci -m 'change foo 1' # rev 2
300 $ hg up -C 1
300 $ hg up -C 1
301 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
301 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
302 $ hg tag -r 1 -f bar # rev 3
302 $ hg tag -r 1 -f bar # rev 3
303 $ hg up -C 1
303 $ hg up -C 1
304 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
304 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
305 $ echo >> foo
305 $ echo >> foo
306 $ hg ci -m 'change foo 2' # rev 4
306 $ hg ci -m 'change foo 2' # rev 4
307 created new head
307 created new head
308 $ hg tags
308 $ hg tags
309 tip 4:0c192d7d5e6b
309 tip 4:0c192d7d5e6b
310 bar 1:78391a272241
310 bar 1:78391a272241
311
311
312 Repeat in case of cache effects:
312 Repeat in case of cache effects:
313
313
314 $ hg tags
314 $ hg tags
315 tip 4:0c192d7d5e6b
315 tip 4:0c192d7d5e6b
316 bar 1:78391a272241
316 bar 1:78391a272241
317
317
318 Detailed dump of tag info:
318 Detailed dump of tag info:
319
319
320 $ hg heads -q # expect 4, 3, 2
320 $ hg heads -q # expect 4, 3, 2
321 4:0c192d7d5e6b
321 4:0c192d7d5e6b
322 3:6fa450212aeb
322 3:6fa450212aeb
323 2:7a94127795a3
323 2:7a94127795a3
324 $ dumptags 2
324 $ dumptags 2
325 rev 2: .hgtags:
325 rev 2: .hgtags:
326 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
326 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
327 $ dumptags 3
327 $ dumptags 3
328 rev 3: .hgtags:
328 rev 3: .hgtags:
329 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
329 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
330 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
330 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
331 78391a272241d70354aa14c874552cad6b51bb42 bar
331 78391a272241d70354aa14c874552cad6b51bb42 bar
332 $ dumptags 4
332 $ dumptags 4
333 rev 4: .hgtags:
333 rev 4: .hgtags:
334 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
334 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
335
335
336 Dump cache:
336 Dump cache:
337
337
338 $ cat .hg/cache/tags2-visible
338 $ cat .hg/cache/tags2-visible
339 4 0c192d7d5e6b78a714de54a2e9627952a877e25a
339 4 0c192d7d5e6b78a714de54a2e9627952a877e25a
340 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
340 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
341 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
341 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
342 78391a272241d70354aa14c874552cad6b51bb42 bar
342 78391a272241d70354aa14c874552cad6b51bb42 bar
343
343
344 $ f --size --hexdump .hg/cache/hgtagsfnodes1
344 $ f --size --hexdump .hg/cache/hgtagsfnodes1
345 .hg/cache/hgtagsfnodes1: size=120
345 .hg/cache/hgtagsfnodes1: size=120
346 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
346 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
347 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
347 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
348 0020: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
348 0020: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
349 0030: 7a 94 12 77 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |z..w.....1....B(|
349 0030: 7a 94 12 77 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |z..w.....1....B(|
350 0040: 78 ee 5a 2d ad bc 94 3d 6f a4 50 21 7d 3b 71 8c |x.Z-...=o.P!};q.|
350 0040: 78 ee 5a 2d ad bc 94 3d 6f a4 50 21 7d 3b 71 8c |x.Z-...=o.P!};q.|
351 0050: 96 4e f3 7b 89 e5 50 eb da fd 57 89 e7 6c e1 b0 |.N.{..P...W..l..|
351 0050: 96 4e f3 7b 89 e5 50 eb da fd 57 89 e7 6c e1 b0 |.N.{..P...W..l..|
352 0060: 0c 19 2d 7d 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |..-}.....1....B(|
352 0060: 0c 19 2d 7d 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |..-}.....1....B(|
353 0070: 78 ee 5a 2d ad bc 94 3d |x.Z-...=|
353 0070: 78 ee 5a 2d ad bc 94 3d |x.Z-...=|
354
354
355 Corrupt the .hgtags fnodes cache
355 Corrupt the .hgtags fnodes cache
356 Extra junk data at the end should get overwritten on next cache update
356 Extra junk data at the end should get overwritten on next cache update
357
357
358 $ echo extra >> .hg/cache/hgtagsfnodes1
358 $ echo extra >> .hg/cache/hgtagsfnodes1
359 $ echo dummy1 > foo
359 $ echo dummy1 > foo
360 $ hg commit -m throwaway1
360 $ hg commit -m throwaway1
361
361
362 $ hg tags
362 $ hg tags
363 tip 5:8dbfe60eff30
363 tip 5:8dbfe60eff30
364 bar 1:78391a272241
364 bar 1:78391a272241
365
365
366 $ hg blackbox -l 6
366 $ hg blackbox -l 6
367 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> tags
367 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> tags
368 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> writing 24 bytes to cache/hgtagsfnodes1
368 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> writing 24 bytes to cache/hgtagsfnodes1
369 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> 3/4 cache hits/lookups in * seconds (glob)
369 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> 3/4 cache hits/lookups in * seconds (glob)
370 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> writing .hg/cache/tags2-visible with 1 tags
370 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> writing .hg/cache/tags2-visible with 1 tags
371 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> tags exited 0 after * seconds (glob)
371 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> tags exited 0 after * seconds (glob)
372 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> blackbox -l 6
372 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> blackbox -l 6
373
373
374 On junk data + missing cache entries, hg also overwrites the junk.
374 On junk data + missing cache entries, hg also overwrites the junk.
375
375
376 $ rm -f .hg/cache/tags2-visible
376 $ rm -f .hg/cache/tags2-visible
377 >>> import os
377 >>> import os
378 >>> with open(".hg/cache/hgtagsfnodes1", "ab+") as fp:
378 >>> with open(".hg/cache/hgtagsfnodes1", "ab+") as fp:
379 ... fp.seek(-10, os.SEEK_END) and None
379 ... fp.seek(-10, os.SEEK_END) and None
380 ... fp.truncate() and None
380 ... fp.truncate() and None
381
381
382 $ hg debugtagscache | tail -2
382 $ hg debugtagscache | tail -2
383 4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
383 4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
384 5 8dbfe60eff306a54259cfe007db9e330e7ecf866 missing
384 5 8dbfe60eff306a54259cfe007db9e330e7ecf866 missing
385 $ hg tags
385 $ hg tags
386 tip 5:8dbfe60eff30
386 tip 5:8dbfe60eff30
387 bar 1:78391a272241
387 bar 1:78391a272241
388 $ hg debugtagscache | tail -2
388 $ hg debugtagscache | tail -2
389 4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
389 4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
390 5 8dbfe60eff306a54259cfe007db9e330e7ecf866 0c04f2a8af31de17fab7422878ee5a2dadbc943d
390 5 8dbfe60eff306a54259cfe007db9e330e7ecf866 0c04f2a8af31de17fab7422878ee5a2dadbc943d
391
391
392 If the 4 bytes of node hash for a record don't match an existing node, the entry
392 If the 4 bytes of node hash for a record don't match an existing node, the entry
393 is flagged as invalid.
393 is flagged as invalid.
394
394
395 >>> import os
395 >>> import os
396 >>> with open(".hg/cache/hgtagsfnodes1", "rb+") as fp:
396 >>> with open(".hg/cache/hgtagsfnodes1", "rb+") as fp:
397 ... fp.seek(-24, os.SEEK_END) and None
397 ... fp.seek(-24, os.SEEK_END) and None
398 ... fp.write(b'\xde\xad') and None
398 ... fp.write(b'\xde\xad') and None
399
399
400 $ f --size --hexdump .hg/cache/hgtagsfnodes1
400 $ f --size --hexdump .hg/cache/hgtagsfnodes1
401 .hg/cache/hgtagsfnodes1: size=144
401 .hg/cache/hgtagsfnodes1: size=144
402 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
402 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
403 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
403 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
404 0020: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
404 0020: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
405 0030: 7a 94 12 77 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |z..w.....1....B(|
405 0030: 7a 94 12 77 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |z..w.....1....B(|
406 0040: 78 ee 5a 2d ad bc 94 3d 6f a4 50 21 7d 3b 71 8c |x.Z-...=o.P!};q.|
406 0040: 78 ee 5a 2d ad bc 94 3d 6f a4 50 21 7d 3b 71 8c |x.Z-...=o.P!};q.|
407 0050: 96 4e f3 7b 89 e5 50 eb da fd 57 89 e7 6c e1 b0 |.N.{..P...W..l..|
407 0050: 96 4e f3 7b 89 e5 50 eb da fd 57 89 e7 6c e1 b0 |.N.{..P...W..l..|
408 0060: 0c 19 2d 7d 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |..-}.....1....B(|
408 0060: 0c 19 2d 7d 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |..-}.....1....B(|
409 0070: 78 ee 5a 2d ad bc 94 3d de ad e6 0e 0c 04 f2 a8 |x.Z-...=........|
409 0070: 78 ee 5a 2d ad bc 94 3d de ad e6 0e 0c 04 f2 a8 |x.Z-...=........|
410 0080: af 31 de 17 fa b7 42 28 78 ee 5a 2d ad bc 94 3d |.1....B(x.Z-...=|
410 0080: af 31 de 17 fa b7 42 28 78 ee 5a 2d ad bc 94 3d |.1....B(x.Z-...=|
411
411
412 $ hg debugtagscache | tail -2
412 $ hg debugtagscache | tail -2
413 4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
413 4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
414 5 8dbfe60eff306a54259cfe007db9e330e7ecf866 invalid
414 5 8dbfe60eff306a54259cfe007db9e330e7ecf866 invalid
415
415
416 $ hg tags
416 $ hg tags
417 tip 5:8dbfe60eff30
417 tip 5:8dbfe60eff30
418 bar 1:78391a272241
418 bar 1:78391a272241
419
419
420 BUG: If the filenode part of an entry in hgtagsfnodes is corrupt and
420 BUG: If the filenode part of an entry in hgtagsfnodes is corrupt and
421 tags2-visible is missing, `hg tags` aborts. Corrupting the leading 4 bytes of
421 tags2-visible is missing, `hg tags` aborts. Corrupting the leading 4 bytes of
422 node hash (as above) doesn't seem to trigger the issue. Also note that the
422 node hash (as above) doesn't seem to trigger the issue. Also note that the
423 debug command hides the corruption, both with and without tags2-visible.
423 debug command hides the corruption, both with and without tags2-visible.
424
424
425 $ mv .hg/cache/hgtagsfnodes1 .hg/cache/hgtagsfnodes1.bak
425 $ mv .hg/cache/hgtagsfnodes1 .hg/cache/hgtagsfnodes1.bak
426 $ hg debugupdatecaches
426 $ hg debugupdatecaches
427
427
428 >>> import os
428 >>> import os
429 >>> with open(".hg/cache/hgtagsfnodes1", "rb+") as fp:
429 >>> with open(".hg/cache/hgtagsfnodes1", "rb+") as fp:
430 ... fp.seek(-16, os.SEEK_END) and None
430 ... fp.seek(-16, os.SEEK_END) and None
431 ... fp.write(b'\xde\xad') and None
431 ... fp.write(b'\xde\xad') and None
432
432
433 $ f --size --hexdump .hg/cache/hgtagsfnodes1
433 $ f --size --hexdump .hg/cache/hgtagsfnodes1
434 .hg/cache/hgtagsfnodes1: size=144
434 .hg/cache/hgtagsfnodes1: size=144
435 0000: bb d1 79 df 00 00 00 00 00 00 00 00 00 00 00 00 |..y.............|
435 0000: bb d1 79 df 00 00 00 00 00 00 00 00 00 00 00 00 |..y.............|
436 0010: 00 00 00 00 00 00 00 00 78 39 1a 27 0c 04 f2 a8 |........x9.'....|
436 0010: 00 00 00 00 00 00 00 00 78 39 1a 27 0c 04 f2 a8 |........x9.'....|
437 0020: af 31 de 17 fa b7 42 28 78 ee 5a 2d ad bc 94 3d |.1....B(x.Z-...=|
437 0020: af 31 de 17 fa b7 42 28 78 ee 5a 2d ad bc 94 3d |.1....B(x.Z-...=|
438 0030: 7a 94 12 77 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |z..w.....1....B(|
438 0030: 7a 94 12 77 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |z..w.....1....B(|
439 0040: 78 ee 5a 2d ad bc 94 3d 6f a4 50 21 7d 3b 71 8c |x.Z-...=o.P!};q.|
439 0040: 78 ee 5a 2d ad bc 94 3d 6f a4 50 21 7d 3b 71 8c |x.Z-...=o.P!};q.|
440 0050: 96 4e f3 7b 89 e5 50 eb da fd 57 89 e7 6c e1 b0 |.N.{..P...W..l..|
440 0050: 96 4e f3 7b 89 e5 50 eb da fd 57 89 e7 6c e1 b0 |.N.{..P...W..l..|
441 0060: 0c 19 2d 7d 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |..-}.....1....B(|
441 0060: 0c 19 2d 7d 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |..-}.....1....B(|
442 0070: 78 ee 5a 2d ad bc 94 3d 8d bf e6 0e 0c 04 f2 a8 |x.Z-...=........|
442 0070: 78 ee 5a 2d ad bc 94 3d 8d bf e6 0e 0c 04 f2 a8 |x.Z-...=........|
443 0080: de ad de 17 fa b7 42 28 78 ee 5a 2d ad bc 94 3d |......B(x.Z-...=|
443 0080: de ad de 17 fa b7 42 28 78 ee 5a 2d ad bc 94 3d |......B(x.Z-...=|
444
444
445 $ hg debugtagscache | tail -2
445 $ hg debugtagscache | tail -2
446 4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
446 4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
447 5 8dbfe60eff306a54259cfe007db9e330e7ecf866 0c04f2a8deadde17fab7422878ee5a2dadbc943d (unknown node)
447 5 8dbfe60eff306a54259cfe007db9e330e7ecf866 0c04f2a8deadde17fab7422878ee5a2dadbc943d (unknown node)
448
448
449 $ rm -f .hg/cache/tags2-visible
449 $ rm -f .hg/cache/tags2-visible
450 $ hg debugtagscache | tail -2
450 $ hg debugtagscache | tail -2
451 4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
451 4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
452 5 8dbfe60eff306a54259cfe007db9e330e7ecf866 0c04f2a8deadde17fab7422878ee5a2dadbc943d (unknown node)
452 5 8dbfe60eff306a54259cfe007db9e330e7ecf866 0c04f2a8deadde17fab7422878ee5a2dadbc943d (unknown node)
453
453
454 $ hg tags
454 $ hg tags
455 tip 5:8dbfe60eff30
455 tip 5:8dbfe60eff30
456 bar 1:78391a272241
456 bar 1:78391a272241
457
457
458 BUG: Unless this file is restored, the `hg tags` in the next unix-permissions
458 BUG: Unless this file is restored, the `hg tags` in the next unix-permissions
459 conditional will fail: "abort: data/.hgtags.i@0c04f2a8dead: no match found"
459 conditional will fail: "abort: data/.hgtags.i@0c04f2a8dead: no match found"
460
460
461 $ mv .hg/cache/hgtagsfnodes1.bak .hg/cache/hgtagsfnodes1
461 $ mv .hg/cache/hgtagsfnodes1.bak .hg/cache/hgtagsfnodes1
462
462
463 #if unix-permissions no-root
463 #if unix-permissions no-root
464 Errors writing to .hgtags fnodes cache are silently ignored
464 Errors writing to .hgtags fnodes cache are silently ignored
465
465
466 $ echo dummy2 > foo
466 $ echo dummy2 > foo
467 $ hg commit -m throwaway2
467 $ hg commit -m throwaway2
468
468
469 $ chmod a-w .hg/cache/hgtagsfnodes1
469 $ chmod a-w .hg/cache/hgtagsfnodes1
470 $ rm -f .hg/cache/tags2-visible
470 $ rm -f .hg/cache/tags2-visible
471
471
472 $ hg tags
472 $ hg tags
473 tip 6:b968051b5cf3
473 tip 6:b968051b5cf3
474 bar 1:78391a272241
474 bar 1:78391a272241
475
475
476 $ hg blackbox -l 6
476 $ hg blackbox -l 6
477 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags
477 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags
478 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> couldn't write cache/hgtagsfnodes1: [Errno *] * (glob)
478 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> couldn't write cache/hgtagsfnodes1: [Errno *] * (glob)
479 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> 2/4 cache hits/lookups in * seconds (glob)
479 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> 2/4 cache hits/lookups in * seconds (glob)
480 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> writing .hg/cache/tags2-visible with 1 tags
480 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> writing .hg/cache/tags2-visible with 1 tags
481 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags exited 0 after * seconds (glob)
481 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags exited 0 after * seconds (glob)
482 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> blackbox -l 6
482 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> blackbox -l 6
483
483
484 $ chmod a+w .hg/cache/hgtagsfnodes1
484 $ chmod a+w .hg/cache/hgtagsfnodes1
485
485
486 $ rm -f .hg/cache/tags2-visible
486 $ rm -f .hg/cache/tags2-visible
487 $ hg tags
487 $ hg tags
488 tip 6:b968051b5cf3
488 tip 6:b968051b5cf3
489 bar 1:78391a272241
489 bar 1:78391a272241
490
490
491 $ hg blackbox -l 6
491 $ hg blackbox -l 6
492 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags
492 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags
493 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> writing 24 bytes to cache/hgtagsfnodes1
493 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> writing 24 bytes to cache/hgtagsfnodes1
494 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> 2/4 cache hits/lookups in * seconds (glob)
494 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> 2/4 cache hits/lookups in * seconds (glob)
495 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> writing .hg/cache/tags2-visible with 1 tags
495 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> writing .hg/cache/tags2-visible with 1 tags
496 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags exited 0 after * seconds (glob)
496 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags exited 0 after * seconds (glob)
497 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> blackbox -l 6
497 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> blackbox -l 6
498
498
499 $ f --size .hg/cache/hgtagsfnodes1
499 $ f --size .hg/cache/hgtagsfnodes1
500 .hg/cache/hgtagsfnodes1: size=168
500 .hg/cache/hgtagsfnodes1: size=168
501
501
502 $ hg -q --config extensions.strip= strip -r 6 --no-backup
502 $ hg -q --config extensions.strip= strip -r 6 --no-backup
503 #endif
503 #endif
504
504
505 Stripping doesn't truncate the tags cache until new data is available
505 Stripping doesn't truncate the tags cache until new data is available
506
506
507 $ rm -f .hg/cache/hgtagsfnodes1 .hg/cache/tags2-visible
507 $ rm -f .hg/cache/hgtagsfnodes1 .hg/cache/tags2-visible
508 $ hg tags
508 $ hg tags
509 tip 5:8dbfe60eff30
509 tip 5:8dbfe60eff30
510 bar 1:78391a272241
510 bar 1:78391a272241
511
511
512 $ f --size .hg/cache/hgtagsfnodes1
512 $ f --size .hg/cache/hgtagsfnodes1
513 .hg/cache/hgtagsfnodes1: size=144
513 .hg/cache/hgtagsfnodes1: size=144
514
514
515 $ hg -q --config extensions.strip= strip -r 5 --no-backup
515 $ hg -q --config extensions.strip= strip -r 5 --no-backup
516 $ hg tags
516 $ hg tags
517 tip 4:0c192d7d5e6b
517 tip 4:0c192d7d5e6b
518 bar 1:78391a272241
518 bar 1:78391a272241
519
519
520 $ hg blackbox -l 5
520 $ hg blackbox -l 5
521 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> writing 24 bytes to cache/hgtagsfnodes1
521 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> writing 24 bytes to cache/hgtagsfnodes1
522 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> 2/4 cache hits/lookups in * seconds (glob)
522 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> 2/4 cache hits/lookups in * seconds (glob)
523 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> writing .hg/cache/tags2-visible with 1 tags
523 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> writing .hg/cache/tags2-visible with 1 tags
524 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> tags exited 0 after * seconds (glob)
524 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> tags exited 0 after * seconds (glob)
525 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> blackbox -l 5
525 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> blackbox -l 5
526
526
527 $ f --size .hg/cache/hgtagsfnodes1
527 $ f --size .hg/cache/hgtagsfnodes1
528 .hg/cache/hgtagsfnodes1: size=120
528 .hg/cache/hgtagsfnodes1: size=120
529
529
530 $ echo dummy > foo
530 $ echo dummy > foo
531 $ hg commit -m throwaway3
531 $ hg commit -m throwaway3
532
532
533 $ hg tags
533 $ hg tags
534 tip 5:035f65efb448
534 tip 5:035f65efb448
535 bar 1:78391a272241
535 bar 1:78391a272241
536
536
537 $ hg blackbox -l 6
537 $ hg blackbox -l 6
538 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> tags
538 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> tags
539 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> writing 24 bytes to cache/hgtagsfnodes1
539 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> writing 24 bytes to cache/hgtagsfnodes1
540 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> 3/4 cache hits/lookups in * seconds (glob)
540 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> 3/4 cache hits/lookups in * seconds (glob)
541 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> writing .hg/cache/tags2-visible with 1 tags
541 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> writing .hg/cache/tags2-visible with 1 tags
542 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> tags exited 0 after * seconds (glob)
542 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> tags exited 0 after * seconds (glob)
543 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> blackbox -l 6
543 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> blackbox -l 6
544 $ f --size .hg/cache/hgtagsfnodes1
544 $ f --size .hg/cache/hgtagsfnodes1
545 .hg/cache/hgtagsfnodes1: size=144
545 .hg/cache/hgtagsfnodes1: size=144
546
546
547 $ hg -q --config extensions.strip= strip -r 5 --no-backup
547 $ hg -q --config extensions.strip= strip -r 5 --no-backup
548
548
549 Test tag removal:
549 Test tag removal:
550
550
551 $ hg tag --remove bar # rev 5
551 $ hg tag --remove bar # rev 5
552 $ hg tip -vp
552 $ hg tip -vp
553 changeset: 5:5f6e8655b1c7
553 changeset: 5:5f6e8655b1c7
554 tag: tip
554 tag: tip
555 user: test
555 user: test
556 date: Thu Jan 01 00:00:00 1970 +0000
556 date: Thu Jan 01 00:00:00 1970 +0000
557 files: .hgtags
557 files: .hgtags
558 description:
558 description:
559 Removed tag bar
559 Removed tag bar
560
560
561
561
562 diff -r 0c192d7d5e6b -r 5f6e8655b1c7 .hgtags
562 diff -r 0c192d7d5e6b -r 5f6e8655b1c7 .hgtags
563 --- a/.hgtags Thu Jan 01 00:00:00 1970 +0000
563 --- a/.hgtags Thu Jan 01 00:00:00 1970 +0000
564 +++ b/.hgtags Thu Jan 01 00:00:00 1970 +0000
564 +++ b/.hgtags Thu Jan 01 00:00:00 1970 +0000
565 @@ -1,1 +1,3 @@
565 @@ -1,1 +1,3 @@
566 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
566 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
567 +78391a272241d70354aa14c874552cad6b51bb42 bar
567 +78391a272241d70354aa14c874552cad6b51bb42 bar
568 +0000000000000000000000000000000000000000 bar
568 +0000000000000000000000000000000000000000 bar
569
569
570 $ hg tags
570 $ hg tags
571 tip 5:5f6e8655b1c7
571 tip 5:5f6e8655b1c7
572 $ hg tags # again, try to expose cache bugs
572 $ hg tags # again, try to expose cache bugs
573 tip 5:5f6e8655b1c7
573 tip 5:5f6e8655b1c7
574
574
575 Remove nonexistent tag:
575 Remove nonexistent tag:
576
576
577 $ hg tag --remove foobar
577 $ hg tag --remove foobar
578 abort: tag 'foobar' does not exist
578 abort: tag 'foobar' does not exist
579 [10]
579 [10]
580 $ hg tip
580 $ hg tip
581 changeset: 5:5f6e8655b1c7
581 changeset: 5:5f6e8655b1c7
582 tag: tip
582 tag: tip
583 user: test
583 user: test
584 date: Thu Jan 01 00:00:00 1970 +0000
584 date: Thu Jan 01 00:00:00 1970 +0000
585 summary: Removed tag bar
585 summary: Removed tag bar
586
586
587
587
588 Undo a tag with rollback:
588 Undo a tag with rollback:
589
589
590 $ hg rollback # destroy rev 5 (restore bar)
590 $ hg rollback # destroy rev 5 (restore bar)
591 repository tip rolled back to revision 4 (undo commit)
591 repository tip rolled back to revision 4 (undo commit)
592 working directory now based on revision 4
592 working directory now based on revision 4
593 $ hg tags
593 $ hg tags
594 tip 4:0c192d7d5e6b
594 tip 4:0c192d7d5e6b
595 bar 1:78391a272241
595 bar 1:78391a272241
596 $ hg tags
596 $ hg tags
597 tip 4:0c192d7d5e6b
597 tip 4:0c192d7d5e6b
598 bar 1:78391a272241
598 bar 1:78391a272241
599
599
600 Test tag rank:
600 Test tag rank:
601
601
602 $ cd ..
602 $ cd ..
603 $ hg init t3
603 $ hg init t3
604 $ cd t3
604 $ cd t3
605 $ echo foo > foo
605 $ echo foo > foo
606 $ hg add foo
606 $ hg add foo
607 $ hg ci -m 'add foo' # rev 0
607 $ hg ci -m 'add foo' # rev 0
608 $ hg tag -f bar # rev 1 bar -> 0
608 $ hg tag -f bar # rev 1 bar -> 0
609 $ hg tag -f bar # rev 2 bar -> 1
609 $ hg tag -f bar # rev 2 bar -> 1
610 $ hg tag -fr 0 bar # rev 3 bar -> 0
610 $ hg tag -fr 0 bar # rev 3 bar -> 0
611 $ hg tag -fr 1 bar # rev 4 bar -> 1
611 $ hg tag -fr 1 bar # rev 4 bar -> 1
612 $ hg tag -fr 0 bar # rev 5 bar -> 0
612 $ hg tag -fr 0 bar # rev 5 bar -> 0
613 $ hg tags
613 $ hg tags
614 tip 5:85f05169d91d
614 tip 5:85f05169d91d
615 bar 0:bbd179dfa0a7
615 bar 0:bbd179dfa0a7
616 $ hg co 3
616 $ hg co 3
617 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
617 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
618 $ echo barbar > foo
618 $ echo barbar > foo
619 $ hg ci -m 'change foo' # rev 6
619 $ hg ci -m 'change foo' # rev 6
620 created new head
620 created new head
621 $ hg tags
621 $ hg tags
622 tip 6:735c3ca72986
622 tip 6:735c3ca72986
623 bar 0:bbd179dfa0a7
623 bar 0:bbd179dfa0a7
624
624
625 Don't allow moving tag without -f:
625 Don't allow moving tag without -f:
626
626
627 $ hg tag -r 3 bar
627 $ hg tag -r 3 bar
628 abort: tag 'bar' already exists (use -f to force)
628 abort: tag 'bar' already exists (use -f to force)
629 [10]
629 [10]
630 $ hg tags
630 $ hg tags
631 tip 6:735c3ca72986
631 tip 6:735c3ca72986
632 bar 0:bbd179dfa0a7
632 bar 0:bbd179dfa0a7
633
633
634 Strip 1: expose an old head:
634 Strip 1: expose an old head:
635
635
636 $ hg --config extensions.mq= strip 5
636 $ hg --config extensions.mq= strip 5
637 saved backup bundle to $TESTTMP/t3/.hg/strip-backup/*-backup.hg (glob)
637 saved backup bundle to $TESTTMP/t3/.hg/strip-backup/*-backup.hg (glob)
638 $ hg tags # partly stale cache
638 $ hg tags # partly stale cache
639 tip 5:735c3ca72986
639 tip 5:735c3ca72986
640 bar 1:78391a272241
640 bar 1:78391a272241
641 $ hg tags # up-to-date cache
641 $ hg tags # up-to-date cache
642 tip 5:735c3ca72986
642 tip 5:735c3ca72986
643 bar 1:78391a272241
643 bar 1:78391a272241
644
644
645 Strip 2: destroy whole branch, no old head exposed
645 Strip 2: destroy whole branch, no old head exposed
646
646
647 $ hg --config extensions.mq= strip 4
647 $ hg --config extensions.mq= strip 4
648 saved backup bundle to $TESTTMP/t3/.hg/strip-backup/*-backup.hg (glob)
648 saved backup bundle to $TESTTMP/t3/.hg/strip-backup/*-backup.hg (glob)
649 $ hg tags # partly stale
649 $ hg tags # partly stale
650 tip 4:735c3ca72986
650 tip 4:735c3ca72986
651 bar 0:bbd179dfa0a7
651 bar 0:bbd179dfa0a7
652 $ rm -f .hg/cache/tags2-visible
652 $ rm -f .hg/cache/tags2-visible
653 $ hg tags # cold cache
653 $ hg tags # cold cache
654 tip 4:735c3ca72986
654 tip 4:735c3ca72986
655 bar 0:bbd179dfa0a7
655 bar 0:bbd179dfa0a7
656
656
657 Test tag rank with 3 heads:
657 Test tag rank with 3 heads:
658
658
659 $ cd ..
659 $ cd ..
660 $ hg init t4
660 $ hg init t4
661 $ cd t4
661 $ cd t4
662 $ echo foo > foo
662 $ echo foo > foo
663 $ hg add
663 $ hg add
664 adding foo
664 adding foo
665 $ hg ci -m 'add foo' # rev 0
665 $ hg ci -m 'add foo' # rev 0
666 $ hg tag bar # rev 1 bar -> 0
666 $ hg tag bar # rev 1 bar -> 0
667 $ hg tag -f bar # rev 2 bar -> 1
667 $ hg tag -f bar # rev 2 bar -> 1
668 $ hg up -qC 0
668 $ hg up -qC 0
669 $ hg tag -fr 2 bar # rev 3 bar -> 2
669 $ hg tag -fr 2 bar # rev 3 bar -> 2
670 $ hg tags
670 $ hg tags
671 tip 3:197c21bbbf2c
671 tip 3:197c21bbbf2c
672 bar 2:6fa450212aeb
672 bar 2:6fa450212aeb
673 $ hg up -qC 0
673 $ hg up -qC 0
674 $ hg tag -m 'retag rev 0' -fr 0 bar # rev 4 bar -> 0, but bar stays at 2
674 $ hg tag -m 'retag rev 0' -fr 0 bar # rev 4 bar -> 0, but bar stays at 2
675
675
676 Bar should still point to rev 2:
676 Bar should still point to rev 2:
677
677
678 $ hg tags
678 $ hg tags
679 tip 4:3b4b14ed0202
679 tip 4:3b4b14ed0202
680 bar 2:6fa450212aeb
680 bar 2:6fa450212aeb
681
681
682 Test that removing global/local tags does not get confused when trying
682 Test that removing global/local tags does not get confused when trying
683 to remove a tag of type X which actually only exists as a type Y:
683 to remove a tag of type X which actually only exists as a type Y:
684
684
685 $ cd ..
685 $ cd ..
686 $ hg init t5
686 $ hg init t5
687 $ cd t5
687 $ cd t5
688 $ echo foo > foo
688 $ echo foo > foo
689 $ hg add
689 $ hg add
690 adding foo
690 adding foo
691 $ hg ci -m 'add foo' # rev 0
691 $ hg ci -m 'add foo' # rev 0
692
692
693 $ hg tag -r 0 -l localtag
693 $ hg tag -r 0 -l localtag
694 $ hg tag --remove localtag
694 $ hg tag --remove localtag
695 abort: tag 'localtag' is not a global tag
695 abort: tag 'localtag' is not a global tag
696 [10]
696 [10]
697 $
697 $
698 $ hg tag -r 0 globaltag
698 $ hg tag -r 0 globaltag
699 $ hg tag --remove -l globaltag
699 $ hg tag --remove -l globaltag
700 abort: tag 'globaltag' is not a local tag
700 abort: tag 'globaltag' is not a local tag
701 [10]
701 [10]
702 $ hg tags -v
702 $ hg tags -v
703 tip 1:a0b6fe111088
703 tip 1:a0b6fe111088
704 localtag 0:bbd179dfa0a7 local
704 localtag 0:bbd179dfa0a7 local
705 globaltag 0:bbd179dfa0a7
705 globaltag 0:bbd179dfa0a7
706
706
707 Templated output:
707 Templated output:
708
708
709 (immediate values)
709 (immediate values)
710
710
711 $ hg tags -T '{pad(tag, 9)} {rev}:{node} ({type})\n'
711 $ hg tags -T '{pad(tag, 9)} {rev}:{node} ({type})\n'
712 tip 1:a0b6fe111088c8c29567d3876cc466aa02927cae ()
712 tip 1:a0b6fe111088c8c29567d3876cc466aa02927cae ()
713 localtag 0:bbd179dfa0a71671c253b3ae0aa1513b60d199fa (local)
713 localtag 0:bbd179dfa0a71671c253b3ae0aa1513b60d199fa (local)
714 globaltag 0:bbd179dfa0a71671c253b3ae0aa1513b60d199fa ()
714 globaltag 0:bbd179dfa0a71671c253b3ae0aa1513b60d199fa ()
715
715
716 (ctx/revcache dependent)
716 (ctx/revcache dependent)
717
717
718 $ hg tags -T '{pad(tag, 9)} {rev} {file_adds}\n'
718 $ hg tags -T '{pad(tag, 9)} {rev} {file_adds}\n'
719 tip 1 .hgtags
719 tip 1 .hgtags
720 localtag 0 foo
720 localtag 0 foo
721 globaltag 0 foo
721 globaltag 0 foo
722
722
723 $ hg tags -T '{pad(tag, 9)} {rev}:{node|shortest}\n'
723 $ hg tags -T '{pad(tag, 9)} {rev}:{node|shortest}\n'
724 tip 1:a0b6
724 tip 1:a0b6
725 localtag 0:bbd1
725 localtag 0:bbd1
726 globaltag 0:bbd1
726 globaltag 0:bbd1
727
727
728 Test for issue3911
728 Test for issue3911
729
729
730 $ hg tag -r 0 -l localtag2
730 $ hg tag -r 0 -l localtag2
731 $ hg tag -l --remove localtag2
731 $ hg tag -l --remove localtag2
732 $ hg tags -v
732 $ hg tags -v
733 tip 1:a0b6fe111088
733 tip 1:a0b6fe111088
734 localtag 0:bbd179dfa0a7 local
734 localtag 0:bbd179dfa0a7 local
735 globaltag 0:bbd179dfa0a7
735 globaltag 0:bbd179dfa0a7
736
736
737 $ hg tag -r 1 -f localtag
737 $ hg tag -r 1 -f localtag
738 $ hg tags -v
738 $ hg tags -v
739 tip 2:5c70a037bb37
739 tip 2:5c70a037bb37
740 localtag 1:a0b6fe111088
740 localtag 1:a0b6fe111088
741 globaltag 0:bbd179dfa0a7
741 globaltag 0:bbd179dfa0a7
742
742
743 $ hg tags -v
743 $ hg tags -v
744 tip 2:5c70a037bb37
744 tip 2:5c70a037bb37
745 localtag 1:a0b6fe111088
745 localtag 1:a0b6fe111088
746 globaltag 0:bbd179dfa0a7
746 globaltag 0:bbd179dfa0a7
747
747
748 $ hg tag -r 1 localtag2
748 $ hg tag -r 1 localtag2
749 $ hg tags -v
749 $ hg tags -v
750 tip 3:bbfb8cd42be2
750 tip 3:bbfb8cd42be2
751 localtag2 1:a0b6fe111088
751 localtag2 1:a0b6fe111088
752 localtag 1:a0b6fe111088
752 localtag 1:a0b6fe111088
753 globaltag 0:bbd179dfa0a7
753 globaltag 0:bbd179dfa0a7
754
754
755 $ hg tags -v
755 $ hg tags -v
756 tip 3:bbfb8cd42be2
756 tip 3:bbfb8cd42be2
757 localtag2 1:a0b6fe111088
757 localtag2 1:a0b6fe111088
758 localtag 1:a0b6fe111088
758 localtag 1:a0b6fe111088
759 globaltag 0:bbd179dfa0a7
759 globaltag 0:bbd179dfa0a7
760
760
761 $ cd ..
761 $ cd ..
762
762
763 Create a repository with tags data to test .hgtags fnodes transfer
763 Create a repository with tags data to test .hgtags fnodes transfer
764
764
765 $ hg init tagsserver
765 $ hg init tagsserver
766 $ cd tagsserver
766 $ cd tagsserver
767 $ touch foo
767 $ touch foo
768 $ hg -q commit -A -m initial
768 $ hg -q commit -A -m initial
769 $ hg tag -m 'tag 0.1' 0.1
769 $ hg tag -m 'tag 0.1' 0.1
770 $ echo second > foo
770 $ echo second > foo
771 $ hg commit -m second
771 $ hg commit -m second
772 $ hg tag -m 'tag 0.2' 0.2
772 $ hg tag -m 'tag 0.2' 0.2
773 $ hg tags
773 $ hg tags
774 tip 3:40f0358cb314
774 tip 3:40f0358cb314
775 0.2 2:f63cc8fe54e4
775 0.2 2:f63cc8fe54e4
776 0.1 0:96ee1d7354c4
776 0.1 0:96ee1d7354c4
777 $ cd ..
777 $ cd ..
778
778
779 Cloning should pull down hgtags fnodes mappings and write the cache file
779 Cloning should pull down hgtags fnodes mappings and write the cache file
780
780
781 $ hg clone --pull tagsserver tagsclient
781 $ hg clone --pull tagsserver tagsclient
782 requesting all changes
782 requesting all changes
783 adding changesets
783 adding changesets
784 adding manifests
784 adding manifests
785 adding file changes
785 adding file changes
786 added 4 changesets with 4 changes to 2 files
786 added 4 changesets with 4 changes to 2 files
787 new changesets 96ee1d7354c4:40f0358cb314
787 new changesets 96ee1d7354c4:40f0358cb314
788 updating to branch default
788 updating to branch default
789 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
789 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
790
790
791 Missing tags2* files means the cache wasn't written through the normal mechanism.
791 Missing tags2* files means the cache wasn't written through the normal mechanism.
792
792
793 $ ls tagsclient/.hg/cache
793 $ ls tagsclient/.hg/cache
794 branch2-base
794 branch2-base
795 branch2-immutable
795 branch2-immutable
796 branch2-served
796 branch2-served
797 branch2-served.hidden
797 branch2-served.hidden
798 branch2-visible
798 branch2-visible
799 branch2-visible-hidden
799 branch2-visible-hidden
800 hgtagsfnodes1
800 hgtagsfnodes1
801 rbc-names-v1
801 rbc-names-v1
802 rbc-revs-v1
802 rbc-revs-v1
803 tags2
803 tags2
804 tags2-served
804 tags2-served
805
805
806 Cache should contain the head only, even though other nodes have tags data
806 Cache should contain the head only, even though other nodes have tags data
807
807
808 $ f --size --hexdump tagsclient/.hg/cache/hgtagsfnodes1
808 $ f --size --hexdump tagsclient/.hg/cache/hgtagsfnodes1
809 tagsclient/.hg/cache/hgtagsfnodes1: size=96
809 tagsclient/.hg/cache/hgtagsfnodes1: size=96
810 0000: 96 ee 1d 73 00 00 00 00 00 00 00 00 00 00 00 00 |...s............|
810 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
811 0010: 00 00 00 00 00 00 00 00 c4 da b0 c2 94 65 e1 c6 |.............e..|
811 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
812 0020: 0d f7 f0 dd 32 04 ea 57 78 c8 97 97 79 fc d5 95 |....2..Wx...y...|
812 0020: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
813 0030: f6 3c c8 fe 94 65 e1 c6 0d f7 f0 dd 32 04 ea 57 |.<...e......2..W|
813 0030: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
814 0040: 78 c8 97 97 79 fc d5 95 40 f0 35 8c 19 e0 a7 d3 |x...y...@.5.....|
814 0040: ff ff ff ff ff ff ff ff 40 f0 35 8c 19 e0 a7 d3 |........@.5.....|
815 0050: 8a 5c 6a 82 4d cf fb a5 87 d0 2f a3 1e 4f 2f 8a |.\j.M...../..O/.|
815 0050: 8a 5c 6a 82 4d cf fb a5 87 d0 2f a3 1e 4f 2f 8a |.\j.M...../..O/.|
816
816
817 Running hg tags should produce tags2* file and not change cache
817 Running hg tags should produce tags2* file and not change cache
818
818
819 $ hg -R tagsclient tags
819 $ hg -R tagsclient tags
820 tip 3:40f0358cb314
820 tip 3:40f0358cb314
821 0.2 2:f63cc8fe54e4
821 0.2 2:f63cc8fe54e4
822 0.1 0:96ee1d7354c4
822 0.1 0:96ee1d7354c4
823
823
824 $ ls tagsclient/.hg/cache
824 $ ls tagsclient/.hg/cache
825 branch2-base
825 branch2-base
826 branch2-immutable
826 branch2-immutable
827 branch2-served
827 branch2-served
828 branch2-served.hidden
828 branch2-served.hidden
829 branch2-visible
829 branch2-visible
830 branch2-visible-hidden
830 branch2-visible-hidden
831 hgtagsfnodes1
831 hgtagsfnodes1
832 rbc-names-v1
832 rbc-names-v1
833 rbc-revs-v1
833 rbc-revs-v1
834 tags2
834 tags2
835 tags2-served
835 tags2-served
836 tags2-visible
836 tags2-visible
837
837
838 $ f --size --hexdump tagsclient/.hg/cache/hgtagsfnodes1
838 $ f --size --hexdump tagsclient/.hg/cache/hgtagsfnodes1
839 tagsclient/.hg/cache/hgtagsfnodes1: size=96
839 tagsclient/.hg/cache/hgtagsfnodes1: size=96
840 0000: 96 ee 1d 73 00 00 00 00 00 00 00 00 00 00 00 00 |...s............|
840 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
841 0010: 00 00 00 00 00 00 00 00 c4 da b0 c2 94 65 e1 c6 |.............e..|
841 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
842 0020: 0d f7 f0 dd 32 04 ea 57 78 c8 97 97 79 fc d5 95 |....2..Wx...y...|
842 0020: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
843 0030: f6 3c c8 fe 94 65 e1 c6 0d f7 f0 dd 32 04 ea 57 |.<...e......2..W|
843 0030: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
844 0040: 78 c8 97 97 79 fc d5 95 40 f0 35 8c 19 e0 a7 d3 |x...y...@.5.....|
844 0040: ff ff ff ff ff ff ff ff 40 f0 35 8c 19 e0 a7 d3 |........@.5.....|
845 0050: 8a 5c 6a 82 4d cf fb a5 87 d0 2f a3 1e 4f 2f 8a |.\j.M...../..O/.|
845 0050: 8a 5c 6a 82 4d cf fb a5 87 d0 2f a3 1e 4f 2f 8a |.\j.M...../..O/.|
846
846
847 Check that the bundle includes cache data
847 Check that the bundle includes cache data
848
848
849 $ hg -R tagsclient bundle --all ./test-cache-in-bundle-all-rev.hg
849 $ hg -R tagsclient bundle --all ./test-cache-in-bundle-all-rev.hg
850 4 changesets found
850 4 changesets found
851 $ hg debugbundle ./test-cache-in-bundle-all-rev.hg
851 $ hg debugbundle ./test-cache-in-bundle-all-rev.hg
852 Stream params: {Compression: BZ}
852 Stream params: {Compression: BZ}
853 changegroup -- {nbchanges: 4, version: 02} (mandatory: True)
853 changegroup -- {nbchanges: 4, version: 02} (mandatory: True)
854 96ee1d7354c4ad7372047672c36a1f561e3a6a4c
854 96ee1d7354c4ad7372047672c36a1f561e3a6a4c
855 c4dab0c2fd337eb9191f80c3024830a4889a8f34
855 c4dab0c2fd337eb9191f80c3024830a4889a8f34
856 f63cc8fe54e4d326f8d692805d70e092f851ddb1
856 f63cc8fe54e4d326f8d692805d70e092f851ddb1
857 40f0358cb314c824a5929ee527308d90e023bc10
857 40f0358cb314c824a5929ee527308d90e023bc10
858 hgtagsfnodes -- {} (mandatory: True)
858 hgtagsfnodes -- {} (mandatory: True)
859 cache:rev-branch-cache -- {} (mandatory: False)
859 cache:rev-branch-cache -- {} (mandatory: False)
860
860
861 Check that local clone includes cache data
861 Check that local clone includes cache data
862
862
863 $ hg clone tagsclient tags-local-clone
863 $ hg clone tagsclient tags-local-clone
864 updating to branch default
864 updating to branch default
865 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
865 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
866 $ (cd tags-local-clone/.hg/cache/; ls -1 tag*)
866 $ (cd tags-local-clone/.hg/cache/; ls -1 tag*)
867 tags2
867 tags2
868 tags2-served
868 tags2-served
869 tags2-visible
869 tags2-visible
870
870
871 Avoid writing logs on trying to delete an already deleted tag
871 Avoid writing logs on trying to delete an already deleted tag
872 $ hg init issue5752
872 $ hg init issue5752
873 $ cd issue5752
873 $ cd issue5752
874 $ echo > a
874 $ echo > a
875 $ hg commit -Am 'add a'
875 $ hg commit -Am 'add a'
876 adding a
876 adding a
877 $ hg tag a
877 $ hg tag a
878 $ hg tags
878 $ hg tags
879 tip 1:bd7ee4f3939b
879 tip 1:bd7ee4f3939b
880 a 0:a8a82d372bb3
880 a 0:a8a82d372bb3
881 $ hg log
881 $ hg log
882 changeset: 1:bd7ee4f3939b
882 changeset: 1:bd7ee4f3939b
883 tag: tip
883 tag: tip
884 user: test
884 user: test
885 date: Thu Jan 01 00:00:00 1970 +0000
885 date: Thu Jan 01 00:00:00 1970 +0000
886 summary: Added tag a for changeset a8a82d372bb3
886 summary: Added tag a for changeset a8a82d372bb3
887
887
888 changeset: 0:a8a82d372bb3
888 changeset: 0:a8a82d372bb3
889 tag: a
889 tag: a
890 user: test
890 user: test
891 date: Thu Jan 01 00:00:00 1970 +0000
891 date: Thu Jan 01 00:00:00 1970 +0000
892 summary: add a
892 summary: add a
893
893
894 $ hg tag --remove a
894 $ hg tag --remove a
895 $ hg log
895 $ hg log
896 changeset: 2:e7feacc7ec9e
896 changeset: 2:e7feacc7ec9e
897 tag: tip
897 tag: tip
898 user: test
898 user: test
899 date: Thu Jan 01 00:00:00 1970 +0000
899 date: Thu Jan 01 00:00:00 1970 +0000
900 summary: Removed tag a
900 summary: Removed tag a
901
901
902 changeset: 1:bd7ee4f3939b
902 changeset: 1:bd7ee4f3939b
903 user: test
903 user: test
904 date: Thu Jan 01 00:00:00 1970 +0000
904 date: Thu Jan 01 00:00:00 1970 +0000
905 summary: Added tag a for changeset a8a82d372bb3
905 summary: Added tag a for changeset a8a82d372bb3
906
906
907 changeset: 0:a8a82d372bb3
907 changeset: 0:a8a82d372bb3
908 user: test
908 user: test
909 date: Thu Jan 01 00:00:00 1970 +0000
909 date: Thu Jan 01 00:00:00 1970 +0000
910 summary: add a
910 summary: add a
911
911
912 $ hg tag --remove a
912 $ hg tag --remove a
913 abort: tag 'a' is already removed
913 abort: tag 'a' is already removed
914 [10]
914 [10]
915 $ hg log
915 $ hg log
916 changeset: 2:e7feacc7ec9e
916 changeset: 2:e7feacc7ec9e
917 tag: tip
917 tag: tip
918 user: test
918 user: test
919 date: Thu Jan 01 00:00:00 1970 +0000
919 date: Thu Jan 01 00:00:00 1970 +0000
920 summary: Removed tag a
920 summary: Removed tag a
921
921
922 changeset: 1:bd7ee4f3939b
922 changeset: 1:bd7ee4f3939b
923 user: test
923 user: test
924 date: Thu Jan 01 00:00:00 1970 +0000
924 date: Thu Jan 01 00:00:00 1970 +0000
925 summary: Added tag a for changeset a8a82d372bb3
925 summary: Added tag a for changeset a8a82d372bb3
926
926
927 changeset: 0:a8a82d372bb3
927 changeset: 0:a8a82d372bb3
928 user: test
928 user: test
929 date: Thu Jan 01 00:00:00 1970 +0000
929 date: Thu Jan 01 00:00:00 1970 +0000
930 summary: add a
930 summary: add a
931
931
932 $ cat .hgtags
932 $ cat .hgtags
933 a8a82d372bb35b42ff736e74f07c23bcd99c371f a
933 a8a82d372bb35b42ff736e74f07c23bcd99c371f a
934 a8a82d372bb35b42ff736e74f07c23bcd99c371f a
934 a8a82d372bb35b42ff736e74f07c23bcd99c371f a
935 0000000000000000000000000000000000000000 a
935 0000000000000000000000000000000000000000 a
@@ -1,903 +1,903 b''
1 $ cat << EOF >> $HGRCPATH
1 $ cat << EOF >> $HGRCPATH
2 > [ui]
2 > [ui]
3 > ssh="$PYTHON" "$TESTDIR/dummyssh"
3 > ssh="$PYTHON" "$TESTDIR/dummyssh"
4 > EOF
4 > EOF
5
5
6 Set up repo
6 Set up repo
7
7
8 $ hg --config experimental.treemanifest=True init repo
8 $ hg --config experimental.treemanifest=True init repo
9 $ cd repo
9 $ cd repo
10
10
11 Requirements get set on init
11 Requirements get set on init
12
12
13 $ grep treemanifest .hg/requires
13 $ grep treemanifest .hg/requires
14 treemanifest
14 treemanifest
15
15
16 Without directories, looks like any other repo
16 Without directories, looks like any other repo
17
17
18 $ echo 0 > a
18 $ echo 0 > a
19 $ echo 0 > b
19 $ echo 0 > b
20 $ hg ci -Aqm initial
20 $ hg ci -Aqm initial
21 $ hg debugdata -m 0
21 $ hg debugdata -m 0
22 a\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
22 a\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
23 b\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
23 b\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
24
24
25 Submanifest is stored in separate revlog
25 Submanifest is stored in separate revlog
26
26
27 $ mkdir dir1
27 $ mkdir dir1
28 $ echo 1 > dir1/a
28 $ echo 1 > dir1/a
29 $ echo 1 > dir1/b
29 $ echo 1 > dir1/b
30 $ echo 1 > e
30 $ echo 1 > e
31 $ hg ci -Aqm 'add dir1'
31 $ hg ci -Aqm 'add dir1'
32 $ hg debugdata -m 1
32 $ hg debugdata -m 1
33 a\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
33 a\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
34 b\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
34 b\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
35 dir1\x008b3ffd73f901e83304c83d33132c8e774ceac44et (esc)
35 dir1\x008b3ffd73f901e83304c83d33132c8e774ceac44et (esc)
36 e\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc)
36 e\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc)
37 $ hg debugdata --dir dir1 0
37 $ hg debugdata --dir dir1 0
38 a\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc)
38 a\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc)
39 b\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc)
39 b\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc)
40
40
41 Can add nested directories
41 Can add nested directories
42
42
43 $ mkdir dir1/dir1
43 $ mkdir dir1/dir1
44 $ echo 2 > dir1/dir1/a
44 $ echo 2 > dir1/dir1/a
45 $ echo 2 > dir1/dir1/b
45 $ echo 2 > dir1/dir1/b
46 $ mkdir dir1/dir2
46 $ mkdir dir1/dir2
47 $ echo 2 > dir1/dir2/a
47 $ echo 2 > dir1/dir2/a
48 $ echo 2 > dir1/dir2/b
48 $ echo 2 > dir1/dir2/b
49 $ hg ci -Aqm 'add dir1/dir1'
49 $ hg ci -Aqm 'add dir1/dir1'
50 $ hg files -r .
50 $ hg files -r .
51 a
51 a
52 b
52 b
53 dir1/a
53 dir1/a
54 dir1/b
54 dir1/b
55 dir1/dir1/a
55 dir1/dir1/a
56 dir1/dir1/b
56 dir1/dir1/b
57 dir1/dir2/a
57 dir1/dir2/a
58 dir1/dir2/b
58 dir1/dir2/b
59 e
59 e
60
60
61 The manifest command works
61 The manifest command works
62
62
63 $ hg manifest
63 $ hg manifest
64 a
64 a
65 b
65 b
66 dir1/a
66 dir1/a
67 dir1/b
67 dir1/b
68 dir1/dir1/a
68 dir1/dir1/a
69 dir1/dir1/b
69 dir1/dir1/b
70 dir1/dir2/a
70 dir1/dir2/a
71 dir1/dir2/b
71 dir1/dir2/b
72 e
72 e
73
73
74 Revision is not created for unchanged directory
74 Revision is not created for unchanged directory
75
75
76 $ mkdir dir2
76 $ mkdir dir2
77 $ echo 3 > dir2/a
77 $ echo 3 > dir2/a
78 $ hg add dir2
78 $ hg add dir2
79 adding dir2/a
79 adding dir2/a
80 $ hg debugindex --dir dir1 > before
80 $ hg debugindex --dir dir1 > before
81 $ hg ci -qm 'add dir2'
81 $ hg ci -qm 'add dir2'
82 $ hg debugindex --dir dir1 > after
82 $ hg debugindex --dir dir1 > after
83 $ diff before after
83 $ diff before after
84 $ rm before after
84 $ rm before after
85
85
86 Removing directory does not create an revlog entry
86 Removing directory does not create an revlog entry
87
87
88 $ hg rm dir1/dir1
88 $ hg rm dir1/dir1
89 removing dir1/dir1/a
89 removing dir1/dir1/a
90 removing dir1/dir1/b
90 removing dir1/dir1/b
91 $ hg debugindex --dir dir1/dir1 > before
91 $ hg debugindex --dir dir1/dir1 > before
92 $ hg ci -qm 'remove dir1/dir1'
92 $ hg ci -qm 'remove dir1/dir1'
93 $ hg debugindex --dir dir1/dir1 > after
93 $ hg debugindex --dir dir1/dir1 > after
94 $ diff before after
94 $ diff before after
95 $ rm before after
95 $ rm before after
96
96
97 Check that hg files (calls treemanifest.walk()) works
97 Check that hg files (calls treemanifest.walk()) works
98 without loading all directory revlogs
98 without loading all directory revlogs
99
99
100 $ hg co 'desc("add dir2")'
100 $ hg co 'desc("add dir2")'
101 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
101 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
102 $ mv .hg/store/meta/dir2 .hg/store/meta/dir2-backup
102 $ mv .hg/store/meta/dir2 .hg/store/meta/dir2-backup
103 $ hg files -r . dir1
103 $ hg files -r . dir1
104 dir1/a
104 dir1/a
105 dir1/b
105 dir1/b
106 dir1/dir1/a
106 dir1/dir1/a
107 dir1/dir1/b
107 dir1/dir1/b
108 dir1/dir2/a
108 dir1/dir2/a
109 dir1/dir2/b
109 dir1/dir2/b
110
110
111 Check that status between revisions works (calls treemanifest.matches())
111 Check that status between revisions works (calls treemanifest.matches())
112 without loading all directory revlogs
112 without loading all directory revlogs
113
113
114 $ hg status --rev 'desc("add dir1")' --rev . dir1
114 $ hg status --rev 'desc("add dir1")' --rev . dir1
115 A dir1/dir1/a
115 A dir1/dir1/a
116 A dir1/dir1/b
116 A dir1/dir1/b
117 A dir1/dir2/a
117 A dir1/dir2/a
118 A dir1/dir2/b
118 A dir1/dir2/b
119 $ mv .hg/store/meta/dir2-backup .hg/store/meta/dir2
119 $ mv .hg/store/meta/dir2-backup .hg/store/meta/dir2
120
120
121 Merge creates 2-parent revision of directory revlog
121 Merge creates 2-parent revision of directory revlog
122
122
123 $ echo 5 > dir1/a
123 $ echo 5 > dir1/a
124 $ hg ci -Aqm 'modify dir1/a'
124 $ hg ci -Aqm 'modify dir1/a'
125 $ hg co '.^'
125 $ hg co '.^'
126 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
126 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
127 $ echo 6 > dir1/b
127 $ echo 6 > dir1/b
128 $ hg ci -Aqm 'modify dir1/b'
128 $ hg ci -Aqm 'modify dir1/b'
129 $ hg merge 'desc("modify dir1/a")'
129 $ hg merge 'desc("modify dir1/a")'
130 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
130 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
131 (branch merge, don't forget to commit)
131 (branch merge, don't forget to commit)
132 $ hg ci -m 'conflict-free merge involving dir1/'
132 $ hg ci -m 'conflict-free merge involving dir1/'
133 $ cat dir1/a
133 $ cat dir1/a
134 5
134 5
135 $ cat dir1/b
135 $ cat dir1/b
136 6
136 6
137 $ hg debugindex --dir dir1
137 $ hg debugindex --dir dir1
138 rev linkrev nodeid p1 p2
138 rev linkrev nodeid p1 p2
139 0 1 8b3ffd73f901 000000000000 000000000000
139 0 1 8b3ffd73f901 000000000000 000000000000
140 1 2 68e9d057c5a8 8b3ffd73f901 000000000000
140 1 2 68e9d057c5a8 8b3ffd73f901 000000000000
141 2 4 4698198d2624 68e9d057c5a8 000000000000
141 2 4 4698198d2624 68e9d057c5a8 000000000000
142 3 5 44844058ccce 68e9d057c5a8 000000000000
142 3 5 44844058ccce 68e9d057c5a8 000000000000
143 4 6 bf3d9b744927 68e9d057c5a8 000000000000
143 4 6 bf3d9b744927 68e9d057c5a8 000000000000
144 5 7 dde7c0af2a03 bf3d9b744927 44844058ccce
144 5 7 dde7c0af2a03 bf3d9b744927 44844058ccce
145
145
146 Merge keeping directory from parent 1 does not create revlog entry. (Note that
146 Merge keeping directory from parent 1 does not create revlog entry. (Note that
147 dir1's manifest does change, but only because dir1/a's filelog changes.)
147 dir1's manifest does change, but only because dir1/a's filelog changes.)
148
148
149 $ hg co 'desc("add dir2")'
149 $ hg co 'desc("add dir2")'
150 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
150 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
151 $ echo 8 > dir2/a
151 $ echo 8 > dir2/a
152 $ hg ci -m 'modify dir2/a'
152 $ hg ci -m 'modify dir2/a'
153 created new head
153 created new head
154
154
155 $ hg debugindex --dir dir2 > before
155 $ hg debugindex --dir dir2 > before
156 $ hg merge 'desc("modify dir1/a")'
156 $ hg merge 'desc("modify dir1/a")'
157 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
157 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
158 (branch merge, don't forget to commit)
158 (branch merge, don't forget to commit)
159 $ hg revert -r 'desc("modify dir2/a")' .
159 $ hg revert -r 'desc("modify dir2/a")' .
160 reverting dir1/a
160 reverting dir1/a
161 $ hg ci -m 'merge, keeping parent 1'
161 $ hg ci -m 'merge, keeping parent 1'
162 $ hg debugindex --dir dir2 > after
162 $ hg debugindex --dir dir2 > after
163 $ diff before after
163 $ diff before after
164 $ rm before after
164 $ rm before after
165
165
166 Merge keeping directory from parent 2 does not create revlog entry. (Note that
166 Merge keeping directory from parent 2 does not create revlog entry. (Note that
167 dir2's manifest does change, but only because dir2/a's filelog changes.)
167 dir2's manifest does change, but only because dir2/a's filelog changes.)
168
168
169 $ hg co 'desc("modify dir2/a")'
169 $ hg co 'desc("modify dir2/a")'
170 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
170 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
171 $ hg debugindex --dir dir1 > before
171 $ hg debugindex --dir dir1 > before
172 $ hg merge 'desc("modify dir1/a")'
172 $ hg merge 'desc("modify dir1/a")'
173 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
173 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
174 (branch merge, don't forget to commit)
174 (branch merge, don't forget to commit)
175 $ hg revert -r 'desc("modify dir1/a")' .
175 $ hg revert -r 'desc("modify dir1/a")' .
176 reverting dir2/a
176 reverting dir2/a
177 $ hg ci -m 'merge, keeping parent 2'
177 $ hg ci -m 'merge, keeping parent 2'
178 created new head
178 created new head
179 $ hg debugindex --dir dir1 > after
179 $ hg debugindex --dir dir1 > after
180 $ diff before after
180 $ diff before after
181 $ rm before after
181 $ rm before after
182
182
183 Create flat source repo for tests with mixed flat/tree manifests
183 Create flat source repo for tests with mixed flat/tree manifests
184
184
185 $ cd ..
185 $ cd ..
186 $ hg init repo-flat
186 $ hg init repo-flat
187 $ cd repo-flat
187 $ cd repo-flat
188
188
189 Create a few commits with flat manifest
189 Create a few commits with flat manifest
190
190
191 $ echo 0 > a
191 $ echo 0 > a
192 $ echo 0 > b
192 $ echo 0 > b
193 $ echo 0 > e
193 $ echo 0 > e
194 $ for d in dir1 dir1/dir1 dir1/dir2 dir2
194 $ for d in dir1 dir1/dir1 dir1/dir2 dir2
195 > do
195 > do
196 > mkdir $d
196 > mkdir $d
197 > echo 0 > $d/a
197 > echo 0 > $d/a
198 > echo 0 > $d/b
198 > echo 0 > $d/b
199 > done
199 > done
200 $ hg ci -Aqm initial
200 $ hg ci -Aqm initial
201
201
202 $ echo 1 > a
202 $ echo 1 > a
203 $ echo 1 > dir1/a
203 $ echo 1 > dir1/a
204 $ echo 1 > dir1/dir1/a
204 $ echo 1 > dir1/dir1/a
205 $ hg ci -Aqm 'modify on branch 1'
205 $ hg ci -Aqm 'modify on branch 1'
206
206
207 $ hg co 0
207 $ hg co 0
208 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
208 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
209 $ echo 2 > b
209 $ echo 2 > b
210 $ echo 2 > dir1/b
210 $ echo 2 > dir1/b
211 $ echo 2 > dir1/dir1/b
211 $ echo 2 > dir1/dir1/b
212 $ hg ci -Aqm 'modify on branch 2'
212 $ hg ci -Aqm 'modify on branch 2'
213
213
214 $ hg merge 1
214 $ hg merge 1
215 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
215 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
216 (branch merge, don't forget to commit)
216 (branch merge, don't forget to commit)
217 $ hg ci -m 'merge of flat manifests to new flat manifest'
217 $ hg ci -m 'merge of flat manifests to new flat manifest'
218
218
219 $ hg serve -p $HGPORT -d --pid-file=hg.pid --errorlog=errors.log
219 $ hg serve -p $HGPORT -d --pid-file=hg.pid --errorlog=errors.log
220 $ cat hg.pid >> $DAEMON_PIDS
220 $ cat hg.pid >> $DAEMON_PIDS
221
221
222 Create clone with tree manifests enabled
222 Create clone with tree manifests enabled
223
223
224 $ cd ..
224 $ cd ..
225 $ hg clone --config experimental.treemanifest=1 \
225 $ hg clone --config experimental.treemanifest=1 \
226 > http://localhost:$HGPORT repo-mixed -r 1
226 > http://localhost:$HGPORT repo-mixed -r 1
227 adding changesets
227 adding changesets
228 adding manifests
228 adding manifests
229 adding file changes
229 adding file changes
230 added 2 changesets with 14 changes to 11 files
230 added 2 changesets with 14 changes to 11 files
231 new changesets 5b02a3e8db7e:581ef6037d8b
231 new changesets 5b02a3e8db7e:581ef6037d8b
232 updating to branch default
232 updating to branch default
233 11 files updated, 0 files merged, 0 files removed, 0 files unresolved
233 11 files updated, 0 files merged, 0 files removed, 0 files unresolved
234 $ cd repo-mixed
234 $ cd repo-mixed
235 $ test -d .hg/store/meta
235 $ test -d .hg/store/meta
236 [1]
236 [1]
237 $ grep treemanifest .hg/requires
237 $ grep treemanifest .hg/requires
238 treemanifest
238 treemanifest
239
239
240 Should be possible to push updates from flat to tree manifest repo
240 Should be possible to push updates from flat to tree manifest repo
241
241
242 $ hg -R ../repo-flat push ssh://user@dummy/repo-mixed
242 $ hg -R ../repo-flat push ssh://user@dummy/repo-mixed
243 pushing to ssh://user@dummy/repo-mixed
243 pushing to ssh://user@dummy/repo-mixed
244 searching for changes
244 searching for changes
245 remote: adding changesets
245 remote: adding changesets
246 remote: adding manifests
246 remote: adding manifests
247 remote: adding file changes
247 remote: adding file changes
248 remote: added 2 changesets with 3 changes to 3 files
248 remote: added 2 changesets with 3 changes to 3 files
249
249
250 Commit should store revlog per directory
250 Commit should store revlog per directory
251
251
252 $ hg co 1
252 $ hg co 1
253 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
253 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
254 $ echo 3 > a
254 $ echo 3 > a
255 $ echo 3 > dir1/a
255 $ echo 3 > dir1/a
256 $ echo 3 > dir1/dir1/a
256 $ echo 3 > dir1/dir1/a
257 $ hg ci -m 'first tree'
257 $ hg ci -m 'first tree'
258 created new head
258 created new head
259 $ find .hg/store/meta | sort
259 $ find .hg/store/meta | sort
260 .hg/store/meta
260 .hg/store/meta
261 .hg/store/meta/dir1
261 .hg/store/meta/dir1
262 .hg/store/meta/dir1/00manifest.i
262 .hg/store/meta/dir1/00manifest.i
263 .hg/store/meta/dir1/dir1
263 .hg/store/meta/dir1/dir1
264 .hg/store/meta/dir1/dir1/00manifest.i
264 .hg/store/meta/dir1/dir1/00manifest.i
265 .hg/store/meta/dir1/dir2
265 .hg/store/meta/dir1/dir2
266 .hg/store/meta/dir1/dir2/00manifest.i
266 .hg/store/meta/dir1/dir2/00manifest.i
267 .hg/store/meta/dir2
267 .hg/store/meta/dir2
268 .hg/store/meta/dir2/00manifest.i
268 .hg/store/meta/dir2/00manifest.i
269
269
270 Merge of two trees
270 Merge of two trees
271
271
272 $ hg co 2
272 $ hg co 2
273 6 files updated, 0 files merged, 0 files removed, 0 files unresolved
273 6 files updated, 0 files merged, 0 files removed, 0 files unresolved
274 $ hg merge 1
274 $ hg merge 1
275 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
275 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
276 (branch merge, don't forget to commit)
276 (branch merge, don't forget to commit)
277 $ hg ci -m 'merge of flat manifests to new tree manifest'
277 $ hg ci -m 'merge of flat manifests to new tree manifest'
278 created new head
278 created new head
279 $ hg diff -r 3
279 $ hg diff -r 3
280
280
281 Parent of tree root manifest should be flat manifest, and two for merge
281 Parent of tree root manifest should be flat manifest, and two for merge
282
282
283 $ hg debugindex -m
283 $ hg debugindex -m
284 rev linkrev nodeid p1 p2
284 rev linkrev nodeid p1 p2
285 0 0 40536115ed9e 000000000000 000000000000
285 0 0 40536115ed9e 000000000000 000000000000
286 1 1 f3376063c255 40536115ed9e 000000000000
286 1 1 f3376063c255 40536115ed9e 000000000000
287 2 2 5d9b9da231a2 40536115ed9e 000000000000
287 2 2 5d9b9da231a2 40536115ed9e 000000000000
288 3 3 d17d663cbd8a 5d9b9da231a2 f3376063c255
288 3 3 d17d663cbd8a 5d9b9da231a2 f3376063c255
289 4 4 51e32a8c60ee f3376063c255 000000000000
289 4 4 51e32a8c60ee f3376063c255 000000000000
290 5 5 cc5baa78b230 5d9b9da231a2 f3376063c255
290 5 5 cc5baa78b230 5d9b9da231a2 f3376063c255
291
291
292
292
293 Status across flat/tree boundary should work
293 Status across flat/tree boundary should work
294
294
295 $ hg status --rev '.^' --rev .
295 $ hg status --rev '.^' --rev .
296 M a
296 M a
297 M dir1/a
297 M dir1/a
298 M dir1/dir1/a
298 M dir1/dir1/a
299
299
300
300
301 Turning off treemanifest config has no effect
301 Turning off treemanifest config has no effect
302
302
303 $ hg debugindex --dir dir1
303 $ hg debugindex --dir dir1
304 rev linkrev nodeid p1 p2
304 rev linkrev nodeid p1 p2
305 0 4 064927a0648a 000000000000 000000000000
305 0 4 064927a0648a 000000000000 000000000000
306 1 5 25ecb8cb8618 000000000000 000000000000
306 1 5 25ecb8cb8618 000000000000 000000000000
307 $ echo 2 > dir1/a
307 $ echo 2 > dir1/a
308 $ hg --config experimental.treemanifest=False ci -qm 'modify dir1/a'
308 $ hg --config experimental.treemanifest=False ci -qm 'modify dir1/a'
309 $ hg debugindex --dir dir1
309 $ hg debugindex --dir dir1
310 rev linkrev nodeid p1 p2
310 rev linkrev nodeid p1 p2
311 0 4 064927a0648a 000000000000 000000000000
311 0 4 064927a0648a 000000000000 000000000000
312 1 5 25ecb8cb8618 000000000000 000000000000
312 1 5 25ecb8cb8618 000000000000 000000000000
313 2 6 5b16163a30c6 25ecb8cb8618 000000000000
313 2 6 5b16163a30c6 25ecb8cb8618 000000000000
314
314
315 Stripping and recovering changes should work
315 Stripping and recovering changes should work
316
316
317 $ hg st --change tip
317 $ hg st --change tip
318 M dir1/a
318 M dir1/a
319 $ hg --config extensions.strip= strip tip
319 $ hg --config extensions.strip= strip tip
320 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
320 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
321 saved backup bundle to $TESTTMP/repo-mixed/.hg/strip-backup/51cfd7b1e13b-78a2f3ed-backup.hg
321 saved backup bundle to $TESTTMP/repo-mixed/.hg/strip-backup/51cfd7b1e13b-78a2f3ed-backup.hg
322 $ hg debugindex --dir dir1
322 $ hg debugindex --dir dir1
323 rev linkrev nodeid p1 p2
323 rev linkrev nodeid p1 p2
324 0 4 064927a0648a 000000000000 000000000000
324 0 4 064927a0648a 000000000000 000000000000
325 1 5 25ecb8cb8618 000000000000 000000000000
325 1 5 25ecb8cb8618 000000000000 000000000000
326
326
327 #if repobundlerepo
327 #if repobundlerepo
328 $ hg incoming .hg/strip-backup/*
328 $ hg incoming .hg/strip-backup/*
329 comparing with .hg/strip-backup/*-backup.hg (glob)
329 comparing with .hg/strip-backup/*-backup.hg (glob)
330 searching for changes
330 searching for changes
331 changeset: 6:51cfd7b1e13b
331 changeset: 6:51cfd7b1e13b
332 tag: tip
332 tag: tip
333 user: test
333 user: test
334 date: Thu Jan 01 00:00:00 1970 +0000
334 date: Thu Jan 01 00:00:00 1970 +0000
335 summary: modify dir1/a
335 summary: modify dir1/a
336
336
337 #endif
337 #endif
338
338
339 $ hg unbundle .hg/strip-backup/*
339 $ hg unbundle .hg/strip-backup/*
340 adding changesets
340 adding changesets
341 adding manifests
341 adding manifests
342 adding file changes
342 adding file changes
343 added 1 changesets with 1 changes to 1 files
343 added 1 changesets with 1 changes to 1 files
344 new changesets 51cfd7b1e13b (1 drafts)
344 new changesets 51cfd7b1e13b (1 drafts)
345 (run 'hg update' to get a working copy)
345 (run 'hg update' to get a working copy)
346 $ hg --config extensions.strip= strip tip
346 $ hg --config extensions.strip= strip tip
347 saved backup bundle to $TESTTMP/repo-mixed/.hg/strip-backup/*-backup.hg (glob)
347 saved backup bundle to $TESTTMP/repo-mixed/.hg/strip-backup/*-backup.hg (glob)
348 $ hg unbundle -q .hg/strip-backup/*
348 $ hg unbundle -q .hg/strip-backup/*
349 $ hg debugindex --dir dir1
349 $ hg debugindex --dir dir1
350 rev linkrev nodeid p1 p2
350 rev linkrev nodeid p1 p2
351 0 4 064927a0648a 000000000000 000000000000
351 0 4 064927a0648a 000000000000 000000000000
352 1 5 25ecb8cb8618 000000000000 000000000000
352 1 5 25ecb8cb8618 000000000000 000000000000
353 2 6 5b16163a30c6 25ecb8cb8618 000000000000
353 2 6 5b16163a30c6 25ecb8cb8618 000000000000
354 $ hg st --change tip
354 $ hg st --change tip
355 M dir1/a
355 M dir1/a
356
356
357 Shelving and unshelving should work
357 Shelving and unshelving should work
358
358
359 $ echo foo >> dir1/a
359 $ echo foo >> dir1/a
360 $ hg shelve
360 $ hg shelve
361 shelved as default
361 shelved as default
362 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
362 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
363 $ hg unshelve
363 $ hg unshelve
364 unshelving change 'default'
364 unshelving change 'default'
365 $ hg diff --nodates
365 $ hg diff --nodates
366 diff -r 708a273da119 dir1/a
366 diff -r 708a273da119 dir1/a
367 --- a/dir1/a
367 --- a/dir1/a
368 +++ b/dir1/a
368 +++ b/dir1/a
369 @@ -1,1 +1,2 @@
369 @@ -1,1 +1,2 @@
370 1
370 1
371 +foo
371 +foo
372
372
373 Pushing from treemanifest repo to an empty repo makes that a treemanifest repo
373 Pushing from treemanifest repo to an empty repo makes that a treemanifest repo
374
374
375 $ cd ..
375 $ cd ..
376 $ hg init empty-repo
376 $ hg init empty-repo
377 $ cat << EOF >> empty-repo/.hg/hgrc
377 $ cat << EOF >> empty-repo/.hg/hgrc
378 > [experimental]
378 > [experimental]
379 > changegroup3=yes
379 > changegroup3=yes
380 > EOF
380 > EOF
381 $ grep treemanifest empty-repo/.hg/requires
381 $ grep treemanifest empty-repo/.hg/requires
382 [1]
382 [1]
383 $ hg push -R repo -r 0 empty-repo
383 $ hg push -R repo -r 0 empty-repo
384 pushing to empty-repo
384 pushing to empty-repo
385 searching for changes
385 searching for changes
386 adding changesets
386 adding changesets
387 adding manifests
387 adding manifests
388 adding file changes
388 adding file changes
389 added 1 changesets with 2 changes to 2 files
389 added 1 changesets with 2 changes to 2 files
390 $ grep treemanifest empty-repo/.hg/requires
390 $ grep treemanifest empty-repo/.hg/requires
391 treemanifest
391 treemanifest
392
392
393 Pushing to an empty repo works
393 Pushing to an empty repo works
394
394
395 $ hg --config experimental.treemanifest=1 init clone
395 $ hg --config experimental.treemanifest=1 init clone
396 $ grep treemanifest clone/.hg/requires
396 $ grep treemanifest clone/.hg/requires
397 treemanifest
397 treemanifest
398 $ hg push -R repo clone
398 $ hg push -R repo clone
399 pushing to clone
399 pushing to clone
400 searching for changes
400 searching for changes
401 adding changesets
401 adding changesets
402 adding manifests
402 adding manifests
403 adding file changes
403 adding file changes
404 added 11 changesets with 15 changes to 10 files (+3 heads)
404 added 11 changesets with 15 changes to 10 files (+3 heads)
405 $ grep treemanifest clone/.hg/requires
405 $ grep treemanifest clone/.hg/requires
406 treemanifest
406 treemanifest
407 $ hg -R clone verify
407 $ hg -R clone verify
408 checking changesets
408 checking changesets
409 checking manifests
409 checking manifests
410 checking directory manifests
410 checking directory manifests
411 crosschecking files in changesets and manifests
411 crosschecking files in changesets and manifests
412 checking files
412 checking files
413 checked 11 changesets with 15 changes to 10 files
413 checked 11 changesets with 15 changes to 10 files
414
414
415 Create deeper repo with tree manifests.
415 Create deeper repo with tree manifests.
416
416
417 $ hg --config experimental.treemanifest=True init deeprepo
417 $ hg --config experimental.treemanifest=True init deeprepo
418 $ cd deeprepo
418 $ cd deeprepo
419
419
420 $ mkdir .A
420 $ mkdir .A
421 $ mkdir b
421 $ mkdir b
422 $ mkdir b/bar
422 $ mkdir b/bar
423 $ mkdir b/bar/orange
423 $ mkdir b/bar/orange
424 $ mkdir b/bar/orange/fly
424 $ mkdir b/bar/orange/fly
425 $ mkdir b/foo
425 $ mkdir b/foo
426 $ mkdir b/foo/apple
426 $ mkdir b/foo/apple
427 $ mkdir b/foo/apple/bees
427 $ mkdir b/foo/apple/bees
428
428
429 $ touch .A/one.txt
429 $ touch .A/one.txt
430 $ touch .A/two.txt
430 $ touch .A/two.txt
431 $ touch b/bar/fruits.txt
431 $ touch b/bar/fruits.txt
432 $ touch b/bar/orange/fly/gnat.py
432 $ touch b/bar/orange/fly/gnat.py
433 $ touch b/bar/orange/fly/housefly.txt
433 $ touch b/bar/orange/fly/housefly.txt
434 $ touch b/foo/apple/bees/flower.py
434 $ touch b/foo/apple/bees/flower.py
435 $ touch c.txt
435 $ touch c.txt
436 $ touch d.py
436 $ touch d.py
437
437
438 $ hg ci -Aqm 'initial'
438 $ hg ci -Aqm 'initial'
439
439
440 $ echo >> .A/one.txt
440 $ echo >> .A/one.txt
441 $ echo >> .A/two.txt
441 $ echo >> .A/two.txt
442 $ echo >> b/bar/fruits.txt
442 $ echo >> b/bar/fruits.txt
443 $ echo >> b/bar/orange/fly/gnat.py
443 $ echo >> b/bar/orange/fly/gnat.py
444 $ echo >> b/bar/orange/fly/housefly.txt
444 $ echo >> b/bar/orange/fly/housefly.txt
445 $ echo >> b/foo/apple/bees/flower.py
445 $ echo >> b/foo/apple/bees/flower.py
446 $ echo >> c.txt
446 $ echo >> c.txt
447 $ echo >> d.py
447 $ echo >> d.py
448 $ hg ci -Aqm 'second'
448 $ hg ci -Aqm 'second'
449
449
450 We'll see that visitdir works by removing some treemanifest revlogs and running
450 We'll see that visitdir works by removing some treemanifest revlogs and running
451 the files command with various parameters.
451 the files command with various parameters.
452
452
453 Test files from the root.
453 Test files from the root.
454
454
455 $ hg files -r .
455 $ hg files -r .
456 .A/one.txt
456 .A/one.txt
457 .A/two.txt
457 .A/two.txt
458 b/bar/fruits.txt
458 b/bar/fruits.txt
459 b/bar/orange/fly/gnat.py
459 b/bar/orange/fly/gnat.py
460 b/bar/orange/fly/housefly.txt
460 b/bar/orange/fly/housefly.txt
461 b/foo/apple/bees/flower.py
461 b/foo/apple/bees/flower.py
462 c.txt
462 c.txt
463 d.py
463 d.py
464
464
465 Excludes with a glob should not exclude everything from the glob's root
465 Excludes with a glob should not exclude everything from the glob's root
466
466
467 $ hg files -r . -X 'b/fo?' b
467 $ hg files -r . -X 'b/fo?' b
468 b/bar/fruits.txt
468 b/bar/fruits.txt
469 b/bar/orange/fly/gnat.py
469 b/bar/orange/fly/gnat.py
470 b/bar/orange/fly/housefly.txt
470 b/bar/orange/fly/housefly.txt
471 $ cp -R .hg/store .hg/store-copy
471 $ cp -R .hg/store .hg/store-copy
472
472
473 Test files for a subdirectory.
473 Test files for a subdirectory.
474
474
475 #if reporevlogstore
475 #if reporevlogstore
476 $ rm -r .hg/store/meta/~2e_a
476 $ rm -r .hg/store/meta/~2e_a
477 #endif
477 #endif
478 #if reposimplestore
478 #if reposimplestore
479 $ rm -r .hg/store/meta/._a
479 $ rm -r .hg/store/meta/._a
480 #endif
480 #endif
481 $ hg files -r . b
481 $ hg files -r . b
482 b/bar/fruits.txt
482 b/bar/fruits.txt
483 b/bar/orange/fly/gnat.py
483 b/bar/orange/fly/gnat.py
484 b/bar/orange/fly/housefly.txt
484 b/bar/orange/fly/housefly.txt
485 b/foo/apple/bees/flower.py
485 b/foo/apple/bees/flower.py
486 $ hg diff -r '.^' -r . --stat b
486 $ hg diff -r '.^' -r . --stat b
487 b/bar/fruits.txt | 1 +
487 b/bar/fruits.txt | 1 +
488 b/bar/orange/fly/gnat.py | 1 +
488 b/bar/orange/fly/gnat.py | 1 +
489 b/bar/orange/fly/housefly.txt | 1 +
489 b/bar/orange/fly/housefly.txt | 1 +
490 b/foo/apple/bees/flower.py | 1 +
490 b/foo/apple/bees/flower.py | 1 +
491 4 files changed, 4 insertions(+), 0 deletions(-)
491 4 files changed, 4 insertions(+), 0 deletions(-)
492 $ cp -R .hg/store-copy/. .hg/store
492 $ cp -R .hg/store-copy/. .hg/store
493
493
494 Test files with just includes and excludes.
494 Test files with just includes and excludes.
495
495
496 #if reporevlogstore
496 #if reporevlogstore
497 $ rm -r .hg/store/meta/~2e_a
497 $ rm -r .hg/store/meta/~2e_a
498 #endif
498 #endif
499 #if reposimplestore
499 #if reposimplestore
500 $ rm -r .hg/store/meta/._a
500 $ rm -r .hg/store/meta/._a
501 #endif
501 #endif
502 $ rm -r .hg/store/meta/b/bar/orange/fly
502 $ rm -r .hg/store/meta/b/bar/orange/fly
503 $ rm -r .hg/store/meta/b/foo/apple/bees
503 $ rm -r .hg/store/meta/b/foo/apple/bees
504 $ hg files -r . -I path:b/bar -X path:b/bar/orange/fly -I path:b/foo -X path:b/foo/apple/bees
504 $ hg files -r . -I path:b/bar -X path:b/bar/orange/fly -I path:b/foo -X path:b/foo/apple/bees
505 b/bar/fruits.txt
505 b/bar/fruits.txt
506 $ hg diff -r '.^' -r . --stat -I path:b/bar -X path:b/bar/orange/fly -I path:b/foo -X path:b/foo/apple/bees
506 $ hg diff -r '.^' -r . --stat -I path:b/bar -X path:b/bar/orange/fly -I path:b/foo -X path:b/foo/apple/bees
507 b/bar/fruits.txt | 1 +
507 b/bar/fruits.txt | 1 +
508 1 files changed, 1 insertions(+), 0 deletions(-)
508 1 files changed, 1 insertions(+), 0 deletions(-)
509 $ cp -R .hg/store-copy/. .hg/store
509 $ cp -R .hg/store-copy/. .hg/store
510
510
511 Test files for a subdirectory, excluding a directory within it.
511 Test files for a subdirectory, excluding a directory within it.
512
512
513 #if reporevlogstore
513 #if reporevlogstore
514 $ rm -r .hg/store/meta/~2e_a
514 $ rm -r .hg/store/meta/~2e_a
515 #endif
515 #endif
516 #if reposimplestore
516 #if reposimplestore
517 $ rm -r .hg/store/meta/._a
517 $ rm -r .hg/store/meta/._a
518 #endif
518 #endif
519 $ rm -r .hg/store/meta/b/foo
519 $ rm -r .hg/store/meta/b/foo
520 $ hg files -r . -X path:b/foo b
520 $ hg files -r . -X path:b/foo b
521 b/bar/fruits.txt
521 b/bar/fruits.txt
522 b/bar/orange/fly/gnat.py
522 b/bar/orange/fly/gnat.py
523 b/bar/orange/fly/housefly.txt
523 b/bar/orange/fly/housefly.txt
524 $ hg diff -r '.^' -r . --stat -X path:b/foo b
524 $ hg diff -r '.^' -r . --stat -X path:b/foo b
525 b/bar/fruits.txt | 1 +
525 b/bar/fruits.txt | 1 +
526 b/bar/orange/fly/gnat.py | 1 +
526 b/bar/orange/fly/gnat.py | 1 +
527 b/bar/orange/fly/housefly.txt | 1 +
527 b/bar/orange/fly/housefly.txt | 1 +
528 3 files changed, 3 insertions(+), 0 deletions(-)
528 3 files changed, 3 insertions(+), 0 deletions(-)
529 $ cp -R .hg/store-copy/. .hg/store
529 $ cp -R .hg/store-copy/. .hg/store
530
530
531 Test files for a sub directory, including only a directory within it, and
531 Test files for a sub directory, including only a directory within it, and
532 including an unrelated directory.
532 including an unrelated directory.
533
533
534 #if reporevlogstore
534 #if reporevlogstore
535 $ rm -r .hg/store/meta/~2e_a
535 $ rm -r .hg/store/meta/~2e_a
536 #endif
536 #endif
537 #if reposimplestore
537 #if reposimplestore
538 $ rm -r .hg/store/meta/._a
538 $ rm -r .hg/store/meta/._a
539 #endif
539 #endif
540 $ rm -r .hg/store/meta/b/foo
540 $ rm -r .hg/store/meta/b/foo
541 $ hg files -r . -I path:b/bar/orange -I path:a b
541 $ hg files -r . -I path:b/bar/orange -I path:a b
542 b/bar/orange/fly/gnat.py
542 b/bar/orange/fly/gnat.py
543 b/bar/orange/fly/housefly.txt
543 b/bar/orange/fly/housefly.txt
544 $ hg diff -r '.^' -r . --stat -I path:b/bar/orange -I path:a b
544 $ hg diff -r '.^' -r . --stat -I path:b/bar/orange -I path:a b
545 b/bar/orange/fly/gnat.py | 1 +
545 b/bar/orange/fly/gnat.py | 1 +
546 b/bar/orange/fly/housefly.txt | 1 +
546 b/bar/orange/fly/housefly.txt | 1 +
547 2 files changed, 2 insertions(+), 0 deletions(-)
547 2 files changed, 2 insertions(+), 0 deletions(-)
548 $ cp -R .hg/store-copy/. .hg/store
548 $ cp -R .hg/store-copy/. .hg/store
549
549
550 Test files for a pattern, including a directory, and excluding a directory
550 Test files for a pattern, including a directory, and excluding a directory
551 within that.
551 within that.
552
552
553 #if reporevlogstore
553 #if reporevlogstore
554 $ rm -r .hg/store/meta/~2e_a
554 $ rm -r .hg/store/meta/~2e_a
555 #endif
555 #endif
556 #if reposimplestore
556 #if reposimplestore
557 $ rm -r .hg/store/meta/._a
557 $ rm -r .hg/store/meta/._a
558 #endif
558 #endif
559 $ rm -r .hg/store/meta/b/foo
559 $ rm -r .hg/store/meta/b/foo
560 $ rm -r .hg/store/meta/b/bar/orange
560 $ rm -r .hg/store/meta/b/bar/orange
561 $ hg files -r . glob:**.txt -I path:b/bar -X path:b/bar/orange
561 $ hg files -r . glob:**.txt -I path:b/bar -X path:b/bar/orange
562 b/bar/fruits.txt
562 b/bar/fruits.txt
563 $ hg diff -r '.^' -r . --stat glob:**.txt -I path:b/bar -X path:b/bar/orange
563 $ hg diff -r '.^' -r . --stat glob:**.txt -I path:b/bar -X path:b/bar/orange
564 b/bar/fruits.txt | 1 +
564 b/bar/fruits.txt | 1 +
565 1 files changed, 1 insertions(+), 0 deletions(-)
565 1 files changed, 1 insertions(+), 0 deletions(-)
566 $ cp -R .hg/store-copy/. .hg/store
566 $ cp -R .hg/store-copy/. .hg/store
567
567
568 Add some more changes to the deep repo
568 Add some more changes to the deep repo
569 $ echo narf >> b/bar/fruits.txt
569 $ echo narf >> b/bar/fruits.txt
570 $ hg ci -m narf
570 $ hg ci -m narf
571 $ echo troz >> b/bar/orange/fly/gnat.py
571 $ echo troz >> b/bar/orange/fly/gnat.py
572 $ hg ci -m troz
572 $ hg ci -m troz
573
573
574 Verify works
574 Verify works
575 $ hg verify
575 $ hg verify
576 checking changesets
576 checking changesets
577 checking manifests
577 checking manifests
578 checking directory manifests
578 checking directory manifests
579 crosschecking files in changesets and manifests
579 crosschecking files in changesets and manifests
580 checking files
580 checking files
581 checked 4 changesets with 18 changes to 8 files
581 checked 4 changesets with 18 changes to 8 files
582
582
583 #if repofncache
583 #if repofncache
584 Dirlogs are included in fncache
584 Dirlogs are included in fncache
585 $ grep meta/.A/00manifest.i .hg/store/fncache
585 $ grep meta/.A/00manifest.i .hg/store/fncache
586 meta/.A/00manifest.i
586 meta/.A/00manifest.i
587
587
588 Rebuilt fncache includes dirlogs
588 Rebuilt fncache includes dirlogs
589 $ rm .hg/store/fncache
589 $ rm .hg/store/fncache
590 $ hg debugrebuildfncache
590 $ hg debugrebuildfncache
591 adding data/.A/one.txt.i
591 adding data/.A/one.txt.i
592 adding data/.A/two.txt.i
592 adding data/.A/two.txt.i
593 adding data/b/bar/fruits.txt.i
593 adding data/b/bar/fruits.txt.i
594 adding data/b/bar/orange/fly/gnat.py.i
594 adding data/b/bar/orange/fly/gnat.py.i
595 adding data/b/bar/orange/fly/housefly.txt.i
595 adding data/b/bar/orange/fly/housefly.txt.i
596 adding data/b/foo/apple/bees/flower.py.i
596 adding data/b/foo/apple/bees/flower.py.i
597 adding data/c.txt.i
597 adding data/c.txt.i
598 adding data/d.py.i
598 adding data/d.py.i
599 adding meta/.A/00manifest.i
599 adding meta/.A/00manifest.i
600 adding meta/b/00manifest.i
600 adding meta/b/00manifest.i
601 adding meta/b/bar/00manifest.i
601 adding meta/b/bar/00manifest.i
602 adding meta/b/bar/orange/00manifest.i
602 adding meta/b/bar/orange/00manifest.i
603 adding meta/b/bar/orange/fly/00manifest.i
603 adding meta/b/bar/orange/fly/00manifest.i
604 adding meta/b/foo/00manifest.i
604 adding meta/b/foo/00manifest.i
605 adding meta/b/foo/apple/00manifest.i
605 adding meta/b/foo/apple/00manifest.i
606 adding meta/b/foo/apple/bees/00manifest.i
606 adding meta/b/foo/apple/bees/00manifest.i
607 16 items added, 0 removed from fncache
607 16 items added, 0 removed from fncache
608 #endif
608 #endif
609
609
610 Finish first server
610 Finish first server
611 $ killdaemons.py
611 $ killdaemons.py
612
612
613 Back up the recently added revlogs
613 Back up the recently added revlogs
614 $ cp -R .hg/store .hg/store-newcopy
614 $ cp -R .hg/store .hg/store-newcopy
615
615
616 Verify reports missing dirlog
616 Verify reports missing dirlog
617 $ rm .hg/store/meta/b/00manifest.*
617 $ rm .hg/store/meta/b/00manifest.*
618 $ hg verify
618 $ hg verify
619 checking changesets
619 checking changesets
620 checking manifests
620 checking manifests
621 checking directory manifests
621 checking directory manifests
622 0: empty or missing b/
622 0: empty or missing b/
623 b/@0: parent-directory manifest refers to unknown revision 67688a370455
623 b/@0: parent-directory manifest refers to unknown revision 67688a370455
624 b/@1: parent-directory manifest refers to unknown revision f065da70369e
624 b/@1: parent-directory manifest refers to unknown revision f065da70369e
625 b/@2: parent-directory manifest refers to unknown revision ac0d30948e0b
625 b/@2: parent-directory manifest refers to unknown revision ac0d30948e0b
626 b/@3: parent-directory manifest refers to unknown revision 367152e6af28
626 b/@3: parent-directory manifest refers to unknown revision 367152e6af28
627 warning: orphan data file 'meta/b/bar/00manifest.i' (reporevlogstore !)
627 warning: orphan data file 'meta/b/bar/00manifest.i' (reporevlogstore !)
628 warning: orphan data file 'meta/b/bar/orange/00manifest.i' (reporevlogstore !)
628 warning: orphan data file 'meta/b/bar/orange/00manifest.i' (reporevlogstore !)
629 warning: orphan data file 'meta/b/bar/orange/fly/00manifest.i' (reporevlogstore !)
629 warning: orphan data file 'meta/b/bar/orange/fly/00manifest.i' (reporevlogstore !)
630 warning: orphan data file 'meta/b/foo/00manifest.i' (reporevlogstore !)
630 warning: orphan data file 'meta/b/foo/00manifest.i' (reporevlogstore !)
631 warning: orphan data file 'meta/b/foo/apple/00manifest.i' (reporevlogstore !)
631 warning: orphan data file 'meta/b/foo/apple/00manifest.i' (reporevlogstore !)
632 warning: orphan data file 'meta/b/foo/apple/bees/00manifest.i' (reporevlogstore !)
632 warning: orphan data file 'meta/b/foo/apple/bees/00manifest.i' (reporevlogstore !)
633 crosschecking files in changesets and manifests
633 crosschecking files in changesets and manifests
634 b/bar/fruits.txt@0: in changeset but not in manifest
634 b/bar/fruits.txt@0: in changeset but not in manifest
635 b/bar/orange/fly/gnat.py@0: in changeset but not in manifest
635 b/bar/orange/fly/gnat.py@0: in changeset but not in manifest
636 b/bar/orange/fly/housefly.txt@0: in changeset but not in manifest
636 b/bar/orange/fly/housefly.txt@0: in changeset but not in manifest
637 b/foo/apple/bees/flower.py@0: in changeset but not in manifest
637 b/foo/apple/bees/flower.py@0: in changeset but not in manifest
638 checking files
638 checking files
639 checked 4 changesets with 18 changes to 8 files
639 checked 4 changesets with 18 changes to 8 files
640 6 warnings encountered! (reporevlogstore !)
640 6 warnings encountered! (reporevlogstore !)
641 9 integrity errors encountered!
641 9 integrity errors encountered!
642 (first damaged changeset appears to be 0)
642 (first damaged changeset appears to be 0)
643 [1]
643 [1]
644 $ cp -R .hg/store-newcopy/. .hg/store
644 $ cp -R .hg/store-newcopy/. .hg/store
645
645
646 Verify reports missing dirlog entry
646 Verify reports missing dirlog entry
647 $ mv -f .hg/store-copy/meta/b/00manifest.* .hg/store/meta/b/
647 $ mv -f .hg/store-copy/meta/b/00manifest.* .hg/store/meta/b/
648 $ hg verify
648 $ hg verify
649 checking changesets
649 checking changesets
650 checking manifests
650 checking manifests
651 checking directory manifests
651 checking directory manifests
652 b/@2: parent-directory manifest refers to unknown revision ac0d30948e0b
652 b/@2: parent-directory manifest refers to unknown revision ac0d30948e0b
653 b/@3: parent-directory manifest refers to unknown revision 367152e6af28
653 b/@3: parent-directory manifest refers to unknown revision 367152e6af28
654 b/bar/@?: rev 2 points to unexpected changeset 2
654 b/bar/@?: rev 2 points to unexpected changeset 2
655 b/bar/@?: 44d7e1146e0d not in parent-directory manifest
655 b/bar/@?: 44d7e1146e0d not in parent-directory manifest
656 b/bar/@?: rev 3 points to unexpected changeset 3
656 b/bar/@?: rev 3 points to unexpected changeset 3
657 b/bar/@?: 70b10c6b17b7 not in parent-directory manifest
657 b/bar/@?: 70b10c6b17b7 not in parent-directory manifest
658 b/bar/orange/@?: rev 2 points to unexpected changeset 3
658 b/bar/orange/@?: rev 2 points to unexpected changeset 3
659 (expected None)
659 (expected None)
660 b/bar/orange/fly/@?: rev 2 points to unexpected changeset 3
660 b/bar/orange/fly/@?: rev 2 points to unexpected changeset 3
661 (expected None)
661 (expected None)
662 crosschecking files in changesets and manifests
662 crosschecking files in changesets and manifests
663 checking files
663 checking files
664 checked 4 changesets with 18 changes to 8 files
664 checked 4 changesets with 18 changes to 8 files
665 2 warnings encountered!
665 2 warnings encountered!
666 8 integrity errors encountered!
666 8 integrity errors encountered!
667 (first damaged changeset appears to be 2)
667 (first damaged changeset appears to be 2)
668 [1]
668 [1]
669 $ cp -R .hg/store-newcopy/. .hg/store
669 $ cp -R .hg/store-newcopy/. .hg/store
670
670
671 Test cloning a treemanifest repo over http.
671 Test cloning a treemanifest repo over http.
672 $ hg serve -p $HGPORT -d --pid-file=hg.pid --errorlog=errors.log
672 $ hg serve -p $HGPORT -d --pid-file=hg.pid --errorlog=errors.log
673 $ cat hg.pid >> $DAEMON_PIDS
673 $ cat hg.pid >> $DAEMON_PIDS
674 $ cd ..
674 $ cd ..
675 We can clone even with the knob turned off and we'll get a treemanifest repo.
675 We can clone even with the knob turned off and we'll get a treemanifest repo.
676 $ hg clone --config experimental.treemanifest=False \
676 $ hg clone --config experimental.treemanifest=False \
677 > --config experimental.changegroup3=True \
677 > --config experimental.changegroup3=True \
678 > http://localhost:$HGPORT deepclone
678 > http://localhost:$HGPORT deepclone
679 requesting all changes
679 requesting all changes
680 adding changesets
680 adding changesets
681 adding manifests
681 adding manifests
682 adding file changes
682 adding file changes
683 added 4 changesets with 18 changes to 8 files
683 added 4 changesets with 18 changes to 8 files
684 new changesets 775704be6f52:523e5c631710
684 new changesets 775704be6f52:523e5c631710
685 updating to branch default
685 updating to branch default
686 8 files updated, 0 files merged, 0 files removed, 0 files unresolved
686 8 files updated, 0 files merged, 0 files removed, 0 files unresolved
687 No server errors.
687 No server errors.
688 $ cat deeprepo/errors.log
688 $ cat deeprepo/errors.log
689 requires got updated to include treemanifest
689 requires got updated to include treemanifest
690 $ cat deepclone/.hg/requires | grep treemanifest
690 $ cat deepclone/.hg/requires | grep treemanifest
691 treemanifest
691 treemanifest
692 Tree manifest revlogs exist.
692 Tree manifest revlogs exist.
693 $ find deepclone/.hg/store/meta | sort
693 $ find deepclone/.hg/store/meta | sort
694 deepclone/.hg/store/meta
694 deepclone/.hg/store/meta
695 deepclone/.hg/store/meta/._a (reposimplestore !)
695 deepclone/.hg/store/meta/._a (reposimplestore !)
696 deepclone/.hg/store/meta/._a/00manifest.i (reposimplestore !)
696 deepclone/.hg/store/meta/._a/00manifest.i (reposimplestore !)
697 deepclone/.hg/store/meta/b
697 deepclone/.hg/store/meta/b
698 deepclone/.hg/store/meta/b/00manifest.i
698 deepclone/.hg/store/meta/b/00manifest.i
699 deepclone/.hg/store/meta/b/bar
699 deepclone/.hg/store/meta/b/bar
700 deepclone/.hg/store/meta/b/bar/00manifest.i
700 deepclone/.hg/store/meta/b/bar/00manifest.i
701 deepclone/.hg/store/meta/b/bar/orange
701 deepclone/.hg/store/meta/b/bar/orange
702 deepclone/.hg/store/meta/b/bar/orange/00manifest.i
702 deepclone/.hg/store/meta/b/bar/orange/00manifest.i
703 deepclone/.hg/store/meta/b/bar/orange/fly
703 deepclone/.hg/store/meta/b/bar/orange/fly
704 deepclone/.hg/store/meta/b/bar/orange/fly/00manifest.i
704 deepclone/.hg/store/meta/b/bar/orange/fly/00manifest.i
705 deepclone/.hg/store/meta/b/foo
705 deepclone/.hg/store/meta/b/foo
706 deepclone/.hg/store/meta/b/foo/00manifest.i
706 deepclone/.hg/store/meta/b/foo/00manifest.i
707 deepclone/.hg/store/meta/b/foo/apple
707 deepclone/.hg/store/meta/b/foo/apple
708 deepclone/.hg/store/meta/b/foo/apple/00manifest.i
708 deepclone/.hg/store/meta/b/foo/apple/00manifest.i
709 deepclone/.hg/store/meta/b/foo/apple/bees
709 deepclone/.hg/store/meta/b/foo/apple/bees
710 deepclone/.hg/store/meta/b/foo/apple/bees/00manifest.i
710 deepclone/.hg/store/meta/b/foo/apple/bees/00manifest.i
711 deepclone/.hg/store/meta/~2e_a (reporevlogstore !)
711 deepclone/.hg/store/meta/~2e_a (reporevlogstore !)
712 deepclone/.hg/store/meta/~2e_a/00manifest.i (reporevlogstore !)
712 deepclone/.hg/store/meta/~2e_a/00manifest.i (reporevlogstore !)
713 Verify passes.
713 Verify passes.
714 $ cd deepclone
714 $ cd deepclone
715 $ hg verify
715 $ hg verify
716 checking changesets
716 checking changesets
717 checking manifests
717 checking manifests
718 checking directory manifests
718 checking directory manifests
719 crosschecking files in changesets and manifests
719 crosschecking files in changesets and manifests
720 checking files
720 checking files
721 checked 4 changesets with 18 changes to 8 files
721 checked 4 changesets with 18 changes to 8 files
722 $ cd ..
722 $ cd ..
723
723
724 #if reporevlogstore
724 #if reporevlogstore
725 Create clones using old repo formats to use in later tests
725 Create clones using old repo formats to use in later tests
726 $ hg clone --config format.usestore=False \
726 $ hg clone --config format.usestore=False \
727 > --config experimental.changegroup3=True \
727 > --config experimental.changegroup3=True \
728 > http://localhost:$HGPORT deeprepo-basicstore
728 > http://localhost:$HGPORT deeprepo-basicstore
729 requesting all changes
729 requesting all changes
730 adding changesets
730 adding changesets
731 adding manifests
731 adding manifests
732 adding file changes
732 adding file changes
733 added 4 changesets with 18 changes to 8 files
733 added 4 changesets with 18 changes to 8 files
734 new changesets 775704be6f52:523e5c631710
734 new changesets 775704be6f52:523e5c631710
735 updating to branch default
735 updating to branch default
736 8 files updated, 0 files merged, 0 files removed, 0 files unresolved
736 8 files updated, 0 files merged, 0 files removed, 0 files unresolved
737 $ cd deeprepo-basicstore
737 $ cd deeprepo-basicstore
738 $ grep store .hg/requires
738 $ grep store .hg/requires
739 [1]
739 [1]
740 $ hg serve -p $HGPORT1 -d --pid-file=hg.pid --errorlog=errors.log
740 $ hg serve -p $HGPORT1 -d --pid-file=hg.pid --errorlog=errors.log
741 $ cat hg.pid >> $DAEMON_PIDS
741 $ cat hg.pid >> $DAEMON_PIDS
742 $ cd ..
742 $ cd ..
743 $ hg clone --config format.usefncache=False \
743 $ hg clone --config format.usefncache=False \
744 > --config experimental.changegroup3=True \
744 > --config experimental.changegroup3=True \
745 > http://localhost:$HGPORT deeprepo-encodedstore
745 > http://localhost:$HGPORT deeprepo-encodedstore
746 requesting all changes
746 requesting all changes
747 adding changesets
747 adding changesets
748 adding manifests
748 adding manifests
749 adding file changes
749 adding file changes
750 added 4 changesets with 18 changes to 8 files
750 added 4 changesets with 18 changes to 8 files
751 new changesets 775704be6f52:523e5c631710
751 new changesets 775704be6f52:523e5c631710
752 updating to branch default
752 updating to branch default
753 8 files updated, 0 files merged, 0 files removed, 0 files unresolved
753 8 files updated, 0 files merged, 0 files removed, 0 files unresolved
754 $ cd deeprepo-encodedstore
754 $ cd deeprepo-encodedstore
755 $ grep fncache .hg/requires
755 $ grep fncache .hg/requires
756 [1]
756 [1]
757 $ hg serve -p $HGPORT2 -d --pid-file=hg.pid --errorlog=errors.log
757 $ hg serve -p $HGPORT2 -d --pid-file=hg.pid --errorlog=errors.log
758 $ cat hg.pid >> $DAEMON_PIDS
758 $ cat hg.pid >> $DAEMON_PIDS
759 $ cd ..
759 $ cd ..
760
760
761 Local clone with basicstore
761 Local clone with basicstore
762 $ hg clone -U deeprepo-basicstore local-clone-basicstore
762 $ hg clone -U deeprepo-basicstore local-clone-basicstore
763 $ hg -R local-clone-basicstore verify
763 $ hg -R local-clone-basicstore verify
764 checking changesets
764 checking changesets
765 checking manifests
765 checking manifests
766 checking directory manifests
766 checking directory manifests
767 crosschecking files in changesets and manifests
767 crosschecking files in changesets and manifests
768 checking files
768 checking files
769 checked 4 changesets with 18 changes to 8 files
769 checked 4 changesets with 18 changes to 8 files
770
770
771 Local clone with encodedstore
771 Local clone with encodedstore
772 $ hg clone -U deeprepo-encodedstore local-clone-encodedstore
772 $ hg clone -U deeprepo-encodedstore local-clone-encodedstore
773 $ hg -R local-clone-encodedstore verify
773 $ hg -R local-clone-encodedstore verify
774 checking changesets
774 checking changesets
775 checking manifests
775 checking manifests
776 checking directory manifests
776 checking directory manifests
777 crosschecking files in changesets and manifests
777 crosschecking files in changesets and manifests
778 checking files
778 checking files
779 checked 4 changesets with 18 changes to 8 files
779 checked 4 changesets with 18 changes to 8 files
780
780
781 Local clone with fncachestore
781 Local clone with fncachestore
782 $ hg clone -U deeprepo local-clone-fncachestore
782 $ hg clone -U deeprepo local-clone-fncachestore
783 $ hg -R local-clone-fncachestore verify
783 $ hg -R local-clone-fncachestore verify
784 checking changesets
784 checking changesets
785 checking manifests
785 checking manifests
786 checking directory manifests
786 checking directory manifests
787 crosschecking files in changesets and manifests
787 crosschecking files in changesets and manifests
788 checking files
788 checking files
789 checked 4 changesets with 18 changes to 8 files
789 checked 4 changesets with 18 changes to 8 files
790
790
791 Stream clone with basicstore
791 Stream clone with basicstore
792 $ hg clone --config experimental.changegroup3=True --stream -U \
792 $ hg clone --config experimental.changegroup3=True --stream -U \
793 > http://localhost:$HGPORT1 stream-clone-basicstore
793 > http://localhost:$HGPORT1 stream-clone-basicstore
794 streaming all changes
794 streaming all changes
795 29 files to transfer, * of data (glob)
795 28 files to transfer, * of data (glob)
796 transferred * in * seconds (*) (glob)
796 transferred * in * seconds (*) (glob)
797 $ hg -R stream-clone-basicstore verify
797 $ hg -R stream-clone-basicstore verify
798 checking changesets
798 checking changesets
799 checking manifests
799 checking manifests
800 checking directory manifests
800 checking directory manifests
801 crosschecking files in changesets and manifests
801 crosschecking files in changesets and manifests
802 checking files
802 checking files
803 checked 4 changesets with 18 changes to 8 files
803 checked 4 changesets with 18 changes to 8 files
804
804
805 Stream clone with encodedstore
805 Stream clone with encodedstore
806 $ hg clone --config experimental.changegroup3=True --stream -U \
806 $ hg clone --config experimental.changegroup3=True --stream -U \
807 > http://localhost:$HGPORT2 stream-clone-encodedstore
807 > http://localhost:$HGPORT2 stream-clone-encodedstore
808 streaming all changes
808 streaming all changes
809 29 files to transfer, * of data (glob)
809 28 files to transfer, * of data (glob)
810 transferred * in * seconds (*) (glob)
810 transferred * in * seconds (*) (glob)
811 $ hg -R stream-clone-encodedstore verify
811 $ hg -R stream-clone-encodedstore verify
812 checking changesets
812 checking changesets
813 checking manifests
813 checking manifests
814 checking directory manifests
814 checking directory manifests
815 crosschecking files in changesets and manifests
815 crosschecking files in changesets and manifests
816 checking files
816 checking files
817 checked 4 changesets with 18 changes to 8 files
817 checked 4 changesets with 18 changes to 8 files
818
818
819 Stream clone with fncachestore
819 Stream clone with fncachestore
820 $ hg clone --config experimental.changegroup3=True --stream -U \
820 $ hg clone --config experimental.changegroup3=True --stream -U \
821 > http://localhost:$HGPORT stream-clone-fncachestore
821 > http://localhost:$HGPORT stream-clone-fncachestore
822 streaming all changes
822 streaming all changes
823 22 files to transfer, * of data (glob)
823 22 files to transfer, * of data (glob)
824 transferred * in * seconds (*) (glob)
824 transferred * in * seconds (*) (glob)
825 $ hg -R stream-clone-fncachestore verify
825 $ hg -R stream-clone-fncachestore verify
826 checking changesets
826 checking changesets
827 checking manifests
827 checking manifests
828 checking directory manifests
828 checking directory manifests
829 crosschecking files in changesets and manifests
829 crosschecking files in changesets and manifests
830 checking files
830 checking files
831 checked 4 changesets with 18 changes to 8 files
831 checked 4 changesets with 18 changes to 8 files
832
832
833 Packed bundle
833 Packed bundle
834 $ hg -R deeprepo debugcreatestreamclonebundle repo-packed.hg
834 $ hg -R deeprepo debugcreatestreamclonebundle repo-packed.hg
835 writing 5330 bytes for 18 files (no-zstd !)
835 writing 5330 bytes for 18 files (no-zstd !)
836 writing 5400 bytes for 18 files (zstd !)
836 writing 5400 bytes for 18 files (zstd !)
837 bundle requirements: generaldelta, revlogv1, sparserevlog, treemanifest (no-rust !)
837 bundle requirements: generaldelta, revlogv1, sparserevlog, treemanifest (no-rust !)
838 bundle requirements: generaldelta, persistent-nodemap, revlogv1, sparserevlog, treemanifest (rust !)
838 bundle requirements: generaldelta, persistent-nodemap, revlogv1, sparserevlog, treemanifest (rust !)
839 $ hg debugbundle --spec repo-packed.hg
839 $ hg debugbundle --spec repo-packed.hg
840 none-packed1;requirements%3Dgeneraldelta%2Crevlogv1%2Csparserevlog%2Ctreemanifest (no-rust !)
840 none-packed1;requirements%3Dgeneraldelta%2Crevlogv1%2Csparserevlog%2Ctreemanifest (no-rust !)
841 none-packed1;requirements%3Dgeneraldelta%2Cpersistent-nodemap%2Crevlogv1%2Csparserevlog%2Ctreemanifest (rust !)
841 none-packed1;requirements%3Dgeneraldelta%2Cpersistent-nodemap%2Crevlogv1%2Csparserevlog%2Ctreemanifest (rust !)
842
842
843 #endif
843 #endif
844
844
845 Bundle with changegroup2 is not supported
845 Bundle with changegroup2 is not supported
846
846
847 $ hg -R deeprepo bundle --all -t v2 deeprepo.bundle
847 $ hg -R deeprepo bundle --all -t v2 deeprepo.bundle
848 abort: repository does not support bundle version 02
848 abort: repository does not support bundle version 02
849 [255]
849 [255]
850
850
851 Pull does not include changegroup for manifest the client already has from
851 Pull does not include changegroup for manifest the client already has from
852 other branch
852 other branch
853
853
854 $ mkdir grafted-dir-repo
854 $ mkdir grafted-dir-repo
855 $ cd grafted-dir-repo
855 $ cd grafted-dir-repo
856 $ hg --config experimental.treemanifest=1 init
856 $ hg --config experimental.treemanifest=1 init
857 $ mkdir dir
857 $ mkdir dir
858 $ echo a > dir/file
858 $ echo a > dir/file
859 $ echo a > file
859 $ echo a > file
860 $ hg ci -Am initial
860 $ hg ci -Am initial
861 adding dir/file
861 adding dir/file
862 adding file
862 adding file
863 $ echo b > dir/file
863 $ echo b > dir/file
864 $ hg ci -m updated
864 $ hg ci -m updated
865 $ hg co '.^'
865 $ hg co '.^'
866 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
866 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
867 $ hg revert -r tip dir/
867 $ hg revert -r tip dir/
868 reverting dir/file
868 reverting dir/file
869 $ echo b > file # to make sure root manifest is sent
869 $ echo b > file # to make sure root manifest is sent
870 $ hg ci -m grafted
870 $ hg ci -m grafted
871 created new head
871 created new head
872 $ cd ..
872 $ cd ..
873
873
874 $ hg --config experimental.treemanifest=1 clone --pull -r 1 \
874 $ hg --config experimental.treemanifest=1 clone --pull -r 1 \
875 > grafted-dir-repo grafted-dir-repo-clone
875 > grafted-dir-repo grafted-dir-repo-clone
876 adding changesets
876 adding changesets
877 adding manifests
877 adding manifests
878 adding file changes
878 adding file changes
879 added 2 changesets with 3 changes to 2 files
879 added 2 changesets with 3 changes to 2 files
880 new changesets d84f4c419457:09ab742f3b0f
880 new changesets d84f4c419457:09ab742f3b0f
881 updating to branch default
881 updating to branch default
882 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
882 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
883 $ cd grafted-dir-repo-clone
883 $ cd grafted-dir-repo-clone
884 $ hg pull -r 2
884 $ hg pull -r 2
885 pulling from $TESTTMP/grafted-dir-repo
885 pulling from $TESTTMP/grafted-dir-repo
886 searching for changes
886 searching for changes
887 adding changesets
887 adding changesets
888 adding manifests
888 adding manifests
889 adding file changes
889 adding file changes
890 added 1 changesets with 1 changes to 1 files (+1 heads)
890 added 1 changesets with 1 changes to 1 files (+1 heads)
891 new changesets 73699489fb7c
891 new changesets 73699489fb7c
892 (run 'hg heads' to see heads, 'hg merge' to merge)
892 (run 'hg heads' to see heads, 'hg merge' to merge)
893
893
894 Committing a empty commit does not duplicate root treemanifest
894 Committing a empty commit does not duplicate root treemanifest
895 $ echo z >> z
895 $ echo z >> z
896 $ hg commit -Aqm 'pre-empty commit'
896 $ hg commit -Aqm 'pre-empty commit'
897 $ hg rm z
897 $ hg rm z
898 $ hg commit --amend -m 'empty commit'
898 $ hg commit --amend -m 'empty commit'
899 saved backup bundle to $TESTTMP/grafted-dir-repo-clone/.hg/strip-backup/cb99d5717cea-9e3b6b02-amend.hg
899 saved backup bundle to $TESTTMP/grafted-dir-repo-clone/.hg/strip-backup/cb99d5717cea-9e3b6b02-amend.hg
900 $ hg log -r 'tip + tip^' -T '{manifest}\n'
900 $ hg log -r 'tip + tip^' -T '{manifest}\n'
901 1:678d3574b88c
901 1:678d3574b88c
902 1:678d3574b88c
902 1:678d3574b88c
903 $ hg --config extensions.strip= strip -r . -q
903 $ hg --config extensions.strip= strip -r . -q
General Comments 0
You need to be logged in to leave comments. Login now