@@ -0,0 +1,545 @@
# linux.py - Linux specific automation functionality
#
# Copyright 2019 Gregory Szorc <gregory.szorc@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

# no-check-code because Python 3 native.

import os
import pathlib
import shlex
import subprocess
import tempfile

from .ssh import (
    exec_command,
)


# Linux distributions that are supported.
DISTROS = {
    'debian9',
    'ubuntu18.04',
    'ubuntu18.10',
    'ubuntu19.04',
}

INSTALL_PYTHONS = r'''
PYENV2_VERSIONS="2.7.16 pypy2.7-7.1.1"
PYENV3_VERSIONS="3.5.7 3.6.8 3.7.3 3.8-dev pypy3.5-7.0.0 pypy3.6-7.1.1"

git clone https://github.com/pyenv/pyenv.git /hgdev/pyenv
pushd /hgdev/pyenv
git checkout 3faeda67bb33e07750d1a104271369a7384ca45c
popd

export PYENV_ROOT="/hgdev/pyenv"
export PATH="$PYENV_ROOT/bin:$PATH"

# pip 19.0.3.
PIP_SHA256=efe99298f3fbb1f56201ce6b81d2658067d2f7d7dfc2d412e0d3cacc9a397c61
wget -O get-pip.py --progress dot:mega https://github.com/pypa/get-pip/raw/fee32c376da1ff6496a798986d7939cd51e1644f/get-pip.py
echo "${PIP_SHA256} get-pip.py" | sha256sum --check -

VIRTUALENV_SHA256=984d7e607b0a5d1329425dd8845bd971b957424b5ba664729fab51ab8c11bc39
VIRTUALENV_TARBALL=virtualenv-16.4.3.tar.gz
wget -O ${VIRTUALENV_TARBALL} --progress dot:mega https://files.pythonhosted.org/packages/37/db/89d6b043b22052109da35416abc3c397655e4bd3cff031446ba02b9654fa/${VIRTUALENV_TARBALL}
echo "${VIRTUALENV_SHA256} ${VIRTUALENV_TARBALL}" | sha256sum --check -

for v in ${PYENV2_VERSIONS}; do
    pyenv install -v ${v}
    ${PYENV_ROOT}/versions/${v}/bin/python get-pip.py
    ${PYENV_ROOT}/versions/${v}/bin/pip install ${VIRTUALENV_TARBALL}
    ${PYENV_ROOT}/versions/${v}/bin/pip install -r /hgdev/requirements-py2.txt
done

for v in ${PYENV3_VERSIONS}; do
    pyenv install -v ${v}
    ${PYENV_ROOT}/versions/${v}/bin/python get-pip.py
    ${PYENV_ROOT}/versions/${v}/bin/pip install -r /hgdev/requirements-py3.txt
done

pyenv global ${PYENV2_VERSIONS} ${PYENV3_VERSIONS} system
'''.lstrip().replace('\r\n', '\n')


BOOTSTRAP_VIRTUALENV = r'''
/usr/bin/virtualenv /hgdev/venv-bootstrap

HG_SHA256=1bdd21bb87d1e05fb5cd395d488d0e0cc2f2f90ce0fd248e31a03595da5ccb47
HG_TARBALL=mercurial-4.9.1.tar.gz

wget -O ${HG_TARBALL} --progress dot:mega https://www.mercurial-scm.org/release/${HG_TARBALL}
echo "${HG_SHA256} ${HG_TARBALL}" | sha256sum --check -

/hgdev/venv-bootstrap/bin/pip install ${HG_TARBALL}
'''.lstrip().replace('\r\n', '\n')


BOOTSTRAP_DEBIAN = r'''
#!/bin/bash

set -ex

DISTRO=`grep DISTRIB_ID /etc/lsb-release | awk -F= '{{print $2}}'`
DEBIAN_VERSION=`cat /etc/debian_version`
LSB_RELEASE=`lsb_release -cs`

sudo /usr/sbin/groupadd hg
sudo /usr/sbin/groupadd docker
sudo /usr/sbin/useradd -g hg -G sudo,docker -d /home/hg -m -s /bin/bash hg
sudo mkdir /home/hg/.ssh
sudo cp ~/.ssh/authorized_keys /home/hg/.ssh/authorized_keys
sudo chown -R hg:hg /home/hg/.ssh
sudo chmod 700 /home/hg/.ssh
sudo chmod 600 /home/hg/.ssh/authorized_keys

cat << EOF | sudo tee /etc/sudoers.d/90-hg
hg ALL=(ALL) NOPASSWD:ALL
EOF

sudo apt-get update
sudo DEBIAN_FRONTEND=noninteractive apt-get -yq dist-upgrade

# Install packages necessary to set up Docker Apt repo.
sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install --no-install-recommends \
    apt-transport-https \
    gnupg

cat > docker-apt-key << EOF
-----BEGIN PGP PUBLIC KEY BLOCK-----

mQINBFit2ioBEADhWpZ8/wvZ6hUTiXOwQHXMAlaFHcPH9hAtr4F1y2+OYdbtMuth
lqqwp028AqyY+PRfVMtSYMbjuQuu5byyKR01BbqYhuS3jtqQmljZ/bJvXqnmiVXh
38UuLa+z077PxyxQhu5BbqntTPQMfiyqEiU+BKbq2WmANUKQf+1AmZY/IruOXbnq
L4C1+gJ8vfmXQt99npCaxEjaNRVYfOS8QcixNzHUYnb6emjlANyEVlZzeqo7XKl7
UrwV5inawTSzWNvtjEjj4nJL8NsLwscpLPQUhTQ+7BbQXAwAmeHCUTQIvvWXqw0N
cmhh4HgeQscQHYgOJjjDVfoY5MucvglbIgCqfzAHW9jxmRL4qbMZj+b1XoePEtht
ku4bIQN1X5P07fNWzlgaRL5Z4POXDDZTlIQ/El58j9kp4bnWRCJW0lya+f8ocodo
vZZ+Doi+fy4D5ZGrL4XEcIQP/Lv5uFyf+kQtl/94VFYVJOleAv8W92KdgDkhTcTD
G7c0tIkVEKNUq48b3aQ64NOZQW7fVjfoKwEZdOqPE72Pa45jrZzvUFxSpdiNk2tZ
XYukHjlxxEgBdC/J3cMMNRE1F4NCA3ApfV1Y7/hTeOnmDuDYwr9/obA8t016Yljj
q5rdkywPf4JF8mXUW5eCN1vAFHxeg9ZWemhBtQmGxXnw9M+z6hWwc6ahmwARAQAB
tCtEb2NrZXIgUmVsZWFzZSAoQ0UgZGViKSA8ZG9ja2VyQGRvY2tlci5jb20+iQI3
BBMBCgAhBQJYrefAAhsvBQsJCAcDBRUKCQgLBRYCAwEAAh4BAheAAAoJEI2BgDwO
v82IsskP/iQZo68flDQmNvn8X5XTd6RRaUH33kXYXquT6NkHJciS7E2gTJmqvMqd
tI4mNYHCSEYxI5qrcYV5YqX9P6+Ko+vozo4nseUQLPH/ATQ4qL0Zok+1jkag3Lgk
jonyUf9bwtWxFp05HC3GMHPhhcUSexCxQLQvnFWXD2sWLKivHp2fT8QbRGeZ+d3m
6fqcd5Fu7pxsqm0EUDK5NL+nPIgYhN+auTrhgzhK1CShfGccM/wfRlei9Utz6p9P
XRKIlWnXtT4qNGZNTN0tR+NLG/6Bqd8OYBaFAUcue/w1VW6JQ2VGYZHnZu9S8LMc
FYBa5Ig9PxwGQOgq6RDKDbV+PqTQT5EFMeR1mrjckk4DQJjbxeMZbiNMG5kGECA8
g383P3elhn03WGbEEa4MNc3Z4+7c236QI3xWJfNPdUbXRaAwhy/6rTSFbzwKB0Jm
ebwzQfwjQY6f55MiI/RqDCyuPj3r3jyVRkK86pQKBAJwFHyqj9KaKXMZjfVnowLh
9svIGfNbGHpucATqREvUHuQbNnqkCx8VVhtYkhDb9fEP2xBu5VvHbR+3nfVhMut5
G34Ct5RS7Jt6LIfFdtcn8CaSas/l1HbiGeRgc70X/9aYx/V/CEJv0lIe8gP6uDoW
FPIZ7d6vH+Vro6xuWEGiuMaiznap2KhZmpkgfupyFmplh0s6knymuQINBFit2ioB
EADneL9S9m4vhU3blaRjVUUyJ7b/qTjcSylvCH5XUE6R2k+ckEZjfAMZPLpO+/tF
M2JIJMD4SifKuS3xck9KtZGCufGmcwiLQRzeHF7vJUKrLD5RTkNi23ydvWZgPjtx
Q+DTT1Zcn7BrQFY6FgnRoUVIxwtdw1bMY/89rsFgS5wwuMESd3Q2RYgb7EOFOpnu
w6da7WakWf4IhnF5nsNYGDVaIHzpiqCl+uTbf1epCjrOlIzkZ3Z3Yk5CM/TiFzPk
z2lLz89cpD8U+NtCsfagWWfjd2U3jDapgH+7nQnCEWpROtzaKHG6lA3pXdix5zG8
eRc6/0IbUSWvfjKxLLPfNeCS2pCL3IeEI5nothEEYdQH6szpLog79xB9dVnJyKJb
VfxXnseoYqVrRz2VVbUI5Blwm6B40E3eGVfUQWiux54DspyVMMk41Mx7QJ3iynIa
1N4ZAqVMAEruyXTRTxc9XW0tYhDMA/1GYvz0EmFpm8LzTHA6sFVtPm/ZlNCX6P1X
zJwrv7DSQKD6GGlBQUX+OeEJ8tTkkf8QTJSPUdh8P8YxDFS5EOGAvhhpMBYD42kQ
pqXjEC+XcycTvGI7impgv9PDY1RCC1zkBjKPa120rNhv/hkVk/YhuGoajoHyy4h7
ZQopdcMtpN2dgmhEegny9JCSwxfQmQ0zK0g7m6SHiKMwjwARAQABiQQ+BBgBCAAJ
BQJYrdoqAhsCAikJEI2BgDwOv82IwV0gBBkBCAAGBQJYrdoqAAoJEH6gqcPyc/zY
1WAP/2wJ+R0gE6qsce3rjaIz58PJmc8goKrir5hnElWhPgbq7cYIsW5qiFyLhkdp
YcMmhD9mRiPpQn6Ya2w3e3B8zfIVKipbMBnke/ytZ9M7qHmDCcjoiSmwEXN3wKYI
mD9VHONsl/CG1rU9Isw1jtB5g1YxuBA7M/m36XN6x2u+NtNMDB9P56yc4gfsZVES
KA9v+yY2/l45L8d/WUkUi0YXomn6hyBGI7JrBLq0CX37GEYP6O9rrKipfz73XfO7
JIGzOKZlljb/D9RX/g7nRbCn+3EtH7xnk+TK/50euEKw8SMUg147sJTcpQmv6UzZ
cM4JgL0HbHVCojV4C/plELwMddALOFeYQzTif6sMRPf+3DSj8frbInjChC3yOLy0
6br92KFom17EIj2CAcoeq7UPhi2oouYBwPxh5ytdehJkoo+sN7RIWua6P2WSmon5
U888cSylXC0+ADFdgLX9K2zrDVYUG1vo8CX0vzxFBaHwN6Px26fhIT1/hYUHQR1z
VfNDcyQmXqkOnZvvoMfz/Q0s9BhFJ/zU6AgQbIZE/hm1spsfgvtsD1frZfygXJ9f
irP+MSAI80xHSf91qSRZOj4Pl3ZJNbq4yYxv0b1pkMqeGdjdCYhLU+LZ4wbQmpCk
SVe2prlLureigXtmZfkqevRz7FrIZiu9ky8wnCAPwC7/zmS18rgP/17bOtL4/iIz
QhxAAoAMWVrGyJivSkjhSGx1uCojsWfsTAm11P7jsruIL61ZzMUVE2aM3Pmj5G+W
9AcZ58Em+1WsVnAXdUR//bMmhyr8wL/G1YO1V3JEJTRdxsSxdYa4deGBBY/Adpsw
24jxhOJR+lsJpqIUeb999+R8euDhRHG9eFO7DRu6weatUJ6suupoDTRWtr/4yGqe
dKxV3qQhNLSnaAzqW/1nA3iUB4k7kCaKZxhdhDbClf9P37qaRW467BLCVO/coL3y
Vm50dwdrNtKpMBh3ZpbB1uJvgi9mXtyBOMJ3v8RZeDzFiG8HdCtg9RvIt/AIFoHR
H3S+U79NT6i0KPzLImDfs8T7RlpyuMc4Ufs8ggyg9v3Ae6cN3eQyxcK3w0cbBwsh
/nQNfsA6uu+9H7NhbehBMhYnpNZyrHzCmzyXkauwRAqoCbGCNykTRwsur9gS41TQ
M8ssD1jFheOJf3hODnkKU+HKjvMROl1DK7zdmLdNzA1cvtZH/nCC9KPj1z8QC47S
xx+dTZSx4ONAhwbS/LN3PoKtn8LPjY9NP9uDWI+TWYquS2U+KHDrBDlsgozDbs/O
jCxcpDzNmXpWQHEtHU7649OXHP7UeNST1mCUCH5qdank0V1iejF6/CfTFU4MfcrG
YT90qFF93M3v01BbxP+EIY2/9tiIPbrd
=0YYh
-----END PGP PUBLIC KEY BLOCK-----
EOF

sudo apt-key add docker-apt-key

if [ "$DEBIAN_VERSION" = "9.8" ]; then
cat << EOF | sudo tee -a /etc/apt/sources.list
# Need backports for clang-format-6.0
deb http://deb.debian.org/debian stretch-backports main

# Sources are useful if we want to compile things locally.
deb-src http://deb.debian.org/debian stretch main
deb-src http://security.debian.org/debian-security stretch/updates main
deb-src http://deb.debian.org/debian stretch-updates main
deb-src http://deb.debian.org/debian stretch-backports main

deb [arch=amd64] https://download.docker.com/linux/debian stretch stable
EOF

elif [ "$DISTRO" = "Ubuntu" ]; then
cat << EOF | sudo tee -a /etc/apt/sources.list
deb [arch=amd64] https://download.docker.com/linux/ubuntu $LSB_RELEASE stable
EOF

fi

sudo apt-get update

PACKAGES="\
    btrfs-progs \
    build-essential \
    bzr \
    clang-format-6.0 \
    cvs \
    darcs \
    debhelper \
    devscripts \
    dpkg-dev \
    dstat \
    emacs \
    gettext \
    git \
    htop \
    iotop \
    jfsutils \
    libbz2-dev \
    libexpat1-dev \
    libffi-dev \
    libgdbm-dev \
    liblzma-dev \
    libncurses5-dev \
    libnss3-dev \
    libreadline-dev \
    libsqlite3-dev \
    libssl-dev \
    netbase \
    ntfs-3g \
    nvme-cli \
    pyflakes \
    pyflakes3 \
    pylint \
    pylint3 \
    python-all-dev \
    python-dev \
    python-docutils \
    python-fuzzywuzzy \
    python-pygments \
    python-subversion \
    python-vcr \
    python3-dev \
    python3-docutils \
    python3-fuzzywuzzy \
    python3-pygments \
    python3-vcr \
    rsync \
    sqlite3 \
    subversion \
    tcl-dev \
    tk-dev \
    tla \
    unzip \
    uuid-dev \
    vim \
    virtualenv \
    wget \
    xfsprogs \
    zip \
    zlib1g-dev"

if [ "$DEBIAN_VERSION" = "9.8" ]; then
    PACKAGES="$PACKAGES linux-perf"
elif [ "$DISTRO" = "Ubuntu" ]; then
    PACKAGES="$PACKAGES linux-tools-common"
fi

# Ubuntu 19.04 removes monotone.
if [ "$LSB_RELEASE" != "disco" ]; then
    PACKAGES="$PACKAGES monotone"
fi

# As of April 27, 2019, Docker hasn't published packages for
# Ubuntu 19.04 yet.
if [ "$LSB_RELEASE" != "disco" ]; then
    PACKAGES="$PACKAGES docker-ce"
fi

sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install --no-install-recommends $PACKAGES

# Create clang-format symlink so test harness finds it.
sudo update-alternatives --install /usr/bin/clang-format clang-format \
    /usr/bin/clang-format-6.0 1000

sudo mkdir /hgdev
# Will be normalized to hg:hg later.
sudo chown `whoami` /hgdev

cp requirements-py2.txt /hgdev/requirements-py2.txt
cp requirements-py3.txt /hgdev/requirements-py3.txt

# Disable the pip version check because it uses the network and can
# be annoying.
cat << EOF | sudo tee -a /etc/pip.conf
[global]
disable-pip-version-check = True
EOF

{install_pythons}
{bootstrap_virtualenv}

/hgdev/venv-bootstrap/bin/hg clone https://www.mercurial-scm.org/repo/hg /hgdev/src

# Mark the repo as non-publishing.
cat >> /hgdev/src/.hg/hgrc << EOF
[phases]
publish = false
EOF

sudo chown -R hg:hg /hgdev
'''.lstrip().format(
    install_pythons=INSTALL_PYTHONS,
    bootstrap_virtualenv=BOOTSTRAP_VIRTUALENV
).replace('\r\n', '\n')
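
# Note: BOOTSTRAP_DEBIAN is run through str.format() above, which is why the
# literal braces in the embedded awk program (the DISTRO line) are doubled.
# A mini-demo of the escaping rule, for illustration only (not part of the
# bootstrap logic):
#
#   >>> '{{print $2}} / {name}'.format(name='x')
#   '{print $2} / x'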


# Prepares /hgdev for operations.
PREPARE_HGDEV = '''
#!/bin/bash

set -e

FS=$1

ensure_device() {
    if [ -z "${DEVICE}" ]; then
        echo "could not find block device to format"
        exit 1
    fi
}

# Determine device to partition for extra filesystem.
# If only 1 volume is present, it will be the root volume and
# should be /dev/nvme0. If multiple volumes are present, the
# root volume could be nvme0 or nvme1. Use whichever one doesn't have
# a partition.
if [ -e /dev/nvme1n1 ]; then
    if [ -e /dev/nvme0n1p1 ]; then
        DEVICE=/dev/nvme1n1
    else
        DEVICE=/dev/nvme0n1
    fi
else
    DEVICE=
fi

sudo mkdir /hgwork

if [ "${FS}" != "default" -a "${FS}" != "tmpfs" ]; then
    ensure_device
    echo "creating ${FS} filesystem on ${DEVICE}"
fi

if [ "${FS}" = "default" ]; then
    :

elif [ "${FS}" = "btrfs" ]; then
    sudo mkfs.btrfs ${DEVICE}
    sudo mount ${DEVICE} /hgwork

elif [ "${FS}" = "ext3" ]; then
    # lazy_journal_init speeds up filesystem creation at the expense of
    # integrity if things crash. We are an ephemeral instance, so we don't
    # care about integrity.
    sudo mkfs.ext3 -E lazy_journal_init=1 ${DEVICE}
    sudo mount ${DEVICE} /hgwork

elif [ "${FS}" = "ext4" ]; then
    sudo mkfs.ext4 -E lazy_journal_init=1 ${DEVICE}
    sudo mount ${DEVICE} /hgwork

elif [ "${FS}" = "jfs" ]; then
    sudo mkfs.jfs ${DEVICE}
    sudo mount ${DEVICE} /hgwork

elif [ "${FS}" = "tmpfs" ]; then
    echo "creating tmpfs volume in /hgwork"
    sudo mount -t tmpfs -o size=1024M tmpfs /hgwork

elif [ "${FS}" = "xfs" ]; then
    sudo mkfs.xfs ${DEVICE}
    sudo mount ${DEVICE} /hgwork

else
    echo "unsupported filesystem: ${FS}"
    exit 1
fi

echo "/hgwork ready"

sudo chown hg:hg /hgwork
mkdir /hgwork/tmp
chown hg:hg /hgwork/tmp

rsync -a /hgdev/src /hgwork/
'''.lstrip().replace('\r\n', '\n')


HG_UPDATE_CLEAN = '''
set -ex

HG=/hgdev/venv-bootstrap/bin/hg

cd /hgwork/src
${HG} --config extensions.purge= purge --all
${HG} update -C $1
${HG} log -r .
'''.lstrip().replace('\r\n', '\n')


def prepare_exec_environment(ssh_client, filesystem='default'):
    """Prepare an EC2 instance to execute things.

    The AMI has an ``/hgdev`` bootstrapped with various Python installs
    and a clone of the Mercurial repo.

    In EC2, EBS volumes launched from snapshots have wonky performance
    behavior. Notably, blocks have to be copied on first access, which
    makes volume I/O extremely slow on fresh volumes.

    Furthermore, we may want to run operations, tests, etc. on alternative
    filesystems so we can examine behavior on different filesystems.

    This function facilitates executing operations on alternate
    volumes.
    """
    sftp = ssh_client.open_sftp()

    with sftp.open('/hgdev/prepare-hgdev', 'wb') as fh:
        fh.write(PREPARE_HGDEV)
        fh.chmod(0o0777)

    command = 'sudo /hgdev/prepare-hgdev %s' % filesystem
    chan, stdin, stdout = exec_command(ssh_client, command)
    stdin.close()

    for line in stdout:
        print(line, end='')

    res = chan.recv_exit_status()

    if res:
        raise Exception('non-0 exit code preparing exec environment: %d'
                        % res)


def synchronize_hg(source_path: pathlib.Path, ec2_instance, revision: str = None):
    """Synchronize a local Mercurial source path to a remote EC2 instance."""

    with tempfile.TemporaryDirectory() as temp_dir:
        temp_dir = pathlib.Path(temp_dir)

        ssh_dir = temp_dir / '.ssh'
        ssh_dir.mkdir()
        ssh_dir.chmod(0o0700)

        public_ip = ec2_instance.public_ip_address

        ssh_config = ssh_dir / 'config'

        with ssh_config.open('w', encoding='utf-8') as fh:
            fh.write('Host %s\n' % public_ip)
            fh.write(' User hg\n')
            fh.write(' StrictHostKeyChecking no\n')
            fh.write(' UserKnownHostsFile %s\n' % (ssh_dir / 'known_hosts'))
            fh.write(' IdentityFile %s\n' % ec2_instance.ssh_private_key_path)

        if not (source_path / '.hg').is_dir():
            raise Exception('%s is not a Mercurial repository; synchronization '
                            'not yet supported' % source_path)

        env = dict(os.environ)
        env['HGPLAIN'] = '1'
        env['HGENCODING'] = 'utf-8'

        hg_bin = source_path / 'hg'

        res = subprocess.run(
            ['python2.7', str(hg_bin), 'log', '-r', revision, '-T', '{node}'],
            cwd=str(source_path), env=env, check=True, capture_output=True)

        full_revision = res.stdout.decode('ascii')

        args = [
            'python2.7', str(hg_bin),
            '--config', 'ui.ssh=ssh -F %s' % ssh_config,
            '--config', 'ui.remotecmd=/hgdev/venv-bootstrap/bin/hg',
            'push', '-f', '-r', full_revision,
            'ssh://%s//hgwork/src' % public_ip,
        ]

        subprocess.run(args, cwd=str(source_path), env=env, check=True)

        # TODO support synchronizing dirty working directory.

        sftp = ec2_instance.ssh_client.open_sftp()

        with sftp.open('/hgdev/hgup', 'wb') as fh:
            fh.write(HG_UPDATE_CLEAN)
            fh.chmod(0o0700)

        chan, stdin, stdout = exec_command(
            ec2_instance.ssh_client, '/hgdev/hgup %s' % full_revision)
        stdin.close()

        for line in stdout:
            print(line, end='')

        res = chan.recv_exit_status()

        if res:
            raise Exception('non-0 exit code updating working directory: %d'
                            % res)


def run_tests(ssh_client, python_version, test_flags=None):
    """Run tests on a remote Linux machine via an SSH client."""
    test_flags = test_flags or []

    print('running tests')

    if python_version == 'system2':
        python = '/usr/bin/python2'
    elif python_version == 'system3':
        python = '/usr/bin/python3'
    elif python_version.startswith('pypy'):
        python = '/hgdev/pyenv/shims/%s' % python_version
    else:
        python = '/hgdev/pyenv/shims/python%s' % python_version

    test_flags = ' '.join(shlex.quote(a) for a in test_flags)

    command = (
        '/bin/sh -c "export TMPDIR=/hgwork/tmp; '
        'cd /hgwork/src/tests && %s run-tests.py %s"' % (
            python, test_flags))

    chan, stdin, stdout = exec_command(ssh_client, command)

    stdin.close()

    for line in stdout:
        print(line, end='')

    return chan.recv_exit_status()
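

# A minimal sketch of how these helpers chain together, assuming an
# ``ec2_instance`` object that exposes ``ssh_client``, ``public_ip_address``
# and ``ssh_private_key_path`` (the attributes the functions above rely on).
# All concrete values are hypothetical:
#
#   prepare_exec_environment(ec2_instance.ssh_client, filesystem='xfs')
#   synchronize_hg(pathlib.Path('/path/to/hg-src'), ec2_instance, revision='.')
#   ret = run_tests(ec2_instance.ssh_client, '3.7', ['test-status.t'])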
@@ -0,0 +1,67 @@
# ssh.py - Interact with remote SSH servers
#
# Copyright 2019 Gregory Szorc <gregory.szorc@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

# no-check-code because Python 3 native.

import socket
import time
import warnings

from cryptography.utils import (
    CryptographyDeprecationWarning,
)
import paramiko


def wait_for_ssh(hostname, port, timeout=60, username=None, key_filename=None):
    """Wait for an SSH server to start on the specified host and port."""
    class IgnoreHostKeyPolicy(paramiko.MissingHostKeyPolicy):
        def missing_host_key(self, client, hostname, key):
            return

    end_time = time.time() + timeout

    # paramiko triggers a CryptographyDeprecationWarning in the cryptography
    # package. Let's suppress it.
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore',
                                category=CryptographyDeprecationWarning)

        while True:
            client = paramiko.SSHClient()
            client.set_missing_host_key_policy(IgnoreHostKeyPolicy())
            try:
                client.connect(hostname, port=port, username=username,
                               key_filename=key_filename,
                               timeout=5.0, allow_agent=False,
                               look_for_keys=False)

                return client
            except socket.error:
                pass
            except paramiko.AuthenticationException:
                # AuthenticationException subclasses SSHException; re-raise it
                # explicitly so it isn't swallowed by the handler below.
                raise
            except paramiko.SSHException:
                pass

            if time.time() >= end_time:
                raise Exception('Timeout reached waiting for SSH')

            time.sleep(1.0)


def exec_command(client, command):
    """exec_command wrapper that combines stderr/stdout and returns the channel"""
    chan = client.get_transport().open_session()

    chan.exec_command(command)
    chan.set_combine_stderr(True)

    stdin = chan.makefile('wb', -1)
    stdout = chan.makefile('r', -1)

    return chan, stdin, stdout
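

# A minimal usage sketch (hypothetical host and key path). Read stdout to EOF
# before asking for the exit status, as the callers in linux.py do:
#
#   client = wait_for_ssh('203.0.113.10', 22, username='hg',
#                         key_filename='/path/to/key.pem')
#   chan, stdin, stdout = exec_command(client, 'uname -a')
#   stdin.close()
#   for line in stdout:
#       print(line, end='')
#   print('exit status: %d' % chan.recv_exit_status())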
@@ -0,0 +1,130 @@
#
# This file is autogenerated by pip-compile
# To update, run:
#
#    pip-compile -U --generate-hashes --output-file contrib/automation/linux-requirements-py2.txt contrib/automation/linux-requirements.txt.in
#
astroid==1.6.6 \
    --hash=sha256:87de48a92e29cedf7210ffa853d11441e7ad94cb47bacd91b023499b51cbc756 \
    --hash=sha256:d25869fc7f44f1d9fb7d24fd7ea0639656f5355fc3089cd1f3d18c6ec6b124c7 \
    # via pylint
backports.functools-lru-cache==1.5 \
    --hash=sha256:9d98697f088eb1b0fa451391f91afb5e3ebde16bbdb272819fd091151fda4f1a \
    --hash=sha256:f0b0e4eba956de51238e17573b7087e852dfe9854afd2e9c873f73fc0ca0a6dd \
    # via astroid, isort, pylint
bzr==2.7.0 ; python_version <= "2.7" and platform_python_implementation == "CPython" \
    --hash=sha256:c9f6bbe0a50201dadc5fddadd94ba50174193c6cf6e39e16f6dd0ad98a1df338
configparser==3.7.4 \
    --hash=sha256:8be81d89d6e7b4c0d4e44bcc525845f6da25821de80cb5e06e7e0238a2899e32 \
    --hash=sha256:da60d0014fd8c55eb48c1c5354352e363e2d30bbf7057e5e171a468390184c75 \
    # via pylint
contextlib2==0.5.5 \
    --hash=sha256:509f9419ee91cdd00ba34443217d5ca51f5a364a404e1dce9e8979cea969ca48 \
    --hash=sha256:f5260a6e679d2ff42ec91ec5252f4eeffdcf21053db9113bd0a8e4d953769c00 \
    # via vcrpy
docutils==0.14 \
    --hash=sha256:02aec4bd92ab067f6ff27a38a38a41173bf01bed8f89157768c1573f53e474a6 \
    --hash=sha256:51e64ef2ebfb29cae1faa133b3710143496eca21c530f3f71424d77687764274 \
    --hash=sha256:7a4bd47eaf6596e1295ecb11361139febe29b084a87bf005bf899f9a42edc3c6
enum34==1.1.6 \
    --hash=sha256:2d81cbbe0e73112bdfe6ef8576f2238f2ba27dd0d55752a776c41d38b7da2850 \
    --hash=sha256:644837f692e5f550741432dd3f223bbb9852018674981b1664e5dc339387588a \
    --hash=sha256:6bd0f6ad48ec2aa117d3d141940d484deccda84d4fcd884f5c3d93c23ecd8c79 \
    --hash=sha256:8ad8c4783bf61ded74527bffb48ed9b54166685e4230386a9ed9b1279e2df5b1 \
    # via astroid
funcsigs==1.0.2 \
    --hash=sha256:330cc27ccbf7f1e992e69fef78261dc7c6569012cf397db8d3de0234e6c937ca \
    --hash=sha256:a7bb0f2cf3a3fd1ab2732cb49eba4252c2af4240442415b4abce3b87022a8f50 \
    # via mock
futures==3.2.0 \
    --hash=sha256:9ec02aa7d674acb8618afb127e27fde7fc68994c0437ad759fa094a574adb265 \
    --hash=sha256:ec0a6cb848cc212002b9828c3e34c675e0c9ff6741dc445cab6fdd4e1085d1f1 \
    # via isort
fuzzywuzzy==0.17.0 \
    --hash=sha256:5ac7c0b3f4658d2743aa17da53a55598144edbc5bee3c6863840636e6926f254 \
    --hash=sha256:6f49de47db00e1c71d40ad16da42284ac357936fa9b66bea1df63fed07122d62
isort==4.3.17 \
    --hash=sha256:01cb7e1ca5e6c5b3f235f0385057f70558b70d2f00320208825fa62887292f43 \
    --hash=sha256:268067462aed7eb2a1e237fcb287852f22077de3fb07964e87e00f829eea2d1a \
    # via pylint
lazy-object-proxy==1.3.1 \
    --hash=sha256:0ce34342b419bd8f018e6666bfef729aec3edf62345a53b537a4dcc115746a33 \
    --hash=sha256:1b668120716eb7ee21d8a38815e5eb3bb8211117d9a90b0f8e21722c0758cc39 \
    --hash=sha256:209615b0fe4624d79e50220ce3310ca1a9445fd8e6d3572a896e7f9146bbf019 \
    --hash=sha256:27bf62cb2b1a2068d443ff7097ee33393f8483b570b475db8ebf7e1cba64f088 \
    --hash=sha256:27ea6fd1c02dcc78172a82fc37fcc0992a94e4cecf53cb6d73f11749825bd98b \
    --hash=sha256:2c1b21b44ac9beb0fc848d3993924147ba45c4ebc24be19825e57aabbe74a99e \
    --hash=sha256:2df72ab12046a3496a92476020a1a0abf78b2a7db9ff4dc2036b8dd980203ae6 \
    --hash=sha256:320ffd3de9699d3892048baee45ebfbbf9388a7d65d832d7e580243ade426d2b \
    --hash=sha256:50e3b9a464d5d08cc5227413db0d1c4707b6172e4d4d915c1c70e4de0bbff1f5 \
    --hash=sha256:5276db7ff62bb7b52f77f1f51ed58850e315154249aceb42e7f4c611f0f847ff \
    --hash=sha256:61a6cf00dcb1a7f0c773ed4acc509cb636af2d6337a08f362413c76b2b47a8dd \
    --hash=sha256:6ae6c4cb59f199d8827c5a07546b2ab7e85d262acaccaacd49b62f53f7c456f7 \
    --hash=sha256:7661d401d60d8bf15bb5da39e4dd72f5d764c5aff5a86ef52a042506e3e970ff \
    --hash=sha256:7bd527f36a605c914efca5d3d014170b2cb184723e423d26b1fb2fd9108e264d \
    --hash=sha256:7cb54db3535c8686ea12e9535eb087d32421184eacc6939ef15ef50f83a5e7e2 \
    --hash=sha256:7f3a2d740291f7f2c111d86a1c4851b70fb000a6c8883a59660d95ad57b9df35 \
    --hash=sha256:81304b7d8e9c824d058087dcb89144842c8e0dea6d281c031f59f0acf66963d4 \
    --hash=sha256:933947e8b4fbe617a51528b09851685138b49d511af0b6c0da2539115d6d4514 \
    --hash=sha256:94223d7f060301b3a8c09c9b3bc3294b56b2188e7d8179c762a1cda72c979252 \
    --hash=sha256:ab3ca49afcb47058393b0122428358d2fbe0408cf99f1b58b295cfeb4ed39109 \
    --hash=sha256:bd6292f565ca46dee4e737ebcc20742e3b5be2b01556dafe169f6c65d088875f \
    --hash=sha256:cb924aa3e4a3fb644d0c463cad5bc2572649a6a3f68a7f8e4fbe44aaa6d77e4c \
    --hash=sha256:d0fc7a286feac9077ec52a927fc9fe8fe2fabab95426722be4c953c9a8bede92 \
    --hash=sha256:ddc34786490a6e4ec0a855d401034cbd1242ef186c20d79d2166d6a4bd449577 \
    --hash=sha256:e34b155e36fa9da7e1b7c738ed7767fc9491a62ec6af70fe9da4a057759edc2d \
    --hash=sha256:e5b9e8f6bda48460b7b143c3821b21b452cb3a835e6bbd5dd33aa0c8d3f5137d \
    --hash=sha256:e81ebf6c5ee9684be8f2c87563880f93eedd56dd2b6146d8a725b50b7e5adb0f \
    --hash=sha256:eb91be369f945f10d3a49f5f9be8b3d0b93a4c2be8f8a5b83b0571b8123e0a7a \
    --hash=sha256:f460d1ceb0e4a5dcb2a652db0904224f367c9b3c1470d5a7683c0480e582468b \
    # via astroid
mccabe==0.6.1 \
    --hash=sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42 \
    --hash=sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f \
    # via pylint
mock==2.0.0 \
    --hash=sha256:5ce3c71c5545b472da17b72268978914d0252980348636840bd34a00b5cc96c1 \
    --hash=sha256:b158b6df76edd239b8208d481dc46b6afd45a846b7812ff0ce58971cf5bc8bba \
    # via vcrpy
pbr==5.1.3 \
    --hash=sha256:8257baf496c8522437e8a6cfe0f15e00aedc6c0e0e7c9d55eeeeab31e0853843 \
    --hash=sha256:8c361cc353d988e4f5b998555c88098b9d5964c2e11acf7b0d21925a66bb5824 \
    # via mock
pyflakes==2.1.1 \
    --hash=sha256:17dbeb2e3f4d772725c777fabc446d5634d1038f234e77343108ce445ea69ce0 \
    --hash=sha256:d976835886f8c5b31d47970ed689944a0262b5f3afa00a5a7b4dc81e5449f8a2
pygments==2.3.1 \
    --hash=sha256:5ffada19f6203563680669ee7f53b64dabbeb100eb51b61996085e99c03b284a \
    --hash=sha256:e8218dd399a61674745138520d0d4cf2621d7e032439341bc3f647bff125818d
pylint==1.9.4 \
    --hash=sha256:02c2b6d268695a8b64ad61847f92e611e6afcff33fd26c3a2125370c4662905d \
    --hash=sha256:ee1e85575587c5b58ddafa25e1c1b01691ef172e139fc25585e5d3f02451da93
python-levenshtein==0.12.0 \
    --hash=sha256:033a11de5e3d19ea25c9302d11224e1a1898fe5abd23c61c7c360c25195e3eb1
pyyaml==5.1 \
    --hash=sha256:1adecc22f88d38052fb787d959f003811ca858b799590a5eaa70e63dca50308c \
    --hash=sha256:436bc774ecf7c103814098159fbb84c2715d25980175292c648f2da143909f95 \
    --hash=sha256:460a5a4248763f6f37ea225d19d5c205677d8d525f6a83357ca622ed541830c2 \
    --hash=sha256:5a22a9c84653debfbf198d02fe592c176ea548cccce47553f35f466e15cf2fd4 \
    --hash=sha256:7a5d3f26b89d688db27822343dfa25c599627bc92093e788956372285c6298ad \
    --hash=sha256:9372b04a02080752d9e6f990179a4ab840227c6e2ce15b95e1278456664cf2ba \
    --hash=sha256:a5dcbebee834eaddf3fa7366316b880ff4062e4bcc9787b78c7fbb4a26ff2dd1 \
    --hash=sha256:aee5bab92a176e7cd034e57f46e9df9a9862a71f8f37cad167c6fc74c65f5b4e \
    --hash=sha256:c51f642898c0bacd335fc119da60baae0824f2cde95b0330b56c0553439f0673 \
    --hash=sha256:c68ea4d3ba1705da1e0d85da6684ac657912679a649e8868bd850d2c299cce13 \
    --hash=sha256:e23d0cc5299223dcc37885dae624f382297717e459ea24053709675a976a3e19 \
    # via vcrpy
singledispatch==3.4.0.3 \
    --hash=sha256:5b06af87df13818d14f08a028e42f566640aef80805c3b50c5056b086e3c2b9c \
    --hash=sha256:833b46966687b3de7f438c761ac475213e53b306740f1abfaa86e1d1aae56aa8 \
    # via astroid, pylint
six==1.12.0 \
    --hash=sha256:3350809f0555b11f552448330d0b52d5f24c91a322ea4a15ef22629740f3761c \
    --hash=sha256:d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73 \
    # via astroid, mock, pylint, singledispatch, vcrpy
vcrpy==2.0.1 \
    --hash=sha256:127e79cf7b569d071d1bd761b83f7b62b2ce2a2eb63ceca7aa67cba8f2602ea3 \
    --hash=sha256:57be64aa8e9883a4117d0b15de28af62275c001abcdb00b6dc2d4406073d9a4f
wrapt==1.11.1 \
    --hash=sha256:4aea003270831cceb8a90ff27c4031da6ead7ec1886023b80ce0dfe0adf61533 \
    # via astroid, vcrpy
@@ -0,0 +1,159 @@
#
# This file is autogenerated by pip-compile
# To update, run:
#
#    pip-compile -U --generate-hashes --output-file contrib/automation/linux-requirements-py3.txt contrib/automation/linux-requirements.txt.in
#
astroid==2.2.5 \
    --hash=sha256:6560e1e1749f68c64a4b5dee4e091fce798d2f0d84ebe638cf0e0585a343acf4 \
    --hash=sha256:b65db1bbaac9f9f4d190199bb8680af6f6f84fd3769a5ea883df8a91fe68b4c4 \
    # via pylint
docutils==0.14 \
    --hash=sha256:02aec4bd92ab067f6ff27a38a38a41173bf01bed8f89157768c1573f53e474a6 \
    --hash=sha256:51e64ef2ebfb29cae1faa133b3710143496eca21c530f3f71424d77687764274 \
    --hash=sha256:7a4bd47eaf6596e1295ecb11361139febe29b084a87bf005bf899f9a42edc3c6
fuzzywuzzy==0.17.0 \
    --hash=sha256:5ac7c0b3f4658d2743aa17da53a55598144edbc5bee3c6863840636e6926f254 \
    --hash=sha256:6f49de47db00e1c71d40ad16da42284ac357936fa9b66bea1df63fed07122d62
idna==2.8 \
    --hash=sha256:c357b3f628cf53ae2c4c05627ecc484553142ca23264e593d327bcde5e9c3407 \
    --hash=sha256:ea8b7f6188e6fa117537c3df7da9fc686d485087abf6ac197f9c46432f7e4a3c \
    # via yarl
isort==4.3.17 \
    --hash=sha256:01cb7e1ca5e6c5b3f235f0385057f70558b70d2f00320208825fa62887292f43 \
    --hash=sha256:268067462aed7eb2a1e237fcb287852f22077de3fb07964e87e00f829eea2d1a \
    # via pylint
lazy-object-proxy==1.3.1 \
    --hash=sha256:0ce34342b419bd8f018e6666bfef729aec3edf62345a53b537a4dcc115746a33 \
    --hash=sha256:1b668120716eb7ee21d8a38815e5eb3bb8211117d9a90b0f8e21722c0758cc39 \
    --hash=sha256:209615b0fe4624d79e50220ce3310ca1a9445fd8e6d3572a896e7f9146bbf019 \
    --hash=sha256:27bf62cb2b1a2068d443ff7097ee33393f8483b570b475db8ebf7e1cba64f088 \
    --hash=sha256:27ea6fd1c02dcc78172a82fc37fcc0992a94e4cecf53cb6d73f11749825bd98b \
    --hash=sha256:2c1b21b44ac9beb0fc848d3993924147ba45c4ebc24be19825e57aabbe74a99e \
    --hash=sha256:2df72ab12046a3496a92476020a1a0abf78b2a7db9ff4dc2036b8dd980203ae6 \
    --hash=sha256:320ffd3de9699d3892048baee45ebfbbf9388a7d65d832d7e580243ade426d2b \
    --hash=sha256:50e3b9a464d5d08cc5227413db0d1c4707b6172e4d4d915c1c70e4de0bbff1f5 \
    --hash=sha256:5276db7ff62bb7b52f77f1f51ed58850e315154249aceb42e7f4c611f0f847ff \
    --hash=sha256:61a6cf00dcb1a7f0c773ed4acc509cb636af2d6337a08f362413c76b2b47a8dd \
    --hash=sha256:6ae6c4cb59f199d8827c5a07546b2ab7e85d262acaccaacd49b62f53f7c456f7 \
    --hash=sha256:7661d401d60d8bf15bb5da39e4dd72f5d764c5aff5a86ef52a042506e3e970ff \
    --hash=sha256:7bd527f36a605c914efca5d3d014170b2cb184723e423d26b1fb2fd9108e264d \
    --hash=sha256:7cb54db3535c8686ea12e9535eb087d32421184eacc6939ef15ef50f83a5e7e2 \
    --hash=sha256:7f3a2d740291f7f2c111d86a1c4851b70fb000a6c8883a59660d95ad57b9df35 \
    --hash=sha256:81304b7d8e9c824d058087dcb89144842c8e0dea6d281c031f59f0acf66963d4 \
    --hash=sha256:933947e8b4fbe617a51528b09851685138b49d511af0b6c0da2539115d6d4514 \
    --hash=sha256:94223d7f060301b3a8c09c9b3bc3294b56b2188e7d8179c762a1cda72c979252 \
    --hash=sha256:ab3ca49afcb47058393b0122428358d2fbe0408cf99f1b58b295cfeb4ed39109 \
    --hash=sha256:bd6292f565ca46dee4e737ebcc20742e3b5be2b01556dafe169f6c65d088875f \
    --hash=sha256:cb924aa3e4a3fb644d0c463cad5bc2572649a6a3f68a7f8e4fbe44aaa6d77e4c \
    --hash=sha256:d0fc7a286feac9077ec52a927fc9fe8fe2fabab95426722be4c953c9a8bede92 \
    --hash=sha256:ddc34786490a6e4ec0a855d401034cbd1242ef186c20d79d2166d6a4bd449577 \
    --hash=sha256:e34b155e36fa9da7e1b7c738ed7767fc9491a62ec6af70fe9da4a057759edc2d \
    --hash=sha256:e5b9e8f6bda48460b7b143c3821b21b452cb3a835e6bbd5dd33aa0c8d3f5137d \
    --hash=sha256:e81ebf6c5ee9684be8f2c87563880f93eedd56dd2b6146d8a725b50b7e5adb0f \
    --hash=sha256:eb91be369f945f10d3a49f5f9be8b3d0b93a4c2be8f8a5b83b0571b8123e0a7a \
    --hash=sha256:f460d1ceb0e4a5dcb2a652db0904224f367c9b3c1470d5a7683c0480e582468b \
    # via astroid
mccabe==0.6.1 \
    --hash=sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42 \
    --hash=sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f \
    # via pylint
multidict==4.5.2 \
    --hash=sha256:024b8129695a952ebd93373e45b5d341dbb87c17ce49637b34000093f243dd4f \
    --hash=sha256:041e9442b11409be5e4fc8b6a97e4bcead758ab1e11768d1e69160bdde18acc3 \
    --hash=sha256:045b4dd0e5f6121e6f314d81759abd2c257db4634260abcfe0d3f7083c4908ef \
    --hash=sha256:047c0a04e382ef8bd74b0de01407e8d8632d7d1b4db6f2561106af812a68741b \
    --hash=sha256:068167c2d7bbeebd359665ac4fff756be5ffac9cda02375b5c5a7c4777038e73 \
    --hash=sha256:148ff60e0fffa2f5fad2eb25aae7bef23d8f3b8bdaf947a65cdbe84a978092bc \
    --hash=sha256:1d1c77013a259971a72ddaa83b9f42c80a93ff12df6a4723be99d858fa30bee3 \
    --hash=sha256:1d48bc124a6b7a55006d97917f695effa9725d05abe8ee78fd60d6588b8344cd \
    --hash=sha256:31dfa2fc323097f8ad7acd41aa38d7c614dd1960ac6681745b6da124093dc351 \
    --hash=sha256:34f82db7f80c49f38b032c5abb605c458bac997a6c3142e0d6c130be6fb2b941 \
    --hash=sha256:3d5dd8e5998fb4ace04789d1d008e2bb532de501218519d70bb672c4c5a2fc5d \
    --hash=sha256:4a6ae52bd3ee41ee0f3acf4c60ceb3f44e0e3bc52ab7da1c2b2aa6703363a3d1 \
    --hash=sha256:4b02a3b2a2f01d0490dd39321c74273fed0568568ea0e7ea23e02bd1fb10a10b \
    --hash=sha256:4b843f8e1dd6a3195679d9838eb4670222e8b8d01bc36c9894d6c3538316fa0a \
    --hash=sha256:5de53a28f40ef3c4fd57aeab6b590c2c663de87a5af76136ced519923d3efbb3 \
    --hash=sha256:61b2b33ede821b94fa99ce0b09c9ece049c7067a33b279f343adfe35108a4ea7 \
    --hash=sha256:6a3a9b0f45fd75dc05d8e93dc21b18fc1670135ec9544d1ad4acbcf6b86781d0 \
    --hash=sha256:76ad8e4c69dadbb31bad17c16baee61c0d1a4a73bed2590b741b2e1a46d3edd0 \
    --hash=sha256:7ba19b777dc00194d1b473180d4ca89a054dd18de27d0ee2e42a103ec9b7d014 \
    --hash=sha256:7c1b7eab7a49aa96f3db1f716f0113a8a2e93c7375dd3d5d21c4941f1405c9c5 \
    --hash=sha256:7fc0eee3046041387cbace9314926aa48b681202f8897f8bff3809967a049036 \
    --hash=sha256:8ccd1c5fff1aa1427100ce188557fc31f1e0a383ad8ec42c559aabd4ff08802d \
    --hash=sha256:8e08dd76de80539d613654915a2f5196dbccc67448df291e69a88712ea21e24a \
    --hash=sha256:c18498c50c59263841862ea0501da9f2b3659c00db54abfbf823a80787fde8ce \
    --hash=sha256:c49db89d602c24928e68c0d510f4fcf8989d77defd01c973d6cbe27e684833b1 \
    --hash=sha256:ce20044d0317649ddbb4e54dab3c1bcc7483c78c27d3f58ab3d0c7e6bc60d26a \
    --hash=sha256:d1071414dd06ca2eafa90c85a079169bfeb0e5f57fd0b45d44c092546fcd6fd9 \
    --hash=sha256:d3be11ac43ab1a3e979dac80843b42226d5d3cccd3986f2e03152720a4297cd7 \
    --hash=sha256:db603a1c235d110c860d5f39988ebc8218ee028f07a7cbc056ba6424372ca31b \
    # via yarl
pyflakes==2.1.1 \
    --hash=sha256:17dbeb2e3f4d772725c777fabc446d5634d1038f234e77343108ce445ea69ce0 \
    --hash=sha256:d976835886f8c5b31d47970ed689944a0262b5f3afa00a5a7b4dc81e5449f8a2
pygments==2.3.1 \
    --hash=sha256:5ffada19f6203563680669ee7f53b64dabbeb100eb51b61996085e99c03b284a \
    --hash=sha256:e8218dd399a61674745138520d0d4cf2621d7e032439341bc3f647bff125818d
pylint==2.3.1 \
    --hash=sha256:5d77031694a5fb97ea95e828c8d10fc770a1df6eb3906067aaed42201a8a6a09 \
    --hash=sha256:723e3db49555abaf9bf79dc474c6b9e2935ad82230b10c1138a71ea41ac0fff1
python-levenshtein==0.12.0 \
    --hash=sha256:033a11de5e3d19ea25c9302d11224e1a1898fe5abd23c61c7c360c25195e3eb1
pyyaml==5.1 \
    --hash=sha256:1adecc22f88d38052fb787d959f003811ca858b799590a5eaa70e63dca50308c \
    --hash=sha256:436bc774ecf7c103814098159fbb84c2715d25980175292c648f2da143909f95 \
    --hash=sha256:460a5a4248763f6f37ea225d19d5c205677d8d525f6a83357ca622ed541830c2 \
    --hash=sha256:5a22a9c84653debfbf198d02fe592c176ea548cccce47553f35f466e15cf2fd4 \
    --hash=sha256:7a5d3f26b89d688db27822343dfa25c599627bc92093e788956372285c6298ad \
    --hash=sha256:9372b04a02080752d9e6f990179a4ab840227c6e2ce15b95e1278456664cf2ba \
    --hash=sha256:a5dcbebee834eaddf3fa7366316b880ff4062e4bcc9787b78c7fbb4a26ff2dd1 \
    --hash=sha256:aee5bab92a176e7cd034e57f46e9df9a9862a71f8f37cad167c6fc74c65f5b4e \
    --hash=sha256:c51f642898c0bacd335fc119da60baae0824f2cde95b0330b56c0553439f0673 \
    --hash=sha256:c68ea4d3ba1705da1e0d85da6684ac657912679a649e8868bd850d2c299cce13 \
    --hash=sha256:e23d0cc5299223dcc37885dae624f382297717e459ea24053709675a976a3e19 \
    # via vcrpy
six==1.12.0 \
    --hash=sha256:3350809f0555b11f552448330d0b52d5f24c91a322ea4a15ef22629740f3761c \
    --hash=sha256:d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73 \
    # via astroid, vcrpy
typed-ast==1.3.4 ; python_version >= "3.0" and platform_python_implementation != "PyPy" \
    --hash=sha256:04894d268ba6eab7e093d43107869ad49e7b5ef40d1a94243ea49b352061b200 \
    --hash=sha256:16616ece19daddc586e499a3d2f560302c11f122b9c692bc216e821ae32aa0d0 \
    --hash=sha256:252fdae740964b2d3cdfb3f84dcb4d6247a48a6abe2579e8029ab3be3cdc026c \
    --hash=sha256:2af80a373af123d0b9f44941a46df67ef0ff7a60f95872412a145f4500a7fc99 \
    --hash=sha256:2c88d0a913229a06282b285f42a31e063c3bf9071ff65c5ea4c12acb6977c6a7 \
    --hash=sha256:2ea99c029ebd4b5a308d915cc7fb95b8e1201d60b065450d5d26deb65d3f2bc1 \
    --hash=sha256:3d2e3ab175fc097d2a51c7a0d3fda442f35ebcc93bb1d7bd9b95ad893e44c04d \
    --hash=sha256:4766dd695548a15ee766927bf883fb90c6ac8321be5a60c141f18628fb7f8da8 \
    --hash=sha256:56b6978798502ef66625a2e0f80cf923da64e328da8bbe16c1ff928c70c873de \
    --hash=sha256:5cddb6f8bce14325b2863f9d5ac5c51e07b71b462361fd815d1d7706d3a9d682 \
    --hash=sha256:644ee788222d81555af543b70a1098f2025db38eaa99226f3a75a6854924d4db \
    --hash=sha256:64cf762049fc4775efe6b27161467e76d0ba145862802a65eefc8879086fc6f8 \
    --hash=sha256:68c362848d9fb71d3c3e5f43c09974a0ae319144634e7a47db62f0f2a54a7fa7 \
    --hash=sha256:6c1f3c6f6635e611d58e467bf4371883568f0de9ccc4606f17048142dec14a1f \
    --hash=sha256:b213d4a02eec4ddf622f4d2fbc539f062af3788d1f332f028a2e19c42da53f15 \
    --hash=sha256:bb27d4e7805a7de0e35bd0cb1411bc85f807968b2b0539597a49a23b00a622ae \
    --hash=sha256:c9d414512eaa417aadae7758bc118868cd2396b0e6138c1dd4fda96679c079d3 \
    --hash=sha256:f0937165d1e25477b01081c4763d2d9cdc3b18af69cb259dd4f640c9b900fe5e \
    --hash=sha256:fb96a6e2c11059ecf84e6741a319f93f683e440e341d4489c9b161eca251cf2a \
    --hash=sha256:fc71d2d6ae56a091a8d94f33ec9d0f2001d1cb1db423d8b4355debfe9ce689b7
vcrpy==2.0.1 \
    --hash=sha256:127e79cf7b569d071d1bd761b83f7b62b2ce2a2eb63ceca7aa67cba8f2602ea3 \
    --hash=sha256:57be64aa8e9883a4117d0b15de28af62275c001abcdb00b6dc2d4406073d9a4f
wrapt==1.11.1 \
    --hash=sha256:4aea003270831cceb8a90ff27c4031da6ead7ec1886023b80ce0dfe0adf61533 \
    # via astroid, vcrpy
yarl==1.3.0 \
    --hash=sha256:024ecdc12bc02b321bc66b41327f930d1c2c543fa9a561b39861da9388ba7aa9 \
    --hash=sha256:2f3010703295fbe1aec51023740871e64bb9664c789cba5a6bdf404e93f7568f \
    --hash=sha256:3890ab952d508523ef4881457c4099056546593fa05e93da84c7250516e632eb \
    --hash=sha256:3e2724eb9af5dc41648e5bb304fcf4891adc33258c6e14e2a7414ea32541e320 \
    --hash=sha256:5badb97dd0abf26623a9982cd448ff12cb39b8e4c94032ccdedf22ce01a64842 \
    --hash=sha256:73f447d11b530d860ca1e6b582f947688286ad16ca42256413083d13f260b7a0 \
    --hash=sha256:7ab825726f2940c16d92aaec7d204cfc34ac26c0040da727cf8ba87255a33829 \
    --hash=sha256:b25de84a8c20540531526dfbb0e2d2b648c13fd5dd126728c496d7c3fea33310 \
    --hash=sha256:c6e341f5a6562af74ba55205dbd56d248daf1b5748ec48a0200ba227bb9e33f4 \
    --hash=sha256:c9bb7c249c4432cd47e75af3864bc02d26c9594f49c82e2a28624417f0ae63b8 \
    --hash=sha256:e060906c0c585565c718d1c3841747b61c5439af2211e185f6739a9412dfbde1 \
    # via vcrpy
@@ -0,0 +1,12 @@
# Bazaar doesn't work with Python 3 or PyPy.
bzr ; python_version <= '2.7' and platform_python_implementation == 'CPython'
docutils
fuzzywuzzy
pyflakes
pygments
pylint
# Needed to avoid warnings from fuzzywuzzy.
python-Levenshtein
# The typed-ast dependency doesn't install on PyPy.
typed-ast ; python_version >= '3.0' and platform_python_implementation != 'PyPy'
vcrpy
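
# The conditions above are PEP 508 environment markers. A hedged sketch of how
# such a marker evaluates, using the third-party ``packaging`` library (pip
# performs the equivalent check internally; shown for illustration only):
#
#   >>> from packaging.markers import Marker
#   >>> Marker("python_version <= '2.7'").evaluate({'python_version': '2.7'})
#   True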
@@ -0,0 +1,195 @@
Prior to removing (EXPERIMENTAL)
--------------------------------

These things affect UI and/or behavior, and should probably be implemented (or
ruled out) prior to taking off the experimental shrinkwrap.

#. Finish the `hg convert` story

   * Add an argument to accept a rules file to apply during conversion?
     Currently `lfs.track` is the only way to affect the conversion.
   * drop `lfs.track` config settings
   * splice in `.hglfs` file for normal repo -> lfs conversions?

#. Stop uploading blobs when pushing between local repos

   * Could probably hardlink directly to the other local repo's store
   * Support inferring `lfs.url` for local push/pull (currently only supports
     http)

#. Stop uploading blobs on strip/amend/histedit/etc.

   * This seems to be a side effect of doing it for `hg bundle`, which probably
     makes sense.

#. Handle a server with the extension loaded and a client without the extension
   more gracefully.

   * `changegroup3` is still experimental, and not enabled by default.
   * Figure out how to `introduce LFS to the server repo
     <https://www.mercurial-scm.org/pipermail/mercurial-devel/2018-September/122281.html>`_.
     See the TODO in test-lfs-serve.t.

#. Remove the `lfs.retry` hack in the client? This came from FB, but it's not
   clear why it is/was needed.

#. `hg export` currently writes out the LFS blob. Should it write the pointer
   instead?

   * `hg diff` is similar, and probably shouldn't see the pointer file

#. `Fix https multiplexing, and re-enable workers
   <https://www.mercurial-scm.org/pipermail/mercurial-devel/2018-January/109916.html>`_.

#. Show to-be-applied rules with `hg files -r 'wdir()' 'set:lfs()'`

   * `debugignore` can show file + line number, so a dedicated command could be
     useful too.

#. Filesets, revsets and templates

   * A dedicated revset should be faster than `'file(set:lfs())'`
   * Attach `{lfsoid}` and `{lfspointer}` to `general keywords
     <https://www.mercurial-scm.org/pipermail/mercurial-devel/2018-January/110251.html>`_,
     IFF the file is a blob
   * Drop existing items that would be redundant with general support

#. Can `grep` avoid downloading most things?

   * Add a command option to skip LFS blobs?

#. Add a flag that's visible in `hg files -v` to indicate external storage?

#. Server side issues

   * Check for local disk space before allowing upload. (I've got a patch for
     this.)
   * Make sure the http codes used are appropriate.
   * `Why is copying the Authorization header into the JSON payload necessary
     <https://www.mercurial-scm.org/pipermail/mercurial-devel/2018-April/116230.html>`_?
   * `LFS-Authenticate` header support in client and server(?)

#. Add locks on the cache and blob store

   * This is complicated with a global store, and multiple potentially
     unrelated local repositories that reference the same blob.
   * Alternately, maybe just handle collisions when trying to create the same
     blob in the store somehow.

#. Are proper file sizes reported in `debugupgraderepo`?

#. Finish prefetching files

   * `-T {data}` (other than cat?)
   * `verify`
   * `grep`

#. Output cleanup

   * Can we print the url when connecting to the blobstore? (A sudden
     connection refused after pulling commits looks confusing.) Problem is,
     'pushing to main url' is printed, and then lfs wants to upload before
     going back to the main repo transfer, so then *that* could be confusing
     with extra output. (This is kinda improved with 380f5131ee7b and
     9f78d10742af.)

   * Add more progress indicators? Uploading a large repo looks idle for a long
     time while it scans for blobs in each outgoing revision.

   * Print filenames instead of hashes in error messages

   * subrepo aware paths, where necessary

   * Is existing output at the right status/note/debug level?

#. Can `verify` be done without downloading everything?

   * If we know that we are talking to an hg server, we can leverage the fact
     that it validates in the Batch API portion, and skip the download
     altogether. OTOH, maybe we should download the files unconditionally for
     forensics. The alternative is to define a custom transfer handler that
     definitively verifies without transferring, and then cache those results.
     When verify comes looking, look in the cache instead of actually opening
     the file and processing it.

   * Yuya has concerns about when blob fetch takes place vs when the revlog is
     verified. Since the visible hash matches the blob content, I don't think
     there's a way to verify the pointer file that's actually stored in the
     filelog (other than basic JSON checks). Full verification requires the
     blob. See
     https://www.mercurial-scm.org/pipermail/mercurial-devel/2018-April/116133.html

   * Opening a corrupt pointer file aborts. It probably shouldn't for verify.


Future ideas/features/polishing
-------------------------------

These aren't in any particular order, and are things that don't have obvious BC
concerns.

#. Garbage collection `(issue5790) <https://bz.mercurial-scm.org/show_bug.cgi?id=5790>`_

   * This gets complicated because of the global cache, which may or may not
     consist of hardlinks to the repo, and may be in use by other repos. (So
     the gc may be pointless.)

#. `Compress blobs <https://github.com/git-lfs/git-lfs/issues/260>`_

   * A 700MB repo becomes 2.5GB with all lfs blobs
   * What implications are there for filesystem paths that don't indicate
     compression? (i.e. how to share with the global cache and other local
     repos?)
   * Probably needs to be stored under `.hg/store/lfs/zstd`, with a repo
     requirement.
   * Allow tuneable compression type and settings?
   * Support compression over the wire if both sides understand the
     compression?
   * `debugupgraderepo` to convert?
   * Probably not worth supporting compressed and uncompressed concurrently

#. Determine things to upload with `readfast()
   <https://www.mercurial-scm.org/pipermail/mercurial-devel/2018-August/121315.html>`_

   * Significantly faster when pushing an entire large repo to http.
   * Causes test changes to fileset and templates; may need both this and the
     current methods of lookup.

#. Is a command to download everything needed? This would allow copying
   everything to a portable drive. Currently this can be effected by running
   `hg verify`.

#. Stop reading entire files into one buffer when passing them through the
   filelog interface

   * `Requires major replumbing to core
     <https://www.mercurial-scm.org/wiki/HandlingLargeFiles>`_

#. Keep corrupt files around in 'store/lfs/incoming' for forensics?

   * Files should be downloaded to 'incoming', and moved to the normal
     location when done.

#. Client side path enhancements

   * Support paths.default:lfs = ... style paths
   * SSH -> https server inference

     * https://www.mercurial-scm.org/pipermail/mercurial-devel/2018-April/115416.html
     * https://github.com/git-lfs/git-lfs/blob/master/docs/api/server-discovery.md#guessing-the-server

#. Server enhancements

   * Add support for transfer quotas?
   * Download should be able to send the file in chunks, without reading the
     whole thing into memory
     (https://www.mercurial-scm.org/pipermail/mercurial-devel/2018-March/114584.html)
   * Support for resuming transfers

#. Handle 3rd party server storage.

   * Teach the client to handle the lfs `verify` action. This is needed after
     the server instructs the client to upload the file to another server, in
     order to tell the server that the upload completed.
   * Teach the server to send redirects if configured, and process `verify`
     requests.

#. `Is any hg-git work needed
   <https://groups.google.com/d/msg/hg-git/XYNQuudteeM/ivt8gXoZAAAJ>`_?
@@ -0,0 +1,80 @@
The active mergestate is stored in ``.hg/merge`` from the time a merge is
triggered by commands like ``hg merge``, ``hg rebase``, etc. until the merge
is completed or aborted; it tracks the 3-way merge state of individual files.

The contents of the directory are:

Conflicting files
-----------------

The local versions of the conflicting files are stored with their
filenames as the hash of their paths.

|
13 | state | |
|
14 | ----- | |
|
15 | ||
|
16 | This mergestate file record is used by hg versions prior to 2.9.1 |
|
17 | and contains less data than ``state2``. If there is no contradiction | |
|
18 | with ``state2``, we can assume that both are written at the same time. | |
|
19 | In this case, data from ``state2`` is used. Otherwise, we use ``state``. | |
|
20 | We read/write both ``state`` and ``state2`` records to ensure backward | |
|
21 | compatibility. | |
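
A sketch of this selection rule, with hypothetical helper names (``contradicts`` stands in for whatever consistency check an implementation uses)::

    def records_to_use(state, state2):
        # Both files are written together by modern hg. A contradiction
        # suggests ``state2`` is stale (e.g. an older hg rewrote only
        # ``state``), so the older but authoritative record wins.
        if state2 is not None and not contradicts(state, state2):
            return state2   # superset of the data in ``state``
        return state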
|
22 | ||
|
23 | state2 | |
|
24 | ------ | |
|
25 | ||
|
26 | This record stores a superset of the data in ``state``, and may grow |
|
27 | new kinds of records in the future. |
|
28 | ||
|
29 | Each record can contain arbitrary content and has an associated type. This | |
|
30 | `type` should be a letter. If `type` is uppercase, the record is mandatory: | |
|
31 | versions of Mercurial that don't support it should abort. If `type` is | |
|
32 | lowercase, the record can be safely ignored. | |
|
33 | ||
|
34 | Currently known records: | |
|
35 | ||
|
36 | | * L: the node of the "local" part of the merge (hexified version) | |
|
37 | | * O: the node of the "other" part of the merge (hexified version) | |
|
38 | | * F: an entry for a file to be merged |
|
39 | | * C: a change/delete or delete/change conflict | |
|
40 | | * D: a file that the external merge driver will merge internally | |
|
41 | | (experimental) | |
|
42 | | * P: a path conflict (file vs directory) | |
|
43 | | * m: the external merge driver defined for this merge plus its run state | |
|
44 | | (experimental) | |
|
45 | | * f: a (filename, dictionary) tuple of optional values for a given file | |
|
46 | | * X: unsupported mandatory record type (used in tests) | |
|
47 | | * x: unsupported advisory record type (used in tests) | |
|
48 | | * l: the labels for the parts of the merge. | |
|
49 | ||
|
50 | Merge driver run states (experimental): | |
|
51 | ||
|
52 | | * u: driver-resolved files unmarked -- needs to be run next time we're | |
|
53 | | about to resolve or commit | |
|
54 | | * m: driver-resolved files marked -- only needs to be run before commit | |
|
55 | | * s: success/skipped -- does not need to be run any more | |
|
56 | ||
|
57 | Merge record states (indexed by filename): | |
|
58 | ||
|
59 | | * u: unresolved conflict | |
|
60 | | * r: resolved conflict | |
|
61 | | * pu: unresolved path conflict (file conflicts with directory) | |
|
62 | | * pr: resolved path conflict | |
|
63 | | * d: driver-resolved conflict | |
|
64 | ||
|
65 | The resolve command transitions between 'u' and 'r' for conflicts and | |
|
66 | 'pu' and 'pr' for path conflicts. | |
|
67 | ||
|
68 | This format is a list of arbitrary records of the form: | |
|
69 | ||
|
70 | [type][length][content] | |
|
71 | ||
|
72 | `type` is a single character, `length` is a 4 byte integer, and | |
|
73 | `content` is an arbitrary byte sequence of length `length`. | |
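
For example, an advisory labels record (`l`) carrying the five bytes ``local`` could be framed like this; the big-endian byte order is an assumption here, chosen because Mercurial's on-disk formats conventionally use network byte order::

    >>> import struct
    >>> b'l' + struct.pack('>I', 5) + b'local'
    b'l\x00\x00\x00\x05local'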
|
74 | ||
|
75 | Mercurial versions prior to 3.7 have a bug where if there are | |
|
76 | unsupported mandatory merge records, attempting to clear out the merge | |
|
77 | state with hg update --clean or similar aborts. The 't' record type | |
|
78 | works around that by writing out what those versions treat as an | |
|
79 | advisory record but that later versions interpret specially: the first |
|
80 | character is the 'real' record type and everything onwards is the data. |
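
Putting the pieces together, a minimal reader for this framing might look like the following sketch (``KNOWN_TYPES`` is a hypothetical set of supported record types; big-endian length assumed as above)::

    import struct

    def iterrecords(data):
        # Yield (type, content) pairs from raw record bytes.
        offset = 0
        while offset < len(data):
            rtype = data[offset:offset + 1]
            (length,) = struct.unpack('>I', data[offset + 1:offset + 5])
            content = data[offset + 5:offset + 5 + length]
            offset += 5 + length
            if rtype == b't':
                # Unwrap: first byte is the real type, the rest is the data.
                rtype, content = content[:1], content[1:]
            if rtype.isupper() and rtype not in KNOWN_TYPES:
                raise Exception('unsupported mandatory record: %r' % rtype)
            yield rtype, content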
|
(additional new files in this changeset were truncated by the diff viewer and are not shown)
@@ -154,3 +154,6 b' roots(-10000:-1)' | |||
|
154 | 154 | roots(matching(tip, "author")) |
|
155 | 155 | roots(matching(tip, "author")) and -10000:-1 |
|
156 | 156 | (-10000:-1) and roots(matching(tip, "author")) |
|
157 | only(max(head())) | |
|
158 | only(max(head()), min(head())) | |
|
159 | only(max(head()), limit(head(), 1, 1)) |
@@ -101,9 +101,14 b' Under normal operation, recurring costs ' | |||
|
101 | 101 | * Storage costs for AMI / EBS snapshots. This should be just a few pennies |
|
102 | 102 | per month. |
|
103 | 103 | |
|
104 | When running EC2 instances, you'll be billed accordingly. |
|
|
105 | use *small* instances, like ``t3.medium``. This instance type costs ~$0.07 per | |
|
106 | hour. | |
|
104 | When running EC2 instances, you'll be billed accordingly. Default instance |
|
105 | types vary by operation, and we try to be respectful of your money when |
|
106 | choosing defaults. For Windows instances, which are billed per hour, we use |
|
107 | small instances such as ``t3.medium``, which costs ~$0.07 per hour. For |
|
108 | operations that scale well to many CPUs, like running Linux tests, we may |
|
109 | use a more powerful instance like ``c5.9xlarge``. Since Linux instances are |
|
110 | billed per second and a ``c5.9xlarge`` costs roughly twice as much per hour |
|
111 | as a ``c5.4xlarge``, finishing in half the time costs about the same. |
|
107 | 112 | |
|
108 | 113 | .. note:: |
|
109 | 114 | |
@@ -125,3 +130,54 b' To terminate all EC2 instances that we m' | |||
|
125 | 130 | To purge all EC2 resources that we manage:: |
|
126 | 131 |
|
|
127 | 132 | $ automation.py purge-ec2-resources |
|
133 | ||
|
134 | Remote Machine Interfaces | |
|
135 | ========================= | |
|
136 | ||
|
137 | The code that connects to a remote machine and executes things is | |
|
138 | theoretically machine agnostic as long as the remote machine conforms to | |
|
139 | an *interface*. In other words, to perform actions like running tests | |
|
140 | remotely or triggering packaging, it shouldn't matter if the remote machine | |
|
141 | is an EC2 instance, a virtual machine, etc. This section attempts to document | |
|
142 | the interface that remote machines need to provide in order to be valid | |
|
143 | *targets* for remote execution. These interfaces are often not ideal nor | |
|
144 | the most flexible. Instead, they have often evolved as the requirements of | |
|
145 | our automation code have evolved. | |
|
146 | ||
|
147 | Linux | |
|
148 | ----- | |
|
149 | ||
|
150 | Remote Linux machines expose an SSH server on port 22. The SSH server | |
|
151 | must allow the ``hg`` user to authenticate using the SSH key generated by | |
|
152 | the automation code. The ``hg`` user should be part of the ``hg`` group | |
|
153 | and it should have ``sudo`` access without password prompting. | |
|
154 | ||
|
155 | The SSH channel must support SFTP to facilitate transferring files from | |
|
156 | client to server. | |
|
157 | ||
|
158 | ``/bin/bash`` must exist and point to a working bash shell. |
|
159 | ||
|
160 | The ``/hgdev`` directory must exist, and all of its contents must be owned by ``hg:hg``. |
|
161 | ||
|
162 | The ``/hgdev/pyenv`` directory should contain an installation of | |
|
163 | ``pyenv``. Various Python distributions should be installed. The exact | |
|
164 | versions shouldn't matter. ``pyenv global`` should have been run so | |
|
165 | ``/hgdev/pyenv/shims/`` is populated with redirector scripts that point | |
|
166 | to the appropriate Python executable. | |
|
167 | ||
|
168 | The ``/hgdev/venv-bootstrap`` directory must contain a virtualenv | |
|
169 | with Mercurial installed. The ``/hgdev/venv-bootstrap/bin/hg`` executable | |
|
170 | is referenced by various scripts and the client. | |
|
171 | ||
|
172 | The ``/hgdev/src`` directory must contain a clone of the Mercurial |
|
173 | source code. The state of the working directory is not important. | |
|
174 | ||
|
175 | In order to run tests, the ``/hgwork`` directory will be created. | |
|
176 | This may require running various ``mkfs.*`` executables and ``mount`` | |
|
177 | to provision a new filesystem. This will require elevated privileges | |
|
178 | via ``sudo``. | |
|
179 | ||
|
180 | Various dependencies to run the Mercurial test harness are also required. | |
|
181 | Documenting them is beyond the scope of this document. Various tests | |
|
182 | also require other optional dependencies and missing dependencies will | |
|
183 | be printed by the test runner when a test is skipped. |
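
As a rough smoke test of this interface, a client could verify the essential pieces with paramiko along these lines (the host address and key path below are placeholders, not values from the automation code)::

    import paramiko

    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect('203.0.113.10', port=22, username='hg',
                   key_filename='/path/to/automation/key')

    # sudo must work without prompting for a password.
    stdin, stdout, stderr = client.exec_command('sudo -n true')
    assert stdout.channel.recv_exit_status() == 0

    # SFTP is required for pushing files to the machine, and the
    # bootstrap virtualenv's hg must be present.
    sftp = client.open_sftp()
    sftp.stat('/hgdev/venv-bootstrap/bin/hg')
    client.close()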
@@ -53,7 +53,7 b' class HGAutomation:' | |||
|
53 | 53 | |
|
54 | 54 | return password |
|
55 | 55 | |
|
56 | def aws_connection(self, region: str): | |
|
56 | def aws_connection(self, region: str, ensure_ec2_state: bool=True): | |
|
57 | 57 | """Obtain an AWSConnection instance bound to a specific region.""" |
|
58 | 58 | |
|
59 | return AWSConnection(self, region) | |
|
59 | return AWSConnection(self, region, ensure_ec2_state=ensure_ec2_state) |
@@ -19,6 +19,13 b' import time' | |||
|
19 | 19 | import boto3 |
|
20 | 20 | import botocore.exceptions |
|
21 | 21 | |
|
22 | from .linux import ( | |
|
23 | BOOTSTRAP_DEBIAN, | |
|
24 | ) | |
|
25 | from .ssh import ( | |
|
26 | exec_command as ssh_exec_command, | |
|
27 | wait_for_ssh, | |
|
28 | ) | |
|
22 | 29 | from .winrm import ( |
|
23 | 30 | run_powershell, |
|
24 | 31 | wait_for_winrm, |
@@ -31,12 +38,46 b' INSTALL_WINDOWS_DEPENDENCIES = (SOURCE_R' | |||
|
31 | 38 | 'install-windows-dependencies.ps1') |
|
32 | 39 | |
|
33 | 40 | |
|
41 | INSTANCE_TYPES_WITH_STORAGE = { | |
|
42 | 'c5d', | |
|
43 | 'd2', | |
|
44 | 'h1', | |
|
45 | 'i3', | |
|
46 | 'm5ad', | |
|
47 | 'm5d', | |
|
48 | 'r5d', | |
|
49 | 'r5ad', | |
|
50 | 'x1', | |
|
51 | 'z1d', | |
|
52 | } | |
|
53 | ||
|
54 | ||
|
55 | DEBIAN_ACCOUNT_ID = '379101102735' | |
|
56 | UBUNTU_ACCOUNT_ID = '099720109477' | |
|
57 | ||
|
58 | ||
|
34 | 59 | KEY_PAIRS = { |
|
35 | 60 | 'automation', |
|
36 | 61 | } |
|
37 | 62 | |
|
38 | 63 | |
|
39 | 64 | SECURITY_GROUPS = { |
|
65 | 'linux-dev-1': { | |
|
66 | 'description': 'Mercurial Linux instances that perform build/test automation', | |
|
67 | 'ingress': [ | |
|
68 | { | |
|
69 | 'FromPort': 22, | |
|
70 | 'ToPort': 22, | |
|
71 | 'IpProtocol': 'tcp', | |
|
72 | 'IpRanges': [ | |
|
73 | { | |
|
74 | 'CidrIp': '0.0.0.0/0', | |
|
75 | 'Description': 'SSH from entire Internet', | |
|
76 | }, | |
|
77 | ], | |
|
78 | }, | |
|
79 | ], | |
|
80 | }, | |
|
40 | 81 | 'windows-dev-1': { |
|
41 | 82 | 'description': 'Mercurial Windows instances that perform build automation', |
|
42 | 83 | 'ingress': [ |
@@ -180,7 +221,7 b' Install-WindowsFeature -Name Net-Framewo' | |||
|
180 | 221 | class AWSConnection: |
|
181 | 222 | """Manages the state of a connection with AWS.""" |
|
182 | 223 | |
|
183 | def __init__(self, automation, region: str): | |
|
224 | def __init__(self, automation, region: str, ensure_ec2_state: bool=True): | |
|
184 | 225 | self.automation = automation |
|
185 | 226 | self.local_state_path = automation.state_path |
|
186 | 227 | |
@@ -191,11 +232,12 b' class AWSConnection:' | |||
|
191 | 232 | self.ec2resource = self.session.resource('ec2') |
|
192 | 233 | self.iamclient = self.session.client('iam') |
|
193 | 234 | self.iamresource = self.session.resource('iam') |
|
194 | ||
|
195 | ensure_key_pairs(automation.state_path, self.ec2resource) | |
|
235 | self.security_groups = {} | |
|
196 | 236 | |
|
197 | self.security_groups = ensure_security_groups(self.ec2resource) | |
|
198 | ensure_iam_state(self.iamresource) |
|
|
237 | if ensure_ec2_state: | |
|
238 | ensure_key_pairs(automation.state_path, self.ec2resource) | |
|
239 | self.security_groups = ensure_security_groups(self.ec2resource) | |
|
240 | ensure_iam_state(self.iamclient, self.iamresource) | |
|
199 | 241 | |
|
200 | 242 | def key_pair_path_private(self, name): |
|
201 | 243 | """Path to a key pair private key file.""" |
@@ -324,7 +366,7 b' def delete_instance_profile(profile):' | |||
|
324 | 366 | profile.delete() |
|
325 | 367 | |
|
326 | 368 | |
|
327 | def ensure_iam_state(iamresource, prefix='hg-'): | |
|
369 | def ensure_iam_state(iamclient, iamresource, prefix='hg-'): | |
|
328 | 370 | """Ensure IAM state is in sync with our canonical definition.""" |
|
329 | 371 | |
|
330 | 372 | remote_profiles = {} |
@@ -360,6 +402,10 b' def ensure_iam_state(iamresource, prefix' | |||
|
360 | 402 | InstanceProfileName=actual) |
|
361 | 403 | remote_profiles[name] = profile |
|
362 | 404 | |
|
405 | waiter = iamclient.get_waiter('instance_profile_exists') | |
|
406 | waiter.wait(InstanceProfileName=actual) | |
|
407 | print('IAM instance profile %s is available' % actual) | |
|
408 | ||
|
363 | 409 | for name in sorted(set(IAM_ROLES) - set(remote_roles)): |
|
364 | 410 | entry = IAM_ROLES[name] |
|
365 | 411 | |
@@ -372,6 +418,10 b' def ensure_iam_state(iamresource, prefix' | |||
|
372 | 418 | AssumeRolePolicyDocument=ASSUME_ROLE_POLICY_DOCUMENT, |
|
373 | 419 | ) |
|
374 | 420 | |
|
421 | waiter = iamclient.get_waiter('role_exists') | |
|
422 | waiter.wait(RoleName=actual) | |
|
423 | print('IAM role %s is available' % actual) | |
|
424 | ||
|
375 | 425 | remote_roles[name] = role |
|
376 | 426 | |
|
377 | 427 | for arn in entry['policy_arns']: |
@@ -393,14 +443,14 b' def ensure_iam_state(iamresource, prefix' | |||
|
393 | 443 | profile.add_role(RoleName=role) |
|
394 | 444 | |
|
395 | 445 | |
|
396 | def find_windows_server_2019_image(ec2resource): |
|
|
397 | """Find the Amazon published Windows Server 2019 base image.""" | |
|
446 | def find_image(ec2resource, owner_id, name): | |
|
447 | """Find an AMI by its owner ID and name.""" | |
|
398 | 448 | |
|
399 | 449 | images = ec2resource.images.filter( |
|
400 | 450 | Filters=[ |
|
401 | 451 | { |
|
402 | 'Name': 'owner-alias', |
|
403 | 'Values': ['amazon'], |
|
|
452 | 'Name': 'owner-id', | |
|
453 | 'Values': [owner_id], | |
|
404 | 454 | }, |
|
405 | 455 | { |
|
406 | 456 | 'Name': 'state', |
@@ -412,14 +462,14 b' def find_windows_server_2019_image(ec2re' | |||
|
412 | 462 | }, |
|
413 | 463 | { |
|
414 | 464 | 'Name': 'name', |
|
415 | 'Values': ['Windows_Server-2019-English-Full-Base-2019.02.13'], | |
|
465 | 'Values': [name], | |
|
416 | 466 | }, |
|
417 | 467 | ]) |
|
418 | 468 | |
|
419 | 469 | for image in images: |
|
420 | 470 | return image |
|
421 | 471 | |
|
422 | raise Exception('unable to find Windows Server 2019 image') |
|
|
472 | raise Exception('unable to find image for %s' % name) | |
|
423 | 473 | |
|
424 | 474 | |
|
425 | 475 | def ensure_security_groups(ec2resource, prefix='hg-'): |
@@ -490,7 +540,7 b" def remove_resources(c, prefix='hg-'):" | |||
|
490 | 540 | |
|
491 | 541 | terminate_ec2_instances(ec2resource, prefix=prefix) |
|
492 | 542 | |
|
493 | for image in ec2resource.images.all(): |
|
|
543 | for image in ec2resource.images.filter(Owners=['self']): | |
|
494 | 544 | if image.name.startswith(prefix): |
|
495 | 545 | remove_ami(ec2resource, image) |
|
496 | 546 | |
@@ -505,6 +555,10 b" def remove_resources(c, prefix='hg-'):" | |||
|
505 | 555 | |
|
506 | 556 | for role in iamresource.roles.all(): |
|
507 | 557 | if role.name.startswith(prefix): |
|
558 | for p in role.attached_policies.all(): | |
|
559 | print('detaching policy %s from %s' % (p.arn, role.name)) | |
|
560 | role.detach_policy(PolicyArn=p.arn) | |
|
561 | ||
|
508 | 562 | print('removing role %s' % role.name) |
|
509 | 563 | role.delete() |
|
510 | 564 | |
@@ -671,6 +725,309 b' def create_temp_windows_ec2_instances(c:' | |||
|
671 | 725 | yield instances |
|
672 | 726 | |
|
673 | 727 | |
|
728 | def resolve_fingerprint(fingerprint): | |
|
729 | fingerprint = json.dumps(fingerprint, sort_keys=True) | |
|
730 | return hashlib.sha256(fingerprint.encode('utf-8')).hexdigest() | |
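
Because ``json.dumps(..., sort_keys=True)`` canonicalizes key order before hashing, logically equal inputs always produce the same digest; for example::

    >>> resolve_fingerprint({'a': 1, 'b': 2}) == resolve_fingerprint({'b': 2, 'a': 1})
    True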
|
731 | ||
|
732 | ||
|
733 | def find_and_reconcile_image(ec2resource, name, fingerprint): | |
|
734 | """Attempt to find an existing EC2 AMI with a name and fingerprint. | |
|
735 | ||
|
736 | If an image with the specified fingerprint is found, it is returned. | |
|
737 | Otherwise None is returned. | |
|
738 | ||
|
739 | Existing images for the specified name that don't have the specified | |
|
740 | fingerprint or are missing required metadata are deleted. |
|
741 | """ | |
|
742 | # Find existing AMIs with this name and delete the ones that are invalid. | |
|
743 | # Store a reference to a good image so it can be returned once the |
|
744 | # image state is reconciled. | |
|
745 | images = ec2resource.images.filter( | |
|
746 | Filters=[{'Name': 'name', 'Values': [name]}]) | |
|
747 | ||
|
748 | existing_image = None | |
|
749 | ||
|
750 | for image in images: | |
|
751 | if image.tags is None: | |
|
752 | print('image %s for %s lacks required tags; removing' % ( | |
|
753 | image.id, image.name)) | |
|
754 | remove_ami(ec2resource, image) | |
|
755 | else: | |
|
756 | tags = {t['Key']: t['Value'] for t in image.tags} | |
|
757 | ||
|
758 | if tags.get('HGIMAGEFINGERPRINT') == fingerprint: | |
|
759 | existing_image = image | |
|
760 | else: | |
|
761 | print('image %s for %s has wrong fingerprint; removing' % ( | |
|
762 | image.id, image.name)) | |
|
763 | remove_ami(ec2resource, image) | |
|
764 | ||
|
765 | return existing_image | |
|
766 | ||
|
767 | ||
|
768 | def create_ami_from_instance(ec2client, instance, name, description, | |
|
769 | fingerprint): | |
|
770 | """Create an AMI from a running instance. | |
|
771 | ||
|
772 | Returns the ``ec2resource.Image`` representing the created AMI. | |
|
773 | """ | |
|
774 | instance.stop() | |
|
775 | ||
|
776 | ec2client.get_waiter('instance_stopped').wait( | |
|
777 | InstanceIds=[instance.id], | |
|
778 | WaiterConfig={ | |
|
779 | 'Delay': 5, | |
|
780 | }) | |
|
781 | print('%s is stopped' % instance.id) | |
|
782 | ||
|
783 | image = instance.create_image( | |
|
784 | Name=name, | |
|
785 | Description=description, | |
|
786 | ) | |
|
787 | ||
|
788 | image.create_tags(Tags=[ | |
|
789 | { | |
|
790 | 'Key': 'HGIMAGEFINGERPRINT', | |
|
791 | 'Value': fingerprint, | |
|
792 | }, | |
|
793 | ]) | |
|
794 | ||
|
795 | print('waiting for image %s' % image.id) | |
|
796 | ||
|
797 | ec2client.get_waiter('image_available').wait( | |
|
798 | ImageIds=[image.id], | |
|
799 | ) | |
|
800 | ||
|
801 | print('image %s available as %s' % (image.id, image.name)) | |
|
802 | ||
|
803 | return image | |
|
804 | ||
|
805 | ||
|
806 | def ensure_linux_dev_ami(c: AWSConnection, distro='debian9', prefix='hg-'): | |
|
807 | """Ensures a Linux development AMI is available and up-to-date. | |
|
808 | ||
|
809 | Returns an ``ec2.Image`` of either an existing AMI or a newly-built one. | |
|
810 | """ | |
|
811 | ec2client = c.ec2client | |
|
812 | ec2resource = c.ec2resource | |
|
813 | ||
|
814 | name = '%s%s-%s' % (prefix, 'linux-dev', distro) | |
|
815 | ||
|
816 | if distro == 'debian9': | |
|
817 | image = find_image( | |
|
818 | ec2resource, | |
|
819 | DEBIAN_ACCOUNT_ID, | |
|
820 | 'debian-stretch-hvm-x86_64-gp2-2019-02-19-26620', | |
|
821 | ) | |
|
822 | ssh_username = 'admin' | |
|
823 | elif distro == 'ubuntu18.04': | |
|
824 | image = find_image( | |
|
825 | ec2resource, | |
|
826 | UBUNTU_ACCOUNT_ID, | |
|
827 | 'ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-20190403', | |
|
828 | ) | |
|
829 | ssh_username = 'ubuntu' | |
|
830 | elif distro == 'ubuntu18.10': | |
|
831 | image = find_image( | |
|
832 | ec2resource, | |
|
833 | UBUNTU_ACCOUNT_ID, | |
|
834 | 'ubuntu/images/hvm-ssd/ubuntu-cosmic-18.10-amd64-server-20190402', | |
|
835 | ) | |
|
836 | ssh_username = 'ubuntu' | |
|
837 | elif distro == 'ubuntu19.04': | |
|
838 | image = find_image( | |
|
839 | ec2resource, | |
|
840 | UBUNTU_ACCOUNT_ID, | |
|
841 | 'ubuntu/images/hvm-ssd/ubuntu-disco-19.04-amd64-server-20190417', | |
|
842 | ) | |
|
843 | ssh_username = 'ubuntu' | |
|
844 | else: | |
|
845 | raise ValueError('unsupported Linux distro: %s' % distro) | |
|
846 | ||
|
847 | config = { | |
|
848 | 'BlockDeviceMappings': [ | |
|
849 | { | |
|
850 | 'DeviceName': image.block_device_mappings[0]['DeviceName'], | |
|
851 | 'Ebs': { | |
|
852 | 'DeleteOnTermination': True, | |
|
853 | 'VolumeSize': 8, | |
|
854 | 'VolumeType': 'gp2', | |
|
855 | }, | |
|
856 | }, | |
|
857 | ], | |
|
858 | 'EbsOptimized': True, | |
|
859 | 'ImageId': image.id, | |
|
860 | 'InstanceInitiatedShutdownBehavior': 'stop', | |
|
861 | # 8 VCPUs for compiling Python. | |
|
862 | 'InstanceType': 't3.2xlarge', | |
|
863 | 'KeyName': '%sautomation' % prefix, | |
|
864 | 'MaxCount': 1, | |
|
865 | 'MinCount': 1, | |
|
866 | 'SecurityGroupIds': [c.security_groups['linux-dev-1'].id], | |
|
867 | } | |
|
868 | ||
|
869 | requirements2_path = (pathlib.Path(__file__).parent.parent / | |
|
870 | 'linux-requirements-py2.txt') | |
|
871 | requirements3_path = (pathlib.Path(__file__).parent.parent / | |
|
872 | 'linux-requirements-py3.txt') | |
|
873 | with requirements2_path.open('r', encoding='utf-8') as fh: | |
|
874 | requirements2 = fh.read() | |
|
875 | with requirements3_path.open('r', encoding='utf-8') as fh: | |
|
876 | requirements3 = fh.read() | |
|
877 | ||
|
878 | # Compute a deterministic fingerprint to determine whether image needs to | |
|
879 | # be regenerated. | |
|
880 | fingerprint = resolve_fingerprint({ | |
|
881 | 'instance_config': config, | |
|
882 | 'bootstrap_script': BOOTSTRAP_DEBIAN, | |
|
883 | 'requirements_py2': requirements2, | |
|
884 | 'requirements_py3': requirements3, | |
|
885 | }) | |
|
886 | ||
|
887 | existing_image = find_and_reconcile_image(ec2resource, name, fingerprint) | |
|
888 | ||
|
889 | if existing_image: | |
|
890 | return existing_image | |
|
891 | ||
|
892 | print('no suitable %s image found; creating one...' % name) | |
|
893 | ||
|
894 | with temporary_ec2_instances(ec2resource, config) as instances: | |
|
895 | wait_for_ip_addresses(instances) | |
|
896 | ||
|
897 | instance = instances[0] | |
|
898 | ||
|
899 | client = wait_for_ssh( | |
|
900 | instance.public_ip_address, 22, | |
|
901 | username=ssh_username, | |
|
902 | key_filename=str(c.key_pair_path_private('automation'))) | |
|
903 | ||
|
904 | home = '/home/%s' % ssh_username | |
|
905 | ||
|
906 | with client: | |
|
907 | print('connecting to SSH server') | |
|
908 | sftp = client.open_sftp() | |
|
909 | ||
|
910 | print('uploading bootstrap files') | |
|
911 | with sftp.open('%s/bootstrap' % home, 'wb') as fh: | |
|
912 | fh.write(BOOTSTRAP_DEBIAN) | |
|
913 | fh.chmod(0o0700) | |
|
914 | ||
|
915 | with sftp.open('%s/requirements-py2.txt' % home, 'wb') as fh: | |
|
916 | fh.write(requirements2) | |
|
917 | fh.chmod(0o0700) | |
|
918 | ||
|
919 | with sftp.open('%s/requirements-py3.txt' % home, 'wb') as fh: | |
|
920 | fh.write(requirements3) | |
|
921 | fh.chmod(0o0700) | |
|
922 | ||
|
923 | print('executing bootstrap') | |
|
924 | chan, stdin, stdout = ssh_exec_command(client, | |
|
925 | '%s/bootstrap' % home) | |
|
926 | stdin.close() | |
|
927 | ||
|
928 | for line in stdout: | |
|
929 | print(line, end='') | |
|
930 | ||
|
931 | res = chan.recv_exit_status() | |
|
932 | if res: | |
|
933 | raise Exception('non-0 exit from bootstrap: %d' % res) | |
|
934 | ||
|
935 | print('bootstrap completed; stopping %s to create %s' % ( | |
|
936 | instance.id, name)) | |
|
937 | ||
|
938 | return create_ami_from_instance(ec2client, instance, name, | |
|
939 | 'Mercurial Linux development environment', | |
|
940 | fingerprint) | |
|
941 | ||
|
942 | ||
|
943 | @contextlib.contextmanager | |
|
944 | def temporary_linux_dev_instances(c: AWSConnection, image, instance_type, | |
|
945 | prefix='hg-', ensure_extra_volume=False): | |
|
946 | """Create temporary Linux development EC2 instances. | |
|
947 | ||
|
948 | Context manager resolves to a list of ``ec2.Instance`` that were created | |
|
949 | and are running. | |
|
950 | ||
|
951 | ``ensure_extra_volume`` can be set to ``True`` to require that instances | |
|
952 | have a 2nd storage volume available other than the primary AMI volume. | |
|
953 | For instance types with instance storage, this does nothing special. | |
|
954 | But for instance types without instance storage, an additional EBS volume | |
|
955 | will be added to the instance. | |
|
956 | ||
|
957 | Instances have an ``ssh_client`` attribute containing a paramiko SSHClient | |
|
958 | instance bound to the instance. | |
|
959 | ||
|
960 | Instances have an ``ssh_private_key_path`` attribute containing the |
|
961 | str path to the SSH private key to connect to the instance. | |
|
962 | """ | |
|
963 | ||
|
964 | block_device_mappings = [ | |
|
965 | { | |
|
966 | 'DeviceName': image.block_device_mappings[0]['DeviceName'], | |
|
967 | 'Ebs': { | |
|
968 | 'DeleteOnTermination': True, | |
|
969 | 'VolumeSize': 8, | |
|
970 | 'VolumeType': 'gp2', | |
|
971 | }, | |
|
972 | } | |
|
973 | ] | |
|
974 | ||
|
975 | # This is not an exhaustive list of instance types having instance storage. | |
|
976 | # But it should cover the instance types we're likely to use. |
|
977 | if (ensure_extra_volume | |
|
978 | and not instance_type.startswith(tuple(INSTANCE_TYPES_WITH_STORAGE))): | |
|
979 | main_device = block_device_mappings[0]['DeviceName'] | |
|
980 | ||
|
981 | if main_device == 'xvda': | |
|
982 | second_device = 'xvdb' | |
|
983 | elif main_device == '/dev/sda1': | |
|
984 | second_device = '/dev/sdb' | |
|
985 | else: | |
|
986 | raise ValueError('unhandled primary EBS device name: %s' % | |
|
987 | main_device) | |
|
988 | ||
|
989 | block_device_mappings.append({ | |
|
990 | 'DeviceName': second_device, | |
|
991 | 'Ebs': { | |
|
992 | 'DeleteOnTermination': True, | |
|
993 | 'VolumeSize': 8, | |
|
994 | 'VolumeType': 'gp2', | |
|
995 | } | |
|
996 | }) | |
|
997 | ||
|
998 | config = { | |
|
999 | 'BlockDeviceMappings': block_device_mappings, | |
|
1000 | 'EbsOptimized': True, | |
|
1001 | 'ImageId': image.id, | |
|
1002 | 'InstanceInitiatedShutdownBehavior': 'terminate', | |
|
1003 | 'InstanceType': instance_type, | |
|
1004 | 'KeyName': '%sautomation' % prefix, | |
|
1005 | 'MaxCount': 1, | |
|
1006 | 'MinCount': 1, | |
|
1007 | 'SecurityGroupIds': [c.security_groups['linux-dev-1'].id], | |
|
1008 | } | |
|
1009 | ||
|
1010 | with temporary_ec2_instances(c.ec2resource, config) as instances: | |
|
1011 | wait_for_ip_addresses(instances) | |
|
1012 | ||
|
1013 | ssh_private_key_path = str(c.key_pair_path_private('automation')) | |
|
1014 | ||
|
1015 | for instance in instances: | |
|
1016 | client = wait_for_ssh( | |
|
1017 | instance.public_ip_address, 22, | |
|
1018 | username='hg', | |
|
1019 | key_filename=ssh_private_key_path) | |
|
1020 | ||
|
1021 | instance.ssh_client = client | |
|
1022 | instance.ssh_private_key_path = ssh_private_key_path | |
|
1023 | ||
|
1024 | try: | |
|
1025 | yield instances | |
|
1026 | finally: | |
|
1027 | for instance in instances: | |
|
1028 | instance.ssh_client.close() | |
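
A hypothetical use of this context manager, assuming an ``AWSConnection`` in ``c`` and an ``image`` obtained from ``ensure_linux_dev_ami()``::

    with temporary_linux_dev_instances(c, image, 'c5.9xlarge') as insts:
        chan, stdin, stdout = ssh_exec_command(insts[0].ssh_client, 'uname -a')
        stdin.close()
        for line in stdout:
            print(line, end='')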
|
1029 | ||
|
1030 | ||
|
674 | 1031 | def ensure_windows_dev_ami(c: AWSConnection, prefix='hg-'): |
|
675 | 1032 | """Ensure Windows Development AMI is available and up-to-date. |
|
676 | 1033 | |
@@ -689,6 +1046,10 b' def ensure_windows_dev_ami(c: AWSConnect' | |||
|
689 | 1046 | |
|
690 | 1047 | name = '%s%s' % (prefix, 'windows-dev') |
|
691 | 1048 | |
|
1049 | image = find_image(ec2resource, | |
|
1050 | '801119661308', | |
|
1051 | 'Windows_Server-2019-English-Full-Base-2019.02.13') | |
|
1052 | ||
|
692 | 1053 | config = { |
|
693 | 1054 | 'BlockDeviceMappings': [ |
|
694 | 1055 | { |
@@ -700,7 +1061,7 b' def ensure_windows_dev_ami(c: AWSConnect' | |||
|
700 | 1061 | }, |
|
701 | 1062 | } |
|
702 | 1063 | ], |
|
703 | 'ImageId': find_windows_server_2019_image(ec2resource).id, | |
|
1064 | 'ImageId': image.id, | |
|
704 | 1065 | 'InstanceInitiatedShutdownBehavior': 'stop', |
|
705 | 1066 | 'InstanceType': 't3.medium', |
|
706 | 1067 | 'KeyName': '%sautomation' % prefix, |
@@ -735,38 +1096,14 b' def ensure_windows_dev_ami(c: AWSConnect' | |||
|
735 | 1096 | |
|
736 | 1097 | # Compute a deterministic fingerprint to determine whether image needs |
|
737 | 1098 | # to be regenerated. |
|
738 | fingerprint = { | |
|
1099 | fingerprint = resolve_fingerprint({ | |
|
739 | 1100 | 'instance_config': config, |
|
740 | 1101 | 'user_data': WINDOWS_USER_DATA, |
|
741 | 1102 | 'initial_bootstrap': WINDOWS_BOOTSTRAP_POWERSHELL, |
|
742 | 1103 | 'bootstrap_commands': commands, |
|
743 | } | |
|
744 | ||
|
745 | fingerprint = json.dumps(fingerprint, sort_keys=True) | |
|
746 | fingerprint = hashlib.sha256(fingerprint.encode('utf-8')).hexdigest() | |
|
747 | ||
|
748 | # Find existing AMIs with this name and delete the ones that are invalid. | |
|
749 | # Store a reference to a good image so it can be returned one the | |
|
750 | # image state is reconciled. | |
|
751 | images = ec2resource.images.filter( | |
|
752 | Filters=[{'Name': 'name', 'Values': [name]}]) | |
|
753 | ||
|
754 | existing_image = None | |
|
1104 | }) | |
|
755 | 1105 | |
|
756 | for image in images: | |
|
757 | if image.tags is None: | |
|
758 | print('image %s for %s lacks required tags; removing' % ( | |
|
759 | image.id, image.name)) | |
|
760 | remove_ami(ec2resource, image) | |
|
761 | else: | |
|
762 | tags = {t['Key']: t['Value'] for t in image.tags} | |
|
763 | ||
|
764 | if tags.get('HGIMAGEFINGERPRINT') == fingerprint: | |
|
765 | existing_image = image | |
|
766 | else: | |
|
767 | print('image %s for %s has wrong fingerprint; removing' % ( | |
|
768 | image.id, image.name)) | |
|
769 | remove_ami(ec2resource, image) | |
|
1106 | existing_image = find_and_reconcile_image(ec2resource, name, fingerprint) | |
|
770 | 1107 | |
|
771 | 1108 | if existing_image: |
|
772 | 1109 | return existing_image |
@@ -795,10 +1132,26 b' def ensure_windows_dev_ami(c: AWSConnect' | |||
|
795 | 1132 | ) |
|
796 | 1133 | |
|
797 | 1134 | # Reboot so all updates are fully applied. |
|
1135 | # | |
|
1136 | # We don't use instance.reboot() here because it is asynchronous and | |
|
1137 | # we don't know when exactly the instance has rebooted. It could take | |
|
1138 | # a while to stop and we may start trying to interact with the instance | |
|
1139 | # before it has rebooted. | |
|
798 | 1140 | print('rebooting instance %s' % instance.id) |
|
799 | ec2client.reboot_instances(InstanceIds=[instance.id]) | |
|
1141 | instance.stop() | |
|
1142 | ec2client.get_waiter('instance_stopped').wait( | |
|
1143 | InstanceIds=[instance.id], | |
|
1144 | WaiterConfig={ | |
|
1145 | 'Delay': 5, | |
|
1146 | }) | |
|
800 | 1147 | |
|
801 |
|
|
|
1148 | instance.start() | |
|
1149 | wait_for_ip_addresses([instance]) | |
|
1150 | ||
|
1151 | # There is a race condition here between the User Data PS script running | |
|
1152 | # and us connecting to WinRM. This can manifest as | |
|
1153 | # "AuthorizationManager check failed" failures during run_powershell(). | |
|
1154 | # TODO figure out a workaround. | |
|
802 | 1155 | |
|
803 | 1156 | print('waiting for Windows Remote Management to come back...') |
|
804 | 1157 | client = wait_for_winrm(instance.public_ip_address, 'Administrator', |
@@ -810,36 +1163,9 b' def ensure_windows_dev_ami(c: AWSConnect' | |||
|
810 | 1163 | run_powershell(instance.winrm_client, '\n'.join(commands)) |
|
811 | 1164 | |
|
812 | 1165 | print('bootstrap completed; stopping %s to create image' % instance.id) |
|
813 | instance.stop() | |
|
814 | ||
|
815 | ec2client.get_waiter('instance_stopped').wait( | |
|
816 | InstanceIds=[instance.id], | |
|
817 | WaiterConfig={ | |
|
818 | 'Delay': 5, | |
|
819 | }) | |
|
820 | print('%s is stopped' % instance.id) | |
|
821 | ||
|
822 | image = instance.create_image( | |
|
823 | Name=name, | |
|
824 | Description='Mercurial Windows development environment', | |
|
825 | ) | |
|
826 | ||
|
827 | image.create_tags(Tags=[ | |
|
828 | { | |
|
829 | 'Key': 'HGIMAGEFINGERPRINT', | |
|
830 | 'Value': fingerprint, | |
|
831 | }, | |
|
832 | ]) | |
|
833 | ||
|
834 | print('waiting for image %s' % image.id) | |
|
835 | ||
|
836 | ec2client.get_waiter('image_available').wait( | |
|
837 | ImageIds=[image.id], | |
|
838 | ) | |
|
839 | ||
|
840 | print('image %s available as %s' % (image.id, image.name)) | |
|
841 | ||
|
842 | return image | |
|
1166 | return create_ami_from_instance(ec2client, instance, name, | |
|
1167 | 'Mercurial Windows development environment', | |
|
1168 | fingerprint) | |
|
843 | 1169 | |
|
844 | 1170 | |
|
845 | 1171 | @contextlib.contextmanager |
@@ -8,12 +8,15 b'' | |||
|
8 | 8 | # no-check-code because Python 3 native. |
|
9 | 9 | |
|
10 | 10 | import argparse |
|
11 | import concurrent.futures as futures | |
|
11 | 12 | import os |
|
12 | 13 | import pathlib |
|
14 | import time | |
|
13 | 15 | |
|
14 | 16 | from . import ( |
|
15 | 17 | aws, |
|
16 | 18 | HGAutomation, |
|
19 | linux, | |
|
17 | 20 | windows, |
|
18 | 21 | ) |
|
19 | 22 | |
@@ -22,6 +25,33 b' SOURCE_ROOT = pathlib.Path(os.path.abspa' | |||
|
22 | 25 | DIST_PATH = SOURCE_ROOT / 'dist' |
|
23 | 26 | |
|
24 | 27 | |
|
28 | def bootstrap_linux_dev(hga: HGAutomation, aws_region, distros=None, | |
|
29 | parallel=False): | |
|
30 | c = hga.aws_connection(aws_region) | |
|
31 | ||
|
32 | if distros: | |
|
33 | distros = distros.split(',') | |
|
34 | else: | |
|
35 | distros = sorted(linux.DISTROS) | |
|
36 | ||
|
37 | # TODO There is a wonky interaction involving KeyboardInterrupt whereby | |
|
38 | # the context manager that is supposed to terminate the temporary EC2 | |
|
39 | # instance doesn't run. Until we fix this, make parallel building opt-in | |
|
40 | # so we don't orphan instances. | |
|
41 | if parallel: | |
|
42 | fs = [] | |
|
43 | ||
|
44 | with futures.ThreadPoolExecutor(len(distros)) as e: | |
|
45 | for distro in distros: | |
|
46 | fs.append(e.submit(aws.ensure_linux_dev_ami, c, distro=distro)) | |
|
47 | ||
|
48 | for f in fs: | |
|
49 | f.result() | |
|
50 | else: | |
|
51 | for distro in distros: | |
|
52 | aws.ensure_linux_dev_ami(c, distro=distro) | |
|
53 | ||
|
54 | ||
|
25 | 55 | def bootstrap_windows_dev(hga: HGAutomation, aws_region): |
|
26 | 56 | c = hga.aws_connection(aws_region) |
|
27 | 57 | image = aws.ensure_windows_dev_ami(c) |
@@ -73,7 +103,8 b' def build_windows_wheel(hga: HGAutomatio' | |||
|
73 | 103 | windows.build_wheel(instance.winrm_client, a, DIST_PATH) |
|
74 | 104 | |
|
75 | 105 | |
|
76 | def build_all_windows_packages(hga: HGAutomation, aws_region, revision): |
|
|
106 | def build_all_windows_packages(hga: HGAutomation, aws_region, revision, | |
|
107 | version): | |
|
77 | 108 | c = hga.aws_connection(aws_region) |
|
78 | 109 | image = aws.ensure_windows_dev_ami(c) |
|
79 | 110 | DIST_PATH.mkdir(exist_ok=True) |
@@ -89,19 +120,52 b' def build_all_windows_packages(hga: HGAu' | |||
|
89 | 120 | windows.purge_hg(winrm_client) |
|
90 | 121 | windows.build_wheel(winrm_client, arch, DIST_PATH) |
|
91 | 122 | windows.purge_hg(winrm_client) |
|
92 | windows.build_inno_installer(winrm_client, arch, DIST_PATH) |
|
|
123 | windows.build_inno_installer(winrm_client, arch, DIST_PATH, | |
|
124 | version=version) | |
|
93 | 125 | windows.purge_hg(winrm_client) |
|
94 | windows.build_wix_installer(winrm_client, arch, DIST_PATH) |
|
|
126 | windows.build_wix_installer(winrm_client, arch, DIST_PATH, | |
|
127 | version=version) | |
|
95 | 128 | |
|
96 | 129 | |
|
97 | 130 | def terminate_ec2_instances(hga: HGAutomation, aws_region): |
|
98 | c = hga.aws_connection(aws_region) | |
|
131 | c = hga.aws_connection(aws_region, ensure_ec2_state=False) | |
|
99 | 132 | aws.terminate_ec2_instances(c.ec2resource) |
|
100 | 133 | |
|
101 | 134 | |
|
102 | 135 | def purge_ec2_resources(hga: HGAutomation, aws_region): |
|
136 | c = hga.aws_connection(aws_region, ensure_ec2_state=False) | |
|
137 | aws.remove_resources(c) | |
|
138 | ||
|
139 | ||
|
140 | def run_tests_linux(hga: HGAutomation, aws_region, instance_type, | |
|
141 | python_version, test_flags, distro, filesystem): | |
|
103 | 142 | c = hga.aws_connection(aws_region) |
|
104 | aws.remove_resources(c) | |
|
143 | image = aws.ensure_linux_dev_ami(c, distro=distro) | |
|
144 | ||
|
145 | t_start = time.time() | |
|
146 | ||
|
147 | ensure_extra_volume = filesystem not in ('default', 'tmpfs') | |
|
148 | ||
|
149 | with aws.temporary_linux_dev_instances( | |
|
150 | c, image, instance_type, | |
|
151 | ensure_extra_volume=ensure_extra_volume) as insts: | |
|
152 | ||
|
153 | instance = insts[0] | |
|
154 | ||
|
155 | linux.prepare_exec_environment(instance.ssh_client, | |
|
156 | filesystem=filesystem) | |
|
157 | linux.synchronize_hg(SOURCE_ROOT, instance, '.') | |
|
158 | t_prepared = time.time() | |
|
159 | linux.run_tests(instance.ssh_client, python_version, | |
|
160 | test_flags) | |
|
161 | t_done = time.time() | |
|
162 | ||
|
163 | t_setup = t_prepared - t_start | |
|
164 | t_all = t_done - t_start | |
|
165 | ||
|
166 | print( | |
|
167 | 'total time: %.1fs; setup: %.1fs; tests: %.1fs; setup overhead: %.1f%%' | |
|
168 | % (t_all, t_setup, t_done - t_prepared, t_setup / t_all * 100.0)) | |
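
With hypothetical numbers, the summary reads like::

    total time: 620.0s; setup: 95.0s; tests: 525.0s; setup overhead: 15.3%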
|
105 | 169 | |
|
106 | 170 | |
|
107 | 171 | def run_tests_windows(hga: HGAutomation, aws_region, instance_type, |
@@ -135,6 +199,21 b' def get_parser():' | |||
|
135 | 199 | subparsers = parser.add_subparsers() |
|
136 | 200 | |
|
137 | 201 | sp = subparsers.add_parser( |
|
202 | 'bootstrap-linux-dev', | |
|
203 | help='Bootstrap Linux development environments', | |
|
204 | ) | |
|
205 | sp.add_argument( | |
|
206 | '--distros', | |
|
207 | help='Comma delimited list of distros to bootstrap', | |
|
208 | ) | |
|
209 | sp.add_argument( | |
|
210 | '--parallel', | |
|
211 | action='store_true', | |
|
212 | help='Generate AMIs in parallel (not CTRL-c safe)' | |
|
213 | ) | |
|
214 | sp.set_defaults(func=bootstrap_linux_dev) | |
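
Mirroring the existing ``automation.py`` usage examples, a hypothetical invocation of the new subcommand defined above::

    $ automation.py bootstrap-linux-dev --distros debian9,ubuntu18.04 --parallel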
|
215 | ||
|
216 | sp = subparsers.add_parser( | |
|
138 | 217 | 'bootstrap-windows-dev', |
|
139 | 218 | help='Bootstrap the Windows development environment', |
|
140 | 219 | ) |
@@ -149,6 +228,10 b' def get_parser():' | |||
|
149 | 228 | help='Mercurial revision to build', |
|
150 | 229 | default='.', |
|
151 | 230 | ) |
|
231 | sp.add_argument( | |
|
232 | '--version', | |
|
233 | help='Mercurial version string to use', | |
|
234 | ) | |
|
152 | 235 | sp.set_defaults(func=build_all_windows_packages) |
|
153 | 236 | |
|
154 | 237 | sp = subparsers.add_parser( |
@@ -226,6 +309,41 b' def get_parser():' | |||
|
226 | 309 | sp.set_defaults(func=purge_ec2_resources) |
|
227 | 310 | |
|
228 | 311 | sp = subparsers.add_parser( |
|
312 | 'run-tests-linux', | |
|
313 | help='Run tests on Linux', | |
|
314 | ) | |
|
315 | sp.add_argument( | |
|
316 | '--distro', | |
|
317 | help='Linux distribution to run tests on', | |
|
318 | choices=linux.DISTROS, | |
|
319 | default='debian9', | |
|
320 | ) | |
|
321 | sp.add_argument( | |
|
322 | '--filesystem', | |
|
323 | help='Filesystem type to use', | |
|
324 | choices={'btrfs', 'default', 'ext3', 'ext4', 'jfs', 'tmpfs', 'xfs'}, | |
|
325 | default='default', | |
|
326 | ) | |
|
327 | sp.add_argument( | |
|
328 | '--instance-type', | |
|
329 | help='EC2 instance type to use', | |
|
330 | default='c5.9xlarge', | |
|
331 | ) | |
|
332 | sp.add_argument( | |
|
333 | '--python-version', | |
|
334 | help='Python version to use', | |
|
335 | choices={'system2', 'system3', '2.7', '3.5', '3.6', '3.7', '3.8', | |
|
336 | 'pypy', 'pypy3.5', 'pypy3.6'}, | |
|
337 | default='system2', | |
|
338 | ) | |
|
339 | sp.add_argument( | |
|
340 | 'test_flags', | |
|
341 | help='Extra command line flags to pass to run-tests.py', | |
|
342 | nargs='*', | |
|
343 | ) | |
|
344 | sp.set_defaults(func=run_tests_linux) | |
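
A hypothetical invocation; flags after ``--`` are forwarded to run-tests.py via the ``test_flags`` positional::

    $ automation.py run-tests-linux --distro ubuntu18.04 --filesystem xfs \
        --python-version 3.6 -- -j2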
|
345 | ||
|
346 | sp = subparsers.add_parser( | |
|
229 | 347 | 'run-tests-windows', |
|
230 | 348 | help='Run tests on Windows', |
|
231 | 349 | ) |
@@ -39,7 +39,7 b' Write-Output "activating Visual Studio 2' | |||
|
39 | 39 | $Env:PATH = "${root}\VC\Bin;${root}\WinSDK\Bin;$Env:PATH" |
|
40 | 40 | $Env:INCLUDE = "${root}\VC\Include;${root}\WinSDK\Include;$Env:INCLUDE" |
|
41 | 41 | $Env:LIB = "${root}\VC\Lib;${root}\WinSDK\Lib;$Env:LIB" |
|
42 | $Env:LIBPATH = "${root}\VC\lib;${root}\WinSDK\Lib |
|
|
42 | $Env:LIBPATH = "${root}\VC\lib;${root}\WinSDK\Lib;$Env:LIBPATH" | |
|
43 | 43 | '''.lstrip() |
|
44 | 44 | |
|
45 | 45 | HG_PURGE = r''' |
@@ -156,6 +156,10 b' def synchronize_hg(hg_repo: pathlib.Path' | |||
|
156 | 156 | fh.write(' UserKnownHostsFile %s\n' % (ssh_dir / 'known_hosts')) |
|
157 | 157 | fh.write(' IdentityFile %s\n' % (ssh_dir / 'id_rsa')) |
|
158 | 158 | |
|
159 | if not (hg_repo / '.hg').is_dir(): | |
|
160 | raise Exception('%s is not a Mercurial repository; ' | |
|
161 | 'synchronization not yet supported' % hg_repo) | |
|
162 | ||
|
159 | 163 | env = dict(os.environ) |
|
160 | 164 | env['HGPLAIN'] = '1' |
|
161 | 165 | env['HGENCODING'] = 'utf-8' |
@@ -172,7 +176,8 b' def synchronize_hg(hg_repo: pathlib.Path' | |||
|
172 | 176 | 'python2.7', hg_bin, |
|
173 | 177 | '--config', 'ui.ssh=ssh -F %s' % ssh_config, |
|
174 | 178 | '--config', 'ui.remotecmd=c:/hgdev/venv-bootstrap/Scripts/hg.exe', |
|
175 | 'push', '-r', full_revision, 'ssh://%s/c:/hgdev/src' % public_ip, |
|
|
179 | 'push', '-f', '-r', full_revision, | |
|
180 | 'ssh://%s/c:/hgdev/src' % public_ip, | |
|
176 | 181 | ] |
|
177 | 182 | |
|
178 | 183 | subprocess.run(args, cwd=str(hg_repo), env=env, check=True) |
@@ -25,7 +25,7 b' import requests.exceptions' | |||
|
25 | 25 | logger = logging.getLogger(__name__) |
|
26 | 26 | |
|
27 | 27 | |
|
28 | def wait_for_winrm(host, username, password, timeout=120, ssl=False): |
|
|
28 | def wait_for_winrm(host, username, password, timeout=180, ssl=False): | |
|
29 | 29 | """Wait for the Windows Remoting (WinRM) service to become available. |
|
30 | 30 | |
|
31 | 31 | Returns a ``psrpclient.Client`` instance. |
@@ -8,47 +8,68 b' asn1crypto==0.24.0 \\' | |||
|
8 | 8 | --hash=sha256:2f1adbb7546ed199e3c90ef23ec95c5cf3585bac7d11fb7eb562a3fe89c64e87 \ |
|
9 | 9 | --hash=sha256:9d5c20441baf0cb60a4ac34cc447c6c189024b6b4c6cd7877034f4965c464e49 \ |
|
10 | 10 | # via cryptography |
|
11 | boto3==1.9.111 \ | |
|
12 | --hash=sha256:06414c75d1f62af7d04fd652b38d1e4fd3cfd6b35bad978466af88e2aaecd00d \ | |
|
13 | --hash=sha256:f3b77dff382374773d02411fa47ee408f4f503aeebd837fd9dc9ed8635bc5e8e | |
|
14 | botocore==1.12.111 \ | |
|
15 | --hash=sha256:6af473c52d5e3e7ff82de5334e9fee96b2d5ec2df5d78bc00cd9937e2573a7a8 \ | |
|
16 | --hash=sha256:9f5123c7be704b17aeacae99b5842ab17bda1f799dd29134de8c70e0a50a45d7 \ | |
|
11 | bcrypt==3.1.6 \ | |
|
12 | --hash=sha256:0ba875eb67b011add6d8c5b76afbd92166e98b1f1efab9433d5dc0fafc76e203 \ | |
|
13 | --hash=sha256:21ed446054c93e209434148ef0b362432bb82bbdaf7beef70a32c221f3e33d1c \ | |
|
14 | --hash=sha256:28a0459381a8021f57230954b9e9a65bb5e3d569d2c253c5cac6cb181d71cf23 \ | |
|
15 | --hash=sha256:2aed3091eb6f51c26b7c2fad08d6620d1c35839e7a362f706015b41bd991125e \ | |
|
16 | --hash=sha256:2fa5d1e438958ea90eaedbf8082c2ceb1a684b4f6c75a3800c6ec1e18ebef96f \ | |
|
17 | --hash=sha256:3a73f45484e9874252002793518da060fb11eaa76c30713faa12115db17d1430 \ | |
|
18 | --hash=sha256:3e489787638a36bb466cd66780e15715494b6d6905ffdbaede94440d6d8e7dba \ | |
|
19 | --hash=sha256:44636759d222baa62806bbceb20e96f75a015a6381690d1bc2eda91c01ec02ea \ | |
|
20 | --hash=sha256:678c21b2fecaa72a1eded0cf12351b153615520637efcadc09ecf81b871f1596 \ | |
|
21 | --hash=sha256:75460c2c3786977ea9768d6c9d8957ba31b5fbeb0aae67a5c0e96aab4155f18c \ | |
|
22 | --hash=sha256:8ac06fb3e6aacb0a95b56eba735c0b64df49651c6ceb1ad1cf01ba75070d567f \ | |
|
23 | --hash=sha256:8fdced50a8b646fff8fa0e4b1c5fd940ecc844b43d1da5a980cb07f2d1b1132f \ | |
|
24 | --hash=sha256:9b2c5b640a2da533b0ab5f148d87fb9989bf9bcb2e61eea6a729102a6d36aef9 \ | |
|
25 | --hash=sha256:a9083e7fa9adb1a4de5ac15f9097eb15b04e2c8f97618f1b881af40abce382e1 \ | |
|
26 | --hash=sha256:b7e3948b8b1a81c5a99d41da5fb2dc03ddb93b5f96fcd3fd27e643f91efa33e1 \ | |
|
27 | --hash=sha256:b998b8ca979d906085f6a5d84f7b5459e5e94a13fc27c28a3514437013b6c2f6 \ | |
|
28 | --hash=sha256:dd08c50bc6f7be69cd7ba0769acca28c846ec46b7a8ddc2acf4b9ac6f8a7457e \ | |
|
29 | --hash=sha256:de5badee458544ab8125e63e39afeedfcf3aef6a6e2282ac159c95ae7472d773 \ | |
|
30 | --hash=sha256:ede2a87333d24f55a4a7338a6ccdccf3eaa9bed081d1737e0db4dbd1a4f7e6b6 \ | |
|
31 | # via paramiko | |
|
32 | boto3==1.9.137 \ | |
|
33 | --hash=sha256:882cc4869b47b51dae4b4a900769e72171ff00e0b6bca644b2d7a7ad7378f324 \ | |
|
34 | --hash=sha256:cd503a7e7a04f1c14d2801f9727159dfa88c393b4004e98940fa4aa205d920c8 | |
|
35 | botocore==1.12.137 \ | |
|
36 | --hash=sha256:0d95794f6b1239c75e2c5f966221bcd4b68020fddb5676f757531eedbb612ed8 \ | |
|
37 | --hash=sha256:3213cf48cf2ceee10fc3b93221f2cd1c38521cca7584f547d5c086213cc60f35 \ | |
|
17 | 38 | # via boto3, s3transfer |
|
18 | 39 | certifi==2019.3.9 \ |
|
19 | 40 | --hash=sha256:59b7658e26ca9c7339e00f8f4636cdfe59d34fa37b9b04f6f9e9926b3cece1a5 \ |
|
20 | 41 | --hash=sha256:b26104d6835d1f5e49452a26eb2ff87fe7090b89dfcaee5ea2212697e1e1d7ae \ |
|
21 | 42 | # via requests |
|
22 | cffi==1.12.2 \ |
|
|
23 | --hash=sha256:00b97afa72c233495560a0793cdc86c2571721b4271c0667addc83c417f3d90f \ | |
|
24 | --hash=sha256:0ba1b0c90f2124459f6966a10c03794082a2f3985cd699d7d63c4a8dae113e11 \ | |
|
25 | --hash=sha256:0bffb69da295a4fc3349f2ec7cbe16b8ba057b0a593a92cbe8396e535244ee9d \ | |
|
26 | --hash=sha256:21469a2b1082088d11ccd79dd84157ba42d940064abbfa59cf5f024c19cf4891 \ | |
|
27 | --hash=sha256:2e4812f7fa984bf1ab253a40f1f4391b604f7fc424a3e21f7de542a7f8f7aedf \ | |
|
28 | --hash=sha256:2eac2cdd07b9049dd4e68449b90d3ef1adc7c759463af5beb53a84f1db62e36c \ | |
|
29 | --hash=sha256:2f9089979d7456c74d21303c7851f158833d48fb265876923edcb2d0194104ed \ | |
|
30 | --hash=sha256:3dd13feff00bddb0bd2d650cdb7338f815c1789a91a6f68fdc00e5c5ed40329b \ | |
|
31 | --hash=sha256:4065c32b52f4b142f417af6f33a5024edc1336aa845b9d5a8d86071f6fcaac5a \ | |
|
32 | --hash=sha256:51a4ba1256e9003a3acf508e3b4f4661bebd015b8180cc31849da222426ef585 \ | |
|
33 | --hash=sha256:59888faac06403767c0cf8cfb3f4a777b2939b1fbd9f729299b5384f097f05ea \ | |
|
34 | --hash=sha256:59c87886640574d8b14910840327f5cd15954e26ed0bbd4e7cef95fa5aef218f \ | |
|
35 | --hash=sha256:610fc7d6db6c56a244c2701575f6851461753c60f73f2de89c79bbf1cc807f33 \ | |
|
36 | --hash=sha256:70aeadeecb281ea901bf4230c6222af0248c41044d6f57401a614ea59d96d145 \ | |
|
37 | --hash=sha256:71e1296d5e66c59cd2c0f2d72dc476d42afe02aeddc833d8e05630a0551dad7a \ | |
|
38 | --hash=sha256:8fc7a49b440ea752cfdf1d51a586fd08d395ff7a5d555dc69e84b1939f7ddee3 \ | |
|
39 | --hash=sha256:9b5c2afd2d6e3771d516045a6cfa11a8da9a60e3d128746a7fe9ab36dfe7221f \ | |
|
40 | --hash=sha256:9c759051ebcb244d9d55ee791259ddd158188d15adee3c152502d3b69005e6bd \ | |
|
41 | --hash=sha256:b4d1011fec5ec12aa7cc10c05a2f2f12dfa0adfe958e56ae38dc140614035804 \ | |
|
42 | --hash=sha256:b4f1d6332339ecc61275bebd1f7b674098a66fea11a00c84d1c58851e618dc0d \ | |
|
43 | --hash=sha256:c030cda3dc8e62b814831faa4eb93dd9a46498af8cd1d5c178c2de856972fd92 \ | |
|
44 | --hash=sha256:c2e1f2012e56d61390c0e668c20c4fb0ae667c44d6f6a2eeea5d7148dcd3df9f \ | |
|
45 | --hash=sha256:c37c77d6562074452120fc6c02ad86ec928f5710fbc435a181d69334b4de1d84 \ | |
|
46 | --hash=sha256:c8149780c60f8fd02752d0429246088c6c04e234b895c4a42e1ea9b4de8d27fb \ | |
|
47 | --hash=sha256:cbeeef1dc3c4299bd746b774f019de9e4672f7cc666c777cd5b409f0b746dac7 \ | |
|
48 | --hash=sha256:e113878a446c6228669144ae8a56e268c91b7f1fafae927adc4879d9849e0ea7 \ | |
|
49 | --hash=sha256:e21162bf941b85c0cda08224dade5def9360f53b09f9f259adb85fc7dd0e7b35 \ | |
|
50 | --hash=sha256:fb6934ef4744becbda3143d30c6604718871495a5e36c408431bf33d9c146889 \ | |
|
51 | # via cryptography | |
|
43 | cffi==1.12.3 \ | |
|
44 | --hash=sha256:041c81822e9f84b1d9c401182e174996f0bae9991f33725d059b771744290774 \ | |
|
45 | --hash=sha256:046ef9a22f5d3eed06334d01b1e836977eeef500d9b78e9ef693f9380ad0b83d \ | |
|
46 | --hash=sha256:066bc4c7895c91812eff46f4b1c285220947d4aa46fa0a2651ff85f2afae9c90 \ | |
|
47 | --hash=sha256:066c7ff148ae33040c01058662d6752fd73fbc8e64787229ea8498c7d7f4041b \ | |
|
48 | --hash=sha256:2444d0c61f03dcd26dbf7600cf64354376ee579acad77aef459e34efcb438c63 \ | |
|
49 | --hash=sha256:300832850b8f7967e278870c5d51e3819b9aad8f0a2c8dbe39ab11f119237f45 \ | |
|
50 | --hash=sha256:34c77afe85b6b9e967bd8154e3855e847b70ca42043db6ad17f26899a3df1b25 \ | |
|
51 | --hash=sha256:46de5fa00f7ac09f020729148ff632819649b3e05a007d286242c4882f7b1dc3 \ | |
|
52 | --hash=sha256:4aa8ee7ba27c472d429b980c51e714a24f47ca296d53f4d7868075b175866f4b \ | |
|
53 | --hash=sha256:4d0004eb4351e35ed950c14c11e734182591465a33e960a4ab5e8d4f04d72647 \ | |
|
54 | --hash=sha256:4e3d3f31a1e202b0f5a35ba3bc4eb41e2fc2b11c1eff38b362de710bcffb5016 \ | |
|
55 | --hash=sha256:50bec6d35e6b1aaeb17f7c4e2b9374ebf95a8975d57863546fa83e8d31bdb8c4 \ | |
|
56 | --hash=sha256:55cad9a6df1e2a1d62063f79d0881a414a906a6962bc160ac968cc03ed3efcfb \ | |
|
57 | --hash=sha256:5662ad4e4e84f1eaa8efce5da695c5d2e229c563f9d5ce5b0113f71321bcf753 \ | |
|
58 | --hash=sha256:59b4dc008f98fc6ee2bb4fd7fc786a8d70000d058c2bbe2698275bc53a8d3fa7 \ | |
|
59 | --hash=sha256:73e1ffefe05e4ccd7bcea61af76f36077b914f92b76f95ccf00b0c1b9186f3f9 \ | |
|
60 | --hash=sha256:a1f0fd46eba2d71ce1589f7e50a9e2ffaeb739fb2c11e8192aa2b45d5f6cc41f \ | |
|
61 | --hash=sha256:a2e85dc204556657661051ff4bab75a84e968669765c8a2cd425918699c3d0e8 \ | |
|
62 | --hash=sha256:a5457d47dfff24882a21492e5815f891c0ca35fefae8aa742c6c263dac16ef1f \ | |
|
63 | --hash=sha256:a8dccd61d52a8dae4a825cdbb7735da530179fea472903eb871a5513b5abbfdc \ | |
|
64 | --hash=sha256:ae61af521ed676cf16ae94f30fe202781a38d7178b6b4ab622e4eec8cefaff42 \ | |
|
65 | --hash=sha256:b012a5edb48288f77a63dba0840c92d0504aa215612da4541b7b42d849bc83a3 \ | |
|
66 | --hash=sha256:d2c5cfa536227f57f97c92ac30c8109688ace8fa4ac086d19d0af47d134e2909 \ | |
|
67 | --hash=sha256:d42b5796e20aacc9d15e66befb7a345454eef794fdb0737d1af593447c6c8f45 \ | |
|
68 | --hash=sha256:dee54f5d30d775f525894d67b1495625dd9322945e7fee00731952e0368ff42d \ | |
|
69 | --hash=sha256:e070535507bd6aa07124258171be2ee8dfc19119c28ca94c9dfb7efd23564512 \ | |
|
70 | --hash=sha256:e1ff2748c84d97b065cc95429814cdba39bcbd77c9c85c89344b317dc0d9cbff \ | |
|
71 | --hash=sha256:ed851c75d1e0e043cbf5ca9a8e1b13c4c90f3fbd863dacb01c0808e2b5204201 \ | |
|
72 | # via bcrypt, cryptography, pynacl | |
|
52 | 73 | chardet==3.0.4 \ |
|
53 | 74 | --hash=sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae \ |
|
54 | 75 | --hash=sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691 \ |
@@ -73,7 +94,7 b' cryptography==2.6.1 \\' | |||
|
73 | 94 | --hash=sha256:d4afbb0840f489b60f5a580a41a1b9c3622e08ecb5eec8614d4fb4cd914c4460 \ |
|
74 | 95 | --hash=sha256:d9ed28030797c00f4bc43c86bf819266c76a5ea61d006cd4078a93ebf7da6bfd \ |
|
75 | 96 | --hash=sha256:e603aa7bb52e4e8ed4119a58a03b60323918467ef209e6ff9db3ac382e5cf2c6 \ |
|
76 | # via pypsrp | |
|
97 | # via paramiko, pypsrp | |
|
77 | 98 | docutils==0.14 \ |
|
78 | 99 | --hash=sha256:02aec4bd92ab067f6ff27a38a38a41173bf01bed8f89157768c1573f53e474a6 \ |
|
79 | 100 | --hash=sha256:51e64ef2ebfb29cae1faa133b3710143496eca21c530f3f71424d77687764274 \ |
@@ -87,13 +108,41 b' jmespath==0.9.4 \\' | |||
|
87 | 108 | --hash=sha256:3720a4b1bd659dd2eecad0666459b9788813e032b83e7ba58578e48254e0a0e6 \ |
|
88 | 109 | --hash=sha256:bde2aef6f44302dfb30320115b17d030798de8c4110e28d5cf6cf91a7a31074c \ |
|
89 | 110 | # via boto3, botocore |
|
90 | ntlm-auth==1.2.0 \ |
|
|
91 | --hash=sha256:7bc02a3fbdfee7275d3dc20fce8028ed8eb6d32364637f28be9e9ae9160c6d5c \ | |
|
92 | --hash=sha256:9b13eaf88f16a831637d75236a93d60c0049536715aafbf8190ba58a590b023e \ | |
|
111 | ntlm-auth==1.3.0 \ | |
|
112 | --hash=sha256:bb2fd03c665f0f62c5f65695b62dcdb07fb7a45df6ebc86c770be2054d6902dd \ | |
|
113 | --hash=sha256:ce5b4483ed761f341a538a426a71a52e5a9cf5fd834ebef1d2090f9eef14b3f8 \ | |
|
93 | 114 | # via pypsrp |
|
115 | paramiko==2.4.2 \ | |
|
116 | --hash=sha256:3c16b2bfb4c0d810b24c40155dbfd113c0521e7e6ee593d704e84b4c658a1f3b \ | |
|
117 | --hash=sha256:a8975a7df3560c9f1e2b43dc54ebd40fd00a7017392ca5445ce7df409f900fcb | |
|
118 | pyasn1==0.4.5 \ | |
|
119 | --hash=sha256:da2420fe13a9452d8ae97a0e478adde1dee153b11ba832a95b223a2ba01c10f7 \ | |
|
120 | --hash=sha256:da6b43a8c9ae93bc80e2739efb38cc776ba74a886e3e9318d65fe81a8b8a2c6e \ | |
|
121 | # via paramiko | |
|
94 | 122 | pycparser==2.19 \ |
|
95 | 123 | --hash=sha256:a988718abfad80b6b157acce7bf130a30876d27603738ac39f140993246b25b3 \ |
|
96 | 124 | # via cffi |
|
125 | pynacl==1.3.0 \ | |
|
126 | --hash=sha256:05c26f93964373fc0abe332676cb6735f0ecad27711035b9472751faa8521255 \ | |
|
127 | --hash=sha256:0c6100edd16fefd1557da078c7a31e7b7d7a52ce39fdca2bec29d4f7b6e7600c \ | |
|
128 | --hash=sha256:0d0a8171a68edf51add1e73d2159c4bc19fc0718e79dec51166e940856c2f28e \ | |
|
129 | --hash=sha256:1c780712b206317a746ace34c209b8c29dbfd841dfbc02aa27f2084dd3db77ae \ | |
|
130 | --hash=sha256:2424c8b9f41aa65bbdbd7a64e73a7450ebb4aa9ddedc6a081e7afcc4c97f7621 \ | |
|
131 | --hash=sha256:2d23c04e8d709444220557ae48ed01f3f1086439f12dbf11976e849a4926db56 \ | |
|
132 | --hash=sha256:30f36a9c70450c7878053fa1344aca0145fd47d845270b43a7ee9192a051bf39 \ | |
|
133 | --hash=sha256:37aa336a317209f1bb099ad177fef0da45be36a2aa664507c5d72015f956c310 \ | |
|
134 | --hash=sha256:4943decfc5b905748f0756fdd99d4f9498d7064815c4cf3643820c9028b711d1 \ | |
|
135 | --hash=sha256:57ef38a65056e7800859e5ba9e6091053cd06e1038983016effaffe0efcd594a \ | |
|
136 | --hash=sha256:5bd61e9b44c543016ce1f6aef48606280e45f892a928ca7068fba30021e9b786 \ | |
|
137 | --hash=sha256:6482d3017a0c0327a49dddc8bd1074cc730d45db2ccb09c3bac1f8f32d1eb61b \ | |
|
138 | --hash=sha256:7d3ce02c0784b7cbcc771a2da6ea51f87e8716004512493a2b69016326301c3b \ | |
|
139 | --hash=sha256:a14e499c0f5955dcc3991f785f3f8e2130ed504fa3a7f44009ff458ad6bdd17f \ | |
|
140 | --hash=sha256:a39f54ccbcd2757d1d63b0ec00a00980c0b382c62865b61a505163943624ab20 \ | |
|
141 | --hash=sha256:aabb0c5232910a20eec8563503c153a8e78bbf5459490c49ab31f6adf3f3a415 \ | |
|
142 | --hash=sha256:bd4ecb473a96ad0f90c20acba4f0bf0df91a4e03a1f4dd6a4bdc9ca75aa3a715 \ | |
|
143 | --hash=sha256:e2da3c13307eac601f3de04887624939aca8ee3c9488a0bb0eca4fb9401fc6b1 \ | |
|
144 | --hash=sha256:f67814c38162f4deb31f68d590771a29d5ae3b1bd64b75cf232308e5c74777e0 \ | |
|
145 | # via paramiko | |
|
97 | 146 | pypsrp==0.3.1 \ |
|
98 | 147 | --hash=sha256:309853380fe086090a03cc6662a778ee69b1cae355ae4a932859034fd76e9d0b \ |
|
99 | 148 | --hash=sha256:90f946254f547dc3493cea8493c819ab87e152a755797c93aa2668678ba8ae85 |
@@ -112,8 +161,8 b' s3transfer==0.2.0 \\' | |||
|
112 | 161 | six==1.12.0 \ |
|
113 | 162 | --hash=sha256:3350809f0555b11f552448330d0b52d5f24c91a322ea4a15ef22629740f3761c \ |
|
114 | 163 | --hash=sha256:d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73 \ |
|
115 | # via cryptography, pypsrp, python-dateutil | |
|
116 | urllib3==1.24.1 \ |
|
|
117 | --hash=sha256:61bf29cada3fc2fbefad4fdf059ea4bd1b4a86d2b6d15e1c7c0b582b9752fe39 \ | |
|
118 | --hash=sha256:de9529817c93f27c8ccbfead6985011db27bd0ddfcdb2d86f3f663385c6a9c22 \ | |
|
164 | # via bcrypt, cryptography, pynacl, pypsrp, python-dateutil | |
|
165 | urllib3==1.24.2 \ | |
|
166 | --hash=sha256:4c291ca23bbb55c76518905869ef34bdd5f0e46af7afe6861e8375643ffee1a0 \ | |
|
167 | --hash=sha256:9a247273df709c4fedb38c711e44292304f73f39ab01beda9f6b9fc375669ac3 \ | |
|
119 | 168 | # via botocore, requests |
@@ -7,7 +7,7 b'' | |||
|
7 | 7 | # This software may be used and distributed according to the terms of the |
|
8 | 8 | # GNU General Public License version 2 or any later version. |
|
9 | 9 | |
|
10 | from __future__ import absolute_import | |
|
10 | from __future__ import absolute_import, print_function | |
|
11 | 11 | |
|
12 | 12 | import argparse |
|
13 | 13 | import contextlib |
@@ -227,4 +227,7 b' def main():' | |||
|
227 | 227 | process(fin, fout, opts) |
|
228 | 228 | |
|
229 | 229 | if __name__ == '__main__': |
|
230 | if sys.version_info.major < 3: | |
|
231 | print('This script must be run under Python 3.') | |
|
232 | sys.exit(3) | |
|
230 | 233 | main() |
@@ -44,6 +44,7 b' import timeit' | |||
|
44 | 44 | _TYPEMAP = { |
|
45 | 45 | 'START': 'B', |
|
46 | 46 | 'END': 'E', |
|
47 | 'COUNTER': 'C', | |
|
47 | 48 | } |
|
48 | 49 | |
|
49 | 50 | _threadmap = {} |
@@ -78,6 +79,11 b' def main():' | |||
|
78 | 79 | verb, session, label = ev.split(' ', 2) |
|
79 | 80 | if session not in _threadmap: |
|
80 | 81 | _threadmap[session] = len(_threadmap) |
|
82 | if verb == 'COUNTER': | |
|
83 | amount, label = label.split(' ', 1) | |
|
84 | payload_args = {'value': int(amount)} | |
|
85 | else: | |
|
86 | payload_args = {} | |
|
81 | 87 | pid = _threadmap[session] |
|
82 | 88 | ts_micros = (now - start) * 1000000 |
|
83 | 89 | out.write(json.dumps( |
@@ -88,7 +94,7 b' def main():' | |||
|
88 | 94 | "ts": ts_micros, |
|
89 | 95 | "pid": pid, |
|
90 | 96 | "tid": 1, |
|
91 | "args": {}, |

97 | "args": payload_args, | |
|
92 | 98 | })) |
|
93 | 99 | out.write(',\n') |
|
94 | 100 | finally: |
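
For reference, here is a minimal standalone sketch of the COUNTER handling added above (the helper function and the sample session/label names are invented for illustration): a `COUNTER <session> <amount> <label>` input line maps to a Chrome trace event with phase 'C' and a value payload.

    import json

    _TYPEMAP = {'START': 'B', 'END': 'E', 'COUNTER': 'C'}

    def to_trace_event(ev, pid=0, ts_micros=0):
        # input format: "VERB SESSION LABEL"; for COUNTER the label
        # begins with the integer amount
        verb, session, label = ev.split(' ', 2)
        if verb == 'COUNTER':
            amount, label = label.split(' ', 1)
            payload_args = {'value': int(amount)}
        else:
            payload_args = {}
        return json.dumps({'name': label, 'cat': session,
                           'ph': _TYPEMAP[verb], 'ts': ts_micros,
                           'pid': pid, 'tid': 1, 'args': payload_args})

    print(to_trace_event('COUNTER session0 42 revisions-processed'))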
@@ -649,15 +649,15 b' def embedded(f, modname, src):' | |||
|
649 | 649 | ... print("%s %s %d" % (_forcestr(m), _forcestr(f), l)) |
|
650 | 650 | ... print(repr(_forcestr(s))) |
|
651 | 651 | >>> lines = [ |
|
652 | ... b'comment', |

653 | ... b' >>> from __future__ import print_function', |

654 | ... b" >>> ' multiline", |

655 | ... b" ... string'", |

656 | ... b' ', |

657 | ... b'comment', |

658 | ... b' $ cat > foo.py <<EOF', |

659 | ... b' > from __future__ import print_function', |

660 | ... b' > EOF', |

652 | ... 'comment', | |
|
653 | ... ' >>> from __future__ import print_function', | |
|
654 | ... " >>> ' multiline", | |
|
655 | ... " ... string'", | |
|
656 | ... ' ', | |
|
657 | ... 'comment', | |
|
658 | ... ' $ cat > foo.py <<EOF', | |
|
659 | ... ' > from __future__ import print_function', | |
|
660 | ... ' > EOF', | |
|
661 | 661 | ... ] |
|
662 | 662 | >>> test(b"example.t", lines) |
|
663 | 663 | example[2] doctest.py 1 |
@@ -694,7 +694,7 b' def sources(f, modname):' | |||
|
694 | 694 | yield src.read(), modname, f, 0 |
|
695 | 695 | py = True |
|
696 | 696 | if py or f.endswith('.t'): |
|
697 | with open(f, 'rb') as src: |

697 | with open(f, 'r') as src: | |
|
698 | 698 | for script, modname, t, line in embedded(f, modname, src): |
|
699 | 699 | yield script, modname.encode('utf8'), t, line |
|
700 | 700 |
@@ -32,7 +32,7 b' From the prompt, change to the Mercurial' | |||
|
32 | 32 | ``cd c:\src\hg``. |
|
33 | 33 | |
|
34 | 34 | Next, invoke ``build.py`` to produce an Inno installer. You will |
|
35 | need to supply the path to the Python interpreter to use.: | |
|
35 | need to supply the path to the Python interpreter to use.:: | |
|
36 | 36 |
|
|
37 | 37 | $ python3.exe contrib\packaging\inno\build.py \ |
|
38 | 38 | --python c:\python27\python.exe |
@@ -49,6 +49,7 b'' | |||
|
49 | 49 | <File Id="internals.config.txt" Name="config.txt" /> |
|
50 | 50 | <File Id="internals.extensions.txt" Name="extensions.txt" /> |
|
51 | 51 | <File Id="internals.linelog.txt" Name="linelog.txt" /> |
|
52 | <File Id="internals.mergestate.txt" Name="mergestate.txt" /> | |
|
52 | 53 | <File Id="internals.requirements.txt" Name="requirements.txt" /> |
|
53 | 54 | <File Id="internals.revlogs.txt" Name="revlogs.txt" /> |
|
54 | 55 | <File Id="internals.wireprotocol.txt" Name="wireprotocol.txt" /> |
@@ -15,6 +15,13 b' Configurations' | |||
|
15 | 15 | ``presleep`` |
|
16 | 16 | number of seconds to wait before any group of runs (default: 1)
|
17 | 17 | |
|
18 | ``pre-run`` | |
|
19 | number of runs to perform before starting measurement. |
|
20 | ||
|
21 | ``profile-benchmark`` | |
|
22 | Enable profiling for the benchmarked section. | |
|
23 | (The first iteration is benchmarked) | |
|
24 | ||
|
18 | 25 | ``run-limits`` |
|
19 | 26 | Control the number of runs each benchmark will perform. The option value |
|
20 | 27 | should be a list of `<time>-<numberofrun>` pairs. After each run the |
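
A hedged example of wiring these knobs into an hgrc (the values are illustrative, not recommendations; `run-limits` uses the `<time>-<numberofrun>` pair syntax described above):

    [perf]
    pre-run = 3
    profile-benchmark = yes
    run-limits = 3.0-25, 10.0-3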
@@ -106,6 +113,10 b' try:' | |||
|
106 | 113 | except ImportError: |
|
107 | 114 | pass |
|
108 | 115 | |
|
116 | try: | |
|
117 | from mercurial import profiling | |
|
118 | except ImportError: | |
|
119 | profiling = None | |
|
109 | 120 | |
|
110 | 121 | def identity(a): |
|
111 | 122 | return a |
@@ -240,6 +251,12 b' try:' | |||
|
240 | 251 | configitem(b'perf', b'all-timing', |
|
241 | 252 | default=mercurial.configitems.dynamicdefault, |
|
242 | 253 | ) |
|
254 | configitem(b'perf', b'pre-run', | |
|
255 | default=mercurial.configitems.dynamicdefault, | |
|
256 | ) | |
|
257 | configitem(b'perf', b'profile-benchmark', | |
|
258 | default=mercurial.configitems.dynamicdefault, | |
|
259 | ) | |
|
243 | 260 | configitem(b'perf', b'run-limits', |
|
244 | 261 | default=mercurial.configitems.dynamicdefault, |
|
245 | 262 | ) |
@@ -251,6 +268,15 b' def getlen(ui):' | |||
|
251 | 268 | return lambda x: 1 |
|
252 | 269 | return len |
|
253 | 270 | |
|
271 | class noop(object): | |
|
272 | """dummy context manager""" | |
|
273 | def __enter__(self): | |
|
274 | pass | |
|
275 | def __exit__(self, *args): | |
|
276 | pass | |
|
277 | ||
|
278 | NOOPCTX = noop() | |
|
279 | ||
|
254 | 280 | def gettimer(ui, opts=None): |
|
255 | 281 | """return a timer function and formatter: (timer, formatter) |
|
256 | 282 | |
@@ -341,7 +367,14 b' def gettimer(ui, opts=None):' | |||
|
341 | 367 | if not limits: |
|
342 | 368 | limits = DEFAULTLIMITS |
|
343 | 369 | |
|
344 | t = functools.partial(_timer, fm, displayall=displayall, limits=limits) | |
|
370 | profiler = None | |
|
371 | if profiling is not None: | |
|
372 | if ui.configbool(b"perf", b"profile-benchmark", False): | |
|
373 | profiler = profiling.profile(ui) | |
|
374 | ||
|
375 | prerun = getint(ui, b"perf", b"pre-run", 0) | |
|
376 | t = functools.partial(_timer, fm, displayall=displayall, limits=limits, | |
|
377 | prerun=prerun, profiler=profiler) | |
|
345 | 378 | return t, fm |
|
346 | 379 | |
|
347 | 380 | def stub_timer(fm, func, setup=None, title=None): |
@@ -368,17 +401,25 b' DEFAULTLIMITS = (' | |||
|
368 | 401 | ) |
|
369 | 402 | |
|
370 | 403 | def _timer(fm, func, setup=None, title=None, displayall=False, |
|
371 | limits=DEFAULTLIMITS): | |
|
404 | limits=DEFAULTLIMITS, prerun=0, profiler=None): | |
|
372 | 405 | gc.collect() |
|
373 | 406 | results = [] |
|
374 | 407 | begin = util.timer() |
|
375 | 408 | count = 0 |
|
409 | if profiler is None: | |
|
410 | profiler = NOOPCTX | |
|
411 | for i in range(prerun): | |
|
412 | if setup is not None: | |
|
413 | setup() | |
|
414 | func() | |
|
376 | 415 | keepgoing = True |
|
377 | 416 | while keepgoing: |
|
378 | 417 | if setup is not None: |
|
379 | 418 | setup() |
|
380 | with timeone() as item: |

381 | r = func() | |
|
419 | with profiler: | |
|
420 | with timeone() as item: | |
|
421 | r = func() | |
|
422 | profiler = NOOPCTX | |
|
382 | 423 | count += 1 |
|
383 | 424 | results.append(item[0]) |
|
384 | 425 | cstop = util.timer() |
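
As a standalone illustration of the pattern above (warm-up passes, then profiling only the first timed iteration by swapping in a no-op context manager), here is a minimal sketch; it assumes Python 3.8+, where cProfile.Profile acts as a context manager, and the toy workload is invented:

    import cProfile
    import time

    class noop(object):
        """dummy context manager, mirroring perf.py's noop"""
        def __enter__(self):
            pass
        def __exit__(self, *args):
            pass

    NOOPCTX = noop()

    def timed_runs(func, runs=5, prerun=2, profiler=None):
        if profiler is None:
            profiler = NOOPCTX
        for _ in range(prerun):    # warm-up passes: neither timed nor profiled
            func()
        results = []
        for _ in range(runs):
            with profiler:         # real profiler active on the first pass only
                begin = time.perf_counter()
                func()
                results.append(time.perf_counter() - begin)
            profiler = NOOPCTX     # subsequent passes get the no-op
        return results

    prof = cProfile.Profile()
    print(timed_runs(lambda: sum(range(100000)), profiler=prof))
    prof.print_stats('cumulative')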
@@ -922,17 +963,39 b' def perfdirstatewrite(ui, repo, **opts):' | |||
|
922 | 963 | timer(d) |
|
923 | 964 | fm.end() |
|
924 | 965 | |
|
966 | def _getmergerevs(repo, opts): | |
|
967 | """parse command argument to return rev involved in merge | |
|
968 | ||
|
969 | input: options dictionary with `rev`, `from` and `base` |
|
970 | output: (localctx, otherctx, basectx) | |
|
971 | """ | |
|
972 | if opts[b'from']: | |
|
973 | fromrev = scmutil.revsingle(repo, opts[b'from']) | |
|
974 | wctx = repo[fromrev] | |
|
975 | else: | |
|
976 | wctx = repo[None] | |
|
977 | # we don't want working dir files to be stat'd in the benchmark, so | |
|
978 | # prime that cache | |
|
979 | wctx.dirty() | |
|
980 | rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev']) | |
|
981 | if opts[b'base']: | |
|
982 | fromrev = scmutil.revsingle(repo, opts[b'base']) | |
|
983 | ancestor = repo[fromrev] | |
|
984 | else: | |
|
985 | ancestor = wctx.ancestor(rctx) | |
|
986 | return (wctx, rctx, ancestor) | |
|
987 | ||
|
925 | 988 | @command(b'perfmergecalculate', |
|
926 | [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts) | |
|
927 | def perfmergecalculate(ui, repo, rev, **opts): | |
|
989 | [ | |
|
990 | (b'r', b'rev', b'.', b'rev to merge against'), | |
|
991 | (b'', b'from', b'', b'rev to merge from'), | |
|
992 | (b'', b'base', b'', b'the revision to use as base'), | |
|
993 | ] + formatteropts) | |
|
994 | def perfmergecalculate(ui, repo, **opts): | |
|
928 | 995 | opts = _byteskwargs(opts) |
|
929 | 996 | timer, fm = gettimer(ui, opts) |
|
930 | wctx = repo[None] | |
|
931 | rctx = scmutil.revsingle(repo, rev, rev) | |
|
932 | ancestor = wctx.ancestor(rctx) | |
|
933 | # we don't want working dir files to be stat'd in the benchmark, so prime | |
|
934 | # that cache | |
|
935 | wctx.dirty() | |
|
997 | ||
|
998 | wctx, rctx, ancestor = _getmergerevs(repo, opts) | |
|
936 | 999 | def d(): |
|
937 | 1000 | # acceptremote is True because we don't want prompts in the middle of |
|
938 | 1001 | # our benchmark |
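
A hypothetical invocation exercising the new --from/--base options (the revision names are invented):

    $ hg perfmergecalculate --rev feature-head --from other-head --base common-base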
@@ -941,6 +1004,24 b' def perfmergecalculate(ui, repo, rev, **' | |||
|
941 | 1004 | timer(d) |
|
942 | 1005 | fm.end() |
|
943 | 1006 | |
|
1007 | @command(b'perfmergecopies', | |
|
1008 | [ | |
|
1009 | (b'r', b'rev', b'.', b'rev to merge against'), | |
|
1010 | (b'', b'from', b'', b'rev to merge from'), | |
|
1011 | (b'', b'base', b'', b'the revision to use as base'), | |
|
1012 | ] + formatteropts) | |
|
1013 | def perfmergecopies(ui, repo, **opts): | |
|
1014 | """measure runtime of `copies.mergecopies`""" | |
|
1015 | opts = _byteskwargs(opts) | |
|
1016 | timer, fm = gettimer(ui, opts) | |
|
1017 | wctx, rctx, ancestor = _getmergerevs(repo, opts) | |
|
1018 | def d(): | |
|
1019 | # acceptremote is True because we don't want prompts in the middle of | |
|
1020 | # our benchmark | |
|
1021 | copies.mergecopies(repo, wctx, rctx, ancestor) | |
|
1022 | timer(d) | |
|
1023 | fm.end() | |
|
1024 | ||
|
944 | 1025 | @command(b'perfpathcopies', [], b"REV REV") |
|
945 | 1026 | def perfpathcopies(ui, repo, rev1, rev2, **opts): |
|
946 | 1027 | """benchmark the copy tracing logic""" |
@@ -1390,6 +1471,111 b' def perftemplating(ui, repo, testedtempl' | |||
|
1390 | 1471 | timer(format) |
|
1391 | 1472 | fm.end() |
|
1392 | 1473 | |
|
1474 | @command(b'perfhelper-mergecopies', formatteropts + | |
|
1475 | [ | |
|
1476 | (b'r', b'revs', [], b'restrict search to these revisions'), | |
|
1477 | (b'', b'timing', False, b'provides extra data (costly)'), | |
|
1478 | ]) | |
|
1479 | def perfhelpermergecopies(ui, repo, revs=[], **opts): | |
|
1480 | """find statistics about potential parameters for `perfmergecopies` | |
|
1481 | ||
|
1482 | This command finds (base, p1, p2) triplets relevant for copy tracing |

1483 | benchmarking in the context of a merge. It reports values for some of the |

1484 | parameters that impact copy tracing time during a merge. |
|
1485 | ||
|
1486 | If `--timing` is set, rename detection is run and the associated timing | |
|
1487 | will be reported. The extra details come at the cost of slower command | |
|
1488 | execution. | |
|
1489 | ||
|
1490 | Since rename detection is only run once, other factors might easily | |
|
1491 | affect the precision of the timing. However, it should give a good |
|
1492 | approximation of which revision triplets are very costly. | |
|
1493 | """ | |
|
1494 | opts = _byteskwargs(opts) | |
|
1495 | fm = ui.formatter(b'perf', opts) | |
|
1496 | dotiming = opts[b'timing'] | |
|
1497 | ||
|
1498 | output_template = [ | |
|
1499 | ("base", "%(base)12s"), | |
|
1500 | ("p1", "%(p1.node)12s"), | |
|
1501 | ("p2", "%(p2.node)12s"), | |
|
1502 | ("p1.nb-revs", "%(p1.nbrevs)12d"), | |
|
1503 | ("p1.nb-files", "%(p1.nbmissingfiles)12d"), | |
|
1504 | ("p1.renames", "%(p1.renamedfiles)12d"), | |
|
1505 | ("p1.time", "%(p1.time)12.3f"), | |
|
1506 | ("p2.nb-revs", "%(p2.nbrevs)12d"), | |
|
1507 | ("p2.nb-files", "%(p2.nbmissingfiles)12d"), | |
|
1508 | ("p2.renames", "%(p2.renamedfiles)12d"), | |
|
1509 | ("p2.time", "%(p2.time)12.3f"), | |
|
1510 | ("renames", "%(nbrenamedfiles)12d"), | |
|
1511 | ("total.time", "%(time)12.3f"), | |
|
1512 | ] | |
|
1513 | if not dotiming: | |
|
1514 | output_template = [i for i in output_template | |
|
1515 | if not ('time' in i[0] or 'renames' in i[0])] | |
|
1516 | header_names = [h for (h, v) in output_template] | |
|
1517 | output = ' '.join([v for (h, v) in output_template]) + '\n' | |
|
1518 | header = ' '.join(['%12s'] * len(header_names)) + '\n' | |
|
1519 | fm.plain(header % tuple(header_names)) | |
|
1520 | ||
|
1521 | if not revs: | |
|
1522 | revs = ['all()'] | |
|
1523 | revs = scmutil.revrange(repo, revs) | |
|
1524 | ||
|
1525 | roi = repo.revs('merge() and %ld', revs) | |
|
1526 | for r in roi: | |
|
1527 | ctx = repo[r] | |
|
1528 | p1 = ctx.p1() | |
|
1529 | p2 = ctx.p2() | |
|
1530 | bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev()) | |
|
1531 | for b in bases: | |
|
1532 | b = repo[b] | |
|
1533 | p1missing = copies._computeforwardmissing(b, p1) | |
|
1534 | p2missing = copies._computeforwardmissing(b, p2) | |
|
1535 | data = { | |
|
1536 | b'base': b.hex(), | |
|
1537 | b'p1.node': p1.hex(), | |
|
1538 | b'p1.nbrevs': len(repo.revs('%d::%d', b.rev(), p1.rev())), | |
|
1539 | b'p1.nbmissingfiles': len(p1missing), | |
|
1540 | b'p2.node': p2.hex(), | |
|
1541 | b'p2.nbrevs': len(repo.revs('%d::%d', b.rev(), p2.rev())), | |
|
1542 | b'p2.nbmissingfiles': len(p2missing), | |
|
1543 | } | |
|
1544 | if dotiming: | |
|
1545 | begin = util.timer() | |
|
1546 | mergedata = copies.mergecopies(repo, p1, p2, b) | |
|
1547 | end = util.timer() | |
|
1548 | # not very stable timing since we did only one run | |
|
1549 | data['time'] = end - begin | |
|
1550 | # mergedata contains five dicts: "copy", "movewithdir", | |
|
1551 | # "diverge", "renamedelete" and "dirmove". | |
|
1552 | # The first 4 are about renamed files, so let's count them. |
|
1553 | renames = len(mergedata[0]) | |
|
1554 | renames += len(mergedata[1]) | |
|
1555 | renames += len(mergedata[2]) | |
|
1556 | renames += len(mergedata[3]) | |
|
1557 | data['nbrenamedfiles'] = renames | |
|
1558 | begin = util.timer() | |
|
1559 | p1renames = copies.pathcopies(b, p1) | |
|
1560 | end = util.timer() | |
|
1561 | data['p1.time'] = end - begin | |
|
1562 | begin = util.timer() | |
|
1563 | p2renames = copies.pathcopies(b, p2) | |
|
1564 | end = util.timer() |

1565 | data['p2.time'] = end - begin |
|
1566 | data['p1.renamedfiles'] = len(p1renames) | |
|
1567 | data['p2.renamedfiles'] = len(p2renames) | |
|
1568 | fm.startitem() | |
|
1569 | fm.data(**data) | |
|
1570 | # make node pretty for the human output | |
|
1571 | out = data.copy() | |
|
1572 | out['base'] = fm.hexfunc(b.node()) | |
|
1573 | out['p1.node'] = fm.hexfunc(p1.node()) | |
|
1574 | out['p2.node'] = fm.hexfunc(p2.node()) | |
|
1575 | fm.plain(output % out) | |
|
1576 | ||
|
1577 | fm.end() | |
|
1578 | ||
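
A hypothetical way to run the new helper (the revset is illustrative); with --timing the rename-detection columns are filled in, at the cost of a slower run:

    $ hg perfhelper-mergecopies --timing -r 'merge()'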
|
1393 | 1579 | @command(b'perfhelper-pathcopies', formatteropts + |
|
1394 | 1580 | [ |
|
1395 | 1581 | (b'r', b'revs', [], b'restrict search to these revisions'), |
@@ -1890,7 +2076,7 b' def perfrevlogrevisions(ui, repo, file_=' | |||
|
1890 | 2076 | @command(b'perfrevlogwrite', revlogopts + formatteropts + |
|
1891 | 2077 | [(b's', b'startrev', 1000, b'revision to start writing at'), |
|
1892 | 2078 | (b'', b'stoprev', -1, b'last revision to write'), |
|
1893 | (b'', b'count', 3, b'last revision to write'), |

2079 | (b'', b'count', 3, b'number of passes to perform'), | |
|
1894 | 2080 | (b'', b'details', False, b'print timing for every revisions tested'), |
|
1895 | 2081 | (b'', b'source', b'full', b'the kind of data feed in the revlog'), |
|
1896 | 2082 | (b'', b'lazydeltabase', True, b'try the provided delta first'), |
@@ -1907,6 +2093,16 b' def perfrevlogwrite(ui, repo, file_=None' | |||
|
1907 | 2093 | (use a delta from the first parent otherwise) |
|
1908 | 2094 | * `parent-smallest`: add from the smallest delta (either p1 or p2) |
|
1909 | 2095 | * `storage`: add from the existing precomputed deltas |
|
2096 | ||
|
2097 | Note: This command measures performance in a custom way. As a |

2098 | result, some of the global configuration of the 'perf' command does not |
|
2099 | apply to it: | |
|
2100 | ||
|
2101 | * ``pre-run``: disabled | |
|
2102 | ||
|
2103 | * ``profile-benchmark``: disabled | |
|
2104 | ||
|
2105 | * ``run-limits``: disabled, use --count instead |
|
1910 | 2106 | """ |
|
1911 | 2107 | opts = _byteskwargs(opts) |
|
1912 | 2108 | |
@@ -2081,6 +2277,10 b' def _temprevlog(ui, orig, truncaterev):' | |||
|
2081 | 2277 | |
|
2082 | 2278 | if orig._inline: |
|
2083 | 2279 | raise error.Abort('not supporting inline revlog (yet)') |
|
2280 | revlogkwargs = {} | |
|
2281 | k = 'upperboundcomp' | |
|
2282 | if util.safehasattr(orig, k): | |
|
2283 | revlogkwargs[k] = getattr(orig, k) | |
|
2084 | 2284 | |
|
2085 | 2285 | origindexpath = orig.opener.join(orig.indexfile) |
|
2086 | 2286 | origdatapath = orig.opener.join(orig.datafile) |
@@ -2112,7 +2312,7 b' def _temprevlog(ui, orig, truncaterev):' | |||
|
2112 | 2312 | |
|
2113 | 2313 | dest = revlog.revlog(vfs, |
|
2114 | 2314 | indexfile=indexname, |
|
2115 | datafile=dataname) | |
|
2315 | datafile=dataname, **revlogkwargs) | |
|
2116 | 2316 | if dest._inline: |
|
2117 | 2317 | raise error.Abort('not supporting inline revlog (yet)') |
|
2118 | 2318 | # make sure internals are initialized |
@@ -4,6 +4,7 b' test-absorb-filefixupstate.py' | |||
|
4 | 4 | test-absorb-phase.t |
|
5 | 5 | test-absorb-rename.t |
|
6 | 6 | test-absorb-strip.t |
|
7 | test-absorb-unfinished.t | |
|
7 | 8 | test-absorb.t |
|
8 | 9 | test-acl.t |
|
9 | 10 | test-add.t |
@@ -35,6 +36,7 b' test-bisect2.t' | |||
|
35 | 36 | test-bisect3.t |
|
36 | 37 | test-blackbox.t |
|
37 | 38 | test-bookflow.t |
|
39 | test-bookmarks-corner-case.t | |
|
38 | 40 | test-bookmarks-current.t |
|
39 | 41 | test-bookmarks-merge.t |
|
40 | 42 | test-bookmarks-pushpull.t |
@@ -104,6 +106,7 b' test-context.py' | |||
|
104 | 106 | test-contrib-check-code.t |
|
105 | 107 | test-contrib-check-commit.t |
|
106 | 108 | test-contrib-dumprevlog.t |
|
109 | test-contrib-emacs.t | |
|
107 | 110 | test-contrib-perf.t |
|
108 | 111 | test-contrib-relnotes.t |
|
109 | 112 | test-contrib-testparseutil.t |
@@ -126,6 +129,8 b' test-convert-splicemap.t' | |||
|
126 | 129 | test-convert-svn-sink.t |
|
127 | 130 | test-convert-tagsbranch-topology.t |
|
128 | 131 | test-convert.t |
|
132 | test-copies-in-changeset.t | |
|
133 | test-copies-unrelated.t | |
|
129 | 134 | test-copies.t |
|
130 | 135 | test-copy-move-merge.t |
|
131 | 136 | test-copy.t |
@@ -139,6 +144,7 b' test-debugindexdot.t' | |||
|
139 | 144 | test-debugrename.t |
|
140 | 145 | test-default-push.t |
|
141 | 146 | test-demandimport.py |
|
147 | test-devel-warnings.t | |
|
142 | 148 | test-diff-antipatience.t |
|
143 | 149 | test-diff-binary-file.t |
|
144 | 150 | test-diff-change.t |
@@ -159,6 +165,7 b' test-directaccess.t' | |||
|
159 | 165 | test-dirstate-backup.t |
|
160 | 166 | test-dirstate-nonnormalset.t |
|
161 | 167 | test-dirstate-race.t |
|
168 | test-dirstate-race2.t | |
|
162 | 169 | test-dirstate.t |
|
163 | 170 | test-dispatch.py |
|
164 | 171 | test-dispatch.t |
@@ -230,6 +237,7 b' test-filecache.py' | |||
|
230 | 237 | test-filelog.py |
|
231 | 238 | test-fileset-generated.t |
|
232 | 239 | test-fileset.t |
|
240 | test-fix-metadata.t | |
|
233 | 241 | test-fix-topology.t |
|
234 | 242 | test-fix.t |
|
235 | 243 | test-flagprocessor.t |
@@ -511,6 +519,7 b' test-pathconflicts-basic.t' | |||
|
511 | 519 | test-pathconflicts-merge.t |
|
512 | 520 | test-pathconflicts-update.t |
|
513 | 521 | test-pathencode.py |
|
522 | test-paths.t | |
|
514 | 523 | test-pending.t |
|
515 | 524 | test-permissions.t |
|
516 | 525 | test-phabricator.t |
@@ -597,6 +606,7 b' test-releasenotes-formatting.t' | |||
|
597 | 606 | test-releasenotes-merging.t |
|
598 | 607 | test-releasenotes-parsing.t |
|
599 | 608 | test-relink.t |
|
609 | test-remote-hidden.t | |
|
600 | 610 | test-remotefilelog-bad-configs.t |
|
601 | 611 | test-remotefilelog-bgprefetch.t |
|
602 | 612 | test-remotefilelog-blame.t |
@@ -658,10 +668,12 b' test-rollback.t' | |||
|
658 | 668 | test-run-tests.py |
|
659 | 669 | test-run-tests.t |
|
660 | 670 | test-rust-ancestor.py |
|
671 | test-rust-discovery.py | |
|
661 | 672 | test-schemes.t |
|
662 | 673 | test-serve.t |
|
663 | 674 | test-server-view.t |
|
664 | 675 | test-setdiscovery.t |
|
676 | test-share-bookmarks.t | |
|
665 | 677 | test-share.t |
|
666 | 678 | test-shelve.t |
|
667 | 679 | test-shelve2.t |
@@ -38,12 +38,6 b' def rapply(f, xs):' | |||
|
38 | 38 | if ispy3: |
|
39 | 39 | import builtins |
|
40 | 40 | |
|
41 | # TODO: .buffer might not exist if std streams were replaced; we'll need | |
|
42 | # a silly wrapper to make a bytes stream backed by a unicode one. | |
|
43 | stdin = sys.stdin.buffer | |
|
44 | stdout = sys.stdout.buffer | |
|
45 | stderr = sys.stderr.buffer | |
|
46 | ||
|
47 | 41 | def bytestr(s): |
|
48 | 42 | # tiny version of pycompat.bytestr |
|
49 | 43 | return s.encode('latin1') |
@@ -54,12 +48,8 b' if ispy3:' | |||
|
54 | 48 | return s.decode(u'latin-1') |
|
55 | 49 | |
|
56 | 50 | def opentext(f): |
|
57 | return open(f, 'rb') |

51 | return open(f, 'r') | |
|
58 | 52 | else: |
|
59 | stdin = sys.stdin | |
|
60 | stdout = sys.stdout | |
|
61 | stderr = sys.stderr | |
|
62 | ||
|
63 | 53 | bytestr = str |
|
64 | 54 | sysstr = identity |
|
65 | 55 | |
@@ -71,11 +61,11 b' def b2s(x):' | |||
|
71 | 61 | |
|
72 | 62 | def writeout(data): |
|
73 | 63 | # write "data" in BYTES into stdout |
|
74 | stdout.write(data) | |
|
64 | sys.stdout.write(data) | |
|
75 | 65 | |
|
76 | 66 | def writeerr(data): |
|
77 | 67 | # write "data" in BYTES into stderr |
|
78 | stderr.write(data) | |
|
68 | sys.stderr.write(data) | |
|
79 | 69 | |
|
80 | 70 | #################### |
|
81 | 71 | |
@@ -164,14 +154,14 b' def embedded(basefile, lines, errors, ma' | |||
|
164 | 154 | ... self.matchfunc = matchfunc |
|
165 | 155 | ... def startsat(self, line): |
|
166 | 156 | ... return self.matchfunc(line) |
|
167 | >>> ambig1 = ambigmatcher(b'ambiguous #1', |

168 | ... lambda l: l.startswith(b' $ cat ')) |

169 | >>> ambig2 = ambigmatcher(b'ambiguous #2', |

170 | ... lambda l: l.endswith(b'<< EOF\\n')) |

171 | >>> lines = [b' $ cat > foo.py << EOF\\n'] |

157 | >>> ambig1 = ambigmatcher('ambiguous #1', | |
|
158 | ... lambda l: l.startswith(' $ cat ')) | |
|
159 | >>> ambig2 = ambigmatcher('ambiguous #2', | |
|
160 | ... lambda l: l.endswith('<< EOF\\n')) | |
|
161 | >>> lines = [' $ cat > foo.py << EOF\\n'] | |
|
172 | 162 | >>> errors = [] |
|
173 | 163 | >>> matchers = [ambig1, ambig2] |
|
174 | >>> list(t for t in embedded(b'<dummy>', lines, errors, matchers)) |

164 | >>> list(t for t in embedded('<dummy>', lines, errors, matchers)) | |
|
175 | 165 | [] |
|
176 | 166 | >>> b2s(errors) |
|
177 | 167 | ['<dummy>:1: ambiguous line for "ambiguous #1", "ambiguous #2"'] |
@@ -181,21 +171,21 b' def embedded(basefile, lines, errors, ma' | |||
|
181 | 171 | ctx = filename = code = startline = None # for pyflakes |
|
182 | 172 | |
|
183 | 173 | for lineno, line in enumerate(lines, 1): |
|
184 | if not line.endswith(b'\n'): |

185 | line += b'\n' # to normalize EOF line |

174 | if not line.endswith('\n'): | |
|
175 | line += '\n' # to normalize EOF line | |
|
186 | 176 | if matcher: # now, inside embedded code |
|
187 | 177 | if matcher.endsat(ctx, line): |
|
188 | 178 | codeatend = matcher.codeatend(ctx, line) |
|
189 | 179 | if codeatend is not None: |
|
190 | 180 | code.append(codeatend) |
|
191 | 181 | if not matcher.ignores(ctx): |
|
192 | yield (filename, startline, lineno, b''.join(code)) |

182 | yield (filename, startline, lineno, ''.join(code)) | |
|
193 | 183 | matcher = None |
|
194 | 184 | # DO NOT "continue", because line might start next fragment |
|
195 | 185 | elif not matcher.isinside(ctx, line): |
|
196 | 186 | # this is an error of basefile |
|
197 | 187 | # (if matchers are implemented correctly) |
|
198 | errors.append(b'%s:%d: unexpected line for "%s"' |

188 | errors.append('%s:%d: unexpected line for "%s"' | |
|
199 | 189 | % (basefile, lineno, matcher.desc)) |
|
200 | 190 | # stop extracting embedded code by current 'matcher', |
|
201 | 191 | # because appearance of unexpected line might mean |
@@ -218,9 +208,9 b' def embedded(basefile, lines, errors, ma' | |||
|
218 | 208 | if matched: |
|
219 | 209 | if len(matched) > 1: |
|
220 | 210 | # this is an error of matchers, maybe |
|
221 | errors.append(b'%s:%d: ambiguous line for %s' % |

211 | errors.append('%s:%d: ambiguous line for %s' % | |
|
222 | 212 | (basefile, lineno, |
|
223 | b', '.join([b'"%s"' % m.desc |

213 | ', '.join(['"%s"' % m.desc | |
|
224 | 214 | for m, c in matched]))) |
|
225 | 215 | # omit extracting embedded code, because choosing |
|
226 | 216 | # arbitrary matcher from matched ones might fail to |
@@ -239,20 +229,20 b' def embedded(basefile, lines, errors, ma' | |||
|
239 | 229 | if matcher: |
|
240 | 230 | # examine whether EOF ends embedded code, because embedded |
|
241 | 231 | # code isn't yet ended explicitly |
|
242 | if matcher.endsat(ctx, b'\n'): |

243 | codeatend = matcher.codeatend(ctx, b'\n') |

232 | if matcher.endsat(ctx, '\n'): | |
|
233 | codeatend = matcher.codeatend(ctx, '\n') | |
|
244 | 234 | if codeatend is not None: |
|
245 | 235 | code.append(codeatend) |
|
246 | 236 | if not matcher.ignores(ctx): |
|
247 | yield (filename, startline, lineno + 1, b''.join(code)) |

237 | yield (filename, startline, lineno + 1, ''.join(code)) | |
|
248 | 238 | else: |
|
249 | 239 | # this is an error of basefile |
|
250 | 240 | # (if matchers are implemented correctly) |
|
251 | errors.append(b'%s:%d: unexpected end of file for "%s"' |

241 | errors.append('%s:%d: unexpected end of file for "%s"' | |
|
252 | 242 | % (basefile, lineno, matcher.desc)) |
|
253 | 243 | |
|
254 | 244 | # heredoc limit mark to ignore embedded code at check-code.py or so |
|
255 | heredocignorelimit = b'NO_CHECK_EOF' |

245 | heredocignorelimit = 'NO_CHECK_EOF' | |
|
256 | 246 | |
|
257 | 247 | # the pattern to match against cases below, and to return a limit mark |
|
258 | 248 | # string as 'lname' group |
@@ -260,47 +250,47 b" heredocignorelimit = b'NO_CHECK_EOF'" | |||
|
260 | 250 | # - << LIMITMARK |
|
261 | 251 | # - << "LIMITMARK" |
|
262 | 252 | # - << 'LIMITMARK' |
|
263 | heredoclimitpat = br'\s*<<\s*(?P<lquote>["\']?)(?P<limit>\w+)(?P=lquote)' |

253 | heredoclimitpat = r'\s*<<\s*(?P<lquote>["\']?)(?P<limit>\w+)(?P=lquote)' | |
|
264 | 254 | |
|
265 | 255 | class fileheredocmatcher(embeddedmatcher): |
|
266 | 256 | """Detect "cat > FILE << LIMIT" style embedded code |
|
267 | 257 | |
|
268 | >>> matcher = fileheredocmatcher(b'heredoc .py file', br'[^<]+\\.py') |

269 | >>> b2s(matcher.startsat(b' $ cat > file.py << EOF\\n')) |

258 | >>> matcher = fileheredocmatcher('heredoc .py file', r'[^<]+\\.py') | |
|
259 | >>> b2s(matcher.startsat(' $ cat > file.py << EOF\\n')) | |
|
270 | 260 | ('file.py', ' > EOF\\n') |
|
271 | >>> b2s(matcher.startsat(b' $ cat >>file.py <<EOF\\n')) |

261 | >>> b2s(matcher.startsat(' $ cat >>file.py <<EOF\\n')) | |
|
272 | 262 | ('file.py', ' > EOF\\n') |
|
273 | >>> b2s(matcher.startsat(b' $ cat> \\x27any file.py\\x27<< "EOF"\\n')) |

263 | >>> b2s(matcher.startsat(' $ cat> \\x27any file.py\\x27<< "EOF"\\n')) | |
|
274 | 264 | ('any file.py', ' > EOF\\n') |
|
275 | >>> b2s(matcher.startsat(b" $ cat > file.py << 'ANYLIMIT'\\n")) |

265 | >>> b2s(matcher.startsat(" $ cat > file.py << 'ANYLIMIT'\\n")) | |
|
276 | 266 | ('file.py', ' > ANYLIMIT\\n') |
|
277 | >>> b2s(matcher.startsat(b' $ cat<<ANYLIMIT>"file.py"\\n')) |

267 | >>> b2s(matcher.startsat(' $ cat<<ANYLIMIT>"file.py"\\n')) | |
|
278 | 268 | ('file.py', ' > ANYLIMIT\\n') |
|
279 | >>> start = b' $ cat > file.py << EOF\\n' |

269 | >>> start = ' $ cat > file.py << EOF\\n' | |
|
280 | 270 | >>> ctx = matcher.startsat(start) |
|
281 | 271 | >>> matcher.codeatstart(ctx, start) |
|
282 | 272 | >>> b2s(matcher.filename(ctx)) |
|
283 | 273 | 'file.py' |
|
284 | 274 | >>> matcher.ignores(ctx) |
|
285 | 275 | False |
|
286 | >>> inside = b' > foo = 1\\n' |

276 | >>> inside = ' > foo = 1\\n' | |
|
287 | 277 | >>> matcher.endsat(ctx, inside) |
|
288 | 278 | False |
|
289 | 279 | >>> matcher.isinside(ctx, inside) |
|
290 | 280 | True |
|
291 | 281 | >>> b2s(matcher.codeinside(ctx, inside)) |
|
292 | 282 | 'foo = 1\\n' |
|
293 | >>> end = b' > EOF\\n' |

283 | >>> end = ' > EOF\\n' | |
|
294 | 284 | >>> matcher.endsat(ctx, end) |
|
295 | 285 | True |
|
296 | 286 | >>> matcher.codeatend(ctx, end) |
|
297 | >>> matcher.endsat(ctx, b' > EOFEOF\\n') |

287 | >>> matcher.endsat(ctx, ' > EOFEOF\\n') | |
|
298 | 288 | False |
|
299 | >>> ctx = matcher.startsat(b' $ cat > file.py << NO_CHECK_EOF\\n') |

289 | >>> ctx = matcher.startsat(' $ cat > file.py << NO_CHECK_EOF\\n') | |
|
300 | 290 | >>> matcher.ignores(ctx) |
|
301 | 291 | True |
|
302 | 292 | """ |
|
303 | _prefix = b' > ' |

293 | _prefix = ' > ' | |
|
304 | 294 | |
|
305 | 295 | def __init__(self, desc, namepat): |
|
306 | 296 | super(fileheredocmatcher, self).__init__(desc) |
@@ -312,13 +302,13 b' class fileheredocmatcher(embeddedmatcher' | |||
|
312 | 302 | # - > NAMEPAT |
|
313 | 303 | # - > "NAMEPAT" |
|
314 | 304 | # - > 'NAMEPAT' |
|
315 | namepat = (br'\s*>>?\s*(?P<nquote>["\']?)(?P<name>%s)(?P=nquote)' |

305 | namepat = (r'\s*>>?\s*(?P<nquote>["\']?)(?P<name>%s)(?P=nquote)' | |
|
316 | 306 | % namepat) |
|
317 | 307 | self._fileres = [ |
|
318 | 308 | # "cat > NAME << LIMIT" case |
|
319 | re.compile(br' \$ \s*cat' + namepat + heredoclimitpat), |

309 | re.compile(r' \$ \s*cat' + namepat + heredoclimitpat), | |
|
320 | 310 | # "cat << LIMIT > NAME" case |
|
321 | re.compile(br' \$ \s*cat' + heredoclimitpat + namepat), |

311 | re.compile(r' \$ \s*cat' + heredoclimitpat + namepat), | |
|
322 | 312 | ] |
|
323 | 313 | |
|
324 | 314 | def startsat(self, line): |
@@ -327,7 +317,7 b' class fileheredocmatcher(embeddedmatcher' | |||
|
327 | 317 | matched = filere.match(line) |
|
328 | 318 | if matched: |
|
329 | 319 | return (matched.group('name'), |
|
330 | b' > %s\n' % matched.group('limit')) |

320 | ' > %s\n' % matched.group('limit')) | |
|
331 | 321 | |
|
332 | 322 | def endsat(self, ctx, line): |
|
333 | 323 | return ctx[1] == line |
@@ -336,7 +326,7 b' class fileheredocmatcher(embeddedmatcher' | |||
|
336 | 326 | return line.startswith(self._prefix) |
|
337 | 327 | |
|
338 | 328 | def ignores(self, ctx): |
|
339 | return b' > %s\n' % heredocignorelimit == ctx[1] |

329 | return ' > %s\n' % heredocignorelimit == ctx[1] | |
|
340 | 330 | |
|
341 | 331 | def filename(self, ctx): |
|
342 | 332 | return ctx[0] |
@@ -357,10 +347,10 b' class pydoctestmatcher(embeddedmatcher):' | |||
|
357 | 347 | """Detect ">>> code" style embedded python code |
|
358 | 348 | |
|
359 | 349 | >>> matcher = pydoctestmatcher() |
|
360 | >>> startline = b' >>> foo = 1\\n' |

350 | >>> startline = ' >>> foo = 1\\n' | |
|
361 | 351 | >>> matcher.startsat(startline) |
|
362 | 352 | True |
|
363 | >>> matcher.startsat(b' ... foo = 1\\n') |

353 | >>> matcher.startsat(' ... foo = 1\\n') | |
|
364 | 354 | False |
|
365 | 355 | >>> ctx = matcher.startsat(startline) |
|
366 | 356 | >>> matcher.filename(ctx) |
@@ -368,45 +358,45 b' class pydoctestmatcher(embeddedmatcher):' | |||
|
368 | 358 | False |
|
369 | 359 | >>> b2s(matcher.codeatstart(ctx, startline)) |
|
370 | 360 | 'foo = 1\\n' |
|
371 | >>> inside = b' >>> foo = 1\\n' |

361 | >>> inside = ' >>> foo = 1\\n' | |
|
372 | 362 | >>> matcher.endsat(ctx, inside) |
|
373 | 363 | False |
|
374 | 364 | >>> matcher.isinside(ctx, inside) |
|
375 | 365 | True |
|
376 | 366 | >>> b2s(matcher.codeinside(ctx, inside)) |
|
377 | 367 | 'foo = 1\\n' |
|
378 | >>> inside = b' ... foo = 1\\n' |

368 | >>> inside = ' ... foo = 1\\n' | |
|
379 | 369 | >>> matcher.endsat(ctx, inside) |
|
380 | 370 | False |
|
381 | 371 | >>> matcher.isinside(ctx, inside) |
|
382 | 372 | True |
|
383 | 373 | >>> b2s(matcher.codeinside(ctx, inside)) |
|
384 | 374 | 'foo = 1\\n' |
|
385 | >>> inside = b' expected output\\n' |

375 | >>> inside = ' expected output\\n' | |
|
386 | 376 | >>> matcher.endsat(ctx, inside) |
|
387 | 377 | False |
|
388 | 378 | >>> matcher.isinside(ctx, inside) |
|
389 | 379 | True |
|
390 | 380 | >>> b2s(matcher.codeinside(ctx, inside)) |
|
391 | 381 | '\\n' |
|
392 | >>> inside = b' \\n' |

382 | >>> inside = ' \\n' | |
|
393 | 383 | >>> matcher.endsat(ctx, inside) |
|
394 | 384 | False |
|
395 | 385 | >>> matcher.isinside(ctx, inside) |
|
396 | 386 | True |
|
397 | 387 | >>> b2s(matcher.codeinside(ctx, inside)) |
|
398 | 388 | '\\n' |
|
399 | >>> end = b' $ foo bar\\n' |

389 | >>> end = ' $ foo bar\\n' | |
|
400 | 390 | >>> matcher.endsat(ctx, end) |
|
401 | 391 | True |
|
402 | 392 | >>> matcher.codeatend(ctx, end) |
|
403 | >>> end = b'\\n' |

393 | >>> end = '\\n' | |
|
404 | 394 | >>> matcher.endsat(ctx, end) |
|
405 | 395 | True |
|
406 | 396 | >>> matcher.codeatend(ctx, end) |
|
407 | 397 | """ |
|
408 | _prefix = b' >>> ' |

409 | _prefixre = re.compile(br' (>>>|\.\.\.) ') |

398 | _prefix = ' >>> ' | |
|
399 | _prefixre = re.compile(r' (>>>|\.\.\.) ') | |
|
410 | 400 | |
|
411 | 401 | # If a line matches against not _prefixre but _outputre, that line |
|
412 | 402 | # is "an expected output line" (= not a part of code fragment). |
@@ -416,10 +406,10 b' class pydoctestmatcher(embeddedmatcher):' | |||
|
416 | 406 | # run-tests.py. But "directive line inside inline python code" |
|
417 | 407 | # should be rejected by Mercurial reviewers. Therefore, this |
|
418 | 408 | # regexp does not matche against such directive lines. |
|
419 | _outputre = re.compile(br' $| [^$]') |

409 | _outputre = re.compile(r' $| [^$]') | |
|
420 | 410 | |
|
421 | 411 | def __init__(self): |
|
422 | super(pydoctestmatcher, self).__init__(b"doctest style python code") |

412 | super(pydoctestmatcher, self).__init__("doctest style python code") | |
|
423 | 413 | |
|
424 | 414 | def startsat(self, line): |
|
425 | 415 | # ctx is "True" |
@@ -446,57 +436,57 b' class pydoctestmatcher(embeddedmatcher):' | |||
|
446 | 436 | def codeinside(self, ctx, line): |
|
447 | 437 | if self._prefixre.match(line): |
|
448 | 438 | return line[len(self._prefix):] # strip prefix ' >>> '/' ... ' |
|
449 | return b'\n' # an expected output line is treated as an empty line |

439 | return '\n' # an expected output line is treated as an empty line | |
|
450 | 440 | |
|
451 | 441 | class pyheredocmatcher(embeddedmatcher): |
|
452 | 442 | """Detect "python << LIMIT" style embedded python code |
|
453 | 443 | |
|
454 | 444 | >>> matcher = pyheredocmatcher() |
|
455 | >>> b2s(matcher.startsat(b' $ python << EOF\\n')) |

445 | >>> b2s(matcher.startsat(' $ python << EOF\\n')) | |
|
456 | 446 | ' > EOF\\n' |
|
457 | >>> b2s(matcher.startsat(b' $ $PYTHON <<EOF\\n')) |

447 | >>> b2s(matcher.startsat(' $ $PYTHON <<EOF\\n')) | |
|
458 | 448 | ' > EOF\\n' |
|
459 | >>> b2s(matcher.startsat(b' $ "$PYTHON"<< "EOF"\\n')) |

449 | >>> b2s(matcher.startsat(' $ "$PYTHON"<< "EOF"\\n')) | |
|
460 | 450 | ' > EOF\\n' |
|
461 | >>> b2s(matcher.startsat(b" $ $PYTHON << 'ANYLIMIT'\\n")) |

451 | >>> b2s(matcher.startsat(" $ $PYTHON << 'ANYLIMIT'\\n")) | |
|
462 | 452 | ' > ANYLIMIT\\n' |
|
463 | >>> matcher.startsat(b' $ "$PYTHON" < EOF\\n') |

464 | >>> start = b' $ python << EOF\\n' |

453 | >>> matcher.startsat(' $ "$PYTHON" < EOF\\n') | |
|
454 | >>> start = ' $ python << EOF\\n' | |
|
465 | 455 | >>> ctx = matcher.startsat(start) |
|
466 | 456 | >>> matcher.codeatstart(ctx, start) |
|
467 | 457 | >>> matcher.filename(ctx) |
|
468 | 458 | >>> matcher.ignores(ctx) |
|
469 | 459 | False |
|
470 | >>> inside = b' > foo = 1\\n' |

460 | >>> inside = ' > foo = 1\\n' | |
|
471 | 461 | >>> matcher.endsat(ctx, inside) |
|
472 | 462 | False |
|
473 | 463 | >>> matcher.isinside(ctx, inside) |
|
474 | 464 | True |
|
475 | 465 | >>> b2s(matcher.codeinside(ctx, inside)) |
|
476 | 466 | 'foo = 1\\n' |
|
477 | >>> end = b' > EOF\\n' |

467 | >>> end = ' > EOF\\n' | |
|
478 | 468 | >>> matcher.endsat(ctx, end) |
|
479 | 469 | True |
|
480 | 470 | >>> matcher.codeatend(ctx, end) |
|
481 | >>> matcher.endsat(ctx, b' > EOFEOF\\n') |

471 | >>> matcher.endsat(ctx, ' > EOFEOF\\n') | |
|
482 | 472 | False |
|
483 | >>> ctx = matcher.startsat(b' $ python << NO_CHECK_EOF\\n') |

473 | >>> ctx = matcher.startsat(' $ python << NO_CHECK_EOF\\n') | |
|
484 | 474 | >>> matcher.ignores(ctx) |
|
485 | 475 | True |
|
486 | 476 | """ |
|
487 | _prefix = b' > ' |

477 | _prefix = ' > ' | |
|
488 | 478 | |
|
489 | _startre = re.compile(br' \$ (\$PYTHON|"\$PYTHON"|python).*' + |

479 | _startre = re.compile(r' \$ (\$PYTHON|"\$PYTHON"|python).*' + | |
|
490 | 480 | heredoclimitpat) |
|
491 | 481 | |
|
492 | 482 | def __init__(self): |
|
493 | super(pyheredocmatcher, self).__init__(b"heredoc python invocation") |

483 | super(pyheredocmatcher, self).__init__("heredoc python invocation") | |
|
494 | 484 | |
|
495 | 485 | def startsat(self, line): |
|
496 | 486 | # ctx is END-LINE-OF-EMBEDDED-CODE |
|
497 | 487 | matched = self._startre.match(line) |
|
498 | 488 | if matched: |
|
499 | return b' > %s\n' % matched.group('limit') |

489 | return ' > %s\n' % matched.group('limit') | |
|
500 | 490 | |
|
501 | 491 | def endsat(self, ctx, line): |
|
502 | 492 | return ctx == line |
@@ -505,7 +495,7 b' class pyheredocmatcher(embeddedmatcher):' | |||
|
505 | 495 | return line.startswith(self._prefix) |
|
506 | 496 | |
|
507 | 497 | def ignores(self, ctx): |
|
508 | return b' > %s\n' % heredocignorelimit == ctx |

498 | return ' > %s\n' % heredocignorelimit == ctx | |
|
509 | 499 | |
|
510 | 500 | def filename(self, ctx): |
|
511 | 501 | return None # no filename |
@@ -524,7 +514,7 b' class pyheredocmatcher(embeddedmatcher):' | |||
|
524 | 514 | pyheredocmatcher(), |
|
525 | 515 | # use '[^<]+' instead of '\S+', in order to match against |
|
526 | 516 | # paths including whitespaces |
|
527 | fileheredocmatcher(b'heredoc .py file', br'[^<]+\.py'), |

517 | fileheredocmatcher('heredoc .py file', r'[^<]+\.py'), | |
|
528 | 518 | ] |
|
529 | 519 | |
|
530 | 520 | def pyembedded(basefile, lines, errors): |
@@ -536,7 +526,7 b' def pyembedded(basefile, lines, errors):' | |||
|
536 | 526 | _shmatchers = [ |
|
537 | 527 | # use '[^<]+' instead of '\S+', in order to match against |
|
538 | 528 | # paths including whitespaces |
|
539 | fileheredocmatcher(b'heredoc .sh file', br'[^<]+\.sh'), |

529 | fileheredocmatcher('heredoc .sh file', r'[^<]+\.sh'), | |
|
540 | 530 | ] |
|
541 | 531 | |
|
542 | 532 | def shembedded(basefile, lines, errors): |
@@ -548,8 +538,8 b' def shembedded(basefile, lines, errors):' | |||
|
548 | 538 | _hgrcmatchers = [ |
|
549 | 539 | # use '[^<]+' instead of '\S+', in order to match against |
|
550 | 540 | # paths including whitespaces |
|
551 | fileheredocmatcher(b'heredoc hgrc file', |

552 | br'(([^/<]+/)+hgrc|\$HGRCPATH|\${HGRCPATH})'), |

541 | fileheredocmatcher('heredoc hgrc file', | |
|
542 | r'(([^/<]+/)+hgrc|\$HGRCPATH|\${HGRCPATH})'), | |
|
553 | 543 | ] |
|
554 | 544 | |
|
555 | 545 | def hgrcembedded(basefile, lines, errors): |
@@ -565,14 +555,14 b' if __name__ == "__main__":' | |||
|
565 | 555 | errors = [] |
|
566 | 556 | for name, starts, ends, code in embeddedfunc(basefile, lines, errors): |
|
567 | 557 | if not name: |
|
568 | name = b'<anonymous>' |

569 | writeout(b"%s:%d: %s starts\n" % (basefile, starts, name)) |

558 | name = '<anonymous>' | |
|
559 | writeout("%s:%d: %s starts\n" % (basefile, starts, name)) | |
|
570 | 560 | if opts.verbose and code: |
|
571 | writeout(b" |%s\n" % |

572 | b"\n |".join(l for l in code.splitlines())) |

573 | writeout(b"%s:%d: %s ends\n" % (basefile, ends, name)) |

561 | writeout(" |%s\n" % | |
|
562 | "\n |".join(l for l in code.splitlines())) | |
|
563 | writeout("%s:%d: %s ends\n" % (basefile, ends, name)) | |
|
574 | 564 | for e in errors: |
|
575 | writeerr(b"%s\n" % e) |

565 | writeerr("%s\n" % e) | |
|
576 | 566 | return len(errors) |
|
577 | 567 | |
|
578 | 568 | def applyembedded(args, embeddedfunc, opts): |
@@ -580,11 +570,11 b' if __name__ == "__main__":' | |||
|
580 | 570 | if args: |
|
581 | 571 | for f in args: |
|
582 | 572 | with opentext(f) as fp: |
|
583 |
if showembedded( |
|
|
573 | if showembedded(f, fp, embeddedfunc, opts): | |
|
584 | 574 | ret = 1 |
|
585 | 575 | else: |
|
586 | lines = [l for l in stdin.readlines()] | |
|
587 |
if showembedded( |
|
|
576 | lines = [l for l in sys.stdin.readlines()] | |
|
577 | if showembedded('<stdin>', lines, embeddedfunc, opts): | |
|
588 | 578 | ret = 1 |
|
589 | 579 | return ret |
|
590 | 580 |
@@ -64,7 +64,6 b' editor = notepad' | |||
|
64 | 64 | ;relink = |
|
65 | 65 | ;schemes = |
|
66 | 66 | ;share = |
|
67 | ;shelve = | |
|
68 | 67 | ;transplant = |
|
69 | 68 | ;win32mbcs = |
|
70 | 69 | ;zeroconf = |
@@ -1,4 +1,4 b'' | |||
|
1 | #compdef hg | |
|
1 | #compdef hg chg | |
|
2 | 2 | |
|
3 | 3 | # Zsh completion script for mercurial. Rename this file to _hg and copy |
|
4 | 4 | # it into your zsh function path (/usr/share/zsh/site-functions for |
@@ -120,7 +120,7 b' def showdoc(ui):' | |||
|
120 | 120 | |
|
121 | 121 | # print cmds |
|
122 | 122 | ui.write(minirst.section(_(b"Commands"))) |
|
123 | commandprinter(ui, table, minirst.subsection) | |
|
123 | commandprinter(ui, table, minirst.subsection, minirst.subsubsection) | |
|
124 | 124 | |
|
125 | 125 | # print help topics |
|
126 | 126 | # The config help topic is included in the hgrc.5 man page. |
@@ -143,7 +143,8 b' def showdoc(ui):' | |||
|
143 | 143 | cmdtable = getattr(mod, 'cmdtable', None) |
|
144 | 144 | if cmdtable: |
|
145 | 145 | ui.write(minirst.subsubsection(_(b'Commands'))) |
|
146 | commandprinter(ui, cmdtable, minirst.subsubsubsection) |

146 | commandprinter(ui, cmdtable, minirst.subsubsubsection, | |
|
147 | minirst.subsubsubsubsection) | |
|
147 | 148 | |
|
148 | 149 | def showtopic(ui, topic): |
|
149 | 150 | extrahelptable = [ |
@@ -177,7 +178,27 b' def helpprinter(ui, helptable, sectionfu' | |||
|
177 | 178 | ui.write(doc) |
|
178 | 179 | ui.write(b"\n") |
|
179 | 180 | |
|
180 | def commandprinter(ui, cmdtable, sectionfunc): | |
|
181 | def commandprinter(ui, cmdtable, sectionfunc, subsectionfunc): | |
|
182 | """Render restructuredtext describing a list of commands and their | |
|
183 | documentations, grouped by command category. | |
|
184 | ||
|
185 | Args: | |
|
186 | ui: UI object to write the output to | |
|
187 | cmdtable: a dict that maps a string of the command name plus its aliases | |
|
188 | (separated with pipes) to a 3-tuple of (the command's function, a list | |
|
189 | of its option descriptions, and a string summarizing available | |
|
190 | options). Example, with aliases added for demonstration purposes: | |
|
191 | ||
|
192 | 'phase|alias1|alias2': ( | |
|
193 | <function phase at 0x7f0816b05e60>, | |
|
194 | [ ('p', 'public', False, 'set changeset phase to public'), | |
|
195 | ..., | |
|
196 | ('r', 'rev', [], 'target revision', 'REV')], | |
|
197 | '[-p|-d|-s] [-f] [-r] [REV...]' | |
|
198 | ) | |
|
199 | sectionfunc: minirst function to format command category headers | |
|
200 | subsectionfunc: minirst function to format command headers | |
|
201 | """ | |
|
181 | 202 | h = {} |
|
182 | 203 | for c, attr in cmdtable.items(): |
|
183 | 204 | f = c.split(b"|")[0] |
@@ -185,45 +206,76 b' def commandprinter(ui, cmdtable, section' | |||
|
185 | 206 | h[f] = c |
|
186 | 207 | cmds = h.keys() |
|
187 | 208 | |
|
188 | for f in sorted(cmds): | |
|
189 | if f.startswith(b"debug"): | |
|
209 | def helpcategory(cmd): | |
|
210 | """Given a canonical command name from `cmds` (above), retrieve its | |
|
211 | help category. If helpcategory is None, default to CATEGORY_NONE. | |
|
212 | """ | |
|
213 | fullname = h[cmd] | |
|
214 | details = cmdtable[fullname] | |
|
215 | helpcategory = details[0].helpcategory | |
|
216 | return helpcategory or help.registrar.command.CATEGORY_NONE | |
|
217 | ||
|
218 | cmdsbycategory = {category: [] for category in help.CATEGORY_ORDER} | |
|
219 | for cmd in cmds: | |
|
220 | # If a command category wasn't registered, the command won't get | |
|
221 | # rendered below, so we raise an AssertionError. | |
|
222 | if helpcategory(cmd) not in cmdsbycategory: | |
|
223 | raise AssertionError( | |
|
224 | "The following command did not register its (category) in " | |
|
225 | "help.CATEGORY_ORDER: %s (%s)" % (cmd, helpcategory(cmd))) | |
|
226 | cmdsbycategory[helpcategory(cmd)].append(cmd) | |
|
227 | ||
|
228 | # Print the help for each command. We present the commands grouped by | |
|
229 | # category, and we use help.CATEGORY_ORDER as a guide for a helpful order | |
|
230 | # in which to present the categories. | |
|
231 | for category in help.CATEGORY_ORDER: | |
|
232 | categorycmds = cmdsbycategory[category] | |
|
233 | if not categorycmds: | |
|
234 | # Skip empty categories | |
|
190 | 235 | continue |
|
191 | d = get_cmd(h[f], cmdtable) | |
|
192 | ui.write(sectionfunc(d[b'cmd'])) | |
|
193 | # short description | |
|
194 | ui.write(d[b'desc'][0]) | |
|
195 | # synopsis | |
|
196 | ui.write(b"::\n\n") | |
|
197 | synopsislines = d[b'synopsis'].splitlines() | |
|
198 | for line in synopsislines: | |
|
199 | # some commands (such as rebase) have a multi-line | |
|
236 | # Print a section header for the category. | |
|
237 | # For now, the category header is at the same level as the headers for | |
|
238 | # the commands in the category; this is fixed in the next commit. | |
|
239 | ui.write(sectionfunc(help.CATEGORY_NAMES[category])) | |
|
240 | # Print each command in the category | |
|
241 | for f in sorted(categorycmds): | |
|
242 | if f.startswith(b"debug"): | |
|
243 | continue | |
|
244 | d = get_cmd(h[f], cmdtable) | |
|
245 | ui.write(subsectionfunc(d[b'cmd'])) | |
|
246 | # short description | |
|
247 | ui.write(d[b'desc'][0]) | |
|
200 | 248 | # synopsis |
|
201 | ui.write(b" %s\n" % line) |

202 | ui.write(b'\n') | |
|
203 | # description | |
|
204 | ui.write(b"%s\n\n" % d[b'desc'][1]) | |
|
205 | # options |

206 | opt_output = list(d[b'opts']) | |
|
207 | if opt_output: | |
|
208 | opts_len = max([len(line[0]) for line in opt_output]) | |
|
209 | ui.write(_(b"Options:\n\n")) |

210 | multioccur = False |

211 | for optstr, desc in opt_output: | |
|
212 | if desc: |

213 | s = b"%-*s %s" % (opts_len, optstr, desc) | |
|
214 | else: | |
|
215 | s = optstr |

216 | ui.write(b"%s\n" % s) | |
|
217 | if optstr.endswith(b"[+]>"): | |
|
218 | multioccur = True | |
|
219 | if multioccur: | |
|
220 | ui.write(_(b"\n[+] marked option can be specified" | |
|
221 | b" multiple times\n")) |

222 | ui.write(b"\n") | |
|
223 | # aliases | |
|
224 | if d[b'aliases']: | |
|
225 | ui.write(_(b" aliases: %s\n\n") % b" ".join(d[b'aliases'])) | |
|
226 | ||
|
249 | ui.write(b"::\n\n") | |
|
250 | synopsislines = d[b'synopsis'].splitlines() | |
|
251 | for line in synopsislines: | |
|
252 | # some commands (such as rebase) have a multi-line | |
|
253 | # synopsis | |
|
254 | ui.write(b" %s\n" % line) | |
|
255 | ui.write(b'\n') | |
|
256 | # description | |
|
257 | ui.write(b"%s\n\n" % d[b'desc'][1]) | |
|
258 | # options | |
|
259 | opt_output = list(d[b'opts']) | |
|
260 | if opt_output: | |
|
261 | opts_len = max([len(line[0]) for line in opt_output]) | |
|
262 | ui.write(_(b"Options:\n\n")) | |
|
263 | multioccur = False | |
|
264 | for optstr, desc in opt_output: | |
|
265 | if desc: | |
|
266 | s = b"%-*s %s" % (opts_len, optstr, desc) | |
|
267 | else: | |
|
268 | s = optstr | |
|
269 | ui.write(b"%s\n" % s) | |
|
270 | if optstr.endswith(b"[+]>"): | |
|
271 | multioccur = True | |
|
272 | if multioccur: | |
|
273 | ui.write(_(b"\n[+] marked option can be specified" | |
|
274 | b" multiple times\n")) | |
|
275 | ui.write(b"\n") | |
|
276 | # aliases | |
|
277 | if d[b'aliases']: | |
|
278 | ui.write(_(b" aliases: %s\n\n") % b" ".join(d[b'aliases'])) | |
|
227 | 279 | |
|
228 | 280 | def allextensionnames(): |
|
229 | 281 | return set(extensions.enabled().keys()) | set(extensions.disabled().keys()) |
@@ -32,6 +32,8 b' import importlib.machinery' | |||
|
32 | 32 | import importlib.util |
|
33 | 33 | import sys |
|
34 | 34 | |
|
35 | from . import tracing | |
|
36 | ||
|
35 | 37 | _deactivated = False |
|
36 | 38 | |
|
37 | 39 | class _lazyloaderex(importlib.util.LazyLoader): |
@@ -40,10 +42,11 b' class _lazyloaderex(importlib.util.LazyL' | |||
|
40 | 42 | """ |
|
41 | 43 | def exec_module(self, module): |
|
42 | 44 | """Make the module load lazily.""" |
|
43 | if _deactivated or module.__name__ in ignores: | |
|
44 | self.loader.exec_module(module) | |
|
45 | else: | |
|
46 | super().exec_module(module) | |
|
45 | with tracing.log('demandimport %s', module): | |
|
46 | if _deactivated or module.__name__ in ignores: | |
|
47 | self.loader.exec_module(module) | |
|
48 | else: | |
|
49 | super().exec_module(module) | |
|
47 | 50 | |
|
48 | 51 | # This is 3.6+ because with Python 3.5 it isn't possible to lazily load |
|
49 | 52 | # extensions. See the discussion in https://bugs.python.org/issue26186 for more. |
@@ -13,19 +13,23 b' import os' | |||
|
13 | 13 | _pipe = None |
|
14 | 14 | _checked = False |
|
15 | 15 | |
|
16 | @contextlib.contextmanager | |
|
17 | def log(whencefmt, *whenceargs): | |
|
16 | def _isactive(): | |
|
18 | 17 | global _pipe, _session, _checked |
|
19 | 18 | if _pipe is None: |
|
20 | 19 | if _checked: |
|
21 | yield |

22 | return | |
|
20 | return False | |
|
23 | 21 | _checked = True |
|
24 | 22 | if 'HGCATAPULTSERVERPIPE' not in os.environ: |
|
25 | yield |

26 | return | |
|
23 | return False | |
|
27 | 24 | _pipe = open(os.environ['HGCATAPULTSERVERPIPE'], 'w', 1) |
|
28 | 25 | _session = os.environ.get('HGCATAPULTSESSION', 'none') |
|
26 | return True | |
|
27 | ||
|
28 | @contextlib.contextmanager | |
|
29 | def log(whencefmt, *whenceargs): | |
|
30 | if not _isactive(): | |
|
31 | yield | |
|
32 | return | |
|
29 | 33 | whence = whencefmt % whenceargs |
|
30 | 34 | try: |
|
31 | 35 | # Both writes to the pipe are wrapped in try/except to ignore |
@@ -42,3 +46,13 b' def log(whencefmt, *whenceargs):' | |||
|
42 | 46 | _pipe.write('END %s %s\n' % (_session, whence)) |
|
43 | 47 | except IOError: |
|
44 | 48 | pass |
|
49 | ||
|
50 | def counter(label, amount, *labelargs): | |
|
51 | if not _isactive(): | |
|
52 | return | |
|
53 | l = label % labelargs | |
|
54 | # See above in log() for why this is in a try/except. | |
|
55 | try: | |
|
56 | _pipe.write('COUNTER %s %d %s\n' % (_session, amount, l)) | |
|
57 | except IOError: | |
|
58 | pass |
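
A hedged usage sketch for these helpers (the module path, pipe path, label, and amount are assumptions for illustration); nothing is written unless HGCATAPULTSERVERPIPE points at a writable pipe or file before the first call:

    import os
    os.environ.setdefault('HGCATAPULTSERVERPIPE', '/tmp/catapult-pipe')

    from hgdemandimport import tracing  # assumed location of this module

    with tracing.log('processing %s', 'myrepo'):               # emits START/END
        tracing.counter('revisions seen (%s)', 100, 'myrepo')  # emits COUNTER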
@@ -871,7 +871,7 b' def _parsechunk(hunk):' | |||
|
871 | 871 | patchlines = mdiff.splitnewlines(buf.getvalue()) |
|
872 | 872 | # hunk.prettystr() will update hunk.removed |
|
873 | 873 | a2 = a1 + hunk.removed |
|
874 | blines = [l[1:] for l in patchlines[1:] if l[0] != '-'] |

874 | blines = [l[1:] for l in patchlines[1:] if not l.startswith('-')] | |
|
875 | 875 | return path, (a1, a2, blines) |
|
876 | 876 | |
|
877 | 877 | def overlaydiffcontext(ctx, chunks): |
@@ -914,7 +914,10 b' def absorb(ui, repo, stack=None, targetc' | |||
|
914 | 914 | """ |
|
915 | 915 | if stack is None: |
|
916 | 916 | limit = ui.configint('absorb', 'max-stack-size') |
|
917 | stack = getdraftstack(repo['.'], limit) | |
|
917 | headctx = repo['.'] | |
|
918 | if len(headctx.parents()) > 1: | |
|
919 | raise error.Abort(_('cannot absorb into a merge')) | |
|
920 | stack = getdraftstack(headctx, limit) | |
|
918 | 921 | if limit and len(stack) >= limit: |
|
919 | 922 | ui.warn(_('absorb: only the recent %d changesets will ' |
|
920 | 923 | 'be analysed\n') |
@@ -932,7 +935,7 b' def absorb(ui, repo, stack=None, targetc' | |||
|
932 | 935 | if opts.get('interactive'): |
|
933 | 936 | diff = patch.diff(repo, stack[-1].node(), targetctx.node(), matcher) |
|
934 | 937 | origchunks = patch.parsepatch(diff) |
|
935 | chunks = cmdutil.recordfilter(ui, origchunks)[0] | |
|
938 | chunks = cmdutil.recordfilter(ui, origchunks, matcher)[0] | |
|
936 | 939 | targetctx = overlaydiffcontext(stack[-1], chunks) |
|
937 | 940 | fm = None |
|
938 | 941 | if opts.get('print_changes') or not opts.get('apply_changes'): |
@@ -81,10 +81,10 b' def _interestingfiles(repo, matcher):' | |||
|
81 | 81 | |
|
82 | 82 | """ |
|
83 | 83 | stat = repo.status(match=matcher) |
|
84 | added = stat[1] |

85 | removed = stat[2] |

84 | added = stat.added | |
|
85 | removed = stat.removed | |
|
86 | 86 | |
|
87 | copy = copies._forwardcopies(repo['.'], repo[None], matcher) |

87 | copy = copies.pathcopies(repo['.'], repo[None], matcher) | |
|
88 | 88 | # remove the copy files for which we already have copy info |
|
89 | 89 | added = [f for f in added if f not in copy] |
|
90 | 90 |
@@ -9,12 +9,14 b'' | |||
|
9 | 9 | """log repository events to a blackbox for debugging |
|
10 | 10 | |
|
11 | 11 | Logs event information to .hg/blackbox.log to help debug and diagnose problems. |
|
12 | The events that get logged can be configured via the blackbox.track config key. |

12 | The events that get logged can be configured via the blackbox.track and | |
|
13 | blackbox.ignore config keys. | |
|
13 | 14 | |
|
14 | 15 | Examples:: |
|
15 | 16 | |
|
16 | 17 | [blackbox] |
|
17 | 18 | track = * |
|
19 | ignore = pythonhook | |
|
18 | 20 | # dirty is *EXPENSIVE* (slow); |
|
19 | 21 | # each log entry indicates `+` if the repository is dirty, like :hg:`id`. |
|
20 | 22 | dirty = True |
@@ -84,6 +86,9 b" configitem('blackbox', 'maxfiles'," | |||
|
84 | 86 | configitem('blackbox', 'track', |
|
85 | 87 | default=lambda: ['*'], |
|
86 | 88 | ) |
|
89 | configitem('blackbox', 'ignore', | |
|
90 | default=lambda: ['chgserver', 'cmdserver', 'extension'], | |
|
91 | ) | |
|
87 | 92 | configitem('blackbox', 'date-format', |
|
88 | 93 | default='%Y/%m/%d %H:%M:%S', |
|
89 | 94 | ) |
@@ -94,12 +99,15 b' class blackboxlogger(object):' | |||
|
94 | 99 | def __init__(self, ui, repo): |
|
95 | 100 | self._repo = repo |
|
96 | 101 | self._trackedevents = set(ui.configlist('blackbox', 'track')) |
|
102 | self._ignoredevents = set(ui.configlist('blackbox', 'ignore')) | |
|
97 | 103 | self._maxfiles = ui.configint('blackbox', 'maxfiles') |
|
98 | 104 | self._maxsize = ui.configbytes('blackbox', 'maxsize') |
|
99 | 105 | self._inlog = False |
|
100 | 106 | |
|
101 | 107 | def tracked(self, event): |
|
102 | return b'*' in self._trackedevents or event in self._trackedevents |

108 | return ((b'*' in self._trackedevents | |
|
109 | and event not in self._ignoredevents) | |
|
110 | or event in self._trackedevents) | |
|
103 | 111 | |
|
104 | 112 | def log(self, ui, event, msg, opts): |
|
105 | 113 | # self._log() -> ctx.dirty() may create new subrepo instance, which |
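
The predicate above reads as: '*' tracks everything that is not explicitly ignored, and naming an event in blackbox.track always wins. A standalone sketch (event names illustrative):

    trackedevents = {'*'}
    ignoredevents = {'chgserver', 'cmdserver', 'extension'}

    def tracked(event):
        return (('*' in trackedevents and event not in ignoredevents)
                or event in trackedevents)

    assert tracked('command')        # matched by '*'
    assert not tracked('chgserver')  # suppressed by the ignore list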
@@ -439,6 +439,11 b' def convert(ui, src, dest=None, revmapfi' | |||
|
439 | 439 | :convert.hg.sourcename: records the given string as a 'convert_source' extra |
|
440 | 440 | value on each commit made in the target repository. The default is None. |
|
441 | 441 | |
|
442 | :convert.hg.preserve-hash: only works with Mercurial sources. Make convert |

443 | forgo performance improvements to the list of modified files in commits |

444 | when such an improvement would cause the hash of a commit to change. |
|
445 | The default is False. | |
|
446 | ||
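
For reference, a sketch of enabling the option with ordinary config syntax (assuming it is read from the [convert] section as documented):

    [convert]
    hg.preserve-hash = true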
|
442 | 447 | All Destinations |
|
443 | 448 | ################ |
|
444 | 449 |
@@ -114,7 +114,7 b" SKIPREV = 'SKIP'" | |||
|
114 | 114 | class commit(object): |
|
115 | 115 | def __init__(self, author, date, desc, parents, branch=None, rev=None, |
|
116 | 116 | extra=None, sortkey=None, saverev=True, phase=phases.draft, |
|
117 | optparents=None): | |
|
117 | optparents=None, ctx=None): | |
|
118 | 118 | self.author = author or 'unknown' |
|
119 | 119 | self.date = date or '0 0' |
|
120 | 120 | self.desc = desc |
@@ -126,6 +126,7 b' class commit(object):' | |||
|
126 | 126 | self.sortkey = sortkey |
|
127 | 127 | self.saverev = saverev |
|
128 | 128 | self.phase = phase |
|
129 | self.ctx = ctx # for hg to hg conversions | |
|
129 | 130 | |
|
130 | 131 | class converter_source(object): |
|
131 | 132 | """Conversion source interface""" |
@@ -339,7 +339,11 b' class mercurial_sink(common.converter_si' | |||
|
339 | 339 | phases.phasenames[commit.phase], 'convert') |
|
340 | 340 | |
|
341 | 341 | with self.repo.transaction("convert") as tr: |
|
342 | node = nodemod.hex(self.repo.commitctx(ctx)) | |
|
342 | if self.repo.ui.config('convert', 'hg.preserve-hash'): | |
|
343 | origctx = commit.ctx | |
|
344 | else: | |
|
345 | origctx = None | |
|
346 | node = nodemod.hex(self.repo.commitctx(ctx, origctx=origctx)) | |
|
343 | 347 | |
|
344 | 348 | # If the node value has changed, but the phase is lower than |
|
345 | 349 | # draft, set it back to draft since it hasn't been exposed |
@@ -591,7 +595,8 b' class mercurial_source(common.converter_' | |||
|
591 | 595 | extra=ctx.extra(), |
|
592 | 596 | sortkey=ctx.rev(), |
|
593 | 597 | saverev=self.saverev, |
|
594 | phase=ctx.phase()) |

598 | phase=ctx.phase(), | |
|
599 | ctx=ctx) | |
|
595 | 600 | |
|
596 | 601 | def numcommits(self): |
|
597 | 602 | return len(self.repo) |
@@ -284,9 +284,9 b' class monotone_source(common.converter_s' | |||
|
284 | 284 | # d2 => d3 |
|
285 | 285 | ignoremove[tofile] = 1 |
|
286 | 286 | for tofile, fromfile in renamed.items(): |
|
287 | self.ui.debug("copying file in renamed directory " |

288 | "from '%s' to '%s'" |

289 | % (fromfile, tofile), '\n') |

287 | self.ui.debug( | |
|
288 | "copying file in renamed directory from '%s' to '%s'" | |
|
289 | % (fromfile, tofile), '\n') | |
|
290 | 290 | files[tofile] = rev |
|
291 | 291 | copies[tofile] = fromfile |
|
292 | 292 | for fromfile in renamed.values(): |
@@ -370,4 +370,3 b' class monotone_source(common.converter_s' | |||
|
370 | 370 | self.mtnwritefp = None |
|
371 | 371 | self.mtnreadfp.close() |
|
372 | 372 | self.mtnreadfp = None |
|
373 |
@@ -1333,7 +1333,7 b' class svn_sink(converter_sink, commandli' | |||
|
1333 | 1333 | rev = self.commit_re.search(output).group(1) |
|
1334 | 1334 | except AttributeError: |
|
1335 | 1335 | if not files: |
|
1336 | return parents[0] if parents else None | |
|
1336 | return parents[0] if parents else 'None' | |
|
1337 | 1337 | self.ui.warn(_('unexpected svn output:\n')) |
|
1338 | 1338 | self.ui.warn(output) |
|
1339 | 1339 | raise error.Abort(_('unable to cope with svn output')) |
@@ -400,7 +400,7 b' def reposetup(ui, repo):' | |||
|
400 | 400 | if wlock is not None: |
|
401 | 401 | wlock.release() |
|
402 | 402 | |
|
403 | def commitctx(self, ctx, error=False): | |
|
403 | def commitctx(self, ctx, error=False, origctx=None): | |
|
404 | 404 | for f in sorted(ctx.added() + ctx.modified()): |
|
405 | 405 | if not self._eolmatch(f): |
|
406 | 406 | continue |
@@ -416,6 +416,6 b' def reposetup(ui, repo):' | |||
|
416 | 416 | if inconsistenteol(data): |
|
417 | 417 | raise errormod.Abort(_("inconsistent newline style " |
|
418 | 418 | "in %s\n") % f) |
|
419 | return super(eolrepo, self).commitctx(ctx, error) | |
|
419 | return super(eolrepo, self).commitctx(ctx, error, origctx) | |
|
420 | 420 | repo.__class__ = eolrepo |
|
421 | 421 | repo._hgcleardirstate() |
@@ -8,6 +8,7 b'' | |||
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import bisect |
|
11 | import io | |
|
11 | 12 | import os |
|
12 | 13 | import struct |
|
13 | 14 | |
@@ -246,7 +247,7 b' def getlastnode(path):' | |||
|
246 | 247 | hsh = None |
|
247 | 248 | try: |
|
248 | 249 | with open(path, 'rb') as f: |
|
249 |
f.seek(-_hshlen, |
|
|
250 | f.seek(-_hshlen, io.SEEK_END) | |
|
250 | 251 | if f.tell() > len(revmap.HEADER): |
|
251 | 252 | hsh = f.read(_hshlen) |
|
252 | 253 | except IOError: |
@@ -72,12 +72,43 b" in a text file by ensuring that 'sort' r" | |||
|
72 | 72 | To account for changes made by each tool, the line numbers used for incremental |
|
73 | 73 | formatting are recomputed before executing the next tool. So, each tool may see |
|
74 | 74 | different values for the arguments added by the :linerange suboption. |
|
75 | ||
|
76 | Each fixer tool is allowed to return some metadata in addition to the fixed file | |
|
77 | content. The metadata must be placed before the file content on stdout, | |
|
78 | separated from the file content by a zero byte. The metadata is parsed as a JSON | |
|
79 | value (so, it should be UTF-8 encoded and contain no zero bytes). A fixer tool | |
|
80 | is expected to produce this metadata encoding if and only if the :metadata | |
|
81 | suboption is true:: | |
|
82 | ||
|
83 | [fix] | |
|
84 | tool:command = tool --prepend-json-metadata | |
|
85 | tool:metadata = true | |
|
86 | ||
|
87 | The metadata values are passed to hooks, which can be used to print summaries or | |
|
88 | perform other post-fixing work. The supported hooks are:: | |
|
89 | ||
|
90 | "postfixfile" | |
|
91 | Run once for each file in each revision where any fixer tools made changes | |
|
92 | to the file content. Provides "$HG_REV" and "$HG_PATH" to identify the file, | |
|
93 | and "$HG_METADATA" with a map of fixer names to metadata values from fixer | |
|
94 | tools that affected the file. Fixer tools that didn't affect the file have a | |
|
95 | value of None. Only fixer tools that executed are present in the metadata. | |
|
96 | ||
|
97 | "postfix" | |
|
98 | Run once after all files and revisions have been handled. Provides | |
|
99 | "$HG_REPLACEMENTS" with information about what revisions were created and | |
|
100 | made obsolete. Provides a boolean "$HG_WDIRWRITTEN" to indicate whether any | |
|
101 | files in the working copy were updated. Provides a list "$HG_METADATA" | |
|
102 | mapping fixer tool names to lists of metadata values returned from | |
|
103 | executions that modified a file. This aggregates the same metadata | |
|
104 | previously passed to the "postfixfile" hook. | |
|
75 | 105 | """ |
|
76 | 106 | |
|
77 | 107 | from __future__ import absolute_import |
|
78 | 108 | |
|
79 | 109 | import collections |
|
80 | 110 | import itertools |
|
111 | import json | |
|
81 | 112 | import os |
|
82 | 113 | import re |
|
83 | 114 | import subprocess |
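To make the metadata contract above concrete, here is a minimal standalone fixer tool — a hypothetical Python 3 script, not part of this patch — that emits the JSON-plus-zero-byte prefix that fixfile() parses when the :metadata suboption is true:

    #!/usr/bin/env python3
    # myfixer.py - hypothetical fixer emitting metadata for `hg fix`
    import json
    import sys

    data = sys.stdin.buffer.read()         # file content provided by `hg fix`
    fixed = data.replace(b'\t', b'    ')   # the "fix": expand tabs to spaces
    meta = {'tabs_expanded': data.count(b'\t')}
    out = sys.stdout.buffer
    out.write(json.dumps(meta).encode('utf-8'))  # metadata first, as JSON
    out.write(b'\0')                             # zero byte separator
    out.write(fixed)                             # then the fixed content

It would be registered like the documented example, with the :metadata suboption enabled::

    [fix]
    mytool:command = python3 myfixer.py
    mytool:pattern = set:**.py
    mytool:metadata = true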
@@ -117,13 +148,14 b' command = registrar.command(cmdtable)' | |||
|
117 | 148 | configtable = {} |
|
118 | 149 | configitem = registrar.configitem(configtable) |
|
119 | 150 | |
|
120 | # Register the suboptions allowed for each configured fixer. | |
|
151 | # Register the suboptions allowed for each configured fixer, and default values. | |
|
121 | 152 | FIXER_ATTRS = { |
|
122 | 153 | 'command': None, |
|
123 | 154 | 'linerange': None, |
|
124 | 155 | 'fileset': None, |
|
125 | 156 | 'pattern': None, |
|
126 | 157 | 'priority': 0, |
|
158 | 'metadata': False, | |
|
127 | 159 | } |
|
128 | 160 | |
|
129 | 161 | for key, default in FIXER_ATTRS.items(): |
@@ -201,10 +233,12 b' def fix(ui, repo, *pats, **opts):' | |||
|
201 | 233 | for rev, path in items: |
|
202 | 234 | ctx = repo[rev] |
|
203 | 235 | olddata = ctx[path].data() |
|
204 | newdata = fixfile(ui, opts, fixers, ctx, path, basectxs[rev]) | |
|
236 | metadata, newdata = fixfile(ui, opts, fixers, ctx, path, | |
|
237 | basectxs[rev]) | |
|
205 | 238 | # Don't waste memory/time passing unchanged content back, but |
|
206 | 239 | # produce one result per item either way. |
|
207 | yield (rev, path, newdata if newdata != olddata else None) | |
|
240 | yield (rev, path, metadata, | |
|
241 | newdata if newdata != olddata else None) | |
|
208 | 242 | results = worker.worker(ui, 1.0, getfixes, tuple(), workqueue, |
|
209 | 243 | threadsafe=False) |
|
210 | 244 | |
@@ -215,15 +249,25 b' def fix(ui, repo, *pats, **opts):' | |||
|
215 | 249 | # the tests deterministic. It might also be considered a feature since |
|
216 | 250 | # it makes the results more easily reproducible. |
|
217 | 251 | filedata = collections.defaultdict(dict) |
|
252 | aggregatemetadata = collections.defaultdict(list) | |
|
218 | 253 | replacements = {} |
|
219 | 254 | wdirwritten = False |
|
220 | 255 | commitorder = sorted(revstofix, reverse=True) |
|
221 | 256 | with ui.makeprogress(topic=_('fixing'), unit=_('files'), |
|
222 | 257 | total=sum(numitems.values())) as progress: |
|
223 | for rev, path, newdata in results: | |
|
258 | for rev, path, filerevmetadata, newdata in results: | |
|
224 | 259 | progress.increment(item=path) |
|
260 | for fixername, fixermetadata in filerevmetadata.items(): | |
|
261 | aggregatemetadata[fixername].append(fixermetadata) | |
|
225 | 262 | if newdata is not None: |
|
226 | 263 | filedata[rev][path] = newdata |
|
264 | hookargs = { | |
|
265 | 'rev': rev, | |
|
266 | 'path': path, | |
|
267 | 'metadata': filerevmetadata, | |
|
268 | } | |
|
269 | repo.hook('postfixfile', throw=False, | |
|
270 | **pycompat.strkwargs(hookargs)) | |
|
227 | 271 | numitems[rev] -= 1 |
|
228 | 272 | # Apply the fixes for this and any other revisions that are |
|
229 | 273 | # ready and sitting at the front of the queue. Using a loop here |
@@ -240,6 +284,12 b' def fix(ui, repo, *pats, **opts):' | |||
|
240 | 284 | del filedata[rev] |
|
241 | 285 | |
|
242 | 286 | cleanup(repo, replacements, wdirwritten) |
|
287 | hookargs = { | |
|
288 | 'replacements': replacements, | |
|
289 | 'wdirwritten': wdirwritten, | |
|
290 | 'metadata': aggregatemetadata, | |
|
291 | } | |
|
292 | repo.hook('postfix', throw=True, **pycompat.strkwargs(hookargs)) | |
|
243 | 293 | |
|
244 | 294 | def cleanup(repo, replacements, wdirwritten): |
|
245 | 295 | """Calls scmutil.cleanupnodes() with the given replacements. |
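Assuming the two hooks registered above, a consumer can be as simple as shell one-liners; the hook names are fixed by the code above, while the echo commands are hypothetical placeholders::

    [hooks]
    postfixfile = echo "fixed $HG_PATH in revision $HG_REV"
    postfix = echo "done; working dir written: $HG_WDIRWRITTEN"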
@@ -491,6 +541,7 b' def fixfile(ui, opts, fixers, fixctx, pa' | |||
|
491 | 541 | A fixer tool's stdout will become the file's new content if and only if it |
|
492 | 542 | exits with code zero. |
|
493 | 543 | """ |
|
544 | metadata = {} | |
|
494 | 545 | newdata = fixctx[path].data() |
|
495 | 546 | for fixername, fixer in fixers.iteritems(): |
|
496 | 547 | if fixer.affects(opts, fixctx, path): |
@@ -506,9 +557,20 b' def fixfile(ui, opts, fixers, fixctx, pa' | |||
|
506 | 557 | stdin=subprocess.PIPE, |
|
507 | 558 | stdout=subprocess.PIPE, |
|
508 | 559 | stderr=subprocess.PIPE) |
|
509 | newerdata, stderr = proc.communicate(newdata) | |
|
560 | stdout, stderr = proc.communicate(newdata) | |
|
510 | 561 | if stderr: |
|
511 | 562 | showstderr(ui, fixctx.rev(), fixername, stderr) |
|
563 | newerdata = stdout | |
|
564 | if fixer.shouldoutputmetadata(): | |
|
565 | try: | |
|
566 | metadatajson, newerdata = stdout.split('\0', 1) | |
|
567 | metadata[fixername] = json.loads(metadatajson) | |
|
568 | except ValueError: | |
|
569 | ui.warn(_('ignored invalid output from fixer tool: %s\n') % | |
|
570 | (fixername,)) | |
|
571 | continue | |
|
572 | else: | |
|
573 | metadata[fixername] = None | |
|
512 | 574 | if proc.returncode == 0: |
|
513 | 575 | newdata = newerdata |
|
514 | 576 | else: |
@@ -519,7 +581,7 b' def fixfile(ui, opts, fixers, fixctx, pa' | |||
|
519 | 581 | ui, _('no fixes will be applied'), |
|
520 | 582 | hint=_('use --config fix.failure=continue to apply any ' |
|
521 | 583 | 'successful fixes anyway')) |
|
522 | return newdata | |
|
584 | return metadata, newdata | |
|
523 | 585 | |
|
524 | 586 | def showstderr(ui, rev, fixername, stderr): |
|
525 | 587 | """Writes the lines of the stderr string as warnings on the ui |
@@ -667,6 +729,10 b' class Fixer(object):' | |||
|
667 | 729 | """Should this fixer run on the file at the given path and context?""" |
|
668 | 730 | return scmutil.match(fixctx, [self._pattern], opts)(path) |
|
669 | 731 | |
|
732 | def shouldoutputmetadata(self): | |
|
733 | """Should the stdout of this fixer start with JSON and a null byte?""" | |
|
734 | return self._metadata | |
|
735 | ||
|
670 | 736 | def command(self, ui, path, rangesfn): |
|
671 | 737 | """A shell command to use to invoke this fixer on the given file/lines |
|
672 | 738 |
@@ -192,12 +192,15 b' def am(ui, repo, *args, **kwargs):' | |||
|
192 | 192 | def apply(ui, repo, *args, **kwargs): |
|
193 | 193 | cmdoptions = [ |
|
194 | 194 | ('p', 'p', int, ''), |
|
195 | ('', 'directory', '', ''), | |
|
195 | 196 | ] |
|
196 | 197 | args, opts = parseoptions(ui, cmdoptions, args) |
|
197 | 198 | |
|
198 | 199 | cmd = Command('import --no-commit') |
|
199 | 200 | if (opts.get('p')): |
|
200 | 201 | cmd['-p'] = opts.get('p') |
|
202 | if opts.get('directory'): | |
|
203 | cmd['--prefix'] = opts.get('directory') | |
|
201 | 204 | cmd.extend(args) |
|
202 | 205 | |
|
203 | 206 | ui.status((bytes(cmd)), "\n") |
@@ -681,6 +684,7 b' def mergetool(ui, repo, *args, **kwargs)' | |||
|
681 | 684 | def mv(ui, repo, *args, **kwargs): |
|
682 | 685 | cmdoptions = [ |
|
683 | 686 | ('f', 'force', None, ''), |
|
687 | ('n', 'dry-run', None, ''), | |
|
684 | 688 | ] |
|
685 | 689 | args, opts = parseoptions(ui, cmdoptions, args) |
|
686 | 690 | |
@@ -689,6 +693,8 b' def mv(ui, repo, *args, **kwargs):' | |||
|
689 | 693 | |
|
690 | 694 | if opts.get('force'): |
|
691 | 695 | cmd['-f'] = None |
|
696 | if opts.get('dry_run'): | |
|
697 | cmd['-n'] = None | |
|
692 | 698 | |
|
693 | 699 | ui.status((bytes(cmd)), "\n") |
|
694 | 700 | |
@@ -917,6 +923,7 b' def show(ui, repo, *args, **kwargs):' | |||
|
917 | 923 | |
|
918 | 924 | def stash(ui, repo, *args, **kwargs): |
|
919 | 925 | cmdoptions = [ |
|
926 | ('p', 'patch', None, ''), | |
|
920 | 927 | ] |
|
921 | 928 | args, opts = parseoptions(ui, cmdoptions, args) |
|
922 | 929 | |
@@ -925,6 +932,17 b' def stash(ui, repo, *args, **kwargs):' | |||
|
925 | 932 | |
|
926 | 933 | if action == 'list': |
|
927 | 934 | cmd['-l'] = None |
|
935 | if opts.get('patch'): | |
|
936 | cmd['-p'] = None | |
|
937 | elif action == 'show': | |
|
938 | if opts.get('patch'): | |
|
939 | cmd['-p'] = None | |
|
940 | else: | |
|
941 | cmd['--stat'] = None | |
|
942 | if len(args) > 1: | |
|
943 | cmd.append(args[1]) | |
|
944 | elif action == 'clear': | |
|
945 | cmd['--cleanup'] = None | |
|
928 | 946 | elif action == 'drop': |
|
929 | 947 | cmd['-d'] = None |
|
930 | 948 | if len(args) > 1: |
@@ -937,10 +955,9 b' def stash(ui, repo, *args, **kwargs):' | |||
|
937 | 955 | cmd.append(args[1]) |
|
938 | 956 | if action == 'apply': |
|
939 | 957 | cmd['--keep'] = None |
|
940 | elif (action == 'branch' | |
|
941 | or action == 'create'): | |
|
958 | elif action == 'branch' or action == 'create': | |
|
942 | 959 | ui.status(_("note: Mercurial doesn't have equivalents to the " |
|
943 | "git stash branch or create actions\n\n")) | |
|
960 | "git stash branch or create actions\n\n")) | |
|
944 | 961 | return |
|
945 | 962 | else: |
|
946 | 963 | if len(args) > 0: |
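With the stash handling above, githelp's translation now covers the show and clear sub-commands too. Assuming the githelp extension is enabled, a query like the following would suggest the shelve equivalent (output is approximate)::

    $ hg githelp -- git stash show -p
    hg shelve -p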
@@ -49,6 +49,11 b" configitem('gpg', '.*'," | |||
|
49 | 49 | |
|
50 | 50 | # Custom help category |
|
51 | 51 | _HELP_CATEGORY = 'gpg' |
|
52 | help.CATEGORY_ORDER.insert( | |
|
53 | help.CATEGORY_ORDER.index(registrar.command.CATEGORY_HELP), | |
|
54 | _HELP_CATEGORY | |
|
55 | ) | |
|
56 | help.CATEGORY_NAMES[_HELP_CATEGORY] = 'Signing changes (GPG)' | |
|
52 | 57 | |
|
53 | 58 | class gpg(object): |
|
54 | 59 | def __init__(self, path, key=None): |
@@ -1079,6 +1079,8 b' def movecursor(state, oldpos, newpos):' | |||
|
1079 | 1079 | def changemode(state, mode): |
|
1080 | 1080 | curmode, _ = state['mode'] |
|
1081 | 1081 | state['mode'] = (mode, curmode) |
|
1082 | if mode == MODE_PATCH: | |
|
1083 | state['modes'][MODE_PATCH]['patchcontents'] = patchcontents(state) | |
|
1082 | 1084 | |
|
1083 | 1085 | def makeselection(state, pos): |
|
1084 | 1086 | state['selected'] = pos |
@@ -1134,7 +1136,7 b' def changeview(state, delta, unit):' | |||
|
1134 | 1136 | if mode != MODE_PATCH: |
|
1135 | 1137 | return |
|
1136 | 1138 | mode_state = state['modes'][mode] |
|
1137 | num_lines = len(patchcontents(state)) | |
|
1139 | num_lines = len(mode_state['patchcontents']) | |
|
1138 | 1140 | page_height = state['page_height'] |
|
1139 | 1141 | unit = page_height if unit == 'page' else 1 |
|
1140 | 1142 | num_pages = 1 + (num_lines - 1) / page_height |
@@ -1227,15 +1229,25 b' def addln(win, y, x, line, color=None):' | |||
|
1227 | 1229 | else: |
|
1228 | 1230 | win.addstr(y, x, line) |
|
1229 | 1231 | |
|
1232 | def _trunc_head(line, n): | |
|
1233 | if len(line) <= n: | |
|
1234 | return line | |
|
1235 | return '> ' + line[-(n - 2):] | |
|
1236 | def _trunc_tail(line, n): | |
|
1237 | if len(line) <= n: | |
|
1238 | return line | |
|
1239 | return line[:n - 2] + ' >' | |
|
1240 | ||
|
1230 | 1241 | def patchcontents(state): |
|
1231 | 1242 | repo = state['repo'] |
|
1232 | 1243 | rule = state['rules'][state['pos']] |
|
1233 | repo.ui.verbose = True | |
|
1234 | 1244 | displayer = logcmdutil.changesetdisplayer(repo.ui, repo, { |
|
1235 | 1245 | "patch": True, "template": "status" |
|
1236 | 1246 | }, buffered=True) |
|
1237 | displayer.show(rule.ctx) | |
|
1238 | displayer.close() | |
|
1247 | overrides = {('ui', 'verbose'): True} | |
|
1248 | with repo.ui.configoverride(overrides, source='histedit'): | |
|
1249 | displayer.show(rule.ctx) | |
|
1250 | displayer.close() | |
|
1239 | 1251 | return displayer.hunk[rule.ctx.rev()].splitlines() |
|
1240 | 1252 | |
|
1241 | 1253 | def _chisteditmain(repo, rules, stdscr): |
@@ -1283,11 +1295,23 b' def _chisteditmain(repo, rules, stdscr):' | |||
|
1283 | 1295 | line = "bookmark: {0}".format(' '.join(bms)) |
|
1284 | 1296 | win.addstr(3, 1, line[:length]) |
|
1285 | 1297 | |
|
1286 | line = "files: {0}".format(','.join(ctx.files())) | |
|
1298 | line = "summary: {0}".format(ctx.description().splitlines()[0]) | |
|
1287 | 1299 | win.addstr(4, 1, line[:length]) |
|
1288 | 1300 | |
|
1289 | line = "summary: {0}".format(ctx.description().splitlines()[0]) | |
|
1290 | win.addstr(5, 1, line[:length]) | |
|
1301 | line = "files: " | |
|
1302 | win.addstr(5, 1, line) | |
|
1303 | fnx = 1 + len(line) | |
|
1304 | fnmaxx = length - fnx + 1 | |
|
1305 | y = 5 | |
|
1306 | fnmaxn = maxy - (1 + y) - 1 | |
|
1307 | files = ctx.files() | |
|
1308 | for i, line1 in enumerate(files): | |
|
1309 | if len(files) > fnmaxn and i == fnmaxn - 1: | |
|
1310 | win.addstr(y, fnx, _trunc_tail(','.join(files[i:]), fnmaxx)) | |
|
1311 | y = y + 1 | |
|
1312 | break | |
|
1313 | win.addstr(y, fnx, _trunc_head(line1, fnmaxx)) | |
|
1314 | y = y + 1 | |
|
1291 | 1315 | |
|
1292 | 1316 | conflicts = rule.conflicts |
|
1293 | 1317 | if len(conflicts) > 0: |
@@ -1296,7 +1320,7 b' def _chisteditmain(repo, rules, stdscr):' | |||
|
1296 | 1320 | else: |
|
1297 | 1321 | conflictstr = 'no overlap' |
|
1298 | 1322 | |
|
1299 | win.addstr(6, 1, conflictstr[:length]) | |
|
1323 | win.addstr(y, 1, conflictstr[:length]) | |
|
1300 | 1324 | win.noutrefresh() |
|
1301 | 1325 | |
|
1302 | 1326 | def helplines(mode): |
@@ -1372,15 +1396,16 b' pgup/K: move patch up, pgdn/J: move patc' | |||
|
1372 | 1396 | |
|
1373 | 1397 | def renderpatch(win, state): |
|
1374 | 1398 | start = state['modes'][MODE_PATCH]['line_offset'] |
|
1375 | renderstring(win, state, patchcontents(state)[start:], diffcolors=True) | |
|
1399 | content = state['modes'][MODE_PATCH]['patchcontents'] | |
|
1400 | renderstring(win, state, content[start:], diffcolors=True) | |
|
1376 | 1401 | |
|
1377 | 1402 | def layout(mode): |
|
1378 | 1403 | maxy, maxx = stdscr.getmaxyx() |
|
1379 | 1404 | helplen = len(helplines(mode)) |
|
1380 | 1405 | return { |
|
1381 | 'commit': (8, maxx), | |
|
1406 | 'commit': (12, maxx), | |
|
1382 | 1407 | 'help': (helplen, maxx), |
|
1383 | 'main': (maxy - helplen - 8, maxx), | |
|
1408 | 'main': (maxy - helplen - 12, maxx), | |
|
1384 | 1409 | } |
|
1385 | 1410 | |
|
1386 | 1411 | def drawvertwin(size, y, x): |
@@ -1894,6 +1919,14 b' def _aborthistedit(ui, repo, state, noba' | |||
|
1894 | 1919 | finally: |
|
1895 | 1920 | state.clear() |
|
1896 | 1921 | |
|
1922 | def hgaborthistedit(ui, repo): | |
|
1923 | state = histeditstate(repo) | |
|
1924 | nobackup = not ui.configbool('rewrite', 'backup-bundle') | |
|
1925 | with repo.wlock() as wlock, repo.lock() as lock: | |
|
1926 | state.wlock = wlock | |
|
1927 | state.lock = lock | |
|
1928 | _aborthistedit(ui, repo, state, nobackup=nobackup) | |
|
1929 | ||
|
1897 | 1930 | def _edithisteditplan(ui, repo, state, rules): |
|
1898 | 1931 | state.read() |
|
1899 | 1932 | if not rules: |
@@ -2288,8 +2321,6 b' def summaryhook(ui, repo):' | |||
|
2288 | 2321 | |
|
2289 | 2322 | def extsetup(ui): |
|
2290 | 2323 | cmdutil.summaryhooks.add('histedit', summaryhook) |
|
2291 | cmdutil.unfinishedstates.append( | |
|
2292 | ['histedit-state', False, True, _('histedit in progress'), | |
|
2293 | _("use 'hg histedit --continue' or 'hg histedit --abort'")]) | |
|
2294 | cmdutil.afterresolvedstates.append( | |
|
2295 | ['histedit-state', _('hg histedit --continue')]) | |
|
2324 | statemod.addunfinished('histedit', fname='histedit-state', allowcommit=True, | |
|
2325 | continueflag=True, abortfunc=hgaborthistedit) | |
|
2326 |
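The statemod.addunfinished() registration above replaces the two manually maintained lists. Besides producing the 'histedit in progress' message, the abortfunc hookup is what lets the generic state commands dispatch correctly; assuming an interrupted histedit, something like the following would route to hgaborthistedit()::

    $ hg abort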
@@ -785,8 +785,8 b' def reposetup(ui, repo):' | |||
|
785 | 785 | finally: |
|
786 | 786 | del self.commitctx |
|
787 | 787 | |
|
788 | def kwcommitctx(self, ctx, error=False): | |
|
789 | n = super(kwrepo, self).commitctx(ctx, error) | |
|
788 | def kwcommitctx(self, ctx, error=False, origctx=None): | |
|
789 | n = super(kwrepo, self).commitctx(ctx, error, origctx) | |
|
790 | 790 | # no lock needed, only called from repo.commit() which already locks |
|
791 | 791 | if not kwt.postcommit: |
|
792 | 792 | restrict = kwt.restrict |
@@ -515,7 +515,7 b' def overridecalculateupdates(origfn, rep' | |||
|
515 | 515 | return actions, diverge, renamedelete |
|
516 | 516 | |
|
517 | 517 | @eh.wrapfunction(merge, 'recordupdates') |
|
518 | def mergerecordupdates(orig, repo, actions, branchmerge): | |
|
518 | def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata): | |
|
519 | 519 | if 'lfmr' in actions: |
|
520 | 520 | lfdirstate = lfutil.openlfdirstate(repo.ui, repo) |
|
521 | 521 | for lfile, args, msg in actions['lfmr']: |
@@ -526,7 +526,7 b' def mergerecordupdates(orig, repo, actio' | |||
|
526 | 526 | lfdirstate.add(lfile) |
|
527 | 527 | lfdirstate.write() |
|
528 | 528 | |
|
529 | return orig(repo, actions, branchmerge) | |
|
529 | return orig(repo, actions, branchmerge, getfiledata) | |
|
530 | 530 | |
|
531 | 531 | # Override filemerge to prompt the user about how they wish to merge |
|
532 | 532 | # largefiles. This will handle identical edits without prompting the user. |
@@ -545,7 +545,8 b' def overridefilemerge(origfn, premerge, ' | |||
|
545 | 545 | (dhash == ahash or |
|
546 | 546 | repo.ui.promptchoice( |
|
547 | 547 | _('largefile %s has a merge conflict\nancestor was %s\n' |
|
548 | 'keep (l)ocal %s or\ntake (o)ther %s?' | |
|
548 | 'you can keep (l)ocal %s or take (o)ther %s.\n' | |
|
549 | 'what do you want to do?' | |
|
549 | 550 | '$$ &Local $$ &Other') % |
|
550 | 551 | (lfutil.splitstandin(orig), ahash, dhash, ohash), |
|
551 | 552 | 0) == 1)): |
@@ -227,9 +227,9 b' def _reposetup(ui, repo):' | |||
|
227 | 227 | |
|
228 | 228 | class lfsrepo(repo.__class__): |
|
229 | 229 | @localrepo.unfilteredmethod |
|
230 | def commitctx(self, ctx, error=False): | |
|
230 | def commitctx(self, ctx, error=False, origctx=None): | |
|
231 | 231 | repo.svfs.options['lfstrack'] = _trackedmatcher(self) |
|
232 | return super(lfsrepo, self).commitctx(ctx, error) | |
|
232 | return super(lfsrepo, self).commitctx(ctx, error, origctx=origctx) | |
|
233 | 233 | |
|
234 | 234 | repo.__class__ = lfsrepo |
|
235 | 235 |
@@ -144,9 +144,21 b' except KeyError:' | |||
|
144 | 144 | stripext = extensions.load(dummyui(), 'strip', '') |
|
145 | 145 | |
|
146 | 146 | strip = stripext.strip |
|
147 | checksubstate = stripext.checksubstate | |
|
148 | checklocalchanges = stripext.checklocalchanges | |
|
149 | ||
|
147 | ||
|
148 | def checksubstate(repo, baserev=None): | |
|
149 | '''return list of subrepos at a different revision than substate. | |
|
150 | Abort if any subrepos have uncommitted changes.''' | |
|
151 | inclsubs = [] | |
|
152 | wctx = repo[None] | |
|
153 | if baserev: | |
|
154 | bctx = repo[baserev] | |
|
155 | else: | |
|
156 | bctx = wctx.p1() | |
|
157 | for s in sorted(wctx.substate): | |
|
158 | wctx.sub(s).bailifchanged(True) | |
|
159 | if s not in bctx.substate or bctx.sub(s).dirty(): | |
|
160 | inclsubs.append(s) | |
|
161 | return inclsubs | |
|
150 | 162 | |
|
151 | 163 | # Patch names looks like unix-file names. |
|
152 | 164 | # They must be joinable with queue directory and result in the patch path. |
@@ -1149,7 +1161,19 b' class queue(object):' | |||
|
1149 | 1161 | # plain versions for i18n tool to detect them |
|
1150 | 1162 | _("local changes found, qrefresh first") |
|
1151 | 1163 | _("local changed subrepos found, qrefresh first") |
|
1152 | return checklocalchanges(repo, force, excsuffix) | |
|
1164 | ||
|
1165 | s = repo.status() | |
|
1166 | if not force: | |
|
1167 | cmdutil.checkunfinished(repo) | |
|
1168 | if s.modified or s.added or s.removed or s.deleted: | |
|
1169 | _("local changes found") # i18n tool detection | |
|
1170 | raise error.Abort(_("local changes found" + excsuffix)) | |
|
1171 | if checksubstate(repo): | |
|
1172 | _("local changed subrepos found") # i18n tool detection | |
|
1173 | raise error.Abort(_("local changed subrepos found" + excsuffix)) | |
|
1174 | else: | |
|
1175 | cmdutil.checkunfinished(repo, skipmerge=True) | |
|
1176 | return s | |
|
1153 | 1177 | |
|
1154 | 1178 | _reserved = ('series', 'status', 'guards', '.', '..') |
|
1155 | 1179 | def checkreservedname(self, name): |
@@ -51,21 +51,25 b' def getbundlechangegrouppart_narrow(bund' | |||
|
51 | 51 | assert repo.ui.configbool('experimental', 'narrowservebrokenellipses') |
|
52 | 52 | |
|
53 | 53 | cgversions = b2caps.get('changegroup') |
|
54 | if cgversions: # 3.1 and 3.2 ship with an empty value | |
|
55 | cgversions = [v for v in cgversions | |
|
56 | if v in changegroup.supportedoutgoingversions(repo)] | |
|
57 | if not cgversions: | |
|
58 | raise ValueError(_('no common changegroup version')) | |
|
59 | version = max(cgversions) | |
|
60 | else: | |
|
61 | raise ValueError(_("server does not advertise changegroup version," | |
|
62 | " can't negotiate support for ellipsis nodes")) | |
|
54 | cgversions = [v for v in cgversions | |
|
55 | if v in changegroup.supportedoutgoingversions(repo)] | |
|
56 | if not cgversions: | |
|
57 | raise ValueError(_('no common changegroup version')) | |
|
58 | version = max(cgversions) | |
|
63 | 59 | |
|
64 | include = sorted(filter(bool, kwargs.get(r'includepats', []))) | |
|
65 | exclude = sorted(filter(bool, kwargs.get(r'excludepats', []))) | |
|
66 | newmatch = narrowspec.match(repo.root, include=include, exclude=exclude) | |
|
60 | oldinclude = sorted(filter(bool, kwargs.get(r'oldincludepats', []))) | |
|
61 | oldexclude = sorted(filter(bool, kwargs.get(r'oldexcludepats', []))) | |
|
62 | newinclude = sorted(filter(bool, kwargs.get(r'includepats', []))) | |
|
63 | newexclude = sorted(filter(bool, kwargs.get(r'excludepats', []))) | |
|
64 | known = {bin(n) for n in kwargs.get(r'known', [])} | |
|
65 | generateellipsesbundle2(bundler, repo, oldinclude, oldexclude, newinclude, | |
|
66 | newexclude, version, common, heads, known, | |
|
67 | kwargs.get(r'depth', None)) | |
|
67 | 68 | |
|
68 | depth = kwargs.get(r'depth', None) | |
|
69 | def generateellipsesbundle2(bundler, repo, oldinclude, oldexclude, newinclude, | |
|
70 | newexclude, version, common, heads, known, depth): | |
|
71 | newmatch = narrowspec.match(repo.root, include=newinclude, | |
|
72 | exclude=newexclude) | |
|
69 | 73 | if depth is not None: |
|
70 | 74 | depth = int(depth) |
|
71 | 75 | if depth < 1: |
@@ -73,10 +77,7 b' def getbundlechangegrouppart_narrow(bund' | |||
|
73 | 77 | |
|
74 | 78 | heads = set(heads or repo.heads()) |
|
75 | 79 | common = set(common or [nullid]) |
|
76 | oldinclude = sorted(filter(bool, kwargs.get(r'oldincludepats', []))) | |
|
77 | oldexclude = sorted(filter(bool, kwargs.get(r'oldexcludepats', []))) | |
|
78 | known = {bin(n) for n in kwargs.get(r'known', [])} | |
|
79 | if known and (oldinclude != include or oldexclude != exclude): | |
|
80 | if known and (oldinclude != newinclude or oldexclude != newexclude): | |
|
80 | 81 | # Steps: |
|
81 | 82 | # 1. Send kill for "$known & ::common" |
|
82 | 83 | # |
@@ -146,7 +146,7 b' def pullbundle2extraprepare(orig, pullop' | |||
|
146 | 146 | kwargs['excludepats'] = exclude |
|
147 | 147 | # calculate known nodes only in ellipses cases because in non-ellipses cases |
|
148 | 148 | # we have all the nodes |
|
149 | if wireprototypes.ELLIPSESCAP in pullop.remote.capabilities(): | |
|
149 | if wireprototypes.ELLIPSESCAP1 in pullop.remote.capabilities(): | |
|
150 | 150 | kwargs['known'] = [node.hex(ctx.node()) for ctx in |
|
151 | 151 | repo.set('::%ln', pullop.common) |
|
152 | 152 | if ctx.node() != node.nullid] |
@@ -216,7 +216,7 b' def _narrow(ui, repo, remote, commoninc,' | |||
|
216 | 216 | todelete.append(f) |
|
217 | 217 | elif f.startswith('meta/'): |
|
218 | 218 | dir = f[5:-13] |
|
219 |
dirs = |
|
|
219 | dirs = sorted(util.dirs({dir})) + [dir] | |
|
220 | 220 | include = True |
|
221 | 221 | for d in dirs: |
|
222 | 222 | visit = newmatch.visitdir(d) |
@@ -253,7 +253,14 b' def _widen(ui, repo, remote, commoninc, ' | |||
|
253 | 253 | # then send that information to server whether we want ellipses or not. |
|
254 | 254 | # Theoretically a non-ellipses repo should be able to use narrow |
|
255 | 255 | # functionality from an ellipses enabled server |
|
256 | ellipsesremote = wireprototypes.ELLIPSESCAP in remote.capabilities() | |
|
256 | remotecap = remote.capabilities() | |
|
257 | ellipsesremote = any(cap in remotecap | |
|
258 | for cap in wireprototypes.SUPPORTED_ELLIPSESCAP) | |
|
259 | ||
|
260 | # check whether we are talking to a server which supports old version of | |
|
261 | # ellipses capabilities | |
|
262 | isoldellipses = (ellipsesremote and wireprototypes.ELLIPSESCAP1 in | |
|
263 | remotecap and wireprototypes.ELLIPSESCAP not in remotecap) | |
|
257 | 264 | |
|
258 | 265 | def pullbundle2extraprepare_widen(orig, pullop, kwargs): |
|
259 | 266 | orig(pullop, kwargs) |
@@ -271,19 +278,22 b' def _widen(ui, repo, remote, commoninc, ' | |||
|
271 | 278 | # silence the devel-warning of applying an empty changegroup |
|
272 | 279 | overrides = {('devel', 'all-warnings'): False} |
|
273 | 280 | |
|
281 | common = commoninc[0] | |
|
274 | 282 | with ui.uninterruptible(): |
|
275 | common = commoninc[0] | |
|
276 | 283 | if ellipsesremote: |
|
277 | 284 | ds = repo.dirstate |
|
278 | 285 | p1, p2 = ds.p1(), ds.p2() |
|
279 | 286 | with ds.parentchange(): |
|
280 | 287 | ds.setparents(node.nullid, node.nullid) |
|
288 | if isoldellipses: | |
|
281 | 289 | with wrappedextraprepare: |
|
282 | with repo.ui.configoverride(overrides, 'widen'): | |
|
283 | exchange.pull(repo, remote, heads=common) | |
|
284 | with ds.parentchange(): | |
|
285 | ds.setparents(p1, p2) | |
|
290 | exchange.pull(repo, remote, heads=common) | |
|
286 | 291 | else: |
|
292 | known = [] | |
|
293 | if ellipsesremote: | |
|
294 | known = [node.hex(ctx.node()) for ctx in | |
|
295 | repo.set('::%ln', common) | |
|
296 | if ctx.node() != node.nullid] | |
|
287 | 297 | with remote.commandexecutor() as e: |
|
288 | 298 | bundle = e.callcommand('narrow_widen', { |
|
289 | 299 | 'oldincludes': oldincludes, |
@@ -292,15 +302,20 b' def _widen(ui, repo, remote, commoninc, ' | |||
|
292 | 302 | 'newexcludes': newexcludes, |
|
293 | 303 | 'cgversion': '03', |
|
294 | 304 | 'commonheads': common, |
|
295 | 'known': [], | |
|
296 | 'ellipses': False, | |
|
305 | 'known': known, | |
|
306 | 'ellipses': ellipsesremote, | |
|
297 | 307 | }).result() |
|
298 | 308 | |
|
299 | with repo.transaction('widening') as tr: | |
|
300 |
|
|
|
301 | tgetter = lambda: tr | |
|
302 | bundle2.processbundle(repo, bundle, | |
|
303 | transactiongetter=tgetter) | |
|
309 | trmanager = exchange.transactionmanager(repo, 'widen', remote.url()) | |
|
310 | with trmanager, repo.ui.configoverride(overrides, 'widen'): | |
|
311 | op = bundle2.bundleoperation(repo, trmanager.transaction, | |
|
312 | source='widen') | |
|
313 | # TODO: we should catch error.Abort here | |
|
314 | bundle2.processbundle(repo, bundle, op=op) | |
|
315 | ||
|
316 | if ellipsesremote: | |
|
317 | with ds.parentchange(): | |
|
318 | ds.setparents(p1, p2) | |
|
304 | 319 | |
|
305 | 320 | with repo.transaction('widening'): |
|
306 | 321 | repo.setnewnarrowpats() |
@@ -16,21 +16,21 b' def wrapdirstate(repo, dirstate):' | |||
|
16 | 16 | """Add narrow spec dirstate ignore, block changes outside narrow spec.""" |
|
17 | 17 | |
|
18 | 18 | def _editfunc(fn): |
|
19 | def _wrapper(self, *args): | |
|
19 | def _wrapper(self, *args, **kwargs): | |
|
20 | 20 | narrowmatch = repo.narrowmatch() |
|
21 | 21 | for f in args: |
|
22 | 22 | if f is not None and not narrowmatch(f) and f not in self: |
|
23 | 23 | raise error.Abort(_("cannot track '%s' - it is outside " + |
|
24 | 24 | "the narrow clone") % f) |
|
25 | return fn(self, *args) | |
|
25 | return fn(self, *args, **kwargs) | |
|
26 | 26 | return _wrapper |
|
27 | 27 | |
|
28 | 28 | class narrowdirstate(dirstate.__class__): |
|
29 | 29 | # Prevent adding/editing/copying/deleting files that are outside the |
|
30 | 30 | # sparse checkout |
|
31 | 31 | @_editfunc |
|
32 | def normal(self, *args): | |
|
33 | return super(narrowdirstate, self).normal(*args) | |
|
32 | def normal(self, *args, **kwargs): | |
|
33 | return super(narrowdirstate, self).normal(*args, **kwargs) | |
|
34 | 34 | |
|
35 | 35 | @_editfunc |
|
36 | 36 | def add(self, *args): |
@@ -37,7 +37,7 b' def outsidenarrow(context, mapping):' | |||
|
37 | 37 | repo = context.resource(mapping, 'repo') |
|
38 | 38 | ctx = context.resource(mapping, 'ctx') |
|
39 | 39 | m = repo.narrowmatch() |
|
40 | if not m.always(): | |
|
40 | if ctx.files() and not m.always(): | |
|
41 | 41 | if not any(m(f) for f in ctx.files()): |
|
42 | 42 | return 'outsidenarrow' |
|
43 | 43 | return '' |
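The extra ctx.files() guard above matters for commits that touch no files (some merges, for example), which previously satisfied the `not any(...)` test vacuously and were mislabeled. The keyword itself is exercised from templates, e.g.::

    $ hg log -T '{rev} {outsidenarrow}\n'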
@@ -13,12 +13,15 b' from mercurial import (' | |||
|
13 | 13 | extensions, |
|
14 | 14 | hg, |
|
15 | 15 | narrowspec, |
|
16 | node as nodemod, | |
|
16 | 17 | pycompat, |
|
17 | 18 | wireprototypes, |
|
18 | 19 | wireprotov1peer, |
|
19 | 20 | wireprotov1server, |
|
20 | 21 | ) |
|
21 | 22 | |
|
23 | from . import narrowbundle2 | |
|
24 | ||
|
22 | 25 | def uisetup(): |
|
23 | 26 | wireprotov1peer.wirepeer.narrow_widen = peernarrowwiden |
|
24 | 27 | |
@@ -69,21 +72,26 b' def narrow_widen(repo, proto, oldinclude' | |||
|
69 | 72 | narrowspec.validatepatterns(set(newexcludes)) |
|
70 | 73 | |
|
71 | 74 | common = wireprototypes.decodelist(commonheads) |
|
72 | known = None | |
|
73 | if known: | |
|
74 | known = wireprototypes.decodelist(known) | |
|
75 | known = wireprototypes.decodelist(known) | |
|
76 | known = {nodemod.bin(n) for n in known} | |
|
75 | 77 | if ellipses == '0': |
|
76 | 78 | ellipses = False |
|
77 | 79 | else: |
|
78 | 80 | ellipses = bool(ellipses) |
|
79 | 81 | cgversion = cgversion |
|
80 | newmatch = narrowspec.match(repo.root, include=newincludes, | |
|
81 | exclude=newexcludes) | |
|
82 | oldmatch = narrowspec.match(repo.root, include=oldincludes, | |
|
83 | exclude=oldexcludes) | |
|
84 | 82 | |
|
85 | bundler = bundle2.widen_bundle(repo, oldmatch, newmatch, common, known, | |
|
86 | cgversion, ellipses) | |
|
83 | bundler = bundle2.bundle20(repo.ui) | |
|
84 | if not ellipses: | |
|
85 | newmatch = narrowspec.match(repo.root, include=newincludes, | |
|
86 | exclude=newexcludes) | |
|
87 | oldmatch = narrowspec.match(repo.root, include=oldincludes, | |
|
88 | exclude=oldexcludes) | |
|
89 | bundle2.widen_bundle(bundler, repo, oldmatch, newmatch, common, | |
|
90 | known, cgversion, ellipses) | |
|
91 | else: | |
|
92 | narrowbundle2.generateellipsesbundle2(bundler, repo, oldincludes, | |
|
93 | oldexcludes, newincludes, newexcludes, cgversion, common, | |
|
94 | list(common), known, None) | |
|
87 | 95 | except error.Abort as exc: |
|
88 | 96 | bundler = bundle2.bundle20(repo.ui) |
|
89 | 97 | manargs = [('message', pycompat.bytestr(exc))] |
@@ -65,6 +65,7 b' from mercurial import (' | |||
|
65 | 65 | scmutil, |
|
66 | 66 | smartset, |
|
67 | 67 | tags, |
|
68 | templatefilters, | |
|
68 | 69 | templateutil, |
|
69 | 70 | url as urlmod, |
|
70 | 71 | util, |
@@ -124,8 +125,28 b' colortable = {' | |||
|
124 | 125 | )), |
|
125 | 126 | ] |
|
126 | 127 | |
|
127 | def vcrcommand(name, flags, spec, helpcategory=None): | |
|
128 | def vcrcommand(name, flags, spec, helpcategory=None, optionalrepo=False): | |
|
128 | 129 | fullflags = flags + _VCR_FLAGS |
|
130 | def hgmatcher(r1, r2): | |
|
131 | if r1.uri != r2.uri or r1.method != r2.method: | |
|
132 | return False | |
|
133 | r1params = r1.body.split(b'&') | |
|
134 | r2params = r2.body.split(b'&') | |
|
135 | return set(r1params) == set(r2params) | |
|
136 | ||
|
137 | def sanitiserequest(request): | |
|
138 | request.body = re.sub( | |
|
139 | r'cli-[a-z0-9]+', | |
|
140 | r'cli-hahayouwish', | |
|
141 | request.body | |
|
142 | ) | |
|
143 | return request | |
|
144 | ||
|
145 | def sanitiseresponse(response): | |
|
146 | if r'set-cookie' in response[r'headers']: | |
|
147 | del response[r'headers'][r'set-cookie'] | |
|
148 | return response | |
|
149 | ||
|
129 | 150 | def decorate(fn): |
|
130 | 151 | def inner(*args, **kwargs): |
|
131 | 152 | cassette = pycompat.fsdecode(kwargs.pop(r'test_vcr', None)) |
@@ -136,18 +157,22 b' def vcrcommand(name, flags, spec, helpca' | |||
|
136 | 157 | import vcr.stubs as stubs |
|
137 | 158 | vcr = vcrmod.VCR( |
|
138 | 159 | serializer=r'json', |
|
160 | before_record_request=sanitiserequest, | |
|
161 | before_record_response=sanitiseresponse, | |
|
139 | 162 | custom_patches=[ |
|
140 | 163 | (urlmod, r'httpconnection', |
|
141 | 164 | stubs.VCRHTTPConnection), |
|
142 | 165 | (urlmod, r'httpsconnection', |
|
143 | 166 | stubs.VCRHTTPSConnection), |
|
144 | 167 | ]) |
|
145 | with vcr.use_cassette(cassette): | |
|
168 | vcr.register_matcher(r'hgmatcher', hgmatcher) | |
|
169 | with vcr.use_cassette(cassette, match_on=[r'hgmatcher']): | |
|
146 | 170 | return fn(*args, **kwargs) |
|
147 | 171 | return fn(*args, **kwargs) |
|
148 | 172 | inner.__name__ = fn.__name__ |
|
149 | 173 | inner.__doc__ = fn.__doc__ |
|
150 | return command(name, fullflags, spec, helpcategory=helpcategory)(inner) | |
|
174 | return command(name, fullflags, spec, helpcategory=helpcategory, | |
|
175 | optionalrepo=optionalrepo)(inner) | |
|
151 | 176 | return decorate |
|
152 | 177 | |
|
153 | 178 | def urlencodenested(params): |
@@ -174,24 +199,24 b' def urlencodenested(params):' | |||
|
174 | 199 | process(b'', params) |
|
175 | 200 | return util.urlreq.urlencode(flatparams) |
|
176 | 201 | |
|
177 | def readurltoken(repo): | |
|
202 | def readurltoken(ui): | |
|
178 | 203 | """return conduit url, token and make sure they exist |
|
179 | 204 | |
|
180 | 205 | Currently read from [auth] config section. In the future, it might |
|
181 | 206 | make sense to read from .arcconfig and .arcrc as well. |
|
182 | 207 | """ |
|
183 | url = repo.ui.config(b'phabricator', b'url') | |
|
208 | url = ui.config(b'phabricator', b'url') | |
|
184 | 209 | if not url: |
|
185 | 210 | raise error.Abort(_(b'config %s.%s is required') |
|
186 | 211 | % (b'phabricator', b'url')) |
|
187 | 212 | |
|
188 | res = httpconnectionmod.readauthforuri(repo.ui, url, util.url(url).user) | |
|
213 | res = httpconnectionmod.readauthforuri(ui, url, util.url(url).user) | |
|
189 | 214 | token = None |
|
190 | 215 | |
|
191 | 216 | if res: |
|
192 | 217 | group, auth = res |
|
193 | 218 | |
|
194 | repo.ui.debug(b"using auth.%s.* for authentication\n" % group) | |
|
219 | ui.debug(b"using auth.%s.* for authentication\n" % group) | |
|
195 | 220 | |
|
196 | 221 | token = auth.get(b'phabtoken') |
|
197 | 222 | |
@@ -201,15 +226,15 b' def readurltoken(repo):' | |||
|
201 | 226 | |
|
202 | 227 | return url, token |
|
203 | 228 | |
|
204 | def callconduit(repo, name, params): | |
|
229 | def callconduit(ui, name, params): | |
|
205 | 230 | """call Conduit API, params is a dict. return json.loads result, or None""" |
|
206 | host, token = readurltoken(repo) | |
|
231 | host, token = readurltoken(ui) | |
|
207 | 232 | url, authinfo = util.url(b'/'.join([host, b'api', name])).authinfo() |
|
208 | repo.ui.debug(b'Conduit Call: %s %s\n' % (url, pycompat.byterepr(params))) | |
|
233 | ui.debug(b'Conduit Call: %s %s\n' % (url, pycompat.byterepr(params))) | |
|
209 | 234 | params = params.copy() |
|
210 | 235 | params[b'api.token'] = token |
|
211 | 236 | data = urlencodenested(params) |
|
212 | curlcmd = repo.ui.config(b'phabricator', b'curlcmd') | |
|
237 | curlcmd = ui.config(b'phabricator', b'curlcmd') | |
|
213 | 238 | if curlcmd: |
|
214 | 239 | sin, sout = procutil.popen2(b'%s -d @- %s' |
|
215 | 240 | % (curlcmd, procutil.shellquote(url))) |
@@ -217,11 +242,11 b' def callconduit(repo, name, params):' | |||
|
217 | 242 | sin.close() |
|
218 | 243 | body = sout.read() |
|
219 | 244 | else: |
|
220 | urlopener = urlmod.opener(repo.ui, authinfo) | |
|
245 | urlopener = urlmod.opener(ui, authinfo) | |
|
221 | 246 | request = util.urlreq.request(pycompat.strurl(url), data=data) |
|
222 | 247 | with contextlib.closing(urlopener.open(request)) as rsp: |
|
223 | 248 | body = rsp.read() |
|
224 | repo.ui.debug(b'Conduit Response: %s\n' % body) | |
|
249 | ui.debug(b'Conduit Response: %s\n' % body) | |
|
225 | 250 | parsed = pycompat.rapply( |
|
226 | 251 | lambda x: encoding.unitolocal(x) if isinstance(x, pycompat.unicode) |
|
227 | 252 | else x, |
@@ -233,7 +258,7 b' def callconduit(repo, name, params):' | |||
|
233 | 258 | raise error.Abort(msg) |
|
234 | 259 | return parsed[b'result'] |
|
235 | 260 | |
|
236 | @vcrcommand(b'debugcallconduit', [], _(b'METHOD')) | |
|
261 | @vcrcommand(b'debugcallconduit', [], _(b'METHOD'), optionalrepo=True) | |
|
237 | 262 | def debugcallconduit(ui, repo, name): |
|
238 | 263 | """call Conduit API |
|
239 | 264 | |
@@ -250,7 +275,7 b' def debugcallconduit(ui, repo, name):' | |||
|
250 | 275 | # json.dumps only accepts unicode strings |
|
251 | 276 | result = pycompat.rapply(lambda x: |
|
252 | 277 | encoding.unifromlocal(x) if isinstance(x, bytes) else x, |
|
253 | callconduit(repo, name, params) | |
|
278 | callconduit(ui, name, params) | |
|
254 | 279 | ) |
|
255 | 280 | s = json.dumps(result, sort_keys=True, indent=2, separators=(u',', u': ')) |
|
256 | 281 | ui.write(b'%s\n' % encoding.unitolocal(s)) |
@@ -264,7 +289,7 b' def getrepophid(repo):' | |||
|
264 | 289 | callsign = repo.ui.config(b'phabricator', b'callsign') |
|
265 | 290 | if not callsign: |
|
266 | 291 | return None |
|
267 | query = callconduit(repo, b'diffusion.repository.search', | |
|
292 | query = callconduit(repo.ui, b'diffusion.repository.search', | |
|
268 | 293 | {b'constraints': {b'callsigns': [callsign]}}) |
|
269 | 294 | if len(query[b'data']) == 0: |
|
270 | 295 | return None |
@@ -320,7 +345,7 b' def getoldnodedrevmap(repo, nodelist):' | |||
|
320 | 345 | # Phabricator, and expect precursors overlap with it. |
|
321 | 346 | if toconfirm: |
|
322 | 347 | drevs = [drev for force, precs, drev in toconfirm.values()] |
|
323 | alldiffs = callconduit(unfi, b'differential.querydiffs', | |
|
348 | alldiffs = callconduit(unfi.ui, b'differential.querydiffs', | |
|
324 | 349 | {b'revisionIDs': drevs}) |
|
325 | 350 | getnode = lambda d: bin( |
|
326 | 351 | getdiffmeta(d).get(b'node', b'')) or None |
@@ -370,7 +395,7 b' def creatediff(ctx):' | |||
|
370 | 395 | params = {b'diff': getdiff(ctx, mdiff.diffopts(git=True, context=32767))} |
|
371 | 396 | if repophid: |
|
372 | 397 | params[b'repositoryPHID'] = repophid |
|
373 | diff = callconduit(repo, b'differential.createrawdiff', params) | |
|
398 | diff = callconduit(repo.ui, b'differential.createrawdiff', params) | |
|
374 | 399 | if not diff: |
|
375 | 400 | raise error.Abort(_(b'cannot create diff for %s') % ctx) |
|
376 | 401 | return diff |
@@ -380,35 +405,39 b' def writediffproperties(ctx, diff):' | |||
|
380 | 405 | params = { |
|
381 | 406 | b'diff_id': diff[b'id'], |
|
382 | 407 | b'name': b'hg:meta', |
|
383 | b'data': json.dumps({ | |
|
384 | u'user': encoding.unifromlocal(ctx.user()), | |
|
385 | u'date': u'%d %d' % ctx.date(), | |
|
386 | u'node': encoding.unifromlocal(ctx.hex()), | |
|
387 | u'parent': encoding.unifromlocal(ctx.p1().hex()), | |
|
408 | b'data': templatefilters.json({ | |
|
409 | b'user': ctx.user(), | |
|
410 | b'date': b'%d %d' % ctx.date(), | |
|
411 | b'branch': ctx.branch(), | |
|
412 | b'node': ctx.hex(), | |
|
413 | b'parent': ctx.p1().hex(), | |
|
388 | 414 | }), |
|
389 | 415 | } |
|
390 | callconduit(ctx.repo(), b'differential.setdiffproperty', params) | |
|
416 | callconduit(ctx.repo().ui, b'differential.setdiffproperty', params) | |
|
391 | 417 | |
|
392 | 418 | params = { |
|
393 | 419 | b'diff_id': diff[b'id'], |
|
394 | 420 | b'name': b'local:commits', |
|
395 | b'data': json.dumps({ | |
|
396 | encoding.unifromlocal(ctx.hex()): { | |
|
397 | u'author': stringutil.person(encoding.unifromlocal(ctx.user())), | |
|
398 | u'authorEmail': stringutil.email(encoding.unifromlocal( | |
|
399 | ctx.user())), | |
|
400 | u'time': u'%d' % int(ctx.date()[0]), | |
|
421 | b'data': templatefilters.json({ | |
|
422 | ctx.hex(): { | |
|
423 | b'author': stringutil.person(ctx.user()), | |
|
424 | b'authorEmail': stringutil.email(ctx.user()), | |
|
425 | b'time': int(ctx.date()[0]), | |
|
426 | b'commit': ctx.hex(), | |
|
427 | b'parents': [ctx.p1().hex()], | |
|
428 | b'branch': ctx.branch(), | |
|
401 | 429 | }, |
|
402 | 430 | }), |
|
403 | 431 | } |
|
404 | callconduit(ctx.repo(), b'differential.setdiffproperty', params) | |
|
432 | callconduit(ctx.repo().ui, b'differential.setdiffproperty', params) | |
|
405 | 433 | |
|
406 | def createdifferentialrevision(ctx, revid=None, parentrevid=None, oldnode=None, | |
|
407 | olddiff=None, actions=None): | |
|
434 | def createdifferentialrevision(ctx, revid=None, parentrevphid=None, | |
|
435 | oldnode=None, olddiff=None, actions=None, | |
|
436 | comment=None): | |
|
408 | 437 | """create or update a Differential Revision |
|
409 | 438 | |
|
410 | 439 | If revid is None, create a new Differential Revision, otherwise update |
|
411 | revid. If parentrevid is not None, set it as a dependency. | |
|
440 | revid. If parentrevphid is not None, set it as a dependency. | |
|
412 | 441 | |
|
413 | 442 | If oldnode is not None, check if the patch content (without commit message |
|
414 | 443 | and metadata) has changed before creating another diff. |
@@ -427,6 +456,8 b' def createdifferentialrevision(ctx, revi' | |||
|
427 | 456 | if neednewdiff: |
|
428 | 457 | diff = creatediff(ctx) |
|
429 | 458 | transactions.append({b'type': b'update', b'value': diff[b'phid']}) |
|
459 | if comment: | |
|
460 | transactions.append({b'type': b'comment', b'value': comment}) | |
|
430 | 461 | else: |
|
431 | 462 | # Even if we don't need to upload a new diff because the patch content |
|
432 | 463 | # does not change. We might still need to update its metadata so |
@@ -435,21 +466,17 b' def createdifferentialrevision(ctx, revi' | |||
|
435 | 466 | diff = olddiff |
|
436 | 467 | writediffproperties(ctx, diff) |
|
437 | 468 | |
|
438 | # Use a temporary summary to set dependency. There might be better ways but | |
|
439 | # I cannot find them for now. But do not do that if we are updating an | |
|
440 | # existing revision (revid is not None) since that introduces visible | |
|
441 | # churns (someone edited "Summary" twice) on the web page. | |
|
442 | if parentrevid and revid is None: | |
|
443 | summary = b'Depends on D%d' % parentrevid | |
|
444 | transactions += [{b'type': b'summary', b'value': summary}, | |
|
445 | {b'type': b'summary', b'value': b' '}] | |
|
469 | # Set the parent Revision every time, so commit re-ordering is picked-up | |
|
470 | if parentrevphid: | |
|
471 | transactions.append({b'type': b'parents.set', | |
|
472 | b'value': [parentrevphid]}) | |
|
446 | 473 | |
|
447 | 474 | if actions: |
|
448 | 475 | transactions += actions |
|
449 | 476 | |
|
450 | 477 | # Parse commit message and update related fields. |
|
451 | 478 | desc = ctx.description() |
|
452 | info = callconduit(repo, b'differential.parsecommitmessage', | |
|
479 | info = callconduit(repo.ui, b'differential.parsecommitmessage', | |
|
453 | 480 | {b'corpus': desc}) |
|
454 | 481 | for k, v in info[b'fields'].items(): |
|
455 | 482 | if k in [b'title', b'summary', b'testPlan']: |
@@ -460,7 +487,7 b' def createdifferentialrevision(ctx, revi' | |||
|
460 | 487 | # Update an existing Differential Revision |
|
461 | 488 | params[b'objectIdentifier'] = revid |
|
462 | 489 | |
|
463 | revision = callconduit(repo, b'differential.revision.edit', params) | |
|
490 | revision = callconduit(repo.ui, b'differential.revision.edit', params) | |
|
464 | 491 | if not revision: |
|
465 | 492 | raise error.Abort(_(b'cannot create revision for %s') % ctx) |
|
466 | 493 | |
@@ -470,7 +497,7 b' def userphids(repo, names):' | |||
|
470 | 497 | """convert user names to PHIDs""" |
|
471 | 498 | names = [name.lower() for name in names] |
|
472 | 499 | query = {b'constraints': {b'usernames': names}} |
|
473 | result = callconduit(repo, b'user.search', query) | |
|
500 | result = callconduit(repo.ui, b'user.search', query) | |
|
474 | 501 | # username not found is not an error of the API. So check if we have missed |
|
475 | 502 | # some names here. |
|
476 | 503 | data = result[b'data'] |
@@ -485,6 +512,9 b' def userphids(repo, names):' | |||
|
485 | 512 | [(b'r', b'rev', [], _(b'revisions to send'), _(b'REV')), |
|
486 | 513 | (b'', b'amend', True, _(b'update commit messages')), |
|
487 | 514 | (b'', b'reviewer', [], _(b'specify reviewers')), |
|
515 | (b'', b'blocker', [], _(b'specify blocking reviewers')), | |
|
516 | (b'm', b'comment', b'', | |
|
517 | _(b'add a comment to Revisions with new/updated Diffs')), | |
|
488 | 518 | (b'', b'confirm', None, _(b'ask for confirmation before sending'))], |
|
489 | 519 | _(b'REV [OPTIONS]'), |
|
490 | 520 | helpcategory=command.CATEGORY_IMPORT_EXPORT) |
@@ -536,16 +566,23 b' def phabsend(ui, repo, *revs, **opts):' | |||
|
536 | 566 | |
|
537 | 567 | actions = [] |
|
538 | 568 | reviewers = opts.get(b'reviewer', []) |
|
569 | blockers = opts.get(b'blocker', []) | |
|
570 | phids = [] | |
|
539 | 571 | if reviewers: |
|
540 |
phids |
|
|
572 | phids.extend(userphids(repo, reviewers)) | |
|
573 | if blockers: | |
|
574 | phids.extend(map( | |
|
575 | lambda phid: b'blocking(%s)' % phid, userphids(repo, blockers) | |
|
576 | )) | |
|
577 | if phids: | |
|
541 | 578 | actions.append({b'type': b'reviewers.add', b'value': phids}) |
|
542 | 579 | |
|
543 | 580 | drevids = [] # [int] |
|
544 | 581 | diffmap = {} # {newnode: diff} |
|
545 | 582 | |
|
546 | # Send patches one by one so we know their Differential Revision IDs and | |
|
583 | # Send patches one by one so we know their Differential Revision PHIDs and | |
|
547 | 584 | # can provide dependency relationship |
|
548 | lastrevid = None | |
|
585 | lastrevphid = None | |
|
549 | 586 | for rev in revs: |
|
550 | 587 | ui.debug(b'sending rev %d\n' % rev) |
|
551 | 588 | ctx = repo[rev] |
@@ -555,9 +592,11 b' def phabsend(ui, repo, *revs, **opts):' | |||
|
555 | 592 | if oldnode != ctx.node() or opts.get(b'amend'): |
|
556 | 593 | # Create or update Differential Revision |
|
557 | 594 | revision, diff = createdifferentialrevision( |
|
558 |
ctx, revid, lastrevid, oldnode, olddiff, actions |
|
|
595 | ctx, revid, lastrevphid, oldnode, olddiff, actions, | |
|
596 | opts.get(b'comment')) | |
|
559 | 597 | diffmap[ctx.node()] = diff |
|
560 | 598 | newrevid = int(revision[b'object'][b'id']) |
|
599 | newrevphid = revision[b'object'][b'phid'] | |
|
561 | 600 | if revid: |
|
562 | 601 | action = b'updated' |
|
563 | 602 | else: |
@@ -571,8 +610,9 b' def phabsend(ui, repo, *revs, **opts):' | |||
|
571 | 610 | tags.tag(repo, tagname, ctx.node(), message=None, user=None, |
|
572 | 611 | date=None, local=True) |
|
573 | 612 | else: |
|
574 | # Nothing changed. But still set "newrevid" so the next revision | |
|
575 | # could depend on this one. | |
|
613 | # Nothing changed. But still set "newrevphid" so the next revision | |
|
614 | # could depend on this one and "newrevid" for the summary line. | |
|
615 | newrevphid = querydrev(repo, str(revid))[0][b'phid'] | |
|
576 | 616 | newrevid = revid |
|
577 | 617 | action = b'skipped' |
|
578 | 618 | |
@@ -587,12 +627,12 b' def phabsend(ui, repo, *revs, **opts):' | |||
|
587 | 627 | ui.write(_(b'%s - %s - %s: %s\n') % (drevdesc, actiondesc, nodedesc, |
|
588 | 628 | desc)) |
|
589 | 629 | drevids.append(newrevid) |
|
590 | lastrevid = newrevid | |
|
630 | lastrevphid = newrevphid | |
|
591 | 631 | |
|
592 | 632 | # Update commit messages and remove tags |
|
593 | 633 | if opts.get(b'amend'): |
|
594 | 634 | unfi = repo.unfiltered() |
|
595 | drevs = callconduit(repo, b'differential.query', {b'ids': drevids}) | |
|
635 | drevs = callconduit(ui, b'differential.query', {b'ids': drevids}) | |
|
596 | 636 | with repo.wlock(), repo.lock(), repo.transaction(b'phabsend'): |
|
597 | 637 | wnode = unfi[b'.'].node() |
|
598 | 638 | mapping = {} # {oldnode: [newnode]} |
@@ -632,10 +672,11 b' def phabsend(ui, repo, *revs, **opts):' | |||
|
632 | 672 | # Map from "hg:meta" keys to header understood by "hg import". The order is |
|
633 | 673 | # consistent with "hg export" output. |
|
634 | 674 | _metanamemap = util.sortdict([(b'user', b'User'), (b'date', b'Date'), |
|
635 |
(b'node', b'Node ID'), |
|
|
675 | (b'branch', b'Branch'), (b'node', b'Node ID'), | |
|
676 | (b'parent', b'Parent ')]) | |
|
636 | 677 | |
|
637 | 678 | def _confirmbeforesend(repo, revs, oldmap): |
|
638 | url, token = readurltoken(repo) | |
|
679 | url, token = readurltoken(repo.ui) | |
|
639 | 680 | ui = repo.ui |
|
640 | 681 | for rev in revs: |
|
641 | 682 | ctx = repo[rev] |
@@ -777,7 +818,7 b' def querydrev(repo, spec):' | |||
|
777 | 818 | key = (params.get(b'ids') or params.get(b'phids') or [None])[0] |
|
778 | 819 | if key in prefetched: |
|
779 | 820 | return prefetched[key] |
|
780 | drevs = callconduit(repo, b'differential.query', params) | |
|
821 | drevs = callconduit(repo.ui, b'differential.query', params) | |
|
781 | 822 | # Fill prefetched with the result |
|
782 | 823 | for drev in drevs: |
|
783 | 824 | prefetched[drev[b'phid']] = drev |
@@ -901,16 +942,31 b' def getdiffmeta(diff):' | |||
|
901 | 942 | """ |
|
902 | 943 | props = diff.get(b'properties') or {} |
|
903 | 944 | meta = props.get(b'hg:meta') |
|
904 | if not meta and props.get(b'local:commits'): | |
|
905 | commit = sorted(props[b'local:commits'].values())[0] | |
|
906 | meta = { | |
|
907 | b'date': b'%d 0' % commit[b'time'], | |
|
908 | b'node': commit[b'rev'], | |
|
909 | b'user': b'%s <%s>' % (commit[b'author'], commit[b'authorEmail']), | |
|
910 | } | |
|
911 | if len(commit.get(b'parents', ())) >= 1: | |
|
912 | meta[b'parent'] = commit[b'parents'][0] | |
|
913 | return meta or {} | |
|
945 | if not meta: | |
|
946 | if props.get(b'local:commits'): | |
|
947 | commit = sorted(props[b'local:commits'].values())[0] | |
|
948 | meta = {} | |
|
949 | if b'author' in commit and b'authorEmail' in commit: | |
|
950 | meta[b'user'] = b'%s <%s>' % (commit[b'author'], | |
|
951 | commit[b'authorEmail']) | |
|
952 | if b'time' in commit: | |
|
953 | meta[b'date'] = b'%d 0' % int(commit[b'time']) | |
|
954 | if b'branch' in commit: | |
|
955 | meta[b'branch'] = commit[b'branch'] | |
|
956 | node = commit.get(b'commit', commit.get(b'rev')) | |
|
957 | if node: | |
|
958 | meta[b'node'] = node | |
|
959 | if len(commit.get(b'parents', ())) >= 1: | |
|
960 | meta[b'parent'] = commit[b'parents'][0] | |
|
961 | else: | |
|
962 | meta = {} | |
|
963 | if b'date' not in meta and b'dateCreated' in diff: | |
|
964 | meta[b'date'] = b'%s 0' % diff[b'dateCreated'] | |
|
965 | if b'branch' not in meta and diff.get(b'branch'): | |
|
966 | meta[b'branch'] = diff[b'branch'] | |
|
967 | if b'parent' not in meta and diff.get(b'sourceControlBaseRevision'): | |
|
968 | meta[b'parent'] = diff[b'sourceControlBaseRevision'] | |
|
969 | return meta | |
|
914 | 970 | |
|
915 | 971 | def readpatch(repo, drevs, write): |
|
916 | 972 | """generate plain-text patch readable by 'hg import' |
@@ -920,14 +976,14 b' def readpatch(repo, drevs, write):' | |||
|
920 | 976 | """ |
|
921 | 977 | # Prefetch hg:meta property for all diffs |
|
922 | 978 | diffids = sorted(set(max(int(v) for v in drev[b'diffs']) for drev in drevs)) |
|
923 | diffs = callconduit(repo, b'differential.querydiffs', {b'ids': diffids}) | |
|
979 | diffs = callconduit(repo.ui, b'differential.querydiffs', {b'ids': diffids}) | |
|
924 | 980 | |
|
925 | 981 | # Generate patch for each drev |
|
926 | 982 | for drev in drevs: |
|
927 | 983 | repo.ui.note(_(b'reading D%s\n') % drev[b'id']) |
|
928 | 984 | |
|
929 | 985 | diffid = max(int(v) for v in drev[b'diffs']) |
|
930 | body = callconduit(repo, b'differential.getrawdiff', | |
|
986 | body = callconduit(repo.ui, b'differential.getrawdiff', | |
|
931 | 987 | {b'diffID': diffid}) |
|
932 | 988 | desc = getdescfromdrev(drev) |
|
933 | 989 | header = b'# HG changeset patch\n' |
@@ -1001,7 +1057,7 b' def phabupdate(ui, repo, spec, **opts):' | |||
|
1001 | 1057 | if actions: |
|
1002 | 1058 | params = {b'objectIdentifier': drev[b'phid'], |
|
1003 | 1059 | b'transactions': actions} |
|
1004 | callconduit(repo, b'differential.revision.edit', params) | |
|
1060 | callconduit(ui, b'differential.revision.edit', params) | |
|
1005 | 1061 | |
|
1006 | 1062 | templatekeyword = registrar.templatekeyword() |
|
1007 | 1063 |
@@ -108,7 +108,9 b' def _revsetdestrebase(repo, subset, x):' | |||
|
108 | 108 | |
|
109 | 109 | @revsetpredicate('_destautoorphanrebase') |
|
110 | 110 | def _revsetdestautoorphanrebase(repo, subset, x): |
|
111 | """automatic rebase destination for a single orphan revision""" | |
|
111 | # ``_destautoorphanrebase()`` | |
|
112 | ||
|
113 | # automatic rebase destination for a single orphan revision. | |
|
112 | 114 | unfi = repo.unfiltered() |
|
113 | 115 | obsoleted = unfi.revs('obsolete()') |
|
114 | 116 | |
@@ -848,8 +850,9 b' def rebase(ui, repo, **opts):' | |||
|
848 | 850 | singletransaction = True |
|
849 | 851 | |
|
850 | 852 | By default, rebase writes to the working copy, but you can configure it to |
|
851 | run in-memory for better performance. This may also allow it to run even if the | |
|
852 | working copy is dirty:: | |
|
853 | run in-memory for better performance. When the rebase is not moving the | |
|
854 | parent(s) of the working copy (AKA the "currently checked out changesets"), | |
|
855 | this may also allow it to run even if the working copy is dirty:: | |
|
853 | 856 | |
|
854 | 857 | [rebase] |
|
855 | 858 | experimental.inmemory = True |
@@ -1819,7 +1822,7 b' def pullrebase(orig, ui, repo, *args, **' | |||
|
1819 | 1822 | ui.debug('--update and --rebase are not compatible, ignoring ' |
|
1820 | 1823 | 'the update flag\n') |
|
1821 | 1824 | |
|
1822 | cmdutil.checkunfinished(repo) | |
|
1825 | cmdutil.checkunfinished(repo, skipmerge=True) | |
|
1823 | 1826 | cmdutil.bailifchanged(repo, hint=_('cannot pull with rebase: ' |
|
1824 | 1827 | 'please commit or shelve your changes first')) |
|
1825 | 1828 | |
@@ -1920,6 +1923,22 b' def _computeobsoletenotrebased(repo, reb' | |||
|
1920 | 1923 | obsoleteextinctsuccessors, |
|
1921 | 1924 | ) |
|
1922 | 1925 | |
|
1926 | def abortrebase(ui, repo): | |
|
1927 | with repo.wlock(), repo.lock(): | |
|
1928 | rbsrt = rebaseruntime(repo, ui) | |
|
1929 | rbsrt._prepareabortorcontinue(isabort=True) | |
|
1930 | ||
|
1931 | def continuerebase(ui, repo): | |
|
1932 | with repo.wlock(), repo.lock(): | |
|
1933 | rbsrt = rebaseruntime(repo, ui) | |
|
1934 | ms = mergemod.mergestate.read(repo) | |
|
1935 | mergeutil.checkunresolved(ms) | |
|
1936 | retcode = rbsrt._prepareabortorcontinue(isabort=False) | |
|
1937 | if retcode is not None: | |
|
1938 | return retcode | |
|
1939 | rbsrt._performrebase(None) | |
|
1940 | rbsrt._finishrebase() | |
|
1941 | ||
|
1923 | 1942 | def summaryhook(ui, repo): |
|
1924 | 1943 | if not repo.vfs.exists('rebasestate'): |
|
1925 | 1944 | return |
@@ -1947,8 +1966,6 b' def uisetup(ui):' | |||
|
1947 | 1966 | entry[1].append(('t', 'tool', '', |
|
1948 | 1967 | _("specify merge tool for rebase"))) |
|
1949 | 1968 | cmdutil.summaryhooks.add('rebase', summaryhook) |
|
1950 | cmdutil.unfinishedstates.append( | |
|
1951 | ['rebasestate', False, False, _('rebase in progress'), | |
|
1952 | _("use 'hg rebase --continue' or 'hg rebase --abort'")]) | |
|
1953 | cmdutil.afterresolvedstates.append( | |
|
1954 | ['rebasestate', _('hg rebase --continue')]) | |
|
1969 | statemod.addunfinished('rebase', fname='rebasestate', stopflag=True, | |
|
1970 | continueflag=True, abortfunc=abortrebase, | |
|
1971 | continuefunc=continuerebase) |
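
Editor's note: the registration above replaces the old positional cmdutil.unfinishedstates entries with a single statemod.addunfinished() call that also carries abort/continue callbacks. Below is a minimal, self-contained sketch of such a registry, for illustration only; the field names mirror the call above, not Mercurial's real state module.

# Hypothetical mini-registry mirroring the statemod.addunfinished() call
# above; a sketch, not Mercurial's actual implementation.
_unfinishedstates = []

def addunfinished(opname, fname, stopflag=False, continueflag=False,
                  clearable=False, abortfunc=None, continuefunc=None,
                  statushint=None, cmdhint=None):
    _unfinishedstates.append(dict(
        opname=opname,              # e.g. 'rebase'
        fname=fname,                # marker file, e.g. 'rebasestate'
        stopflag=stopflag,          # operation supports --stop
        continueflag=continueflag,  # operation supports --continue
        clearable=clearable,        # e.g. 'hg update' may clear it
        abortfunc=abortfunc,
        continuefunc=continuefunc,
        statushint=statushint,
        cmdhint=cmdhint,
    ))

def findunfinished(existingfiles):
    """Return the first registered state whose marker file is present."""
    for state in _unfinishedstates:
        if state['fname'] in existingfiles:
            return state
    return None
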
@@ -293,6 +293,35 b' def uisetup(ui):' | |||
|
293 | 293 | # debugdata needs remotefilelog.len to work |
|
294 | 294 | extensions.wrapcommand(commands.table, 'debugdata', debugdatashallow) |
|
295 | 295 | |
|
296 | changegroup.cgpacker = shallowbundle.shallowcg1packer | |
|
297 | ||
|
298 | extensions.wrapfunction(changegroup, '_addchangegroupfiles', | |
|
299 | shallowbundle.addchangegroupfiles) | |
|
300 | extensions.wrapfunction( | |
|
301 | changegroup, 'makechangegroup', shallowbundle.makechangegroup) | |
|
302 | extensions.wrapfunction(localrepo, 'makestore', storewrapper) | |
|
303 | extensions.wrapfunction(exchange, 'pull', exchangepull) | |
|
304 | extensions.wrapfunction(merge, 'applyupdates', applyupdates) | |
|
305 | extensions.wrapfunction(merge, '_checkunknownfiles', checkunknownfiles) | |
|
306 | extensions.wrapfunction(context.workingctx, '_checklookup', checklookup) | |
|
307 | extensions.wrapfunction(scmutil, '_findrenames', findrenames) | |
|
308 | extensions.wrapfunction(copies, '_computeforwardmissing', | |
|
309 | computeforwardmissing) | |
|
310 | extensions.wrapfunction(dispatch, 'runcommand', runcommand) | |
|
311 | extensions.wrapfunction(repair, '_collectbrokencsets', _collectbrokencsets) | |
|
312 | extensions.wrapfunction(context.changectx, 'filectx', filectx) | |
|
313 | extensions.wrapfunction(context.workingctx, 'filectx', workingfilectx) | |
|
314 | extensions.wrapfunction(patch, 'trydiff', trydiff) | |
|
315 | extensions.wrapfunction(hg, 'verify', _verify) | |
|
316 | scmutil.fileprefetchhooks.add('remotefilelog', _fileprefetchhook) | |
|
317 | ||
|
318 | # disappointing hacks below | |
|
319 | extensions.wrapfunction(scmutil, 'getrenamedfn', getrenamedfn) | |
|
320 | extensions.wrapfunction(revset, 'filelog', filelogrevset) | |
|
321 | revset.symbols['filelog'] = revset.filelog | |
|
322 | extensions.wrapfunction(cmdutil, 'walkfilerevs', walkfilerevs) | |
|
323 | ||
|
324 | ||
|
296 | 325 | def cloneshallow(orig, ui, repo, *args, **opts): |
|
297 | 326 | if opts.get(r'shallow'): |
|
298 | 327 | repos = [] |
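
Editor's note: everything moved into uisetup() here relies on one idiom: extensions.wrapfunction() swaps an attribute for a wrapper that receives the original callable as its first argument (the `orig` parameter seen throughout these hunks). A standalone re-implementation of that idiom, with assumed semantics:

import functools

def wrapfunction(container, funcname, wrapper):
    # Replace container.funcname with a closure that forwards to the
    # wrapper, passing the original function first.
    origfn = getattr(container, funcname)
    @functools.wraps(origfn)
    def wrap(*args, **kwargs):
        return wrapper(origfn, *args, **kwargs)
    setattr(container, funcname, wrap)
    return origfn

class _demo(object):
    @staticmethod
    def greet(name):
        return 'hello %s' % name

def shouting(orig, name):
    # A wrapper in the same style as the ones registered above.
    return orig(name).upper()

wrapfunction(_demo, 'greet', shouting)
assert _demo.greet('hg') == 'HELLO HG'
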
@@ -405,6 +434,158 b' def setupclient(ui, repo):' | |||
|
405 | 434 | shallowrepo.wraprepo(repo) |
|
406 | 435 | repo.store = shallowstore.wrapstore(repo.store) |
|
407 | 436 | |
|
437 | def storewrapper(orig, requirements, path, vfstype): | |
|
438 | s = orig(requirements, path, vfstype) | |
|
439 | if constants.SHALLOWREPO_REQUIREMENT in requirements: | |
|
440 | s = shallowstore.wrapstore(s) | |
|
441 | ||
|
442 | return s | |
|
443 | ||
|
444 | # prefetch files before update | |
|
445 | def applyupdates(orig, repo, actions, wctx, mctx, overwrite, wantfiledata, | |
|
446 | labels=None): | |
|
447 | if isenabled(repo): | |
|
448 | manifest = mctx.manifest() | |
|
449 | files = [] | |
|
450 | for f, args, msg in actions['g']: | |
|
451 | files.append((f, hex(manifest[f]))) | |
|
452 | # batch fetch the needed files from the server | |
|
453 | repo.fileservice.prefetch(files) | |
|
454 | return orig(repo, actions, wctx, mctx, overwrite, wantfiledata, | |
|
455 | labels=labels) | |
|
456 | ||
|
457 | # Prefetch merge checkunknownfiles | |
|
458 | def checkunknownfiles(orig, repo, wctx, mctx, force, actions, | |
|
459 | *args, **kwargs): | |
|
460 | if isenabled(repo): | |
|
461 | files = [] | |
|
462 | sparsematch = repo.maybesparsematch(mctx.rev()) | |
|
463 | for f, (m, actionargs, msg) in actions.iteritems(): | |
|
464 | if sparsematch and not sparsematch(f): | |
|
465 | continue | |
|
466 | if m in ('c', 'dc', 'cm'): | |
|
467 | files.append((f, hex(mctx.filenode(f)))) | |
|
468 | elif m == 'dg': | |
|
469 | f2 = actionargs[0] | |
|
470 | files.append((f2, hex(mctx.filenode(f2)))) | |
|
471 | # batch fetch the needed files from the server | |
|
472 | repo.fileservice.prefetch(files) | |
|
473 | return orig(repo, wctx, mctx, force, actions, *args, **kwargs) | |
|
474 | ||
|
475 | # Prefetch files before status attempts to look at their size and contents | |
|
476 | def checklookup(orig, self, files): | |
|
477 | repo = self._repo | |
|
478 | if isenabled(repo): | |
|
479 | prefetchfiles = [] | |
|
480 | for parent in self._parents: | |
|
481 | for f in files: | |
|
482 | if f in parent: | |
|
483 | prefetchfiles.append((f, hex(parent.filenode(f)))) | |
|
484 | # batch fetch the needed files from the server | |
|
485 | repo.fileservice.prefetch(prefetchfiles) | |
|
486 | return orig(self, files) | |
|
487 | ||
|
488 | # Prefetch the logic that compares added and removed files for renames | |
|
489 | def findrenames(orig, repo, matcher, added, removed, *args, **kwargs): | |
|
490 | if isenabled(repo): | |
|
491 | files = [] | |
|
492 | pmf = repo['.'].manifest() | |
|
493 | for f in removed: | |
|
494 | if f in pmf: | |
|
495 | files.append((f, hex(pmf[f]))) | |
|
496 | # batch fetch the needed files from the server | |
|
497 | repo.fileservice.prefetch(files) | |
|
498 | return orig(repo, matcher, added, removed, *args, **kwargs) | |
|
499 | ||
|
500 | # prefetch files before pathcopies check | |
|
501 | def computeforwardmissing(orig, a, b, match=None): | |
|
502 | missing = orig(a, b, match=match) | |
|
503 | repo = a._repo | |
|
504 | if isenabled(repo): | |
|
505 | mb = b.manifest() | |
|
506 | ||
|
507 | files = [] | |
|
508 | sparsematch = repo.maybesparsematch(b.rev()) | |
|
509 | if sparsematch: | |
|
510 | sparsemissing = set() | |
|
511 | for f in missing: | |
|
512 | if sparsematch(f): | |
|
513 | files.append((f, hex(mb[f]))) | |
|
514 | sparsemissing.add(f) | |
|
515 | missing = sparsemissing | |
|
516 | ||
|
517 | # batch fetch the needed files from the server | |
|
518 | repo.fileservice.prefetch(files) | |
|
519 | return missing | |
|
520 | ||
|
521 | # close cache miss server connection after the command has finished | |
|
522 | def runcommand(orig, lui, repo, *args, **kwargs): | |
|
523 | fileservice = None | |
|
524 | # repo can be None when running in chg: | |
|
525 | # - at startup, reposetup was called because serve is not norepo | |
|
526 | # - a norepo command like "help" is called | |
|
527 | if repo and isenabled(repo): | |
|
528 | fileservice = repo.fileservice | |
|
529 | try: | |
|
530 | return orig(lui, repo, *args, **kwargs) | |
|
531 | finally: | |
|
532 | if fileservice: | |
|
533 | fileservice.close() | |
|
534 | ||
|
535 | # prevent strip from stripping remotefilelogs | |
|
536 | def _collectbrokencsets(orig, repo, files, striprev): | |
|
537 | if isenabled(repo): | |
|
538 | files = list([f for f in files if not repo.shallowmatch(f)]) | |
|
539 | return orig(repo, files, striprev) | |
|
540 | ||
|
541 | # changectx wrappers | |
|
542 | def filectx(orig, self, path, fileid=None, filelog=None): | |
|
543 | if fileid is None: | |
|
544 | fileid = self.filenode(path) | |
|
545 | if (isenabled(self._repo) and self._repo.shallowmatch(path)): | |
|
546 | return remotefilectx.remotefilectx(self._repo, path, fileid=fileid, | |
|
547 | changectx=self, filelog=filelog) | |
|
548 | return orig(self, path, fileid=fileid, filelog=filelog) | |
|
549 | ||
|
550 | def workingfilectx(orig, self, path, filelog=None): | |
|
551 | if (isenabled(self._repo) and self._repo.shallowmatch(path)): | |
|
552 | return remotefilectx.remoteworkingfilectx(self._repo, path, | |
|
553 | workingctx=self, | |
|
554 | filelog=filelog) | |
|
555 | return orig(self, path, filelog=filelog) | |
|
556 | ||
|
557 | # prefetch required revisions before a diff | |
|
558 | def trydiff(orig, repo, revs, ctx1, ctx2, modified, added, removed, | |
|
559 | copy, getfilectx, *args, **kwargs): | |
|
560 | if isenabled(repo): | |
|
561 | prefetch = [] | |
|
562 | mf1 = ctx1.manifest() | |
|
563 | for fname in modified + added + removed: | |
|
564 | if fname in mf1: | |
|
565 | fnode = getfilectx(fname, ctx1).filenode() | |
|
566 | # fnode can be None if it's a edited working ctx file | |
|
567 | if fnode: | |
|
568 | prefetch.append((fname, hex(fnode))) | |
|
569 | if fname not in removed: | |
|
570 | fnode = getfilectx(fname, ctx2).filenode() | |
|
571 | if fnode: | |
|
572 | prefetch.append((fname, hex(fnode))) | |
|
573 | ||
|
574 | repo.fileservice.prefetch(prefetch) | |
|
575 | ||
|
576 | return orig(repo, revs, ctx1, ctx2, modified, added, removed, copy, | |
|
577 | getfilectx, *args, **kwargs) | |
|
578 | ||
|
579 | # Prevent verify from processing files | |
|
580 | # a stub for mercurial.hg.verify() | |
|
581 | def _verify(orig, repo, level=None): | |
|
582 | lock = repo.lock() | |
|
583 | try: | |
|
584 | return shallowverifier.shallowverifier(repo).verify() | |
|
585 | finally: | |
|
586 | lock.release() | |
|
587 | ||
|
588 | ||
|
408 | 589 | clientonetime = False |
|
409 | 590 | def onetimeclientsetup(ui): |
|
410 | 591 | global clientonetime |
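
Editor's note: all the prefetch wrappers added above share one template: collect (path, hex(filenode)) pairs relevant to the operation, issue a single batched repo.fileservice.prefetch() call, then fall through to orig. A generic sketch of that template; `fileservice` and `manifest` are stand-ins, and only a prefetch(files) method is assumed:

from binascii import hexlify as hex  # stand-in for mercurial.node.hex

def prefetch_then_call(fileservice, manifest, paths, orig, *args, **kwargs):
    # Gather every (path, hex-node) pair the wrapped operation is about
    # to read, then warm the cache with one batched request instead of
    # one server round-trip per file.
    files = [(p, hex(manifest[p])) for p in paths if p in manifest]
    fileservice.prefetch(files)
    return orig(*args, **kwargs)
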
@@ -412,163 +593,6 b' def onetimeclientsetup(ui):' | |||
|
412 | 593 | return |
|
413 | 594 | clientonetime = True |
|
414 | 595 | |
|
415 | changegroup.cgpacker = shallowbundle.shallowcg1packer | |
|
416 | ||
|
417 | extensions.wrapfunction(changegroup, '_addchangegroupfiles', | |
|
418 | shallowbundle.addchangegroupfiles) | |
|
419 | extensions.wrapfunction( | |
|
420 | changegroup, 'makechangegroup', shallowbundle.makechangegroup) | |
|
421 | ||
|
422 | def storewrapper(orig, requirements, path, vfstype): | |
|
423 | s = orig(requirements, path, vfstype) | |
|
424 | if constants.SHALLOWREPO_REQUIREMENT in requirements: | |
|
425 | s = shallowstore.wrapstore(s) | |
|
426 | ||
|
427 | return s | |
|
428 | extensions.wrapfunction(localrepo, 'makestore', storewrapper) | |
|
429 | ||
|
430 | extensions.wrapfunction(exchange, 'pull', exchangepull) | |
|
431 | ||
|
432 | # prefetch files before update | |
|
433 | def applyupdates(orig, repo, actions, wctx, mctx, overwrite, labels=None): | |
|
434 | if isenabled(repo): | |
|
435 | manifest = mctx.manifest() | |
|
436 | files = [] | |
|
437 | for f, args, msg in actions['g']: | |
|
438 | files.append((f, hex(manifest[f]))) | |
|
439 | # batch fetch the needed files from the server | |
|
440 | repo.fileservice.prefetch(files) | |
|
441 | return orig(repo, actions, wctx, mctx, overwrite, labels=labels) | |
|
442 | extensions.wrapfunction(merge, 'applyupdates', applyupdates) | |
|
443 | ||
|
444 | # Prefetch merge checkunknownfiles | |
|
445 | def checkunknownfiles(orig, repo, wctx, mctx, force, actions, | |
|
446 | *args, **kwargs): | |
|
447 | if isenabled(repo): | |
|
448 | files = [] | |
|
449 | sparsematch = repo.maybesparsematch(mctx.rev()) | |
|
450 | for f, (m, actionargs, msg) in actions.iteritems(): | |
|
451 | if sparsematch and not sparsematch(f): | |
|
452 | continue | |
|
453 | if m in ('c', 'dc', 'cm'): | |
|
454 | files.append((f, hex(mctx.filenode(f)))) | |
|
455 | elif m == 'dg': | |
|
456 | f2 = actionargs[0] | |
|
457 | files.append((f2, hex(mctx.filenode(f2)))) | |
|
458 | # batch fetch the needed files from the server | |
|
459 | repo.fileservice.prefetch(files) | |
|
460 | return orig(repo, wctx, mctx, force, actions, *args, **kwargs) | |
|
461 | extensions.wrapfunction(merge, '_checkunknownfiles', checkunknownfiles) | |
|
462 | ||
|
463 | # Prefetch files before status attempts to look at their size and contents | |
|
464 | def checklookup(orig, self, files): | |
|
465 | repo = self._repo | |
|
466 | if isenabled(repo): | |
|
467 | prefetchfiles = [] | |
|
468 | for parent in self._parents: | |
|
469 | for f in files: | |
|
470 | if f in parent: | |
|
471 | prefetchfiles.append((f, hex(parent.filenode(f)))) | |
|
472 | # batch fetch the needed files from the server | |
|
473 | repo.fileservice.prefetch(prefetchfiles) | |
|
474 | return orig(self, files) | |
|
475 | extensions.wrapfunction(context.workingctx, '_checklookup', checklookup) | |
|
476 | ||
|
477 | # Prefetch the logic that compares added and removed files for renames | |
|
478 | def findrenames(orig, repo, matcher, added, removed, *args, **kwargs): | |
|
479 | if isenabled(repo): | |
|
480 | files = [] | |
|
481 | pmf = repo['.'].manifest() | |
|
482 | for f in removed: | |
|
483 | if f in pmf: | |
|
484 | files.append((f, hex(pmf[f]))) | |
|
485 | # batch fetch the needed files from the server | |
|
486 | repo.fileservice.prefetch(files) | |
|
487 | return orig(repo, matcher, added, removed, *args, **kwargs) | |
|
488 | extensions.wrapfunction(scmutil, '_findrenames', findrenames) | |
|
489 | ||
|
490 | # prefetch files before mergecopies check | |
|
491 | def computenonoverlap(orig, repo, c1, c2, *args, **kwargs): | |
|
492 | u1, u2 = orig(repo, c1, c2, *args, **kwargs) | |
|
493 | if isenabled(repo): | |
|
494 | m1 = c1.manifest() | |
|
495 | m2 = c2.manifest() | |
|
496 | files = [] | |
|
497 | ||
|
498 | sparsematch1 = repo.maybesparsematch(c1.rev()) | |
|
499 | if sparsematch1: | |
|
500 | sparseu1 = set() | |
|
501 | for f in u1: | |
|
502 | if sparsematch1(f): | |
|
503 | files.append((f, hex(m1[f]))) | |
|
504 | sparseu1.add(f) | |
|
505 | u1 = sparseu1 | |
|
506 | ||
|
507 | sparsematch2 = repo.maybesparsematch(c2.rev()) | |
|
508 | if sparsematch2: | |
|
509 | sparseu2 = set() | |
|
510 | for f in u2: | |
|
511 | if sparsematch2(f): | |
|
512 | files.append((f, hex(m2[f]))) | |
|
513 | sparseu2.add(f) | |
|
514 | u2 = sparseu2 | |
|
515 | ||
|
516 | # batch fetch the needed files from the server | |
|
517 | repo.fileservice.prefetch(files) | |
|
518 | return u1, u2 | |
|
519 | extensions.wrapfunction(copies, '_computenonoverlap', computenonoverlap) | |
|
520 | ||
|
521 | # prefetch files before pathcopies check | |
|
522 | def computeforwardmissing(orig, a, b, match=None): | |
|
523 | missing = orig(a, b, match=match) | |
|
524 | repo = a._repo | |
|
525 | if isenabled(repo): | |
|
526 | mb = b.manifest() | |
|
527 | ||
|
528 | files = [] | |
|
529 | sparsematch = repo.maybesparsematch(b.rev()) | |
|
530 | if sparsematch: | |
|
531 | sparsemissing = set() | |
|
532 | for f in missing: | |
|
533 | if sparsematch(f): | |
|
534 | files.append((f, hex(mb[f]))) | |
|
535 | sparsemissing.add(f) | |
|
536 | missing = sparsemissing | |
|
537 | ||
|
538 | # batch fetch the needed files from the server | |
|
539 | repo.fileservice.prefetch(files) | |
|
540 | return missing | |
|
541 | extensions.wrapfunction(copies, '_computeforwardmissing', | |
|
542 | computeforwardmissing) | |
|
543 | ||
|
544 | # close cache miss server connection after the command has finished | |
|
545 | def runcommand(orig, lui, repo, *args, **kwargs): | |
|
546 | fileservice = None | |
|
547 | # repo can be None when running in chg: | |
|
548 | # - at startup, reposetup was called because serve is not norepo | |
|
549 | # - a norepo command like "help" is called | |
|
550 | if repo and isenabled(repo): | |
|
551 | fileservice = repo.fileservice | |
|
552 | try: | |
|
553 | return orig(lui, repo, *args, **kwargs) | |
|
554 | finally: | |
|
555 | if fileservice: | |
|
556 | fileservice.close() | |
|
557 | extensions.wrapfunction(dispatch, 'runcommand', runcommand) | |
|
558 | ||
|
559 | # disappointing hacks below | |
|
560 | scmutil.getrenamedfn = getrenamedfn | |
|
561 | extensions.wrapfunction(revset, 'filelog', filelogrevset) | |
|
562 | revset.symbols['filelog'] = revset.filelog | |
|
563 | extensions.wrapfunction(cmdutil, 'walkfilerevs', walkfilerevs) | |
|
564 | ||
|
565 | # prevent strip from stripping remotefilelogs | |
|
566 | def _collectbrokencsets(orig, repo, files, striprev): | |
|
567 | if isenabled(repo): | |
|
568 | files = list([f for f in files if not repo.shallowmatch(f)]) | |
|
569 | return orig(repo, files, striprev) | |
|
570 | extensions.wrapfunction(repair, '_collectbrokencsets', _collectbrokencsets) | |
|
571 | ||
|
572 | 596 | # Don't commit filelogs until we know the commit hash, since the hash |
|
573 | 597 | # is present in the filelog blob. |
|
574 | 598 | # This violates Mercurial's filelog->manifest->changelog write order, |
@@ -611,60 +635,10 b' def onetimeclientsetup(ui):' | |||
|
611 | 635 | return node |
|
612 | 636 | extensions.wrapfunction(changelog.changelog, 'add', changelogadd) |
|
613 | 637 | |
|
614 | # changectx wrappers | |
|
615 | def filectx(orig, self, path, fileid=None, filelog=None): | |
|
616 | if fileid is None: | |
|
617 | fileid = self.filenode(path) | |
|
618 | if (isenabled(self._repo) and self._repo.shallowmatch(path)): | |
|
619 | return remotefilectx.remotefilectx(self._repo, path, | |
|
620 | fileid=fileid, changectx=self, filelog=filelog) | |
|
621 | return orig(self, path, fileid=fileid, filelog=filelog) | |
|
622 | extensions.wrapfunction(context.changectx, 'filectx', filectx) | |
|
623 | ||
|
624 | def workingfilectx(orig, self, path, filelog=None): | |
|
625 | if (isenabled(self._repo) and self._repo.shallowmatch(path)): | |
|
626 | return remotefilectx.remoteworkingfilectx(self._repo, | |
|
627 | path, workingctx=self, filelog=filelog) | |
|
628 | return orig(self, path, filelog=filelog) | |
|
629 | extensions.wrapfunction(context.workingctx, 'filectx', workingfilectx) | |
|
638 | def getrenamedfn(orig, repo, endrev=None): | |
|
639 | if not isenabled(repo) or copies.usechangesetcentricalgo(repo): | |
|
640 | return orig(repo, endrev) | |
|
630 | 641 | |
|
631 | # prefetch required revisions before a diff | |
|
632 | def trydiff(orig, repo, revs, ctx1, ctx2, modified, added, removed, | |
|
633 | copy, getfilectx, *args, **kwargs): | |
|
634 | if isenabled(repo): | |
|
635 | prefetch = [] | |
|
636 | mf1 = ctx1.manifest() | |
|
637 | for fname in modified + added + removed: | |
|
638 | if fname in mf1: | |
|
639 | fnode = getfilectx(fname, ctx1).filenode() | |
|
640 | # fnode can be None if it's a edited working ctx file | |
|
641 | if fnode: | |
|
642 | prefetch.append((fname, hex(fnode))) | |
|
643 | if fname not in removed: | |
|
644 | fnode = getfilectx(fname, ctx2).filenode() | |
|
645 | if fnode: | |
|
646 | prefetch.append((fname, hex(fnode))) | |
|
647 | ||
|
648 | repo.fileservice.prefetch(prefetch) | |
|
649 | ||
|
650 | return orig(repo, revs, ctx1, ctx2, modified, added, removed, | |
|
651 | copy, getfilectx, *args, **kwargs) | |
|
652 | extensions.wrapfunction(patch, 'trydiff', trydiff) | |
|
653 | ||
|
654 | # Prevent verify from processing files | |
|
655 | # a stub for mercurial.hg.verify() | |
|
656 | def _verify(orig, repo): | |
|
657 | lock = repo.lock() | |
|
658 | try: | |
|
659 | return shallowverifier.shallowverifier(repo).verify() | |
|
660 | finally: | |
|
661 | lock.release() | |
|
662 | ||
|
663 | extensions.wrapfunction(hg, 'verify', _verify) | |
|
664 | ||
|
665 | scmutil.fileprefetchhooks.add('remotefilelog', _fileprefetchhook) | |
|
666 | ||
|
667 | def getrenamedfn(repo, endrev=None): | |
|
668 | 642 | rcache = {} |
|
669 | 643 | |
|
670 | 644 | def getrenamed(fn, rev): |
@@ -1019,9 +993,6 b' def _fileprefetchhook(repo, revs, match)' | |||
|
1019 | 993 | mf = ctx.manifest() |
|
1020 | 994 | sparsematch = repo.maybesparsematch(ctx.rev()) |
|
1021 | 995 | for path in ctx.walk(match): |
|
1022 | if path.endswith('/'): | |
|
1023 | # Tree manifest that's being excluded as part of narrow | |
|
1024 | continue | |
|
1025 | 996 | if (not sparsematch or sparsematch(path)) and path in mf: |
|
1026 | 997 | allfiles.append((path, hex(mf[path]))) |
|
1027 | 998 | repo.fileservice.prefetch(allfiles) |
@@ -396,6 +396,9 b' class fileserverclient(object):' | |||
|
396 | 396 | batchdefault = 10 |
|
397 | 397 | batchsize = self.ui.configint( |
|
398 | 398 | 'remotefilelog', 'batchsize', batchdefault) |
|
399 | self.ui.debug( | |
|
400 | b'requesting %d files from ' | |
|
401 | b'remotefilelog server...\n' % len(missed)) | |
|
399 | 402 | _getfilesbatch( |
|
400 | 403 | remote, self.receivemissing, progress.increment, |
|
401 | 404 | missed, idmap, batchsize) |
@@ -43,7 +43,8 b' def backgroundrepack(repo, incremental=T' | |||
|
43 | 43 | if packsonly: |
|
44 | 44 | cmd.append('--packsonly') |
|
45 | 45 | repo.ui.warn(msg) |
|
46 | procutil.runbgcommand(cmd, encoding.environ) | |
|
46 | # We know this command will find a binary, so don't block on it starting. | |
|
47 | procutil.runbgcommand(cmd, encoding.environ, ensurestart=False) | |
|
47 | 48 | |
|
48 | 49 | def fullrepack(repo, options=None): |
|
49 | 50 | """If ``packsonly`` is True, stores creating only loose objects are skipped. |
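
Editor's note: passing ensurestart=False trades a startup guarantee for latency: the caller no longer blocks until the spawned process is known to have started, which is acceptable here because the binary being launched is hg itself. A rough stdlib analogue; the real procutil.runbgcommand also detaches the child, so this sketch only shows the blocking/non-blocking distinction:

import subprocess

def runbgcommand(cmd, env, ensurestart=True):
    # Launch cmd in the background, discarding its output.
    proc = subprocess.Popen(cmd, env=env,
                            stdout=subprocess.DEVNULL,
                            stderr=subprocess.DEVNULL)
    if ensurestart:
        # Cheap stand-in for "wait until the child is past exec()":
        # fail fast if it already died with a spawn-time error.
        if proc.poll() not in (None, 0):
            raise RuntimeError('background command failed to start')
    return proc
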
@@ -33,13 +33,6 b' from . import (' | |||
|
33 | 33 | shallowutil, |
|
34 | 34 | ) |
|
35 | 35 | |
|
36 | if util.safehasattr(util, '_hgexecutable'): | |
|
37 | # Before 5be286db | |
|
38 | _hgexecutable = util.hgexecutable | |
|
39 | else: | |
|
40 | from mercurial.utils import procutil | |
|
41 | _hgexecutable = procutil.hgexecutable | |
|
42 | ||
|
43 | 36 | # These make*stores functions are global so that other extensions can replace |
|
44 | 37 | # them. |
|
45 | 38 | def makelocalstores(repo): |
@@ -168,7 +161,7 b' def wraprepo(repo):' | |||
|
168 | 161 | **kwargs) |
|
169 | 162 | |
|
170 | 163 | @localrepo.unfilteredmethod |
|
171 | def commitctx(self, ctx, error=False): | |
|
164 | def commitctx(self, ctx, error=False, origctx=None): | |
|
172 | 165 | """Add a new revision to current repository. |
|
173 | 166 | Revision information is passed via the context argument. |
|
174 | 167 | """ |
@@ -186,18 +179,21 b' def wraprepo(repo):' | |||
|
186 | 179 | files.append((f, hex(fparent1))) |
|
187 | 180 | self.fileservice.prefetch(files) |
|
188 | 181 | return super(shallowrepository, self).commitctx(ctx, |
|
189 | error=error) | |
|
182 | error=error, | |
|
183 | origctx=origctx) | |
|
190 | 184 | |
|
191 | 185 | def backgroundprefetch(self, revs, base=None, repack=False, pats=None, |
|
192 | 186 | opts=None): |
|
193 | 187 | """Runs prefetch in background with optional repack |
|
194 | 188 | """ |
|
195 | cmd = [_hgexecutable(), '-R', repo.origroot, 'prefetch'] | |
|
189 | cmd = [procutil.hgexecutable(), '-R', repo.origroot, 'prefetch'] | |
|
196 | 190 | if repack: |
|
197 | 191 | cmd.append('--repack') |
|
198 | 192 | if revs: |
|
199 | 193 | cmd += ['-r', revs] |
|
200 | procutil.runbgcommand(cmd, encoding.environ) | |
|
194 | # We know this command will find a binary, so don't block | |
|
195 | # on it starting. | |
|
196 | procutil.runbgcommand(cmd, encoding.environ, ensurestart=False) | |
|
201 | 197 | |
|
202 | 198 | def prefetch(self, revs, base=None, pats=None, opts=None): |
|
203 | 199 | """Prefetches all the necessary file revisions for the given revs |
@@ -167,6 +167,8 b' class lazyremotenamedict(mutablemapping)' | |||
|
167 | 167 | for k, vtup in self.potentialentries.iteritems(): |
|
168 | 168 | yield (k, [bin(vtup[0])]) |
|
169 | 169 | |
|
170 | items = iteritems | |
|
171 | ||
|
170 | 172 | class remotenames(object): |
|
171 | 173 | """ |
|
172 | 174 | This class encapsulates all the remotenames state. It also contains |
@@ -125,6 +125,10 b' def extsetup(ui):' | |||
|
125 | 125 | |
|
126 | 126 | def _hassharedbookmarks(repo): |
|
127 | 127 | """Returns whether this repo has shared bookmarks""" |
|
128 | if bookmarks.bookmarksinstore(repo): | |
|
129 | # Kind of a lie, but it means that we skip our custom reads and writes | |
|
130 | # from/to the source repo. | |
|
131 | return False | |
|
128 | 132 | try: |
|
129 | 133 | shared = repo.vfs.read('shared').splitlines() |
|
130 | 134 | except IOError as inst: |
@@ -460,8 +460,8 b' def _updatedocstring():' | |||
|
460 | 460 | longest = max(map(len, showview._table.keys())) |
|
461 | 461 | entries = [] |
|
462 | 462 | for key in sorted(showview._table.keys()): |
|
463 | entries.append(' %s %s' % ( | |
|
464 | key.ljust(longest), showview._table[key]._origdoc)) | |
|
463 | entries.append(r' %s %s' % ( | |
|
464 | pycompat.sysstr(key.ljust(longest)), showview._table[key]._origdoc)) | |
|
465 | 465 | |
|
466 | 466 | cmdtable['show'][0].__doc__ = pycompat.sysstr('%s\n\n%s\n ') % ( |
|
467 | 467 | cmdtable['show'][0].__doc__.rstrip(), |
@@ -228,7 +228,7 b' def _setupdirstate(ui):' | |||
|
228 | 228 | hint = _('include file with `hg debugsparse --include <pattern>` or use ' + |
|
229 | 229 | '`hg add -s <file>` to include file directory while adding') |
|
230 | 230 | for func in editfuncs: |
|
231 | def _wrapper(orig, self, *args): | |
|
231 | def _wrapper(orig, self, *args, **kwargs): | |
|
232 | 232 | sparsematch = self._sparsematcher |
|
233 | 233 | if not sparsematch.always(): |
|
234 | 234 | for f in args: |
@@ -237,7 +237,7 b' def _setupdirstate(ui):' | |||
|
237 | 237 | raise error.Abort(_("cannot add '%s' - it is outside " |
|
238 | 238 | "the sparse checkout") % f, |
|
239 | 239 | hint=hint) |
|
240 | return orig(self, *args) | |
|
240 | return orig(self, *args, **kwargs) | |
|
241 | 241 | extensions.wrapfunction(dirstate.dirstate, func, _wrapper) |
|
242 | 242 | |
|
243 | 243 | @command('debugsparse', [ |
@@ -31,31 +31,13 b' command = registrar.command(cmdtable)' | |||
|
31 | 31 | # leave the attribute unspecified. |
|
32 | 32 | testedwith = 'ships-with-hg-core' |
|
33 | 33 | |
|
34 | def checksubstate(repo, baserev=None): | |
|
35 | '''return list of subrepos at a different revision than substate. | |
|
36 | Abort if any subrepos have uncommitted changes.''' | |
|
37 | inclsubs = [] | |
|
38 | wctx = repo[None] | |
|
39 | if baserev: | |
|
40 | bctx = repo[baserev] | |
|
41 | else: | |
|
42 | bctx = wctx.p1() | |
|
43 | for s in sorted(wctx.substate): | |
|
44 | wctx.sub(s).bailifchanged(True) | |
|
45 | if s not in bctx.substate or bctx.sub(s).dirty(): | |
|
46 | inclsubs.append(s) | |
|
47 | return inclsubs | |
|
48 | ||
|
49 | def checklocalchanges(repo, force=False, excsuffix=''): | |
|
50 | cmdutil.checkunfinished(repo) | |
|
34 | def checklocalchanges(repo, force=False): | |
|
51 | 35 | s = repo.status() |
|
52 | 36 | if not force: |
|
53 | if s.modified or s.added or s.removed or s.deleted: | |
|
54 | _("local changes found") # i18n tool detection | |
|
55 | raise error.Abort(_("local changes found" + excsuffix)) | |
|
56 | if checksubstate(repo): | |
|
57 | _("local changed subrepos found") # i18n tool detection | |
|
58 | raise error.Abort(_("local changed subrepos found" + excsuffix)) | |
|
37 | cmdutil.checkunfinished(repo) | |
|
38 | cmdutil.bailifchanged(repo) | |
|
39 | else: | |
|
40 | cmdutil.checkunfinished(repo, skipmerge=True) | |
|
59 | 41 | return s |
|
60 | 42 | |
|
61 | 43 | def _findupdatetarget(repo, nodes): |
@@ -35,6 +35,7 b' from mercurial import (' | |||
|
35 | 35 | revset, |
|
36 | 36 | scmutil, |
|
37 | 37 | smartset, |
|
38 | state as statemod, | |
|
38 | 39 | util, |
|
39 | 40 | vfs as vfsmod, |
|
40 | 41 | ) |
@@ -757,9 +758,12 b' def kwtransplanted(context, mapping):' | |||
|
757 | 758 | return n and nodemod.hex(n) or '' |
|
758 | 759 | |
|
759 | 760 | def extsetup(ui): |
|
760 | cmdutil.unfinishedstates.append( | |
|
761 | ['transplant/journal', True, False, _('transplant in progress'), | |
|
762 | _("use 'hg transplant --continue' or 'hg update' to abort")]) | |
|
761 | statemod.addunfinished ( | |
|
762 | 'transplant', fname='transplant/journal', clearable=True, | |
|
763 | statushint=_('To continue: hg transplant --continue\n' | |
|
764 | 'To abort: hg update'), | |
|
765 | cmdhint=_("use 'hg transplant --continue' or 'hg update' to abort") | |
|
766 | ) | |
|
763 | 767 | |
|
764 | 768 | # tell hggettext to extract docstrings from these functions: |
|
765 | 769 | i18nfunctions = [revsettransplanted, kwtransplanted] |
@@ -89,6 +89,8 b' import threading' | |||
|
89 | 89 | import time |
|
90 | 90 | import traceback |
|
91 | 91 | |
|
92 | from mercurial import pycompat | |
|
93 | ||
|
92 | 94 | __all__ = ["Zeroconf", "ServiceInfo", "ServiceBrowser"] |
|
93 | 95 | |
|
94 | 96 | # hook for threads |
@@ -270,6 +272,8 b' class DNSQuestion(DNSEntry):' | |||
|
270 | 272 | """A DNS question entry""" |
|
271 | 273 | |
|
272 | 274 | def __init__(self, name, type, clazz): |
|
275 | if pycompat.ispy3 and isinstance(name, str): | |
|
276 | name = name.encode('ascii') | |
|
273 | 277 | if not name.endswith(".local."): |
|
274 | 278 | raise NonLocalNameException(name) |
|
275 | 279 | DNSEntry.__init__(self, name, type, clazz) |
@@ -535,7 +539,7 b' class DNSIncoming(object):' | |||
|
535 | 539 | |
|
536 | 540 | def readString(self, len): |
|
537 | 541 | """Reads a string of a given length from the packet""" |
|
538 | format = '!' + str(len) + 's' | |
|
542 | format = '!%ds' % len | |
|
539 | 543 | length = struct.calcsize(format) |
|
540 | 544 | info = struct.unpack(format, |
|
541 | 545 | self.data[self.offset:self.offset + length]) |
@@ -613,7 +617,7 b' class DNSIncoming(object):' | |||
|
613 | 617 | |
|
614 | 618 | def readName(self): |
|
615 | 619 | """Reads a domain name from the packet""" |
|
616 | result = '' | |
|
620 | result = r'' | |
|
617 | 621 | off = self.offset |
|
618 | 622 | next = -1 |
|
619 | 623 | first = off |
@@ -625,7 +629,7 b' class DNSIncoming(object):' | |||
|
625 | 629 | break |
|
626 | 630 | t = len & 0xC0 |
|
627 | 631 | if t == 0x00: |
|
628 | result = ''.join((result, self.readUTF(off, len) + '.')) | |
|
632 | result = r''.join((result, self.readUTF(off, len) + r'.')) | |
|
629 | 633 | off += len |
|
630 | 634 | elif t == 0xC0: |
|
631 | 635 | if next < 0: |
@@ -34,6 +34,7 b' from mercurial import (' | |||
|
34 | 34 | encoding, |
|
35 | 35 | extensions, |
|
36 | 36 | hg, |
|
37 | pycompat, | |
|
37 | 38 | ui as uimod, |
|
38 | 39 | ) |
|
39 | 40 | from mercurial.hgweb import ( |
@@ -55,7 +56,7 b' def getip():' | |||
|
55 | 56 | # finds external-facing interface without sending any packets (Linux) |
|
56 | 57 | try: |
|
57 | 58 | s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) |
|
58 | s.connect(('1.0.0.1', 0)) | |
|
59 | s.connect((r'1.0.0.1', 0)) | |
|
59 | 60 | ip = s.getsockname()[0] |
|
60 | 61 | return ip |
|
61 | 62 | except socket.error: |
@@ -64,17 +65,17 b' def getip():' | |||
|
64 | 65 | # Generic method, sometimes gives useless results |
|
65 | 66 | try: |
|
66 | 67 | dumbip = socket.gethostbyaddr(socket.gethostname())[2][0] |
|
67 | if ':' in dumbip: | |
|
68 | dumbip = '127.0.0.1' | |
|
69 | if not dumbip.startswith('127.'): | |
|
68 | if r':' in dumbip: | |
|
69 | dumbip = r'127.0.0.1' | |
|
70 | if not dumbip.startswith(r'127.'): | |
|
70 | 71 | return dumbip |
|
71 | 72 | except (socket.gaierror, socket.herror): |
|
72 | dumbip = '127.0.0.1' | |
|
73 | dumbip = r'127.0.0.1' | |
|
73 | 74 | |
|
74 | 75 | # works elsewhere, but actually sends a packet |
|
75 | 76 | try: |
|
76 | 77 | s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) |
|
77 | s.connect(('1.0.0.1', 1)) | |
|
78 | s.connect((r'1.0.0.1', 1)) | |
|
78 | 79 | ip = s.getsockname()[0] |
|
79 | 80 | return ip |
|
80 | 81 | except socket.error: |
@@ -86,19 +87,19 b' def publish(name, desc, path, port):' | |||
|
86 | 87 | global server, localip |
|
87 | 88 | if not server: |
|
88 | 89 | ip = getip() |
|
89 | if ip.startswith('127.'): | |
|
90 | if ip.startswith(r'127.'): | |
|
90 | 91 | # if we have no internet connection, this can happen. |
|
91 | 92 | return |
|
92 | 93 | localip = socket.inet_aton(ip) |
|
93 | 94 | server = Zeroconf.Zeroconf(ip) |
|
94 | 95 | |
|
95 | hostname = socket.gethostname().split('.')[0] | |
|
96 | host = hostname + ".local" | |
|
97 | name = "%s-%s" % (hostname, name) | |
|
96 | hostname = socket.gethostname().split(r'.')[0] | |
|
97 | host = hostname + r".local" | |
|
98 | name = r"%s-%s" % (hostname, name) | |
|
98 | 99 | |
|
99 | 100 | # advertise to browsers |
|
100 | 101 | svc = Zeroconf.ServiceInfo('_http._tcp.local.', |
|
101 | name + '._http._tcp.local.', | |
|
102 | pycompat.bytestr(name + r'._http._tcp.local.'), | |
|
102 | 103 | server = host, |
|
103 | 104 | port = port, |
|
104 | 105 | properties = {'description': desc, |
@@ -108,7 +109,7 b' def publish(name, desc, path, port):' | |||
|
108 | 109 | |
|
109 | 110 | # advertise to Mercurial clients |
|
110 | 111 | svc = Zeroconf.ServiceInfo('_hg._tcp.local.', |
|
111 | name + '._hg._tcp.local.', | |
|
112 | pycompat.bytestr(name + r'._hg._tcp.local.'), | |
|
112 | 113 | server = host, |
|
113 | 114 | port = port, |
|
114 | 115 | properties = {'description': desc, |
@@ -158,7 +159,7 b' class listener(object):' | |||
|
158 | 159 | |
|
159 | 160 | def getzcpaths(): |
|
160 | 161 | ip = getip() |
|
161 | if ip.startswith('127.'): | |
|
162 | if ip.startswith(r'127.'): | |
|
162 | 163 | return |
|
163 | 164 | server = Zeroconf.Zeroconf(ip) |
|
164 | 165 | l = listener() |
@@ -166,10 +167,10 b' def getzcpaths():' | |||
|
166 | 167 | time.sleep(1) |
|
167 | 168 | server.close() |
|
168 | 169 | for value in l.found.values(): |
|
169 | name = value.name[:value.name.index('.')] | |
|
170 | url = "http://%s:%s%s" % (socket.inet_ntoa(value.address), value.port, | |
|
171 | value.properties.get("path", "/")) | |
|
172 | yield "zc-" + name, url | |
|
170 | name = value.name[:value.name.index(b'.')] | |
|
171 | url = r"http://%s:%s%s" % (socket.inet_ntoa(value.address), value.port, | |
|
172 | value.properties.get(r"path", r"/")) | |
|
173 | yield b"zc-" + name, pycompat.bytestr(url) | |
|
173 | 174 | |
|
174 | 175 | def config(orig, self, section, key, *args, **kwargs): |
|
175 | 176 | if section == "paths" and key.startswith("zc-"): |
@@ -29,7 +29,7 b' if sys.version_info[0] >= 3:' | |||
|
29 | 29 | """A sys.meta_path finder that uses a custom module loader.""" |
|
30 | 30 | def find_spec(self, fullname, path, target=None): |
|
31 | 31 | # Only handle Mercurial-related modules. |
|
32 | if not fullname.startswith(('mercurial.', 'hgext.' | |
|
32 | if not fullname.startswith(('mercurial.', 'hgext.')): | |
|
33 | 33 | return None |
|
34 | 34 | # don't try to parse binary |
|
35 | 35 | if fullname.startswith('mercurial.cext.'): |
@@ -54,7 +54,16 b' if sys.version_info[0] >= 3:' | |||
|
54 | 54 | if finder == self: |
|
55 | 55 | continue |
|
56 | 56 | |
|
57 | spec = finder.find_spec(fullname, path, target=target) | |
|
57 | # Originally the API was a `find_module` method, but it was | |
|
58 | # renamed to `find_spec` in python 3.4, with a new `target` | |
|
59 | # argument. | |
|
60 | find_spec_method = getattr(finder, 'find_spec', None) | |
|
61 | if find_spec_method: | |
|
62 | spec = find_spec_method(fullname, path, target=target) | |
|
63 | else: | |
|
64 | spec = finder.find_module(fullname) | |
|
65 | if spec is not None: | |
|
66 | spec = importlib.util.spec_from_loader(fullname, spec) | |
|
58 | 67 | if spec: |
|
59 | 68 | break |
|
60 | 69 | |
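
Editor's note: the fallback added above can be read as one small adapter: prefer the PEP 451 find_spec() API, and otherwise wrap the loader returned by the legacy find_module() into a spec. A compacted standalone version under those assumptions:

import importlib.util

def findspec_compat(finder, fullname, path, target=None):
    # Prefer the modern finder API when the finder provides it.
    find_spec = getattr(finder, 'find_spec', None)
    if find_spec is not None:
        return find_spec(fullname, path, target=target)
    # Legacy API (pre-Python 3.4): returns a loader, not a spec.
    loader = finder.find_module(fullname)
    if loader is None:
        return None
    return importlib.util.spec_from_loader(fullname, loader)
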
@@ -216,7 +225,9 b' if sys.version_info[0] >= 3:' | |||
|
216 | 225 | |
|
217 | 226 | # It changes iteritems/values to items/values as they are not |
|
218 | 227 | # present in Python 3 world. |
|
219 | elif fn in ('iteritems', 'itervalues'): | |
|
228 | elif (fn in ('iteritems', 'itervalues') and | |
|
229 | not (tokens[i - 1].type == token.NAME and | |
|
230 | tokens[i - 1].string == 'def')): | |
|
220 | 231 | yield t._replace(string=fn[4:]) |
|
221 | 232 | continue |
|
222 | 233 | |
@@ -227,7 +238,7 b' if sys.version_info[0] >= 3:' | |||
|
227 | 238 | # ``replacetoken`` or any mechanism that changes semantics of module |
|
228 | 239 | # loading is changed. Otherwise cached bytecode may get loaded without |
|
229 | 240 | # the new transformation mechanisms applied. |
|
230 | BYTECODEHEADER = b'HG\x00\x0b' | |
|
241 | BYTECODEHEADER = b'HG\x00\x0c' | |
|
231 | 242 | |
|
232 | 243 | class hgloader(importlib.machinery.SourceFileLoader): |
|
233 | 244 | """Custom module loader that transforms source code. |
@@ -33,6 +33,14 b' from . import (' | |||
|
33 | 33 | # custom styles |
|
34 | 34 | activebookmarklabel = 'bookmarks.active bookmarks.current' |
|
35 | 35 | |
|
36 | BOOKMARKS_IN_STORE_REQUIREMENT = 'bookmarksinstore' | |
|
37 | ||
|
38 | def bookmarksinstore(repo): | |
|
39 | return BOOKMARKS_IN_STORE_REQUIREMENT in repo.requirements | |
|
40 | ||
|
41 | def bookmarksvfs(repo): | |
|
42 | return repo.svfs if bookmarksinstore(repo) else repo.vfs | |
|
43 | ||
|
36 | 44 | def _getbkfile(repo): |
|
37 | 45 | """Hook so that extensions that mess with the store can hook bm storage. |
|
38 | 46 | |
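
Editor's note: beyond choosing a vfs, the new requirement also dictates locking in later hunks: store-side bookmarks are guarded by repo.lock(), working-copy bookmarks by repo.wlock(). A sketch combining both decisions into one hypothetical helper; repo is assumed to expose requirements, vfs/svfs and lock/wlock as in the hunks above:

def bookmarkstorage(repo):
    # Returns (vfs, acquire_lock) for wherever bookmarks live. With the
    # 'bookmarksinstore' requirement they sit in .hg/store and are shared
    # between share-extension clones; otherwise they stay per-checkout.
    if bookmarksinstore(repo):
        return repo.svfs, repo.lock
    return repo.vfs, repo.wlock
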
@@ -40,7 +48,7 b' def _getbkfile(repo):' | |||
|
40 | 48 | bookmarks or the committed ones. Other extensions (like share) |
|
41 | 49 | may need to tweak this behavior further. |
|
42 | 50 | """ |
|
43 | fp, pending = txnutil.trypending(repo.root, repo.vfs, 'bookmarks') | |
|
51 | fp, pending = txnutil.trypending(repo.root, bookmarksvfs(repo), 'bookmarks') | |
|
44 | 52 | return fp |
|
45 | 53 | |
|
46 | 54 | class bmstore(object): |
@@ -91,8 +99,11 b' class bmstore(object):' | |||
|
91 | 99 | # ValueError: |
|
92 | 100 | # - node in nm, for non-20-bytes entry |
|
93 | 101 | # - split(...), for string without ' ' |
|
94 | repo.ui.warn(_('malformed line in .hg/bookmarks: %r\n') | |
|
95 | % pycompat.bytestr(line)) | |
|
102 | bookmarkspath = '.hg/bookmarks' | |
|
103 | if bookmarksinstore(repo): | |
|
104 | bookmarkspath = '.hg/store/bookmarks' | |
|
105 | repo.ui.warn(_('malformed line in %s: %r\n') | |
|
106 | % (bookmarkspath, pycompat.bytestr(line))) | |
|
96 | 107 | except IOError as inst: |
|
97 | 108 | if inst.errno != errno.ENOENT: |
|
98 | 109 | raise |
@@ -192,8 +203,9 b' class bmstore(object):' | |||
|
192 | 203 | """record that bookmarks have been changed in a transaction |
|
193 | 204 | |
|
194 | 205 | The transaction is then responsible for updating the file content.""" |
|
206 | location = '' if bookmarksinstore(self._repo) else 'plain' | |
|
195 | 207 | tr.addfilegenerator('bookmarks', ('bookmarks',), self._write, |
|
196 | location='plain') | |
|
208 | location=location) | |
|
197 | 209 | tr.hookargs['bookmark_moved'] = '1' |
|
198 | 210 | |
|
199 | 211 | def _writerepo(self, repo): |
@@ -203,28 +215,24 b' class bmstore(object):' | |||
|
203 | 215 | rbm.active = None |
|
204 | 216 | rbm._writeactive() |
|
205 | 217 | |
|
206 | with repo.wlock(): | |
|
207 | file_ = repo.vfs('bookmarks', 'w', atomictemp=True, | |
|
208 | checkambig=True) | |
|
209 | try: | |
|
210 | self._write(file_) | |
|
211 | except: # re-raises | |
|
212 | file_.discard() | |
|
213 | raise | |
|
214 | finally: | |
|
215 | file_.close() | |
|
218 | if bookmarksinstore(repo): | |
|
219 | vfs = repo.svfs | |
|
220 | lock = repo.lock() | |
|
221 | else: | |
|
222 | vfs = repo.vfs | |
|
223 | lock = repo.wlock() | |
|
224 | with lock: | |
|
225 | with vfs('bookmarks', 'w', atomictemp=True, checkambig=True) as f: | |
|
226 | self._write(f) | |
|
216 | 227 | |
|
217 | 228 | def _writeactive(self): |
|
218 | 229 | if self._aclean: |
|
219 | 230 | return |
|
220 | 231 | with self._repo.wlock(): |
|
221 | 232 | if self._active is not None: |
|
222 | f = self._repo.vfs('bookmarks.current', 'w', atomictemp=True, | |
|
223 | checkambig=True) | |
|
224 | try: | |
|
233 | with self._repo.vfs('bookmarks.current', 'w', atomictemp=True, | |
|
234 | checkambig=True) as f: | |
|
225 | 235 | f.write(encoding.fromlocal(self._active)) |
|
226 | finally: | |
|
227 | f.close() | |
|
228 | 236 | else: |
|
229 | 237 | self._repo.vfs.tryunlink('bookmarks.current') |
|
230 | 238 | self._aclean = True |
@@ -306,28 +314,12 b' def _readactive(repo, marks):' | |||
|
306 | 314 | itself as we commit. This function returns the name of that bookmark. |
|
307 | 315 | It is stored in .hg/bookmarks.current |
|
308 | 316 | """ |
|
309 | try: | |
|
310 | file = repo.vfs('bookmarks.current') | |
|
311 | except IOError as inst: | |
|
312 | if inst.errno != errno.ENOENT: | |
|
313 | raise | |
|
314 | return None | |
|
315 | try: | |
|
316 | # No readline() in osutil.posixfile, reading everything is | |
|
317 | # cheap. | |
|
318 | # Note that it's possible for readlines() here to raise | |
|
319 | # IOError, since we might be reading the active mark over | |
|
320 | # static-http which only tries to load the file when we try | |
|
321 | # to read from it. | |
|
322 | mark = encoding.tolocal((file.readlines() or [''])[0]) | |
|
323 | if mark == '' or mark not in marks: | |
|
324 | mark = None | |
|
325 | except IOError as inst: | |
|
326 | if inst.errno != errno.ENOENT: | |
|
327 | raise | |
|
328 | return None | |
|
329 | finally: | |
|
330 | file.close() | |
|
317 | # No readline() in osutil.posixfile, reading everything is | |
|
318 | # cheap. | |
|
319 | content = repo.vfs.tryread('bookmarks.current') | |
|
320 | mark = encoding.tolocal((content.splitlines() or [''])[0]) | |
|
321 | if mark == '' or mark not in marks: | |
|
322 | mark = None | |
|
331 | 323 | return mark |
|
332 | 324 | |
|
333 | 325 | def activate(repo, mark): |
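
Editor's note: vfs.tryread() is what lets the rewrite above drop the open/IOError dance: it returns the file content, or empty bytes when the file is missing. A plain-filesystem sketch of the same idiom:

import errno

def tryread(path):
    """Read a file's bytes; treat a missing file as empty content."""
    try:
        with open(path, 'rb') as fp:
            return fp.read()
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise
        return b''
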
@@ -453,7 +445,11 b' def listbookmarks(repo):' | |||
|
453 | 445 | return d |
|
454 | 446 | |
|
455 | 447 | def pushbookmark(repo, key, old, new): |
|
456 | with repo.wlock(), repo.lock(), repo.transaction('bookmarks') as tr: | |
|
448 | if bookmarksinstore(repo): | |
|
449 | wlock = util.nullcontextmanager() | |
|
450 | else: | |
|
451 | wlock = repo.wlock() | |
|
452 | with wlock, repo.lock(), repo.transaction('bookmarks') as tr: | |
|
457 | 453 | marks = repo._bookmarks |
|
458 | 454 | existing = hex(marks.get(key, '')) |
|
459 | 455 | if existing != old and existing != new: |
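
Editor's note: util.nullcontextmanager() keeps the with-statement shape identical whether or not the wlock is needed; contextlib.nullcontext (Python 3.7+) is the stdlib equivalent. An illustrative sketch, with `update` standing in for the bookmark mutation:

import contextlib

def pushbookmarklike(repo, update):
    # With in-store bookmarks the working copy is untouched, so a no-op
    # context manager stands in for the wlock and the lock ordering stays
    # identical on both paths.
    wlock = (contextlib.nullcontext() if bookmarksinstore(repo)
             else repo.wlock())
    with wlock, repo.lock():
        update(repo)
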
@@ -121,6 +121,12 b' def _unknownnode(node):' | |||
|
121 | 121 | """ |
|
122 | 122 | raise ValueError(r'node %s does not exist' % pycompat.sysstr(hex(node))) |
|
123 | 123 | |
|
124 | def _branchcachedesc(repo): | |
|
125 | if repo.filtername is not None: | |
|
126 | return 'branch cache (%s)' % repo.filtername | |
|
127 | else: | |
|
128 | return 'branch cache' | |
|
129 | ||
|
124 | 130 | class branchcache(object): |
|
125 | 131 | """A dict like object that hold branches heads cache. |
|
126 | 132 | |
@@ -212,6 +218,8 b' class branchcache(object):' | |||
|
212 | 218 | self._verifybranch(k) |
|
213 | 219 | yield k, v |
|
214 | 220 | |
|
221 | items = iteritems | |
|
222 | ||
|
215 | 223 | def hasbranch(self, label): |
|
216 | 224 | """ checks whether a branch of this name exists or not """ |
|
217 | 225 | self._verifybranch(label) |
@@ -241,11 +249,9 b' class branchcache(object):' | |||
|
241 | 249 | |
|
242 | 250 | except Exception as inst: |
|
243 | 251 | if repo.ui.debugflag: |
|
244 | msg = 'invalid branchheads cache' | |
|
245 | if repo.filtername is not None: | |
|
246 | msg += ' (%s)' % repo.filtername | |
|
247 | msg += ': %s\n' | |
|
248 | repo.ui.debug(msg % pycompat.bytestr(inst)) | |
|
252 | msg = 'invalid %s: %s\n' | |
|
253 | repo.ui.debug(msg % (_branchcachedesc(repo), | |
|
254 | pycompat.bytestr(inst))) | |
|
249 | 255 | bcache = None |
|
250 | 256 | |
|
251 | 257 | finally: |
@@ -351,9 +357,8 b' class branchcache(object):' | |||
|
351 | 357 | state = 'o' |
|
352 | 358 | f.write("%s %s %s\n" % (hex(node), state, label)) |
|
353 | 359 | f.close() |
|
354 | repo.ui.log('branchcache', | |
|
355 | 'wrote %s branch cache with %d labels and %d nodes\n', | |
|
356 | repo.filtername, len(self._entries), nodecount) | |
|
360 | repo.ui.log('branchcache', 'wrote %s with %d labels and %d nodes\n', | |
|
361 | _branchcachedesc(repo), len(self._entries), nodecount) | |
|
357 | 362 | except (IOError, OSError, error.Abort) as inst: |
|
358 | 363 | # Abort may be raised by read only opener, so log and continue |
|
359 | 364 | repo.ui.debug("couldn't write branch cache: %s\n" % |
@@ -378,6 +383,10 b' class branchcache(object):' | |||
|
378 | 383 | # fetch current topological heads to speed up filtering |
|
379 | 384 | topoheads = set(cl.headrevs()) |
|
380 | 385 | |
|
386 | # new tip revision which we found after iterating items from new | |
|
387 | # branches | |
|
388 | ntiprev = self.tiprev | |
|
389 | ||
|
381 | 390 | # if older branchheads are reachable from new ones, they aren't |
|
382 | 391 | # really branchheads. Note checking parents is insufficient: |
|
383 | 392 | # 1 (branch a) -> 2 (branch b) -> 3 (branch a) |
@@ -401,9 +410,12 b' class branchcache(object):' | |||
|
401 | 410 | bheadrevs = sorted(bheadset) |
|
402 | 411 | self[branch] = [cl.node(rev) for rev in bheadrevs] |
|
403 | 412 | tiprev = bheadrevs[-1] |
|
404 | if tiprev > self.tiprev: | |
|
405 | self.tipnode = cl.node(tiprev) | |
|
406 | self.tiprev = tiprev | |
|
413 | if tiprev > ntiprev: | |
|
414 | ntiprev = tiprev | |
|
415 | ||
|
416 | if ntiprev > self.tiprev: | |
|
417 | self.tiprev = ntiprev | |
|
418 | self.tipnode = cl.node(ntiprev) | |
|
407 | 419 | |
|
408 | 420 | if not self.validfor(repo): |
|
409 | 421 | # cache key are not valid anymore |
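
Editor's note: the surrounding change defers the tip update: ntiprev accumulates the maximum head revision across every updated branch, and self.tiprev/tipnode move once after the loop instead of being rewritten per branch. In sketch form, with cache, newheads and cl as stand-ins for the real objects:

def movecachetip(cache, newheads, cl):
    # Track the max head revision over all updated branches, then move
    # the cache tip exactly once after the loop.
    ntiprev = cache.tiprev
    for branch, bheadrevs in newheads.items():
        cache[branch] = [cl.node(rev) for rev in bheadrevs]
        if bheadrevs[-1] > ntiprev:
            ntiprev = bheadrevs[-1]
    if ntiprev > cache.tiprev:
        cache.tiprev = ntiprev
        cache.tipnode = cl.node(ntiprev)
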
@@ -417,8 +429,8 b' class branchcache(object):' | |||
|
417 | 429 | self.filteredhash = scmutil.filteredhash(repo, self.tiprev) |
|
418 | 430 | |
|
419 | 431 | duration = util.timer() - starttime |
|
420 | repo.ui.log('branchcache', 'updated %s branch cache in %.4f seconds\n', | |
|
421 | repo.filtername, duration) | |
|
432 | repo.ui.log('branchcache', 'updated %s in %.4f seconds\n', | |
|
433 | _branchcachedesc(repo), duration) | |
|
422 | 434 | |
|
423 | 435 | self.write(repo) |
|
424 | 436 | |
@@ -608,51 +620,59 b' class revbranchcache(object):' | |||
|
608 | 620 | wlock = None |
|
609 | 621 | step = '' |
|
610 | 622 | try: |
|
623 | # write the new names | |
|
611 | 624 | if self._rbcnamescount < len(self._names): |
|
612 | step = ' names' | |
|
613 | 625 | wlock = repo.wlock(wait=False) |
|
614 | if self._rbcnamescount != 0: | |
|
615 | f = repo.cachevfs.open(_rbcnames, 'ab') | |
|
616 | if f.tell() == self._rbcsnameslen: | |
|
617 | f.write('\0') | |
|
618 | else: | |
|
619 | f.close() | |
|
620 | repo.ui.debug("%s changed - rewriting it\n" % _rbcnames) | |
|
621 | self._rbcnamescount = 0 | |
|
622 | self._rbcrevslen = 0 | |
|
623 | if self._rbcnamescount == 0: | |
|
624 | # before rewriting names, make sure references are removed | |
|
625 | repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True) | |
|
626 | f = repo.cachevfs.open(_rbcnames, 'wb') | |
|
627 | f.write('\0'.join(encoding.fromlocal(b) | |
|
628 | for b in self._names[self._rbcnamescount:])) | |
|
629 | self._rbcsnameslen = f.tell() | |
|
630 | f.close() | |
|
631 | self._rbcnamescount = len(self._names) | |
|
626 | step = ' names' | |
|
627 | self._writenames(repo) | |
|
632 | 628 | |
|
629 | # write the new revs | |
|
633 | 630 | start = self._rbcrevslen * _rbcrecsize |
|
634 | 631 | if start != len(self._rbcrevs): |
|
635 | 632 | step = '' |
|
636 | 633 | if wlock is None: |
|
637 | 634 | wlock = repo.wlock(wait=False) |
|
638 | revs = min(len(repo.changelog), | |
|
639 | len(self._rbcrevs) // _rbcrecsize) | |
|
640 | f = repo.cachevfs.open(_rbcrevs, 'ab') | |
|
641 | if f.tell() != start: | |
|
642 | repo.ui.debug("truncating cache/%s to %d\n" | |
|
643 | % (_rbcrevs, start)) | |
|
644 | f.seek(start) | |
|
645 | if f.tell() != start: | |
|
646 | start = 0 | |
|
647 | f.seek(start) | |
|
648 | f.truncate() | |
|
649 | end = revs * _rbcrecsize | |
|
650 | f.write(self._rbcrevs[start:end]) | |
|
651 | f.close() | |
|
652 | self._rbcrevslen = revs | |
|
635 | self._writerevs(repo, start) | |
|
636 | ||
|
653 | 637 | except (IOError, OSError, error.Abort, error.LockError) as inst: |
|
654 | 638 | repo.ui.debug("couldn't write revision branch cache%s: %s\n" |
|
655 | 639 | % (step, stringutil.forcebytestr(inst))) |
|
656 | 640 | finally: |
|
657 | 641 | if wlock is not None: |
|
658 | 642 | wlock.release() |
|
643 | ||
|
644 | def _writenames(self, repo): | |
|
645 | """ write the new branch names to revbranchcache """ | |
|
646 | if self._rbcnamescount != 0: | |
|
647 | f = repo.cachevfs.open(_rbcnames, 'ab') | |
|
648 | if f.tell() == self._rbcsnameslen: | |
|
649 | f.write('\0') | |
|
650 | else: | |
|
651 | f.close() | |
|
652 | repo.ui.debug("%s changed - rewriting it\n" % _rbcnames) | |
|
653 | self._rbcnamescount = 0 | |
|
654 | self._rbcrevslen = 0 | |
|
655 | if self._rbcnamescount == 0: | |
|
656 | # before rewriting names, make sure references are removed | |
|
657 | repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True) | |
|
658 | f = repo.cachevfs.open(_rbcnames, 'wb') | |
|
659 | f.write('\0'.join(encoding.fromlocal(b) | |
|
660 | for b in self._names[self._rbcnamescount:])) | |
|
661 | self._rbcsnameslen = f.tell() | |
|
662 | f.close() | |
|
663 | self._rbcnamescount = len(self._names) | |
|
664 | ||
|
665 | def _writerevs(self, repo, start): | |
|
666 | """ write the new revs to revbranchcache """ | |
|
667 | revs = min(len(repo.changelog), len(self._rbcrevs) // _rbcrecsize) | |
|
668 | with repo.cachevfs.open(_rbcrevs, 'ab') as f: | |
|
669 | if f.tell() != start: | |
|
670 | repo.ui.debug("truncating cache/%s to %d\n" % (_rbcrevs, start)) | |
|
671 | f.seek(start) | |
|
672 | if f.tell() != start: | |
|
673 | start = 0 | |
|
674 | f.seek(start) | |
|
675 | f.truncate() | |
|
676 | end = revs * _rbcrecsize | |
|
677 | f.write(self._rbcrevs[start:end]) | |
|
678 | self._rbcrevslen = revs |
@@ -2298,10 +2298,11 b' def handlestreamv2bundle(op, part):' | |||
|
2298 | 2298 | streamclone.applybundlev2(repo, part, filecount, bytecount, |
|
2299 | 2299 | requirements) |
|
2300 | 2300 | |
|
2301 | def widen_bundle(repo, oldmatcher, newmatcher, common, known, cgversion, | |
|
2302 | ellipses): | |
|
2301 | def widen_bundle(bundler, repo, oldmatcher, newmatcher, common, | |
|
2302 | known, cgversion, ellipses): | |
|
2303 | 2303 | """generates bundle2 for widening a narrow clone |
|
2304 | 2304 | |
|
2305 | bundler is the bundle to which data should be added | |
|
2305 | 2306 | repo is the localrepository instance |
|
2306 | 2307 | oldmatcher matches what the client already has |
|
2307 | 2308 | newmatcher matches what the client needs (including what it already has) |
@@ -2312,7 +2313,6 b' def widen_bundle(repo, oldmatcher, newma' | |||
|
2312 | 2313 | |
|
2313 | 2314 | returns bundle2 of the data required for extending |
|
2314 | 2315 | """ |
|
2315 | bundler = bundle20(repo.ui) | |
|
2316 | 2316 | commonnodes = set() |
|
2317 | 2317 | cl = repo.changelog |
|
2318 | 2318 | for r in repo.revs("::%ln", common): |
@@ -42,6 +42,9 b' static inline Py_ssize_t _finddir(const ' | |||
|
42 | 42 | break; |
|
43 | 43 | pos -= 1; |
|
44 | 44 | } |
|
45 | if (pos == -1) { | |
|
46 | return 0; | |
|
47 | } | |
|
45 | 48 | |
|
46 | 49 | return pos; |
|
47 | 50 | } |
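
Editor's note: the added guard makes _finddir() return 0 rather than -1 when the path contains no slash, so a root-level file yields an empty directory name instead of an invalid index. A Python rendition of the fixed scan:

def finddir(path, pos):
    # Scan backwards from pos for the '/' that ends the directory part.
    while pos != -1:
        if path[pos] == '/':
            break
        pos -= 1
    if pos == -1:
        return 0          # no slash: root-level file, empty dirname
    return pos

assert finddir('a/b/c', 4) == 3   # position of the last '/'
assert finddir('rootfile', 7) == 0
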
@@ -667,10 +667,11 b' void dirs_module_init(PyObject *mod);' | |||
|
667 | 667 | void manifest_module_init(PyObject *mod); |
|
668 | 668 | void revlog_module_init(PyObject *mod); |
|
669 | 669 | |
|
670 | static const int version = 12; | |
|
670 | static const int version = 13; | |
|
671 | 671 | |
|
672 | 672 | static void module_init(PyObject *mod) |
|
673 | 673 | { |
|
674 | PyObject *capsule = NULL; | |
|
674 | 675 | PyModule_AddIntConstant(mod, "version", version); |
|
675 | 676 | |
|
676 | 677 | /* This module constant has two purposes. First, it lets us unit test |
@@ -687,6 +688,12 b' static void module_init(PyObject *mod)' | |||
|
687 | 688 | manifest_module_init(mod); |
|
688 | 689 | revlog_module_init(mod); |
|
689 | 690 | |
|
691 | capsule = PyCapsule_New( | |
|
692 | make_dirstate_tuple, | |
|
693 | "mercurial.cext.parsers.make_dirstate_tuple_CAPI", NULL); | |
|
694 | if (capsule != NULL) | |
|
695 | PyModule_AddObject(mod, "make_dirstate_tuple_CAPI", capsule); | |
|
696 | ||
|
690 | 697 | if (PyType_Ready(&dirstateTupleType) < 0) { |
|
691 | 698 | return; |
|
692 | 699 | } |
@@ -1061,7 +1061,7 b' class cgpacker(object):' | |||
|
1061 | 1061 | while tmfnodes: |
|
1062 | 1062 | tree, nodes = tmfnodes.popitem() |
|
1063 | 1063 | |
|
1064 | should_visit = self._matcher.visitdir(tree[:-1] or '.') | |
|
1064 | should_visit = self._matcher.visitdir(tree[:-1]) | |
|
1065 | 1065 | if tree and not should_visit: |
|
1066 | 1066 | continue |
|
1067 | 1067 | |
@@ -1093,7 +1093,7 b' class cgpacker(object):' | |||
|
1093 | 1093 | fullclnodes=self._fullclnodes, |
|
1094 | 1094 | precomputedellipsis=self._precomputedellipsis) |
|
1095 | 1095 | |
|
1096 | if not self._oldmatcher.visitdir(store.tree[:-1] or '.'): | |
|
1096 | if not self._oldmatcher.visitdir(store.tree[:-1]): | |
|
1097 | 1097 | yield tree, deltas |
|
1098 | 1098 | else: |
|
1099 | 1099 | # 'deltas' is a generator and we need to consume it even if |
@@ -80,25 +80,55 b' def encodeextra(d):' | |||
|
80 | 80 | ] |
|
81 | 81 | return "\0".join(items) |
|
82 | 82 | |
|
83 | def encodecopies(copies): | |
|
84 | items = [ | |
|
85 | '%s\0%s' % (k, copies[k]) | |
|
86 |
f |
|
|
87 | ] | |
|
83 | def encodecopies(files, copies): | |
|
84 | items = [] | |
|
85 | for i, dst in enumerate(files): | |
|
86 | if dst in copies: | |
|
87 | items.append('%d\0%s' % (i, copies[dst])) | |
|
88 | if len(items) != len(copies): | |
|
89 | raise error.ProgrammingError('some copy targets missing from file list') | |
|
88 | 90 | return "\n".join(items) |
|
89 | 91 | |
|
90 | def decodecopies(data): | |
|
92 | def decodecopies(files, data): | |
|
91 | 93 | try: |
|
92 | 94 | copies = {} |
|
95 | if not data: | |
|
96 | return copies | |
|
93 | 97 | for l in data.split('\n'): |
|
94 | k, v = l.split('\0') | |
|
95 | copies[k] = v | |
|
98 | strindex, src = l.split('\0') | |
|
99 | i = int(strindex) | |
|
100 | dst = files[i] | |
|
101 | copies[dst] = src | |
|
96 | 102 | return copies |
|
97 | except ValueError: | |
|
103 | except (ValueError, IndexError): | |
|
98 | 104 | # Perhaps someone had chosen the same key name (e.g. "p1copies") and |
|
99 | 105 | # used different syntax for the value. |
|
100 | 106 | return None |
|
101 | 107 | |
|
108 | def encodefileindices(files, subset): | |
|
109 | subset = set(subset) | |
|
110 | indices = [] | |
|
111 | for i, f in enumerate(files): | |
|
112 | if f in subset: | |
|
113 | indices.append('%d' % i) | |
|
114 | return '\n'.join(indices) | |
|
115 | ||
|
116 | def decodefileindices(files, data): | |
|
117 | try: | |
|
118 | subset = [] | |
|
119 | if not data: | |
|
120 | return subset | |
|
121 | for strindex in data.split('\n'): | |
|
122 | i = int(strindex) | |
|
123 | if i < 0 or i >= len(files): | |
|
124 | return None | |
|
125 | subset.append(files[i]) | |
|
126 | return subset | |
|
127 | except (ValueError, IndexError): | |
|
128 | # Perhaps someone had chosen the same key name (e.g. "added") and | |
|
129 | # used different syntax for the value. | |
|
130 | return None | |
|
131 | ||
|
102 | 132 | def stripdesc(desc): |
|
103 | 133 | """strip trailing whitespace and leading and trailing empty lines""" |
|
104 | 134 | return '\n'.join([l.rstrip() for l in desc.splitlines()]).strip('\n') |
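
Editor's note: both encoders above round-trip against the same sorted file list that changelog.add() writes: copies become one 'index\0source' record per line, file subsets become bare index lines. A standalone round-trip of the copy encoding; error handling is simplified here, whereas the real decoder returns None on malformed data:

def encodecopies(files, copies):
    # Store each copy as "index of destination in files" NUL "source".
    items = ['%d\0%s' % (i, copies[dst])
             for i, dst in enumerate(files) if dst in copies]
    if len(items) != len(copies):
        raise ValueError('some copy targets missing from file list')
    return '\n'.join(items)

def decodecopies(files, data):
    copies = {}
    for l in data.split('\n') if data else []:
        strindex, src = l.split('\0')
        copies[files[int(strindex)]] = src
    return copies

files = sorted(['dst.txt', 'other.txt', 'src.txt'])
copies = {'dst.txt': 'src.txt'}          # dst.txt was copied from src.txt
assert decodecopies(files, encodecopies(files, copies)) == copies
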
@@ -194,6 +224,10 b' class _changelogrevision(object):' | |||
|
194 | 224 | user = attr.ib(default='') |
|
195 | 225 | date = attr.ib(default=(0, 0)) |
|
196 | 226 | files = attr.ib(default=attr.Factory(list)) |
|
227 | filesadded = attr.ib(default=None) | |
|
228 | filesremoved = attr.ib(default=None) | |
|
229 | p1copies = attr.ib(default=None) | |
|
230 | p2copies = attr.ib(default=None) | |
|
197 | 231 | description = attr.ib(default='') |
|
198 | 232 | |
|
199 | 233 | class changelogrevision(object): |
@@ -298,14 +332,24 b' class changelogrevision(object):' | |||
|
298 | 332 | return self._text[off[2] + 1:off[3]].split('\n') |
|
299 | 333 | |
|
300 | 334 | @property |
|
335 | def filesadded(self): | |
|
336 | rawindices = self.extra.get('filesadded') | |
|
337 | return rawindices and decodefileindices(self.files, rawindices) | |
|
338 | ||
|
339 | @property | |
|
340 | def filesremoved(self): | |
|
341 | rawindices = self.extra.get('filesremoved') | |
|
342 | return rawindices and decodefileindices(self.files, rawindices) | |
|
343 | ||
|
344 | @property | |
|
301 | 345 | def p1copies(self): |
|
302 | 346 | rawcopies = self.extra.get('p1copies') |
|
303 | return rawcopies and decodecopies(rawcopies) | |
|
347 | return rawcopies and decodecopies(self.files, rawcopies) | |
|
304 | 348 | |
|
305 | 349 | @property |
|
306 | 350 | def p2copies(self): |
|
307 | 351 | rawcopies = self.extra.get('p2copies') |
|
308 | return rawcopies and decodecopies(rawcopies) | |
|
352 | return rawcopies and decodecopies(self.files, rawcopies) | |
|
309 | 353 | |
|
310 | 354 | @property |
|
311 | 355 | def description(self): |
@@ -380,9 +424,6 b' class changelog(revlog.revlog):' | |||
|
380 | 424 | if i not in self.filteredrevs: |
|
381 | 425 | yield i |
|
382 | 426 | |
|
383 | def reachableroots(self, minroot, heads, roots, includepath=False): | |
|
384 | return self.index.reachableroots2(minroot, heads, roots, includepath) | |
|
385 | ||
|
386 | 427 | def _checknofilteredinrevs(self, revs): |
|
387 | 428 | """raise the appropriate error if 'revs' contains a filtered revision |
|
388 | 429 | |
@@ -562,7 +603,8 b' class changelog(revlog.revlog):' | |||
|
562 | 603 | return l[3:] |
|
563 | 604 | |
|
564 | 605 | def add(self, manifest, files, desc, transaction, p1, p2, |
|
565 | user, date=None, extra=None, p1copies=None, p2copies=None): | |
|
606 | user, date=None, extra=None, p1copies=None, p2copies=None, | |
|
607 | filesadded=None, filesremoved=None): | |
|
566 | 608 | # Convert to UTF-8 encoded bytestrings as the very first |
|
567 | 609 | # thing: calling any method on a localstr object will turn it |
|
568 | 610 | # into a str object and the cached UTF-8 string is thus lost. |
@@ -591,17 +633,23 b' class changelog(revlog.revlog):' | |||
|
591 | 633 | elif branch in (".", "null", "tip"): |
|
592 | 634 | raise error.StorageError(_('the name \'%s\' is reserved') |
|
593 | 635 | % branch) |
|
594 | if (p1copies or p2copies) and extra is None: | |
|
636 | extrasentries = p1copies, p2copies, filesadded, filesremoved | |
|
637 | if extra is None and any(x is not None for x in extrasentries): | |
|
595 | 638 | extra = {} |
|
596 | if p1copies: | |
|
597 | extra['p1copies'] = encodecopies(p1copies) | |
|
598 | if p2copies: | |
|
599 | extra['p2copies'] = encodecopies(p2copies) | |
|
639 | sortedfiles = sorted(files) | |
|
640 | if p1copies is not None: | |
|
641 | extra['p1copies'] = encodecopies(sortedfiles, p1copies) | |
|
642 | if p2copies is not None: | |
|
643 | extra['p2copies'] = encodecopies(sortedfiles, p2copies) | |
|
644 | if filesadded is not None: | |
|
645 | extra['filesadded'] = encodefileindices(sortedfiles, filesadded) | |
|
646 | if filesremoved is not None: | |
|
647 | extra['filesremoved'] = encodefileindices(sortedfiles, filesremoved) | |
|
600 | 648 | |
|
601 | 649 | if extra: |
|
602 | 650 | extra = encodeextra(extra) |
|
603 | 651 | parseddate = "%s %s" % (parseddate, extra) |
|
604 | l = [hex(manifest), user, parseddate] + sorted(files) + ["", desc] |
|
652 | l = [hex(manifest), user, parseddate] + sortedfiles + ["", desc] | |
|
605 | 653 | text = "\n".join(l) |
|
606 | 654 | return self.addrevision(text, transaction, len(self), p1, p2) |
|
607 | 655 |
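Note the ordering contract in add(): the copy and file-index extras are computed against sorted(files), which is exactly the list serialized into the changelog text, so the stored indices stay valid on decode. A toy illustration (invented file names):

    # Indices must be computed on the same sorted list that is serialized;
    # computing them against the unsorted input would corrupt the mapping.
    files = ['zebra', 'apple', 'mango']
    filesadded = ['mango']
    sortedfiles = sorted(files)             # ['apple', 'mango', 'zebra']
    encoded = '\n'.join('%d' % sortedfiles.index(f) for f in filesadded)
    assert encoded == '1'                   # 'mango' sits at index 1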
@@ -138,7 +138,9 b' def _getmtimepaths(ui):' | |||
|
138 | 138 | modules.append(__version__) |
|
139 | 139 | except ImportError: |
|
140 | 140 | pass |
|
141 | files = [pycompat.sysexecutable] | |
|
141 | files = [] | |
|
142 | if pycompat.sysexecutable: | |
|
143 | files.append(pycompat.sysexecutable) | |
|
142 | 144 | for m in modules: |
|
143 | 145 | try: |
|
144 | 146 | files.append(pycompat.fsencode(inspect.getabsfile(m))) |
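The chgserver fix guards against interpreters where pycompat.sysexecutable is None or empty (e.g. some embedded setups), which previously put a bogus entry into the mtime-watched file list. The shape of the guard, in plain Python:

    import sys

    # sys.executable can be None or '' when Python is embedded; appending
    # such a value would break the later stat() loop over these paths.
    files = []
    if sys.executable:
        files.append(sys.executable)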
@@ -38,10 +38,12 b' from . import (' | |||
|
38 | 38 | pathutil, |
|
39 | 39 | phases, |
|
40 | 40 | pycompat, |
|
41 | repair, | |
|
41 | 42 | revlog, |
|
42 | 43 | rewriteutil, |
|
43 | 44 | scmutil, |
|
44 | 45 | smartset, |
|
46 | state as statemod, | |
|
45 | 47 | subrepoutil, |
|
46 | 48 | templatekw, |
|
47 | 49 | templater, |
@@ -264,8 +266,8 b' def dorecord(ui, repo, commitfunc, cmdsu' | |||
|
264 | 266 | In the end we'll record interesting changes, and everything else |
|
265 | 267 | will be left in place, so the user can continue working. |
|
266 | 268 | """ |
|
267 | ||
|
268 | checkunfinished(repo, commit=True) | |
|
269 | if not opts.get('interactive-unshelve'): | |
|
270 | checkunfinished(repo, commit=True) | |
|
269 | 271 | wctx = repo[None] |
|
270 | 272 | merge = len(wctx.parents()) > 1 |
|
271 | 273 | if merge: |
@@ -278,8 +280,8 b' def dorecord(ui, repo, commitfunc, cmdsu' | |||
|
278 | 280 | force = opts.get('force') |
|
279 | 281 | if not force: |
|
280 | 282 | vdirs = [] |
|
283 | match = matchmod.badmatch(match, fail) | |
|
281 | 284 | match.explicitdir = vdirs.append |
|
282 | match.bad = fail | |
|
283 | 285 | |
|
284 | 286 | status = repo.status(match=match) |
|
285 | 287 | |
@@ -618,74 +620,18 b' To mark files as resolved: hg resolve -' | |||
|
618 | 620 | |
|
619 | 621 | return _commentlines(msg) |
|
620 | 622 | |
|
621 | def _helpmessage(continuecmd, abortcmd): | |
|
622 | msg = _('To continue: %s\n' | |
|
623 | 'To abort: %s') % (continuecmd, abortcmd) | |
|
624 | return _commentlines(msg) | |
|
625 | ||
|
626 | def _rebasemsg(): | |
|
627 | return _helpmessage('hg rebase --continue', 'hg rebase --abort') | |
|
628 | ||
|
629 | def _histeditmsg(): | |
|
630 | return _helpmessage('hg histedit --continue', 'hg histedit --abort') | |
|
631 | ||
|
632 | def _unshelvemsg(): | |
|
633 | return _helpmessage('hg unshelve --continue', 'hg unshelve --abort') | |
|
634 | ||
|
635 | def _graftmsg(): | |
|
636 | return _helpmessage('hg graft --continue', 'hg graft --abort') | |
|
637 | ||
|
638 | def _mergemsg(): | |
|
639 | return _helpmessage('hg commit', 'hg merge --abort') | |
|
640 | ||
|
641 | def _bisectmsg(): | |
|
642 | msg = _('To mark the changeset good: hg bisect --good\n' | |
|
643 | 'To mark the changeset bad: hg bisect --bad\n' | |
|
644 | 'To abort: hg bisect --reset\n') | |
|
645 | return _commentlines(msg) | |
|
646 | ||
|
647 | def fileexistspredicate(filename): | |
|
648 | return lambda repo: repo.vfs.exists(filename) | |
|
649 | ||
|
650 | def _mergepredicate(repo): | |
|
651 | return len(repo[None].parents()) > 1 | |
|
652 | ||
|
653 | STATES = ( | |
|
654 | # (state, predicate to detect states, helpful message function) | |
|
655 | ('histedit', fileexistspredicate('histedit-state'), _histeditmsg), | |
|
656 | ('bisect', fileexistspredicate('bisect.state'), _bisectmsg), | |
|
657 | ('graft', fileexistspredicate('graftstate'), _graftmsg), | |
|
658 | ('unshelve', fileexistspredicate('shelvedstate'), _unshelvemsg), | |
|
659 | ('rebase', fileexistspredicate('rebasestate'), _rebasemsg), | |
|
660 | # The merge state is part of a list that will be iterated over. | |
|
661 | # They need to be last because some of the other unfinished states may also | |
|
662 | # be in a merge or update state (eg. rebase, histedit, graft, etc). | |
|
663 | # We want those to have priority. | |
|
664 | ('merge', _mergepredicate, _mergemsg), | |
|
665 | ) | |
|
666 | ||
|
667 | def _getrepostate(repo): | |
|
668 | # experimental config: commands.status.skipstates | |
|
669 | skip = set(repo.ui.configlist('commands', 'status.skipstates')) | |
|
670 | for state, statedetectionpredicate, msgfn in STATES: | |
|
671 | if state in skip: | |
|
672 | continue | |
|
673 | if statedetectionpredicate(repo): | |
|
674 | return (state, statedetectionpredicate, msgfn) | |
|
675 | ||
|
676 | 623 | def morestatus(repo, fm): |
|
677 | statetuple = _getrepostate(repo) |
|
624 | statetuple = statemod.getrepostate(repo) | |
|
678 | 625 | label = 'status.morestatus' |
|
679 | 626 | if statetuple: |
|
680 | state, statedetectionpredicate, helpfulmsg = statetuple |
|
627 | state, helpfulmsg = statetuple | |
|
681 | 628 | statemsg = _('The repository is in an unfinished *%s* state.') % state |
|
682 | 629 | fm.plain('%s\n' % _commentlines(statemsg), label=label) |
|
683 | 630 | conmsg = _conflictsmsg(repo) |
|
684 | 631 | if conmsg: |
|
685 | 632 | fm.plain('%s\n' % conmsg, label=label) |
|
686 | 633 | if helpfulmsg: |
|
687 | helpmsg = helpfulmsg() | |
|
688 | fm.plain('%s\n' % helpmsg, label=label) | |
|
634 | fm.plain('%s\n' % _commentlines(helpfulmsg), label=label) | |
|
689 | 635 | |
|
690 | 636 | def findpossible(cmd, table, strict=False): |
|
691 | 637 | """ |
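The per-command helper functions and the STATES table are gone: morestatus() now asks a generic registry in the state module. Conceptually, each registered operation knows how to detect itself and how to phrase its own help. A simplified sketch of that registry pattern (class and function names are illustrative, not the real statemod API):

    class statecheck(object):
        # One unfinished operation: how to detect it, what to say about it.
        def __init__(self, opname, fname, statushint):
            self._opname = opname          # e.g. 'rebase'
            self._fname = fname            # state file under .hg/
            self._statushint = statushint  # 'To continue: ... To abort: ...'

        def isunfinished(self, repo):
            return repo.vfs.exists(self._fname)

        def hint(self):
            return self._statushint

    _unfinishedstates = []

    def addunfinished(opname, fname, statushint=''):
        _unfinishedstates.append(statecheck(opname, fname, statushint))

    def getrepostate(repo):
        # first matching entry wins; morestatus() unpacks (state, helpfulmsg)
        for state in _unfinishedstates:
            if state.isunfinished(repo):
                return (state._opname, state.hint())
        return None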
@@ -1668,6 +1614,14 b' def _exportfntemplate(repo, revs, basefm' | |||
|
1668 | 1614 | _exportsingle(repo, ctx, fm, match, switch_parent, seqno, |
|
1669 | 1615 | diffopts) |
|
1670 | 1616 | |
|
1617 | def _prefetchchangedfiles(repo, revs, match): | |
|
1618 | allfiles = set() | |
|
1619 | for rev in revs: | |
|
1620 | for file in repo[rev].files(): | |
|
1621 | if not match or match(file): | |
|
1622 | allfiles.add(file) | |
|
1623 | scmutil.prefetchfiles(repo, revs, scmutil.matchfiles(repo, allfiles)) | |
|
1624 | ||
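_prefetchchangedfiles narrows prefetching to the files actually changed in the exported revisions (intersected with the user's -I/-X matcher) instead of everything the matcher could cover — a significant saving for remote stores like lfs. A toy model of the narrowing (invented data):

    # Only files changed in the exported revisions are collected.
    changed_by_rev = {1: ['a.txt'], 2: ['b.bin', 'c.txt']}
    match = lambda f: not f.endswith('.bin')      # user's filter

    allfiles = {f for rev in (1, 2)
                for f in changed_by_rev[rev] if match(f)}
    assert allfiles == {'a.txt', 'c.txt'}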
|
1671 | 1625 | def export(repo, revs, basefm, fntemplate='hg-%h.patch', switch_parent=False, |
|
1672 | 1626 | opts=None, match=None): |
|
1673 | 1627 | '''export changesets as hg patches |
@@ -1692,7 +1646,7 b' def export(repo, revs, basefm, fntemplat' | |||
|
1692 | 1646 | the given template. |
|
1693 | 1647 | Otherwise: All revs will be written to basefm. |
|
1694 | 1648 | ''' |
|
1695 | scmutil.prefetchfiles(repo, revs, match) |
|
1649 | _prefetchchangedfiles(repo, revs, match) | |
|
1696 | 1650 | |
|
1697 | 1651 | if not fntemplate: |
|
1698 | 1652 | _exportfile(repo, revs, basefm, '<unnamed>', switch_parent, opts, match) |
@@ -1702,7 +1656,7 b' def export(repo, revs, basefm, fntemplat' | |||
|
1702 | 1656 | |
|
1703 | 1657 | def exportfile(repo, revs, fp, switch_parent=False, opts=None, match=None): |
|
1704 | 1658 | """Export changesets to the given file stream""" |
|
1705 | scmutil.prefetchfiles(repo, revs, match) |
|
1659 | _prefetchchangedfiles(repo, revs, match) | |
|
1706 | 1660 | |
|
1707 | 1661 | dest = getattr(fp, 'name', '<unnamed>') |
|
1708 | 1662 | with formatter.formatter(repo.ui, fp, 'export', {}) as fm: |
@@ -2345,14 +2299,22 b' def remove(ui, repo, m, prefix, uipathfn' | |||
|
2345 | 2299 | |
|
2346 | 2300 | return ret |
|
2347 | 2301 | |
|
2302 | def _catfmtneedsdata(fm): | |
|
2303 | return not fm.datahint() or 'data' in fm.datahint() | |
|
2304 | ||
|
2348 | 2305 | def _updatecatformatter(fm, ctx, matcher, path, decode): |
|
2349 | 2306 | """Hook for adding data to the formatter used by ``hg cat``. |
|
2350 | 2307 | |
|
2351 | 2308 | Extensions (e.g., lfs) can wrap this to inject keywords/data, but must call |
|
2352 | 2309 | this method first.""" |
|
2353 | data = ctx[path].data() | |
|
2354 | if decode: | |
|
2355 | data = ctx.repo().wwritedata(path, data) | |
|
2310 | ||
|
2311 | # data() can be expensive to fetch (e.g. lfs), so don't fetch it if it | |
|
2312 | # wasn't requested. | |
|
2313 | data = b'' | |
|
2314 | if _catfmtneedsdata(fm): | |
|
2315 | data = ctx[path].data() | |
|
2316 | if decode: | |
|
2317 | data = ctx.repo().wwritedata(path, data) | |
|
2356 | 2318 | fm.startitem() |
|
2357 | 2319 | fm.context(ctx=ctx) |
|
2358 | 2320 | fm.write('data', '%s', data) |
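With _catfmtneedsdata, `hg cat` consults the formatter's datahint() and skips reading file contents when the active template never references {data} — so e.g. listing paths from an lfs-backed repo no longer downloads blobs. The hint check, isolated:

    # Mirrors the check above: no hint means "may need everything";
    # otherwise fetch only if 'data' is among the requested fields.
    def catfmtneedsdata(datahint):
        return not datahint or 'data' in datahint

    assert catfmtneedsdata(None)           # plain `hg cat`: data is printed
    assert catfmtneedsdata({'data'})       # template uses {data}
    assert not catfmtneedsdata({'path'})   # template only needs {path}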
@@ -2383,13 +2345,15 b' def cat(ui, repo, ctx, matcher, basefm, ' | |||
|
2383 | 2345 | mfnode = ctx.manifestnode() |
|
2384 | 2346 | try: |
|
2385 | 2347 | if mfnode and mfl[mfnode].find(file)[0]: |
|
2386 | scmutil.prefetchfiles(repo, [ctx.rev()], matcher) | |
|
2348 | if _catfmtneedsdata(basefm): | |
|
2349 | scmutil.prefetchfiles(repo, [ctx.rev()], matcher) | |
|
2387 | 2350 | write(file) |
|
2388 | 2351 | return 0 |
|
2389 | 2352 | except KeyError: |
|
2390 | 2353 | pass |
|
2391 | 2354 | |
|
2392 | scmutil.prefetchfiles(repo, [ctx.rev()], matcher) | |
|
2355 | if _catfmtneedsdata(basefm): | |
|
2356 | scmutil.prefetchfiles(repo, [ctx.rev()], matcher) | |
|
2393 | 2357 | |
|
2394 | 2358 | for abs in ctx.walk(matcher): |
|
2395 | 2359 | write(abs) |
@@ -2583,12 +2547,18 b' def amend(ui, repo, old, extra, pats, op' | |||
|
2583 | 2547 | message = logmessage(ui, opts) |
|
2584 | 2548 | |
|
2585 | 2549 | editform = mergeeditform(old, 'commit.amend') |
|
2586 | editor = getcommiteditor(editform=editform, | |
|
2587 | **pycompat.strkwargs(opts)) | |
|
2588 | 2550 | |
|
2589 | 2551 | if not message: |
|
2590 | editor = getcommiteditor(edit=True, editform=editform) | |
|
2591 | 2552 | message = old.description() |
|
2553 | # Default if message isn't provided and --edit is not passed is to | |
|
2554 | # invoke editor, but allow --no-edit. If somehow we don't have any | |
|
2555 | # description, let's always start the editor. | |
|
2556 | doedit = not message or opts.get('edit') in [True, None] | |
|
2557 | else: | |
|
2558 | # Default if message is provided is to not invoke editor, but allow | |
|
2559 | # --edit. | |
|
2560 | doedit = opts.get('edit') is True | |
|
2561 | editor = getcommiteditor(edit=doedit, editform=editform) | |
|
2592 | 2562 | |
|
2593 | 2563 | pureextra = extra.copy() |
|
2594 | 2564 | extra['amend_source'] = old.hex() |
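The amend change makes the editor decision explicit: with no message, editing is the default but --no-edit is honored (and an empty description always opens the editor); with a message, only an explicit --edit opens it. The same logic extracted as a sketch (edit is True for --edit, False for --no-edit, None when the flag isn't passed):

    def should_edit(message, old_description, edit):
        if not message:
            message = old_description
            return not message or edit in [True, None]
        return edit is True

    assert should_edit('', 'old text', None) is True    # default: edit
    assert should_edit('', 'old text', False) is False  # --no-edit honored
    assert should_edit('', '', False) is True           # nothing to reuse
    assert should_edit('new', 'old', None) is False     # message given
    assert should_edit('new', 'old', True) is True      # --edit forces it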
@@ -3289,66 +3259,69 b' summaryhooks = util.hooks()' | |||
|
3289 | 3259 | # - (desturl, destbranch, destpeer, outgoing) |
|
3290 | 3260 | summaryremotehooks = util.hooks() |
|
3291 | 3261 | |
|
3292 | # A list of state files kept by multistep operations like graft. | |
|
3293 | # Since graft cannot be aborted, it is considered 'clearable' by update. | |
|
3294 | # note: bisect is intentionally excluded | |
|
3295 | # (state file, clearable, allowcommit, error, hint) | |
|
3296 | unfinishedstates = [ | |
|
3297 | ('graftstate', True, False, _('graft in progress'), | |
|
3298 | _("use 'hg graft --continue' or 'hg graft --stop' to stop")), | |
|
3299 | ('updatestate', True, False, _('last update was interrupted'), | |
|
3300 | _("use 'hg update' to get a consistent checkout")) | |
|
3301 | ] | |
|
3302 | ||
|
3303 | def checkunfinished(repo, commit=False): | |
|
3262 | ||
|
3263 | def checkunfinished(repo, commit=False, skipmerge=False): | |
|
3304 | 3264 | '''Look for an unfinished multistep operation, like graft, and abort |
|
3305 | 3265 | if found. It's probably good to check this right before |
|
3306 | 3266 | bailifchanged(). |
|
3307 | 3267 | ''' |
|
3308 | 3268 | # Check for non-clearable states first, so things like rebase will take |
|
3309 | 3269 | # precedence over update. |
|
3310 | for f, clearable, allowcommit, msg, hint in unfinishedstates: | |
|
3311 | if clearable or (commit and allowcommit): |
|
3270 | for state in statemod._unfinishedstates: | |
|
3271 | if (state._clearable or (commit and state._allowcommit) or | |
|
3272 | state._reportonly): | |
|
3312 | 3273 | continue |
|
3313 | if repo.vfs.exists(f): | |
|
3314 | raise error.Abort(msg, hint=hint) | |
|
3315 | ||
|
3316 | for f, clearable, allowcommit, msg, hint in unfinishedstates: | |
|
3317 | if not clearable or (commit and allowcommit): |
|
3274 | if state.isunfinished(repo): | |
|
3275 | raise error.Abort(state.msg(), hint=state.hint()) | |
|
3276 | ||
|
3277 | for s in statemod._unfinishedstates: | |
|
3278 | if (not s._clearable or (commit and s._allowcommit) or | |
|
3279 | (s._opname == 'merge' and skipmerge) or s._reportonly): | |
|
3318 | 3280 | continue |
|
3319 | if repo.vfs.exists(f): | |
|
3320 | raise error.Abort(msg, hint=hint) | |
|
3281 | if s.isunfinished(repo): | |
|
3282 | raise error.Abort(s.msg(), hint=s.hint()) | |
|
3321 | 3283 | |
|
3322 | 3284 | def clearunfinished(repo): |
|
3323 | 3285 | '''Check for unfinished operations (as above), and clear the ones |
|
3324 | 3286 | that are clearable. |
|
3325 | 3287 | ''' |
|
3326 | for f, clearable, allowcommit, msg, hint in unfinishedstates: | |
|
3327 | if not clearable and repo.vfs.exists(f): | |
|
3328 | raise error.Abort(msg, hint=hint) | |
|
3329 | for f, clearable, allowcommit, msg, hint in unfinishedstates: | |
|
3330 | if clearable and repo.vfs.exists(f): | |
|
3331 | util.unlink(repo.vfs.join(f)) | |
|
3332 | ||
|
3333 | afterresolvedstates = [ | |
|
3334 | ('graftstate', | |
|
3335 | _('hg graft --continue')), | |
|
3336 | ] | |
|
3288 | for state in statemod._unfinishedstates: | |
|
3289 | if state._reportonly: | |
|
3290 | continue | |
|
3291 | if not state._clearable and state.isunfinished(repo): | |
|
3292 | raise error.Abort(state.msg(), hint=state.hint()) | |
|
3293 | ||
|
3294 | for s in statemod._unfinishedstates: | |
|
3295 | if s._opname == 'merge' or s._reportonly: |
|
3296 | continue | |
|
3297 | if s._clearable and s.isunfinished(repo): | |
|
3298 | util.unlink(repo.vfs.join(s._fname)) | |
|
3299 | ||
|
3300 | def getunfinishedstate(repo): | |
|
3301 | '''Check for an unfinished operation and return its statecheck |
|
3302 | object, or None if no operation is in progress.''' |
|
3303 | for state in statemod._unfinishedstates: | |
|
3304 | if state.isunfinished(repo): | |
|
3305 | return state | |
|
3306 | return None | |
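checkunfinished keeps its two-pass shape against the new registry: non-clearable operations (rebase, histedit, ...) are reported first, then clearable ones, so the most specific interrupted operation wins over e.g. its inner update. A self-contained control-flow sketch (exception type simplified):

    def checkunfinished(states, repo, commit=False, skipmerge=False):
        for s in states:                    # pass 1: non-clearable states
            if s._clearable or (commit and s._allowcommit) or s._reportonly:
                continue
            if s.isunfinished(repo):
                raise RuntimeError('%s in progress' % s._opname)
        for s in states:                    # pass 2: clearable states
            if (not s._clearable or (commit and s._allowcommit) or
                    (s._opname == 'merge' and skipmerge) or s._reportonly):
                continue
            if s.isunfinished(repo):
                raise RuntimeError('%s in progress' % s._opname)

    class _s(object):                       # minimal stand-in registry entry
        def __init__(self, opname, clearable, unfinished):
            self._opname, self._clearable = opname, clearable
            self._allowcommit = self._reportonly = False
            self._unfinished = unfinished
        def isunfinished(self, repo):
            return self._unfinished

    states = [_s('rebase', False, True), _s('update', True, True)]
    try:
        checkunfinished(states, repo=None)
    except RuntimeError as e:
        assert str(e) == 'rebase in progress'   # non-clearable wins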
|
3337 | 3307 | |
|
3338 | 3308 | def howtocontinue(repo): |
|
3339 | 3309 | '''Check for an unfinished operation and return the command to finish |
|
3340 | 3310 | it. |
|
3341 | 3311 | |
|
3342 | afterresolvedstates tuples define a .hg/{file} and the corresponding | |
|
3343 | command needed to finish it. | |
|
3312 | statemod._unfinishedstates list is checked for an unfinished operation | |
|
3313 | and the corresponding message to finish it is generated if a method to | |
|
3314 | continue is supported by the operation. | |
|
3344 | 3315 | |
|
3345 | 3316 | Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is |
|
3346 | 3317 | a boolean. |
|
3347 | 3318 | ''' |
|
3348 | 3319 | contmsg = _("continue: %s") |
|
3349 | for f, msg in afterresolvedstates: |
|
3350 | if repo.vfs.exists(f): | |
|
3351 | return contmsg % msg, True |
|
3320 | for state in statemod._unfinishedstates: | |
|
3321 | if not state._continueflag: | |
|
3322 | continue | |
|
3323 | if state.isunfinished(repo): | |
|
3324 | return contmsg % state.continuemsg(), True | |
|
3352 | 3325 | if repo[None].dirty(missing=True, merge=False, branch=False): |
|
3353 | 3326 | return contmsg % _("hg commit"), False |
|
3354 | 3327 | return None, None |
@@ -3356,8 +3329,8 b' def howtocontinue(repo):' | |||
|
3356 | 3329 | def checkafterresolved(repo): |
|
3357 | 3330 | '''Inform the user about the next action after completing hg resolve |
|
3358 | 3331 | |
|
3359 | If there's a matching afterresolvedstates, howtocontinue will yield | |
|
3360 | repo.ui.warn as the reporter. | |
|
3332 | If there's an unfinished operation that supports the continue flag, |
|
3333 | howtocontinue will yield repo.ui.warn as the reporter. | |
|
3361 | 3334 | |
|
3362 | 3335 | Otherwise, it will yield repo.ui.note. |
|
3363 | 3336 | ''' |
@@ -3382,3 +3355,73 b' def wrongtooltocontinue(repo, task):' | |||
|
3382 | 3355 | if after[1]: |
|
3383 | 3356 | hint = after[0] |
|
3384 | 3357 | raise error.Abort(_('no %s in progress') % task, hint=hint) |
|
3358 | ||
|
3359 | def abortgraft(ui, repo, graftstate): | |
|
3360 | """abort the interrupted graft and rollbacks to the state before interrupted | |
|
3361 | graft""" | |
|
3362 | if not graftstate.exists(): | |
|
3363 | raise error.Abort(_("no interrupted graft to abort")) | |
|
3364 | statedata = readgraftstate(repo, graftstate) | |
|
3365 | newnodes = statedata.get('newnodes') | |
|
3366 | if newnodes is None: | |
|
3367 | # an old graft state which does not have all the data required to abort |
|
3368 | # the graft | |
|
3369 | raise error.Abort(_("cannot abort using an old graftstate")) | |
|
3370 | ||
|
3371 | # changeset from which graft operation was started | |
|
3372 | if len(newnodes) > 0: | |
|
3373 | startctx = repo[newnodes[0]].p1() | |
|
3374 | else: | |
|
3375 | startctx = repo['.'] | |
|
3376 | # whether to strip or not | |
|
3377 | cleanup = False | |
|
3378 | from . import hg | |
|
3379 | if newnodes: | |
|
3380 | newnodes = [repo[r].rev() for r in newnodes] | |
|
3381 | cleanup = True | |
|
3382 | # checking that none of the newnodes turned public or is public | |
|
3383 | immutable = [c for c in newnodes if not repo[c].mutable()] | |
|
3384 | if immutable: | |
|
3385 | repo.ui.warn(_("cannot clean up public changesets %s\n") | |
|
3386 | % ', '.join(bytes(repo[r]) for r in immutable), | |
|
3387 | hint=_("see 'hg help phases' for details")) | |
|
3388 | cleanup = False | |
|
3389 | ||
|
3390 | # checking that no new nodes are created on top of grafted revs | |
|
3391 | desc = set(repo.changelog.descendants(newnodes)) | |
|
3392 | if desc - set(newnodes): | |
|
3393 | repo.ui.warn(_("new changesets detected on destination " | |
|
3394 | "branch, can't strip\n")) | |
|
3395 | cleanup = False | |
|
3396 | ||
|
3397 | if cleanup: | |
|
3398 | with repo.wlock(), repo.lock(): | |
|
3399 | hg.updaterepo(repo, startctx.node(), overwrite=True) | |
|
3400 | # stripping the new nodes created | |
|
3401 | strippoints = [c.node() for c in repo.set("roots(%ld)", | |
|
3402 | newnodes)] | |
|
3403 | repair.strip(repo.ui, repo, strippoints, backup=False) | |
|
3404 | ||
|
3405 | if not cleanup: | |
|
3406 | # we don't update to the startnode if we can't strip | |
|
3407 | startctx = repo['.'] | |
|
3408 | hg.updaterepo(repo, startctx.node(), overwrite=True) | |
|
3409 | ||
|
3410 | ui.status(_("graft aborted\n")) | |
|
3411 | ui.status(_("working directory is now at %s\n") % startctx.hex()[:12]) | |
|
3412 | graftstate.delete() | |
|
3413 | return 0 | |
|
3414 | ||
|
3415 | def readgraftstate(repo, graftstate): | |
|
3416 | """read the graft state file and return a dict of the data stored in it""" | |
|
3417 | try: | |
|
3418 | return graftstate.read() | |
|
3419 | except error.CorruptedState: | |
|
3420 | nodes = repo.vfs.read('graftstate').splitlines() | |
|
3421 | return {'nodes': nodes} | |
|
3422 | ||
|
3423 | def hgabortgraft(ui, repo): | |
|
3424 | """ abort logic for aborting graft using 'hg abort'""" | |
|
3425 | with repo.wlock(): | |
|
3426 | graftstate = statemod.cmdstate(repo, 'graftstate') | |
|
3427 | return abortgraft(ui, repo, graftstate) |
@@ -53,16 +53,17 b' from . import (' | |||
|
53 | 53 | pycompat, |
|
54 | 54 | rcutil, |
|
55 | 55 | registrar, |
|
56 | repair, | |
|
57 | 56 | revsetlang, |
|
58 | 57 | rewriteutil, |
|
59 | 58 | scmutil, |
|
60 | 59 | server, |
|
60 | shelve as shelvemod, | |
|
61 | 61 | state as statemod, |
|
62 | 62 | streamclone, |
|
63 | 63 | tags as tagsmod, |
|
64 | 64 | ui as uimod, |
|
65 | 65 | util, |
|
66 | verify as verifymod, | |
|
66 | 67 | wireprotoserver, |
|
67 | 68 | ) |
|
68 | 69 | from .utils import ( |
@@ -130,6 +131,29 b' debugrevlogopts = cmdutil.debugrevlogopt' | |||
|
130 | 131 | |
|
131 | 132 | # Commands start here, listed alphabetically |
|
132 | 133 | |
|
134 | @command('abort', | |
|
135 | dryrunopts, helpcategory=command.CATEGORY_CHANGE_MANAGEMENT, | |
|
136 | helpbasic=True) | |
|
137 | def abort(ui, repo, **opts): | |
|
138 | """abort an unfinished operation (EXPERIMENTAL) | |
|
139 | ||
|
140 | Aborts a multistep operation like graft, histedit, rebase, merge, |
|
141 | or unshelve if it is in an unfinished state. |
|
142 | ||
|
143 | Use --dry-run/-n to perform a dry run of the command. |
|
144 | """ | |
|
145 | dryrun = opts.get(r'dry_run') | |
|
146 | abortstate = cmdutil.getunfinishedstate(repo) | |
|
147 | if not abortstate: | |
|
148 | raise error.Abort(_('no operation in progress')) | |
|
149 | if not abortstate.abortfunc: | |
|
150 | raise error.Abort((_("%s in progress but does not support 'hg abort'") % | |
|
151 | (abortstate._opname)), hint=abortstate.hint()) | |
|
152 | if dryrun: | |
|
153 | ui.status(_('%s in progress, will be aborted\n') % (abortstate._opname)) | |
|
154 | return | |
|
155 | return abortstate.abortfunc(ui, repo) | |
|
156 | ||
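`hg abort` itself is deliberately thin: look up the one unfinished state and delegate to the abortfunc it registered. A dispatch sketch with a stubbed registry entry (error type simplified):

    def dispatch_abort(ui, repo, state, dry_run=False):
        if state is None:
            raise RuntimeError('no operation in progress')
        if state.abortfunc is None:
            raise RuntimeError("%s in progress but does not support "
                               "'hg abort'" % state._opname)
        if dry_run:
            return '%s in progress, will be aborted' % state._opname
        return state.abortfunc(ui, repo)

    class _demo(object):                    # stand-in registry entry
        _opname = 'graft'
        @staticmethod
        def abortfunc(ui, repo):
            return 0                        # would roll back the graft

    assert dispatch_abort(None, None, _demo(), dry_run=True) == \
        'graft in progress, will be aborted'
    assert dispatch_abort(None, None, _demo()) == 0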
|
133 | 157 | @command('add', |
|
134 | 158 | walkopts + subrepoopts + dryrunopts, |
|
135 | 159 | _('[OPTION]... [FILE]...'), |
@@ -1582,6 +1606,8 b' def clone(ui, source, dest=None, **opts)' | |||
|
1582 | 1606 | ('', 'amend', None, _('amend the parent of the working directory')), |
|
1583 | 1607 | ('s', 'secret', None, _('use the secret phase for committing')), |
|
1584 | 1608 | ('e', 'edit', None, _('invoke editor on commit messages')), |
|
1609 | ('', 'force-close-branch', None, | |
|
1610 | _('forcibly close branch from a non-head changeset (ADVANCED)')), | |
|
1585 | 1611 | ('i', 'interactive', None, _('use interactive mode')), |
|
1586 | 1612 | ] + walkopts + commitopts + commitopts2 + subrepoopts, |
|
1587 | 1613 | _('[OPTION]... [FILE]...'), |
@@ -1669,11 +1695,19 b' def _docommit(ui, repo, *pats, **opts):' | |||
|
1669 | 1695 | bheads = repo.branchheads(branch) |
|
1670 | 1696 | |
|
1671 | 1697 | extra = {} |
|
1672 | if opts.get('close_branch'): | |
|
1698 | if opts.get('close_branch') or opts.get('force_close_branch'): | |
|
1673 | 1699 | extra['close'] = '1' |
|
1674 | 1700 | |
|
1675 | if not bheads: | |
|
1676 | raise error.Abort(_('can only close branch heads')) |
|
1701 | if repo['.'].closesbranch(): | |
|
1702 | raise error.Abort(_('current revision is already a branch closing' | |
|
1703 | ' head')) | |
|
1704 | elif not bheads: | |
|
1705 | raise error.Abort(_('branch "%s" has no heads to close') % branch) | |
|
1706 | elif (branch == repo['.'].branch() and repo['.'].node() not in bheads | |
|
1707 | and not opts.get('force_close_branch')): | |
|
1708 | hint = _('use --force-close-branch to close branch from a non-head' | |
|
1709 | ' changeset') | |
|
1710 | raise error.Abort(_('can only close branch heads'), hint=hint) | |
|
1677 | 1711 | elif opts.get('amend'): |
|
1678 | 1712 | if (repo['.'].p1().branch() != branch and |
|
1679 | 1713 | repo['.'].p2().branch() != branch): |
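The new --force-close-branch validation falls through its cases in order: an already-closed head, a branch with no heads, a non-head changeset without --force-close-branch, and finally success. The same ordering as a small predicate (a simplified sketch; message texts abbreviated):

    def checkclose(already_closed, bheads, on_head, force):
        if already_closed:
            return 'abort: already a branch closing head'
        if not bheads:
            return 'abort: branch has no heads to close'
        if not on_head and not force:
            return 'abort: can only close branch heads'
        return 'ok'

    assert checkclose(False, ['h1'], True, False) == 'ok'
    assert checkclose(False, ['h1'], False, True) == 'ok'   # forced close
    assert checkclose(True, ['h1'], True, False).startswith('abort: already')
    assert checkclose(False, [], True, False).startswith('abort: branch')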
@@ -1732,6 +1766,10 b' def _docommit(ui, repo, *pats, **opts):' | |||
|
1732 | 1766 | |
|
1733 | 1767 | cmdutil.commitstatus(repo, node, branch, bheads, opts) |
|
1734 | 1768 | |
|
1769 | if not ui.quiet and ui.configbool('commands', 'commit.post-status'): | |
|
1770 | status(ui, repo, modified=True, added=True, removed=True, deleted=True, | |
|
1771 | unknown=True, subrepos=opts.get('subrepos')) | |
|
1772 | ||
|
1735 | 1773 | @command('config|showconfig|debugconfig', |
|
1736 | 1774 | [('u', 'untrusted', None, _('show untrusted configuration options')), |
|
1737 | 1775 | ('e', 'edit', None, _('edit user config')), |
@@ -1853,6 +1891,30 b' def config(ui, repo, *values, **opts):' | |||
|
1853 | 1891 | return 0 |
|
1854 | 1892 | return 1 |
|
1855 | 1893 | |
|
1894 | @command('continue', | |
|
1895 | dryrunopts, helpcategory=command.CATEGORY_CHANGE_MANAGEMENT, | |
|
1896 | helpbasic=True) | |
|
1897 | def continuecmd(ui, repo, **opts): | |
|
1898 | """resumes an interrupted operation (EXPERIMENTAL) | |
|
1899 | ||
|
1900 | Finishes a multistep operation like graft, histedit, rebase, merge, |
|
1901 | or unshelve if it is in an interrupted state. |
|
1902 | ||
|
1903 | Use --dry-run/-n to perform a dry run of the command. |
|
1904 | """ | |
|
1905 | dryrun = opts.get(r'dry_run') | |
|
1906 | contstate = cmdutil.getunfinishedstate(repo) | |
|
1907 | if not contstate: | |
|
1908 | raise error.Abort(_('no operation in progress')) | |
|
1909 | if not contstate.continuefunc: | |
|
1910 | raise error.Abort((_("%s in progress but does not support " | |
|
1911 | "'hg continue'") % (contstate._opname)), | |
|
1912 | hint=contstate.continuemsg()) | |
|
1913 | if dryrun: | |
|
1914 | ui.status(_('%s in progress, will be resumed\n') % (contstate._opname)) | |
|
1915 | return | |
|
1916 | return contstate.continuefunc(ui, repo) | |
|
1917 | ||
|
1856 | 1918 | @command('copy|cp', |
|
1857 | 1919 | [('A', 'after', None, _('record a copy that has already occurred')), |
|
1858 | 1920 | ('f', 'force', None, _('forcibly copy over an existing managed file')), |
@@ -2449,14 +2511,14 b' def _dograft(ui, repo, *revs, **opts):' | |||
|
2449 | 2511 | opts.get('currentuser'), opts.get('rev'))): |
|
2450 | 2512 | raise error.Abort(_("cannot specify any other flag with '--abort'")) |
|
2451 | 2513 | |
|
2452 | return _abortgraft(ui, repo, graftstate) |
|
2514 | return cmdutil.abortgraft(ui, repo, graftstate) | |
|
2453 | 2515 | elif opts.get('continue'): |
|
2454 | 2516 | cont = True |
|
2455 | 2517 | if revs: |
|
2456 | 2518 | raise error.Abort(_("can't specify --continue and revisions")) |
|
2457 | 2519 | # read in unfinished revisions |
|
2458 | 2520 | if graftstate.exists(): |
|
2459 | statedata = _readgraftstate(repo, graftstate) |
|
2521 | statedata = cmdutil.readgraftstate(repo, graftstate) | |
|
2460 | 2522 | if statedata.get('date'): |
|
2461 | 2523 | opts['date'] = statedata['date'] |
|
2462 | 2524 | if statedata.get('user'): |
@@ -2626,69 +2688,6 b' def _dograft(ui, repo, *revs, **opts):' | |||
|
2626 | 2688 | |
|
2627 | 2689 | return 0 |
|
2628 | 2690 | |
|
2629 | def _abortgraft(ui, repo, graftstate): | |
|
2630 | """abort the interrupted graft and rollbacks to the state before interrupted | |
|
2631 | graft""" | |
|
2632 | if not graftstate.exists(): | |
|
2633 | raise error.Abort(_("no interrupted graft to abort")) | |
|
2634 | statedata = _readgraftstate(repo, graftstate) | |
|
2635 | newnodes = statedata.get('newnodes') | |
|
2636 | if newnodes is None: | |
|
2637 | # and old graft state which does not have all the data required to abort | |
|
2638 | # the graft | |
|
2639 | raise error.Abort(_("cannot abort using an old graftstate")) | |
|
2640 | ||
|
2641 | # changeset from which graft operation was started | |
|
2642 | if len(newnodes) > 0: | |
|
2643 | startctx = repo[newnodes[0]].p1() | |
|
2644 | else: | |
|
2645 | startctx = repo['.'] | |
|
2646 | # whether to strip or not | |
|
2647 | cleanup = False | |
|
2648 | if newnodes: | |
|
2649 | newnodes = [repo[r].rev() for r in newnodes] | |
|
2650 | cleanup = True | |
|
2651 | # checking that none of the newnodes turned public or is public | |
|
2652 | immutable = [c for c in newnodes if not repo[c].mutable()] | |
|
2653 | if immutable: | |
|
2654 | repo.ui.warn(_("cannot clean up public changesets %s\n") | |
|
2655 | % ', '.join(bytes(repo[r]) for r in immutable), | |
|
2656 | hint=_("see 'hg help phases' for details")) | |
|
2657 | cleanup = False | |
|
2658 | ||
|
2659 | # checking that no new nodes are created on top of grafted revs | |
|
2660 | desc = set(repo.changelog.descendants(newnodes)) | |
|
2661 | if desc - set(newnodes): | |
|
2662 | repo.ui.warn(_("new changesets detected on destination " | |
|
2663 | "branch, can't strip\n")) | |
|
2664 | cleanup = False | |
|
2665 | ||
|
2666 | if cleanup: | |
|
2667 | with repo.wlock(), repo.lock(): | |
|
2668 | hg.updaterepo(repo, startctx.node(), overwrite=True) | |
|
2669 | # stripping the new nodes created | |
|
2670 | strippoints = [c.node() for c in repo.set("roots(%ld)", | |
|
2671 | newnodes)] | |
|
2672 | repair.strip(repo.ui, repo, strippoints, backup=False) | |
|
2673 | ||
|
2674 | if not cleanup: | |
|
2675 | # we don't update to the startnode if we can't strip | |
|
2676 | startctx = repo['.'] | |
|
2677 | hg.updaterepo(repo, startctx.node(), overwrite=True) | |
|
2678 | ||
|
2679 | ui.status(_("graft aborted\n")) | |
|
2680 | ui.status(_("working directory is now at %s\n") % startctx.hex()[:12]) | |
|
2681 | graftstate.delete() | |
|
2682 | return 0 | |
|
2683 | ||
|
2684 | def _readgraftstate(repo, graftstate): | |
|
2685 | """read the graft state file and return a dict of the data stored in it""" | |
|
2686 | try: | |
|
2687 | return graftstate.read() | |
|
2688 | except error.CorruptedState: | |
|
2689 | nodes = repo.vfs.read('graftstate').splitlines() | |
|
2690 | return {'nodes': nodes} | |
|
2691 | ||
|
2692 | 2691 | def _stopgraft(ui, repo, graftstate): |
|
2693 | 2692 | """stop the interrupted graft""" |
|
2694 | 2693 | if not graftstate.exists(): |
@@ -2700,6 +2699,12 b' def _stopgraft(ui, repo, graftstate):' | |||
|
2700 | 2699 | ui.status(_("working directory is now at %s\n") % pctx.hex()[:12]) |
|
2701 | 2700 | return 0 |
|
2702 | 2701 | |
|
2702 | statemod.addunfinished( | |
|
2703 | 'graft', fname='graftstate', clearable=True, stopflag=True, | |
|
2704 | continueflag=True, abortfunc=cmdutil.hgabortgraft, | |
|
2705 | cmdhint=_("use 'hg graft --continue' or 'hg graft --stop' to stop") | |
|
2706 | ) | |
|
2707 | ||
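This registration closes the loop for graft: the old hard-coded unfinishedstates tuple in cmdutil is replaced by one declarative call, and `hg abort`, `hg continue` and status reporting all read the same entry. Shape of a registration for a hypothetical command (keyword names follow the addunfinished() calls in this patch; everything else is stubbed):

    registry = []

    def addunfinished(opname, fname, **props):
        # commands declare how they are detected, continued and aborted
        registry.append(dict(props, opname=opname, fname=fname))

    addunfinished('frobnicate',              # hypothetical command
                  fname='frobnicatestate',   # its .hg/ state file
                  continueflag=True,
                  cmdmsg='frobnicate in progress')
    assert registry[0]['opname'] == 'frobnicate'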
|
2703 | 2708 | @command('grep', |
|
2704 | 2709 | [('0', 'print0', None, _('end fields with NUL')), |
|
2705 | 2710 | ('', 'all', None, _('print all revisions that match (DEPRECATED) ')), |
@@ -3715,7 +3720,8 b' def locate(ui, repo, *pats, **opts):' | |||
|
3715 | 3720 | _('follow line range of specified file (EXPERIMENTAL)'), |
|
3716 | 3721 | _('FILE,RANGE')), |
|
3717 | 3722 | ('', 'removed', None, _('include revisions where files were removed')), |
|
3718 | ('m', 'only-merges', None, _('show only merges (DEPRECATED)')), |
|
3723 | ('m', 'only-merges', None, | |
|
3724 | _('show only merges (DEPRECATED) (use -r "merge()" instead)')), | |
|
3719 | 3725 | ('u', 'user', [], _('revisions committed by user'), _('USER')), |
|
3720 | 3726 | ('', 'only-branch', [], |
|
3721 | 3727 | _('show only changesets within the given named branch (DEPRECATED)'), |
@@ -3876,12 +3882,12 b' def log(ui, repo, *pats, **opts):' | |||
|
3876 | 3882 | # then filter the result by logcmdutil._makerevset() and --limit |
|
3877 | 3883 | revs, differ = logcmdutil.getlinerangerevs(repo, revs, opts) |
|
3878 | 3884 | |
|
3879 | getrenamed = None |
|
3885 | getcopies = None | |
|
3880 | 3886 | if opts.get('copies'): |
|
3881 | 3887 | endrev = None |
|
3882 | 3888 | if revs: |
|
3883 | 3889 | endrev = revs.max() + 1 |
|
3884 | getrenamed = scmutil.getrenamedfn(repo, endrev=endrev) |
|
3890 | getcopies = scmutil.getcopiesfn(repo, endrev=endrev) | |
|
3885 | 3891 | |
|
3886 | 3892 | ui.pager('log') |
|
3887 | 3893 | displayer = logcmdutil.changesetdisplayer(ui, repo, opts, differ, |
@@ -3890,7 +3896,7 b' def log(ui, repo, *pats, **opts):' | |||
|
3890 | 3896 | displayfn = logcmdutil.displaygraphrevs |
|
3891 | 3897 | else: |
|
3892 | 3898 | displayfn = logcmdutil.displayrevs |
|
3893 | displayfn(ui, repo, revs, displayer, getrenamed) |
|
3899 | displayfn(ui, repo, revs, displayer, getcopies) | |
|
3894 | 3900 | |
|
3895 | 3901 | @command('manifest', |
|
3896 | 3902 | [('r', 'rev', '', _('revision to display'), _('REV')), |
@@ -3983,7 +3989,7 b' def merge(ui, repo, node=None, **opts):' | |||
|
3983 | 3989 | If no revision is specified, the working directory's parent is a |
|
3984 | 3990 | head revision, and the current branch contains exactly one other |
|
3985 | 3991 | head, the other head is merged with by default. Otherwise, an |
|
3986 | explicit revision with which to merge with must be provided. |
|
3992 | explicit revision with which to merge must be provided. | |
|
3987 | 3993 | |
|
3988 | 3994 | See :hg:`help resolve` for information on handling file conflicts. |
|
3989 | 3995 | |
@@ -3999,6 +4005,10 b' def merge(ui, repo, node=None, **opts):' | |||
|
3999 | 4005 | if abort and repo.dirstate.p2() == nullid: |
|
4000 | 4006 | cmdutil.wrongtooltocontinue(repo, _('merge')) |
|
4001 | 4007 | if abort: |
|
4008 | state = cmdutil.getunfinishedstate(repo) | |
|
4009 | if state and state._opname != 'merge': | |
|
4010 | raise error.Abort(_('cannot abort merge with %s in progress') % | |
|
4011 | (state._opname), hint=state.hint()) | |
|
4002 | 4012 | if node: |
|
4003 | 4013 | raise error.Abort(_("cannot specify a node with --abort")) |
|
4004 | 4014 | if opts.get('rev'): |
@@ -4036,6 +4046,14 b' def merge(ui, repo, node=None, **opts):' | |||
|
4036 | 4046 | return hg.merge(repo, node, force=force, mergeforce=force, |
|
4037 | 4047 | labels=labels, abort=abort) |
|
4038 | 4048 | |
|
4049 | statemod.addunfinished( | |
|
4050 | 'merge', fname=None, clearable=True, allowcommit=True, | |
|
4051 | cmdmsg=_('outstanding uncommitted merge'), abortfunc=hg.abortmerge, | |
|
4052 | statushint=_('To continue: hg commit\n' | |
|
4053 | 'To abort: hg merge --abort'), | |
|
4054 | cmdhint=_("use 'hg commit' or 'hg merge --abort'") | |
|
4055 | ) | |
|
4056 | ||
|
4039 | 4057 | @command('outgoing|out', |
|
4040 | 4058 | [('f', 'force', None, _('run even when the destination is unrelated')), |
|
4041 | 4059 | ('r', 'rev', [], |
@@ -4672,7 +4690,7 b' def recover(ui, repo, **opts):' | |||
|
4672 | 4690 | """ |
|
4673 | 4691 | ret = repo.recover() |
|
4674 | 4692 | if ret: |
|
4675 | if opts['verify']: | |
|
4693 | if opts[r'verify']: | |
|
4676 | 4694 | return hg.verify(repo) |
|
4677 | 4695 | else: |
|
4678 | 4696 | msg = _("(verify step skipped, run `hg verify` to check your " |
@@ -5217,16 +5235,30 b' def rollback(ui, repo, **opts):' | |||
|
5217 | 5235 | force=opts.get(r'force')) |
|
5218 | 5236 | |
|
5219 | 5237 | @command( |
|
5220 | 'root', [], intents={INTENT_READONLY}, | |
|
5238 | 'root', [] + formatteropts, intents={INTENT_READONLY}, | |
|
5221 | 5239 | helpcategory=command.CATEGORY_WORKING_DIRECTORY) |
|
5222 | def root(ui, repo): | |
|
5240 | def root(ui, repo, **opts): | |
|
5223 | 5241 | """print the root (top) of the current working directory |
|
5224 | 5242 | |
|
5225 | 5243 | Print the root directory of the current repository. |
|
5226 | 5244 | |
|
5245 | .. container:: verbose | |
|
5246 | ||
|
5247 | Template: | |
|
5248 | ||
|
5249 | The following keywords are supported in addition to the common template | |
|
5250 | keywords and functions. See also :hg:`help templates`. | |
|
5251 | ||
|
5252 | :hgpath: String. Path to the .hg directory. | |
|
5253 | :storepath: String. Path to the directory holding versioned data. | |
|
5254 | ||
|
5227 | 5255 | Returns 0 on success. |
|
5228 | 5256 | """ |
|
5229 | ui.write(repo.root + "\n") | |
|
5257 | opts = pycompat.byteskwargs(opts) | |
|
5258 | with ui.formatter('root', opts) as fm: | |
|
5259 | fm.startitem() | |
|
5260 | fm.write('reporoot', '%s\n', repo.root) | |
|
5261 | fm.data(hgpath=repo.path, storepath=repo.spath) | |
|
5230 | 5262 | |
|
5231 | 5263 | @command('serve', |
|
5232 | 5264 | [('A', 'accesslog', '', _('name of access log file to write to'), |
@@ -5299,6 +5331,106 b' def serve(ui, repo, **opts):' | |||
|
5299 | 5331 | service = server.createservice(ui, repo, opts) |
|
5300 | 5332 | return server.runservice(opts, initfn=service.init, runfn=service.run) |
|
5301 | 5333 | |
|
5334 | @command('shelve', | |
|
5335 | [('A', 'addremove', None, | |
|
5336 | _('mark new/missing files as added/removed before shelving')), | |
|
5337 | ('u', 'unknown', None, | |
|
5338 | _('store unknown files in the shelve')), | |
|
5339 | ('', 'cleanup', None, | |
|
5340 | _('delete all shelved changes')), | |
|
5341 | ('', 'date', '', | |
|
5342 | _('shelve with the specified commit date'), _('DATE')), | |
|
5343 | ('d', 'delete', None, | |
|
5344 | _('delete the named shelved change(s)')), | |
|
5345 | ('e', 'edit', False, | |
|
5346 | _('invoke editor on commit messages')), | |
|
5347 | ('k', 'keep', False, | |
|
5348 | _('shelve, but keep changes in the working directory')), | |
|
5349 | ('l', 'list', None, | |
|
5350 | _('list current shelves')), | |
|
5351 | ('m', 'message', '', | |
|
5352 | _('use text as shelve message'), _('TEXT')), | |
|
5353 | ('n', 'name', '', | |
|
5354 | _('use the given name for the shelved commit'), _('NAME')), | |
|
5355 | ('p', 'patch', None, | |
|
5356 | _('output patches for changes (provide the names of the shelved ' | |
|
5357 | 'changes as positional arguments)')), | |
|
5358 | ('i', 'interactive', None, | |
|
5359 | _('interactive mode')), | |
|
5360 | ('', 'stat', None, | |
|
5361 | _('output diffstat-style summary of changes (provide the names of ' | |
|
5362 | 'the shelved changes as positional arguments)') | |
|
5363 | )] + cmdutil.walkopts, | |
|
5364 | _('hg shelve [OPTION]... [FILE]...'), | |
|
5365 | helpcategory=command.CATEGORY_WORKING_DIRECTORY) | |
|
5366 | def shelve(ui, repo, *pats, **opts): | |
|
5367 | '''save and set aside changes from the working directory | |
|
5368 | ||
|
5369 | Shelving takes files that "hg status" reports as not clean, saves | |
|
5370 | the modifications to a bundle (a shelved change), and reverts the | |
|
5371 | files so that their state in the working directory becomes clean. | |
|
5372 | ||
|
5373 | To restore these changes to the working directory, use "hg |
|
5374 | unshelve"; this will work even if you switch to a different | |
|
5375 | commit. | |
|
5376 | ||
|
5377 | When no files are specified, "hg shelve" saves all not-clean | |
|
5378 | files. If specific files or directories are named, only changes to | |
|
5379 | those files are shelved. | |
|
5380 | ||
|
5381 | In a bare shelve (no files specified and no --interactive, --include, |
|
5382 | or --exclude options), shelving records whether the working directory |
|
5383 | was on a newly created branch, i.e. on a different branch than its |
|
5384 | first parent. In this situation, unshelving restores that branch |
|
5385 | information to the working directory. |
|
5386 | ||
|
5387 | Each shelved change has a name that makes it easier to find later. | |
|
5388 | The name of a shelved change defaults to being based on the active | |
|
5389 | bookmark, or if there is no active bookmark, the current named | |
|
5390 | branch. To specify a different name, use ``--name``. | |
|
5391 | ||
|
5392 | To see a list of existing shelved changes, use the ``--list`` | |
|
5393 | option. For each shelved change, this will print its name, age, | |
|
5394 | and description; use ``--patch`` or ``--stat`` for more details. | |
|
5395 | ||
|
5396 | To delete specific shelved changes, use ``--delete``. To delete | |
|
5397 | all shelved changes, use ``--cleanup``. | |
|
5398 | ''' | |
|
5399 | opts = pycompat.byteskwargs(opts) | |
|
5400 | allowables = [ | |
|
5401 | ('addremove', {'create'}), # 'create' is pseudo action | |
|
5402 | ('unknown', {'create'}), | |
|
5403 | ('cleanup', {'cleanup'}), | |
|
5404 | # ('date', {'create'}), # ignored for passing '--date "0 0"' in tests | |
|
5405 | ('delete', {'delete'}), | |
|
5406 | ('edit', {'create'}), | |
|
5407 | ('keep', {'create'}), | |
|
5408 | ('list', {'list'}), | |
|
5409 | ('message', {'create'}), | |
|
5410 | ('name', {'create'}), | |
|
5411 | ('patch', {'patch', 'list'}), | |
|
5412 | ('stat', {'stat', 'list'}), | |
|
5413 | ] | |
|
5414 | def checkopt(opt): | |
|
5415 | if opts.get(opt): | |
|
5416 | for i, allowable in allowables: | |
|
5417 | if opts[i] and opt not in allowable: | |
|
5418 | raise error.Abort(_("options '--%s' and '--%s' may not be " | |
|
5419 | "used together") % (opt, i)) | |
|
5420 | return True | |
|
5421 | if checkopt('cleanup'): | |
|
5422 | if pats: | |
|
5423 | raise error.Abort(_("cannot specify names when using '--cleanup'")) | |
|
5424 | return shelvemod.cleanupcmd(ui, repo) | |
|
5425 | elif checkopt('delete'): | |
|
5426 | return shelvemod.deletecmd(ui, repo, pats) | |
|
5427 | elif checkopt('list'): | |
|
5428 | return shelvemod.listcmd(ui, repo, pats, opts) | |
|
5429 | elif checkopt('patch') or checkopt('stat'): | |
|
5430 | return shelvemod.patchcmds(ui, repo, pats, opts) | |
|
5431 | else: | |
|
5432 | return shelvemod.createcmd(ui, repo, pats, opts) | |
|
5433 | ||
|
5302 | 5434 | _NOTTERSE = 'nothing' |
|
5303 | 5435 | |
|
5304 | 5436 | @command('status|st', |
@@ -6027,6 +6159,68 b' def unbundle(ui, repo, fname1, *fnames, ' | |||
|
6027 | 6159 | |
|
6028 | 6160 | return postincoming(ui, repo, modheads, opts.get(r'update'), None, None) |
|
6029 | 6161 | |
|
6162 | @command('unshelve', | |
|
6163 | [('a', 'abort', None, | |
|
6164 | _('abort an incomplete unshelve operation')), | |
|
6165 | ('c', 'continue', None, | |
|
6166 | _('continue an incomplete unshelve operation')), | |
|
6167 | ('i', 'interactive', None, | |
|
6168 | _('use interactive mode (EXPERIMENTAL)')), | |
|
6169 | ('k', 'keep', None, | |
|
6170 | _('keep shelve after unshelving')), | |
|
6171 | ('n', 'name', '', | |
|
6172 | _('restore shelved change with given name'), _('NAME')), | |
|
6173 | ('t', 'tool', '', _('specify merge tool')), | |
|
6174 | ('', 'date', '', | |
|
6175 | _('set date for temporary commits (DEPRECATED)'), _('DATE'))], | |
|
6176 | _('hg unshelve [OPTION]... [FILE]... [-n SHELVED]'), | |
|
6177 | helpcategory=command.CATEGORY_WORKING_DIRECTORY) | |
|
6178 | def unshelve(ui, repo, *shelved, **opts): | |
|
6179 | """restore a shelved change to the working directory | |
|
6180 | ||
|
6181 | This command accepts an optional name of a shelved change to | |
|
6182 | restore. If none is given, the most recent shelved change is used. | |
|
6183 | ||
|
6184 | If a shelved change is applied successfully, the bundle that | |
|
6185 | contains the shelved changes is moved to a backup location | |
|
6186 | (.hg/shelve-backup). | |
|
6187 | ||
|
6188 | Since you can restore a shelved change on top of an arbitrary | |
|
6189 | commit, it is possible that unshelving will result in a conflict | |
|
6190 | between your changes and the commits you are unshelving onto. If | |
|
6191 | this occurs, you must resolve the conflict, then use | |
|
6192 | ``--continue`` to complete the unshelve operation. (The bundle | |
|
6193 | will not be moved until you successfully complete the unshelve.) | |
|
6194 | ||
|
6195 | (Alternatively, you can use ``--abort`` to abandon an unshelve | |
|
6196 | that causes a conflict. This reverts the unshelved changes, and | |
|
6197 | leaves the bundle in place.) | |
|
6198 | ||
|
6199 | If a bare shelved change (no files specified and no --interactive, |
|
6200 | --include, or --exclude options) was made on a newly created branch, |
|
6201 | unshelving restores that branch information to the working directory. |
|
6202 | ||
|
6203 | After a successful unshelve, the shelved changes are stored in a | |
|
6204 | backup directory. Only the N most recent backups are kept. N | |
|
6205 | defaults to 10 but can be overridden using the ``shelve.maxbackups`` | |
|
6206 | configuration option. | |
|
6207 | ||
|
6208 | .. container:: verbose | |
|
6209 | ||
|
6210 | Timestamps in seconds decide the order of backups; if identical |
|
6211 | timestamps make the exact order ambiguous, more than ``maxbackups`` |
|
6212 | backups may be kept, for safety. |
|
6213 | """ | |
|
6214 | with repo.wlock(): | |
|
6215 | return shelvemod.dounshelve(ui, repo, *shelved, **opts) | |
|
6216 | ||
|
6217 | statemod.addunfinished( | |
|
6218 | 'unshelve', fname='shelvedstate', continueflag=True, | |
|
6219 | abortfunc=shelvemod.hgabortunshelve, | |
|
6220 | continuefunc=shelvemod.hgcontinueunshelve, | |
|
6221 | cmdmsg=_('unshelve already in progress'), | |
|
6222 | ) | |
|
6223 | ||
|
6030 | 6224 | @command('update|up|checkout|co', |
|
6031 | 6225 | [('C', 'clean', None, _('discard uncommitted changes (no backup)')), |
|
6032 | 6226 | ('c', 'check', None, _('require clean working directory')), |
@@ -6123,7 +6317,6 b' def update(ui, repo, node=None, **opts):' | |||
|
6123 | 6317 | |
|
6124 | 6318 | with repo.wlock(): |
|
6125 | 6319 | cmdutil.clearunfinished(repo) |
|
6126 | ||
|
6127 | 6320 | if date: |
|
6128 | 6321 | rev = cmdutil.finddate(ui, repo, date) |
|
6129 | 6322 | |
@@ -6147,8 +6340,10 b' def update(ui, repo, node=None, **opts):' | |||
|
6147 | 6340 | ui.warn("(%s)\n" % obsfatemsg) |
|
6148 | 6341 | return ret |
|
6149 | 6342 | |
|
6150 | @command('verify', [], helpcategory=command.CATEGORY_MAINTENANCE) | |
|
6151 | def verify(ui, repo): | |
|
6343 | @command('verify', | |
|
6344 | [('', 'full', False, _('perform more checks (EXPERIMENTAL)'))], |
|
6345 | helpcategory=command.CATEGORY_MAINTENANCE) | |
|
6346 | def verify(ui, repo, **opts): | |
|
6152 | 6347 | """verify the integrity of the repository |
|
6153 | 6348 | |
|
6154 | 6349 | Verify the integrity of the current repository. |
@@ -6164,7 +6359,12 b' def verify(ui, repo):' | |||
|
6164 | 6359 | |
|
6165 | 6360 | Returns 0 on success, 1 if errors are encountered. |
|
6166 | 6361 | """ |
|
6167 | return hg.verify(repo) | |
|
6362 | opts = pycompat.byteskwargs(opts) | |
|
6363 | ||
|
6364 | level = None | |
|
6365 | if opts['full']: | |
|
6366 | level = verifymod.VERIFY_FULL | |
|
6367 | return hg.verify(repo, level) | |
|
6168 | 6368 | |
|
6169 | 6369 | @command( |
|
6170 | 6370 | 'version', [] + formatteropts, helpcategory=command.CATEGORY_HELP, |
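--full maps onto an optional verification level handed down to hg.verify(); the constant lives in the verify module (imported above as verifymod). Plumbing sketch with an assumed constant value:

    VERIFY_FULL = 1          # assumed value; the real constant is
                             # verifymod.VERIFY_FULL

    def verifylevel(opts):
        return VERIFY_FULL if opts.get('full') else None

    assert verifylevel({'full': True}) == VERIFY_FULL
    assert verifylevel({'full': False}) is None     # default checks only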
@@ -6233,16 +6433,6 b' def version_(ui, **opts):' | |||
|
6233 | 6433 | def loadcmdtable(ui, name, cmdtable): |
|
6234 | 6434 | """Load command functions from specified cmdtable |
|
6235 | 6435 | """ |
|
6236 | cmdtable = cmdtable.copy() | |
|
6237 | for cmd in list(cmdtable): | |
|
6238 | if not cmd.startswith('^'): | |
|
6239 | continue | |
|
6240 | ui.deprecwarn("old-style command registration '%s' in extension '%s'" | |
|
6241 | % (cmd, name), '4.8') | |
|
6242 | entry = cmdtable.pop(cmd) | |
|
6243 | entry[0].helpbasic = True | |
|
6244 | cmdtable[cmd[1:]] = entry | |
|
6245 | ||
|
6246 | 6436 | overrides = [cmd for cmd in cmdtable if cmd in table] |
|
6247 | 6437 | if overrides: |
|
6248 | 6438 | ui.warn(_("extension '%s' overrides commands: %s\n") |
@@ -202,6 +202,9 b" coreconfigitem('color', 'pagermode'," | |||
|
202 | 202 | default=dynamicdefault, |
|
203 | 203 | ) |
|
204 | 204 | _registerdiffopts(section='commands', configprefix='commit.interactive.') |
|
205 | coreconfigitem('commands', 'commit.post-status', | |
|
206 | default=False, | |
|
207 | ) | |
|
205 | 208 | coreconfigitem('commands', 'grep.all-files', |
|
206 | 209 | default=False, |
|
207 | 210 | ) |
@@ -288,6 +291,9 b" coreconfigitem('convert', 'hg.clonebranc" | |||
|
288 | 291 | coreconfigitem('convert', 'hg.ignoreerrors', |
|
289 | 292 | default=False, |
|
290 | 293 | ) |
|
294 | coreconfigitem('convert', 'hg.preserve-hash', | |
|
295 | default=False, | |
|
296 | ) | |
|
291 | 297 | coreconfigitem('convert', 'hg.revs', |
|
292 | 298 | default=None, |
|
293 | 299 | ) |
@@ -526,12 +532,22 b" coreconfigitem('experimental', 'evolutio" | |||
|
526 | 532 | coreconfigitem('experimental', 'evolution.bundle-obsmarker', |
|
527 | 533 | default=False, |
|
528 | 534 | ) |
|
535 | coreconfigitem('experimental', 'log.topo', | |
|
536 | default=False, | |
|
537 | ) | |
|
529 | 538 | coreconfigitem('experimental', 'evolution.report-instabilities', |
|
530 | 539 | default=True, |
|
531 | 540 | ) |
|
532 | 541 | coreconfigitem('experimental', 'evolution.track-operation', |
|
533 | 542 | default=True, |
|
534 | 543 | ) |
|
544 | # repo-level config to exclude a revset visibility | |
|
545 | # | |
|
546 | # The target use case is to use `share` to expose different subset of the same | |
|
547 | # repository, especially server side. See also `server.view`. | |
|
548 | coreconfigitem('experimental', 'extra-filter-revs', | |
|
549 | default=None, | |
|
550 | ) | |
|
535 | 551 | coreconfigitem('experimental', 'maxdeltachainspan', |
|
536 | 552 | default=-1, |
|
537 | 553 | ) |
@@ -663,6 +679,9 b" coreconfigitem('extdata', '.*'," | |||
|
663 | 679 | default=None, |
|
664 | 680 | generic=True, |
|
665 | 681 | ) |
|
682 | coreconfigitem('format', 'bookmarks-in-store', | |
|
683 | default=False, | |
|
684 | ) | |
|
666 | 685 | coreconfigitem('format', 'chunkcachesize', |
|
667 | 686 | default=None, |
|
668 | 687 | ) |
@@ -931,6 +950,9 b" coreconfigitem('profiling', 'showmax'," | |||
|
931 | 950 | coreconfigitem('profiling', 'showmin', |
|
932 | 951 | default=dynamicdefault, |
|
933 | 952 | ) |
|
953 | coreconfigitem('profiling', 'showtime', | |
|
954 | default=True, | |
|
955 | ) | |
|
934 | 956 | coreconfigitem('profiling', 'sort', |
|
935 | 957 | default='inlinetime', |
|
936 | 958 | ) |
@@ -1072,6 +1094,9 b" coreconfigitem('share', 'pool'," | |||
|
1072 | 1094 | coreconfigitem('share', 'poolnaming', |
|
1073 | 1095 | default='identity', |
|
1074 | 1096 | ) |
|
1097 | coreconfigitem('shelve','maxbackups', | |
|
1098 | default=10, | |
|
1099 | ) | |
|
1075 | 1100 | coreconfigitem('smtp', 'host', |
|
1076 | 1101 | default=None, |
|
1077 | 1102 | ) |
@@ -272,6 +272,30 b' class basectx(object):' | |||
|
272 | 272 | except error.LookupError: |
|
273 | 273 | return '' |
|
274 | 274 | |
|
275 | @propertycache | |
|
276 | def _copies(self): | |
|
277 | p1copies = {} | |
|
278 | p2copies = {} | |
|
279 | p1 = self.p1() | |
|
280 | p2 = self.p2() | |
|
281 | narrowmatch = self._repo.narrowmatch() | |
|
282 | for dst in self.files(): | |
|
283 | if not narrowmatch(dst) or dst not in self: | |
|
284 | continue | |
|
285 | copied = self[dst].renamed() | |
|
286 | if not copied: | |
|
287 | continue | |
|
288 | src, srcnode = copied | |
|
289 | if src in p1 and p1[src].filenode() == srcnode: | |
|
290 | p1copies[dst] = src | |
|
291 | elif src in p2 and p2[src].filenode() == srcnode: | |
|
292 | p2copies[dst] = src | |
|
293 | return p1copies, p2copies | |
|
294 | def p1copies(self): | |
|
295 | return self._copies[0] | |
|
296 | def p2copies(self): | |
|
297 | return self._copies[1] | |
|
298 | ||
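Hoisting _copies into basectx gives every context class the same filelog-based fallback: each changed file is asked for its rename metadata, and the copy is bucketed by which parent actually contains the source at that node. A toy model of the bucketing (invented data):

    renames = {'new.txt': 'old.txt'}        # dst -> src
    p1_files = {'old.txt'}                  # files present in parent 1
    p2_files = set()                        # files present in parent 2

    p1copies, p2copies = {}, {}
    for dst, src in renames.items():
        if src in p1_files:
            p1copies[dst] = src
        elif src in p2_files:
            p2copies[dst] = src
    assert p1copies == {'new.txt': 'old.txt'} and p2copies == {}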
|
275 | 299 | def sub(self, path, allowcreate=True): |
|
276 | 300 | '''return a subrepo for the stored revision of path, never wdir()''' |
|
277 | 301 | return subrepo.subrepo(self, path, allowcreate=allowcreate) |
@@ -439,6 +463,36 b' class changectx(basectx):' | |||
|
439 | 463 | return self._changeset.date |
|
440 | 464 | def files(self): |
|
441 | 465 | return self._changeset.files |
|
466 | def filesmodified(self): | |
|
467 | modified = set(self.files()) | |
|
468 | modified.difference_update(self.filesadded()) | |
|
469 | modified.difference_update(self.filesremoved()) | |
|
470 | return sorted(modified) | |
|
471 | def filesadded(self): | |
|
472 | source = self._repo.ui.config('experimental', 'copies.read-from') | |
|
473 | if (source == 'changeset-only' or | |
|
474 | (source == 'compatibility' and | |
|
475 | self._changeset.filesadded is not None)): | |
|
476 | return self._changeset.filesadded or [] | |
|
477 | ||
|
478 | added = [] | |
|
479 | for f in self.files(): | |
|
480 | if not any(f in p for p in self.parents()): | |
|
481 | added.append(f) | |
|
482 | return added | |
|
483 | def filesremoved(self): | |
|
484 | source = self._repo.ui.config('experimental', 'copies.read-from') | |
|
485 | if (source == 'changeset-only' or | |
|
486 | (source == 'compatibility' and | |
|
487 | self._changeset.filesremoved is not None)): | |
|
488 | return self._changeset.filesremoved or [] | |
|
489 | ||
|
490 | removed = [] | |
|
491 | for f in self.files(): | |
|
492 | if f not in self: | |
|
493 | removed.append(f) | |
|
494 | return removed | |
|
495 | ||
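filesadded()/filesremoved() apply the same source-selection rule as the copy readers: 'changeset-only' always trusts the (possibly empty) changeset extras, while 'compatibility' uses them only when the changeset actually recorded them and otherwise recomputes from filelogs. The rule, isolated:

    def usechangesetdata(source, stored):
        return (source == 'changeset-only' or
                (source == 'compatibility' and stored is not None))

    assert usechangesetdata('changeset-only', None)
    assert not usechangesetdata('compatibility', None)   # filelog fallback
    assert usechangesetdata('compatibility', ['a'])
    assert not usechangesetdata('filelog-only', ['a'])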
|
442 | 496 | @propertycache |
|
443 | 497 | def _copies(self): |
|
444 | 498 | source = self._repo.ui.config('experimental', 'copies.read-from') |
@@ -456,27 +510,7 b' class changectx(basectx):' | |||
|
456 | 510 | # Otherwise (config said to read only from filelog, or we are in |
|
457 | 511 | # compatiblity mode and there is not data in the changeset), we get |
|
458 | 512 | # the copy metadata from the filelogs. |
|
459 | p1copies = {} | |
|
460 | p2copies = {} | |
|
461 | p1 = self.p1() | |
|
462 | p2 = self.p2() | |
|
463 | narrowmatch = self._repo.narrowmatch() | |
|
464 | for dst in self.files(): | |
|
465 | if not narrowmatch(dst) or dst not in self: | |
|
466 | continue | |
|
467 | copied = self[dst].renamed() | |
|
468 | if not copied: | |
|
469 | continue | |
|
470 | src, srcnode = copied | |
|
471 | if src in p1 and p1[src].filenode() == srcnode: | |
|
472 | p1copies[dst] = src | |
|
473 | elif src in p2 and p2[src].filenode() == srcnode: | |
|
474 | p2copies[dst] = src | |
|
475 | return p1copies, p2copies | |
|
476 | def p1copies(self): | |
|
477 | return self._copies[0] | |
|
478 | def p2copies(self): | |
|
479 | return self._copies[1] | |
|
513 | return super(changectx, self)._copies | |
|
480 | 514 | def description(self): |
|
481 | 515 | return self._changeset.description |
|
482 | 516 | def branch(self): |
@@ -1098,7 +1132,7 b' class committablectx(basectx):' | |||
|
1098 | 1132 | """A committablectx object provides common functionality for a context that |
|
1099 | 1133 | wants the ability to commit, e.g. workingctx or memctx.""" |
|
1100 | 1134 | def __init__(self, repo, text="", user=None, date=None, extra=None, |
|
1101 | changes=None): | |
|
1135 | changes=None, branch=None): | |
|
1102 | 1136 | super(committablectx, self).__init__(repo) |
|
1103 | 1137 | self._rev = None |
|
1104 | 1138 | self._node = None |
@@ -1113,13 +1147,9 b' class committablectx(basectx):' | |||
|
1113 | 1147 | self._extra = {} |
|
1114 | 1148 | if extra: |
|
1115 | 1149 | self._extra = extra.copy() |
|
1116 | if 'branch' not in self._extra: |
|
1117 | try: | |
|
1118 | branch = encoding.fromlocal(self._repo.dirstate.branch()) | |
|
1119 | except UnicodeDecodeError: | |
|
1120 | raise error.Abort(_('branch name not in UTF-8!')) | |
|
1121 | self._extra['branch'] = branch | |
|
1122 | if self._extra['branch'] == '': | |
|
1150 | if branch is not None: | |
|
1151 | self._extra['branch'] = encoding.fromlocal(branch) | |
|
1152 | if not self._extra.get('branch'): | |
|
1123 | 1153 | self._extra['branch'] = 'default' |
|
1124 | 1154 | |
|
1125 | 1155 | def __bytes__(self): |
@@ -1132,42 +1162,6 b' class committablectx(basectx):' | |||
|
1132 | 1162 | |
|
1133 | 1163 | __bool__ = __nonzero__ |
|
1134 | 1164 | |
|
1135 | def _buildflagfunc(self): | |
|
1136 | # Create a fallback function for getting file flags when the | |
|
1137 | # filesystem doesn't support them | |
|
1138 | ||
|
1139 | copiesget = self._repo.dirstate.copies().get | |
|
1140 | parents = self.parents() | |
|
1141 | if len(parents) < 2: | |
|
1142 | # when we have one parent, it's easy: copy from parent | |
|
1143 | man = parents[0].manifest() | |
|
1144 | def func(f): | |
|
1145 | f = copiesget(f, f) | |
|
1146 | return man.flags(f) | |
|
1147 | else: | |
|
1148 | # merges are tricky: we try to reconstruct the unstored | |
|
1149 | # result from the merge (issue1802) | |
|
1150 | p1, p2 = parents | |
|
1151 | pa = p1.ancestor(p2) | |
|
1152 | m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest() | |
|
1153 | ||
|
1154 | def func(f): | |
|
1155 | f = copiesget(f, f) # may be wrong for merges with copies | |
|
1156 | fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f) | |
|
1157 | if fl1 == fl2: | |
|
1158 | return fl1 | |
|
1159 | if fl1 == fla: | |
|
1160 | return fl2 | |
|
1161 | if fl2 == fla: | |
|
1162 | return fl1 | |
|
1163 | return '' # punt for conflicts | |
|
1164 | ||
|
1165 | return func | |
|
1166 | ||
|
1167 | @propertycache | |
|
1168 | def _flagfunc(self): | |
|
1169 | return self._repo.dirstate.flagfunc(self._buildflagfunc) | |
|
1170 | ||
|
1171 | 1165 | @propertycache |
|
1172 | 1166 | def _status(self): |
|
1173 | 1167 | return self._repo.status() |
@@ -1206,26 +1200,10 b' class committablectx(basectx):' | |||
|
1206 | 1200 | return self._status.removed |
|
1207 | 1201 | def deleted(self): |
|
1208 | 1202 | return self._status.deleted |
|
1209 | @propertycache | |
|
1210 | def _copies(self): | |
|
1211 | p1copies = {} | |
|
1212 | p2copies = {} | |
|
1213 | parents = self._repo.dirstate.parents() | |
|
1214 | p1manifest = self._repo[parents[0]].manifest() | |
|
1215 | p2manifest = self._repo[parents[1]].manifest() | |
|
1216 | narrowmatch = self._repo.narrowmatch() | |
|
1217 | for dst, src in self._repo.dirstate.copies().items(): | |
|
1218 | if not narrowmatch(dst): | |
|
1219 | continue | |
|
1220 | if src in p1manifest: | |
|
1221 | p1copies[dst] = src | |
|
1222 | elif src in p2manifest: | |
|
1223 | p2copies[dst] = src | |
|
1224 | return p1copies, p2copies | |
|
1225 | def p1copies(self): | |
|
1226 | return self._copies[0] | |
|
1227 | def p2copies(self): | |
|
1228 | return self._copies[1] | |
|
1203 | filesmodified = modified | |
|
1204 | filesadded = added | |
|
1205 | filesremoved = removed | |
|
1206 | ||
|
1229 | 1207 | def branch(self): |
|
1230 | 1208 | return encoding.tolocal(self._extra['branch']) |
|
1231 | 1209 | def closesbranch(self): |
@@ -1257,33 +1235,10 b' class committablectx(basectx):' | |||
|
1257 | 1235 | def children(self): |
|
1258 | 1236 | return [] |
|
1259 | 1237 | |
|
1260 | def flags(self, path): | |
|
1261 | if r'_manifest' in self.__dict__: | |
|
1262 | try: | |
|
1263 | return self._manifest.flags(path) | |
|
1264 | except KeyError: | |
|
1265 | return '' | |
|
1266 | ||
|
1267 | try: | |
|
1268 | return self._flagfunc(path) | |
|
1269 | except OSError: | |
|
1270 | return '' | |
|
1271 | ||
|
1272 | 1238 | def ancestor(self, c2): |
|
1273 | 1239 | """return the "best" ancestor context of self and c2""" |
|
1274 | 1240 | return self._parents[0].ancestor(c2) # punt on two parents for now |
|
1275 | 1241 | |
|
1276 | def walk(self, match): | |
|
1277 | '''Generates matching file names.''' | |
|
1278 | return sorted(self._repo.dirstate.walk(self._repo.narrowmatch(match), | |
|
1279 | subrepos=sorted(self.substate), | |
|
1280 | unknown=True, ignored=False)) | |
|
1281 | ||
|
1282 | def matches(self, match): | |
|
1283 | match = self._repo.narrowmatch(match) | |
|
1284 | ds = self._repo.dirstate | |
|
1285 | return sorted(f for f in ds.matches(match) if ds[f] != 'r') | |
|
1286 | ||
|
1287 | 1242 | def ancestors(self): |
|
1288 | 1243 | for p in self._parents: |
|
1289 | 1244 | yield p |
@@ -1301,18 +1256,6 b' class committablectx(basectx):' | |||
|
1301 | 1256 | |
|
1302 | 1257 | """ |
|
1303 | 1258 | |
|
1304 | with self._repo.dirstate.parentchange(): | |
|
1305 | for f in self.modified() + self.added(): | |
|
1306 | self._repo.dirstate.normal(f) | |
|
1307 | for f in self.removed(): | |
|
1308 | self._repo.dirstate.drop(f) | |
|
1309 | self._repo.dirstate.setparents(node) | |
|
1310 | ||
|
1311 | # write changes out explicitly, because nesting wlock at | |
|
1312 | # runtime may prevent 'wlock.release()' in 'repo.commit()' | |
|
1313 | # from immediately doing so for subsequent changing files | |
|
1314 | self._repo.dirstate.write(self._repo.currenttransaction()) | |
|
1315 | ||
|
1316 | 1259 | def dirty(self, missing=False, merge=True, branch=True): |
|
1317 | 1260 | return False |
|
1318 | 1261 | |
@@ -1327,7 +1270,14 b' class workingctx(committablectx):' | |||
|
1327 | 1270 | """ |
|
1328 | 1271 | def __init__(self, repo, text="", user=None, date=None, extra=None, |
|
1329 | 1272 | changes=None): |
|
1330 | super(workingctx, self).__init__(repo, text, user, date, extra, changes) | |
|
1273 | branch = None | |
|
1274 | if not extra or 'branch' not in extra: | |
|
1275 | try: | |
|
1276 | branch = repo.dirstate.branch() | |
|
1277 | except UnicodeDecodeError: | |
|
1278 | raise error.Abort(_('branch name not in UTF-8!')) | |
|
1279 | super(workingctx, self).__init__(repo, text, user, date, extra, changes, | |
|
1280 | branch=branch) | |
|
1331 | 1281 | |
|
1332 | 1282 | def __iter__(self): |
|
1333 | 1283 | d = self._repo.dirstate |
@@ -1355,6 +1305,54 b' class workingctx(committablectx):' | |||
|
1355 | 1305 | self._manifest |
|
1356 | 1306 | return super(workingctx, self)._fileinfo(path) |
|
1357 | 1307 | |
|
1308 | def _buildflagfunc(self): | |
|
1309 | # Create a fallback function for getting file flags when the | |
|
1310 | # filesystem doesn't support them | |
|
1311 | ||
|
1312 | copiesget = self._repo.dirstate.copies().get | |
|
1313 | parents = self.parents() | |
|
1314 | if len(parents) < 2: | |
|
1315 | # when we have one parent, it's easy: copy from parent | |
|
1316 | man = parents[0].manifest() | |
|
1317 | def func(f): | |
|
1318 | f = copiesget(f, f) | |
|
1319 | return man.flags(f) | |
|
1320 | else: | |
|
1321 | # merges are tricky: we try to reconstruct the unstored | |
|
1322 | # result from the merge (issue1802) | |
|
1323 | p1, p2 = parents | |
|
1324 | pa = p1.ancestor(p2) | |
|
1325 | m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest() | |
|
1326 | ||
|
1327 | def func(f): | |
|
1328 | f = copiesget(f, f) # may be wrong for merges with copies | |
|
1329 | fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f) | |
|
1330 | if fl1 == fl2: | |
|
1331 | return fl1 | |
|
1332 | if fl1 == fla: | |
|
1333 | return fl2 | |
|
1334 | if fl2 == fla: | |
|
1335 | return fl1 | |
|
1336 | return '' # punt for conflicts | |
|
1337 | ||
|
1338 | return func | |
|
1339 | ||
|
1340 | @propertycache | |
|
1341 | def _flagfunc(self): | |
|
1342 | return self._repo.dirstate.flagfunc(self._buildflagfunc) | |
|
1343 | ||
|
1344 | def flags(self, path): | |
|
1345 | if r'_manifest' in self.__dict__: | |
|
1346 | try: | |
|
1347 | return self._manifest.flags(path) | |
|
1348 | except KeyError: | |
|
1349 | return '' | |
|
1350 | ||
|
1351 | try: | |
|
1352 | return self._flagfunc(path) | |
|
1353 | except OSError: | |
|
1354 | return '' | |
|
1355 | ||
|
1358 | 1356 | def filectx(self, path, filelog=None): |
|
1359 | 1357 | """get a file context from the working directory""" |
|
1360 | 1358 | return workingfilectx(self._repo, path, workingctx=self, |
@@ -1579,6 +1577,23 b' class workingctx(committablectx):' | |||
|
1579 | 1577 | return s |
|
1580 | 1578 | |
|
1581 | 1579 | @propertycache |
|
1580 | def _copies(self): | |
|
1581 | p1copies = {} | |
|
1582 | p2copies = {} | |
|
1583 | parents = self._repo.dirstate.parents() | |
|
1584 | p1manifest = self._repo[parents[0]].manifest() | |
|
1585 | p2manifest = self._repo[parents[1]].manifest() | |
|
1586 | narrowmatch = self._repo.narrowmatch() | |
|
1587 | for dst, src in self._repo.dirstate.copies().items(): | |
|
1588 | if not narrowmatch(dst): | |
|
1589 | continue | |
|
1590 | if src in p1manifest: | |
|
1591 | p1copies[dst] = src | |
|
1592 | elif src in p2manifest: | |
|
1593 | p2copies[dst] = src | |
|
1594 | return p1copies, p2copies | |
|
1595 | ||
|
1596 | @propertycache | |
|
1582 | 1597 | def _manifest(self): |
|
1583 | 1598 | """generate a manifest corresponding to the values in self._status |
|
1584 | 1599 | |
@@ -1651,8 +1666,29 b' class workingctx(committablectx):' | |||
|
1651 | 1666 | match.bad = bad |
|
1652 | 1667 | return match |
|
1653 | 1668 | |
|
1669 | def walk(self, match): | |
|
1670 | '''Generates matching file names.''' | |
|
1671 | return sorted(self._repo.dirstate.walk(self._repo.narrowmatch(match), | |
|
1672 | subrepos=sorted(self.substate), | |
|
1673 | unknown=True, ignored=False)) | |
|
1674 | ||
|
1675 | def matches(self, match): | |
|
1676 | match = self._repo.narrowmatch(match) | |
|
1677 | ds = self._repo.dirstate | |
|
1678 | return sorted(f for f in ds.matches(match) if ds[f] != 'r') | |
|
1679 | ||
|
1654 | 1680 | def markcommitted(self, node): |
|
1655 | super(workingctx, self).markcommitted(node) | |
|
1681 | with self._repo.dirstate.parentchange(): | |
|
1682 | for f in self.modified() + self.added(): | |
|
1683 | self._repo.dirstate.normal(f) | |
|
1684 | for f in self.removed(): | |
|
1685 | self._repo.dirstate.drop(f) | |
|
1686 | self._repo.dirstate.setparents(node) | |
|
1687 | ||
|
1688 | # write changes out explicitly, because nesting wlock at | |
|
1689 | # runtime may prevent 'wlock.release()' in 'repo.commit()' | |
|
1690 | # from immediately doing so for subsequent changing files | |
|
1691 | self._repo.dirstate.write(self._repo.currenttransaction()) | |
|
1656 | 1692 | |
|
1657 | 1693 | sparse.aftercommit(self._repo, node) |
|
1658 | 1694 | |
@@ -1726,6 +1762,8 b' class workingfilectx(committablefilectx)' | |||
|
1726 | 1762 | |
|
1727 | 1763 | def size(self): |
|
1728 | 1764 | return self._repo.wvfs.lstat(self._path).st_size |
|
1765 | def lstat(self): | |
|
1766 | return self._repo.wvfs.lstat(self._path) | |
|
1729 | 1767 | def date(self): |
|
1730 | 1768 | t, tz = self._changectx.date() |
|
1731 | 1769 | try: |
@@ -1761,14 +1799,13 b' class workingfilectx(committablefilectx)' | |||
|
1761 | 1799 | |
|
1762 | 1800 | def write(self, data, flags, backgroundclose=False, **kwargs): |
|
1763 | 1801 | """wraps repo.wwrite""" |
|
1764 | self._repo.wwrite(self._path, data, flags, | |
|
1765 | backgroundclose=backgroundclose, | |
|
1766 | **kwargs) | |
|
1802 | return self._repo.wwrite(self._path, data, flags, | |
|
1803 | backgroundclose=backgroundclose, | |
|
1804 | **kwargs) | |
|
1767 | 1805 | |
|
1768 | 1806 | def markcopied(self, src): |
|
1769 | 1807 | """marks this file a copy of `src`""" |
|
1770 | with self._repo.wlock(): |
|
1771 | self._repo.dirstate.copy(src, self._path) | |
|
1808 | self._repo.dirstate.copy(src, self._path) | |
|
1772 | 1809 | |
|
1773 | 1810 | def clearunknown(self): |
|
1774 | 1811 | """Removes conflicting items in the working directory so that |
@@ -1913,7 +1950,7 b' class overlayworkingctx(committablectx):' | |||
|
1913 | 1950 | if self.isdirty(path): |
|
1914 | 1951 | return self._cache[path]['copied'] |
|
1915 | 1952 | else: |
|
1916 | raise error.ProgrammingError('copydata() called on clean context') | |
|
1953 | return None | |
|
1917 | 1954 | |
|
1918 | 1955 | def flags(self, path): |
|
1919 | 1956 | if self.isdirty(path): |
@@ -2055,7 +2092,7 b' class overlayworkingctx(committablectx):' | |||
|
2055 | 2092 | else: |
|
2056 | 2093 | parents = (self._repo[parents[0]], self._repo[parents[1]]) |
|
2057 | 2094 | |
|
2058 | files = self._cache.keys() |
|
2095 | files = self.files() | |
|
2059 | 2096 | def getfile(repo, memctx, path): |
|
2060 | 2097 | if self._cache[path]['exists']: |
|
2061 | 2098 | return memfilectx(repo, memctx, path, |
@@ -2118,7 +2155,9 b' class overlayworkingctx(committablectx):' | |||
|
2118 | 2155 | # the file is marked as existing. |
|
2119 | 2156 | if exists and data is None: |
|
2120 | 2157 | oldentry = self._cache.get(path) or {} |
|
2121 | data = oldentry.get('data') or self._wrappedctx[path].data() |
|
2158 | data = oldentry.get('data') | |
|
2159 | if data is None: | |
|
2160 | data = self._wrappedctx[path].data() | |
|
2122 | 2161 | |
|
2123 | 2162 | self._cache[path] = { |
|
2124 | 2163 | 'exists': exists, |
@@ -2305,7 +2344,8 b' class memctx(committablectx):' | |||
|
2305 | 2344 | |
|
2306 | 2345 | def __init__(self, repo, parents, text, files, filectxfn, user=None, |
|
2307 | 2346 | date=None, extra=None, branch=None, editor=False): |
|
2308 | super(memctx, self).__init__(repo, text, user, date, extra) |
|
2347 | super(memctx, self).__init__(repo, text, user, date, extra, | |
|
2348 | branch=branch) | |
|
2309 | 2349 | self._rev = None |
|
2310 | 2350 | self._node = None |
|
2311 | 2351 | parents = [(p or nullid) for p in parents] |
@@ -2313,8 +2353,6 b' class memctx(committablectx):' | |||
|
2313 | 2353 | self._parents = [self._repo[p] for p in (p1, p2)] |
|
2314 | 2354 | files = sorted(set(files)) |
|
2315 | 2355 | self._files = files |
|
2316 | if branch is not None: | |
|
2317 | self._extra['branch'] = encoding.fromlocal(branch) | |
|
2318 | 2356 | self.substate = {} |
|
2319 | 2357 | |
|
2320 | 2358 | if isinstance(filectxfn, patch.filestore): |
@@ -107,40 +107,60 b' def _findlimit(repo, ctxa, ctxb):' | |||
|
107 | 107 | # This only occurs when a is a descendent of b or visa-versa. |
|
108 | 108 | return min(limit, a, b) |
|
109 | 109 | |
|
110 | def _chain(src, dst, a, b): |
|
111 | """chain two sets of copies a->b""" | |
|
112 | t = a.copy() | |
|
113 | for k, v in b.iteritems(): | |
|
114 | if v in t: | |
|
115 | # found a chain | |
|
116 | if t[v] != k: | |
|
117 | # file wasn't renamed back to itself | |
|
118 | t[k] = t[v] | |
|
119 | if v not in dst: | |
|
120 | # chain was a rename, not a copy | |
|
121 |
|
|
|
122 | if v in src: | |
|
123 | # file is a copy of an existing file | |
|
124 | t[k] = v | |
|
110 | def _filter(src, dst, t): | |
|
111 | """filters out invalid copies after chaining""" | |
|
112 | ||
|
113 | # When _chain()'ing copies in 'a' (from 'src' via some other commit 'mid') | |
|
114 | # with copies in 'b' (from 'mid' to 'dst'), we can get the different cases | |
|
115 | # in the following table (not including trivial cases). For example, case 2 | |
|
116 | # is where a file existed in 'src' and remained under that name in 'mid' and | |
|
117 | # then was renamed between 'mid' and 'dst'. | |
|
118 | # | |
|
119 | # case src mid dst result | |
|
120 | # 1 x y - - | |
|
121 | # 2 x y y x->y | |
|
122 | # 3 x y x - | |
|
123 | # 4 x y z x->z | |
|
124 | # 5 - x y - | |
|
125 | # 6 x x y x->y | |
|
126 | # | |
|
127 | # _chain() takes care of chaining the copies in 'a' and 'b', but it | |
|
128 | # cannot tell the difference between cases 1 and 2, between 3 and 4, or | |
|
129 | # between 5 and 6, so it includes all cases in its result. | |
|
130 | # Cases 1, 3, and 5 are then removed by _filter(). | |
|
125 | 131 | |
|
126 | 132 | for k, v in list(t.items()): |
|
133 | # remove copies from files that didn't exist | |
|
134 | if v not in src: | |
|
135 | del t[k] | |
|
127 | 136 | # remove criss-crossed copies |
|
128 | if k in src and v in dst: | |
|
137 | elif k in src and v in dst: | |
|
129 | 138 | del t[k] |
|
130 | 139 | # remove copies to files that were then removed |
|
131 | 140 | elif k not in dst: |
|
132 | 141 | del t[k] |
|
133 | 142 | |
|
143 | def _chain(a, b): | |
|
144 | """chain two sets of copies 'a' and 'b'""" | |
|
145 | t = a.copy() | |
|
146 | for k, v in b.iteritems(): | |
|
147 | if v in t: | |
|
148 | t[k] = t[v] | |
|
149 | else: | |
|
150 | t[k] = v | |
|
134 | 151 | return t |
|
135 | 152 | |
|
136 | def _tracefile(fctx, am, limit=node.nullrev): |
|
153 | def _tracefile(fctx, am, basemf, limit): | |
|
137 | 154 | """return file context that is the ancestor of fctx present in ancestor |
|
138 | 155 | manifest am, stopping after the first ancestor lower than limit""" |
|
139 | 156 | |
|
140 | 157 | for f in fctx.ancestors(): |
|
141 | if am.get(f.path(), None) == f.filenode(): | |
|
142 | return f | |
|
143 | if limit >= 0 and not f.isintroducedafter(limit): | |
|
158 | path = f.path() | |
|
159 | if am.get(path, None) == f.filenode(): | |
|
160 | return path | |
|
161 | if basemf and basemf.get(path, None) == f.filenode(): | |
|
162 | return path | |
|
163 | if not f.isintroducedafter(limit): | |
|
144 | 164 | return None |
|
145 | 165 | |
|
146 | 166 | def _dirstatecopies(repo, match=None): |
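Reviewer note: the split into _chain() + _filter() above can be exercised standalone. The sketch below is hypothetical — plain dicts and sets stand in for manifests — and walks case 2 from the table: 'x' kept its name between src and mid, then was renamed to 'y' between mid and dst.

def chain(a, b):
    # naive chaining of {dst: src} copy dicts, as in the new _chain()
    t = a.copy()
    for k, v in b.items():
        t[k] = t.get(v, v)  # follow the chain when one exists
    return t

def filter_invalid(src, dst, t):
    # drop entries for cases 1, 3 and 5 from the table, as _filter() does
    for k, v in list(t.items()):
        if v not in src:             # case 5: source never existed in 'src'
            del t[k]
        elif k in src and v in dst:  # case 3: criss-crossed copy
            del t[k]
        elif k not in dst:           # case 1: destination removed again
            del t[k]

# case 2: no copies between src and mid, then mid -> dst renamed x to y
t = chain({}, {'y': 'x'})
filter_invalid(src={'x'}, dst={'y'}, t=t)
assert t == {'y': 'x'}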
@@ -165,7 +185,7 b' def usechangesetcentricalgo(repo):' | |||
|
165 | 185 | return (repo.ui.config('experimental', 'copies.read-from') in |
|
166 | 186 | ('changeset-only', 'compatibility')) |
|
167 | 187 | |
|
168 | def _committedforwardcopies(a, b, match): | |
|
188 | def _committedforwardcopies(a, b, base, match): | |
|
169 | 189 | """Like _forwardcopies(), but b.rev() cannot be None (working copy)""" |
|
170 | 190 | # files might have to be traced back to the fctx parent of the last |
|
171 | 191 | # one-side-only changeset, but not further back than that |
@@ -183,6 +203,7 b' def _committedforwardcopies(a, b, match)' | |||
|
183 | 203 | if debug: |
|
184 | 204 | dbg('debug.copies: search limit: %d\n' % limit) |
|
185 | 205 | am = a.manifest() |
|
206 | basemf = None if base is None else base.manifest() | |
|
186 | 207 | |
|
187 | 208 | # find where new files came from |
|
188 | 209 | # we currently don't try to find where old files went, too expensive |
@@ -204,9 +225,9 b' def _committedforwardcopies(a, b, match)' | |||
|
204 | 225 | ancestrycontext = a._repo.changelog.ancestors([b.rev()], inclusive=True) |
|
205 | 226 | |
|
206 | 227 | if debug: |
|
207 | dbg('debug.copies: missing file to search: %d\n' % len(missing)) | |
|
228 | dbg('debug.copies: missing files to search: %d\n' % len(missing)) | |
|
208 | 229 | |
|
209 | for f in missing: | |
|
230 | for f in sorted(missing): | |
|
210 | 231 | if debug: |
|
211 | 232 | dbg('debug.copies: tracing file: %s\n' % f) |
|
212 | 233 | fctx = b[f] |
@@ -214,11 +235,11 b' def _committedforwardcopies(a, b, match)' | |||
|
214 | 235 | |
|
215 | 236 | if debug: |
|
216 | 237 | start = util.timer() |
|
217 | ofctx = _tracefile(fctx, am, limit) |
|
218 | if ofctx: |
|
238 | opath = _tracefile(fctx, am, basemf, limit) | |
|
239 | if opath: | |
|
219 | 240 | if debug: |
|
220 | dbg('debug.copies: rename of: %s\n' % ofctx._path) |
|
221 | cm[f] = ofctx.path() |
|
|
241 | dbg('debug.copies: rename of: %s\n' % opath) | |
|
242 | cm[f] = opath | |
|
222 | 243 | if debug: |
|
223 | 244 | dbg('debug.copies: time: %f seconds\n' |
|
224 | 245 | % (util.timer() - start)) |
@@ -245,40 +266,30 b' def _changesetforwardcopies(a, b, match)' | |||
|
245 | 266 | # 'work' contains 3-tuples of a (revision number, parent number, copies). |
|
246 | 267 | # The parent number is only used for knowing which parent the copies dict |
|
247 | 268 | # came from. |
|
269 | # NOTE: To reduce costly copying the 'copies' dicts, we reuse the same | |
|
270 | # instance for *one* of the child nodes (the last one). Once an instance | |
|
271 | # has been put on the queue, it is thus no longer safe to modify it. | |
|
272 | # Conversely, it *is* safe to modify an instance popped off the queue. | |
|
248 | 273 | work = [(r, 1, {}) for r in roots] |
|
249 | 274 | heapq.heapify(work) |
|
275 | alwaysmatch = match.always() | |
|
250 | 276 | while work: |
|
251 | r, i1, copies1 = heapq.heappop(work) |
|
277 | r, i1, copies = heapq.heappop(work) | |
|
252 | 278 | if work and work[0][0] == r: |
|
253 | 279 | # We are tracing copies from both parents |
|
254 | 280 | r, i2, copies2 = heapq.heappop(work) |
|
255 | copies = {} | |
|
256 | ctx = repo[r] | |
|
257 | p1man, p2man = ctx.p1().manifest(), ctx.p2().manifest() | |
|
258 | allcopies = set(copies1) | set(copies2) | |
|
259 | # TODO: perhaps this filtering should be done as long as ctx | |
|
260 | # is merge, whether or not we're tracing from both parent. | |
|
261 | for dst in allcopies: | |
|
262 | if not match(dst): | |
|
263 |
co |
|
|
264 | if dst not in copies2: | |
|
265 | # Copied on p1 side: mark as copy from p1 side if it didn't | |
|
266 | # already exist on p2 side | |
|
267 | if dst not in p2man: | |
|
268 | copies[dst] = copies1[dst] | |
|
269 | elif dst not in copies1: | |
|
270 | # Copied on p2 side: mark as copy from p2 side if it didn't | |
|
271 | # already exist on p1 side | |
|
272 | if dst not in p1man: | |
|
273 | copies[dst] = copies2[dst] | |
|
274 | else: | |
|
275 | # Copied on both sides: mark as copy from p1 side | |
|
276 | copies[dst] = copies1[dst] | |
|
277 | else: | |
|
278 | copies = copies1 | |
|
281 | for dst, src in copies2.items(): | |
|
282 | # Unlike when copies are stored in the filelog, we consider | |
|
283 | # it a copy even if the destination already existed on the | |
|
284 | # other branch. It's simply too expensive to check if the | |
|
285 | # file existed in the manifest. | |
|
286 | if dst not in copies: | |
|
287 | # If it was copied on the p1 side, leave it as copied from | |
|
288 | # that side, even if it was also copied on the p2 side. | |
|
289 | copies[dst] = copies2[dst] | |
|
279 | 290 | if r == b.rev(): |
|
280 | 291 | return copies |
|
281 | for c in children[r]: | |
|
292 | for i, c in enumerate(children[r]): | |
|
282 | 293 | childctx = repo[c] |
|
283 | 294 | if r == childctx.p1().rev(): |
|
284 | 295 | parent = 1 |
@@ -287,27 +298,36 b' def _changesetforwardcopies(a, b, match)' | |||
|
287 | 298 | assert r == childctx.p2().rev() |
|
288 | 299 | parent = 2 |
|
289 | 300 | childcopies = childctx.p2copies() |
|
290 | if not match.always(): |
|
301 | if not alwaysmatch: | |
|
291 | 302 | childcopies = {dst: src for dst, src in childcopies.items() |
|
292 | 303 | if match(dst)} |
|
293 | childcopies = _chain(a, childctx, copies, childcopies) | |
|
294 | heapq.heappush(work, (c, parent, childcopies)) | |
|
304 | # Copy the dict only if later iterations will also need it | |
|
305 | if i != len(children[r]) - 1: | |
|
306 | newcopies = copies.copy() | |
|
307 | else: | |
|
308 | newcopies = copies | |
|
309 | if childcopies: | |
|
310 | newcopies = _chain(newcopies, childcopies) | |
|
311 | for f in childctx.filesremoved(): | |
|
312 | if f in newcopies: | |
|
313 | del newcopies[f] | |
|
314 | heapq.heappush(work, (c, parent, newcopies)) | |
|
295 | 315 | assert False |
|
296 | 316 | |
|
297 | def _forwardcopies(a, b, match=None): | |
|
317 | def _forwardcopies(a, b, base=None, match=None): | |
|
298 | 318 | """find {dst@b: src@a} copy mapping where a is an ancestor of b""" |
|
299 | 319 | |
|
320 | if base is None: | |
|
321 | base = a | |
|
300 | 322 | match = a.repo().narrowmatch(match) |
|
301 | 323 | # check for working copy |
|
302 | 324 | if b.rev() is None: |
|
303 | if a == b.p1(): | |
|
304 | # short-circuit to avoid issues with merge states | |
|
305 | return _dirstatecopies(b._repo, match) | |
|
306 | ||
|
307 | cm = _committedforwardcopies(a, b.p1(), match) | |
|
325 | cm = _committedforwardcopies(a, b.p1(), base, match) | |
|
308 | 326 | # combine copies from dirstate if necessary |
|
309 | return _chain(a, b, cm, _dirstatecopies(b._repo, match)) |
|
310 | return _committedforwardcopies(a, b, match) | |
|
327 | copies = _chain(cm, _dirstatecopies(b._repo, match)) | |
|
328 | else: | |
|
329 | copies = _committedforwardcopies(a, b, base, match) | |
|
330 | return copies | |
|
311 | 331 | |
|
312 | 332 | def _backwardrenames(a, b, match): |
|
313 | 333 | if a._repo.ui.config('experimental', 'copytrace') == 'off': |
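Reviewer note: the NOTE in _changesetforwardcopies() about reusing the copies dict for the last child is easy to get wrong, so here is a hedged, self-contained toy of the copy-on-fork pattern. propagate(), its rev -> children mapping, and the tie-breaking counter (which replaces the real code's parent-number field so the heap never compares dicts) are all assumptions of the sketch:

import heapq
import itertools

def propagate(roots, children, newentries):
    """Flow {dst: src} entries down a DAG, copying state only on forks."""
    counter = itertools.count()  # tie-breaker so heapq never compares dicts
    work = [(r, next(counter), {}) for r in roots]
    heapq.heapify(work)
    seen = {}
    while work:
        rev, _, state = heapq.heappop(work)  # popped state is safe to mutate
        state.update(newentries.get(rev, {}))
        seen[rev] = dict(state)
        kids = children.get(rev, [])
        for i, child in enumerate(kids):
            # copy for every child except the last, which reuses the instance
            branch = state.copy() if i != len(kids) - 1 else state
            heapq.heappush(work, (child, next(counter), branch))
    return seen

# rev 0 forks into 1 and 2; the copy made for rev 1 keeps rev 2 unpolluted
result = propagate([0], {0: [1, 2]}, {0: {'b': 'a'}, 1: {'c': 'b'}})
assert result[2] == {'b': 'a'}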
@@ -343,90 +363,24 b' def pathcopies(x, y, match=None):' | |||
|
343 | 363 | if a == x: |
|
344 | 364 | if debug: |
|
345 | 365 | repo.ui.debug('debug.copies: search mode: forward\n') |
|
346 | return _forwardcopies(x, y, match=match) | |
|
347 | if a == y: | |
|
366 | if y.rev() is None and x == y.p1(): | |
|
367 | # short-circuit to avoid issues with merge states | |
|
368 | return _dirstatecopies(repo, match) | |
|
369 | copies = _forwardcopies(x, y, match=match) | |
|
370 | elif a == y: | |
|
348 | 371 | if debug: |
|
349 | 372 | repo.ui.debug('debug.copies: search mode: backward\n') |
|
350 | return _backwardrenames(x, y, match=match) |
|
351 | if debug: | |
|
352 | repo.ui.debug('debug.copies: search mode: combined\n') | |
|
353 | return _chain(x, y, _backwardrenames(x, a, match=match), | |
|
354 | _forwardcopies(a, y, match=match)) | |
|
355 | ||
|
356 | def _computenonoverlap(repo, c1, c2, addedinm1, addedinm2, baselabel=''): | |
|
357 | """Computes, based on addedinm1 and addedinm2, the files exclusive to c1 | |
|
358 | and c2. This is its own function so extensions can easily wrap this call | |
|
359 | to see what files mergecopies is about to process. | |
|
360 | ||
|
361 | Even though c1 and c2 are not used in this function, they are useful in | |
|
362 | other extensions for being able to read the file nodes of the changed files. | |
|
363 | ||
|
364 | "baselabel" can be passed to help distinguish the multiple computations | |
|
365 | done in the graft case. | |
|
366 | """ | |
|
367 | u1 = sorted(addedinm1 - addedinm2) | |
|
368 | u2 = sorted(addedinm2 - addedinm1) | |
|
369 | ||
|
370 | header = " unmatched files in %s" | |
|
371 | if baselabel: | |
|
372 | header += ' (from %s)' % baselabel | |
|
373 | if u1: | |
|
374 | repo.ui.debug("%s:\n %s\n" % (header % 'local', "\n ".join(u1))) | |
|
375 | if u2: | |
|
376 | repo.ui.debug("%s:\n %s\n" % (header % 'other', "\n ".join(u2))) | |
|
377 | ||
|
378 | return u1, u2 | |
|
379 | ||
|
380 | def _makegetfctx(ctx): | |
|
381 | """return a 'getfctx' function suitable for _checkcopies usage | |
|
382 | ||
|
383 | We have to re-setup the function building 'filectx' for each | |
|
384 | '_checkcopies' to ensure the linkrev adjustment is properly setup for | |
|
385 | each. Linkrev adjustment is important to avoid bug in rename | |
|
386 | detection. Moreover, having a proper '_ancestrycontext' setup ensures | |
|
387 | the performance impact of this adjustment is kept limited. Without it, | |
|
388 | each file could do a full dag traversal making the time complexity of | |
|
389 | the operation explode (see issue4537). | |
|
390 | ||
|
391 | This function exists here mostly to limit the impact on stable. Feel | |
|
392 | free to refactor on default. | |
|
393 | """ | |
|
394 | rev = ctx.rev() | |
|
395 | repo = ctx._repo | |
|
396 | ac = getattr(ctx, '_ancestrycontext', None) | |
|
397 | if ac is None: | |
|
398 | revs = [rev] | |
|
399 | if rev is None: | |
|
400 | revs = [p.rev() for p in ctx.parents()] | |
|
401 | ac = repo.changelog.ancestors(revs, inclusive=True) | |
|
402 | ctx._ancestrycontext = ac | |
|
403 | def makectx(f, n): | |
|
404 | if n in node.wdirfilenodeids: # in a working context? | |
|
405 | if ctx.rev() is None: | |
|
406 | return ctx.filectx(f) | |
|
407 | return repo[None][f] | |
|
408 | fctx = repo.filectx(f, fileid=n) | |
|
409 | # setup only needed for filectx not create from a changectx | |
|
410 | fctx._ancestrycontext = ac | |
|
411 | fctx._descendantrev = rev | |
|
412 | return fctx | |
|
413 | return util.lrucachefunc(makectx) | |
|
414 | ||
|
415 | def _combinecopies(copyfrom, copyto, finalcopy, diverge, incompletediverge): | |
|
416 | """combine partial copy paths""" | |
|
417 | remainder = {} | |
|
418 | for f in copyfrom: | |
|
419 | if f in copyto: | |
|
420 | finalcopy[copyto[f]] = copyfrom[f] | |
|
421 | del copyto[f] | |
|
422 | for f in incompletediverge: | |
|
423 | assert f not in diverge | |
|
424 | ic = incompletediverge[f] | |
|
425 | if ic[0] in copyto: | |
|
426 | diverge[f] = [copyto[ic[0]], ic[1]] | |
|
427 | else: | |
|
428 | remainder[f] = ic | |
|
429 | return remainder | |
|
373 | copies = _backwardrenames(x, y, match=match) | |
|
374 | else: | |
|
375 | if debug: | |
|
376 | repo.ui.debug('debug.copies: search mode: combined\n') | |
|
377 | base = None | |
|
378 | if a.rev() != node.nullrev: | |
|
379 | base = x | |
|
380 | copies = _chain(_backwardrenames(x, a, match=match), | |
|
381 | _forwardcopies(a, y, base, match=match)) | |
|
382 | _filter(x, y, copies) | |
|
383 | return copies | |
|
430 | 384 | |
|
431 | 385 | def mergecopies(repo, c1, c2, base): |
|
432 | 386 | """ |
@@ -485,7 +439,14 b' def mergecopies(repo, c1, c2, base):' | |||
|
485 | 439 | return _dirstatecopies(repo, narrowmatch), {}, {}, {}, {} |
|
486 | 440 | |
|
487 | 441 | copytracing = repo.ui.config('experimental', 'copytrace') |
|
488 | boolctrace = stringutil.parsebool(copytracing) |
|
442 | if stringutil.parsebool(copytracing) is False: | |
|
443 | # stringutil.parsebool() returns None when it is unable to parse the | |
|
444 | # value, so we should rely on making sure copytracing is on such cases | |
|
445 | return {}, {}, {}, {}, {} | |
|
446 | ||
|
447 | if usechangesetcentricalgo(repo): | |
|
448 | # The heuristics don't make sense when we need changeset-centric algos | |
|
449 | return _fullcopytracing(repo, c1, c2, base) | |
|
489 | 450 | |
|
490 | 451 | # Copy trace disabling is explicitly below the node == p1 logic above |
|
491 | 452 | # because the logic above is required for a simple copy to be kept across a |
@@ -497,10 +458,6 b' def mergecopies(repo, c1, c2, base):' | |||
|
497 | 458 | if _isfullcopytraceable(repo, c1, base): |
|
498 | 459 | return _fullcopytracing(repo, c1, c2, base) |
|
499 | 460 | return _heuristicscopytracing(repo, c1, c2, base) |
|
500 | elif boolctrace is False: | |
|
501 | # stringutil.parsebool() returns None when it is unable to parse the | |
|
502 | # value, so we should rely on making sure copytracing is on such cases | |
|
503 | return {}, {}, {}, {}, {} | |
|
504 | 461 | else: |
|
505 | 462 | return _fullcopytracing(repo, c1, c2, base) |
|
506 | 463 | |
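Reviewer note: the reordered config check relies on parsebool()'s tri-state result. A toy mirror of it (the value table below is an assumption matching common boolean spellings, not Mercurial's exact one) shows why an unrecognized setting such as 'heuristics' leaves copy tracing enabled:

def parsebool(s):
    # tri-state: True/False for recognized spellings, None otherwise
    table = {'1': True, 'yes': True, 'true': True, 'on': True,
             '0': False, 'no': False, 'false': False, 'off': False}
    return table.get(s.lower())

for value, traced in [('off', False), ('on', True), ('heuristics', True)]:
    disabled = parsebool(value) is False
    assert (not disabled) == traced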
@@ -522,6 +479,23 b' def _isfullcopytraceable(repo, c1, base)' | |||
|
522 | 479 | return commits < sourcecommitlimit |
|
523 | 480 | return False |
|
524 | 481 | |
|
482 | def _checksinglesidecopies(src, dsts1, m1, m2, mb, c2, base, | |
|
483 | copy, renamedelete): | |
|
484 | if src not in m2: | |
|
485 | # deleted on side 2 | |
|
486 | if src not in m1: | |
|
487 | # renamed on side 1, deleted on side 2 | |
|
488 | renamedelete[src] = dsts1 | |
|
489 | elif m2[src] != mb[src]: | |
|
490 | if not _related(c2[src], base[src]): | |
|
491 | return | |
|
492 | # modified on side 2 | |
|
493 | for dst in dsts1: | |
|
494 | if dst not in m2: | |
|
495 | # dst not added on side 2 (handle as regular | |
|
496 | # "both created" case in manifestmerge otherwise) | |
|
497 | copy[dst] = src | |
|
498 | ||
|
525 | 499 | def _fullcopytracing(repo, c1, c2, base): |
|
526 | 500 | """ The full copytracing algorithm which finds all the new files that were |
|
527 | 501 | added from merge base up to the top commit and for each file it checks if |
@@ -530,159 +504,84 b' def _fullcopytracing(repo, c1, c2, base)' | |||
|
530 | 504 | This is pretty slow when a lot of changesets are involved but will track all |
|
531 | 505 | the copies. |
|
532 | 506 | """ |
|
533 | # In certain scenarios (e.g. graft, update or rebase), base can be | |
|
534 | # overridden We still need to know a real common ancestor in this case We | |
|
535 | # can't just compute _c1.ancestor(_c2) and compare it to ca, because there | |
|
536 | # can be multiple common ancestors, e.g. in case of bidmerge. Because our | |
|
537 | # caller may not know if the revision passed in lieu of the CA is a genuine | |
|
538 | # common ancestor or not without explicitly checking it, it's better to | |
|
539 | # determine that here. | |
|
540 | # | |
|
541 | # base.isancestorof(wc) is False, work around that | |
|
542 | _c1 = c1.p1() if c1.rev() is None else c1 | |
|
543 | _c2 = c2.p1() if c2.rev() is None else c2 | |
|
544 | # an endpoint is "dirty" if it isn't a descendant of the merge base | |
|
545 | # if we have a dirty endpoint, we need to trigger graft logic, and also | |
|
546 | # keep track of which endpoint is dirty | |
|
547 | dirtyc1 = not base.isancestorof(_c1) | |
|
548 | dirtyc2 = not base.isancestorof(_c2) | |
|
549 | graft = dirtyc1 or dirtyc2 | |
|
550 | tca = base | |
|
551 | if graft: | |
|
552 | tca = _c1.ancestor(_c2) | |
|
553 | ||
|
554 | limit = _findlimit(repo, c1, c2) | |
|
555 | repo.ui.debug(" searching for copies back to rev %d\n" % limit) | |
|
556 | ||
|
557 | 507 | m1 = c1.manifest() |
|
558 | 508 | m2 = c2.manifest() |
|
559 | 509 | mb = base.manifest() |
|
560 | 510 | |
|
561 | # gather data from _checkcopies: | |
|
562 | # - diverge = record all diverges in this dict | |
|
563 | # - copy = record all non-divergent copies in this dict | |
|
564 | # - fullcopy = record all copies in this dict | |
|
565 | # - incomplete = record non-divergent partial copies here | |
|
566 | # - incompletediverge = record divergent partial copies here | |
|
567 | diverge = {} # divergence data is shared | |
|
568 | incompletediverge = {} | |
|
569 | data1 = {'copy': {}, | |
|
570 | 'fullcopy': {}, | |
|
571 | 'incomplete': {}, | |
|
572 | 'diverge': diverge, | |
|
573 | 'incompletediverge': incompletediverge, | |
|
574 | } | |
|
575 | data2 = {'copy': {}, | |
|
576 | 'fullcopy': {}, | |
|
577 | 'incomplete': {}, | |
|
578 | 'diverge': diverge, | |
|
579 | 'incompletediverge': incompletediverge, | |
|
580 | } | |
|
511 | copies1 = pathcopies(base, c1) | |
|
512 | copies2 = pathcopies(base, c2) | |
|
513 | ||
|
514 | inversecopies1 = {} | |
|
515 | inversecopies2 = {} | |
|
516 | for dst, src in copies1.items(): | |
|
517 | inversecopies1.setdefault(src, []).append(dst) | |
|
518 | for dst, src in copies2.items(): | |
|
519 | inversecopies2.setdefault(src, []).append(dst) | |
|
520 | ||
|
521 | copy = {} | |
|
522 | diverge = {} | |
|
523 | renamedelete = {} | |
|
524 | allsources = set(inversecopies1) | set(inversecopies2) | |
|
525 | for src in allsources: | |
|
526 | dsts1 = inversecopies1.get(src) | |
|
527 | dsts2 = inversecopies2.get(src) | |
|
528 | if dsts1 and dsts2: | |
|
529 | # copied/renamed on both sides | |
|
530 | if src not in m1 and src not in m2: | |
|
531 | # renamed on both sides | |
|
532 | dsts1 = set(dsts1) | |
|
533 | dsts2 = set(dsts2) | |
|
534 | # If there's some overlap in the rename destinations, we | |
|
535 | # consider it not divergent. For example, if side 1 copies 'a' | |
|
536 | # to 'b' and 'c' and deletes 'a', and side 2 copies 'a' to 'c' | |
|
537 | # and 'd' and deletes 'a'. | |
|
538 | if dsts1 & dsts2: | |
|
539 | for dst in (dsts1 & dsts2): | |
|
540 | copy[dst] = src | |
|
541 | else: | |
|
542 | diverge[src] = sorted(dsts1 | dsts2) | |
|
543 | elif src in m1 and src in m2: | |
|
544 | # copied on both sides | |
|
545 | dsts1 = set(dsts1) | |
|
546 | dsts2 = set(dsts2) | |
|
547 | for dst in (dsts1 & dsts2): | |
|
548 | copy[dst] = src | |
|
549 | # TODO: Handle cases where it was renamed on one side and copied | |
|
550 | # on the other side | |
|
551 | elif dsts1: | |
|
552 | # copied/renamed only on side 1 | |
|
553 | _checksinglesidecopies(src, dsts1, m1, m2, mb, c2, base, | |
|
554 | copy, renamedelete) | |
|
555 | elif dsts2: | |
|
556 | # copied/renamed only on side 2 | |
|
557 | _checksinglesidecopies(src, dsts2, m2, m1, mb, c1, base, | |
|
558 | copy, renamedelete) | |
|
559 | ||
|
560 | renamedeleteset = set() | |
|
561 | divergeset = set() | |
|
562 | for dsts in diverge.values(): | |
|
563 | divergeset.update(dsts) | |
|
564 | for dsts in renamedelete.values(): | |
|
565 | renamedeleteset.update(dsts) | |
|
581 | 566 | |
|
582 | 567 | # find interesting file sets from manifests |
|
583 | 568 | addedinm1 = m1.filesnotin(mb, repo.narrowmatch()) |
|
584 | 569 | addedinm2 = m2.filesnotin(mb, repo.narrowmatch()) |
|
585 | bothnew = sorted(addedinm1 & addedinm2) |
|
586 | if tca == base: | |
|
587 | # unmatched file from base | |
|
588 | u1r, u2r = _computenonoverlap(repo, c1, c2, addedinm1, addedinm2) | |
|
589 | u1u, u2u = u1r, u2r | |
|
590 | else: | |
|
591 | # unmatched file from base (DAG rotation in the graft case) | |
|
592 | u1r, u2r = _computenonoverlap(repo, c1, c2, addedinm1, addedinm2, | |
|
593 | baselabel='base') | |
|
594 | # unmatched file from topological common ancestors (no DAG rotation) | |
|
595 | # need to recompute this for directory move handling when grafting | |
|
596 | mta = tca.manifest() | |
|
597 | u1u, u2u = _computenonoverlap(repo, c1, c2, | |
|
598 | m1.filesnotin(mta, repo.narrowmatch()), | |
|
599 | m2.filesnotin(mta, repo.narrowmatch()), | |
|
600 | baselabel='topological common ancestor') | |
|
601 | ||
|
602 | for f in u1u: | |
|
603 | _checkcopies(c1, c2, f, base, tca, dirtyc1, limit, data1) | |
|
604 | ||
|
605 | for f in u2u: | |
|
606 | _checkcopies(c2, c1, f, base, tca, dirtyc2, limit, data2) | |
|
607 | ||
|
608 | copy = dict(data1['copy']) | |
|
609 | copy.update(data2['copy']) | |
|
610 | fullcopy = dict(data1['fullcopy']) | |
|
611 | fullcopy.update(data2['fullcopy']) | |
|
612 | ||
|
613 | if dirtyc1: | |
|
614 | _combinecopies(data2['incomplete'], data1['incomplete'], copy, diverge, | |
|
615 | incompletediverge) | |
|
616 | if dirtyc2: | |
|
617 | _combinecopies(data1['incomplete'], data2['incomplete'], copy, diverge, | |
|
618 | incompletediverge) | |
|
619 | ||
|
620 | renamedelete = {} | |
|
621 | renamedeleteset = set() | |
|
622 | divergeset = set() | |
|
623 | for of, fl in list(diverge.items()): | |
|
624 | if len(fl) == 1 or of in c1 or of in c2: | |
|
625 | del diverge[of] # not actually divergent, or not a rename | |
|
626 | if of not in c1 and of not in c2: | |
|
627 | # renamed on one side, deleted on the other side, but filter | |
|
628 | # out files that have been renamed and then deleted | |
|
629 | renamedelete[of] = [f for f in fl if f in c1 or f in c2] | |
|
630 | renamedeleteset.update(fl) # reverse map for below | |
|
631 | else: | |
|
632 | divergeset.update(fl) # reverse map for below | |
|
570 | u1 = sorted(addedinm1 - addedinm2) | |
|
571 | u2 = sorted(addedinm2 - addedinm1) | |
|
633 | 572 | |
|
634 | if bothnew: | |
|
635 | repo.ui.debug(" unmatched files new in both:\n %s\n" | |
|
636 | % "\n ".join(bothnew)) | |
|
637 | bothdiverge = {} | |
|
638 | bothincompletediverge = {} | |
|
639 | remainder = {} | |
|
640 | both1 = {'copy': {}, | |
|
641 | 'fullcopy': {}, | |
|
642 | 'incomplete': {}, | |
|
643 | 'diverge': bothdiverge, | |
|
644 | 'incompletediverge': bothincompletediverge | |
|
645 | } | |
|
646 | both2 = {'copy': {}, | |
|
647 | 'fullcopy': {}, | |
|
648 | 'incomplete': {}, | |
|
649 | 'diverge': bothdiverge, | |
|
650 | 'incompletediverge': bothincompletediverge | |
|
651 | } | |
|
652 | for f in bothnew: | |
|
653 | _checkcopies(c1, c2, f, base, tca, dirtyc1, limit, both1) | |
|
654 | _checkcopies(c2, c1, f, base, tca, dirtyc2, limit, both2) | |
|
655 | if dirtyc1 and dirtyc2: | |
|
656 | remainder = _combinecopies(both2['incomplete'], both1['incomplete'], | |
|
657 | copy, bothdiverge, bothincompletediverge) | |
|
658 | remainder1 = _combinecopies(both1['incomplete'], both2['incomplete'], | |
|
659 | copy, bothdiverge, bothincompletediverge) | |
|
660 | remainder.update(remainder1) | |
|
661 | elif dirtyc1: | |
|
662 | # incomplete copies may only be found on the "dirty" side for bothnew | |
|
663 | assert not both2['incomplete'] | |
|
664 | remainder = _combinecopies({}, both1['incomplete'], copy, bothdiverge, | |
|
665 | bothincompletediverge) | |
|
666 | elif dirtyc2: | |
|
667 | assert not both1['incomplete'] | |
|
668 | remainder = _combinecopies({}, both2['incomplete'], copy, bothdiverge, | |
|
669 | bothincompletediverge) | |
|
670 | else: | |
|
671 | # incomplete copies and divergences can't happen outside grafts | |
|
672 | assert not both1['incomplete'] | |
|
673 | assert not both2['incomplete'] | |
|
674 | assert not bothincompletediverge | |
|
675 | for f in remainder: | |
|
676 | assert f not in bothdiverge | |
|
677 | ic = remainder[f] | |
|
678 | if ic[0] in (m1 if dirtyc1 else m2): | |
|
679 | # backed-out rename on one side, but watch out for deleted files | |
|
680 | bothdiverge[f] = ic | |
|
681 | for of, fl in bothdiverge.items(): | |
|
682 | if len(fl) == 2 and fl[0] == fl[1]: | |
|
683 | copy[fl[0]] = of # not actually divergent, just matching renames | |
|
573 | header = " unmatched files in %s" | |
|
574 | if u1: | |
|
575 | repo.ui.debug("%s:\n %s\n" % (header % 'local', "\n ".join(u1))) | |
|
576 | if u2: | |
|
577 | repo.ui.debug("%s:\n %s\n" % (header % 'other', "\n ".join(u2))) | |
|
684 | 578 | |
|
685 | if fullcopy and repo.ui.debugflag: | |
|
579 | fullcopy = copies1.copy() | |
|
580 | fullcopy.update(copies2) | |
|
581 | if not fullcopy: | |
|
582 | return copy, {}, diverge, renamedelete, {} | |
|
583 | ||
|
584 | if repo.ui.debugflag: | |
|
686 | 585 | repo.ui.debug(" all copies found (* = to merge, ! = divergent, " |
|
687 | 586 | "% = renamed and deleted):\n") |
|
688 | 587 | for f in sorted(fullcopy): |
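Reviewer note: the rewritten _fullcopytracing() starts from two pathcopies() results and inverts them, so the overlap handling reduces to set operations. A hypothetical miniature of that bookkeeping, using made-up file names:

copies1 = {'b': 'a', 'c': 'a'}  # side 1 copied a -> b and a -> c
copies2 = {'c': 'a'}            # side 2 copied a -> c

inversecopies1, inversecopies2 = {}, {}
for dst, src in copies1.items():
    inversecopies1.setdefault(src, []).append(dst)
for dst, src in copies2.items():
    inversecopies2.setdefault(src, []).append(dst)

# 'a' was copied on both sides; the shared destination 'c' is merged as
# a plain copy instead of being reported as divergent
overlap = set(inversecopies1['a']) & set(inversecopies2['a'])
assert overlap == {'c'}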
@@ -697,16 +596,10 b' def _fullcopytracing(repo, c1, c2, base)' | |||
|
697 | 596 | note)) |
|
698 | 597 | del divergeset |
|
699 | 598 | |
|
700 | if not fullcopy: | |
|
701 | return copy, {}, diverge, renamedelete, {} | |
|
702 | ||
|
703 | 599 | repo.ui.debug(" checking for directory renames\n") |
|
704 | 600 | |
|
705 | 601 | # generate a directory move map |
|
706 | 602 | d1, d2 = c1.dirs(), c2.dirs() |
|
707 | # Hack for adding '', which is not otherwise added, to d1 and d2 | |
|
708 | d1.addpath('/') | |
|
709 | d2.addpath('/') | |
|
710 | 603 | invalid = set() |
|
711 | 604 | dirmove = {} |
|
712 | 605 | |
@@ -746,7 +639,7 b' def _fullcopytracing(repo, c1, c2, base)' | |||
|
746 | 639 | |
|
747 | 640 | movewithdir = {} |
|
748 | 641 | # check unaccounted nonoverlapping files against directory moves |
|
749 | for f in u1r + u2r: |
|
642 | for f in u1 + u2: | |
|
750 | 643 | if f not in fullcopy: |
|
751 | 644 | for d in dirmove: |
|
752 | 645 | if f.startswith(d): |
@@ -893,99 +786,6 b' def _related(f1, f2):' | |||
|
893 | 786 | except StopIteration: |
|
894 | 787 | return False |
|
895 | 788 | |
|
896 | def _checkcopies(srcctx, dstctx, f, base, tca, remotebase, limit, data): | |
|
897 | """ | |
|
898 | check possible copies of f from msrc to mdst | |
|
899 | ||
|
900 | srcctx = starting context for f in msrc | |
|
901 | dstctx = destination context for f in mdst | |
|
902 | f = the filename to check (as in msrc) | |
|
903 | base = the changectx used as a merge base | |
|
904 | tca = topological common ancestor for graft-like scenarios | |
|
905 | remotebase = True if base is outside tca::srcctx, False otherwise | |
|
906 | limit = the rev number to not search beyond | |
|
907 | data = dictionary of dictionary to store copy data. (see mergecopies) | |
|
908 | ||
|
909 | note: limit is only an optimization, and provides no guarantee that | |
|
910 | irrelevant revisions will not be visited | |
|
911 | there is no easy way to make this algorithm stop in a guaranteed way | |
|
912 | once it "goes behind a certain revision". | |
|
913 | """ | |
|
914 | ||
|
915 | msrc = srcctx.manifest() | |
|
916 | mdst = dstctx.manifest() | |
|
917 | mb = base.manifest() | |
|
918 | mta = tca.manifest() | |
|
919 | # Might be true if this call is about finding backward renames, | |
|
920 | # This happens in the case of grafts because the DAG is then rotated. | |
|
921 | # If the file exists in both the base and the source, we are not looking | |
|
922 | # for a rename on the source side, but on the part of the DAG that is | |
|
923 | # traversed backwards. | |
|
924 | # | |
|
925 | # In the case there is both backward and forward renames (before and after | |
|
926 | # the base) this is more complicated as we must detect a divergence. | |
|
927 | # We use 'backwards = False' in that case. | |
|
928 | backwards = not remotebase and base != tca and f in mb | |
|
929 | getsrcfctx = _makegetfctx(srcctx) | |
|
930 | getdstfctx = _makegetfctx(dstctx) | |
|
931 | ||
|
932 | if msrc[f] == mb.get(f) and not remotebase: | |
|
933 | # Nothing to merge | |
|
934 | return | |
|
935 | ||
|
936 | of = None | |
|
937 | seen = {f} | |
|
938 | for oc in getsrcfctx(f, msrc[f]).ancestors(): | |
|
939 | of = oc.path() | |
|
940 | if of in seen: | |
|
941 | # check limit late - grab last rename before | |
|
942 | if oc.linkrev() < limit: | |
|
943 | break | |
|
944 | continue | |
|
945 | seen.add(of) | |
|
946 | ||
|
947 | # remember for dir rename detection | |
|
948 | if backwards: | |
|
949 | data['fullcopy'][of] = f # grafting backwards through renames | |
|
950 | else: | |
|
951 | data['fullcopy'][f] = of | |
|
952 | if of not in mdst: | |
|
953 | continue # no match, keep looking | |
|
954 | if mdst[of] == mb.get(of): | |
|
955 | return # no merge needed, quit early | |
|
956 | c2 = getdstfctx(of, mdst[of]) | |
|
957 | # c2 might be a plain new file on added on destination side that is | |
|
958 | # unrelated to the droids we are looking for. | |
|
959 | cr = _related(oc, c2) | |
|
960 | if cr and (of == f or of == c2.path()): # non-divergent | |
|
961 | if backwards: | |
|
962 | data['copy'][of] = f | |
|
963 | elif of in mb: | |
|
964 | data['copy'][f] = of | |
|
965 | elif remotebase: # special case: a <- b <- a -> b "ping-pong" rename | |
|
966 | data['copy'][of] = f | |
|
967 | del data['fullcopy'][f] | |
|
968 | data['fullcopy'][of] = f | |
|
969 | else: # divergence w.r.t. graft CA on one side of topological CA | |
|
970 | for sf in seen: | |
|
971 | if sf in mb: | |
|
972 | assert sf not in data['diverge'] | |
|
973 | data['diverge'][sf] = [f, of] | |
|
974 | break | |
|
975 | return | |
|
976 | ||
|
977 | if of in mta: | |
|
978 | if backwards or remotebase: | |
|
979 | data['incomplete'][of] = f | |
|
980 | else: | |
|
981 | for sf in seen: | |
|
982 | if sf in mb: | |
|
983 | if tca == base: | |
|
984 | data['diverge'].setdefault(sf, []).append(f) | |
|
985 | else: | |
|
986 | data['incompletediverge'][sf] = [of, f] | |
|
987 | return | |
|
988 | ||
|
989 | 789 | def duplicatecopies(repo, wctx, rev, fromrev, skiprev=None): |
|
990 | 790 | """reproduce copies from fromrev to rev in the dirstate |
|
991 | 791 | |
@@ -1005,8 +805,7 b' def duplicatecopies(repo, wctx, rev, fro' | |||
|
1005 | 805 | # metadata across the rebase anyway). |
|
1006 | 806 | exclude = pathcopies(repo[fromrev], repo[skiprev]) |
|
1007 | 807 | for dst, src in pathcopies(repo[fromrev], repo[rev]).iteritems(): |
|
1008 | # copies.pathcopies returns backward renames, so dst might not | |
|
1009 | # actually be in the dirstate | |
|
1010 | 808 | if dst in exclude: |
|
1011 | 809 | continue |
|
1012 | wctx[dst].markcopied(src) | |
|
810 | if dst in wctx: | |
|
811 | wctx[dst].markcopied(src) |
@@ -608,6 +608,7 b' class curseschunkselector(object):' | |||
|
608 | 608 | |
|
609 | 609 | # the currently selected header, hunk, or hunk-line |
|
610 | 610 | self.currentselecteditem = self.headerlist[0] |
|
611 | self.lastapplieditem = None | |
|
611 | 612 | |
|
612 | 613 | # updated when printing out patch-display -- the 'lines' here are the |
|
613 | 614 | # line positions *in the pad*, not on the screen. |
@@ -723,7 +724,7 b' class curseschunkselector(object):' | |||
|
723 | 724 | self.currentselecteditem = nextitem |
|
724 | 725 | self.recenterdisplayedarea() |
|
725 | 726 | |
|
726 | def nextsametype(self): | |
|
727 | def nextsametype(self, test=False): | |
|
727 | 728 | currentitem = self.currentselecteditem |
|
728 | 729 | sametype = lambda item: isinstance(item, type(currentitem)) |
|
729 | 730 | nextitem = currentitem.nextitem() |
@@ -739,7 +740,8 b' class curseschunkselector(object):' | |||
|
739 | 740 | self.togglefolded(parent) |
|
740 | 741 | |
|
741 | 742 | self.currentselecteditem = nextitem |
|
742 | self.recenterdisplayedarea() | |
|
743 | if not test: | |
|
744 | self.recenterdisplayedarea() | |
|
743 | 745 | |
|
744 | 746 | def rightarrowevent(self): |
|
745 | 747 | """ |
@@ -838,6 +840,8 b' class curseschunkselector(object):' | |||
|
838 | 840 | """ |
|
839 | 841 | if item is None: |
|
840 | 842 | item = self.currentselecteditem |
|
843 | # Only set this when NOT using 'toggleall' | |
|
844 | self.lastapplieditem = item | |
|
841 | 845 | |
|
842 | 846 | item.applied = not item.applied |
|
843 | 847 | |
@@ -931,6 +935,45 b' class curseschunkselector(object):' | |||
|
931 | 935 | self.toggleapply(item) |
|
932 | 936 | self.waslasttoggleallapplied = not self.waslasttoggleallapplied |
|
933 | 937 | |
|
938 | def toggleallbetween(self): | |
|
939 | "toggle applied on or off for all items in range [lastapplied,current]." | |
|
940 | if (not self.lastapplieditem or | |
|
941 | self.currentselecteditem == self.lastapplieditem): | |
|
942 | # Treat this like a normal 'x'/' ' | |
|
943 | self.toggleapply() | |
|
944 | return | |
|
945 | ||
|
946 | startitem = self.lastapplieditem | |
|
947 | enditem = self.currentselecteditem | |
|
948 | # Verify that enditem is "after" startitem, otherwise swap them. | |
|
949 | for direction in ['forward', 'reverse']: | |
|
950 | nextitem = startitem.nextitem() | |
|
951 | while nextitem and nextitem != enditem: | |
|
952 | nextitem = nextitem.nextitem() | |
|
953 | if nextitem: | |
|
954 | break | |
|
955 | # Looks like we went the wrong direction :) | |
|
956 | startitem, enditem = enditem, startitem | |
|
957 | ||
|
958 | if not nextitem: | |
|
959 | # We didn't find a path going either forward or backward? Don't know | |
|
960 | # how this can happen, let's not crash though. | |
|
961 | return | |
|
962 | ||
|
963 | nextitem = startitem | |
|
964 | # Switch all items to be the opposite state of the currently selected | |
|
965 | # item. Specifically: | |
|
966 | # [ ] startitem | |
|
967 | # [x] middleitem | |
|
968 | # [ ] enditem <-- currently selected | |
|
969 | # This will turn all three on, since the currently selected item is off. | |
|
970 | # This does *not* invert each item (i.e. middleitem stays marked/on) | |
|
971 | desiredstate = not self.currentselecteditem.applied | |
|
972 | while nextitem != enditem.nextitem(): | |
|
973 | if nextitem.applied != desiredstate: | |
|
974 | self.toggleapply(item=nextitem) | |
|
975 | nextitem = nextitem.nextitem() | |
|
976 | ||
|
934 | 977 | def togglefolded(self, item=None, foldparent=False): |
|
935 | 978 | "toggle folded flag of specified item (defaults to currently selected)" |
|
936 | 979 | if item is None: |
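Reviewer note: toggleallbetween() forces the whole range to one state rather than inverting each item; a tiny model of that semantics, with list booleans standing in for item.applied:

# [ ] startitem  [x] middleitem  [ ] enditem (currently selected)
applied = [False, True, False]
desiredstate = not applied[-1]           # end is off, so turn the range on
applied = [desiredstate for _ in applied]
assert applied == [True, True, True]     # middle stays on, never flipped off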
@@ -1460,9 +1503,10 b' changes, the unselected changes are stil' | |||
|
1460 | 1503 | can use crecord multiple times to split large changes into smaller changesets. |
|
1461 | 1504 | the following are valid keystrokes: |
|
1462 | 1505 | |
|
1463 | [space] : (un-)select item ([~]/[x] = partly/fully applied) |
|
1506 | x [space] : (un-)select item ([~]/[x] = partly/fully applied) | |
|
1464 | 1507 | [enter] : (un-)select item and go to next item of same type |
|
1465 | 1508 | A : (un-)select all items |
|
1509 | X : (un-)select all items between current and most-recent | |
|
1466 | 1510 | up/down-arrow [k/j] : go to previous/next unfolded item |
|
1467 | 1511 | pgup/pgdn [K/J] : go to previous/next item of same type |
|
1468 | 1512 | right/left-arrow [l/h] : go to child item / parent item |
@@ -1724,7 +1768,7 b' are you sure you want to review/edit and' | |||
|
1724 | 1768 | keypressed = pycompat.bytestr(keypressed) |
|
1725 | 1769 | if keypressed in ["k", "KEY_UP"]: |
|
1726 | 1770 | self.uparrowevent() |
|
1727 | if keypressed in ["K", "KEY_PPAGE"]: | |
|
1771 | elif keypressed in ["K", "KEY_PPAGE"]: | |
|
1728 | 1772 | self.uparrowshiftevent() |
|
1729 | 1773 | elif keypressed in ["j", "KEY_DOWN"]: |
|
1730 | 1774 | self.downarrowevent() |
@@ -1742,8 +1786,6 b' are you sure you want to review/edit and' | |||
|
1742 | 1786 | self.toggleamend(self.opts, test) |
|
1743 | 1787 | elif keypressed in ["c"]: |
|
1744 | 1788 | return True |
|
1745 | elif test and keypressed in ['X']: | |
|
1746 | return True | |
|
1747 | 1789 | elif keypressed in ["r"]: |
|
1748 | 1790 | if self.reviewcommit(): |
|
1749 | 1791 | self.opts['review'] = True |
@@ -1751,11 +1793,13 b' are you sure you want to review/edit and' | |||
|
1751 | 1793 | elif test and keypressed in ['R']: |
|
1752 | 1794 | self.opts['review'] = True |
|
1753 | 1795 | return True |
|
1754 | elif keypressed in [' ']: |
|
1796 | elif keypressed in [' ', 'x']: | |
|
1755 | 1797 | self.toggleapply() |
|
1756 | 1798 | elif keypressed in ['\n', 'KEY_ENTER']: |
|
1757 | 1799 | self.toggleapply() |
|
1758 | self.nextsametype() | |
|
1800 | self.nextsametype(test=test) | |
|
1801 | elif keypressed in ['X']: | |
|
1802 | self.toggleallbetween() | |
|
1759 | 1803 | elif keypressed in ['A']: |
|
1760 | 1804 | self.toggleall() |
|
1761 | 1805 | elif keypressed in ['e']: |
@@ -259,13 +259,10 b' def descendantrevs(revs, revsfn, parentr' | |||
|
259 | 259 | yield rev |
|
260 | 260 | break |
|
261 | 261 | |
|
262 | def _reachablerootspure(repo, minroot, roots, heads, includepath): |
|
263 | """return (heads(::<roots> and ::<heads>)) | |
|
264 | ||
|
265 | If includepath is True, return (<roots>::<heads>).""" | |
|
262 | def _reachablerootspure(pfunc, minroot, roots, heads, includepath): | |
|
263 | """See revlog.reachableroots""" | |
|
266 | 264 | if not roots: |
|
267 | 265 | return [] |
|
268 | parentrevs = repo.changelog.parentrevs | |
|
269 | 266 | roots = set(roots) |
|
270 | 267 | visit = list(heads) |
|
271 | 268 | reachable = set() |
@@ -282,7 +279,7 b' def _reachablerootspure(repo, minroot, r' | |||
|
282 | 279 | reached(rev) |
|
283 | 280 | if not includepath: |
|
284 | 281 | continue |
|
285 | parents = parentrevs(rev) |
|
282 | parents = pfunc(rev) | |
|
286 | 283 | seen[rev] = parents |
|
287 | 284 | for parent in parents: |
|
288 | 285 | if parent >= minroot and parent not in seen: |
@@ -298,18 +295,13 b' def _reachablerootspure(repo, minroot, r' | |||
|
298 | 295 | return reachable |
|
299 | 296 | |
|
300 | 297 | def reachableroots(repo, roots, heads, includepath=False): |
|
301 | """return (heads(::<roots> and ::<heads>)) | |
|
302 | ||
|
303 | If includepath is True, return (<roots>::<heads>).""" | |
|
298 | """See revlog.reachableroots""" | |
|
304 | 299 | if not roots: |
|
305 | 300 | return baseset() |
|
306 | 301 | minroot = roots.min() |
|
307 | 302 | roots = list(roots) |
|
308 | 303 | heads = list(heads) |
|
309 | try: | |
|
310 | revs = repo.changelog.reachableroots(minroot, heads, roots, includepath) | |
|
311 | except AttributeError: | |
|
312 | revs = _reachablerootspure(repo, minroot, roots, heads, includepath) | |
|
304 | revs = repo.changelog.reachableroots(minroot, heads, roots, includepath) | |
|
313 | 305 | revs = baseset(revs) |
|
314 | 306 | revs.sort() |
|
315 | 307 | return revs |
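Reviewer note: _reachablerootspure() now takes a parent function instead of a repo. The toy below is not Mercurial's algorithm — just a self-contained illustration of the pfunc contract and of what <roots>::<heads> means on a DAG where parents always have smaller rev numbers than children:

def toyreachableroots(pfunc, minroot, roots, heads, includepath):
    # collect ancestors of heads, pruned at minroot
    ancs, stack = set(), list(heads)
    while stack:
        r = stack.pop()
        if r in ancs or r < minroot:
            continue
        ancs.add(r)
        stack.extend(pfunc(r))
    reachable = set(roots) & ancs
    if not includepath:
        return reachable
    # keep revs lying on a root-to-head path (parents sort before children)
    onpath = set()
    for r in sorted(ancs):
        if r in reachable or any(p in onpath or p in reachable
                                 for p in pfunc(r)):
            onpath.add(r)
    return onpath | reachable

parents = {0: [], 1: [0], 2: [1], 3: [1]}   # hypothetical toy DAG
pfunc = lambda rev: parents[rev]
assert toyreachableroots(pfunc, 0, {1}, [3], includepath=True) == {1, 3}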
@@ -1240,7 +1240,7 b' def debuginstall(ui, **opts):' | |||
|
1240 | 1240 | |
|
1241 | 1241 | # Python |
|
1242 | 1242 | fm.write('pythonexe', _("checking Python executable (%s)\n"), |
|
1243 | pycompat.sysexecutable) | |
|
1243 | pycompat.sysexecutable or _("unknown")) | |
|
1244 | 1244 | fm.write('pythonver', _("checking Python version (%s)\n"), |
|
1245 | 1245 | ("%d.%d.%d" % sys.version_info[:3])) |
|
1246 | 1246 | fm.write('pythonlib', _("checking Python lib (%s)...\n"), |
@@ -1278,16 +1278,28 b' def debuginstall(ui, **opts):' | |||
|
1278 | 1278 | fm.write('hgmodules', _("checking installed modules (%s)...\n"), |
|
1279 | 1279 | os.path.dirname(pycompat.fsencode(__file__))) |
|
1280 | 1280 | |
|
1281 | if policy.policy in ('c', 'allow'): |
|
1281 | rustandc = policy.policy in ('rust+c', 'rust+c-allow') | |
|
1282 | rustext = rustandc # for now, that's the only case | |
|
1283 | cext = policy.policy in ('c', 'allow') or rustandc | |
|
1284 | nopure = cext or rustext | |
|
1285 | if nopure: | |
|
1282 | 1286 | err = None |
|
1283 | 1287 | try: |
|
1284 | from .cext import ( |
|
1285 | base85, | |
|
1286 | bdiff, |
|
1287 | mpatch, |
|
1288 | osutil, |
|
1289 | ) |
|
1290 | dir(bdiff), dir(mpatch), dir(base85), dir(osutil) # quiet pyflakes | |
|
1288 | if cext: | |
|
1289 | from .cext import ( | |
|
1290 | base85, | |
|
1291 | bdiff, | |
|
1292 | mpatch, | |
|
1293 | osutil, | |
|
1294 | ) | |
|
1295 | # quiet pyflakes | |
|
1296 | dir(bdiff), dir(mpatch), dir(base85), dir(osutil) | |
|
1297 | if rustext: | |
|
1298 | from .rustext import ( | |
|
1299 | ancestor, | |
|
1300 | dirstate, | |
|
1301 | ) | |
|
1302 | dir(ancestor), dir(dirstate) # quiet pyflakes | |
|
1291 | 1303 | except Exception as inst: |
|
1292 | 1304 | err = stringutil.forcebytestr(inst) |
|
1293 | 1305 | problems += 1 |
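Reviewer note: the new policy decoding boils down to two booleans; a small table check (the policy names come straight from the hunk above, the helper itself is hypothetical) makes the combinations explicit:

def extfamilies(policyname):
    rustandc = policyname in ('rust+c', 'rust+c-allow')
    cext = policyname in ('c', 'allow') or rustandc
    return {'c': cext, 'rust': rustandc}

assert extfamilies('c') == {'c': True, 'rust': False}
assert extfamilies('rust+c') == {'c': True, 'rust': True}
assert extfamilies('py') == {'c': False, 'rust': False}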
@@ -28,6 +28,7 b' from . import (' | |||
|
28 | 28 | ) |
|
29 | 29 | |
|
30 | 30 | parsers = policy.importmod(r'parsers') |
|
31 | dirstatemod = policy.importrust(r'dirstate', default=parsers) | |
|
31 | 32 | |
|
32 | 33 | propertycache = util.propertycache |
|
33 | 34 | filecache = scmutil.filecache |
@@ -390,12 +391,24 b' class dirstate(object):' | |||
|
390 | 391 | self._updatedfiles.add(f) |
|
391 | 392 | self._map.addfile(f, oldstate, state, mode, size, mtime) |
|
392 | 393 | |
|
393 | def normal(self, f): | |
|
394 | '''Mark a file normal and clean.''' |
|
395 | s = os.lstat(self._join(f)) | |
|
396 | mtime = s[stat.ST_MTIME] | |
|
397 | self._addpath(f, 'n', s.st_mode, | |
|
398 | s.st_size & _rangemask, mtime & _rangemask) | |
|
394 | def normal(self, f, parentfiledata=None): | |
|
395 | '''Mark a file normal and clean. | |
|
396 | ||
|
397 | parentfiledata: (mode, size, mtime) of the clean file | |
|
398 | ||
|
399 | parentfiledata should be computed from memory (for mode, | |
|
400 | size), as or close as possible from the point where we | |
|
401 | determined the file was clean, to limit the risk of the | |
|
402 | file having been changed by an external process between the | |
|
403 | moment where the file was determined to be clean and now.''' | |
|
404 | if parentfiledata: | |
|
405 | (mode, size, mtime) = parentfiledata | |
|
406 | else: | |
|
407 | s = os.lstat(self._join(f)) | |
|
408 | mode = s.st_mode | |
|
409 | size = s.st_size | |
|
410 | mtime = s[stat.ST_MTIME] | |
|
411 | self._addpath(f, 'n', mode, size & _rangemask, mtime & _rangemask) | |
|
399 | 412 | self._map.copymap.pop(f, None) |
|
400 | 413 | if f in self._map.nonnormalset: |
|
401 | 414 | self._map.nonnormalset.remove(f) |
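Reviewer note: a hedged sketch of how a caller could use the new parentfiledata argument — capture the stat fields at the moment the file is verified clean, then hand them to normal() instead of letting it re-stat later. markclean() and its arguments are hypothetical:

import os
import stat

def markclean(dirstate, join, f):
    st = os.lstat(join(f))
    # (mode, size, mtime), gathered as close as possible to the point
    # where the file was determined to be clean
    parentfiledata = (st.st_mode, st.st_size, st[stat.ST_MTIME])
    dirstate.normal(f, parentfiledata=parentfiledata)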
@@ -656,8 +669,6 b' class dirstate(object):' | |||
|
656 | 669 | self._dirty = False |
|
657 | 670 | |
|
658 | 671 | def _dirignore(self, f): |
|
659 | if f == '.': | |
|
660 | return False | |
|
661 | 672 | if self._ignore(f): |
|
662 | 673 | return True |
|
663 | 674 | for p in util.finddirs(f): |
@@ -751,15 +762,16 b' class dirstate(object):' | |||
|
751 | 762 | del files[i] |
|
752 | 763 | j += 1 |
|
753 | 764 | |
|
754 | if not files or '.' in files: | 
|
755 | files = ['.'] | 
|
765 | if not files or '' in files: | |
|
766 | files = [''] | |
|
767 | # constructing the foldmap is expensive, so don't do it for the | |
|
768 | # common case where files is [''] | |
|
769 | normalize = None | |
|
756 | 770 | results = dict.fromkeys(subrepos) |
|
757 | 771 | results['.hg'] = None |
|
758 | 772 | |
|
759 | 773 | for ff in files: |
|
760 | # constructing the foldmap is expensive, so don't do it for the | |
|
761 | # common case where files is ['.'] | |
|
762 | if normalize and ff != '.': | |
|
774 | if normalize: | |
|
763 | 775 | nf = normalize(ff, False, True) |
|
764 | 776 | else: |
|
765 | 777 | nf = ff |
@@ -903,9 +915,7 b' class dirstate(object):' | |||
|
903 | 915 | if visitentries == 'this' or visitentries == 'all': |
|
904 | 916 | visitentries = None |
|
905 | 917 | skip = None |
|
906 | if nd == '.': | 
|
907 | nd = '' | |
|
908 | else: | |
|
918 | if nd != '': | |
|
909 | 919 | skip = '.hg' |
|
910 | 920 | try: |
|
911 | 921 | entries = listdir(join(nd), stat=True, skip=skip) |
@@ -1465,7 +1475,7 b' class dirstatemap(object):' | |||
|
1465 | 1475 | # parsing the dirstate. |
|
1466 | 1476 | # |
|
1467 | 1477 | # (we cannot decorate the function directly since it is in a C module) |
|
1468 | parse_dirstate = util.nogc(parsers.parse_dirstate) | 
|
1478 | parse_dirstate = util.nogc(dirstatemod.parse_dirstate) | |
|
1469 | 1479 | p = parse_dirstate(self._map, self.copymap, st) |
|
1470 | 1480 | if not self._dirtyparents: |
|
1471 | 1481 | self.setparents(*p) |
@@ -1476,8 +1486,8 b' class dirstatemap(object):' | |||
|
1476 | 1486 | self.get = self._map.get |
|
1477 | 1487 | |
|
1478 | 1488 | def write(self, st, now): |
|
1479 | st.write(parsers.pack_dirstate(self._map, self.copymap, | 
|
1480 | self.parents(), now)) | |
|
1489 | st.write(dirstatemod.pack_dirstate(self._map, self.copymap, | |
|
1490 | self.parents(), now)) | |
|
1481 | 1491 | st.close() |
|
1482 | 1492 | self._dirtyparents = False |
|
1483 | 1493 | self.nonnormalset, self.otherparentset = self.nonnormalentries() |
@@ -343,10 +343,19 b' def checkheads(pushop):' | |||
|
343 | 343 | # 1. Check for new branches on the remote. |
|
344 | 344 | if newbranches and not newbranch: # new branch requires --new-branch |
|
345 | 345 | branchnames = ', '.join(sorted(newbranches)) |
|
346 | raise error.Abort(_("push creates new remote branches: %s!") | |
|
347 | % branchnames, | |
|
348 | hint=_("use 'hg push --new-branch' to create" | |
|
349 | " new remote branches")) | |
|
346 | # Calculate how many of the new branches are closed branches | |
|
347 | closedbranches = set() | |
|
348 | for tag, heads, tip, isclosed in repo.branchmap().iterbranches(): | |
|
349 | if isclosed: | |
|
350 | closedbranches.add(tag) | |
|
351 | closedbranches = (closedbranches & set(newbranches)) | |
|
352 | if closedbranches: | |
|
353 | errmsg = (_("push creates new remote branches: %s (%d closed)!") | |
|
354 | % (branchnames, len(closedbranches))) | |
|
355 | else: | |
|
356 | errmsg = (_("push creates new remote branches: %s!")% branchnames) | |
|
357 | hint=_("use 'hg push --new-branch' to create new remote branches") | |
|
358 | raise error.Abort(errmsg, hint=hint) | |
|
350 | 359 | |
|
351 | 360 | # 2. Find heads that we need not warn about |
|
352 | 361 | nowarnheads = _nowarnheads(pushop) |
@@ -539,10 +539,12 b' def push(repo, remote, force=False, revs' | |||
|
539 | 539 | # get lock as we might write phase data |
|
540 | 540 | wlock = lock = None |
|
541 | 541 | try: |
|
542 | # bundle2 push may receive a reply bundle touching bookmarks or other | 
|
543 | # part requiring the wlock. Take it now to ensure proper ordering. | 
|
542 | # bundle2 push may receive a reply bundle touching bookmarks | |
|
543 | # requiring the wlock. Take it now to ensure proper ordering. | |
|
544 | 544 | maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback') |
|
545 | if (not _forcebundle1(pushop)) and maypushback: | 
|
545 | if ((not _forcebundle1(pushop)) and | |
|
546 | maypushback and | |
|
547 | not bookmod.bookmarksinstore(repo)): | |
|
546 | 548 | wlock = pushop.repo.wlock() |
|
547 | 549 | lock = pushop.repo.lock() |
|
548 | 550 | pushop.trmanager = transactionmanager(pushop.repo, |
@@ -1548,7 +1550,10 b' def pull(repo, remote, heads=None, force' | |||
|
1548 | 1550 | raise error.Abort(msg) |
|
1549 | 1551 | |
|
1550 | 1552 | pullop.trmanager = transactionmanager(repo, 'pull', remote.url()) |
|
1551 | with repo.wlock(), repo.lock(), pullop.trmanager: | |
|
1553 | wlock = util.nullcontextmanager() | |
|
1554 | if not bookmod.bookmarksinstore(repo): | |
|
1555 | wlock = repo.wlock() | |
|
1556 | with wlock, repo.lock(), pullop.trmanager: | |
|
1552 | 1557 | # Use the modern wire protocol, if available. |
|
1553 | 1558 | if remote.capable('command-changesetdata'): |
|
1554 | 1559 | exchangev2.pull(pullop) |
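The pull() hunk replaces an unconditional `repo.wlock()` with a no-op context manager whenever bookmarks live in the store, so the `with` statement stays a single expression. The same shape using the stdlib (a sketch; Mercurial has its own `util.nullcontextmanager`):

    import contextlib

    def maybe_wlock(repo, needed):
        # Return a real lock or a do-nothing stand-in so callers keep a
        # single `with` statement (contextlib.nullcontext, Python 3.7+).
        return repo.wlock() if needed else contextlib.nullcontext()

    # usage sketch:
    #   with maybe_wlock(repo, not bookmarks_in_store), repo.lock():
    #       ...  # pull transaction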
@@ -2395,7 +2400,8 b' def unbundle(repo, cg, heads, source, ur' | |||
|
2395 | 2400 | try: |
|
2396 | 2401 | def gettransaction(): |
|
2397 | 2402 | if not lockandtr[2]: |
|
2398 | lockandtr[0] = repo.wlock() | 
|
2403 | if not bookmod.bookmarksinstore(repo): | |
|
2404 | lockandtr[0] = repo.wlock() | |
|
2399 | 2405 | lockandtr[1] = repo.lock() |
|
2400 | 2406 | lockandtr[2] = repo.transaction(source) |
|
2401 | 2407 | lockandtr[2].hookargs['source'] = source |
@@ -43,7 +43,8 b' from .utils import (' | |||
|
43 | 43 | 'progress', |
|
44 | 44 | 'interhg', |
|
45 | 45 | 'inotify', |
|
46 | 'hgcia' | |
|
46 | 'hgcia', | |
|
47 | 'shelve', | |
|
47 | 48 | } |
|
48 | 49 | |
|
49 | 50 | def extensions(ui=None): |
@@ -221,14 +222,7 b' def _runextsetup(name, ui):' | |||
|
221 | 222 | extsetup = getattr(_extensions[name], 'extsetup', None) |
|
222 | 223 | if extsetup: |
|
223 | 224 | try: |
|
224 | try: | 
|
225 | extsetup(ui) | |
|
226 | except TypeError: | |
|
227 | if pycompat.getargspec(extsetup).args: | |
|
228 | raise | |
|
229 | ui.deprecwarn("extsetup for '%s' must take a ui argument" | |
|
230 | % name, "4.9") | |
|
231 | extsetup() # old extsetup with no ui argument | |
|
225 | extsetup(ui) | |
|
232 | 226 | except Exception as inst: |
|
233 | 227 | ui.traceback(force=True) |
|
234 | 228 | msg = stringutil.forcebytestr(inst) |
@@ -15,9 +15,12 b' from . import (' | |||
|
15 | 15 | commands, |
|
16 | 16 | error, |
|
17 | 17 | extensions, |
|
18 | pycompat, | |
|
18 | 19 | registrar, |
|
19 | 20 | ) |
|
20 | 21 | |
|
22 | from hgdemandimport import tracing | |
|
23 | ||
|
21 | 24 | class exthelper(object): |
|
22 | 25 | """Helper for modular extension setup |
|
23 | 26 | |
@@ -135,7 +138,8 b' class exthelper(object):' | |||
|
135 | 138 | for cont, funcname, wrapper in self._functionwrappers: |
|
136 | 139 | extensions.wrapfunction(cont, funcname, wrapper) |
|
137 | 140 | for c in self._uicallables: |
|
138 | c(ui) | |
|
141 | with tracing.log(b'finaluisetup: %s', pycompat.sysbytes(repr(c))): | |
|
142 | c(ui) | |
|
139 | 143 | |
|
140 | 144 | def finaluipopulate(self, ui): |
|
141 | 145 | """Method to be used as the extension uipopulate |
@@ -175,7 +179,8 b' class exthelper(object):' | |||
|
175 | 179 | entry[1].append(opt) |
|
176 | 180 | |
|
177 | 181 | for c in self._extcallables: |
|
178 | c(ui) | |
|
182 | with tracing.log(b'finalextsetup: %s', pycompat.sysbytes(repr(c))): | |
|
183 | c(ui) | |
|
179 | 184 | |
|
180 | 185 | def finalreposetup(self, ui, repo): |
|
181 | 186 | """Method to be used as the extension reposetup |
@@ -187,7 +192,8 b' class exthelper(object):' | |||
|
187 | 192 | - Changes to repo.__class__, repo.dirstate.__class__ |
|
188 | 193 | """ |
|
189 | 194 | for c in self._repocallables: |
|
190 | c(ui, repo) | |
|
195 | with tracing.log(b'finalreposetup: %s', pycompat.sysbytes(repr(c))): | |
|
196 | c(ui, repo) | |
|
191 | 197 | |
|
192 | 198 | def uisetup(self, call): |
|
193 | 199 | """Decorated function will be executed during uisetup |
@@ -60,17 +60,20 b' nomerge = internaltool.nomerge' | |||
|
60 | 60 | mergeonly = internaltool.mergeonly # just the full merge, no premerge |
|
61 | 61 | fullmerge = internaltool.fullmerge # both premerge and merge |
|
62 | 62 | |
|
63 | # IMPORTANT: keep the last line of this prompt very short ("What do you want to | |
|
64 | # do?") because of issue6158, ideally to <40 English characters (to allow other | |
|
65 | # languages that may take more columns to still have a chance to fit in an | |
|
66 | # 80-column screen). | |
|
63 | 67 | _localchangedotherdeletedmsg = _( |
|
64 | 68 | "file '%(fd)s' was deleted in other%(o)s but was modified in local%(l)s.\n" |
|
65 | "What do you want to do?\n" | |
|
66 | "use (c)hanged version, (d)elete, or leave (u)nresolved?" | |
|
69 | "You can use (c)hanged version, (d)elete, or leave (u)nresolved.\n" | |
|
70 | "What do you want to do?" | |
|
67 | 71 | "$$ &Changed $$ &Delete $$ &Unresolved") |
|
68 | 72 | |
|
69 | 73 | _otherchangedlocaldeletedmsg = _( |
|
70 | 74 | "file '%(fd)s' was deleted in local%(l)s but was modified in other%(o)s.\n" |
|
71 | "What do you want to do?\n" | |
|
72 | "use (c)hanged version, leave (d)eleted, or " | |
|
73 | "leave (u)nresolved?" | |
|
75 | "You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved.\n" | |
|
76 | "What do you want to do?" | |
|
74 | 77 | "$$ &Changed $$ &Deleted $$ &Unresolved") |
|
75 | 78 | |
|
76 | 79 | class absentfilectx(object): |
@@ -299,9 +302,14 b' def _iprompt(repo, mynode, orig, fcd, fc' | |||
|
299 | 302 | _otherchangedlocaldeletedmsg % prompts, 2) |
|
300 | 303 | choice = ['other', 'local', 'unresolved'][index] |
|
301 | 304 | else: |
|
305 | # IMPORTANT: keep the last line of this prompt ("What do you want to | |
|
306 | # do?") very short, see comment next to _localchangedotherdeletedmsg | |
|
307 | # at the top of the file for details. | |
|
302 | 308 | index = ui.promptchoice( |
|
303 | _("keep (l)ocal%(l)s, take (o)ther%(o)s, or leave (u)nresolved" | |
|
304 | " for %(fd)s?" | |
|
309 | _("file '%(fd)s' needs to be resolved.\n" | |
|
310 | "You can keep (l)ocal%(l)s, take (o)ther%(o)s, or leave " | |
|
311 | "(u)nresolved.\n" | |
|
312 | "What do you want to do?" | |
|
305 | 313 | "$$ &Local $$ &Other $$ &Unresolved") % prompts, 2) |
|
306 | 314 | choice = ['local', 'other', 'unresolved'][index] |
|
307 | 315 |
@@ -469,22 +469,6 b' def ascii(ui, state, type, char, text, c' | |||
|
469 | 469 | while len(text) < len(lines): |
|
470 | 470 | text.append("") |
|
471 | 471 | |
|
472 | if any(len(char) > 1 for char in edgemap.values()): | |
|
473 | # limit drawing an edge to the first or last N lines of the current | |
|
474 | # section the rest of the edge is drawn like a parent line. | |
|
475 | parent = state['styles'][PARENT][-1:] | |
|
476 | def _drawgp(char, i): | |
|
477 | # should a grandparent character be drawn for this line? | |
|
478 | if len(char) < 2: | |
|
479 | return True | |
|
480 | num = int(char[:-1]) | |
|
481 | # either skip first num lines or take last num lines, based on sign | |
|
482 | return -num <= i if num < 0 else (len(lines) - i) <= num | |
|
483 | for i, line in enumerate(lines): | |
|
484 | line[:] = [c[-1:] if _drawgp(c, i) else parent for c in line] | |
|
485 | edgemap.update( | |
|
486 | (e, (c if len(c) < 2 else parent)) for e, c in edgemap.items()) | |
|
487 | ||
|
488 | 472 | # print lines |
|
489 | 473 | indentation_level = max(ncols, ncols + coldiff) |
|
490 | 474 | lines = ["%-*s " % (2 * indentation_level, "".join(line)) for line in lines] |
@@ -32,6 +32,7 b' def bisect(repo, state):' | |||
|
32 | 32 | if searching for a first bad one. |
|
33 | 33 | """ |
|
34 | 34 | |
|
35 | repo = repo.unfiltered() | |
|
35 | 36 | changelog = repo.changelog |
|
36 | 37 | clparents = changelog.parentrevs |
|
37 | 38 | skip = {changelog.rev(n) for n in state['skip']} |
@@ -139,7 +140,7 b' def load_state(repo):' | |||
|
139 | 140 | state = {'current': [], 'good': [], 'bad': [], 'skip': []} |
|
140 | 141 | for l in repo.vfs.tryreadlines("bisect.state"): |
|
141 | 142 | kind, node = l[:-1].split() |
|
142 | node = repo.lookup(node) | |
|
143 | node = repo.unfiltered().lookup(node) | |
|
143 | 144 | if kind not in state: |
|
144 | 145 | raise error.Abort(_("unknown bisect kind %s") % kind) |
|
145 | 146 | state[kind].append(node) |
@@ -184,7 +185,7 b' def get(repo, status):' | |||
|
184 | 185 | """ |
|
185 | 186 | state = load_state(repo) |
|
186 | 187 | if status in ('good', 'bad', 'skip', 'current'): |
|
187 | return map(repo.changelog.rev, state[status]) | |
|
188 | return map(repo.unfiltered().changelog.rev, state[status]) | |
|
188 | 189 | else: |
|
189 | 190 | # In the following sets, we do *not* call 'bisect()' with more |
|
190 | 191 | # than one level of recursion, because that can be very, very |
@@ -268,6 +269,7 b' def label(repo, node):' | |||
|
268 | 269 | return None |
|
269 | 270 | |
|
270 | 271 | def printresult(ui, repo, state, displayer, nodes, good): |
|
272 | repo = repo.unfiltered() | |
|
271 | 273 | if len(nodes) == 1: |
|
272 | 274 | # narrowed it down to a single revision |
|
273 | 275 | if good: |
@@ -320,6 +320,8 b' internalstable = sorted([' | |||
|
320 | 320 | loaddoc('config', subdir='internals')), |
|
321 | 321 | (['extensions', 'extension'], _('Extension API'), |
|
322 | 322 | loaddoc('extensions', subdir='internals')), |
|
323 | (['mergestate'], _('Mergestate'), | |
|
324 | loaddoc('mergestate', subdir='internals')), | |
|
323 | 325 | (['requirements'], _('Repository Requirements'), |
|
324 | 326 | loaddoc('requirements', subdir='internals')), |
|
325 | 327 | (['revlogs'], _('Revision Logs'), |
@@ -453,7 +455,7 b' def inserttweakrc(ui, topic, doc):' | |||
|
453 | 455 | addtopichook('config', inserttweakrc) |
|
454 | 456 | |
|
455 | 457 | def help_(ui, commands, name, unknowncmd=False, full=True, subtopic=None, |
|
456 | **opts): | |
|
458 | fullname=None, **opts): | |
|
457 | 459 | ''' |
|
458 | 460 | Generate the help for 'name' as unformatted restructured text. If |
|
459 | 461 | 'name' is None, describe the commands available. |
@@ -689,6 +691,8 b' def help_(ui, commands, name, unknowncmd' | |||
|
689 | 691 | for names, header, doc in subtopics[name]: |
|
690 | 692 | if subtopic in names: |
|
691 | 693 | break |
|
694 | if not any(subtopic in s[0] for s in subtopics[name]): | |
|
695 | raise error.UnknownCommand(name) | |
|
692 | 696 | |
|
693 | 697 | if not header: |
|
694 | 698 | for topic in helptable: |
@@ -812,8 +816,16 b' def help_(ui, commands, name, unknowncmd' | |||
|
812 | 816 | if unknowncmd: |
|
813 | 817 | raise error.UnknownCommand(name) |
|
814 | 818 | else: |
|
815 | msg = _('no such help topic: %s') % name | |
|
816 | hint = _("try 'hg help --keyword %s'") % name | |
|
819 | if fullname: | |
|
820 | formatname = fullname | |
|
821 | else: | |
|
822 | formatname = name | |
|
823 | if subtopic: | |
|
824 | hintname = subtopic | |
|
825 | else: | |
|
826 | hintname = name | |
|
827 | msg = _('no such help topic: %s') % formatname | |
|
828 | hint = _("try 'hg help --keyword %s'") % hintname | |
|
817 | 829 | raise error.Abort(msg, hint=hint) |
|
818 | 830 | else: |
|
819 | 831 | # program name |
@@ -848,7 +860,7 b' def formattedhelp(ui, commands, fullname' | |||
|
848 | 860 | termwidth = ui.termwidth() - 2 |
|
849 | 861 | if textwidth <= 0 or termwidth < textwidth: |
|
850 | 862 | textwidth = termwidth |
|
851 | text = help_(ui, commands, name, | |
|
863 | text = help_(ui, commands, name, fullname=fullname, | |
|
852 | 864 | subtopic=subtopic, unknowncmd=unknowncmd, full=full, **opts) |
|
853 | 865 | |
|
854 | 866 | blocks, pruned = minirst.parse(text, keep=keep) |
@@ -438,6 +438,10 b' effect and style see :hg:`help color`.' | |||
|
438 | 438 | ``commands`` |
|
439 | 439 | ------------ |
|
440 | 440 | |
|
441 | ``commit.post-status`` | |
|
442 | Show status of files in the working directory after successful commit. | |
|
443 | (default: False) | |
|
444 | ||
|
441 | 445 | ``resolve.confirm`` |
|
442 | 446 | Confirm before performing action if no filename is passed. |
|
443 | 447 | (default: False) |
@@ -875,6 +879,15 b' https://www.mercurial-scm.org/wiki/Missi' | |||
|
875 | 879 | |
|
876 | 880 | On some system, Mercurial installation may lack `zstd` supports. Default is `zlib`. |
|
877 | 881 | |
|
882 | ``bookmarks-in-store`` | |
|
883 | Store bookmarks in .hg/store/. This means that bookmarks are shared when | |
|
884 | using `hg share` regardless of the `-B` option. | |
|
885 | ||
|
886 | Repositories with this on-disk format require Mercurial version 5.1. | |
|
887 | ||
|
888 | Disabled by default. | |
|
889 | ||
|
890 | ||
|
878 | 891 | ``graph`` |
|
879 | 892 | --------- |
|
880 | 893 | |
@@ -1767,6 +1780,11 b' statistical text report generated from t' | |||
|
1767 | 1780 | |
|
1768 | 1781 | The option is unused on other formats. |
|
1769 | 1782 | |
|
1783 | ``showtime`` | |
|
1784 | Show time taken as absolute durations, in addition to percentages. | |
|
1785 | Only used by the ``hotpath`` format. | |
|
1786 | (default: true) | |
|
1787 | ||
|
1770 | 1788 | ``progress`` |
|
1771 | 1789 | ------------ |
|
1772 | 1790 |
@@ -129,3 +129,16 b' August 2017). This requirement and featu' | |||
|
129 | 129 | disappear in a future Mercurial release. The requirement will only |
|
130 | 130 | be present on repositories that have opted in to a sparse working |
|
131 | 131 | directory. |
|
132 | ||
|
133 | bookmarksinstore | |
|
134 | ================== | |
|
135 | ||
|
136 | Bookmarks are stored in ``.hg/store/`` instead of directly in ``.hg/`` | |
|
137 | where they used to be stored. The active bookmark is still stored | |
|
138 | directly in ``.hg/``. This makes them always shared by ``hg share``, | |
|
139 | whether or not ``-B`` was passed. | |
|
140 | ||
|
141 | Support for this requirement was added in Mercurial 5.1 (released | |
|
142 | August 2019). The requirement will only be present on repositories | |
|
143 | that have opted in to this format (by having | |
|
144 | ``format.bookmarks-in-store=true`` set when they were created). |
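Several hunks below route bookmark I/O through `bookmarks.bookmarksvfs(repo)`. Given this requirement's description, that helper only needs to pick the right directory; a plausible sketch (an assumed implementation for this note, not copied from the patch):

    BOOKMARKS_IN_STORE_REQUIREMENT = 'bookmarksinstore'

    def bookmarksvfs(repo):
        # New-format repos keep the bookmarks file under .hg/store/ so it
        # is shared by `hg share`; older repos keep it directly in .hg/.
        if BOOKMARKS_IN_STORE_REQUIREMENT in repo.requirements:
            return repo.svfs
        return repo.vfs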
@@ -28,8 +28,8 b' File Format' | |||
|
28 | 28 | =========== |
|
29 | 29 | |
|
30 | 30 | A revlog begins with a 32-bit big endian integer holding version info |
|
31 | and feature flags. This integer is shared with the first revision | 
|
32 | entry. | |
|
31 | and feature flags. This integer overlaps with the first four bytes of | |
|
32 | the first revision entry. | |
|
33 | 33 | |
|
34 | 34 | This integer is logically divided into 2 16-bit shorts. The least |
|
35 | 35 | significant half of the integer is the format/version short. The other |
@@ -78,10 +78,10 b' 00 02 00 01' | |||
|
78 | 78 | 00 03 00 01 |
|
79 | 79 | v1 + inline + generaldelta |
|
80 | 80 | |
|
81 | Following the 32-bit header is the remainder of the first index entry. | 
|
82 | Following that are remaining *index* entries. Inlined revision data is | 
|
83 | possibly located between index entries. More on this layout is described | 
|
84 | below. | |
|
81 | Following the 32-bit header is the remaining 60 bytes of the first index | |
|
82 | entry. Following that are additional *index* entries. Inlined revision | |
|
83 | data is possibly located between index entries. More on this inlined | |
|
84 | layout is described below. | |
|
85 | 85 | |
|
86 | 86 | Version 1 Format |
|
87 | 87 | ================ |
@@ -149,8 +149,12 b' If revision data is not inline, then raw' | |||
|
149 | 149 | separate byte container. The offsets from bytes 0-5 and the compressed |
|
150 | 150 | length from bytes 8-11 define how to access this data. |
|
151 | 151 | |
|
152 | The first 4 bytes of the revlog are shared between the revlog header | |
|
153 | and the 6 byte absolute offset field from the first revlog entry. | |
|
152 | The 6 byte absolute offset field from the first revlog entry overlaps | |
|
153 | with the revlog header. That is, the first 6 bytes of the first revlog | |
|
154 | entry can be split into four bytes containing the header for the revlog | |
|
155 | file and an additional two bytes containing the offset for the first | |
|
156 | entry. Since this is the offset from the beginning of the file for the | |
|
157 | first revision entry, the two bytes will always be set to zero. | |
|
154 | 158 | |
|
155 | 159 | Version 2 Format |
|
156 | 160 | ================ |
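The overlap described above is mechanical to verify: the first four bytes of entry 0 hold the header, and the remaining two bytes of its 6-byte offset field must be zero. A hedged parsing sketch for a version-1 index (flag bit positions follow the `00 03 00 01` examples earlier in this document):

    import struct

    def parse_revlog_prefix(data):
        # Big endian: the most significant short is the feature flags,
        # the least significant short is the format/version.
        flags, version = struct.unpack('>HH', data[:4])
        # These bytes complete revision 0's absolute offset, which is 0.
        assert data[4:6] == b'\x00\x00'
        inline = bool(flags & 1)
        generaldelta = bool(flags & 2)
        return version, inline, generaldelta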
@@ -956,31 +956,34 b' def merge(repo, node, force=None, remind' | |||
|
956 | 956 | abort=False): |
|
957 | 957 | """Branch merge with node, resolving changes. Return true if any |
|
958 | 958 | unresolved conflicts.""" |
|
959 | if not abort: | 
|
960 | stats = mergemod.update(repo, node, branchmerge=True, force=force, | |
|
961 | mergeforce=mergeforce, labels=labels) | |
|
962 | else: | |
|
963 | ms = mergemod.mergestate.read(repo) | |
|
964 | if ms.active(): | |
|
965 | # there were conflicts | |
|
966 | node = ms.localctx.hex() | |
|
967 | else: | |
|
968 | # there were no conficts, mergestate was not stored | |
|
969 | node = repo['.'].hex() | |
|
959 | if abort: | |
|
960 | return abortmerge(repo.ui, repo) | |
|
970 | 961 | |
|
971 | repo.ui.status(_("aborting the merge, updating back to" | |
|
972 | " %s\n") % node[:12]) | |
|
973 | stats = mergemod.update(repo, node, branchmerge=False, force=True, | |
|
974 | labels=labels) | |
|
975 | ||
|
962 | stats = mergemod.update(repo, node, branchmerge=True, force=force, | |
|
963 | mergeforce=mergeforce, labels=labels) | |
|
976 | 964 | _showstats(repo, stats) |
|
977 | 965 | if stats.unresolvedcount: |
|
978 | 966 | repo.ui.status(_("use 'hg resolve' to retry unresolved file merges " |
|
979 | 967 | "or 'hg merge --abort' to abandon\n")) |
|
980 | elif remind and not abort: | 
|
968 | elif remind: | |
|
981 | 969 | repo.ui.status(_("(branch merge, don't forget to commit)\n")) |
|
982 | 970 | return stats.unresolvedcount > 0 |
|
983 | 971 | |
|
972 | def abortmerge(ui, repo): | |
|
973 | ms = mergemod.mergestate.read(repo) | |
|
974 | if ms.active(): | |
|
975 | # there were conflicts | |
|
976 | node = ms.localctx.hex() | |
|
977 | else: | |
|
978 | # there were no conficts, mergestate was not stored | |
|
979 | node = repo['.'].hex() | |
|
980 | ||
|
981 | repo.ui.status(_("aborting the merge, updating back to" | |
|
982 | " %s\n") % node[:12]) | |
|
983 | stats = mergemod.update(repo, node, branchmerge=False, force=True) | |
|
984 | _showstats(repo, stats) | |
|
985 | return stats.unresolvedcount > 0 | |
|
986 | ||
|
984 | 987 | def _incoming(displaychlist, subreporecurse, ui, repo, source, |
|
985 | 988 | opts, buffered=False): |
|
986 | 989 | """ |
@@ -1092,9 +1095,9 b' def outgoing(ui, repo, dest, opts):' | |||
|
1092 | 1095 | recurse() |
|
1093 | 1096 | return 0 # exit code is zero since we found outgoing changes |
|
1094 | 1097 | |
|
1095 | def verify(repo): | |
|
1098 | def verify(repo, level=None): | |
|
1096 | 1099 | """verify the consistency of a repository""" |
|
1097 | ret = verifymod.verify(repo) | |
|
1100 | ret = verifymod.verify(repo, level=level) | |
|
1098 | 1101 | |
|
1099 | 1102 | # Broken subrepo references in hidden csets don't seem worth worrying about, |
|
1100 | 1103 | # since they can't be pushed/pulled, and --hidden can be used if they are a |
@@ -38,6 +38,9 b' def hgweb(config, name=None, baseui=None' | |||
|
38 | 38 | - list of virtual:real tuples (multi-repo view) |
|
39 | 39 | ''' |
|
40 | 40 | |
|
41 | if isinstance(config, pycompat.unicode): | |
|
42 | raise error.ProgrammingError( | |
|
43 | 'Mercurial only supports encoded strings: %r' % config) | |
|
41 | 44 | if ((isinstance(config, bytes) and not os.path.isdir(config)) or |
|
42 | 45 | isinstance(config, dict) or isinstance(config, list)): |
|
43 | 46 | # create a multi-dir interface |
@@ -414,14 +414,10 b' class hgwebdir(object):' | |||
|
414 | 414 | return self.makeindex(req, res, tmpl, subdir) |
|
415 | 415 | |
|
416 | 416 | def _virtualdirs(): |
|
417 | # Check the full virtual path, each parent, and the root ('') | 
|
418 | if virtual != '': | 
|
419 | yield virtual | 
|
420 | ||
|
421 | for p in util.finddirs(virtual): | |
|
422 | yield p | |
|
423 | ||
|
424 | yield '' | |
|
417 | # Check the full virtual path, and each parent | |
|
418 | yield virtual | |
|
419 | for p in util.finddirs(virtual): | |
|
420 | yield p | |
|
425 | 421 | |
|
426 | 422 | for virtualrepo in _virtualdirs(): |
|
427 | 423 | real = repos.get(virtualrepo) |
@@ -409,12 +409,6 b' def whyunstable(context, mapping):' | |||
|
409 | 409 | |
|
410 | 410 | whyunstable._requires = {'repo', 'ctx'} |
|
411 | 411 | |
|
412 | # helper to mark a function as a new-style template keyword; can be removed | |
|
413 | # once old-style function gets unsupported and new-style becomes the default | |
|
414 | def _kwfunc(f): | |
|
415 | f._requires = () | |
|
416 | return f | |
|
417 | ||
|
418 | 412 | def commonentry(repo, ctx): |
|
419 | 413 | node = scmutil.binnode(ctx) |
|
420 | 414 | return { |
@@ -439,8 +433,8 b' def commonentry(repo, ctx):' | |||
|
439 | 433 | 'branches': nodebranchdict(repo, ctx), |
|
440 | 434 | 'tags': nodetagsdict(repo, node), |
|
441 | 435 | 'bookmarks': nodebookmarksdict(repo, node), |
|
442 | 'parent': _kwfunc(lambda context, mapping: parents(ctx)), | 
|
443 | 'child': _kwfunc(lambda context, mapping: children(ctx)), | 
|
436 | 'parent': lambda context, mapping: parents(ctx), | |
|
437 | 'child': lambda context, mapping: children(ctx), | |
|
444 | 438 | } |
|
445 | 439 | |
|
446 | 440 | def changelistentry(web, ctx): |
@@ -457,9 +451,9 b' def changelistentry(web, ctx):' | |||
|
457 | 451 | |
|
458 | 452 | entry = commonentry(repo, ctx) |
|
459 | 453 | entry.update({ |
|
460 | 'allparents': _kwfunc(lambda context, mapping: parents(ctx)), | 
|
461 | 'parent': _kwfunc(lambda context, mapping: parents(ctx, rev - 1)), | 
|
462 | 'child': _kwfunc(lambda context, mapping: children(ctx, rev + 1)), | 
|
454 | 'allparents': lambda context, mapping: parents(ctx), | |
|
455 | 'parent': lambda context, mapping: parents(ctx, rev - 1), | |
|
456 | 'child': lambda context, mapping: children(ctx, rev + 1), | |
|
463 | 457 | 'changelogtag': showtags, |
|
464 | 458 | 'files': files, |
|
465 | 459 | }) |
@@ -529,7 +523,7 b' def changesetentry(web, ctx):' | |||
|
529 | 523 | changesetbranch=showbranch, |
|
530 | 524 | files=templateutil.mappedgenerator(_listfilesgen, |
|
531 | 525 | args=(ctx, web.stripecount)), |
|
532 | diffsummary=_kwfunc(lambda context, mapping: diffsummary(diffstatsgen)), | 
|
526 | diffsummary=lambda context, mapping: diffsummary(diffstatsgen), | |
|
533 | 527 | diffstat=diffstats, |
|
534 | 528 | archives=web.archivelist(ctx.hex()), |
|
535 | 529 | **pycompat.strkwargs(commonentry(web.repo, ctx))) |
@@ -382,6 +382,7 b' class httppeer(wireprotov1peer.wirepeer)' | |||
|
382 | 382 | self._path = path |
|
383 | 383 | self._url = url |
|
384 | 384 | self._caps = caps |
|
385 | self.limitedarguments = caps is not None and 'httppostargs' not in caps | |
|
385 | 386 | self._urlopener = opener |
|
386 | 387 | self._requestbuilder = requestbuilder |
|
387 | 388 | |
@@ -750,6 +751,9 b' class httpv2executor(object):' | |||
|
750 | 751 | |
|
751 | 752 | @interfaceutil.implementer(repository.ipeerv2) |
|
752 | 753 | class httpv2peer(object): |
|
754 | ||
|
755 | limitedarguments = False | |
|
756 | ||
|
753 | 757 | def __init__(self, ui, repourl, apipath, opener, requestbuilder, |
|
754 | 758 | apidescriptor): |
|
755 | 759 | self.ui = ui |
@@ -128,8 +128,7 b' class mixedrepostorecache(_basefilecache' | |||
|
128 | 128 | # scmutil.filecache only uses the path for passing back into our |
|
129 | 129 | # join(), so we can safely pass a list of paths and locations |
|
130 | 130 | super(mixedrepostorecache, self).__init__(*pathsandlocations) |
|
131 | for path, location in pathsandlocations: | 
|
132 | _cachedfiles.update(pathsandlocations) | |
|
131 | _cachedfiles.update(pathsandlocations) | |
|
133 | 132 | |
|
134 | 133 | def join(self, obj, fnameandlocation): |
|
135 | 134 | fname, location = fnameandlocation |
@@ -910,6 +909,7 b' class localrepository(object):' | |||
|
910 | 909 | 'treemanifest', |
|
911 | 910 | REVLOGV2_REQUIREMENT, |
|
912 | 911 | SPARSEREVLOG_REQUIREMENT, |
|
912 | bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT, | |
|
913 | 913 | } |
|
914 | 914 | _basesupported = supportedformats | { |
|
915 | 915 | 'store', |
@@ -1069,6 +1069,8 b' class localrepository(object):' | |||
|
1069 | 1069 | # Signature to cached matcher instance. |
|
1070 | 1070 | self._sparsematchercache = {} |
|
1071 | 1071 | |
|
1072 | self._extrafilterid = repoview.extrafilter(ui) | |
|
1073 | ||
|
1072 | 1074 | def _getvfsward(self, origfunc): |
|
1073 | 1075 | """build a ward for self.vfs""" |
|
1074 | 1076 | rref = weakref.ref(self) |
@@ -1216,11 +1218,14 b' class localrepository(object):' | |||
|
1216 | 1218 | |
|
1217 | 1219 | In other word, there is always only one level of `repoview` "filtering". |
|
1218 | 1220 | """ |
|
1221 | if self._extrafilterid is not None and '%' not in name: | |
|
1222 | name = name + '%' + self._extrafilterid | |
|
1223 | ||
|
1219 | 1224 | cls = repoview.newtype(self.unfiltered().__class__) |
|
1220 | 1225 | return cls(self, name, visibilityexceptions) |
|
1221 | 1226 | |
|
1222 | 1227 | @mixedrepostorecache(('bookmarks', 'plain'), ('bookmarks.current', 'plain'), |
|
1223 | ('00changelog.i', '')) | |
|
1228 | ('bookmarks', ''), ('00changelog.i', '')) | |
|
1224 | 1229 | def _bookmarks(self): |
|
1225 | 1230 | return bookmarks.bmstore(self) |
|
1226 | 1231 | |
@@ -1982,7 +1987,7 b' class localrepository(object):' | |||
|
1982 | 1987 | (self.vfs, 'journal.dirstate'), |
|
1983 | 1988 | (self.vfs, 'journal.branch'), |
|
1984 | 1989 | (self.vfs, 'journal.desc'), |
|
1985 | (self.vfs, 'journal.bookmarks'), | 
|
1990 | (bookmarks.bookmarksvfs(self), 'journal.bookmarks'), | |
|
1986 | 1991 | (self.svfs, 'journal.phaseroots')) |
|
1987 | 1992 | |
|
1988 | 1993 | def undofiles(self): |
@@ -1997,8 +2002,9 b' class localrepository(object):' | |||
|
1997 | 2002 | encoding.fromlocal(self.dirstate.branch())) |
|
1998 | 2003 | self.vfs.write("journal.desc", |
|
1999 | 2004 | "%d\n%s\n" % (len(self), desc)) |
|
2000 | self.vfs.write("journal.bookmarks", | |
|
2001 | self.vfs.tryread("bookmarks")) | |
|
2005 | bookmarksvfs = bookmarks.bookmarksvfs(self) | |
|
2006 | bookmarksvfs.write("journal.bookmarks", | |
|
2007 | bookmarksvfs.tryread("bookmarks")) | |
|
2002 | 2008 | self.svfs.write("journal.phaseroots", |
|
2003 | 2009 | self.svfs.tryread("phaseroots")) |
|
2004 | 2010 | |
@@ -2068,8 +2074,9 b' class localrepository(object):' | |||
|
2068 | 2074 | vfsmap = {'plain': self.vfs, '': self.svfs} |
|
2069 | 2075 | transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn, |
|
2070 | 2076 | checkambigfiles=_cachedfiles) |
|
2071 | if self.vfs.exists('undo.bookmarks'): | |
|
2072 | self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True) | |
|
2077 | bookmarksvfs = bookmarks.bookmarksvfs(self) | |
|
2078 | if bookmarksvfs.exists('undo.bookmarks'): | |
|
2079 | bookmarksvfs.rename('undo.bookmarks', 'bookmarks', checkambig=True) | |
|
2073 | 2080 | if self.svfs.exists('undo.phaseroots'): |
|
2074 | 2081 | self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True) |
|
2075 | 2082 | self.invalidate() |
@@ -2152,6 +2159,8 b' class localrepository(object):' | |||
|
2152 | 2159 | for ctx in self['.'].parents(): |
|
2153 | 2160 | ctx.manifest() # accessing the manifest is enough |
|
2154 | 2161 | |
|
2162 | # accessing fnode cache warms the cache | |
|
2163 | tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs()) | |
|
2155 | 2164 | # accessing tags warm the cache |
|
2156 | 2165 | self.tags() |
|
2157 | 2166 | self.filtered('served').tags() |
@@ -2362,7 +2371,10 b' class localrepository(object):' | |||
|
2362 | 2371 | node = fctx.filenode() |
|
2363 | 2372 | if node in [fparent1, fparent2]: |
|
2364 | 2373 | self.ui.debug('reusing %s filelog entry\n' % fname) |
|
2365 | if manifest1.flags(fname) != fctx.flags(): | |
|
2374 | if ((fparent1 != nullid and | |
|
2375 | manifest1.flags(fname) != fctx.flags()) or | |
|
2376 | (fparent2 != nullid and | |
|
2377 | manifest2.flags(fname) != fctx.flags())): | |
|
2366 | 2378 | changelist.append(fname) |
|
2367 | 2379 | return node |
|
2368 | 2380 | |
@@ -2556,17 +2568,17 b' class localrepository(object):' | |||
|
2556 | 2568 | _('note: commit message saved in %s\n') % msgfn) |
|
2557 | 2569 | raise |
|
2558 | 2570 | |
|
2559 | def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2): | |
|
2571 | def commithook(): | |
|
2560 | 2572 | # hack for command that use a temporary commit (eg: histedit) |
|
2561 | 2573 | # temporary commit got stripped before hook release |
|
2562 | 2574 | if self.changelog.hasnode(ret): |
|
2563 | self.hook("commit", node=node, parent1=parent1, | 
|
2564 | parent2=parent2) | 
|
2575 | self.hook("commit", node=hex(ret), parent1=hookp1, | |
|
2576 | parent2=hookp2) | |
|
2565 | 2577 | self._afterlock(commithook) |
|
2566 | 2578 | return ret |
|
2567 | 2579 | |
|
2568 | 2580 | @unfilteredmethod |
|
2569 | def commitctx(self, ctx, error=False): | |
|
2581 | def commitctx(self, ctx, error=False, origctx=None): | |
|
2570 | 2582 | """Add a new revision to current repository. |
|
2571 | 2583 | Revision information is passed via the context argument. |
|
2572 | 2584 | |
@@ -2574,6 +2586,12 b' class localrepository(object):' | |||
|
2574 | 2586 | modified/added/removed files. On merge, it may be wider than the |
|
2575 | 2587 | ctx.files() to be committed, since any file nodes derived directly |
|
2576 | 2588 | from p1 or p2 are excluded from the committed ctx.files(). |
|
2589 | ||
|
2590 | origctx is for convert to work around the problem that bug | |
|
2591 | fixes to the files list in changesets change hashes. For | |
|
2592 | convert to be the identity, it can pass an origctx and this | |
|
2593 | function will use the same files list when it makes sense to | |
|
2594 | do so. | |
|
2577 | 2595 | """ |
|
2578 | 2596 | |
|
2579 | 2597 | p1, p2 = ctx.p1(), ctx.p2() |
@@ -2581,10 +2599,13 b' class localrepository(object):' | |||
|
2581 | 2599 | |
|
2582 | 2600 | writecopiesto = self.ui.config('experimental', 'copies.write-to') |
|
2583 | 2601 | writefilecopymeta = writecopiesto != 'changeset-only' |
|
2602 | writechangesetcopy = (writecopiesto in | |
|
2603 | ('changeset-only', 'compatibility')) | |
|
2584 | 2604 | p1copies, p2copies = None, None |
|
2585 | if writecopiesto in ('changeset-only', 'compatibility'): | 
|
2605 | if writechangesetcopy: | |
|
2586 | 2606 | p1copies = ctx.p1copies() |
|
2587 | 2607 | p2copies = ctx.p2copies() |
|
2608 | filesadded, filesremoved = None, None | |
|
2588 | 2609 | with self.lock(), self.transaction("commit") as tr: |
|
2589 | 2610 | trp = weakref.proxy(tr) |
|
2590 | 2611 | |
@@ -2593,6 +2614,9 b' class localrepository(object):' | |||
|
2593 | 2614 | self.ui.debug('reusing known manifest\n') |
|
2594 | 2615 | mn = ctx.manifestnode() |
|
2595 | 2616 | files = ctx.files() |
|
2617 | if writechangesetcopy: | |
|
2618 | filesadded = ctx.filesadded() | |
|
2619 | filesremoved = ctx.filesremoved() | |
|
2596 | 2620 | elif ctx.files(): |
|
2597 | 2621 | m1ctx = p1.manifestctx() |
|
2598 | 2622 | m2ctx = p2.manifestctx() |
@@ -2633,10 +2657,51 b' class localrepository(object):' | |||
|
2633 | 2657 | raise |
|
2634 | 2658 | |
|
2635 | 2659 | # update manifest |
|
2636 | removed = [f for f in sorted(removed) if f in m1 or f in m2] | 
|
2637 | drop = [f for f in removed if f in m] | |
|
2660 | removed = [f for f in removed if f in m1 or f in m2] | |
|
2661 | drop = sorted([f for f in removed if f in m]) | |
|
2638 | 2662 | for f in drop: |
|
2639 | 2663 | del m[f] |
|
2664 | if p2.rev() != nullrev: | |
|
2665 | @util.cachefunc | |
|
2666 | def mas(): | |
|
2667 | p1n = p1.node() | |
|
2668 | p2n = p2.node() | |
|
2669 | cahs = self.changelog.commonancestorsheads(p1n, p2n) | |
|
2670 | if not cahs: | |
|
2671 | cahs = [nullrev] | |
|
2672 | return [self[r].manifest() for r in cahs] | |
|
2673 | def deletionfromparent(f): | |
|
2674 | # When a file is removed relative to p1 in a merge, this | |
|
2675 | # function determines whether the absence is due to a | |
|
2676 | # deletion from a parent, or whether the merge commit | |
|
2677 | # itself deletes the file. We decide this by doing a | |
|
2678 | # simplified three way merge of the manifest entry for | |
|
2679 | # the file. There are two ways we decide the merge | |
|
2680 | # itself didn't delete a file: | |
|
2681 | # - neither parent (nor the merge) contain the file | |
|
2682 | # - exactly one parent contains the file, and that | |
|
2683 | # parent has the same filelog entry as the merge | |
|
2684 | # ancestor (or all of them if there two). In other | |
|
2685 | # words, that parent left the file unchanged while the | |
|
2686 | # other one deleted it. | |
|
2687 | # One way to think about this is that deleting a file is | |
|
2688 | # similar to emptying it, so the list of changed files | |
|
2689 | # should be similar either way. The computation | |
|
2690 | # described above is not done directly in _filecommit | |
|
2691 | # when creating the list of changed files, however | |
|
2692 | # it does something very similar by comparing filelog | |
|
2693 | # nodes. | |
|
2694 | if f in m1: | |
|
2695 | return (f not in m2 | |
|
2696 | and all(f in ma and ma.find(f) == m1.find(f) | |
|
2697 | for ma in mas())) | |
|
2698 | elif f in m2: | |
|
2699 | return all(f in ma and ma.find(f) == m2.find(f) | |
|
2700 | for ma in mas()) | |
|
2701 | else: | |
|
2702 | return True | |
|
2703 | removed = [f for f in removed if not deletionfromparent(f)] | |
|
2704 | ||
|
2640 | 2705 | files = changed + removed |
|
2641 | 2706 | md = None |
|
2642 | 2707 | if not files: |
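The long comment in this hunk reduces to a small pure function over the two parent manifests and the merge ancestors. Restated over plain dicts (path -> filelog entry) as a sketch:

    def deletion_from_parent(f, m1, m2, ancestors):
        # True if f's absence from the merge was inherited from a parent
        # rather than introduced by the merge commit itself.
        if f in m1:
            return f not in m2 and all(
                f in ma and ma[f] == m1[f] for ma in ancestors)
        elif f in m2:
            return all(f in ma and ma[f] == m2[f] for ma in ancestors)
        else:
            return True  # neither parent has it: nothing newly deleted

    # p1 kept the file unchanged from the ancestor while p2 deleted it,
    # so the merge itself did not delete anything:
    assert deletion_from_parent('a', {'a': 'n1'}, {}, [{'a': 'n1'}])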
@@ -2659,8 +2724,13 b' class localrepository(object):' | |||
|
2659 | 2724 | mn = mctx.write(trp, linkrev, |
|
2660 | 2725 | p1.manifestnode(), p2.manifestnode(), |
|
2661 | 2726 | added, drop, match=self.narrowmatch()) |
|
2727 | ||
|
2728 | if writechangesetcopy: | |
|
2729 | filesadded = [f for f in changed | |
|
2730 | if not (f in m1 or f in m2)] | |
|
2731 | filesremoved = removed | |
|
2662 | 2732 | else: |
|
2663 | self.ui.debug('reusing manifest form p1 (listed files ' | 
|
2733 | self.ui.debug('reusing manifest from p1 (listed files ' | |
|
2664 | 2734 | 'actually unchanged)\n') |
|
2665 | 2735 | mn = p1.manifestnode() |
|
2666 | 2736 | else: |
@@ -2668,13 +2738,26 b' class localrepository(object):' | |||
|
2668 | 2738 | mn = p1.manifestnode() |
|
2669 | 2739 | files = [] |
|
2670 | 2740 | |
|
2741 | if writecopiesto == 'changeset-only': | |
|
2742 | # If writing only to changeset extras, use None to indicate that | |
|
2743 | # no entry should be written. If writing to both, write an empty | |
|
2744 | # entry to prevent the reader from falling back to reading | |
|
2745 | # filelogs. | |
|
2746 | p1copies = p1copies or None | |
|
2747 | p2copies = p2copies or None | |
|
2748 | filesadded = filesadded or None | |
|
2749 | filesremoved = filesremoved or None | |
|
2750 | ||
|
2751 | if origctx and origctx.manifestnode() == mn: | |
|
2752 | files = origctx.files() | |
|
2753 | ||
|
2671 | 2754 | # update changelog |
|
2672 | 2755 | self.ui.note(_("committing changelog\n")) |
|
2673 | 2756 | self.changelog.delayupdate(tr) |
|
2674 | 2757 | n = self.changelog.add(mn, files, ctx.description(), |
|
2675 | 2758 | trp, p1.node(), p2.node(), |
|
2676 | 2759 | user, ctx.date(), ctx.extra().copy(), |
|
2677 | p1copies, p2copies) | |
|
2760 | p1copies, p2copies, filesadded, filesremoved) | |
|
2678 | 2761 | xp1, xp2 = p1.hex(), p2 and p2.hex() or '' |
|
2679 | 2762 | self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1, |
|
2680 | 2763 | parent2=xp2) |
@@ -3013,6 +3096,9 b' def newreporequirements(ui, createopts):' | |||
|
3013 | 3096 | if createopts.get('lfs'): |
|
3014 | 3097 | requirements.add('lfs') |
|
3015 | 3098 | |
|
3099 | if ui.configbool('format', 'bookmarks-in-store'): | |
|
3100 | requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT) | |
|
3101 | ||
|
3016 | 3102 | return requirements |
|
3017 | 3103 | |
|
3018 | 3104 | def filterknowncreateopts(ui, createopts): |
@@ -743,10 +743,15 b' def getrevs(repo, pats, opts):' | |||
|
743 | 743 | return match |
|
744 | 744 | |
|
745 | 745 | expr = _makerevset(repo, match, pats, slowpath, opts) |
|
746 | if opts.get('graph') and opts.get('rev'): | 
|
746 | if opts.get('graph'): | |
|
747 | 747 | # User-specified revs might be unsorted, but don't sort before |
|
748 | 748 | # _makerevset because it might depend on the order of revs |
|
749 | if not (revs.isdescending() or revs.istopo()): | |
|
749 | if repo.ui.configbool('experimental', 'log.topo'): | |
|
750 | if not revs.istopo(): | |
|
751 | revs = dagop.toposort(revs, repo.changelog.parentrevs) | |
|
752 | # TODO: try to iterate the set lazily | |
|
753 | revs = revset.baseset(list(revs), istopo=True) | |
|
754 | elif not (revs.isdescending() or revs.istopo()): | |
|
750 | 755 | revs.sort(reverse=True) |
|
751 | 756 | if expr: |
|
752 | 757 | matcher = revset.match(None, expr) |
@@ -857,7 +862,7 b' def _graphnodeformatter(ui, displayer):' | |||
|
857 | 862 | return templ.renderdefault(props) |
|
858 | 863 | return formatnode |
|
859 | 864 | |
|
860 | def displaygraph(ui, repo, dag, displayer, edgefn, getrenamed=None, props=None): | 
|
865 | def displaygraph(ui, repo, dag, displayer, edgefn, getcopies=None, props=None): | |
|
861 | 866 | props = props or {} |
|
862 | 867 | formatnode = _graphnodeformatter(ui, displayer) |
|
863 | 868 | state = graphmod.asciistate() |
@@ -885,13 +890,7 b' def displaygraph(ui, repo, dag, displaye' | |||
|
885 | 890 | |
|
886 | 891 | for rev, type, ctx, parents in dag: |
|
887 | 892 | char = formatnode(repo, ctx) |
|
888 | copies = None | |
|
889 | if getrenamed and ctx.rev(): | |
|
890 | copies = [] | |
|
891 | for fn in ctx.files(): | |
|
892 | rename = getrenamed(fn, ctx.rev()) | |
|
893 | if rename: | |
|
894 | copies.append((fn, rename)) | |
|
893 | copies = getcopies(ctx) if getcopies else None | |
|
895 | 894 | edges = edgefn(type, char, state, rev, parents) |
|
896 | 895 | firstedge = next(edges) |
|
897 | 896 | width = firstedge[2] |
@@ -910,16 +909,10 b' def displaygraphrevs(ui, repo, revs, dis' | |||
|
910 | 909 | revdag = graphmod.dagwalker(repo, revs) |
|
911 | 910 | displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges, getrenamed) |
|
912 | 911 | |
|
913 | def displayrevs(ui, repo, revs, displayer, getrenamed): | 
|
912 | def displayrevs(ui, repo, revs, displayer, getcopies): | |
|
914 | 913 | for rev in revs: |
|
915 | 914 | ctx = repo[rev] |
|
916 | copies = None | |
|
917 | if getrenamed is not None and rev: | |
|
918 | copies = [] | |
|
919 | for fn in ctx.files(): | |
|
920 | rename = getrenamed(fn, rev) | |
|
921 | if rename: | |
|
922 | copies.append((fn, rename)) | |
|
915 | copies = getcopies(ctx) if getcopies else None | |
|
923 | 916 | displayer.show(ctx, copies=copies) |
|
924 | 917 | displayer.flush(ctx) |
|
925 | 918 | displayer.close() |
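Both display hunks swap the inline `getrenamed` loop for a `getcopies(ctx)` callback that returns the same `(file, source)` pairs. A hedged sketch of adapting the old interface to the new one (the wrapper is invented for this note; its loop body is the removed code):

    def makegetcopies(getrenamed):
        # Wrap the old getrenamed(fn, rev) interface into the getcopies(ctx)
        # callback now taken by displaygraph() and displayrevs().
        def getcopies(ctx):
            if not ctx.rev():
                return None  # matches the old `and rev` guard
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename))
            return copies
        return getcopies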
@@ -506,9 +506,9 b' class manifestdict(object):' | |||
|
506 | 506 | if match(fn): |
|
507 | 507 | yield fn |
|
508 | 508 | |
|
509 | # for dirstate.walk, files=['.'] means "walk the whole tree". | 
|
509 | # for dirstate.walk, files=[''] means "walk the whole tree". | |
|
510 | 510 | # follow that here, too |
|
511 | fset.discard('.') | 
|
511 | fset.discard('') | |
|
512 | 512 | |
|
513 | 513 | for fn in sorted(fset): |
|
514 | 514 | if not self.hasdir(fn): |
@@ -1078,9 +1078,9 b' class treemanifest(object):' | |||
|
1078 | 1078 | fset.remove(fn) |
|
1079 | 1079 | yield fn |
|
1080 | 1080 | |
|
1081 | # for dirstate.walk, files=['.'] means "walk the whole tree". | 
|
1081 | # for dirstate.walk, files=[''] means "walk the whole tree". | |
|
1082 | 1082 | # follow that here, too |
|
1083 | fset.discard('.') | 
|
1083 | fset.discard('') | |
|
1084 | 1084 | |
|
1085 | 1085 | for fn in sorted(fset): |
|
1086 | 1086 | if not self.hasdir(fn): |
@@ -1088,7 +1088,7 b' class treemanifest(object):' | |||
|
1088 | 1088 | |
|
1089 | 1089 | def _walk(self, match): |
|
1090 | 1090 | '''Recursively generates matching file names for walk().''' |
|
1091 | visit = match.visitchildrenset(self._dir[:-1] or '.') | 
|
1091 | visit = match.visitchildrenset(self._dir[:-1]) | |
|
1092 | 1092 | if not visit: |
|
1093 | 1093 | return |
|
1094 | 1094 | |
@@ -1116,7 +1116,7 b' class treemanifest(object):' | |||
|
1116 | 1116 | '''recursively generate a new manifest filtered by the match argument. |
|
1117 | 1117 | ''' |
|
1118 | 1118 | |
|
1119 | visit = match.visitchildrenset(self._dir[:-1] or '.') | 
|
1119 | visit = match.visitchildrenset(self._dir[:-1]) | |
|
1120 | 1120 | if visit == 'all': |
|
1121 | 1121 | return self.copy() |
|
1122 | 1122 | ret = treemanifest(self._dir) |
@@ -1275,7 +1275,7 b' class treemanifest(object):' | |||
|
1275 | 1275 | return m._dirs.get(d, emptytree)._node |
|
1276 | 1276 | |
|
1277 | 1277 | # let's skip investigating things that `match` says we do not need. |
|
1278 | visit = match.visitchildrenset(self._dir[:-1] or '.') | 
|
1278 | visit = match.visitchildrenset(self._dir[:-1]) | |
|
1279 | 1279 | visit = self._loadchildrensetlazy(visit) |
|
1280 | 1280 | if visit == 'this' or visit == 'all': |
|
1281 | 1281 | visit = None |
@@ -1294,7 +1294,7 b' class treemanifest(object):' | |||
|
1294 | 1294 | |
|
1295 | 1295 | If `matcher` is provided, it only returns subtrees that match. |
|
1296 | 1296 | """ |
|
1297 | if matcher and not matcher.visitdir(self._dir[:-1] or '.'): | 
|
1297 | if matcher and not matcher.visitdir(self._dir[:-1]): | |
|
1298 | 1298 | return |
|
1299 | 1299 | if not matcher or matcher(self._dir[:-1]): |
|
1300 | 1300 | yield self |
@@ -1417,6 +1417,10 b' class manifestfulltextcache(util.lrucach' | |||
|
1417 | 1417 | self.write() |
|
1418 | 1418 | self._read = False |
|
1419 | 1419 | |
|
1420 | # and upper bound of what we expect from compression | |
|
1421 | # (real live value seems to be "3") | |
|
1422 | MAXCOMPRESSION = 3 | |
|
1423 | ||
|
1420 | 1424 | @interfaceutil.implementer(repository.imanifeststorage) |
|
1421 | 1425 | class manifestrevlog(object): |
|
1422 | 1426 | '''A revlog that stores manifest texts. This is responsible for caching the |
@@ -1467,7 +1471,8 b' class manifestrevlog(object):' | |||
|
1467 | 1471 | self._revlog = revlog.revlog(opener, indexfile, |
|
1468 | 1472 | # only root indexfile is cached |
|
1469 | 1473 | checkambig=not bool(tree), |
|
1470 | mmaplargeindex=True) | 
|
1474 | mmaplargeindex=True, | |
|
1475 | upperboundcomp=MAXCOMPRESSION) | |
|
1471 | 1476 | |
|
1472 | 1477 | self.index = self._revlog.index |
|
1473 | 1478 | self.version = self._revlog.version |
@@ -1526,8 +1531,8 b' class manifestrevlog(object):' | |||
|
1526 | 1531 | |
|
1527 | 1532 | _checkforbidden(added) |
|
1528 | 1533 | # combine the changed lists into one sorted iterator |
|
1529 | work = heapq.merge([(x, False) for x in added], | |
|
1530 | [(x, True) for x in removed]) | |
|
1534 | work = heapq.merge([(x, False) for x in sorted(added)], | |
|
1535 | [(x, True) for x in sorted(removed)]) | |
|
1531 | 1536 | |
|
1532 | 1537 | arraytext, deltatext = m.fastdelta(self.fulltextcache[p1], work) |
|
1533 | 1538 | cachedelta = self._revlog.rev(p1), deltatext |
@@ -1725,7 +1730,7 b' class manifestlog(object):' | |||
|
1725 | 1730 | return self._dirmancache[tree][node] |
|
1726 | 1731 | |
|
1727 | 1732 | if not self._narrowmatch.always(): |
|
1728 | if not self._narrowmatch.visitdir(tree[:-1] or '.'): | 
|
1733 | if not self._narrowmatch.visitdir(tree[:-1]): | |
|
1729 | 1734 | return excludeddirmanifestctx(tree, node) |
|
1730 | 1735 | if tree: |
|
1731 | 1736 | if self._rootstore._treeondisk: |
@@ -1918,7 +1923,7 b' class treemanifestctx(object):' | |||
|
1918 | 1923 | def _storage(self): |
|
1919 | 1924 | narrowmatch = self._manifestlog._narrowmatch |
|
1920 | 1925 | if not narrowmatch.always(): |
|
1921 | if not narrowmatch.visitdir(self._dir[:-1] or '.'): | 
|
1926 | if not narrowmatch.visitdir(self._dir[:-1]): | |
|
1922 | 1927 | return excludedmanifestrevlog(self._dir) |
|
1923 | 1928 | return self._manifestlog.getstorage(self._dir) |
|
1924 | 1929 |
@@ -17,6 +17,7 b' from . import (' | |||
|
17 | 17 | encoding, |
|
18 | 18 | error, |
|
19 | 19 | pathutil, |
|
20 | policy, | |
|
20 | 21 | pycompat, |
|
21 | 22 | util, |
|
22 | 23 | ) |
@@ -24,6 +25,8 b' from .utils import (' | |||
|
24 | 25 | stringutil, |
|
25 | 26 | ) |
|
26 | 27 | |
|
28 | rustmod = policy.importrust('filepatterns') | |
|
29 | ||
|
27 | 30 | allpatternkinds = ('re', 'glob', 'path', 'relglob', 'relpath', 'relre', |
|
28 | 31 | 'rootglob', |
|
29 | 32 | 'listfile', 'listfile0', 'set', 'include', 'subinclude', |
@@ -305,9 +308,6 b' class basematcher(object):' | |||
|
305 | 308 | |
|
306 | 309 | def __call__(self, fn): |
|
307 | 310 | return self.matchfn(fn) |
|
308 | def __iter__(self): | |
|
309 | for f in self._files: | |
|
310 | yield f | |
|
311 | 311 | # Callbacks related to how the matcher is used by dirstate.walk. |
|
312 | 312 | # Subscribers to these events must monkeypatch the matcher object. |
|
313 | 313 | def bad(self, f, msg): |
@@ -377,7 +377,7 b' class basematcher(object):' | |||
|
377 | 377 | the following values (assuming the implementation of visitchildrenset |
|
378 | 378 | is capable of recognizing this; some implementations are not). |
|
379 | 379 | |
|
380 | '.' -> {'foo', 'qux'} | 
|
380 | '' -> {'foo', 'qux'} | |
|
381 | 381 | 'baz' -> set() |
|
382 | 382 | 'foo' -> {'bar'} |
|
383 | 383 | # Ideally this would be 'all', but since the prefix nature of matchers |
@@ -480,11 +480,19 b' class predicatematcher(basematcher):' | |||
|
480 | 480 | or pycompat.byterepr(self.matchfn)) |
|
481 | 481 | return '<predicatenmatcher pred=%s>' % s |
|
482 | 482 | |
|
483 | def normalizerootdir(dir, funcname): | |
|
484 | if dir == '.': | |
|
485 | util.nouideprecwarn("match.%s() no longer accepts " | |
|
486 | "'.', use '' instead." % funcname, '5.1') | |
|
487 | return '' | |
|
488 | return dir | |
|
489 | ||
|
490 | ||
|
483 | 491 | class patternmatcher(basematcher): |
|
484 | 492 | """Matches a set of (kind, pat, source) against a 'root' directory. |
|
485 | 493 | |
|
486 | 494 | >>> kindpats = [ |
|
487 | ... (b're', b'.*\.c$', b''), | |
|
495 | ... (b're', br'.*\.c$', b''), | |
|
488 | 496 | ... (b'path', b'foo/a', b''), |
|
489 | 497 | ... (b'relpath', b'b', b''), |
|
490 | 498 | ... (b'glob', b'*.h', b''), |
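`normalizerootdir` above gives '.'-style callers one deprecation cycle before '' becomes mandatory. The same shim in isolation (a sketch substituting the stdlib `warnings` for `util.nouideprecwarn`):

    import warnings

    def normalizerootdir(dir, funcname):
        # Accept the legacy '.' spelling of the root directory but steer
        # callers toward '', as the 5.1 deprecation above does.
        if dir == '.':
            warnings.warn("match.%s() no longer accepts '.', use '' instead"
                          % funcname, DeprecationWarning, stacklevel=2)
            return ''
        return dir

    assert normalizerootdir('', 'visitdir') == ''
    assert normalizerootdir('foo/bar', 'visitdir') == 'foo/bar'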
@@ -504,7 +512,7 b' class patternmatcher(basematcher):' | |||
|
504 | 512 | True |
|
505 | 513 | |
|
506 | 514 | >>> m.files() |
|
507 | ['.', 'foo/a', 'b', '.'] | 
|
515 | ['', 'foo/a', 'b', ''] | |
|
508 | 516 | >>> m.exact(b'foo/a') |
|
509 | 517 | True |
|
510 | 518 | >>> m.exact(b'b') |
@@ -522,13 +530,13 b' class patternmatcher(basematcher):' | |||
|
522 | 530 | |
|
523 | 531 | @propertycache |
|
524 | 532 | def _dirs(self): |
|
525 | return set(util.dirs(self._fileset)) | {'.'} | 
|
533 | return set(util.dirs(self._fileset)) | |
|
526 | 534 | |
|
527 | 535 | def visitdir(self, dir): |
|
536 | dir = normalizerootdir(dir, 'visitdir') | |
|
528 | 537 | if self._prefix and dir in self._fileset: |
|
529 | 538 | return 'all' |
|
530 | return ('.' in self._fileset or | 
|
531 | dir in self._fileset or | |
|
539 | return (dir in self._fileset or | |
|
532 | 540 | dir in self._dirs or |
|
533 | 541 | any(parentdir in self._fileset |
|
534 | 542 | for parentdir in util.finddirs(dir))) |
@@ -561,7 +569,7 b' class _dirchildren(object):' | |||
|
561 | 569 | addpath(f) |
|
562 | 570 | |
|
563 | 571 | def addpath(self, path): |
|
564 | if path == '.': | 
|
572 | if path == '': | |
|
565 | 573 | return |
|
566 | 574 | dirs = self._dirs |
|
567 | 575 | findsplitdirs = _dirchildren._findsplitdirs |
@@ -575,16 +583,15 b' class _dirchildren(object):' | |||
|
575 | 583 | # yields (dirname, basename) tuples, walking back to the root. This is |
|
576 | 584 | # very similar to util.finddirs, except: |
|
577 | 585 | # - produces a (dirname, basename) tuple, not just 'dirname' |
|
578 | # - includes root dir | |
|
579 | 586 | # Unlike manifest._splittopdir, this does not suffix `dirname` with a |
|
580 | # slash, and produces '.' for the root instead of ''. | |
|
587 | # slash. | |
|
581 | 588 | oldpos = len(path) |
|
582 | 589 | pos = path.rfind('/') |
|
583 | 590 | while pos != -1: |
|
584 | 591 | yield path[:pos], path[pos + 1:oldpos] |
|
585 | 592 | oldpos = pos |
|
586 | 593 | pos = path.rfind('/', 0, pos) |
|
587 | yield '.', path[:oldpos] | 
|
594 | yield '', path[:oldpos] | |
|
588 | 595 | |
|
589 | 596 | def get(self, path): |
|
590 | 597 | return self._dirs.get(path, set()) |
@@ -603,13 +610,13 b' class includematcher(basematcher):' | |||
|
603 | 610 | self._dirs = set(dirs) |
|
604 | 611 | # parents are directories which are non-recursively included because |
|
605 | 612 | # they are needed to get to items in _dirs or _roots. |
|
606 | self._parents = set(parents) | 
|
613 | self._parents = parents | |
|
607 | 614 | |
|
608 | 615 | def visitdir(self, dir): |
|
616 | dir = normalizerootdir(dir, 'visitdir') | |
|
609 | 617 | if self._prefix and dir in self._roots: |
|
610 | 618 | return 'all' |
|
611 | return ('.' in self._roots or | 
|
612 | dir in self._roots or | |
|
619 | return (dir in self._roots or | |
|
613 | 620 | dir in self._dirs or |
|
614 | 621 | dir in self._parents or |
|
615 | 622 | any(parentdir in self._roots |
@@ -632,7 +639,7 b' class includematcher(basematcher):' | |||
|
632 | 639 | return 'all' |
|
633 | 640 | # Note: this does *not* include the 'dir in self._parents' case from |
|
634 | 641 | # visitdir, that's handled below. |
|
635 | if ('.' in self._roots or | 
|
642 | if ('' in self._roots or | |
|
636 | 643 | dir in self._roots or |
|
637 | 644 | dir in self._dirs or |
|
638 | 645 | any(parentdir in self._roots |
@@ -651,7 +658,7 b' class exactmatcher(basematcher):' | |||
|
651 | 658 | r'''Matches the input files exactly. They are interpreted as paths, not |
|
652 | 659 | patterns (so no kind-prefixes). |
|
653 | 660 | |
|
654 | >>> m = exactmatcher([b'a.txt', b're:.*\.c$']) | |
|
661 | >>> m = exactmatcher([b'a.txt', br're:.*\.c$']) | |
|
655 | 662 | >>> m(b'a.txt') |
|
656 | 663 | True |
|
657 | 664 | >>> m(b'b.txt') |
@@ -664,7 +671,7 b' class exactmatcher(basematcher):' | |||
|
664 | 671 | So pattern 're:.*\.c$' is not considered as a regex, but as a file name |
|
665 | 672 | >>> m(b'main.c') |
|
666 | 673 | False |
|
667 | >>> m(b're:.*\.c$') | |
|
674 | >>> m(br're:.*\.c$') | |
|
668 | 675 | True |
|
669 | 676 | ''' |
|
670 | 677 | |
@@ -680,22 +687,25 b' class exactmatcher(basematcher):' | |||
|
680 | 687 | |
|
681 | 688 | @propertycache |
|
682 | 689 | def _dirs(self): |
|
683 | return set(util.dirs(self._fileset)) | {'.'} | 
|
690 | return set(util.dirs(self._fileset)) | |
|
684 | 691 | |
|
685 | 692 | def visitdir(self, dir): |
|
693 | dir = normalizerootdir(dir, 'visitdir') | |
|
686 | 694 | return dir in self._dirs |
|
687 | 695 | |
|
688 | 696 | def visitchildrenset(self, dir): |
|
697 | dir = normalizerootdir(dir, 'visitchildrenset') | |
|
698 | ||
|
689 | 699 | if not self._fileset or dir not in self._dirs: |
|
690 | 700 | return set() |
|
691 | 701 | |
|
692 | candidates = self._fileset | self._dirs - {'.'} | 
|
693 | if dir != '.': | 
|
702 | candidates = self._fileset | self._dirs - {''} | |
|
703 | if dir != '': | |
|
694 | 704 | d = dir + '/' |
|
695 | 705 | candidates = set(c[len(d):] for c in candidates if |
|
696 | 706 | c.startswith(d)) |
|
697 | 707 | # self._dirs includes all of the directories, recursively, so if |
|
698 | # we're attempting to match foo/bar/baz.txt, it'll have '.', 'foo', | 
|
708 | # we're attempting to match foo/bar/baz.txt, it'll have '', 'foo', | |
|
699 | 709 | # 'foo/bar' in it. Thus we can safely ignore a candidate that has a |
|
700 | 710 | # '/' in it, indicating a it's for a subdir-of-a-subdir; the |
|
701 | 711 | # immediate subdir will be in there without a slash. |
@@ -769,7 +779,7 b' class differencematcher(basematcher):' | |||
|
769 | 779 | # Possible values for m1: set(...), set() |
|
770 | 780 | # Possible values for m2: 'this', set(...) |
|
771 | 781 | # We ignore m2's set results. They're possibly incorrect: |
|
772 | # m1 = path:dir/subdir, m2=rootfilesin:dir, visitchildrenset('.'): | 
|
782 | # m1 = path:dir/subdir, m2=rootfilesin:dir, visitchildrenset(''): | |
|
773 | 783 | # m1 returns {'dir'}, m2 returns {'dir'}, if we subtracted we'd |
|
774 | 784 | # return set(), which is *not* correct, we still need to visit 'dir'! |
|
775 | 785 | return m1_set |
@@ -915,14 +925,16 b' class subdirmatcher(basematcher):' | |||
|
915 | 925 | return self._matcher.matchfn(self._path + "/" + f) |
|
916 | 926 | |
|
917 | 927 | def visitdir(self, dir): |
|
918 | if dir == '.': | |
|
928 | dir = normalizerootdir(dir, 'visitdir') | |
|
929 | if dir == '': | |
|
919 | 930 | dir = self._path |
|
920 | 931 | else: |
|
921 | 932 | dir = self._path + "/" + dir |
|
922 | 933 | return self._matcher.visitdir(dir) |
|
923 | 934 | |
|
924 | 935 | def visitchildrenset(self, dir): |
|
925 | if dir == '.': | |
|
936 | dir = normalizerootdir(dir, 'visitchildrenset') | |
|
937 | if dir == '': | |
|
926 | 938 | dir = self._path |
|
927 | 939 | else: |
|
928 | 940 | dir = self._path + "/" + dir |
@@ -991,18 +1003,18 b' class prefixdirmatcher(basematcher):' | |||
|
991 | 1003 | |
|
992 | 1004 | @propertycache |
|
993 | 1005 | def _pathdirs(self): |
|
994 | return set(util.finddirs(self._path)) | {'.'} | 
|
1006 | return set(util.finddirs(self._path)) | |
|
995 | 1007 | |
|
996 | 1008 | def visitdir(self, dir): |
|
997 | 1009 | if dir == self._path: |
|
998 | return self._matcher.visitdir('.') | 
|
1010 | return self._matcher.visitdir('') | |
|
999 | 1011 | if dir.startswith(self._pathprefix): |
|
1000 | 1012 | return self._matcher.visitdir(dir[len(self._pathprefix):]) |
|
1001 | 1013 | return dir in self._pathdirs |
|
1002 | 1014 | |
|
1003 | 1015 | def visitchildrenset(self, dir): |
|
1004 | 1016 | if dir == self._path: |
|
1005 | return self._matcher.visitchildrenset('.') |

1017 | return self._matcher.visitchildrenset('') |
|
1006 | 1018 | if dir.startswith(self._pathprefix): |
|
1007 | 1019 | return self._matcher.visitchildrenset(dir[len(self._pathprefix):]) |
|
1008 | 1020 | if dir in self._pathdirs: |
@@ -1075,7 +1087,7 b' class unionmatcher(basematcher):' | |||
|
1075 | 1087 | def patkind(pattern, default=None): |
|
1076 | 1088 | '''If pattern is 'kind:pat' with a known kind, return kind. |
|
1077 | 1089 | |
|
1078 | >>> patkind(b're:.*\.c$') | |
|
1090 | >>> patkind(br're:.*\.c$') | |
|
1079 | 1091 | 're' |
|
1080 | 1092 | >>> patkind(b'glob:*.c') |
|
1081 | 1093 | 'glob' |
@@ -1178,9 +1190,23 b' def _globre(pat):' | |||
|
1178 | 1190 | return res |
|
1179 | 1191 | |
|
1180 | 1192 | def _regex(kind, pat, globsuffix): |
|
1181 | '''Convert a (normalized) pattern of any kind into a regular expression. |

1193 | '''Convert a (normalized) pattern of any kind into a |

1194 | regular expression. |
|
1182 | 1195 | globsuffix is appended to the regexp of globs.''' |
|
1183 | if not pat: | |
|
1196 | ||
|
1197 | if rustmod is not None: | |
|
1198 | try: | |
|
1199 | return rustmod.build_single_regex( | |
|
1200 | kind, | |
|
1201 | pat, | |
|
1202 | globsuffix | |
|
1203 | ) | |
|
1204 | except rustmod.PatternError: | |
|
1205 | raise error.ProgrammingError( | |
|
1206 | 'not a regex pattern: %s:%s' % (kind, pat) | |
|
1207 | ) | |
|
1208 | ||
|
1209 | if not pat and kind in ('glob', 'relpath'): | |
|
1184 | 1210 | return '' |
|
1185 | 1211 | if kind == 're': |
|
1186 | 1212 | return pat |
@@ -1324,13 +1350,17 b' def _patternrootsanddirs(kindpats):' | |||
|
1324 | 1350 | if '[' in p or '{' in p or '*' in p or '?' in p: |
|
1325 | 1351 | break |
|
1326 | 1352 | root.append(p) |
|
1327 | r.append('/'.join(root) or '.') |

1353 | r.append('/'.join(root)) |
|
1328 | 1354 | elif kind in ('relpath', 'path'): |
|
1329 | r.append(pat or '.') |

1355 | if pat == '.': | |
|
1356 | pat = '' | |
|
1357 | r.append(pat) | |
|
1330 | 1358 | elif kind in ('rootfilesin',): |
|
1331 | d.append(pat or '.') |

1359 | if pat == '.': | |
|
1360 | pat = '' | |
|
1361 | d.append(pat) | |
|
1332 | 1362 | else: # relglob, re, relre |
|
1333 | r.append('.') |

1363 | r.append('') |
|
1334 | 1364 | return r, d |
|
1335 | 1365 | |
|
1336 | 1366 | def _roots(kindpats): |
@@ -1347,31 +1377,33 b' def _rootsdirsandparents(kindpats):' | |||
|
1347 | 1377 | |
|
1348 | 1378 | Returns a tuple of (roots, dirs, parents). |
|
1349 | 1379 | |
|
1350 | >>> _rootsdirsandparents( | |
|
1380 | >>> r = _rootsdirsandparents( | |
|
1351 | 1381 | ... [(b'glob', b'g/h/*', b''), (b'glob', b'g/h', b''), |
|
1352 | 1382 | ... (b'glob', b'g*', b'')]) |
|
1353 | (['g/h', 'g/h', '.'], [], ['g', '.']) | |
|
1354 | >>> _rootsdirsandparents( | |
|
1383 | >>> print(r[0:2], sorted(r[2])) # the set has an unstable output | |
|
1384 | (['g/h', 'g/h', ''], []) ['', 'g'] | |
|
1385 | >>> r = _rootsdirsandparents( | |
|
1355 | 1386 | ... [(b'rootfilesin', b'g/h', b''), (b'rootfilesin', b'', b'')]) |
|
1356 | ([], ['g/h', '.'], ['g', '.']) | |
|
1357 | >>> _rootsdirsandparents( | |
|
1387 | >>> print(r[0:2], sorted(r[2])) # the set has an unstable output | |
|
1388 | ([], ['g/h', '']) ['', 'g'] | |
|
1389 | >>> r = _rootsdirsandparents( | |
|
1358 | 1390 | ... [(b'relpath', b'r', b''), (b'path', b'p/p', b''), |
|
1359 | 1391 | ... (b'path', b'', b'')]) |
|
1360 | (['r', 'p/p', '.'], [], ['p', '.']) | |
|
1361 | >>> _rootsdirsandparents( | |
|
1392 | >>> print(r[0:2], sorted(r[2])) # the set has an unstable output | |
|
1393 | (['r', 'p/p', ''], []) ['', 'p'] | |
|
1394 | >>> r = _rootsdirsandparents( | |
|
1362 | 1395 | ... [(b'relglob', b'rg*', b''), (b're', b're/', b''), |
|
1363 | 1396 | ... (b'relre', b'rr', b'')]) |
|
1364 | (['.', '.', '.'], [], ['.']) | |
|
1397 | >>> print(r[0:2], sorted(r[2])) # the set has an unstable output | |
|
1398 | (['', '', ''], []) [''] | |
|
1365 | 1399 | ''' |
|
1366 | 1400 | r, d = _patternrootsanddirs(kindpats) |
|
1367 | 1401 | |
|
1368 | p = [] |

1369 | # Append the parents as non-recursive/exact directories, since they must be |

1402 | p = set() | |
|
1403 | # Add the parents as non-recursive/exact directories, since they must be | |
|
1370 | 1404 | # scanned to get to either the roots or the other exact directories. |
|
1371 | p.extend(util.dirs(d)) |

1372 | p.extend(util.dirs(r)) |

1373 | # util.dirs() does not include the root directory, so add it manually | |
|
1374 | p.append('.') | |
|
1405 | p.update(util.dirs(d)) | |
|
1406 | p.update(util.dirs(r)) | |
|
1375 | 1407 | |
|
1376 | 1408 | # FIXME: all uses of this function convert these to sets, do so before |
|
1377 | 1409 | # returning. |
@@ -1421,9 +1453,24 b' def readpatternfile(filepath, warn, sour' | |||
|
1421 | 1453 | pattern # pattern of the current default type |
|
1422 | 1454 | |
|
1423 | 1455 | if sourceinfo is set, returns a list of tuples: |
|
1424 | (pattern, lineno, originalline). This is useful to debug ignore patterns. |

1456 | (pattern, lineno, originalline). |

1457 | This is useful to debug ignore patterns. |
|
1425 | 1458 | ''' |
|
1426 | 1459 | |
|
1460 | if rustmod is not None: | |
|
1461 | result, warnings = rustmod.read_pattern_file( | |
|
1462 | filepath, | |
|
1463 | bool(warn), | |
|
1464 | sourceinfo, | |
|
1465 | ) | |
|
1466 | ||
|
1467 | for warning_params in warnings: | |
|
1468 | # Can't be easily emitted from Rust, because it would require | |
|
1469 | # a mechanism for both gettext and calling the `warn` function. | |
|
1470 | warn(_("%s: ignoring invalid syntax '%s'\n") % warning_params) | |
|
1471 | ||
|
1472 | return result | |
|
1473 | ||
|
1427 | 1474 | syntaxes = { |
|
1428 | 1475 | 're': 'relre:', |
|
1429 | 1476 | 'regexp': 'relre:', |
@@ -10,6 +10,7 b' from __future__ import absolute_import' | |||
|
10 | 10 | import errno |
|
11 | 11 | import hashlib |
|
12 | 12 | import shutil |
|
13 | import stat | |
|
13 | 14 | import struct |
|
14 | 15 | |
|
15 | 16 | from .i18n import _ |
@@ -683,7 +684,7 b' class mergestate(object):' | |||
|
683 | 684 | def recordactions(self): |
|
684 | 685 | """record remove/add/get actions in the dirstate""" |
|
685 | 686 | branchmerge = self._repo.dirstate.p2() != nullid |
|
686 | recordupdates(self._repo, self.actions(), branchmerge) | |
|
687 | recordupdates(self._repo, self.actions(), branchmerge, None) | |
|
687 | 688 | |
|
688 | 689 | def queueremove(self, f): |
|
689 | 690 | """queues a file to be removed from the dirstate |
@@ -1380,7 +1381,6 b' def calculateupdates(repo, wctx, mctx, a' | |||
|
1380 | 1381 | # Pick the best bid for each file |
|
1381 | 1382 | repo.ui.note(_('\nauction for merging merge bids\n')) |
|
1382 | 1383 | actions = {} |
|
1383 | dms = [] # filenames that have dm actions | |
|
1384 | 1384 | for f, bids in sorted(fbids.items()): |
|
1385 | 1385 | # bids is a mapping from action method to list af actions |
|
1386 | 1386 | # Consensus? |
@@ -1389,8 +1389,6 b' def calculateupdates(repo, wctx, mctx, a' | |||
|
1389 | 1389 | if all(a == l[0] for a in l[1:]): # len(bids) is > 1 |
|
1390 | 1390 | repo.ui.note(_(" %s: consensus for %s\n") % (f, m)) |
|
1391 | 1391 | actions[f] = l[0] |
|
1392 | if m == ACTION_DIR_RENAME_MOVE_LOCAL: | |
|
1393 | dms.append(f) | |
|
1394 | 1392 | continue |
|
1395 | 1393 | # If keep is an option, just do it. |
|
1396 | 1394 | if ACTION_KEEP in bids: |
@@ -1415,18 +1413,7 b' def calculateupdates(repo, wctx, mctx, a' | |||
|
1415 | 1413 | repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') % |
|
1416 | 1414 | (f, m)) |
|
1417 | 1415 | actions[f] = l[0] |
|
1418 | if m == ACTION_DIR_RENAME_MOVE_LOCAL: | |
|
1419 | dms.append(f) | |
|
1420 | 1416 | continue |
|
1421 | # Work around 'dm' that can cause multiple actions for the same file | |
|
1422 | for f in dms: | |
|
1423 | dm, (f0, flags), msg = actions[f] | |
|
1424 | assert dm == ACTION_DIR_RENAME_MOVE_LOCAL, dm | |
|
1425 | if f0 in actions and actions[f0][0] == ACTION_REMOVE: | |
|
1426 | # We have one bid for removing a file and another for moving it. | |
|
1427 | # These two could be merged as first move and then delete ... | |
|
1428 | # but instead drop moving and just delete. | |
|
1429 | del actions[f] | |
|
1430 | 1417 | repo.ui.note(_('end of auction\n\n')) |
|
1431 | 1418 | |
|
1432 | 1419 | if wctx.rev() is None: |
@@ -1478,13 +1465,17 b' def batchremove(repo, wctx, actions):' | |||
|
1478 | 1465 | repo.ui.warn(_("current directory was removed\n" |
|
1479 | 1466 | "(consider changing to repo root: %s)\n") % repo.root) |
|
1480 | 1467 | |
|
1481 | def batchget(repo, mctx, wctx, actions): | |
|
1468 | def batchget(repo, mctx, wctx, wantfiledata, actions): | |
|
1482 | 1469 | """apply gets to the working directory |
|
1483 | 1470 | |
|
1484 | 1471 | mctx is the context to get from |
|
1485 | 1472 | |
|
1486 | yields tuples for progress updates |

1473 | Yields arbitrarily many (False, tuple) for progress updates, followed by | |
|
1474 | exactly one (True, filedata). When wantfiledata is false, filedata is an | |
|
1475 | empty dict. When wantfiledata is true, filedata[f] is a triple (mode, size, | |
|
1476 | mtime) of the file f written for each action. | |
|
1487 | 1477 | """ |
|
1478 | filedata = {} | |
|
1488 | 1479 | verbose = repo.ui.verbose |
|
1489 | 1480 | fctx = mctx.filectx |
|
1490 | 1481 | ui = repo.ui |
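The (False, ...)/(True, ...) convention in the new docstring is the generator protocol a consumer must follow; a minimal sketch with a stub generator standing in for batchget (the file names and stat triple are made up):

def fakebatchget(wantfiledata):
    # progress tuples first ...
    for i, f in [(100, b'a.txt'), (1, b'b.txt')]:
        yield False, (i, f)
    # ... then exactly one final payload
    yield True, ({b'b.txt': (0o644, 12, 0)} if wantfiledata else {})

filedata = {}
for final, res in fakebatchget(wantfiledata=True):
    if final:
        filedata = res          # the (True, filedata) terminator
    else:
        i, f = res              # a (False, (count, filename)) update
        print('progress', i, f)
print(sorted(filedata))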
@@ -1508,16 +1499,24 b' def batchget(repo, mctx, wctx, actions):' | |||
|
1508 | 1499 | if repo.wvfs.lexists(conflicting): |
|
1509 | 1500 | orig = scmutil.backuppath(ui, repo, conflicting) |
|
1510 | 1501 | util.rename(repo.wjoin(conflicting), orig) |
|
1511 | wctx[f].clearunknown() |

1502 | wfctx = wctx[f] | |
|
1503 | wfctx.clearunknown() | |
|
1512 | 1504 | atomictemp = ui.configbool("experimental", "update.atomic-file") |
|
1513 | wctx[f].write(fctx(f).data(), flags, backgroundclose=True, |

1514 | atomictemp=atomictemp) |

1505 | size = wfctx.write(fctx(f).data(), flags, | |
|
1506 | backgroundclose=True, | |
|
1507 | atomictemp=atomictemp) | |
|
1508 | if wantfiledata: | |
|
1509 | s = wfctx.lstat() | |
|
1510 | mode = s.st_mode | |
|
1511 | mtime = s[stat.ST_MTIME] | |
|
1512 | filedata[f] = ((mode, size, mtime)) # for dirstate.normal | |
|
1515 | 1513 | if i == 100: |
|
1516 | yield i, f | |
|
1514 | yield False, (i, f) | |
|
1517 | 1515 | i = 0 |
|
1518 | 1516 | i += 1 |
|
1519 | 1517 | if i > 0: |
|
1520 | yield i, f | |
|
1518 | yield False, (i, f) | |
|
1519 | yield True, filedata | |
|
1521 | 1520 | |
|
1522 | 1521 | def _prefetchfiles(repo, ctx, actions): |
|
1523 | 1522 | """Invoke ``scmutil.prefetchfiles()`` for the files relevant to the dict |
@@ -1564,14 +1563,17 b' def emptyactions():' | |||
|
1564 | 1563 | ACTION_PATH_CONFLICT, |
|
1565 | 1564 | ACTION_PATH_CONFLICT_RESOLVE)) |
|
1566 | 1565 | |
|
1567 | def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None): |

1566 | def applyupdates(repo, actions, wctx, mctx, overwrite, wantfiledata, | |
|
1567 | labels=None): | |
|
1568 | 1568 | """apply the merge action list to the working directory |
|
1569 | 1569 | |
|
1570 | 1570 | wctx is the working copy context |
|
1571 | 1571 | mctx is the context to be merged into the working copy |
|
1572 | 1572 | |
|
1573 | Return a tuple of counts (updated, merged, removed, unresolved) that | |
|
1574 | describes how many files were affected by the update. | |
|
1573 | Return a tuple of (counts, filedata), where counts is a tuple | |
|
1574 | (updated, merged, removed, unresolved) that describes how many | |
|
1575 | files were affected by the update, and filedata is as described in | |
|
1576 | batchget. | |
|
1575 | 1577 | """ |
|
1576 | 1578 | |
|
1577 | 1579 | _prefetchfiles(repo, mctx, actions) |
@@ -1663,11 +1665,18 b' def applyupdates(repo, actions, wctx, mc' | |||
|
1663 | 1665 | # get in parallel. |
|
1664 | 1666 | threadsafe = repo.ui.configbool('experimental', |
|
1665 | 1667 | 'worker.wdir-get-thread-safe') |
|
1666 | prog = worker.worker(repo.ui, cost, batchget, (repo, mctx, wctx), |

1668 | prog = worker.worker(repo.ui, cost, batchget, | |
|
1669 | (repo, mctx, wctx, wantfiledata), | |
|
1667 | 1670 | actions[ACTION_GET], |
|
1668 | threadsafe=threadsafe) |

1669 | for i, item in prog: | |
|
1670 | progress.increment(step=i, item=item) | |
|
1671 | threadsafe=threadsafe, | |
|
1672 | hasretval=True) | |
|
1673 | getfiledata = {} | |
|
1674 | for final, res in prog: | |
|
1675 | if final: | |
|
1676 | getfiledata = res | |
|
1677 | else: | |
|
1678 | i, item = res | |
|
1679 | progress.increment(step=i, item=item) | |
|
1671 | 1680 | updated = len(actions[ACTION_GET]) |
|
1672 | 1681 | |
|
1673 | 1682 | if [a for a in actions[ACTION_GET] if a[0] == '.hgsubstate']: |
@@ -1792,6 +1801,10 b' def applyupdates(repo, actions, wctx, mc' | |||
|
1792 | 1801 | mfiles = set(a[0] for a in actions[ACTION_MERGE]) |
|
1793 | 1802 | for k, acts in extraactions.iteritems(): |
|
1794 | 1803 | actions[k].extend(acts) |
|
1804 | if k == ACTION_GET and wantfiledata: | |
|
1805 | # no filedata until mergestate is updated to provide it | |
|
1806 | for a in acts: | |
|
1807 | getfiledata[a[0]] = None | |
|
1795 | 1808 | # Remove these files from actions[ACTION_MERGE] as well. This is |
|
1796 | 1809 | # important because in recordupdates, files in actions[ACTION_MERGE] |
|
1797 | 1810 | # are processed after files in other actions, and the merge driver |
@@ -1814,9 +1827,10 b' def applyupdates(repo, actions, wctx, mc' | |||
|
1814 | 1827 | if a[0] in mfiles] |
|
1815 | 1828 | |
|
1816 | 1829 | progress.complete() |
|
1817 | return updateresult(updated, merged, removed, unresolved) | |
|
1830 | assert len(getfiledata) == (len(actions[ACTION_GET]) if wantfiledata else 0) | |
|
1831 | return updateresult(updated, merged, removed, unresolved), getfiledata | |
|
1818 | 1832 | |
|
1819 | def recordupdates(repo, actions, branchmerge): | |
|
1833 | def recordupdates(repo, actions, branchmerge, getfiledata): | |
|
1820 | 1834 | "record merge actions to the dirstate" |
|
1821 | 1835 | # remove (must come first) |
|
1822 | 1836 | for f, args, msg in actions.get(ACTION_REMOVE, []): |
@@ -1864,7 +1878,8 b' def recordupdates(repo, actions, branchm' | |||
|
1864 | 1878 | if branchmerge: |
|
1865 | 1879 | repo.dirstate.otherparent(f) |
|
1866 | 1880 | else: |
|
1867 | repo.dirstate.normal(f) | |
|
1881 | parentfiledata = getfiledata[f] if getfiledata else None | |
|
1882 | repo.dirstate.normal(f, parentfiledata=parentfiledata) | |
|
1868 | 1883 | |
|
1869 | 1884 | # merge |
|
1870 | 1885 | for f, args, msg in actions.get(ACTION_MERGE, []): |
@@ -1991,14 +2006,10 b' def update(repo, node, branchmerge, forc' | |||
|
1991 | 2006 | wc = repo[None] |
|
1992 | 2007 | pl = wc.parents() |
|
1993 | 2008 | p1 = pl[0] |
|
1994 | pas = [None] |

2009 | p2 = repo[node] | |
|
1995 | 2010 | if ancestor is not None: |
|
1996 | 2011 | pas = [repo[ancestor]] |
|
1997 | ||
|
1998 | overwrite = force and not branchmerge | |
|
1999 | ||
|
2000 | p2 = repo[node] | |
|
2001 | if pas[0] is None: | |
|
2012 | else: | |
|
2002 | 2013 | if repo.ui.configlist('merge', 'preferancestor') == ['*']: |
|
2003 | 2014 | cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node()) |
|
2004 | 2015 | pas = [repo[anc] for anc in (sorted(cahs) or [nullid])] |
@@ -2007,6 +2018,7 b' def update(repo, node, branchmerge, forc' | |||
|
2007 | 2018 | |
|
2008 | 2019 | fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2) |
|
2009 | 2020 | |
|
2021 | overwrite = force and not branchmerge | |
|
2010 | 2022 | ### check phase |
|
2011 | 2023 | if not overwrite: |
|
2012 | 2024 | if len(pl) > 1: |
@@ -2183,12 +2195,15 b' def update(repo, node, branchmerge, forc' | |||
|
2183 | 2195 | 'fsmonitor enabled; enable fsmonitor to improve performance; ' |
|
2184 | 2196 | 'see "hg help -e fsmonitor")\n')) |
|
2185 | 2197 | |
|
2186 | stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels) | |
|
2198 | updatedirstate = not partial and not wc.isinmemory() | |
|
2199 | wantfiledata = updatedirstate and not branchmerge | |
|
2200 | stats, getfiledata = applyupdates(repo, actions, wc, p2, overwrite, | |
|
2201 | wantfiledata, labels=labels) | |
|
2187 | 2202 | |
|
2188 | if not partial and not wc.isinmemory(): | |
|
2203 | if updatedirstate: | |
|
2189 | 2204 | with repo.dirstate.parentchange(): |
|
2190 | 2205 | repo.setparents(fp1, fp2) |
|
2191 | recordupdates(repo, actions, branchmerge) | |
|
2206 | recordupdates(repo, actions, branchmerge, getfiledata) | |
|
2192 | 2207 | # update completed, clear state |
|
2193 | 2208 | util.unlink(repo.vfs.join('updatestate')) |
|
2194 | 2209 | |
@@ -2219,7 +2234,7 b' def graft(repo, ctx, pctx, labels=None, ' | |||
|
2219 | 2234 | pctx - merge base, usually ctx.p1() |
|
2220 | 2235 | labels - merge labels eg ['local', 'graft'] |
|
2221 | 2236 | keepparent - keep second parent if any |
|
2222 | keepparent - if unresolved, keep parent used for the merge | |
|
2237 | keepconflictparent - if unresolved, keep parent used for the merge | |
|
2223 | 2238 | |
|
2224 | 2239 | """ |
|
2225 | 2240 | # If we're grafting a descendant onto an ancestor, be sure to pass |
@@ -44,6 +44,9 b' def subsubsection(s):' | |||
|
44 | 44 | def subsubsubsection(s): |
|
45 | 45 | return "%s\n%s\n\n" % (s, "." * encoding.colwidth(s)) |
|
46 | 46 | |
|
47 | def subsubsubsubsection(s): | |
|
48 | return "%s\n%s\n\n" % (s, "'" * encoding.colwidth(s)) | |
|
49 | ||
|
47 | 50 | def replace(text, substs): |
|
48 | 51 | ''' |
|
49 | 52 | Apply a list of (find, replace) pairs to a text. |
@@ -7,14 +7,13 b'' | |||
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | import errno | |
|
11 | ||
|
12 | 10 | from .i18n import _ |
|
13 | 11 | from . import ( |
|
14 | 12 | error, |
|
15 | 13 | match as matchmod, |
|
16 | 14 | merge, |
|
17 | 15 | repository, |
|
16 | scmutil, | |
|
18 | 17 | sparse, |
|
19 | 18 | util, |
|
20 | 19 | ) |
@@ -144,15 +143,9 b' def parseconfig(ui, spec):' | |||
|
144 | 143 | return includepats, excludepats |
|
145 | 144 | |
|
146 | 145 | def load(repo): |
|
147 | try: | |
|
148 | spec = repo.svfs.read(FILENAME) | |
|
149 | except IOError as e: | |
|
150 | # Treat "narrowspec does not exist" the same as "narrowspec file exists | |
|
151 | # and is empty". | |
|
152 | if e.errno == errno.ENOENT: | |
|
153 | return set(), set() | |
|
154 | raise | |
|
155 | ||
|
146 | # Treat "narrowspec does not exist" the same as "narrowspec file exists | |
|
147 | # and is empty". | |
|
148 | spec = repo.svfs.tryread(FILENAME) | |
|
156 | 149 | return parseconfig(repo.ui, spec) |
|
157 | 150 | |
|
158 | 151 | def save(repo, includepats, excludepats): |
@@ -266,9 +259,12 b' def _writeaddedfiles(repo, pctx, files):' | |||
|
266 | 259 | if not repo.wvfs.exists(f): |
|
267 | 260 | addgaction((f, (mf.flags(f), False), "narrowspec updated")) |
|
268 | 261 | merge.applyupdates(repo, actions, wctx=repo[None], |
|
269 | mctx=repo['.'], overwrite=False) | |
|
262 | mctx=repo['.'], overwrite=False, wantfiledata=False) | |
|
270 | 263 | |
|
271 | 264 | def checkworkingcopynarrowspec(repo): |
|
265 | # Avoid infinite recursion when updating the working copy | |
|
266 | if getattr(repo, '_updatingnarrowspec', False): | |
|
267 | return | |
|
272 | 268 | storespec = repo.svfs.tryread(FILENAME) |
|
273 | 269 | wcspec = repo.vfs.tryread(DIRSTATE_FILENAME) |
|
274 | 270 | if wcspec != storespec: |
@@ -283,6 +279,7 b' def updateworkingcopy(repo, assumeclean=' | |||
|
283 | 279 | """ |
|
284 | 280 | oldspec = repo.vfs.tryread(DIRSTATE_FILENAME) |
|
285 | 281 | newspec = repo.svfs.tryread(FILENAME) |
|
282 | repo._updatingnarrowspec = True | |
|
286 | 283 | |
|
287 | 284 | oldincludes, oldexcludes = parseconfig(repo.ui, oldspec) |
|
288 | 285 | newincludes, newexcludes = parseconfig(repo.ui, newspec) |
@@ -292,8 +289,8 b' def updateworkingcopy(repo, assumeclean=' | |||
|
292 | 289 | removedmatch = matchmod.differencematcher(oldmatch, newmatch) |
|
293 | 290 | |
|
294 | 291 | ds = repo.dirstate |
|
295 | lookup, status = ds.status(removedmatch, subrepos=[], ignored=False, |

296 | clean=True, unknown=False) |

292 | lookup, status = ds.status(removedmatch, subrepos=[], ignored=True, | |
|
293 | clean=True, unknown=True) | |
|
297 | 294 | trackeddirty = status.modified + status.added |
|
298 | 295 | clean = status.clean |
|
299 | 296 | if assumeclean: |
@@ -302,15 +299,19 b' def updateworkingcopy(repo, assumeclean=' | |||
|
302 | 299 | else: |
|
303 | 300 | trackeddirty.extend(lookup) |
|
304 | 301 | _deletecleanfiles(repo, clean) |
|
302 | uipathfn = scmutil.getuipathfn(repo) | |
|
305 | 303 | for f in sorted(trackeddirty): |
|
306 | repo.ui.status(_('not deleting possibly dirty file %s\n') % f) | |
|
304 | repo.ui.status(_('not deleting possibly dirty file %s\n') % uipathfn(f)) | |
|
305 | for f in sorted(status.unknown): | |
|
306 | repo.ui.status(_('not deleting unknown file %s\n') % uipathfn(f)) | |
|
307 | for f in sorted(status.ignored): | |
|
308 | repo.ui.status(_('not deleting ignored file %s\n') % uipathfn(f)) | |
|
307 | 309 | for f in clean + trackeddirty: |
|
308 | 310 | ds.drop(f) |
|
309 | 311 | |
|
310 | repo.narrowpats = newincludes, newexcludes | |
|
311 | repo._narrowmatch = newmatch | |
|
312 | 312 | pctx = repo['.'] |
|
313 | 313 | newfiles = [f for f in pctx.manifest().walk(addedmatch) if f not in ds] |
|
314 | 314 | for f in newfiles: |
|
315 | 315 | ds.normallookup(f) |
|
316 | 316 | _writeaddedfiles(repo, pctx, newfiles) |
|
317 | repo._updatingnarrowspec = False |
@@ -93,10 +93,6 b" parsers = policy.importmod(r'parsers')" | |||
|
93 | 93 | _calcsize = struct.calcsize |
|
94 | 94 | propertycache = util.propertycache |
|
95 | 95 | |
|
96 | # the obsolete feature is not mature enough to be enabled by default. | |
|
97 | # you have to rely on third party extension extension to enable this. | |
|
98 | _enabled = False | |
|
99 | ||
|
100 | 96 | # Options for obsolescence |
|
101 | 97 | createmarkersopt = 'createmarkers' |
|
102 | 98 | allowunstableopt = 'allowunstable' |
@@ -124,11 +120,6 b' def _getoptionvalue(repo, option):' | |||
|
124 | 120 | if 'all' in result: |
|
125 | 121 | return True |
|
126 | 122 | |
|
127 | # For migration purposes, temporarily return true if the config hasn't | |
|
128 | # been set but _enabled is true. | |
|
129 | if len(result) == 0 and _enabled: | |
|
130 | return True | |
|
131 | ||
|
132 | 123 | # Temporary hack for next check |
|
133 | 124 | newconfig = repo.ui.config('experimental', 'evolution.createmarkers') |
|
134 | 125 | if newconfig: |
@@ -1089,7 +1089,9 b' def filterpatch(ui, headers, match, oper' | |||
|
1089 | 1089 | return skipfile, skipfile, skipall, newpatches |
|
1090 | 1090 | while True: |
|
1091 | 1091 | resps = messages['help'][operation] |
|
1092 | r = ui.promptchoice("%s %s" % (query, resps)) | |
|
1092 | # IMPORTANT: keep the last line of this prompt short (<40 english | |
|
1093 | # chars is a good target) because of issue6158. | |
|
1094 | r = ui.promptchoice("%s\n(enter ? for help) %s" % (query, resps)) | |
|
1093 | 1095 | ui.write("\n") |
|
1094 | 1096 | if r == 8: # ? |
|
1095 | 1097 | for c, t in ui.extractchoices(resps)[1]: |
@@ -13,6 +13,9 b' import sys' | |||
|
13 | 13 | # Rules for how modules can be loaded. Values are: |
|
14 | 14 | # |
|
15 | 15 | # c - require C extensions |
|
16 | # rust+c - require Rust and C extensions | |
|
17 | # rust+c-allow - allow Rust and C extensions with fallback to pure Python | |
|
18 | # for each | |
|
16 | 19 | # allow - allow pure Python implementation when C loading fails |
|
17 | 20 | # cffi - required cffi versions (implemented within pure module) |
|
18 | 21 | # cffi-allow - allow pure Python implementation if cffi version is missing |
@@ -29,6 +32,9 b" policy = b'allow'" | |||
|
29 | 32 | b'cffi': (r'cffi', None), |
|
30 | 33 | b'cffi-allow': (r'cffi', r'pure'), |
|
31 | 34 | b'py': (None, r'pure'), |
|
35 | # For now, rust policies impact importrust only | |
|
36 | b'rust+c': (r'cext', None), | |
|
37 | b'rust+c-allow': (r'cext', r'pure'), | |
|
32 | 38 | } |
|
33 | 39 | |
|
34 | 40 | try: |
@@ -69,7 +75,7 b' def _importfrom(pkgname, modname):' | |||
|
69 | 75 | (r'cext', r'bdiff'): 3, |
|
70 | 76 | (r'cext', r'mpatch'): 1, |
|
71 | 77 | (r'cext', r'osutil'): 4, |
|
72 | (r'cext', r'parsers'): 12, |

78 | (r'cext', r'parsers'): 13, | |
|
73 | 79 | } |
|
74 | 80 | |
|
75 | 81 | # map import request to other package or module |
@@ -107,3 +113,34 b' def importmod(modname):' | |||
|
107 | 113 | raise |
|
108 | 114 | pn, mn = _modredirects.get((purepkg, modname), (purepkg, modname)) |
|
109 | 115 | return _importfrom(pn, mn) |
|
116 | ||
|
117 | def _isrustpermissive(): | |
|
118 | """Assuming the policy is a Rust one, tell if it's permissive.""" | |
|
119 | return policy.endswith(b'-allow') | |
|
120 | ||
|
121 | def importrust(modname, member=None, default=None): | |
|
122 | """Import Rust module according to policy and availability. | |
|
123 | ||
|
124 | If policy isn't a Rust one, this returns `default`. | |
|
125 | ||
|
126 | If either the module or its member is not available, this returns `default` | |
|
127 | if policy is permissive and raises `ImportError` if not. | |
|
128 | """ | |
|
129 | if not policy.startswith(b'rust'): | |
|
130 | return default | |
|
131 | ||
|
132 | try: | |
|
133 | mod = _importfrom(r'rustext', modname) | |
|
134 | except ImportError: | |
|
135 | if _isrustpermissive(): | |
|
136 | return default | |
|
137 | raise | |
|
138 | if member is None: | |
|
139 | return mod | |
|
140 | ||
|
141 | try: | |
|
142 | return getattr(mod, member) | |
|
143 | except AttributeError: | |
|
144 | if _isrustpermissive(): | |
|
145 | return default | |
|
146 | raise ImportError(r"Cannot import name %s" % member) |
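The control flow of importrust above can be exercised without Mercurial; a minimal sketch with a fake loader (the policy value and module name are illustrative only, not real policy.py state):

policy = b'rust+c-allow'

def _load(modname):
    # stand-in for _importfrom(r'rustext', modname)
    raise ImportError(modname)   # pretend the Rust extension is absent

def importrust(modname, member=None, default=None):
    if not policy.startswith(b'rust'):
        return default           # non-Rust policy: never try
    try:
        mod = _load(modname)
    except ImportError:
        if policy.endswith(b'-allow'):
            return default       # permissive policy: fall back quietly
        raise                    # strict 'rust+c': propagate the failure
    return mod if member is None else getattr(mod, member)

assert importrust('ancestor') is None   # '-allow' falls back to default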
@@ -147,6 +147,8 b' def statprofile(ui, fp):' | |||
|
147 | 147 | # inconsistent config: profiling.showmin |
|
148 | 148 | limit = ui.configwith(fraction, 'profiling', 'showmin', 0.05) |
|
149 | 149 | kwargs[r'limit'] = limit |
|
150 | showtime = ui.configbool('profiling', 'showtime') | |
|
151 | kwargs[r'showtime'] = showtime | |
|
150 | 152 | |
|
151 | 153 | statprof.display(fp, data=data, format=displayformat, **kwargs) |
|
152 | 154 |
@@ -5,7 +5,7 b'' | |||
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | from __future__ import absolute_import | |
|
8 | from __future__ import absolute_import, division | |
|
9 | 9 | |
|
10 | 10 | import ctypes |
|
11 | 11 | import ctypes.util |
@@ -149,7 +149,7 b' if not pycompat.iswindows:' | |||
|
149 | 149 | cmsg.cmsg_type != _SCM_RIGHTS): |
|
150 | 150 | return [] |
|
151 | 151 | rfds = ctypes.cast(cmsg.cmsg_data, ctypes.POINTER(ctypes.c_int)) |
|
152 | rfdscount = ((cmsg.cmsg_len - _cmsghdr.cmsg_data.offset) / | |
|
152 | rfdscount = ((cmsg.cmsg_len - _cmsghdr.cmsg_data.offset) // | |
|
153 | 153 | ctypes.sizeof(ctypes.c_int)) |
|
154 | 154 | return [rfds[i] for i in pycompat.xrange(rfdscount)] |
|
155 | 155 |
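This hunk is the classic Python 3 porting fix: once division is imported from __future__, / on two integers yields a float, which breaks anything expecting a count, while // keeps integer semantics. A two-line illustration:

from __future__ import division
assert 7 / 2 == 3.5    # true division: a float, unusable as rfdscount
assert 7 // 2 == 3     # floor division: stays an integer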
@@ -64,8 +64,8 b' class _funcregistrarbase(object):' | |||
|
64 | 64 | raise error.ProgrammingError(msg) |
|
65 | 65 | |
|
66 | 66 | if func.__doc__ and not util.safehasattr(func, '_origdoc'): |
|
67 | doc = pycompat.sysbytes(func.__doc__).strip() |

68 | func._origdoc = doc |

67 | func._origdoc = func.__doc__.strip() | |
|
68 | doc = pycompat.sysbytes(func._origdoc) | |
|
69 | 69 | func.__doc__ = pycompat.sysstr(self._formatdoc(decl, doc)) |
|
70 | 70 | |
|
71 | 71 | self._table[name] = func |
@@ -338,19 +338,10 b' class templatekeyword(_templateregistrar' | |||
|
338 | 338 | ''' |
|
339 | 339 | pass |
|
340 | 340 | |
|
341 | # old API (DEPRECATED) | |
|
342 | @templatekeyword('mykeyword') | |
|
343 | def mykeywordfunc(repo, ctx, templ, cache, revcache, **args): | |
|
344 | '''Explanation of this template keyword .... | |
|
345 | ''' | |
|
346 | pass | |
|
347 | ||
|
348 | 341 | The first string argument is used also in online help. |
|
349 | 342 | |
|
350 | 343 | Optional argument 'requires' should be a collection of resource names |
|
351 | which the template keyword depends on. |

352 | switch to the new API. If 'requires' is unspecified, all template | |
|
353 | keywords and resources are expanded to the function arguments. | |
|
344 | which the template keyword depends on. | |
|
354 | 345 | |
|
355 | 346 | 'templatekeyword' instance in example above can be used to |
|
356 | 347 | decorate multiple functions. |
@@ -362,7 +353,7 b' class templatekeyword(_templateregistrar' | |||
|
362 | 353 | Otherwise, explicit 'templatekw.loadkeyword()' is needed. |
|
363 | 354 | """ |
|
364 | 355 | |
|
365 | def _extrasetup(self, name, func, requires=None): |

356 | def _extrasetup(self, name, func, requires=()): | |
|
366 | 357 | func._requires = requires |
|
367 | 358 | |
|
368 | 359 | class templatefilter(_templateregistrarbase): |
@@ -279,7 +279,9 b' def _bookmarkmovements(repo, tostrip):' | |||
|
279 | 279 | if rev in tostrip: |
|
280 | 280 | updatebm.append(m) |
|
281 | 281 | newbmtarget = None |
|
282 | if updatebm: # don't compute anything is there is no bookmark to move anyway | |
|
282 | # If we need to move bookmarks, compute bookmark | |
|
283 | # targets. Otherwise we can skip doing this logic. | |
|
284 | if updatebm: | |
|
283 | 285 | # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), |
|
284 | 286 | # but is much faster |
|
285 | 287 | newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip) |
@@ -364,8 +366,9 b' def stripmanifest(repo, striprev, tr, fi' | |||
|
364 | 366 | striptrees(repo, tr, striprev, files) |
|
365 | 367 | |
|
366 | 368 | def striptrees(repo, tr, striprev, files): |
|
367 | if 'treemanifest' in repo.requirements: # safe but unnecessary |

368 | # otherwise | |
|
369 | if 'treemanifest' in repo.requirements: | |
|
370 | # This logic is safe if treemanifest isn't enabled, but also | |
|
371 | # pointless, so we skip it if treemanifest isn't enabled. | |
|
369 | 372 | for unencoded, encoded, size in repo.store.datafiles(): |
|
370 | 373 | if (unencoded.startswith('meta/') and |
|
371 | 374 | unencoded.endswith('00manifest.i')): |
@@ -416,7 +419,9 b' def rebuildfncache(ui, repo):' | |||
|
416 | 419 | |
|
417 | 420 | progress.complete() |
|
418 | 421 | |
|
419 | if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise |

422 | if 'treemanifest' in repo.requirements: | |
|
423 | # This logic is safe if treemanifest isn't enabled, but also | |
|
424 | # pointless, so we skip it if treemanifest isn't enabled. | |
|
420 | 425 | for dir in util.dirs(seenfiles): |
|
421 | 426 | i = 'meta/%s/00manifest.i' % dir |
|
422 | 427 | d = 'meta/%s/00manifest.d' % dir |
@@ -291,6 +291,10 b' class ipeercommandexecutor(interfaceutil' | |||
|
291 | 291 | class ipeerrequests(interfaceutil.Interface): |
|
292 | 292 | """Interface for executing commands on a peer.""" |
|
293 | 293 | |
|
294 | limitedarguments = interfaceutil.Attribute( | |
|
295 | """True if the peer cannot receive large argument value for commands.""" | |
|
296 | ) | |
|
297 | ||
|
294 | 298 | def commandexecutor(): |
|
295 | 299 | """A context manager that resolves to an ipeercommandexecutor. |
|
296 | 300 | |
@@ -329,6 +333,8 b' class ipeerv2(ipeerconnection, ipeercapa' | |||
|
329 | 333 | class peer(object): |
|
330 | 334 | """Base class for peer repositories.""" |
|
331 | 335 | |
|
336 | limitedarguments = False | |
|
337 | ||
|
332 | 338 | def capable(self, name): |
|
333 | 339 | caps = self.capabilities() |
|
334 | 340 | if name in caps: |
@@ -1650,7 +1656,7 b' class ilocalrepositorymain(interfaceutil' | |||
|
1650 | 1656 | editor=False, extra=None): |
|
1651 | 1657 | """Add a new revision to the repository.""" |
|
1652 | 1658 | |
|
1653 | def commitctx(ctx, error=False): | |
|
1659 | def commitctx(ctx, error=False, origctx=None): | |
|
1654 | 1660 | """Commit a commitctx instance to the repository.""" |
|
1655 | 1661 | |
|
1656 | 1662 | def destroying(): |
@@ -17,6 +17,10 b' from . import (' | |||
|
17 | 17 | phases, |
|
18 | 18 | pycompat, |
|
19 | 19 | tags as tagsmod, |
|
20 | util, | |
|
21 | ) | |
|
22 | from .utils import ( | |
|
23 | repoviewutil, | |
|
20 | 24 | ) |
|
21 | 25 | |
|
22 | 26 | def hideablerevs(repo): |
@@ -154,6 +158,35 b" filtertable = {'visible': computehidden," | |||
|
154 | 158 | 'immutable': computemutable, |
|
155 | 159 | 'base': computeimpactable} |
|
156 | 160 | |
|
161 | _basefiltername = list(filtertable) | |
|
162 | ||
|
163 | def extrafilter(ui): | |
|
164 | """initialize extra filter and return its id | |
|
165 | ||
|
166 | If extra filtering is configured, we make sure the associated filtered view | |
|
167 | are declared and return the associated id. | |
|
168 | """ | |
|
169 | frevs = ui.config('experimental', 'extra-filter-revs') | |
|
170 | if frevs is None: | |
|
171 | return None | |
|
172 | ||
|
173 | fid = pycompat.sysbytes(util.DIGESTS['sha1'](frevs).hexdigest())[:12] | |
|
174 | ||
|
175 | combine = lambda fname: fname + '%' + fid | |
|
176 | ||
|
177 | subsettable = repoviewutil.subsettable | |
|
178 | ||
|
179 | if combine('base') not in filtertable: | |
|
180 | for name in _basefiltername: | |
|
181 | def extrafilteredrevs(repo, *args, **kwargs): | |
|
182 | baserevs = filtertable[name](repo, *args, **kwargs) | |
|
183 | extrarevs = frozenset(repo.revs(frevs)) | |
|
184 | return baserevs | extrarevs | |
|
185 | filtertable[combine(name)] = extrafilteredrevs | |
|
186 | if name in subsettable: | |
|
187 | subsettable[combine(name)] = combine(subsettable[name]) | |
|
188 | return fid | |
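The id derivation and filter composition above can be illustrated standalone; a sketch assuming only hashlib, with a toy filtertable in place of repoview's (the '%' separator and 12-hex-digit id mirror the code above):

import hashlib

# toy table: a filter maps a universe of revs to the *hidden* ones
filtertable = {'visible': lambda revs: {r for r in revs if r % 2}}

def extrafilter(frevs, extrarevs):
    fid = hashlib.sha1(frevs).hexdigest()[:12]
    for name in list(filtertable):
        base = filtertable[name]
        def extrafilteredrevs(revs, base=base):
            return base(revs) | extrarevs   # hide the extra revs as well
        filtertable[name + '%' + fid] = extrafilteredrevs
    return fid

fid = extrafilter(b'draft()', {0, 2})
assert filtertable['visible%' + fid](range(6)) == {0, 1, 2, 3, 5}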
|
189 | ||
|
157 | 190 | def filterrevs(repo, filtername, visibilityexceptions=None): |
|
158 | 191 | """returns set of filtered revision for this filter name |
|
159 | 192 |
@@ -16,6 +16,7 b' from __future__ import absolute_import' | |||
|
16 | 16 | import collections |
|
17 | 17 | import contextlib |
|
18 | 18 | import errno |
|
19 | import io | |
|
19 | 20 | import os |
|
20 | 21 | import struct |
|
21 | 22 | import zlib |
@@ -97,11 +98,8 b' REVIDX_KNOWN_FLAGS' | |||
|
97 | 98 | REVIDX_RAWTEXT_CHANGING_FLAGS |
|
98 | 99 | |
|
99 | 100 | parsers = policy.importmod(r'parsers') |
|
100 | try: | |
|
101 | from . import rustext | |
|
102 | rustext.__name__ # force actual import (see hgdemandimport) | |
|
103 | except ImportError: | |
|
104 | rustext = None | |
|
101 | rustancestor = policy.importrust(r'ancestor') | |
|
102 | rustdagop = policy.importrust(r'dagop') | |
|
105 | 103 | |
|
106 | 104 | # Aliased for performance. |
|
107 | 105 | _zlibdecompress = zlib.decompress |
@@ -337,15 +335,21 b' class revlog(object):' | |||
|
337 | 335 | configured threshold. |
|
338 | 336 | |
|
339 | 337 | If censorable is True, the revlog can have censored revisions. |
|
338 | ||
|
339 | If `upperboundcomp` is not None, this is the expected maximal gain from | |
|
340 | compression for the data content. | |
|
340 | 341 | """ |
|
341 | 342 | def __init__(self, opener, indexfile, datafile=None, checkambig=False, |
|
342 | mmaplargeindex=False, censorable=False): |

343 | mmaplargeindex=False, censorable=False, | |
|
344 | upperboundcomp=None): | |
|
343 | 345 | """ |
|
344 | 346 | create a revlog object |
|
345 | 347 | |
|
346 | 348 | opener is a function that abstracts the file opening operation |
|
347 | 349 | and can be used to implement COW semantics or the like. |
|
350 | ||
|
348 | 351 | """ |
|
352 | self.upperboundcomp = upperboundcomp | |
|
349 | 353 | self.indexfile = indexfile |
|
350 | 354 | self.datafile = datafile or (indexfile[:-2] + ".d") |
|
351 | 355 | self.opener = opener |
@@ -825,8 +829,8 b' class revlog(object):' | |||
|
825 | 829 | checkrev(r) |
|
826 | 830 | # and we're sure ancestors aren't filtered as well |
|
827 | 831 | |
|
828 | if rustext is not None: |

829 | lazyancestors = rustext.ancestor.LazyAncestors |

832 | if rustancestor is not None: | |
|
833 | lazyancestors = rustancestor.LazyAncestors | |
|
830 | 834 | arg = self.index |
|
831 | 835 | elif util.safehasattr(parsers, 'rustlazyancestors'): |
|
832 | 836 | lazyancestors = ancestor.rustlazyancestors |
@@ -915,8 +919,8 b' class revlog(object):' | |||
|
915 | 919 | if common is None: |
|
916 | 920 | common = [nullrev] |
|
917 | 921 | |
|
918 | if rustext is not None: |

919 | return rustext.ancestor.MissingAncestors(self.index, common) |

922 | if rustancestor is not None: | |
|
923 | return rustancestor.MissingAncestors(self.index, common) | |
|
920 | 924 | return ancestor.incrementalmissingancestors(self.parentrevs, common) |
|
921 | 925 | |
|
922 | 926 | def findmissingrevs(self, common=None, heads=None): |
@@ -1130,8 +1134,8 b' class revlog(object):' | |||
|
1130 | 1134 | return self.index.headrevs() |
|
1131 | 1135 | except AttributeError: |
|
1132 | 1136 | return self._headrevs() |
|
1133 | if rustext is not None: |

1134 | return rustext.dagop.headrevs(self.index, revs) |

1137 | if rustdagop is not None: | |
|
1138 | return rustdagop.headrevs(self.index, revs) | |
|
1135 | 1139 | return dagop.headrevs(revs, self._uncheckedparentrevs) |
|
1136 | 1140 | |
|
1137 | 1141 | def computephases(self, roots): |
@@ -1216,14 +1220,25 b' class revlog(object):' | |||
|
1216 | 1220 | A revision is considered an ancestor of itself. |
|
1217 | 1221 | |
|
1218 | 1222 | The implementation of this is trivial but the use of |
|
1219 | commonancestorsheads is not.""" |

1223 | reachableroots is not.""" | |
|
1220 | 1224 | if a == nullrev: |
|
1221 | 1225 | return True |
|
1222 | 1226 | elif a == b: |
|
1223 | 1227 | return True |
|
1224 | 1228 | elif a > b: |
|
1225 | 1229 | return False |
|
1226 | return a in self._commonancestorsheads(a, b) | |
|
1230 | return bool(self.reachableroots(a, [b], [a], includepath=False)) | |
|
1231 | ||
|
1232 | def reachableroots(self, minroot, heads, roots, includepath=False): | |
|
1233 | """return (heads(::<roots> and <roots>::<heads>)) | |
|
1234 | ||
|
1235 | If includepath is True, return (<roots>::<heads>).""" | |
|
1236 | try: | |
|
1237 | return self.index.reachableroots2(minroot, heads, roots, | |
|
1238 | includepath) | |
|
1239 | except AttributeError: | |
|
1240 | return dagop._reachablerootspure(self.parentrevs, | |
|
1241 | minroot, roots, heads, includepath) | |
|
1227 | 1242 | |
|
1228 | 1243 | def ancestor(self, a, b): |
|
1229 | 1244 | """calculate the "best" common ancestor of nodes a and b""" |
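Why bool(reachableroots(a, [b], [a])) answers "is a an ancestor of b" is easiest to see on a toy parent map; a pure-Python sketch of the reachability walk (simplified: no minroot cutoff, and includepath handling reduced to an early exit):

def reachableroots(parentrevs, heads, roots):
    # walk parents from heads; report which roots we can reach
    roots, seen, found = set(roots), set(), set()
    stack = list(heads)
    while stack:
        r = stack.pop()
        if r in seen or r < 0:
            continue
        seen.add(r)
        if r in roots:
            found.add(r)
            continue            # like includepath=False: stop at a root
        stack.extend(parentrevs[r])
    return found

parents = {0: [-1, -1], 1: [0, -1], 2: [1, -1], 3: [0, -1]}
isancestor = lambda a, b: bool(reachableroots(parents, [b], [a]))
assert isancestor(0, 2) and isancestor(2, 2) and not isancestor(3, 2)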
@@ -1340,13 +1355,13 b' class revlog(object):' | |||
|
1340 | 1355 | """Find the shortest unambiguous prefix that matches node.""" |
|
1341 | 1356 | def isvalid(prefix): |
|
1342 | 1357 | try: |
|
1343 | node = self._partialmatch(prefix) | |
|
1358 | matchednode = self._partialmatch(prefix) | |
|
1344 | 1359 | except error.AmbiguousPrefixLookupError: |
|
1345 | 1360 | return False |
|
1346 | 1361 | except error.WdirUnsupported: |
|
1347 | 1362 | # single 'ff...' match |
|
1348 | 1363 | return True |
|
1349 | if node is None: | |
|
1364 | if matchednode is None: | |
|
1350 | 1365 | raise error.LookupError(node, self.indexfile, _('no node')) |
|
1351 | 1366 | return True |
|
1352 | 1367 | |
@@ -2292,7 +2307,7 b' class revlog(object):' | |||
|
2292 | 2307 | |
|
2293 | 2308 | try: |
|
2294 | 2309 | with self._datafp() as f: |
|
2295 | f.seek(0, 2) |

2310 | f.seek(0, io.SEEK_END) | |
|
2296 | 2311 | actual = f.tell() |
|
2297 | 2312 | dd = actual - expected |
|
2298 | 2313 | except IOError as inst: |
@@ -2302,7 +2317,7 b' class revlog(object):' | |||
|
2302 | 2317 | |
|
2303 | 2318 | try: |
|
2304 | 2319 | f = self.opener(self.indexfile) |
|
2305 | f.seek(0, 2) |

2320 | f.seek(0, io.SEEK_END) | |
|
2306 | 2321 | actual = f.tell() |
|
2307 | 2322 | f.close() |
|
2308 | 2323 | s = self._io.size |
@@ -679,6 +679,31 b' def _candidategroups(revlog, textlen, p1' | |||
|
679 | 679 | # if chain already have too much data, skip base |
|
680 | 680 | if deltas_limit < chainsize: |
|
681 | 681 | continue |
|
682 | if sparse and revlog.upperboundcomp is not None: | |
|
683 | maxcomp = revlog.upperboundcomp | |
|
684 | basenotsnap = (p1, p2, nullrev) | |
|
685 | if rev not in basenotsnap and revlog.issnapshot(rev): | |
|
686 | snapshotdepth = revlog.snapshotdepth(rev) | |
|
687 | # If text is significantly larger than the base, we can | |
|
688 | # expect the resulting delta to be proportional to the size | |
|
689 | # difference | |
|
690 | revsize = revlog.rawsize(rev) | |
|
691 | rawsizedistance = max(textlen - revsize, 0) | |
|
692 | # use an estimate of the compression upper bound. | |
|
693 | lowestrealisticdeltalen = rawsizedistance // maxcomp | |
|
694 | ||
|
695 | # check the absolute constraint on the delta size | |
|
696 | snapshotlimit = textlen >> snapshotdepth | |
|
697 | if snapshotlimit < lowestrealisticdeltalen: | |
|
698 | # delta lower bound is larger than accepted upper bound | |
|
699 | continue | |
|
700 | ||
|
701 | # check the relative constraint on the delta size | |
|
702 | revlength = revlog.length(rev) | |
|
703 | if revlength < lowestrealisticdeltalen: | |
|
704 | # delta probable lower bound is larger than target base | |
|
705 | continue | |
|
706 | ||
|
682 | 707 | group.append(rev) |
|
683 | 708 | if group: |
|
684 | 709 | # XXX: in the sparse revlog case, group can become large, |
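The two rejection tests added above are plain integer arithmetic and can be checked by hand; a worked example with made-up sizes (an upperboundcomp of 10 means "assume at best 10x compression"):

maxcomp = 10          # revlog.upperboundcomp: best-case compression ratio
textlen = 100000      # size of the revision being stored
revsize = 20000       # raw size of the candidate base revision
snapshotdepth = 2     # candidate would become a level-2 snapshot
revlength = 3000      # stored size of the candidate base

rawsizedistance = max(textlen - revsize, 0)            # 80000
lowestrealisticdeltalen = rawsizedistance // maxcomp   # 8000
snapshotlimit = textlen >> snapshotdepth               # 25000

# the absolute constraint passes: 8000 <= 25000, so no skip here ...
assert not snapshotlimit < lowestrealisticdeltalen
# ... but the relative constraint fires: the base (3000 bytes) is
# smaller than the realistic delta lower bound, so skip this candidate
assert revlength < lowestrealisticdeltalen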
@@ -907,6 +932,21 b' class deltacomputer(object):' | |||
|
907 | 932 | |
|
908 | 933 | def _builddeltainfo(self, revinfo, base, fh): |
|
909 | 934 | # can we use the cached delta? |
|
935 | revlog = self.revlog | |
|
936 | chainbase = revlog.chainbase(base) | |
|
937 | if revlog._generaldelta: | |
|
938 | deltabase = base | |
|
939 | else: | |
|
940 | deltabase = chainbase | |
|
941 | snapshotdepth = None | |
|
942 | if revlog._sparserevlog and deltabase == nullrev: | |
|
943 | snapshotdepth = 0 | |
|
944 | elif revlog._sparserevlog and revlog.issnapshot(deltabase): | |
|
945 | # A delta chain should always be one full snapshot, | |
|
946 | # zero or more semi-snapshots, and zero or more deltas | |
|
947 | p1, p2 = revlog.rev(revinfo.p1), revlog.rev(revinfo.p2) | |
|
948 | if deltabase not in (p1, p2) and revlog.issnapshot(deltabase): | |
|
949 | snapshotdepth = len(revlog._deltachain(deltabase)[0]) | |
|
910 | 950 | delta = None |
|
911 | 951 | if revinfo.cachedelta: |
|
912 | 952 | cachebase, cachediff = revinfo.cachedelta |
@@ -920,31 +960,22 b' class deltacomputer(object):' | |||
|
920 | 960 | delta = revinfo.cachedelta[1] |
|
921 | 961 | if delta is None: |
|
922 | 962 | delta = self._builddeltadiff(base, revinfo, fh) |
|
923 | revlog = self.revlog | |
|
963 | # snapshotdept need to be neither None nor 0 level snapshot | |
|
964 | if revlog.upperboundcomp is not None and snapshotdepth: | |
|
965 | lowestrealisticdeltalen = len(delta) // revlog.upperboundcomp | |
|
966 | snapshotlimit = revinfo.textlen >> snapshotdepth | |
|
967 | if snapshotlimit < lowestrealisticdeltalen: | |
|
968 | return None | |
|
969 | if revlog.length(base) < lowestrealisticdeltalen: | |
|
970 | return None | |
|
924 | 971 | header, data = revlog.compress(delta) |
|
925 | 972 | deltalen = len(header) + len(data) |
|
926 | chainbase = revlog.chainbase(base) | |
|
927 | 973 | offset = revlog.end(len(revlog) - 1) |
|
928 | 974 | dist = deltalen + offset - revlog.start(chainbase) |
|
929 | if revlog._generaldelta: | |
|
930 | deltabase = base | |
|
931 | else: | |
|
932 | deltabase = chainbase | |
|
933 | 975 | chainlen, compresseddeltalen = revlog._chaininfo(base) |
|
934 | 976 | chainlen += 1 |
|
935 | 977 | compresseddeltalen += deltalen |
|
936 | 978 | |
|
937 | revlog = self.revlog | |
|
938 | snapshotdepth = None | |
|
939 | if deltabase == nullrev: | |
|
940 | snapshotdepth = 0 | |
|
941 | elif revlog._sparserevlog and revlog.issnapshot(deltabase): | |
|
942 | # A delta chain should always be one full snapshot, | |
|
943 | # zero or more semi-snapshots, and zero or more deltas | |
|
944 | p1, p2 = revlog.rev(revinfo.p1), revlog.rev(revinfo.p2) | |
|
945 | if deltabase not in (p1, p2) and revlog.issnapshot(deltabase): | |
|
946 | snapshotdepth = len(revlog._deltachain(deltabase)[0]) | |
|
947 | ||
|
948 | 979 | return _deltainfo(dist, deltalen, (header, data), deltabase, |
|
949 | 980 | chainbase, chainlen, compresseddeltalen, |
|
950 | 981 | snapshotdepth) |
@@ -1002,8 +1033,9 b' class deltacomputer(object):' | |||
|
1002 | 1033 | nominateddeltas.append(deltainfo) |
|
1003 | 1034 | for candidaterev in candidaterevs: |
|
1004 | 1035 | candidatedelta = self._builddeltainfo(revinfo, candidaterev, fh) |
|
1005 | if isgooddeltainfo(self.revlog, candidatedelta, revinfo): |

1006 | nominateddeltas.append(candidatedelta) |

1036 | if candidatedelta is not None: | |
|
1037 | if isgooddeltainfo(self.revlog, candidatedelta, revinfo): | |
|
1038 | nominateddeltas.append(candidatedelta) | |
|
1007 | 1039 | if nominateddeltas: |
|
1008 | 1040 | deltainfo = min(nominateddeltas, key=lambda x: x.deltalen) |
|
1009 | 1041 | if deltainfo is not None: |
@@ -52,6 +52,9 b' generatorset = smartset.generatorset' | |||
|
52 | 52 | spanset = smartset.spanset |
|
53 | 53 | fullreposet = smartset.fullreposet |
|
54 | 54 | |
|
55 | # revisions not included in all(), but populated if specified | |
|
56 | _virtualrevs = (node.nullrev, node.wdirrev) | |
|
57 | ||
|
55 | 58 | # Constants for ordering requirement, used in getset(): |
|
56 | 59 | # |
|
57 | 60 | # If 'define', any nested functions and operations MAY change the ordering of |
@@ -120,8 +123,7 b' def stringset(repo, subset, x, order):' | |||
|
120 | 123 | if not x: |
|
121 | 124 | raise error.ParseError(_("empty string is not a valid revision")) |
|
122 | 125 | x = scmutil.intrev(scmutil.revsymbol(repo, x)) |
|
123 | if (x in subset | |
|
124 | or x == node.nullrev and isinstance(subset, fullreposet)): | |
|
126 | if x in subset or x in _virtualrevs and isinstance(subset, fullreposet): | |
|
125 | 127 | return baseset([x]) |
|
126 | 128 | return baseset() |
|
127 | 129 | |
@@ -1359,8 +1361,13 b' def merge(repo, subset, x):' | |||
|
1359 | 1361 | # i18n: "merge" is a keyword |
|
1360 | 1362 | getargs(x, 0, 0, _("merge takes no arguments")) |
|
1361 | 1363 | cl = repo.changelog |
|
1362 | return subset.filter(lambda r: cl.parentrevs(r)[1] != -1, | |
|
1363 | condrepr='<merge>') | |
|
1364 | nullrev = node.nullrev | |
|
1365 | def ismerge(r): | |
|
1366 | try: | |
|
1367 | return cl.parentrevs(r)[1] != nullrev | |
|
1368 | except error.WdirUnsupported: | |
|
1369 | return bool(repo[r].p2()) | |
|
1370 | return subset.filter(ismerge, condrepr='<merge>') | |
|
1364 | 1371 | |
|
1365 | 1372 | @predicate('branchpoint()', safe=True) |
|
1366 | 1373 | def branchpoint(repo, subset, x): |
@@ -1847,7 +1854,7 b' def rev(repo, subset, x):' | |||
|
1847 | 1854 | except (TypeError, ValueError): |
|
1848 | 1855 | # i18n: "rev" is a keyword |
|
1849 | 1856 | raise error.ParseError(_("rev expects a number")) |
|
1850 | if l not in repo.changelog and l not in (node.nullrev, node.wdirrev): |

1857 | if l not in repo.changelog and l not in _virtualrevs: | |
|
1851 | 1858 | return baseset() |
|
1852 | 1859 | return subset & baseset([l]) |
|
1853 | 1860 | |
@@ -2262,7 +2269,7 b' def _orderedlist(repo, subset, x):' | |||
|
2262 | 2269 | if r in seen: |
|
2263 | 2270 | continue |
|
2264 | 2271 | if (r in subset |
|
2265 |
or r |
|
|
2272 | or r in _virtualrevs and isinstance(subset, fullreposet)): | |
|
2266 | 2273 | ls.append(r) |
|
2267 | 2274 | seen.add(r) |
|
2268 | 2275 | return baseset(ls) |
@@ -1247,6 +1247,28 b' def getrenamedfn(repo, endrev=None):' | |||
|
1247 | 1247 | |
|
1248 | 1248 | return getrenamed |
|
1249 | 1249 | |
|
1250 | def getcopiesfn(repo, endrev=None): | |
|
1251 | if copiesmod.usechangesetcentricalgo(repo): | |
|
1252 | def copiesfn(ctx): | |
|
1253 | if ctx.p2copies(): | |
|
1254 | allcopies = ctx.p1copies().copy() | |
|
1255 | # There should be no overlap | |
|
1256 | allcopies.update(ctx.p2copies()) | |
|
1257 | return sorted(allcopies.items()) | |
|
1258 | else: | |
|
1259 | return sorted(ctx.p1copies().items()) | |
|
1260 | else: | |
|
1261 | getrenamed = getrenamedfn(repo, endrev) | |
|
1262 | def copiesfn(ctx): | |
|
1263 | copies = [] | |
|
1264 | for fn in ctx.files(): | |
|
1265 | rename = getrenamed(fn, ctx.rev()) | |
|
1266 | if rename: | |
|
1267 | copies.append((fn, rename)) | |
|
1268 | return copies | |
|
1269 | ||
|
1270 | return copiesfn | |
|
1271 | ||
|
1250 | 1272 | def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None): |
|
1251 | 1273 | """Update the dirstate to reflect the intent of copying src to dst. For |
|
1252 | 1274 | different reasons it might not end with dst being marked as copied from src. |
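The changeset-centric branch of getcopiesfn simply merges the two parents' copy dicts, relying on them being disjoint; a minimal sketch of that merge:

def mergedcopies(p1copies, p2copies):
    # mirrors the ctx.p1copies()/ctx.p2copies() merge above
    if p2copies:
        allcopies = dict(p1copies)
        allcopies.update(p2copies)   # no overlap expected between parents
        return sorted(allcopies.items())
    return sorted(p1copies.items())

assert mergedcopies({b'new': b'old'}, {b'moved': b'src'}) == [
    (b'moved', b'src'), (b'new', b'old')]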
@@ -1519,7 +1541,12 b' def extdatasource(repo, source):' | |||
|
1519 | 1541 | pass # we ignore data for nodes that don't exist locally |
|
1520 | 1542 | finally: |
|
1521 | 1543 | if proc: |
|
1522 | proc.communicate() | |
|
1544 | try: | |
|
1545 | proc.communicate() | |
|
1546 | except ValueError: | |
|
1547 | # This happens if we started iterating src and then | |
|
1548 | # get a parse error on a line. It should be safe to ignore. | |
|
1549 | pass | |
|
1523 | 1550 | if src: |
|
1524 | 1551 | src.close() |
|
1525 | 1552 | if proc and proc.returncode != 0: |
@@ -110,22 +110,23 b' class partialdiscovery(object):' | |||
|
110 | 110 | (all tracked revisions are known locally) |
|
111 | 111 | """ |
|
112 | 112 | |
|
113 | def __init__(self, repo, targetheads): | |
|
113 | def __init__(self, repo, targetheads, respectsize): | |
|
114 | 114 | self._repo = repo |
|
115 | 115 | self._targetheads = targetheads |
|
116 | 116 | self._common = repo.changelog.incrementalmissingrevs() |
|
117 | 117 | self._undecided = None |
|
118 | 118 | self.missing = set() |
|
119 | 119 | self._childrenmap = None |
|
120 | self._respectsize = respectsize | |
|
120 | 121 | |
|
121 | 122 | def addcommons(self, commons): |
|
122 | """registrer nodes known as common""" |

123 | """register nodes known as common""" | |
|
123 | 124 | self._common.addbases(commons) |
|
124 | 125 | if self._undecided is not None: |
|
125 | 126 | self._common.removeancestorsfrom(self._undecided) |
|
126 | 127 | |
|
127 | 128 | def addmissings(self, missings): |
|
128 | """registrer some nodes as missing""" |

129 | """register some nodes as missing""" | |
|
129 | 130 | newmissing = self._repo.revs('%ld::%ld', missings, self.undecided) |
|
130 | 131 | if newmissing: |
|
131 | 132 | self.missing.update(newmissing) |
@@ -241,11 +242,13 b' class partialdiscovery(object):' | |||
|
241 | 242 | |
|
242 | 243 | # update from roots |
|
243 | 244 | revsroots = set(repo.revs('roots(%ld)', revs)) |
|
244 | ||
|
245 | 245 | childrenrevs = self._childrengetter() |
|
246 | ||
|
247 | 246 | _updatesample(revs, revsroots, sample, childrenrevs) |
|
248 | 247 | assert sample |
|
248 | ||
|
249 | if not self._respectsize: | |
|
250 | size = max(size, min(len(revsroots), len(revsheads))) | |
|
251 | ||
|
249 | 252 | sample = _limitsample(sample, size) |
|
250 | 253 | if len(sample) < size: |
|
251 | 254 | more = size - len(sample) |
@@ -256,7 +259,8 b' def findcommonheads(ui, local, remote,' | |||
|
256 | 259 | initialsamplesize=100, |
|
257 | 260 | fullsamplesize=200, |
|
258 | 261 | abortwhenunrelated=True, |
|
259 | ancestorsof=None): |

262 | ancestorsof=None, | |
|
263 | samplegrowth=1.05): | |
|
260 | 264 | '''Return a tuple (common, anyincoming, remoteheads) used to identify |
|
261 | 265 | missing nodes from or in remote. |
|
262 | 266 | ''' |
@@ -275,9 +279,63 b' def findcommonheads(ui, local, remote,' | |||
|
275 | 279 | # early exit if we know all the specified remote heads already |
|
276 | 280 | ui.debug("query 1; heads\n") |
|
277 | 281 | roundtrips += 1 |
|
278 | sample = _limitsample(ownheads, initialsamplesize) | |
|
279 | # indices between sample and externalized version must match | |
|
280 | sample = list(sample) | |
|
282 | # We also ask remote about all the local heads. That set can be arbitrarily | |
|
283 | # large, so we used to limit it size to `initialsamplesize`. We no longer | |
|
284 | # do as it proved counter productive. The skipped heads could lead to a | |
|
285 | # large "undecided" set, slower to be clarified than if we asked the | |
|
286 | # question for all heads right away. | |
|
287 | # | |
|
288 | # We are already fetching all server heads using the `heads` commands, | |
|
289 | # sending a equivalent number of heads the other way should not have a | |
|
290 | # significant impact. In addition, it is very likely that we are going to | |
|
291 | # have to issue "known" request for an equivalent amount of revisions in | |
|
292 | # order to decide if theses heads are common or missing. | |
|
293 | # | |
|
294 | # find a detailled analysis below. | |
|
295 | # | |
|
296 | # Case A: local and server both has few heads | |
|
297 | # | |
|
298 | # Ownheads is below initialsamplesize, limit would not have any effect. | |
|
299 | # | |
|
300 | # Case B: local has few heads and server has many | |
|
301 | # | |
|
302 | # Ownheads is below initialsamplesize, limit would not have any effect. | |
|
303 | # | |
|
304 | # Case C: local and server both has many heads | |
|
305 | # | |
|
306 | # We now transfert some more data, but not significantly more than is | |
|
307 | # already transfered to carry the server heads. | |
|
308 | # | |
|
309 | # Case D: local has many heads, server has few | |
|
310 | # | |
|
311 | # D.1 local heads are mostly known remotely | |
|
312 | # | |
|
313 | # All the known head will have be part of a `known` request at some | |
|
314 | # point for the discovery to finish. Sending them all earlier is | |
|
315 | # actually helping. | |
|
316 | # | |
|
317 | # (This case is fairly unlikely, it requires the numerous heads to all | |
|
318 | # be merged server side in only a few heads) | |
|
319 | # | |
|
320 | # D.2 local heads are mostly missing remotely | |
|
321 | # | |
|
322 | # To determine that the heads are missing, we'll have to issue `known` | |
|
323 | # request for them or one of their ancestors. This amount of `known` | |
|
324 | # request will likely be in the same order of magnitude than the amount | |
|
325 | # of local heads. | |
|
326 | # | |
|
327 | # The only case where we can be more efficient using `known` request on | |
|
328 | # ancestors are case were all the "missing" local heads are based on a | |
|
329 | # few changeset, also "missing". This means we would have a "complex" | |
|
330 | # graph (with many heads) attached to, but very independant to a the | |
|
331 | # "simple" graph on the server. This is a fairly usual case and have | |
|
332 | # not been met in the wild so far. | |
|
333 | if remote.limitedarguments: | |
|
334 | sample = _limitsample(ownheads, initialsamplesize) | |
|
335 | # indices between sample and externalized version must match | |
|
336 | sample = list(sample) | |
|
337 | else: | |
|
338 | sample = ownheads | |
|
281 | 339 | |
|
282 | 340 | with remote.commandexecutor() as e: |
|
283 | 341 | fheads = e.callcommand('heads', {}) |
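A tiny sketch of the sampling decision those comments describe (limitedarguments is the peer attribute introduced in this change; the cap logic is simplified here to random.sample):

import random

def initialsample(ownheads, limitedarguments, initialsamplesize=100):
    # old behaviour: always cap; new behaviour: cap only for peers
    # that cannot accept arbitrarily large command arguments
    if limitedarguments and len(ownheads) > initialsamplesize:
        return random.sample(ownheads, initialsamplesize)
    return list(ownheads)

heads = list(range(1000))
assert len(initialsample(heads, limitedarguments=True)) == 100
assert len(initialsample(heads, limitedarguments=False)) == 1000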
@@ -318,7 +376,7 b' def findcommonheads(ui, local, remote,' | |||
|
318 | 376 | |
|
319 | 377 | # full blown discovery |
|
320 | 378 | |
|
321 | disco = partialdiscovery(local, ownheads) | |
|
379 | disco = partialdiscovery(local, ownheads, remote.limitedarguments) | |
|
322 | 380 | # treat remote heads (and maybe own heads) as a first implicit sample |
|
323 | 381 | # response |
|
324 | 382 | disco.addcommons(knownsrvheads) |
@@ -335,6 +393,8 b' def findcommonheads(ui, local, remote,' | |||
|
335 | 393 | ui.debug("taking initial sample\n") |
|
336 | 394 | samplefunc = disco.takefullsample |
|
337 | 395 | targetsize = fullsamplesize |
|
396 | if not remote.limitedarguments: | |
|
397 | fullsamplesize = int(fullsamplesize * samplegrowth) | |
|
338 | 398 | else: |
|
339 | 399 | # use even cheaper initial sample |
|
340 | 400 | ui.debug("taking quick initial sample\n") |
@@ -27,8 +27,8 b' import errno' | |||
|
27 | 27 | import itertools |
|
28 | 28 | import stat |
|
29 | 29 | |
|
30 | from mercurial.i18n import _ |

31 | from mercurial import ( |

30 | from .i18n import _ | |
|
31 | from . import ( | |
|
32 | 32 | bookmarks, |
|
33 | 33 | bundle2, |
|
34 | 34 | bundlerepo, |
@@ -45,37 +45,17 b' from mercurial import (' | |||
|
45 | 45 | patch, |
|
46 | 46 | phases, |
|
47 | 47 | pycompat, |
|
48 | registrar, | |
|
49 | 48 | repair, |
|
50 | 49 | scmutil, |
|
51 | 50 | templatefilters, |
|
52 | 51 | util, |
|
53 | 52 | vfs as vfsmod, |
|
54 | 53 | ) |
|
55 | ||
|
56 | from . import ( | |
|
57 | rebase, | |
|
58 | ) | |
|
59 | from mercurial.utils import ( | |
|
54 | from .utils import ( | |
|
60 | 55 | dateutil, |
|
61 | 56 | stringutil, |
|
62 | 57 | ) |
|
63 | 58 | |
|
64 | cmdtable = {} | |
|
65 | command = registrar.command(cmdtable) | |
|
66 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for | |
|
67 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should | |
|
68 | # be specifying the version(s) of Mercurial they are tested with, or | |
|
69 | # leave the attribute unspecified. | |
|
70 | testedwith = 'ships-with-hg-core' | |
|
71 | ||
|
72 | configtable = {} | |
|
73 | configitem = registrar.configitem(configtable) | |
|
74 | ||
|
75 | configitem('shelve', 'maxbackups', | |
|
76 | default=10, | |
|
77 | ) | |
|
78 | ||
|
79 | 59 | backupdir = 'shelve-backup' |
|
80 | 60 | shelvedir = 'shelved' |
|
81 | 61 | shelvefileextensions = ['hg', 'patch', 'shelve'] |
@@ -451,8 +431,6 b' def createcmd(ui, repo, pats, opts):' | |||
|
451 | 431 | def _docreatecmd(ui, repo, pats, opts): |
|
452 | 432 | wctx = repo[None] |
|
453 | 433 | parents = wctx.parents() |
|
454 | if len(parents) > 1: | |
|
455 | raise error.Abort(_('cannot shelve while merging')) | |
|
456 | 434 | parent = parents[0] |
|
457 | 435 | origbranch = wctx.branch() |
|
458 | 436 | |
@@ -646,7 +624,30 b' def checkparents(repo, state):' | |||
|
646 | 624 | raise error.Abort(_('working directory parents do not match unshelve ' |
|
647 | 625 | 'state')) |
|
648 | 626 | |
|
649 | def unshelveabort(ui, repo, state, opts): | |
|
627 | def _loadshelvedstate(ui, repo, opts): | |
|
628 | try: | |
|
629 | state = shelvedstate.load(repo) | |
|
630 | if opts.get('keep') is None: | |
|
631 | opts['keep'] = state.keep | |
|
632 | except IOError as err: | |
|
633 | if err.errno != errno.ENOENT: | |
|
634 | raise | |
|
635 | cmdutil.wrongtooltocontinue(repo, _('unshelve')) | |
|
636 | except error.CorruptedState as err: | |
|
637 | ui.debug(pycompat.bytestr(err) + '\n') | |
|
638 | if opts.get('continue'): | |
|
639 | msg = _('corrupted shelved state file') | |
|
640 | hint = _('please run hg unshelve --abort to abort unshelve ' | |
|
641 | 'operation') | |
|
642 | raise error.Abort(msg, hint=hint) | |
|
643 | elif opts.get('abort'): | |
|
644 | shelvedstate.clear(repo) | |
|
645 | raise error.Abort(_('could not read shelved state file, your ' | |
|
646 | 'working copy may be in an unexpected state\n' | |
|
647 | 'please update to some commit\n')) | |
|
648 | return state | |
|
649 | ||
|
650 | def unshelveabort(ui, repo, state): | |
|
650 | 651 | """subcommand that abort an in-progress unshelve""" |
|
651 | 652 | with repo.lock(): |
|
652 | 653 | try: |
@@ -656,11 +657,6 b' def unshelveabort(ui, repo, state, opts)' | |||
|
656 | 657 | if (state.activebookmark |
|
657 | 658 | and state.activebookmark in repo._bookmarks): |
|
658 | 659 | bookmarks.activate(repo, state.activebookmark) |
|
659 | ||
|
660 | if repo.vfs.exists('unshelverebasestate'): | |
|
661 | repo.vfs.rename('unshelverebasestate', 'rebasestate') | |
|
662 | rebase.clearstatus(repo) | |
|
663 | ||
|
664 | 660 | mergefiles(ui, repo, state.wctx, state.pendingctx) |
|
665 | 661 | if not phases.supportinternal(repo): |
|
666 | 662 | repair.strip(ui, repo, state.nodestoremove, backup=False, |
@@ -669,6 +665,12 b' def unshelveabort(ui, repo, state, opts)' | |||
|
669 | 665 | shelvedstate.clear(repo) |
|
670 | 666 | ui.warn(_("unshelve of '%s' aborted\n") % state.name) |
|
671 | 667 | |
|
668 | def hgabortunshelve(ui, repo): | |
|
669 | """logic to abort unshelve using 'hg abort""" | |
|
670 | with repo.wlock(): | |
|
671 | state = _loadshelvedstate(ui, repo, {'abort' : True}) | |
|
672 | return unshelveabort(ui, repo, state) | |
|
673 | ||
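
hgabortunshelve and hgcontinueunshelve give the new generic 'hg abort'/'hg continue' machinery entry points that take only (ui, repo) and load the shelved state themselves. The eventual registration goes through the statemod.addunfinished API introduced later in this review; roughly like this (the exact call site and keyword set are an assumption):

    from mercurial import state as statemod

    statemod.addunfinished(
        'unshelve', fname='shelvedstate', continueflag=True,
        abortfunc=hgabortunshelve, continuefunc=hgcontinueunshelve,
    )
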
|
672 | 674 | def mergefiles(ui, repo, wctx, shelvectx): |
|
673 | 675 | """updates to wctx and merges the changes from shelvectx into the |
|
674 | 676 | dirstate.""" |
@@ -692,11 +694,11 b' def unshelvecleanup(ui, repo, name, opts' | |||
|
692 | 694 | if shfile.exists(): |
|
693 | 695 | shfile.movetobackup() |
|
694 | 696 | cleanupoldbackups(repo) |
|
695 | ||
|
696 | def unshelvecontinue(ui, repo, state, opts): | |
|
697 | def unshelvecontinue(ui, repo, state, opts, basename=None): | |
|
697 | 698 | """subcommand to continue an in-progress unshelve""" |
|
698 | 699 | # We're finishing off a merge. First parent is our original |
|
699 | 700 | # parent, second is the temporary "fake" commit we're unshelving. |
|
701 | interactive = opts.get('interactive') | |
|
700 | 702 | with repo.lock(): |
|
701 | 703 | checkparents(repo, state) |
|
702 | 704 | ms = merge.mergestate.read(repo) |
@@ -719,10 +721,15 b' def unshelvecontinue(ui, repo, state, op' | |||
|
719 | 721 | with repo.ui.configoverride(overrides, 'unshelve'): |
|
720 | 722 | with repo.dirstate.parentchange(): |
|
721 | 723 | repo.setparents(state.parents[0], nodemod.nullid) |
|
722 | newnode = repo.commit(text=shelvectx.description(), | |
|
723 | extra=shelvectx.extra(), | |
|
724 | user=shelvectx.user(), | |
|
725 | date=shelvectx.date()) | |
|
724 | if not interactive: | |
|
725 | ispartialunshelve = False | |
|
726 | newnode = repo.commit(text=shelvectx.description(), | |
|
727 | extra=shelvectx.extra(), | |
|
728 | user=shelvectx.user(), | |
|
729 | date=shelvectx.date()) | |
|
730 | else: | |
|
731 | newnode, ispartialunshelve = _dounshelveinteractive(ui, | |
|
732 | repo, shelvectx, basename, opts) | |
|
726 | 733 | |
|
727 | 734 | if newnode is None: |
|
728 | 735 | # If it ended up being a no-op commit, then the normal |
@@ -739,22 +746,24 b' def unshelvecontinue(ui, repo, state, op' | |||
|
739 | 746 | shelvectx = repo[newnode] |
|
740 | 747 | |
|
741 | 748 | hg.updaterepo(repo, pendingctx.node(), overwrite=False) |
|
742 | ||
|
743 | if repo.vfs.exists('unshelverebasestate'): | |
|
744 | repo.vfs.rename('unshelverebasestate', 'rebasestate') | |
|
745 | rebase.clearstatus(repo) | |
|
746 | ||
|
747 | 749 | mergefiles(ui, repo, state.wctx, shelvectx) |
|
748 | 750 | restorebranch(ui, repo, state.branchtorestore) |
|
749 | 751 | |
|
750 | if not phases.supportinternal(repo): | |
|
751 | repair.strip(ui, repo, state.nodestoremove, backup=False, | |
|
752 | topic='shelve') | |
|
752 | if not ispartialunshelve: | |
|
753 | if not phases.supportinternal(repo): | |
|
754 | repair.strip(ui, repo, state.nodestoremove, backup=False, | |
|
755 | topic='shelve') | |
|
756 | shelvedstate.clear(repo) | |
|
757 | unshelvecleanup(ui, repo, state.name, opts) | |
|
753 | 758 | _restoreactivebookmark(repo, state.activebookmark) |
|
754 | shelvedstate.clear(repo) | |
|
755 | unshelvecleanup(ui, repo, state.name, opts) | |
|
756 | 759 | ui.status(_("unshelve of '%s' complete\n") % state.name) |
|
757 | 760 | |
|
761 | def hgcontinueunshelve(ui, repo): | |
|
762 | """logic to resume unshelve using 'hg continue'""" | |
|
763 | with repo.wlock(): | |
|
764 | state = _loadshelvedstate(ui, repo, {'continue' : True}) | |
|
765 | return unshelvecontinue(ui, repo, state, {'keep' : state.keep}) | |
|
766 | ||
|
758 | 767 | def _commitworkingcopychanges(ui, repo, opts, tmpwctx): |
|
759 | 768 | """Temporarily commit working copy changes before moving unshelve commit""" |
|
760 | 769 | # Store pending changes in a commit and remember added in case a shelve |
@@ -795,14 +804,40 b' def _unshelverestorecommit(ui, repo, tr,' | |||
|
795 | 804 | |
|
796 | 805 | return repo, shelvectx |
|
797 | 806 | |
|
807 | def _dounshelveinteractive(ui, repo, shelvectx, basename, opts): | |
|
808 | """The user might want to unshelve certain changes only from the stored | |
|
809 | shelve. So, we would create two commits. One with requested changes to | |
|
810 | unshelve at that time and the latter is shelved for future. | |
|
811 | """ | |
|
812 | opts['message'] = shelvectx.description() | |
|
813 | opts['interactive-unshelve'] = True | |
|
814 | pats = [] | |
|
815 | commitfunc = getcommitfunc(shelvectx.extra(), interactive=True, | |
|
816 | editor=True) | |
|
817 | newnode = cmdutil.dorecord(ui, repo, commitfunc, None, False, | |
|
818 | cmdutil.recordfilter, *pats, | |
|
819 | **pycompat.strkwargs(opts)) | |
|
820 | snode = repo.commit(text=shelvectx.description(), | |
|
821 | extra=shelvectx.extra(), | |
|
822 | user=shelvectx.user(), | |
|
823 | date=shelvectx.date()) | |
|
824 | m = scmutil.matchfiles(repo, repo[snode].files()) | |
|
825 | if snode: | |
|
826 | _shelvecreatedcommit(repo, snode, basename, m) | |
|
827 | ||
|
828 | return newnode, bool(snode) | |
|
829 | ||
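
_dounshelveinteractive mirrors 'hg commit -i': cmdutil.dorecord commits only the hunks the user selects, and whatever remains is committed and re-shelved under the same basename via _shelvecreatedcommit. The contract, simplified (names as in the hunk above):

    newnode, ispartialunshelve = _dounshelveinteractive(ui, repo, shelvectx,
                                                        basename, opts)
    # newnode: commit with the hunks the user picked (None if a no-op)
    # ispartialunshelve: True when leftover changes were re-shelved

so a plain `hg unshelve -i` can apply part of a shelve and keep the rest for later.
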
|
798 | 830 | def _rebaserestoredcommit(ui, repo, opts, tr, oldtiprev, basename, pctx, |
|
799 | 831 | tmpwctx, shelvectx, branchtorestore, |
|
800 | 832 | activebookmark): |
|
801 | 833 | """Rebase restored commit from its original location to a destination""" |
|
802 | 834 | # If the shelve is not immediately on top of the commit |
|
803 | 835 | # we'll be merging with, rebase it to be on top. |
|
804 | if tmpwctx.node() == shelvectx.p1().node(): | |
|
805 | return shelvectx | |
|
836 | interactive = opts.get('interactive') | |
|
837 | if tmpwctx.node() == shelvectx.p1().node() and not interactive: | |
|
838 | # We won't skip in interactive mode because the user might want to | |
|
839 | # unshelve certain changes only. | |
|
840 | return shelvectx, False | |
|
806 | 841 | |
|
807 | 842 | overrides = { |
|
808 | 843 | ('ui', 'forcemerge'): opts.get('tool', ''), |
@@ -826,10 +861,15 b' def _rebaserestoredcommit(ui, repo, opts' | |||
|
826 | 861 | |
|
827 | 862 | with repo.dirstate.parentchange(): |
|
828 | 863 | repo.setparents(tmpwctx.node(), nodemod.nullid) |
|
829 | newnode = repo.commit(text=shelvectx.description(), | |
|
830 | extra=shelvectx.extra(), | |
|
831 | user=shelvectx.user(), | |
|
832 |
|
|
|
864 | if not interactive: | |
|
865 | ispartialunshelve = False | |
|
866 | newnode = repo.commit(text=shelvectx.description(), | |
|
867 | extra=shelvectx.extra(), | |
|
868 | user=shelvectx.user(), | |
|
869 | date=shelvectx.date()) | |
|
870 | else: | |
|
871 | newnode, ispartialunshelve = _dounshelveinteractive(ui, repo, | |
|
872 | shelvectx, basename, opts) | |
|
833 | 873 | |
|
834 | 874 | if newnode is None: |
|
835 | 875 | # If it ended up being a no-op commit, then the normal |
@@ -844,7 +884,7 b' def _rebaserestoredcommit(ui, repo, opts' | |||
|
844 | 884 | shelvectx = repo[newnode] |
|
845 | 885 | hg.updaterepo(repo, tmpwctx.node(), False) |
|
846 | 886 | |
|
847 | return shelvectx | |
|
887 | return shelvectx, ispartialunshelve | |
|
848 | 888 | |
|
849 | 889 | def _forgetunknownfiles(repo, shelvectx, addedbefore): |
|
850 | 890 | # Forget any files that were unknown before the shelve, unknown before |
@@ -877,70 +917,18 b' def _checkunshelveuntrackedproblems(ui, ' | |||
|
877 | 917 | hint = _("run hg status to see which files are missing") |
|
878 | 918 | raise error.Abort(m, hint=hint) |
|
879 | 919 | |
|
880 | @command('unshelve', | |
|
881 | [('a', 'abort', None, | |
|
882 | _('abort an incomplete unshelve operation')), | |
|
883 | ('c', 'continue', None, | |
|
884 | _('continue an incomplete unshelve operation')), | |
|
885 | ('k', 'keep', None, | |
|
886 | _('keep shelve after unshelving')), | |
|
887 | ('n', 'name', '', | |
|
888 | _('restore shelved change with given name'), _('NAME')), | |
|
889 | ('t', 'tool', '', _('specify merge tool')), | |
|
890 | ('', 'date', '', | |
|
891 | _('set date for temporary commits (DEPRECATED)'), _('DATE'))], | |
|
892 | _('hg unshelve [[-n] SHELVED]'), | |
|
893 | helpcategory=command.CATEGORY_WORKING_DIRECTORY) | |
|
894 | def unshelve(ui, repo, *shelved, **opts): | |
|
895 | """restore a shelved change to the working directory | |
|
896 | ||
|
897 | This command accepts an optional name of a shelved change to | |
|
898 | restore. If none is given, the most recent shelved change is used. | |
|
899 | ||
|
900 | If a shelved change is applied successfully, the bundle that | |
|
901 | contains the shelved changes is moved to a backup location | |
|
902 | (.hg/shelve-backup). | |
|
903 | ||
|
904 | Since you can restore a shelved change on top of an arbitrary | |
|
905 | commit, it is possible that unshelving will result in a conflict | |
|
906 | between your changes and the commits you are unshelving onto. If | |
|
907 | this occurs, you must resolve the conflict, then use | |
|
908 | ``--continue`` to complete the unshelve operation. (The bundle | |
|
909 | will not be moved until you successfully complete the unshelve.) | |
|
910 | ||
|
911 | (Alternatively, you can use ``--abort`` to abandon an unshelve | |
|
912 | that causes a conflict. This reverts the unshelved changes, and | |
|
913 | leaves the bundle in place.) | |
|
914 | ||
|
915 | If bare shelved change(when no files are specified, without interactive, | |
|
916 | include and exclude option) was done on newly created branch it would | |
|
917 | restore branch information to the working directory. | |
|
918 | ||
|
919 | After a successful unshelve, the shelved changes are stored in a | |
|
920 | backup directory. Only the N most recent backups are kept. N | |
|
921 | defaults to 10 but can be overridden using the ``shelve.maxbackups`` | |
|
922 | configuration option. | |
|
923 | ||
|
924 | .. container:: verbose | |
|
925 | ||
|
926 | Timestamp in seconds is used to decide order of backups. More | |
|
927 | than ``maxbackups`` backups are kept, if same timestamp | |
|
928 | prevents from deciding exact order of them, for safety. | |
|
929 | """ | |
|
930 | with repo.wlock(): | |
|
931 | return _dounshelve(ui, repo, *shelved, **opts) | |
|
932 | ||
|
933 | def _dounshelve(ui, repo, *shelved, **opts): | |
|
920 | def dounshelve(ui, repo, *shelved, **opts): | |
|
934 | 921 | opts = pycompat.byteskwargs(opts) |
|
935 | 922 | abortf = opts.get('abort') |
|
936 | 923 | continuef = opts.get('continue') |
|
924 | interactive = opts.get('interactive') | |
|
937 | 925 | if not abortf and not continuef: |
|
938 | 926 | cmdutil.checkunfinished(repo) |
|
939 | 927 | shelved = list(shelved) |
|
940 | 928 | if opts.get("name"): |
|
941 | 929 | shelved.append(opts["name"]) |
|
942 | 930 | |
|
943 | if abortf or continuef: | |
|
931 | if abortf or continuef and not interactive: | |
|
944 | 932 | if abortf and continuef: |
|
945 | 933 | raise error.Abort(_('cannot use both abort and continue')) |
|
946 | 934 | if shelved: |
@@ -949,49 +937,24 b' def _dounshelve(ui, repo, *shelved, **op' | |||
|
949 | 937 | if abortf and opts.get('tool', False): |
|
950 | 938 | ui.warn(_('tool option will be ignored\n')) |
|
951 | 939 | |
|
952 | try: | |
|
953 | state = shelvedstate.load(repo) | |
|
954 | if opts.get('keep') is None: | |
|
955 | opts['keep'] = state.keep | |
|
956 | except IOError as err: | |
|
957 | if err.errno != errno.ENOENT: | |
|
958 | raise | |
|
959 | cmdutil.wrongtooltocontinue(repo, _('unshelve')) | |
|
960 | except error.CorruptedState as err: | |
|
961 | ui.debug(pycompat.bytestr(err) + '\n') | |
|
962 | if continuef: | |
|
963 | msg = _('corrupted shelved state file') | |
|
964 | hint = _('please run hg unshelve --abort to abort unshelve ' | |
|
965 | 'operation') | |
|
966 | raise error.Abort(msg, hint=hint) | |
|
967 | elif abortf: | |
|
968 | msg = _('could not read shelved state file, your working copy ' | |
|
969 | 'may be in an unexpected state\nplease update to some ' | |
|
970 | 'commit\n') | |
|
971 | ui.warn(msg) | |
|
972 | shelvedstate.clear(repo) | |
|
973 | return | |
|
974 | ||
|
940 | state = _loadshelvedstate(ui, repo, opts) | |
|
975 | 941 | if abortf: |
|
976 |
return unshelveabort(ui, repo, state |
|
|
942 | return unshelveabort(ui, repo, state) | |
|
977 | 943 | elif continuef: |
|
978 | 944 | return unshelvecontinue(ui, repo, state, opts) |
|
979 | 945 | elif len(shelved) > 1: |
|
980 | 946 | raise error.Abort(_('can only unshelve one change at a time')) |
|
981 | ||
|
982 | # abort unshelve while merging (issue5123) | |
|
983 | parents = repo[None].parents() | |
|
984 | if len(parents) > 1: | |
|
985 | raise error.Abort(_('cannot unshelve while merging')) | |
|
986 | ||
|
987 | 947 | elif not shelved: |
|
988 | 948 | shelved = listshelves(repo) |
|
989 | 949 | if not shelved: |
|
990 | 950 | raise error.Abort(_('no shelved changes to apply!')) |
|
991 | 951 | basename = util.split(shelved[0][1])[1] |
|
992 | 952 | ui.status(_("unshelving change '%s'\n") % basename) |
|
993 | else: | |
|
953 | elif shelved: | |
|
994 | 954 | basename = shelved[0] |
|
955 | if continuef and interactive: | |
|
956 | state = _loadshelvedstate(ui, repo, opts) | |
|
957 | return unshelvecontinue(ui, repo, state, opts, basename) | |
|
995 | 958 | |
|
996 | 959 | if not shelvedfile(repo, basename, patchextension).exists(): |
|
997 | 960 | raise error.Abort(_("shelved change '%s' not found") % basename) |
@@ -1020,128 +983,20 b' def _dounshelve(ui, repo, *shelved, **op' | |||
|
1020 | 983 | if shelvectx.branch() != shelvectx.p1().branch(): |
|
1021 | 984 | branchtorestore = shelvectx.branch() |
|
1022 | 985 | |
|
1023 | shelvectx = _rebaserestoredcommit(ui, repo, opts, tr, oldtiprev, | |
|
1024 | basename, pctx, tmpwctx, | |
|
1025 | shelvectx, branchtorestore, | |
|
1026 | activebookmark) | |
|
986 | shelvectx, ispartialunshelve = _rebaserestoredcommit(ui, repo, opts, | |
|
987 | tr, oldtiprev, basename, pctx, tmpwctx, shelvectx, | |
|
988 | branchtorestore, activebookmark) | |
|
1027 | 989 | overrides = {('ui', 'forcemerge'): opts.get('tool', '')} |
|
1028 | 990 | with ui.configoverride(overrides, 'unshelve'): |
|
1029 | 991 | mergefiles(ui, repo, pctx, shelvectx) |
|
1030 | 992 | restorebranch(ui, repo, branchtorestore) |
|
1031 | _forgetunknownfiles(repo, shelvectx, addedbefore) | |
|
993 | if not ispartialunshelve: | |
|
994 | _forgetunknownfiles(repo, shelvectx, addedbefore) | |
|
1032 | 995 | |
|
1033 | shelvedstate.clear(repo) | |
|
1034 | _finishunshelve(repo, oldtiprev, tr, activebookmark) | |
|
1035 | unshelvecleanup(ui, repo, basename, opts) | |
|
996 | shelvedstate.clear(repo) | |
|
997 | _finishunshelve(repo, oldtiprev, tr, activebookmark) | |
|
998 | unshelvecleanup(ui, repo, basename, opts) | |
|
1036 | 999 | finally: |
|
1037 | 1000 | if tr: |
|
1038 | 1001 | tr.release() |
|
1039 | 1002 | lockmod.release(lock) |
|
1040 | ||
|
1041 | @command('shelve', | |
|
1042 | [('A', 'addremove', None, | |
|
1043 | _('mark new/missing files as added/removed before shelving')), | |
|
1044 | ('u', 'unknown', None, | |
|
1045 | _('store unknown files in the shelve')), | |
|
1046 | ('', 'cleanup', None, | |
|
1047 | _('delete all shelved changes')), | |
|
1048 | ('', 'date', '', | |
|
1049 | _('shelve with the specified commit date'), _('DATE')), | |
|
1050 | ('d', 'delete', None, | |
|
1051 | _('delete the named shelved change(s)')), | |
|
1052 | ('e', 'edit', False, | |
|
1053 | _('invoke editor on commit messages')), | |
|
1054 | ('k', 'keep', False, | |
|
1055 | _('shelve, but keep changes in the working directory')), | |
|
1056 | ('l', 'list', None, | |
|
1057 | _('list current shelves')), | |
|
1058 | ('m', 'message', '', | |
|
1059 | _('use text as shelve message'), _('TEXT')), | |
|
1060 | ('n', 'name', '', | |
|
1061 | _('use the given name for the shelved commit'), _('NAME')), | |
|
1062 | ('p', 'patch', None, | |
|
1063 | _('output patches for changes (provide the names of the shelved ' | |
|
1064 | 'changes as positional arguments)')), | |
|
1065 | ('i', 'interactive', None, | |
|
1066 | _('interactive mode, only works while creating a shelve')), | |
|
1067 | ('', 'stat', None, | |
|
1068 | _('output diffstat-style summary of changes (provide the names of ' | |
|
1069 | 'the shelved changes as positional arguments)') | |
|
1070 | )] + cmdutil.walkopts, | |
|
1071 | _('hg shelve [OPTION]... [FILE]...'), | |
|
1072 | helpcategory=command.CATEGORY_WORKING_DIRECTORY) | |
|
1073 | def shelvecmd(ui, repo, *pats, **opts): | |
|
1074 | '''save and set aside changes from the working directory | |
|
1075 | ||
|
1076 | Shelving takes files that "hg status" reports as not clean, saves | |
|
1077 | the modifications to a bundle (a shelved change), and reverts the | |
|
1078 | files so that their state in the working directory becomes clean. | |
|
1079 | ||
|
1080 | To restore these changes to the working directory, using "hg | |
|
1081 | unshelve"; this will work even if you switch to a different | |
|
1082 | commit. | |
|
1083 | ||
|
1084 | When no files are specified, "hg shelve" saves all not-clean | |
|
1085 | files. If specific files or directories are named, only changes to | |
|
1086 | those files are shelved. | |
|
1087 | ||
|
1088 | In bare shelve (when no files are specified, without interactive, | |
|
1089 | include and exclude option), shelving remembers information if the | |
|
1090 | working directory was on newly created branch, in other words working | |
|
1091 | directory was on different branch than its first parent. In this | |
|
1092 | situation unshelving restores branch information to the working directory. | |
|
1093 | ||
|
1094 | Each shelved change has a name that makes it easier to find later. | |
|
1095 | The name of a shelved change defaults to being based on the active | |
|
1096 | bookmark, or if there is no active bookmark, the current named | |
|
1097 | branch. To specify a different name, use ``--name``. | |
|
1098 | ||
|
1099 | To see a list of existing shelved changes, use the ``--list`` | |
|
1100 | option. For each shelved change, this will print its name, age, | |
|
1101 | and description; use ``--patch`` or ``--stat`` for more details. | |
|
1102 | ||
|
1103 | To delete specific shelved changes, use ``--delete``. To delete | |
|
1104 | all shelved changes, use ``--cleanup``. | |
|
1105 | ''' | |
|
1106 | opts = pycompat.byteskwargs(opts) | |
|
1107 | allowables = [ | |
|
1108 | ('addremove', {'create'}), # 'create' is pseudo action | |
|
1109 | ('unknown', {'create'}), | |
|
1110 | ('cleanup', {'cleanup'}), | |
|
1111 | # ('date', {'create'}), # ignored for passing '--date "0 0"' in tests | |
|
1112 | ('delete', {'delete'}), | |
|
1113 | ('edit', {'create'}), | |
|
1114 | ('keep', {'create'}), | |
|
1115 | ('list', {'list'}), | |
|
1116 | ('message', {'create'}), | |
|
1117 | ('name', {'create'}), | |
|
1118 | ('patch', {'patch', 'list'}), | |
|
1119 | ('stat', {'stat', 'list'}), | |
|
1120 | ] | |
|
1121 | def checkopt(opt): | |
|
1122 | if opts.get(opt): | |
|
1123 | for i, allowable in allowables: | |
|
1124 | if opts[i] and opt not in allowable: | |
|
1125 | raise error.Abort(_("options '--%s' and '--%s' may not be " | |
|
1126 | "used together") % (opt, i)) | |
|
1127 | return True | |
|
1128 | if checkopt('cleanup'): | |
|
1129 | if pats: | |
|
1130 | raise error.Abort(_("cannot specify names when using '--cleanup'")) | |
|
1131 | return cleanupcmd(ui, repo) | |
|
1132 | elif checkopt('delete'): | |
|
1133 | return deletecmd(ui, repo, pats) | |
|
1134 | elif checkopt('list'): | |
|
1135 | return listcmd(ui, repo, pats, opts) | |
|
1136 | elif checkopt('patch') or checkopt('stat'): | |
|
1137 | return patchcmds(ui, repo, pats, opts) | |
|
1138 | else: | |
|
1139 | return createcmd(ui, repo, pats, opts) | |
|
1140 | ||
|
1141 | def extsetup(ui): | |
|
1142 | cmdutil.unfinishedstates.append( | |
|
1143 | [shelvedstate._filename, False, False, | |
|
1144 | _('unshelve already in progress'), | |
|
1145 | _("use 'hg unshelve --continue' or 'hg unshelve --abort'")]) | |
|
1146 | cmdutil.afterresolvedstates.append( | |
|
1147 | [shelvedstate._filename, _('hg unshelve --continue')]) |
@@ -248,7 +248,8 b' def prunetemporaryincludes(repo):' | |||
|
248 | 248 | |
|
249 | 249 | typeactions = mergemod.emptyactions() |
|
250 | 250 | typeactions['r'] = actions |
|
251 |
mergemod.applyupdates(repo, typeactions, repo[None], repo['.'], False |
|
|
251 | mergemod.applyupdates(repo, typeactions, repo[None], repo['.'], False, | |
|
252 | wantfiledata=False) | |
|
252 | 253 | |
|
253 | 254 | # Fix dirstate |
|
254 | 255 | for file in dropped: |
@@ -382,7 +383,7 b' def filterupdatesactions(repo, wctx, mct' | |||
|
382 | 383 | typeactions = mergemod.emptyactions() |
|
383 | 384 | typeactions['g'] = actions |
|
384 | 385 | mergemod.applyupdates(repo, typeactions, repo[None], repo['.'], |
|
385 | False) | |
|
386 | False, wantfiledata=False) | |
|
386 | 387 | |
|
387 | 388 | dirstate = repo.dirstate |
|
388 | 389 | for file, flags, msg in actions: |
@@ -486,7 +487,8 b' def refreshwdir(repo, origstatus, origsp' | |||
|
486 | 487 | for f, (m, args, msg) in actions.iteritems(): |
|
487 | 488 | typeactions[m].append((f, args, msg)) |
|
488 | 489 | |
|
489 | mergemod.applyupdates(repo, typeactions, repo[None], repo['.'], False) | |
|
490 | mergemod.applyupdates(repo, typeactions, repo[None], repo['.'], False, | |
|
491 | wantfiledata=False) | |
|
490 | 492 | |
|
491 | 493 | # Fix dirstate |
|
492 | 494 | for file in added: |
@@ -16,6 +16,7 b' import ssl' | |||
|
16 | 16 | |
|
17 | 17 | from .i18n import _ |
|
18 | 18 | from . import ( |
|
19 | encoding, | |
|
19 | 20 | error, |
|
20 | 21 | node, |
|
21 | 22 | pycompat, |
@@ -348,6 +349,17 b' def wrapsocket(sock, keyfile, certfile, ' | |||
|
348 | 349 | if not serverhostname: |
|
349 | 350 | raise error.Abort(_('serverhostname argument is required')) |
|
350 | 351 | |
|
352 | if b'SSLKEYLOGFILE' in encoding.environ: | |
|
353 | try: | |
|
354 | import sslkeylog | |
|
355 | sslkeylog.set_keylog(pycompat.fsdecode( | |
|
356 | encoding.environ[b'SSLKEYLOGFILE'])) | |
|
357 | ui.warn( | |
|
358 | b'sslkeylog enabled by SSLKEYLOGFILE environment variable\n') | |
|
359 | except ImportError: | |
|
360 | ui.warn(b'sslkeylog module missing, ' | |
|
361 | b'but SSLKEYLOGFILE set in environment\n') | |
|
362 | ||
|
351 | 363 | for f in (keyfile, certfile): |
|
352 | 364 | if f and not os.path.exists(f): |
|
353 | 365 | raise error.Abort( |
@@ -19,6 +19,8 b' the data.' | |||
|
19 | 19 | |
|
20 | 20 | from __future__ import absolute_import |
|
21 | 21 | |
|
22 | from .i18n import _ | |
|
23 | ||
|
22 | 24 | from . import ( |
|
23 | 25 | error, |
|
24 | 26 | util, |
@@ -85,3 +87,134 b' class cmdstate(object):' | |||
|
85 | 87 | def exists(self): |
|
86 | 88 | """check whether the state file exists or not""" |
|
87 | 89 | return self._repo.vfs.exists(self.fname) |
|
90 | ||
|
91 | class _statecheck(object): | |
|
92 | """a utility class that deals with multistep operations like graft, | |
|
93 | histedit, bisect, update etc and check whether such commands | |
|
94 | are in an unfinished conditition or not and return appropriate message | |
|
95 | and hint. | |
|
96 | It also has the ability to register and determine the states of any new | |
|
97 | multistep operation or multistep command extension. | |
|
98 | """ | |
|
99 | ||
|
100 | def __init__(self, opname, fname, clearable, allowcommit, reportonly, | |
|
101 | continueflag, stopflag, cmdmsg, cmdhint, statushint, | |
|
102 | abortfunc, continuefunc): | |
|
103 | self._opname = opname | |
|
104 | self._fname = fname | |
|
105 | self._clearable = clearable | |
|
106 | self._allowcommit = allowcommit | |
|
107 | self._reportonly = reportonly | |
|
108 | self._continueflag = continueflag | |
|
109 | self._stopflag = stopflag | |
|
110 | self._cmdmsg = cmdmsg | |
|
111 | self._cmdhint = cmdhint | |
|
112 | self._statushint = statushint | |
|
113 | self.abortfunc = abortfunc | |
|
114 | self.continuefunc = continuefunc | |
|
115 | ||
|
116 | def statusmsg(self): | |
|
117 | """returns the hint message corresponding to the command for | |
|
118 | hg status --verbose | |
|
119 | """ | |
|
120 | if not self._statushint: | |
|
121 | hint = (_('To continue: hg %s --continue\n' | |
|
122 | 'To abort: hg %s --abort') % (self._opname, | |
|
123 | self._opname)) | |
|
124 | if self._stopflag: | |
|
125 | hint = hint + (_('\nTo stop: hg %s --stop') % | |
|
126 | (self._opname)) | |
|
127 | return hint | |
|
128 | return self._statushint | |
|
129 | ||
|
130 | def hint(self): | |
|
131 | """returns the hint message corresponding to an interrupted | |
|
132 | operation | |
|
133 | """ | |
|
134 | if not self._cmdhint: | |
|
135 | return (_("use 'hg %s --continue' or 'hg %s --abort'") % | |
|
136 | (self._opname, self._opname)) | |
|
137 | return self._cmdhint | |
|
138 | ||
|
139 | def msg(self): | |
|
140 | """returns the status message corresponding to the command""" | |
|
141 | if not self._cmdmsg: | |
|
142 | return _('%s in progress') % (self._opname) | |
|
143 | return self._cmdmsg | |
|
144 | ||
|
145 | def continuemsg(self): | |
|
146 | """ returns appropriate continue message corresponding to command""" | |
|
147 | return _('hg %s --continue') % (self._opname) | |
|
148 | ||
|
149 | def isunfinished(self, repo): | |
|
150 | """determines whether a multi-step operation is in progress | |
|
151 | or not | |
|
152 | """ | |
|
153 | if self._opname == 'merge': | |
|
154 | return len(repo[None].parents()) > 1 | |
|
155 | else: | |
|
156 | return repo.vfs.exists(self._fname) | |
|
157 | ||
|
158 | # A list of statecheck objects for multistep operations like graft. | |
|
159 | _unfinishedstates = [] | |
|
160 | ||
|
161 | def addunfinished(opname, fname, clearable=False, allowcommit=False, | |
|
162 | reportonly=False, continueflag=False, stopflag=False, | |
|
163 | cmdmsg="", cmdhint="", statushint="", abortfunc=None, | |
|
164 | continuefunc=None): | |
|
165 | """this registers a new command or operation to unfinishedstates | |
|
166 | opname is the name the command or operation | |
|
167 | fname is the file name in which data should be stored in .hg directory. | |
|
168 | It is None for merge command. | |
|
169 | clearable boolean determines whether or not interrupted states can be | |
|
170 | cleared by running `hg update -C .` which in turn deletes the | |
|
171 | state file. | |
|
172 | allowcommit boolean decides whether commit is allowed during interrupted | |
|
173 | state or not. | |
|
174 | reportonly flag is used for operations like bisect where we just | |
|
175 | need to detect the operation using 'hg status --verbose' | |
|
176 | continueflag is a boolean determines whether or not a command supports | |
|
177 | `--continue` option or not. | |
|
178 | stopflag is a boolean that determines whether or not a command supports | |
|
179 | --stop flag | |
|
180 | cmdmsg is used to pass a different status message in case standard | |
|
181 | message of the format "abort: cmdname in progress" is not desired. | |
|
182 | cmdhint is used to pass a different hint message in case standard | |
|
183 | message of the format "To continue: hg cmdname --continue | |
|
184 | To abort: hg cmdname --abort" is not desired. | |
|
185 | statushint is used to pass a different status message in case standard | |
|
186 | message of the format ('To continue: hg cmdname --continue' | |
|
187 | 'To abort: hg cmdname --abort') is not desired | |
|
188 | abortfunc stores the function required to abort an unfinished state. | |
|
189 | continuefunc stores the function required to finish an interrupted | |
|
190 | operation. | |
|
191 | """ | |
|
192 | statecheckobj = _statecheck(opname, fname, clearable, allowcommit, | |
|
193 | reportonly, continueflag, stopflag, cmdmsg, | |
|
194 | cmdhint, statushint, abortfunc, continuefunc) | |
|
195 | if opname == 'merge': | |
|
196 | _unfinishedstates.append(statecheckobj) | |
|
197 | else: | |
|
198 | _unfinishedstates.insert(0, statecheckobj) | |
|
199 | ||
|
200 | addunfinished( | |
|
201 | 'update', fname='updatestate', clearable=True, | |
|
202 | cmdmsg=_('last update was interrupted'), | |
|
203 | cmdhint=_("use 'hg update' to get a consistent checkout"), | |
|
204 | statushint=_("To continue: hg update") | |
|
205 | ) | |
|
206 | addunfinished( | |
|
207 | 'bisect', fname='bisect.state', allowcommit=True, reportonly=True, | |
|
208 | statushint=_('To mark the changeset good: hg bisect --good\n' | |
|
209 | 'To mark the changeset bad: hg bisect --bad\n' | |
|
210 | 'To abort: hg bisect --reset\n') | |
|
211 | ) | |
|
212 | ||
|
213 | def getrepostate(repo): | |
|
214 | # experimental config: commands.status.skipstates | |
|
215 | skip = set(repo.ui.configlist('commands', 'status.skipstates')) | |
|
216 | for state in _unfinishedstates: | |
|
217 | if state._opname in skip: | |
|
218 | continue | |
|
219 | if state.isunfinished(repo): | |
|
220 | return (state._opname, state.statusmsg()) |
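
As a usage sketch, an out-of-tree extension with its own multi-step command could register like this (all names below are invented for illustration):

    from mercurial import state as statemod
    from mercurial.i18n import _

    def _abortfrobnicate(ui, repo):
        # remove .hg/frobnicatestate and put the working copy back
        repo.vfs.unlink('frobnicatestate')

    statemod.addunfinished(
        'frobnicate', fname='frobnicatestate', continueflag=True,
        cmdmsg=_('frobnicate in progress'),
        abortfunc=_abortfrobnicate,
    )

Note that non-merge operations are inserted at the front of _unfinishedstates, so they are detected before the implicit merge state.
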
@@ -155,6 +155,7 b' class statichttprepository(localrepo.loc' | |||
|
155 | 155 | |
|
156 | 156 | self.names = namespaces.namespaces() |
|
157 | 157 | self.filtername = None |
|
158 | self._extrafilterid = None | |
|
158 | 159 | |
|
159 | 160 | try: |
|
160 | 161 | requirements = set(self.vfs.read(b'requires').splitlines()) |
@@ -678,6 +678,7 b' def display_hotpath(data, fp, limit=0.05' | |||
|
678 | 678 | for sample in data.samples: |
|
679 | 679 | root.add(sample.stack[::-1], sample.time - lasttime) |
|
680 | 680 | lasttime = sample.time |
|
681 | showtime = kwargs.get(r'showtime', True) | |
|
681 | 682 | |
|
682 | 683 | def _write(node, depth, multiple_siblings): |
|
683 | 684 | site = node.site |
@@ -695,7 +696,9 b' def display_hotpath(data, fp, limit=0.05' | |||
|
695 | 696 | # lots of string formatting |
|
696 | 697 | listpattern = ''.ljust(indent) +\ |
|
697 | 698 | ('\\' if multiple_siblings else '|') +\ |
|
698 | ' %4.1f%% %s %s' | |
|
699 | ' %4.1f%%' +\ | |
|
700 | (' %5.2fs' % node.count if showtime else '') +\ | |
|
701 | ' %s %s' | |
|
699 | 702 | liststring = listpattern % (node.count / root.count * 100, |
|
700 | 703 | filename, function) |
|
701 | 704 | codepattern = '%' + ('%d' % (55 - len(liststring))) + 's %d: %s' |
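
With this change the hotpath renderer can drop the per-line seconds column: callers pass showtime=False through the display kwargs (assuming the kwargs are forwarded to display_hotpath, as this hunk suggests), e.g.

    display_hotpath(data, fp, limit=0.05, showtime=False)

which turns a line like '| 100.0%  4.50s  main  line 10' into '| 100.0%  main  line 10', keeping the output stable across machines of different speeds.
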
@@ -40,7 +40,7 b' def _matchtrackedpath(path, matcher):' | |||
|
40 | 40 | if path.startswith('data/'): |
|
41 | 41 | return matcher(path[len('data/'):-len('.i')]) |
|
42 | 42 | elif path.startswith('meta/'): |
|
43 | return matcher.visitdir(path[len('meta/'):-len('/00manifest.i')] or '.') | |
|
43 | return matcher.visitdir(path[len('meta/'):-len('/00manifest.i')]) | |
|
44 | 44 | |
|
45 | 45 | raise error.ProgrammingError("cannot decode path %s" % path) |
|
46 | 46 | |
@@ -337,7 +337,7 b' def _calcmode(vfs):' | |||
|
337 | 337 | mode = None |
|
338 | 338 | return mode |
|
339 | 339 | |
|
340 | _data = ('narrowspec data meta 00manifest.d 00manifest.i' | |
|
340 | _data = ('bookmarks narrowspec data meta 00manifest.d 00manifest.i' | |
|
341 | 341 | ' 00changelog.d 00changelog.i phaseroots obsstore') |
|
342 | 342 | |
|
343 | 343 | def isrevlog(f, kind, st): |
@@ -612,7 +612,7 b' class fncachestore(basicstore):' | |||
|
612 | 612 | raise |
|
613 | 613 | |
|
614 | 614 | def copylist(self): |
|
615 | d = ('narrowspec data meta dh fncache phaseroots obsstore' | |
|
615 | d = ('bookmarks narrowspec data meta dh fncache phaseroots obsstore' | |
|
616 | 616 | ' 00manifest.d 00manifest.i 00changelog.d 00changelog.i') |
|
617 | 617 | return (['requires', '00changelog.i'] + |
|
618 | 618 | ['store/' + f for f in d.split()]) |
@@ -88,13 +88,15 b' def annotatesubrepoerror(func):' | |||
|
88 | 88 | def _updateprompt(ui, sub, dirty, local, remote): |
|
89 | 89 | if dirty: |
|
90 | 90 | msg = (_(' subrepository sources for %s differ\n' |
|
91 | 'use (l)ocal source (%s) or (r)emote source (%s)?' | |
|
91 | 'you can use (l)ocal source (%s) or (r)emote source (%s).\n' | |
|
92 | 'what do you want to do?' | |
|
92 | 93 | '$$ &Local $$ &Remote') |
|
93 | 94 | % (subrelpath(sub), local, remote)) |
|
94 | 95 | else: |
|
95 | 96 | msg = (_(' subrepository sources for %s differ (in checked out ' |
|
96 | 97 | 'version)\n' |
|
97 | 'use (l)ocal source (%s) or (r)emote source (%s)?' | |
|
98 | 'you can use (l)ocal source (%s) or (r)emote source (%s).\n' | |
|
99 | 'what do you want to do?' | |
|
98 | 100 | '$$ &Local $$ &Remote') |
|
99 | 101 | % (subrelpath(sub), local, remote)) |
|
100 | 102 | return ui.promptchoice(msg, 0) |
@@ -168,8 +168,9 b' def submerge(repo, wctx, mctx, actx, ove' | |||
|
168 | 168 | prompts['ro'] = r[0] |
|
169 | 169 | if repo.ui.promptchoice( |
|
170 | 170 | _(' subrepository sources for %(s)s differ\n' |
|
171 | 'use (l)ocal%(l)s source (%(lo)s)' | |
|
172 |
' or (r)emote%(o)s source (%(ro)s) |
|
|
171 | 'you can use (l)ocal%(l)s source (%(lo)s)' | |
|
172 | ' or (r)emote%(o)s source (%(ro)s).\n' | |
|
173 | 'what do you want to do?' | |
|
173 | 174 | '$$ &Local $$ &Remote') % prompts, 0): |
|
174 | 175 | debug(s, "prompt changed, get", r) |
|
175 | 176 | wctx.sub(s).get(r, overwrite) |
@@ -186,7 +187,9 b' def submerge(repo, wctx, mctx, actx, ove' | |||
|
186 | 187 | option = repo.ui.promptchoice( |
|
187 | 188 | _(' subrepository %(s)s diverged (local revision: %(sl)s, ' |
|
188 | 189 | 'remote revision: %(sr)s)\n' |
|
189 | '(m)erge, keep (l)ocal%(l)s or keep (r)emote%(o)s?' | |
|
190 | 'you can (m)erge, keep (l)ocal%(l)s or keep ' | |
|
191 | '(r)emote%(o)s.\n' | |
|
192 | 'what do you want to do?' | |
|
190 | 193 | '$$ &Merge $$ &Local $$ &Remote') |
|
191 | 194 | % prompts, 0) |
|
192 | 195 | if option == 0: |
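
Moving the actual question onto its own 'what do you want to do?' line makes the rendered prompt read naturally, since promptchoice strips the '$$'-separated choice labels. Illustratively (revision hashes invented), the diverged-subrepo prompt now shows up as:

     subrepository sub/foo diverged (local revision: 3b1c, remote revision: 9f2e)
    you can (m)erge, keep (l)ocal or keep (r)emote.
    what do you want to do? m
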
@@ -13,11 +13,13 b'' | |||
|
13 | 13 | from __future__ import absolute_import |
|
14 | 14 | |
|
15 | 15 | import errno |
|
16 | import io | |
|
16 | 17 | |
|
17 | 18 | from .node import ( |
|
18 | 19 | bin, |
|
19 | 20 | hex, |
|
20 | 21 | nullid, |
|
22 | nullrev, | |
|
21 | 23 | short, |
|
22 | 24 | ) |
|
23 | 25 | from .i18n import _ |
@@ -89,7 +91,7 b' def fnoderevs(ui, repo, revs):' | |||
|
89 | 91 | unfi = repo.unfiltered() |
|
90 | 92 | tonode = unfi.changelog.node |
|
91 | 93 | nodes = [tonode(r) for r in revs] |
|
92 | fnodes = _getfnodes(ui, repo, nodes[::-1]) | |
|
94 | fnodes = _getfnodes(ui, repo, nodes) | |
|
93 | 95 | fnodes = _filterfnodes(fnodes, nodes) |
|
94 | 96 | return fnodes |
|
95 | 97 | |
@@ -457,7 +459,8 b' def _readtagcache(ui, repo):' | |||
|
457 | 459 | # This is the most expensive part of finding tags, so performance |
|
458 | 460 | # depends primarily on the size of newheads. Worst case: no cache |
|
459 | 461 | # file, so newheads == repoheads. |
|
460 | cachefnode = _getfnodes(ui, repo, repoheads) | |
|
462 | # Reversed order helps the cache ('repoheads' is in descending order) | |
|
463 | cachefnode = _getfnodes(ui, repo, reversed(repoheads)) | |
|
461 | 464 | |
|
462 | 465 | # Caller has to iterate over all heads, but can use the filenodes in |
|
463 | 466 | # cachefnode to get to each .hgtags revision quickly. |
@@ -472,7 +475,7 b' def _getfnodes(ui, repo, nodes):' | |||
|
472 | 475 | starttime = util.timer() |
|
473 | 476 | fnodescache = hgtagsfnodescache(repo.unfiltered()) |
|
474 | 477 | cachefnode = {} |
|
475 | for node in sorted(nodes): | |
|
478 | for node in nodes: | |
|
476 | 479 | fnode = fnodescache.getfnode(node) |
|
477 | 480 | if fnode != nullid: |
|
478 | 481 | cachefnode[node] = fnode |
@@ -560,7 +563,7 b' def _tag(repo, names, node, message, loc' | |||
|
560 | 563 | " branch name\n") % name) |
|
561 | 564 | |
|
562 | 565 | def writetags(fp, names, munge, prevtags): |
|
563 |
fp.seek(0, |
|
|
566 | fp.seek(0, io.SEEK_END) | |
|
564 | 567 | if prevtags and not prevtags.endswith('\n'): |
|
565 | 568 | fp.write('\n') |
|
566 | 569 | for name in names: |
@@ -691,6 +694,9 b' class hgtagsfnodescache(object):' | |||
|
691 | 694 | If an .hgtags does not exist at the specified revision, nullid is |
|
692 | 695 | returned. |
|
693 | 696 | """ |
|
697 | if node == nullid: | |
|
698 | return nullid | |
|
699 | ||
|
694 | 700 | ctx = self._repo[node] |
|
695 | 701 | rev = ctx.rev() |
|
696 | 702 | |
@@ -715,12 +721,33 b' class hgtagsfnodescache(object):' | |||
|
715 | 721 | if not computemissing: |
|
716 | 722 | return None |
|
717 | 723 | |
|
718 | # Populate missing entry. | |
|
719 | try: | |
|
720 | fnode = ctx.filenode('.hgtags') | |
|
721 | except error.LookupError: | |
|
722 | # No .hgtags file on this revision. | |
|
723 | fnode = nullid | |
|
724 | fnode = None | |
|
725 | cl = self._repo.changelog | |
|
726 | p1rev, p2rev = cl._uncheckedparentrevs(rev) | |
|
727 | p1node = cl.node(p1rev) | |
|
728 | p1fnode = self.getfnode(p1node, computemissing=False) | |
|
729 | if p2rev != nullrev: | |
|
730 | # There are some no-merge changesets where p1 is null and p2 is set. | |

731 | # Processing them as merges is just slower, but still gives a good | |

732 | # result. | |

733 | p2node = cl.node(p2rev) | |
|
734 | p2fnode = self.getfnode(p2node, computemissing=False) | |
|
735 | if p1fnode != p2fnode: | |
|
736 | # we cannot rely on readfast because we don't know against what | |
|
737 | # parent the readfast delta is computed | |
|
738 | p1fnode = None | |
|
739 | if p1fnode is not None: | |
|
740 | mctx = ctx.manifestctx() | |
|
741 | fnode = mctx.readfast().get('.hgtags') | |
|
742 | if fnode is None: | |
|
743 | fnode = p1fnode | |
|
744 | if fnode is None: | |
|
745 | # Populate missing entry. | |
|
746 | try: | |
|
747 | fnode = ctx.filenode('.hgtags') | |
|
748 | except error.LookupError: | |
|
749 | # No .hgtags file on this revision. | |
|
750 | fnode = nullid | |
|
724 | 751 | |
|
725 | 752 | self._writeentry(offset, properprefix, fnode) |
|
726 | 753 | return fnode |
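
The new logic above is a fast path for .hgtags filenode resolution: when a changeset's parents agree on their .hgtags fnode (or there is no real second parent), the child's fnode is either present in the manifest delta read by readfast() or inherited unchanged from p1, so the full ctx.filenode('.hgtags') lookup only remains for genuinely ambiguous merges and cache misses. A simplified sketch, not the verbatim code above:

    p1fnode = cache.getfnode(p1node, computemissing=False)
    if p1fnode is not None:
        # the delta carries .hgtags if it changed; otherwise inherit from p1
        fnode = ctx.manifestctx().readfast().get('.hgtags') or p1fnode

This relies on parents being processed before their children, which is why the callers earlier in this diff now pass nodes in ascending (reversed-heads) order.
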
|