merge default into stable for 5.1 release
Augie Fackler
r42860:e386b5f4 merge 5.1rc0 stable

The requested changes are too big and content was truncated.

This diff has been collapsed as it changes many lines (545 lines changed).
@@ -0,0 +1,545 b''
1 # linux.py - Linux specific automation functionality
2 #
3 # Copyright 2019 Gregory Szorc <gregory.szorc@gmail.com>
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
7
8 # no-check-code because Python 3 native.
9
10 import os
11 import pathlib
12 import shlex
13 import subprocess
14 import tempfile
15
16 from .ssh import (
17 exec_command,
18 )
19
20
21 # Linux distributions that are supported.
22 DISTROS = {
23 'debian9',
24 'ubuntu18.04',
25 'ubuntu18.10',
26 'ubuntu19.04',
27 }
28
29 INSTALL_PYTHONS = r'''
30 PYENV2_VERSIONS="2.7.16 pypy2.7-7.1.1"
31 PYENV3_VERSIONS="3.5.7 3.6.8 3.7.3 3.8-dev pypy3.5-7.0.0 pypy3.6-7.1.1"
32
33 git clone https://github.com/pyenv/pyenv.git /hgdev/pyenv
34 pushd /hgdev/pyenv
35 git checkout 3faeda67bb33e07750d1a104271369a7384ca45c
36 popd
37
38 export PYENV_ROOT="/hgdev/pyenv"
39 export PATH="$PYENV_ROOT/bin:$PATH"
40
41 # pip 19.0.3.
42 PIP_SHA256=efe99298f3fbb1f56201ce6b81d2658067d2f7d7dfc2d412e0d3cacc9a397c61
43 wget -O get-pip.py --progress dot:mega https://github.com/pypa/get-pip/raw/fee32c376da1ff6496a798986d7939cd51e1644f/get-pip.py
44 echo "${PIP_SHA256} get-pip.py" | sha256sum --check -
45
46 VIRTUALENV_SHA256=984d7e607b0a5d1329425dd8845bd971b957424b5ba664729fab51ab8c11bc39
47 VIRTUALENV_TARBALL=virtualenv-16.4.3.tar.gz
48 wget -O ${VIRTUALENV_TARBALL} --progress dot:mega https://files.pythonhosted.org/packages/37/db/89d6b043b22052109da35416abc3c397655e4bd3cff031446ba02b9654fa/${VIRTUALENV_TARBALL}
49 echo "${VIRTUALENV_SHA256} ${VIRTUALENV_TARBALL}" | sha256sum --check -
50
51 for v in ${PYENV2_VERSIONS}; do
52 pyenv install -v ${v}
53 ${PYENV_ROOT}/versions/${v}/bin/python get-pip.py
54 ${PYENV_ROOT}/versions/${v}/bin/pip install ${VIRTUALENV_TARBALL}
55 ${PYENV_ROOT}/versions/${v}/bin/pip install -r /hgdev/requirements-py2.txt
56 done
57
58 for v in ${PYENV3_VERSIONS}; do
59 pyenv install -v ${v}
60 ${PYENV_ROOT}/versions/${v}/bin/python get-pip.py
61 ${PYENV_ROOT}/versions/${v}/bin/pip install -r /hgdev/requirements-py3.txt
62 done
63
64 pyenv global ${PYENV2_VERSIONS} ${PYENV3_VERSIONS} system
65 '''.lstrip().replace('\r\n', '\n')
66
67
68 BOOTSTRAP_VIRTUALENV = r'''
69 /usr/bin/virtualenv /hgdev/venv-bootstrap
70
71 HG_SHA256=1bdd21bb87d1e05fb5cd395d488d0e0cc2f2f90ce0fd248e31a03595da5ccb47
72 HG_TARBALL=mercurial-4.9.1.tar.gz
73
74 wget -O ${HG_TARBALL} --progress dot:mega https://www.mercurial-scm.org/release/${HG_TARBALL}
75 echo "${HG_SHA256} ${HG_TARBALL}" | sha256sum --check -
76
77 /hgdev/venv-bootstrap/bin/pip install ${HG_TARBALL}
78 '''.lstrip().replace('\r\n', '\n')
79
80
81 BOOTSTRAP_DEBIAN = r'''
82 #!/bin/bash
83
84 set -ex
85
86 DISTRO=`grep DISTRIB_ID /etc/lsb-release | awk -F= '{{print $2}}'`
87 DEBIAN_VERSION=`cat /etc/debian_version`
88 LSB_RELEASE=`lsb_release -cs`
89
90 sudo /usr/sbin/groupadd hg
91 sudo /usr/sbin/groupadd docker
92 sudo /usr/sbin/useradd -g hg -G sudo,docker -d /home/hg -m -s /bin/bash hg
93 sudo mkdir /home/hg/.ssh
94 sudo cp ~/.ssh/authorized_keys /home/hg/.ssh/authorized_keys
95 sudo chown -R hg:hg /home/hg/.ssh
96 sudo chmod 700 /home/hg/.ssh
97 sudo chmod 600 /home/hg/.ssh/authorized_keys
98
99 cat << EOF | sudo tee /etc/sudoers.d/90-hg
100 hg ALL=(ALL) NOPASSWD:ALL
101 EOF
102
103 sudo apt-get update
104 sudo DEBIAN_FRONTEND=noninteractive apt-get -yq dist-upgrade
105
106 # Install packages necessary to set up Docker Apt repo.
107 sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install --no-install-recommends \
108 apt-transport-https \
109 gnupg
110
111 cat > docker-apt-key << EOF
112 -----BEGIN PGP PUBLIC KEY BLOCK-----
113
114 mQINBFit2ioBEADhWpZ8/wvZ6hUTiXOwQHXMAlaFHcPH9hAtr4F1y2+OYdbtMuth
115 lqqwp028AqyY+PRfVMtSYMbjuQuu5byyKR01BbqYhuS3jtqQmljZ/bJvXqnmiVXh
116 38UuLa+z077PxyxQhu5BbqntTPQMfiyqEiU+BKbq2WmANUKQf+1AmZY/IruOXbnq
117 L4C1+gJ8vfmXQt99npCaxEjaNRVYfOS8QcixNzHUYnb6emjlANyEVlZzeqo7XKl7
118 UrwV5inawTSzWNvtjEjj4nJL8NsLwscpLPQUhTQ+7BbQXAwAmeHCUTQIvvWXqw0N
119 cmhh4HgeQscQHYgOJjjDVfoY5MucvglbIgCqfzAHW9jxmRL4qbMZj+b1XoePEtht
120 ku4bIQN1X5P07fNWzlgaRL5Z4POXDDZTlIQ/El58j9kp4bnWRCJW0lya+f8ocodo
121 vZZ+Doi+fy4D5ZGrL4XEcIQP/Lv5uFyf+kQtl/94VFYVJOleAv8W92KdgDkhTcTD
122 G7c0tIkVEKNUq48b3aQ64NOZQW7fVjfoKwEZdOqPE72Pa45jrZzvUFxSpdiNk2tZ
123 XYukHjlxxEgBdC/J3cMMNRE1F4NCA3ApfV1Y7/hTeOnmDuDYwr9/obA8t016Yljj
124 q5rdkywPf4JF8mXUW5eCN1vAFHxeg9ZWemhBtQmGxXnw9M+z6hWwc6ahmwARAQAB
125 tCtEb2NrZXIgUmVsZWFzZSAoQ0UgZGViKSA8ZG9ja2VyQGRvY2tlci5jb20+iQI3
126 BBMBCgAhBQJYrefAAhsvBQsJCAcDBRUKCQgLBRYCAwEAAh4BAheAAAoJEI2BgDwO
127 v82IsskP/iQZo68flDQmNvn8X5XTd6RRaUH33kXYXquT6NkHJciS7E2gTJmqvMqd
128 tI4mNYHCSEYxI5qrcYV5YqX9P6+Ko+vozo4nseUQLPH/ATQ4qL0Zok+1jkag3Lgk
129 jonyUf9bwtWxFp05HC3GMHPhhcUSexCxQLQvnFWXD2sWLKivHp2fT8QbRGeZ+d3m
130 6fqcd5Fu7pxsqm0EUDK5NL+nPIgYhN+auTrhgzhK1CShfGccM/wfRlei9Utz6p9P
131 XRKIlWnXtT4qNGZNTN0tR+NLG/6Bqd8OYBaFAUcue/w1VW6JQ2VGYZHnZu9S8LMc
132 FYBa5Ig9PxwGQOgq6RDKDbV+PqTQT5EFMeR1mrjckk4DQJjbxeMZbiNMG5kGECA8
133 g383P3elhn03WGbEEa4MNc3Z4+7c236QI3xWJfNPdUbXRaAwhy/6rTSFbzwKB0Jm
134 ebwzQfwjQY6f55MiI/RqDCyuPj3r3jyVRkK86pQKBAJwFHyqj9KaKXMZjfVnowLh
135 9svIGfNbGHpucATqREvUHuQbNnqkCx8VVhtYkhDb9fEP2xBu5VvHbR+3nfVhMut5
136 G34Ct5RS7Jt6LIfFdtcn8CaSas/l1HbiGeRgc70X/9aYx/V/CEJv0lIe8gP6uDoW
137 FPIZ7d6vH+Vro6xuWEGiuMaiznap2KhZmpkgfupyFmplh0s6knymuQINBFit2ioB
138 EADneL9S9m4vhU3blaRjVUUyJ7b/qTjcSylvCH5XUE6R2k+ckEZjfAMZPLpO+/tF
139 M2JIJMD4SifKuS3xck9KtZGCufGmcwiLQRzeHF7vJUKrLD5RTkNi23ydvWZgPjtx
140 Q+DTT1Zcn7BrQFY6FgnRoUVIxwtdw1bMY/89rsFgS5wwuMESd3Q2RYgb7EOFOpnu
141 w6da7WakWf4IhnF5nsNYGDVaIHzpiqCl+uTbf1epCjrOlIzkZ3Z3Yk5CM/TiFzPk
142 z2lLz89cpD8U+NtCsfagWWfjd2U3jDapgH+7nQnCEWpROtzaKHG6lA3pXdix5zG8
143 eRc6/0IbUSWvfjKxLLPfNeCS2pCL3IeEI5nothEEYdQH6szpLog79xB9dVnJyKJb
144 VfxXnseoYqVrRz2VVbUI5Blwm6B40E3eGVfUQWiux54DspyVMMk41Mx7QJ3iynIa
145 1N4ZAqVMAEruyXTRTxc9XW0tYhDMA/1GYvz0EmFpm8LzTHA6sFVtPm/ZlNCX6P1X
146 zJwrv7DSQKD6GGlBQUX+OeEJ8tTkkf8QTJSPUdh8P8YxDFS5EOGAvhhpMBYD42kQ
147 pqXjEC+XcycTvGI7impgv9PDY1RCC1zkBjKPa120rNhv/hkVk/YhuGoajoHyy4h7
148 ZQopdcMtpN2dgmhEegny9JCSwxfQmQ0zK0g7m6SHiKMwjwARAQABiQQ+BBgBCAAJ
149 BQJYrdoqAhsCAikJEI2BgDwOv82IwV0gBBkBCAAGBQJYrdoqAAoJEH6gqcPyc/zY
150 1WAP/2wJ+R0gE6qsce3rjaIz58PJmc8goKrir5hnElWhPgbq7cYIsW5qiFyLhkdp
151 YcMmhD9mRiPpQn6Ya2w3e3B8zfIVKipbMBnke/ytZ9M7qHmDCcjoiSmwEXN3wKYI
152 mD9VHONsl/CG1rU9Isw1jtB5g1YxuBA7M/m36XN6x2u+NtNMDB9P56yc4gfsZVES
153 KA9v+yY2/l45L8d/WUkUi0YXomn6hyBGI7JrBLq0CX37GEYP6O9rrKipfz73XfO7
154 JIGzOKZlljb/D9RX/g7nRbCn+3EtH7xnk+TK/50euEKw8SMUg147sJTcpQmv6UzZ
155 cM4JgL0HbHVCojV4C/plELwMddALOFeYQzTif6sMRPf+3DSj8frbInjChC3yOLy0
156 6br92KFom17EIj2CAcoeq7UPhi2oouYBwPxh5ytdehJkoo+sN7RIWua6P2WSmon5
157 U888cSylXC0+ADFdgLX9K2zrDVYUG1vo8CX0vzxFBaHwN6Px26fhIT1/hYUHQR1z
158 VfNDcyQmXqkOnZvvoMfz/Q0s9BhFJ/zU6AgQbIZE/hm1spsfgvtsD1frZfygXJ9f
159 irP+MSAI80xHSf91qSRZOj4Pl3ZJNbq4yYxv0b1pkMqeGdjdCYhLU+LZ4wbQmpCk
160 SVe2prlLureigXtmZfkqevRz7FrIZiu9ky8wnCAPwC7/zmS18rgP/17bOtL4/iIz
161 QhxAAoAMWVrGyJivSkjhSGx1uCojsWfsTAm11P7jsruIL61ZzMUVE2aM3Pmj5G+W
162 9AcZ58Em+1WsVnAXdUR//bMmhyr8wL/G1YO1V3JEJTRdxsSxdYa4deGBBY/Adpsw
163 24jxhOJR+lsJpqIUeb999+R8euDhRHG9eFO7DRu6weatUJ6suupoDTRWtr/4yGqe
164 dKxV3qQhNLSnaAzqW/1nA3iUB4k7kCaKZxhdhDbClf9P37qaRW467BLCVO/coL3y
165 Vm50dwdrNtKpMBh3ZpbB1uJvgi9mXtyBOMJ3v8RZeDzFiG8HdCtg9RvIt/AIFoHR
166 H3S+U79NT6i0KPzLImDfs8T7RlpyuMc4Ufs8ggyg9v3Ae6cN3eQyxcK3w0cbBwsh
167 /nQNfsA6uu+9H7NhbehBMhYnpNZyrHzCmzyXkauwRAqoCbGCNykTRwsur9gS41TQ
168 M8ssD1jFheOJf3hODnkKU+HKjvMROl1DK7zdmLdNzA1cvtZH/nCC9KPj1z8QC47S
169 xx+dTZSx4ONAhwbS/LN3PoKtn8LPjY9NP9uDWI+TWYquS2U+KHDrBDlsgozDbs/O
170 jCxcpDzNmXpWQHEtHU7649OXHP7UeNST1mCUCH5qdank0V1iejF6/CfTFU4MfcrG
171 YT90qFF93M3v01BbxP+EIY2/9tiIPbrd
172 =0YYh
173 -----END PGP PUBLIC KEY BLOCK-----
174 EOF
175
176 sudo apt-key add docker-apt-key
177
178 if [ "$DEBIAN_VERSION" = "9.8" ]; then
179 cat << EOF | sudo tee -a /etc/apt/sources.list
180 # Need backports for clang-format-6.0
181 deb http://deb.debian.org/debian stretch-backports main
182
183 # Sources are useful if we want to compile things locally.
184 deb-src http://deb.debian.org/debian stretch main
185 deb-src http://security.debian.org/debian-security stretch/updates main
186 deb-src http://deb.debian.org/debian stretch-updates main
187 deb-src http://deb.debian.org/debian stretch-backports main
188
189 deb [arch=amd64] https://download.docker.com/linux/debian stretch stable
190 EOF
191
192 elif [ "$DISTRO" = "Ubuntu" ]; then
193 cat << EOF | sudo tee -a /etc/apt/sources.list
194 deb [arch=amd64] https://download.docker.com/linux/ubuntu $LSB_RELEASE stable
195 EOF
196
197 fi
198
199 sudo apt-get update
200
201 PACKAGES="\
202 btrfs-progs \
203 build-essential \
204 bzr \
205 clang-format-6.0 \
206 cvs \
207 darcs \
208 debhelper \
209 devscripts \
210 dpkg-dev \
211 dstat \
212 emacs \
213 gettext \
214 git \
215 htop \
216 iotop \
217 jfsutils \
218 libbz2-dev \
219 libexpat1-dev \
220 libffi-dev \
221 libgdbm-dev \
222 liblzma-dev \
223 libncurses5-dev \
224 libnss3-dev \
225 libreadline-dev \
226 libsqlite3-dev \
227 libssl-dev \
228 netbase \
229 ntfs-3g \
230 nvme-cli \
231 pyflakes \
232 pyflakes3 \
233 pylint \
234 pylint3 \
235 python-all-dev \
236 python-dev \
237 python-docutils \
238 python-fuzzywuzzy \
239 python-pygments \
240 python-subversion \
241 python-vcr \
242 python3-dev \
243 python3-docutils \
244 python3-fuzzywuzzy \
245 python3-pygments \
246 python3-vcr \
247 rsync \
248 sqlite3 \
249 subversion \
250 tcl-dev \
251 tk-dev \
252 tla \
253 unzip \
254 uuid-dev \
255 vim \
256 virtualenv \
257 wget \
258 xfsprogs \
259 zip \
260 zlib1g-dev"
261
262 if [ "$DEBIAN_VERSION" = "9.8" ]; then
263 PACKAGES="$PACKAGES linux-perf"
264 elif [ "$DISTRO" = "Ubuntu" ]; then
265 PACKAGES="$PACKAGES linux-tools-common"
266 fi
267
268 # Ubuntu 19.04 removes monotone.
269 if [ "$LSB_RELEASE" != "disco" ]; then
270 PACKAGES="$PACKAGES monotone"
271 fi
272
273 # As of April 27, 2019, Docker hasn't published packages for
274 # Ubuntu 19.04 yet.
275 if [ "$LSB_RELEASE" != "disco" ]; then
276 PACKAGES="$PACKAGES docker-ce"
277 fi
278
279 sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install --no-install-recommends $PACKAGES
280
281 # Create clang-format symlink so test harness finds it.
282 sudo update-alternatives --install /usr/bin/clang-format clang-format \
283 /usr/bin/clang-format-6.0 1000
284
285 sudo mkdir /hgdev
286 # Will be normalized to hg:hg later.
287 sudo chown `whoami` /hgdev
288
289 cp requirements-py2.txt /hgdev/requirements-py2.txt
290 cp requirements-py3.txt /hgdev/requirements-py3.txt
291
292 # Disable the pip version check because it uses the network and can
293 # be annoying.
294 cat << EOF | sudo tee -a /etc/pip.conf
295 [global]
296 disable-pip-version-check = True
297 EOF
298
299 {install_pythons}
300 {bootstrap_virtualenv}
301
302 /hgdev/venv-bootstrap/bin/hg clone https://www.mercurial-scm.org/repo/hg /hgdev/src
303
304 # Mark the repo as non-publishing.
305 cat >> /hgdev/src/.hg/hgrc << EOF
306 [phases]
307 publish = false
308 EOF
309
310 sudo chown -R hg:hg /hgdev
311 '''.lstrip().format(
312 install_pythons=INSTALL_PYTHONS,
313 bootstrap_virtualenv=BOOTSTRAP_VIRTUALENV
314 ).replace('\r\n', '\n')
315
316
317 # Prepares /hgdev for operations.
318 PREPARE_HGDEV = '''
319 #!/bin/bash
320
321 set -e
322
323 FS=$1
324
325 ensure_device() {
326 if [ -z "${DEVICE}" ]; then
327 echo "could not find block device to format"
328 exit 1
329 fi
330 }
331
332 # Determine device to partition for extra filesystem.
333 # If only 1 volume is present, it will be the root volume and
334 # should be /dev/nvme0. If multiple volumes are present, the
335 # root volume could be nvme0 or nvme1. Use whichever one doesn't have
336 # a partition.
337 if [ -e /dev/nvme1n1 ]; then
338 if [ -e /dev/nvme0n1p1 ]; then
339 DEVICE=/dev/nvme1n1
340 else
341 DEVICE=/dev/nvme0n1
342 fi
343 else
344 DEVICE=
345 fi
346
347 sudo mkdir /hgwork
348
349 if [ "${FS}" != "default" -a "${FS}" != "tmpfs" ]; then
350 ensure_device
351 echo "creating ${FS} filesystem on ${DEVICE}"
352 fi
353
354 if [ "${FS}" = "default" ]; then
355 :
356
357 elif [ "${FS}" = "btrfs" ]; then
358 sudo mkfs.btrfs ${DEVICE}
359 sudo mount ${DEVICE} /hgwork
360
361 elif [ "${FS}" = "ext3" ]; then
362 # lazy_journal_init speeds up filesystem creation at the expense of
363 # integrity if things crash. We are an ephemeral instance, so we don't
364 # care about integrity.
365 sudo mkfs.ext3 -E lazy_journal_init=1 ${DEVICE}
366 sudo mount ${DEVICE} /hgwork
367
368 elif [ "${FS}" = "ext4" ]; then
369 sudo mkfs.ext4 -E lazy_journal_init=1 ${DEVICE}
370 sudo mount ${DEVICE} /hgwork
371
372 elif [ "${FS}" = "jfs" ]; then
373 sudo mkfs.jfs ${DEVICE}
374 sudo mount ${DEVICE} /hgwork
375
376 elif [ "${FS}" = "tmpfs" ]; then
377 echo "creating tmpfs volume in /hgwork"
378 sudo mount -t tmpfs -o size=1024M tmpfs /hgwork
379
380 elif [ "${FS}" = "xfs" ]; then
381 sudo mkfs.xfs ${DEVICE}
382 sudo mount ${DEVICE} /hgwork
383
384 else
385 echo "unsupported filesystem: ${FS}"
386 exit 1
387 fi
388
389 echo "/hgwork ready"
390
391 sudo chown hg:hg /hgwork
392 mkdir /hgwork/tmp
393 chown hg:hg /hgwork/tmp
394
395 rsync -a /hgdev/src /hgwork/
396 '''.lstrip().replace('\r\n', '\n')
397
398
399 HG_UPDATE_CLEAN = '''
400 set -ex
401
402 HG=/hgdev/venv-bootstrap/bin/hg
403
404 cd /hgwork/src
405 ${HG} --config extensions.purge= purge --all
406 ${HG} update -C $1
407 ${HG} log -r .
408 '''.lstrip().replace('\r\n', '\n')
409
410
411 def prepare_exec_environment(ssh_client, filesystem='default'):
412 """Prepare an EC2 instance to execute things.
413
414 The AMI has an ``/hgdev`` bootstrapped with various Python installs
415 and a clone of the Mercurial repo.
416
417 In EC2, EBS volumes launched from snapshots have wonky performance behavior.
418 Notably, blocks have to be copied on first access, which makes volume
419 I/O extremely slow on fresh volumes.
420
421 Furthermore, we may want to run operations, tests, etc. on alternative
422 filesystems to examine behavior across filesystems.
423
424 This function is used to facilitate executing operations on alternate
425 volumes.
426 """
427 sftp = ssh_client.open_sftp()
428
429 with sftp.open('/hgdev/prepare-hgdev', 'wb') as fh:
430 fh.write(PREPARE_HGDEV)
431 fh.chmod(0o0777)
432
433 command = 'sudo /hgdev/prepare-hgdev %s' % filesystem
434 chan, stdin, stdout = exec_command(ssh_client, command)
435 stdin.close()
436
437 for line in stdout:
438 print(line, end='')
439
440 res = chan.recv_exit_status()
441
442 if res:
443 raise Exception('non-0 exit code preparing exec environment; %d'
444 % res)
445
446
447 def synchronize_hg(source_path: pathlib.Path, ec2_instance, revision: str=None):
448 """Synchronize a local Mercurial source path to remote EC2 instance."""
449
450 with tempfile.TemporaryDirectory() as temp_dir:
451 temp_dir = pathlib.Path(temp_dir)
452
453 ssh_dir = temp_dir / '.ssh'
454 ssh_dir.mkdir()
455 ssh_dir.chmod(0o0700)
456
457 public_ip = ec2_instance.public_ip_address
458
459 ssh_config = ssh_dir / 'config'
460
461 with ssh_config.open('w', encoding='utf-8') as fh:
462 fh.write('Host %s\n' % public_ip)
463 fh.write(' User hg\n')
464 fh.write(' StrictHostKeyChecking no\n')
465 fh.write(' UserKnownHostsFile %s\n' % (ssh_dir / 'known_hosts'))
466 fh.write(' IdentityFile %s\n' % ec2_instance.ssh_private_key_path)
467
468 if not (source_path / '.hg').is_dir():
469 raise Exception('%s is not a Mercurial repository; synchronization '
470 'not yet supported' % source_path)
471
472 env = dict(os.environ)
473 env['HGPLAIN'] = '1'
474 env['HGENCODING'] = 'utf-8'
475
476 hg_bin = source_path / 'hg'
477
478 res = subprocess.run(
479 ['python2.7', str(hg_bin), 'log', '-r', revision, '-T', '{node}'],
480 cwd=str(source_path), env=env, check=True, capture_output=True)
481
482 full_revision = res.stdout.decode('ascii')
483
484 args = [
485 'python2.7', str(hg_bin),
486 '--config', 'ui.ssh=ssh -F %s' % ssh_config,
487 '--config', 'ui.remotecmd=/hgdev/venv-bootstrap/bin/hg',
488 'push', '-f', '-r', full_revision,
489 'ssh://%s//hgwork/src' % public_ip,
490 ]
491
492 subprocess.run(args, cwd=str(source_path), env=env, check=True)
493
494 # TODO support synchronizing dirty working directory.
495
496 sftp = ec2_instance.ssh_client.open_sftp()
497
498 with sftp.open('/hgdev/hgup', 'wb') as fh:
499 fh.write(HG_UPDATE_CLEAN)
500 fh.chmod(0o0700)
501
502 chan, stdin, stdout = exec_command(
503 ec2_instance.ssh_client, '/hgdev/hgup %s' % full_revision)
504 stdin.close()
505
506 for line in stdout:
507 print(line, end='')
508
509 res = chan.recv_exit_status()
510
511 if res:
512 raise Exception('non-0 exit code updating working directory; %d'
513 % res)
514
515
516 def run_tests(ssh_client, python_version, test_flags=None):
517 """Run tests on a remote Linux machine via an SSH client."""
518 test_flags = test_flags or []
519
520 print('running tests')
521
522 if python_version == 'system2':
523 python = '/usr/bin/python2'
524 elif python_version == 'system3':
525 python = '/usr/bin/python3'
526 elif python_version.startswith('pypy'):
527 python = '/hgdev/pyenv/shims/%s' % python_version
528 else:
529 python = '/hgdev/pyenv/shims/python%s' % python_version
530
531 test_flags = ' '.join(shlex.quote(a) for a in test_flags)
532
533 command = (
534 '/bin/sh -c "export TMPDIR=/hgwork/tmp; '
535 'cd /hgwork/src/tests && %s run-tests.py %s"' % (
536 python, test_flags))
537
538 chan, stdin, stdout = exec_command(ssh_client, command)
539
540 stdin.close()
541
542 for line in stdout:
543 print(line, end='')
544
545 return chan.recv_exit_status()
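
For illustration, the helpers in this module compose into an end-to-end run roughly as sketched below. The ``ec2_instance`` object, its attribute names, the chosen filesystem, and the Python version are assumptions for the example, not part of the committed module::

    # Hypothetical driver tying together the helpers above. Assumes an
    # ``ec2_instance`` exposing the ``ssh_client``, ``public_ip_address``
    # and ``ssh_private_key_path`` attributes this module references.
    import pathlib

    from hgautomation.linux import (
        prepare_exec_environment,
        run_tests,
        synchronize_hg,
    )

    def test_revision(ec2_instance, source_path: str, revision: str) -> int:
        # Provision /hgwork (here on ext4) and seed it from /hgdev/src.
        prepare_exec_environment(ec2_instance.ssh_client, filesystem='ext4')
        # Push the revision and update the remote working directory.
        synchronize_hg(pathlib.Path(source_path), ec2_instance, revision)
        # Run the harness via the pyenv CPython 3.7 shim; returns exit code.
        return run_tests(ec2_instance.ssh_client, '3.7', test_flags=['-j', '2'])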
@@ -0,0 +1,67 b''
1 # ssh.py - Interact with remote SSH servers
2 #
3 # Copyright 2019 Gregory Szorc <gregory.szorc@gmail.com>
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
7
8 # no-check-code because Python 3 native.
9
10 import socket
11 import time
12 import warnings
13
14 from cryptography.utils import (
15 CryptographyDeprecationWarning,
16 )
17 import paramiko
18
19
20 def wait_for_ssh(hostname, port, timeout=60, username=None, key_filename=None):
21 """Wait for an SSH server to start on the specified host and port."""
22 class IgnoreHostKeyPolicy(paramiko.MissingHostKeyPolicy):
23 def missing_host_key(self, client, hostname, key):
24 return
25
26 end_time = time.time() + timeout
27
28 # paramiko triggers a CryptographyDeprecationWarning in the cryptography
29 # package. Let's suppress it.
30 with warnings.catch_warnings():
31 warnings.filterwarnings('ignore',
32 category=CryptographyDeprecationWarning)
33
34 while True:
35 client = paramiko.SSHClient()
36 client.set_missing_host_key_policy(IgnoreHostKeyPolicy())
37 try:
38 client.connect(hostname, port=port, username=username,
39 key_filename=key_filename,
40 timeout=5.0, allow_agent=False,
41 look_for_keys=False)
42
43 return client
44 except socket.error:
45 pass
46 except paramiko.AuthenticationException:
47 raise
48 except paramiko.SSHException:
49 pass
50
51 if time.time() >= end_time:
52 raise Exception('Timeout reached waiting for SSH')
53
54 time.sleep(1.0)
55
56
57 def exec_command(client, command):
58 """exec_command wrapper that combines stderr/stdout and returns channel"""
59 chan = client.get_transport().open_session()
60
61 chan.exec_command(command)
62 chan.set_combine_stderr(True)
63
64 stdin = chan.makefile('wb', -1)
65 stdout = chan.makefile('r', -1)
66
67 return chan, stdin, stdout
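
A typical call sequence for these two helpers, mirroring how linux.py above consumes them, looks like the following sketch (the address and key path are placeholders)::

    # Placeholder host and key; the pattern matches the callers above.
    from hgautomation.ssh import exec_command, wait_for_ssh

    client = wait_for_ssh('203.0.113.10', 22, timeout=120,
                          username='hg', key_filename='/path/to/key')

    chan, stdin, stdout = exec_command(client, 'uname -a')
    stdin.close()

    # stderr is combined into stdout, so this is the full command output.
    for line in stdout:
        print(line, end='')

    if chan.recv_exit_status():
        raise Exception('remote command failed')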
@@ -0,0 +1,130 b''
1 #
2 # This file is autogenerated by pip-compile
3 # To update, run:
4 #
5 # pip-compile -U --generate-hashes --output-file contrib/automation/linux-requirements-py2.txt contrib/automation/linux-requirements.txt.in
6 #
7 astroid==1.6.6 \
8 --hash=sha256:87de48a92e29cedf7210ffa853d11441e7ad94cb47bacd91b023499b51cbc756 \
9 --hash=sha256:d25869fc7f44f1d9fb7d24fd7ea0639656f5355fc3089cd1f3d18c6ec6b124c7 \
10 # via pylint
11 backports.functools-lru-cache==1.5 \
12 --hash=sha256:9d98697f088eb1b0fa451391f91afb5e3ebde16bbdb272819fd091151fda4f1a \
13 --hash=sha256:f0b0e4eba956de51238e17573b7087e852dfe9854afd2e9c873f73fc0ca0a6dd \
14 # via astroid, isort, pylint
15 bzr==2.7.0 ; python_version <= "2.7" and platform_python_implementation == "CPython" \
16 --hash=sha256:c9f6bbe0a50201dadc5fddadd94ba50174193c6cf6e39e16f6dd0ad98a1df338
17 configparser==3.7.4 \
18 --hash=sha256:8be81d89d6e7b4c0d4e44bcc525845f6da25821de80cb5e06e7e0238a2899e32 \
19 --hash=sha256:da60d0014fd8c55eb48c1c5354352e363e2d30bbf7057e5e171a468390184c75 \
20 # via pylint
21 contextlib2==0.5.5 \
22 --hash=sha256:509f9419ee91cdd00ba34443217d5ca51f5a364a404e1dce9e8979cea969ca48 \
23 --hash=sha256:f5260a6e679d2ff42ec91ec5252f4eeffdcf21053db9113bd0a8e4d953769c00 \
24 # via vcrpy
25 docutils==0.14 \
26 --hash=sha256:02aec4bd92ab067f6ff27a38a38a41173bf01bed8f89157768c1573f53e474a6 \
27 --hash=sha256:51e64ef2ebfb29cae1faa133b3710143496eca21c530f3f71424d77687764274 \
28 --hash=sha256:7a4bd47eaf6596e1295ecb11361139febe29b084a87bf005bf899f9a42edc3c6
29 enum34==1.1.6 \
30 --hash=sha256:2d81cbbe0e73112bdfe6ef8576f2238f2ba27dd0d55752a776c41d38b7da2850 \
31 --hash=sha256:644837f692e5f550741432dd3f223bbb9852018674981b1664e5dc339387588a \
32 --hash=sha256:6bd0f6ad48ec2aa117d3d141940d484deccda84d4fcd884f5c3d93c23ecd8c79 \
33 --hash=sha256:8ad8c4783bf61ded74527bffb48ed9b54166685e4230386a9ed9b1279e2df5b1 \
34 # via astroid
35 funcsigs==1.0.2 \
36 --hash=sha256:330cc27ccbf7f1e992e69fef78261dc7c6569012cf397db8d3de0234e6c937ca \
37 --hash=sha256:a7bb0f2cf3a3fd1ab2732cb49eba4252c2af4240442415b4abce3b87022a8f50 \
38 # via mock
39 futures==3.2.0 \
40 --hash=sha256:9ec02aa7d674acb8618afb127e27fde7fc68994c0437ad759fa094a574adb265 \
41 --hash=sha256:ec0a6cb848cc212002b9828c3e34c675e0c9ff6741dc445cab6fdd4e1085d1f1 \
42 # via isort
43 fuzzywuzzy==0.17.0 \
44 --hash=sha256:5ac7c0b3f4658d2743aa17da53a55598144edbc5bee3c6863840636e6926f254 \
45 --hash=sha256:6f49de47db00e1c71d40ad16da42284ac357936fa9b66bea1df63fed07122d62
46 isort==4.3.17 \
47 --hash=sha256:01cb7e1ca5e6c5b3f235f0385057f70558b70d2f00320208825fa62887292f43 \
48 --hash=sha256:268067462aed7eb2a1e237fcb287852f22077de3fb07964e87e00f829eea2d1a \
49 # via pylint
50 lazy-object-proxy==1.3.1 \
51 --hash=sha256:0ce34342b419bd8f018e6666bfef729aec3edf62345a53b537a4dcc115746a33 \
52 --hash=sha256:1b668120716eb7ee21d8a38815e5eb3bb8211117d9a90b0f8e21722c0758cc39 \
53 --hash=sha256:209615b0fe4624d79e50220ce3310ca1a9445fd8e6d3572a896e7f9146bbf019 \
54 --hash=sha256:27bf62cb2b1a2068d443ff7097ee33393f8483b570b475db8ebf7e1cba64f088 \
55 --hash=sha256:27ea6fd1c02dcc78172a82fc37fcc0992a94e4cecf53cb6d73f11749825bd98b \
56 --hash=sha256:2c1b21b44ac9beb0fc848d3993924147ba45c4ebc24be19825e57aabbe74a99e \
57 --hash=sha256:2df72ab12046a3496a92476020a1a0abf78b2a7db9ff4dc2036b8dd980203ae6 \
58 --hash=sha256:320ffd3de9699d3892048baee45ebfbbf9388a7d65d832d7e580243ade426d2b \
59 --hash=sha256:50e3b9a464d5d08cc5227413db0d1c4707b6172e4d4d915c1c70e4de0bbff1f5 \
60 --hash=sha256:5276db7ff62bb7b52f77f1f51ed58850e315154249aceb42e7f4c611f0f847ff \
61 --hash=sha256:61a6cf00dcb1a7f0c773ed4acc509cb636af2d6337a08f362413c76b2b47a8dd \
62 --hash=sha256:6ae6c4cb59f199d8827c5a07546b2ab7e85d262acaccaacd49b62f53f7c456f7 \
63 --hash=sha256:7661d401d60d8bf15bb5da39e4dd72f5d764c5aff5a86ef52a042506e3e970ff \
64 --hash=sha256:7bd527f36a605c914efca5d3d014170b2cb184723e423d26b1fb2fd9108e264d \
65 --hash=sha256:7cb54db3535c8686ea12e9535eb087d32421184eacc6939ef15ef50f83a5e7e2 \
66 --hash=sha256:7f3a2d740291f7f2c111d86a1c4851b70fb000a6c8883a59660d95ad57b9df35 \
67 --hash=sha256:81304b7d8e9c824d058087dcb89144842c8e0dea6d281c031f59f0acf66963d4 \
68 --hash=sha256:933947e8b4fbe617a51528b09851685138b49d511af0b6c0da2539115d6d4514 \
69 --hash=sha256:94223d7f060301b3a8c09c9b3bc3294b56b2188e7d8179c762a1cda72c979252 \
70 --hash=sha256:ab3ca49afcb47058393b0122428358d2fbe0408cf99f1b58b295cfeb4ed39109 \
71 --hash=sha256:bd6292f565ca46dee4e737ebcc20742e3b5be2b01556dafe169f6c65d088875f \
72 --hash=sha256:cb924aa3e4a3fb644d0c463cad5bc2572649a6a3f68a7f8e4fbe44aaa6d77e4c \
73 --hash=sha256:d0fc7a286feac9077ec52a927fc9fe8fe2fabab95426722be4c953c9a8bede92 \
74 --hash=sha256:ddc34786490a6e4ec0a855d401034cbd1242ef186c20d79d2166d6a4bd449577 \
75 --hash=sha256:e34b155e36fa9da7e1b7c738ed7767fc9491a62ec6af70fe9da4a057759edc2d \
76 --hash=sha256:e5b9e8f6bda48460b7b143c3821b21b452cb3a835e6bbd5dd33aa0c8d3f5137d \
77 --hash=sha256:e81ebf6c5ee9684be8f2c87563880f93eedd56dd2b6146d8a725b50b7e5adb0f \
78 --hash=sha256:eb91be369f945f10d3a49f5f9be8b3d0b93a4c2be8f8a5b83b0571b8123e0a7a \
79 --hash=sha256:f460d1ceb0e4a5dcb2a652db0904224f367c9b3c1470d5a7683c0480e582468b \
80 # via astroid
81 mccabe==0.6.1 \
82 --hash=sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42 \
83 --hash=sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f \
84 # via pylint
85 mock==2.0.0 \
86 --hash=sha256:5ce3c71c5545b472da17b72268978914d0252980348636840bd34a00b5cc96c1 \
87 --hash=sha256:b158b6df76edd239b8208d481dc46b6afd45a846b7812ff0ce58971cf5bc8bba \
88 # via vcrpy
89 pbr==5.1.3 \
90 --hash=sha256:8257baf496c8522437e8a6cfe0f15e00aedc6c0e0e7c9d55eeeeab31e0853843 \
91 --hash=sha256:8c361cc353d988e4f5b998555c88098b9d5964c2e11acf7b0d21925a66bb5824 \
92 # via mock
93 pyflakes==2.1.1 \
94 --hash=sha256:17dbeb2e3f4d772725c777fabc446d5634d1038f234e77343108ce445ea69ce0 \
95 --hash=sha256:d976835886f8c5b31d47970ed689944a0262b5f3afa00a5a7b4dc81e5449f8a2
96 pygments==2.3.1 \
97 --hash=sha256:5ffada19f6203563680669ee7f53b64dabbeb100eb51b61996085e99c03b284a \
98 --hash=sha256:e8218dd399a61674745138520d0d4cf2621d7e032439341bc3f647bff125818d
99 pylint==1.9.4 \
100 --hash=sha256:02c2b6d268695a8b64ad61847f92e611e6afcff33fd26c3a2125370c4662905d \
101 --hash=sha256:ee1e85575587c5b58ddafa25e1c1b01691ef172e139fc25585e5d3f02451da93
102 python-levenshtein==0.12.0 \
103 --hash=sha256:033a11de5e3d19ea25c9302d11224e1a1898fe5abd23c61c7c360c25195e3eb1
104 pyyaml==5.1 \
105 --hash=sha256:1adecc22f88d38052fb787d959f003811ca858b799590a5eaa70e63dca50308c \
106 --hash=sha256:436bc774ecf7c103814098159fbb84c2715d25980175292c648f2da143909f95 \
107 --hash=sha256:460a5a4248763f6f37ea225d19d5c205677d8d525f6a83357ca622ed541830c2 \
108 --hash=sha256:5a22a9c84653debfbf198d02fe592c176ea548cccce47553f35f466e15cf2fd4 \
109 --hash=sha256:7a5d3f26b89d688db27822343dfa25c599627bc92093e788956372285c6298ad \
110 --hash=sha256:9372b04a02080752d9e6f990179a4ab840227c6e2ce15b95e1278456664cf2ba \
111 --hash=sha256:a5dcbebee834eaddf3fa7366316b880ff4062e4bcc9787b78c7fbb4a26ff2dd1 \
112 --hash=sha256:aee5bab92a176e7cd034e57f46e9df9a9862a71f8f37cad167c6fc74c65f5b4e \
113 --hash=sha256:c51f642898c0bacd335fc119da60baae0824f2cde95b0330b56c0553439f0673 \
114 --hash=sha256:c68ea4d3ba1705da1e0d85da6684ac657912679a649e8868bd850d2c299cce13 \
115 --hash=sha256:e23d0cc5299223dcc37885dae624f382297717e459ea24053709675a976a3e19 \
116 # via vcrpy
117 singledispatch==3.4.0.3 \
118 --hash=sha256:5b06af87df13818d14f08a028e42f566640aef80805c3b50c5056b086e3c2b9c \
119 --hash=sha256:833b46966687b3de7f438c761ac475213e53b306740f1abfaa86e1d1aae56aa8 \
120 # via astroid, pylint
121 six==1.12.0 \
122 --hash=sha256:3350809f0555b11f552448330d0b52d5f24c91a322ea4a15ef22629740f3761c \
123 --hash=sha256:d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73 \
124 # via astroid, mock, pylint, singledispatch, vcrpy
125 vcrpy==2.0.1 \
126 --hash=sha256:127e79cf7b569d071d1bd761b83f7b62b2ce2a2eb63ceca7aa67cba8f2602ea3 \
127 --hash=sha256:57be64aa8e9883a4117d0b15de28af62275c001abcdb00b6dc2d4406073d9a4f
128 wrapt==1.11.1 \
129 --hash=sha256:4aea003270831cceb8a90ff27c4031da6ead7ec1886023b80ce0dfe0adf61533 \
130 # via astroid, vcrpy
@@ -0,0 +1,159 b''
1 #
2 # This file is autogenerated by pip-compile
3 # To update, run:
4 #
5 # pip-compile -U --generate-hashes --output-file contrib/automation/linux-requirements-py3.txt contrib/automation/linux-requirements.txt.in
6 #
7 astroid==2.2.5 \
8 --hash=sha256:6560e1e1749f68c64a4b5dee4e091fce798d2f0d84ebe638cf0e0585a343acf4 \
9 --hash=sha256:b65db1bbaac9f9f4d190199bb8680af6f6f84fd3769a5ea883df8a91fe68b4c4 \
10 # via pylint
11 docutils==0.14 \
12 --hash=sha256:02aec4bd92ab067f6ff27a38a38a41173bf01bed8f89157768c1573f53e474a6 \
13 --hash=sha256:51e64ef2ebfb29cae1faa133b3710143496eca21c530f3f71424d77687764274 \
14 --hash=sha256:7a4bd47eaf6596e1295ecb11361139febe29b084a87bf005bf899f9a42edc3c6
15 fuzzywuzzy==0.17.0 \
16 --hash=sha256:5ac7c0b3f4658d2743aa17da53a55598144edbc5bee3c6863840636e6926f254 \
17 --hash=sha256:6f49de47db00e1c71d40ad16da42284ac357936fa9b66bea1df63fed07122d62
18 idna==2.8 \
19 --hash=sha256:c357b3f628cf53ae2c4c05627ecc484553142ca23264e593d327bcde5e9c3407 \
20 --hash=sha256:ea8b7f6188e6fa117537c3df7da9fc686d485087abf6ac197f9c46432f7e4a3c \
21 # via yarl
22 isort==4.3.17 \
23 --hash=sha256:01cb7e1ca5e6c5b3f235f0385057f70558b70d2f00320208825fa62887292f43 \
24 --hash=sha256:268067462aed7eb2a1e237fcb287852f22077de3fb07964e87e00f829eea2d1a \
25 # via pylint
26 lazy-object-proxy==1.3.1 \
27 --hash=sha256:0ce34342b419bd8f018e6666bfef729aec3edf62345a53b537a4dcc115746a33 \
28 --hash=sha256:1b668120716eb7ee21d8a38815e5eb3bb8211117d9a90b0f8e21722c0758cc39 \
29 --hash=sha256:209615b0fe4624d79e50220ce3310ca1a9445fd8e6d3572a896e7f9146bbf019 \
30 --hash=sha256:27bf62cb2b1a2068d443ff7097ee33393f8483b570b475db8ebf7e1cba64f088 \
31 --hash=sha256:27ea6fd1c02dcc78172a82fc37fcc0992a94e4cecf53cb6d73f11749825bd98b \
32 --hash=sha256:2c1b21b44ac9beb0fc848d3993924147ba45c4ebc24be19825e57aabbe74a99e \
33 --hash=sha256:2df72ab12046a3496a92476020a1a0abf78b2a7db9ff4dc2036b8dd980203ae6 \
34 --hash=sha256:320ffd3de9699d3892048baee45ebfbbf9388a7d65d832d7e580243ade426d2b \
35 --hash=sha256:50e3b9a464d5d08cc5227413db0d1c4707b6172e4d4d915c1c70e4de0bbff1f5 \
36 --hash=sha256:5276db7ff62bb7b52f77f1f51ed58850e315154249aceb42e7f4c611f0f847ff \
37 --hash=sha256:61a6cf00dcb1a7f0c773ed4acc509cb636af2d6337a08f362413c76b2b47a8dd \
38 --hash=sha256:6ae6c4cb59f199d8827c5a07546b2ab7e85d262acaccaacd49b62f53f7c456f7 \
39 --hash=sha256:7661d401d60d8bf15bb5da39e4dd72f5d764c5aff5a86ef52a042506e3e970ff \
40 --hash=sha256:7bd527f36a605c914efca5d3d014170b2cb184723e423d26b1fb2fd9108e264d \
41 --hash=sha256:7cb54db3535c8686ea12e9535eb087d32421184eacc6939ef15ef50f83a5e7e2 \
42 --hash=sha256:7f3a2d740291f7f2c111d86a1c4851b70fb000a6c8883a59660d95ad57b9df35 \
43 --hash=sha256:81304b7d8e9c824d058087dcb89144842c8e0dea6d281c031f59f0acf66963d4 \
44 --hash=sha256:933947e8b4fbe617a51528b09851685138b49d511af0b6c0da2539115d6d4514 \
45 --hash=sha256:94223d7f060301b3a8c09c9b3bc3294b56b2188e7d8179c762a1cda72c979252 \
46 --hash=sha256:ab3ca49afcb47058393b0122428358d2fbe0408cf99f1b58b295cfeb4ed39109 \
47 --hash=sha256:bd6292f565ca46dee4e737ebcc20742e3b5be2b01556dafe169f6c65d088875f \
48 --hash=sha256:cb924aa3e4a3fb644d0c463cad5bc2572649a6a3f68a7f8e4fbe44aaa6d77e4c \
49 --hash=sha256:d0fc7a286feac9077ec52a927fc9fe8fe2fabab95426722be4c953c9a8bede92 \
50 --hash=sha256:ddc34786490a6e4ec0a855d401034cbd1242ef186c20d79d2166d6a4bd449577 \
51 --hash=sha256:e34b155e36fa9da7e1b7c738ed7767fc9491a62ec6af70fe9da4a057759edc2d \
52 --hash=sha256:e5b9e8f6bda48460b7b143c3821b21b452cb3a835e6bbd5dd33aa0c8d3f5137d \
53 --hash=sha256:e81ebf6c5ee9684be8f2c87563880f93eedd56dd2b6146d8a725b50b7e5adb0f \
54 --hash=sha256:eb91be369f945f10d3a49f5f9be8b3d0b93a4c2be8f8a5b83b0571b8123e0a7a \
55 --hash=sha256:f460d1ceb0e4a5dcb2a652db0904224f367c9b3c1470d5a7683c0480e582468b \
56 # via astroid
57 mccabe==0.6.1 \
58 --hash=sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42 \
59 --hash=sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f \
60 # via pylint
61 multidict==4.5.2 \
62 --hash=sha256:024b8129695a952ebd93373e45b5d341dbb87c17ce49637b34000093f243dd4f \
63 --hash=sha256:041e9442b11409be5e4fc8b6a97e4bcead758ab1e11768d1e69160bdde18acc3 \
64 --hash=sha256:045b4dd0e5f6121e6f314d81759abd2c257db4634260abcfe0d3f7083c4908ef \
65 --hash=sha256:047c0a04e382ef8bd74b0de01407e8d8632d7d1b4db6f2561106af812a68741b \
66 --hash=sha256:068167c2d7bbeebd359665ac4fff756be5ffac9cda02375b5c5a7c4777038e73 \
67 --hash=sha256:148ff60e0fffa2f5fad2eb25aae7bef23d8f3b8bdaf947a65cdbe84a978092bc \
68 --hash=sha256:1d1c77013a259971a72ddaa83b9f42c80a93ff12df6a4723be99d858fa30bee3 \
69 --hash=sha256:1d48bc124a6b7a55006d97917f695effa9725d05abe8ee78fd60d6588b8344cd \
70 --hash=sha256:31dfa2fc323097f8ad7acd41aa38d7c614dd1960ac6681745b6da124093dc351 \
71 --hash=sha256:34f82db7f80c49f38b032c5abb605c458bac997a6c3142e0d6c130be6fb2b941 \
72 --hash=sha256:3d5dd8e5998fb4ace04789d1d008e2bb532de501218519d70bb672c4c5a2fc5d \
73 --hash=sha256:4a6ae52bd3ee41ee0f3acf4c60ceb3f44e0e3bc52ab7da1c2b2aa6703363a3d1 \
74 --hash=sha256:4b02a3b2a2f01d0490dd39321c74273fed0568568ea0e7ea23e02bd1fb10a10b \
75 --hash=sha256:4b843f8e1dd6a3195679d9838eb4670222e8b8d01bc36c9894d6c3538316fa0a \
76 --hash=sha256:5de53a28f40ef3c4fd57aeab6b590c2c663de87a5af76136ced519923d3efbb3 \
77 --hash=sha256:61b2b33ede821b94fa99ce0b09c9ece049c7067a33b279f343adfe35108a4ea7 \
78 --hash=sha256:6a3a9b0f45fd75dc05d8e93dc21b18fc1670135ec9544d1ad4acbcf6b86781d0 \
79 --hash=sha256:76ad8e4c69dadbb31bad17c16baee61c0d1a4a73bed2590b741b2e1a46d3edd0 \
80 --hash=sha256:7ba19b777dc00194d1b473180d4ca89a054dd18de27d0ee2e42a103ec9b7d014 \
81 --hash=sha256:7c1b7eab7a49aa96f3db1f716f0113a8a2e93c7375dd3d5d21c4941f1405c9c5 \
82 --hash=sha256:7fc0eee3046041387cbace9314926aa48b681202f8897f8bff3809967a049036 \
83 --hash=sha256:8ccd1c5fff1aa1427100ce188557fc31f1e0a383ad8ec42c559aabd4ff08802d \
84 --hash=sha256:8e08dd76de80539d613654915a2f5196dbccc67448df291e69a88712ea21e24a \
85 --hash=sha256:c18498c50c59263841862ea0501da9f2b3659c00db54abfbf823a80787fde8ce \
86 --hash=sha256:c49db89d602c24928e68c0d510f4fcf8989d77defd01c973d6cbe27e684833b1 \
87 --hash=sha256:ce20044d0317649ddbb4e54dab3c1bcc7483c78c27d3f58ab3d0c7e6bc60d26a \
88 --hash=sha256:d1071414dd06ca2eafa90c85a079169bfeb0e5f57fd0b45d44c092546fcd6fd9 \
89 --hash=sha256:d3be11ac43ab1a3e979dac80843b42226d5d3cccd3986f2e03152720a4297cd7 \
90 --hash=sha256:db603a1c235d110c860d5f39988ebc8218ee028f07a7cbc056ba6424372ca31b \
91 # via yarl
92 pyflakes==2.1.1 \
93 --hash=sha256:17dbeb2e3f4d772725c777fabc446d5634d1038f234e77343108ce445ea69ce0 \
94 --hash=sha256:d976835886f8c5b31d47970ed689944a0262b5f3afa00a5a7b4dc81e5449f8a2
95 pygments==2.3.1 \
96 --hash=sha256:5ffada19f6203563680669ee7f53b64dabbeb100eb51b61996085e99c03b284a \
97 --hash=sha256:e8218dd399a61674745138520d0d4cf2621d7e032439341bc3f647bff125818d
98 pylint==2.3.1 \
99 --hash=sha256:5d77031694a5fb97ea95e828c8d10fc770a1df6eb3906067aaed42201a8a6a09 \
100 --hash=sha256:723e3db49555abaf9bf79dc474c6b9e2935ad82230b10c1138a71ea41ac0fff1
101 python-levenshtein==0.12.0 \
102 --hash=sha256:033a11de5e3d19ea25c9302d11224e1a1898fe5abd23c61c7c360c25195e3eb1
103 pyyaml==5.1 \
104 --hash=sha256:1adecc22f88d38052fb787d959f003811ca858b799590a5eaa70e63dca50308c \
105 --hash=sha256:436bc774ecf7c103814098159fbb84c2715d25980175292c648f2da143909f95 \
106 --hash=sha256:460a5a4248763f6f37ea225d19d5c205677d8d525f6a83357ca622ed541830c2 \
107 --hash=sha256:5a22a9c84653debfbf198d02fe592c176ea548cccce47553f35f466e15cf2fd4 \
108 --hash=sha256:7a5d3f26b89d688db27822343dfa25c599627bc92093e788956372285c6298ad \
109 --hash=sha256:9372b04a02080752d9e6f990179a4ab840227c6e2ce15b95e1278456664cf2ba \
110 --hash=sha256:a5dcbebee834eaddf3fa7366316b880ff4062e4bcc9787b78c7fbb4a26ff2dd1 \
111 --hash=sha256:aee5bab92a176e7cd034e57f46e9df9a9862a71f8f37cad167c6fc74c65f5b4e \
112 --hash=sha256:c51f642898c0bacd335fc119da60baae0824f2cde95b0330b56c0553439f0673 \
113 --hash=sha256:c68ea4d3ba1705da1e0d85da6684ac657912679a649e8868bd850d2c299cce13 \
114 --hash=sha256:e23d0cc5299223dcc37885dae624f382297717e459ea24053709675a976a3e19 \
115 # via vcrpy
116 six==1.12.0 \
117 --hash=sha256:3350809f0555b11f552448330d0b52d5f24c91a322ea4a15ef22629740f3761c \
118 --hash=sha256:d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73 \
119 # via astroid, vcrpy
120 typed-ast==1.3.4 ; python_version >= "3.0" and platform_python_implementation != "PyPy" \
121 --hash=sha256:04894d268ba6eab7e093d43107869ad49e7b5ef40d1a94243ea49b352061b200 \
122 --hash=sha256:16616ece19daddc586e499a3d2f560302c11f122b9c692bc216e821ae32aa0d0 \
123 --hash=sha256:252fdae740964b2d3cdfb3f84dcb4d6247a48a6abe2579e8029ab3be3cdc026c \
124 --hash=sha256:2af80a373af123d0b9f44941a46df67ef0ff7a60f95872412a145f4500a7fc99 \
125 --hash=sha256:2c88d0a913229a06282b285f42a31e063c3bf9071ff65c5ea4c12acb6977c6a7 \
126 --hash=sha256:2ea99c029ebd4b5a308d915cc7fb95b8e1201d60b065450d5d26deb65d3f2bc1 \
127 --hash=sha256:3d2e3ab175fc097d2a51c7a0d3fda442f35ebcc93bb1d7bd9b95ad893e44c04d \
128 --hash=sha256:4766dd695548a15ee766927bf883fb90c6ac8321be5a60c141f18628fb7f8da8 \
129 --hash=sha256:56b6978798502ef66625a2e0f80cf923da64e328da8bbe16c1ff928c70c873de \
130 --hash=sha256:5cddb6f8bce14325b2863f9d5ac5c51e07b71b462361fd815d1d7706d3a9d682 \
131 --hash=sha256:644ee788222d81555af543b70a1098f2025db38eaa99226f3a75a6854924d4db \
132 --hash=sha256:64cf762049fc4775efe6b27161467e76d0ba145862802a65eefc8879086fc6f8 \
133 --hash=sha256:68c362848d9fb71d3c3e5f43c09974a0ae319144634e7a47db62f0f2a54a7fa7 \
134 --hash=sha256:6c1f3c6f6635e611d58e467bf4371883568f0de9ccc4606f17048142dec14a1f \
135 --hash=sha256:b213d4a02eec4ddf622f4d2fbc539f062af3788d1f332f028a2e19c42da53f15 \
136 --hash=sha256:bb27d4e7805a7de0e35bd0cb1411bc85f807968b2b0539597a49a23b00a622ae \
137 --hash=sha256:c9d414512eaa417aadae7758bc118868cd2396b0e6138c1dd4fda96679c079d3 \
138 --hash=sha256:f0937165d1e25477b01081c4763d2d9cdc3b18af69cb259dd4f640c9b900fe5e \
139 --hash=sha256:fb96a6e2c11059ecf84e6741a319f93f683e440e341d4489c9b161eca251cf2a \
140 --hash=sha256:fc71d2d6ae56a091a8d94f33ec9d0f2001d1cb1db423d8b4355debfe9ce689b7
141 vcrpy==2.0.1 \
142 --hash=sha256:127e79cf7b569d071d1bd761b83f7b62b2ce2a2eb63ceca7aa67cba8f2602ea3 \
143 --hash=sha256:57be64aa8e9883a4117d0b15de28af62275c001abcdb00b6dc2d4406073d9a4f
144 wrapt==1.11.1 \
145 --hash=sha256:4aea003270831cceb8a90ff27c4031da6ead7ec1886023b80ce0dfe0adf61533 \
146 # via astroid, vcrpy
147 yarl==1.3.0 \
148 --hash=sha256:024ecdc12bc02b321bc66b41327f930d1c2c543fa9a561b39861da9388ba7aa9 \
149 --hash=sha256:2f3010703295fbe1aec51023740871e64bb9664c789cba5a6bdf404e93f7568f \
150 --hash=sha256:3890ab952d508523ef4881457c4099056546593fa05e93da84c7250516e632eb \
151 --hash=sha256:3e2724eb9af5dc41648e5bb304fcf4891adc33258c6e14e2a7414ea32541e320 \
152 --hash=sha256:5badb97dd0abf26623a9982cd448ff12cb39b8e4c94032ccdedf22ce01a64842 \
153 --hash=sha256:73f447d11b530d860ca1e6b582f947688286ad16ca42256413083d13f260b7a0 \
154 --hash=sha256:7ab825726f2940c16d92aaec7d204cfc34ac26c0040da727cf8ba87255a33829 \
155 --hash=sha256:b25de84a8c20540531526dfbb0e2d2b648c13fd5dd126728c496d7c3fea33310 \
156 --hash=sha256:c6e341f5a6562af74ba55205dbd56d248daf1b5748ec48a0200ba227bb9e33f4 \
157 --hash=sha256:c9bb7c249c4432cd47e75af3864bc02d26c9594f49c82e2a28624417f0ae63b8 \
158 --hash=sha256:e060906c0c585565c718d1c3841747b61c5439af2211e185f6739a9412dfbde1 \
159 # via vcrpy
@@ -0,0 +1,12 b''
1 # Bazaar doesn't work with Python 3 or PyPy.
2 bzr ; python_version <= '2.7' and platform_python_implementation == 'CPython'
3 docutils
4 fuzzywuzzy
5 pyflakes
6 pygments
7 pylint
8 # Needed to avoid warnings from fuzzywuzzy.
9 python-Levenshtein
10 # typed-ast dependency doesn't install on PyPy.
11 typed-ast ; python_version >= '3.0' and platform_python_implementation != 'PyPy'
12 vcrpy
@@ -0,0 +1,195 b''
1 Prior to removing (EXPERIMENTAL)
2 --------------------------------
3
4 These things affect UI and/or behavior, and should probably be implemented (or
5 ruled out) prior to taking off the experimental shrinkwrap.
6
7 #. Finish the `hg convert` story
8
9 * Add an argument to accept a rules file to apply during conversion?
10 Currently `lfs.track` is the only way to affect the conversion.
11 * drop `lfs.track` config settings
12 * splice in `.hglfs` file for normal repo -> lfs conversions?
13
14 #. Stop uploading blobs when pushing between local repos
15
16 * Could probably hardlink directly to the other local repo's store
17 * Support inferring `lfs.url` for local push/pull (currently only supports
18 http)
19
20 #. Stop uploading blobs on strip/amend/histedit/etc.
21
22 * This seems to be a side effect of doing it for `hg bundle`, which probably
23 makes sense.
24
25 #. Handle a server with the extension loaded and a client without the extension
26 more gracefully.
27
28 * `changegroup3` is still experimental, and not enabled by default.
29 * Figure out how to `introduce LFS to the server repo
30 <https://www.mercurial-scm.org/pipermail/mercurial-devel/2018-September/122281.html>`_.
31 See the TODO in test-lfs-serve.t.
32
33 #. Remove `lfs.retry` hack in client? This came from FB, but it's not clear why
34 it is/was needed.
35
36 #. `hg export` currently writes out the LFS blob. Should it write the pointer
37 instead?
38
39 * `hg diff` is similar, and probably shouldn't see the pointer file
40
41 #. `Fix https multiplexing, and re-enable workers
42 <https://www.mercurial-scm.org/pipermail/mercurial-devel/2018-January/109916.html>`_.
43
44 #. Show to-be-applied rules with `hg files -r 'wdir()' 'set:lfs()'`
45
46 * `debugignore` can show file + line number, so a dedicated command could be
47 useful too.
48
49 #. Filesets, revsets and templates
50
51 * A dedicated revset should be faster than `'file(set:lfs())'`
52 * Attach `{lfsoid}` and `{lfspointer}` to `general keywords
53 <https://www.mercurial-scm.org/pipermail/mercurial-devel/2018-January/110251.html>`_,
54 IFF the file is a blob
55 * Drop existing items that would be redundant with general support
56
57 #. Can `grep` avoid downloading most things?
58
59 * Add a command option to skip LFS blobs?
60
61 #. Add a flag that's visible in `hg files -v` to indicate external storage?
62
63 #. Server side issues
64
65 * Check for local disk space before allowing upload. (I've got a patch for
66 this.)
67 * Make sure the http codes used are appropriate.
68 * `Why is copying the Authorization header into the JSON payload necessary
69 <https://www.mercurial-scm.org/pipermail/mercurial-devel/2018-April/116230.html>`_?
70 * `LFS-Authenticate` header support in client and server(?)
71
72 #. Add locks on cache and blob store
73
74 * This is complicated with a global store, and multiple potentially unrelated
75 local repositories that reference the same blob.
76 * Alternately, maybe just handle collisions when trying to create the same
77 blob in the store somehow.
78
79 #. Are proper file sizes reported in `debugupgraderepo`?
80
81 #. Finish prefetching files
82
83 * `-T {data}` (other than cat?)
84 * `verify`
85 * `grep`
86
87 #. Output cleanup
88
89 * Can we print the url when connecting to the blobstore? (A sudden
90 connection refused after pulling commits looks confusing.) Problem is,
91 'pushing to main url' is printed, and then lfs wants to upload before going
92 back to the main repo transfer, so then *that* could be confusing with
93 extra output. (This is kinda improved with 380f5131ee7b and 9f78d10742af.)
94
95 * Add more progress indicators? Uploading a large repo looks idle for a long
96 time while it scans for blobs in each outgoing revision.
97
98 * Print filenames instead of hashes in error messages
99
100 * subrepo aware paths, where necessary
101
102 * Is existing output at the right status/note/debug level?
103
104 #. Can `verify` be done without downloading everything?
105
106 * If we know that we are talking to an hg server, we can leverage the fact
107 that it validates in the Batch API portion, and skip d/l altogether. OTOH,
108 maybe we should download the files unconditionally for forensics. The
109 alternative is to define a custom transfer handler that definitively
110 verifies without transferring, and then cache those results. When verify
111 comes looking, look in the cache instead of actually opening the file and
112 processing it.
113
114 * Yuya has concerns about when blob fetch takes place vs when revlog is
115 verified. Since the visible hash matches the blob content, I don't think
116 there's a way to verify the pointer file that's actually stored in the
117 filelog (other than basic JSON checks; such a shallow check is sketched at the end of this document). Full verification requires the
118 blob. See
119 https://www.mercurial-scm.org/pipermail/mercurial-devel/2018-April/116133.html
120
121 * Opening a corrupt pointer file aborts. It probably shouldn't for verify.
122
123
124 Future ideas/features/polishing
125 -------------------------------
126
127 These aren't in any particular order, and are things that don't have obvious BC
128 concerns.
129
130 #. Garbage collection `(issue5790) <https://bz.mercurial-scm.org/show_bug.cgi?id=5790>`_
131
132 * This gets complicated because of the global cache, which may or may not
133 consist of hardlinks to the repo, and may be in use by other repos. (So
134 the gc may be pointless.)
135
136 #. `Compress blobs <https://github.com/git-lfs/git-lfs/issues/260>`_
137
138 * 700MB repo becomes 2.5GB with all lfs blobs
139 * What implications are there for filesystem paths that don't indicate
140 compression? (i.e. how to share with global cache and other local repos?)
141 * Probably needs to be stored under `.hg/store/lfs/zstd`, with a repo
142 requirement.
143 * Allow tuneable compression type and settings?
144 * Support compression over the wire if both sides understand the compression?
145 * `debugupgraderepo` to convert?
146 * Probably not worth supporting compressed and uncompressed concurrently
147
148 #. Determine things to upload with `readfast()
149 <https://www.mercurial-scm.org/pipermail/mercurial-devel/2018-August/121315.html>`_
150
151 * Significantly faster when pushing an entire large repo to http.
152 * Causes test changes to fileset and templates; may need both this and
153 current methods of lookup.
154
155 #. Is a command to download everything needed? This would allow copying the
156 whole store to a portable drive. Currently this can be effected by running
157 `hg verify`.
158
159 #. Stop reading the entire file into one buffer when passing through the
160 filelog interface
161
162 * `Requires major replumbing to core
163 <https://www.mercurial-scm.org/wiki/HandlingLargeFiles>`_
164
165 #. Keep corrupt files around in 'store/lfs/incoming' for forensics?
166
167 * Files should be downloaded to 'incoming', and moved to normal location when
168 done.
169
170 #. Client side path enhancements
171
172 * Support paths.default:lfs = ... style paths
173 * SSH -> https server inference
174
175 * https://www.mercurial-scm.org/pipermail/mercurial-devel/2018-April/115416.html
176 * https://github.com/git-lfs/git-lfs/blob/master/docs/api/server-discovery.md#guessing-the-server
177
178 #. Server enhancements
179
180 * Add support for transfer quotas?
181 * Download should be able to send the file in chunks, without reading the
182 whole thing into memory
183 (https://www.mercurial-scm.org/pipermail/mercurial-devel/2018-March/114584.html)
184 * Support for resuming transfers
185
186 #. Handle 3rd party server storage.
187
188 * Teach client to handle lfs `verify` action. This is needed after the
189 server instructs the client to upload the file to another server, in order
190 to tell the server that the upload completed.
191 * Teach the server to send redirects if configured, and process `verify`
192 requests.
193
194 #. `Is any hg-git work needed
195 <https://groups.google.com/d/msg/hg-git/XYNQuudteeM/ivt8gXoZAAAJ>`_?
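
Regarding the pointer-only validation mentioned in the `verify` discussion above, the following is a rough sketch of the kind of shallow check possible without fetching the blob. It assumes the git-lfs style key-value pointer format; the function name and error strings are illustrative::

    # Shallow pointer sanity check (no blob download). Assumes the
    # git-lfs style "key value" line format.
    import re

    def check_pointer(data: bytes) -> None:
        """Raise ValueError if ``data`` is not a plausible LFS pointer."""
        try:
            pairs = dict(line.split(b' ', 1)
                         for line in data.splitlines() if line)
        except ValueError:
            raise ValueError('malformed pointer line')

        if not pairs.get(b'version', b'').startswith(
                b'https://git-lfs.github.com/spec/'):
            raise ValueError('unknown pointer version')
        if not re.match(br'sha256:[0-9a-f]{64}$', pairs.get(b'oid', b'')):
            raise ValueError('bad or missing oid')
        if not pairs.get(b'size', b'').isdigit():
            raise ValueError('bad or missing size')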
@@ -0,0 +1,80 b''
1 When a merge is triggered by commands like ``hg merge`` or ``hg rebase``,
2 the active mergestate is stored in ``.hg/merge`` until the merge is
3 completed or aborted; it tracks the 3-way merge state of individual files.
4
5 The contents of the directory are:
6
7 Conflicting files
8 -----------------
9
10 The local versions of the conflicting files are stored under filenames
11 that are the hash of their repository paths.
12
13 state
14 -----
15
16 This mergestate file record is used by hg versions prior to 2.9.1
17 and contains less data than ``state2``. If there is no contradiction
18 with ``state2``, we can assume that both are written at the same time.
19 In this case, data from ``state2`` is used. Otherwise, we use ``state``.
20 We read/write both ``state`` and ``state2`` records to ensure backward
21 compatibility.
22
23 state2
24 ------
25
26 This record stores a superset of the data in ``state``, and may gain new
27 kinds of records in the future.
28
29 Each record can contain arbitrary content and has an associated type. This
30 `type` should be a letter. If `type` is uppercase, the record is mandatory:
31 versions of Mercurial that don't support it should abort. If `type` is
32 lowercase, the record can be safely ignored.
33
34 Currently known records:
35
36 | * L: the node of the "local" part of the merge (hexified version)
37 | * O: the node of the "other" part of the merge (hexified version)
38 | * F: a file to be merged entry
39 | * C: a change/delete or delete/change conflict
40 | * D: a file that the external merge driver will merge internally
41 | (experimental)
42 | * P: a path conflict (file vs directory)
43 | * m: the external merge driver defined for this merge plus its run state
44 | (experimental)
45 | * f: a (filename, dictionary) tuple of optional values for a given file
46 | * X: unsupported mandatory record type (used in tests)
47 | * x: unsupported advisory record type (used in tests)
48 | * l: the labels for the parts of the merge.
49
50 Merge driver run states (experimental):
51
52 | * u: driver-resolved files unmarked -- needs to be run next time we're
53 | about to resolve or commit
54 | * m: driver-resolved files marked -- only needs to be run before commit
55 | * s: success/skipped -- does not need to be run any more
56
57 Merge record states (indexed by filename):
58
59 | * u: unresolved conflict
60 | * r: resolved conflict
61 | * pu: unresolved path conflict (file conflicts with directory)
62 | * pr: resolved path conflict
63 | * d: driver-resolved conflict
64
65 The resolve command transitions between 'u' and 'r' for conflicts and
66 'pu' and 'pr' for path conflicts.
67
68 This format is a list of arbitrary records of the form:
69
70 [type][length][content]
71
72 `type` is a single character, `length` is a 4 byte integer, and
73 `content` is an arbitrary byte sequence of length `length`.
74
75 Mercurial versions prior to 3.7 have a bug where if there are
76 unsupported mandatory merge records, attempting to clear out the merge
77 state with hg update --clean or similar aborts. The 't' record type
78 works around that by writing out what those versions treat as an
79 advisory record, but that later versions interpret as special: the first
80 character is the 'real' record type and everything onwards is the data.
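
As a worked example of the record format above, here is a minimal reader sketch (the 4-byte length is big-endian in Mercurial's implementation; error handling is simplified)::

    # Minimal ``state2`` record reader; length is big-endian per Mercurial.
    import struct

    KNOWN_MANDATORY = b'LOFCDP'

    def read_records(data: bytes):
        """Yield (type, content) pairs from a raw ``state2`` byte string."""
        off = 0
        while off < len(data):
            rtype = data[off:off + 1]
            (length,) = struct.unpack('>I', data[off + 1:off + 5])
            content = data[off + 5:off + 5 + length]
            off += 5 + length
            if rtype == b't':
                # A 't' record smuggles a mandatory record past old
                # versions as advisory: first byte is the real type.
                rtype, content = content[:1], content[1:]
            if rtype.isupper() and rtype not in KNOWN_MANDATORY:
                # Covers 'X', the unsupported mandatory type used in tests.
                raise ValueError('unsupported mandatory record %r' % rtype)
            yield rtype, content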
NO CONTENT: additional new files (mode 100644) omitted; the requested commit or file is too big and content was truncated.
@@ -154,3 +154,6 b' roots(-10000:-1)'
154 roots(matching(tip, "author"))
155 roots(matching(tip, "author")) and -10000:-1
156 (-10000:-1) and roots(matching(tip, "author"))
157 only(max(head()))
158 only(max(head()), min(head()))
159 only(max(head()), limit(head(), 1, 1))
@@ -101,9 +101,14 b' Under normal operation, recurring costs '
101 * Storage costs for AMI / EBS snapshots. This should be just a few pennies
102 per month.
103
-104 When running EC2 instances, you'll be billed accordingly. By default, we
-105 use *small* instances, like ``t3.medium``. This instance type costs ~$0.07 per
-106 hour.
+104 When running EC2 instances, you'll be billed accordingly. Default instance
+105 types vary by operation. We try to be respectful of your money when choosing
+106 defaults. For Windows instances, which are billed per hour, we use e.g.
+107 ``t3.medium`` instances, which cost ~$0.07 per hour. For operations that
+108 scale well to many CPUs, like running Linux tests, we may use a more powerful
+109 instance like ``c5.9xlarge``. However, since Linux instances are billed
+110 per second and the cost of running a ``c5.9xlarge`` for half the time
+111 of a ``c5.4xlarge`` is roughly the same, the choice is justified.
112
113 .. note::
114
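
To make the per-second billing argument above concrete, a back-of-the-envelope comparison (the hourly rates below are illustrative assumptions, not authoritative prices)::

    # Illustrative on-demand rates in USD/hour; real prices vary by region.
    C5_4XLARGE = 0.68   # 16 vCPUs
    C5_9XLARGE = 1.53   # 36 vCPUs

    job_hours = 1.0
    cost_4x = C5_4XLARGE * job_hours
    # With per-second billing, finishing in roughly half the time on the
    # bigger instance costs about the same overall.
    cost_9x = C5_9XLARGE * (job_hours / 2)
    print('c5.4xlarge: $%.2f, c5.9xlarge at half runtime: $%.2f'
          % (cost_4x, cost_9x))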
@@ -125,3 +130,54 b' To terminate all EC2 instances that we m'
130 To purge all EC2 resources that we manage::
131
132 $ automation.py purge-ec2-resources
133
134 Remote Machine Interfaces
135 =========================
136
137 The code that connects to a remote machine and executes things is
138 theoretically machine agnostic as long as the remote machine conforms to
139 an *interface*. In other words, to perform actions like running tests
140 remotely or triggering packaging, it shouldn't matter if the remote machine
141 is an EC2 instance, a virtual machine, etc. This section attempts to document
142 the interface that remote machines need to provide in order to be valid
143 *targets* for remote execution. These interfaces are often neither ideal
144 nor the most flexible. Instead, they have evolved along with the
145 requirements of our automation code.
146
147 Linux
148 -----
149
150 Remote Linux machines expose an SSH server on port 22. The SSH server
151 must allow the ``hg`` user to authenticate using the SSH key generated by
152 the automation code. The ``hg`` user should be part of the ``hg`` group
153 and it should have ``sudo`` access without password prompting.
154
155 The SSH channel must support SFTP to facilitate transferring files from
156 client to server.
157
158 ``/bin/bash`` must exist and be a working bash shell executable.
159
160 The ``/hgdev`` directory must exist, with all its content owned by ``hg:hg``.
161
162 The ``/hgdev/pyenv`` directory should contain an installation of
163 ``pyenv``. Various Python distributions should be installed. The exact
164 versions shouldn't matter. ``pyenv global`` should have been run so
165 ``/hgdev/pyenv/shims/`` is populated with redirector scripts that point
166 to the appropriate Python executable.
167
168 The ``/hgdev/venv-bootstrap`` directory must contain a virtualenv
169 with Mercurial installed. The ``/hgdev/venv-bootstrap/bin/hg`` executable
170 is referenced by various scripts and the client.
171
172 The ``/hgdev/src`` directory MUST contain a clone of the Mercurial
173 source code. The state of the working directory is not important.
174
175 In order to run tests, the ``/hgwork`` directory will be created.
176 This may require running various ``mkfs.*`` executables and ``mount``
177 to provision a new filesystem. This will require elevated privileges
178 via ``sudo``.
179
180 Various dependencies to run the Mercurial test harness are also required.
181 Documenting them is beyond the scope of this document. Some tests also
182 require optional dependencies; the test runner prints any missing
183 dependencies when it skips a test.
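As a rough illustration of this interface from the client side, here is a minimal paramiko sketch (the host address and key path are placeholders; this is not part of the automation code itself)::

    import paramiko

    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    # Authenticate as the ``hg`` user with the automation-generated key.
    client.connect('203.0.113.10', port=22, username='hg',
                   key_filename='/path/to/keys/keypair-automation')

    # The bootstrap virtualenv must expose a working ``hg``.
    stdin, stdout, stderr = client.exec_command(
        '/hgdev/venv-bootstrap/bin/hg version')
    print(stdout.read().decode('utf-8'))

    # SFTP support is required for transferring files.
    sftp = client.open_sftp()
    print(sftp.listdir('/hgdev/src'))

    client.close()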
@@ -53,7 +53,7 b' class HGAutomation:'
53
53
54 return password
54 return password
55
55
56 def aws_connection(self, region: str):
56 def aws_connection(self, region: str, ensure_ec2_state: bool=True):
57 """Obtain an AWSConnection instance bound to a specific region."""
57 """Obtain an AWSConnection instance bound to a specific region."""
58
58
59 return AWSConnection(self, region)
59 return AWSConnection(self, region, ensure_ec2_state=ensure_ec2_state)
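The new keyword exists so tear-down paths can skip EC2 state reconciliation. A condensed sketch of how the CLI layer (later in this series) uses it, with the region as a placeholder:

    # Hypothetical: purging resources should not first recreate key pairs,
    # security groups, or IAM state that it is about to delete.
    c = hga.aws_connection('us-west-2', ensure_ec2_state=False)
    aws.remove_resources(c)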
@@ -19,6 +19,13 b' import time'
19 import boto3
19 import boto3
20 import botocore.exceptions
20 import botocore.exceptions
21
21
22 from .linux import (
23 BOOTSTRAP_DEBIAN,
24 )
25 from .ssh import (
26 exec_command as ssh_exec_command,
27 wait_for_ssh,
28 )
22 from .winrm import (
29 from .winrm import (
23 run_powershell,
30 run_powershell,
24 wait_for_winrm,
31 wait_for_winrm,
@@ -31,12 +38,46 b' INSTALL_WINDOWS_DEPENDENCIES = (SOURCE_R'
31 'install-windows-dependencies.ps1')
38 'install-windows-dependencies.ps1')
32
39
33
40
41 INSTANCE_TYPES_WITH_STORAGE = {
42 'c5d',
43 'd2',
44 'h1',
45 'i3',
46 'm5ad',
47 'm5d',
48 'r5d',
49 'r5ad',
50 'x1',
51 'z1d',
52 }
53
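These are instance type *family* prefixes; later code tests membership with ``str.startswith``, so a full type name matches via its prefix. An illustrative check (the type names are arbitrary examples):

    'm5d.4xlarge'.startswith(tuple(INSTANCE_TYPES_WITH_STORAGE))  # True
    't3.medium'.startswith(tuple(INSTANCE_TYPES_WITH_STORAGE))    # False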
54
55 DEBIAN_ACCOUNT_ID = '379101102735'
56 UBUNTU_ACCOUNT_ID = '099720109477'
57
58
34 KEY_PAIRS = {
59 KEY_PAIRS = {
35 'automation',
60 'automation',
36 }
61 }
37
62
38
63
39 SECURITY_GROUPS = {
64 SECURITY_GROUPS = {
65 'linux-dev-1': {
66 'description': 'Mercurial Linux instances that perform build/test automation',
67 'ingress': [
68 {
69 'FromPort': 22,
70 'ToPort': 22,
71 'IpProtocol': 'tcp',
72 'IpRanges': [
73 {
74 'CidrIp': '0.0.0.0/0',
75 'Description': 'SSH from entire Internet',
76 },
77 ],
78 },
79 ],
80 },
40 'windows-dev-1': {
81 'windows-dev-1': {
41 'description': 'Mercurial Windows instances that perform build automation',
82 'description': 'Mercurial Windows instances that perform build automation',
42 'ingress': [
83 'ingress': [
@@ -180,7 +221,7 b' Install-WindowsFeature -Name Net-Framewo'
180 class AWSConnection:
221 class AWSConnection:
181 """Manages the state of a connection with AWS."""
222 """Manages the state of a connection with AWS."""
182
223
183 def __init__(self, automation, region: str):
224 def __init__(self, automation, region: str, ensure_ec2_state: bool=True):
184 self.automation = automation
225 self.automation = automation
185 self.local_state_path = automation.state_path
226 self.local_state_path = automation.state_path
186
227
@@ -191,11 +232,12 b' class AWSConnection:'
191 self.ec2resource = self.session.resource('ec2')
232 self.ec2resource = self.session.resource('ec2')
192 self.iamclient = self.session.client('iam')
233 self.iamclient = self.session.client('iam')
193 self.iamresource = self.session.resource('iam')
234 self.iamresource = self.session.resource('iam')
194
235 self.security_groups = {}
195 ensure_key_pairs(automation.state_path, self.ec2resource)
196
236
197 self.security_groups = ensure_security_groups(self.ec2resource)
237 if ensure_ec2_state:
198 ensure_iam_state(self.iamresource)
238 ensure_key_pairs(automation.state_path, self.ec2resource)
239 self.security_groups = ensure_security_groups(self.ec2resource)
240 ensure_iam_state(self.iamclient, self.iamresource)
199
241
200 def key_pair_path_private(self, name):
242 def key_pair_path_private(self, name):
201 """Path to a key pair private key file."""
243 """Path to a key pair private key file."""
@@ -324,7 +366,7 b' def delete_instance_profile(profile):'
324 profile.delete()
366 profile.delete()
325
367
326
368
327 def ensure_iam_state(iamresource, prefix='hg-'):
369 def ensure_iam_state(iamclient, iamresource, prefix='hg-'):
328 """Ensure IAM state is in sync with our canonical definition."""
370 """Ensure IAM state is in sync with our canonical definition."""
329
371
330 remote_profiles = {}
372 remote_profiles = {}
@@ -360,6 +402,10 b' def ensure_iam_state(iamresource, prefix'
360 InstanceProfileName=actual)
402 InstanceProfileName=actual)
361 remote_profiles[name] = profile
403 remote_profiles[name] = profile
362
404
405 waiter = iamclient.get_waiter('instance_profile_exists')
406 waiter.wait(InstanceProfileName=actual)
407 print('IAM instance profile %s is available' % actual)
408
363 for name in sorted(set(IAM_ROLES) - set(remote_roles)):
409 for name in sorted(set(IAM_ROLES) - set(remote_roles)):
364 entry = IAM_ROLES[name]
410 entry = IAM_ROLES[name]
365
411
@@ -372,6 +418,10 b' def ensure_iam_state(iamresource, prefix'
372 AssumeRolePolicyDocument=ASSUME_ROLE_POLICY_DOCUMENT,
418 AssumeRolePolicyDocument=ASSUME_ROLE_POLICY_DOCUMENT,
373 )
419 )
374
420
421 waiter = iamclient.get_waiter('role_exists')
422 waiter.wait(RoleName=actual)
423 print('IAM role %s is available' % actual)
424
375 remote_roles[name] = role
425 remote_roles[name] = role
376
426
377 for arn in entry['policy_arns']:
427 for arn in entry['policy_arns']:
@@ -393,14 +443,14 b' def ensure_iam_state(iamresource, prefix'
393 profile.add_role(RoleName=role)
443 profile.add_role(RoleName=role)
394
444
395
445
396 def find_windows_server_2019_image(ec2resource):
446 def find_image(ec2resource, owner_id, name):
397 """Find the Amazon published Windows Server 2019 base image."""
447 """Find an AMI by its owner ID and name."""
398
448
399 images = ec2resource.images.filter(
449 images = ec2resource.images.filter(
400 Filters=[
450 Filters=[
401 {
451 {
402 'Name': 'owner-alias',
452 'Name': 'owner-id',
403 'Values': ['amazon'],
453 'Values': [owner_id],
404 },
454 },
405 {
455 {
406 'Name': 'state',
456 'Name': 'state',
@@ -412,14 +462,14 b' def find_windows_server_2019_image(ec2re'
412 },
462 },
413 {
463 {
414 'Name': 'name',
464 'Name': 'name',
415 'Values': ['Windows_Server-2019-English-Full-Base-2019.02.13'],
465 'Values': [name],
416 },
466 },
417 ])
467 ])
418
468
419 for image in images:
469 for image in images:
420 return image
470 return image
421
471
422 raise Exception('unable to find Windows Server 2019 image')
472 raise Exception('unable to find image for %s' % name)
423
473
424
474
425 def ensure_security_groups(ec2resource, prefix='hg-'):
475 def ensure_security_groups(ec2resource, prefix='hg-'):
@@ -490,7 +540,7 b" def remove_resources(c, prefix='hg-'):"
490
540
491 terminate_ec2_instances(ec2resource, prefix=prefix)
541 terminate_ec2_instances(ec2resource, prefix=prefix)
492
542
493 for image in ec2resource.images.all():
543 for image in ec2resource.images.filter(Owners=['self']):
494 if image.name.startswith(prefix):
544 if image.name.startswith(prefix):
495 remove_ami(ec2resource, image)
545 remove_ami(ec2resource, image)
496
546
@@ -505,6 +555,10 b" def remove_resources(c, prefix='hg-'):"
505
555
506 for role in iamresource.roles.all():
556 for role in iamresource.roles.all():
507 if role.name.startswith(prefix):
557 if role.name.startswith(prefix):
558 for p in role.attached_policies.all():
559 print('detaching policy %s from %s' % (p.arn, role.name))
560 role.detach_policy(PolicyArn=p.arn)
561
508 print('removing role %s' % role.name)
562 print('removing role %s' % role.name)
509 role.delete()
563 role.delete()
510
564
@@ -671,6 +725,309 b' def create_temp_windows_ec2_instances(c:'
671 yield instances
725 yield instances
672
726
673
727
728 def resolve_fingerprint(fingerprint):
729 fingerprint = json.dumps(fingerprint, sort_keys=True)
730 return hashlib.sha256(fingerprint.encode('utf-8')).hexdigest()
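Because ``json.dumps(..., sort_keys=True)`` serializes deterministically, the fingerprint is insensitive to dict key ordering, a property the AMI reconciliation below relies on. A small illustrative check (hypothetical values):

    assert (resolve_fingerprint({'a': 1, 'b': 2})
            == resolve_fingerprint({'b': 2, 'a': 1}))
    # Any change to the inputs yields a different SHA-256 hex digest.
    assert resolve_fingerprint({'a': 1}) != resolve_fingerprint({'a': 2})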
731
732
733 def find_and_reconcile_image(ec2resource, name, fingerprint):
734 """Attempt to find an existing EC2 AMI with a name and fingerprint.
735
736 If an image with the specified fingerprint is found, it is returned.
737 Otherwise None is returned.
738
739 Existing images for the specified name that don't have the specified
740 fingerprint or are missing required metadata are deleted.
741 """
742 # Find existing AMIs with this name and delete the ones that are invalid.
743 # Store a reference to a good image so it can be returned once the
744 # image state is reconciled.
745 images = ec2resource.images.filter(
746 Filters=[{'Name': 'name', 'Values': [name]}])
747
748 existing_image = None
749
750 for image in images:
751 if image.tags is None:
752 print('image %s for %s lacks required tags; removing' % (
753 image.id, image.name))
754 remove_ami(ec2resource, image)
755 else:
756 tags = {t['Key']: t['Value'] for t in image.tags}
757
758 if tags.get('HGIMAGEFINGERPRINT') == fingerprint:
759 existing_image = image
760 else:
761 print('image %s for %s has wrong fingerprint; removing' % (
762 image.id, image.name))
763 remove_ami(ec2resource, image)
764
765 return existing_image
766
767
768 def create_ami_from_instance(ec2client, instance, name, description,
769 fingerprint):
770 """Create an AMI from a running instance.
771
772 Returns the ``ec2resource.Image`` representing the created AMI.
773 """
774 instance.stop()
775
776 ec2client.get_waiter('instance_stopped').wait(
777 InstanceIds=[instance.id],
778 WaiterConfig={
779 'Delay': 5,
780 })
781 print('%s is stopped' % instance.id)
782
783 image = instance.create_image(
784 Name=name,
785 Description=description,
786 )
787
788 image.create_tags(Tags=[
789 {
790 'Key': 'HGIMAGEFINGERPRINT',
791 'Value': fingerprint,
792 },
793 ])
794
795 print('waiting for image %s' % image.id)
796
797 ec2client.get_waiter('image_available').wait(
798 ImageIds=[image.id],
799 )
800
801 print('image %s available as %s' % (image.id, image.name))
802
803 return image
804
805
806 def ensure_linux_dev_ami(c: AWSConnection, distro='debian9', prefix='hg-'):
807 """Ensures a Linux development AMI is available and up-to-date.
808
809 Returns an ``ec2.Image`` of either an existing AMI or a newly-built one.
810 """
811 ec2client = c.ec2client
812 ec2resource = c.ec2resource
813
814 name = '%s%s-%s' % (prefix, 'linux-dev', distro)
815
816 if distro == 'debian9':
817 image = find_image(
818 ec2resource,
819 DEBIAN_ACCOUNT_ID,
820 'debian-stretch-hvm-x86_64-gp2-2019-02-19-26620',
821 )
822 ssh_username = 'admin'
823 elif distro == 'ubuntu18.04':
824 image = find_image(
825 ec2resource,
826 UBUNTU_ACCOUNT_ID,
827 'ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-20190403',
828 )
829 ssh_username = 'ubuntu'
830 elif distro == 'ubuntu18.10':
831 image = find_image(
832 ec2resource,
833 UBUNTU_ACCOUNT_ID,
834 'ubuntu/images/hvm-ssd/ubuntu-cosmic-18.10-amd64-server-20190402',
835 )
836 ssh_username = 'ubuntu'
837 elif distro == 'ubuntu19.04':
838 image = find_image(
839 ec2resource,
840 UBUNTU_ACCOUNT_ID,
841 'ubuntu/images/hvm-ssd/ubuntu-disco-19.04-amd64-server-20190417',
842 )
843 ssh_username = 'ubuntu'
844 else:
845 raise ValueError('unsupported Linux distro: %s' % distro)
846
847 config = {
848 'BlockDeviceMappings': [
849 {
850 'DeviceName': image.block_device_mappings[0]['DeviceName'],
851 'Ebs': {
852 'DeleteOnTermination': True,
853 'VolumeSize': 8,
854 'VolumeType': 'gp2',
855 },
856 },
857 ],
858 'EbsOptimized': True,
859 'ImageId': image.id,
860 'InstanceInitiatedShutdownBehavior': 'stop',
861 # 8 VCPUs for compiling Python.
862 'InstanceType': 't3.2xlarge',
863 'KeyName': '%sautomation' % prefix,
864 'MaxCount': 1,
865 'MinCount': 1,
866 'SecurityGroupIds': [c.security_groups['linux-dev-1'].id],
867 }
868
869 requirements2_path = (pathlib.Path(__file__).parent.parent /
870 'linux-requirements-py2.txt')
871 requirements3_path = (pathlib.Path(__file__).parent.parent /
872 'linux-requirements-py3.txt')
873 with requirements2_path.open('r', encoding='utf-8') as fh:
874 requirements2 = fh.read()
875 with requirements3_path.open('r', encoding='utf-8') as fh:
876 requirements3 = fh.read()
877
878 # Compute a deterministic fingerprint to determine whether image needs to
879 # be regenerated.
880 fingerprint = resolve_fingerprint({
881 'instance_config': config,
882 'bootstrap_script': BOOTSTRAP_DEBIAN,
883 'requirements_py2': requirements2,
884 'requirements_py3': requirements3,
885 })
886
887 existing_image = find_and_reconcile_image(ec2resource, name, fingerprint)
888
889 if existing_image:
890 return existing_image
891
892 print('no suitable %s image found; creating one...' % name)
893
894 with temporary_ec2_instances(ec2resource, config) as instances:
895 wait_for_ip_addresses(instances)
896
897 instance = instances[0]
898
899 client = wait_for_ssh(
900 instance.public_ip_address, 22,
901 username=ssh_username,
902 key_filename=str(c.key_pair_path_private('automation')))
903
904 home = '/home/%s' % ssh_username
905
906 with client:
907 print('connecting to SSH server')
908 sftp = client.open_sftp()
909
910 print('uploading bootstrap files')
911 with sftp.open('%s/bootstrap' % home, 'wb') as fh:
912 fh.write(BOOTSTRAP_DEBIAN)
913 fh.chmod(0o0700)
914
915 with sftp.open('%s/requirements-py2.txt' % home, 'wb') as fh:
916 fh.write(requirements2)
917 fh.chmod(0o0700)
918
919 with sftp.open('%s/requirements-py3.txt' % home, 'wb') as fh:
920 fh.write(requirements3)
921 fh.chmod(0o0700)
922
923 print('executing bootstrap')
924 chan, stdin, stdout = ssh_exec_command(client,
925 '%s/bootstrap' % home)
926 stdin.close()
927
928 for line in stdout:
929 print(line, end='')
930
931 res = chan.recv_exit_status()
932 if res:
933 raise Exception('non-0 exit from bootstrap: %d' % res)
934
935 print('bootstrap completed; stopping %s to create %s' % (
936 instance.id, name))
937
938 return create_ami_from_instance(ec2client, instance, name,
939 'Mercurial Linux development environment',
940 fingerprint)
941
942
943 @contextlib.contextmanager
944 def temporary_linux_dev_instances(c: AWSConnection, image, instance_type,
945 prefix='hg-', ensure_extra_volume=False):
946 """Create temporary Linux development EC2 instances.
947
948 Context manager resolves to a list of ``ec2.Instance`` that were created
949 and are running.
950
951 ``ensure_extra_volume`` can be set to ``True`` to require that instances
952 have a 2nd storage volume available other than the primary AMI volume.
953 For instance types with instance storage, this does nothing special.
954 But for instance types without instance storage, an additional EBS volume
955 will be added to the instance.
956
957 Instances have an ``ssh_client`` attribute containing a paramiko SSHClient
958 instance bound to the instance.
959
960 Instances have an ``ssh_private_key_path`` attribute containing the
961 str path to the SSH private key used to connect to the instance.
962 """
963
964 block_device_mappings = [
965 {
966 'DeviceName': image.block_device_mappings[0]['DeviceName'],
967 'Ebs': {
968 'DeleteOnTermination': True,
969 'VolumeSize': 8,
970 'VolumeType': 'gp2',
971 },
972 }
973 ]
974
975 # This is not an exhaustive list of instance types having instance storage.
976 # But it covers the instance types this automation is likely to use.
977 if (ensure_extra_volume
978 and not instance_type.startswith(tuple(INSTANCE_TYPES_WITH_STORAGE))):
979 main_device = block_device_mappings[0]['DeviceName']
980
981 if main_device == 'xvda':
982 second_device = 'xvdb'
983 elif main_device == '/dev/sda1':
984 second_device = '/dev/sdb'
985 else:
986 raise ValueError('unhandled primary EBS device name: %s' %
987 main_device)
988
989 block_device_mappings.append({
990 'DeviceName': second_device,
991 'Ebs': {
992 'DeleteOnTermination': True,
993 'VolumeSize': 8,
994 'VolumeType': 'gp2',
995 }
996 })
997
998 config = {
999 'BlockDeviceMappings': block_device_mappings,
1000 'EbsOptimized': True,
1001 'ImageId': image.id,
1002 'InstanceInitiatedShutdownBehavior': 'terminate',
1003 'InstanceType': instance_type,
1004 'KeyName': '%sautomation' % prefix,
1005 'MaxCount': 1,
1006 'MinCount': 1,
1007 'SecurityGroupIds': [c.security_groups['linux-dev-1'].id],
1008 }
1009
1010 with temporary_ec2_instances(c.ec2resource, config) as instances:
1011 wait_for_ip_addresses(instances)
1012
1013 ssh_private_key_path = str(c.key_pair_path_private('automation'))
1014
1015 for instance in instances:
1016 client = wait_for_ssh(
1017 instance.public_ip_address, 22,
1018 username='hg',
1019 key_filename=ssh_private_key_path)
1020
1021 instance.ssh_client = client
1022 instance.ssh_private_key_path = ssh_private_key_path
1023
1024 try:
1025 yield instances
1026 finally:
1027 for instance in instances:
1028 instance.ssh_client.close()
1029
1030
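A hedged usage sketch of the context manager above (``c`` is an ``AWSConnection``; the distro and instance type are arbitrary examples):

    image = ensure_linux_dev_ami(c, distro='debian9')
    with temporary_linux_dev_instances(c, image, 't3.large') as insts:
        for inst in insts:
            # ssh_client is a paramiko SSHClient bound to the instance.
            stdin, stdout, stderr = inst.ssh_client.exec_command('uname -a')
            print(stdout.read().decode('utf-8'))
    # On exit, SSH clients are closed and the instances terminated.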
674 def ensure_windows_dev_ami(c: AWSConnection, prefix='hg-'):
1031 def ensure_windows_dev_ami(c: AWSConnection, prefix='hg-'):
675 """Ensure Windows Development AMI is available and up-to-date.
1032 """Ensure Windows Development AMI is available and up-to-date.
676
1033
@@ -689,6 +1046,10 b' def ensure_windows_dev_ami(c: AWSConnect'
689
1046
690 name = '%s%s' % (prefix, 'windows-dev')
1047 name = '%s%s' % (prefix, 'windows-dev')
691
1048
1049 image = find_image(ec2resource,
1050 '801119661308',
1051 'Windows_Server-2019-English-Full-Base-2019.02.13')
1052
692 config = {
1053 config = {
693 'BlockDeviceMappings': [
1054 'BlockDeviceMappings': [
694 {
1055 {
@@ -700,7 +1061,7 b' def ensure_windows_dev_ami(c: AWSConnect'
700 },
1061 },
701 }
1062 }
702 ],
1063 ],
703 'ImageId': find_windows_server_2019_image(ec2resource).id,
1064 'ImageId': image.id,
704 'InstanceInitiatedShutdownBehavior': 'stop',
1065 'InstanceInitiatedShutdownBehavior': 'stop',
705 'InstanceType': 't3.medium',
1066 'InstanceType': 't3.medium',
706 'KeyName': '%sautomation' % prefix,
1067 'KeyName': '%sautomation' % prefix,
@@ -735,38 +1096,14 b' def ensure_windows_dev_ami(c: AWSConnect'
735
1096
736 # Compute a deterministic fingerprint to determine whether image needs
1097 # Compute a deterministic fingerprint to determine whether image needs
737 # to be regenerated.
1098 # to be regenerated.
738 fingerprint = {
1099 fingerprint = resolve_fingerprint({
739 'instance_config': config,
1100 'instance_config': config,
740 'user_data': WINDOWS_USER_DATA,
1101 'user_data': WINDOWS_USER_DATA,
741 'initial_bootstrap': WINDOWS_BOOTSTRAP_POWERSHELL,
1102 'initial_bootstrap': WINDOWS_BOOTSTRAP_POWERSHELL,
742 'bootstrap_commands': commands,
1103 'bootstrap_commands': commands,
743 }
1104 })
744
745 fingerprint = json.dumps(fingerprint, sort_keys=True)
746 fingerprint = hashlib.sha256(fingerprint.encode('utf-8')).hexdigest()
747
748 # Find existing AMIs with this name and delete the ones that are invalid.
749 # Store a reference to a good image so it can be returned one the
750 # image state is reconciled.
751 images = ec2resource.images.filter(
752 Filters=[{'Name': 'name', 'Values': [name]}])
753
754 existing_image = None
755
1105
756 for image in images:
1106 existing_image = find_and_reconcile_image(ec2resource, name, fingerprint)
757 if image.tags is None:
758 print('image %s for %s lacks required tags; removing' % (
759 image.id, image.name))
760 remove_ami(ec2resource, image)
761 else:
762 tags = {t['Key']: t['Value'] for t in image.tags}
763
764 if tags.get('HGIMAGEFINGERPRINT') == fingerprint:
765 existing_image = image
766 else:
767 print('image %s for %s has wrong fingerprint; removing' % (
768 image.id, image.name))
769 remove_ami(ec2resource, image)
770
1107
771 if existing_image:
1108 if existing_image:
772 return existing_image
1109 return existing_image
@@ -795,10 +1132,26 b' def ensure_windows_dev_ami(c: AWSConnect'
795 )
1132 )
796
1133
797 # Reboot so all updates are fully applied.
1134 # Reboot so all updates are fully applied.
1135 #
1136 # We don't use instance.reboot() here because it is asynchronous and
1137 # we don't know when exactly the instance has rebooted. It could take
1138 # a while to stop and we may start trying to interact with the instance
1139 # before it has rebooted.
798 print('rebooting instance %s' % instance.id)
1140 print('rebooting instance %s' % instance.id)
799 ec2client.reboot_instances(InstanceIds=[instance.id])
1141 instance.stop()
1142 ec2client.get_waiter('instance_stopped').wait(
1143 InstanceIds=[instance.id],
1144 WaiterConfig={
1145 'Delay': 5,
1146 })
800
1147
801 time.sleep(15)
1148 instance.start()
1149 wait_for_ip_addresses([instance])
1150
1151 # There is a race condition here between the User Data PS script running
1152 # and us connecting to WinRM. This can manifest as
1153 # "AuthorizationManager check failed" failures during run_powershell().
1154 # TODO figure out a workaround.
802
1155
803 print('waiting for Windows Remote Management to come back...')
1156 print('waiting for Windows Remote Management to come back...')
804 client = wait_for_winrm(instance.public_ip_address, 'Administrator',
1157 client = wait_for_winrm(instance.public_ip_address, 'Administrator',
@@ -810,36 +1163,9 b' def ensure_windows_dev_ami(c: AWSConnect'
810 run_powershell(instance.winrm_client, '\n'.join(commands))
1163 run_powershell(instance.winrm_client, '\n'.join(commands))
811
1164
812 print('bootstrap completed; stopping %s to create image' % instance.id)
1165 print('bootstrap completed; stopping %s to create image' % instance.id)
813 instance.stop()
1166 return create_ami_from_instance(ec2client, instance, name,
814
1167 'Mercurial Windows development environment',
815 ec2client.get_waiter('instance_stopped').wait(
1168 fingerprint)
816 InstanceIds=[instance.id],
817 WaiterConfig={
818 'Delay': 5,
819 })
820 print('%s is stopped' % instance.id)
821
822 image = instance.create_image(
823 Name=name,
824 Description='Mercurial Windows development environment',
825 )
826
827 image.create_tags(Tags=[
828 {
829 'Key': 'HGIMAGEFINGERPRINT',
830 'Value': fingerprint,
831 },
832 ])
833
834 print('waiting for image %s' % image.id)
835
836 ec2client.get_waiter('image_available').wait(
837 ImageIds=[image.id],
838 )
839
840 print('image %s available as %s' % (image.id, image.name))
841
842 return image
843
1169
844
1170
845 @contextlib.contextmanager
1171 @contextlib.contextmanager
@@ -8,12 +8,15 b''
8 # no-check-code because Python 3 native.
8 # no-check-code because Python 3 native.
9
9
10 import argparse
10 import argparse
11 import concurrent.futures as futures
11 import os
12 import os
12 import pathlib
13 import pathlib
14 import time
13
15
14 from . import (
16 from . import (
15 aws,
17 aws,
16 HGAutomation,
18 HGAutomation,
19 linux,
17 windows,
20 windows,
18 )
21 )
19
22
@@ -22,6 +25,33 b' SOURCE_ROOT = pathlib.Path(os.path.abspa'
22 DIST_PATH = SOURCE_ROOT / 'dist'
25 DIST_PATH = SOURCE_ROOT / 'dist'
23
26
24
27
28 def bootstrap_linux_dev(hga: HGAutomation, aws_region, distros=None,
29 parallel=False):
30 c = hga.aws_connection(aws_region)
31
32 if distros:
33 distros = distros.split(',')
34 else:
35 distros = sorted(linux.DISTROS)
36
37 # TODO There is a wonky interaction involving KeyboardInterrupt whereby
38 # the context manager that is supposed to terminate the temporary EC2
39 # instance doesn't run. Until we fix this, make parallel building opt-in
40 # so we don't orphan instances.
41 if parallel:
42 fs = []
43
44 with futures.ThreadPoolExecutor(len(distros)) as e:
45 for distro in distros:
46 fs.append(e.submit(aws.ensure_linux_dev_ami, c, distro=distro))
47
48 for f in fs:
49 f.result()
50 else:
51 for distro in distros:
52 aws.ensure_linux_dev_ami(c, distro=distro)
53
54
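A hypothetical invocation of this helper via the subcommand registered later in this file::

    $ automation.py bootstrap-linux-dev --distros debian9,ubuntu18.04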
25 def bootstrap_windows_dev(hga: HGAutomation, aws_region):
55 def bootstrap_windows_dev(hga: HGAutomation, aws_region):
26 c = hga.aws_connection(aws_region)
56 c = hga.aws_connection(aws_region)
27 image = aws.ensure_windows_dev_ami(c)
57 image = aws.ensure_windows_dev_ami(c)
@@ -73,7 +103,8 b' def build_windows_wheel(hga: HGAutomatio'
73 windows.build_wheel(instance.winrm_client, a, DIST_PATH)
103 windows.build_wheel(instance.winrm_client, a, DIST_PATH)
74
104
75
105
76 def build_all_windows_packages(hga: HGAutomation, aws_region, revision):
106 def build_all_windows_packages(hga: HGAutomation, aws_region, revision,
107 version):
77 c = hga.aws_connection(aws_region)
108 c = hga.aws_connection(aws_region)
78 image = aws.ensure_windows_dev_ami(c)
109 image = aws.ensure_windows_dev_ami(c)
79 DIST_PATH.mkdir(exist_ok=True)
110 DIST_PATH.mkdir(exist_ok=True)
@@ -89,19 +120,52 b' def build_all_windows_packages(hga: HGAu'
89 windows.purge_hg(winrm_client)
120 windows.purge_hg(winrm_client)
90 windows.build_wheel(winrm_client, arch, DIST_PATH)
121 windows.build_wheel(winrm_client, arch, DIST_PATH)
91 windows.purge_hg(winrm_client)
122 windows.purge_hg(winrm_client)
92 windows.build_inno_installer(winrm_client, arch, DIST_PATH)
123 windows.build_inno_installer(winrm_client, arch, DIST_PATH,
124 version=version)
93 windows.purge_hg(winrm_client)
125 windows.purge_hg(winrm_client)
94 windows.build_wix_installer(winrm_client, arch, DIST_PATH)
126 windows.build_wix_installer(winrm_client, arch, DIST_PATH,
127 version=version)
95
128
96
129
97 def terminate_ec2_instances(hga: HGAutomation, aws_region):
130 def terminate_ec2_instances(hga: HGAutomation, aws_region):
98 c = hga.aws_connection(aws_region)
131 c = hga.aws_connection(aws_region, ensure_ec2_state=False)
99 aws.terminate_ec2_instances(c.ec2resource)
132 aws.terminate_ec2_instances(c.ec2resource)
100
133
101
134
102 def purge_ec2_resources(hga: HGAutomation, aws_region):
135 def purge_ec2_resources(hga: HGAutomation, aws_region):
136 c = hga.aws_connection(aws_region, ensure_ec2_state=False)
137 aws.remove_resources(c)
138
139
140 def run_tests_linux(hga: HGAutomation, aws_region, instance_type,
141 python_version, test_flags, distro, filesystem):
103 c = hga.aws_connection(aws_region)
142 c = hga.aws_connection(aws_region)
104 aws.remove_resources(c)
143 image = aws.ensure_linux_dev_ami(c, distro=distro)
144
145 t_start = time.time()
146
147 ensure_extra_volume = filesystem not in ('default', 'tmpfs')
148
149 with aws.temporary_linux_dev_instances(
150 c, image, instance_type,
151 ensure_extra_volume=ensure_extra_volume) as insts:
152
153 instance = insts[0]
154
155 linux.prepare_exec_environment(instance.ssh_client,
156 filesystem=filesystem)
157 linux.synchronize_hg(SOURCE_ROOT, instance, '.')
158 t_prepared = time.time()
159 linux.run_tests(instance.ssh_client, python_version,
160 test_flags)
161 t_done = time.time()
162
163 t_setup = t_prepared - t_start
164 t_all = t_done - t_start
165
166 print(
167 'total time: %.1fs; setup: %.1fs; tests: %.1fs; setup overhead: %.1f%%'
168 % (t_all, t_setup, t_done - t_prepared, t_setup / t_all * 100.0))
105
169
106
170
107 def run_tests_windows(hga: HGAutomation, aws_region, instance_type,
171 def run_tests_windows(hga: HGAutomation, aws_region, instance_type,
@@ -135,6 +199,21 b' def get_parser():'
135 subparsers = parser.add_subparsers()
199 subparsers = parser.add_subparsers()
136
200
137 sp = subparsers.add_parser(
201 sp = subparsers.add_parser(
202 'bootstrap-linux-dev',
203 help='Bootstrap Linux development environments',
204 )
205 sp.add_argument(
206 '--distros',
207 help='Comma delimited list of distros to bootstrap',
208 )
209 sp.add_argument(
210 '--parallel',
211 action='store_true',
212 help='Generate AMIs in parallel (not CTRL-c safe)'
213 )
214 sp.set_defaults(func=bootstrap_linux_dev)
215
216 sp = subparsers.add_parser(
138 'bootstrap-windows-dev',
217 'bootstrap-windows-dev',
139 help='Bootstrap the Windows development environment',
218 help='Bootstrap the Windows development environment',
140 )
219 )
@@ -149,6 +228,10 b' def get_parser():'
149 help='Mercurial revision to build',
228 help='Mercurial revision to build',
150 default='.',
229 default='.',
151 )
230 )
231 sp.add_argument(
232 '--version',
233 help='Mercurial version string to use',
234 )
152 sp.set_defaults(func=build_all_windows_packages)
235 sp.set_defaults(func=build_all_windows_packages)
153
236
154 sp = subparsers.add_parser(
237 sp = subparsers.add_parser(
@@ -226,6 +309,41 b' def get_parser():'
226 sp.set_defaults(func=purge_ec2_resources)
309 sp.set_defaults(func=purge_ec2_resources)
227
310
228 sp = subparsers.add_parser(
311 sp = subparsers.add_parser(
312 'run-tests-linux',
313 help='Run tests on Linux',
314 )
315 sp.add_argument(
316 '--distro',
317 help='Linux distribution to run tests on',
318 choices=linux.DISTROS,
319 default='debian9',
320 )
321 sp.add_argument(
322 '--filesystem',
323 help='Filesystem type to use',
324 choices={'btrfs', 'default', 'ext3', 'ext4', 'jfs', 'tmpfs', 'xfs'},
325 default='default',
326 )
327 sp.add_argument(
328 '--instance-type',
329 help='EC2 instance type to use',
330 default='c5.9xlarge',
331 )
332 sp.add_argument(
333 '--python-version',
334 help='Python version to use',
335 choices={'system2', 'system3', '2.7', '3.5', '3.6', '3.7', '3.8',
336 'pypy', 'pypy3.5', 'pypy3.6'},
337 default='system2',
338 )
339 sp.add_argument(
340 'test_flags',
341 help='Extra command line flags to pass to run-tests.py',
342 nargs='*',
343 )
344 sp.set_defaults(func=run_tests_linux)
345
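For reference, a hypothetical invocation of the subcommand defined above (flag values are examples)::

    $ automation.py run-tests-linux --distro ubuntu18.04 --python-version 3.7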
346 sp = subparsers.add_parser(
229 'run-tests-windows',
347 'run-tests-windows',
230 help='Run tests on Windows',
348 help='Run tests on Windows',
231 )
349 )
@@ -39,7 +39,7 b' Write-Output "activating Visual Studio 2'
39 $Env:PATH = "${root}\VC\Bin;${root}\WinSDK\Bin;$Env:PATH"
39 $Env:PATH = "${root}\VC\Bin;${root}\WinSDK\Bin;$Env:PATH"
40 $Env:INCLUDE = "${root}\VC\Include;${root}\WinSDK\Include;$Env:INCLUDE"
40 $Env:INCLUDE = "${root}\VC\Include;${root}\WinSDK\Include;$Env:INCLUDE"
41 $Env:LIB = "${root}\VC\Lib;${root}\WinSDK\Lib;$Env:LIB"
41 $Env:LIB = "${root}\VC\Lib;${root}\WinSDK\Lib;$Env:LIB"
42 $Env:LIBPATH = "${root}\VC\lib;${root}\WinSDK\Lib:$Env:LIBPATH"
42 $Env:LIBPATH = "${root}\VC\lib;${root}\WinSDK\Lib;$Env:LIBPATH"
43 '''.lstrip()
43 '''.lstrip()
44
44
45 HG_PURGE = r'''
45 HG_PURGE = r'''
@@ -156,6 +156,10 b' def synchronize_hg(hg_repo: pathlib.Path'
156 fh.write(' UserKnownHostsFile %s\n' % (ssh_dir / 'known_hosts'))
156 fh.write(' UserKnownHostsFile %s\n' % (ssh_dir / 'known_hosts'))
157 fh.write(' IdentityFile %s\n' % (ssh_dir / 'id_rsa'))
157 fh.write(' IdentityFile %s\n' % (ssh_dir / 'id_rsa'))
158
158
159 if not (hg_repo / '.hg').is_dir():
160 raise Exception('%s is not a Mercurial repository; '
161 'synchronization not yet supported' % hg_repo)
162
159 env = dict(os.environ)
163 env = dict(os.environ)
160 env['HGPLAIN'] = '1'
164 env['HGPLAIN'] = '1'
161 env['HGENCODING'] = 'utf-8'
165 env['HGENCODING'] = 'utf-8'
@@ -172,7 +176,8 b' def synchronize_hg(hg_repo: pathlib.Path'
172 'python2.7', hg_bin,
176 'python2.7', hg_bin,
173 '--config', 'ui.ssh=ssh -F %s' % ssh_config,
177 '--config', 'ui.ssh=ssh -F %s' % ssh_config,
174 '--config', 'ui.remotecmd=c:/hgdev/venv-bootstrap/Scripts/hg.exe',
178 '--config', 'ui.remotecmd=c:/hgdev/venv-bootstrap/Scripts/hg.exe',
175 'push', '-r', full_revision, 'ssh://%s/c:/hgdev/src' % public_ip,
179 'push', '-f', '-r', full_revision,
180 'ssh://%s/c:/hgdev/src' % public_ip,
176 ]
181 ]
177
182
178 subprocess.run(args, cwd=str(hg_repo), env=env, check=True)
183 subprocess.run(args, cwd=str(hg_repo), env=env, check=True)
@@ -25,7 +25,7 b' import requests.exceptions'
25 logger = logging.getLogger(__name__)
25 logger = logging.getLogger(__name__)
26
26
27
27
28 def wait_for_winrm(host, username, password, timeout=120, ssl=False):
28 def wait_for_winrm(host, username, password, timeout=180, ssl=False):
29 """Wait for the Windows Remoting (WinRM) service to become available.
29 """Wait for the Windows Remoting (WinRM) service to become available.
30
30
31 Returns a ``psrpclient.Client`` instance.
31 Returns a ``psrpclient.Client`` instance.
@@ -8,47 +8,68 b' asn1crypto==0.24.0 \\'
8 --hash=sha256:2f1adbb7546ed199e3c90ef23ec95c5cf3585bac7d11fb7eb562a3fe89c64e87 \
8 --hash=sha256:2f1adbb7546ed199e3c90ef23ec95c5cf3585bac7d11fb7eb562a3fe89c64e87 \
9 --hash=sha256:9d5c20441baf0cb60a4ac34cc447c6c189024b6b4c6cd7877034f4965c464e49 \
9 --hash=sha256:9d5c20441baf0cb60a4ac34cc447c6c189024b6b4c6cd7877034f4965c464e49 \
10 # via cryptography
10 # via cryptography
11 boto3==1.9.111 \
11 bcrypt==3.1.6 \
12 --hash=sha256:06414c75d1f62af7d04fd652b38d1e4fd3cfd6b35bad978466af88e2aaecd00d \
12 --hash=sha256:0ba875eb67b011add6d8c5b76afbd92166e98b1f1efab9433d5dc0fafc76e203 \
13 --hash=sha256:f3b77dff382374773d02411fa47ee408f4f503aeebd837fd9dc9ed8635bc5e8e
13 --hash=sha256:21ed446054c93e209434148ef0b362432bb82bbdaf7beef70a32c221f3e33d1c \
14 botocore==1.12.111 \
14 --hash=sha256:28a0459381a8021f57230954b9e9a65bb5e3d569d2c253c5cac6cb181d71cf23 \
15 --hash=sha256:6af473c52d5e3e7ff82de5334e9fee96b2d5ec2df5d78bc00cd9937e2573a7a8 \
15 --hash=sha256:2aed3091eb6f51c26b7c2fad08d6620d1c35839e7a362f706015b41bd991125e \
16 --hash=sha256:9f5123c7be704b17aeacae99b5842ab17bda1f799dd29134de8c70e0a50a45d7 \
16 --hash=sha256:2fa5d1e438958ea90eaedbf8082c2ceb1a684b4f6c75a3800c6ec1e18ebef96f \
17 --hash=sha256:3a73f45484e9874252002793518da060fb11eaa76c30713faa12115db17d1430 \
18 --hash=sha256:3e489787638a36bb466cd66780e15715494b6d6905ffdbaede94440d6d8e7dba \
19 --hash=sha256:44636759d222baa62806bbceb20e96f75a015a6381690d1bc2eda91c01ec02ea \
20 --hash=sha256:678c21b2fecaa72a1eded0cf12351b153615520637efcadc09ecf81b871f1596 \
21 --hash=sha256:75460c2c3786977ea9768d6c9d8957ba31b5fbeb0aae67a5c0e96aab4155f18c \
22 --hash=sha256:8ac06fb3e6aacb0a95b56eba735c0b64df49651c6ceb1ad1cf01ba75070d567f \
23 --hash=sha256:8fdced50a8b646fff8fa0e4b1c5fd940ecc844b43d1da5a980cb07f2d1b1132f \
24 --hash=sha256:9b2c5b640a2da533b0ab5f148d87fb9989bf9bcb2e61eea6a729102a6d36aef9 \
25 --hash=sha256:a9083e7fa9adb1a4de5ac15f9097eb15b04e2c8f97618f1b881af40abce382e1 \
26 --hash=sha256:b7e3948b8b1a81c5a99d41da5fb2dc03ddb93b5f96fcd3fd27e643f91efa33e1 \
27 --hash=sha256:b998b8ca979d906085f6a5d84f7b5459e5e94a13fc27c28a3514437013b6c2f6 \
28 --hash=sha256:dd08c50bc6f7be69cd7ba0769acca28c846ec46b7a8ddc2acf4b9ac6f8a7457e \
29 --hash=sha256:de5badee458544ab8125e63e39afeedfcf3aef6a6e2282ac159c95ae7472d773 \
30 --hash=sha256:ede2a87333d24f55a4a7338a6ccdccf3eaa9bed081d1737e0db4dbd1a4f7e6b6 \
31 # via paramiko
32 boto3==1.9.137 \
33 --hash=sha256:882cc4869b47b51dae4b4a900769e72171ff00e0b6bca644b2d7a7ad7378f324 \
34 --hash=sha256:cd503a7e7a04f1c14d2801f9727159dfa88c393b4004e98940fa4aa205d920c8
35 botocore==1.12.137 \
36 --hash=sha256:0d95794f6b1239c75e2c5f966221bcd4b68020fddb5676f757531eedbb612ed8 \
37 --hash=sha256:3213cf48cf2ceee10fc3b93221f2cd1c38521cca7584f547d5c086213cc60f35 \
17 # via boto3, s3transfer
38 # via boto3, s3transfer
18 certifi==2019.3.9 \
39 certifi==2019.3.9 \
19 --hash=sha256:59b7658e26ca9c7339e00f8f4636cdfe59d34fa37b9b04f6f9e9926b3cece1a5 \
40 --hash=sha256:59b7658e26ca9c7339e00f8f4636cdfe59d34fa37b9b04f6f9e9926b3cece1a5 \
20 --hash=sha256:b26104d6835d1f5e49452a26eb2ff87fe7090b89dfcaee5ea2212697e1e1d7ae \
41 --hash=sha256:b26104d6835d1f5e49452a26eb2ff87fe7090b89dfcaee5ea2212697e1e1d7ae \
21 # via requests
42 # via requests
22 cffi==1.12.2 \
43 cffi==1.12.3 \
23 --hash=sha256:00b97afa72c233495560a0793cdc86c2571721b4271c0667addc83c417f3d90f \
44 --hash=sha256:041c81822e9f84b1d9c401182e174996f0bae9991f33725d059b771744290774 \
24 --hash=sha256:0ba1b0c90f2124459f6966a10c03794082a2f3985cd699d7d63c4a8dae113e11 \
45 --hash=sha256:046ef9a22f5d3eed06334d01b1e836977eeef500d9b78e9ef693f9380ad0b83d \
25 --hash=sha256:0bffb69da295a4fc3349f2ec7cbe16b8ba057b0a593a92cbe8396e535244ee9d \
46 --hash=sha256:066bc4c7895c91812eff46f4b1c285220947d4aa46fa0a2651ff85f2afae9c90 \
26 --hash=sha256:21469a2b1082088d11ccd79dd84157ba42d940064abbfa59cf5f024c19cf4891 \
47 --hash=sha256:066c7ff148ae33040c01058662d6752fd73fbc8e64787229ea8498c7d7f4041b \
27 --hash=sha256:2e4812f7fa984bf1ab253a40f1f4391b604f7fc424a3e21f7de542a7f8f7aedf \
48 --hash=sha256:2444d0c61f03dcd26dbf7600cf64354376ee579acad77aef459e34efcb438c63 \
28 --hash=sha256:2eac2cdd07b9049dd4e68449b90d3ef1adc7c759463af5beb53a84f1db62e36c \
49 --hash=sha256:300832850b8f7967e278870c5d51e3819b9aad8f0a2c8dbe39ab11f119237f45 \
29 --hash=sha256:2f9089979d7456c74d21303c7851f158833d48fb265876923edcb2d0194104ed \
50 --hash=sha256:34c77afe85b6b9e967bd8154e3855e847b70ca42043db6ad17f26899a3df1b25 \
30 --hash=sha256:3dd13feff00bddb0bd2d650cdb7338f815c1789a91a6f68fdc00e5c5ed40329b \
51 --hash=sha256:46de5fa00f7ac09f020729148ff632819649b3e05a007d286242c4882f7b1dc3 \
31 --hash=sha256:4065c32b52f4b142f417af6f33a5024edc1336aa845b9d5a8d86071f6fcaac5a \
52 --hash=sha256:4aa8ee7ba27c472d429b980c51e714a24f47ca296d53f4d7868075b175866f4b \
32 --hash=sha256:51a4ba1256e9003a3acf508e3b4f4661bebd015b8180cc31849da222426ef585 \
53 --hash=sha256:4d0004eb4351e35ed950c14c11e734182591465a33e960a4ab5e8d4f04d72647 \
33 --hash=sha256:59888faac06403767c0cf8cfb3f4a777b2939b1fbd9f729299b5384f097f05ea \
54 --hash=sha256:4e3d3f31a1e202b0f5a35ba3bc4eb41e2fc2b11c1eff38b362de710bcffb5016 \
34 --hash=sha256:59c87886640574d8b14910840327f5cd15954e26ed0bbd4e7cef95fa5aef218f \
55 --hash=sha256:50bec6d35e6b1aaeb17f7c4e2b9374ebf95a8975d57863546fa83e8d31bdb8c4 \
35 --hash=sha256:610fc7d6db6c56a244c2701575f6851461753c60f73f2de89c79bbf1cc807f33 \
56 --hash=sha256:55cad9a6df1e2a1d62063f79d0881a414a906a6962bc160ac968cc03ed3efcfb \
36 --hash=sha256:70aeadeecb281ea901bf4230c6222af0248c41044d6f57401a614ea59d96d145 \
57 --hash=sha256:5662ad4e4e84f1eaa8efce5da695c5d2e229c563f9d5ce5b0113f71321bcf753 \
37 --hash=sha256:71e1296d5e66c59cd2c0f2d72dc476d42afe02aeddc833d8e05630a0551dad7a \
58 --hash=sha256:59b4dc008f98fc6ee2bb4fd7fc786a8d70000d058c2bbe2698275bc53a8d3fa7 \
38 --hash=sha256:8fc7a49b440ea752cfdf1d51a586fd08d395ff7a5d555dc69e84b1939f7ddee3 \
59 --hash=sha256:73e1ffefe05e4ccd7bcea61af76f36077b914f92b76f95ccf00b0c1b9186f3f9 \
39 --hash=sha256:9b5c2afd2d6e3771d516045a6cfa11a8da9a60e3d128746a7fe9ab36dfe7221f \
60 --hash=sha256:a1f0fd46eba2d71ce1589f7e50a9e2ffaeb739fb2c11e8192aa2b45d5f6cc41f \
40 --hash=sha256:9c759051ebcb244d9d55ee791259ddd158188d15adee3c152502d3b69005e6bd \
61 --hash=sha256:a2e85dc204556657661051ff4bab75a84e968669765c8a2cd425918699c3d0e8 \
41 --hash=sha256:b4d1011fec5ec12aa7cc10c05a2f2f12dfa0adfe958e56ae38dc140614035804 \
62 --hash=sha256:a5457d47dfff24882a21492e5815f891c0ca35fefae8aa742c6c263dac16ef1f \
42 --hash=sha256:b4f1d6332339ecc61275bebd1f7b674098a66fea11a00c84d1c58851e618dc0d \
63 --hash=sha256:a8dccd61d52a8dae4a825cdbb7735da530179fea472903eb871a5513b5abbfdc \
43 --hash=sha256:c030cda3dc8e62b814831faa4eb93dd9a46498af8cd1d5c178c2de856972fd92 \
64 --hash=sha256:ae61af521ed676cf16ae94f30fe202781a38d7178b6b4ab622e4eec8cefaff42 \
44 --hash=sha256:c2e1f2012e56d61390c0e668c20c4fb0ae667c44d6f6a2eeea5d7148dcd3df9f \
65 --hash=sha256:b012a5edb48288f77a63dba0840c92d0504aa215612da4541b7b42d849bc83a3 \
45 --hash=sha256:c37c77d6562074452120fc6c02ad86ec928f5710fbc435a181d69334b4de1d84 \
66 --hash=sha256:d2c5cfa536227f57f97c92ac30c8109688ace8fa4ac086d19d0af47d134e2909 \
46 --hash=sha256:c8149780c60f8fd02752d0429246088c6c04e234b895c4a42e1ea9b4de8d27fb \
67 --hash=sha256:d42b5796e20aacc9d15e66befb7a345454eef794fdb0737d1af593447c6c8f45 \
47 --hash=sha256:cbeeef1dc3c4299bd746b774f019de9e4672f7cc666c777cd5b409f0b746dac7 \
68 --hash=sha256:dee54f5d30d775f525894d67b1495625dd9322945e7fee00731952e0368ff42d \
48 --hash=sha256:e113878a446c6228669144ae8a56e268c91b7f1fafae927adc4879d9849e0ea7 \
69 --hash=sha256:e070535507bd6aa07124258171be2ee8dfc19119c28ca94c9dfb7efd23564512 \
49 --hash=sha256:e21162bf941b85c0cda08224dade5def9360f53b09f9f259adb85fc7dd0e7b35 \
70 --hash=sha256:e1ff2748c84d97b065cc95429814cdba39bcbd77c9c85c89344b317dc0d9cbff \
50 --hash=sha256:fb6934ef4744becbda3143d30c6604718871495a5e36c408431bf33d9c146889 \
71 --hash=sha256:ed851c75d1e0e043cbf5ca9a8e1b13c4c90f3fbd863dacb01c0808e2b5204201 \
51 # via cryptography
72 # via bcrypt, cryptography, pynacl
52 chardet==3.0.4 \
73 chardet==3.0.4 \
53 --hash=sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae \
74 --hash=sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae \
54 --hash=sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691 \
75 --hash=sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691 \
@@ -73,7 +94,7 b' cryptography==2.6.1 \\'
73 --hash=sha256:d4afbb0840f489b60f5a580a41a1b9c3622e08ecb5eec8614d4fb4cd914c4460 \
94 --hash=sha256:d4afbb0840f489b60f5a580a41a1b9c3622e08ecb5eec8614d4fb4cd914c4460 \
74 --hash=sha256:d9ed28030797c00f4bc43c86bf819266c76a5ea61d006cd4078a93ebf7da6bfd \
95 --hash=sha256:d9ed28030797c00f4bc43c86bf819266c76a5ea61d006cd4078a93ebf7da6bfd \
75 --hash=sha256:e603aa7bb52e4e8ed4119a58a03b60323918467ef209e6ff9db3ac382e5cf2c6 \
96 --hash=sha256:e603aa7bb52e4e8ed4119a58a03b60323918467ef209e6ff9db3ac382e5cf2c6 \
76 # via pypsrp
97 # via paramiko, pypsrp
77 docutils==0.14 \
98 docutils==0.14 \
78 --hash=sha256:02aec4bd92ab067f6ff27a38a38a41173bf01bed8f89157768c1573f53e474a6 \
99 --hash=sha256:02aec4bd92ab067f6ff27a38a38a41173bf01bed8f89157768c1573f53e474a6 \
79 --hash=sha256:51e64ef2ebfb29cae1faa133b3710143496eca21c530f3f71424d77687764274 \
100 --hash=sha256:51e64ef2ebfb29cae1faa133b3710143496eca21c530f3f71424d77687764274 \
@@ -87,13 +108,41 b' jmespath==0.9.4 \\'
87 --hash=sha256:3720a4b1bd659dd2eecad0666459b9788813e032b83e7ba58578e48254e0a0e6 \
108 --hash=sha256:3720a4b1bd659dd2eecad0666459b9788813e032b83e7ba58578e48254e0a0e6 \
88 --hash=sha256:bde2aef6f44302dfb30320115b17d030798de8c4110e28d5cf6cf91a7a31074c \
109 --hash=sha256:bde2aef6f44302dfb30320115b17d030798de8c4110e28d5cf6cf91a7a31074c \
89 # via boto3, botocore
110 # via boto3, botocore
90 ntlm-auth==1.2.0 \
111 ntlm-auth==1.3.0 \
91 --hash=sha256:7bc02a3fbdfee7275d3dc20fce8028ed8eb6d32364637f28be9e9ae9160c6d5c \
112 --hash=sha256:bb2fd03c665f0f62c5f65695b62dcdb07fb7a45df6ebc86c770be2054d6902dd \
92 --hash=sha256:9b13eaf88f16a831637d75236a93d60c0049536715aafbf8190ba58a590b023e \
113 --hash=sha256:ce5b4483ed761f341a538a426a71a52e5a9cf5fd834ebef1d2090f9eef14b3f8 \
93 # via pypsrp
114 # via pypsrp
115 paramiko==2.4.2 \
116 --hash=sha256:3c16b2bfb4c0d810b24c40155dbfd113c0521e7e6ee593d704e84b4c658a1f3b \
117 --hash=sha256:a8975a7df3560c9f1e2b43dc54ebd40fd00a7017392ca5445ce7df409f900fcb
118 pyasn1==0.4.5 \
119 --hash=sha256:da2420fe13a9452d8ae97a0e478adde1dee153b11ba832a95b223a2ba01c10f7 \
120 --hash=sha256:da6b43a8c9ae93bc80e2739efb38cc776ba74a886e3e9318d65fe81a8b8a2c6e \
121 # via paramiko
94 pycparser==2.19 \
122 pycparser==2.19 \
95 --hash=sha256:a988718abfad80b6b157acce7bf130a30876d27603738ac39f140993246b25b3 \
123 --hash=sha256:a988718abfad80b6b157acce7bf130a30876d27603738ac39f140993246b25b3 \
96 # via cffi
124 # via cffi
125 pynacl==1.3.0 \
126 --hash=sha256:05c26f93964373fc0abe332676cb6735f0ecad27711035b9472751faa8521255 \
127 --hash=sha256:0c6100edd16fefd1557da078c7a31e7b7d7a52ce39fdca2bec29d4f7b6e7600c \
128 --hash=sha256:0d0a8171a68edf51add1e73d2159c4bc19fc0718e79dec51166e940856c2f28e \
129 --hash=sha256:1c780712b206317a746ace34c209b8c29dbfd841dfbc02aa27f2084dd3db77ae \
130 --hash=sha256:2424c8b9f41aa65bbdbd7a64e73a7450ebb4aa9ddedc6a081e7afcc4c97f7621 \
131 --hash=sha256:2d23c04e8d709444220557ae48ed01f3f1086439f12dbf11976e849a4926db56 \
132 --hash=sha256:30f36a9c70450c7878053fa1344aca0145fd47d845270b43a7ee9192a051bf39 \
133 --hash=sha256:37aa336a317209f1bb099ad177fef0da45be36a2aa664507c5d72015f956c310 \
134 --hash=sha256:4943decfc5b905748f0756fdd99d4f9498d7064815c4cf3643820c9028b711d1 \
135 --hash=sha256:57ef38a65056e7800859e5ba9e6091053cd06e1038983016effaffe0efcd594a \
136 --hash=sha256:5bd61e9b44c543016ce1f6aef48606280e45f892a928ca7068fba30021e9b786 \
137 --hash=sha256:6482d3017a0c0327a49dddc8bd1074cc730d45db2ccb09c3bac1f8f32d1eb61b \
138 --hash=sha256:7d3ce02c0784b7cbcc771a2da6ea51f87e8716004512493a2b69016326301c3b \
139 --hash=sha256:a14e499c0f5955dcc3991f785f3f8e2130ed504fa3a7f44009ff458ad6bdd17f \
140 --hash=sha256:a39f54ccbcd2757d1d63b0ec00a00980c0b382c62865b61a505163943624ab20 \
141 --hash=sha256:aabb0c5232910a20eec8563503c153a8e78bbf5459490c49ab31f6adf3f3a415 \
142 --hash=sha256:bd4ecb473a96ad0f90c20acba4f0bf0df91a4e03a1f4dd6a4bdc9ca75aa3a715 \
143 --hash=sha256:e2da3c13307eac601f3de04887624939aca8ee3c9488a0bb0eca4fb9401fc6b1 \
144 --hash=sha256:f67814c38162f4deb31f68d590771a29d5ae3b1bd64b75cf232308e5c74777e0 \
145 # via paramiko
97 pypsrp==0.3.1 \
146 pypsrp==0.3.1 \
98 --hash=sha256:309853380fe086090a03cc6662a778ee69b1cae355ae4a932859034fd76e9d0b \
147 --hash=sha256:309853380fe086090a03cc6662a778ee69b1cae355ae4a932859034fd76e9d0b \
99 --hash=sha256:90f946254f547dc3493cea8493c819ab87e152a755797c93aa2668678ba8ae85
148 --hash=sha256:90f946254f547dc3493cea8493c819ab87e152a755797c93aa2668678ba8ae85
@@ -112,8 +161,8 b' s3transfer==0.2.0 \\'
112 six==1.12.0 \
161 six==1.12.0 \
113 --hash=sha256:3350809f0555b11f552448330d0b52d5f24c91a322ea4a15ef22629740f3761c \
162 --hash=sha256:3350809f0555b11f552448330d0b52d5f24c91a322ea4a15ef22629740f3761c \
114 --hash=sha256:d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73 \
163 --hash=sha256:d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73 \
115 # via cryptography, pypsrp, python-dateutil
164 # via bcrypt, cryptography, pynacl, pypsrp, python-dateutil
116 urllib3==1.24.1 \
165 urllib3==1.24.2 \
117 --hash=sha256:61bf29cada3fc2fbefad4fdf059ea4bd1b4a86d2b6d15e1c7c0b582b9752fe39 \
166 --hash=sha256:4c291ca23bbb55c76518905869ef34bdd5f0e46af7afe6861e8375643ffee1a0 \
118 --hash=sha256:de9529817c93f27c8ccbfead6985011db27bd0ddfcdb2d86f3f663385c6a9c22 \
167 --hash=sha256:9a247273df709c4fedb38c711e44292304f73f39ab01beda9f6b9fc375669ac3 \
119 # via botocore, requests
168 # via botocore, requests
@@ -1,2 +1,3 b''
1 boto3
1 boto3
2 paramiko
2 pypsrp
3 pypsrp
@@ -7,7 +7,7 b''
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 from __future__ import absolute_import
10 from __future__ import absolute_import, print_function
11
11
12 import argparse
12 import argparse
13 import contextlib
13 import contextlib
@@ -227,4 +227,7 b' def main():'
227 process(fin, fout, opts)
227 process(fin, fout, opts)
228
228
229 if __name__ == '__main__':
229 if __name__ == '__main__':
230 if sys.version_info.major < 3:
231 print('This script must be run under Python 3.')
232 sys.exit(3)
230 main()
233 main()
@@ -44,6 +44,7 b' import timeit'
44 _TYPEMAP = {
44 _TYPEMAP = {
45 'START': 'B',
45 'START': 'B',
46 'END': 'E',
46 'END': 'E',
47 'COUNTER': 'C',
47 }
48 }
48
49
49 _threadmap = {}
50 _threadmap = {}
@@ -78,6 +79,11 b' def main():'
78 verb, session, label = ev.split(' ', 2)
79 verb, session, label = ev.split(' ', 2)
79 if session not in _threadmap:
80 if session not in _threadmap:
80 _threadmap[session] = len(_threadmap)
81 _threadmap[session] = len(_threadmap)
82 if verb == 'COUNTER':
83 amount, label = label.split(' ', 1)
84 payload_args = {'value': int(amount)}
85 else:
86 payload_args = {}
81 pid = _threadmap[session]
87 pid = _threadmap[session]
82 ts_micros = (now - start) * 1000000
88 ts_micros = (now - start) * 1000000
83 out.write(json.dumps(
89 out.write(json.dumps(
@@ -88,7 +94,7 b' def main():'
88 "ts": ts_micros,
94 "ts": ts_micros,
89 "pid": pid,
95 "pid": pid,
90 "tid": 1,
96 "tid": 1,
91 "args": {}
97 "args": payload_args,
92 }))
98 }))
93 out.write(',\n')
99 out.write(',\n')
94 finally:
100 finally:
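To illustrate the new event type: an input line such as ``COUNTER session1 42 cache-entries`` (session name, amount, and label here are hypothetical) now produces a Chrome trace event with ``"ph": "C"`` and ``"args": {"value": 42}``, which trace viewers render as a counter track.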
@@ -649,15 +649,15 b' def embedded(f, modname, src):'
649 ... print("%s %s %d" % (_forcestr(m), _forcestr(f), l))
649 ... print("%s %s %d" % (_forcestr(m), _forcestr(f), l))
650 ... print(repr(_forcestr(s)))
650 ... print(repr(_forcestr(s)))
651 >>> lines = [
651 >>> lines = [
652 ... b'comment',
652 ... 'comment',
653 ... b' >>> from __future__ import print_function',
653 ... ' >>> from __future__ import print_function',
654 ... b" >>> ' multiline",
654 ... " >>> ' multiline",
655 ... b" ... string'",
655 ... " ... string'",
656 ... b' ',
656 ... ' ',
657 ... b'comment',
657 ... 'comment',
658 ... b' $ cat > foo.py <<EOF',
658 ... ' $ cat > foo.py <<EOF',
659 ... b' > from __future__ import print_function',
659 ... ' > from __future__ import print_function',
660 ... b' > EOF',
660 ... ' > EOF',
661 ... ]
661 ... ]
662 >>> test(b"example.t", lines)
662 >>> test(b"example.t", lines)
663 example[2] doctest.py 1
663 example[2] doctest.py 1
@@ -694,7 +694,7 b' def sources(f, modname):'
694 yield src.read(), modname, f, 0
694 yield src.read(), modname, f, 0
695 py = True
695 py = True
696 if py or f.endswith('.t'):
696 if py or f.endswith('.t'):
697 with open(f, 'rb') as src:
697 with open(f, 'r') as src:
698 for script, modname, t, line in embedded(f, modname, src):
698 for script, modname, t, line in embedded(f, modname, src):
699 yield script, modname.encode('utf8'), t, line
699 yield script, modname.encode('utf8'), t, line
700
700
@@ -32,7 +32,7 b' From the prompt, change to the Mercurial'
32 ``cd c:\src\hg``.
32 ``cd c:\src\hg``.
33
33
34 Next, invoke ``build.py`` to produce an Inno installer. You will
34 Next, invoke ``build.py`` to produce an Inno installer. You will
35 need to supply the path to the Python interpreter to use.:
35 need to supply the path to the Python interpreter to use.::
36
36
37 $ python3.exe contrib\packaging\inno\build.py \
37 $ python3.exe contrib\packaging\inno\build.py \
38 --python c:\python27\python.exe
38 --python c:\python27\python.exe
@@ -49,6 +49,7 b''
49 <File Id="internals.config.txt" Name="config.txt" />
49 <File Id="internals.config.txt" Name="config.txt" />
50 <File Id="internals.extensions.txt" Name="extensions.txt" />
50 <File Id="internals.extensions.txt" Name="extensions.txt" />
51 <File Id="internals.linelog.txt" Name="linelog.txt" />
51 <File Id="internals.linelog.txt" Name="linelog.txt" />
52 <File Id="internals.mergestate.txt" Name="mergestate.txt" />
52 <File Id="internals.requirements.txt" Name="requirements.txt" />
53 <File Id="internals.requirements.txt" Name="requirements.txt" />
53 <File Id="internals.revlogs.txt" Name="revlogs.txt" />
54 <File Id="internals.revlogs.txt" Name="revlogs.txt" />
54 <File Id="internals.wireprotocol.txt" Name="wireprotocol.txt" />
55 <File Id="internals.wireprotocol.txt" Name="wireprotocol.txt" />
@@ -15,6 +15,13 b' Configurations'
15 ``presleep``
15 ``presleep``
16 number of seconds to wait before any group of runs (default: 1)
16 number of seconds to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
19 number of runs to perform before measurement starts (default: 0).
20
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
23 (Only the first iteration is profiled.)
24
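As an illustration, both new options can be enabled from an hgrc (the values are arbitrary examples)::

    [perf]
    pre-run = 3
    profile-benchmark = yes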
18 ``run-limits``
25 ``run-limits``
19 Control the number of runs each benchmark will perform. The option value
26 Control the number of runs each benchmark will perform. The option value
20 should be a list of `<time>-<numberofrun>` pairs. After each run the
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
@@ -106,6 +113,10 b' try:'
106 except ImportError:
113 except ImportError:
107 pass
114 pass
108
115
116 try:
117 from mercurial import profiling
118 except ImportError:
119 profiling = None
109
120
110 def identity(a):
121 def identity(a):
111 return a
122 return a
@@ -240,6 +251,12 b' try:'
240 configitem(b'perf', b'all-timing',
251 configitem(b'perf', b'all-timing',
241 default=mercurial.configitems.dynamicdefault,
252 default=mercurial.configitems.dynamicdefault,
242 )
253 )
254 configitem(b'perf', b'pre-run',
255 default=mercurial.configitems.dynamicdefault,
256 )
257 configitem(b'perf', b'profile-benchmark',
258 default=mercurial.configitems.dynamicdefault,
259 )
243 configitem(b'perf', b'run-limits',
260 configitem(b'perf', b'run-limits',
244 default=mercurial.configitems.dynamicdefault,
261 default=mercurial.configitems.dynamicdefault,
245 )
262 )
@@ -251,6 +268,15 b' def getlen(ui):'
251 return lambda x: 1
268 return lambda x: 1
252 return len
269 return len
253
270
271 class noop(object):
272 """dummy context manager"""
273 def __enter__(self):
274 pass
275 def __exit__(self, *args):
276 pass
277
278 NOOPCTX = noop()
279
254 def gettimer(ui, opts=None):
280 def gettimer(ui, opts=None):
255 """return a timer function and formatter: (timer, formatter)
281 """return a timer function and formatter: (timer, formatter)
256
282
@@ -341,7 +367,14 b' def gettimer(ui, opts=None):'
341 if not limits:
367 if not limits:
342 limits = DEFAULTLIMITS
368 limits = DEFAULTLIMITS
343
369
344 t = functools.partial(_timer, fm, displayall=displayall, limits=limits)
370 profiler = None
371 if profiling is not None:
372 if ui.configbool(b"perf", b"profile-benchmark", False):
373 profiler = profiling.profile(ui)
374
375 prerun = getint(ui, b"perf", b"pre-run", 0)
376 t = functools.partial(_timer, fm, displayall=displayall, limits=limits,
377 prerun=prerun, profiler=profiler)
345 return t, fm
378 return t, fm
346
379
347 def stub_timer(fm, func, setup=None, title=None):
380 def stub_timer(fm, func, setup=None, title=None):
@@ -368,17 +401,25 b' DEFAULTLIMITS = ('
368 )
401 )
369
402
370 def _timer(fm, func, setup=None, title=None, displayall=False,
403 def _timer(fm, func, setup=None, title=None, displayall=False,
371 limits=DEFAULTLIMITS):
404 limits=DEFAULTLIMITS, prerun=0, profiler=None):
372 gc.collect()
405 gc.collect()
373 results = []
406 results = []
374 begin = util.timer()
407 begin = util.timer()
375 count = 0
408 count = 0
409 if profiler is None:
410 profiler = NOOPCTX
411 for i in range(prerun):
412 if setup is not None:
413 setup()
414 func()
376 keepgoing = True
415 keepgoing = True
377 while keepgoing:
416 while keepgoing:
378 if setup is not None:
417 if setup is not None:
379 setup()
418 setup()
380 with timeone() as item:
419 with profiler:
381 r = func()
420 with timeone() as item:
421 r = func()
422 profiler = NOOPCTX
382 count += 1
423 count += 1
383 results.append(item[0])
424 results.append(item[0])
384 cstop = util.timer()
425 cstop = util.timer()
@@ -922,17 +963,39 b' def perfdirstatewrite(ui, repo, **opts):'
922 timer(d)
963 timer(d)
923 fm.end()
964 fm.end()
924
965
966 def _getmergerevs(repo, opts):
967 """parse command argument to return rev involved in merge
968
969 input: options dictionary with `rev`, `from` and `base`
970 output: (localctx, otherctx, basectx)
971 """
972 if opts[b'from']:
973 fromrev = scmutil.revsingle(repo, opts[b'from'])
974 wctx = repo[fromrev]
975 else:
976 wctx = repo[None]
977 # we don't want working dir files to be stat'd in the benchmark, so
978 # prime that cache
979 wctx.dirty()
980 rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
981 if opts[b'base']:
982 fromrev = scmutil.revsingle(repo, opts[b'base'])
983 ancestor = repo[fromrev]
984 else:
985 ancestor = wctx.ancestor(rctx)
986 return (wctx, rctx, ancestor)
987
925 @command(b'perfmergecalculate',
988 @command(b'perfmergecalculate',
926 [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
989 [
927 def perfmergecalculate(ui, repo, rev, **opts):
990 (b'r', b'rev', b'.', b'rev to merge against'),
991 (b'', b'from', b'', b'rev to merge from'),
992 (b'', b'base', b'', b'the revision to use as base'),
993 ] + formatteropts)
994 def perfmergecalculate(ui, repo, **opts):
928 opts = _byteskwargs(opts)
995 opts = _byteskwargs(opts)
929 timer, fm = gettimer(ui, opts)
996 timer, fm = gettimer(ui, opts)
930 wctx = repo[None]
997
931 rctx = scmutil.revsingle(repo, rev, rev)
998 wctx, rctx, ancestor = _getmergerevs(repo, opts)
932 ancestor = wctx.ancestor(rctx)
933 # we don't want working dir files to be stat'd in the benchmark, so prime
934 # that cache
935 wctx.dirty()
936 def d():
999 def d():
937 # acceptremote is True because we don't want prompts in the middle of
1000 # acceptremote is True because we don't want prompts in the middle of
938 # our benchmark
1001 # our benchmark
@@ -941,6 +1004,24 b' def perfmergecalculate(ui, repo, rev, **'
941 timer(d)
1004 timer(d)
942 fm.end()
1005 fm.end()
943
1006
1007 @command(b'perfmergecopies',
1008 [
1009 (b'r', b'rev', b'.', b'rev to merge against'),
1010 (b'', b'from', b'', b'rev to merge from'),
1011 (b'', b'base', b'', b'the revision to use as base'),
1012 ] + formatteropts)
1013 def perfmergecopies(ui, repo, **opts):
1014 """measure runtime of `copies.mergecopies`"""
1015 opts = _byteskwargs(opts)
1016 timer, fm = gettimer(ui, opts)
1017 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1018 def d():
1019 # acceptremote is True because we don't want prompts in the middle of
1020 # our benchmark
1021 copies.mergecopies(repo, wctx, rctx, ancestor)
1022 timer(d)
1023 fm.end()
1024
944 @command(b'perfpathcopies', [], b"REV REV")
1025 @command(b'perfpathcopies', [], b"REV REV")
945 def perfpathcopies(ui, repo, rev1, rev2, **opts):
1026 def perfpathcopies(ui, repo, rev1, rev2, **opts):
946 """benchmark the copy tracing logic"""
1027 """benchmark the copy tracing logic"""
@@ -1390,6 +1471,111 b' def perftemplating(ui, repo, testedtempl'
1390 timer(format)
1471 timer(format)
1391 fm.end()
1472 fm.end()
1392
1473
1474 @command(b'perfhelper-mergecopies', formatteropts +
1475 [
1476 (b'r', b'revs', [], b'restrict search to these revisions'),
1477 (b'', b'timing', False, b'provides extra data (costly)'),
1478 ])
1479 def perfhelpermergecopies(ui, repo, revs=[], **opts):
1480 """find statistics about potential parameters for `perfmergecopies`
1481
1482 This command finds (base, p1, p2) triplets relevant to copy tracing
1483 benchmarking in the context of a merge. It reports values for some of the
1484 parameters that impact merge copy tracing time during merge.
1485
1486 If `--timing` is set, rename detection is run and the associated timing
1487 will be reported. The extra details come at the cost of slower command
1488 execution.
1489
1490 Since rename detection is only run once, other factors might easily
1491 affect the precision of the timing. However, it should give a good
1492 approximation of which revision triplets are very costly.
1493 """
1494 opts = _byteskwargs(opts)
1495 fm = ui.formatter(b'perf', opts)
1496 dotiming = opts[b'timing']
1497
1498 output_template = [
1499 ("base", "%(base)12s"),
1500 ("p1", "%(p1.node)12s"),
1501 ("p2", "%(p2.node)12s"),
1502 ("p1.nb-revs", "%(p1.nbrevs)12d"),
1503 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
1504 ("p1.renames", "%(p1.renamedfiles)12d"),
1505 ("p1.time", "%(p1.time)12.3f"),
1506 ("p2.nb-revs", "%(p2.nbrevs)12d"),
1507 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
1508 ("p2.renames", "%(p2.renamedfiles)12d"),
1509 ("p2.time", "%(p2.time)12.3f"),
1510 ("renames", "%(nbrenamedfiles)12d"),
1511 ("total.time", "%(time)12.3f"),
1512 ]
1513 if not dotiming:
1514 output_template = [i for i in output_template
1515 if not ('time' in i[0] or 'renames' in i[0])]
1516 header_names = [h for (h, v) in output_template]
1517 output = ' '.join([v for (h, v) in output_template]) + '\n'
1518 header = ' '.join(['%12s'] * len(header_names)) + '\n'
1519 fm.plain(header % tuple(header_names))
1520
1521 if not revs:
1522 revs = ['all()']
1523 revs = scmutil.revrange(repo, revs)
1524
1525 roi = repo.revs('merge() and %ld', revs)
1526 for r in roi:
1527 ctx = repo[r]
1528 p1 = ctx.p1()
1529 p2 = ctx.p2()
1530 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
1531 for b in bases:
1532 b = repo[b]
1533 p1missing = copies._computeforwardmissing(b, p1)
1534 p2missing = copies._computeforwardmissing(b, p2)
1535 data = {
1536 b'base': b.hex(),
1537 b'p1.node': p1.hex(),
1538 b'p1.nbrevs': len(repo.revs('%d::%d', b.rev(), p1.rev())),
1539 b'p1.nbmissingfiles': len(p1missing),
1540 b'p2.node': p2.hex(),
1541 b'p2.nbrevs': len(repo.revs('%d::%d', b.rev(), p2.rev())),
1542 b'p2.nbmissingfiles': len(p2missing),
1543 }
1544 if dotiming:
1545 begin = util.timer()
1546 mergedata = copies.mergecopies(repo, p1, p2, b)
1547 end = util.timer()
1548 # not very stable timing since we did only one run
1549 data['time'] = end - begin
1550 # mergedata contains five dicts: "copy", "movewithdir",
1551 # "diverge", "renamedelete" and "dirmove".
1552 # The first 4 are about renamed files, so let's count them.
1553 renames = len(mergedata[0])
1554 renames += len(mergedata[1])
1555 renames += len(mergedata[2])
1556 renames += len(mergedata[3])
1557 data['nbrenamedfiles'] = renames
1558 begin = util.timer()
1559 p1renames = copies.pathcopies(b, p1)
1560 end = util.timer()
1561 data['p1.time'] = end - begin
1562 begin = util.timer()
1563 p2renames = copies.pathcopies(b, p2)
1564 end = util.timer()
1565 data['p2.time'] = end - begin
1566 data['p1.renamedfiles'] = len(p1renames)
1567 data['p2.renamedfiles'] = len(p2renames)
1568 fm.startitem()
1569 fm.data(**data)
1570 # make node pretty for the human output
1571 out = data.copy()
1572 out['base'] = fm.hexfunc(b.node())
1573 out['p1.node'] = fm.hexfunc(p1.node())
1574 out['p2.node'] = fm.hexfunc(p2.node())
1575 fm.plain(output % out)
1576
1577 fm.end()
1578
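Each timing in the helper above brackets exactly one `pathcopies` or `mergecopies` call between a `begin` and an `end` read, and the delta must be computed only after `end` is taken (see the corrected p2 measurement). The same idiom in isolation, with `time.perf_counter` assumed as a stand-in for `util.timer`::

    import time

    def measure(func):
        begin = time.perf_counter()
        result = func()
        end = time.perf_counter()        # read the clock before subtracting
        return result, end - begin

    renames, elapsed = measure(lambda: {'old/name': 'new/name'})
    print(len(renames), elapsed >= 0)    # 1 True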
1393 @command(b'perfhelper-pathcopies', formatteropts +
1579 @command(b'perfhelper-pathcopies', formatteropts +
1394 [
1580 [
1395 (b'r', b'revs', [], b'restrict search to these revisions'),
1581 (b'r', b'revs', [], b'restrict search to these revisions'),
@@ -1890,7 +2076,7 b' def perfrevlogrevisions(ui, repo, file_='
1890 @command(b'perfrevlogwrite', revlogopts + formatteropts +
2076 @command(b'perfrevlogwrite', revlogopts + formatteropts +
1891 [(b's', b'startrev', 1000, b'revision to start writing at'),
2077 [(b's', b'startrev', 1000, b'revision to start writing at'),
1892 (b'', b'stoprev', -1, b'last revision to write'),
2078 (b'', b'stoprev', -1, b'last revision to write'),
1893 (b'', b'count', 3, b'last revision to write'),
2079 (b'', b'count', 3, b'number of passes to perform'),
1894 (b'', b'details', False, b'print timing for every revision tested'),
2080 (b'', b'details', False, b'print timing for every revision tested'),
1895 (b'', b'source', b'full', b'the kind of data fed into the revlog'),
2081 (b'', b'source', b'full', b'the kind of data fed into the revlog'),
1896 (b'', b'lazydeltabase', True, b'try the provided delta first'),
2082 (b'', b'lazydeltabase', True, b'try the provided delta first'),
@@ -1907,6 +2093,16 b' def perfrevlogwrite(ui, repo, file_=None'
1907 (use a delta from the first parent otherwise)
2093 (use a delta from the first parent otherwise)
1908 * `parent-smallest`: add from the smallest delta (either p1 or p2)
2094 * `parent-smallest`: add from the smallest delta (either p1 or p2)
1909 * `storage`: add from the existing precomputed deltas
2095 * `storage`: add from the existing precomputed deltas
2096
2097 Note: This command measures performance in a custom way. As a
2098 result, some of the global configuration of the 'perf' command does not
2099 apply to it:
2100
2101 * ``pre-run``: disabled
2102
2103 * ``profile-benchmark``: disabled
2104
2105 * ``run-limits``: disabled; use --count instead
1910 """
2106 """
1911 opts = _byteskwargs(opts)
2107 opts = _byteskwargs(opts)
1912
2108
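Because `run-limits` is disabled here, repetition is controlled solely by --count: the whole write sequence is replayed that many times and each pass is timed on its own. A rough sketch of those semantics (names assumed, not perf.py internals)::

    import time

    def run_passes(writeall, count=3):
        timings = []
        for _ in range(count):           # one full startrev..stoprev replay
            begin = time.perf_counter()
            writeall()
            timings.append(time.perf_counter() - begin)
        return timings

    print(run_passes(lambda: sum(range(100000)), count=3))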
@@ -2081,6 +2277,10 b' def _temprevlog(ui, orig, truncaterev):'
2081
2277
2082 if orig._inline:
2278 if orig._inline:
2083 raise error.Abort('not supporting inline revlog (yet)')
2279 raise error.Abort('not supporting inline revlog (yet)')
2280 revlogkwargs = {}
2281 k = 'upperboundcomp'
2282 if util.safehasattr(orig, k):
2283 revlogkwargs[k] = getattr(orig, k)
2084
2284
2085 origindexpath = orig.opener.join(orig.indexfile)
2285 origindexpath = orig.opener.join(orig.indexfile)
2086 origdatapath = orig.opener.join(orig.datafile)
2286 origdatapath = orig.opener.join(orig.datafile)
@@ -2112,7 +2312,7 b' def _temprevlog(ui, orig, truncaterev):'
2112
2312
2113 dest = revlog.revlog(vfs,
2313 dest = revlog.revlog(vfs,
2114 indexfile=indexname,
2314 indexfile=indexname,
2115 datafile=dataname)
2315 datafile=dataname, **revlogkwargs)
2116 if dest._inline:
2316 if dest._inline:
2117 raise error.Abort('not supporting inline revlog (yet)')
2317 raise error.Abort('not supporting inline revlog (yet)')
2118 # make sure internals are initialized
2318 # make sure internals are initialized
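The `revlogkwargs` dance above forwards `upperboundcomp` only when the source revlog actually has it, so `_temprevlog` keeps working against older revlog classes that lack the attribute. The guarded-forwarding pattern on its own, with plain `hasattr` standing in for `util.safehasattr`::

    class OldRevlog(object):
        pass

    class NewRevlog(object):
        upperboundcomp = 0.5

    def optionalkwargs(orig, names=('upperboundcomp',)):
        kwargs = {}
        for name in names:
            if hasattr(orig, name):      # util.safehasattr in Mercurial proper
                kwargs[name] = getattr(orig, name)
        return kwargs

    print(optionalkwargs(OldRevlog()))   # {}
    print(optionalkwargs(NewRevlog()))   # {'upperboundcomp': 0.5}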
@@ -4,6 +4,7 b' test-absorb-filefixupstate.py'
4 test-absorb-phase.t
4 test-absorb-phase.t
5 test-absorb-rename.t
5 test-absorb-rename.t
6 test-absorb-strip.t
6 test-absorb-strip.t
7 test-absorb-unfinished.t
7 test-absorb.t
8 test-absorb.t
8 test-acl.t
9 test-acl.t
9 test-add.t
10 test-add.t
@@ -35,6 +36,7 b' test-bisect2.t'
35 test-bisect3.t
36 test-bisect3.t
36 test-blackbox.t
37 test-blackbox.t
37 test-bookflow.t
38 test-bookflow.t
39 test-bookmarks-corner-case.t
38 test-bookmarks-current.t
40 test-bookmarks-current.t
39 test-bookmarks-merge.t
41 test-bookmarks-merge.t
40 test-bookmarks-pushpull.t
42 test-bookmarks-pushpull.t
@@ -104,6 +106,7 b' test-context.py'
104 test-contrib-check-code.t
106 test-contrib-check-code.t
105 test-contrib-check-commit.t
107 test-contrib-check-commit.t
106 test-contrib-dumprevlog.t
108 test-contrib-dumprevlog.t
109 test-contrib-emacs.t
107 test-contrib-perf.t
110 test-contrib-perf.t
108 test-contrib-relnotes.t
111 test-contrib-relnotes.t
109 test-contrib-testparseutil.t
112 test-contrib-testparseutil.t
@@ -126,6 +129,8 b' test-convert-splicemap.t'
126 test-convert-svn-sink.t
129 test-convert-svn-sink.t
127 test-convert-tagsbranch-topology.t
130 test-convert-tagsbranch-topology.t
128 test-convert.t
131 test-convert.t
132 test-copies-in-changeset.t
133 test-copies-unrelated.t
129 test-copies.t
134 test-copies.t
130 test-copy-move-merge.t
135 test-copy-move-merge.t
131 test-copy.t
136 test-copy.t
@@ -139,6 +144,7 b' test-debugindexdot.t'
139 test-debugrename.t
144 test-debugrename.t
140 test-default-push.t
145 test-default-push.t
141 test-demandimport.py
146 test-demandimport.py
147 test-devel-warnings.t
142 test-diff-antipatience.t
148 test-diff-antipatience.t
143 test-diff-binary-file.t
149 test-diff-binary-file.t
144 test-diff-change.t
150 test-diff-change.t
@@ -159,6 +165,7 b' test-directaccess.t'
159 test-dirstate-backup.t
165 test-dirstate-backup.t
160 test-dirstate-nonnormalset.t
166 test-dirstate-nonnormalset.t
161 test-dirstate-race.t
167 test-dirstate-race.t
168 test-dirstate-race2.t
162 test-dirstate.t
169 test-dirstate.t
163 test-dispatch.py
170 test-dispatch.py
164 test-dispatch.t
171 test-dispatch.t
@@ -230,6 +237,7 b' test-filecache.py'
230 test-filelog.py
237 test-filelog.py
231 test-fileset-generated.t
238 test-fileset-generated.t
232 test-fileset.t
239 test-fileset.t
240 test-fix-metadata.t
233 test-fix-topology.t
241 test-fix-topology.t
234 test-fix.t
242 test-fix.t
235 test-flagprocessor.t
243 test-flagprocessor.t
@@ -511,6 +519,7 b' test-pathconflicts-basic.t'
511 test-pathconflicts-merge.t
519 test-pathconflicts-merge.t
512 test-pathconflicts-update.t
520 test-pathconflicts-update.t
513 test-pathencode.py
521 test-pathencode.py
522 test-paths.t
514 test-pending.t
523 test-pending.t
515 test-permissions.t
524 test-permissions.t
516 test-phabricator.t
525 test-phabricator.t
@@ -597,6 +606,7 b' test-releasenotes-formatting.t'
597 test-releasenotes-merging.t
606 test-releasenotes-merging.t
598 test-releasenotes-parsing.t
607 test-releasenotes-parsing.t
599 test-relink.t
608 test-relink.t
609 test-remote-hidden.t
600 test-remotefilelog-bad-configs.t
610 test-remotefilelog-bad-configs.t
601 test-remotefilelog-bgprefetch.t
611 test-remotefilelog-bgprefetch.t
602 test-remotefilelog-blame.t
612 test-remotefilelog-blame.t
@@ -658,10 +668,12 b' test-rollback.t'
658 test-run-tests.py
668 test-run-tests.py
659 test-run-tests.t
669 test-run-tests.t
660 test-rust-ancestor.py
670 test-rust-ancestor.py
671 test-rust-discovery.py
661 test-schemes.t
672 test-schemes.t
662 test-serve.t
673 test-serve.t
663 test-server-view.t
674 test-server-view.t
664 test-setdiscovery.t
675 test-setdiscovery.t
676 test-share-bookmarks.t
665 test-share.t
677 test-share.t
666 test-shelve.t
678 test-shelve.t
667 test-shelve2.t
679 test-shelve2.t
@@ -38,12 +38,6 b' def rapply(f, xs):'
38 if ispy3:
38 if ispy3:
39 import builtins
39 import builtins
40
40
41 # TODO: .buffer might not exist if std streams were replaced; we'll need
42 # a silly wrapper to make a bytes stream backed by a unicode one.
43 stdin = sys.stdin.buffer
44 stdout = sys.stdout.buffer
45 stderr = sys.stderr.buffer
46
47 def bytestr(s):
41 def bytestr(s):
48 # tiny version of pycompat.bytestr
42 # tiny version of pycompat.bytestr
49 return s.encode('latin1')
43 return s.encode('latin1')
@@ -54,12 +48,8 b' if ispy3:'
54 return s.decode(u'latin-1')
48 return s.decode(u'latin-1')
55
49
56 def opentext(f):
50 def opentext(f):
57 return open(f, 'rb')
51 return open(f, 'r')
58 else:
52 else:
59 stdin = sys.stdin
60 stdout = sys.stdout
61 stderr = sys.stderr
62
63 bytestr = str
53 bytestr = str
64 sysstr = identity
54 sysstr = identity
65
55
@@ -71,11 +61,11 b' def b2s(x):'
71
61
72 def writeout(data):
62 def writeout(data):
73 # write "data" in BYTES into stdout
63 # write "data" in BYTES into stdout
74 stdout.write(data)
64 sys.stdout.write(data)
75
65
76 def writeerr(data):
66 def writeerr(data):
77 # write "data" in BYTES into stderr
67 # write "data" in BYTES into stderr
78 stderr.write(data)
68 sys.stderr.write(data)
79
69
80 ####################
70 ####################
81
71
@@ -164,14 +154,14 b' def embedded(basefile, lines, errors, ma'
164 ... self.matchfunc = matchfunc
154 ... self.matchfunc = matchfunc
165 ... def startsat(self, line):
155 ... def startsat(self, line):
166 ... return self.matchfunc(line)
156 ... return self.matchfunc(line)
167 >>> ambig1 = ambigmatcher(b'ambiguous #1',
157 >>> ambig1 = ambigmatcher('ambiguous #1',
168 ... lambda l: l.startswith(b' $ cat '))
158 ... lambda l: l.startswith(' $ cat '))
169 >>> ambig2 = ambigmatcher(b'ambiguous #2',
159 >>> ambig2 = ambigmatcher('ambiguous #2',
170 ... lambda l: l.endswith(b'<< EOF\\n'))
160 ... lambda l: l.endswith('<< EOF\\n'))
171 >>> lines = [b' $ cat > foo.py << EOF\\n']
161 >>> lines = [' $ cat > foo.py << EOF\\n']
172 >>> errors = []
162 >>> errors = []
173 >>> matchers = [ambig1, ambig2]
163 >>> matchers = [ambig1, ambig2]
174 >>> list(t for t in embedded(b'<dummy>', lines, errors, matchers))
164 >>> list(t for t in embedded('<dummy>', lines, errors, matchers))
175 []
165 []
176 >>> b2s(errors)
166 >>> b2s(errors)
177 ['<dummy>:1: ambiguous line for "ambiguous #1", "ambiguous #2"']
167 ['<dummy>:1: ambiguous line for "ambiguous #1", "ambiguous #2"']
@@ -181,21 +171,21 b' def embedded(basefile, lines, errors, ma'
181 ctx = filename = code = startline = None # for pyflakes
171 ctx = filename = code = startline = None # for pyflakes
182
172
183 for lineno, line in enumerate(lines, 1):
173 for lineno, line in enumerate(lines, 1):
184 if not line.endswith(b'\n'):
174 if not line.endswith('\n'):
185 line += b'\n' # to normalize EOF line
175 line += '\n' # to normalize EOF line
186 if matcher: # now, inside embedded code
176 if matcher: # now, inside embedded code
187 if matcher.endsat(ctx, line):
177 if matcher.endsat(ctx, line):
188 codeatend = matcher.codeatend(ctx, line)
178 codeatend = matcher.codeatend(ctx, line)
189 if codeatend is not None:
179 if codeatend is not None:
190 code.append(codeatend)
180 code.append(codeatend)
191 if not matcher.ignores(ctx):
181 if not matcher.ignores(ctx):
192 yield (filename, startline, lineno, b''.join(code))
182 yield (filename, startline, lineno, ''.join(code))
193 matcher = None
183 matcher = None
194 # DO NOT "continue", because line might start next fragment
184 # DO NOT "continue", because line might start next fragment
195 elif not matcher.isinside(ctx, line):
185 elif not matcher.isinside(ctx, line):
196 # this is an error of basefile
186 # this is an error of basefile
197 # (if matchers are implemented correctly)
187 # (if matchers are implemented correctly)
198 errors.append(b'%s:%d: unexpected line for "%s"'
188 errors.append('%s:%d: unexpected line for "%s"'
199 % (basefile, lineno, matcher.desc))
189 % (basefile, lineno, matcher.desc))
200 # stop extracting embedded code by current 'matcher',
190 # stop extracting embedded code by current 'matcher',
201 # because appearance of unexpected line might mean
191 # because appearance of unexpected line might mean
@@ -218,9 +208,9 b' def embedded(basefile, lines, errors, ma'
218 if matched:
208 if matched:
219 if len(matched) > 1:
209 if len(matched) > 1:
220 # this is an error of matchers, maybe
210 # this is an error of matchers, maybe
221 errors.append(b'%s:%d: ambiguous line for %s' %
211 errors.append('%s:%d: ambiguous line for %s' %
222 (basefile, lineno,
212 (basefile, lineno,
223 b', '.join([b'"%s"' % m.desc
213 ', '.join(['"%s"' % m.desc
224 for m, c in matched])))
214 for m, c in matched])))
225 # omit extracting embedded code, because choosing
215 # omit extracting embedded code, because choosing
226 # arbitrary matcher from matched ones might fail to
216 # arbitrary matcher from matched ones might fail to
@@ -239,20 +229,20 b' def embedded(basefile, lines, errors, ma'
239 if matcher:
229 if matcher:
240 # examine whether EOF ends embedded code, because embedded
230 # examine whether EOF ends embedded code, because embedded
241 # code isn't yet ended explicitly
231 # code isn't yet ended explicitly
242 if matcher.endsat(ctx, b'\n'):
232 if matcher.endsat(ctx, '\n'):
243 codeatend = matcher.codeatend(ctx, b'\n')
233 codeatend = matcher.codeatend(ctx, '\n')
244 if codeatend is not None:
234 if codeatend is not None:
245 code.append(codeatend)
235 code.append(codeatend)
246 if not matcher.ignores(ctx):
236 if not matcher.ignores(ctx):
247 yield (filename, startline, lineno + 1, b''.join(code))
237 yield (filename, startline, lineno + 1, ''.join(code))
248 else:
238 else:
249 # this is an error of basefile
239 # this is an error of basefile
250 # (if matchers are implemented correctly)
240 # (if matchers are implemented correctly)
251 errors.append(b'%s:%d: unexpected end of file for "%s"'
241 errors.append('%s:%d: unexpected end of file for "%s"'
252 % (basefile, lineno, matcher.desc))
242 % (basefile, lineno, matcher.desc))
253
243
254 # heredoc limit mark to ignore embedded code at check-code.py or so
244 # heredoc limit mark to ignore embedded code at check-code.py or so
255 heredocignorelimit = b'NO_CHECK_EOF'
245 heredocignorelimit = 'NO_CHECK_EOF'
256
246
257 # the pattern to match against cases below, and to return a limit mark
247 # the pattern to match against cases below, and to return a limit mark
258 # string as 'lname' group
248 # string as 'lname' group
@@ -260,47 +250,47 b" heredocignorelimit = b'NO_CHECK_EOF'"
260 # - << LIMITMARK
250 # - << LIMITMARK
261 # - << "LIMITMARK"
251 # - << "LIMITMARK"
262 # - << 'LIMITMARK'
252 # - << 'LIMITMARK'
263 heredoclimitpat = br'\s*<<\s*(?P<lquote>["\']?)(?P<limit>\w+)(?P=lquote)'
253 heredoclimitpat = r'\s*<<\s*(?P<lquote>["\']?)(?P<limit>\w+)(?P=lquote)'
264
254
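The three spellings listed above can be checked directly against the pattern; a quick self-contained demonstration::

    import re

    heredoclimitpat = r'\s*<<\s*(?P<lquote>["\']?)(?P<limit>\w+)(?P=lquote)'
    for sample in (' << EOF', ' << "EOF"', " << 'EOF'"):
        m = re.match(heredoclimitpat, sample)
        print(m.group('limit'))          # prints EOF three times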
265 class fileheredocmatcher(embeddedmatcher):
255 class fileheredocmatcher(embeddedmatcher):
266 """Detect "cat > FILE << LIMIT" style embedded code
256 """Detect "cat > FILE << LIMIT" style embedded code
267
257
268 >>> matcher = fileheredocmatcher(b'heredoc .py file', br'[^<]+\\.py')
258 >>> matcher = fileheredocmatcher('heredoc .py file', r'[^<]+\\.py')
269 >>> b2s(matcher.startsat(b' $ cat > file.py << EOF\\n'))
259 >>> b2s(matcher.startsat(' $ cat > file.py << EOF\\n'))
270 ('file.py', ' > EOF\\n')
260 ('file.py', ' > EOF\\n')
271 >>> b2s(matcher.startsat(b' $ cat >>file.py <<EOF\\n'))
261 >>> b2s(matcher.startsat(' $ cat >>file.py <<EOF\\n'))
272 ('file.py', ' > EOF\\n')
262 ('file.py', ' > EOF\\n')
273 >>> b2s(matcher.startsat(b' $ cat> \\x27any file.py\\x27<< "EOF"\\n'))
263 >>> b2s(matcher.startsat(' $ cat> \\x27any file.py\\x27<< "EOF"\\n'))
274 ('any file.py', ' > EOF\\n')
264 ('any file.py', ' > EOF\\n')
275 >>> b2s(matcher.startsat(b" $ cat > file.py << 'ANYLIMIT'\\n"))
265 >>> b2s(matcher.startsat(" $ cat > file.py << 'ANYLIMIT'\\n"))
276 ('file.py', ' > ANYLIMIT\\n')
266 ('file.py', ' > ANYLIMIT\\n')
277 >>> b2s(matcher.startsat(b' $ cat<<ANYLIMIT>"file.py"\\n'))
267 >>> b2s(matcher.startsat(' $ cat<<ANYLIMIT>"file.py"\\n'))
278 ('file.py', ' > ANYLIMIT\\n')
268 ('file.py', ' > ANYLIMIT\\n')
279 >>> start = b' $ cat > file.py << EOF\\n'
269 >>> start = ' $ cat > file.py << EOF\\n'
280 >>> ctx = matcher.startsat(start)
270 >>> ctx = matcher.startsat(start)
281 >>> matcher.codeatstart(ctx, start)
271 >>> matcher.codeatstart(ctx, start)
282 >>> b2s(matcher.filename(ctx))
272 >>> b2s(matcher.filename(ctx))
283 'file.py'
273 'file.py'
284 >>> matcher.ignores(ctx)
274 >>> matcher.ignores(ctx)
285 False
275 False
286 >>> inside = b' > foo = 1\\n'
276 >>> inside = ' > foo = 1\\n'
287 >>> matcher.endsat(ctx, inside)
277 >>> matcher.endsat(ctx, inside)
288 False
278 False
289 >>> matcher.isinside(ctx, inside)
279 >>> matcher.isinside(ctx, inside)
290 True
280 True
291 >>> b2s(matcher.codeinside(ctx, inside))
281 >>> b2s(matcher.codeinside(ctx, inside))
292 'foo = 1\\n'
282 'foo = 1\\n'
293 >>> end = b' > EOF\\n'
283 >>> end = ' > EOF\\n'
294 >>> matcher.endsat(ctx, end)
284 >>> matcher.endsat(ctx, end)
295 True
285 True
296 >>> matcher.codeatend(ctx, end)
286 >>> matcher.codeatend(ctx, end)
297 >>> matcher.endsat(ctx, b' > EOFEOF\\n')
287 >>> matcher.endsat(ctx, ' > EOFEOF\\n')
298 False
288 False
299 >>> ctx = matcher.startsat(b' $ cat > file.py << NO_CHECK_EOF\\n')
289 >>> ctx = matcher.startsat(' $ cat > file.py << NO_CHECK_EOF\\n')
300 >>> matcher.ignores(ctx)
290 >>> matcher.ignores(ctx)
301 True
291 True
302 """
292 """
303 _prefix = b' > '
293 _prefix = ' > '
304
294
305 def __init__(self, desc, namepat):
295 def __init__(self, desc, namepat):
306 super(fileheredocmatcher, self).__init__(desc)
296 super(fileheredocmatcher, self).__init__(desc)
@@ -312,13 +302,13 b' class fileheredocmatcher(embeddedmatcher'
312 # - > NAMEPAT
302 # - > NAMEPAT
313 # - > "NAMEPAT"
303 # - > "NAMEPAT"
314 # - > 'NAMEPAT'
304 # - > 'NAMEPAT'
315 namepat = (br'\s*>>?\s*(?P<nquote>["\']?)(?P<name>%s)(?P=nquote)'
305 namepat = (r'\s*>>?\s*(?P<nquote>["\']?)(?P<name>%s)(?P=nquote)'
316 % namepat)
306 % namepat)
317 self._fileres = [
307 self._fileres = [
318 # "cat > NAME << LIMIT" case
308 # "cat > NAME << LIMIT" case
319 re.compile(br' \$ \s*cat' + namepat + heredoclimitpat),
309 re.compile(r' \$ \s*cat' + namepat + heredoclimitpat),
320 # "cat << LIMIT > NAME" case
310 # "cat << LIMIT > NAME" case
321 re.compile(br' \$ \s*cat' + heredoclimitpat + namepat),
311 re.compile(r' \$ \s*cat' + heredoclimitpat + namepat),
322 ]
312 ]
323
313
324 def startsat(self, line):
314 def startsat(self, line):
@@ -327,7 +317,7 b' class fileheredocmatcher(embeddedmatcher'
327 matched = filere.match(line)
317 matched = filere.match(line)
328 if matched:
318 if matched:
329 return (matched.group('name'),
319 return (matched.group('name'),
330 b' > %s\n' % matched.group('limit'))
320 ' > %s\n' % matched.group('limit'))
331
321
332 def endsat(self, ctx, line):
322 def endsat(self, ctx, line):
333 return ctx[1] == line
323 return ctx[1] == line
@@ -336,7 +326,7 b' class fileheredocmatcher(embeddedmatcher'
336 return line.startswith(self._prefix)
326 return line.startswith(self._prefix)
337
327
338 def ignores(self, ctx):
328 def ignores(self, ctx):
339 return b' > %s\n' % heredocignorelimit == ctx[1]
329 return ' > %s\n' % heredocignorelimit == ctx[1]
340
330
341 def filename(self, ctx):
331 def filename(self, ctx):
342 return ctx[0]
332 return ctx[0]
@@ -357,10 +347,10 b' class pydoctestmatcher(embeddedmatcher):'
357 """Detect ">>> code" style embedded python code
347 """Detect ">>> code" style embedded python code
358
348
359 >>> matcher = pydoctestmatcher()
349 >>> matcher = pydoctestmatcher()
360 >>> startline = b' >>> foo = 1\\n'
350 >>> startline = ' >>> foo = 1\\n'
361 >>> matcher.startsat(startline)
351 >>> matcher.startsat(startline)
362 True
352 True
363 >>> matcher.startsat(b' ... foo = 1\\n')
353 >>> matcher.startsat(' ... foo = 1\\n')
364 False
354 False
365 >>> ctx = matcher.startsat(startline)
355 >>> ctx = matcher.startsat(startline)
366 >>> matcher.filename(ctx)
356 >>> matcher.filename(ctx)
@@ -368,45 +358,45 b' class pydoctestmatcher(embeddedmatcher):'
368 False
358 False
369 >>> b2s(matcher.codeatstart(ctx, startline))
359 >>> b2s(matcher.codeatstart(ctx, startline))
370 'foo = 1\\n'
360 'foo = 1\\n'
371 >>> inside = b' >>> foo = 1\\n'
361 >>> inside = ' >>> foo = 1\\n'
372 >>> matcher.endsat(ctx, inside)
362 >>> matcher.endsat(ctx, inside)
373 False
363 False
374 >>> matcher.isinside(ctx, inside)
364 >>> matcher.isinside(ctx, inside)
375 True
365 True
376 >>> b2s(matcher.codeinside(ctx, inside))
366 >>> b2s(matcher.codeinside(ctx, inside))
377 'foo = 1\\n'
367 'foo = 1\\n'
378 >>> inside = b' ... foo = 1\\n'
368 >>> inside = ' ... foo = 1\\n'
379 >>> matcher.endsat(ctx, inside)
369 >>> matcher.endsat(ctx, inside)
380 False
370 False
381 >>> matcher.isinside(ctx, inside)
371 >>> matcher.isinside(ctx, inside)
382 True
372 True
383 >>> b2s(matcher.codeinside(ctx, inside))
373 >>> b2s(matcher.codeinside(ctx, inside))
384 'foo = 1\\n'
374 'foo = 1\\n'
385 >>> inside = b' expected output\\n'
375 >>> inside = ' expected output\\n'
386 >>> matcher.endsat(ctx, inside)
376 >>> matcher.endsat(ctx, inside)
387 False
377 False
388 >>> matcher.isinside(ctx, inside)
378 >>> matcher.isinside(ctx, inside)
389 True
379 True
390 >>> b2s(matcher.codeinside(ctx, inside))
380 >>> b2s(matcher.codeinside(ctx, inside))
391 '\\n'
381 '\\n'
392 >>> inside = b' \\n'
382 >>> inside = ' \\n'
393 >>> matcher.endsat(ctx, inside)
383 >>> matcher.endsat(ctx, inside)
394 False
384 False
395 >>> matcher.isinside(ctx, inside)
385 >>> matcher.isinside(ctx, inside)
396 True
386 True
397 >>> b2s(matcher.codeinside(ctx, inside))
387 >>> b2s(matcher.codeinside(ctx, inside))
398 '\\n'
388 '\\n'
399 >>> end = b' $ foo bar\\n'
389 >>> end = ' $ foo bar\\n'
400 >>> matcher.endsat(ctx, end)
390 >>> matcher.endsat(ctx, end)
401 True
391 True
402 >>> matcher.codeatend(ctx, end)
392 >>> matcher.codeatend(ctx, end)
403 >>> end = b'\\n'
393 >>> end = '\\n'
404 >>> matcher.endsat(ctx, end)
394 >>> matcher.endsat(ctx, end)
405 True
395 True
406 >>> matcher.codeatend(ctx, end)
396 >>> matcher.codeatend(ctx, end)
407 """
397 """
408 _prefix = b' >>> '
398 _prefix = ' >>> '
409 _prefixre = re.compile(br' (>>>|\.\.\.) ')
399 _prefixre = re.compile(r' (>>>|\.\.\.) ')
410
400
411 # If a line matches against not _prefixre but _outputre, that line
401 # If a line matches against not _prefixre but _outputre, that line
412 # is "an expected output line" (= not a part of code fragment).
402 # is "an expected output line" (= not a part of code fragment).
@@ -416,10 +406,10 b' class pydoctestmatcher(embeddedmatcher):'
416 # run-tests.py. But "directive line inside inline python code"
406 # run-tests.py. But "directive line inside inline python code"
417 # should be rejected by Mercurial reviewers. Therefore, this
407 # should be rejected by Mercurial reviewers. Therefore, this
418 # regexp does not match against such directive lines.
408 # regexp does not match against such directive lines.
419 _outputre = re.compile(br' $| [^$]')
409 _outputre = re.compile(r' $| [^$]')
420
410
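Per the comment above, a line is code when `_prefixre` matches, an expected-output line when only `_outputre` matches, and otherwise it ends the fragment. A standalone sketch of that classification (the two-space indent width is an assumption following the test-file convention)::

    import re

    _prefixre = re.compile(r'  (>>>|\.\.\.) ')
    _outputre = re.compile(r'  $|  [^$]')

    def classify(line):
        if _prefixre.match(line):
            return 'code'
        if _outputre.match(line):
            return 'expected output'
        return 'end of fragment'

    for line in ('  >>> foo = 1\n', '  ... bar\n', '  output\n', '  $ sh\n'):
        print(classify(line))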
421 def __init__(self):
411 def __init__(self):
422 super(pydoctestmatcher, self).__init__(b"doctest style python code")
412 super(pydoctestmatcher, self).__init__("doctest style python code")
423
413
424 def startsat(self, line):
414 def startsat(self, line):
425 # ctx is "True"
415 # ctx is "True"
@@ -446,57 +436,57 b' class pydoctestmatcher(embeddedmatcher):'
446 def codeinside(self, ctx, line):
436 def codeinside(self, ctx, line):
447 if self._prefixre.match(line):
437 if self._prefixre.match(line):
448 return line[len(self._prefix):] # strip prefix ' >>> '/' ... '
438 return line[len(self._prefix):] # strip prefix ' >>> '/' ... '
449 return b'\n' # an expected output line is treated as an empty line
439 return '\n' # an expected output line is treated as an empty line
450
440
451 class pyheredocmatcher(embeddedmatcher):
441 class pyheredocmatcher(embeddedmatcher):
452 """Detect "python << LIMIT" style embedded python code
442 """Detect "python << LIMIT" style embedded python code
453
443
454 >>> matcher = pyheredocmatcher()
444 >>> matcher = pyheredocmatcher()
455 >>> b2s(matcher.startsat(b' $ python << EOF\\n'))
445 >>> b2s(matcher.startsat(' $ python << EOF\\n'))
456 ' > EOF\\n'
446 ' > EOF\\n'
457 >>> b2s(matcher.startsat(b' $ $PYTHON <<EOF\\n'))
447 >>> b2s(matcher.startsat(' $ $PYTHON <<EOF\\n'))
458 ' > EOF\\n'
448 ' > EOF\\n'
459 >>> b2s(matcher.startsat(b' $ "$PYTHON"<< "EOF"\\n'))
449 >>> b2s(matcher.startsat(' $ "$PYTHON"<< "EOF"\\n'))
460 ' > EOF\\n'
450 ' > EOF\\n'
461 >>> b2s(matcher.startsat(b" $ $PYTHON << 'ANYLIMIT'\\n"))
451 >>> b2s(matcher.startsat(" $ $PYTHON << 'ANYLIMIT'\\n"))
462 ' > ANYLIMIT\\n'
452 ' > ANYLIMIT\\n'
463 >>> matcher.startsat(b' $ "$PYTHON" < EOF\\n')
453 >>> matcher.startsat(' $ "$PYTHON" < EOF\\n')
464 >>> start = b' $ python << EOF\\n'
454 >>> start = ' $ python << EOF\\n'
465 >>> ctx = matcher.startsat(start)
455 >>> ctx = matcher.startsat(start)
466 >>> matcher.codeatstart(ctx, start)
456 >>> matcher.codeatstart(ctx, start)
467 >>> matcher.filename(ctx)
457 >>> matcher.filename(ctx)
468 >>> matcher.ignores(ctx)
458 >>> matcher.ignores(ctx)
469 False
459 False
470 >>> inside = b' > foo = 1\\n'
460 >>> inside = ' > foo = 1\\n'
471 >>> matcher.endsat(ctx, inside)
461 >>> matcher.endsat(ctx, inside)
472 False
462 False
473 >>> matcher.isinside(ctx, inside)
463 >>> matcher.isinside(ctx, inside)
474 True
464 True
475 >>> b2s(matcher.codeinside(ctx, inside))
465 >>> b2s(matcher.codeinside(ctx, inside))
476 'foo = 1\\n'
466 'foo = 1\\n'
477 >>> end = b' > EOF\\n'
467 >>> end = ' > EOF\\n'
478 >>> matcher.endsat(ctx, end)
468 >>> matcher.endsat(ctx, end)
479 True
469 True
480 >>> matcher.codeatend(ctx, end)
470 >>> matcher.codeatend(ctx, end)
481 >>> matcher.endsat(ctx, b' > EOFEOF\\n')
471 >>> matcher.endsat(ctx, ' > EOFEOF\\n')
482 False
472 False
483 >>> ctx = matcher.startsat(b' $ python << NO_CHECK_EOF\\n')
473 >>> ctx = matcher.startsat(' $ python << NO_CHECK_EOF\\n')
484 >>> matcher.ignores(ctx)
474 >>> matcher.ignores(ctx)
485 True
475 True
486 """
476 """
487 _prefix = b' > '
477 _prefix = ' > '
488
478
489 _startre = re.compile(br' \$ (\$PYTHON|"\$PYTHON"|python).*' +
479 _startre = re.compile(r' \$ (\$PYTHON|"\$PYTHON"|python).*' +
490 heredoclimitpat)
480 heredoclimitpat)
491
481
492 def __init__(self):
482 def __init__(self):
493 super(pyheredocmatcher, self).__init__(b"heredoc python invocation")
483 super(pyheredocmatcher, self).__init__("heredoc python invocation")
494
484
495 def startsat(self, line):
485 def startsat(self, line):
496 # ctx is END-LINE-OF-EMBEDDED-CODE
486 # ctx is END-LINE-OF-EMBEDDED-CODE
497 matched = self._startre.match(line)
487 matched = self._startre.match(line)
498 if matched:
488 if matched:
499 return b' > %s\n' % matched.group('limit')
489 return ' > %s\n' % matched.group('limit')
500
490
501 def endsat(self, ctx, line):
491 def endsat(self, ctx, line):
502 return ctx == line
492 return ctx == line
@@ -505,7 +495,7 b' class pyheredocmatcher(embeddedmatcher):'
505 return line.startswith(self._prefix)
495 return line.startswith(self._prefix)
506
496
507 def ignores(self, ctx):
497 def ignores(self, ctx):
508 return b' > %s\n' % heredocignorelimit == ctx
498 return ' > %s\n' % heredocignorelimit == ctx
509
499
510 def filename(self, ctx):
500 def filename(self, ctx):
511 return None # no filename
501 return None # no filename
@@ -524,7 +514,7 b' class pyheredocmatcher(embeddedmatcher):'
524 pyheredocmatcher(),
514 pyheredocmatcher(),
525 # use '[^<]+' instead of '\S+', in order to match against
515 # use '[^<]+' instead of '\S+', in order to match against
526 # paths including whitespaces
516 # paths including whitespaces
527 fileheredocmatcher(b'heredoc .py file', br'[^<]+\.py'),
517 fileheredocmatcher('heredoc .py file', r'[^<]+\.py'),
528 ]
518 ]
529
519
530 def pyembedded(basefile, lines, errors):
520 def pyembedded(basefile, lines, errors):
@@ -536,7 +526,7 b' def pyembedded(basefile, lines, errors):'
536 _shmatchers = [
526 _shmatchers = [
537 # use '[^<]+' instead of '\S+', in order to match against
527 # use '[^<]+' instead of '\S+', in order to match against
538 # paths including whitespaces
528 # paths including whitespaces
539 fileheredocmatcher(b'heredoc .sh file', br'[^<]+\.sh'),
529 fileheredocmatcher('heredoc .sh file', r'[^<]+\.sh'),
540 ]
530 ]
541
531
542 def shembedded(basefile, lines, errors):
532 def shembedded(basefile, lines, errors):
@@ -548,8 +538,8 b' def shembedded(basefile, lines, errors):'
548 _hgrcmatchers = [
538 _hgrcmatchers = [
549 # use '[^<]+' instead of '\S+', in order to match against
539 # use '[^<]+' instead of '\S+', in order to match against
550 # paths including whitespaces
540 # paths including whitespaces
551 fileheredocmatcher(b'heredoc hgrc file',
541 fileheredocmatcher('heredoc hgrc file',
552 br'(([^/<]+/)+hgrc|\$HGRCPATH|\${HGRCPATH})'),
542 r'(([^/<]+/)+hgrc|\$HGRCPATH|\${HGRCPATH})'),
553 ]
543 ]
554
544
555 def hgrcembedded(basefile, lines, errors):
545 def hgrcembedded(basefile, lines, errors):
@@ -565,14 +555,14 b' if __name__ == "__main__":'
565 errors = []
555 errors = []
566 for name, starts, ends, code in embeddedfunc(basefile, lines, errors):
556 for name, starts, ends, code in embeddedfunc(basefile, lines, errors):
567 if not name:
557 if not name:
568 name = b'<anonymous>'
558 name = '<anonymous>'
569 writeout(b"%s:%d: %s starts\n" % (basefile, starts, name))
559 writeout("%s:%d: %s starts\n" % (basefile, starts, name))
570 if opts.verbose and code:
560 if opts.verbose and code:
571 writeout(b" |%s\n" %
561 writeout(" |%s\n" %
572 b"\n |".join(l for l in code.splitlines()))
562 "\n |".join(l for l in code.splitlines()))
573 writeout(b"%s:%d: %s ends\n" % (basefile, ends, name))
563 writeout("%s:%d: %s ends\n" % (basefile, ends, name))
574 for e in errors:
564 for e in errors:
575 writeerr(b"%s\n" % e)
565 writeerr("%s\n" % e)
576 return len(errors)
566 return len(errors)
577
567
578 def applyembedded(args, embeddedfunc, opts):
568 def applyembedded(args, embeddedfunc, opts):
@@ -580,11 +570,11 b' if __name__ == "__main__":'
580 if args:
570 if args:
581 for f in args:
571 for f in args:
582 with opentext(f) as fp:
572 with opentext(f) as fp:
583 if showembedded(bytestr(f), fp, embeddedfunc, opts):
573 if showembedded(f, fp, embeddedfunc, opts):
584 ret = 1
574 ret = 1
585 else:
575 else:
586 lines = [l for l in stdin.readlines()]
576 lines = [l for l in sys.stdin.readlines()]
587 if showembedded(b'<stdin>', lines, embeddedfunc, opts):
577 if showembedded('<stdin>', lines, embeddedfunc, opts):
588 ret = 1
578 ret = 1
589 return ret
579 return ret
590
580
@@ -64,7 +64,6 b' editor = notepad'
64 ;relink =
64 ;relink =
65 ;schemes =
65 ;schemes =
66 ;share =
66 ;share =
67 ;shelve =
68 ;transplant =
67 ;transplant =
69 ;win32mbcs =
68 ;win32mbcs =
70 ;zeroconf =
69 ;zeroconf =
@@ -1,4 +1,4 b''
1 #compdef hg
1 #compdef hg chg
2
2
3 # Zsh completion script for mercurial. Rename this file to _hg and copy
3 # Zsh completion script for mercurial. Rename this file to _hg and copy
4 # it into your zsh function path (/usr/share/zsh/site-functions for
4 # it into your zsh function path (/usr/share/zsh/site-functions for
@@ -120,7 +120,7 b' def showdoc(ui):'
120
120
121 # print cmds
121 # print cmds
122 ui.write(minirst.section(_(b"Commands")))
122 ui.write(minirst.section(_(b"Commands")))
123 commandprinter(ui, table, minirst.subsection)
123 commandprinter(ui, table, minirst.subsection, minirst.subsubsection)
124
124
125 # print help topics
125 # print help topics
126 # The config help topic is included in the hgrc.5 man page.
126 # The config help topic is included in the hgrc.5 man page.
@@ -143,7 +143,8 b' def showdoc(ui):'
143 cmdtable = getattr(mod, 'cmdtable', None)
143 cmdtable = getattr(mod, 'cmdtable', None)
144 if cmdtable:
144 if cmdtable:
145 ui.write(minirst.subsubsection(_(b'Commands')))
145 ui.write(minirst.subsubsection(_(b'Commands')))
146 commandprinter(ui, cmdtable, minirst.subsubsubsection)
146 commandprinter(ui, cmdtable, minirst.subsubsubsection,
147 minirst.subsubsubsubsection)
147
148
148 def showtopic(ui, topic):
149 def showtopic(ui, topic):
149 extrahelptable = [
150 extrahelptable = [
@@ -177,7 +178,27 b' def helpprinter(ui, helptable, sectionfu'
177 ui.write(doc)
178 ui.write(doc)
178 ui.write(b"\n")
179 ui.write(b"\n")
179
180
180 def commandprinter(ui, cmdtable, sectionfunc):
181 def commandprinter(ui, cmdtable, sectionfunc, subsectionfunc):
182 """Render restructuredtext describing a list of commands and their
183 documentation, grouped by command category.
184
185 Args:
186 ui: UI object to write the output to
187 cmdtable: a dict that maps a string of the command name plus its aliases
188 (separated with pipes) to a 3-tuple of (the command's function, a list
189 of its option descriptions, and a string summarizing available
190 options). Example, with aliases added for demonstration purposes:
191
192 'phase|alias1|alias2': (
193 <function phase at 0x7f0816b05e60>,
194 [ ('p', 'public', False, 'set changeset phase to public'),
195 ...,
196 ('r', 'rev', [], 'target revision', 'REV')],
197 '[-p|-d|-s] [-f] [-r] [REV...]'
198 )
199 sectionfunc: minirst function to format command category headers
200 subsectionfunc: minirst function to format command headers
201 """
181 h = {}
202 h = {}
182 for c, attr in cmdtable.items():
203 for c, attr in cmdtable.items():
183 f = c.split(b"|")[0]
204 f = c.split(b"|")[0]
@@ -185,45 +206,76 b' def commandprinter(ui, cmdtable, section'
185 h[f] = c
206 h[f] = c
186 cmds = h.keys()
207 cmds = h.keys()
187
208
188 for f in sorted(cmds):
209 def helpcategory(cmd):
189 if f.startswith(b"debug"):
210 """Given a canonical command name from `cmds` (above), retrieve its
211 help category. If helpcategory is None, default to CATEGORY_NONE.
212 """
213 fullname = h[cmd]
214 details = cmdtable[fullname]
215 helpcategory = details[0].helpcategory
216 return helpcategory or help.registrar.command.CATEGORY_NONE
217
218 cmdsbycategory = {category: [] for category in help.CATEGORY_ORDER}
219 for cmd in cmds:
220 # If a command category wasn't registered, the command won't get
221 # rendered below, so we raise an AssertionError.
222 if helpcategory(cmd) not in cmdsbycategory:
223 raise AssertionError(
224 "The following command did not register its (category) in "
225 "help.CATEGORY_ORDER: %s (%s)" % (cmd, helpcategory(cmd)))
226 cmdsbycategory[helpcategory(cmd)].append(cmd)
227
228 # Print the help for each command. We present the commands grouped by
229 # category, and we use help.CATEGORY_ORDER as a guide for a helpful order
230 # in which to present the categories.
231 for category in help.CATEGORY_ORDER:
232 categorycmds = cmdsbycategory[category]
233 if not categorycmds:
234 # Skip empty categories
190 continue
235 continue
191 d = get_cmd(h[f], cmdtable)
236 # Print a section header for the category.
192 ui.write(sectionfunc(d[b'cmd']))
237 # For now, the category header is at the same level as the headers for
193 # short description
238 # the commands in the category; this is fixed in the next commit.
194 ui.write(d[b'desc'][0])
239 ui.write(sectionfunc(help.CATEGORY_NAMES[category]))
195 # synopsis
240 # Print each command in the category
196 ui.write(b"::\n\n")
241 for f in sorted(categorycmds):
197 synopsislines = d[b'synopsis'].splitlines()
242 if f.startswith(b"debug"):
198 for line in synopsislines:
243 continue
199 # some commands (such as rebase) have a multi-line
244 d = get_cmd(h[f], cmdtable)
245 ui.write(subsectionfunc(d[b'cmd']))
246 # short description
247 ui.write(d[b'desc'][0])
200 # synopsis
248 # synopsis
201 ui.write(b" %s\n" % line)
249 ui.write(b"::\n\n")
202 ui.write(b'\n')
250 synopsislines = d[b'synopsis'].splitlines()
203 # description
251 for line in synopsislines:
204 ui.write(b"%s\n\n" % d[b'desc'][1])
252 # some commands (such as rebase) have a multi-line
205 # options
253 # synopsis
206 opt_output = list(d[b'opts'])
254 ui.write(b" %s\n" % line)
207 if opt_output:
255 ui.write(b'\n')
208 opts_len = max([len(line[0]) for line in opt_output])
256 # description
209 ui.write(_(b"Options:\n\n"))
257 ui.write(b"%s\n\n" % d[b'desc'][1])
210 multioccur = False
258 # options
211 for optstr, desc in opt_output:
259 opt_output = list(d[b'opts'])
212 if desc:
260 if opt_output:
213 s = b"%-*s %s" % (opts_len, optstr, desc)
261 opts_len = max([len(line[0]) for line in opt_output])
214 else:
262 ui.write(_(b"Options:\n\n"))
215 s = optstr
263 multioccur = False
216 ui.write(b"%s\n" % s)
264 for optstr, desc in opt_output:
217 if optstr.endswith(b"[+]>"):
265 if desc:
218 multioccur = True
266 s = b"%-*s %s" % (opts_len, optstr, desc)
219 if multioccur:
267 else:
220 ui.write(_(b"\n[+] marked option can be specified"
268 s = optstr
221 b" multiple times\n"))
269 ui.write(b"%s\n" % s)
222 ui.write(b"\n")
270 if optstr.endswith(b"[+]>"):
223 # aliases
271 multioccur = True
224 if d[b'aliases']:
272 if multioccur:
225 ui.write(_(b" aliases: %s\n\n") % b" ".join(d[b'aliases']))
273 ui.write(_(b"\n[+] marked option can be specified"
226
274 b" multiple times\n"))
275 ui.write(b"\n")
276 # aliases
277 if d[b'aliases']:
278 ui.write(_(b" aliases: %s\n\n") % b" ".join(d[b'aliases']))
227
279
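The grouping pass added to `commandprinter` above boils down to this self-contained sketch; `CATEGORY_ORDER`/`CATEGORY_NAMES` here are toy stand-ins for the tables in `mercurial.help`::

    CATEGORY_ORDER = ['repo', 'wdir', 'none']
    CATEGORY_NAMES = {'repo': 'Repository maintenance',
                      'wdir': 'Working directory',
                      'none': 'Uncategorized'}
    helpcategories = {'verify': 'repo', 'status': 'wdir', 'version': 'none'}

    cmdsbycategory = {category: [] for category in CATEGORY_ORDER}
    for cmd, cat in helpcategories.items():
        if cat not in cmdsbycategory:    # unregistered category: hard failure
            raise AssertionError('%s not in CATEGORY_ORDER: %s' % (cat, cmd))
        cmdsbycategory[cat].append(cmd)

    for category in CATEGORY_ORDER:
        if not cmdsbycategory[category]:
            continue                     # skip empty categories
        print(CATEGORY_NAMES[category])
        for cmd in sorted(cmdsbycategory[category]):
            print('  ' + cmd)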
228 def allextensionnames():
280 def allextensionnames():
229 return set(extensions.enabled().keys()) | set(extensions.disabled().keys())
281 return set(extensions.enabled().keys()) | set(extensions.disabled().keys())
@@ -32,6 +32,8 b' import importlib.machinery'
32 import importlib.util
32 import importlib.util
33 import sys
33 import sys
34
34
35 from . import tracing
36
35 _deactivated = False
37 _deactivated = False
36
38
37 class _lazyloaderex(importlib.util.LazyLoader):
39 class _lazyloaderex(importlib.util.LazyLoader):
@@ -40,10 +42,11 b' class _lazyloaderex(importlib.util.LazyL'
40 """
42 """
41 def exec_module(self, module):
43 def exec_module(self, module):
42 """Make the module load lazily."""
44 """Make the module load lazily."""
43 if _deactivated or module.__name__ in ignores:
45 with tracing.log('demandimport %s', module):
44 self.loader.exec_module(module)
46 if _deactivated or module.__name__ in ignores:
45 else:
47 self.loader.exec_module(module)
46 super().exec_module(module)
48 else:
49 super().exec_module(module)
47
50
48 # This is 3.6+ because with Python 3.5 it isn't possible to lazily load
51 # This is 3.6+ because with Python 3.5 it isn't possible to lazily load
49 # extensions. See the discussion in https://bugs.python.org/issue26186 for more.
52 # extensions. See the discussion in https://bugs.python.org/issue26186 for more.
@@ -13,19 +13,23 b' import os'
13 _pipe = None
13 _pipe = None
14 _checked = False
14 _checked = False
15
15
16 @contextlib.contextmanager
16 def _isactive():
17 def log(whencefmt, *whenceargs):
18 global _pipe, _session, _checked
17 global _pipe, _session, _checked
19 if _pipe is None:
18 if _pipe is None:
20 if _checked:
19 if _checked:
21 yield
20 return False
22 return
23 _checked = True
21 _checked = True
24 if 'HGCATAPULTSERVERPIPE' not in os.environ:
22 if 'HGCATAPULTSERVERPIPE' not in os.environ:
25 yield
23 return False
26 return
27 _pipe = open(os.environ['HGCATAPULTSERVERPIPE'], 'w', 1)
24 _pipe = open(os.environ['HGCATAPULTSERVERPIPE'], 'w', 1)
28 _session = os.environ.get('HGCATAPULTSESSION', 'none')
25 _session = os.environ.get('HGCATAPULTSESSION', 'none')
26 return True
27
28 @contextlib.contextmanager
29 def log(whencefmt, *whenceargs):
30 if not _isactive():
31 yield
32 return
29 whence = whencefmt % whenceargs
33 whence = whencefmt % whenceargs
30 try:
34 try:
31 # Both writes to the pipe are wrapped in try/except to ignore
35 # Both writes to the pipe are wrapped in try/except to ignore
@@ -42,3 +46,13 b' def log(whencefmt, *whenceargs):'
42 _pipe.write('END %s %s\n' % (_session, whence))
46 _pipe.write('END %s %s\n' % (_session, whence))
43 except IOError:
47 except IOError:
44 pass
48 pass
49
50 def counter(label, amount, *labelargs):
51 if not _isactive():
52 return
53 l = label % labelargs
54 # See above in log() for why this is in a try/except.
55 try:
56 _pipe.write('COUNTER %s %d %s\n' % (_session, amount, l))
57 except IOError:
58 pass
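A standalone model of the pipe protocol implemented above: `log()` brackets a span with START/END records while `counter()` emits one COUNTER record. The START write is an assumption mirroring the END write shown; it sits in the elided part of `log()`::

    import contextlib

    _session = 'demo'
    _pipe = open('trace.out', 'w', 1)    # stands in for HGCATAPULTSERVERPIPE

    @contextlib.contextmanager
    def log(whencefmt, *whenceargs):
        whence = whencefmt % whenceargs
        _pipe.write('START %s %s\n' % (_session, whence))
        try:
            yield
        finally:
            _pipe.write('END %s %s\n' % (_session, whence))

    def counter(label, amount, *labelargs):
        _pipe.write('COUNTER %s %d %s\n'
                    % (_session, amount, label % labelargs))

    with log('loading %s', 'extensions'):
        counter('extensions loaded %s', 3, 'eagerly')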
@@ -871,7 +871,7 b' def _parsechunk(hunk):'
871 patchlines = mdiff.splitnewlines(buf.getvalue())
871 patchlines = mdiff.splitnewlines(buf.getvalue())
872 # hunk.prettystr() will update hunk.removed
872 # hunk.prettystr() will update hunk.removed
873 a2 = a1 + hunk.removed
873 a2 = a1 + hunk.removed
874 blines = [l[1:] for l in patchlines[1:] if l[0] != '-']
874 blines = [l[1:] for l in patchlines[1:] if not l.startswith('-')]
875 return path, (a1, a2, blines)
875 return path, (a1, a2, blines)
876
876
877 def overlaydiffcontext(ctx, chunks):
877 def overlaydiffcontext(ctx, chunks):
@@ -914,7 +914,10 b' def absorb(ui, repo, stack=None, targetc'
914 """
914 """
915 if stack is None:
915 if stack is None:
916 limit = ui.configint('absorb', 'max-stack-size')
916 limit = ui.configint('absorb', 'max-stack-size')
917 stack = getdraftstack(repo['.'], limit)
917 headctx = repo['.']
918 if len(headctx.parents()) > 1:
919 raise error.Abort(_('cannot absorb into a merge'))
920 stack = getdraftstack(headctx, limit)
918 if limit and len(stack) >= limit:
921 if limit and len(stack) >= limit:
919 ui.warn(_('absorb: only the recent %d changesets will '
922 ui.warn(_('absorb: only the recent %d changesets will '
920 'be analysed\n')
923 'be analysed\n')
@@ -932,7 +935,7 b' def absorb(ui, repo, stack=None, targetc'
932 if opts.get('interactive'):
935 if opts.get('interactive'):
933 diff = patch.diff(repo, stack[-1].node(), targetctx.node(), matcher)
936 diff = patch.diff(repo, stack[-1].node(), targetctx.node(), matcher)
934 origchunks = patch.parsepatch(diff)
937 origchunks = patch.parsepatch(diff)
935 chunks = cmdutil.recordfilter(ui, origchunks)[0]
938 chunks = cmdutil.recordfilter(ui, origchunks, matcher)[0]
936 targetctx = overlaydiffcontext(stack[-1], chunks)
939 targetctx = overlaydiffcontext(stack[-1], chunks)
937 fm = None
940 fm = None
938 if opts.get('print_changes') or not opts.get('apply_changes'):
941 if opts.get('print_changes') or not opts.get('apply_changes'):
@@ -81,10 +81,10 b' def _interestingfiles(repo, matcher):'
81
81
82 """
82 """
83 stat = repo.status(match=matcher)
83 stat = repo.status(match=matcher)
84 added = stat[1]
84 added = stat.added
85 removed = stat[2]
85 removed = stat.removed
86
86
87 copy = copies._forwardcopies(repo['.'], repo[None], matcher)
87 copy = copies.pathcopies(repo['.'], repo[None], matcher)
88 # remove the copy files for which we already have copy info
88 # remove the copy files for which we already have copy info
89 added = [f for f in added if f not in copy]
89 added = [f for f in added if f not in copy]
90
90
@@ -9,12 +9,14 b''
9 """log repository events to a blackbox for debugging
9 """log repository events to a blackbox for debugging
10
10
11 Logs event information to .hg/blackbox.log to help debug and diagnose problems.
11 Logs event information to .hg/blackbox.log to help debug and diagnose problems.
12 The events that get logged can be configured via the blackbox.track config key.
12 The events that get logged can be configured via the blackbox.track and
13 blackbox.ignore config keys.
13
14
14 Examples::
15 Examples::
15
16
16 [blackbox]
17 [blackbox]
17 track = *
18 track = *
19 ignore = pythonhook
18 # dirty is *EXPENSIVE* (slow);
20 # dirty is *EXPENSIVE* (slow);
19 # each log entry indicates `+` if the repository is dirty, like :hg:`id`.
21 # each log entry indicates `+` if the repository is dirty, like :hg:`id`.
20 dirty = True
22 dirty = True
@@ -84,6 +86,9 b" configitem('blackbox', 'maxfiles',"
84 configitem('blackbox', 'track',
86 configitem('blackbox', 'track',
85 default=lambda: ['*'],
87 default=lambda: ['*'],
86 )
88 )
89 configitem('blackbox', 'ignore',
90 default=lambda: ['chgserver', 'cmdserver', 'extension'],
91 )
87 configitem('blackbox', 'date-format',
92 configitem('blackbox', 'date-format',
88 default='%Y/%m/%d %H:%M:%S',
93 default='%Y/%m/%d %H:%M:%S',
89 )
94 )
@@ -94,12 +99,15 b' class blackboxlogger(object):'
94 def __init__(self, ui, repo):
99 def __init__(self, ui, repo):
95 self._repo = repo
100 self._repo = repo
96 self._trackedevents = set(ui.configlist('blackbox', 'track'))
101 self._trackedevents = set(ui.configlist('blackbox', 'track'))
102 self._ignoredevents = set(ui.configlist('blackbox', 'ignore'))
97 self._maxfiles = ui.configint('blackbox', 'maxfiles')
103 self._maxfiles = ui.configint('blackbox', 'maxfiles')
98 self._maxsize = ui.configbytes('blackbox', 'maxsize')
104 self._maxsize = ui.configbytes('blackbox', 'maxsize')
99 self._inlog = False
105 self._inlog = False
100
106
101 def tracked(self, event):
107 def tracked(self, event):
102 return b'*' in self._trackedevents or event in self._trackedevents
108 return ((b'*' in self._trackedevents
109 and event not in self._ignoredevents)
110 or event in self._trackedevents)
103
111
104 def log(self, ui, event, msg, opts):
112 def log(self, ui, event, msg, opts):
105 # self._log() -> ctx.dirty() may create new subrepo instance, which
113 # self._log() -> ctx.dirty() may create new subrepo instance, which
@@ -439,6 +439,11 b' def convert(ui, src, dest=None, revmapfi'
439 :convert.hg.sourcename: records the given string as a 'convert_source' extra
439 :convert.hg.sourcename: records the given string as a 'convert_source' extra
440 value on each commit made in the target repository. The default is None.
440 value on each commit made in the target repository. The default is None.
441
441
442 :convert.hg.preserve-hash: only works with Mercurial sources. Makes convert
443 skip an optimization of the list of modified files in commits when that
444 optimization would cause the hash of a commit to change.
445 The default is False.
446
442 All Destinations
447 All Destinations
443 ################
448 ################
444
449
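The sink reads this knob via ``ui.config('convert', 'hg.preserve-hash')`` (see the `mercurial_sink` hunk below), so a plausible way to enable it, with the hgrc spelling assumed from the option name, is::

    [convert]
    hg.preserve-hash = true

or as a one-off override, assuming the standard --config syntax::

    $ hg convert --config convert.hg.preserve-hash=true SOURCE DEST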
@@ -114,7 +114,7 b" SKIPREV = 'SKIP'"
114 class commit(object):
114 class commit(object):
115 def __init__(self, author, date, desc, parents, branch=None, rev=None,
115 def __init__(self, author, date, desc, parents, branch=None, rev=None,
116 extra=None, sortkey=None, saverev=True, phase=phases.draft,
116 extra=None, sortkey=None, saverev=True, phase=phases.draft,
117 optparents=None):
117 optparents=None, ctx=None):
118 self.author = author or 'unknown'
118 self.author = author or 'unknown'
119 self.date = date or '0 0'
119 self.date = date or '0 0'
120 self.desc = desc
120 self.desc = desc
@@ -126,6 +126,7 b' class commit(object):'
126 self.sortkey = sortkey
126 self.sortkey = sortkey
127 self.saverev = saverev
127 self.saverev = saverev
128 self.phase = phase
128 self.phase = phase
129 self.ctx = ctx # for hg to hg conversions
129
130
130 class converter_source(object):
131 class converter_source(object):
131 """Conversion source interface"""
132 """Conversion source interface"""
@@ -339,7 +339,11 b' class mercurial_sink(common.converter_si'
339 phases.phasenames[commit.phase], 'convert')
339 phases.phasenames[commit.phase], 'convert')
340
340
341 with self.repo.transaction("convert") as tr:
341 with self.repo.transaction("convert") as tr:
342 node = nodemod.hex(self.repo.commitctx(ctx))
342 if self.repo.ui.config('convert', 'hg.preserve-hash'):
343 origctx = commit.ctx
344 else:
345 origctx = None
346 node = nodemod.hex(self.repo.commitctx(ctx, origctx=origctx))
343
347
344 # If the node value has changed, but the phase is lower than
348 # If the node value has changed, but the phase is lower than
345 # draft, set it back to draft since it hasn't been exposed
349 # draft, set it back to draft since it hasn't been exposed
@@ -591,7 +595,8 b' class mercurial_source(common.converter_'
591 extra=ctx.extra(),
595 extra=ctx.extra(),
592 sortkey=ctx.rev(),
596 sortkey=ctx.rev(),
593 saverev=self.saverev,
597 saverev=self.saverev,
594 phase=ctx.phase())
598 phase=ctx.phase(),
599 ctx=ctx)
595
600
596 def numcommits(self):
601 def numcommits(self):
597 return len(self.repo)
602 return len(self.repo)
@@ -284,9 +284,9 b' class monotone_source(common.converter_s'
284 # d2 => d3
284 # d2 => d3
285 ignoremove[tofile] = 1
285 ignoremove[tofile] = 1
286 for tofile, fromfile in renamed.items():
286 for tofile, fromfile in renamed.items():
287 self.ui.debug (_("copying file in renamed directory "
287 self.ui.debug(
288 "from '%s' to '%s'")
288 "copying file in renamed directory from '%s' to '%s'"
289 % (fromfile, tofile), '\n')
289 % (fromfile, tofile), '\n')
290 files[tofile] = rev
290 files[tofile] = rev
291 copies[tofile] = fromfile
291 copies[tofile] = fromfile
292 for fromfile in renamed.values():
292 for fromfile in renamed.values():
@@ -370,4 +370,3 b' class monotone_source(common.converter_s'
370 self.mtnwritefp = None
370 self.mtnwritefp = None
371 self.mtnreadfp.close()
371 self.mtnreadfp.close()
372 self.mtnreadfp = None
372 self.mtnreadfp = None
373
@@ -1333,7 +1333,7 b' class svn_sink(converter_sink, commandli'
1333 rev = self.commit_re.search(output).group(1)
1333 rev = self.commit_re.search(output).group(1)
1334 except AttributeError:
1334 except AttributeError:
1335 if not files:
1335 if not files:
1336 return parents[0] if parents else None
1336 return parents[0] if parents else 'None'
1337 self.ui.warn(_('unexpected svn output:\n'))
1337 self.ui.warn(_('unexpected svn output:\n'))
1338 self.ui.warn(output)
1338 self.ui.warn(output)
1339 raise error.Abort(_('unable to cope with svn output'))
1339 raise error.Abort(_('unable to cope with svn output'))
@@ -400,7 +400,7 b' def reposetup(ui, repo):'
400 if wlock is not None:
400 if wlock is not None:
401 wlock.release()
401 wlock.release()
402
402
403 def commitctx(self, ctx, error=False):
403 def commitctx(self, ctx, error=False, origctx=None):
404 for f in sorted(ctx.added() + ctx.modified()):
404 for f in sorted(ctx.added() + ctx.modified()):
405 if not self._eolmatch(f):
405 if not self._eolmatch(f):
406 continue
406 continue
@@ -416,6 +416,6 b' def reposetup(ui, repo):'
416 if inconsistenteol(data):
416 if inconsistenteol(data):
417 raise errormod.Abort(_("inconsistent newline style "
417 raise errormod.Abort(_("inconsistent newline style "
418 "in %s\n") % f)
418 "in %s\n") % f)
419 return super(eolrepo, self).commitctx(ctx, error)
419 return super(eolrepo, self).commitctx(ctx, error, origctx)
420 repo.__class__ = eolrepo
420 repo.__class__ = eolrepo
421 repo._hgcleardirstate()
421 repo._hgcleardirstate()
@@ -8,6 +8,7 b''
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import bisect
10 import bisect
11 import io
11 import os
12 import os
12 import struct
13 import struct
13
14
@@ -246,7 +247,7 b' def getlastnode(path):'
246 hsh = None
247 hsh = None
247 try:
248 try:
248 with open(path, 'rb') as f:
249 with open(path, 'rb') as f:
249 f.seek(-_hshlen, 2)
250 f.seek(-_hshlen, io.SEEK_END)
250 if f.tell() > len(revmap.HEADER):
251 if f.tell() > len(revmap.HEADER):
251 hsh = f.read(_hshlen)
252 hsh = f.read(_hshlen)
252 except IOError:
253 except IOError:
@@ -72,12 +72,43 b" in a text file by ensuring that 'sort' r"
72 To account for changes made by each tool, the line numbers used for incremental
72 To account for changes made by each tool, the line numbers used for incremental
73 formatting are recomputed before executing the next tool. So, each tool may see
73 formatting are recomputed before executing the next tool. So, each tool may see
74 different values for the arguments added by the :linerange suboption.
74 different values for the arguments added by the :linerange suboption.
75
76 Each fixer tool is allowed to return some metadata in addition to the fixed file
77 content. The metadata must be placed before the file content on stdout,
78 separated from the file content by a zero byte. The metadata is parsed as a JSON
79 value (so, it should be UTF-8 encoded and contain no zero bytes). A fixer tool
80 is expected to produce this metadata encoding if and only if the :metadata
81 suboption is true::
82
83 [fix]
84 tool:command = tool --prepend-json-metadata
85 tool:metadata = true
86
87 The metadata values are passed to hooks, which can be used to print summaries or
88 perform other post-fixing work. The supported hooks are::
89
90 "postfixfile"
91 Run once for each file in each revision where any fixer tools made changes
92 to the file content. Provides "$HG_REV" and "$HG_PATH" to identify the file,
93 and "$HG_METADATA" with a map of fixer names to metadata values from fixer
94 tools that affected the file. Fixer tools that didn't affect the file have a
95 value of None. Only fixer tools that executed are present in the metadata.
96
97 "postfix"
98 Run once after all files and revisions have been handled. Provides
99 "$HG_REPLACEMENTS" with information about what revisions were created and
100 made obsolete. Provides a boolean "$HG_WDIRWRITTEN" to indicate whether any
101 files in the working copy were updated. Provides a map "$HG_METADATA"
102 mapping fixer tool names to lists of metadata values returned from
103 executions that modified a file. This aggregates the same metadata
104 previously passed to the "postfixfile" hook.
75 """
105 """
76
106
77 from __future__ import absolute_import
107 from __future__ import absolute_import
78
108
79 import collections
109 import collections
80 import itertools
110 import itertools
111 import json
81 import os
112 import os
82 import re
113 import re
83 import subprocess
114 import subprocess
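
To make the metadata protocol documented above concrete, here is a minimal sketch of a fixer tool that prepends JSON metadata to its output -- a hypothetical Python 3 script, not part of this change::

    #!/usr/bin/env python3
    # hypothetical fixer: upper-cases its input and reports a line count
    import json
    import sys

    data = sys.stdin.buffer.read()   # original file content on stdin
    fixed = data.upper()             # the "fix" itself
    meta = json.dumps({'linecount': fixed.count(b'\n')})
    # JSON metadata first, then a single zero byte, then the fixed
    # content; exiting with status 0 tells hg to accept the new content
    sys.stdout.buffer.write(meta.encode('utf-8') + b'\0' + fixed)

With tool:command pointing at such a script and tool:metadata = true, the 'linecount' value would surface in "$HG_METADATA" for the hooks described in the help text above.
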
@@ -117,13 +148,14 b' command = registrar.command(cmdtable)'
117 configtable = {}
148 configtable = {}
118 configitem = registrar.configitem(configtable)
149 configitem = registrar.configitem(configtable)
119
150
120 # Register the suboptions allowed for each configured fixer.
151 # Register the suboptions allowed for each configured fixer, and their default values.
121 FIXER_ATTRS = {
152 FIXER_ATTRS = {
122 'command': None,
153 'command': None,
123 'linerange': None,
154 'linerange': None,
124 'fileset': None,
155 'fileset': None,
125 'pattern': None,
156 'pattern': None,
126 'priority': 0,
157 'priority': 0,
158 'metadata': False,
127 }
159 }
128
160
129 for key, default in FIXER_ATTRS.items():
161 for key, default in FIXER_ATTRS.items():
@@ -201,10 +233,12 b' def fix(ui, repo, *pats, **opts):'
201 for rev, path in items:
233 for rev, path in items:
202 ctx = repo[rev]
234 ctx = repo[rev]
203 olddata = ctx[path].data()
235 olddata = ctx[path].data()
204 newdata = fixfile(ui, opts, fixers, ctx, path, basectxs[rev])
236 metadata, newdata = fixfile(ui, opts, fixers, ctx, path,
237 basectxs[rev])
205 # Don't waste memory/time passing unchanged content back, but
238 # Don't waste memory/time passing unchanged content back, but
206 # produce one result per item either way.
239 # produce one result per item either way.
207 yield (rev, path, newdata if newdata != olddata else None)
240 yield (rev, path, metadata,
241 newdata if newdata != olddata else None)
208 results = worker.worker(ui, 1.0, getfixes, tuple(), workqueue,
242 results = worker.worker(ui, 1.0, getfixes, tuple(), workqueue,
209 threadsafe=False)
243 threadsafe=False)
210
244
@@ -215,15 +249,25 b' def fix(ui, repo, *pats, **opts):'
215 # the tests deterministic. It might also be considered a feature since
249 # the tests deterministic. It might also be considered a feature since
216 # it makes the results more easily reproducible.
250 # it makes the results more easily reproducible.
217 filedata = collections.defaultdict(dict)
251 filedata = collections.defaultdict(dict)
252 aggregatemetadata = collections.defaultdict(list)
218 replacements = {}
253 replacements = {}
219 wdirwritten = False
254 wdirwritten = False
220 commitorder = sorted(revstofix, reverse=True)
255 commitorder = sorted(revstofix, reverse=True)
221 with ui.makeprogress(topic=_('fixing'), unit=_('files'),
256 with ui.makeprogress(topic=_('fixing'), unit=_('files'),
222 total=sum(numitems.values())) as progress:
257 total=sum(numitems.values())) as progress:
223 for rev, path, newdata in results:
258 for rev, path, filerevmetadata, newdata in results:
224 progress.increment(item=path)
259 progress.increment(item=path)
260 for fixername, fixermetadata in filerevmetadata.items():
261 aggregatemetadata[fixername].append(fixermetadata)
225 if newdata is not None:
262 if newdata is not None:
226 filedata[rev][path] = newdata
263 filedata[rev][path] = newdata
264 hookargs = {
265 'rev': rev,
266 'path': path,
267 'metadata': filerevmetadata,
268 }
269 repo.hook('postfixfile', throw=False,
270 **pycompat.strkwargs(hookargs))
227 numitems[rev] -= 1
271 numitems[rev] -= 1
228 # Apply the fixes for this and any other revisions that are
272 # Apply the fixes for this and any other revisions that are
229 # ready and sitting at the front of the queue. Using a loop here
273 # ready and sitting at the front of the queue. Using a loop here
@@ -240,6 +284,12 b' def fix(ui, repo, *pats, **opts):'
240 del filedata[rev]
284 del filedata[rev]
241
285
242 cleanup(repo, replacements, wdirwritten)
286 cleanup(repo, replacements, wdirwritten)
287 hookargs = {
288 'replacements': replacements,
289 'wdirwritten': wdirwritten,
290 'metadata': aggregatemetadata,
291 }
292 repo.hook('postfix', throw=True, **pycompat.strkwargs(hookargs))
243
293
244 def cleanup(repo, replacements, wdirwritten):
294 def cleanup(repo, replacements, wdirwritten):
245 """Calls scmutil.cleanupnodes() with the given replacements.
295 """Calls scmutil.cleanupnodes() with the given replacements.
@@ -491,6 +541,7 b' def fixfile(ui, opts, fixers, fixctx, pa'
491 A fixer tool's stdout will become the file's new content if and only if it
541 A fixer tool's stdout will become the file's new content if and only if it
492 exits with code zero.
542 exits with code zero.
493 """
543 """
544 metadata = {}
494 newdata = fixctx[path].data()
545 newdata = fixctx[path].data()
495 for fixername, fixer in fixers.iteritems():
546 for fixername, fixer in fixers.iteritems():
496 if fixer.affects(opts, fixctx, path):
547 if fixer.affects(opts, fixctx, path):
@@ -506,9 +557,20 b' def fixfile(ui, opts, fixers, fixctx, pa'
506 stdin=subprocess.PIPE,
557 stdin=subprocess.PIPE,
507 stdout=subprocess.PIPE,
558 stdout=subprocess.PIPE,
508 stderr=subprocess.PIPE)
559 stderr=subprocess.PIPE)
509 newerdata, stderr = proc.communicate(newdata)
560 stdout, stderr = proc.communicate(newdata)
510 if stderr:
561 if stderr:
511 showstderr(ui, fixctx.rev(), fixername, stderr)
562 showstderr(ui, fixctx.rev(), fixername, stderr)
563 newerdata = stdout
564 if fixer.shouldoutputmetadata():
565 try:
566 metadatajson, newerdata = stdout.split('\0', 1)
567 metadata[fixername] = json.loads(metadatajson)
568 except ValueError:
569 ui.warn(_('ignored invalid output from fixer tool: %s\n') %
570 (fixername,))
571 continue
572 else:
573 metadata[fixername] = None
512 if proc.returncode == 0:
574 if proc.returncode == 0:
513 newdata = newerdata
575 newdata = newerdata
514 else:
576 else:
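
The splitting logic above can be exercised in isolation: for well-formed tool output, the first zero byte separates the JSON document from the file body::

    import json

    stdout = b'{"linecount": 2}\x00fixed line 1\nfixed line 2\n'
    metadatajson, newerdata = stdout.split(b'\0', 1)
    assert json.loads(metadatajson.decode('utf-8')) == {'linecount': 2}
    assert newerdata.startswith(b'fixed line 1')
    # output with no zero byte raises ValueError, which the code above
    # reports as "ignored invalid output from fixer tool"
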
@@ -519,7 +581,7 b' def fixfile(ui, opts, fixers, fixctx, pa'
519 ui, _('no fixes will be applied'),
581 ui, _('no fixes will be applied'),
520 hint=_('use --config fix.failure=continue to apply any '
582 hint=_('use --config fix.failure=continue to apply any '
521 'successful fixes anyway'))
583 'successful fixes anyway'))
522 return newdata
584 return metadata, newdata
523
585
524 def showstderr(ui, rev, fixername, stderr):
586 def showstderr(ui, rev, fixername, stderr):
525 """Writes the lines of the stderr string as warnings on the ui
587 """Writes the lines of the stderr string as warnings on the ui
@@ -667,6 +729,10 b' class Fixer(object):'
667 """Should this fixer run on the file at the given path and context?"""
729 """Should this fixer run on the file at the given path and context?"""
668 return scmutil.match(fixctx, [self._pattern], opts)(path)
730 return scmutil.match(fixctx, [self._pattern], opts)(path)
669
731
732 def shouldoutputmetadata(self):
733 """Should the stdout of this fixer start with JSON and a null byte?"""
734 return self._metadata
735
670 def command(self, ui, path, rangesfn):
736 def command(self, ui, path, rangesfn):
671 """A shell command to use to invoke this fixer on the given file/lines
737 """A shell command to use to invoke this fixer on the given file/lines
672
738
@@ -192,12 +192,15 b' def am(ui, repo, *args, **kwargs):'
192 def apply(ui, repo, *args, **kwargs):
192 def apply(ui, repo, *args, **kwargs):
193 cmdoptions = [
193 cmdoptions = [
194 ('p', 'p', int, ''),
194 ('p', 'p', int, ''),
195 ('', 'directory', '', ''),
195 ]
196 ]
196 args, opts = parseoptions(ui, cmdoptions, args)
197 args, opts = parseoptions(ui, cmdoptions, args)
197
198
198 cmd = Command('import --no-commit')
199 cmd = Command('import --no-commit')
199 if (opts.get('p')):
200 if (opts.get('p')):
200 cmd['-p'] = opts.get('p')
201 cmd['-p'] = opts.get('p')
202 if opts.get('directory'):
203 cmd['--prefix'] = opts.get('directory')
201 cmd.extend(args)
204 cmd.extend(args)
202
205
203 ui.status((bytes(cmd)), "\n")
206 ui.status((bytes(cmd)), "\n")
@@ -681,6 +684,7 b' def mergetool(ui, repo, *args, **kwargs)'
681 def mv(ui, repo, *args, **kwargs):
684 def mv(ui, repo, *args, **kwargs):
682 cmdoptions = [
685 cmdoptions = [
683 ('f', 'force', None, ''),
686 ('f', 'force', None, ''),
687 ('n', 'dry-run', None, ''),
684 ]
688 ]
685 args, opts = parseoptions(ui, cmdoptions, args)
689 args, opts = parseoptions(ui, cmdoptions, args)
686
690
@@ -689,6 +693,8 b' def mv(ui, repo, *args, **kwargs):'
689
693
690 if opts.get('force'):
694 if opts.get('force'):
691 cmd['-f'] = None
695 cmd['-f'] = None
696 if opts.get('dry_run'):
697 cmd['-n'] = None
692
698
693 ui.status((bytes(cmd)), "\n")
699 ui.status((bytes(cmd)), "\n")
694
700
@@ -917,6 +923,7 b' def show(ui, repo, *args, **kwargs):'
917
923
918 def stash(ui, repo, *args, **kwargs):
924 def stash(ui, repo, *args, **kwargs):
919 cmdoptions = [
925 cmdoptions = [
926 ('p', 'patch', None, ''),
920 ]
927 ]
921 args, opts = parseoptions(ui, cmdoptions, args)
928 args, opts = parseoptions(ui, cmdoptions, args)
922
929
@@ -925,6 +932,17 b' def stash(ui, repo, *args, **kwargs):'
925
932
926 if action == 'list':
933 if action == 'list':
927 cmd['-l'] = None
934 cmd['-l'] = None
935 if opts.get('patch'):
936 cmd['-p'] = None
937 elif action == 'show':
938 if opts.get('patch'):
939 cmd['-p'] = None
940 else:
941 cmd['--stat'] = None
942 if len(args) > 1:
943 cmd.append(args[1])
944 elif action == 'clear':
945 cmd['--cleanup'] = None
928 elif action == 'drop':
946 elif action == 'drop':
929 cmd['-d'] = None
947 cmd['-d'] = None
930 if len(args) > 1:
948 if len(args) > 1:
@@ -937,10 +955,9 b' def stash(ui, repo, *args, **kwargs):'
937 cmd.append(args[1])
955 cmd.append(args[1])
938 if action == 'apply':
956 if action == 'apply':
939 cmd['--keep'] = None
957 cmd['--keep'] = None
940 elif (action == 'branch' or action == 'show' or action == 'clear'
958 elif action == 'branch' or action == 'create':
941 or action == 'create'):
942 ui.status(_("note: Mercurial doesn't have equivalents to the "
959 ui.status(_("note: Mercurial doesn't have equivalents to the "
943 "git stash branch, show, clear, or create actions\n\n"))
960 "git stash branch or create actions\n\n"))
944 return
961 return
945 else:
962 else:
946 if len(args) > 0:
963 if len(args) > 0:
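
With these additions the stash action table is more complete: assuming the base command in this helper is `hg shelve`, `git stash show -p` would now suggest `hg shelve -p`, `git stash show` would suggest `hg shelve --stat`, and `git stash clear` would suggest `hg shelve --cleanup`, instead of all three falling into the "no equivalent" notice. The exact suggested commands here are inferred from the option mapping above.
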
@@ -49,6 +49,11 b" configitem('gpg', '.*',"
49
49
50 # Custom help category
50 # Custom help category
51 _HELP_CATEGORY = 'gpg'
51 _HELP_CATEGORY = 'gpg'
52 help.CATEGORY_ORDER.insert(
53 help.CATEGORY_ORDER.index(registrar.command.CATEGORY_HELP),
54 _HELP_CATEGORY
55 )
56 help.CATEGORY_NAMES[_HELP_CATEGORY] = 'Signing changes (GPG)'
52
57
53 class gpg(object):
58 class gpg(object):
54 def __init__(self, path, key=None):
59 def __init__(self, path, key=None):
@@ -1079,6 +1079,8 b' def movecursor(state, oldpos, newpos):'
1079 def changemode(state, mode):
1079 def changemode(state, mode):
1080 curmode, _ = state['mode']
1080 curmode, _ = state['mode']
1081 state['mode'] = (mode, curmode)
1081 state['mode'] = (mode, curmode)
1082 if mode == MODE_PATCH:
1083 state['modes'][MODE_PATCH]['patchcontents'] = patchcontents(state)
1082
1084
1083 def makeselection(state, pos):
1085 def makeselection(state, pos):
1084 state['selected'] = pos
1086 state['selected'] = pos
@@ -1134,7 +1136,7 b' def changeview(state, delta, unit):'
1134 if mode != MODE_PATCH:
1136 if mode != MODE_PATCH:
1135 return
1137 return
1136 mode_state = state['modes'][mode]
1138 mode_state = state['modes'][mode]
1137 num_lines = len(patchcontents(state))
1139 num_lines = len(mode_state['patchcontents'])
1138 page_height = state['page_height']
1140 page_height = state['page_height']
1139 unit = page_height if unit == 'page' else 1
1141 unit = page_height if unit == 'page' else 1
1140 num_pages = 1 + (num_lines - 1) / page_height
1142 num_pages = 1 + (num_lines - 1) / page_height
@@ -1227,15 +1229,25 b' def addln(win, y, x, line, color=None):'
1227 else:
1229 else:
1228 win.addstr(y, x, line)
1230 win.addstr(y, x, line)
1229
1231
1232 def _trunc_head(line, n):
1233 if len(line) <= n:
1234 return line
1235 return '> ' + line[-(n - 2):]
1236 def _trunc_tail(line, n):
1237 if len(line) <= n:
1238 return line
1239 return line[:n - 2] + ' >'
1240
1230 def patchcontents(state):
1241 def patchcontents(state):
1231 repo = state['repo']
1242 repo = state['repo']
1232 rule = state['rules'][state['pos']]
1243 rule = state['rules'][state['pos']]
1233 repo.ui.verbose = True
1234 displayer = logcmdutil.changesetdisplayer(repo.ui, repo, {
1244 displayer = logcmdutil.changesetdisplayer(repo.ui, repo, {
1235 "patch": True, "template": "status"
1245 "patch": True, "template": "status"
1236 }, buffered=True)
1246 }, buffered=True)
1237 displayer.show(rule.ctx)
1247 overrides = {('ui', 'verbose'): True}
1238 displayer.close()
1248 with repo.ui.configoverride(overrides, source='histedit'):
1249 displayer.show(rule.ctx)
1250 displayer.close()
1239 return displayer.hunk[rule.ctx.rev()].splitlines()
1251 return displayer.hunk[rule.ctx.rev()].splitlines()
1240
1252
1241 def _chisteditmain(repo, rules, stdscr):
1253 def _chisteditmain(repo, rules, stdscr):
@@ -1283,11 +1295,23 b' def _chisteditmain(repo, rules, stdscr):'
1283 line = "bookmark: {0}".format(' '.join(bms))
1295 line = "bookmark: {0}".format(' '.join(bms))
1284 win.addstr(3, 1, line[:length])
1296 win.addstr(3, 1, line[:length])
1285
1297
1286 line = "files: {0}".format(','.join(ctx.files()))
1298 line = "summary: {0}".format(ctx.description().splitlines()[0])
1287 win.addstr(4, 1, line[:length])
1299 win.addstr(4, 1, line[:length])
1288
1300
1289 line = "summary: {0}".format(ctx.description().splitlines()[0])
1301 line = "files: "
1290 win.addstr(5, 1, line[:length])
1302 win.addstr(5, 1, line)
1303 fnx = 1 + len(line)
1304 fnmaxx = length - fnx + 1
1305 y = 5
1306 fnmaxn = maxy - (1 + y) - 1
1307 files = ctx.files()
1308 for i, line1 in enumerate(files):
1309 if len(files) > fnmaxn and i == fnmaxn - 1:
1310 win.addstr(y, fnx, _trunc_tail(','.join(files[i:]), fnmaxx))
1311 y = y + 1
1312 break
1313 win.addstr(y, fnx, _trunc_head(line1, fnmaxx))
1314 y = y + 1
1291
1315
1292 conflicts = rule.conflicts
1316 conflicts = rule.conflicts
1293 if len(conflicts) > 0:
1317 if len(conflicts) > 0:
@@ -1296,7 +1320,7 b' def _chisteditmain(repo, rules, stdscr):'
1296 else:
1320 else:
1297 conflictstr = 'no overlap'
1321 conflictstr = 'no overlap'
1298
1322
1299 win.addstr(6, 1, conflictstr[:length])
1323 win.addstr(y, 1, conflictstr[:length])
1300 win.noutrefresh()
1324 win.noutrefresh()
1301
1325
1302 def helplines(mode):
1326 def helplines(mode):
@@ -1372,15 +1396,16 b' pgup/K: move patch up, pgdn/J: move patc'
1372
1396
1373 def renderpatch(win, state):
1397 def renderpatch(win, state):
1374 start = state['modes'][MODE_PATCH]['line_offset']
1398 start = state['modes'][MODE_PATCH]['line_offset']
1375 renderstring(win, state, patchcontents(state)[start:], diffcolors=True)
1399 content = state['modes'][MODE_PATCH]['patchcontents']
1400 renderstring(win, state, content[start:], diffcolors=True)
1376
1401
1377 def layout(mode):
1402 def layout(mode):
1378 maxy, maxx = stdscr.getmaxyx()
1403 maxy, maxx = stdscr.getmaxyx()
1379 helplen = len(helplines(mode))
1404 helplen = len(helplines(mode))
1380 return {
1405 return {
1381 'commit': (8, maxx),
1406 'commit': (12, maxx),
1382 'help': (helplen, maxx),
1407 'help': (helplen, maxx),
1383 'main': (maxy - helplen - 8, maxx),
1408 'main': (maxy - helplen - 12, maxx),
1384 }
1409 }
1385
1410
1386 def drawvertwin(size, y, x):
1411 def drawvertwin(size, y, x):
@@ -1894,6 +1919,14 b' def _aborthistedit(ui, repo, state, noba'
1894 finally:
1919 finally:
1895 state.clear()
1920 state.clear()
1896
1921
1922 def hgaborthistedit(ui, repo):
1923 state = histeditstate(repo)
1924 nobackup = not ui.configbool('rewrite', 'backup-bundle')
1925 with repo.wlock() as wlock, repo.lock() as lock:
1926 state.wlock = wlock
1927 state.lock = lock
1928 _aborthistedit(ui, repo, state, nobackup=nobackup)
1929
1897 def _edithisteditplan(ui, repo, state, rules):
1930 def _edithisteditplan(ui, repo, state, rules):
1898 state.read()
1931 state.read()
1899 if not rules:
1932 if not rules:
@@ -2288,8 +2321,6 b' def summaryhook(ui, repo):'
2288
2321
2289 def extsetup(ui):
2322 def extsetup(ui):
2290 cmdutil.summaryhooks.add('histedit', summaryhook)
2323 cmdutil.summaryhooks.add('histedit', summaryhook)
2291 cmdutil.unfinishedstates.append(
2324 statemod.addunfinished('histedit', fname='histedit-state', allowcommit=True,
2292 ['histedit-state', False, True, _('histedit in progress'),
2325 continueflag=True, abortfunc=hgaborthistedit)
2293 _("use 'hg histedit --continue' or 'hg histedit --abort'")])
2326
2294 cmdutil.afterresolvedstates.append(
2295 ['histedit-state', _('hg histedit --continue')])
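
The registration above replaces the old tuple appended to cmdutil.unfinishedstates. Reading the keyword arguments off the call site, the new API can be sketched as follows (parameter meanings inferred from this diff, not from statemod's own documentation)::

    statemod.addunfinished(
        'histedit',                 # operation name reported to the user
        fname='histedit-state',     # state file marking "in progress"
        allowcommit=True,           # commits are permitted while unfinished
        continueflag=True,          # the command supports --continue
        abortfunc=hgaborthistedit,  # invoked to abort the operation
    )
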
@@ -785,8 +785,8 b' def reposetup(ui, repo):'
785 finally:
785 finally:
786 del self.commitctx
786 del self.commitctx
787
787
788 def kwcommitctx(self, ctx, error=False):
788 def kwcommitctx(self, ctx, error=False, origctx=None):
789 n = super(kwrepo, self).commitctx(ctx, error)
789 n = super(kwrepo, self).commitctx(ctx, error, origctx)
790 # no lock needed, only called from repo.commit() which already locks
790 # no lock needed, only called from repo.commit() which already locks
791 if not kwt.postcommit:
791 if not kwt.postcommit:
792 restrict = kwt.restrict
792 restrict = kwt.restrict
@@ -515,7 +515,7 b' def overridecalculateupdates(origfn, rep'
515 return actions, diverge, renamedelete
515 return actions, diverge, renamedelete
516
516
517 @eh.wrapfunction(merge, 'recordupdates')
517 @eh.wrapfunction(merge, 'recordupdates')
518 def mergerecordupdates(orig, repo, actions, branchmerge):
518 def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata):
519 if 'lfmr' in actions:
519 if 'lfmr' in actions:
520 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
520 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
521 for lfile, args, msg in actions['lfmr']:
521 for lfile, args, msg in actions['lfmr']:
@@ -526,7 +526,7 b' def mergerecordupdates(orig, repo, actio'
526 lfdirstate.add(lfile)
526 lfdirstate.add(lfile)
527 lfdirstate.write()
527 lfdirstate.write()
528
528
529 return orig(repo, actions, branchmerge)
529 return orig(repo, actions, branchmerge, getfiledata)
530
530
531 # Override filemerge to prompt the user about how they wish to merge
531 # Override filemerge to prompt the user about how they wish to merge
532 # largefiles. This will handle identical edits without prompting the user.
532 # largefiles. This will handle identical edits without prompting the user.
@@ -545,7 +545,8 b' def overridefilemerge(origfn, premerge, '
545 (dhash == ahash or
545 (dhash == ahash or
546 repo.ui.promptchoice(
546 repo.ui.promptchoice(
547 _('largefile %s has a merge conflict\nancestor was %s\n'
547 _('largefile %s has a merge conflict\nancestor was %s\n'
548 'keep (l)ocal %s or\ntake (o)ther %s?'
548 'you can keep (l)ocal %s or take (o)ther %s.\n'
549 'what do you want to do?'
549 '$$ &Local $$ &Other') %
550 '$$ &Local $$ &Other') %
550 (lfutil.splitstandin(orig), ahash, dhash, ohash),
551 (lfutil.splitstandin(orig), ahash, dhash, ohash),
551 0) == 1)):
552 0) == 1)):
@@ -227,9 +227,9 b' def _reposetup(ui, repo):'
227
227
228 class lfsrepo(repo.__class__):
228 class lfsrepo(repo.__class__):
229 @localrepo.unfilteredmethod
229 @localrepo.unfilteredmethod
230 def commitctx(self, ctx, error=False):
230 def commitctx(self, ctx, error=False, origctx=None):
231 repo.svfs.options['lfstrack'] = _trackedmatcher(self)
231 repo.svfs.options['lfstrack'] = _trackedmatcher(self)
232 return super(lfsrepo, self).commitctx(ctx, error)
232 return super(lfsrepo, self).commitctx(ctx, error, origctx=origctx)
233
233
234 repo.__class__ = lfsrepo
234 repo.__class__ = lfsrepo
235
235
@@ -144,9 +144,21 b' except KeyError:'
144 stripext = extensions.load(dummyui(), 'strip', '')
144 stripext = extensions.load(dummyui(), 'strip', '')
145
145
146 strip = stripext.strip
146 strip = stripext.strip
147 checksubstate = stripext.checksubstate
147
148 checklocalchanges = stripext.checklocalchanges
148 def checksubstate(repo, baserev=None):
149
149 '''return list of subrepos at a different revision than substate.
150 Abort if any subrepos have uncommitted changes.'''
151 inclsubs = []
152 wctx = repo[None]
153 if baserev:
154 bctx = repo[baserev]
155 else:
156 bctx = wctx.p1()
157 for s in sorted(wctx.substate):
158 wctx.sub(s).bailifchanged(True)
159 if s not in bctx.substate or bctx.sub(s).dirty():
160 inclsubs.append(s)
161 return inclsubs
150
162
151 # Patch names looks like unix-file names.
163 # Patch names looks like unix-file names.
152 # They must be joinable with queue directory and result in the patch path.
164 # They must be joinable with queue directory and result in the patch path.
@@ -1149,7 +1161,19 b' class queue(object):'
1149 # plain versions for i18n tool to detect them
1161 # plain versions for i18n tool to detect them
1150 _("local changes found, qrefresh first")
1162 _("local changes found, qrefresh first")
1151 _("local changed subrepos found, qrefresh first")
1163 _("local changed subrepos found, qrefresh first")
1152 return checklocalchanges(repo, force, excsuffix)
1164
1165 s = repo.status()
1166 if not force:
1167 cmdutil.checkunfinished(repo)
1168 if s.modified or s.added or s.removed or s.deleted:
1169 _("local changes found") # i18n tool detection
1170 raise error.Abort(_("local changes found" + excsuffix))
1171 if checksubstate(repo):
1172 _("local changed subrepos found") # i18n tool detection
1173 raise error.Abort(_("local changed subrepos found" + excsuffix))
1174 else:
1175 cmdutil.checkunfinished(repo, skipmerge=True)
1176 return s
1153
1177
1154 _reserved = ('series', 'status', 'guards', '.', '..')
1178 _reserved = ('series', 'status', 'guards', '.', '..')
1155 def checkreservedname(self, name):
1179 def checkreservedname(self, name):
@@ -51,21 +51,25 b' def getbundlechangegrouppart_narrow(bund'
51 assert repo.ui.configbool('experimental', 'narrowservebrokenellipses')
51 assert repo.ui.configbool('experimental', 'narrowservebrokenellipses')
52
52
53 cgversions = b2caps.get('changegroup')
53 cgversions = b2caps.get('changegroup')
54 if cgversions: # 3.1 and 3.2 ship with an empty value
54 cgversions = [v for v in cgversions
55 cgversions = [v for v in cgversions
55 if v in changegroup.supportedoutgoingversions(repo)]
56 if v in changegroup.supportedoutgoingversions(repo)]
56 if not cgversions:
57 if not cgversions:
57 raise ValueError(_('no common changegroup version'))
58 raise ValueError(_('no common changegroup version'))
58 version = max(cgversions)
59 version = max(cgversions)
60 else:
61 raise ValueError(_("server does not advertise changegroup version,"
62 " can't negotiate support for ellipsis nodes"))
63
59
64 include = sorted(filter(bool, kwargs.get(r'includepats', [])))
60 oldinclude = sorted(filter(bool, kwargs.get(r'oldincludepats', [])))
65 exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
61 oldexclude = sorted(filter(bool, kwargs.get(r'oldexcludepats', [])))
66 newmatch = narrowspec.match(repo.root, include=include, exclude=exclude)
62 newinclude = sorted(filter(bool, kwargs.get(r'includepats', [])))
63 newexclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
64 known = {bin(n) for n in kwargs.get(r'known', [])}
65 generateellipsesbundle2(bundler, repo, oldinclude, oldexclude, newinclude,
66 newexclude, version, common, heads, known,
67 kwargs.get(r'depth', None))
67
68
68 depth = kwargs.get(r'depth', None)
69 def generateellipsesbundle2(bundler, repo, oldinclude, oldexclude, newinclude,
70 newexclude, version, common, heads, known, depth):
71 newmatch = narrowspec.match(repo.root, include=newinclude,
72 exclude=newexclude)
69 if depth is not None:
73 if depth is not None:
70 depth = int(depth)
74 depth = int(depth)
71 if depth < 1:
75 if depth < 1:
@@ -73,10 +77,7 b' def getbundlechangegrouppart_narrow(bund'
73
77
74 heads = set(heads or repo.heads())
78 heads = set(heads or repo.heads())
75 common = set(common or [nullid])
79 common = set(common or [nullid])
76 oldinclude = sorted(filter(bool, kwargs.get(r'oldincludepats', [])))
80 if known and (oldinclude != newinclude or oldexclude != newexclude):
77 oldexclude = sorted(filter(bool, kwargs.get(r'oldexcludepats', [])))
78 known = {bin(n) for n in kwargs.get(r'known', [])}
79 if known and (oldinclude != include or oldexclude != exclude):
80 # Steps:
81 # Steps:
81 # 1. Send kill for "$known & ::common"
82 # 1. Send kill for "$known & ::common"
82 #
83 #
@@ -146,7 +146,7 b' def pullbundle2extraprepare(orig, pullop'
146 kwargs['excludepats'] = exclude
146 kwargs['excludepats'] = exclude
147 # calculate known nodes only in ellipses cases because in non-ellipses cases
147 # calculate known nodes only in ellipses cases because in non-ellipses cases
148 # we have all the nodes
148 # we have all the nodes
149 if wireprototypes.ELLIPSESCAP in pullop.remote.capabilities():
149 if wireprototypes.ELLIPSESCAP1 in pullop.remote.capabilities():
150 kwargs['known'] = [node.hex(ctx.node()) for ctx in
150 kwargs['known'] = [node.hex(ctx.node()) for ctx in
151 repo.set('::%ln', pullop.common)
151 repo.set('::%ln', pullop.common)
152 if ctx.node() != node.nullid]
152 if ctx.node() != node.nullid]
@@ -216,7 +216,7 b' def _narrow(ui, repo, remote, commoninc,'
216 todelete.append(f)
216 todelete.append(f)
217 elif f.startswith('meta/'):
217 elif f.startswith('meta/'):
218 dir = f[5:-13]
218 dir = f[5:-13]
219 dirs = ['.'] + sorted(util.dirs({dir})) + [dir]
219 dirs = sorted(util.dirs({dir})) + [dir]
220 include = True
220 include = True
221 for d in dirs:
221 for d in dirs:
222 visit = newmatch.visitdir(d)
222 visit = newmatch.visitdir(d)
@@ -253,7 +253,14 b' def _widen(ui, repo, remote, commoninc, '
253 # then send that information to server whether we want ellipses or not.
253 # then send that information to server whether we want ellipses or not.
254 # Theoretically a non-ellipses repo should be able to use narrow
254 # Theoretically a non-ellipses repo should be able to use narrow
255 # functionality from an ellipses enabled server
255 # functionality from an ellipses enabled server
256 ellipsesremote = wireprototypes.ELLIPSESCAP in remote.capabilities()
256 remotecap = remote.capabilities()
257 ellipsesremote = any(cap in remotecap
258 for cap in wireprototypes.SUPPORTED_ELLIPSESCAP)
259
260 # check whether we are talking to a server which supports old version of
261 # ellipses capabilities
262 isoldellipses = (ellipsesremote and wireprototypes.ELLIPSESCAP1 in
263 remotecap and wireprototypes.ELLIPSESCAP not in remotecap)
257
264
258 def pullbundle2extraprepare_widen(orig, pullop, kwargs):
265 def pullbundle2extraprepare_widen(orig, pullop, kwargs):
259 orig(pullop, kwargs)
266 orig(pullop, kwargs)
@@ -271,19 +278,22 b' def _widen(ui, repo, remote, commoninc, '
271 # silence the devel-warning of applying an empty changegroup
278 # silence the devel-warning of applying an empty changegroup
272 overrides = {('devel', 'all-warnings'): False}
279 overrides = {('devel', 'all-warnings'): False}
273
280
281 common = commoninc[0]
274 with ui.uninterruptible():
282 with ui.uninterruptible():
275 common = commoninc[0]
276 if ellipsesremote:
283 if ellipsesremote:
277 ds = repo.dirstate
284 ds = repo.dirstate
278 p1, p2 = ds.p1(), ds.p2()
285 p1, p2 = ds.p1(), ds.p2()
279 with ds.parentchange():
286 with ds.parentchange():
280 ds.setparents(node.nullid, node.nullid)
287 ds.setparents(node.nullid, node.nullid)
288 if isoldellipses:
281 with wrappedextraprepare:
289 with wrappedextraprepare:
282 with repo.ui.configoverride(overrides, 'widen'):
290 exchange.pull(repo, remote, heads=common)
283 exchange.pull(repo, remote, heads=common)
284 with ds.parentchange():
285 ds.setparents(p1, p2)
286 else:
291 else:
292 known = []
293 if ellipsesremote:
294 known = [node.hex(ctx.node()) for ctx in
295 repo.set('::%ln', common)
296 if ctx.node() != node.nullid]
287 with remote.commandexecutor() as e:
297 with remote.commandexecutor() as e:
288 bundle = e.callcommand('narrow_widen', {
298 bundle = e.callcommand('narrow_widen', {
289 'oldincludes': oldincludes,
299 'oldincludes': oldincludes,
@@ -292,15 +302,20 b' def _widen(ui, repo, remote, commoninc, '
292 'newexcludes': newexcludes,
302 'newexcludes': newexcludes,
293 'cgversion': '03',
303 'cgversion': '03',
294 'commonheads': common,
304 'commonheads': common,
295 'known': [],
305 'known': known,
296 'ellipses': False,
306 'ellipses': ellipsesremote,
297 }).result()
307 }).result()
298
308
299 with repo.transaction('widening') as tr:
309 trmanager = exchange.transactionmanager(repo, 'widen', remote.url())
300 with repo.ui.configoverride(overrides, 'widen'):
310 with trmanager, repo.ui.configoverride(overrides, 'widen'):
301 tgetter = lambda: tr
311 op = bundle2.bundleoperation(repo, trmanager.transaction,
302 bundle2.processbundle(repo, bundle,
312 source='widen')
303 transactiongetter=tgetter)
313 # TODO: we should catch error.Abort here
314 bundle2.processbundle(repo, bundle, op=op)
315
316 if ellipsesremote:
317 with ds.parentchange():
318 ds.setparents(p1, p2)
304
319
305 with repo.transaction('widening'):
320 with repo.transaction('widening'):
306 repo.setnewnarrowpats()
321 repo.setnewnarrowpats()
@@ -16,21 +16,21 b' def wrapdirstate(repo, dirstate):'
16 """Add narrow spec dirstate ignore, block changes outside narrow spec."""
16 """Add narrow spec dirstate ignore, block changes outside narrow spec."""
17
17
18 def _editfunc(fn):
18 def _editfunc(fn):
19 def _wrapper(self, *args):
19 def _wrapper(self, *args, **kwargs):
20 narrowmatch = repo.narrowmatch()
20 narrowmatch = repo.narrowmatch()
21 for f in args:
21 for f in args:
22 if f is not None and not narrowmatch(f) and f not in self:
22 if f is not None and not narrowmatch(f) and f not in self:
23 raise error.Abort(_("cannot track '%s' - it is outside " +
23 raise error.Abort(_("cannot track '%s' - it is outside " +
24 "the narrow clone") % f)
24 "the narrow clone") % f)
25 return fn(self, *args)
25 return fn(self, *args, **kwargs)
26 return _wrapper
26 return _wrapper
27
27
28 class narrowdirstate(dirstate.__class__):
28 class narrowdirstate(dirstate.__class__):
29 # Prevent adding/editing/copying/deleting files that are outside the
29 # Prevent adding/editing/copying/deleting files that are outside the
30 # sparse checkout
30 # sparse checkout
31 @_editfunc
31 @_editfunc
32 def normal(self, *args):
32 def normal(self, *args, **kwargs):
33 return super(narrowdirstate, self).normal(*args)
33 return super(narrowdirstate, self).normal(*args, **kwargs)
34
34
35 @_editfunc
35 @_editfunc
36 def add(self, *args):
36 def add(self, *args):
@@ -37,7 +37,7 b' def outsidenarrow(context, mapping):'
37 repo = context.resource(mapping, 'repo')
37 repo = context.resource(mapping, 'repo')
38 ctx = context.resource(mapping, 'ctx')
38 ctx = context.resource(mapping, 'ctx')
39 m = repo.narrowmatch()
39 m = repo.narrowmatch()
40 if not m.always():
40 if ctx.files() and not m.always():
41 if not any(m(f) for f in ctx.files()):
41 if not any(m(f) for f in ctx.files()):
42 return 'outsidenarrow'
42 return 'outsidenarrow'
43 return ''
43 return ''
@@ -13,12 +13,15 b' from mercurial import ('
13 extensions,
13 extensions,
14 hg,
14 hg,
15 narrowspec,
15 narrowspec,
16 node as nodemod,
16 pycompat,
17 pycompat,
17 wireprototypes,
18 wireprototypes,
18 wireprotov1peer,
19 wireprotov1peer,
19 wireprotov1server,
20 wireprotov1server,
20 )
21 )
21
22
23 from . import narrowbundle2
24
22 def uisetup():
25 def uisetup():
23 wireprotov1peer.wirepeer.narrow_widen = peernarrowwiden
26 wireprotov1peer.wirepeer.narrow_widen = peernarrowwiden
24
27
@@ -69,21 +72,26 b' def narrow_widen(repo, proto, oldinclude'
69 narrowspec.validatepatterns(set(newexcludes))
72 narrowspec.validatepatterns(set(newexcludes))
70
73
71 common = wireprototypes.decodelist(commonheads)
74 common = wireprototypes.decodelist(commonheads)
72 known = None
75 known = wireprototypes.decodelist(known)
73 if known:
76 known = {nodemod.bin(n) for n in known}
74 known = wireprototypes.decodelist(known)
75 if ellipses == '0':
77 if ellipses == '0':
76 ellipses = False
78 ellipses = False
77 else:
79 else:
78 ellipses = bool(ellipses)
80 ellipses = bool(ellipses)
79 cgversion = cgversion
81 cgversion = cgversion
80 newmatch = narrowspec.match(repo.root, include=newincludes,
81 exclude=newexcludes)
82 oldmatch = narrowspec.match(repo.root, include=oldincludes,
83 exclude=oldexcludes)
84
82
85 bundler = bundle2.widen_bundle(repo, oldmatch, newmatch, common, known,
83 bundler = bundle2.bundle20(repo.ui)
86 cgversion, ellipses)
84 if not ellipses:
85 newmatch = narrowspec.match(repo.root, include=newincludes,
86 exclude=newexcludes)
87 oldmatch = narrowspec.match(repo.root, include=oldincludes,
88 exclude=oldexcludes)
89 bundle2.widen_bundle(bundler, repo, oldmatch, newmatch, common,
90 known, cgversion, ellipses)
91 else:
92 narrowbundle2.generateellipsesbundle2(bundler, repo, oldincludes,
93 oldexcludes, newincludes, newexcludes, cgversion, common,
94 list(common), known, None)
87 except error.Abort as exc:
95 except error.Abort as exc:
88 bundler = bundle2.bundle20(repo.ui)
96 bundler = bundle2.bundle20(repo.ui)
89 manargs = [('message', pycompat.bytestr(exc))]
97 manargs = [('message', pycompat.bytestr(exc))]
@@ -65,6 +65,7 b' from mercurial import ('
65 scmutil,
65 scmutil,
66 smartset,
66 smartset,
67 tags,
67 tags,
68 templatefilters,
68 templateutil,
69 templateutil,
69 url as urlmod,
70 url as urlmod,
70 util,
71 util,
@@ -124,8 +125,28 b' colortable = {'
124 )),
125 )),
125 ]
126 ]
126
127
127 def vcrcommand(name, flags, spec, helpcategory=None):
128 def vcrcommand(name, flags, spec, helpcategory=None, optionalrepo=False):
128 fullflags = flags + _VCR_FLAGS
129 fullflags = flags + _VCR_FLAGS
130 def hgmatcher(r1, r2):
131 if r1.uri != r2.uri or r1.method != r2.method:
132 return False
133 r1params = r1.body.split(b'&')
134 r2params = r2.body.split(b'&')
135 return set(r1params) == set(r2params)
136
137 def sanitiserequest(request):
138 request.body = re.sub(
139 r'cli-[a-z0-9]+',
140 r'cli-hahayouwish',
141 request.body
142 )
143 return request
144
145 def sanitiseresponse(response):
146 if r'set-cookie' in response[r'headers']:
147 del response[r'headers'][r'set-cookie']
148 return response
149
129 def decorate(fn):
150 def decorate(fn):
130 def inner(*args, **kwargs):
151 def inner(*args, **kwargs):
131 cassette = pycompat.fsdecode(kwargs.pop(r'test_vcr', None))
152 cassette = pycompat.fsdecode(kwargs.pop(r'test_vcr', None))
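
The custom matcher above compares form-encoded bodies as sets of parameters because urlencoding a dict does not guarantee a stable parameter order between a recorded cassette and a replayed request. A quick illustration (parameter values invented)::

    r1body = b'api.token=cli-hahayouwish&ids%5B0%5D=4480'
    r2body = b'ids%5B0%5D=4480&api.token=cli-hahayouwish'
    # byte-for-byte the bodies differ, but as parameter sets they match
    assert r1body != r2body
    assert set(r1body.split(b'&')) == set(r2body.split(b'&'))
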
@@ -136,18 +157,22 b' def vcrcommand(name, flags, spec, helpca'
136 import vcr.stubs as stubs
157 import vcr.stubs as stubs
137 vcr = vcrmod.VCR(
158 vcr = vcrmod.VCR(
138 serializer=r'json',
159 serializer=r'json',
160 before_record_request=sanitiserequest,
161 before_record_response=sanitiseresponse,
139 custom_patches=[
162 custom_patches=[
140 (urlmod, r'httpconnection',
163 (urlmod, r'httpconnection',
141 stubs.VCRHTTPConnection),
164 stubs.VCRHTTPConnection),
142 (urlmod, r'httpsconnection',
165 (urlmod, r'httpsconnection',
143 stubs.VCRHTTPSConnection),
166 stubs.VCRHTTPSConnection),
144 ])
167 ])
145 with vcr.use_cassette(cassette):
168 vcr.register_matcher(r'hgmatcher', hgmatcher)
169 with vcr.use_cassette(cassette, match_on=[r'hgmatcher']):
146 return fn(*args, **kwargs)
170 return fn(*args, **kwargs)
147 return fn(*args, **kwargs)
171 return fn(*args, **kwargs)
148 inner.__name__ = fn.__name__
172 inner.__name__ = fn.__name__
149 inner.__doc__ = fn.__doc__
173 inner.__doc__ = fn.__doc__
150 return command(name, fullflags, spec, helpcategory=helpcategory)(inner)
174 return command(name, fullflags, spec, helpcategory=helpcategory,
175 optionalrepo=optionalrepo)(inner)
151 return decorate
176 return decorate
152
177
153 def urlencodenested(params):
178 def urlencodenested(params):
@@ -174,24 +199,24 b' def urlencodenested(params):'
174 process(b'', params)
199 process(b'', params)
175 return util.urlreq.urlencode(flatparams)
200 return util.urlreq.urlencode(flatparams)
176
201
177 def readurltoken(repo):
202 def readurltoken(ui):
178 """return conduit url, token and make sure they exist
203 """return conduit url, token and make sure they exist
179
204
180 Currently read from [auth] config section. In the future, it might
205 Currently read from [auth] config section. In the future, it might
181 make sense to read from .arcconfig and .arcrc as well.
206 make sense to read from .arcconfig and .arcrc as well.
182 """
207 """
183 url = repo.ui.config(b'phabricator', b'url')
208 url = ui.config(b'phabricator', b'url')
184 if not url:
209 if not url:
185 raise error.Abort(_(b'config %s.%s is required')
210 raise error.Abort(_(b'config %s.%s is required')
186 % (b'phabricator', b'url'))
211 % (b'phabricator', b'url'))
187
212
188 res = httpconnectionmod.readauthforuri(repo.ui, url, util.url(url).user)
213 res = httpconnectionmod.readauthforuri(ui, url, util.url(url).user)
189 token = None
214 token = None
190
215
191 if res:
216 if res:
192 group, auth = res
217 group, auth = res
193
218
194 repo.ui.debug(b"using auth.%s.* for authentication\n" % group)
219 ui.debug(b"using auth.%s.* for authentication\n" % group)
195
220
196 token = auth.get(b'phabtoken')
221 token = auth.get(b'phabtoken')
197
222
@@ -201,15 +226,15 b' def readurltoken(repo):'
201
226
202 return url, token
227 return url, token
203
228
204 def callconduit(repo, name, params):
229 def callconduit(ui, name, params):
205 """call Conduit API, params is a dict. return json.loads result, or None"""
230 """call Conduit API, params is a dict. return json.loads result, or None"""
206 host, token = readurltoken(repo)
231 host, token = readurltoken(ui)
207 url, authinfo = util.url(b'/'.join([host, b'api', name])).authinfo()
232 url, authinfo = util.url(b'/'.join([host, b'api', name])).authinfo()
208 repo.ui.debug(b'Conduit Call: %s %s\n' % (url, pycompat.byterepr(params)))
233 ui.debug(b'Conduit Call: %s %s\n' % (url, pycompat.byterepr(params)))
209 params = params.copy()
234 params = params.copy()
210 params[b'api.token'] = token
235 params[b'api.token'] = token
211 data = urlencodenested(params)
236 data = urlencodenested(params)
212 curlcmd = repo.ui.config(b'phabricator', b'curlcmd')
237 curlcmd = ui.config(b'phabricator', b'curlcmd')
213 if curlcmd:
238 if curlcmd:
214 sin, sout = procutil.popen2(b'%s -d @- %s'
239 sin, sout = procutil.popen2(b'%s -d @- %s'
215 % (curlcmd, procutil.shellquote(url)))
240 % (curlcmd, procutil.shellquote(url)))
@@ -217,11 +242,11 b' def callconduit(repo, name, params):'
217 sin.close()
242 sin.close()
218 body = sout.read()
243 body = sout.read()
219 else:
244 else:
220 urlopener = urlmod.opener(repo.ui, authinfo)
245 urlopener = urlmod.opener(ui, authinfo)
221 request = util.urlreq.request(pycompat.strurl(url), data=data)
246 request = util.urlreq.request(pycompat.strurl(url), data=data)
222 with contextlib.closing(urlopener.open(request)) as rsp:
247 with contextlib.closing(urlopener.open(request)) as rsp:
223 body = rsp.read()
248 body = rsp.read()
224 repo.ui.debug(b'Conduit Response: %s\n' % body)
249 ui.debug(b'Conduit Response: %s\n' % body)
225 parsed = pycompat.rapply(
250 parsed = pycompat.rapply(
226 lambda x: encoding.unitolocal(x) if isinstance(x, pycompat.unicode)
251 lambda x: encoding.unitolocal(x) if isinstance(x, pycompat.unicode)
227 else x,
252 else x,
@@ -233,7 +258,7 b' def callconduit(repo, name, params):'
233 raise error.Abort(msg)
258 raise error.Abort(msg)
234 return parsed[b'result']
259 return parsed[b'result']
235
260
236 @vcrcommand(b'debugcallconduit', [], _(b'METHOD'))
261 @vcrcommand(b'debugcallconduit', [], _(b'METHOD'), optionalrepo=True)
237 def debugcallconduit(ui, repo, name):
262 def debugcallconduit(ui, repo, name):
238 """call Conduit API
263 """call Conduit API
239
264
@@ -250,7 +275,7 b' def debugcallconduit(ui, repo, name):'
250 # json.dumps only accepts unicode strings
275 # json.dumps only accepts unicode strings
251 result = pycompat.rapply(lambda x:
276 result = pycompat.rapply(lambda x:
252 encoding.unifromlocal(x) if isinstance(x, bytes) else x,
277 encoding.unifromlocal(x) if isinstance(x, bytes) else x,
253 callconduit(repo, name, params)
278 callconduit(ui, name, params)
254 )
279 )
255 s = json.dumps(result, sort_keys=True, indent=2, separators=(u',', u': '))
280 s = json.dumps(result, sort_keys=True, indent=2, separators=(u',', u': '))
256 ui.write(b'%s\n' % encoding.unitolocal(s))
281 ui.write(b'%s\n' % encoding.unitolocal(s))
@@ -264,7 +289,7 b' def getrepophid(repo):'
264 callsign = repo.ui.config(b'phabricator', b'callsign')
289 callsign = repo.ui.config(b'phabricator', b'callsign')
265 if not callsign:
290 if not callsign:
266 return None
291 return None
267 query = callconduit(repo, b'diffusion.repository.search',
292 query = callconduit(repo.ui, b'diffusion.repository.search',
268 {b'constraints': {b'callsigns': [callsign]}})
293 {b'constraints': {b'callsigns': [callsign]}})
269 if len(query[b'data']) == 0:
294 if len(query[b'data']) == 0:
270 return None
295 return None
@@ -320,7 +345,7 b' def getoldnodedrevmap(repo, nodelist):'
320 # Phabricator, and expect precursors overlap with it.
345 # Phabricator, and expect precursors overlap with it.
321 if toconfirm:
346 if toconfirm:
322 drevs = [drev for force, precs, drev in toconfirm.values()]
347 drevs = [drev for force, precs, drev in toconfirm.values()]
323 alldiffs = callconduit(unfi, b'differential.querydiffs',
348 alldiffs = callconduit(unfi.ui, b'differential.querydiffs',
324 {b'revisionIDs': drevs})
349 {b'revisionIDs': drevs})
325 getnode = lambda d: bin(
350 getnode = lambda d: bin(
326 getdiffmeta(d).get(b'node', b'')) or None
351 getdiffmeta(d).get(b'node', b'')) or None
@@ -370,7 +395,7 b' def creatediff(ctx):'
370 params = {b'diff': getdiff(ctx, mdiff.diffopts(git=True, context=32767))}
395 params = {b'diff': getdiff(ctx, mdiff.diffopts(git=True, context=32767))}
371 if repophid:
396 if repophid:
372 params[b'repositoryPHID'] = repophid
397 params[b'repositoryPHID'] = repophid
373 diff = callconduit(repo, b'differential.createrawdiff', params)
398 diff = callconduit(repo.ui, b'differential.createrawdiff', params)
374 if not diff:
399 if not diff:
375 raise error.Abort(_(b'cannot create diff for %s') % ctx)
400 raise error.Abort(_(b'cannot create diff for %s') % ctx)
376 return diff
401 return diff
@@ -380,35 +405,39 b' def writediffproperties(ctx, diff):'
380 params = {
405 params = {
381 b'diff_id': diff[b'id'],
406 b'diff_id': diff[b'id'],
382 b'name': b'hg:meta',
407 b'name': b'hg:meta',
383 b'data': json.dumps({
408 b'data': templatefilters.json({
384 u'user': encoding.unifromlocal(ctx.user()),
409 b'user': ctx.user(),
385 u'date': u'{:.0f} {}'.format(*ctx.date()),
410 b'date': b'%d %d' % ctx.date(),
386 u'node': encoding.unifromlocal(ctx.hex()),
411 b'branch': ctx.branch(),
387 u'parent': encoding.unifromlocal(ctx.p1().hex()),
412 b'node': ctx.hex(),
413 b'parent': ctx.p1().hex(),
388 }),
414 }),
389 }
415 }
390 callconduit(ctx.repo(), b'differential.setdiffproperty', params)
416 callconduit(ctx.repo().ui, b'differential.setdiffproperty', params)
391
417
392 params = {
418 params = {
393 b'diff_id': diff[b'id'],
419 b'diff_id': diff[b'id'],
394 b'name': b'local:commits',
420 b'name': b'local:commits',
395 b'data': json.dumps({
421 b'data': templatefilters.json({
396 encoding.unifromlocal(ctx.hex()): {
422 ctx.hex(): {
397 u'author': encoding.unifromlocal(stringutil.person(ctx.user())),
423 b'author': stringutil.person(ctx.user()),
398 u'authorEmail': encoding.unifromlocal(
424 b'authorEmail': stringutil.email(ctx.user()),
399 stringutil.email(ctx.user())),
425 b'time': int(ctx.date()[0]),
400 u'time': u'{:.0f}'.format(ctx.date()[0]),
426 b'commit': ctx.hex(),
427 b'parents': [ctx.p1().hex()],
428 b'branch': ctx.branch(),
401 },
429 },
402 }),
430 }),
403 }
431 }
404 callconduit(ctx.repo(), b'differential.setdiffproperty', params)
432 callconduit(ctx.repo().ui, b'differential.setdiffproperty', params)
405
433
406 def createdifferentialrevision(ctx, revid=None, parentrevid=None, oldnode=None,
434 def createdifferentialrevision(ctx, revid=None, parentrevphid=None,
407 olddiff=None, actions=None):
435 oldnode=None, olddiff=None, actions=None,
436 comment=None):
408 """create or update a Differential Revision
437 """create or update a Differential Revision
409
438
410 If revid is None, create a new Differential Revision, otherwise update
439 If revid is None, create a new Differential Revision, otherwise update
411 revid. If parentrevid is not None, set it as a dependency.
440 revid. If parentrevphid is not None, set it as a dependency.
412
441
413 If oldnode is not None, check if the patch content (without commit message
442 If oldnode is not None, check if the patch content (without commit message
414 and metadata) has changed before creating another diff.
443 and metadata) has changed before creating another diff.
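
For reference, the 'hg:meta' property serialized with templatefilters.json above would look roughly like the following; all field values here are invented for illustration::

    {
      "branch": "default",
      "date": "1556652519 0",
      "node": "98c9a2e5aa16c2c3b8e60a18d0f0b893c0e58823",
      "parent": "8c6a17a4d4b520dba4f9ec36b03d5c33b4b8c7a4",
      "user": "test <test@example.com>"
    }
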
@@ -427,6 +456,8 b' def createdifferentialrevision(ctx, revi'
427 if neednewdiff:
456 if neednewdiff:
428 diff = creatediff(ctx)
457 diff = creatediff(ctx)
429 transactions.append({b'type': b'update', b'value': diff[b'phid']})
458 transactions.append({b'type': b'update', b'value': diff[b'phid']})
459 if comment:
460 transactions.append({b'type': b'comment', b'value': comment})
430 else:
461 else:
431 # Even if we don't need to upload a new diff because the patch content
462 # Even if we don't need to upload a new diff because the patch content
432 # does not change. We might still need to update its metadata so
463 # does not change. We might still need to update its metadata so
@@ -435,21 +466,17 b' def createdifferentialrevision(ctx, revi'
435 diff = olddiff
466 diff = olddiff
436 writediffproperties(ctx, diff)
467 writediffproperties(ctx, diff)
437
468
438 # Use a temporary summary to set dependency. There might be better ways but
469 # Set the parent Revision every time, so commit re-ordering is picked-up
439 # I cannot find them for now. But do not do that if we are updating an
470 if parentrevphid:
440 # existing revision (revid is not None) since that introduces visible
471 transactions.append({b'type': b'parents.set',
441 # churns (someone edited "Summary" twice) on the web page.
472 b'value': [parentrevphid]})
442 if parentrevid and revid is None:
443 summary = b'Depends on D%d' % parentrevid
444 transactions += [{b'type': b'summary', b'value': summary},
445 {b'type': b'summary', b'value': b' '}]
446
473
447 if actions:
474 if actions:
448 transactions += actions
475 transactions += actions
449
476
450 # Parse commit message and update related fields.
477 # Parse commit message and update related fields.
451 desc = ctx.description()
478 desc = ctx.description()
452 info = callconduit(repo, b'differential.parsecommitmessage',
479 info = callconduit(repo.ui, b'differential.parsecommitmessage',
453 {b'corpus': desc})
480 {b'corpus': desc})
454 for k, v in info[b'fields'].items():
481 for k, v in info[b'fields'].items():
455 if k in [b'title', b'summary', b'testPlan']:
482 if k in [b'title', b'summary', b'testPlan']:
@@ -460,7 +487,7 b' def createdifferentialrevision(ctx, revi'
460 # Update an existing Differential Revision
487 # Update an existing Differential Revision
461 params[b'objectIdentifier'] = revid
488 params[b'objectIdentifier'] = revid
462
489
463 revision = callconduit(repo, b'differential.revision.edit', params)
490 revision = callconduit(repo.ui, b'differential.revision.edit', params)
464 if not revision:
491 if not revision:
465 raise error.Abort(_(b'cannot create revision for %s') % ctx)
492 raise error.Abort(_(b'cannot create revision for %s') % ctx)
466
493
@@ -470,7 +497,7 b' def userphids(repo, names):'
470 """convert user names to PHIDs"""
497 """convert user names to PHIDs"""
471 names = [name.lower() for name in names]
498 names = [name.lower() for name in names]
472 query = {b'constraints': {b'usernames': names}}
499 query = {b'constraints': {b'usernames': names}}
473 result = callconduit(repo, b'user.search', query)
500 result = callconduit(repo.ui, b'user.search', query)
474 # username not found is not an error of the API. So check if we have missed
501 # username not found is not an error of the API. So check if we have missed
475 # some names here.
502 # some names here.
476 data = result[b'data']
503 data = result[b'data']
@@ -485,6 +512,9 b' def userphids(repo, names):'
485 [(b'r', b'rev', [], _(b'revisions to send'), _(b'REV')),
512 [(b'r', b'rev', [], _(b'revisions to send'), _(b'REV')),
486 (b'', b'amend', True, _(b'update commit messages')),
513 (b'', b'amend', True, _(b'update commit messages')),
487 (b'', b'reviewer', [], _(b'specify reviewers')),
514 (b'', b'reviewer', [], _(b'specify reviewers')),
515 (b'', b'blocker', [], _(b'specify blocking reviewers')),
516 (b'm', b'comment', b'',
517 _(b'add a comment to Revisions with new/updated Diffs')),
488 (b'', b'confirm', None, _(b'ask for confirmation before sending'))],
518 (b'', b'confirm', None, _(b'ask for confirmation before sending'))],
489 _(b'REV [OPTIONS]'),
519 _(b'REV [OPTIONS]'),
490 helpcategory=command.CATEGORY_IMPORT_EXPORT)
520 helpcategory=command.CATEGORY_IMPORT_EXPORT)
@@ -536,16 +566,23 b' def phabsend(ui, repo, *revs, **opts):'
536
566
537 actions = []
567 actions = []
538 reviewers = opts.get(b'reviewer', [])
568 reviewers = opts.get(b'reviewer', [])
569 blockers = opts.get(b'blocker', [])
570 phids = []
539 if reviewers:
571 if reviewers:
540 phids = userphids(repo, reviewers)
572 phids.extend(userphids(repo, reviewers))
573 if blockers:
574 phids.extend(map(
575 lambda phid: b'blocking(%s)' % phid, userphids(repo, blockers)
576 ))
577 if phids:
541 actions.append({b'type': b'reviewers.add', b'value': phids})
578 actions.append({b'type': b'reviewers.add', b'value': phids})
542
579
543 drevids = [] # [int]
580 drevids = [] # [int]
544 diffmap = {} # {newnode: diff}
581 diffmap = {} # {newnode: diff}
545
582
546 # Send patches one by one so we know their Differential Revision IDs and
583 # Send patches one by one so we know their Differential Revision PHIDs and
547 # can provide dependency relationship
584 # can provide dependency relationship
548 lastrevid = None
585 lastrevphid = None
549 for rev in revs:
586 for rev in revs:
550 ui.debug(b'sending rev %d\n' % rev)
587 ui.debug(b'sending rev %d\n' % rev)
551 ctx = repo[rev]
588 ctx = repo[rev]
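
Taken together, the two new flags compose with the existing ones: a hypothetical invocation such as `hg phabsend -r . --reviewer alice --blocker bob --comment 'rebased onto default'` would add alice as a plain reviewer, add bob in the blocking(...) form built above, and attach the comment to any revision whose diff is created or updated (the usernames are illustrative).
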
@@ -555,9 +592,11 b' def phabsend(ui, repo, *revs, **opts):'
555 if oldnode != ctx.node() or opts.get(b'amend'):
592 if oldnode != ctx.node() or opts.get(b'amend'):
556 # Create or update Differential Revision
593 # Create or update Differential Revision
557 revision, diff = createdifferentialrevision(
594 revision, diff = createdifferentialrevision(
558 ctx, revid, lastrevid, oldnode, olddiff, actions)
595 ctx, revid, lastrevphid, oldnode, olddiff, actions,
596 opts.get(b'comment'))
559 diffmap[ctx.node()] = diff
597 diffmap[ctx.node()] = diff
560 newrevid = int(revision[b'object'][b'id'])
598 newrevid = int(revision[b'object'][b'id'])
599 newrevphid = revision[b'object'][b'phid']
561 if revid:
600 if revid:
562 action = b'updated'
601 action = b'updated'
563 else:
602 else:
@@ -571,8 +610,9 b' def phabsend(ui, repo, *revs, **opts):'
571 tags.tag(repo, tagname, ctx.node(), message=None, user=None,
610 tags.tag(repo, tagname, ctx.node(), message=None, user=None,
572 date=None, local=True)
611 date=None, local=True)
573 else:
612 else:
574 # Nothing changed. But still set "newrevid" so the next revision
613 # Nothing changed. But still set "newrevphid" so the next revision
575 # could depend on this one.
614 # could depend on this one and "newrevid" for the summary line.
615 newrevphid = querydrev(repo, str(revid))[0][b'phid']
576 newrevid = revid
616 newrevid = revid
577 action = b'skipped'
617 action = b'skipped'
578
618
@@ -587,12 +627,12 b' def phabsend(ui, repo, *revs, **opts):'
587 ui.write(_(b'%s - %s - %s: %s\n') % (drevdesc, actiondesc, nodedesc,
627 ui.write(_(b'%s - %s - %s: %s\n') % (drevdesc, actiondesc, nodedesc,
588 desc))
628 desc))
589 drevids.append(newrevid)
629 drevids.append(newrevid)
590 lastrevid = newrevid
630 lastrevphid = newrevphid
591
631
592 # Update commit messages and remove tags
632 # Update commit messages and remove tags
593 if opts.get(b'amend'):
633 if opts.get(b'amend'):
594 unfi = repo.unfiltered()
634 unfi = repo.unfiltered()
595 drevs = callconduit(repo, b'differential.query', {b'ids': drevids})
635 drevs = callconduit(ui, b'differential.query', {b'ids': drevids})
596 with repo.wlock(), repo.lock(), repo.transaction(b'phabsend'):
636 with repo.wlock(), repo.lock(), repo.transaction(b'phabsend'):
597 wnode = unfi[b'.'].node()
637 wnode = unfi[b'.'].node()
598 mapping = {} # {oldnode: [newnode]}
638 mapping = {} # {oldnode: [newnode]}
@@ -632,10 +672,11 b' def phabsend(ui, repo, *revs, **opts):'
632 # Map from "hg:meta" keys to header understood by "hg import". The order is
672 # Map from "hg:meta" keys to header understood by "hg import". The order is
633 # consistent with "hg export" output.
673 # consistent with "hg export" output.
634 _metanamemap = util.sortdict([(b'user', b'User'), (b'date', b'Date'),
674 _metanamemap = util.sortdict([(b'user', b'User'), (b'date', b'Date'),
635 (b'node', b'Node ID'), (b'parent', b'Parent ')])
675 (b'branch', b'Branch'), (b'node', b'Node ID'),
676 (b'parent', b'Parent ')])
636
677
637 def _confirmbeforesend(repo, revs, oldmap):
678 def _confirmbeforesend(repo, revs, oldmap):
638 url, token = readurltoken(repo)
679 url, token = readurltoken(repo.ui)
639 ui = repo.ui
680 ui = repo.ui
640 for rev in revs:
681 for rev in revs:
641 ctx = repo[rev]
682 ctx = repo[rev]
@@ -777,7 +818,7 b' def querydrev(repo, spec):'
777 key = (params.get(b'ids') or params.get(b'phids') or [None])[0]
818 key = (params.get(b'ids') or params.get(b'phids') or [None])[0]
778 if key in prefetched:
819 if key in prefetched:
779 return prefetched[key]
820 return prefetched[key]
780 drevs = callconduit(repo, b'differential.query', params)
821 drevs = callconduit(repo.ui, b'differential.query', params)
781 # Fill prefetched with the result
822 # Fill prefetched with the result
782 for drev in drevs:
823 for drev in drevs:
783 prefetched[drev[b'phid']] = drev
824 prefetched[drev[b'phid']] = drev
@@ -901,16 +942,31 b' def getdiffmeta(diff):'
901 """
942 """
902 props = diff.get(b'properties') or {}
943 props = diff.get(b'properties') or {}
903 meta = props.get(b'hg:meta')
944 meta = props.get(b'hg:meta')
904 if not meta and props.get(b'local:commits'):
945 if not meta:
905 commit = sorted(props[b'local:commits'].values())[0]
946 if props.get(b'local:commits'):
906 meta = {
947 commit = sorted(props[b'local:commits'].values())[0]
907 b'date': b'%d 0' % commit[b'time'],
948 meta = {}
908 b'node': commit[b'rev'],
949 if b'author' in commit and b'authorEmail' in commit:
909 b'user': b'%s <%s>' % (commit[b'author'], commit[b'authorEmail']),
950 meta[b'user'] = b'%s <%s>' % (commit[b'author'],
910 }
951 commit[b'authorEmail'])
911 if len(commit.get(b'parents', ())) >= 1:
952 if b'time' in commit:
912 meta[b'parent'] = commit[b'parents'][0]
953 meta[b'date'] = b'%d 0' % int(commit[b'time'])
913 return meta or {}
954 if b'branch' in commit:
955 meta[b'branch'] = commit[b'branch']
956 node = commit.get(b'commit', commit.get(b'rev'))
957 if node:
958 meta[b'node'] = node
959 if len(commit.get(b'parents', ())) >= 1:
960 meta[b'parent'] = commit[b'parents'][0]
961 else:
962 meta = {}
963 if b'date' not in meta and b'dateCreated' in diff:
964 meta[b'date'] = b'%s 0' % diff[b'dateCreated']
965 if b'branch' not in meta and diff.get(b'branch'):
966 meta[b'branch'] = diff[b'branch']
967 if b'parent' not in meta and diff.get(b'sourceControlBaseRevision'):
968 meta[b'parent'] = diff[b'sourceControlBaseRevision']
969 return meta
914
970
915 def readpatch(repo, drevs, write):
971 def readpatch(repo, drevs, write):
916 """generate plain-text patch readable by 'hg import'
972 """generate plain-text patch readable by 'hg import'
@@ -920,14 +976,14 b' def readpatch(repo, drevs, write):'
920 """
976 """
921 # Prefetch hg:meta property for all diffs
977 # Prefetch hg:meta property for all diffs
922 diffids = sorted(set(max(int(v) for v in drev[b'diffs']) for drev in drevs))
978 diffids = sorted(set(max(int(v) for v in drev[b'diffs']) for drev in drevs))
923 diffs = callconduit(repo, b'differential.querydiffs', {b'ids': diffids})
979 diffs = callconduit(repo.ui, b'differential.querydiffs', {b'ids': diffids})
924
980
925 # Generate patch for each drev
981 # Generate patch for each drev
926 for drev in drevs:
982 for drev in drevs:
927 repo.ui.note(_(b'reading D%s\n') % drev[b'id'])
983 repo.ui.note(_(b'reading D%s\n') % drev[b'id'])
928
984
929 diffid = max(int(v) for v in drev[b'diffs'])
985 diffid = max(int(v) for v in drev[b'diffs'])
930 body = callconduit(repo, b'differential.getrawdiff',
986 body = callconduit(repo.ui, b'differential.getrawdiff',
931 {b'diffID': diffid})
987 {b'diffID': diffid})
932 desc = getdescfromdrev(drev)
988 desc = getdescfromdrev(drev)
933 header = b'# HG changeset patch\n'
989 header = b'# HG changeset patch\n'
@@ -1001,7 +1057,7 b' def phabupdate(ui, repo, spec, **opts):'
1001 if actions:
1057 if actions:
1002 params = {b'objectIdentifier': drev[b'phid'],
1058 params = {b'objectIdentifier': drev[b'phid'],
1003 b'transactions': actions}
1059 b'transactions': actions}
1004 callconduit(repo, b'differential.revision.edit', params)
1060 callconduit(ui, b'differential.revision.edit', params)
1005
1061
1006 templatekeyword = registrar.templatekeyword()
1062 templatekeyword = registrar.templatekeyword()
1007
1063
@@ -108,7 +108,9 b' def _revsetdestrebase(repo, subset, x):'
108
108
109 @revsetpredicate('_destautoorphanrebase')
109 @revsetpredicate('_destautoorphanrebase')
110 def _revsetdestautoorphanrebase(repo, subset, x):
110 def _revsetdestautoorphanrebase(repo, subset, x):
111 """automatic rebase destination for a single orphan revision"""
111 # ``_destautoorphanrebase()``
112
113 # automatic rebase destination for a single orphan revision.
112 unfi = repo.unfiltered()
114 unfi = repo.unfiltered()
113 obsoleted = unfi.revs('obsolete()')
115 obsoleted = unfi.revs('obsolete()')
114
116
@@ -848,8 +850,9 b' def rebase(ui, repo, **opts):'
848 singletransaction = True
850 singletransaction = True
849
851
850 By default, rebase writes to the working copy, but you can configure it to
852 By default, rebase writes to the working copy, but you can configure it to
851 run in-memory for for better performance, and to allow it to run if the
853 run in-memory for better performance. When the rebase is not moving the
852 working copy is dirty::
854 parent(s) of the working copy (AKA the "currently checked out changesets"),
855 this may also allow it to run even if the working copy is dirty::
853
856
854 [rebase]
857 [rebase]
855 experimental.inmemory = True
858 experimental.inmemory = True
@@ -1819,7 +1822,7 b' def pullrebase(orig, ui, repo, *args, **'
1819 ui.debug('--update and --rebase are not compatible, ignoring '
1822 ui.debug('--update and --rebase are not compatible, ignoring '
1820 'the update flag\n')
1823 'the update flag\n')
1821
1824
1822 cmdutil.checkunfinished(repo)
1825 cmdutil.checkunfinished(repo, skipmerge=True)
1823 cmdutil.bailifchanged(repo, hint=_('cannot pull with rebase: '
1826 cmdutil.bailifchanged(repo, hint=_('cannot pull with rebase: '
1824 'please commit or shelve your changes first'))
1827 'please commit or shelve your changes first'))
1825
1828
@@ -1920,6 +1923,22 b' def _computeobsoletenotrebased(repo, reb'
1920 obsoleteextinctsuccessors,
1923 obsoleteextinctsuccessors,
1921 )
1924 )
1922
1925
1926 def abortrebase(ui, repo):
1927 with repo.wlock(), repo.lock():
1928 rbsrt = rebaseruntime(repo, ui)
1929 rbsrt._prepareabortorcontinue(isabort=True)
1930
1931 def continuerebase(ui, repo):
1932 with repo.wlock(), repo.lock():
1933 rbsrt = rebaseruntime(repo, ui)
1934 ms = mergemod.mergestate.read(repo)
1935 mergeutil.checkunresolved(ms)
1936 retcode = rbsrt._prepareabortorcontinue(isabort=False)
1937 if retcode is not None:
1938 return retcode
1939 rbsrt._performrebase(None)
1940 rbsrt._finishrebase()
1941
1923 def summaryhook(ui, repo):
1942 def summaryhook(ui, repo):
1924 if not repo.vfs.exists('rebasestate'):
1943 if not repo.vfs.exists('rebasestate'):
1925 return
1944 return
@@ -1947,8 +1966,6 b' def uisetup(ui):'
1947 entry[1].append(('t', 'tool', '',
1966 entry[1].append(('t', 'tool', '',
1948 _("specify merge tool for rebase")))
1967 _("specify merge tool for rebase")))
1949 cmdutil.summaryhooks.add('rebase', summaryhook)
1968 cmdutil.summaryhooks.add('rebase', summaryhook)
1950 cmdutil.unfinishedstates.append(
1969 statemod.addunfinished('rebase', fname='rebasestate', stopflag=True,
1951 ['rebasestate', False, False, _('rebase in progress'),
1970 continueflag=True, abortfunc=abortrebase,
1952 _("use 'hg rebase --continue' or 'hg rebase --abort'")])
1971 continuefunc=continuerebase)
1953 cmdutil.afterresolvedstates.append(
1954 ['rebasestate', _('hg rebase --continue')])
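Note: ``statemod.addunfinished`` replaces the old ``cmdutil.unfinishedstates`` and ``cmdutil.afterresolvedstates`` lists with a single registration that also wires the state into generic abort/continue handling. A hedged sketch of registering a hypothetical state the same way::

    from mercurial import state as statemod

    def abortfrob(ui, repo):
        pass  # undo the half-done operation

    def continuefrob(ui, repo):
        pass  # resume from the saved state file

    statemod.addunfinished('frob', fname='frobstate', stopflag=True,
                           continueflag=True, abortfunc=abortfrob,
                           continuefunc=continuefrob)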
@@ -293,6 +293,35 b' def uisetup(ui):'
293 # debugdata needs remotefilelog.len to work
293 # debugdata needs remotefilelog.len to work
294 extensions.wrapcommand(commands.table, 'debugdata', debugdatashallow)
294 extensions.wrapcommand(commands.table, 'debugdata', debugdatashallow)
295
295
296 changegroup.cgpacker = shallowbundle.shallowcg1packer
297
298 extensions.wrapfunction(changegroup, '_addchangegroupfiles',
299 shallowbundle.addchangegroupfiles)
300 extensions.wrapfunction(
301 changegroup, 'makechangegroup', shallowbundle.makechangegroup)
302 extensions.wrapfunction(localrepo, 'makestore', storewrapper)
303 extensions.wrapfunction(exchange, 'pull', exchangepull)
304 extensions.wrapfunction(merge, 'applyupdates', applyupdates)
305 extensions.wrapfunction(merge, '_checkunknownfiles', checkunknownfiles)
306 extensions.wrapfunction(context.workingctx, '_checklookup', checklookup)
307 extensions.wrapfunction(scmutil, '_findrenames', findrenames)
308 extensions.wrapfunction(copies, '_computeforwardmissing',
309 computeforwardmissing)
310 extensions.wrapfunction(dispatch, 'runcommand', runcommand)
311 extensions.wrapfunction(repair, '_collectbrokencsets', _collectbrokencsets)
312 extensions.wrapfunction(context.changectx, 'filectx', filectx)
313 extensions.wrapfunction(context.workingctx, 'filectx', workingfilectx)
314 extensions.wrapfunction(patch, 'trydiff', trydiff)
315 extensions.wrapfunction(hg, 'verify', _verify)
316 scmutil.fileprefetchhooks.add('remotefilelog', _fileprefetchhook)
317
318 # disappointing hacks below
319 extensions.wrapfunction(scmutil, 'getrenamedfn', getrenamedfn)
320 extensions.wrapfunction(revset, 'filelog', filelogrevset)
321 revset.symbols['filelog'] = revset.filelog
322 extensions.wrapfunction(cmdutil, 'walkfilerevs', walkfilerevs)
323
324
296 def cloneshallow(orig, ui, repo, *args, **opts):
325 def cloneshallow(orig, ui, repo, *args, **opts):
297 if opts.get(r'shallow'):
326 if opts.get(r'shallow'):
298 repos = []
327 repos = []
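Note: moving the wrappers to module level installs them once at extension load instead of per-client setup. Every hook follows the same ``extensions.wrapfunction`` contract: the wrapper receives the original callable first and must delegate to it. A minimal sketch of the convention (the wrapped target is illustrative)::

    from mercurial import cmdutil, extensions

    def mywalkfilerevs(orig, *args, **kwargs):
        # do extension work before/after, then call through to the original
        return orig(*args, **kwargs)

    extensions.wrapfunction(cmdutil, 'walkfilerevs', mywalkfilerevs)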
@@ -405,6 +434,158 b' def setupclient(ui, repo):'
405 shallowrepo.wraprepo(repo)
434 shallowrepo.wraprepo(repo)
406 repo.store = shallowstore.wrapstore(repo.store)
435 repo.store = shallowstore.wrapstore(repo.store)
407
436
437 def storewrapper(orig, requirements, path, vfstype):
438 s = orig(requirements, path, vfstype)
439 if constants.SHALLOWREPO_REQUIREMENT in requirements:
440 s = shallowstore.wrapstore(s)
441
442 return s
443
444 # prefetch files before update
445 def applyupdates(orig, repo, actions, wctx, mctx, overwrite, wantfiledata,
446 labels=None):
447 if isenabled(repo):
448 manifest = mctx.manifest()
449 files = []
450 for f, args, msg in actions['g']:
451 files.append((f, hex(manifest[f])))
452 # batch fetch the needed files from the server
453 repo.fileservice.prefetch(files)
454 return orig(repo, actions, wctx, mctx, overwrite, wantfiledata,
455 labels=labels)
456
457 # Prefetch merge checkunknownfiles
458 def checkunknownfiles(orig, repo, wctx, mctx, force, actions,
459 *args, **kwargs):
460 if isenabled(repo):
461 files = []
462 sparsematch = repo.maybesparsematch(mctx.rev())
463 for f, (m, actionargs, msg) in actions.iteritems():
464 if sparsematch and not sparsematch(f):
465 continue
466 if m in ('c', 'dc', 'cm'):
467 files.append((f, hex(mctx.filenode(f))))
468 elif m == 'dg':
469 f2 = actionargs[0]
470 files.append((f2, hex(mctx.filenode(f2))))
471 # batch fetch the needed files from the server
472 repo.fileservice.prefetch(files)
473 return orig(repo, wctx, mctx, force, actions, *args, **kwargs)
474
475 # Prefetch files before status attempts to look at their size and contents
476 def checklookup(orig, self, files):
477 repo = self._repo
478 if isenabled(repo):
479 prefetchfiles = []
480 for parent in self._parents:
481 for f in files:
482 if f in parent:
483 prefetchfiles.append((f, hex(parent.filenode(f))))
484 # batch fetch the needed files from the server
485 repo.fileservice.prefetch(prefetchfiles)
486 return orig(self, files)
487
488 # Prefetch the logic that compares added and removed files for renames
489 def findrenames(orig, repo, matcher, added, removed, *args, **kwargs):
490 if isenabled(repo):
491 files = []
492 pmf = repo['.'].manifest()
493 for f in removed:
494 if f in pmf:
495 files.append((f, hex(pmf[f])))
496 # batch fetch the needed files from the server
497 repo.fileservice.prefetch(files)
498 return orig(repo, matcher, added, removed, *args, **kwargs)
499
500 # prefetch files before pathcopies check
501 def computeforwardmissing(orig, a, b, match=None):
502 missing = orig(a, b, match=match)
503 repo = a._repo
504 if isenabled(repo):
505 mb = b.manifest()
506
507 files = []
508 sparsematch = repo.maybesparsematch(b.rev())
509 if sparsematch:
510 sparsemissing = set()
511 for f in missing:
512 if sparsematch(f):
513 files.append((f, hex(mb[f])))
514 sparsemissing.add(f)
515 missing = sparsemissing
516
517 # batch fetch the needed files from the server
518 repo.fileservice.prefetch(files)
519 return missing
520
521 # close cache miss server connection after the command has finished
522 def runcommand(orig, lui, repo, *args, **kwargs):
523 fileservice = None
524 # repo can be None when running in chg:
525 # - at startup, reposetup was called because serve is not norepo
526 # - a norepo command like "help" is called
527 if repo and isenabled(repo):
528 fileservice = repo.fileservice
529 try:
530 return orig(lui, repo, *args, **kwargs)
531 finally:
532 if fileservice:
533 fileservice.close()
534
535 # prevent strip from stripping remotefilelogs
536 def _collectbrokencsets(orig, repo, files, striprev):
537 if isenabled(repo):
538 files = list([f for f in files if not repo.shallowmatch(f)])
539 return orig(repo, files, striprev)
540
541 # changectx wrappers
542 def filectx(orig, self, path, fileid=None, filelog=None):
543 if fileid is None:
544 fileid = self.filenode(path)
545 if (isenabled(self._repo) and self._repo.shallowmatch(path)):
546 return remotefilectx.remotefilectx(self._repo, path, fileid=fileid,
547 changectx=self, filelog=filelog)
548 return orig(self, path, fileid=fileid, filelog=filelog)
549
550 def workingfilectx(orig, self, path, filelog=None):
551 if (isenabled(self._repo) and self._repo.shallowmatch(path)):
552 return remotefilectx.remoteworkingfilectx(self._repo, path,
553 workingctx=self,
554 filelog=filelog)
555 return orig(self, path, filelog=filelog)
556
557 # prefetch required revisions before a diff
558 def trydiff(orig, repo, revs, ctx1, ctx2, modified, added, removed,
559 copy, getfilectx, *args, **kwargs):
560 if isenabled(repo):
561 prefetch = []
562 mf1 = ctx1.manifest()
563 for fname in modified + added + removed:
564 if fname in mf1:
565 fnode = getfilectx(fname, ctx1).filenode()
566 # fnode can be None if it's an edited working ctx file
567 if fnode:
568 prefetch.append((fname, hex(fnode)))
569 if fname not in removed:
570 fnode = getfilectx(fname, ctx2).filenode()
571 if fnode:
572 prefetch.append((fname, hex(fnode)))
573
574 repo.fileservice.prefetch(prefetch)
575
576 return orig(repo, revs, ctx1, ctx2, modified, added, removed, copy,
577 getfilectx, *args, **kwargs)
578
579 # Prevent verify from processing files
580 # a stub for mercurial.hg.verify()
581 def _verify(orig, repo, level=None):
582 lock = repo.lock()
583 try:
584 return shallowverifier.shallowverifier(repo).verify()
585 finally:
586 lock.release()
587
588
408 clientonetime = False
589 clientonetime = False
409 def onetimeclientsetup(ui):
590 def onetimeclientsetup(ui):
410 global clientonetime
591 global clientonetime
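Note: all of the prefetching wrappers above funnel ``(path, hex(filenode))`` pairs into a single ``repo.fileservice.prefetch()`` call, so the shallow client issues one batched request to the server rather than a round trip per file. The shared shape, as a hedged sketch (``_prefetchpairs`` is hypothetical)::

    from mercurial.node import hex

    def _prefetchpairs(repo, ctx, paths):
        mf = ctx.manifest()
        pairs = [(p, hex(mf[p])) for p in paths if p in mf]
        # one batched request to the remotefilelog server
        repo.fileservice.prefetch(pairs)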
@@ -412,163 +593,6 b' def onetimeclientsetup(ui):'
412 return
593 return
413 clientonetime = True
594 clientonetime = True
414
595
415 changegroup.cgpacker = shallowbundle.shallowcg1packer
416
417 extensions.wrapfunction(changegroup, '_addchangegroupfiles',
418 shallowbundle.addchangegroupfiles)
419 extensions.wrapfunction(
420 changegroup, 'makechangegroup', shallowbundle.makechangegroup)
421
422 def storewrapper(orig, requirements, path, vfstype):
423 s = orig(requirements, path, vfstype)
424 if constants.SHALLOWREPO_REQUIREMENT in requirements:
425 s = shallowstore.wrapstore(s)
426
427 return s
428 extensions.wrapfunction(localrepo, 'makestore', storewrapper)
429
430 extensions.wrapfunction(exchange, 'pull', exchangepull)
431
432 # prefetch files before update
433 def applyupdates(orig, repo, actions, wctx, mctx, overwrite, labels=None):
434 if isenabled(repo):
435 manifest = mctx.manifest()
436 files = []
437 for f, args, msg in actions['g']:
438 files.append((f, hex(manifest[f])))
439 # batch fetch the needed files from the server
440 repo.fileservice.prefetch(files)
441 return orig(repo, actions, wctx, mctx, overwrite, labels=labels)
442 extensions.wrapfunction(merge, 'applyupdates', applyupdates)
443
444 # Prefetch merge checkunknownfiles
445 def checkunknownfiles(orig, repo, wctx, mctx, force, actions,
446 *args, **kwargs):
447 if isenabled(repo):
448 files = []
449 sparsematch = repo.maybesparsematch(mctx.rev())
450 for f, (m, actionargs, msg) in actions.iteritems():
451 if sparsematch and not sparsematch(f):
452 continue
453 if m in ('c', 'dc', 'cm'):
454 files.append((f, hex(mctx.filenode(f))))
455 elif m == 'dg':
456 f2 = actionargs[0]
457 files.append((f2, hex(mctx.filenode(f2))))
458 # batch fetch the needed files from the server
459 repo.fileservice.prefetch(files)
460 return orig(repo, wctx, mctx, force, actions, *args, **kwargs)
461 extensions.wrapfunction(merge, '_checkunknownfiles', checkunknownfiles)
462
463 # Prefetch files before status attempts to look at their size and contents
464 def checklookup(orig, self, files):
465 repo = self._repo
466 if isenabled(repo):
467 prefetchfiles = []
468 for parent in self._parents:
469 for f in files:
470 if f in parent:
471 prefetchfiles.append((f, hex(parent.filenode(f))))
472 # batch fetch the needed files from the server
473 repo.fileservice.prefetch(prefetchfiles)
474 return orig(self, files)
475 extensions.wrapfunction(context.workingctx, '_checklookup', checklookup)
476
477 # Prefetch the logic that compares added and removed files for renames
478 def findrenames(orig, repo, matcher, added, removed, *args, **kwargs):
479 if isenabled(repo):
480 files = []
481 pmf = repo['.'].manifest()
482 for f in removed:
483 if f in pmf:
484 files.append((f, hex(pmf[f])))
485 # batch fetch the needed files from the server
486 repo.fileservice.prefetch(files)
487 return orig(repo, matcher, added, removed, *args, **kwargs)
488 extensions.wrapfunction(scmutil, '_findrenames', findrenames)
489
490 # prefetch files before mergecopies check
491 def computenonoverlap(orig, repo, c1, c2, *args, **kwargs):
492 u1, u2 = orig(repo, c1, c2, *args, **kwargs)
493 if isenabled(repo):
494 m1 = c1.manifest()
495 m2 = c2.manifest()
496 files = []
497
498 sparsematch1 = repo.maybesparsematch(c1.rev())
499 if sparsematch1:
500 sparseu1 = set()
501 for f in u1:
502 if sparsematch1(f):
503 files.append((f, hex(m1[f])))
504 sparseu1.add(f)
505 u1 = sparseu1
506
507 sparsematch2 = repo.maybesparsematch(c2.rev())
508 if sparsematch2:
509 sparseu2 = set()
510 for f in u2:
511 if sparsematch2(f):
512 files.append((f, hex(m2[f])))
513 sparseu2.add(f)
514 u2 = sparseu2
515
516 # batch fetch the needed files from the server
517 repo.fileservice.prefetch(files)
518 return u1, u2
519 extensions.wrapfunction(copies, '_computenonoverlap', computenonoverlap)
520
521 # prefetch files before pathcopies check
522 def computeforwardmissing(orig, a, b, match=None):
523 missing = orig(a, b, match=match)
524 repo = a._repo
525 if isenabled(repo):
526 mb = b.manifest()
527
528 files = []
529 sparsematch = repo.maybesparsematch(b.rev())
530 if sparsematch:
531 sparsemissing = set()
532 for f in missing:
533 if sparsematch(f):
534 files.append((f, hex(mb[f])))
535 sparsemissing.add(f)
536 missing = sparsemissing
537
538 # batch fetch the needed files from the server
539 repo.fileservice.prefetch(files)
540 return missing
541 extensions.wrapfunction(copies, '_computeforwardmissing',
542 computeforwardmissing)
543
544 # close cache miss server connection after the command has finished
545 def runcommand(orig, lui, repo, *args, **kwargs):
546 fileservice = None
547 # repo can be None when running in chg:
548 # - at startup, reposetup was called because serve is not norepo
549 # - a norepo command like "help" is called
550 if repo and isenabled(repo):
551 fileservice = repo.fileservice
552 try:
553 return orig(lui, repo, *args, **kwargs)
554 finally:
555 if fileservice:
556 fileservice.close()
557 extensions.wrapfunction(dispatch, 'runcommand', runcommand)
558
559 # disappointing hacks below
560 scmutil.getrenamedfn = getrenamedfn
561 extensions.wrapfunction(revset, 'filelog', filelogrevset)
562 revset.symbols['filelog'] = revset.filelog
563 extensions.wrapfunction(cmdutil, 'walkfilerevs', walkfilerevs)
564
565 # prevent strip from stripping remotefilelogs
566 def _collectbrokencsets(orig, repo, files, striprev):
567 if isenabled(repo):
568 files = list([f for f in files if not repo.shallowmatch(f)])
569 return orig(repo, files, striprev)
570 extensions.wrapfunction(repair, '_collectbrokencsets', _collectbrokencsets)
571
572 # Don't commit filelogs until we know the commit hash, since the hash
596 # Don't commit filelogs until we know the commit hash, since the hash
573 # is present in the filelog blob.
597 # is present in the filelog blob.
574 # This violates Mercurial's filelog->manifest->changelog write order,
598 # This violates Mercurial's filelog->manifest->changelog write order,
@@ -611,60 +635,10 b' def onetimeclientsetup(ui):'
611 return node
635 return node
612 extensions.wrapfunction(changelog.changelog, 'add', changelogadd)
636 extensions.wrapfunction(changelog.changelog, 'add', changelogadd)
613
637
614 # changectx wrappers
638 def getrenamedfn(orig, repo, endrev=None):
615 def filectx(orig, self, path, fileid=None, filelog=None):
639 if not isenabled(repo) or copies.usechangesetcentricalgo(repo):
616 if fileid is None:
640 return orig(repo, endrev)
617 fileid = self.filenode(path)
618 if (isenabled(self._repo) and self._repo.shallowmatch(path)):
619 return remotefilectx.remotefilectx(self._repo, path,
620 fileid=fileid, changectx=self, filelog=filelog)
621 return orig(self, path, fileid=fileid, filelog=filelog)
622 extensions.wrapfunction(context.changectx, 'filectx', filectx)
623
624 def workingfilectx(orig, self, path, filelog=None):
625 if (isenabled(self._repo) and self._repo.shallowmatch(path)):
626 return remotefilectx.remoteworkingfilectx(self._repo,
627 path, workingctx=self, filelog=filelog)
628 return orig(self, path, filelog=filelog)
629 extensions.wrapfunction(context.workingctx, 'filectx', workingfilectx)
630
641
631 # prefetch required revisions before a diff
632 def trydiff(orig, repo, revs, ctx1, ctx2, modified, added, removed,
633 copy, getfilectx, *args, **kwargs):
634 if isenabled(repo):
635 prefetch = []
636 mf1 = ctx1.manifest()
637 for fname in modified + added + removed:
638 if fname in mf1:
639 fnode = getfilectx(fname, ctx1).filenode()
640 # fnode can be None if it's an edited working ctx file
641 if fnode:
642 prefetch.append((fname, hex(fnode)))
643 if fname not in removed:
644 fnode = getfilectx(fname, ctx2).filenode()
645 if fnode:
646 prefetch.append((fname, hex(fnode)))
647
648 repo.fileservice.prefetch(prefetch)
649
650 return orig(repo, revs, ctx1, ctx2, modified, added, removed,
651 copy, getfilectx, *args, **kwargs)
652 extensions.wrapfunction(patch, 'trydiff', trydiff)
653
654 # Prevent verify from processing files
655 # a stub for mercurial.hg.verify()
656 def _verify(orig, repo):
657 lock = repo.lock()
658 try:
659 return shallowverifier.shallowverifier(repo).verify()
660 finally:
661 lock.release()
662
663 extensions.wrapfunction(hg, 'verify', _verify)
664
665 scmutil.fileprefetchhooks.add('remotefilelog', _fileprefetchhook)
666
667 def getrenamedfn(repo, endrev=None):
668 rcache = {}
642 rcache = {}
669
643
670 def getrenamed(fn, rev):
644 def getrenamed(fn, rev):
@@ -1019,9 +993,6 b' def _fileprefetchhook(repo, revs, match)'
1019 mf = ctx.manifest()
993 mf = ctx.manifest()
1020 sparsematch = repo.maybesparsematch(ctx.rev())
994 sparsematch = repo.maybesparsematch(ctx.rev())
1021 for path in ctx.walk(match):
995 for path in ctx.walk(match):
1022 if path.endswith('/'):
1023 # Tree manifest that's being excluded as part of narrow
1024 continue
1025 if (not sparsematch or sparsematch(path)) and path in mf:
996 if (not sparsematch or sparsematch(path)) and path in mf:
1026 allfiles.append((path, hex(mf[path])))
997 allfiles.append((path, hex(mf[path])))
1027 repo.fileservice.prefetch(allfiles)
998 repo.fileservice.prefetch(allfiles)
@@ -396,6 +396,9 b' class fileserverclient(object):'
396 batchdefault = 10
396 batchdefault = 10
397 batchsize = self.ui.configint(
397 batchsize = self.ui.configint(
398 'remotefilelog', 'batchsize', batchdefault)
398 'remotefilelog', 'batchsize', batchdefault)
399 self.ui.debug(
400 b'requesting %d files from '
401 b'remotefilelog server...\n' % len(missed))
399 _getfilesbatch(
402 _getfilesbatch(
400 remote, self.receivemissing, progress.increment,
403 remote, self.receivemissing, progress.increment,
401 missed, idmap, batchsize)
404 missed, idmap, batchsize)
@@ -43,7 +43,8 b' def backgroundrepack(repo, incremental=T'
43 if packsonly:
43 if packsonly:
44 cmd.append('--packsonly')
44 cmd.append('--packsonly')
45 repo.ui.warn(msg)
45 repo.ui.warn(msg)
46 procutil.runbgcommand(cmd, encoding.environ)
46 # We know this command will find a binary, so don't block on it starting.
47 procutil.runbgcommand(cmd, encoding.environ, ensurestart=False)
47
48
48 def fullrepack(repo, options=None):
49 def fullrepack(repo, options=None):
49 """If ``packsonly`` is True, stores creating only loose objects are skipped.
50 """If ``packsonly`` is True, stores creating only loose objects are skipped.
@@ -33,13 +33,6 b' from . import ('
33 shallowutil,
33 shallowutil,
34 )
34 )
35
35
36 if util.safehasattr(util, '_hgexecutable'):
37 # Before 5be286db
38 _hgexecutable = util.hgexecutable
39 else:
40 from mercurial.utils import procutil
41 _hgexecutable = procutil.hgexecutable
42
43 # These make*stores functions are global so that other extensions can replace
36 # These make*stores functions are global so that other extensions can replace
44 # them.
37 # them.
45 def makelocalstores(repo):
38 def makelocalstores(repo):
@@ -168,7 +161,7 b' def wraprepo(repo):'
168 **kwargs)
161 **kwargs)
169
162
170 @localrepo.unfilteredmethod
163 @localrepo.unfilteredmethod
171 def commitctx(self, ctx, error=False):
164 def commitctx(self, ctx, error=False, origctx=None):
172 """Add a new revision to current repository.
165 """Add a new revision to current repository.
173 Revision information is passed via the context argument.
166 Revision information is passed via the context argument.
174 """
167 """
@@ -186,18 +179,21 b' def wraprepo(repo):'
186 files.append((f, hex(fparent1)))
179 files.append((f, hex(fparent1)))
187 self.fileservice.prefetch(files)
180 self.fileservice.prefetch(files)
188 return super(shallowrepository, self).commitctx(ctx,
181 return super(shallowrepository, self).commitctx(ctx,
189 error=error)
182 error=error,
183 origctx=origctx)
190
184
191 def backgroundprefetch(self, revs, base=None, repack=False, pats=None,
185 def backgroundprefetch(self, revs, base=None, repack=False, pats=None,
192 opts=None):
186 opts=None):
193 """Runs prefetch in background with optional repack
187 """Runs prefetch in background with optional repack
194 """
188 """
195 cmd = [_hgexecutable(), '-R', repo.origroot, 'prefetch']
189 cmd = [procutil.hgexecutable(), '-R', repo.origroot, 'prefetch']
196 if repack:
190 if repack:
197 cmd.append('--repack')
191 cmd.append('--repack')
198 if revs:
192 if revs:
199 cmd += ['-r', revs]
193 cmd += ['-r', revs]
200 procutil.runbgcommand(cmd, encoding.environ)
194 # We know this command will find a binary, so don't block
195 # on it starting.
196 procutil.runbgcommand(cmd, encoding.environ, ensurestart=False)
201
197
202 def prefetch(self, revs, base=None, pats=None, opts=None):
198 def prefetch(self, revs, base=None, pats=None, opts=None):
203 """Prefetches all the necessary file revisions for the given revs
199 """Prefetches all the necessary file revisions for the given revs
@@ -167,6 +167,8 b' class lazyremotenamedict(mutablemapping)'
167 for k, vtup in self.potentialentries.iteritems():
167 for k, vtup in self.potentialentries.iteritems():
168 yield (k, [bin(vtup[0])])
168 yield (k, [bin(vtup[0])])
169
169
170 items = iteritems
171
170 class remotenames(object):
172 class remotenames(object):
171 """
173 """
172 This class encapsulates all the remotenames state. It also contains
174 This class encapsulates all the remotenames state. It also contains
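Note: the ``items = iteritems`` alias gives the lazy mapping a Python 3 spelling without duplicating the generator. Sketch of the idiom on a hypothetical class::

    class lazymap(object):
        def __init__(self, data):
            self._data = data

        def iteritems(self):
            for k, v in self._data.items():
                yield k, v

        # Python 3 callers iterate via .items(); share the same generator
        items = iteritems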
@@ -125,6 +125,10 b' def extsetup(ui):'
125
125
126 def _hassharedbookmarks(repo):
126 def _hassharedbookmarks(repo):
127 """Returns whether this repo has shared bookmarks"""
127 """Returns whether this repo has shared bookmarks"""
128 if bookmarks.bookmarksinstore(repo):
129 # Kind of a lie, but it means that we skip our custom reads and writes
130 # from/to the source repo.
131 return False
128 try:
132 try:
129 shared = repo.vfs.read('shared').splitlines()
133 shared = repo.vfs.read('shared').splitlines()
130 except IOError as inst:
134 except IOError as inst:
@@ -460,8 +460,8 b' def _updatedocstring():'
460 longest = max(map(len, showview._table.keys()))
460 longest = max(map(len, showview._table.keys()))
461 entries = []
461 entries = []
462 for key in sorted(showview._table.keys()):
462 for key in sorted(showview._table.keys()):
463 entries.append(pycompat.sysstr(' %s %s' % (
463 entries.append(r' %s %s' % (
464 key.ljust(longest), showview._table[key]._origdoc)))
464 pycompat.sysstr(key.ljust(longest)), showview._table[key]._origdoc))
465
465
466 cmdtable['show'][0].__doc__ = pycompat.sysstr('%s\n\n%s\n ') % (
466 cmdtable['show'][0].__doc__ = pycompat.sysstr('%s\n\n%s\n ') % (
467 cmdtable['show'][0].__doc__.rstrip(),
467 cmdtable['show'][0].__doc__.rstrip(),
@@ -228,7 +228,7 b' def _setupdirstate(ui):'
228 hint = _('include file with `hg debugsparse --include <pattern>` or use ' +
228 hint = _('include file with `hg debugsparse --include <pattern>` or use ' +
229 '`hg add -s <file>` to include file directory while adding')
229 '`hg add -s <file>` to include file directory while adding')
230 for func in editfuncs:
230 for func in editfuncs:
231 def _wrapper(orig, self, *args):
231 def _wrapper(orig, self, *args, **kwargs):
232 sparsematch = self._sparsematcher
232 sparsematch = self._sparsematcher
233 if not sparsematch.always():
233 if not sparsematch.always():
234 for f in args:
234 for f in args:
@@ -237,7 +237,7 b' def _setupdirstate(ui):'
237 raise error.Abort(_("cannot add '%s' - it is outside "
237 raise error.Abort(_("cannot add '%s' - it is outside "
238 "the sparse checkout") % f,
238 "the sparse checkout") % f,
239 hint=hint)
239 hint=hint)
240 return orig(self, *args)
240 return orig(self, *args, **kwargs)
241 extensions.wrapfunction(dirstate.dirstate, func, _wrapper)
241 extensions.wrapfunction(dirstate.dirstate, func, _wrapper)
242
242
243 @command('debugsparse', [
243 @command('debugsparse', [
@@ -31,31 +31,13 b' command = registrar.command(cmdtable)'
31 # leave the attribute unspecified.
31 # leave the attribute unspecified.
32 testedwith = 'ships-with-hg-core'
32 testedwith = 'ships-with-hg-core'
33
33
34 def checksubstate(repo, baserev=None):
34 def checklocalchanges(repo, force=False):
35 '''return list of subrepos at a different revision than substate.
36 Abort if any subrepos have uncommitted changes.'''
37 inclsubs = []
38 wctx = repo[None]
39 if baserev:
40 bctx = repo[baserev]
41 else:
42 bctx = wctx.p1()
43 for s in sorted(wctx.substate):
44 wctx.sub(s).bailifchanged(True)
45 if s not in bctx.substate or bctx.sub(s).dirty():
46 inclsubs.append(s)
47 return inclsubs
48
49 def checklocalchanges(repo, force=False, excsuffix=''):
50 cmdutil.checkunfinished(repo)
51 s = repo.status()
35 s = repo.status()
52 if not force:
36 if not force:
53 if s.modified or s.added or s.removed or s.deleted:
37 cmdutil.checkunfinished(repo)
54 _("local changes found") # i18n tool detection
38 cmdutil.bailifchanged(repo)
55 raise error.Abort(_("local changes found" + excsuffix))
39 else:
56 if checksubstate(repo):
40 cmdutil.checkunfinished(repo, skipmerge=True)
57 _("local changed subrepos found") # i18n tool detection
58 raise error.Abort(_("local changed subrepos found" + excsuffix))
59 return s
41 return s
60
42
61 def _findupdatetarget(repo, nodes):
43 def _findupdatetarget(repo, nodes):
@@ -35,6 +35,7 b' from mercurial import ('
35 revset,
35 revset,
36 scmutil,
36 scmutil,
37 smartset,
37 smartset,
38 state as statemod,
38 util,
39 util,
39 vfs as vfsmod,
40 vfs as vfsmod,
40 )
41 )
@@ -757,9 +758,12 b' def kwtransplanted(context, mapping):'
757 return n and nodemod.hex(n) or ''
758 return n and nodemod.hex(n) or ''
758
759
759 def extsetup(ui):
760 def extsetup(ui):
760 cmdutil.unfinishedstates.append(
761 statemod.addunfinished (
761 ['transplant/journal', True, False, _('transplant in progress'),
762 'transplant', fname='transplant/journal', clearable=True,
762 _("use 'hg transplant --continue' or 'hg update' to abort")])
763 statushint=_('To continue: hg transplant --continue\n'
764 'To abort: hg update'),
765 cmdhint=_("use 'hg transplant --continue' or 'hg update' to abort")
766 )
763
767
764 # tell hggettext to extract docstrings from these functions:
768 # tell hggettext to extract docstrings from these functions:
765 i18nfunctions = [revsettransplanted, kwtransplanted]
769 i18nfunctions = [revsettransplanted, kwtransplanted]
@@ -89,6 +89,8 b' import threading'
89 import time
89 import time
90 import traceback
90 import traceback
91
91
92 from mercurial import pycompat
93
92 __all__ = ["Zeroconf", "ServiceInfo", "ServiceBrowser"]
94 __all__ = ["Zeroconf", "ServiceInfo", "ServiceBrowser"]
93
95
94 # hook for threads
96 # hook for threads
@@ -270,6 +272,8 b' class DNSQuestion(DNSEntry):'
270 """A DNS question entry"""
272 """A DNS question entry"""
271
273
272 def __init__(self, name, type, clazz):
274 def __init__(self, name, type, clazz):
275 if pycompat.ispy3 and isinstance(name, str):
276 name = name.encode('ascii')
273 if not name.endswith(".local."):
277 if not name.endswith(".local."):
274 raise NonLocalNameException(name)
278 raise NonLocalNameException(name)
275 DNSEntry.__init__(self, name, type, clazz)
279 DNSEntry.__init__(self, name, type, clazz)
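Note: normalizing ``name`` to bytes at the constructor boundary keeps the rest of this Python 2 era module byte-oriented under Python 3. The idiom, as a standalone hedged sketch::

    import sys

    def _tobytes(name):
        # accept str on Python 3; bytes pass through unchanged
        if sys.version_info[0] >= 3 and isinstance(name, str):
            return name.encode('ascii')
        return name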
@@ -535,7 +539,7 b' class DNSIncoming(object):'
535
539
536 def readString(self, len):
540 def readString(self, len):
537 """Reads a string of a given length from the packet"""
541 """Reads a string of a given length from the packet"""
538 format = '!' + str(len) + 's'
542 format = '!%ds' % len
539 length = struct.calcsize(format)
543 length = struct.calcsize(format)
540 info = struct.unpack(format,
544 info = struct.unpack(format,
541 self.data[self.offset:self.offset + length])
545 self.data[self.offset:self.offset + length])
@@ -613,7 +617,7 b' class DNSIncoming(object):'
613
617
614 def readName(self):
618 def readName(self):
615 """Reads a domain name from the packet"""
619 """Reads a domain name from the packet"""
616 result = ''
620 result = r''
617 off = self.offset
621 off = self.offset
618 next = -1
622 next = -1
619 first = off
623 first = off
@@ -625,7 +629,7 b' class DNSIncoming(object):'
625 break
629 break
626 t = len & 0xC0
630 t = len & 0xC0
627 if t == 0x00:
631 if t == 0x00:
628 result = ''.join((result, self.readUTF(off, len) + '.'))
632 result = r''.join((result, self.readUTF(off, len) + r'.'))
629 off += len
633 off += len
630 elif t == 0xC0:
634 elif t == 0xC0:
631 if next < 0:
635 if next < 0:
@@ -34,6 +34,7 b' from mercurial import ('
34 encoding,
34 encoding,
35 extensions,
35 extensions,
36 hg,
36 hg,
37 pycompat,
37 ui as uimod,
38 ui as uimod,
38 )
39 )
39 from mercurial.hgweb import (
40 from mercurial.hgweb import (
@@ -55,7 +56,7 b' def getip():'
55 # finds external-facing interface without sending any packets (Linux)
56 # finds external-facing interface without sending any packets (Linux)
56 try:
57 try:
57 s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
58 s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
58 s.connect(('1.0.0.1', 0))
59 s.connect((r'1.0.0.1', 0))
59 ip = s.getsockname()[0]
60 ip = s.getsockname()[0]
60 return ip
61 return ip
61 except socket.error:
62 except socket.error:
@@ -64,17 +65,17 b' def getip():'
64 # Generic method, sometimes gives useless results
65 # Generic method, sometimes gives useless results
65 try:
66 try:
66 dumbip = socket.gethostbyaddr(socket.gethostname())[2][0]
67 dumbip = socket.gethostbyaddr(socket.gethostname())[2][0]
67 if ':' in dumbip:
68 if r':' in dumbip:
68 dumbip = '127.0.0.1'
69 dumbip = r'127.0.0.1'
69 if not dumbip.startswith('127.'):
70 if not dumbip.startswith(r'127.'):
70 return dumbip
71 return dumbip
71 except (socket.gaierror, socket.herror):
72 except (socket.gaierror, socket.herror):
72 dumbip = '127.0.0.1'
73 dumbip = r'127.0.0.1'
73
74
74 # works elsewhere, but actually sends a packet
75 # works elsewhere, but actually sends a packet
75 try:
76 try:
76 s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
77 s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
77 s.connect(('1.0.0.1', 1))
78 s.connect((r'1.0.0.1', 1))
78 ip = s.getsockname()[0]
79 ip = s.getsockname()[0]
79 return ip
80 return ip
80 except socket.error:
81 except socket.error:
@@ -86,19 +87,19 b' def publish(name, desc, path, port):'
86 global server, localip
87 global server, localip
87 if not server:
88 if not server:
88 ip = getip()
89 ip = getip()
89 if ip.startswith('127.'):
90 if ip.startswith(r'127.'):
90 # if we have no internet connection, this can happen.
91 # if we have no internet connection, this can happen.
91 return
92 return
92 localip = socket.inet_aton(ip)
93 localip = socket.inet_aton(ip)
93 server = Zeroconf.Zeroconf(ip)
94 server = Zeroconf.Zeroconf(ip)
94
95
95 hostname = socket.gethostname().split('.')[0]
96 hostname = socket.gethostname().split(r'.')[0]
96 host = hostname + ".local"
97 host = hostname + r".local"
97 name = "%s-%s" % (hostname, name)
98 name = r"%s-%s" % (hostname, name)
98
99
99 # advertise to browsers
100 # advertise to browsers
100 svc = Zeroconf.ServiceInfo('_http._tcp.local.',
101 svc = Zeroconf.ServiceInfo('_http._tcp.local.',
101 name + '._http._tcp.local.',
102 pycompat.bytestr(name + r'._http._tcp.local.'),
102 server = host,
103 server = host,
103 port = port,
104 port = port,
104 properties = {'description': desc,
105 properties = {'description': desc,
@@ -108,7 +109,7 b' def publish(name, desc, path, port):'
108
109
109 # advertise to Mercurial clients
110 # advertise to Mercurial clients
110 svc = Zeroconf.ServiceInfo('_hg._tcp.local.',
111 svc = Zeroconf.ServiceInfo('_hg._tcp.local.',
111 name + '._hg._tcp.local.',
112 pycompat.bytestr(name + r'._hg._tcp.local.'),
112 server = host,
113 server = host,
113 port = port,
114 port = port,
114 properties = {'description': desc,
115 properties = {'description': desc,
@@ -158,7 +159,7 b' class listener(object):'
158
159
159 def getzcpaths():
160 def getzcpaths():
160 ip = getip()
161 ip = getip()
161 if ip.startswith('127.'):
162 if ip.startswith(r'127.'):
162 return
163 return
163 server = Zeroconf.Zeroconf(ip)
164 server = Zeroconf.Zeroconf(ip)
164 l = listener()
165 l = listener()
@@ -166,10 +167,10 b' def getzcpaths():'
166 time.sleep(1)
167 time.sleep(1)
167 server.close()
168 server.close()
168 for value in l.found.values():
169 for value in l.found.values():
169 name = value.name[:value.name.index('.')]
170 name = value.name[:value.name.index(b'.')]
170 url = "http://%s:%s%s" % (socket.inet_ntoa(value.address), value.port,
171 url = r"http://%s:%s%s" % (socket.inet_ntoa(value.address), value.port,
171 value.properties.get("path", "/"))
172 value.properties.get(r"path", r"/"))
172 yield "zc-" + name, url
173 yield b"zc-" + name, pycompat.bytestr(url)
173
174
174 def config(orig, self, section, key, *args, **kwargs):
175 def config(orig, self, section, key, *args, **kwargs):
175 if section == "paths" and key.startswith("zc-"):
176 if section == "paths" and key.startswith("zc-"):
@@ -29,7 +29,7 b' if sys.version_info[0] >= 3:'
29 """A sys.meta_path finder that uses a custom module loader."""
29 """A sys.meta_path finder that uses a custom module loader."""
30 def find_spec(self, fullname, path, target=None):
30 def find_spec(self, fullname, path, target=None):
31 # Only handle Mercurial-related modules.
31 # Only handle Mercurial-related modules.
32 if not fullname.startswith(('mercurial.', 'hgext.', 'hgext3rd.')):
32 if not fullname.startswith(('mercurial.', 'hgext.')):
33 return None
33 return None
34 # don't try to parse binary
34 # don't try to parse binary
35 if fullname.startswith('mercurial.cext.'):
35 if fullname.startswith('mercurial.cext.'):
@@ -54,7 +54,16 b' if sys.version_info[0] >= 3:'
54 if finder == self:
54 if finder == self:
55 continue
55 continue
56
56
57 spec = finder.find_spec(fullname, path, target=target)
57 # Originally the API was a `find_module` method, but it was
58 # renamed to `find_spec` in python 3.4, with a new `target`
59 # argument.
60 find_spec_method = getattr(finder, 'find_spec', None)
61 if find_spec_method:
62 spec = find_spec_method(fullname, path, target=target)
63 else:
64 spec = finder.find_module(fullname)
65 if spec is not None:
66 spec = importlib.util.spec_from_loader(fullname, spec)
58 if spec:
67 if spec:
59 break
68 break
60
69
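Note: the fallback mirrors importlib's own bridge for legacy finders: a pre-3.4 finder exposes ``find_module()`` returning a loader, which must then be wrapped into a module spec. The same logic as a standalone hedged sketch::

    import importlib.util

    def _spec_from_any_finder(finder, fullname, path, target=None):
        find_spec = getattr(finder, 'find_spec', None)
        if find_spec is not None:
            return find_spec(fullname, path, target=target)
        loader = finder.find_module(fullname)  # legacy API
        if loader is None:
            return None
        return importlib.util.spec_from_loader(fullname, loader)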
@@ -216,7 +225,9 b' if sys.version_info[0] >= 3:'
216
225
217 # It changes iteritems/values to items/values as they are not
226 # It changes iteritems/values to items/values as they are not
218 # present in Python 3 world.
227 # present in Python 3 world.
219 elif fn in ('iteritems', 'itervalues'):
228 elif (fn in ('iteritems', 'itervalues') and
229 not (tokens[i - 1].type == token.NAME and
230 tokens[i - 1].string == 'def')):
220 yield t._replace(string=fn[4:])
231 yield t._replace(string=fn[4:])
221 continue
232 continue
222
233
@@ -227,7 +238,7 b' if sys.version_info[0] >= 3:'
227 # ``replacetoken`` or any mechanism that changes semantics of module
238 # ``replacetoken`` or any mechanism that changes semantics of module
228 # loading is changed. Otherwise cached bytecode may get loaded without
239 # loading is changed. Otherwise cached bytecode may get loaded without
229 # the new transformation mechanisms applied.
240 # the new transformation mechanisms applied.
230 BYTECODEHEADER = b'HG\x00\x0b'
241 BYTECODEHEADER = b'HG\x00\x0c'
231
242
232 class hgloader(importlib.machinery.SourceFileLoader):
243 class hgloader(importlib.machinery.SourceFileLoader):
233 """Custom module loader that transforms source code.
244 """Custom module loader that transforms source code.
@@ -33,6 +33,14 b' from . import ('
33 # custom styles
33 # custom styles
34 activebookmarklabel = 'bookmarks.active bookmarks.current'
34 activebookmarklabel = 'bookmarks.active bookmarks.current'
35
35
36 BOOKMARKS_IN_STORE_REQUIREMENT = 'bookmarksinstore'
37
38 def bookmarksinstore(repo):
39 return BOOKMARKS_IN_STORE_REQUIREMENT in repo.requirements
40
41 def bookmarksvfs(repo):
42 return repo.svfs if bookmarksinstore(repo) else repo.vfs
43
36 def _getbkfile(repo):
44 def _getbkfile(repo):
37 """Hook so that extensions that mess with the store can hook bm storage.
45 """Hook so that extensions that mess with the store can hook bm storage.
38
46
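Note: call sites use these helpers to pick the right vfs and the matching lock: store bookmarks live under ``.hg/store/`` and take ``repo.lock()``, while legacy bookmarks stay under ``.hg/`` with ``repo.wlock()``. A hedged sketch of a reader built on the helper::

    def readrawbookmarks(repo):
        # returns empty bytes if the file does not exist yet
        return bookmarksvfs(repo).tryread('bookmarks')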
@@ -40,7 +48,7 b' def _getbkfile(repo):'
40 bookmarks or the committed ones. Other extensions (like share)
48 bookmarks or the committed ones. Other extensions (like share)
41 may need to tweak this behavior further.
49 may need to tweak this behavior further.
42 """
50 """
43 fp, pending = txnutil.trypending(repo.root, repo.vfs, 'bookmarks')
51 fp, pending = txnutil.trypending(repo.root, bookmarksvfs(repo), 'bookmarks')
44 return fp
52 return fp
45
53
46 class bmstore(object):
54 class bmstore(object):
@@ -91,8 +99,11 b' class bmstore(object):'
91 # ValueError:
99 # ValueError:
92 # - node in nm, for non-20-bytes entry
100 # - node in nm, for non-20-bytes entry
93 # - split(...), for string without ' '
101 # - split(...), for string without ' '
94 repo.ui.warn(_('malformed line in .hg/bookmarks: %r\n')
102 bookmarkspath = '.hg/bookmarks'
95 % pycompat.bytestr(line))
103 if bookmarksinstore(repo):
104 bookmarkspath = '.hg/store/bookmarks'
105 repo.ui.warn(_('malformed line in %s: %r\n')
106 % (bookmarkspath, pycompat.bytestr(line)))
96 except IOError as inst:
107 except IOError as inst:
97 if inst.errno != errno.ENOENT:
108 if inst.errno != errno.ENOENT:
98 raise
109 raise
@@ -192,8 +203,9 b' class bmstore(object):'
192 """record that bookmarks have been changed in a transaction
203 """record that bookmarks have been changed in a transaction
193
204
194 The transaction is then responsible for updating the file content."""
205 The transaction is then responsible for updating the file content."""
206 location = '' if bookmarksinstore(self._repo) else 'plain'
195 tr.addfilegenerator('bookmarks', ('bookmarks',), self._write,
207 tr.addfilegenerator('bookmarks', ('bookmarks',), self._write,
196 location='plain')
208 location=location)
197 tr.hookargs['bookmark_moved'] = '1'
209 tr.hookargs['bookmark_moved'] = '1'
198
210
199 def _writerepo(self, repo):
211 def _writerepo(self, repo):
@@ -203,28 +215,24 b' class bmstore(object):'
203 rbm.active = None
215 rbm.active = None
204 rbm._writeactive()
216 rbm._writeactive()
205
217
206 with repo.wlock():
218 if bookmarksinstore(repo):
207 file_ = repo.vfs('bookmarks', 'w', atomictemp=True,
219 vfs = repo.svfs
208 checkambig=True)
220 lock = repo.lock()
209 try:
221 else:
210 self._write(file_)
222 vfs = repo.vfs
211 except: # re-raises
223 lock = repo.wlock()
212 file_.discard()
224 with lock:
213 raise
225 with vfs('bookmarks', 'w', atomictemp=True, checkambig=True) as f:
214 finally:
226 self._write(f)
215 file_.close()
216
227
217 def _writeactive(self):
228 def _writeactive(self):
218 if self._aclean:
229 if self._aclean:
219 return
230 return
220 with self._repo.wlock():
231 with self._repo.wlock():
221 if self._active is not None:
232 if self._active is not None:
222 f = self._repo.vfs('bookmarks.current', 'w', atomictemp=True,
233 with self._repo.vfs('bookmarks.current', 'w', atomictemp=True,
223 checkambig=True)
234 checkambig=True) as f:
224 try:
225 f.write(encoding.fromlocal(self._active))
235 f.write(encoding.fromlocal(self._active))
226 finally:
227 f.close()
228 else:
236 else:
229 self._repo.vfs.tryunlink('bookmarks.current')
237 self._repo.vfs.tryunlink('bookmarks.current')
230 self._aclean = True
238 self._aclean = True
@@ -306,28 +314,12 b' def _readactive(repo, marks):'
306 itself as we commit. This function returns the name of that bookmark.
314 itself as we commit. This function returns the name of that bookmark.
307 It is stored in .hg/bookmarks.current
315 It is stored in .hg/bookmarks.current
308 """
316 """
309 try:
317 # No readline() in osutil.posixfile, reading everything is
310 file = repo.vfs('bookmarks.current')
318 # cheap.
311 except IOError as inst:
319 content = repo.vfs.tryread('bookmarks.current')
312 if inst.errno != errno.ENOENT:
320 mark = encoding.tolocal((content.splitlines() or [''])[0])
313 raise
321 if mark == '' or mark not in marks:
314 return None
322 mark = None
315 try:
316 # No readline() in osutil.posixfile, reading everything is
317 # cheap.
318 # Note that it's possible for readlines() here to raise
319 # IOError, since we might be reading the active mark over
320 # static-http which only tries to load the file when we try
321 # to read from it.
322 mark = encoding.tolocal((file.readlines() or [''])[0])
323 if mark == '' or mark not in marks:
324 mark = None
325 except IOError as inst:
326 if inst.errno != errno.ENOENT:
327 raise
328 return None
329 finally:
330 file.close()
331 return mark
323 return mark
332
324
333 def activate(repo, mark):
325 def activate(repo, mark):
@@ -453,7 +445,11 b' def listbookmarks(repo):'
453 return d
445 return d
454
446
455 def pushbookmark(repo, key, old, new):
447 def pushbookmark(repo, key, old, new):
456 with repo.wlock(), repo.lock(), repo.transaction('bookmarks') as tr:
448 if bookmarksinstore(repo):
449 wlock = util.nullcontextmanager()
450 else:
451 wlock = repo.wlock()
452 with wlock, repo.lock(), repo.transaction('bookmarks') as tr:
457 marks = repo._bookmarks
453 marks = repo._bookmarks
458 existing = hex(marks.get(key, ''))
454 existing = hex(marks.get(key, ''))
459 if existing != old and existing != new:
455 if existing != old and existing != new:
@@ -121,6 +121,12 b' def _unknownnode(node):'
121 """
121 """
122 raise ValueError(r'node %s does not exist' % pycompat.sysstr(hex(node)))
122 raise ValueError(r'node %s does not exist' % pycompat.sysstr(hex(node)))
123
123
124 def _branchcachedesc(repo):
125 if repo.filtername is not None:
126 return 'branch cache (%s)' % repo.filtername
127 else:
128 return 'branch cache'
129
124 class branchcache(object):
130 class branchcache(object):
125 """A dict like object that hold branches heads cache.
131 """A dict like object that hold branches heads cache.
126
132
@@ -212,6 +218,8 b' class branchcache(object):'
212 self._verifybranch(k)
218 self._verifybranch(k)
213 yield k, v
219 yield k, v
214
220
221 items = iteritems
222
215 def hasbranch(self, label):
223 def hasbranch(self, label):
216 """ checks whether a branch of this name exists or not """
224 """ checks whether a branch of this name exists or not """
217 self._verifybranch(label)
225 self._verifybranch(label)
@@ -241,11 +249,9 b' class branchcache(object):'
241
249
242 except Exception as inst:
250 except Exception as inst:
243 if repo.ui.debugflag:
251 if repo.ui.debugflag:
244 msg = 'invalid branchheads cache'
252 msg = 'invalid %s: %s\n'
245 if repo.filtername is not None:
253 repo.ui.debug(msg % (_branchcachedesc(repo),
246 msg += ' (%s)' % repo.filtername
254 pycompat.bytestr(inst)))
247 msg += ': %s\n'
248 repo.ui.debug(msg % pycompat.bytestr(inst))
249 bcache = None
255 bcache = None
250
256
251 finally:
257 finally:
@@ -351,9 +357,8 b' class branchcache(object):'
351 state = 'o'
357 state = 'o'
352 f.write("%s %s %s\n" % (hex(node), state, label))
358 f.write("%s %s %s\n" % (hex(node), state, label))
353 f.close()
359 f.close()
354 repo.ui.log('branchcache',
360 repo.ui.log('branchcache', 'wrote %s with %d labels and %d nodes\n',
355 'wrote %s branch cache with %d labels and %d nodes\n',
361 _branchcachedesc(repo), len(self._entries), nodecount)
356 repo.filtername, len(self._entries), nodecount)
357 except (IOError, OSError, error.Abort) as inst:
362 except (IOError, OSError, error.Abort) as inst:
358 # Abort may be raised by read only opener, so log and continue
363 # Abort may be raised by read only opener, so log and continue
359 repo.ui.debug("couldn't write branch cache: %s\n" %
364 repo.ui.debug("couldn't write branch cache: %s\n" %
@@ -378,6 +383,10 b' class branchcache(object):'
378 # fetch current topological heads to speed up filtering
383 # fetch current topological heads to speed up filtering
379 topoheads = set(cl.headrevs())
384 topoheads = set(cl.headrevs())
380
385
386 # new tip revision which we found after iterating items from new
387 # branches
388 ntiprev = self.tiprev
389
381 # if older branchheads are reachable from new ones, they aren't
390 # if older branchheads are reachable from new ones, they aren't
382 # really branchheads. Note checking parents is insufficient:
391 # really branchheads. Note checking parents is insufficient:
383 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
392 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
@@ -401,9 +410,12 b' class branchcache(object):'
401 bheadrevs = sorted(bheadset)
410 bheadrevs = sorted(bheadset)
402 self[branch] = [cl.node(rev) for rev in bheadrevs]
411 self[branch] = [cl.node(rev) for rev in bheadrevs]
403 tiprev = bheadrevs[-1]
412 tiprev = bheadrevs[-1]
404 if tiprev > self.tiprev:
413 if tiprev > ntiprev:
405 self.tipnode = cl.node(tiprev)
414 ntiprev = tiprev
406 self.tiprev = tiprev
415
416 if ntiprev > self.tiprev:
417 self.tiprev = ntiprev
418 self.tipnode = cl.node(ntiprev)
407
419
408 if not self.validfor(repo):
420 if not self.validfor(repo):
409 # cache key are not valid anymore
421 # cache key are not valid anymore
@@ -417,8 +429,8 b' class branchcache(object):'
417 self.filteredhash = scmutil.filteredhash(repo, self.tiprev)
429 self.filteredhash = scmutil.filteredhash(repo, self.tiprev)
418
430
419 duration = util.timer() - starttime
431 duration = util.timer() - starttime
420 repo.ui.log('branchcache', 'updated %s branch cache in %.4f seconds\n',
432 repo.ui.log('branchcache', 'updated %s in %.4f seconds\n',
421 repo.filtername or b'None', duration)
433 _branchcachedesc(repo), duration)
422
434
423 self.write(repo)
435 self.write(repo)
424
436
@@ -608,51 +620,59 b' class revbranchcache(object):'
608 wlock = None
620 wlock = None
609 step = ''
621 step = ''
610 try:
622 try:
623 # write the new names
611 if self._rbcnamescount < len(self._names):
624 if self._rbcnamescount < len(self._names):
612 step = ' names'
613 wlock = repo.wlock(wait=False)
625 wlock = repo.wlock(wait=False)
614 if self._rbcnamescount != 0:
626 step = ' names'
615 f = repo.cachevfs.open(_rbcnames, 'ab')
627 self._writenames(repo)
616 if f.tell() == self._rbcsnameslen:
617 f.write('\0')
618 else:
619 f.close()
620 repo.ui.debug("%s changed - rewriting it\n" % _rbcnames)
621 self._rbcnamescount = 0
622 self._rbcrevslen = 0
623 if self._rbcnamescount == 0:
624 # before rewriting names, make sure references are removed
625 repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
626 f = repo.cachevfs.open(_rbcnames, 'wb')
627 f.write('\0'.join(encoding.fromlocal(b)
628 for b in self._names[self._rbcnamescount:]))
629 self._rbcsnameslen = f.tell()
630 f.close()
631 self._rbcnamescount = len(self._names)
632
628
629 # write the new revs
633 start = self._rbcrevslen * _rbcrecsize
630 start = self._rbcrevslen * _rbcrecsize
634 if start != len(self._rbcrevs):
631 if start != len(self._rbcrevs):
635 step = ''
632 step = ''
636 if wlock is None:
633 if wlock is None:
637 wlock = repo.wlock(wait=False)
634 wlock = repo.wlock(wait=False)
638 revs = min(len(repo.changelog),
635 self._writerevs(repo, start)
639 len(self._rbcrevs) // _rbcrecsize)
636
640 f = repo.cachevfs.open(_rbcrevs, 'ab')
641 if f.tell() != start:
642 repo.ui.debug("truncating cache/%s to %d\n"
643 % (_rbcrevs, start))
644 f.seek(start)
645 if f.tell() != start:
646 start = 0
647 f.seek(start)
648 f.truncate()
649 end = revs * _rbcrecsize
650 f.write(self._rbcrevs[start:end])
651 f.close()
652 self._rbcrevslen = revs
653 except (IOError, OSError, error.Abort, error.LockError) as inst:
637 except (IOError, OSError, error.Abort, error.LockError) as inst:
654 repo.ui.debug("couldn't write revision branch cache%s: %s\n"
638 repo.ui.debug("couldn't write revision branch cache%s: %s\n"
655 % (step, stringutil.forcebytestr(inst)))
639 % (step, stringutil.forcebytestr(inst)))
656 finally:
640 finally:
657 if wlock is not None:
641 if wlock is not None:
658 wlock.release()
642 wlock.release()
643
644 def _writenames(self, repo):
645 """ write the new branch names to revbranchcache """
646 if self._rbcnamescount != 0:
647 f = repo.cachevfs.open(_rbcnames, 'ab')
648 if f.tell() == self._rbcsnameslen:
649 f.write('\0')
650 else:
651 f.close()
652 repo.ui.debug("%s changed - rewriting it\n" % _rbcnames)
653 self._rbcnamescount = 0
654 self._rbcrevslen = 0
655 if self._rbcnamescount == 0:
656 # before rewriting names, make sure references are removed
657 repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
658 f = repo.cachevfs.open(_rbcnames, 'wb')
659 f.write('\0'.join(encoding.fromlocal(b)
660 for b in self._names[self._rbcnamescount:]))
661 self._rbcsnameslen = f.tell()
662 f.close()
663 self._rbcnamescount = len(self._names)
664
665 def _writerevs(self, repo, start):
666 """ write the new revs to revbranchcache """
667 revs = min(len(repo.changelog), len(self._rbcrevs) // _rbcrecsize)
668 with repo.cachevfs.open(_rbcrevs, 'ab') as f:
669 if f.tell() != start:
670 repo.ui.debug("truncating cache/%s to %d\n" % (_rbcrevs, start))
671 f.seek(start)
672 if f.tell() != start:
673 start = 0
674 f.seek(start)
675 f.truncate()
676 end = revs * _rbcrecsize
677 f.write(self._rbcrevs[start:end])
678 self._rbcrevslen = revs
@@ -2298,10 +2298,11 b' def handlestreamv2bundle(op, part):'
2298 streamclone.applybundlev2(repo, part, filecount, bytecount,
2298 streamclone.applybundlev2(repo, part, filecount, bytecount,
2299 requirements)
2299 requirements)
2300
2300
2301 def widen_bundle(repo, oldmatcher, newmatcher, common, known, cgversion,
2301 def widen_bundle(bundler, repo, oldmatcher, newmatcher, common,
2302 ellipses):
2302 known, cgversion, ellipses):
2303 """generates bundle2 for widening a narrow clone
2303 """generates bundle2 for widening a narrow clone
2304
2304
2305 bundler is the bundle to which data should be added
2305 repo is the localrepository instance
2306 repo is the localrepository instance
2306 oldmatcher matches what the client already has
2307 oldmatcher matches what the client already has
2307 newmatcher matches what the client needs (including what it already has)
2308 newmatcher matches what the client needs (including what it already has)
@@ -2312,7 +2313,6 b' def widen_bundle(repo, oldmatcher, newma'
2312
2313
2313 returns bundle2 of the data required for extending
2314 returns bundle2 of the data required for extending
2314 """
2315 """
2315 bundler = bundle20(repo.ui)
2316 commonnodes = set()
2316 commonnodes = set()
2317 cl = repo.changelog
2317 cl = repo.changelog
2318 for r in repo.revs("::%ln", common):
2318 for r in repo.revs("::%ln", common):
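Note: the bundler is now created by the caller and threaded through, so several parts can accumulate in one bundle2 before it is generated. Hedged sketch of the new calling convention::

    bundler = bundle20(repo.ui)
    widen_bundle(bundler, repo, oldmatcher, newmatcher, common, known,
                 cgversion, ellipses)
    # the caller may add further parts before streaming the bundle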
@@ -42,6 +42,9 b' static inline Py_ssize_t _finddir(const '
42 break;
42 break;
43 pos -= 1;
43 pos -= 1;
44 }
44 }
45 if (pos == -1) {
46 return 0;
47 }
45
48
46 return pos;
49 return pos;
47 }
50 }
@@ -667,10 +667,11 b' void dirs_module_init(PyObject *mod);'
667 void manifest_module_init(PyObject *mod);
667 void manifest_module_init(PyObject *mod);
668 void revlog_module_init(PyObject *mod);
668 void revlog_module_init(PyObject *mod);
669
669
670 static const int version = 12;
670 static const int version = 13;
671
671
672 static void module_init(PyObject *mod)
672 static void module_init(PyObject *mod)
673 {
673 {
674 PyObject *capsule = NULL;
674 PyModule_AddIntConstant(mod, "version", version);
675 PyModule_AddIntConstant(mod, "version", version);
675
676
676 /* This module constant has two purposes. First, it lets us unit test
677 /* This module constant has two purposes. First, it lets us unit test
@@ -687,6 +688,12 b' static void module_init(PyObject *mod)'
687 manifest_module_init(mod);
688 manifest_module_init(mod);
688 revlog_module_init(mod);
689 revlog_module_init(mod);
689
690
691 capsule = PyCapsule_New(
692 make_dirstate_tuple,
693 "mercurial.cext.parsers.make_dirstate_tuple_CAPI", NULL);
694 if (capsule != NULL)
695 PyModule_AddObject(mod, "make_dirstate_tuple_CAPI", capsule);
696
690 if (PyType_Ready(&dirstateTupleType) < 0) {
697 if (PyType_Ready(&dirstateTupleType) < 0) {
691 return;
698 return;
692 }
699 }
@@ -1061,7 +1061,7 b' class cgpacker(object):'
1061 while tmfnodes:
1061 while tmfnodes:
1062 tree, nodes = tmfnodes.popitem()
1062 tree, nodes = tmfnodes.popitem()
1063
1063
1064 should_visit = self._matcher.visitdir(tree[:-1] or '.')
1064 should_visit = self._matcher.visitdir(tree[:-1])
1065 if tree and not should_visit:
1065 if tree and not should_visit:
1066 continue
1066 continue
1067
1067
@@ -1093,7 +1093,7 b' class cgpacker(object):'
1093 fullclnodes=self._fullclnodes,
1093 fullclnodes=self._fullclnodes,
1094 precomputedellipsis=self._precomputedellipsis)
1094 precomputedellipsis=self._precomputedellipsis)
1095
1095
1096 if not self._oldmatcher.visitdir(store.tree[:-1] or '.'):
1096 if not self._oldmatcher.visitdir(store.tree[:-1]):
1097 yield tree, deltas
1097 yield tree, deltas
1098 else:
1098 else:
1099 # 'deltas' is a generator and we need to consume it even if
1099 # 'deltas' is a generator and we need to consume it even if
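Dropping the ``or '.'`` fallback means visitdir() now receives the empty string for the repository root rather than '.'. An assumed illustration of the convention (tree names in the packer carry a trailing '/')::

    tree = ''                    # root: tree[:-1] is '' as well
    matcher.visitdir(tree[:-1])  # was matcher.visitdir('.') before
    matcher.visitdir('foo')      # subdirectories are unaffected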
@@ -80,25 +80,55 b' def encodeextra(d):'
80 ]
80 ]
81 return "\0".join(items)
81 return "\0".join(items)
82
82
83 def encodecopies(copies):
83 def encodecopies(files, copies):
84 items = [
84 items = []
85 '%s\0%s' % (k, copies[k])
85 for i, dst in enumerate(files):
86 for k in sorted(copies)
86 if dst in copies:
87 ]
87 items.append('%d\0%s' % (i, copies[dst]))
88 if len(items) != len(copies):
89 raise error.ProgrammingError('some copy targets missing from file list')
88 return "\n".join(items)
90 return "\n".join(items)
89
91
90 def decodecopies(data):
92 def decodecopies(files, data):
91 try:
93 try:
92 copies = {}
94 copies = {}
95 if not data:
96 return copies
93 for l in data.split('\n'):
97 for l in data.split('\n'):
94 k, v = l.split('\0')
98 strindex, src = l.split('\0')
95 copies[k] = v
99 i = int(strindex)
100 dst = files[i]
101 copies[dst] = src
96 return copies
102 return copies
97 except ValueError:
103 except (ValueError, IndexError):
98 # Perhaps someone had chosen the same key name (e.g. "p1copies") and
104 # Perhaps someone had chosen the same key name (e.g. "p1copies") and
99 # used different syntax for the value.
105 # used different syntax for the value.
100 return None
106 return None
101
107
108 def encodefileindices(files, subset):
109 subset = set(subset)
110 indices = []
111 for i, f in enumerate(files):
112 if f in subset:
113 indices.append('%d' % i)
114 return '\n'.join(indices)
115
116 def decodefileindices(files, data):
117 try:
118 subset = []
119 if not data:
120 return subset
121 for strindex in data.split('\n'):
122 i = int(strindex)
123 if i < 0 or i >= len(files):
124 return None
125 subset.append(files[i])
126 return subset
127 except (ValueError, IndexError):
128 # Perhaps someone had chosen the same key name (e.g. "added") and
129 # used different syntax for the value.
130 return None
131
102 def stripdesc(desc):
132 def stripdesc(desc):
103 """strip trailing whitespace and leading and trailing empty lines"""
133 """strip trailing whitespace and leading and trailing empty lines"""
104 return '\n'.join([l.rstrip() for l in desc.splitlines()]).strip('\n')
134 return '\n'.join([l.rstrip() for l in desc.splitlines()]).strip('\n')
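Copy metadata is now keyed by each destination's index into the stored file list instead of by file name, and the new *fileindices* helpers reuse the same scheme for added/removed files. A round trip through the functions above::

    files = ['added.txt', 'copied.txt', 'modified.txt']  # sorted
    copies = {'copied.txt': 'source.txt'}

    raw = encodecopies(files, copies)        # '1\x00source.txt'
    assert decodecopies(files, raw) == copies

    rawidx = encodefileindices(files, ['added.txt'])     # '0'
    assert decodefileindices(files, rawidx) == ['added.txt']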
@@ -194,6 +224,10 b' class _changelogrevision(object):'
194 user = attr.ib(default='')
224 user = attr.ib(default='')
195 date = attr.ib(default=(0, 0))
225 date = attr.ib(default=(0, 0))
196 files = attr.ib(default=attr.Factory(list))
226 files = attr.ib(default=attr.Factory(list))
227 filesadded = attr.ib(default=None)
228 filesremoved = attr.ib(default=None)
229 p1copies = attr.ib(default=None)
230 p2copies = attr.ib(default=None)
197 description = attr.ib(default='')
231 description = attr.ib(default='')
198
232
199 class changelogrevision(object):
233 class changelogrevision(object):
@@ -298,14 +332,24 b' class changelogrevision(object):'
298 return self._text[off[2] + 1:off[3]].split('\n')
332 return self._text[off[2] + 1:off[3]].split('\n')
299
333
300 @property
334 @property
335 def filesadded(self):
336 rawindices = self.extra.get('filesadded')
337 return rawindices and decodefileindices(self.files, rawindices)
338
339 @property
340 def filesremoved(self):
341 rawindices = self.extra.get('filesremoved')
342 return rawindices and decodefileindices(self.files, rawindices)
343
344 @property
301 def p1copies(self):
345 def p1copies(self):
302 rawcopies = self.extra.get('p1copies')
346 rawcopies = self.extra.get('p1copies')
303 return rawcopies and decodecopies(rawcopies)
347 return rawcopies and decodecopies(self.files, rawcopies)
304
348
305 @property
349 @property
306 def p2copies(self):
350 def p2copies(self):
307 rawcopies = self.extra.get('p2copies')
351 rawcopies = self.extra.get('p2copies')
308 return rawcopies and decodecopies(rawcopies)
352 return rawcopies and decodecopies(self.files, rawcopies)
309
353
310 @property
354 @property
311 def description(self):
355 def description(self):
@@ -380,9 +424,6 b' class changelog(revlog.revlog):'
380 if i not in self.filteredrevs:
424 if i not in self.filteredrevs:
381 yield i
425 yield i
382
426
383 def reachableroots(self, minroot, heads, roots, includepath=False):
384 return self.index.reachableroots2(minroot, heads, roots, includepath)
385
386 def _checknofilteredinrevs(self, revs):
427 def _checknofilteredinrevs(self, revs):
387 """raise the appropriate error if 'revs' contains a filtered revision
428 """raise the appropriate error if 'revs' contains a filtered revision
388
429
@@ -562,7 +603,8 b' class changelog(revlog.revlog):'
562 return l[3:]
603 return l[3:]
563
604
564 def add(self, manifest, files, desc, transaction, p1, p2,
605 def add(self, manifest, files, desc, transaction, p1, p2,
565 user, date=None, extra=None, p1copies=None, p2copies=None):
606 user, date=None, extra=None, p1copies=None, p2copies=None,
607 filesadded=None, filesremoved=None):
566 # Convert to UTF-8 encoded bytestrings as the very first
608 # Convert to UTF-8 encoded bytestrings as the very first
567 # thing: calling any method on a localstr object will turn it
609 # thing: calling any method on a localstr object will turn it
568 # into a str object and the cached UTF-8 string is thus lost.
610 # into a str object and the cached UTF-8 string is thus lost.
@@ -591,17 +633,23 b' class changelog(revlog.revlog):'
591 elif branch in (".", "null", "tip"):
633 elif branch in (".", "null", "tip"):
592 raise error.StorageError(_('the name \'%s\' is reserved')
634 raise error.StorageError(_('the name \'%s\' is reserved')
593 % branch)
635 % branch)
594 if (p1copies or p2copies) and extra is None:
636 extrasentries = p1copies, p2copies, filesadded, filesremoved
637 if extra is None and any(x is not None for x in extrasentries):
595 extra = {}
638 extra = {}
596 if p1copies:
639 sortedfiles = sorted(files)
597 extra['p1copies'] = encodecopies(p1copies)
640 if p1copies is not None:
598 if p2copies:
641 extra['p1copies'] = encodecopies(sortedfiles, p1copies)
599 extra['p2copies'] = encodecopies(p2copies)
642 if p2copies is not None:
643 extra['p2copies'] = encodecopies(sortedfiles, p2copies)
644 if filesadded is not None:
645 extra['filesadded'] = encodefileindices(sortedfiles, filesadded)
646 if filesremoved is not None:
647 extra['filesremoved'] = encodefileindices(sortedfiles, filesremoved)
600
648
601 if extra:
649 if extra:
602 extra = encodeextra(extra)
650 extra = encodeextra(extra)
603 parseddate = "%s %s" % (parseddate, extra)
651 parseddate = "%s %s" % (parseddate, extra)
604 l = [hex(manifest), user, parseddate] + sorted(files) + ["", desc]
652 l = [hex(manifest), user, parseddate] + sortedfiles + ["", desc]
605 text = "\n".join(l)
653 text = "\n".join(l)
606 return self.addrevision(text, transaction, len(self), p1, p2)
654 return self.addrevision(text, transaction, len(self), p1, p2)
607
655
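Because the encoders are index based, everything in extra is encoded against the same sorted file list that is written into the changelog entry itself. A small worked example of what ends up in extra::

    files = ['b.txt', 'a.txt']
    sortedfiles = sorted(files)              # ['a.txt', 'b.txt']

    extra = {
        'p1copies': encodecopies(sortedfiles, {'b.txt': 'a.txt'}),
        'filesadded': encodefileindices(sortedfiles, ['b.txt']),
    }
    # extra == {'p1copies': '1\x00a.txt', 'filesadded': '1'}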
@@ -138,7 +138,9 b' def _getmtimepaths(ui):'
138 modules.append(__version__)
138 modules.append(__version__)
139 except ImportError:
139 except ImportError:
140 pass
140 pass
141 files = [pycompat.sysexecutable]
141 files = []
142 if pycompat.sysexecutable:
143 files.append(pycompat.sysexecutable)
142 for m in modules:
144 for m in modules:
143 try:
145 try:
144 files.append(pycompat.fsencode(inspect.getabsfile(m)))
146 files.append(pycompat.fsencode(inspect.getabsfile(m)))
@@ -38,10 +38,12 b' from . import ('
38 pathutil,
38 pathutil,
39 phases,
39 phases,
40 pycompat,
40 pycompat,
41 repair,
41 revlog,
42 revlog,
42 rewriteutil,
43 rewriteutil,
43 scmutil,
44 scmutil,
44 smartset,
45 smartset,
46 state as statemod,
45 subrepoutil,
47 subrepoutil,
46 templatekw,
48 templatekw,
47 templater,
49 templater,
@@ -264,8 +266,8 b' def dorecord(ui, repo, commitfunc, cmdsu'
264 In the end we'll record interesting changes, and everything else
266 In the end we'll record interesting changes, and everything else
265 will be left in place, so the user can continue working.
267 will be left in place, so the user can continue working.
266 """
268 """
267
269 if not opts.get('interactive-unshelve'):
268 checkunfinished(repo, commit=True)
270 checkunfinished(repo, commit=True)
269 wctx = repo[None]
271 wctx = repo[None]
270 merge = len(wctx.parents()) > 1
272 merge = len(wctx.parents()) > 1
271 if merge:
273 if merge:
@@ -278,8 +280,8 b' def dorecord(ui, repo, commitfunc, cmdsu'
278 force = opts.get('force')
280 force = opts.get('force')
279 if not force:
281 if not force:
280 vdirs = []
282 vdirs = []
283 match = matchmod.badmatch(match, fail)
281 match.explicitdir = vdirs.append
284 match.explicitdir = vdirs.append
282 match.bad = fail
283
285
284 status = repo.status(match=match)
286 status = repo.status(match=match)
285
287
@@ -618,74 +620,18 b' To mark files as resolved: hg resolve -'
618
620
619 return _commentlines(msg)
621 return _commentlines(msg)
620
622
621 def _helpmessage(continuecmd, abortcmd):
622 msg = _('To continue: %s\n'
623 'To abort: %s') % (continuecmd, abortcmd)
624 return _commentlines(msg)
625
626 def _rebasemsg():
627 return _helpmessage('hg rebase --continue', 'hg rebase --abort')
628
629 def _histeditmsg():
630 return _helpmessage('hg histedit --continue', 'hg histedit --abort')
631
632 def _unshelvemsg():
633 return _helpmessage('hg unshelve --continue', 'hg unshelve --abort')
634
635 def _graftmsg():
636 return _helpmessage('hg graft --continue', 'hg graft --abort')
637
638 def _mergemsg():
639 return _helpmessage('hg commit', 'hg merge --abort')
640
641 def _bisectmsg():
642 msg = _('To mark the changeset good: hg bisect --good\n'
643 'To mark the changeset bad: hg bisect --bad\n'
644 'To abort: hg bisect --reset\n')
645 return _commentlines(msg)
646
647 def fileexistspredicate(filename):
648 return lambda repo: repo.vfs.exists(filename)
649
650 def _mergepredicate(repo):
651 return len(repo[None].parents()) > 1
652
653 STATES = (
654 # (state, predicate to detect states, helpful message function)
655 ('histedit', fileexistspredicate('histedit-state'), _histeditmsg),
656 ('bisect', fileexistspredicate('bisect.state'), _bisectmsg),
657 ('graft', fileexistspredicate('graftstate'), _graftmsg),
658 ('unshelve', fileexistspredicate('shelvedstate'), _unshelvemsg),
659 ('rebase', fileexistspredicate('rebasestate'), _rebasemsg),
660 # The merge state is part of a list that will be iterated over.
661 # They need to be last because some of the other unfinished states may also
662 # be in a merge or update state (eg. rebase, histedit, graft, etc).
663 # We want those to have priority.
664 ('merge', _mergepredicate, _mergemsg),
665 )
666
667 def _getrepostate(repo):
668 # experimental config: commands.status.skipstates
669 skip = set(repo.ui.configlist('commands', 'status.skipstates'))
670 for state, statedetectionpredicate, msgfn in STATES:
671 if state in skip:
672 continue
673 if statedetectionpredicate(repo):
674 return (state, statedetectionpredicate, msgfn)
675
676 def morestatus(repo, fm):
623 def morestatus(repo, fm):
677 statetuple = _getrepostate(repo)
624 statetuple = statemod.getrepostate(repo)
678 label = 'status.morestatus'
625 label = 'status.morestatus'
679 if statetuple:
626 if statetuple:
680 state, statedetectionpredicate, helpfulmsg = statetuple
627 state, helpfulmsg = statetuple
681 statemsg = _('The repository is in an unfinished *%s* state.') % state
628 statemsg = _('The repository is in an unfinished *%s* state.') % state
682 fm.plain('%s\n' % _commentlines(statemsg), label=label)
629 fm.plain('%s\n' % _commentlines(statemsg), label=label)
683 conmsg = _conflictsmsg(repo)
630 conmsg = _conflictsmsg(repo)
684 if conmsg:
631 if conmsg:
685 fm.plain('%s\n' % conmsg, label=label)
632 fm.plain('%s\n' % conmsg, label=label)
686 if helpfulmsg:
633 if helpfulmsg:
687 helpmsg = helpfulmsg()
634 fm.plain('%s\n' % _commentlines(helpfulmsg), label=label)
688 fm.plain('%s\n' % helpmsg, label=label)
689
635
690 def findpossible(cmd, table, strict=False):
636 def findpossible(cmd, table, strict=False):
691 """
637 """
@@ -1668,6 +1614,14 b' def _exportfntemplate(repo, revs, basefm'
1668 _exportsingle(repo, ctx, fm, match, switch_parent, seqno,
1614 _exportsingle(repo, ctx, fm, match, switch_parent, seqno,
1669 diffopts)
1615 diffopts)
1670
1616
1617 def _prefetchchangedfiles(repo, revs, match):
1618 allfiles = set()
1619 for rev in revs:
1620 for file in repo[rev].files():
1621 if not match or match(file):
1622 allfiles.add(file)
1623 scmutil.prefetchfiles(repo, revs, scmutil.matchfiles(repo, allfiles))
1624
1671 def export(repo, revs, basefm, fntemplate='hg-%h.patch', switch_parent=False,
1625 def export(repo, revs, basefm, fntemplate='hg-%h.patch', switch_parent=False,
1672 opts=None, match=None):
1626 opts=None, match=None):
1673 '''export changesets as hg patches
1627 '''export changesets as hg patches
@@ -1692,7 +1646,7 b' def export(repo, revs, basefm, fntemplat'
1692 the given template.
1646 the given template.
1693 Otherwise: All revs will be written to basefm.
1647 Otherwise: All revs will be written to basefm.
1694 '''
1648 '''
1695 scmutil.prefetchfiles(repo, revs, match)
1649 _prefetchchangedfiles(repo, revs, match)
1696
1650
1697 if not fntemplate:
1651 if not fntemplate:
1698 _exportfile(repo, revs, basefm, '<unnamed>', switch_parent, opts, match)
1652 _exportfile(repo, revs, basefm, '<unnamed>', switch_parent, opts, match)
@@ -1702,7 +1656,7 b' def export(repo, revs, basefm, fntemplat'
1702
1656
1703 def exportfile(repo, revs, fp, switch_parent=False, opts=None, match=None):
1657 def exportfile(repo, revs, fp, switch_parent=False, opts=None, match=None):
1704 """Export changesets to the given file stream"""
1658 """Export changesets to the given file stream"""
1705 scmutil.prefetchfiles(repo, revs, match)
1659 _prefetchchangedfiles(repo, revs, match)
1706
1660
1707 dest = getattr(fp, 'name', '<unnamed>')
1661 dest = getattr(fp, 'name', '<unnamed>')
1708 with formatter.formatter(repo.ui, fp, 'export', {}) as fm:
1662 with formatter.formatter(repo.ui, fp, 'export', {}) as fm:
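Export previously prefetched every file the matcher covered; the new helper narrows that to files actually changed in the exported revisions, which matters for remote-store extensions such as lfs. For illustration::

    revs = [repo['.'].rev()]
    # Old: scmutil.prefetchfiles(repo, revs, match) could pull the
    # whole matched tree. New: only files touched by revs.
    _prefetchchangedfiles(repo, revs, None)  # match=None: no filter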
@@ -2345,14 +2299,22 b' def remove(ui, repo, m, prefix, uipathfn'
2345
2299
2346 return ret
2300 return ret
2347
2301
2302 def _catfmtneedsdata(fm):
2303 return not fm.datahint() or 'data' in fm.datahint()
2304
2348 def _updatecatformatter(fm, ctx, matcher, path, decode):
2305 def _updatecatformatter(fm, ctx, matcher, path, decode):
2349 """Hook for adding data to the formatter used by ``hg cat``.
2306 """Hook for adding data to the formatter used by ``hg cat``.
2350
2307
2351 Extensions (e.g., lfs) can wrap this to inject keywords/data, but must call
2308 Extensions (e.g., lfs) can wrap this to inject keywords/data, but must call
2352 this method first."""
2309 this method first."""
2353 data = ctx[path].data()
2310
2354 if decode:
2311 # data() can be expensive to fetch (e.g. lfs), so don't fetch it if it
2355 data = ctx.repo().wwritedata(path, data)
2312 # wasn't requested.
2313 data = b''
2314 if _catfmtneedsdata(fm):
2315 data = ctx[path].data()
2316 if decode:
2317 data = ctx.repo().wwritedata(path, data)
2356 fm.startitem()
2318 fm.startitem()
2357 fm.context(ctx=ctx)
2319 fm.context(ctx=ctx)
2358 fm.write('data', '%s', data)
2320 fm.write('data', '%s', data)
@@ -2383,13 +2345,15 b' def cat(ui, repo, ctx, matcher, basefm, '
2383 mfnode = ctx.manifestnode()
2345 mfnode = ctx.manifestnode()
2384 try:
2346 try:
2385 if mfnode and mfl[mfnode].find(file)[0]:
2347 if mfnode and mfl[mfnode].find(file)[0]:
2386 scmutil.prefetchfiles(repo, [ctx.rev()], matcher)
2348 if _catfmtneedsdata(basefm):
2349 scmutil.prefetchfiles(repo, [ctx.rev()], matcher)
2387 write(file)
2350 write(file)
2388 return 0
2351 return 0
2389 except KeyError:
2352 except KeyError:
2390 pass
2353 pass
2391
2354
2392 scmutil.prefetchfiles(repo, [ctx.rev()], matcher)
2355 if _catfmtneedsdata(basefm):
2356 scmutil.prefetchfiles(repo, [ctx.rev()], matcher)
2393
2357
2394 for abs in ctx.walk(matcher):
2358 for abs in ctx.walk(matcher):
2395 write(abs)
2359 write(abs)
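fm.datahint(), the formatter API leaned on above, reports which fields the active template references, so ``hg cat`` can skip reading and prefetching file contents when {data} is unused. Roughly::

    # 'hg cat -T "{path}\n"': datahint() -> {'path'}, so both the
    # potentially expensive data() call and the prefetch are skipped.
    needsdata = not fm.datahint() or 'data' in fm.datahint()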
@@ -2583,12 +2547,18 b' def amend(ui, repo, old, extra, pats, op'
2583 message = logmessage(ui, opts)
2547 message = logmessage(ui, opts)
2584
2548
2585 editform = mergeeditform(old, 'commit.amend')
2549 editform = mergeeditform(old, 'commit.amend')
2586 editor = getcommiteditor(editform=editform,
2587 **pycompat.strkwargs(opts))
2588
2550
2589 if not message:
2551 if not message:
2590 editor = getcommiteditor(edit=True, editform=editform)
2591 message = old.description()
2552 message = old.description()
2553 # Default if message isn't provided and --edit is not passed is to
2554 # invoke editor, but allow --no-edit. If somehow we don't have any
2555 # description, let's always start the editor.
2556 doedit = not message or opts.get('edit') in [True, None]
2557 else:
2558 # Default if message is provided is to not invoke editor, but allow
2559 # --edit.
2560 doedit = opts.get('edit') is True
2561 editor = getcommiteditor(edit=doedit, editform=editform)
2592
2562
2593 pureextra = extra.copy()
2563 pureextra = extra.copy()
2594 extra['amend_source'] = old.hex()
2564 extra['amend_source'] = old.hex()
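A hedged restatement of the editor-invocation rules introduced above, as a standalone helper with a hypothetical name::

    def amendwantseditor(message, olddescription, editopt):
        # editopt: True (--edit), False (--no-edit), None (not passed)
        if not message:
            # Reusing the old description: edit by default, unless
            # --no-edit was given and a description exists to reuse.
            return not olddescription or editopt in (True, None)
        # Explicit -m/-l message: only edit when --edit is passed.
        return editopt is True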
@@ -3289,66 +3259,69 b' summaryhooks = util.hooks()'
3289 # - (desturl, destbranch, destpeer, outgoing)
3259 # - (desturl, destbranch, destpeer, outgoing)
3290 summaryremotehooks = util.hooks()
3260 summaryremotehooks = util.hooks()
3291
3261
3292 # A list of state files kept by multistep operations like graft.
3262
3293 # Since graft cannot be aborted, it is considered 'clearable' by update.
3263 def checkunfinished(repo, commit=False, skipmerge=False):
3294 # note: bisect is intentionally excluded
3295 # (state file, clearable, allowcommit, error, hint)
3296 unfinishedstates = [
3297 ('graftstate', True, False, _('graft in progress'),
3298 _("use 'hg graft --continue' or 'hg graft --stop' to stop")),
3299 ('updatestate', True, False, _('last update was interrupted'),
3300 _("use 'hg update' to get a consistent checkout"))
3301 ]
3302
3303 def checkunfinished(repo, commit=False):
3304 '''Look for an unfinished multistep operation, like graft, and abort
3264 '''Look for an unfinished multistep operation, like graft, and abort
3305 if found. It's probably good to check this right before
3265 if found. It's probably good to check this right before
3306 bailifchanged().
3266 bailifchanged().
3307 '''
3267 '''
3308 # Check for non-clearable states first, so things like rebase will take
3268 # Check for non-clearable states first, so things like rebase will take
3309 # precedence over update.
3269 # precedence over update.
3310 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3270 for state in statemod._unfinishedstates:
3311 if clearable or (commit and allowcommit):
3271 if (state._clearable or (commit and state._allowcommit) or
3272 state._reportonly):
3312 continue
3273 continue
3313 if repo.vfs.exists(f):
3274 if state.isunfinished(repo):
3314 raise error.Abort(msg, hint=hint)
3275 raise error.Abort(state.msg(), hint=state.hint())
3315
3276
3316 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3277 for s in statemod._unfinishedstates:
3317 if not clearable or (commit and allowcommit):
3278 if (not s._clearable or (commit and s._allowcommit) or
3279 (s._opname == 'merge' and skipmerge) or s._reportonly):
3318 continue
3280 continue
3319 if repo.vfs.exists(f):
3281 if s.isunfinished(repo):
3320 raise error.Abort(msg, hint=hint)
3282 raise error.Abort(s.msg(), hint=s.hint())
3321
3283
3322 def clearunfinished(repo):
3284 def clearunfinished(repo):
3323 '''Check for unfinished operations (as above), and clear the ones
3285 '''Check for unfinished operations (as above), and clear the ones
3324 that are clearable.
3286 that are clearable.
3325 '''
3287 '''
3326 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3288 for state in statemod._unfinishedstates:
3327 if not clearable and repo.vfs.exists(f):
3289 if state._reportonly:
3328 raise error.Abort(msg, hint=hint)
3290 continue
3329 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3291 if not state._clearable and state.isunfinished(repo):
3330 if clearable and repo.vfs.exists(f):
3292 raise error.Abort(state.msg(), hint=state.hint())
3331 util.unlink(repo.vfs.join(f))
3293
3332
3294 for s in statemod._unfinishedstates:
3333 afterresolvedstates = [
3295 if s._opname == 'merge' or state._reportonly:
3334 ('graftstate',
3296 continue
3335 _('hg graft --continue')),
3297 if s._clearable and s.isunfinished(repo):
3336 ]
3298 util.unlink(repo.vfs.join(s._fname))
3299
3300 def getunfinishedstate(repo):
3301 ''' Checks for unfinished operations and returns statecheck object
3302 for it'''
3303 for state in statemod._unfinishedstates:
3304 if state.isunfinished(repo):
3305 return state
3306 return None
3337
3307
3338 def howtocontinue(repo):
3308 def howtocontinue(repo):
3339 '''Check for an unfinished operation and return the command to finish
3309 '''Check for an unfinished operation and return the command to finish
3340 it.
3310 it.
3341
3311
3342 afterresolvedstates tuples define a .hg/{file} and the corresponding
3312 statemod._unfinishedstates list is checked for an unfinished operation
3343 command needed to finish it.
3313 and the corresponding message to finish it is generated if a method to
3314 continue is supported by the operation.
3344
3315
3345 Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is
3316 Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is
3346 a boolean.
3317 a boolean.
3347 '''
3318 '''
3348 contmsg = _("continue: %s")
3319 contmsg = _("continue: %s")
3349 for f, msg in afterresolvedstates:
3320 for state in statemod._unfinishedstates:
3350 if repo.vfs.exists(f):
3321 if not state._continueflag:
3351 return contmsg % msg, True
3322 continue
3323 if state.isunfinished(repo):
3324 return contmsg % state.continuemsg(), True
3352 if repo[None].dirty(missing=True, merge=False, branch=False):
3325 if repo[None].dirty(missing=True, merge=False, branch=False):
3353 return contmsg % _("hg commit"), False
3326 return contmsg % _("hg commit"), False
3354 return None, None
3327 return None, None
@@ -3356,8 +3329,8 b' def howtocontinue(repo):'
3356 def checkafterresolved(repo):
3329 def checkafterresolved(repo):
3357 '''Inform the user about the next action after completing hg resolve
3330 '''Inform the user about the next action after completing hg resolve
3358
3331
3359 If there's a matching afterresolvedstates, howtocontinue will yield
3332 If there's an unfinished operation that supports the continue flag,
3360 repo.ui.warn as the reporter.
3333 howtocontinue will yield repo.ui.warn as the reporter.
3361
3334
3362 Otherwise, it will yield repo.ui.note.
3335 Otherwise, it will yield repo.ui.note.
3363 '''
3336 '''
@@ -3382,3 +3355,73 b' def wrongtooltocontinue(repo, task):'
3382 if after[1]:
3355 if after[1]:
3383 hint = after[0]
3356 hint = after[0]
3384 raise error.Abort(_('no %s in progress') % task, hint=hint)
3357 raise error.Abort(_('no %s in progress') % task, hint=hint)
3358
3359 def abortgraft(ui, repo, graftstate):
3360 """abort the interrupted graft and rollbacks to the state before interrupted
3361 graft"""
3362 if not graftstate.exists():
3363 raise error.Abort(_("no interrupted graft to abort"))
3364 statedata = readgraftstate(repo, graftstate)
3365 newnodes = statedata.get('newnodes')
3366 if newnodes is None:
3367 # an old graft state which does not have all the data required to abort
3368 # the graft
3369 raise error.Abort(_("cannot abort using an old graftstate"))
3370
3371 # changeset from which graft operation was started
3372 if len(newnodes) > 0:
3373 startctx = repo[newnodes[0]].p1()
3374 else:
3375 startctx = repo['.']
3376 # whether to strip or not
3377 cleanup = False
3378 from . import hg
3379 if newnodes:
3380 newnodes = [repo[r].rev() for r in newnodes]
3381 cleanup = True
3382 # checking that none of the newnodes turned public or is public
3383 immutable = [c for c in newnodes if not repo[c].mutable()]
3384 if immutable:
3385 repo.ui.warn(_("cannot clean up public changesets %s\n")
3386 % ', '.join(bytes(repo[r]) for r in immutable),
3387 hint=_("see 'hg help phases' for details"))
3388 cleanup = False
3389
3390 # checking that no new nodes are created on top of grafted revs
3391 desc = set(repo.changelog.descendants(newnodes))
3392 if desc - set(newnodes):
3393 repo.ui.warn(_("new changesets detected on destination "
3394 "branch, can't strip\n"))
3395 cleanup = False
3396
3397 if cleanup:
3398 with repo.wlock(), repo.lock():
3399 hg.updaterepo(repo, startctx.node(), overwrite=True)
3400 # stripping the new nodes created
3401 strippoints = [c.node() for c in repo.set("roots(%ld)",
3402 newnodes)]
3403 repair.strip(repo.ui, repo, strippoints, backup=False)
3404
3405 if not cleanup:
3406 # we don't update to the startnode if we can't strip
3407 startctx = repo['.']
3408 hg.updaterepo(repo, startctx.node(), overwrite=True)
3409
3410 ui.status(_("graft aborted\n"))
3411 ui.status(_("working directory is now at %s\n") % startctx.hex()[:12])
3412 graftstate.delete()
3413 return 0
3414
3415 def readgraftstate(repo, graftstate):
3416 """read the graft state file and return a dict of the data stored in it"""
3417 try:
3418 return graftstate.read()
3419 except error.CorruptedState:
3420 nodes = repo.vfs.read('graftstate').splitlines()
3421 return {'nodes': nodes}
3422
3423 def hgabortgraft(ui, repo):
3424 """ abort logic for aborting graft using 'hg abort'"""
3425 with repo.wlock():
3426 graftstate = statemod.cmdstate(repo, 'graftstate')
3427 return abortgraft(ui, repo, graftstate)
@@ -53,16 +53,17 b' from . import ('
53 pycompat,
53 pycompat,
54 rcutil,
54 rcutil,
55 registrar,
55 registrar,
56 repair,
57 revsetlang,
56 revsetlang,
58 rewriteutil,
57 rewriteutil,
59 scmutil,
58 scmutil,
60 server,
59 server,
60 shelve as shelvemod,
61 state as statemod,
61 state as statemod,
62 streamclone,
62 streamclone,
63 tags as tagsmod,
63 tags as tagsmod,
64 ui as uimod,
64 ui as uimod,
65 util,
65 util,
66 verify as verifymod,
66 wireprotoserver,
67 wireprotoserver,
67 )
68 )
68 from .utils import (
69 from .utils import (
@@ -130,6 +131,29 b' debugrevlogopts = cmdutil.debugrevlogopt'
130
131
131 # Commands start here, listed alphabetically
132 # Commands start here, listed alphabetically
132
133
134 @command('abort',
135 dryrunopts, helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
136 helpbasic=True)
137 def abort(ui, repo, **opts):
138 """abort an unfinished operation (EXPERIMENTAL)
139
140 Aborts a multistep operation like graft, histedit, rebase, merge,
141 or unshelve, if it is in an unfinished state.
142
143 Use --dry-run/-n to perform a dry run of the command.
144 """
145 dryrun = opts.get(r'dry_run')
146 abortstate = cmdutil.getunfinishedstate(repo)
147 if not abortstate:
148 raise error.Abort(_('no operation in progress'))
149 if not abortstate.abortfunc:
150 raise error.Abort((_("%s in progress but does not support 'hg abort'") %
151 (abortstate._opname)), hint=abortstate.hint())
152 if dryrun:
153 ui.status(_('%s in progress, will be aborted\n') % (abortstate._opname))
154 return
155 return abortstate.abortfunc(ui, repo)
156
133 @command('add',
157 @command('add',
134 walkopts + subrepoopts + dryrunopts,
158 walkopts + subrepoopts + dryrunopts,
135 _('[OPTION]... [FILE]...'),
159 _('[OPTION]... [FILE]...'),
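Stripped of its error handling, the command body above is a thin shim over the new state registry; a hedged sketch::

    abortstate = cmdutil.getunfinishedstate(repo)  # statecheck or None
    if abortstate is not None and abortstate.abortfunc:
        abortstate.abortfunc(ui, repo)  # e.g. cmdutil.hgabortgraft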
@@ -1582,6 +1606,8 b' def clone(ui, source, dest=None, **opts)'
1582 ('', 'amend', None, _('amend the parent of the working directory')),
1606 ('', 'amend', None, _('amend the parent of the working directory')),
1583 ('s', 'secret', None, _('use the secret phase for committing')),
1607 ('s', 'secret', None, _('use the secret phase for committing')),
1584 ('e', 'edit', None, _('invoke editor on commit messages')),
1608 ('e', 'edit', None, _('invoke editor on commit messages')),
1609 ('', 'force-close-branch', None,
1610 _('forcibly close branch from a non-head changeset (ADVANCED)')),
1585 ('i', 'interactive', None, _('use interactive mode')),
1611 ('i', 'interactive', None, _('use interactive mode')),
1586 ] + walkopts + commitopts + commitopts2 + subrepoopts,
1612 ] + walkopts + commitopts + commitopts2 + subrepoopts,
1587 _('[OPTION]... [FILE]...'),
1613 _('[OPTION]... [FILE]...'),
@@ -1669,11 +1695,19 b' def _docommit(ui, repo, *pats, **opts):'
1669 bheads = repo.branchheads(branch)
1695 bheads = repo.branchheads(branch)
1670
1696
1671 extra = {}
1697 extra = {}
1672 if opts.get('close_branch'):
1698 if opts.get('close_branch') or opts.get('force_close_branch'):
1673 extra['close'] = '1'
1699 extra['close'] = '1'
1674
1700
1675 if not bheads:
1701 if repo['.'].closesbranch():
1676 raise error.Abort(_('can only close branch heads'))
1702 raise error.Abort(_('current revision is already a branch closing'
1703 ' head'))
1704 elif not bheads:
1705 raise error.Abort(_('branch "%s" has no heads to close') % branch)
1706 elif (branch == repo['.'].branch() and repo['.'].node() not in bheads
1707 and not opts.get('force_close_branch')):
1708 hint = _('use --force-close-branch to close branch from a non-head'
1709 ' changeset')
1710 raise error.Abort(_('can only close branch heads'), hint=hint)
1677 elif opts.get('amend'):
1711 elif opts.get('amend'):
1678 if (repo['.'].p1().branch() != branch and
1712 if (repo['.'].p1().branch() != branch and
1679 repo['.'].p2().branch() != branch):
1713 repo['.'].p2().branch() != branch):
@@ -1732,6 +1766,10 b' def _docommit(ui, repo, *pats, **opts):'
1732
1766
1733 cmdutil.commitstatus(repo, node, branch, bheads, opts)
1767 cmdutil.commitstatus(repo, node, branch, bheads, opts)
1734
1768
1769 if not ui.quiet and ui.configbool('commands', 'commit.post-status'):
1770 status(ui, repo, modified=True, added=True, removed=True, deleted=True,
1771 unknown=True, subrepos=opts.get('subrepos'))
1772
1735 @command('config|showconfig|debugconfig',
1773 @command('config|showconfig|debugconfig',
1736 [('u', 'untrusted', None, _('show untrusted configuration options')),
1774 [('u', 'untrusted', None, _('show untrusted configuration options')),
1737 ('e', 'edit', None, _('edit user config')),
1775 ('e', 'edit', None, _('edit user config')),
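The post-commit status display is driven by a new boolean config knob, registered later in this changeset with a default of False. To opt in::

    [commands]
    commit.post-status = True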
@@ -1853,6 +1891,30 b' def config(ui, repo, *values, **opts):'
1853 return 0
1891 return 0
1854 return 1
1892 return 1
1855
1893
1894 @command('continue',
1895 dryrunopts, helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
1896 helpbasic=True)
1897 def continuecmd(ui, repo, **opts):
1898 """resumes an interrupted operation (EXPERIMENTAL)
1899
1900 Finishes a multistep operation like graft, histedit, rebase, merge,
1901 or unshelve, if it is in an interrupted state.
1902
1903 Use --dry-run/-n to perform a dry run of the command.
1904 """
1905 dryrun = opts.get(r'dry_run')
1906 contstate = cmdutil.getunfinishedstate(repo)
1907 if not contstate:
1908 raise error.Abort(_('no operation in progress'))
1909 if not contstate.continuefunc:
1910 raise error.Abort((_("%s in progress but does not support "
1911 "'hg continue'") % (contstate._opname)),
1912 hint=contstate.continuemsg())
1913 if dryrun:
1914 ui.status(_('%s in progress, will be resumed\n') % (contstate._opname))
1915 return
1916 return contstate.continuefunc(ui, repo)
1917
1856 @command('copy|cp',
1918 @command('copy|cp',
1857 [('A', 'after', None, _('record a copy that has already occurred')),
1919 [('A', 'after', None, _('record a copy that has already occurred')),
1858 ('f', 'force', None, _('forcibly copy over an existing managed file')),
1920 ('f', 'force', None, _('forcibly copy over an existing managed file')),
@@ -2449,14 +2511,14 b' def _dograft(ui, repo, *revs, **opts):'
2449 opts.get('currentuser'), opts.get('rev'))):
2511 opts.get('currentuser'), opts.get('rev'))):
2450 raise error.Abort(_("cannot specify any other flag with '--abort'"))
2512 raise error.Abort(_("cannot specify any other flag with '--abort'"))
2451
2513
2452 return _abortgraft(ui, repo, graftstate)
2514 return cmdutil.abortgraft(ui, repo, graftstate)
2453 elif opts.get('continue'):
2515 elif opts.get('continue'):
2454 cont = True
2516 cont = True
2455 if revs:
2517 if revs:
2456 raise error.Abort(_("can't specify --continue and revisions"))
2518 raise error.Abort(_("can't specify --continue and revisions"))
2457 # read in unfinished revisions
2519 # read in unfinished revisions
2458 if graftstate.exists():
2520 if graftstate.exists():
2459 statedata = _readgraftstate(repo, graftstate)
2521 statedata = cmdutil.readgraftstate(repo, graftstate)
2460 if statedata.get('date'):
2522 if statedata.get('date'):
2461 opts['date'] = statedata['date']
2523 opts['date'] = statedata['date']
2462 if statedata.get('user'):
2524 if statedata.get('user'):
@@ -2626,69 +2688,6 b' def _dograft(ui, repo, *revs, **opts):'
2626
2688
2627 return 0
2689 return 0
2628
2690
2629 def _abortgraft(ui, repo, graftstate):
2630 """abort the interrupted graft and rollbacks to the state before interrupted
2631 graft"""
2632 if not graftstate.exists():
2633 raise error.Abort(_("no interrupted graft to abort"))
2634 statedata = _readgraftstate(repo, graftstate)
2635 newnodes = statedata.get('newnodes')
2636 if newnodes is None:
2637 # and old graft state which does not have all the data required to abort
2638 # the graft
2639 raise error.Abort(_("cannot abort using an old graftstate"))
2640
2641 # changeset from which graft operation was started
2642 if len(newnodes) > 0:
2643 startctx = repo[newnodes[0]].p1()
2644 else:
2645 startctx = repo['.']
2646 # whether to strip or not
2647 cleanup = False
2648 if newnodes:
2649 newnodes = [repo[r].rev() for r in newnodes]
2650 cleanup = True
2651 # checking that none of the newnodes turned public or is public
2652 immutable = [c for c in newnodes if not repo[c].mutable()]
2653 if immutable:
2654 repo.ui.warn(_("cannot clean up public changesets %s\n")
2655 % ', '.join(bytes(repo[r]) for r in immutable),
2656 hint=_("see 'hg help phases' for details"))
2657 cleanup = False
2658
2659 # checking that no new nodes are created on top of grafted revs
2660 desc = set(repo.changelog.descendants(newnodes))
2661 if desc - set(newnodes):
2662 repo.ui.warn(_("new changesets detected on destination "
2663 "branch, can't strip\n"))
2664 cleanup = False
2665
2666 if cleanup:
2667 with repo.wlock(), repo.lock():
2668 hg.updaterepo(repo, startctx.node(), overwrite=True)
2669 # stripping the new nodes created
2670 strippoints = [c.node() for c in repo.set("roots(%ld)",
2671 newnodes)]
2672 repair.strip(repo.ui, repo, strippoints, backup=False)
2673
2674 if not cleanup:
2675 # we don't update to the startnode if we can't strip
2676 startctx = repo['.']
2677 hg.updaterepo(repo, startctx.node(), overwrite=True)
2678
2679 ui.status(_("graft aborted\n"))
2680 ui.status(_("working directory is now at %s\n") % startctx.hex()[:12])
2681 graftstate.delete()
2682 return 0
2683
2684 def _readgraftstate(repo, graftstate):
2685 """read the graft state file and return a dict of the data stored in it"""
2686 try:
2687 return graftstate.read()
2688 except error.CorruptedState:
2689 nodes = repo.vfs.read('graftstate').splitlines()
2690 return {'nodes': nodes}
2691
2692 def _stopgraft(ui, repo, graftstate):
2691 def _stopgraft(ui, repo, graftstate):
2693 """stop the interrupted graft"""
2692 """stop the interrupted graft"""
2694 if not graftstate.exists():
2693 if not graftstate.exists():
@@ -2700,6 +2699,12 b' def _stopgraft(ui, repo, graftstate):'
2700 ui.status(_("working directory is now at %s\n") % pctx.hex()[:12])
2699 ui.status(_("working directory is now at %s\n") % pctx.hex()[:12])
2701 return 0
2700 return 0
2702
2701
2702 statemod.addunfinished(
2703 'graft', fname='graftstate', clearable=True, stopflag=True,
2704 continueflag=True, abortfunc=cmdutil.hgabortgraft,
2705 cmdhint=_("use 'hg graft --continue' or 'hg graft --stop' to stop")
2706 )
2707
2703 @command('grep',
2708 @command('grep',
2704 [('0', 'print0', None, _('end fields with NUL')),
2709 [('0', 'print0', None, _('end fields with NUL')),
2705 ('', 'all', None, _('print all revisions that match (DEPRECATED) ')),
2710 ('', 'all', None, _('print all revisions that match (DEPRECATED) ')),
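With graft registered through statemod.addunfinished, third-party operations can join the same abort/continue machinery. A hedged sketch for a hypothetical extension ('myop', myopabort, and the state file name are invented; the keyword arguments mirror the calls in this changeset)::

    from mercurial import state as statemod
    from mercurial.i18n import _

    def myopabort(ui, repo):
        # roll back whatever 'myop' left half-done
        return 0

    statemod.addunfinished(
        'myop', fname='myopstate', clearable=False, continueflag=True,
        abortfunc=myopabort,
        cmdhint=_("use 'hg myop --continue' or 'hg abort'"),
    )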
@@ -3715,7 +3720,8 b' def locate(ui, repo, *pats, **opts):'
3715 _('follow line range of specified file (EXPERIMENTAL)'),
3720 _('follow line range of specified file (EXPERIMENTAL)'),
3716 _('FILE,RANGE')),
3721 _('FILE,RANGE')),
3717 ('', 'removed', None, _('include revisions where files were removed')),
3722 ('', 'removed', None, _('include revisions where files were removed')),
3718 ('m', 'only-merges', None, _('show only merges (DEPRECATED)')),
3723 ('m', 'only-merges', None,
3724 _('show only merges (DEPRECATED) (use -r "merge()" instead)')),
3719 ('u', 'user', [], _('revisions committed by user'), _('USER')),
3725 ('u', 'user', [], _('revisions committed by user'), _('USER')),
3720 ('', 'only-branch', [],
3726 ('', 'only-branch', [],
3721 _('show only changesets within the given named branch (DEPRECATED)'),
3727 _('show only changesets within the given named branch (DEPRECATED)'),
@@ -3876,12 +3882,12 b' def log(ui, repo, *pats, **opts):'
3876 # then filter the result by logcmdutil._makerevset() and --limit
3882 # then filter the result by logcmdutil._makerevset() and --limit
3877 revs, differ = logcmdutil.getlinerangerevs(repo, revs, opts)
3883 revs, differ = logcmdutil.getlinerangerevs(repo, revs, opts)
3878
3884
3879 getrenamed = None
3885 getcopies = None
3880 if opts.get('copies'):
3886 if opts.get('copies'):
3881 endrev = None
3887 endrev = None
3882 if revs:
3888 if revs:
3883 endrev = revs.max() + 1
3889 endrev = revs.max() + 1
3884 getrenamed = scmutil.getrenamedfn(repo, endrev=endrev)
3890 getcopies = scmutil.getcopiesfn(repo, endrev=endrev)
3885
3891
3886 ui.pager('log')
3892 ui.pager('log')
3887 displayer = logcmdutil.changesetdisplayer(ui, repo, opts, differ,
3893 displayer = logcmdutil.changesetdisplayer(ui, repo, opts, differ,
@@ -3890,7 +3896,7 b' def log(ui, repo, *pats, **opts):'
3890 displayfn = logcmdutil.displaygraphrevs
3896 displayfn = logcmdutil.displaygraphrevs
3891 else:
3897 else:
3892 displayfn = logcmdutil.displayrevs
3898 displayfn = logcmdutil.displayrevs
3893 displayfn(ui, repo, revs, displayer, getrenamed)
3899 displayfn(ui, repo, revs, displayer, getcopies)
3894
3900
3895 @command('manifest',
3901 @command('manifest',
3896 [('r', 'rev', '', _('revision to display'), _('REV')),
3902 [('r', 'rev', '', _('revision to display'), _('REV')),
@@ -3983,7 +3989,7 b' def merge(ui, repo, node=None, **opts):'
3983 If no revision is specified, the working directory's parent is a
3989 If no revision is specified, the working directory's parent is a
3984 head revision, and the current branch contains exactly one other
3990 head revision, and the current branch contains exactly one other
3985 head, the other head is merged with by default. Otherwise, an
3991 head, the other head is merged with by default. Otherwise, an
3986 explicit revision with which to merge with must be provided.
3992 explicit revision with which to merge must be provided.
3987
3993
3988 See :hg:`help resolve` for information on handling file conflicts.
3994 See :hg:`help resolve` for information on handling file conflicts.
3989
3995
@@ -3999,6 +4005,10 b' def merge(ui, repo, node=None, **opts):'
3999 if abort and repo.dirstate.p2() == nullid:
4005 if abort and repo.dirstate.p2() == nullid:
4000 cmdutil.wrongtooltocontinue(repo, _('merge'))
4006 cmdutil.wrongtooltocontinue(repo, _('merge'))
4001 if abort:
4007 if abort:
4008 state = cmdutil.getunfinishedstate(repo)
4009 if state and state._opname != 'merge':
4010 raise error.Abort(_('cannot abort merge with %s in progress') %
4011 (state._opname), hint=state.hint())
4002 if node:
4012 if node:
4003 raise error.Abort(_("cannot specify a node with --abort"))
4013 raise error.Abort(_("cannot specify a node with --abort"))
4004 if opts.get('rev'):
4014 if opts.get('rev'):
@@ -4036,6 +4046,14 b' def merge(ui, repo, node=None, **opts):'
4036 return hg.merge(repo, node, force=force, mergeforce=force,
4046 return hg.merge(repo, node, force=force, mergeforce=force,
4037 labels=labels, abort=abort)
4047 labels=labels, abort=abort)
4038
4048
4049 statemod.addunfinished(
4050 'merge', fname=None, clearable=True, allowcommit=True,
4051 cmdmsg=_('outstanding uncommitted merge'), abortfunc=hg.abortmerge,
4052 statushint=_('To continue: hg commit\n'
4053 'To abort: hg merge --abort'),
4054 cmdhint=_("use 'hg commit' or 'hg merge --abort'")
4055 )
4056
4039 @command('outgoing|out',
4057 @command('outgoing|out',
4040 [('f', 'force', None, _('run even when the destination is unrelated')),
4058 [('f', 'force', None, _('run even when the destination is unrelated')),
4041 ('r', 'rev', [],
4059 ('r', 'rev', [],
@@ -4672,7 +4690,7 b' def recover(ui, repo, **opts):'
4672 """
4690 """
4673 ret = repo.recover()
4691 ret = repo.recover()
4674 if ret:
4692 if ret:
4675 if opts['verify']:
4693 if opts[r'verify']:
4676 return hg.verify(repo)
4694 return hg.verify(repo)
4677 else:
4695 else:
4678 msg = _("(verify step skipped, run `hg verify` to check your "
4696 msg = _("(verify step skipped, run `hg verify` to check your "
@@ -5217,16 +5235,30 b' def rollback(ui, repo, **opts):'
5217 force=opts.get(r'force'))
5235 force=opts.get(r'force'))
5218
5236
5219 @command(
5237 @command(
5220 'root', [], intents={INTENT_READONLY},
5238 'root', [] + formatteropts, intents={INTENT_READONLY},
5221 helpcategory=command.CATEGORY_WORKING_DIRECTORY)
5239 helpcategory=command.CATEGORY_WORKING_DIRECTORY)
5222 def root(ui, repo):
5240 def root(ui, repo, **opts):
5223 """print the root (top) of the current working directory
5241 """print the root (top) of the current working directory
5224
5242
5225 Print the root directory of the current repository.
5243 Print the root directory of the current repository.
5226
5244
5245 .. container:: verbose
5246
5247 Template:
5248
5249 The following keywords are supported in addition to the common template
5250 keywords and functions. See also :hg:`help templates`.
5251
5252 :hgpath: String. Path to the .hg directory.
5253 :storepath: String. Path to the directory holding versioned data.
5254
5227 Returns 0 on success.
5255 Returns 0 on success.
5228 """
5256 """
5229 ui.write(repo.root + "\n")
5257 opts = pycompat.byteskwargs(opts)
5258 with ui.formatter('root', opts) as fm:
5259 fm.startitem()
5260 fm.write('reporoot', '%s\n', repo.root)
5261 fm.data(hgpath=repo.path, storepath=repo.spath)
5230
5262
5231 @command('serve',
5263 @command('serve',
5232 [('A', 'accesslog', '', _('name of access log file to write to'),
5264 [('A', 'accesslog', '', _('name of access log file to write to'),
@@ -5299,6 +5331,106 b' def serve(ui, repo, **opts):'
5299 service = server.createservice(ui, repo, opts)
5331 service = server.createservice(ui, repo, opts)
5300 return server.runservice(opts, initfn=service.init, runfn=service.run)
5332 return server.runservice(opts, initfn=service.init, runfn=service.run)
5301
5333
5334 @command('shelve',
5335 [('A', 'addremove', None,
5336 _('mark new/missing files as added/removed before shelving')),
5337 ('u', 'unknown', None,
5338 _('store unknown files in the shelve')),
5339 ('', 'cleanup', None,
5340 _('delete all shelved changes')),
5341 ('', 'date', '',
5342 _('shelve with the specified commit date'), _('DATE')),
5343 ('d', 'delete', None,
5344 _('delete the named shelved change(s)')),
5345 ('e', 'edit', False,
5346 _('invoke editor on commit messages')),
5347 ('k', 'keep', False,
5348 _('shelve, but keep changes in the working directory')),
5349 ('l', 'list', None,
5350 _('list current shelves')),
5351 ('m', 'message', '',
5352 _('use text as shelve message'), _('TEXT')),
5353 ('n', 'name', '',
5354 _('use the given name for the shelved commit'), _('NAME')),
5355 ('p', 'patch', None,
5356 _('output patches for changes (provide the names of the shelved '
5357 'changes as positional arguments)')),
5358 ('i', 'interactive', None,
5359 _('interactive mode')),
5360 ('', 'stat', None,
5361 _('output diffstat-style summary of changes (provide the names of '
5362 'the shelved changes as positional arguments)')
5363 )] + cmdutil.walkopts,
5364 _('hg shelve [OPTION]... [FILE]...'),
5365 helpcategory=command.CATEGORY_WORKING_DIRECTORY)
5366 def shelve(ui, repo, *pats, **opts):
5367 '''save and set aside changes from the working directory
5368
5369 Shelving takes files that "hg status" reports as not clean, saves
5370 the modifications to a bundle (a shelved change), and reverts the
5371 files so that their state in the working directory becomes clean.
5372
5373 To restore these changes to the working directory, use "hg
5374 unshelve"; this will work even if you switch to a different
5375 commit.
5376
5377 When no files are specified, "hg shelve" saves all not-clean
5378 files. If specific files or directories are named, only changes to
5379 those files are shelved.
5380
5381 In a bare shelve (when no files are specified and the interactive,
5382 include, and exclude options are not used), shelving remembers whether
5383 the working directory was on a newly created branch, in other words on a
5384 different branch than its first parent. In that situation, unshelving
5385 restores the branch information to the working directory.
5386
5387 Each shelved change has a name that makes it easier to find later.
5388 The name of a shelved change defaults to being based on the active
5389 bookmark, or if there is no active bookmark, the current named
5390 branch. To specify a different name, use ``--name``.
5391
5392 To see a list of existing shelved changes, use the ``--list``
5393 option. For each shelved change, this will print its name, age,
5394 and description; use ``--patch`` or ``--stat`` for more details.
5395
5396 To delete specific shelved changes, use ``--delete``. To delete
5397 all shelved changes, use ``--cleanup``.
5398 '''
5399 opts = pycompat.byteskwargs(opts)
5400 allowables = [
5401 ('addremove', {'create'}), # 'create' is pseudo action
5402 ('unknown', {'create'}),
5403 ('cleanup', {'cleanup'}),
5404 # ('date', {'create'}), # ignored for passing '--date "0 0"' in tests
5405 ('delete', {'delete'}),
5406 ('edit', {'create'}),
5407 ('keep', {'create'}),
5408 ('list', {'list'}),
5409 ('message', {'create'}),
5410 ('name', {'create'}),
5411 ('patch', {'patch', 'list'}),
5412 ('stat', {'stat', 'list'}),
5413 ]
5414 def checkopt(opt):
5415 if opts.get(opt):
5416 for i, allowable in allowables:
5417 if opts[i] and opt not in allowable:
5418 raise error.Abort(_("options '--%s' and '--%s' may not be "
5419 "used together") % (opt, i))
5420 return True
5421 if checkopt('cleanup'):
5422 if pats:
5423 raise error.Abort(_("cannot specify names when using '--cleanup'"))
5424 return shelvemod.cleanupcmd(ui, repo)
5425 elif checkopt('delete'):
5426 return shelvemod.deletecmd(ui, repo, pats)
5427 elif checkopt('list'):
5428 return shelvemod.listcmd(ui, repo, pats, opts)
5429 elif checkopt('patch') or checkopt('stat'):
5430 return shelvemod.patchcmds(ui, repo, pats, opts)
5431 else:
5432 return shelvemod.createcmd(ui, repo, pats, opts)
5433
5302 _NOTTERSE = 'nothing'
5434 _NOTTERSE = 'nothing'
5303
5435
5304 @command('status|st',
5436 @command('status|st',
@@ -6027,6 +6159,68 b' def unbundle(ui, repo, fname1, *fnames, '
6027
6159
6028 return postincoming(ui, repo, modheads, opts.get(r'update'), None, None)
6160 return postincoming(ui, repo, modheads, opts.get(r'update'), None, None)
6029
6161
6162 @command('unshelve',
6163 [('a', 'abort', None,
6164 _('abort an incomplete unshelve operation')),
6165 ('c', 'continue', None,
6166 _('continue an incomplete unshelve operation')),
6167 ('i', 'interactive', None,
6168 _('use interactive mode (EXPERIMENTAL)')),
6169 ('k', 'keep', None,
6170 _('keep shelve after unshelving')),
6171 ('n', 'name', '',
6172 _('restore shelved change with given name'), _('NAME')),
6173 ('t', 'tool', '', _('specify merge tool')),
6174 ('', 'date', '',
6175 _('set date for temporary commits (DEPRECATED)'), _('DATE'))],
6176 _('hg unshelve [OPTION]... [FILE]... [-n SHELVED]'),
6177 helpcategory=command.CATEGORY_WORKING_DIRECTORY)
6178 def unshelve(ui, repo, *shelved, **opts):
6179 """restore a shelved change to the working directory
6180
6181 This command accepts an optional name of a shelved change to
6182 restore. If none is given, the most recent shelved change is used.
6183
6184 If a shelved change is applied successfully, the bundle that
6185 contains the shelved changes is moved to a backup location
6186 (.hg/shelve-backup).
6187
6188 Since you can restore a shelved change on top of an arbitrary
6189 commit, it is possible that unshelving will result in a conflict
6190 between your changes and the commits you are unshelving onto. If
6191 this occurs, you must resolve the conflict, then use
6192 ``--continue`` to complete the unshelve operation. (The bundle
6193 will not be moved until you successfully complete the unshelve.)
6194
6195 (Alternatively, you can use ``--abort`` to abandon an unshelve
6196 that causes a conflict. This reverts the unshelved changes, and
6197 leaves the bundle in place.)
6198
6199 If a bare shelve (when no files are specified, without the interactive,
6200 include, and exclude options) was done on a newly created branch, it will
6201 restore that branch information to the working directory.
6202
6203 After a successful unshelve, the shelved changes are stored in a
6204 backup directory. Only the N most recent backups are kept. N
6205 defaults to 10 but can be overridden using the ``shelve.maxbackups``
6206 configuration option.
6207
6208 .. container:: verbose
6209
6210 Timestamps in seconds are used to decide the order of backups. More
6211 than ``maxbackups`` backups may be kept if identical timestamps
6212 prevent deciding their exact order, for safety.
6213 """
6214 with repo.wlock():
6215 return shelvemod.dounshelve(ui, repo, *shelved, **opts)
6216
6217 statemod.addunfinished(
6218 'unshelve', fname='shelvedstate', continueflag=True,
6219 abortfunc=shelvemod.hgabortunshelve,
6220 continuefunc=shelvemod.hgcontinueunshelve,
6221 cmdmsg=_('unshelve already in progress'),
6222 )
6223
6030 @command('update|up|checkout|co',
6224 @command('update|up|checkout|co',
6031 [('C', 'clean', None, _('discard uncommitted changes (no backup)')),
6225 [('C', 'clean', None, _('discard uncommitted changes (no backup)')),
6032 ('c', 'check', None, _('require clean working directory')),
6226 ('c', 'check', None, _('require clean working directory')),
@@ -6123,7 +6317,6 b' def update(ui, repo, node=None, **opts):'
6123
6317
6124 with repo.wlock():
6318 with repo.wlock():
6125 cmdutil.clearunfinished(repo)
6319 cmdutil.clearunfinished(repo)
6126
6127 if date:
6320 if date:
6128 rev = cmdutil.finddate(ui, repo, date)
6321 rev = cmdutil.finddate(ui, repo, date)
6129
6322
@@ -6147,8 +6340,10 b' def update(ui, repo, node=None, **opts):'
6147 ui.warn("(%s)\n" % obsfatemsg)
6340 ui.warn("(%s)\n" % obsfatemsg)
6148 return ret
6341 return ret
6149
6342
6150 @command('verify', [], helpcategory=command.CATEGORY_MAINTENANCE)
6343 @command('verify',
6151 def verify(ui, repo):
6344 [('', 'full', False, 'perform more checks (EXPERIMENTAL)')],
6345 helpcategory=command.CATEGORY_MAINTENANCE)
6346 def verify(ui, repo, **opts):
6152 """verify the integrity of the repository
6347 """verify the integrity of the repository
6153
6348
6154 Verify the integrity of the current repository.
6349 Verify the integrity of the current repository.
@@ -6164,7 +6359,12 b' def verify(ui, repo):'
6164
6359
6165 Returns 0 on success, 1 if errors are encountered.
6360 Returns 0 on success, 1 if errors are encountered.
6166 """
6361 """
6167 return hg.verify(repo)
6362 opts = pycompat.byteskwargs(opts)
6363
6364 level = None
6365 if opts['full']:
6366 level = verifymod.VERIFY_FULL
6367 return hg.verify(repo, level)
6168
6368
6169 @command(
6369 @command(
6170 'version', [] + formatteropts, helpcategory=command.CATEGORY_HELP,
6370 'version', [] + formatteropts, helpcategory=command.CATEGORY_HELP,
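--full simply threads a verification level through to the verifier; per the new code above, the equivalent API calls are::

    from mercurial import hg, verify as verifymod

    hg.verify(repo)                         # standard checks
    hg.verify(repo, verifymod.VERIFY_FULL)  # what 'hg verify --full' runs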
@@ -6233,16 +6433,6 b' def version_(ui, **opts):'
6233 def loadcmdtable(ui, name, cmdtable):
6433 def loadcmdtable(ui, name, cmdtable):
6234 """Load command functions from specified cmdtable
6434 """Load command functions from specified cmdtable
6235 """
6435 """
6236 cmdtable = cmdtable.copy()
6237 for cmd in list(cmdtable):
6238 if not cmd.startswith('^'):
6239 continue
6240 ui.deprecwarn("old-style command registration '%s' in extension '%s'"
6241 % (cmd, name), '4.8')
6242 entry = cmdtable.pop(cmd)
6243 entry[0].helpbasic = True
6244 cmdtable[cmd[1:]] = entry
6245
6246 overrides = [cmd for cmd in cmdtable if cmd in table]
6436 overrides = [cmd for cmd in cmdtable if cmd in table]
6247 if overrides:
6437 if overrides:
6248 ui.warn(_("extension '%s' overrides commands: %s\n")
6438 ui.warn(_("extension '%s' overrides commands: %s\n")
@@ -202,6 +202,9 b" coreconfigitem('color', 'pagermode',"
202 default=dynamicdefault,
202 default=dynamicdefault,
203 )
203 )
204 _registerdiffopts(section='commands', configprefix='commit.interactive.')
204 _registerdiffopts(section='commands', configprefix='commit.interactive.')
205 coreconfigitem('commands', 'commit.post-status',
206 default=False,
207 )
205 coreconfigitem('commands', 'grep.all-files',
208 coreconfigitem('commands', 'grep.all-files',
206 default=False,
209 default=False,
207 )
210 )
@@ -288,6 +291,9 b" coreconfigitem('convert', 'hg.clonebranc"
288 coreconfigitem('convert', 'hg.ignoreerrors',
291 coreconfigitem('convert', 'hg.ignoreerrors',
289 default=False,
292 default=False,
290 )
293 )
294 coreconfigitem('convert', 'hg.preserve-hash',
295 default=False,
296 )
291 coreconfigitem('convert', 'hg.revs',
297 coreconfigitem('convert', 'hg.revs',
292 default=None,
298 default=None,
293 )
299 )
@@ -526,12 +532,22 b" coreconfigitem('experimental', 'evolutio"
526 coreconfigitem('experimental', 'evolution.bundle-obsmarker',
532 coreconfigitem('experimental', 'evolution.bundle-obsmarker',
527 default=False,
533 default=False,
528 )
534 )
535 coreconfigitem('experimental', 'log.topo',
536 default=False,
537 )
529 coreconfigitem('experimental', 'evolution.report-instabilities',
538 coreconfigitem('experimental', 'evolution.report-instabilities',
530 default=True,
539 default=True,
531 )
540 )
532 coreconfigitem('experimental', 'evolution.track-operation',
541 coreconfigitem('experimental', 'evolution.track-operation',
533 default=True,
542 default=True,
534 )
543 )
544 # repo-level config to exclude a revset visibility
545 #
546 # The target use case is to use `share` to expose different subset of the same
547 # repository, especially server side. See also `server.view`.
548 coreconfigitem('experimental', 'extra-filter-revs',
549 default=None,
550 )
535 coreconfigitem('experimental', 'maxdeltachainspan',
551 coreconfigitem('experimental', 'maxdeltachainspan',
536 default=-1,
552 default=-1,
537 )
553 )
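Per the comment above, extra-filter-revs lets a repository (typically server side, combined with share) hide an additional set of revisions; the value is presumably read as a revset. A hedged hgrc example::

    [experimental]
    # hide non-public changesets from this view of the repository
    extra-filter-revs = not public()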
@@ -663,6 +679,9 b" coreconfigitem('extdata', '.*',"
663 default=None,
679 default=None,
664 generic=True,
680 generic=True,
665 )
681 )
682 coreconfigitem('format', 'bookmarks-in-store',
683 default=False,
684 )
666 coreconfigitem('format', 'chunkcachesize',
685 coreconfigitem('format', 'chunkcachesize',
667 default=None,
686 default=None,
668 )
687 )
@@ -931,6 +950,9 b" coreconfigitem('profiling', 'showmax',"
931 coreconfigitem('profiling', 'showmin',
950 coreconfigitem('profiling', 'showmin',
932 default=dynamicdefault,
951 default=dynamicdefault,
933 )
952 )
953 coreconfigitem('profiling', 'showtime',
954 default=True,
955 )
934 coreconfigitem('profiling', 'sort',
956 coreconfigitem('profiling', 'sort',
935 default='inlinetime',
957 default='inlinetime',
936 )
958 )
@@ -1072,6 +1094,9 b" coreconfigitem('share', 'pool',"
1072 coreconfigitem('share', 'poolnaming',
1094 coreconfigitem('share', 'poolnaming',
1073 default='identity',
1095 default='identity',
1074 )
1096 )
1097 coreconfigitem('shelve', 'maxbackups',
1098 default=10,
1099 )
1075 coreconfigitem('smtp', 'host',
1100 coreconfigitem('smtp', 'host',
1076 default=None,
1101 default=None,
1077 )
1102 )
@@ -272,6 +272,30 b' class basectx(object):'
272 except error.LookupError:
272 except error.LookupError:
273 return ''
273 return ''
274
274
275 @propertycache
276 def _copies(self):
277 p1copies = {}
278 p2copies = {}
279 p1 = self.p1()
280 p2 = self.p2()
281 narrowmatch = self._repo.narrowmatch()
282 for dst in self.files():
283 if not narrowmatch(dst) or dst not in self:
284 continue
285 copied = self[dst].renamed()
286 if not copied:
287 continue
288 src, srcnode = copied
289 if src in p1 and p1[src].filenode() == srcnode:
290 p1copies[dst] = src
291 elif src in p2 and p2[src].filenode() == srcnode:
292 p2copies[dst] = src
293 return p1copies, p2copies
294 def p1copies(self):
295 return self._copies[0]
296 def p2copies(self):
297 return self._copies[1]
298
275 def sub(self, path, allowcreate=True):
299 def sub(self, path, allowcreate=True):
276 '''return a subrepo for the stored revision of path, never wdir()'''
300 '''return a subrepo for the stored revision of path, never wdir()'''
277 return subrepo.subrepo(self, path, allowcreate=allowcreate)
301 return subrepo.subrepo(self, path, allowcreate=allowcreate)
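The _copies property hoisted into basectx above attributes each recorded rename to whichever parent holds the matching source filenode. A hedged stand-alone model, with manifests as plain {path: filenode} dicts (classify_copies is an illustrative name):

def classify_copies(files, renamed, p1man, p2man):
    p1copies, p2copies = {}, {}
    for dst in files:
        copied = renamed.get(dst)        # (src, srcnode) or None
        if not copied:
            continue
        src, srcnode = copied
        if p1man.get(src) == srcnode:    # source content lives in p1
            p1copies[dst] = src
        elif p2man.get(src) == srcnode:  # ...or in p2
            p2copies[dst] = src
    return p1copies, p2copies

renamed = {'a2': ('a', 'n1'), 'b2': ('b', 'n2')}
assert classify_copies(['a2', 'b2'], renamed,
                       p1man={'a': 'n1'}, p2man={'b': 'n2'}) == \
    ({'a2': 'a'}, {'b2': 'b'})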
@@ -439,6 +463,36 b' class changectx(basectx):'
439 return self._changeset.date
463 return self._changeset.date
440 def files(self):
464 def files(self):
441 return self._changeset.files
465 return self._changeset.files
466 def filesmodified(self):
467 modified = set(self.files())
468 modified.difference_update(self.filesadded())
469 modified.difference_update(self.filesremoved())
470 return sorted(modified)
471 def filesadded(self):
472 source = self._repo.ui.config('experimental', 'copies.read-from')
473 if (source == 'changeset-only' or
474 (source == 'compatibility' and
475 self._changeset.filesadded is not None)):
476 return self._changeset.filesadded or []
477
478 added = []
479 for f in self.files():
480 if not any(f in p for p in self.parents()):
481 added.append(f)
482 return added
483 def filesremoved(self):
484 source = self._repo.ui.config('experimental', 'copies.read-from')
485 if (source == 'changeset-only' or
486 (source == 'compatibility' and
487 self._changeset.filesremoved is not None)):
488 return self._changeset.filesremoved or []
489
490 removed = []
491 for f in self.files():
492 if f not in self:
493 removed.append(f)
494 return removed
495
442 @propertycache
496 @propertycache
443 def _copies(self):
497 def _copies(self):
444 source = self._repo.ui.config('experimental', 'copies.read-from')
498 source = self._repo.ui.config('experimental', 'copies.read-from')
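The new filesadded()/filesremoved() methods branch on the experimental.copies.read-from mode. A compact model of that decision, assuming stored is the value recorded in the changeset (None for commits predating the feature) and computed() recomputes from the parents:

def files_added(source, stored, computed):
    if (source == 'changeset-only' or
            (source == 'compatibility' and stored is not None)):
        return stored or []
    return computed()  # recompute by comparing against the parents

assert files_added('compatibility', None, lambda: ['f']) == ['f']
assert files_added('compatibility', ['g'], lambda: ['f']) == ['g']
assert files_added('filelog-only', ['g'], lambda: ['f']) == ['f']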
@@ -456,27 +510,7 b' class changectx(basectx):'
456 # Otherwise (config said to read only from filelog, or we are in
510 # Otherwise (config said to read only from filelog, or we are in
457 # compatibility mode and there is no data in the changeset), we get
511 # compatibility mode and there is no data in the changeset), we get
458 # the copy metadata from the filelogs.
512 # the copy metadata from the filelogs.
459 p1copies = {}
513 return super(changectx, self)._copies
460 p2copies = {}
461 p1 = self.p1()
462 p2 = self.p2()
463 narrowmatch = self._repo.narrowmatch()
464 for dst in self.files():
465 if not narrowmatch(dst) or dst not in self:
466 continue
467 copied = self[dst].renamed()
468 if not copied:
469 continue
470 src, srcnode = copied
471 if src in p1 and p1[src].filenode() == srcnode:
472 p1copies[dst] = src
473 elif src in p2 and p2[src].filenode() == srcnode:
474 p2copies[dst] = src
475 return p1copies, p2copies
476 def p1copies(self):
477 return self._copies[0]
478 def p2copies(self):
479 return self._copies[1]
480 def description(self):
514 def description(self):
481 return self._changeset.description
515 return self._changeset.description
482 def branch(self):
516 def branch(self):
@@ -1098,7 +1132,7 b' class committablectx(basectx):'
1098 """A committablectx object provides common functionality for a context that
1132 """A committablectx object provides common functionality for a context that
1099 wants the ability to commit, e.g. workingctx or memctx."""
1133 wants the ability to commit, e.g. workingctx or memctx."""
1100 def __init__(self, repo, text="", user=None, date=None, extra=None,
1134 def __init__(self, repo, text="", user=None, date=None, extra=None,
1101 changes=None):
1135 changes=None, branch=None):
1102 super(committablectx, self).__init__(repo)
1136 super(committablectx, self).__init__(repo)
1103 self._rev = None
1137 self._rev = None
1104 self._node = None
1138 self._node = None
@@ -1113,13 +1147,9 b' class committablectx(basectx):'
1113 self._extra = {}
1147 self._extra = {}
1114 if extra:
1148 if extra:
1115 self._extra = extra.copy()
1149 self._extra = extra.copy()
1116 if 'branch' not in self._extra:
1150 if branch is not None:
1117 try:
1151 self._extra['branch'] = encoding.fromlocal(branch)
1118 branch = encoding.fromlocal(self._repo.dirstate.branch())
1152 if not self._extra.get('branch'):
1119 except UnicodeDecodeError:
1120 raise error.Abort(_('branch name not in UTF-8!'))
1121 self._extra['branch'] = branch
1122 if self._extra['branch'] == '':
1123 self._extra['branch'] = 'default'
1153 self._extra['branch'] = 'default'
1124
1154
1125 def __bytes__(self):
1155 def __bytes__(self):
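The constructor change above centralizes branch handling: an explicitly passed branch wins, and an empty or missing value normalizes to 'default'. A sketch of that normalization (the encoding.fromlocal() conversion is elided):

def resolve_branch(extra, branch=None):
    extra = dict(extra or {})
    if branch is not None:
        extra['branch'] = branch       # hg also runs encoding.fromlocal()
    if not extra.get('branch'):
        extra['branch'] = 'default'    # empty or missing -> 'default'
    return extra

assert resolve_branch({}) == {'branch': 'default'}
assert resolve_branch({'branch': ''}) == {'branch': 'default'}
assert resolve_branch({}, branch='stable') == {'branch': 'stable'}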
@@ -1132,42 +1162,6 b' class committablectx(basectx):'
1132
1162
1133 __bool__ = __nonzero__
1163 __bool__ = __nonzero__
1134
1164
1135 def _buildflagfunc(self):
1136 # Create a fallback function for getting file flags when the
1137 # filesystem doesn't support them
1138
1139 copiesget = self._repo.dirstate.copies().get
1140 parents = self.parents()
1141 if len(parents) < 2:
1142 # when we have one parent, it's easy: copy from parent
1143 man = parents[0].manifest()
1144 def func(f):
1145 f = copiesget(f, f)
1146 return man.flags(f)
1147 else:
1148 # merges are tricky: we try to reconstruct the unstored
1149 # result from the merge (issue1802)
1150 p1, p2 = parents
1151 pa = p1.ancestor(p2)
1152 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1153
1154 def func(f):
1155 f = copiesget(f, f) # may be wrong for merges with copies
1156 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1157 if fl1 == fl2:
1158 return fl1
1159 if fl1 == fla:
1160 return fl2
1161 if fl2 == fla:
1162 return fl1
1163 return '' # punt for conflicts
1164
1165 return func
1166
1167 @propertycache
1168 def _flagfunc(self):
1169 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1170
1171 @propertycache
1165 @propertycache
1172 def _status(self):
1166 def _status(self):
1173 return self._repo.status()
1167 return self._repo.status()
@@ -1206,26 +1200,10 b' class committablectx(basectx):'
1206 return self._status.removed
1200 return self._status.removed
1207 def deleted(self):
1201 def deleted(self):
1208 return self._status.deleted
1202 return self._status.deleted
1209 @propertycache
1203 filesmodified = modified
1210 def _copies(self):
1204 filesadded = added
1211 p1copies = {}
1205 filesremoved = removed
1212 p2copies = {}
1206
1213 parents = self._repo.dirstate.parents()
1214 p1manifest = self._repo[parents[0]].manifest()
1215 p2manifest = self._repo[parents[1]].manifest()
1216 narrowmatch = self._repo.narrowmatch()
1217 for dst, src in self._repo.dirstate.copies().items():
1218 if not narrowmatch(dst):
1219 continue
1220 if src in p1manifest:
1221 p1copies[dst] = src
1222 elif src in p2manifest:
1223 p2copies[dst] = src
1224 return p1copies, p2copies
1225 def p1copies(self):
1226 return self._copies[0]
1227 def p2copies(self):
1228 return self._copies[1]
1229 def branch(self):
1207 def branch(self):
1230 return encoding.tolocal(self._extra['branch'])
1208 return encoding.tolocal(self._extra['branch'])
1231 def closesbranch(self):
1209 def closesbranch(self):
@@ -1257,33 +1235,10 b' class committablectx(basectx):'
1257 def children(self):
1235 def children(self):
1258 return []
1236 return []
1259
1237
1260 def flags(self, path):
1261 if r'_manifest' in self.__dict__:
1262 try:
1263 return self._manifest.flags(path)
1264 except KeyError:
1265 return ''
1266
1267 try:
1268 return self._flagfunc(path)
1269 except OSError:
1270 return ''
1271
1272 def ancestor(self, c2):
1238 def ancestor(self, c2):
1273 """return the "best" ancestor context of self and c2"""
1239 """return the "best" ancestor context of self and c2"""
1274 return self._parents[0].ancestor(c2) # punt on two parents for now
1240 return self._parents[0].ancestor(c2) # punt on two parents for now
1275
1241
1276 def walk(self, match):
1277 '''Generates matching file names.'''
1278 return sorted(self._repo.dirstate.walk(self._repo.narrowmatch(match),
1279 subrepos=sorted(self.substate),
1280 unknown=True, ignored=False))
1281
1282 def matches(self, match):
1283 match = self._repo.narrowmatch(match)
1284 ds = self._repo.dirstate
1285 return sorted(f for f in ds.matches(match) if ds[f] != 'r')
1286
1287 def ancestors(self):
1242 def ancestors(self):
1288 for p in self._parents:
1243 for p in self._parents:
1289 yield p
1244 yield p
@@ -1301,18 +1256,6 b' class committablectx(basectx):'
1301
1256
1302 """
1257 """
1303
1258
1304 with self._repo.dirstate.parentchange():
1305 for f in self.modified() + self.added():
1306 self._repo.dirstate.normal(f)
1307 for f in self.removed():
1308 self._repo.dirstate.drop(f)
1309 self._repo.dirstate.setparents(node)
1310
1311 # write changes out explicitly, because nesting wlock at
1312 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1313 # from immediately doing so for subsequent changing files
1314 self._repo.dirstate.write(self._repo.currenttransaction())
1315
1316 def dirty(self, missing=False, merge=True, branch=True):
1259 def dirty(self, missing=False, merge=True, branch=True):
1317 return False
1260 return False
1318
1261
@@ -1327,7 +1270,14 b' class workingctx(committablectx):'
1327 """
1270 """
1328 def __init__(self, repo, text="", user=None, date=None, extra=None,
1271 def __init__(self, repo, text="", user=None, date=None, extra=None,
1329 changes=None):
1272 changes=None):
1330 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1273 branch = None
1274 if not extra or 'branch' not in extra:
1275 try:
1276 branch = repo.dirstate.branch()
1277 except UnicodeDecodeError:
1278 raise error.Abort(_('branch name not in UTF-8!'))
1279 super(workingctx, self).__init__(repo, text, user, date, extra, changes,
1280 branch=branch)
1331
1281
1332 def __iter__(self):
1282 def __iter__(self):
1333 d = self._repo.dirstate
1283 d = self._repo.dirstate
@@ -1355,6 +1305,54 b' class workingctx(committablectx):'
1355 self._manifest
1305 self._manifest
1356 return super(workingctx, self)._fileinfo(path)
1306 return super(workingctx, self)._fileinfo(path)
1357
1307
1308 def _buildflagfunc(self):
1309 # Create a fallback function for getting file flags when the
1310 # filesystem doesn't support them
1311
1312 copiesget = self._repo.dirstate.copies().get
1313 parents = self.parents()
1314 if len(parents) < 2:
1315 # when we have one parent, it's easy: copy from parent
1316 man = parents[0].manifest()
1317 def func(f):
1318 f = copiesget(f, f)
1319 return man.flags(f)
1320 else:
1321 # merges are tricky: we try to reconstruct the unstored
1322 # result from the merge (issue1802)
1323 p1, p2 = parents
1324 pa = p1.ancestor(p2)
1325 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1326
1327 def func(f):
1328 f = copiesget(f, f) # may be wrong for merges with copies
1329 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1330 if fl1 == fl2:
1331 return fl1
1332 if fl1 == fla:
1333 return fl2
1334 if fl2 == fla:
1335 return fl1
1336 return '' # punt for conflicts
1337
1338 return func
1339
1340 @propertycache
1341 def _flagfunc(self):
1342 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1343
1344 def flags(self, path):
1345 if r'_manifest' in self.__dict__:
1346 try:
1347 return self._manifest.flags(path)
1348 except KeyError:
1349 return ''
1350
1351 try:
1352 return self._flagfunc(path)
1353 except OSError:
1354 return ''
1355
1358 def filectx(self, path, filelog=None):
1356 def filectx(self, path, filelog=None):
1359 """get a file context from the working directory"""
1357 """get a file context from the working directory"""
1360 return workingfilectx(self._repo, path, workingctx=self,
1358 return workingfilectx(self._repo, path, workingctx=self,
@@ -1579,6 +1577,23 b' class workingctx(committablectx):'
1579 return s
1577 return s
1580
1578
1581 @propertycache
1579 @propertycache
1580 def _copies(self):
1581 p1copies = {}
1582 p2copies = {}
1583 parents = self._repo.dirstate.parents()
1584 p1manifest = self._repo[parents[0]].manifest()
1585 p2manifest = self._repo[parents[1]].manifest()
1586 narrowmatch = self._repo.narrowmatch()
1587 for dst, src in self._repo.dirstate.copies().items():
1588 if not narrowmatch(dst):
1589 continue
1590 if src in p1manifest:
1591 p1copies[dst] = src
1592 elif src in p2manifest:
1593 p2copies[dst] = src
1594 return p1copies, p2copies
1595
1596 @propertycache
1582 def _manifest(self):
1597 def _manifest(self):
1583 """generate a manifest corresponding to the values in self._status
1598 """generate a manifest corresponding to the values in self._status
1584
1599
@@ -1651,8 +1666,29 b' class workingctx(committablectx):'
1651 match.bad = bad
1666 match.bad = bad
1652 return match
1667 return match
1653
1668
1669 def walk(self, match):
1670 '''Generates matching file names.'''
1671 return sorted(self._repo.dirstate.walk(self._repo.narrowmatch(match),
1672 subrepos=sorted(self.substate),
1673 unknown=True, ignored=False))
1674
1675 def matches(self, match):
1676 match = self._repo.narrowmatch(match)
1677 ds = self._repo.dirstate
1678 return sorted(f for f in ds.matches(match) if ds[f] != 'r')
1679
1654 def markcommitted(self, node):
1680 def markcommitted(self, node):
1655 super(workingctx, self).markcommitted(node)
1681 with self._repo.dirstate.parentchange():
1682 for f in self.modified() + self.added():
1683 self._repo.dirstate.normal(f)
1684 for f in self.removed():
1685 self._repo.dirstate.drop(f)
1686 self._repo.dirstate.setparents(node)
1687
1688 # write changes out explicitly, because nesting wlock at
1689 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1690 # from immediately doing so for subsequent changing files
1691 self._repo.dirstate.write(self._repo.currenttransaction())
1656
1692
1657 sparse.aftercommit(self._repo, node)
1693 sparse.aftercommit(self._repo, node)
1658
1694
@@ -1726,6 +1762,8 b' class workingfilectx(committablefilectx)'
1726
1762
1727 def size(self):
1763 def size(self):
1728 return self._repo.wvfs.lstat(self._path).st_size
1764 return self._repo.wvfs.lstat(self._path).st_size
1765 def lstat(self):
1766 return self._repo.wvfs.lstat(self._path)
1729 def date(self):
1767 def date(self):
1730 t, tz = self._changectx.date()
1768 t, tz = self._changectx.date()
1731 try:
1769 try:
@@ -1761,14 +1799,13 b' class workingfilectx(committablefilectx)'
1761
1799
1762 def write(self, data, flags, backgroundclose=False, **kwargs):
1800 def write(self, data, flags, backgroundclose=False, **kwargs):
1763 """wraps repo.wwrite"""
1801 """wraps repo.wwrite"""
1764 self._repo.wwrite(self._path, data, flags,
1802 return self._repo.wwrite(self._path, data, flags,
1765 backgroundclose=backgroundclose,
1803 backgroundclose=backgroundclose,
1766 **kwargs)
1804 **kwargs)
1767
1805
1768 def markcopied(self, src):
1806 def markcopied(self, src):
1769 """marks this file a copy of `src`"""
1807 """marks this file a copy of `src`"""
1770 if self._repo.dirstate[self._path] in "nma":
1808 self._repo.dirstate.copy(src, self._path)
1771 self._repo.dirstate.copy(src, self._path)
1772
1809
1773 def clearunknown(self):
1810 def clearunknown(self):
1774 """Removes conflicting items in the working directory so that
1811 """Removes conflicting items in the working directory so that
@@ -1913,7 +1950,7 b' class overlayworkingctx(committablectx):'
1913 if self.isdirty(path):
1950 if self.isdirty(path):
1914 return self._cache[path]['copied']
1951 return self._cache[path]['copied']
1915 else:
1952 else:
1916 raise error.ProgrammingError('copydata() called on clean context')
1953 return None
1917
1954
1918 def flags(self, path):
1955 def flags(self, path):
1919 if self.isdirty(path):
1956 if self.isdirty(path):
@@ -2055,7 +2092,7 b' class overlayworkingctx(committablectx):'
2055 else:
2092 else:
2056 parents = (self._repo[parents[0]], self._repo[parents[1]])
2093 parents = (self._repo[parents[0]], self._repo[parents[1]])
2057
2094
2058 files = self._cache.keys()
2095 files = self.files()
2059 def getfile(repo, memctx, path):
2096 def getfile(repo, memctx, path):
2060 if self._cache[path]['exists']:
2097 if self._cache[path]['exists']:
2061 return memfilectx(repo, memctx, path,
2098 return memfilectx(repo, memctx, path,
@@ -2118,7 +2155,9 b' class overlayworkingctx(committablectx):'
2118 # the file is marked as existing.
2155 # the file is marked as existing.
2119 if exists and data is None:
2156 if exists and data is None:
2120 oldentry = self._cache.get(path) or {}
2157 oldentry = self._cache.get(path) or {}
2121 data = oldentry.get('data') or self._wrappedctx[path].data()
2158 data = oldentry.get('data')
2159 if data is None:
2160 data = self._wrappedctx[path].data()
2122
2161
2123 self._cache[path] = {
2162 self._cache[path] = {
2124 'exists': exists,
2163 'exists': exists,
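The data lookup fix above swaps an 'or' for an explicit 'is None' test, because a cached empty file (b'') is falsy and would otherwise be re-read from the wrapped context. A minimal demonstration of the difference:

def get_data_buggy(oldentry, fetch):
    return oldentry.get('data') or fetch()   # refetches when data == b''

def get_data_fixed(oldentry, fetch):
    data = oldentry.get('data')
    if data is None:                         # only when truly absent
        data = fetch()
    return data

fetch = lambda: b'from-store'
assert get_data_buggy({'data': b''}, fetch) == b'from-store'  # cache ignored
assert get_data_fixed({'data': b''}, fetch) == b''            # cache honored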
@@ -2305,7 +2344,8 b' class memctx(committablectx):'
2305
2344
2306 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2345 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2307 date=None, extra=None, branch=None, editor=False):
2346 date=None, extra=None, branch=None, editor=False):
2308 super(memctx, self).__init__(repo, text, user, date, extra)
2347 super(memctx, self).__init__(repo, text, user, date, extra,
2348 branch=branch)
2309 self._rev = None
2349 self._rev = None
2310 self._node = None
2350 self._node = None
2311 parents = [(p or nullid) for p in parents]
2351 parents = [(p or nullid) for p in parents]
@@ -2313,8 +2353,6 b' class memctx(committablectx):'
2313 self._parents = [self._repo[p] for p in (p1, p2)]
2353 self._parents = [self._repo[p] for p in (p1, p2)]
2314 files = sorted(set(files))
2354 files = sorted(set(files))
2315 self._files = files
2355 self._files = files
2316 if branch is not None:
2317 self._extra['branch'] = encoding.fromlocal(branch)
2318 self.substate = {}
2356 self.substate = {}
2319
2357
2320 if isinstance(filectxfn, patch.filestore):
2358 if isinstance(filectxfn, patch.filestore):
This diff has been collapsed as it changes many lines (593 lines changed).
@@ -107,40 +107,60 b' def _findlimit(repo, ctxa, ctxb):'
107 # This only occurs when a is a descendant of b or vice versa.
107 # This only occurs when a is a descendant of b or vice versa.
108 return min(limit, a, b)
108 return min(limit, a, b)
109
109
110 def _chain(src, dst, a, b):
110 def _filter(src, dst, t):
111 """chain two sets of copies a->b"""
111 """filters out invalid copies after chaining"""
112 t = a.copy()
112
113 for k, v in b.iteritems():
113 # When _chain()'ing copies in 'a' (from 'src' via some other commit 'mid')
114 if v in t:
114 # with copies in 'b' (from 'mid' to 'dst'), we can get the different cases
115 # found a chain
115 # in the following table (not including trivial cases). For example, case 6
116 if t[v] != k:
116 # is where a file existed in 'src' and remained under that name in 'mid' and
117 # file wasn't renamed back to itself
117 # then was renamed between 'mid' and 'dst'.
118 t[k] = t[v]
118 #
119 if v not in dst:
119 # case src mid dst result
120 # chain was a rename, not a copy
120 # 1 x y - -
121 del t[v]
121 # 2 x y y x->y
122 if v in src:
122 # 3 x y x -
123 # file is a copy of an existing file
123 # 4 x y z x->z
124 t[k] = v
124 # 5 - x y -
125 # 6 x x y x->y
126 #
127 # _chain() takes care of chaining the copies in 'a' and 'b', but it
128 # cannot tell the difference between cases 1 and 2, between 3 and 4, or
129 # between 5 and 6, so it includes all cases in its result.
130 # Cases 1, 3, and 5 are then removed by _filter().
125
131
126 for k, v in list(t.items()):
132 for k, v in list(t.items()):
133 # remove copies from files that didn't exist
134 if v not in src:
135 del t[k]
127 # remove criss-crossed copies
136 # remove criss-crossed copies
128 if k in src and v in dst:
137 elif k in src and v in dst:
129 del t[k]
138 del t[k]
130 # remove copies to files that were then removed
139 # remove copies to files that were then removed
131 elif k not in dst:
140 elif k not in dst:
132 del t[k]
141 del t[k]
133
142
143 def _chain(a, b):
144 """chain two sets of copies 'a' and 'b'"""
145 t = a.copy()
146 for k, v in b.iteritems():
147 if v in t:
148 t[k] = t[v]
149 else:
150 t[k] = v
134 return t
151 return t
135
152
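To make the table concrete, here is a self-contained model of the new _chain()/_filter() pair operating on plain {dst: src} dicts; chain and filter_invalid are illustrative names, and the example walks case 4 (x renamed to y in 'mid', then y to z in 'dst'):

def chain(a, b):
    t = a.copy()
    for k, v in b.items():
        t[k] = t.get(v, v)           # follow a rename already recorded in a
    return t

def filter_invalid(src, dst, t):
    for k, v in list(t.items()):
        if v not in src:             # case 5: source never existed in src
            del t[k]
        elif k in src and v in dst:  # case 3: criss-crossed / renamed back
            del t[k]
        elif k not in dst:           # case 1: destination later removed
            del t[k]

src, dst = {'x'}, {'z'}
t = chain({'y': 'x'}, {'z': 'y'})    # case 4: x->y in mid, y->z in dst
filter_invalid(src, dst, t)
assert t == {'z': 'x'}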
136 def _tracefile(fctx, am, limit=node.nullrev):
153 def _tracefile(fctx, am, basemf, limit):
137 """return file context that is the ancestor of fctx present in ancestor
154 """return file context that is the ancestor of fctx present in ancestor
138 manifest am, stopping after the first ancestor lower than limit"""
155 manifest am, stopping after the first ancestor lower than limit"""
139
156
140 for f in fctx.ancestors():
157 for f in fctx.ancestors():
141 if am.get(f.path(), None) == f.filenode():
158 path = f.path()
142 return f
159 if am.get(path, None) == f.filenode():
143 if limit >= 0 and not f.isintroducedafter(limit):
160 return path
161 if basemf and basemf.get(path, None) == f.filenode():
162 return path
163 if not f.isintroducedafter(limit):
144 return None
164 return None
145
165
146 def _dirstatecopies(repo, match=None):
166 def _dirstatecopies(repo, match=None):
@@ -165,7 +185,7 b' def usechangesetcentricalgo(repo):'
165 return (repo.ui.config('experimental', 'copies.read-from') in
185 return (repo.ui.config('experimental', 'copies.read-from') in
166 ('changeset-only', 'compatibility'))
186 ('changeset-only', 'compatibility'))
167
187
168 def _committedforwardcopies(a, b, match):
188 def _committedforwardcopies(a, b, base, match):
169 """Like _forwardcopies(), but b.rev() cannot be None (working copy)"""
189 """Like _forwardcopies(), but b.rev() cannot be None (working copy)"""
170 # files might have to be traced back to the fctx parent of the last
190 # files might have to be traced back to the fctx parent of the last
171 # one-side-only changeset, but not further back than that
191 # one-side-only changeset, but not further back than that
@@ -183,6 +203,7 b' def _committedforwardcopies(a, b, match)'
183 if debug:
203 if debug:
184 dbg('debug.copies: search limit: %d\n' % limit)
204 dbg('debug.copies: search limit: %d\n' % limit)
185 am = a.manifest()
205 am = a.manifest()
206 basemf = None if base is None else base.manifest()
186
207
187 # find where new files came from
208 # find where new files came from
188 # we currently don't try to find where old files went, too expensive
209 # we currently don't try to find where old files went, too expensive
@@ -204,9 +225,9 b' def _committedforwardcopies(a, b, match)'
204 ancestrycontext = a._repo.changelog.ancestors([b.rev()], inclusive=True)
225 ancestrycontext = a._repo.changelog.ancestors([b.rev()], inclusive=True)
205
226
206 if debug:
227 if debug:
207 dbg('debug.copies: missing file to search: %d\n' % len(missing))
228 dbg('debug.copies: missing files to search: %d\n' % len(missing))
208
229
209 for f in missing:
230 for f in sorted(missing):
210 if debug:
231 if debug:
211 dbg('debug.copies: tracing file: %s\n' % f)
232 dbg('debug.copies: tracing file: %s\n' % f)
212 fctx = b[f]
233 fctx = b[f]
@@ -214,11 +235,11 b' def _committedforwardcopies(a, b, match)'
214
235
215 if debug:
236 if debug:
216 start = util.timer()
237 start = util.timer()
217 ofctx = _tracefile(fctx, am, limit)
238 opath = _tracefile(fctx, am, basemf, limit)
218 if ofctx:
239 if opath:
219 if debug:
240 if debug:
220 dbg('debug.copies: rename of: %s\n' % ofctx._path)
241 dbg('debug.copies: rename of: %s\n' % opath)
221 cm[f] = ofctx.path()
242 cm[f] = opath
222 if debug:
243 if debug:
223 dbg('debug.copies: time: %f seconds\n'
244 dbg('debug.copies: time: %f seconds\n'
224 % (util.timer() - start))
245 % (util.timer() - start))
@@ -245,40 +266,30 b' def _changesetforwardcopies(a, b, match)'
245 # 'work' contains 3-tuples of a (revision number, parent number, copies).
266 # 'work' contains 3-tuples of a (revision number, parent number, copies).
246 # The parent number is only used for knowing which parent the copies dict
267 # The parent number is only used for knowing which parent the copies dict
247 # came from.
268 # came from.
269 # NOTE: To reduce costly copying the 'copies' dicts, we reuse the same
270 # instance for *one* of the child nodes (the last one). Once an instance
271 # has been put on the queue, it is thus no longer safe to modify it.
272 # Conversely, it *is* safe to modify an instance popped off the queue.
248 work = [(r, 1, {}) for r in roots]
273 work = [(r, 1, {}) for r in roots]
249 heapq.heapify(work)
274 heapq.heapify(work)
275 alwaysmatch = match.always()
250 while work:
276 while work:
251 r, i1, copies1 = heapq.heappop(work)
277 r, i1, copies = heapq.heappop(work)
252 if work and work[0][0] == r:
278 if work and work[0][0] == r:
253 # We are tracing copies from both parents
279 # We are tracing copies from both parents
254 r, i2, copies2 = heapq.heappop(work)
280 r, i2, copies2 = heapq.heappop(work)
255 copies = {}
281 for dst, src in copies2.items():
256 ctx = repo[r]
282 # Unlike when copies are stored in the filelog, we consider
257 p1man, p2man = ctx.p1().manifest(), ctx.p2().manifest()
283 # it a copy even if the destination already existed on the
258 allcopies = set(copies1) | set(copies2)
284 # other branch. It's simply too expensive to check if the
259 # TODO: perhaps this filtering should be done as long as ctx
285 # file existed in the manifest.
260 # is merge, whether or not we're tracing from both parent.
286 if dst not in copies:
261 for dst in allcopies:
287 # If it was copied on the p1 side, leave it as copied from
262 if not match(dst):
288 # that side, even if it was also copied on the p2 side.
263 continue
289 copies[dst] = copies2[dst]
264 if dst not in copies2:
265 # Copied on p1 side: mark as copy from p1 side if it didn't
266 # already exist on p2 side
267 if dst not in p2man:
268 copies[dst] = copies1[dst]
269 elif dst not in copies1:
270 # Copied on p2 side: mark as copy from p2 side if it didn't
271 # already exist on p1 side
272 if dst not in p1man:
273 copies[dst] = copies2[dst]
274 else:
275 # Copied on both sides: mark as copy from p1 side
276 copies[dst] = copies1[dst]
277 else:
278 copies = copies1
279 if r == b.rev():
290 if r == b.rev():
280 return copies
291 return copies
281 for c in children[r]:
292 for i, c in enumerate(children[r]):
282 childctx = repo[c]
293 childctx = repo[c]
283 if r == childctx.p1().rev():
294 if r == childctx.p1().rev():
284 parent = 1
295 parent = 1
@@ -287,27 +298,36 b' def _changesetforwardcopies(a, b, match)'
287 assert r == childctx.p2().rev()
298 assert r == childctx.p2().rev()
288 parent = 2
299 parent = 2
289 childcopies = childctx.p2copies()
300 childcopies = childctx.p2copies()
290 if not match.always():
301 if not alwaysmatch:
291 childcopies = {dst: src for dst, src in childcopies.items()
302 childcopies = {dst: src for dst, src in childcopies.items()
292 if match(dst)}
303 if match(dst)}
293 childcopies = _chain(a, childctx, copies, childcopies)
304 # Copy the dict only if later iterations will also need it
294 heapq.heappush(work, (c, parent, childcopies))
305 if i != len(children[r]) - 1:
306 newcopies = copies.copy()
307 else:
308 newcopies = copies
309 if childcopies:
310 newcopies = _chain(newcopies, childcopies)
311 for f in childctx.filesremoved():
312 if f in newcopies:
313 del newcopies[f]
314 heapq.heappush(work, (c, parent, newcopies))
295 assert False
315 assert False
296
316
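The NOTE above describes a copy-on-need optimization: when a copies dict fans out to several children, only the last child may take (and later mutate) the parent's instance. A tiny model of that rule (fan_out is an illustrative name):

def fan_out(copies, children):
    out = []
    for i, child in enumerate(children):
        # every child but the last gets its own copy; the last one may
        # safely take ownership of (and mutate) the parent's instance
        newcopies = copies.copy() if i != len(children) - 1 else copies
        newcopies[child] = True
        out.append((child, newcopies))
    return out

base = {}
fanned = dict(fan_out(base, ['c1', 'c2']))
assert fanned['c1'] == {'c1': True}
assert fanned['c2'] is base          # last child reused the instance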
297 def _forwardcopies(a, b, match=None):
317 def _forwardcopies(a, b, base=None, match=None):
298 """find {dst@b: src@a} copy mapping where a is an ancestor of b"""
318 """find {dst@b: src@a} copy mapping where a is an ancestor of b"""
299
319
320 if base is None:
321 base = a
300 match = a.repo().narrowmatch(match)
322 match = a.repo().narrowmatch(match)
301 # check for working copy
323 # check for working copy
302 if b.rev() is None:
324 if b.rev() is None:
303 if a == b.p1():
325 cm = _committedforwardcopies(a, b.p1(), base, match)
304 # short-circuit to avoid issues with merge states
305 return _dirstatecopies(b._repo, match)
306
307 cm = _committedforwardcopies(a, b.p1(), match)
308 # combine copies from dirstate if necessary
326 # combine copies from dirstate if necessary
309 return _chain(a, b, cm, _dirstatecopies(b._repo, match))
327 copies = _chain(cm, _dirstatecopies(b._repo, match))
310 return _committedforwardcopies(a, b, match)
328 else:
329 copies = _committedforwardcopies(a, b, base, match)
330 return copies
311
331
312 def _backwardrenames(a, b, match):
332 def _backwardrenames(a, b, match):
313 if a._repo.ui.config('experimental', 'copytrace') == 'off':
333 if a._repo.ui.config('experimental', 'copytrace') == 'off':
@@ -343,90 +363,24 b' def pathcopies(x, y, match=None):'
343 if a == x:
363 if a == x:
344 if debug:
364 if debug:
345 repo.ui.debug('debug.copies: search mode: forward\n')
365 repo.ui.debug('debug.copies: search mode: forward\n')
346 return _forwardcopies(x, y, match=match)
366 if y.rev() is None and x == y.p1():
347 if a == y:
367 # short-circuit to avoid issues with merge states
368 return _dirstatecopies(repo, match)
369 copies = _forwardcopies(x, y, match=match)
370 elif a == y:
348 if debug:
371 if debug:
349 repo.ui.debug('debug.copies: search mode: backward\n')
372 repo.ui.debug('debug.copies: search mode: backward\n')
350 return _backwardrenames(x, y, match=match)
373 copies = _backwardrenames(x, y, match=match)
351 if debug:
374 else:
352 repo.ui.debug('debug.copies: search mode: combined\n')
375 if debug:
353 return _chain(x, y, _backwardrenames(x, a, match=match),
376 repo.ui.debug('debug.copies: search mode: combined\n')
354 _forwardcopies(a, y, match=match))
377 base = None
355
378 if a.rev() != node.nullrev:
356 def _computenonoverlap(repo, c1, c2, addedinm1, addedinm2, baselabel=''):
379 base = x
357 """Computes, based on addedinm1 and addedinm2, the files exclusive to c1
380 copies = _chain(_backwardrenames(x, a, match=match),
358 and c2. This is its own function so extensions can easily wrap this call
381 _forwardcopies(a, y, base, match=match))
359 to see what files mergecopies is about to process.
382 _filter(x, y, copies)
360
383 return copies
361 Even though c1 and c2 are not used in this function, they are useful in
362 other extensions for being able to read the file nodes of the changed files.
363
364 "baselabel" can be passed to help distinguish the multiple computations
365 done in the graft case.
366 """
367 u1 = sorted(addedinm1 - addedinm2)
368 u2 = sorted(addedinm2 - addedinm1)
369
370 header = " unmatched files in %s"
371 if baselabel:
372 header += ' (from %s)' % baselabel
373 if u1:
374 repo.ui.debug("%s:\n %s\n" % (header % 'local', "\n ".join(u1)))
375 if u2:
376 repo.ui.debug("%s:\n %s\n" % (header % 'other', "\n ".join(u2)))
377
378 return u1, u2
379
380 def _makegetfctx(ctx):
381 """return a 'getfctx' function suitable for _checkcopies usage
382
383 We have to re-setup the function building 'filectx' for each
384 '_checkcopies' to ensure the linkrev adjustment is properly setup for
385 each. Linkrev adjustment is important to avoid bug in rename
386 detection. Moreover, having a proper '_ancestrycontext' setup ensures
387 the performance impact of this adjustment is kept limited. Without it,
388 each file could do a full dag traversal making the time complexity of
389 the operation explode (see issue4537).
390
391 This function exists here mostly to limit the impact on stable. Feel
392 free to refactor on default.
393 """
394 rev = ctx.rev()
395 repo = ctx._repo
396 ac = getattr(ctx, '_ancestrycontext', None)
397 if ac is None:
398 revs = [rev]
399 if rev is None:
400 revs = [p.rev() for p in ctx.parents()]
401 ac = repo.changelog.ancestors(revs, inclusive=True)
402 ctx._ancestrycontext = ac
403 def makectx(f, n):
404 if n in node.wdirfilenodeids: # in a working context?
405 if ctx.rev() is None:
406 return ctx.filectx(f)
407 return repo[None][f]
408 fctx = repo.filectx(f, fileid=n)
409 # setup only needed for filectx not create from a changectx
410 fctx._ancestrycontext = ac
411 fctx._descendantrev = rev
412 return fctx
413 return util.lrucachefunc(makectx)
414
415 def _combinecopies(copyfrom, copyto, finalcopy, diverge, incompletediverge):
416 """combine partial copy paths"""
417 remainder = {}
418 for f in copyfrom:
419 if f in copyto:
420 finalcopy[copyto[f]] = copyfrom[f]
421 del copyto[f]
422 for f in incompletediverge:
423 assert f not in diverge
424 ic = incompletediverge[f]
425 if ic[0] in copyto:
426 diverge[f] = [copyto[ic[0]], ic[1]]
427 else:
428 remainder[f] = ic
429 return remainder
430
384
431 def mergecopies(repo, c1, c2, base):
385 def mergecopies(repo, c1, c2, base):
432 """
386 """
@@ -485,7 +439,14 b' def mergecopies(repo, c1, c2, base):'
485 return _dirstatecopies(repo, narrowmatch), {}, {}, {}, {}
439 return _dirstatecopies(repo, narrowmatch), {}, {}, {}, {}
486
440
487 copytracing = repo.ui.config('experimental', 'copytrace')
441 copytracing = repo.ui.config('experimental', 'copytrace')
488 boolctrace = stringutil.parsebool(copytracing)
442 if stringutil.parsebool(copytracing) is False:
443 # stringutil.parsebool() returns None when it is unable to parse the
444 # value, so we should rely on making sure copytracing is on such cases
445 return {}, {}, {}, {}, {}
446
447 if usechangesetcentricalgo(repo):
448 # The heuristics don't make sense when we need changeset-centric algos
449 return _fullcopytracing(repo, c1, c2, base)
489
450
490 # Copy trace disabling is explicitly below the node == p1 logic above
451 # Copy trace disabling is explicitly below the node == p1 logic above
491 # because the logic above is required for a simple copy to be kept across a
452 # because the logic above is required for a simple copy to be kept across a
@@ -497,10 +458,6 b' def mergecopies(repo, c1, c2, base):'
497 if _isfullcopytraceable(repo, c1, base):
458 if _isfullcopytraceable(repo, c1, base):
498 return _fullcopytracing(repo, c1, c2, base)
459 return _fullcopytracing(repo, c1, c2, base)
499 return _heuristicscopytracing(repo, c1, c2, base)
460 return _heuristicscopytracing(repo, c1, c2, base)
500 elif boolctrace is False:
501 # stringutil.parsebool() returns None when it is unable to parse the
502 # value, so we should rely on making sure copytracing is on such cases
503 return {}, {}, {}, {}, {}
504 else:
461 else:
505 return _fullcopytracing(repo, c1, c2, base)
462 return _fullcopytracing(repo, c1, c2, base)
506
463
@@ -522,6 +479,23 b' def _isfullcopytraceable(repo, c1, base)'
522 return commits < sourcecommitlimit
479 return commits < sourcecommitlimit
523 return False
480 return False
524
481
482 def _checksinglesidecopies(src, dsts1, m1, m2, mb, c2, base,
483 copy, renamedelete):
484 if src not in m2:
485 # deleted on side 2
486 if src not in m1:
487 # renamed on side 1, deleted on side 2
488 renamedelete[src] = dsts1
489 elif m2[src] != mb[src]:
490 if not _related(c2[src], base[src]):
491 return
492 # modified on side 2
493 for dst in dsts1:
494 if dst not in m2:
495 # dst not added on side 2 (handle as regular
496 # "both created" case in manifestmerge otherwise)
497 copy[dst] = src
498
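A hedged model of _checksinglesidecopies() above, with manifests as plain {path: filenode} dicts. The _related() filenode-ancestry check is deliberately omitted, so this is a simplification of the real logic, not a full re-implementation:

def check_single_side(src, dsts1, m1, m2, mb, copy, renamedelete):
    if src not in m2:                    # source deleted on side 2
        if src not in m1:                # ...and renamed away on side 1
            renamedelete[src] = dsts1
    elif m2.get(src) != mb.get(src):     # source modified on side 2
        for dst in dsts1:
            if dst not in m2:            # dst not independently added there
                copy[dst] = src          # merge side-2 edits into dst

copy, renamedelete = {}, {}
check_single_side('a', ['b'], m1={'b': 'n1'}, m2={}, mb={'a': 'n1'},
                  copy=copy, renamedelete=renamedelete)
assert renamedelete == {'a': ['b']} and copy == {}
check_single_side('a', ['b'], m1={'a': 'n1', 'b': 'n1'}, m2={'a': 'n3'},
                  mb={'a': 'n1'}, copy=copy, renamedelete=renamedelete)
assert copy == {'b': 'a'}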
525 def _fullcopytracing(repo, c1, c2, base):
499 def _fullcopytracing(repo, c1, c2, base):
526 """ The full copytracing algorithm which finds all the new files that were
500 """ The full copytracing algorithm which finds all the new files that were
527 added from merge base up to the top commit and for each file it checks if
501 added from merge base up to the top commit and for each file it checks if
@@ -530,159 +504,84 b' def _fullcopytracing(repo, c1, c2, base)'
530 This is pretty slow when a lot of changesets are involved but will track all
504 This is pretty slow when a lot of changesets are involved but will track all
531 the copies.
505 the copies.
532 """
506 """
533 # In certain scenarios (e.g. graft, update or rebase), base can be
534 # overridden We still need to know a real common ancestor in this case We
535 # can't just compute _c1.ancestor(_c2) and compare it to ca, because there
536 # can be multiple common ancestors, e.g. in case of bidmerge. Because our
537 # caller may not know if the revision passed in lieu of the CA is a genuine
538 # common ancestor or not without explicitly checking it, it's better to
539 # determine that here.
540 #
541 # base.isancestorof(wc) is False, work around that
542 _c1 = c1.p1() if c1.rev() is None else c1
543 _c2 = c2.p1() if c2.rev() is None else c2
544 # an endpoint is "dirty" if it isn't a descendant of the merge base
545 # if we have a dirty endpoint, we need to trigger graft logic, and also
546 # keep track of which endpoint is dirty
547 dirtyc1 = not base.isancestorof(_c1)
548 dirtyc2 = not base.isancestorof(_c2)
549 graft = dirtyc1 or dirtyc2
550 tca = base
551 if graft:
552 tca = _c1.ancestor(_c2)
553
554 limit = _findlimit(repo, c1, c2)
555 repo.ui.debug(" searching for copies back to rev %d\n" % limit)
556
557 m1 = c1.manifest()
507 m1 = c1.manifest()
558 m2 = c2.manifest()
508 m2 = c2.manifest()
559 mb = base.manifest()
509 mb = base.manifest()
560
510
561 # gather data from _checkcopies:
511 copies1 = pathcopies(base, c1)
562 # - diverge = record all diverges in this dict
512 copies2 = pathcopies(base, c2)
563 # - copy = record all non-divergent copies in this dict
513
564 # - fullcopy = record all copies in this dict
514 inversecopies1 = {}
565 # - incomplete = record non-divergent partial copies here
515 inversecopies2 = {}
566 # - incompletediverge = record divergent partial copies here
516 for dst, src in copies1.items():
567 diverge = {} # divergence data is shared
517 inversecopies1.setdefault(src, []).append(dst)
568 incompletediverge = {}
518 for dst, src in copies2.items():
569 data1 = {'copy': {},
519 inversecopies2.setdefault(src, []).append(dst)
570 'fullcopy': {},
520
571 'incomplete': {},
521 copy = {}
572 'diverge': diverge,
522 diverge = {}
573 'incompletediverge': incompletediverge,
523 renamedelete = {}
574 }
524 allsources = set(inversecopies1) | set(inversecopies2)
575 data2 = {'copy': {},
525 for src in allsources:
576 'fullcopy': {},
526 dsts1 = inversecopies1.get(src)
577 'incomplete': {},
527 dsts2 = inversecopies2.get(src)
578 'diverge': diverge,
528 if dsts1 and dsts2:
579 'incompletediverge': incompletediverge,
529 # copied/renamed on both sides
580 }
530 if src not in m1 and src not in m2:
531 # renamed on both sides
532 dsts1 = set(dsts1)
533 dsts2 = set(dsts2)
534 # If there's some overlap in the rename destinations, we
535 # consider it not divergent. For example, if side 1 copies 'a'
536 # to 'b' and 'c' and deletes 'a', and side 2 copies 'a' to 'c'
537 # and 'd' and deletes 'a'.
538 if dsts1 & dsts2:
539 for dst in (dsts1 & dsts2):
540 copy[dst] = src
541 else:
542 diverge[src] = sorted(dsts1 | dsts2)
543 elif src in m1 and src in m2:
544 # copied on both sides
545 dsts1 = set(dsts1)
546 dsts2 = set(dsts2)
547 for dst in (dsts1 & dsts2):
548 copy[dst] = src
549 # TODO: Handle cases where it was renamed on one side and copied
550 # on the other side
551 elif dsts1:
552 # copied/renamed only on side 1
553 _checksinglesidecopies(src, dsts1, m1, m2, mb, c2, base,
554 copy, renamedelete)
555 elif dsts2:
556 # copied/renamed only on side 2
557 _checksinglesidecopies(src, dsts2, m2, m1, mb, c1, base,
558 copy, renamedelete)
559
560 renamedeleteset = set()
561 divergeset = set()
562 for dsts in diverge.values():
563 divergeset.update(dsts)
564 for dsts in renamedelete.values():
565 renamedeleteset.update(dsts)
581
566
582 # find interesting file sets from manifests
567 # find interesting file sets from manifests
583 addedinm1 = m1.filesnotin(mb, repo.narrowmatch())
568 addedinm1 = m1.filesnotin(mb, repo.narrowmatch())
584 addedinm2 = m2.filesnotin(mb, repo.narrowmatch())
569 addedinm2 = m2.filesnotin(mb, repo.narrowmatch())
585 bothnew = sorted(addedinm1 & addedinm2)
570 u1 = sorted(addedinm1 - addedinm2)
586 if tca == base:
571 u2 = sorted(addedinm2 - addedinm1)
587 # unmatched file from base
588 u1r, u2r = _computenonoverlap(repo, c1, c2, addedinm1, addedinm2)
589 u1u, u2u = u1r, u2r
590 else:
591 # unmatched file from base (DAG rotation in the graft case)
592 u1r, u2r = _computenonoverlap(repo, c1, c2, addedinm1, addedinm2,
593 baselabel='base')
594 # unmatched file from topological common ancestors (no DAG rotation)
595 # need to recompute this for directory move handling when grafting
596 mta = tca.manifest()
597 u1u, u2u = _computenonoverlap(repo, c1, c2,
598 m1.filesnotin(mta, repo.narrowmatch()),
599 m2.filesnotin(mta, repo.narrowmatch()),
600 baselabel='topological common ancestor')
601
602 for f in u1u:
603 _checkcopies(c1, c2, f, base, tca, dirtyc1, limit, data1)
604
605 for f in u2u:
606 _checkcopies(c2, c1, f, base, tca, dirtyc2, limit, data2)
607
608 copy = dict(data1['copy'])
609 copy.update(data2['copy'])
610 fullcopy = dict(data1['fullcopy'])
611 fullcopy.update(data2['fullcopy'])
612
613 if dirtyc1:
614 _combinecopies(data2['incomplete'], data1['incomplete'], copy, diverge,
615 incompletediverge)
616 if dirtyc2:
617 _combinecopies(data1['incomplete'], data2['incomplete'], copy, diverge,
618 incompletediverge)
619
620 renamedelete = {}
621 renamedeleteset = set()
622 divergeset = set()
623 for of, fl in list(diverge.items()):
624 if len(fl) == 1 or of in c1 or of in c2:
625 del diverge[of] # not actually divergent, or not a rename
626 if of not in c1 and of not in c2:
627 # renamed on one side, deleted on the other side, but filter
628 # out files that have been renamed and then deleted
629 renamedelete[of] = [f for f in fl if f in c1 or f in c2]
630 renamedeleteset.update(fl) # reverse map for below
631 else:
632 divergeset.update(fl) # reverse map for below
633
572
634 if bothnew:
573 header = " unmatched files in %s"
635 repo.ui.debug(" unmatched files new in both:\n %s\n"
574 if u1:
636 % "\n ".join(bothnew))
575 repo.ui.debug("%s:\n %s\n" % (header % 'local', "\n ".join(u1)))
637 bothdiverge = {}
576 if u2:
638 bothincompletediverge = {}
577 repo.ui.debug("%s:\n %s\n" % (header % 'other', "\n ".join(u2)))
639 remainder = {}
640 both1 = {'copy': {},
641 'fullcopy': {},
642 'incomplete': {},
643 'diverge': bothdiverge,
644 'incompletediverge': bothincompletediverge
645 }
646 both2 = {'copy': {},
647 'fullcopy': {},
648 'incomplete': {},
649 'diverge': bothdiverge,
650 'incompletediverge': bothincompletediverge
651 }
652 for f in bothnew:
653 _checkcopies(c1, c2, f, base, tca, dirtyc1, limit, both1)
654 _checkcopies(c2, c1, f, base, tca, dirtyc2, limit, both2)
655 if dirtyc1 and dirtyc2:
656 remainder = _combinecopies(both2['incomplete'], both1['incomplete'],
657 copy, bothdiverge, bothincompletediverge)
658 remainder1 = _combinecopies(both1['incomplete'], both2['incomplete'],
659 copy, bothdiverge, bothincompletediverge)
660 remainder.update(remainder1)
661 elif dirtyc1:
662 # incomplete copies may only be found on the "dirty" side for bothnew
663 assert not both2['incomplete']
664 remainder = _combinecopies({}, both1['incomplete'], copy, bothdiverge,
665 bothincompletediverge)
666 elif dirtyc2:
667 assert not both1['incomplete']
668 remainder = _combinecopies({}, both2['incomplete'], copy, bothdiverge,
669 bothincompletediverge)
670 else:
671 # incomplete copies and divergences can't happen outside grafts
672 assert not both1['incomplete']
673 assert not both2['incomplete']
674 assert not bothincompletediverge
675 for f in remainder:
676 assert f not in bothdiverge
677 ic = remainder[f]
678 if ic[0] in (m1 if dirtyc1 else m2):
679 # backed-out rename on one side, but watch out for deleted files
680 bothdiverge[f] = ic
681 for of, fl in bothdiverge.items():
682 if len(fl) == 2 and fl[0] == fl[1]:
683 copy[fl[0]] = of # not actually divergent, just matching renames
684
578
685 if fullcopy and repo.ui.debugflag:
579 fullcopy = copies1.copy()
580 fullcopy.update(copies2)
581 if not fullcopy:
582 return copy, {}, diverge, renamedelete, {}
583
584 if repo.ui.debugflag:
686 repo.ui.debug(" all copies found (* = to merge, ! = divergent, "
585 repo.ui.debug(" all copies found (* = to merge, ! = divergent, "
687 "% = renamed and deleted):\n")
586 "% = renamed and deleted):\n")
688 for f in sorted(fullcopy):
587 for f in sorted(fullcopy):
@@ -697,16 +596,10 b' def _fullcopytracing(repo, c1, c2, base)'
697 note))
596 note))
698 del divergeset
597 del divergeset
699
598
700 if not fullcopy:
701 return copy, {}, diverge, renamedelete, {}
702
703 repo.ui.debug(" checking for directory renames\n")
599 repo.ui.debug(" checking for directory renames\n")
704
600
705 # generate a directory move map
601 # generate a directory move map
706 d1, d2 = c1.dirs(), c2.dirs()
602 d1, d2 = c1.dirs(), c2.dirs()
707 # Hack for adding '', which is not otherwise added, to d1 and d2
708 d1.addpath('/')
709 d2.addpath('/')
710 invalid = set()
603 invalid = set()
711 dirmove = {}
604 dirmove = {}
712
605
@@ -746,7 +639,7 b' def _fullcopytracing(repo, c1, c2, base)'
746
639
747 movewithdir = {}
640 movewithdir = {}
748 # check unaccounted nonoverlapping files against directory moves
641 # check unaccounted nonoverlapping files against directory moves
749 for f in u1r + u2r:
642 for f in u1 + u2:
750 if f not in fullcopy:
643 if f not in fullcopy:
751 for d in dirmove:
644 for d in dirmove:
752 if f.startswith(d):
645 if f.startswith(d):
@@ -893,99 +786,6 b' def _related(f1, f2):'
893 except StopIteration:
786 except StopIteration:
894 return False
787 return False
895
788
896 def _checkcopies(srcctx, dstctx, f, base, tca, remotebase, limit, data):
897 """
898 check possible copies of f from msrc to mdst
899
900 srcctx = starting context for f in msrc
901 dstctx = destination context for f in mdst
902 f = the filename to check (as in msrc)
903 base = the changectx used as a merge base
904 tca = topological common ancestor for graft-like scenarios
905 remotebase = True if base is outside tca::srcctx, False otherwise
906 limit = the rev number to not search beyond
907 data = dictionary of dictionary to store copy data. (see mergecopies)
908
909 note: limit is only an optimization, and provides no guarantee that
910 irrelevant revisions will not be visited
911 there is no easy way to make this algorithm stop in a guaranteed way
912 once it "goes behind a certain revision".
913 """
914
915 msrc = srcctx.manifest()
916 mdst = dstctx.manifest()
917 mb = base.manifest()
918 mta = tca.manifest()
919 # Might be true if this call is about finding backward renames,
920 # This happens in the case of grafts because the DAG is then rotated.
921 # If the file exists in both the base and the source, we are not looking
922 # for a rename on the source side, but on the part of the DAG that is
923 # traversed backwards.
924 #
925 # In the case there is both backward and forward renames (before and after
926 # the base) this is more complicated as we must detect a divergence.
927 # We use 'backwards = False' in that case.
928 backwards = not remotebase and base != tca and f in mb
929 getsrcfctx = _makegetfctx(srcctx)
930 getdstfctx = _makegetfctx(dstctx)
931
932 if msrc[f] == mb.get(f) and not remotebase:
933 # Nothing to merge
934 return
935
936 of = None
937 seen = {f}
938 for oc in getsrcfctx(f, msrc[f]).ancestors():
939 of = oc.path()
940 if of in seen:
941 # check limit late - grab last rename before
942 if oc.linkrev() < limit:
943 break
944 continue
945 seen.add(of)
946
947 # remember for dir rename detection
948 if backwards:
949 data['fullcopy'][of] = f # grafting backwards through renames
950 else:
951 data['fullcopy'][f] = of
952 if of not in mdst:
953 continue # no match, keep looking
954 if mdst[of] == mb.get(of):
955 return # no merge needed, quit early
956 c2 = getdstfctx(of, mdst[of])
957 # c2 might be a plain new file added on the destination side that is
958 # unrelated to the droids we are looking for.
959 cr = _related(oc, c2)
960 if cr and (of == f or of == c2.path()): # non-divergent
961 if backwards:
962 data['copy'][of] = f
963 elif of in mb:
964 data['copy'][f] = of
965 elif remotebase: # special case: a <- b <- a -> b "ping-pong" rename
966 data['copy'][of] = f
967 del data['fullcopy'][f]
968 data['fullcopy'][of] = f
969 else: # divergence w.r.t. graft CA on one side of topological CA
970 for sf in seen:
971 if sf in mb:
972 assert sf not in data['diverge']
973 data['diverge'][sf] = [f, of]
974 break
975 return
976
977 if of in mta:
978 if backwards or remotebase:
979 data['incomplete'][of] = f
980 else:
981 for sf in seen:
982 if sf in mb:
983 if tca == base:
984 data['diverge'].setdefault(sf, []).append(f)
985 else:
986 data['incompletediverge'][sf] = [of, f]
987 return
988
989 def duplicatecopies(repo, wctx, rev, fromrev, skiprev=None):
789 def duplicatecopies(repo, wctx, rev, fromrev, skiprev=None):
990 """reproduce copies from fromrev to rev in the dirstate
790 """reproduce copies from fromrev to rev in the dirstate
991
791
@@ -1005,8 +805,7 b' def duplicatecopies(repo, wctx, rev, fro'
1005 # metadata across the rebase anyway).
805 # metadata across the rebase anyway).
1006 exclude = pathcopies(repo[fromrev], repo[skiprev])
806 exclude = pathcopies(repo[fromrev], repo[skiprev])
1007 for dst, src in pathcopies(repo[fromrev], repo[rev]).iteritems():
807 for dst, src in pathcopies(repo[fromrev], repo[rev]).iteritems():
1008 # copies.pathcopies returns backward renames, so dst might not
1009 # actually be in the dirstate
1010 if dst in exclude:
808 if dst in exclude:
1011 continue
809 continue
1012 wctx[dst].markcopied(src)
810 if dst in wctx:
811 wctx[dst].markcopied(src)
@@ -608,6 +608,7 b' class curseschunkselector(object):'
608
608
609 # the currently selected header, hunk, or hunk-line
609 # the currently selected header, hunk, or hunk-line
610 self.currentselecteditem = self.headerlist[0]
610 self.currentselecteditem = self.headerlist[0]
611 self.lastapplieditem = None
611
612
612 # updated when printing out patch-display -- the 'lines' here are the
613 # updated when printing out patch-display -- the 'lines' here are the
613 # line positions *in the pad*, not on the screen.
614 # line positions *in the pad*, not on the screen.
@@ -723,7 +724,7 b' class curseschunkselector(object):'
723 self.currentselecteditem = nextitem
724 self.currentselecteditem = nextitem
724 self.recenterdisplayedarea()
725 self.recenterdisplayedarea()
725
726
726 def nextsametype(self):
727 def nextsametype(self, test=False):
727 currentitem = self.currentselecteditem
728 currentitem = self.currentselecteditem
728 sametype = lambda item: isinstance(item, type(currentitem))
729 sametype = lambda item: isinstance(item, type(currentitem))
729 nextitem = currentitem.nextitem()
730 nextitem = currentitem.nextitem()
@@ -739,7 +740,8 b' class curseschunkselector(object):'
739 self.togglefolded(parent)
740 self.togglefolded(parent)
740
741
741 self.currentselecteditem = nextitem
742 self.currentselecteditem = nextitem
742 self.recenterdisplayedarea()
743 if not test:
744 self.recenterdisplayedarea()
743
745
744 def rightarrowevent(self):
746 def rightarrowevent(self):
745 """
747 """
@@ -838,6 +840,8 b' class curseschunkselector(object):'
838 """
840 """
839 if item is None:
841 if item is None:
840 item = self.currentselecteditem
842 item = self.currentselecteditem
843 # Only set this when NOT using 'toggleall'
844 self.lastapplieditem = item
841
845
842 item.applied = not item.applied
846 item.applied = not item.applied
843
847
@@ -931,6 +935,45 b' class curseschunkselector(object):'
931 self.toggleapply(item)
935 self.toggleapply(item)
932 self.waslasttoggleallapplied = not self.waslasttoggleallapplied
936 self.waslasttoggleallapplied = not self.waslasttoggleallapplied
933
937
938 def toggleallbetween(self):
939 "toggle applied on or off for all items in range [lastapplied,current]."
940 if (not self.lastapplieditem or
941 self.currentselecteditem == self.lastapplieditem):
942 # Treat this like a normal 'x'/' '
943 self.toggleapply()
944 return
945
946 startitem = self.lastapplieditem
947 enditem = self.currentselecteditem
948 # Verify that enditem is "after" startitem, otherwise swap them.
949 for direction in ['forward', 'reverse']:
950 nextitem = startitem.nextitem()
951 while nextitem and nextitem != enditem:
952 nextitem = nextitem.nextitem()
953 if nextitem:
954 break
955 # Looks like we went the wrong direction :)
956 startitem, enditem = enditem, startitem
957
958 if not nextitem:
959 # We didn't find a path going either forward or backward? Don't know
960 # how this can happen, let's not crash though.
961 return
962
963 nextitem = startitem
964 # Switch all items to be the opposite state of the currently selected
965 # item. Specifically:
966 # [ ] startitem
967 # [x] middleitem
968 # [ ] enditem <-- currently selected
969 # This will turn all three on, since the currently selected item is off.
970 # This does *not* invert each item (i.e. middleitem stays marked/on)
971 desiredstate = not self.currentselecteditem.applied
972 while nextitem != enditem.nextitem():
973 if nextitem.applied != desiredstate:
974 self.toggleapply(item=nextitem)
975 nextitem = nextitem.nextitem()
976
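toggleallbetween() above sets every item in the range to the opposite of the currently selected item's state rather than inverting each one, so already-applied items in the middle stay applied. A flat-list model of that semantics (toggle_between is an illustrative name):

def toggle_between(applied, last_applied, current):
    desired = not applied[current]       # opposite of the selected item
    lo, hi = sorted((last_applied, current))
    for i in range(lo, hi + 1):
        applied[i] = desired             # set, don't invert individually
    return applied

assert toggle_between([False, True, False], 0, 2) == [True, True, True]
assert toggle_between([True, True, True], 2, 0) == [False, False, False]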
934 def togglefolded(self, item=None, foldparent=False):
977 def togglefolded(self, item=None, foldparent=False):
935 "toggle folded flag of specified item (defaults to currently selected)"
978 "toggle folded flag of specified item (defaults to currently selected)"
936 if item is None:
979 if item is None:
@@ -1460,9 +1503,10 b' changes, the unselected changes are stil'
1460 can use crecord multiple times to split large changes into smaller changesets.
1503 can use crecord multiple times to split large changes into smaller changesets.
1461 the following are valid keystrokes:
1504 the following are valid keystrokes:
1462
1505
1463 [space] : (un-)select item ([~]/[x] = partly/fully applied)
1506 x [space] : (un-)select item ([~]/[x] = partly/fully applied)
1464 [enter] : (un-)select item and go to next item of same type
1507 [enter] : (un-)select item and go to next item of same type
1465 A : (un-)select all items
1508 A : (un-)select all items
1509 X : (un-)select all items between current and most-recent
1466 up/down-arrow [k/j] : go to previous/next unfolded item
1510 up/down-arrow [k/j] : go to previous/next unfolded item
1467 pgup/pgdn [K/J] : go to previous/next item of same type
1511 pgup/pgdn [K/J] : go to previous/next item of same type
1468 right/left-arrow [l/h] : go to child item / parent item
1512 right/left-arrow [l/h] : go to child item / parent item
@@ -1724,7 +1768,7 b' are you sure you want to review/edit and'
1724 keypressed = pycompat.bytestr(keypressed)
1768 keypressed = pycompat.bytestr(keypressed)
1725 if keypressed in ["k", "KEY_UP"]:
1769 if keypressed in ["k", "KEY_UP"]:
1726 self.uparrowevent()
1770 self.uparrowevent()
1727 if keypressed in ["K", "KEY_PPAGE"]:
1771 elif keypressed in ["K", "KEY_PPAGE"]:
1728 self.uparrowshiftevent()
1772 self.uparrowshiftevent()
1729 elif keypressed in ["j", "KEY_DOWN"]:
1773 elif keypressed in ["j", "KEY_DOWN"]:
1730 self.downarrowevent()
1774 self.downarrowevent()
@@ -1742,8 +1786,6 b' are you sure you want to review/edit and'
1742 self.toggleamend(self.opts, test)
1786 self.toggleamend(self.opts, test)
1743 elif keypressed in ["c"]:
1787 elif keypressed in ["c"]:
1744 return True
1788 return True
1745 elif test and keypressed in ['X']:
1746 return True
1747 elif keypressed in ["r"]:
1789 elif keypressed in ["r"]:
1748 if self.reviewcommit():
1790 if self.reviewcommit():
1749 self.opts['review'] = True
1791 self.opts['review'] = True
@@ -1751,11 +1793,13 b' are you sure you want to review/edit and'
1751 elif test and keypressed in ['R']:
1793 elif test and keypressed in ['R']:
1752 self.opts['review'] = True
1794 self.opts['review'] = True
1753 return True
1795 return True
1754 elif keypressed in [' '] or (test and keypressed in ["TOGGLE"]):
1796 elif keypressed in [' ', 'x']:
1755 self.toggleapply()
1797 self.toggleapply()
1756 elif keypressed in ['\n', 'KEY_ENTER']:
1798 elif keypressed in ['\n', 'KEY_ENTER']:
1757 self.toggleapply()
1799 self.toggleapply()
1758 self.nextsametype()
1800 self.nextsametype(test=test)
1801 elif keypressed in ['X']:
1802 self.toggleallbetween()
1759 elif keypressed in ['A']:
1803 elif keypressed in ['A']:
1760 self.toggleall()
1804 self.toggleall()
1761 elif keypressed in ['e']:
1805 elif keypressed in ['e']:
@@ -259,13 +259,10 b' def descendantrevs(revs, revsfn, parentr'
259 yield rev
259 yield rev
260 break
260 break
261
261
262 def _reachablerootspure(repo, minroot, roots, heads, includepath):
262 def _reachablerootspure(pfunc, minroot, roots, heads, includepath):
263 """return (heads(::<roots> and ::<heads>))
263 """See revlog.reachableroots"""
264
265 If includepath is True, return (<roots>::<heads>)."""
266 if not roots:
264 if not roots:
267 return []
265 return []
268 parentrevs = repo.changelog.parentrevs
269 roots = set(roots)
266 roots = set(roots)
270 visit = list(heads)
267 visit = list(heads)
271 reachable = set()
268 reachable = set()
@@ -282,7 +279,7 b' def _reachablerootspure(repo, minroot, r'
282 reached(rev)
279 reached(rev)
283 if not includepath:
280 if not includepath:
284 continue
281 continue
285 parents = parentrevs(rev)
282 parents = pfunc(rev)
286 seen[rev] = parents
283 seen[rev] = parents
287 for parent in parents:
284 for parent in parents:
288 if parent >= minroot and parent not in seen:
285 if parent >= minroot and parent not in seen:
@@ -298,18 +295,13 b' def _reachablerootspure(repo, minroot, r'
298 return reachable
295 return reachable
299
296
300 def reachableroots(repo, roots, heads, includepath=False):
297 def reachableroots(repo, roots, heads, includepath=False):
301 """return (heads(::<roots> and ::<heads>))
298 """See revlog.reachableroots"""
302
303 If includepath is True, return (<roots>::<heads>)."""
304 if not roots:
299 if not roots:
305 return baseset()
300 return baseset()
306 minroot = roots.min()
301 minroot = roots.min()
307 roots = list(roots)
302 roots = list(roots)
308 heads = list(heads)
303 heads = list(heads)
309 try:
304 revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
310 revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
311 except AttributeError:
312 revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
313 revs = baseset(revs)
305 revs = baseset(revs)
314 revs.sort()
306 revs.sort()
315 return revs
307 return revs
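
(The two hunks above narrow the pure-Python fallback to a bare parent-lookup function and fall back to it only when the C implementation raises AttributeError. As a rough model of what the fallback computes -- the name and the fixed-point loop here are illustrative, not Mercurial's actual single-pass walk:)

    def reachableroots_sketch(pfunc, minroot, roots, heads, includepath=False):
        # collect every ancestor of ``heads``, pruning below ``minroot``
        roots = set(roots)
        ancestors = set()
        stack = list(heads)
        while stack:
            rev = stack.pop()
            if rev in ancestors:
                continue
            ancestors.add(rev)
            stack.extend(p for p in pfunc(rev) if p >= minroot)
        reached = roots & ancestors
        if not includepath:
            return sorted(reached)
        # includepath keeps everything on a <roots>::<heads> path: any
        # ancestor of a head that also descends from a reached root
        onpath = set(reached)
        changed = True
        while changed:
            changed = False
            for rev in ancestors - onpath:
                if any(p in onpath for p in pfunc(rev)):
                    onpath.add(rev)
                    changed = True
        return sorted(onpath)

    parents = {0: [], 1: [0], 2: [1], 3: [1]}
    print(reachableroots_sketch(parents.get, 0, [1], [2, 3], includepath=True))
    # [1, 2, 3]
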
@@ -1240,7 +1240,7 b' def debuginstall(ui, **opts):'
1240
1240
1241 # Python
1241 # Python
1242 fm.write('pythonexe', _("checking Python executable (%s)\n"),
1242 fm.write('pythonexe', _("checking Python executable (%s)\n"),
1243 pycompat.sysexecutable)
1243 pycompat.sysexecutable or _("unknown"))
1244 fm.write('pythonver', _("checking Python version (%s)\n"),
1244 fm.write('pythonver', _("checking Python version (%s)\n"),
1245 ("%d.%d.%d" % sys.version_info[:3]))
1245 ("%d.%d.%d" % sys.version_info[:3]))
1246 fm.write('pythonlib', _("checking Python lib (%s)...\n"),
1246 fm.write('pythonlib', _("checking Python lib (%s)...\n"),
@@ -1278,16 +1278,28 b' def debuginstall(ui, **opts):'
1278 fm.write('hgmodules', _("checking installed modules (%s)...\n"),
1278 fm.write('hgmodules', _("checking installed modules (%s)...\n"),
1279 os.path.dirname(pycompat.fsencode(__file__)))
1279 os.path.dirname(pycompat.fsencode(__file__)))
1280
1280
1281 if policy.policy in ('c', 'allow'):
1281 rustandc = policy.policy in ('rust+c', 'rust+c-allow')
1282 rustext = rustandc # for now, that's the only case
1283 cext = policy.policy in ('c', 'allow') or rustandc
1284 nopure = cext or rustext
1285 if nopure:
1282 err = None
1286 err = None
1283 try:
1287 try:
1284 from .cext import (
1288 if cext:
1285 base85,
1289 from .cext import (
1286 bdiff,
1290 base85,
1287 mpatch,
1291 bdiff,
1288 osutil,
1292 mpatch,
1289 )
1293 osutil,
1290 dir(bdiff), dir(mpatch), dir(base85), dir(osutil) # quiet pyflakes
1294 )
1295 # quiet pyflakes
1296 dir(bdiff), dir(mpatch), dir(base85), dir(osutil)
1297 if rustext:
1298 from .rustext import (
1299 ancestor,
1300 dirstate,
1301 )
1302 dir(ancestor), dir(dirstate) # quiet pyflakes
1291 except Exception as inst:
1303 except Exception as inst:
1292 err = stringutil.forcebytestr(inst)
1304 err = stringutil.forcebytestr(inst)
1293 problems += 1
1305 problems += 1
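
(The policy gating added above is easiest to read in isolation; a reduced sketch using the same policy strings as the hunk, with the module names written out purely for illustration:)

    def modulestocheck(policy):
        # mirror the booleans computed in the hunk above
        rustandc = policy in ('rust+c', 'rust+c-allow')
        cext = policy in ('c', 'allow') or rustandc
        checks = []
        if cext:
            checks.append('mercurial.cext')
        if rustandc:  # for now rustext is only exercised alongside cext
            checks.append('mercurial.rustext')
        return checks

    print(modulestocheck('rust+c'))  # ['mercurial.cext', 'mercurial.rustext']
    print(modulestocheck('allow'))   # ['mercurial.cext']
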
@@ -28,6 +28,7 b' from . import ('
28 )
28 )
29
29
30 parsers = policy.importmod(r'parsers')
30 parsers = policy.importmod(r'parsers')
31 dirstatemod = policy.importrust(r'dirstate', default=parsers)
31
32
32 propertycache = util.propertycache
33 propertycache = util.propertycache
33 filecache = scmutil.filecache
34 filecache = scmutil.filecache
@@ -390,12 +391,24 b' class dirstate(object):'
390 self._updatedfiles.add(f)
391 self._updatedfiles.add(f)
391 self._map.addfile(f, oldstate, state, mode, size, mtime)
392 self._map.addfile(f, oldstate, state, mode, size, mtime)
392
393
393 def normal(self, f):
394 def normal(self, f, parentfiledata=None):
394 '''Mark a file normal and clean.'''
395 '''Mark a file normal and clean.
395 s = os.lstat(self._join(f))
396
396 mtime = s[stat.ST_MTIME]
397 parentfiledata: (mode, size, mtime) of the clean file
397 self._addpath(f, 'n', s.st_mode,
398
398 s.st_size & _rangemask, mtime & _rangemask)
399 parentfiledata should be computed from memory (for mode,
400 size), at or as close as possible to the point where we
401 determined the file was clean, to limit the risk of the
402 file having been changed by an external process between the
403 moment where the file was determined to be clean and now.'''
404 if parentfiledata:
405 (mode, size, mtime) = parentfiledata
406 else:
407 s = os.lstat(self._join(f))
408 mode = s.st_mode
409 size = s.st_size
410 mtime = s[stat.ST_MTIME]
411 self._addpath(f, 'n', mode, size & _rangemask, mtime & _rangemask)
399 self._map.copymap.pop(f, None)
412 self._map.copymap.pop(f, None)
400 if f in self._map.nonnormalset:
413 if f in self._map.nonnormalset:
401 self._map.nonnormalset.remove(f)
414 self._map.nonnormalset.remove(f)
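
(A caller of the new parameter captures the stat fields at the moment it decides the file is clean; the helper below is hypothetical -- real callers would pass values they already hold from the status walk:)

    import os
    import stat

    def parentfiledata_from_stat(path):
        # capture (mode, size, mtime) once, so normal() need not lstat again
        s = os.lstat(path)
        return (s.st_mode, s.st_size, s[stat.ST_MTIME])

    # dirstate.normal(f, parentfiledata=parentfiledata_from_stat(fullpath))
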
@@ -656,8 +669,6 b' class dirstate(object):'
656 self._dirty = False
669 self._dirty = False
657
670
658 def _dirignore(self, f):
671 def _dirignore(self, f):
659 if f == '.':
660 return False
661 if self._ignore(f):
672 if self._ignore(f):
662 return True
673 return True
663 for p in util.finddirs(f):
674 for p in util.finddirs(f):
@@ -751,15 +762,16 b' class dirstate(object):'
751 del files[i]
762 del files[i]
752 j += 1
763 j += 1
753
764
754 if not files or '.' in files:
765 if not files or '' in files:
755 files = ['.']
766 files = ['']
767 # constructing the foldmap is expensive, so don't do it for the
768 # common case where files is ['']
769 normalize = None
756 results = dict.fromkeys(subrepos)
770 results = dict.fromkeys(subrepos)
757 results['.hg'] = None
771 results['.hg'] = None
758
772
759 for ff in files:
773 for ff in files:
760 # constructing the foldmap is expensive, so don't do it for the
774 if normalize:
761 # common case where files is ['.']
762 if normalize and ff != '.':
763 nf = normalize(ff, False, True)
775 nf = normalize(ff, False, True)
764 else:
776 else:
765 nf = ff
777 nf = ff
@@ -903,9 +915,7 b' class dirstate(object):'
903 if visitentries == 'this' or visitentries == 'all':
915 if visitentries == 'this' or visitentries == 'all':
904 visitentries = None
916 visitentries = None
905 skip = None
917 skip = None
906 if nd == '.':
918 if nd != '':
907 nd = ''
908 else:
909 skip = '.hg'
919 skip = '.hg'
910 try:
920 try:
911 entries = listdir(join(nd), stat=True, skip=skip)
921 entries = listdir(join(nd), stat=True, skip=skip)
@@ -1465,7 +1475,7 b' class dirstatemap(object):'
1465 # parsing the dirstate.
1475 # parsing the dirstate.
1466 #
1476 #
1467 # (we cannot decorate the function directly since it is in a C module)
1477 # (we cannot decorate the function directly since it is in a C module)
1468 parse_dirstate = util.nogc(parsers.parse_dirstate)
1478 parse_dirstate = util.nogc(dirstatemod.parse_dirstate)
1469 p = parse_dirstate(self._map, self.copymap, st)
1479 p = parse_dirstate(self._map, self.copymap, st)
1470 if not self._dirtyparents:
1480 if not self._dirtyparents:
1471 self.setparents(*p)
1481 self.setparents(*p)
@@ -1476,8 +1486,8 b' class dirstatemap(object):'
1476 self.get = self._map.get
1486 self.get = self._map.get
1477
1487
1478 def write(self, st, now):
1488 def write(self, st, now):
1479 st.write(parsers.pack_dirstate(self._map, self.copymap,
1489 st.write(dirstatemod.pack_dirstate(self._map, self.copymap,
1480 self.parents(), now))
1490 self.parents(), now))
1481 st.close()
1491 st.close()
1482 self._dirtyparents = False
1492 self._dirtyparents = False
1483 self.nonnormalset, self.otherparentset = self.nonnormalentries()
1493 self.nonnormalset, self.otherparentset = self.nonnormalentries()
@@ -343,10 +343,19 b' def checkheads(pushop):'
343 # 1. Check for new branches on the remote.
343 # 1. Check for new branches on the remote.
344 if newbranches and not newbranch: # new branch requires --new-branch
344 if newbranches and not newbranch: # new branch requires --new-branch
345 branchnames = ', '.join(sorted(newbranches))
345 branchnames = ', '.join(sorted(newbranches))
346 raise error.Abort(_("push creates new remote branches: %s!")
346 # Calculate how many of the new branches are closed branches
347 % branchnames,
347 closedbranches = set()
348 hint=_("use 'hg push --new-branch' to create"
348 for tag, heads, tip, isclosed in repo.branchmap().iterbranches():
349 " new remote branches"))
349 if isclosed:
350 closedbranches.add(tag)
351 closedbranches = (closedbranches & set(newbranches))
352 if closedbranches:
353 errmsg = (_("push creates new remote branches: %s (%d closed)!")
354 % (branchnames, len(closedbranches)))
355 else:
356 errmsg = (_("push creates new remote branches: %s!")% branchnames)
357 hint=_("use 'hg push --new-branch' to create new remote branches")
358 raise error.Abort(errmsg, hint=hint)
350
359
351 # 2. Find heads that we need not warn about
360 # 2. Find heads that we need not warn about
352 nowarnheads = _nowarnheads(pushop)
361 nowarnheads = _nowarnheads(pushop)
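
(The closed-branch accounting above boils down to one pass over the branchmap plus a set intersection; standalone, with iterbranches stubbed as plain tuples:)

    def closednewbranches(newbranches, iterbranches):
        # iterbranches yields (tag, heads, tip, isclosed) tuples, matching
        # the branchmap API used above
        closed = {tag for tag, heads, tip, isclosed in iterbranches if isclosed}
        return closed & set(newbranches)

    branches = [(b'default', [], b'', False), (b'old', [], b'', True)]
    print(closednewbranches([b'old', b'feature'], branches))  # {b'old'}
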
@@ -539,10 +539,12 b' def push(repo, remote, force=False, revs'
539 # get lock as we might write phase data
539 # get lock as we might write phase data
540 wlock = lock = None
540 wlock = lock = None
541 try:
541 try:
542 # bundle2 push may receive a reply bundle touching bookmarks or other
542 # bundle2 push may receive a reply bundle touching bookmarks
543 # things requiring the wlock. Take it now to ensure proper ordering.
543 # requiring the wlock. Take it now to ensure proper ordering.
544 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
544 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
545 if (not _forcebundle1(pushop)) and maypushback:
545 if ((not _forcebundle1(pushop)) and
546 maypushback and
547 not bookmod.bookmarksinstore(repo)):
546 wlock = pushop.repo.wlock()
548 wlock = pushop.repo.wlock()
547 lock = pushop.repo.lock()
549 lock = pushop.repo.lock()
548 pushop.trmanager = transactionmanager(pushop.repo,
550 pushop.trmanager = transactionmanager(pushop.repo,
@@ -1548,7 +1550,10 b' def pull(repo, remote, heads=None, force'
1548 raise error.Abort(msg)
1550 raise error.Abort(msg)
1549
1551
1550 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
1552 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
1551 with repo.wlock(), repo.lock(), pullop.trmanager:
1553 wlock = util.nullcontextmanager()
1554 if not bookmod.bookmarksinstore(repo):
1555 wlock = repo.wlock()
1556 with wlock, repo.lock(), pullop.trmanager:
1552 # Use the modern wire protocol, if available.
1557 # Use the modern wire protocol, if available.
1553 if remote.capable('command-changesetdata'):
1558 if remote.capable('command-changesetdata'):
1554 exchangev2.pull(pullop)
1559 exchangev2.pull(pullop)
@@ -2395,7 +2400,8 b' def unbundle(repo, cg, heads, source, ur'
2395 try:
2400 try:
2396 def gettransaction():
2401 def gettransaction():
2397 if not lockandtr[2]:
2402 if not lockandtr[2]:
2398 lockandtr[0] = repo.wlock()
2403 if not bookmod.bookmarksinstore(repo):
2404 lockandtr[0] = repo.wlock()
2399 lockandtr[1] = repo.lock()
2405 lockandtr[1] = repo.lock()
2400 lockandtr[2] = repo.transaction(source)
2406 lockandtr[2] = repo.transaction(source)
2401 lockandtr[2].hookargs['source'] = source
2407 lockandtr[2].hookargs['source'] = source
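
(All three hunks above apply the same rule: skip the working-copy lock when bookmarks live in the store, because the store lock then covers them. A condensed sketch of the pattern, substituting contextlib.nullcontext from Python 3.7+ for util.nullcontextmanager:)

    import contextlib

    def pulllocks(repo, bookmarksinstore):
        # take wlock only while bookmarks are still written outside the store
        wlock = contextlib.nullcontext() if bookmarksinstore else repo.wlock()
        return wlock, repo.lock()

    # usage mirrors the pull() hunk:
    #   wlock, lock = pulllocks(repo, bookmod.bookmarksinstore(repo))
    #   with wlock, lock, pullop.trmanager:
    #       ...
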
@@ -43,7 +43,8 b' from .utils import ('
43 'progress',
43 'progress',
44 'interhg',
44 'interhg',
45 'inotify',
45 'inotify',
46 'hgcia'
46 'hgcia',
47 'shelve',
47 }
48 }
48
49
49 def extensions(ui=None):
50 def extensions(ui=None):
@@ -221,14 +222,7 b' def _runextsetup(name, ui):'
221 extsetup = getattr(_extensions[name], 'extsetup', None)
222 extsetup = getattr(_extensions[name], 'extsetup', None)
222 if extsetup:
223 if extsetup:
223 try:
224 try:
224 try:
225 extsetup(ui)
225 extsetup(ui)
226 except TypeError:
227 if pycompat.getargspec(extsetup).args:
228 raise
229 ui.deprecwarn("extsetup for '%s' must take a ui argument"
230 % name, "4.9")
231 extsetup() # old extsetup with no ui argument
232 except Exception as inst:
226 except Exception as inst:
233 ui.traceback(force=True)
227 ui.traceback(force=True)
234 msg = stringutil.forcebytestr(inst)
228 msg = stringutil.forcebytestr(inst)
@@ -15,9 +15,12 b' from . import ('
15 commands,
15 commands,
16 error,
16 error,
17 extensions,
17 extensions,
18 pycompat,
18 registrar,
19 registrar,
19 )
20 )
20
21
22 from hgdemandimport import tracing
23
21 class exthelper(object):
24 class exthelper(object):
22 """Helper for modular extension setup
25 """Helper for modular extension setup
23
26
@@ -135,7 +138,8 b' class exthelper(object):'
135 for cont, funcname, wrapper in self._functionwrappers:
138 for cont, funcname, wrapper in self._functionwrappers:
136 extensions.wrapfunction(cont, funcname, wrapper)
139 extensions.wrapfunction(cont, funcname, wrapper)
137 for c in self._uicallables:
140 for c in self._uicallables:
138 c(ui)
141 with tracing.log(b'finaluisetup: %s', pycompat.sysbytes(repr(c))):
142 c(ui)
139
143
140 def finaluipopulate(self, ui):
144 def finaluipopulate(self, ui):
141 """Method to be used as the extension uipopulate
145 """Method to be used as the extension uipopulate
@@ -175,7 +179,8 b' class exthelper(object):'
175 entry[1].append(opt)
179 entry[1].append(opt)
176
180
177 for c in self._extcallables:
181 for c in self._extcallables:
178 c(ui)
182 with tracing.log(b'finalextsetup: %s', pycompat.sysbytes(repr(c))):
183 c(ui)
179
184
180 def finalreposetup(self, ui, repo):
185 def finalreposetup(self, ui, repo):
181 """Method to be used as the extension reposetup
186 """Method to be used as the extension reposetup
@@ -187,7 +192,8 b' class exthelper(object):'
187 - Changes to repo.__class__, repo.dirstate.__class__
192 - Changes to repo.__class__, repo.dirstate.__class__
188 """
193 """
189 for c in self._repocallables:
194 for c in self._repocallables:
190 c(ui, repo)
195 with tracing.log(b'finalreposetup: %s', pycompat.sysbytes(repr(c))):
196 c(ui, repo)
191
197
192 def uisetup(self, call):
198 def uisetup(self, call):
193 """Decorated function will be executed during uisetup
199 """Decorated function will be executed during uisetup
@@ -60,17 +60,20 b' nomerge = internaltool.nomerge'
60 mergeonly = internaltool.mergeonly # just the full merge, no premerge
60 mergeonly = internaltool.mergeonly # just the full merge, no premerge
61 fullmerge = internaltool.fullmerge # both premerge and merge
61 fullmerge = internaltool.fullmerge # both premerge and merge
62
62
63 # IMPORTANT: keep the last line of this prompt very short ("What do you want to
64 # do?") because of issue6158, ideally to <40 English characters (to allow other
65 # languages that may take more columns to still have a chance to fit in an
66 # 80-column screen).
63 _localchangedotherdeletedmsg = _(
67 _localchangedotherdeletedmsg = _(
64 "file '%(fd)s' was deleted in other%(o)s but was modified in local%(l)s.\n"
68 "file '%(fd)s' was deleted in other%(o)s but was modified in local%(l)s.\n"
65 "What do you want to do?\n"
69 "You can use (c)hanged version, (d)elete, or leave (u)nresolved.\n"
66 "use (c)hanged version, (d)elete, or leave (u)nresolved?"
70 "What do you want to do?"
67 "$$ &Changed $$ &Delete $$ &Unresolved")
71 "$$ &Changed $$ &Delete $$ &Unresolved")
68
72
69 _otherchangedlocaldeletedmsg = _(
73 _otherchangedlocaldeletedmsg = _(
70 "file '%(fd)s' was deleted in local%(l)s but was modified in other%(o)s.\n"
74 "file '%(fd)s' was deleted in local%(l)s but was modified in other%(o)s.\n"
71 "What do you want to do?\n"
75 "You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved.\n"
72 "use (c)hanged version, leave (d)eleted, or "
76 "What do you want to do?"
73 "leave (u)nresolved?"
74 "$$ &Changed $$ &Deleted $$ &Unresolved")
77 "$$ &Changed $$ &Deleted $$ &Unresolved")
75
78
76 class absentfilectx(object):
79 class absentfilectx(object):
@@ -299,9 +302,14 b' def _iprompt(repo, mynode, orig, fcd, fc'
299 _otherchangedlocaldeletedmsg % prompts, 2)
302 _otherchangedlocaldeletedmsg % prompts, 2)
300 choice = ['other', 'local', 'unresolved'][index]
303 choice = ['other', 'local', 'unresolved'][index]
301 else:
304 else:
305 # IMPORTANT: keep the last line of this prompt ("What do you want to
306 # do?") very short, see comment next to _localchangedotherdeletedmsg
307 # at the top of the file for details.
302 index = ui.promptchoice(
308 index = ui.promptchoice(
303 _("keep (l)ocal%(l)s, take (o)ther%(o)s, or leave (u)nresolved"
309 _("file '%(fd)s' needs to be resolved.\n"
304 " for %(fd)s?"
310 "You can keep (l)ocal%(l)s, take (o)ther%(o)s, or leave "
311 "(u)nresolved.\n"
312 "What do you want to do?"
305 "$$ &Local $$ &Other $$ &Unresolved") % prompts, 2)
313 "$$ &Local $$ &Other $$ &Unresolved") % prompts, 2)
306 choice = ['local', 'other', 'unresolved'][index]
314 choice = ['local', 'other', 'unresolved'][index]
307
315
@@ -469,22 +469,6 b' def ascii(ui, state, type, char, text, c'
469 while len(text) < len(lines):
469 while len(text) < len(lines):
470 text.append("")
470 text.append("")
471
471
472 if any(len(char) > 1 for char in edgemap.values()):
473 # limit drawing an edge to the first or last N lines of the current
474 # section the rest of the edge is drawn like a parent line.
475 parent = state['styles'][PARENT][-1:]
476 def _drawgp(char, i):
477 # should a grandparent character be drawn for this line?
478 if len(char) < 2:
479 return True
480 num = int(char[:-1])
481 # either skip first num lines or take last num lines, based on sign
482 return -num <= i if num < 0 else (len(lines) - i) <= num
483 for i, line in enumerate(lines):
484 line[:] = [c[-1:] if _drawgp(c, i) else parent for c in line]
485 edgemap.update(
486 (e, (c if len(c) < 2 else parent)) for e, c in edgemap.items())
487
488 # print lines
472 # print lines
489 indentation_level = max(ncols, ncols + coldiff)
473 indentation_level = max(ncols, ncols + coldiff)
490 lines = ["%-*s " % (2 * indentation_level, "".join(line)) for line in lines]
474 lines = ["%-*s " % (2 * indentation_level, "".join(line)) for line in lines]
@@ -32,6 +32,7 b' def bisect(repo, state):'
32 if searching for a first bad one.
32 if searching for a first bad one.
33 """
33 """
34
34
35 repo = repo.unfiltered()
35 changelog = repo.changelog
36 changelog = repo.changelog
36 clparents = changelog.parentrevs
37 clparents = changelog.parentrevs
37 skip = {changelog.rev(n) for n in state['skip']}
38 skip = {changelog.rev(n) for n in state['skip']}
@@ -139,7 +140,7 b' def load_state(repo):'
139 state = {'current': [], 'good': [], 'bad': [], 'skip': []}
140 state = {'current': [], 'good': [], 'bad': [], 'skip': []}
140 for l in repo.vfs.tryreadlines("bisect.state"):
141 for l in repo.vfs.tryreadlines("bisect.state"):
141 kind, node = l[:-1].split()
142 kind, node = l[:-1].split()
142 node = repo.lookup(node)
143 node = repo.unfiltered().lookup(node)
143 if kind not in state:
144 if kind not in state:
144 raise error.Abort(_("unknown bisect kind %s") % kind)
145 raise error.Abort(_("unknown bisect kind %s") % kind)
145 state[kind].append(node)
146 state[kind].append(node)
@@ -184,7 +185,7 b' def get(repo, status):'
184 """
185 """
185 state = load_state(repo)
186 state = load_state(repo)
186 if status in ('good', 'bad', 'skip', 'current'):
187 if status in ('good', 'bad', 'skip', 'current'):
187 return map(repo.changelog.rev, state[status])
188 return map(repo.unfiltered().changelog.rev, state[status])
188 else:
189 else:
189 # In the following sets, we do *not* call 'bisect()' with more
190 # In the following sets, we do *not* call 'bisect()' with more
190 # than one level of recursion, because that can be very, very
191 # than one level of recursion, because that can be very, very
@@ -268,6 +269,7 b' def label(repo, node):'
268 return None
269 return None
269
270
270 def printresult(ui, repo, state, displayer, nodes, good):
271 def printresult(ui, repo, state, displayer, nodes, good):
272 repo = repo.unfiltered()
271 if len(nodes) == 1:
273 if len(nodes) == 1:
272 # narrowed it down to a single revision
274 # narrowed it down to a single revision
273 if good:
275 if good:
@@ -320,6 +320,8 b' internalstable = sorted(['
320 loaddoc('config', subdir='internals')),
320 loaddoc('config', subdir='internals')),
321 (['extensions', 'extension'], _('Extension API'),
321 (['extensions', 'extension'], _('Extension API'),
322 loaddoc('extensions', subdir='internals')),
322 loaddoc('extensions', subdir='internals')),
323 (['mergestate'], _('Mergestate'),
324 loaddoc('mergestate', subdir='internals')),
323 (['requirements'], _('Repository Requirements'),
325 (['requirements'], _('Repository Requirements'),
324 loaddoc('requirements', subdir='internals')),
326 loaddoc('requirements', subdir='internals')),
325 (['revlogs'], _('Revision Logs'),
327 (['revlogs'], _('Revision Logs'),
@@ -453,7 +455,7 b' def inserttweakrc(ui, topic, doc):'
453 addtopichook('config', inserttweakrc)
455 addtopichook('config', inserttweakrc)
454
456
455 def help_(ui, commands, name, unknowncmd=False, full=True, subtopic=None,
457 def help_(ui, commands, name, unknowncmd=False, full=True, subtopic=None,
456 **opts):
458 fullname=None, **opts):
457 '''
459 '''
458 Generate the help for 'name' as unformatted restructured text. If
460 Generate the help for 'name' as unformatted restructured text. If
459 'name' is None, describe the commands available.
461 'name' is None, describe the commands available.
@@ -689,6 +691,8 b' def help_(ui, commands, name, unknowncmd'
689 for names, header, doc in subtopics[name]:
691 for names, header, doc in subtopics[name]:
690 if subtopic in names:
692 if subtopic in names:
691 break
693 break
694 if not any(subtopic in s[0] for s in subtopics[name]):
695 raise error.UnknownCommand(name)
692
696
693 if not header:
697 if not header:
694 for topic in helptable:
698 for topic in helptable:
@@ -812,8 +816,16 b' def help_(ui, commands, name, unknowncmd'
812 if unknowncmd:
816 if unknowncmd:
813 raise error.UnknownCommand(name)
817 raise error.UnknownCommand(name)
814 else:
818 else:
815 msg = _('no such help topic: %s') % name
819 if fullname:
816 hint = _("try 'hg help --keyword %s'") % name
820 formatname = fullname
821 else:
822 formatname = name
823 if subtopic:
824 hintname = subtopic
825 else:
826 hintname = name
827 msg = _('no such help topic: %s') % formatname
828 hint = _("try 'hg help --keyword %s'") % hintname
817 raise error.Abort(msg, hint=hint)
829 raise error.Abort(msg, hint=hint)
818 else:
830 else:
819 # program name
831 # program name
@@ -848,7 +860,7 b' def formattedhelp(ui, commands, fullname'
848 termwidth = ui.termwidth() - 2
860 termwidth = ui.termwidth() - 2
849 if textwidth <= 0 or termwidth < textwidth:
861 if textwidth <= 0 or termwidth < textwidth:
850 textwidth = termwidth
862 textwidth = termwidth
851 text = help_(ui, commands, name,
863 text = help_(ui, commands, name, fullname=fullname,
852 subtopic=subtopic, unknowncmd=unknowncmd, full=full, **opts)
864 subtopic=subtopic, unknowncmd=unknowncmd, full=full, **opts)
853
865
854 blocks, pruned = minirst.parse(text, keep=keep)
866 blocks, pruned = minirst.parse(text, keep=keep)
@@ -438,6 +438,10 b' effect and style see :hg:`help color`.'
438 ``commands``
438 ``commands``
439 ------------
439 ------------
440
440
441 ``commit.post-status``
442 Show status of files in the working directory after a successful commit.
443 (default: False)
444
441 ``resolve.confirm``
445 ``resolve.confirm``
442 Confirm before performing action if no filename is passed.
446 Confirm before performing action if no filename is passed.
443 (default: False)
447 (default: False)
@@ -875,6 +879,15 b' https://www.mercurial-scm.org/wiki/Missi'
875
879
876 On some systems, the Mercurial installation may lack `zstd` support. Default is `zlib`.
880 On some systems, the Mercurial installation may lack `zstd` support. Default is `zlib`.
877
881
882 ``bookmarks-in-store``
883 Store bookmarks in .hg/store/. This means that bookmarks are shared when
884 using `hg share` regardless of the `-B` option.
885
886 Repositories with this on-disk format require Mercurial version 5.1.
887
888 Disabled by default.
889
890
878 ``graph``
891 ``graph``
879 ---------
892 ---------
880
893
@@ -1767,6 +1780,11 b' statistical text report generated from t'
1767
1780
1768 The option is unused on other formats.
1781 The option is unused on other formats.
1769
1782
1783 ``showtime``
1784 Show time taken as absolute durations, in addition to percentages.
1785 Only used by the ``hotpath`` format.
1786 (default: true)
1787
1770 ``progress``
1788 ``progress``
1771 ------------
1789 ------------
1772
1790
@@ -129,3 +129,16 b' August 2017). This requirement and featu'
129 disappear in a future Mercurial release. The requirement will only
129 disappear in a future Mercurial release. The requirement will only
130 be present on repositories that have opted in to a sparse working
130 be present on repositories that have opted in to a sparse working
131 directory.
131 directory.
132
133 bookmarksinstore
134 ================
135
136 Bookmarks are stored in ``.hg/store/`` instead of directly in ``.hg/``
137 where they used to be stored. The active bookmark is still stored
138 directly in ``.hg/``. This makes them always shared by ``hg share``,
139 whether or not ``-B`` was passed.
140
141 Support for this requirement was added in Mercurial 5.1 (released
142 August 2019). The requirement will only be present on repositories
143 that have opted in to this format (by having
144 ``format.bookmarks-in-store=true`` set when they were created).
@@ -28,8 +28,8 b' File Format'
28 ===========
28 ===========
29
29
30 A revlog begins with a 32-bit big endian integer holding version info
30 A revlog begins with a 32-bit big endian integer holding version info
31 and feature flags. This integer is shared with the first revision
31 and feature flags. This integer overlaps with the first four bytes of
32 entry.
32 the first revision entry.
33
33
34 This integer is logically divided into 2 16-bit shorts. The least
34 This integer is logically divided into 2 16-bit shorts. The least
35 significant half of the integer is the format/version short. The other
35 significant half of the integer is the format/version short. The other
@@ -78,10 +78,10 b' 00 02 00 01'
78 00 03 00 01
78 00 03 00 01
79 v1 + inline + generaldelta
79 v1 + inline + generaldelta
80
80
81 Following the 32-bit header is the remainder of the first index entry.
81 Following the 32-bit header is the remaining 60 bytes of the first index
82 Following that are remaining *index* data. Inlined revision data is
82 entry. Following that are additional *index* entries. Inlined revision
83 possibly located between index entries. More on this layout is described
83 data is possibly located between index entries. More on this inlined
84 below.
84 layout is described below.
85
85
86 Version 1 Format
86 Version 1 Format
87 ================
87 ================
@@ -149,8 +149,12 b' If revision data is not inline, then raw'
149 separate byte container. The offsets from bytes 0-5 and the compressed
149 separate byte container. The offsets from bytes 0-5 and the compressed
150 length from bytes 8-11 define how to access this data.
150 length from bytes 8-11 define how to access this data.
151
151
152 The first 4 bytes of the revlog are shared between the revlog header
152 The 6 byte absolute offset field from the first revlog entry overlaps
153 and the 6 byte absolute offset field from the first revlog entry.
153 with the revlog header. That is, the first 6 bytes of the first revlog
154 entry can be split into four bytes containing the header for the revlog
155 file and an additional two bytes containing the offset for the first
156 entry. Since this is the offset from the beginning of the file for the
157 first revision entry, the two bytes will always be set to zero.
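
For concreteness, decoding those shared bytes looks like this (a sketch of the layout just described, not Mercurial's actual parser):

    import struct

    def parse_first_entry_prefix(data):
        header = struct.unpack('>I', data[:4])[0]
        version = header & 0xFFFF   # least significant short: format/version
        flags = header >> 16        # most significant short: feature flags
        # bytes 4-5 are the tail of entry 0's 6-byte offset field; they are
        # always zero because entry 0 starts at file offset 0
        assert struct.unpack('>H', data[4:6])[0] == 0
        return version, flags

    print(parse_first_entry_prefix(b'\x00\x03\x00\x01\x00\x00'))
    # (1, 3): v1 with the inline and generaldelta flags set
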
154
158
155 Version 2 Format
159 Version 2 Format
156 ================
160 ================
@@ -956,31 +956,34 b' def merge(repo, node, force=None, remind'
956 abort=False):
956 abort=False):
957 """Branch merge with node, resolving changes. Return true if any
957 """Branch merge with node, resolving changes. Return true if any
958 unresolved conflicts."""
958 unresolved conflicts."""
959 if not abort:
959 if abort:
960 stats = mergemod.update(repo, node, branchmerge=True, force=force,
960 return abortmerge(repo.ui, repo)
961 mergeforce=mergeforce, labels=labels)
962 else:
963 ms = mergemod.mergestate.read(repo)
964 if ms.active():
965 # there were conflicts
966 node = ms.localctx.hex()
967 else:
968 # there were no conflicts, mergestate was not stored
969 node = repo['.'].hex()
970
961
971 repo.ui.status(_("aborting the merge, updating back to"
962 stats = mergemod.update(repo, node, branchmerge=True, force=force,
972 " %s\n") % node[:12])
963 mergeforce=mergeforce, labels=labels)
973 stats = mergemod.update(repo, node, branchmerge=False, force=True,
974 labels=labels)
975
976 _showstats(repo, stats)
964 _showstats(repo, stats)
977 if stats.unresolvedcount:
965 if stats.unresolvedcount:
978 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
966 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
979 "or 'hg merge --abort' to abandon\n"))
967 "or 'hg merge --abort' to abandon\n"))
980 elif remind and not abort:
968 elif remind:
981 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
969 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
982 return stats.unresolvedcount > 0
970 return stats.unresolvedcount > 0
983
971
972 def abortmerge(ui, repo):
973 ms = mergemod.mergestate.read(repo)
974 if ms.active():
975 # there were conflicts
976 node = ms.localctx.hex()
977 else:
978 # there were no conflicts, mergestate was not stored
979 node = repo['.'].hex()
980
981 repo.ui.status(_("aborting the merge, updating back to"
982 " %s\n") % node[:12])
983 stats = mergemod.update(repo, node, branchmerge=False, force=True)
984 _showstats(repo, stats)
985 return stats.unresolvedcount > 0
986
984 def _incoming(displaychlist, subreporecurse, ui, repo, source,
987 def _incoming(displaychlist, subreporecurse, ui, repo, source,
985 opts, buffered=False):
988 opts, buffered=False):
986 """
989 """
@@ -1092,9 +1095,9 b' def outgoing(ui, repo, dest, opts):'
1092 recurse()
1095 recurse()
1093 return 0 # exit code is zero since we found outgoing changes
1096 return 0 # exit code is zero since we found outgoing changes
1094
1097
1095 def verify(repo):
1098 def verify(repo, level=None):
1096 """verify the consistency of a repository"""
1099 """verify the consistency of a repository"""
1097 ret = verifymod.verify(repo)
1100 ret = verifymod.verify(repo, level=level)
1098
1101
1099 # Broken subrepo references in hidden csets don't seem worth worrying about,
1102 # Broken subrepo references in hidden csets don't seem worth worrying about,
1100 # since they can't be pushed/pulled, and --hidden can be used if they are a
1103 # since they can't be pushed/pulled, and --hidden can be used if they are a
@@ -38,6 +38,9 b' def hgweb(config, name=None, baseui=None'
38 - list of virtual:real tuples (multi-repo view)
38 - list of virtual:real tuples (multi-repo view)
39 '''
39 '''
40
40
41 if isinstance(config, pycompat.unicode):
42 raise error.ProgrammingError(
43 'Mercurial only supports encoded strings: %r' % config)
41 if ((isinstance(config, bytes) and not os.path.isdir(config)) or
44 if ((isinstance(config, bytes) and not os.path.isdir(config)) or
42 isinstance(config, dict) or isinstance(config, list)):
45 isinstance(config, dict) or isinstance(config, list)):
43 # create a multi-dir interface
46 # create a multi-dir interface
@@ -414,14 +414,10 b' class hgwebdir(object):'
414 return self.makeindex(req, res, tmpl, subdir)
414 return self.makeindex(req, res, tmpl, subdir)
415
415
416 def _virtualdirs():
416 def _virtualdirs():
417 # Check the full virtual path, each parent, and the root ('')
417 # Check the full virtual path, and each parent
418 if virtual != '':
418 yield virtual
419 yield virtual
419 for p in util.finddirs(virtual):
420
420 yield p
421 for p in util.finddirs(virtual):
422 yield p
423
424 yield ''
425
421
426 for virtualrepo in _virtualdirs():
422 for virtualrepo in _virtualdirs():
427 real = repos.get(virtualrepo)
423 real = repos.get(virtualrepo)
@@ -409,12 +409,6 b' def whyunstable(context, mapping):'
409
409
410 whyunstable._requires = {'repo', 'ctx'}
410 whyunstable._requires = {'repo', 'ctx'}
411
411
412 # helper to mark a function as a new-style template keyword; can be removed
413 # once old-style function gets unsupported and new-style becomes the default
414 def _kwfunc(f):
415 f._requires = ()
416 return f
417
418 def commonentry(repo, ctx):
412 def commonentry(repo, ctx):
419 node = scmutil.binnode(ctx)
413 node = scmutil.binnode(ctx)
420 return {
414 return {
@@ -439,8 +433,8 b' def commonentry(repo, ctx):'
439 'branches': nodebranchdict(repo, ctx),
433 'branches': nodebranchdict(repo, ctx),
440 'tags': nodetagsdict(repo, node),
434 'tags': nodetagsdict(repo, node),
441 'bookmarks': nodebookmarksdict(repo, node),
435 'bookmarks': nodebookmarksdict(repo, node),
442 'parent': _kwfunc(lambda context, mapping: parents(ctx)),
436 'parent': lambda context, mapping: parents(ctx),
443 'child': _kwfunc(lambda context, mapping: children(ctx)),
437 'child': lambda context, mapping: children(ctx),
444 }
438 }
445
439
446 def changelistentry(web, ctx):
440 def changelistentry(web, ctx):
@@ -457,9 +451,9 b' def changelistentry(web, ctx):'
457
451
458 entry = commonentry(repo, ctx)
452 entry = commonentry(repo, ctx)
459 entry.update({
453 entry.update({
460 'allparents': _kwfunc(lambda context, mapping: parents(ctx)),
454 'allparents': lambda context, mapping: parents(ctx),
461 'parent': _kwfunc(lambda context, mapping: parents(ctx, rev - 1)),
455 'parent': lambda context, mapping: parents(ctx, rev - 1),
462 'child': _kwfunc(lambda context, mapping: children(ctx, rev + 1)),
456 'child': lambda context, mapping: children(ctx, rev + 1),
463 'changelogtag': showtags,
457 'changelogtag': showtags,
464 'files': files,
458 'files': files,
465 })
459 })
@@ -529,7 +523,7 b' def changesetentry(web, ctx):'
529 changesetbranch=showbranch,
523 changesetbranch=showbranch,
530 files=templateutil.mappedgenerator(_listfilesgen,
524 files=templateutil.mappedgenerator(_listfilesgen,
531 args=(ctx, web.stripecount)),
525 args=(ctx, web.stripecount)),
532 diffsummary=_kwfunc(lambda context, mapping: diffsummary(diffstatsgen)),
526 diffsummary=lambda context, mapping: diffsummary(diffstatsgen),
533 diffstat=diffstats,
527 diffstat=diffstats,
534 archives=web.archivelist(ctx.hex()),
528 archives=web.archivelist(ctx.hex()),
535 **pycompat.strkwargs(commonentry(web.repo, ctx)))
529 **pycompat.strkwargs(commonentry(web.repo, ctx)))
@@ -382,6 +382,7 b' class httppeer(wireprotov1peer.wirepeer)'
382 self._path = path
382 self._path = path
383 self._url = url
383 self._url = url
384 self._caps = caps
384 self._caps = caps
385 self.limitedarguments = caps is not None and 'httppostargs' not in caps
385 self._urlopener = opener
386 self._urlopener = opener
386 self._requestbuilder = requestbuilder
387 self._requestbuilder = requestbuilder
387
388
@@ -750,6 +751,9 b' class httpv2executor(object):'
750
751
751 @interfaceutil.implementer(repository.ipeerv2)
752 @interfaceutil.implementer(repository.ipeerv2)
752 class httpv2peer(object):
753 class httpv2peer(object):
754
755 limitedarguments = False
756
753 def __init__(self, ui, repourl, apipath, opener, requestbuilder,
757 def __init__(self, ui, repourl, apipath, opener, requestbuilder,
754 apidescriptor):
758 apidescriptor):
755 self.ui = ui
759 self.ui = ui
@@ -128,8 +128,7 b' class mixedrepostorecache(_basefilecache'
128 # scmutil.filecache only uses the path for passing back into our
128 # scmutil.filecache only uses the path for passing back into our
129 # join(), so we can safely pass a list of paths and locations
129 # join(), so we can safely pass a list of paths and locations
130 super(mixedrepostorecache, self).__init__(*pathsandlocations)
130 super(mixedrepostorecache, self).__init__(*pathsandlocations)
131 for path, location in pathsandlocations:
131 _cachedfiles.update(pathsandlocations)
132 _cachedfiles.update(pathsandlocations)
133
132
134 def join(self, obj, fnameandlocation):
133 def join(self, obj, fnameandlocation):
135 fname, location = fnameandlocation
134 fname, location = fnameandlocation
@@ -910,6 +909,7 b' class localrepository(object):'
910 'treemanifest',
909 'treemanifest',
911 REVLOGV2_REQUIREMENT,
910 REVLOGV2_REQUIREMENT,
912 SPARSEREVLOG_REQUIREMENT,
911 SPARSEREVLOG_REQUIREMENT,
912 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
913 }
913 }
914 _basesupported = supportedformats | {
914 _basesupported = supportedformats | {
915 'store',
915 'store',
@@ -1069,6 +1069,8 b' class localrepository(object):'
1069 # Signature to cached matcher instance.
1069 # Signature to cached matcher instance.
1070 self._sparsematchercache = {}
1070 self._sparsematchercache = {}
1071
1071
1072 self._extrafilterid = repoview.extrafilter(ui)
1073
1072 def _getvfsward(self, origfunc):
1074 def _getvfsward(self, origfunc):
1073 """build a ward for self.vfs"""
1075 """build a ward for self.vfs"""
1074 rref = weakref.ref(self)
1076 rref = weakref.ref(self)
@@ -1216,11 +1218,14 b' class localrepository(object):'
1216
1218
1217 In other words, there is always only one level of `repoview` "filtering".
1219 In other words, there is always only one level of `repoview` "filtering".
1218 """
1220 """
1221 if self._extrafilterid is not None and '%' not in name:
1222 name = name + '%' + self._extrafilterid
1223
1219 cls = repoview.newtype(self.unfiltered().__class__)
1224 cls = repoview.newtype(self.unfiltered().__class__)
1220 return cls(self, name, visibilityexceptions)
1225 return cls(self, name, visibilityexceptions)
1221
1226
1222 @mixedrepostorecache(('bookmarks', 'plain'), ('bookmarks.current', 'plain'),
1227 @mixedrepostorecache(('bookmarks', 'plain'), ('bookmarks.current', 'plain'),
1223 ('00changelog.i', ''))
1228 ('bookmarks', ''), ('00changelog.i', ''))
1224 def _bookmarks(self):
1229 def _bookmarks(self):
1225 return bookmarks.bmstore(self)
1230 return bookmarks.bmstore(self)
1226
1231
@@ -1982,7 +1987,7 b' class localrepository(object):'
1982 (self.vfs, 'journal.dirstate'),
1987 (self.vfs, 'journal.dirstate'),
1983 (self.vfs, 'journal.branch'),
1988 (self.vfs, 'journal.branch'),
1984 (self.vfs, 'journal.desc'),
1989 (self.vfs, 'journal.desc'),
1985 (self.vfs, 'journal.bookmarks'),
1990 (bookmarks.bookmarksvfs(self), 'journal.bookmarks'),
1986 (self.svfs, 'journal.phaseroots'))
1991 (self.svfs, 'journal.phaseroots'))
1987
1992
1988 def undofiles(self):
1993 def undofiles(self):
@@ -1997,8 +2002,9 b' class localrepository(object):'
1997 encoding.fromlocal(self.dirstate.branch()))
2002 encoding.fromlocal(self.dirstate.branch()))
1998 self.vfs.write("journal.desc",
2003 self.vfs.write("journal.desc",
1999 "%d\n%s\n" % (len(self), desc))
2004 "%d\n%s\n" % (len(self), desc))
2000 self.vfs.write("journal.bookmarks",
2005 bookmarksvfs = bookmarks.bookmarksvfs(self)
2001 self.vfs.tryread("bookmarks"))
2006 bookmarksvfs.write("journal.bookmarks",
2007 bookmarksvfs.tryread("bookmarks"))
2002 self.svfs.write("journal.phaseroots",
2008 self.svfs.write("journal.phaseroots",
2003 self.svfs.tryread("phaseroots"))
2009 self.svfs.tryread("phaseroots"))
2004
2010
@@ -2068,8 +2074,9 b' class localrepository(object):'
2068 vfsmap = {'plain': self.vfs, '': self.svfs}
2074 vfsmap = {'plain': self.vfs, '': self.svfs}
2069 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
2075 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
2070 checkambigfiles=_cachedfiles)
2076 checkambigfiles=_cachedfiles)
2071 if self.vfs.exists('undo.bookmarks'):
2077 bookmarksvfs = bookmarks.bookmarksvfs(self)
2072 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
2078 if bookmarksvfs.exists('undo.bookmarks'):
2079 bookmarksvfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
2073 if self.svfs.exists('undo.phaseroots'):
2080 if self.svfs.exists('undo.phaseroots'):
2074 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
2081 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
2075 self.invalidate()
2082 self.invalidate()
@@ -2152,6 +2159,8 b' class localrepository(object):'
2152 for ctx in self['.'].parents():
2159 for ctx in self['.'].parents():
2153 ctx.manifest() # accessing the manifest is enough
2160 ctx.manifest() # accessing the manifest is enough
2154
2161
2162 # accessing fnode cache warms the cache
2163 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2155 # accessing tags warms the cache
2164 # accessing tags warms the cache
2156 self.tags()
2165 self.tags()
2157 self.filtered('served').tags()
2166 self.filtered('served').tags()
@@ -2362,7 +2371,10 b' class localrepository(object):'
2362 node = fctx.filenode()
2371 node = fctx.filenode()
2363 if node in [fparent1, fparent2]:
2372 if node in [fparent1, fparent2]:
2364 self.ui.debug('reusing %s filelog entry\n' % fname)
2373 self.ui.debug('reusing %s filelog entry\n' % fname)
2365 if manifest1.flags(fname) != fctx.flags():
2374 if ((fparent1 != nullid and
2375 manifest1.flags(fname) != fctx.flags()) or
2376 (fparent2 != nullid and
2377 manifest2.flags(fname) != fctx.flags())):
2366 changelist.append(fname)
2378 changelist.append(fname)
2367 return node
2379 return node
2368
2380
@@ -2556,17 +2568,17 b' class localrepository(object):'
2556 _('note: commit message saved in %s\n') % msgfn)
2568 _('note: commit message saved in %s\n') % msgfn)
2557 raise
2569 raise
2558
2570
2559 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
2571 def commithook():
2560 # hack for command that use a temporary commit (eg: histedit)
2572 # hack for command that use a temporary commit (eg: histedit)
2561 # temporary commit got stripped before hook release
2573 # temporary commit got stripped before hook release
2562 if self.changelog.hasnode(ret):
2574 if self.changelog.hasnode(ret):
2563 self.hook("commit", node=node, parent1=parent1,
2575 self.hook("commit", node=hex(ret), parent1=hookp1,
2564 parent2=parent2)
2576 parent2=hookp2)
2565 self._afterlock(commithook)
2577 self._afterlock(commithook)
2566 return ret
2578 return ret
2567
2579
2568 @unfilteredmethod
2580 @unfilteredmethod
2569 def commitctx(self, ctx, error=False):
2581 def commitctx(self, ctx, error=False, origctx=None):
2570 """Add a new revision to current repository.
2582 """Add a new revision to current repository.
2571 Revision information is passed via the context argument.
2583 Revision information is passed via the context argument.
2572
2584
@@ -2574,6 +2586,12 b' class localrepository(object):'
2574 modified/added/removed files. On merge, it may be wider than the
2586 modified/added/removed files. On merge, it may be wider than the
2575 ctx.files() to be committed, since any file nodes derived directly
2587 ctx.files() to be committed, since any file nodes derived directly
2576 from p1 or p2 are excluded from the committed ctx.files().
2588 from p1 or p2 are excluded from the committed ctx.files().
2589
2590 origctx is for convert to work around the problem that bug
2591 fixes to the files list in changesets change hashes. For
2592 convert to be the identity, it can pass an origctx and this
2593 function will use the same files list when it makes sense to
2594 do so.
2577 """
2595 """
2578
2596
2579 p1, p2 = ctx.p1(), ctx.p2()
2597 p1, p2 = ctx.p1(), ctx.p2()
@@ -2581,10 +2599,13 b' class localrepository(object):'
2581
2599
2582 writecopiesto = self.ui.config('experimental', 'copies.write-to')
2600 writecopiesto = self.ui.config('experimental', 'copies.write-to')
2583 writefilecopymeta = writecopiesto != 'changeset-only'
2601 writefilecopymeta = writecopiesto != 'changeset-only'
2602 writechangesetcopy = (writecopiesto in
2603 ('changeset-only', 'compatibility'))
2584 p1copies, p2copies = None, None
2604 p1copies, p2copies = None, None
2585 if writecopiesto in ('changeset-only', 'compatibility'):
2605 if writechangesetcopy:
2586 p1copies = ctx.p1copies()
2606 p1copies = ctx.p1copies()
2587 p2copies = ctx.p2copies()
2607 p2copies = ctx.p2copies()
2608 filesadded, filesremoved = None, None
2588 with self.lock(), self.transaction("commit") as tr:
2609 with self.lock(), self.transaction("commit") as tr:
2589 trp = weakref.proxy(tr)
2610 trp = weakref.proxy(tr)
2590
2611
@@ -2593,6 +2614,9 b' class localrepository(object):'
2593 self.ui.debug('reusing known manifest\n')
2614 self.ui.debug('reusing known manifest\n')
2594 mn = ctx.manifestnode()
2615 mn = ctx.manifestnode()
2595 files = ctx.files()
2616 files = ctx.files()
2617 if writechangesetcopy:
2618 filesadded = ctx.filesadded()
2619 filesremoved = ctx.filesremoved()
2596 elif ctx.files():
2620 elif ctx.files():
2597 m1ctx = p1.manifestctx()
2621 m1ctx = p1.manifestctx()
2598 m2ctx = p2.manifestctx()
2622 m2ctx = p2.manifestctx()
@@ -2633,10 +2657,51 b' class localrepository(object):'
2633 raise
2657 raise
2634
2658
2635 # update manifest
2659 # update manifest
2636 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2660 removed = [f for f in removed if f in m1 or f in m2]
2637 drop = [f for f in removed if f in m]
2661 drop = sorted([f for f in removed if f in m])
2638 for f in drop:
2662 for f in drop:
2639 del m[f]
2663 del m[f]
2664 if p2.rev() != nullrev:
2665 @util.cachefunc
2666 def mas():
2667 p1n = p1.node()
2668 p2n = p2.node()
2669 cahs = self.changelog.commonancestorsheads(p1n, p2n)
2670 if not cahs:
2671 cahs = [nullrev]
2672 return [self[r].manifest() for r in cahs]
2673 def deletionfromparent(f):
2674 # When a file is removed relative to p1 in a merge, this
2675 # function determines whether the absence is due to a
2676 # deletion from a parent, or whether the merge commit
2677 # itself deletes the file. We decide this by doing a
2678 # simplified three way merge of the manifest entry for
2679 # the file. There are two ways we decide the merge
2680 # itself didn't delete a file:
2681 # - neither parent (nor the merge) contain the file
2682 # - exactly one parent contains the file, and that
2683 # parent has the same filelog entry as the merge
2684 # ancestor (or all of them if there are two). In other
2685 # words, that parent left the file unchanged while the
2686 # other one deleted it.
2687 # One way to think about this is that deleting a file is
2688 # similar to emptying it, so the list of changed files
2689 # should be similar either way. The computation
2690 # described above is not done directly in _filecommit
2691 # when creating the list of changed files, however
2692 # it does something very similar by comparing filelog
2693 # nodes.
2694 if f in m1:
2695 return (f not in m2
2696 and all(f in ma and ma.find(f) == m1.find(f)
2697 for ma in mas()))
2698 elif f in m2:
2699 return all(f in ma and ma.find(f) == m2.find(f)
2700 for ma in mas())
2701 else:
2702 return True
2703 removed = [f for f in removed if not deletionfromparent(f)]
2704
2640 files = changed + removed
2705 files = changed + removed
2641 md = None
2706 md = None
2642 if not files:
2707 if not files:
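
(A toy model of the deletionfromparent rule above, with manifests reduced to {path: node} dicts -- illustrative only, the real code compares manifest entries via find():)

    def deletionfromparent(f, m1, m2, ancestors):
        # True means a parent already deleted f, so the merge itself did not
        if f in m1:
            return f not in m2 and all(
                f in ma and ma[f] == m1[f] for ma in ancestors)
        elif f in m2:
            return all(f in ma and ma[f] == m2[f] for ma in ancestors)
        else:
            return True

    ma = {'a': 1, 'b': 2}          # merge ancestor
    m1 = {'a': 1, 'b': 2}          # p1 kept both files
    m2 = {'a': 1}                  # p2 deleted 'b'
    print(deletionfromparent('b', m1, m2, [ma]))  # True: p2 deleted it
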
@@ -2659,8 +2724,13 b' class localrepository(object):'
2659 mn = mctx.write(trp, linkrev,
2724 mn = mctx.write(trp, linkrev,
2660 p1.manifestnode(), p2.manifestnode(),
2725 p1.manifestnode(), p2.manifestnode(),
2661 added, drop, match=self.narrowmatch())
2726 added, drop, match=self.narrowmatch())
2727
2728 if writechangesetcopy:
2729 filesadded = [f for f in changed
2730 if not (f in m1 or f in m2)]
2731 filesremoved = removed
2662 else:
2732 else:
2663 self.ui.debug('reusing manifest form p1 (listed files '
2733 self.ui.debug('reusing manifest from p1 (listed files '
2664 'actually unchanged)\n')
2734 'actually unchanged)\n')
2665 mn = p1.manifestnode()
2735 mn = p1.manifestnode()
2666 else:
2736 else:
@@ -2668,13 +2738,26 b' class localrepository(object):'
2668 mn = p1.manifestnode()
2738 mn = p1.manifestnode()
2669 files = []
2739 files = []
2670
2740
2741 if writecopiesto == 'changeset-only':
2742 # If writing only to changeset extras, use None to indicate that
2743 # no entry should be written. If writing to both, write an empty
2744 # entry to prevent the reader from falling back to reading
2745 # filelogs.
2746 p1copies = p1copies or None
2747 p2copies = p2copies or None
2748 filesadded = filesadded or None
2749 filesremoved = filesremoved or None
2750
2751 if origctx and origctx.manifestnode() == mn:
2752 files = origctx.files()
2753
2671 # update changelog
2754 # update changelog
2672 self.ui.note(_("committing changelog\n"))
2755 self.ui.note(_("committing changelog\n"))
2673 self.changelog.delayupdate(tr)
2756 self.changelog.delayupdate(tr)
2674 n = self.changelog.add(mn, files, ctx.description(),
2757 n = self.changelog.add(mn, files, ctx.description(),
2675 trp, p1.node(), p2.node(),
2758 trp, p1.node(), p2.node(),
2676 user, ctx.date(), ctx.extra().copy(),
2759 user, ctx.date(), ctx.extra().copy(),
2677 p1copies, p2copies)
2760 p1copies, p2copies, filesadded, filesremoved)
2678 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2761 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2679 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2762 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2680 parent2=xp2)
2763 parent2=xp2)
@@ -3013,6 +3096,9 b' def newreporequirements(ui, createopts):'
3013 if createopts.get('lfs'):
3096 if createopts.get('lfs'):
3014 requirements.add('lfs')
3097 requirements.add('lfs')
3015
3098
3099 if ui.configbool('format', 'bookmarks-in-store'):
3100 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3101
3016 return requirements
3102 return requirements
3017
3103
3018 def filterknowncreateopts(ui, createopts):
3104 def filterknowncreateopts(ui, createopts):
@@ -743,10 +743,15 b' def getrevs(repo, pats, opts):'
743 return match
743 return match
744
744
745 expr = _makerevset(repo, match, pats, slowpath, opts)
745 expr = _makerevset(repo, match, pats, slowpath, opts)
746 if opts.get('graph') and opts.get('rev'):
746 if opts.get('graph'):
747 # User-specified revs might be unsorted, but don't sort before
747 # User-specified revs might be unsorted, but don't sort before
748 # _makerevset because it might depend on the order of revs
748 # _makerevset because it might depend on the order of revs
749 if not (revs.isdescending() or revs.istopo()):
749 if repo.ui.configbool('experimental', 'log.topo'):
750 if not revs.istopo():
751 revs = dagop.toposort(revs, repo.changelog.parentrevs)
752 # TODO: try to iterate the set lazily
753 revs = revset.baseset(list(revs), istopo=True)
754 elif not (revs.isdescending() or revs.istopo()):
750 revs.sort(reverse=True)
755 revs.sort(reverse=True)
751 if expr:
756 if expr:
752 matcher = revset.match(None, expr)
757 matcher = revset.match(None, expr)
@@ -857,7 +862,7 b' def _graphnodeformatter(ui, displayer):'
857 return templ.renderdefault(props)
862 return templ.renderdefault(props)
858 return formatnode
863 return formatnode
859
864
860 def displaygraph(ui, repo, dag, displayer, edgefn, getrenamed=None, props=None):
865 def displaygraph(ui, repo, dag, displayer, edgefn, getcopies=None, props=None):
861 props = props or {}
866 props = props or {}
862 formatnode = _graphnodeformatter(ui, displayer)
867 formatnode = _graphnodeformatter(ui, displayer)
863 state = graphmod.asciistate()
868 state = graphmod.asciistate()
@@ -885,13 +890,7 b' def displaygraph(ui, repo, dag, displaye'
885
890
886 for rev, type, ctx, parents in dag:
891 for rev, type, ctx, parents in dag:
887 char = formatnode(repo, ctx)
892 char = formatnode(repo, ctx)
888 copies = None
893 copies = getcopies(ctx) if getcopies else None
889 if getrenamed and ctx.rev():
890 copies = []
891 for fn in ctx.files():
892 rename = getrenamed(fn, ctx.rev())
893 if rename:
894 copies.append((fn, rename))
895 edges = edgefn(type, char, state, rev, parents)
894 edges = edgefn(type, char, state, rev, parents)
896 firstedge = next(edges)
895 firstedge = next(edges)
897 width = firstedge[2]
896 width = firstedge[2]
@@ -910,16 +909,10 b' def displaygraphrevs(ui, repo, revs, dis'
910 revdag = graphmod.dagwalker(repo, revs)
909 revdag = graphmod.dagwalker(repo, revs)
911 displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges, getrenamed)
910 displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges, getrenamed)
912
911
913 def displayrevs(ui, repo, revs, displayer, getrenamed):
912 def displayrevs(ui, repo, revs, displayer, getcopies):
914 for rev in revs:
913 for rev in revs:
915 ctx = repo[rev]
914 ctx = repo[rev]
916 copies = None
915 copies = getcopies(ctx) if getcopies else None
917 if getrenamed is not None and rev:
918 copies = []
919 for fn in ctx.files():
920 rename = getrenamed(fn, rev)
921 if rename:
922 copies.append((fn, rename))
923 displayer.show(ctx, copies=copies)
916 displayer.show(ctx, copies=copies)
924 displayer.flush(ctx)
917 displayer.flush(ctx)
925 displayer.close()
918 displayer.close()
@@ -506,9 +506,9 b' class manifestdict(object):'
506 if match(fn):
506 if match(fn):
507 yield fn
507 yield fn
508
508
509 # for dirstate.walk, files=['.'] means "walk the whole tree".
509 # for dirstate.walk, files=[''] means "walk the whole tree".
510 # follow that here, too
510 # follow that here, too
511 fset.discard('.')
511 fset.discard('')
512
512
513 for fn in sorted(fset):
513 for fn in sorted(fset):
514 if not self.hasdir(fn):
514 if not self.hasdir(fn):
@@ -1078,9 +1078,9 b' class treemanifest(object):'
1078 fset.remove(fn)
1078 fset.remove(fn)
1079 yield fn
1079 yield fn
1080
1080
1081 # for dirstate.walk, files=['.'] means "walk the whole tree".
1081 # for dirstate.walk, files=[''] means "walk the whole tree".
1082 # follow that here, too
1082 # follow that here, too
1083 fset.discard('.')
1083 fset.discard('')
1084
1084
1085 for fn in sorted(fset):
1085 for fn in sorted(fset):
1086 if not self.hasdir(fn):
1086 if not self.hasdir(fn):
@@ -1088,7 +1088,7 b' class treemanifest(object):'
1088
1088
1089 def _walk(self, match):
1089 def _walk(self, match):
1090 '''Recursively generates matching file names for walk().'''
1090 '''Recursively generates matching file names for walk().'''
1091 visit = match.visitchildrenset(self._dir[:-1] or '.')
1091 visit = match.visitchildrenset(self._dir[:-1])
1092 if not visit:
1092 if not visit:
1093 return
1093 return
1094
1094
@@ -1116,7 +1116,7 b' class treemanifest(object):'
1116 '''recursively generate a new manifest filtered by the match argument.
1116 '''recursively generate a new manifest filtered by the match argument.
1117 '''
1117 '''
1118
1118
1119 visit = match.visitchildrenset(self._dir[:-1] or '.')
1119 visit = match.visitchildrenset(self._dir[:-1])
1120 if visit == 'all':
1120 if visit == 'all':
1121 return self.copy()
1121 return self.copy()
1122 ret = treemanifest(self._dir)
1122 ret = treemanifest(self._dir)
@@ -1275,7 +1275,7 b' class treemanifest(object):'
1275 return m._dirs.get(d, emptytree)._node
1275 return m._dirs.get(d, emptytree)._node
1276
1276
1277 # let's skip investigating things that `match` says we do not need.
1277 # let's skip investigating things that `match` says we do not need.
1278 visit = match.visitchildrenset(self._dir[:-1] or '.')
1278 visit = match.visitchildrenset(self._dir[:-1])
1279 visit = self._loadchildrensetlazy(visit)
1279 visit = self._loadchildrensetlazy(visit)
1280 if visit == 'this' or visit == 'all':
1280 if visit == 'this' or visit == 'all':
1281 visit = None
1281 visit = None
@@ -1294,7 +1294,7 b' class treemanifest(object):'
1294
1294
1295 If `matcher` is provided, it only returns subtrees that match.
1295 If `matcher` is provided, it only returns subtrees that match.
1296 """
1296 """
1297 if matcher and not matcher.visitdir(self._dir[:-1] or '.'):
1297 if matcher and not matcher.visitdir(self._dir[:-1]):
1298 return
1298 return
1299 if not matcher or matcher(self._dir[:-1]):
1299 if not matcher or matcher(self._dir[:-1]):
1300 yield self
1300 yield self
@@ -1417,6 +1417,10 b' class manifestfulltextcache(util.lrucach'
1417 self.write()
1417 self.write()
1418 self._read = False
1418 self._read = False
1419
1419
1420 # an upper bound of what we expect from compression
1421 # (real-life value seems to be "3")
1422 MAXCOMPRESSION = 3
1423
1420 @interfaceutil.implementer(repository.imanifeststorage)
1424 @interfaceutil.implementer(repository.imanifeststorage)
1421 class manifestrevlog(object):
1425 class manifestrevlog(object):
1422 '''A revlog that stores manifest texts. This is responsible for caching the
1426 '''A revlog that stores manifest texts. This is responsible for caching the
@@ -1467,7 +1471,8 b' class manifestrevlog(object):'
1467 self._revlog = revlog.revlog(opener, indexfile,
1471 self._revlog = revlog.revlog(opener, indexfile,
1468 # only root indexfile is cached
1472 # only root indexfile is cached
1469 checkambig=not bool(tree),
1473 checkambig=not bool(tree),
1470 mmaplargeindex=True)
1474 mmaplargeindex=True,
1475 upperboundcomp=MAXCOMPRESSION)
1471
1476
1472 self.index = self._revlog.index
1477 self.index = self._revlog.index
1473 self.version = self._revlog.version
1478 self.version = self._revlog.version
@@ -1526,8 +1531,8 b' class manifestrevlog(object):'
1526
1531
1527 _checkforbidden(added)
1532 _checkforbidden(added)
1528 # combine the changed lists into one sorted iterator
1533 # combine the changed lists into one sorted iterator
1529 work = heapq.merge([(x, False) for x in added],
1534 work = heapq.merge([(x, False) for x in sorted(added)],
1530 [(x, True) for x in removed])
1535 [(x, True) for x in sorted(removed)])
1531
1536
1532 arraytext, deltatext = m.fastdelta(self.fulltextcache[p1], work)
1537 arraytext, deltatext = m.fastdelta(self.fulltextcache[p1], work)
1533 cachedelta = self._revlog.rev(p1), deltatext
1538 cachedelta = self._revlog.rev(p1), deltatext
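[editor's illustration] heapq.merge only produces sorted output when each of its inputs is already sorted; `added` and `removed` can be arbitrary iterables (sets, for instance), hence the explicit sorted() calls above. A standalone sketch of the combined stream:

import heapq

added = {'b', 'a'}            # iteration order of a set is not sorted
removed = {'d', 'c'}
work = heapq.merge([(x, False) for x in sorted(added)],
                   [(x, True) for x in sorted(removed)])
print(list(work))
# [('a', False), ('b', False), ('c', True), ('d', True)]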
@@ -1725,7 +1730,7 b' class manifestlog(object):'
1725 return self._dirmancache[tree][node]
1730 return self._dirmancache[tree][node]
1726
1731
1727 if not self._narrowmatch.always():
1732 if not self._narrowmatch.always():
1728 if not self._narrowmatch.visitdir(tree[:-1] or '.'):
1733 if not self._narrowmatch.visitdir(tree[:-1]):
1729 return excludeddirmanifestctx(tree, node)
1734 return excludeddirmanifestctx(tree, node)
1730 if tree:
1735 if tree:
1731 if self._rootstore._treeondisk:
1736 if self._rootstore._treeondisk:
@@ -1918,7 +1923,7 b' class treemanifestctx(object):'
1918 def _storage(self):
1923 def _storage(self):
1919 narrowmatch = self._manifestlog._narrowmatch
1924 narrowmatch = self._manifestlog._narrowmatch
1920 if not narrowmatch.always():
1925 if not narrowmatch.always():
1921 if not narrowmatch.visitdir(self._dir[:-1] or '.'):
1926 if not narrowmatch.visitdir(self._dir[:-1]):
1922 return excludedmanifestrevlog(self._dir)
1927 return excludedmanifestrevlog(self._dir)
1923 return self._manifestlog.getstorage(self._dir)
1928 return self._manifestlog.getstorage(self._dir)
1924
1929
@@ -17,6 +17,7 b' from . import ('
17 encoding,
17 encoding,
18 error,
18 error,
19 pathutil,
19 pathutil,
20 policy,
20 pycompat,
21 pycompat,
21 util,
22 util,
22 )
23 )
@@ -24,6 +25,8 b' from .utils import ('
24 stringutil,
25 stringutil,
25 )
26 )
26
27
28 rustmod = policy.importrust('filepatterns')
29
27 allpatternkinds = ('re', 'glob', 'path', 'relglob', 'relpath', 'relre',
30 allpatternkinds = ('re', 'glob', 'path', 'relglob', 'relpath', 'relre',
28 'rootglob',
31 'rootglob',
29 'listfile', 'listfile0', 'set', 'include', 'subinclude',
32 'listfile', 'listfile0', 'set', 'include', 'subinclude',
@@ -305,9 +308,6 b' class basematcher(object):'
305
308
306 def __call__(self, fn):
309 def __call__(self, fn):
307 return self.matchfn(fn)
310 return self.matchfn(fn)
308 def __iter__(self):
309 for f in self._files:
310 yield f
311 # Callbacks related to how the matcher is used by dirstate.walk.
311 # Callbacks related to how the matcher is used by dirstate.walk.
312 # Subscribers to these events must monkeypatch the matcher object.
312 # Subscribers to these events must monkeypatch the matcher object.
313 def bad(self, f, msg):
313 def bad(self, f, msg):
@@ -377,7 +377,7 b' class basematcher(object):'
377 the following values (assuming the implementation of visitchildrenset
377 the following values (assuming the implementation of visitchildrenset
378 is capable of recognizing this; some implementations are not).
378 is capable of recognizing this; some implementations are not).
379
379
380 '.' -> {'foo', 'qux'}
380 '' -> {'foo', 'qux'}
381 'baz' -> set()
381 'baz' -> set()
382 'foo' -> {'bar'}
382 'foo' -> {'bar'}
383 # Ideally this would be 'all', but since the prefix nature of matchers
383 # Ideally this would be 'all', but since the prefix nature of matchers
@@ -480,11 +480,19 b' class predicatematcher(basematcher):'
480 or pycompat.byterepr(self.matchfn))
480 or pycompat.byterepr(self.matchfn))
481 return '<predicatenmatcher pred=%s>' % s
481 return '<predicatenmatcher pred=%s>' % s
482
482
483 def normalizerootdir(dir, funcname):
484 if dir == '.':
485 util.nouideprecwarn("match.%s() no longer accepts "
486 "'.', use '' instead." % funcname, '5.1')
487 return ''
488 return dir
489
490
483 class patternmatcher(basematcher):
491 class patternmatcher(basematcher):
484 """Matches a set of (kind, pat, source) against a 'root' directory.
492 """Matches a set of (kind, pat, source) against a 'root' directory.
485
493
486 >>> kindpats = [
494 >>> kindpats = [
487 ... (b're', b'.*\.c$', b''),
495 ... (b're', br'.*\.c$', b''),
488 ... (b'path', b'foo/a', b''),
496 ... (b'path', b'foo/a', b''),
489 ... (b'relpath', b'b', b''),
497 ... (b'relpath', b'b', b''),
490 ... (b'glob', b'*.h', b''),
498 ... (b'glob', b'*.h', b''),
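[editor's illustration] A runnable sketch of the new normalizerootdir() contract, with a plain print standing in for the util.nouideprecwarn() machinery:

def normalizerootdir(dir, funcname):
    # '.' is still accepted for now, but is normalized to ''
    if dir == '.':
        print("match.%s() no longer accepts '.', use '' instead (5.1)"
              % funcname)
        return ''
    return dir

assert normalizerootdir('', 'visitdir') == ''
assert normalizerootdir('.', 'visitdir') == ''       # warns, then normalizes
assert normalizerootdir('foo/bar', 'visitdir') == 'foo/bar'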
@@ -504,7 +512,7 b' class patternmatcher(basematcher):'
504 True
512 True
505
513
506 >>> m.files()
514 >>> m.files()
507 ['.', 'foo/a', 'b', '.']
515 ['', 'foo/a', 'b', '']
508 >>> m.exact(b'foo/a')
516 >>> m.exact(b'foo/a')
509 True
517 True
510 >>> m.exact(b'b')
518 >>> m.exact(b'b')
@@ -522,13 +530,13 b' class patternmatcher(basematcher):'
522
530
523 @propertycache
531 @propertycache
524 def _dirs(self):
532 def _dirs(self):
525 return set(util.dirs(self._fileset)) | {'.'}
533 return set(util.dirs(self._fileset))
526
534
527 def visitdir(self, dir):
535 def visitdir(self, dir):
536 dir = normalizerootdir(dir, 'visitdir')
528 if self._prefix and dir in self._fileset:
537 if self._prefix and dir in self._fileset:
529 return 'all'
538 return 'all'
530 return ('.' in self._fileset or
539 return (dir in self._fileset or
531 dir in self._fileset or
532 dir in self._dirs or
540 dir in self._dirs or
533 any(parentdir in self._fileset
541 any(parentdir in self._fileset
534 for parentdir in util.finddirs(dir)))
542 for parentdir in util.finddirs(dir)))
@@ -561,7 +569,7 b' class _dirchildren(object):'
561 addpath(f)
569 addpath(f)
562
570
563 def addpath(self, path):
571 def addpath(self, path):
564 if path == '.':
572 if path == '':
565 return
573 return
566 dirs = self._dirs
574 dirs = self._dirs
567 findsplitdirs = _dirchildren._findsplitdirs
575 findsplitdirs = _dirchildren._findsplitdirs
@@ -575,16 +583,15 b' class _dirchildren(object):'
575 # yields (dirname, basename) tuples, walking back to the root. This is
583 # yields (dirname, basename) tuples, walking back to the root. This is
576 # very similar to util.finddirs, except:
584 # very similar to util.finddirs, except:
577 # - produces a (dirname, basename) tuple, not just 'dirname'
585 # - produces a (dirname, basename) tuple, not just 'dirname'
578 # - includes root dir
579 # Unlike manifest._splittopdir, this does not suffix `dirname` with a
586 # Unlike manifest._splittopdir, this does not suffix `dirname` with a
580 # slash, and produces '.' for the root instead of ''.
587 # slash.
581 oldpos = len(path)
588 oldpos = len(path)
582 pos = path.rfind('/')
589 pos = path.rfind('/')
583 while pos != -1:
590 while pos != -1:
584 yield path[:pos], path[pos + 1:oldpos]
591 yield path[:pos], path[pos + 1:oldpos]
585 oldpos = pos
592 oldpos = pos
586 pos = path.rfind('/', 0, pos)
593 pos = path.rfind('/', 0, pos)
587 yield '.', path[:oldpos]
594 yield '', path[:oldpos]
588
595
589 def get(self, path):
596 def get(self, path):
590 return self._dirs.get(path, set())
597 return self._dirs.get(path, set())
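[editor's illustration] The root tuple yielded by _findsplitdirs changes from ('.', ...) to ('', ...). A runnable copy of the revised generator shows the whole walk:

def findsplitdirs(path):
    # walk back to the root, yielding (dirname, basename); the root
    # dirname is now '' rather than '.'
    oldpos = len(path)
    pos = path.rfind('/')
    while pos != -1:
        yield path[:pos], path[pos + 1:oldpos]
        oldpos = pos
        pos = path.rfind('/', 0, pos)
    yield '', path[:oldpos]

print(list(findsplitdirs('a/b/c')))
# [('a/b', 'c'), ('a', 'b'), ('', 'a')]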
@@ -603,13 +610,13 b' class includematcher(basematcher):'
603 self._dirs = set(dirs)
610 self._dirs = set(dirs)
604 # parents are directories which are non-recursively included because
611 # parents are directories which are non-recursively included because
605 # they are needed to get to items in _dirs or _roots.
612 # they are needed to get to items in _dirs or _roots.
606 self._parents = set(parents)
613 self._parents = parents
607
614
608 def visitdir(self, dir):
615 def visitdir(self, dir):
616 dir = normalizerootdir(dir, 'visitdir')
609 if self._prefix and dir in self._roots:
617 if self._prefix and dir in self._roots:
610 return 'all'
618 return 'all'
611 return ('.' in self._roots or
619 return (dir in self._roots or
612 dir in self._roots or
613 dir in self._dirs or
620 dir in self._dirs or
614 dir in self._parents or
621 dir in self._parents or
615 any(parentdir in self._roots
622 any(parentdir in self._roots
@@ -632,7 +639,7 b' class includematcher(basematcher):'
632 return 'all'
639 return 'all'
633 # Note: this does *not* include the 'dir in self._parents' case from
640 # Note: this does *not* include the 'dir in self._parents' case from
634 # visitdir, that's handled below.
641 # visitdir, that's handled below.
635 if ('.' in self._roots or
642 if ('' in self._roots or
636 dir in self._roots or
643 dir in self._roots or
637 dir in self._dirs or
644 dir in self._dirs or
638 any(parentdir in self._roots
645 any(parentdir in self._roots
@@ -651,7 +658,7 b' class exactmatcher(basematcher):'
651 r'''Matches the input files exactly. They are interpreted as paths, not
658 r'''Matches the input files exactly. They are interpreted as paths, not
652 patterns (so no kind-prefixes).
659 patterns (so no kind-prefixes).
653
660
654 >>> m = exactmatcher([b'a.txt', b're:.*\.c$'])
661 >>> m = exactmatcher([b'a.txt', br're:.*\.c$'])
655 >>> m(b'a.txt')
662 >>> m(b'a.txt')
656 True
663 True
657 >>> m(b'b.txt')
664 >>> m(b'b.txt')
@@ -664,7 +671,7 b' class exactmatcher(basematcher):'
664 So pattern 're:.*\.c$' is not considered as a regex, but as a file name
671 So pattern 're:.*\.c$' is not considered as a regex, but as a file name
665 >>> m(b'main.c')
672 >>> m(b'main.c')
666 False
673 False
667 >>> m(b're:.*\.c$')
674 >>> m(br're:.*\.c$')
668 True
675 True
669 '''
676 '''
670
677
@@ -680,22 +687,25 b' class exactmatcher(basematcher):'
680
687
681 @propertycache
688 @propertycache
682 def _dirs(self):
689 def _dirs(self):
683 return set(util.dirs(self._fileset)) | {'.'}
690 return set(util.dirs(self._fileset))
684
691
685 def visitdir(self, dir):
692 def visitdir(self, dir):
693 dir = normalizerootdir(dir, 'visitdir')
686 return dir in self._dirs
694 return dir in self._dirs
687
695
688 def visitchildrenset(self, dir):
696 def visitchildrenset(self, dir):
697 dir = normalizerootdir(dir, 'visitchildrenset')
698
689 if not self._fileset or dir not in self._dirs:
699 if not self._fileset or dir not in self._dirs:
690 return set()
700 return set()
691
701
692 candidates = self._fileset | self._dirs - {'.'}
702 candidates = self._fileset | self._dirs - {''}
693 if dir != '.':
703 if dir != '':
694 d = dir + '/'
704 d = dir + '/'
695 candidates = set(c[len(d):] for c in candidates if
705 candidates = set(c[len(d):] for c in candidates if
696 c.startswith(d))
706 c.startswith(d))
697 # self._dirs includes all of the directories, recursively, so if
707 # self._dirs includes all of the directories, recursively, so if
698 # we're attempting to match foo/bar/baz.txt, it'll have '.', 'foo',
708 # we're attempting to match foo/bar/baz.txt, it'll have '', 'foo',
699 # 'foo/bar' in it. Thus we can safely ignore a candidate that has a
709 # 'foo/bar' in it. Thus we can safely ignore a candidate that has a
700 # '/' in it, indicating it's for a subdir-of-a-subdir; the
710 # '/' in it, indicating it's for a subdir-of-a-subdir; the
701 # immediate subdir will be in there without a slash.
711 # immediate subdir will be in there without a slash.
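[editor's illustration] A self-contained sketch of exactmatcher.visitchildrenset() after this change, assuming a matcher built for the single file 'foo/bar/baz.txt' (so _fileset and _dirs hold the values named in the comment above):

fileset = {'foo/bar/baz.txt'}
dirs = {'', 'foo', 'foo/bar'}        # util.dirs() output, root now ''

def visitchildrenset(dir):
    if dir not in dirs:
        return set()
    candidates = fileset | dirs - {''}
    if dir != '':
        d = dir + '/'
        candidates = set(c[len(d):] for c in candidates if c.startswith(d))
    return set(c for c in candidates if '/' not in c)

assert visitchildrenset('') == {'foo'}
assert visitchildrenset('foo') == {'bar'}
assert visitchildrenset('foo/bar') == {'baz.txt'}
assert visitchildrenset('baz') == set()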
@@ -769,7 +779,7 b' class differencematcher(basematcher):'
769 # Possible values for m1: set(...), set()
779 # Possible values for m1: set(...), set()
770 # Possible values for m2: 'this', set(...)
780 # Possible values for m2: 'this', set(...)
771 # We ignore m2's set results. They're possibly incorrect:
781 # We ignore m2's set results. They're possibly incorrect:
772 # m1 = path:dir/subdir, m2=rootfilesin:dir, visitchildrenset('.'):
782 # m1 = path:dir/subdir, m2=rootfilesin:dir, visitchildrenset(''):
773 # m1 returns {'dir'}, m2 returns {'dir'}, if we subtracted we'd
783 # m1 returns {'dir'}, m2 returns {'dir'}, if we subtracted we'd
774 # return set(), which is *not* correct, we still need to visit 'dir'!
784 # return set(), which is *not* correct, we still need to visit 'dir'!
775 return m1_set
785 return m1_set
@@ -915,14 +925,16 b' class subdirmatcher(basematcher):'
915 return self._matcher.matchfn(self._path + "/" + f)
925 return self._matcher.matchfn(self._path + "/" + f)
916
926
917 def visitdir(self, dir):
927 def visitdir(self, dir):
918 if dir == '.':
928 dir = normalizerootdir(dir, 'visitdir')
929 if dir == '':
919 dir = self._path
930 dir = self._path
920 else:
931 else:
921 dir = self._path + "/" + dir
932 dir = self._path + "/" + dir
922 return self._matcher.visitdir(dir)
933 return self._matcher.visitdir(dir)
923
934
924 def visitchildrenset(self, dir):
935 def visitchildrenset(self, dir):
925 if dir == '.':
936 dir = normalizerootdir(dir, 'visitchildrenset')
937 if dir == '':
926 dir = self._path
938 dir = self._path
927 else:
939 else:
928 dir = self._path + "/" + dir
940 dir = self._path + "/" + dir
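[editor's illustration] Both subdirmatcher callbacks now accept '' (rather than '.') as the name of the matcher's own subdirectory before delegating to the outer matcher; the path translation itself is just:

path = 'sub'                        # assumed subdirectory of the matcher

def todir(dir):
    # '' names the subdir root; anything else is relative to it
    return path if dir == '' else path + '/' + dir

assert todir('') == 'sub'
assert todir('x/y') == 'sub/x/y'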
@@ -991,18 +1003,18 b' class prefixdirmatcher(basematcher):'
991
1003
992 @propertycache
1004 @propertycache
993 def _pathdirs(self):
1005 def _pathdirs(self):
994 return set(util.finddirs(self._path)) | {'.'}
1006 return set(util.finddirs(self._path))
995
1007
996 def visitdir(self, dir):
1008 def visitdir(self, dir):
997 if dir == self._path:
1009 if dir == self._path:
998 return self._matcher.visitdir('.')
1010 return self._matcher.visitdir('')
999 if dir.startswith(self._pathprefix):
1011 if dir.startswith(self._pathprefix):
1000 return self._matcher.visitdir(dir[len(self._pathprefix):])
1012 return self._matcher.visitdir(dir[len(self._pathprefix):])
1001 return dir in self._pathdirs
1013 return dir in self._pathdirs
1002
1014
1003 def visitchildrenset(self, dir):
1015 def visitchildrenset(self, dir):
1004 if dir == self._path:
1016 if dir == self._path:
1005 return self._matcher.visitchildrenset('.')
1017 return self._matcher.visitchildrenset('')
1006 if dir.startswith(self._pathprefix):
1018 if dir.startswith(self._pathprefix):
1007 return self._matcher.visitchildrenset(dir[len(self._pathprefix):])
1019 return self._matcher.visitchildrenset(dir[len(self._pathprefix):])
1008 if dir in self._pathdirs:
1020 if dir in self._pathdirs:
@@ -1075,7 +1087,7 b' class unionmatcher(basematcher):'
1075 def patkind(pattern, default=None):
1087 def patkind(pattern, default=None):
1076 '''If pattern is 'kind:pat' with a known kind, return kind.
1088 '''If pattern is 'kind:pat' with a known kind, return kind.
1077
1089
1078 >>> patkind(b're:.*\.c$')
1090 >>> patkind(br're:.*\.c$')
1079 're'
1091 're'
1080 >>> patkind(b'glob:*.c')
1092 >>> patkind(b'glob:*.c')
1081 'glob'
1093 'glob'
@@ -1178,9 +1190,23 b' def _globre(pat):'
1178 return res
1190 return res
1179
1191
1180 def _regex(kind, pat, globsuffix):
1192 def _regex(kind, pat, globsuffix):
1181 '''Convert a (normalized) pattern of any kind into a regular expression.
1193 '''Convert a (normalized) pattern of any kind into a
1194 regular expression.
1182 globsuffix is appended to the regexp of globs.'''
1195 globsuffix is appended to the regexp of globs.'''
1183 if not pat:
1196
1197 if rustmod is not None:
1198 try:
1199 return rustmod.build_single_regex(
1200 kind,
1201 pat,
1202 globsuffix
1203 )
1204 except rustmod.PatternError:
1205 raise error.ProgrammingError(
1206 'not a regex pattern: %s:%s' % (kind, pat)
1207 )
1208
1209 if not pat and kind in ('glob', 'relpath'):
1184 return ''
1210 return ''
1185 if kind == 're':
1211 if kind == 're':
1186 return pat
1212 return pat
@@ -1324,13 +1350,17 b' def _patternrootsanddirs(kindpats):'
1324 if '[' in p or '{' in p or '*' in p or '?' in p:
1350 if '[' in p or '{' in p or '*' in p or '?' in p:
1325 break
1351 break
1326 root.append(p)
1352 root.append(p)
1327 r.append('/'.join(root) or '.')
1353 r.append('/'.join(root))
1328 elif kind in ('relpath', 'path'):
1354 elif kind in ('relpath', 'path'):
1329 r.append(pat or '.')
1355 if pat == '.':
1356 pat = ''
1357 r.append(pat)
1330 elif kind in ('rootfilesin',):
1358 elif kind in ('rootfilesin',):
1331 d.append(pat or '.')
1359 if pat == '.':
1360 pat = ''
1361 d.append(pat)
1332 else: # relglob, re, relre
1362 else: # relglob, re, relre
1333 r.append('.')
1363 r.append('')
1334 return r, d
1364 return r, d
1335
1365
1336 def _roots(kindpats):
1366 def _roots(kindpats):
@@ -1347,31 +1377,33 b' def _rootsdirsandparents(kindpats):'
1347
1377
1348 Returns a tuple of (roots, dirs, parents).
1378 Returns a tuple of (roots, dirs, parents).
1349
1379
1350 >>> _rootsdirsandparents(
1380 >>> r = _rootsdirsandparents(
1351 ... [(b'glob', b'g/h/*', b''), (b'glob', b'g/h', b''),
1381 ... [(b'glob', b'g/h/*', b''), (b'glob', b'g/h', b''),
1352 ... (b'glob', b'g*', b'')])
1382 ... (b'glob', b'g*', b'')])
1353 (['g/h', 'g/h', '.'], [], ['g', '.'])
1383 >>> print(r[0:2], sorted(r[2])) # the set has an unstable output
1354 >>> _rootsdirsandparents(
1384 (['g/h', 'g/h', ''], []) ['', 'g']
1385 >>> r = _rootsdirsandparents(
1355 ... [(b'rootfilesin', b'g/h', b''), (b'rootfilesin', b'', b'')])
1386 ... [(b'rootfilesin', b'g/h', b''), (b'rootfilesin', b'', b'')])
1356 ([], ['g/h', '.'], ['g', '.'])
1387 >>> print(r[0:2], sorted(r[2])) # the set has an unstable output
1357 >>> _rootsdirsandparents(
1388 ([], ['g/h', '']) ['', 'g']
1389 >>> r = _rootsdirsandparents(
1358 ... [(b'relpath', b'r', b''), (b'path', b'p/p', b''),
1390 ... [(b'relpath', b'r', b''), (b'path', b'p/p', b''),
1359 ... (b'path', b'', b'')])
1391 ... (b'path', b'', b'')])
1360 (['r', 'p/p', '.'], [], ['p', '.'])
1392 >>> print(r[0:2], sorted(r[2])) # the set has an unstable output
1361 >>> _rootsdirsandparents(
1393 (['r', 'p/p', ''], []) ['', 'p']
1394 >>> r = _rootsdirsandparents(
1362 ... [(b'relglob', b'rg*', b''), (b're', b're/', b''),
1395 ... [(b'relglob', b'rg*', b''), (b're', b're/', b''),
1363 ... (b'relre', b'rr', b'')])
1396 ... (b'relre', b'rr', b'')])
1364 (['.', '.', '.'], [], ['.'])
1397 >>> print(r[0:2], sorted(r[2])) # the set has an unstable output
1398 (['', '', ''], []) ['']
1365 '''
1399 '''
1366 r, d = _patternrootsanddirs(kindpats)
1400 r, d = _patternrootsanddirs(kindpats)
1367
1401
1368 p = []
1402 p = set()
1369 # Append the parents as non-recursive/exact directories, since they must be
1403 # Add the parents as non-recursive/exact directories, since they must be
1370 # scanned to get to either the roots or the other exact directories.
1404 # scanned to get to either the roots or the other exact directories.
1371 p.extend(util.dirs(d))
1405 p.update(util.dirs(d))
1372 p.extend(util.dirs(r))
1406 p.update(util.dirs(r))
1373 # util.dirs() does not include the root directory, so add it manually
1374 p.append('.')
1375
1407
1376 # FIXME: all uses of this function convert these to sets, do so before
1408 # FIXME: all uses of this function convert these to sets, do so before
1377 # returning.
1409 # returning.
@@ -1421,9 +1453,24 b' def readpatternfile(filepath, warn, sour'
1421 pattern # pattern of the current default type
1453 pattern # pattern of the current default type
1422
1454
1423 if sourceinfo is set, returns a list of tuples:
1455 if sourceinfo is set, returns a list of tuples:
1424 (pattern, lineno, originalline). This is useful to debug ignore patterns.
1456 (pattern, lineno, originalline).
1457 This is useful to debug ignore patterns.
1425 '''
1458 '''
1426
1459
1460 if rustmod is not None:
1461 result, warnings = rustmod.read_pattern_file(
1462 filepath,
1463 bool(warn),
1464 sourceinfo,
1465 )
1466
1467 for warning_params in warnings:
1468 # Can't be easily emitted from Rust, because it would require
1469 # a mechanism for both gettext and calling the `warn` function.
1470 warn(_("%s: ignoring invalid syntax '%s'\n") % warning_params)
1471
1472 return result
1473
1427 syntaxes = {
1474 syntaxes = {
1428 're': 'relre:',
1475 're': 'relre:',
1429 'regexp': 'relre:',
1476 'regexp': 'relre:',
@@ -10,6 +10,7 b' from __future__ import absolute_import'
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import shutil
12 import shutil
13 import stat
13 import struct
14 import struct
14
15
15 from .i18n import _
16 from .i18n import _
@@ -683,7 +684,7 b' class mergestate(object):'
683 def recordactions(self):
684 def recordactions(self):
684 """record remove/add/get actions in the dirstate"""
685 """record remove/add/get actions in the dirstate"""
685 branchmerge = self._repo.dirstate.p2() != nullid
686 branchmerge = self._repo.dirstate.p2() != nullid
686 recordupdates(self._repo, self.actions(), branchmerge)
687 recordupdates(self._repo, self.actions(), branchmerge, None)
687
688
688 def queueremove(self, f):
689 def queueremove(self, f):
689 """queues a file to be removed from the dirstate
690 """queues a file to be removed from the dirstate
@@ -1380,7 +1381,6 b' def calculateupdates(repo, wctx, mctx, a'
1380 # Pick the best bid for each file
1381 # Pick the best bid for each file
1381 repo.ui.note(_('\nauction for merging merge bids\n'))
1382 repo.ui.note(_('\nauction for merging merge bids\n'))
1382 actions = {}
1383 actions = {}
1383 dms = [] # filenames that have dm actions
1384 for f, bids in sorted(fbids.items()):
1384 for f, bids in sorted(fbids.items()):
1385 # bids is a mapping from action method to list of actions
1385 # bids is a mapping from action method to list of actions
1386 # Consensus?
1386 # Consensus?
@@ -1389,8 +1389,6 b' def calculateupdates(repo, wctx, mctx, a'
1389 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1389 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1390 repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
1390 repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
1391 actions[f] = l[0]
1391 actions[f] = l[0]
1392 if m == ACTION_DIR_RENAME_MOVE_LOCAL:
1393 dms.append(f)
1394 continue
1392 continue
1395 # If keep is an option, just do it.
1393 # If keep is an option, just do it.
1396 if ACTION_KEEP in bids:
1394 if ACTION_KEEP in bids:
@@ -1415,18 +1413,7 b' def calculateupdates(repo, wctx, mctx, a'
1415 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
1413 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
1416 (f, m))
1414 (f, m))
1417 actions[f] = l[0]
1415 actions[f] = l[0]
1418 if m == ACTION_DIR_RENAME_MOVE_LOCAL:
1419 dms.append(f)
1420 continue
1416 continue
1421 # Work around 'dm' that can cause multiple actions for the same file
1422 for f in dms:
1423 dm, (f0, flags), msg = actions[f]
1424 assert dm == ACTION_DIR_RENAME_MOVE_LOCAL, dm
1425 if f0 in actions and actions[f0][0] == ACTION_REMOVE:
1426 # We have one bid for removing a file and another for moving it.
1427 # These two could be merged as first move and then delete ...
1428 # but instead drop moving and just delete.
1429 del actions[f]
1430 repo.ui.note(_('end of auction\n\n'))
1417 repo.ui.note(_('end of auction\n\n'))
1431
1418
1432 if wctx.rev() is None:
1419 if wctx.rev() is None:
@@ -1478,13 +1465,17 b' def batchremove(repo, wctx, actions):'
1478 repo.ui.warn(_("current directory was removed\n"
1465 repo.ui.warn(_("current directory was removed\n"
1479 "(consider changing to repo root: %s)\n") % repo.root)
1466 "(consider changing to repo root: %s)\n") % repo.root)
1480
1467
1481 def batchget(repo, mctx, wctx, actions):
1468 def batchget(repo, mctx, wctx, wantfiledata, actions):
1482 """apply gets to the working directory
1469 """apply gets to the working directory
1483
1470
1484 mctx is the context to get from
1471 mctx is the context to get from
1485
1472
1486 yields tuples for progress updates
1473 Yields arbitrarily many (False, tuple) for progress updates, followed by
1474 exactly one (True, filedata). When wantfiledata is false, filedata is an
1475 empty dict. When wantfiledata is true, filedata[f] is a triple (mode, size,
1476 mtime) of the file f written for each action.
1487 """
1477 """
1478 filedata = {}
1488 verbose = repo.ui.verbose
1479 verbose = repo.ui.verbose
1489 fctx = mctx.filectx
1480 fctx = mctx.filectx
1490 ui = repo.ui
1481 ui = repo.ui
@@ -1508,16 +1499,24 b' def batchget(repo, mctx, wctx, actions):'
1508 if repo.wvfs.lexists(conflicting):
1499 if repo.wvfs.lexists(conflicting):
1509 orig = scmutil.backuppath(ui, repo, conflicting)
1500 orig = scmutil.backuppath(ui, repo, conflicting)
1510 util.rename(repo.wjoin(conflicting), orig)
1501 util.rename(repo.wjoin(conflicting), orig)
1511 wctx[f].clearunknown()
1502 wfctx = wctx[f]
1503 wfctx.clearunknown()
1512 atomictemp = ui.configbool("experimental", "update.atomic-file")
1504 atomictemp = ui.configbool("experimental", "update.atomic-file")
1513 wctx[f].write(fctx(f).data(), flags, backgroundclose=True,
1505 size = wfctx.write(fctx(f).data(), flags,
1514 atomictemp=atomictemp)
1506 backgroundclose=True,
1507 atomictemp=atomictemp)
1508 if wantfiledata:
1509 s = wfctx.lstat()
1510 mode = s.st_mode
1511 mtime = s[stat.ST_MTIME]
1512 filedata[f] = ((mode, size, mtime)) # for dirstate.normal
1515 if i == 100:
1513 if i == 100:
1516 yield i, f
1514 yield False, (i, f)
1517 i = 0
1515 i = 0
1518 i += 1
1516 i += 1
1519 if i > 0:
1517 if i > 0:
1520 yield i, f
1518 yield False, (i, f)
1519 yield True, filedata
1521
1520
1522 def _prefetchfiles(repo, ctx, actions):
1521 def _prefetchfiles(repo, ctx, actions):
1523 """Invoke ``scmutil.prefetchfiles()`` for the files relevant to the dict
1522 """Invoke ``scmutil.prefetchfiles()`` for the files relevant to the dict
@@ -1564,14 +1563,17 b' def emptyactions():'
1564 ACTION_PATH_CONFLICT,
1563 ACTION_PATH_CONFLICT,
1565 ACTION_PATH_CONFLICT_RESOLVE))
1564 ACTION_PATH_CONFLICT_RESOLVE))
1566
1565
1567 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
1566 def applyupdates(repo, actions, wctx, mctx, overwrite, wantfiledata,
1567 labels=None):
1568 """apply the merge action list to the working directory
1568 """apply the merge action list to the working directory
1569
1569
1570 wctx is the working copy context
1570 wctx is the working copy context
1571 mctx is the context to be merged into the working copy
1571 mctx is the context to be merged into the working copy
1572
1572
1573 Return a tuple of counts (updated, merged, removed, unresolved) that
1573 Return a tuple of (counts, filedata), where counts is a tuple
1574 describes how many files were affected by the update.
1574 (updated, merged, removed, unresolved) that describes how many
1575 files were affected by the update, and filedata is as described in
1576 batchget.
1575 """
1577 """
1576
1578
1577 _prefetchfiles(repo, mctx, actions)
1579 _prefetchfiles(repo, mctx, actions)
@@ -1663,11 +1665,18 b' def applyupdates(repo, actions, wctx, mc'
1663 # get in parallel.
1665 # get in parallel.
1664 threadsafe = repo.ui.configbool('experimental',
1666 threadsafe = repo.ui.configbool('experimental',
1665 'worker.wdir-get-thread-safe')
1667 'worker.wdir-get-thread-safe')
1666 prog = worker.worker(repo.ui, cost, batchget, (repo, mctx, wctx),
1668 prog = worker.worker(repo.ui, cost, batchget,
1669 (repo, mctx, wctx, wantfiledata),
1667 actions[ACTION_GET],
1670 actions[ACTION_GET],
1668 threadsafe=threadsafe)
1671 threadsafe=threadsafe,
1669 for i, item in prog:
1672 hasretval=True)
1670 progress.increment(step=i, item=item)
1673 getfiledata = {}
1674 for final, res in prog:
1675 if final:
1676 getfiledata = res
1677 else:
1678 i, item = res
1679 progress.increment(step=i, item=item)
1671 updated = len(actions[ACTION_GET])
1680 updated = len(actions[ACTION_GET])
1672
1681
1673 if [a for a in actions[ACTION_GET] if a[0] == '.hgsubstate']:
1682 if [a for a in actions[ACTION_GET] if a[0] == '.hgsubstate']:
@@ -1792,6 +1801,10 b' def applyupdates(repo, actions, wctx, mc'
1792 mfiles = set(a[0] for a in actions[ACTION_MERGE])
1801 mfiles = set(a[0] for a in actions[ACTION_MERGE])
1793 for k, acts in extraactions.iteritems():
1802 for k, acts in extraactions.iteritems():
1794 actions[k].extend(acts)
1803 actions[k].extend(acts)
1804 if k == ACTION_GET and wantfiledata:
1805 # no filedata until mergestate is updated to provide it
1806 for a in acts:
1807 getfiledata[a[0]] = None
1795 # Remove these files from actions[ACTION_MERGE] as well. This is
1808 # Remove these files from actions[ACTION_MERGE] as well. This is
1796 # important because in recordupdates, files in actions[ACTION_MERGE]
1809 # important because in recordupdates, files in actions[ACTION_MERGE]
1797 # are processed after files in other actions, and the merge driver
1810 # are processed after files in other actions, and the merge driver
@@ -1814,9 +1827,10 b' def applyupdates(repo, actions, wctx, mc'
1814 if a[0] in mfiles]
1827 if a[0] in mfiles]
1815
1828
1816 progress.complete()
1829 progress.complete()
1817 return updateresult(updated, merged, removed, unresolved)
1830 assert len(getfiledata) == (len(actions[ACTION_GET]) if wantfiledata else 0)
1831 return updateresult(updated, merged, removed, unresolved), getfiledata
1818
1832
1819 def recordupdates(repo, actions, branchmerge):
1833 def recordupdates(repo, actions, branchmerge, getfiledata):
1820 "record merge actions to the dirstate"
1834 "record merge actions to the dirstate"
1821 # remove (must come first)
1835 # remove (must come first)
1822 for f, args, msg in actions.get(ACTION_REMOVE, []):
1836 for f, args, msg in actions.get(ACTION_REMOVE, []):
@@ -1864,7 +1878,8 b' def recordupdates(repo, actions, branchm'
1864 if branchmerge:
1878 if branchmerge:
1865 repo.dirstate.otherparent(f)
1879 repo.dirstate.otherparent(f)
1866 else:
1880 else:
1867 repo.dirstate.normal(f)
1881 parentfiledata = getfiledata[f] if getfiledata else None
1882 repo.dirstate.normal(f, parentfiledata=parentfiledata)
1868
1883
1869 # merge
1884 # merge
1870 for f, args, msg in actions.get(ACTION_MERGE, []):
1885 for f, args, msg in actions.get(ACTION_MERGE, []):
@@ -1991,14 +2006,10 b' def update(repo, node, branchmerge, forc'
1991 wc = repo[None]
2006 wc = repo[None]
1992 pl = wc.parents()
2007 pl = wc.parents()
1993 p1 = pl[0]
2008 p1 = pl[0]
1994 pas = [None]
2009 p2 = repo[node]
1995 if ancestor is not None:
2010 if ancestor is not None:
1996 pas = [repo[ancestor]]
2011 pas = [repo[ancestor]]
1997
2012 else:
1998 overwrite = force and not branchmerge
1999
2000 p2 = repo[node]
2001 if pas[0] is None:
2002 if repo.ui.configlist('merge', 'preferancestor') == ['*']:
2013 if repo.ui.configlist('merge', 'preferancestor') == ['*']:
2003 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
2014 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
2004 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
2015 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
@@ -2007,6 +2018,7 b' def update(repo, node, branchmerge, forc'
2007
2018
2008 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)
2019 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)
2009
2020
2021 overwrite = force and not branchmerge
2010 ### check phase
2022 ### check phase
2011 if not overwrite:
2023 if not overwrite:
2012 if len(pl) > 1:
2024 if len(pl) > 1:
@@ -2183,12 +2195,15 b' def update(repo, node, branchmerge, forc'
2183 'fsmonitor enabled; enable fsmonitor to improve performance; '
2195 'fsmonitor enabled; enable fsmonitor to improve performance; '
2184 'see "hg help -e fsmonitor")\n'))
2196 'see "hg help -e fsmonitor")\n'))
2185
2197
2186 stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
2198 updatedirstate = not partial and not wc.isinmemory()
2199 wantfiledata = updatedirstate and not branchmerge
2200 stats, getfiledata = applyupdates(repo, actions, wc, p2, overwrite,
2201 wantfiledata, labels=labels)
2187
2202
2188 if not partial and not wc.isinmemory():
2203 if updatedirstate:
2189 with repo.dirstate.parentchange():
2204 with repo.dirstate.parentchange():
2190 repo.setparents(fp1, fp2)
2205 repo.setparents(fp1, fp2)
2191 recordupdates(repo, actions, branchmerge)
2206 recordupdates(repo, actions, branchmerge, getfiledata)
2192 # update completed, clear state
2207 # update completed, clear state
2193 util.unlink(repo.vfs.join('updatestate'))
2208 util.unlink(repo.vfs.join('updatestate'))
2194
2209
@@ -2219,7 +2234,7 b' def graft(repo, ctx, pctx, labels=None, '
2219 pctx - merge base, usually ctx.p1()
2234 pctx - merge base, usually ctx.p1()
2220 labels - merge labels eg ['local', 'graft']
2235 labels - merge labels eg ['local', 'graft']
2221 keepparent - keep second parent if any
2236 keepparent - keep second parent if any
2222 keepparent - if unresolved, keep parent used for the merge
2237 keepconflictparent - if unresolved, keep parent used for the merge
2223
2238
2224 """
2239 """
2225 # If we're grafting a descendant onto an ancestor, be sure to pass
2240 # If we're grafting a descendant onto an ancestor, be sure to pass
@@ -44,6 +44,9 b' def subsubsection(s):'
44 def subsubsubsection(s):
44 def subsubsubsection(s):
45 return "%s\n%s\n\n" % (s, "." * encoding.colwidth(s))
45 return "%s\n%s\n\n" % (s, "." * encoding.colwidth(s))
46
46
47 def subsubsubsubsection(s):
48 return "%s\n%s\n\n" % (s, "'" * encoding.colwidth(s))
49
47 def replace(text, substs):
50 def replace(text, substs):
48 '''
51 '''
49 Apply a list of (find, replace) pairs to a text.
52 Apply a list of (find, replace) pairs to a text.
@@ -7,14 +7,13 b''
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
11
12 from .i18n import _
10 from .i18n import _
13 from . import (
11 from . import (
14 error,
12 error,
15 match as matchmod,
13 match as matchmod,
16 merge,
14 merge,
17 repository,
15 repository,
16 scmutil,
18 sparse,
17 sparse,
19 util,
18 util,
20 )
19 )
@@ -144,15 +143,9 b' def parseconfig(ui, spec):'
144 return includepats, excludepats
143 return includepats, excludepats
145
144
146 def load(repo):
145 def load(repo):
147 try:
146 # Treat "narrowspec does not exist" the same as "narrowspec file exists
148 spec = repo.svfs.read(FILENAME)
147 # and is empty".
149 except IOError as e:
148 spec = repo.svfs.tryread(FILENAME)
150 # Treat "narrowspec does not exist" the same as "narrowspec file exists
151 # and is empty".
152 if e.errno == errno.ENOENT:
153 return set(), set()
154 raise
155
156 return parseconfig(repo.ui, spec)
149 return parseconfig(repo.ui, spec)
157
150
158 def save(repo, includepats, excludepats):
151 def save(repo, includepats, excludepats):
@@ -266,9 +259,12 b' def _writeaddedfiles(repo, pctx, files):'
266 if not repo.wvfs.exists(f):
259 if not repo.wvfs.exists(f):
267 addgaction((f, (mf.flags(f), False), "narrowspec updated"))
260 addgaction((f, (mf.flags(f), False), "narrowspec updated"))
268 merge.applyupdates(repo, actions, wctx=repo[None],
261 merge.applyupdates(repo, actions, wctx=repo[None],
269 mctx=repo['.'], overwrite=False)
262 mctx=repo['.'], overwrite=False, wantfiledata=False)
270
263
271 def checkworkingcopynarrowspec(repo):
264 def checkworkingcopynarrowspec(repo):
265 # Avoid infinite recursion when updating the working copy
266 if getattr(repo, '_updatingnarrowspec', False):
267 return
272 storespec = repo.svfs.tryread(FILENAME)
268 storespec = repo.svfs.tryread(FILENAME)
273 wcspec = repo.vfs.tryread(DIRSTATE_FILENAME)
269 wcspec = repo.vfs.tryread(DIRSTATE_FILENAME)
274 if wcspec != storespec:
270 if wcspec != storespec:
@@ -283,6 +279,7 b' def updateworkingcopy(repo, assumeclean='
283 """
279 """
284 oldspec = repo.vfs.tryread(DIRSTATE_FILENAME)
280 oldspec = repo.vfs.tryread(DIRSTATE_FILENAME)
285 newspec = repo.svfs.tryread(FILENAME)
281 newspec = repo.svfs.tryread(FILENAME)
282 repo._updatingnarrowspec = True
286
283
287 oldincludes, oldexcludes = parseconfig(repo.ui, oldspec)
284 oldincludes, oldexcludes = parseconfig(repo.ui, oldspec)
288 newincludes, newexcludes = parseconfig(repo.ui, newspec)
285 newincludes, newexcludes = parseconfig(repo.ui, newspec)
@@ -292,8 +289,8 b' def updateworkingcopy(repo, assumeclean='
292 removedmatch = matchmod.differencematcher(oldmatch, newmatch)
289 removedmatch = matchmod.differencematcher(oldmatch, newmatch)
293
290
294 ds = repo.dirstate
291 ds = repo.dirstate
295 lookup, status = ds.status(removedmatch, subrepos=[], ignored=False,
292 lookup, status = ds.status(removedmatch, subrepos=[], ignored=True,
296 clean=True, unknown=False)
293 clean=True, unknown=True)
297 trackeddirty = status.modified + status.added
294 trackeddirty = status.modified + status.added
298 clean = status.clean
295 clean = status.clean
299 if assumeclean:
296 if assumeclean:
@@ -302,15 +299,19 b' def updateworkingcopy(repo, assumeclean='
302 else:
299 else:
303 trackeddirty.extend(lookup)
300 trackeddirty.extend(lookup)
304 _deletecleanfiles(repo, clean)
301 _deletecleanfiles(repo, clean)
302 uipathfn = scmutil.getuipathfn(repo)
305 for f in sorted(trackeddirty):
303 for f in sorted(trackeddirty):
306 repo.ui.status(_('not deleting possibly dirty file %s\n') % f)
304 repo.ui.status(_('not deleting possibly dirty file %s\n') % uipathfn(f))
305 for f in sorted(status.unknown):
306 repo.ui.status(_('not deleting unknown file %s\n') % uipathfn(f))
307 for f in sorted(status.ignored):
308 repo.ui.status(_('not deleting ignored file %s\n') % uipathfn(f))
307 for f in clean + trackeddirty:
309 for f in clean + trackeddirty:
308 ds.drop(f)
310 ds.drop(f)
309
311
310 repo.narrowpats = newincludes, newexcludes
311 repo._narrowmatch = newmatch
312 pctx = repo['.']
312 pctx = repo['.']
313 newfiles = [f for f in pctx.manifest().walk(addedmatch) if f not in ds]
313 newfiles = [f for f in pctx.manifest().walk(addedmatch) if f not in ds]
314 for f in newfiles:
314 for f in newfiles:
315 ds.normallookup(f)
315 ds.normallookup(f)
316 _writeaddedfiles(repo, pctx, newfiles)
316 _writeaddedfiles(repo, pctx, newfiles)
317 repo._updatingnarrowspec = False
@@ -93,10 +93,6 b" parsers = policy.importmod(r'parsers')"
93 _calcsize = struct.calcsize
93 _calcsize = struct.calcsize
94 propertycache = util.propertycache
94 propertycache = util.propertycache
95
95
96 # the obsolete feature is not mature enough to be enabled by default.
97 # you have to rely on third party extensions to enable this.
98 _enabled = False
99
100 # Options for obsolescence
96 # Options for obsolescence
101 createmarkersopt = 'createmarkers'
97 createmarkersopt = 'createmarkers'
102 allowunstableopt = 'allowunstable'
98 allowunstableopt = 'allowunstable'
@@ -124,11 +120,6 b' def _getoptionvalue(repo, option):'
124 if 'all' in result:
120 if 'all' in result:
125 return True
121 return True
126
122
127 # For migration purposes, temporarily return true if the config hasn't
128 # been set but _enabled is true.
129 if len(result) == 0 and _enabled:
130 return True
131
132 # Temporary hack for next check
123 # Temporary hack for next check
133 newconfig = repo.ui.config('experimental', 'evolution.createmarkers')
124 newconfig = repo.ui.config('experimental', 'evolution.createmarkers')
134 if newconfig:
125 if newconfig:
@@ -1089,7 +1089,9 b' def filterpatch(ui, headers, match, oper'
1089 return skipfile, skipfile, skipall, newpatches
1089 return skipfile, skipfile, skipall, newpatches
1090 while True:
1090 while True:
1091 resps = messages['help'][operation]
1091 resps = messages['help'][operation]
1092 r = ui.promptchoice("%s %s" % (query, resps))
1092 # IMPORTANT: keep the last line of this prompt short (<40 English
1093 # chars is a good target) because of issue6158.
1094 r = ui.promptchoice("%s\n(enter ? for help) %s" % (query, resps))
1093 ui.write("\n")
1095 ui.write("\n")
1094 if r == 8: # ?
1096 if r == 8: # ?
1095 for c, t in ui.extractchoices(resps)[1]:
1097 for c, t in ui.extractchoices(resps)[1]:
@@ -13,6 +13,9 b' import sys'
13 # Rules for how modules can be loaded. Values are:
13 # Rules for how modules can be loaded. Values are:
14 #
14 #
15 # c - require C extensions
15 # c - require C extensions
16 # rust+c - require Rust and C extensions
17 # rust+c-allow - allow Rust and C extensions with fallback to pure Python
18 # for each
16 # allow - allow pure Python implementation when C loading fails
19 # allow - allow pure Python implementation when C loading fails
17 # cffi - required cffi versions (implemented within pure module)
20 # cffi - required cffi versions (implemented within pure module)
18 # cffi-allow - allow pure Python implementation if cffi version is missing
21 # cffi-allow - allow pure Python implementation if cffi version is missing
@@ -29,6 +32,9 b" policy = b'allow'"
29 b'cffi': (r'cffi', None),
32 b'cffi': (r'cffi', None),
30 b'cffi-allow': (r'cffi', r'pure'),
33 b'cffi-allow': (r'cffi', r'pure'),
31 b'py': (None, r'pure'),
34 b'py': (None, r'pure'),
35 # For now, rust policies impact importrust only
36 b'rust+c': (r'cext', None),
37 b'rust+c-allow': (r'cext', r'pure'),
32 }
38 }
33
39
34 try:
40 try:
@@ -69,7 +75,7 b' def _importfrom(pkgname, modname):'
69 (r'cext', r'bdiff'): 3,
75 (r'cext', r'bdiff'): 3,
70 (r'cext', r'mpatch'): 1,
76 (r'cext', r'mpatch'): 1,
71 (r'cext', r'osutil'): 4,
77 (r'cext', r'osutil'): 4,
72 (r'cext', r'parsers'): 12,
78 (r'cext', r'parsers'): 13,
73 }
79 }
74
80
75 # map import request to other package or module
81 # map import request to other package or module
@@ -107,3 +113,34 b' def importmod(modname):'
107 raise
113 raise
108 pn, mn = _modredirects.get((purepkg, modname), (purepkg, modname))
114 pn, mn = _modredirects.get((purepkg, modname), (purepkg, modname))
109 return _importfrom(pn, mn)
115 return _importfrom(pn, mn)
116
117 def _isrustpermissive():
118 """Assuming the policy is a Rust one, tell if it's permissive."""
119 return policy.endswith(b'-allow')
120
121 def importrust(modname, member=None, default=None):
122 """Import Rust module according to policy and availability.
123
124 If policy isn't a Rust one, this returns `default`.
125
126 If either the module or its member is not available, this returns `default`
127 if policy is permissive and raises `ImportError` if not.
128 """
129 if not policy.startswith(b'rust'):
130 return default
131
132 try:
133 mod = _importfrom(r'rustext', modname)
134 except ImportError:
135 if _isrustpermissive():
136 return default
137 raise
138 if member is None:
139 return mod
140
141 try:
142 return getattr(mod, member)
143 except AttributeError:
144 if _isrustpermissive():
145 return default
146 raise ImportError(r"Cannot import name %s" % member)
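[editor's illustration] Usage of the new helper as seen at call sites elsewhere in this merge (match.py's 'filepatterns', revlog.py's 'ancestor' and 'dagop'); the result is None under a non-Rust policy or, under 'rust+c-allow', when the compiled module is unavailable:

from mercurial import policy

# whole module, or None when Rust isn't mandated/available
rustmod = policy.importrust('filepatterns')

# a single member, with an explicit fallback default
headrevs = policy.importrust('dagop', member='headrevs', default=None)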
@@ -147,6 +147,8 b' def statprofile(ui, fp):'
147 # inconsistent config: profiling.showmin
147 # inconsistent config: profiling.showmin
148 limit = ui.configwith(fraction, 'profiling', 'showmin', 0.05)
148 limit = ui.configwith(fraction, 'profiling', 'showmin', 0.05)
149 kwargs[r'limit'] = limit
149 kwargs[r'limit'] = limit
150 showtime = ui.configbool('profiling', 'showtime')
151 kwargs[r'showtime'] = showtime
150
152
151 statprof.display(fp, data=data, format=displayformat, **kwargs)
153 statprof.display(fp, data=data, format=displayformat, **kwargs)
152
154
@@ -5,7 +5,7 b''
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import, division
9
9
10 import ctypes
10 import ctypes
11 import ctypes.util
11 import ctypes.util
@@ -149,7 +149,7 b' if not pycompat.iswindows:'
149 cmsg.cmsg_type != _SCM_RIGHTS):
149 cmsg.cmsg_type != _SCM_RIGHTS):
150 return []
150 return []
151 rfds = ctypes.cast(cmsg.cmsg_data, ctypes.POINTER(ctypes.c_int))
151 rfds = ctypes.cast(cmsg.cmsg_data, ctypes.POINTER(ctypes.c_int))
152 rfdscount = ((cmsg.cmsg_len - _cmsghdr.cmsg_data.offset) /
152 rfdscount = ((cmsg.cmsg_len - _cmsghdr.cmsg_data.offset) //
153 ctypes.sizeof(ctypes.c_int))
153 ctypes.sizeof(ctypes.c_int))
154 return [rfds[i] for i in pycompat.xrange(rfdscount)]
154 return [rfds[i] for i in pycompat.xrange(rfdscount)]
155
155
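[editor's illustration] With `from __future__ import division` in effect, `/` is true division and would turn the file-descriptor count into a float; `//` keeps it integral:

from __future__ import division

print(24 / 4)     # 6.0 -- true division yields a float
print(24 // 4)    # 6   -- floor division, usable as a count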
@@ -64,8 +64,8 b' class _funcregistrarbase(object):'
64 raise error.ProgrammingError(msg)
64 raise error.ProgrammingError(msg)
65
65
66 if func.__doc__ and not util.safehasattr(func, '_origdoc'):
66 if func.__doc__ and not util.safehasattr(func, '_origdoc'):
67 doc = pycompat.sysbytes(func.__doc__).strip()
67 func._origdoc = func.__doc__.strip()
68 func._origdoc = doc
68 doc = pycompat.sysbytes(func._origdoc)
69 func.__doc__ = pycompat.sysstr(self._formatdoc(decl, doc))
69 func.__doc__ = pycompat.sysstr(self._formatdoc(decl, doc))
70
70
71 self._table[name] = func
71 self._table[name] = func
@@ -338,19 +338,10 b' class templatekeyword(_templateregistrar'
338 '''
338 '''
339 pass
339 pass
340
340
341 # old API (DEPRECATED)
342 @templatekeyword('mykeyword')
343 def mykeywordfunc(repo, ctx, templ, cache, revcache, **args):
344 '''Explanation of this template keyword ....
345 '''
346 pass
347
348 The first string argument is used also in online help.
341 The first string argument is used also in online help.
349
342
350 Optional argument 'requires' should be a collection of resource names
343 Optional argument 'requires' should be a collection of resource names
351 which the template keyword depends on. This also serves as a flag to
344 which the template keyword depends on.
352 switch to the new API. If 'requires' is unspecified, all template
353 keywords and resources are expanded to the function arguments.
354
345
355 'templatekeyword' instance in example above can be used to
346 'templatekeyword' instance in example above can be used to
356 decorate multiple functions.
347 decorate multiple functions.
@@ -362,7 +353,7 b' class templatekeyword(_templateregistrar'
362 Otherwise, explicit 'templatekw.loadkeyword()' is needed.
353 Otherwise, explicit 'templatekw.loadkeyword()' is needed.
363 """
354 """
364
355
365 def _extrasetup(self, name, func, requires=None):
356 def _extrasetup(self, name, func, requires=()):
366 func._requires = requires
357 func._requires = requires
367
358
368 class templatefilter(_templateregistrarbase):
359 class templatefilter(_templateregistrarbase):
@@ -279,7 +279,9 b' def _bookmarkmovements(repo, tostrip):'
279 if rev in tostrip:
279 if rev in tostrip:
280 updatebm.append(m)
280 updatebm.append(m)
281 newbmtarget = None
281 newbmtarget = None
282 if updatebm: # don't compute anything is there is no bookmark to move anyway
282 # If we need to move bookmarks, compute bookmark
283 # targets. Otherwise we can skip doing this logic.
284 if updatebm:
283 # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)),
285 # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)),
284 # but is much faster
286 # but is much faster
285 newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
287 newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
@@ -364,8 +366,9 b' def stripmanifest(repo, striprev, tr, fi'
364 striptrees(repo, tr, striprev, files)
366 striptrees(repo, tr, striprev, files)
365
367
366 def striptrees(repo, tr, striprev, files):
368 def striptrees(repo, tr, striprev, files):
367 if 'treemanifest' in repo.requirements: # safe but unnecessary
369 if 'treemanifest' in repo.requirements:
368 # otherwise
370 # This logic is safe if treemanifest isn't enabled, but also
371 # pointless, so we skip it if treemanifest isn't enabled.
369 for unencoded, encoded, size in repo.store.datafiles():
372 for unencoded, encoded, size in repo.store.datafiles():
370 if (unencoded.startswith('meta/') and
373 if (unencoded.startswith('meta/') and
371 unencoded.endswith('00manifest.i')):
374 unencoded.endswith('00manifest.i')):
@@ -416,7 +419,9 b' def rebuildfncache(ui, repo):'
416
419
417 progress.complete()
420 progress.complete()
418
421
419 if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
422 if 'treemanifest' in repo.requirements:
423 # This logic is safe if treemanifest isn't enabled, but also
424 # pointless, so we skip it if treemanifest isn't enabled.
420 for dir in util.dirs(seenfiles):
425 for dir in util.dirs(seenfiles):
421 i = 'meta/%s/00manifest.i' % dir
426 i = 'meta/%s/00manifest.i' % dir
422 d = 'meta/%s/00manifest.d' % dir
427 d = 'meta/%s/00manifest.d' % dir
@@ -291,6 +291,10 b' class ipeercommandexecutor(interfaceutil'
291 class ipeerrequests(interfaceutil.Interface):
291 class ipeerrequests(interfaceutil.Interface):
292 """Interface for executing commands on a peer."""
292 """Interface for executing commands on a peer."""
293
293
294 limitedarguments = interfaceutil.Attribute(
295 """True if the peer cannot receive large argument value for commands."""
296 )
297
294 def commandexecutor():
298 def commandexecutor():
295 """A context manager that resolves to an ipeercommandexecutor.
299 """A context manager that resolves to an ipeercommandexecutor.
296
300
@@ -329,6 +333,8 b' class ipeerv2(ipeerconnection, ipeercapa'
329 class peer(object):
333 class peer(object):
330 """Base class for peer repositories."""
334 """Base class for peer repositories."""
331
335
336 limitedarguments = False
337
332 def capable(self, name):
338 def capable(self, name):
333 caps = self.capabilities()
339 caps = self.capabilities()
334 if name in caps:
340 if name in caps:
@@ -1650,7 +1656,7 b' class ilocalrepositorymain(interfaceutil'
1650 editor=False, extra=None):
1656 editor=False, extra=None):
1651 """Add a new revision to the repository."""
1657 """Add a new revision to the repository."""
1652
1658
1653 def commitctx(ctx, error=False):
1659 def commitctx(ctx, error=False, origctx=None):
1654 """Commit a commitctx instance to the repository."""
1660 """Commit a commitctx instance to the repository."""
1655
1661
1656 def destroying():
1662 def destroying():
@@ -17,6 +17,10 b' from . import ('
17 phases,
17 phases,
18 pycompat,
18 pycompat,
19 tags as tagsmod,
19 tags as tagsmod,
20 util,
21 )
22 from .utils import (
23 repoviewutil,
20 )
24 )
21
25
22 def hideablerevs(repo):
26 def hideablerevs(repo):
@@ -154,6 +158,35 b" filtertable = {'visible': computehidden,"
154 'immutable': computemutable,
158 'immutable': computemutable,
155 'base': computeimpactable}
159 'base': computeimpactable}
156
160
161 _basefiltername = list(filtertable)
162
163 def extrafilter(ui):
164 """initialize extra filter and return its id
165
166 If extra filtering is configured, we make sure the associated filtered views
167 are declared and return the associated id.
168 """
169 frevs = ui.config('experimental', 'extra-filter-revs')
170 if frevs is None:
171 return None
172
173 fid = pycompat.sysbytes(util.DIGESTS['sha1'](frevs).hexdigest())[:12]
174
175 combine = lambda fname: fname + '%' + fid
176
177 subsettable = repoviewutil.subsettable
178
179 if combine('base') not in filtertable:
180 for name in _basefiltername:
181 def extrafilteredrevs(repo, *args, **kwargs):
182 baserevs = filtertable[name](repo, *args, **kwargs)
183 extrarevs = frozenset(repo.revs(frevs))
184 return baserevs | extrarevs
185 filtertable[combine(name)] = extrafilteredrevs
186 if name in subsettable:
187 subsettable[combine(name)] = combine(subsettable[name])
188 return fid
189
157 def filterrevs(repo, filtername, visibilityexceptions=None):
190 def filterrevs(repo, filtername, visibilityexceptions=None):
158 """returns set of filtered revision for this filter name
191 """returns set of filtered revision for this filter name
159
192
@@ -16,6 +16,7 b' from __future__ import absolute_import'
16 import collections
16 import collections
17 import contextlib
17 import contextlib
18 import errno
18 import errno
19 import io
19 import os
20 import os
20 import struct
21 import struct
21 import zlib
22 import zlib
@@ -97,11 +98,8 b' REVIDX_KNOWN_FLAGS'
97 REVIDX_RAWTEXT_CHANGING_FLAGS
98 REVIDX_RAWTEXT_CHANGING_FLAGS
98
99
99 parsers = policy.importmod(r'parsers')
100 parsers = policy.importmod(r'parsers')
100 try:
101 rustancestor = policy.importrust(r'ancestor')
101 from . import rustext
102 rustdagop = policy.importrust(r'dagop')
102 rustext.__name__ # force actual import (see hgdemandimport)
103 except ImportError:
104 rustext = None
105
103
106 # Aliased for performance.
104 # Aliased for performance.
107 _zlibdecompress = zlib.decompress
105 _zlibdecompress = zlib.decompress
@@ -337,15 +335,21 b' class revlog(object):'
337 configured threshold.
335 configured threshold.
338
336
339 If censorable is True, the revlog can have censored revisions.
337 If censorable is True, the revlog can have censored revisions.
338
339 If `upperboundcomp` is not None, this is the expected maximal gain from
340 compression for the data content.
340 """
341 """
341 def __init__(self, opener, indexfile, datafile=None, checkambig=False,
342 def __init__(self, opener, indexfile, datafile=None, checkambig=False,
342 mmaplargeindex=False, censorable=False):
343 mmaplargeindex=False, censorable=False,
344 upperboundcomp=None):
343 """
345 """
344 create a revlog object
346 create a revlog object
345
347
346 opener is a function that abstracts the file opening operation
348 opener is a function that abstracts the file opening operation
347 and can be used to implement COW semantics or the like.
349 and can be used to implement COW semantics or the like.
350
348 """
351 """
352 self.upperboundcomp = upperboundcomp
349 self.indexfile = indexfile
353 self.indexfile = indexfile
350 self.datafile = datafile or (indexfile[:-2] + ".d")
354 self.datafile = datafile or (indexfile[:-2] + ".d")
351 self.opener = opener
355 self.opener = opener
@@ -825,8 +829,8 b' class revlog(object):'
825 checkrev(r)
829 checkrev(r)
826 # and we're sure ancestors aren't filtered as well
830 # and we're sure ancestors aren't filtered as well
827
831
828 if rustext is not None:
832 if rustancestor is not None:
829 lazyancestors = rustext.ancestor.LazyAncestors
833 lazyancestors = rustancestor.LazyAncestors
830 arg = self.index
834 arg = self.index
831 elif util.safehasattr(parsers, 'rustlazyancestors'):
835 elif util.safehasattr(parsers, 'rustlazyancestors'):
832 lazyancestors = ancestor.rustlazyancestors
836 lazyancestors = ancestor.rustlazyancestors
@@ -915,8 +919,8 b' class revlog(object):'
915 if common is None:
919 if common is None:
916 common = [nullrev]
920 common = [nullrev]
917
921
918 if rustext is not None:
922 if rustancestor is not None:
919 return rustext.ancestor.MissingAncestors(self.index, common)
923 return rustancestor.MissingAncestors(self.index, common)
920 return ancestor.incrementalmissingancestors(self.parentrevs, common)
924 return ancestor.incrementalmissingancestors(self.parentrevs, common)
921
925
922 def findmissingrevs(self, common=None, heads=None):
926 def findmissingrevs(self, common=None, heads=None):
@@ -1130,8 +1134,8 b' class revlog(object):'
1130 return self.index.headrevs()
1134 return self.index.headrevs()
1131 except AttributeError:
1135 except AttributeError:
1132 return self._headrevs()
1136 return self._headrevs()
1133 if rustext is not None:
1137 if rustdagop is not None:
1134 return rustext.dagop.headrevs(self.index, revs)
1138 return rustdagop.headrevs(self.index, revs)
1135 return dagop.headrevs(revs, self._uncheckedparentrevs)
1139 return dagop.headrevs(revs, self._uncheckedparentrevs)
1136
1140
1137 def computephases(self, roots):
1141 def computephases(self, roots):
@@ -1216,14 +1220,25 b' class revlog(object):'
1216 A revision is considered an ancestor of itself.
1220 A revision is considered an ancestor of itself.
1217
1221
1218 The implementation of this is trivial but the use of
1222 The implementation of this is trivial but the use of
1219 commonancestorsheads is not."""
1223 reachableroots is not."""
1220 if a == nullrev:
1224 if a == nullrev:
1221 return True
1225 return True
1222 elif a == b:
1226 elif a == b:
1223 return True
1227 return True
1224 elif a > b:
1228 elif a > b:
1225 return False
1229 return False
1226 return a in self._commonancestorsheads(a, b)
1230 return bool(self.reachableroots(a, [b], [a], includepath=False))
1231
1232 def reachableroots(self, minroot, heads, roots, includepath=False):
1233 """return (heads(::<roots> and <roots>::<heads>))
1234
1235 If includepath is True, return (<roots>::<heads>)."""
1236 try:
1237 return self.index.reachableroots2(minroot, heads, roots,
1238 includepath)
1239 except AttributeError:
1240 return dagop._reachablerootspure(self.parentrevs,
1241 minroot, roots, heads, includepath)
1227
1242
1228 def ancestor(self, a, b):
1243 def ancestor(self, a, b):
1229 """calculate the "best" common ancestor of nodes a and b"""
1244 """calculate the "best" common ancestor of nodes a and b"""
@@ -1340,13 +1355,13 b' class revlog(object):'
1340 """Find the shortest unambiguous prefix that matches node."""
1355 """Find the shortest unambiguous prefix that matches node."""
1341 def isvalid(prefix):
1356 def isvalid(prefix):
1342 try:
1357 try:
1343 node = self._partialmatch(prefix)
1358 matchednode = self._partialmatch(prefix)
1344 except error.AmbiguousPrefixLookupError:
1359 except error.AmbiguousPrefixLookupError:
1345 return False
1360 return False
1346 except error.WdirUnsupported:
1361 except error.WdirUnsupported:
1347 # single 'ff...' match
1362 # single 'ff...' match
1348 return True
1363 return True
1349 if node is None:
1364 if matchednode is None:
1350 raise error.LookupError(node, self.indexfile, _('no node'))
1365 raise error.LookupError(node, self.indexfile, _('no node'))
1351 return True
1366 return True
1352
1367
@@ -2292,7 +2307,7 b' class revlog(object):'
2292
2307
2293 try:
2308 try:
2294 with self._datafp() as f:
2309 with self._datafp() as f:
2295 f.seek(0, 2)
2310 f.seek(0, io.SEEK_END)
2296 actual = f.tell()
2311 actual = f.tell()
2297 dd = actual - expected
2312 dd = actual - expected
2298 except IOError as inst:
2313 except IOError as inst:
@@ -2302,7 +2317,7 b' class revlog(object):'
2302
2317
2303 try:
2318 try:
2304 f = self.opener(self.indexfile)
2319 f = self.opener(self.indexfile)
2305 f.seek(0, 2)
2320 f.seek(0, io.SEEK_END)
2306 actual = f.tell()
2321 actual = f.tell()
2307 f.close()
2322 f.close()
2308 s = self._io.size
2323 s = self._io.size
@@ -679,6 +679,31 b' def _candidategroups(revlog, textlen, p1'
679 # if chain already have too much data, skip base
679 # if chain already have too much data, skip base
680 if deltas_limit < chainsize:
680 if deltas_limit < chainsize:
681 continue
681 continue
682 if sparse and revlog.upperboundcomp is not None:
683 maxcomp = revlog.upperboundcomp
684 basenotsnap = (p1, p2, nullrev)
685 if rev not in basenotsnap and revlog.issnapshot(rev):
686 snapshotdepth = revlog.snapshotdepth(rev)
687 # If text is significantly larger than the base, we can
688 # expect the resulting delta to be proportional to the size
689 # difference
690 revsize = revlog.rawsize(rev)
691 rawsizedistance = max(textlen - revsize, 0)
692 # use an estimate of the compression upper bound.
693 lowestrealisticdeltalen = rawsizedistance // maxcomp
694
695 # check the absolute constraint on the delta size
696 snapshotlimit = textlen >> snapshotdepth
697 if snapshotlimit < lowestrealisticdeltalen:
698 # delta lower bound is larger than accepted upper bound
699 continue
700
701 # check the relative constraint on the delta size
702 revlength = revlog.length(rev)
703 if revlength < lowestrealisticdeltalen:
704 # the delta's probable lower bound is larger than the target base
705 continue
706
682 group.append(rev)
707 group.append(rev)
683 if group:
708 if group:
684 # XXX: in the sparse revlog case, group can become large,
709 # XXX: in the sparse revlog case, group can become large,
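
To make the bound above concrete, here is a hedged standalone sketch of the same filter with a worked example; `maxcomp` stands in for `revlog.upperboundcomp`, and all of the numbers are invented for illustration:

def worthtrying_sketch(textlen, revsize, revlength, snapshotdepth, maxcomp):
    # The delta must at least encode the raw size difference, and the
    # best the compressor can do is divide that by maxcomp.
    lowestrealisticdeltalen = max(textlen - revsize, 0) // maxcomp
    # Absolute constraint: a snapshot at depth d must stay under
    # textlen / 2**d.
    if (textlen >> snapshotdepth) < lowestrealisticdeltalen:
        return False
    # Relative constraint: skip if the estimate already exceeds the
    # stored size of the candidate base itself.
    if revlength < lowestrealisticdeltalen:
        return False
    return True

# A 1 MiB text against a depth-2 snapshot holding 64 KiB of raw content
# stored in 30000 bytes, assuming at best 10x compression: the delta is
# at least ~96 KiB, which passes the 256 KiB depth limit but exceeds the
# 30000-byte base, so the candidate is skipped.
print(worthtrying_sketch(1 << 20, 1 << 16, 30000, 2, 10))  # False
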
@@ -907,6 +932,21 b' class deltacomputer(object):'
907
932
908 def _builddeltainfo(self, revinfo, base, fh):
933 def _builddeltainfo(self, revinfo, base, fh):
909 # can we use the cached delta?
934 # can we use the cached delta?
935 revlog = self.revlog
936 chainbase = revlog.chainbase(base)
937 if revlog._generaldelta:
938 deltabase = base
939 else:
940 deltabase = chainbase
941 snapshotdepth = None
942 if revlog._sparserevlog and deltabase == nullrev:
943 snapshotdepth = 0
944 elif revlog._sparserevlog and revlog.issnapshot(deltabase):
945 # A delta chain should always be one full snapshot,
946 # zero or more semi-snapshots, and zero or more deltas
947 p1, p2 = revlog.rev(revinfo.p1), revlog.rev(revinfo.p2)
948 if deltabase not in (p1, p2) and revlog.issnapshot(deltabase):
949 snapshotdepth = len(revlog._deltachain(deltabase)[0])
910 delta = None
950 delta = None
911 if revinfo.cachedelta:
951 if revinfo.cachedelta:
912 cachebase, cachediff = revinfo.cachedelta
952 cachebase, cachediff = revinfo.cachedelta
@@ -920,31 +960,22 b' class deltacomputer(object):'
920 delta = revinfo.cachedelta[1]
960 delta = revinfo.cachedelta[1]
921 if delta is None:
961 if delta is None:
922 delta = self._builddeltadiff(base, revinfo, fh)
962 delta = self._builddeltadiff(base, revinfo, fh)
923 revlog = self.revlog
963 # snapshotdepth must be neither None nor 0 (a level-0 snapshot)
964 if revlog.upperboundcomp is not None and snapshotdepth:
965 lowestrealisticdeltalen = len(delta) // revlog.upperboundcomp
966 snapshotlimit = revinfo.textlen >> snapshotdepth
967 if snapshotlimit < lowestrealisticdeltalen:
968 return None
969 if revlog.length(base) < lowestrealisticdeltalen:
970 return None
924 header, data = revlog.compress(delta)
971 header, data = revlog.compress(delta)
925 deltalen = len(header) + len(data)
972 deltalen = len(header) + len(data)
926 chainbase = revlog.chainbase(base)
927 offset = revlog.end(len(revlog) - 1)
973 offset = revlog.end(len(revlog) - 1)
928 dist = deltalen + offset - revlog.start(chainbase)
974 dist = deltalen + offset - revlog.start(chainbase)
929 if revlog._generaldelta:
930 deltabase = base
931 else:
932 deltabase = chainbase
933 chainlen, compresseddeltalen = revlog._chaininfo(base)
975 chainlen, compresseddeltalen = revlog._chaininfo(base)
934 chainlen += 1
976 chainlen += 1
935 compresseddeltalen += deltalen
977 compresseddeltalen += deltalen
936
978
937 revlog = self.revlog
938 snapshotdepth = None
939 if deltabase == nullrev:
940 snapshotdepth = 0
941 elif revlog._sparserevlog and revlog.issnapshot(deltabase):
942 # A delta chain should always be one full snapshot,
943 # zero or more semi-snapshots, and zero or more deltas
944 p1, p2 = revlog.rev(revinfo.p1), revlog.rev(revinfo.p2)
945 if deltabase not in (p1, p2) and revlog.issnapshot(deltabase):
946 snapshotdepth = len(revlog._deltachain(deltabase)[0])
947
948 return _deltainfo(dist, deltalen, (header, data), deltabase,
979 return _deltainfo(dist, deltalen, (header, data), deltabase,
949 chainbase, chainlen, compresseddeltalen,
980 chainbase, chainlen, compresseddeltalen,
950 snapshotdepth)
981 snapshotdepth)
@@ -1002,8 +1033,9 b' class deltacomputer(object):'
1002 nominateddeltas.append(deltainfo)
1033 nominateddeltas.append(deltainfo)
1003 for candidaterev in candidaterevs:
1034 for candidaterev in candidaterevs:
1004 candidatedelta = self._builddeltainfo(revinfo, candidaterev, fh)
1035 candidatedelta = self._builddeltainfo(revinfo, candidaterev, fh)
1005 if isgooddeltainfo(self.revlog, candidatedelta, revinfo):
1036 if candidatedelta is not None:
1006 nominateddeltas.append(candidatedelta)
1037 if isgooddeltainfo(self.revlog, candidatedelta, revinfo):
1038 nominateddeltas.append(candidatedelta)
1007 if nominateddeltas:
1039 if nominateddeltas:
1008 deltainfo = min(nominateddeltas, key=lambda x: x.deltalen)
1040 deltainfo = min(nominateddeltas, key=lambda x: x.deltalen)
1009 if deltainfo is not None:
1041 if deltainfo is not None:
@@ -52,6 +52,9 b' generatorset = smartset.generatorset'
52 spanset = smartset.spanset
52 spanset = smartset.spanset
53 fullreposet = smartset.fullreposet
53 fullreposet = smartset.fullreposet
54
54
55 # revisions not included in all(), but populated if specified
56 _virtualrevs = (node.nullrev, node.wdirrev)
57
55 # Constants for ordering requirement, used in getset():
58 # Constants for ordering requirement, used in getset():
56 #
59 #
57 # If 'define', any nested functions and operations MAY change the ordering of
60 # If 'define', any nested functions and operations MAY change the ordering of
@@ -120,8 +123,7 b' def stringset(repo, subset, x, order):'
120 if not x:
123 if not x:
121 raise error.ParseError(_("empty string is not a valid revision"))
124 raise error.ParseError(_("empty string is not a valid revision"))
122 x = scmutil.intrev(scmutil.revsymbol(repo, x))
125 x = scmutil.intrev(scmutil.revsymbol(repo, x))
123 if (x in subset
126 if x in subset or x in _virtualrevs and isinstance(subset, fullreposet):
124 or x == node.nullrev and isinstance(subset, fullreposet)):
125 return baseset([x])
127 return baseset([x])
126 return baseset()
128 return baseset()
127
129
@@ -1359,8 +1361,13 b' def merge(repo, subset, x):'
1359 # i18n: "merge" is a keyword
1361 # i18n: "merge" is a keyword
1360 getargs(x, 0, 0, _("merge takes no arguments"))
1362 getargs(x, 0, 0, _("merge takes no arguments"))
1361 cl = repo.changelog
1363 cl = repo.changelog
1362 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
1364 nullrev = node.nullrev
1363 condrepr='<merge>')
1365 def ismerge(r):
1366 try:
1367 return cl.parentrevs(r)[1] != nullrev
1368 except error.WdirUnsupported:
1369 return bool(repo[r].p2())
1370 return subset.filter(ismerge, condrepr='<merge>')
1364
1371
1365 @predicate('branchpoint()', safe=True)
1372 @predicate('branchpoint()', safe=True)
1366 def branchpoint(repo, subset, x):
1373 def branchpoint(repo, subset, x):
@@ -1847,7 +1854,7 b' def rev(repo, subset, x):'
1847 except (TypeError, ValueError):
1854 except (TypeError, ValueError):
1848 # i18n: "rev" is a keyword
1855 # i18n: "rev" is a keyword
1849 raise error.ParseError(_("rev expects a number"))
1856 raise error.ParseError(_("rev expects a number"))
1850 if l not in repo.changelog and l not in (node.nullrev, node.wdirrev):
1857 if l not in repo.changelog and l not in _virtualrevs:
1851 return baseset()
1858 return baseset()
1852 return subset & baseset([l])
1859 return subset & baseset([l])
1853
1860
@@ -2262,7 +2269,7 b' def _orderedlist(repo, subset, x):'
2262 if r in seen:
2269 if r in seen:
2263 continue
2270 continue
2264 if (r in subset
2271 if (r in subset
2265 or r == node.nullrev and isinstance(subset, fullreposet)):
2272 or r in _virtualrevs and isinstance(subset, fullreposet)):
2266 ls.append(r)
2273 ls.append(r)
2267 seen.add(r)
2274 seen.add(r)
2268 return baseset(ls)
2275 return baseset(ls)
@@ -1247,6 +1247,28 b' def getrenamedfn(repo, endrev=None):'
1247
1247
1248 return getrenamed
1248 return getrenamed
1249
1249
1250 def getcopiesfn(repo, endrev=None):
1251 if copiesmod.usechangesetcentricalgo(repo):
1252 def copiesfn(ctx):
1253 if ctx.p2copies():
1254 allcopies = ctx.p1copies().copy()
1255 # There should be no overlap
1256 allcopies.update(ctx.p2copies())
1257 return sorted(allcopies.items())
1258 else:
1259 return sorted(ctx.p1copies().items())
1260 else:
1261 getrenamed = getrenamedfn(repo, endrev)
1262 def copiesfn(ctx):
1263 copies = []
1264 for fn in ctx.files():
1265 rename = getrenamed(fn, ctx.rev())
1266 if rename:
1267 copies.append((fn, rename))
1268 return copies
1269
1270 return copiesfn
1271
1250 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
1272 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
1251 """Update the dirstate to reflect the intent of copying src to dst. For
1273 """Update the dirstate to reflect the intent of copying src to dst. For
1252 different reasons it might not end with dst being marked as copied from src.
1274 different reasons it might not end with dst being marked as copied from src.
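
A hypothetical usage sketch for the new helper (the surrounding `repo` and `ui` objects and the revset are assumptions, not part of this change): the returned callable takes a changectx and yields sorted (destination, source) pairs, whichever copy-storage scheme is active.

# Hypothetical sketch: report the copies recorded by each draft revision.
copiesfn = getcopiesfn(repo)
for rev in repo.revs(b'draft()'):
    for dst, src in copiesfn(repo[rev]):
        ui.write(b'%d: %s <- %s\n' % (rev, dst, src))
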
@@ -1519,7 +1541,12 b' def extdatasource(repo, source):'
1519 pass # we ignore data for nodes that don't exist locally
1541 pass # we ignore data for nodes that don't exist locally
1520 finally:
1542 finally:
1521 if proc:
1543 if proc:
1522 proc.communicate()
1544 try:
1545 proc.communicate()
1546 except ValueError:
1547 # This happens if we started iterating src and then
1548 # get a parse error on a line. It should be safe to ignore.
1549 pass
1523 if src:
1550 if src:
1524 src.close()
1551 src.close()
1525 if proc and proc.returncode != 0:
1552 if proc and proc.returncode != 0:
@@ -110,22 +110,23 b' class partialdiscovery(object):'
110 (all tracked revisions are known locally)
110 (all tracked revisions are known locally)
111 """
111 """
112
112
113 def __init__(self, repo, targetheads):
113 def __init__(self, repo, targetheads, respectsize):
114 self._repo = repo
114 self._repo = repo
115 self._targetheads = targetheads
115 self._targetheads = targetheads
116 self._common = repo.changelog.incrementalmissingrevs()
116 self._common = repo.changelog.incrementalmissingrevs()
117 self._undecided = None
117 self._undecided = None
118 self.missing = set()
118 self.missing = set()
119 self._childrenmap = None
119 self._childrenmap = None
120 self._respectsize = respectsize
120
121
121 def addcommons(self, commons):
122 def addcommons(self, commons):
122 """registrer nodes known as common"""
123 """register nodes known as common"""
123 self._common.addbases(commons)
124 self._common.addbases(commons)
124 if self._undecided is not None:
125 if self._undecided is not None:
125 self._common.removeancestorsfrom(self._undecided)
126 self._common.removeancestorsfrom(self._undecided)
126
127
127 def addmissings(self, missings):
128 def addmissings(self, missings):
128 """registrer some nodes as missing"""
129 """register some nodes as missing"""
129 newmissing = self._repo.revs('%ld::%ld', missings, self.undecided)
130 newmissing = self._repo.revs('%ld::%ld', missings, self.undecided)
130 if newmissing:
131 if newmissing:
131 self.missing.update(newmissing)
132 self.missing.update(newmissing)
@@ -241,11 +242,13 b' class partialdiscovery(object):'
241
242
242 # update from roots
243 # update from roots
243 revsroots = set(repo.revs('roots(%ld)', revs))
244 revsroots = set(repo.revs('roots(%ld)', revs))
244
245 childrenrevs = self._childrengetter()
245 childrenrevs = self._childrengetter()
246
247 _updatesample(revs, revsroots, sample, childrenrevs)
246 _updatesample(revs, revsroots, sample, childrenrevs)
248 assert sample
247 assert sample
248
249 if not self._respectsize:
250 size = max(size, min(len(revsroots), len(revsheads)))
251
249 sample = _limitsample(sample, size)
252 sample = _limitsample(sample, size)
250 if len(sample) < size:
253 if len(sample) < size:
251 more = size - len(sample)
254 more = size - len(sample)
@@ -256,7 +259,8 b' def findcommonheads(ui, local, remote,'
256 initialsamplesize=100,
259 initialsamplesize=100,
257 fullsamplesize=200,
260 fullsamplesize=200,
258 abortwhenunrelated=True,
261 abortwhenunrelated=True,
259 ancestorsof=None):
262 ancestorsof=None,
263 samplegrowth=1.05):
260 '''Return a tuple (common, anyincoming, remoteheads) used to identify
264 '''Return a tuple (common, anyincoming, remoteheads) used to identify
261 missing nodes from or in remote.
265 missing nodes from or in remote.
262 '''
266 '''
@@ -275,9 +279,63 b' def findcommonheads(ui, local, remote,'
275 # early exit if we know all the specified remote heads already
279 # early exit if we know all the specified remote heads already
276 ui.debug("query 1; heads\n")
280 ui.debug("query 1; heads\n")
277 roundtrips += 1
281 roundtrips += 1
278 sample = _limitsample(ownheads, initialsamplesize)
282 # We also ask remote about all the local heads. That set can be arbitrarily
279 # indices between sample and externalized version must match
283 # large, so we used to limit its size to `initialsamplesize`. We no longer
280 sample = list(sample)
284 # do so, as it proved counterproductive. The skipped heads could lead to a
285 # large "undecided" set, slower to clarify than if we asked the
286 # question for all heads right away.
287 #
288 # We are already fetching all server heads using the `heads` command,
289 # so sending an equivalent number of heads the other way should not have a
290 # significant impact. In addition, it is very likely that we are going to
291 # have to issue "known" requests for an equivalent number of revisions in
292 # order to decide if these heads are common or missing.
293 #
294 # A detailed analysis follows.
295 #
296 # Case A: local and server both have few heads
297 #
298 # Ownheads is below initialsamplesize, so the limit would have no effect.
299 #
300 # Case B: local has few heads and server has many
301 #
302 # Ownheads is below initialsamplesize, so the limit would have no effect.
303 #
304 # Case C: local and server both have many heads
305 #
306 # We now transfer some more data, but not significantly more than is
307 # already transferred to carry the server heads.
308 #
309 # Case D: local has many heads, server has few
310 #
311 # D.1 local heads are mostly known remotely
312 #
313 # All the known heads will have been part of a `known` request at some
314 # point for the discovery to finish. Sending them all earlier actually
315 # helps.
316 #
317 # (This case is fairly unlikely; it requires the numerous heads to all
318 # be merged server side into only a few heads.)
319 #
320 # D.2 local heads are mostly missing remotely
321 #
322 # To determine that the heads are missing, we'll have to issue `known`
323 # requests for them or one of their ancestors. The number of `known`
324 # requests will likely be of the same order of magnitude as the number
325 # of local heads.
326 #
327 # The only case where we can be more efficient using `known` requests on
328 # ancestors is when all the "missing" local heads are based on a
329 # few changesets, also "missing". This means we would have a "complex"
330 # graph (with many heads) attached to, but largely independent of, the
331 # "simple" graph on the server. This is a fairly unusual case and has
332 # not been met in the wild so far.
333 if remote.limitedarguments:
334 sample = _limitsample(ownheads, initialsamplesize)
335 # indices between sample and externalized version must match
336 sample = list(sample)
337 else:
338 sample = ownheads
281
339
282 with remote.commandexecutor() as e:
340 with remote.commandexecutor() as e:
283 fheads = e.callcommand('heads', {})
341 fheads = e.callcommand('heads', {})
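
`_limitsample` caps a candidate set at the desired size by random selection; a minimal sketch of that behavior follows (an approximation for illustration, not the exact helper from setdiscovery.py):

import random

def limitsample_sketch(sample, desiredlen):
    # Keep everything when the sample is already small enough...
    if len(sample) <= desiredlen:
        return set(sample)
    # ...otherwise pick desiredlen entries uniformly at random.
    return set(random.sample(list(sample), desiredlen))

With `samplegrowth=1.05`, each full-sample round against a server without argument limits also lets the next round's `fullsamplesize` grow by five percent, trading slightly larger queries for fewer roundtrips.
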
@@ -318,7 +376,7 b' def findcommonheads(ui, local, remote,'
318
376
319 # full blown discovery
377 # full blown discovery
320
378
321 disco = partialdiscovery(local, ownheads)
379 disco = partialdiscovery(local, ownheads, remote.limitedarguments)
322 # treat remote heads (and maybe own heads) as a first implicit sample
380 # treat remote heads (and maybe own heads) as a first implicit sample
323 # response
381 # response
324 disco.addcommons(knownsrvheads)
382 disco.addcommons(knownsrvheads)
@@ -335,6 +393,8 b' def findcommonheads(ui, local, remote,'
335 ui.debug("taking initial sample\n")
393 ui.debug("taking initial sample\n")
336 samplefunc = disco.takefullsample
394 samplefunc = disco.takefullsample
337 targetsize = fullsamplesize
395 targetsize = fullsamplesize
396 if not remote.limitedarguments:
397 fullsamplesize = int(fullsamplesize * samplegrowth)
338 else:
398 else:
339 # use even cheaper initial sample
399 # use even cheaper initial sample
340 ui.debug("taking quick initial sample\n")
400 ui.debug("taking quick initial sample\n")
@@ -27,8 +27,8 b' import errno'
27 import itertools
27 import itertools
28 import stat
28 import stat
29
29
30 from mercurial.i18n import _
30 from .i18n import _
31 from mercurial import (
31 from . import (
32 bookmarks,
32 bookmarks,
33 bundle2,
33 bundle2,
34 bundlerepo,
34 bundlerepo,
@@ -45,37 +45,17 b' from mercurial import ('
45 patch,
45 patch,
46 phases,
46 phases,
47 pycompat,
47 pycompat,
48 registrar,
49 repair,
48 repair,
50 scmutil,
49 scmutil,
51 templatefilters,
50 templatefilters,
52 util,
51 util,
53 vfs as vfsmod,
52 vfs as vfsmod,
54 )
53 )
55
54 from .utils import (
56 from . import (
57 rebase,
58 )
59 from mercurial.utils import (
60 dateutil,
55 dateutil,
61 stringutil,
56 stringutil,
62 )
57 )
63
58
64 cmdtable = {}
65 command = registrar.command(cmdtable)
66 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
67 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
68 # be specifying the version(s) of Mercurial they are tested with, or
69 # leave the attribute unspecified.
70 testedwith = 'ships-with-hg-core'
71
72 configtable = {}
73 configitem = registrar.configitem(configtable)
74
75 configitem('shelve', 'maxbackups',
76 default=10,
77 )
78
79 backupdir = 'shelve-backup'
59 backupdir = 'shelve-backup'
80 shelvedir = 'shelved'
60 shelvedir = 'shelved'
81 shelvefileextensions = ['hg', 'patch', 'shelve']
61 shelvefileextensions = ['hg', 'patch', 'shelve']
@@ -451,8 +431,6 b' def createcmd(ui, repo, pats, opts):'
451 def _docreatecmd(ui, repo, pats, opts):
431 def _docreatecmd(ui, repo, pats, opts):
452 wctx = repo[None]
432 wctx = repo[None]
453 parents = wctx.parents()
433 parents = wctx.parents()
454 if len(parents) > 1:
455 raise error.Abort(_('cannot shelve while merging'))
456 parent = parents[0]
434 parent = parents[0]
457 origbranch = wctx.branch()
435 origbranch = wctx.branch()
458
436
@@ -646,7 +624,30 b' def checkparents(repo, state):'
646 raise error.Abort(_('working directory parents do not match unshelve '
624 raise error.Abort(_('working directory parents do not match unshelve '
647 'state'))
625 'state'))
648
626
649 def unshelveabort(ui, repo, state, opts):
627 def _loadshelvedstate(ui, repo, opts):
628 try:
629 state = shelvedstate.load(repo)
630 if opts.get('keep') is None:
631 opts['keep'] = state.keep
632 except IOError as err:
633 if err.errno != errno.ENOENT:
634 raise
635 cmdutil.wrongtooltocontinue(repo, _('unshelve'))
636 except error.CorruptedState as err:
637 ui.debug(pycompat.bytestr(err) + '\n')
638 if opts.get('continue'):
639 msg = _('corrupted shelved state file')
640 hint = _('please run hg unshelve --abort to abort unshelve '
641 'operation')
642 raise error.Abort(msg, hint=hint)
643 elif opts.get('abort'):
644 shelvedstate.clear(repo)
645 raise error.Abort(_('could not read shelved state file, your '
646 'working copy may be in an unexpected state\n'
647 'please update to some commit\n'))
648 return state
649
650 def unshelveabort(ui, repo, state):
650 """subcommand that abort an in-progress unshelve"""
651 """subcommand that abort an in-progress unshelve"""
651 with repo.lock():
652 with repo.lock():
652 try:
653 try:
@@ -656,11 +657,6 b' def unshelveabort(ui, repo, state, opts)'
656 if (state.activebookmark
657 if (state.activebookmark
657 and state.activebookmark in repo._bookmarks):
658 and state.activebookmark in repo._bookmarks):
658 bookmarks.activate(repo, state.activebookmark)
659 bookmarks.activate(repo, state.activebookmark)
659
660 if repo.vfs.exists('unshelverebasestate'):
661 repo.vfs.rename('unshelverebasestate', 'rebasestate')
662 rebase.clearstatus(repo)
663
664 mergefiles(ui, repo, state.wctx, state.pendingctx)
660 mergefiles(ui, repo, state.wctx, state.pendingctx)
665 if not phases.supportinternal(repo):
661 if not phases.supportinternal(repo):
666 repair.strip(ui, repo, state.nodestoremove, backup=False,
662 repair.strip(ui, repo, state.nodestoremove, backup=False,
@@ -669,6 +665,12 b' def unshelveabort(ui, repo, state, opts)'
669 shelvedstate.clear(repo)
665 shelvedstate.clear(repo)
670 ui.warn(_("unshelve of '%s' aborted\n") % state.name)
666 ui.warn(_("unshelve of '%s' aborted\n") % state.name)
671
667
668 def hgabortunshelve(ui, repo):
669 """logic to abort unshelve using 'hg abort"""
670 with repo.wlock():
671 state = _loadshelvedstate(ui, repo, {'abort' : True})
672 return unshelveabort(ui, repo, state)
673
672 def mergefiles(ui, repo, wctx, shelvectx):
674 def mergefiles(ui, repo, wctx, shelvectx):
673 """updates to wctx and merges the changes from shelvectx into the
675 """updates to wctx and merges the changes from shelvectx into the
674 dirstate."""
676 dirstate."""
@@ -692,11 +694,11 b' def unshelvecleanup(ui, repo, name, opts'
692 if shfile.exists():
694 if shfile.exists():
693 shfile.movetobackup()
695 shfile.movetobackup()
694 cleanupoldbackups(repo)
696 cleanupoldbackups(repo)
695
697 def unshelvecontinue(ui, repo, state, opts, basename=None):
696 def unshelvecontinue(ui, repo, state, opts):
697 """subcommand to continue an in-progress unshelve"""
698 """subcommand to continue an in-progress unshelve"""
698 # We're finishing off a merge. First parent is our original
699 # We're finishing off a merge. First parent is our original
699 # parent, second is the temporary "fake" commit we're unshelving.
700 # parent, second is the temporary "fake" commit we're unshelving.
701 interactive = opts.get('interactive')
700 with repo.lock():
702 with repo.lock():
701 checkparents(repo, state)
703 checkparents(repo, state)
702 ms = merge.mergestate.read(repo)
704 ms = merge.mergestate.read(repo)
@@ -719,10 +721,15 b' def unshelvecontinue(ui, repo, state, op'
719 with repo.ui.configoverride(overrides, 'unshelve'):
721 with repo.ui.configoverride(overrides, 'unshelve'):
720 with repo.dirstate.parentchange():
722 with repo.dirstate.parentchange():
721 repo.setparents(state.parents[0], nodemod.nullid)
723 repo.setparents(state.parents[0], nodemod.nullid)
722 newnode = repo.commit(text=shelvectx.description(),
724 if not interactive:
723 extra=shelvectx.extra(),
725 ispartialunshelve = False
724 user=shelvectx.user(),
726 newnode = repo.commit(text=shelvectx.description(),
725 date=shelvectx.date())
727 extra=shelvectx.extra(),
728 user=shelvectx.user(),
729 date=shelvectx.date())
730 else:
731 newnode, ispartialunshelve = _dounshelveinteractive(ui,
732 repo, shelvectx, basename, opts)
726
733
727 if newnode is None:
734 if newnode is None:
728 # If it ended up being a no-op commit, then the normal
735 # If it ended up being a no-op commit, then the normal
@@ -739,22 +746,24 b' def unshelvecontinue(ui, repo, state, op'
739 shelvectx = repo[newnode]
746 shelvectx = repo[newnode]
740
747
741 hg.updaterepo(repo, pendingctx.node(), overwrite=False)
748 hg.updaterepo(repo, pendingctx.node(), overwrite=False)
742
743 if repo.vfs.exists('unshelverebasestate'):
744 repo.vfs.rename('unshelverebasestate', 'rebasestate')
745 rebase.clearstatus(repo)
746
747 mergefiles(ui, repo, state.wctx, shelvectx)
749 mergefiles(ui, repo, state.wctx, shelvectx)
748 restorebranch(ui, repo, state.branchtorestore)
750 restorebranch(ui, repo, state.branchtorestore)
749
751
750 if not phases.supportinternal(repo):
752 if not ispartialunshelve:
751 repair.strip(ui, repo, state.nodestoremove, backup=False,
753 if not phases.supportinternal(repo):
752 topic='shelve')
754 repair.strip(ui, repo, state.nodestoremove, backup=False,
755 topic='shelve')
756 shelvedstate.clear(repo)
757 unshelvecleanup(ui, repo, state.name, opts)
753 _restoreactivebookmark(repo, state.activebookmark)
758 _restoreactivebookmark(repo, state.activebookmark)
754 shelvedstate.clear(repo)
755 unshelvecleanup(ui, repo, state.name, opts)
756 ui.status(_("unshelve of '%s' complete\n") % state.name)
759 ui.status(_("unshelve of '%s' complete\n") % state.name)
757
760
761 def hgcontinueunshelve(ui, repo):
762 """logic to resume unshelve using 'hg continue'"""
763 with repo.wlock():
764 state = _loadshelvedstate(ui, repo, {'continue' : True})
765 return unshelvecontinue(ui, repo, state, {'keep' : state.keep})
766
758 def _commitworkingcopychanges(ui, repo, opts, tmpwctx):
767 def _commitworkingcopychanges(ui, repo, opts, tmpwctx):
759 """Temporarily commit working copy changes before moving unshelve commit"""
768 """Temporarily commit working copy changes before moving unshelve commit"""
760 # Store pending changes in a commit and remember added in case a shelve
769 # Store pending changes in a commit and remember added in case a shelve
@@ -795,14 +804,40 b' def _unshelverestorecommit(ui, repo, tr,'
795
804
796 return repo, shelvectx
805 return repo, shelvectx
797
806
807 def _dounshelveinteractive(ui, repo, shelvectx, basename, opts):
808 """The user might want to unshelve certain changes only from the stored
809 shelve. So, we would create two commits. One with requested changes to
810 unshelve at that time and the latter is shelved for future.
811 """
812 opts['message'] = shelvectx.description()
813 opts['interactive-unshelve'] = True
814 pats = []
815 commitfunc = getcommitfunc(shelvectx.extra(), interactive=True,
816 editor=True)
817 newnode = cmdutil.dorecord(ui, repo, commitfunc, None, False,
818 cmdutil.recordfilter, *pats,
819 **pycompat.strkwargs(opts))
820 snode = repo.commit(text=shelvectx.description(),
821 extra=shelvectx.extra(),
822 user=shelvectx.user(),
823 date=shelvectx.date())
824 m = scmutil.matchfiles(repo, repo[snode].files())
825 if snode:
826 _shelvecreatedcommit(repo, snode, basename, m)
827
828 return newnode, bool(snode)
829
798 def _rebaserestoredcommit(ui, repo, opts, tr, oldtiprev, basename, pctx,
830 def _rebaserestoredcommit(ui, repo, opts, tr, oldtiprev, basename, pctx,
799 tmpwctx, shelvectx, branchtorestore,
831 tmpwctx, shelvectx, branchtorestore,
800 activebookmark):
832 activebookmark):
801 """Rebase restored commit from its original location to a destination"""
833 """Rebase restored commit from its original location to a destination"""
802 # If the shelve is not immediately on top of the commit
834 # If the shelve is not immediately on top of the commit
803 # we'll be merging with, rebase it to be on top.
835 # we'll be merging with, rebase it to be on top.
804 if tmpwctx.node() == shelvectx.p1().node():
836 interactive = opts.get('interactive')
805 return shelvectx
837 if tmpwctx.node() == shelvectx.p1().node() and not interactive:
838 # We won't skip on interactive mode because, the user might want to
839 # unshelve certain changes only.
840 return shelvectx, False
806
841
807 overrides = {
842 overrides = {
808 ('ui', 'forcemerge'): opts.get('tool', ''),
843 ('ui', 'forcemerge'): opts.get('tool', ''),
@@ -826,10 +861,15 b' def _rebaserestoredcommit(ui, repo, opts'
826
861
827 with repo.dirstate.parentchange():
862 with repo.dirstate.parentchange():
828 repo.setparents(tmpwctx.node(), nodemod.nullid)
863 repo.setparents(tmpwctx.node(), nodemod.nullid)
829 newnode = repo.commit(text=shelvectx.description(),
864 if not interactive:
830 extra=shelvectx.extra(),
865 ispartialunshelve = False
831 user=shelvectx.user(),
866 newnode = repo.commit(text=shelvectx.description(),
832 date=shelvectx.date())
867 extra=shelvectx.extra(),
868 user=shelvectx.user(),
869 date=shelvectx.date())
870 else:
871 newnode, ispartialunshelve = _dounshelveinteractive(ui, repo,
872 shelvectx, basename, opts)
833
873
834 if newnode is None:
874 if newnode is None:
835 # If it ended up being a no-op commit, then the normal
875 # If it ended up being a no-op commit, then the normal
@@ -844,7 +884,7 b' def _rebaserestoredcommit(ui, repo, opts'
844 shelvectx = repo[newnode]
884 shelvectx = repo[newnode]
845 hg.updaterepo(repo, tmpwctx.node(), False)
885 hg.updaterepo(repo, tmpwctx.node(), False)
846
886
847 return shelvectx
887 return shelvectx, ispartialunshelve
848
888
849 def _forgetunknownfiles(repo, shelvectx, addedbefore):
889 def _forgetunknownfiles(repo, shelvectx, addedbefore):
850 # Forget any files that were unknown before the shelve, unknown before
890 # Forget any files that were unknown before the shelve, unknown before
@@ -877,70 +917,18 b' def _checkunshelveuntrackedproblems(ui, '
877 hint = _("run hg status to see which files are missing")
917 hint = _("run hg status to see which files are missing")
878 raise error.Abort(m, hint=hint)
918 raise error.Abort(m, hint=hint)
879
919
880 @command('unshelve',
920 def dounshelve(ui, repo, *shelved, **opts):
881 [('a', 'abort', None,
882 _('abort an incomplete unshelve operation')),
883 ('c', 'continue', None,
884 _('continue an incomplete unshelve operation')),
885 ('k', 'keep', None,
886 _('keep shelve after unshelving')),
887 ('n', 'name', '',
888 _('restore shelved change with given name'), _('NAME')),
889 ('t', 'tool', '', _('specify merge tool')),
890 ('', 'date', '',
891 _('set date for temporary commits (DEPRECATED)'), _('DATE'))],
892 _('hg unshelve [[-n] SHELVED]'),
893 helpcategory=command.CATEGORY_WORKING_DIRECTORY)
894 def unshelve(ui, repo, *shelved, **opts):
895 """restore a shelved change to the working directory
896
897 This command accepts an optional name of a shelved change to
898 restore. If none is given, the most recent shelved change is used.
899
900 If a shelved change is applied successfully, the bundle that
901 contains the shelved changes is moved to a backup location
902 (.hg/shelve-backup).
903
904 Since you can restore a shelved change on top of an arbitrary
905 commit, it is possible that unshelving will result in a conflict
906 between your changes and the commits you are unshelving onto. If
907 this occurs, you must resolve the conflict, then use
908 ``--continue`` to complete the unshelve operation. (The bundle
909 will not be moved until you successfully complete the unshelve.)
910
911 (Alternatively, you can use ``--abort`` to abandon an unshelve
912 that causes a conflict. This reverts the unshelved changes, and
913 leaves the bundle in place.)
914
915 If bare shelved change(when no files are specified, without interactive,
916 include and exclude option) was done on newly created branch it would
917 restore branch information to the working directory.
918
919 After a successful unshelve, the shelved changes are stored in a
920 backup directory. Only the N most recent backups are kept. N
921 defaults to 10 but can be overridden using the ``shelve.maxbackups``
922 configuration option.
923
924 .. container:: verbose
925
926 Timestamp in seconds is used to decide order of backups. More
927 than ``maxbackups`` backups are kept, if same timestamp
928 prevents from deciding exact order of them, for safety.
929 """
930 with repo.wlock():
931 return _dounshelve(ui, repo, *shelved, **opts)
932
933 def _dounshelve(ui, repo, *shelved, **opts):
934 opts = pycompat.byteskwargs(opts)
921 opts = pycompat.byteskwargs(opts)
935 abortf = opts.get('abort')
922 abortf = opts.get('abort')
936 continuef = opts.get('continue')
923 continuef = opts.get('continue')
924 interactive = opts.get('interactive')
937 if not abortf and not continuef:
925 if not abortf and not continuef:
938 cmdutil.checkunfinished(repo)
926 cmdutil.checkunfinished(repo)
939 shelved = list(shelved)
927 shelved = list(shelved)
940 if opts.get("name"):
928 if opts.get("name"):
941 shelved.append(opts["name"])
929 shelved.append(opts["name"])
942
930
943 if abortf or continuef:
931 if abortf or continuef and not interactive:
944 if abortf and continuef:
932 if abortf and continuef:
945 raise error.Abort(_('cannot use both abort and continue'))
933 raise error.Abort(_('cannot use both abort and continue'))
946 if shelved:
934 if shelved:
@@ -949,49 +937,24 b' def _dounshelve(ui, repo, *shelved, **op'
949 if abortf and opts.get('tool', False):
937 if abortf and opts.get('tool', False):
950 ui.warn(_('tool option will be ignored\n'))
938 ui.warn(_('tool option will be ignored\n'))
951
939
952 try:
940 state = _loadshelvedstate(ui, repo, opts)
953 state = shelvedstate.load(repo)
954 if opts.get('keep') is None:
955 opts['keep'] = state.keep
956 except IOError as err:
957 if err.errno != errno.ENOENT:
958 raise
959 cmdutil.wrongtooltocontinue(repo, _('unshelve'))
960 except error.CorruptedState as err:
961 ui.debug(pycompat.bytestr(err) + '\n')
962 if continuef:
963 msg = _('corrupted shelved state file')
964 hint = _('please run hg unshelve --abort to abort unshelve '
965 'operation')
966 raise error.Abort(msg, hint=hint)
967 elif abortf:
968 msg = _('could not read shelved state file, your working copy '
969 'may be in an unexpected state\nplease update to some '
970 'commit\n')
971 ui.warn(msg)
972 shelvedstate.clear(repo)
973 return
974
975 if abortf:
941 if abortf:
976 return unshelveabort(ui, repo, state, opts)
942 return unshelveabort(ui, repo, state)
977 elif continuef:
943 elif continuef:
978 return unshelvecontinue(ui, repo, state, opts)
944 return unshelvecontinue(ui, repo, state, opts)
979 elif len(shelved) > 1:
945 elif len(shelved) > 1:
980 raise error.Abort(_('can only unshelve one change at a time'))
946 raise error.Abort(_('can only unshelve one change at a time'))
981
982 # abort unshelve while merging (issue5123)
983 parents = repo[None].parents()
984 if len(parents) > 1:
985 raise error.Abort(_('cannot unshelve while merging'))
986
987 elif not shelved:
947 elif not shelved:
988 shelved = listshelves(repo)
948 shelved = listshelves(repo)
989 if not shelved:
949 if not shelved:
990 raise error.Abort(_('no shelved changes to apply!'))
950 raise error.Abort(_('no shelved changes to apply!'))
991 basename = util.split(shelved[0][1])[1]
951 basename = util.split(shelved[0][1])[1]
992 ui.status(_("unshelving change '%s'\n") % basename)
952 ui.status(_("unshelving change '%s'\n") % basename)
993 else:
953 elif shelved:
994 basename = shelved[0]
954 basename = shelved[0]
955 if continuef and interactive:
956 state = _loadshelvedstate(ui, repo, opts)
957 return unshelvecontinue(ui, repo, state, opts, basename)
995
958
996 if not shelvedfile(repo, basename, patchextension).exists():
959 if not shelvedfile(repo, basename, patchextension).exists():
997 raise error.Abort(_("shelved change '%s' not found") % basename)
960 raise error.Abort(_("shelved change '%s' not found") % basename)
@@ -1020,128 +983,20 b' def _dounshelve(ui, repo, *shelved, **op'
1020 if shelvectx.branch() != shelvectx.p1().branch():
983 if shelvectx.branch() != shelvectx.p1().branch():
1021 branchtorestore = shelvectx.branch()
984 branchtorestore = shelvectx.branch()
1022
985
1023 shelvectx = _rebaserestoredcommit(ui, repo, opts, tr, oldtiprev,
986 shelvectx, ispartialunshelve = _rebaserestoredcommit(ui, repo, opts,
1024 basename, pctx, tmpwctx,
987 tr, oldtiprev, basename, pctx, tmpwctx, shelvectx,
1025 shelvectx, branchtorestore,
988 branchtorestore, activebookmark)
1026 activebookmark)
1027 overrides = {('ui', 'forcemerge'): opts.get('tool', '')}
989 overrides = {('ui', 'forcemerge'): opts.get('tool', '')}
1028 with ui.configoverride(overrides, 'unshelve'):
990 with ui.configoverride(overrides, 'unshelve'):
1029 mergefiles(ui, repo, pctx, shelvectx)
991 mergefiles(ui, repo, pctx, shelvectx)
1030 restorebranch(ui, repo, branchtorestore)
992 restorebranch(ui, repo, branchtorestore)
1031 _forgetunknownfiles(repo, shelvectx, addedbefore)
993 if not ispartialunshelve:
994 _forgetunknownfiles(repo, shelvectx, addedbefore)
1032
995
1033 shelvedstate.clear(repo)
996 shelvedstate.clear(repo)
1034 _finishunshelve(repo, oldtiprev, tr, activebookmark)
997 _finishunshelve(repo, oldtiprev, tr, activebookmark)
1035 unshelvecleanup(ui, repo, basename, opts)
998 unshelvecleanup(ui, repo, basename, opts)
1036 finally:
999 finally:
1037 if tr:
1000 if tr:
1038 tr.release()
1001 tr.release()
1039 lockmod.release(lock)
1002 lockmod.release(lock)
1040
1041 @command('shelve',
1042 [('A', 'addremove', None,
1043 _('mark new/missing files as added/removed before shelving')),
1044 ('u', 'unknown', None,
1045 _('store unknown files in the shelve')),
1046 ('', 'cleanup', None,
1047 _('delete all shelved changes')),
1048 ('', 'date', '',
1049 _('shelve with the specified commit date'), _('DATE')),
1050 ('d', 'delete', None,
1051 _('delete the named shelved change(s)')),
1052 ('e', 'edit', False,
1053 _('invoke editor on commit messages')),
1054 ('k', 'keep', False,
1055 _('shelve, but keep changes in the working directory')),
1056 ('l', 'list', None,
1057 _('list current shelves')),
1058 ('m', 'message', '',
1059 _('use text as shelve message'), _('TEXT')),
1060 ('n', 'name', '',
1061 _('use the given name for the shelved commit'), _('NAME')),
1062 ('p', 'patch', None,
1063 _('output patches for changes (provide the names of the shelved '
1064 'changes as positional arguments)')),
1065 ('i', 'interactive', None,
1066 _('interactive mode, only works while creating a shelve')),
1067 ('', 'stat', None,
1068 _('output diffstat-style summary of changes (provide the names of '
1069 'the shelved changes as positional arguments)')
1070 )] + cmdutil.walkopts,
1071 _('hg shelve [OPTION]... [FILE]...'),
1072 helpcategory=command.CATEGORY_WORKING_DIRECTORY)
1073 def shelvecmd(ui, repo, *pats, **opts):
1074 '''save and set aside changes from the working directory
1075
1076 Shelving takes files that "hg status" reports as not clean, saves
1077 the modifications to a bundle (a shelved change), and reverts the
1078 files so that their state in the working directory becomes clean.
1079
1080 To restore these changes to the working directory, using "hg
1081 unshelve"; this will work even if you switch to a different
1082 commit.
1083
1084 When no files are specified, "hg shelve" saves all not-clean
1085 files. If specific files or directories are named, only changes to
1086 those files are shelved.
1087
1088 In bare shelve (when no files are specified, without interactive,
1089 include and exclude option), shelving remembers information if the
1090 working directory was on newly created branch, in other words working
1091 directory was on different branch than its first parent. In this
1092 situation unshelving restores branch information to the working directory.
1093
1094 Each shelved change has a name that makes it easier to find later.
1095 The name of a shelved change defaults to being based on the active
1096 bookmark, or if there is no active bookmark, the current named
1097 branch. To specify a different name, use ``--name``.
1098
1099 To see a list of existing shelved changes, use the ``--list``
1100 option. For each shelved change, this will print its name, age,
1101 and description; use ``--patch`` or ``--stat`` for more details.
1102
1103 To delete specific shelved changes, use ``--delete``. To delete
1104 all shelved changes, use ``--cleanup``.
1105 '''
1106 opts = pycompat.byteskwargs(opts)
1107 allowables = [
1108 ('addremove', {'create'}), # 'create' is pseudo action
1109 ('unknown', {'create'}),
1110 ('cleanup', {'cleanup'}),
1111 # ('date', {'create'}), # ignored for passing '--date "0 0"' in tests
1112 ('delete', {'delete'}),
1113 ('edit', {'create'}),
1114 ('keep', {'create'}),
1115 ('list', {'list'}),
1116 ('message', {'create'}),
1117 ('name', {'create'}),
1118 ('patch', {'patch', 'list'}),
1119 ('stat', {'stat', 'list'}),
1120 ]
1121 def checkopt(opt):
1122 if opts.get(opt):
1123 for i, allowable in allowables:
1124 if opts[i] and opt not in allowable:
1125 raise error.Abort(_("options '--%s' and '--%s' may not be "
1126 "used together") % (opt, i))
1127 return True
1128 if checkopt('cleanup'):
1129 if pats:
1130 raise error.Abort(_("cannot specify names when using '--cleanup'"))
1131 return cleanupcmd(ui, repo)
1132 elif checkopt('delete'):
1133 return deletecmd(ui, repo, pats)
1134 elif checkopt('list'):
1135 return listcmd(ui, repo, pats, opts)
1136 elif checkopt('patch') or checkopt('stat'):
1137 return patchcmds(ui, repo, pats, opts)
1138 else:
1139 return createcmd(ui, repo, pats, opts)
1140
1141 def extsetup(ui):
1142 cmdutil.unfinishedstates.append(
1143 [shelvedstate._filename, False, False,
1144 _('unshelve already in progress'),
1145 _("use 'hg unshelve --continue' or 'hg unshelve --abort'")])
1146 cmdutil.afterresolvedstates.append(
1147 [shelvedstate._filename, _('hg unshelve --continue')])
@@ -248,7 +248,8 b' def prunetemporaryincludes(repo):'
248
248
249 typeactions = mergemod.emptyactions()
249 typeactions = mergemod.emptyactions()
250 typeactions['r'] = actions
250 typeactions['r'] = actions
251 mergemod.applyupdates(repo, typeactions, repo[None], repo['.'], False)
251 mergemod.applyupdates(repo, typeactions, repo[None], repo['.'], False,
252 wantfiledata=False)
252
253
253 # Fix dirstate
254 # Fix dirstate
254 for file in dropped:
255 for file in dropped:
@@ -382,7 +383,7 b' def filterupdatesactions(repo, wctx, mct'
382 typeactions = mergemod.emptyactions()
383 typeactions = mergemod.emptyactions()
383 typeactions['g'] = actions
384 typeactions['g'] = actions
384 mergemod.applyupdates(repo, typeactions, repo[None], repo['.'],
385 mergemod.applyupdates(repo, typeactions, repo[None], repo['.'],
385 False)
386 False, wantfiledata=False)
386
387
387 dirstate = repo.dirstate
388 dirstate = repo.dirstate
388 for file, flags, msg in actions:
389 for file, flags, msg in actions:
@@ -486,7 +487,8 b' def refreshwdir(repo, origstatus, origsp'
486 for f, (m, args, msg) in actions.iteritems():
487 for f, (m, args, msg) in actions.iteritems():
487 typeactions[m].append((f, args, msg))
488 typeactions[m].append((f, args, msg))
488
489
489 mergemod.applyupdates(repo, typeactions, repo[None], repo['.'], False)
490 mergemod.applyupdates(repo, typeactions, repo[None], repo['.'], False,
491 wantfiledata=False)
490
492
491 # Fix dirstate
493 # Fix dirstate
492 for file in added:
494 for file in added:
@@ -16,6 +16,7 b' import ssl'
16
16
17 from .i18n import _
17 from .i18n import _
18 from . import (
18 from . import (
19 encoding,
19 error,
20 error,
20 node,
21 node,
21 pycompat,
22 pycompat,
@@ -348,6 +349,17 b' def wrapsocket(sock, keyfile, certfile, '
348 if not serverhostname:
349 if not serverhostname:
349 raise error.Abort(_('serverhostname argument is required'))
350 raise error.Abort(_('serverhostname argument is required'))
350
351
352 if b'SSLKEYLOGFILE' in encoding.environ:
353 try:
354 import sslkeylog
355 sslkeylog.set_keylog(pycompat.fsdecode(
356 encoding.environ[b'SSLKEYLOGFILE']))
357 ui.warn(
358 b'sslkeylog enabled by SSLKEYLOGFILE environment variable\n')
359 except ImportError:
360 ui.warn(b'sslkeylog module missing, '
361 b'but SSLKEYLOGFILE set in environment\n')
362
351 for f in (keyfile, certfile):
363 for f in (keyfile, certfile):
352 if f and not os.path.exists(f):
364 if f and not os.path.exists(f):
353 raise error.Abort(
365 raise error.Abort(
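
The SSLKEYLOGFILE hook above relies on the third-party `sslkeylog` package so that captured TLS traffic can be decrypted later, for example in Wireshark. A standalone sketch of the same idea, assuming the package is installed (`pip install sslkeylog`); the URL is a placeholder:

import os
import urllib.request

import sslkeylog  # third-party; pip install sslkeylog

# Record TLS session keys for every ssl socket this process creates.
sslkeylog.set_keylog(os.environ.get('SSLKEYLOGFILE', 'tls-keys.log'))
urllib.request.urlopen('https://www.mercurial-scm.org/').close()
# Point a packet-capture tool at the key log to decrypt the session.
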
@@ -19,6 +19,8 b' the data.'
19
19
20 from __future__ import absolute_import
20 from __future__ import absolute_import
21
21
22 from .i18n import _
23
22 from . import (
24 from . import (
23 error,
25 error,
24 util,
26 util,
@@ -85,3 +87,134 b' class cmdstate(object):'
85 def exists(self):
87 def exists(self):
86 """check whether the state file exists or not"""
88 """check whether the state file exists or not"""
87 return self._repo.vfs.exists(self.fname)
89 return self._repo.vfs.exists(self.fname)
90
91 class _statecheck(object):
92 """a utility class that deals with multistep operations like graft,
93 histedit, bisect, update etc and check whether such commands
94 are in an unfinished conditition or not and return appropriate message
95 and hint.
96 It also has the ability to register and determine the states of any new
97 multistep operation or multistep command extension.
98 """
99
100 def __init__(self, opname, fname, clearable, allowcommit, reportonly,
101 continueflag, stopflag, cmdmsg, cmdhint, statushint,
102 abortfunc, continuefunc):
103 self._opname = opname
104 self._fname = fname
105 self._clearable = clearable
106 self._allowcommit = allowcommit
107 self._reportonly = reportonly
108 self._continueflag = continueflag
109 self._stopflag = stopflag
110 self._cmdmsg = cmdmsg
111 self._cmdhint = cmdhint
112 self._statushint = statushint
113 self.abortfunc = abortfunc
114 self.continuefunc = continuefunc
115
116 def statusmsg(self):
117 """returns the hint message corresponding to the command for
118 hg status --verbose
119 """
120 if not self._statushint:
121 hint = (_('To continue: hg %s --continue\n'
122 'To abort: hg %s --abort') % (self._opname,
123 self._opname))
124 if self._stopflag:
125 hint = hint + (_('\nTo stop: hg %s --stop') %
126 (self._opname))
127 return hint
128 return self._statushint
129
130 def hint(self):
131 """returns the hint message corresponding to an interrupted
132 operation
133 """
134 if not self._cmdhint:
135 return (_("use 'hg %s --continue' or 'hg %s --abort'") %
136 (self._opname, self._opname))
137 return self._cmdhint
138
139 def msg(self):
140 """returns the status message corresponding to the command"""
141 if not self._cmdmsg:
142 return _('%s in progress') % (self._opname)
143 return self._cmdmsg
144
145 def continuemsg(self):
146 """ returns appropriate continue message corresponding to command"""
147 return _('hg %s --continue') % (self._opname)
148
149 def isunfinished(self, repo):
150 """determines whether a multi-step operation is in progress
151 or not
152 """
153 if self._opname == 'merge':
154 return len(repo[None].parents()) > 1
155 else:
156 return repo.vfs.exists(self._fname)
157
158 # A list of statecheck objects for multistep operations like graft.
159 _unfinishedstates = []
160
161 def addunfinished(opname, fname, clearable=False, allowcommit=False,
162 reportonly=False, continueflag=False, stopflag=False,
163 cmdmsg="", cmdhint="", statushint="", abortfunc=None,
164 continuefunc=None):
165 """this registers a new command or operation to unfinishedstates
166 opname is the name the command or operation
167 fname is the file name in which data should be stored in .hg directory.
168 It is None for merge command.
169 clearable boolean determines whether or not interrupted states can be
170 cleared by running `hg update -C .` which in turn deletes the
171 state file.
172 allowcommit boolean decides whether commit is allowed during interrupted
173 state or not.
174 reportonly flag is used for operations like bisect where we just
175 need to detect the operation using 'hg status --verbose'
176 continueflag is a boolean determines whether or not a command supports
177 `--continue` option or not.
178 stopflag is a boolean that determines whether or not a command supports
179 --stop flag
180 cmdmsg is used to pass a different status message in case standard
181 message of the format "abort: cmdname in progress" is not desired.
182 cmdhint is used to pass a different hint message in case standard
183 message of the format "To continue: hg cmdname --continue
184 To abort: hg cmdname --abort" is not desired.
185 statushint is used to pass a different status message in case standard
186 message of the format ('To continue: hg cmdname --continue'
187 'To abort: hg cmdname --abort') is not desired
188 abortfunc stores the function required to abort an unfinished state.
189 continuefunc stores the function required to finish an interrupted
190 operation.
191 """
192 statecheckobj = _statecheck(opname, fname, clearable, allowcommit,
193 reportonly, continueflag, stopflag, cmdmsg,
194 cmdhint, statushint, abortfunc, continuefunc)
195 if opname == 'merge':
196 _unfinishedstates.append(statecheckobj)
197 else:
198 _unfinishedstates.insert(0, statecheckobj)
199
200 addunfinished(
201 'update', fname='updatestate', clearable=True,
202 cmdmsg=_('last update was interrupted'),
203 cmdhint=_("use 'hg update' to get a consistent checkout"),
204 statushint=_("To continue: hg update")
205 )
206 addunfinished(
207 'bisect', fname='bisect.state', allowcommit=True, reportonly=True,
208 statushint=_('To mark the changeset good: hg bisect --good\n'
209 'To mark the changeset bad: hg bisect --bad\n'
210 'To abort: hg bisect --reset\n')
211 )
212
213 def getrepostate(repo):
214 # experimental config: commands.status.skipstates
215 skip = set(repo.ui.configlist('commands', 'status.skipstates'))
216 for state in _unfinishedstates:
217 if state._opname in skip:
218 continue
219 if state.isunfinished(repo):
220 return (state._opname, state.statusmsg())
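
An extension shipping its own multistep command would register through the same hook; the following registration is hypothetical (the 'frob' command, its state file, and both callbacks are invented for illustration):

# Hypothetical: an 'hg frob' command storing progress in .hg/frobstate,
# supporting --continue and --abort via the registered callbacks.
addunfinished(
    'frob', fname='frobstate', continueflag=True,
    cmdmsg=_('frob in progress'),
    abortfunc=abortfrob,        # assumed helper undoing a half-done frob
    continuefunc=continuefrob,  # assumed helper resuming it
)
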
@@ -155,6 +155,7 b' class statichttprepository(localrepo.loc'
155
155
156 self.names = namespaces.namespaces()
156 self.names = namespaces.namespaces()
157 self.filtername = None
157 self.filtername = None
158 self._extrafilterid = None
158
159
159 try:
160 try:
160 requirements = set(self.vfs.read(b'requires').splitlines())
161 requirements = set(self.vfs.read(b'requires').splitlines())
@@ -678,6 +678,7 b' def display_hotpath(data, fp, limit=0.05'
678 for sample in data.samples:
678 for sample in data.samples:
679 root.add(sample.stack[::-1], sample.time - lasttime)
679 root.add(sample.stack[::-1], sample.time - lasttime)
680 lasttime = sample.time
680 lasttime = sample.time
681 showtime = kwargs.get(r'showtime', True)
681
682
682 def _write(node, depth, multiple_siblings):
683 def _write(node, depth, multiple_siblings):
683 site = node.site
684 site = node.site
@@ -695,7 +696,9 b' def display_hotpath(data, fp, limit=0.05'
695 # lots of string formatting
696 # lots of string formatting
696 listpattern = ''.ljust(indent) +\
697 listpattern = ''.ljust(indent) +\
697 ('\\' if multiple_siblings else '|') +\
698 ('\\' if multiple_siblings else '|') +\
698 ' %4.1f%% %s %s'
699 ' %4.1f%%' +\
700 (' %5.2fs' % node.count if showtime else '') +\
701 ' %s %s'
699 liststring = listpattern % (node.count / root.count * 100,
702 liststring = listpattern % (node.count / root.count * 100,
700 filename, function)
703 filename, function)
701 codepattern = '%' + ('%d' % (55 - len(liststring))) + 's %d: %s'
704 codepattern = '%' + ('%d' % (55 - len(liststring))) + 's %d: %s'
@@ -40,7 +40,7 b' def _matchtrackedpath(path, matcher):'
40 if path.startswith('data/'):
40 if path.startswith('data/'):
41 return matcher(path[len('data/'):-len('.i')])
41 return matcher(path[len('data/'):-len('.i')])
42 elif path.startswith('meta/'):
42 elif path.startswith('meta/'):
43 return matcher.visitdir(path[len('meta/'):-len('/00manifest.i')] or '.')
43 return matcher.visitdir(path[len('meta/'):-len('/00manifest.i')])
44
44
45 raise error.ProgrammingError("cannot decode path %s" % path)
45 raise error.ProgrammingError("cannot decode path %s" % path)
46
46
@@ -337,7 +337,7 b' def _calcmode(vfs):'
337 mode = None
337 mode = None
338 return mode
338 return mode
339
339
340 _data = ('narrowspec data meta 00manifest.d 00manifest.i'
340 _data = ('bookmarks narrowspec data meta 00manifest.d 00manifest.i'
341 ' 00changelog.d 00changelog.i phaseroots obsstore')
341 ' 00changelog.d 00changelog.i phaseroots obsstore')
342
342
343 def isrevlog(f, kind, st):
343 def isrevlog(f, kind, st):
@@ -612,7 +612,7 b' class fncachestore(basicstore):'
612 raise
612 raise
613
613
614 def copylist(self):
614 def copylist(self):
615 d = ('narrowspec data meta dh fncache phaseroots obsstore'
615 d = ('bookmarks narrowspec data meta dh fncache phaseroots obsstore'
616 ' 00manifest.d 00manifest.i 00changelog.d 00changelog.i')
616 ' 00manifest.d 00manifest.i 00changelog.d 00changelog.i')
617 return (['requires', '00changelog.i'] +
617 return (['requires', '00changelog.i'] +
618 ['store/' + f for f in d.split()])
618 ['store/' + f for f in d.split()])
@@ -88,13 +88,15 b' def annotatesubrepoerror(func):'
88 def _updateprompt(ui, sub, dirty, local, remote):
88 def _updateprompt(ui, sub, dirty, local, remote):
89 if dirty:
89 if dirty:
90 msg = (_(' subrepository sources for %s differ\n'
90 msg = (_(' subrepository sources for %s differ\n'
91 'use (l)ocal source (%s) or (r)emote source (%s)?'
91 'you can use (l)ocal source (%s) or (r)emote source (%s).\n'
92 'what do you want to do?'
92 '$$ &Local $$ &Remote')
93 '$$ &Local $$ &Remote')
93 % (subrelpath(sub), local, remote))
94 % (subrelpath(sub), local, remote))
94 else:
95 else:
95 msg = (_(' subrepository sources for %s differ (in checked out '
96 msg = (_(' subrepository sources for %s differ (in checked out '
96 'version)\n'
97 'version)\n'
97 'use (l)ocal source (%s) or (r)emote source (%s)?'
98 'you can use (l)ocal source (%s) or (r)emote source (%s).\n'
99 'what do you want to do?'
98 '$$ &Local $$ &Remote')
100 '$$ &Local $$ &Remote')
99 % (subrelpath(sub), local, remote))
101 % (subrelpath(sub), local, remote))
100 return ui.promptchoice(msg, 0)
102 return ui.promptchoice(msg, 0)
@@ -168,8 +168,9 b' def submerge(repo, wctx, mctx, actx, ove'
168 prompts['ro'] = r[0]
168 prompts['ro'] = r[0]
169 if repo.ui.promptchoice(
169 if repo.ui.promptchoice(
170 _(' subrepository sources for %(s)s differ\n'
170 _(' subrepository sources for %(s)s differ\n'
171 'use (l)ocal%(l)s source (%(lo)s)'
171 'you can use (l)ocal%(l)s source (%(lo)s)'
172 ' or (r)emote%(o)s source (%(ro)s)?'
172 ' or (r)emote%(o)s source (%(ro)s).\n'
173 'what do you want to do?'
173 '$$ &Local $$ &Remote') % prompts, 0):
174 '$$ &Local $$ &Remote') % prompts, 0):
174 debug(s, "prompt changed, get", r)
175 debug(s, "prompt changed, get", r)
175 wctx.sub(s).get(r, overwrite)
176 wctx.sub(s).get(r, overwrite)
@@ -186,7 +187,9 b' def submerge(repo, wctx, mctx, actx, ove'
186 option = repo.ui.promptchoice(
187 option = repo.ui.promptchoice(
187 _(' subrepository %(s)s diverged (local revision: %(sl)s, '
188 _(' subrepository %(s)s diverged (local revision: %(sl)s, '
188 'remote revision: %(sr)s)\n'
189 'remote revision: %(sr)s)\n'
189 '(M)erge, keep (l)ocal%(l)s or keep (r)emote%(o)s?'
190 'you can (m)erge, keep (l)ocal%(l)s or keep '
191 '(r)emote%(o)s.\n'
192 'what do you want to do?'
190 '$$ &Merge $$ &Local $$ &Remote')
193 '$$ &Merge $$ &Local $$ &Remote')
191 % prompts, 0)
194 % prompts, 0)
192 if option == 0:
195 if option == 0:
@@ -13,11 +13,13 @@
 from __future__ import absolute_import
 
 import errno
+import io
 
 from .node import (
     bin,
     hex,
     nullid,
+    nullrev,
     short,
 )
 from .i18n import _
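For context on the two new imports: io supplies the SEEK_END constant used further down, and nullrev is the sentinel revision number for a missing parent. In this release line's mercurial/node.py the sentinels are a rev of -1 and a 20-byte all-zero hash; a self-contained restatement:

# Restatement of the null sentinels assumed by the tags changes below
# (values match mercurial/node.py of this era).
nullrev = -1
nullid = b'\0' * 20

def has_second_parent(p2rev):
    return p2rev != nullrev

assert not has_second_parent(nullrev) and has_second_parent(42)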
@@ -89,7 +91,7 @@ def fnoderevs(ui, repo, revs):
     unfi = repo.unfiltered()
     tonode = unfi.changelog.node
    nodes = [tonode(r) for r in revs]
-    fnodes = _getfnodes(ui, repo, nodes[::-1]) # reversed help the cache
+    fnodes = _getfnodes(ui, repo, nodes)
     fnodes = _filterfnodes(fnodes, nodes)
     return fnodes
 
@@ -457,7 +459,8 @@ def _readtagcache(ui, repo):
     # This is the most expensive part of finding tags, so performance
     # depends primarily on the size of newheads. Worst case: no cache
     # file, so newheads == repoheads.
-    cachefnode = _getfnodes(ui, repo, repoheads)
+    # Reversed order helps the cache ('repoheads' is in descending order)
+    cachefnode = _getfnodes(ui, repo, reversed(repoheads))
 
     # Caller has to iterate over all heads, but can use the filenodes in
     # cachefnode to get to each .hgtags revision quickly.
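Why order matters: the fnodes cache (see the final hunk in this file) now tries to derive a revision's .hgtags filenode from its parents' cached entries, so visiting ancestors before descendants keeps that fast path hot; repoheads is in descending rev order, hence the reversed(). A toy model of the effect, where parents and expensive are illustrative helpers, not Mercurial APIs:

# Toy model: a child can reuse its parent's cached answer only if the
# parent was processed first.
parents = {0: None, 1: 0, 2: 1, 3: 2}
expensive_calls = []

def expensive(rev):
    expensive_calls.append(rev)
    return 'fnode-of-0'

cache = {}
def getfnode(rev):
    p = parents[rev]
    # (the real code also checks that .hgtags is unchanged vs. the parent)
    cache[rev] = cache[p] if p in cache else expensive(rev)
    return cache[rev]

for rev in sorted(parents):          # ascending, like reversed(repoheads)
    getfnode(rev)
assert expensive_calls == [0]        # only the root paid the slow path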
@@ -472,7 +475,7 @@ def _getfnodes(ui, repo, nodes):
     starttime = util.timer()
     fnodescache = hgtagsfnodescache(repo.unfiltered())
     cachefnode = {}
-    for node in reversed(nodes):
+    for node in nodes:
         fnode = fnodescache.getfnode(node)
         if fnode != nullid:
             cachefnode[node] = fnode
@@ -560,7 +563,7 @@ def _tag(repo, names, node, message, loc
                 " branch name\n") % name)
 
 def writetags(fp, names, munge, prevtags):
-    fp.seek(0, 2)
+    fp.seek(0, io.SEEK_END)
     if prevtags and not prevtags.endswith('\n'):
         fp.write('\n')
     for name in names:
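The seek change is purely cosmetic: io.SEEK_END is the stdlib name for the magic whence value 2, and both position the file pointer at the end before appending. A quick self-contained check:

import io
import tempfile

assert io.SEEK_END == 2
with tempfile.TemporaryFile() as fp:
    fp.write(b'tag1\n')
    fp.seek(0, io.SEEK_END)      # same effect as the old fp.seek(0, 2)
    fp.write(b'tag2\n')
    fp.seek(0)
    assert fp.read() == b'tag1\ntag2\n'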
@@ -691,6 +694,9 @@ class hgtagsfnodescache(object):
         If an .hgtags does not exist at the specified revision, nullid is
         returned.
         """
+        if node == nullid:
+            return nullid
+
         ctx = self._repo[node]
         rev = ctx.rev()
 
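The guard matters because the parent-walking code in the next hunk calls getfnode() on parent nodes, and a root revision's parent is nullid; short-circuiting avoids a pointless repo lookup for a revision that can never carry .hgtags. The shape of the guard, with a hypothetical lookup stand-in:

# Sketch of the guard clause: nullid can never have an .hgtags entry.
nullid = b'\0' * 20

def getfnode(node, lookup):
    if node == nullid:
        return nullid            # short-circuit the null revision
    return lookup(node)

assert getfnode(nullid, lambda n: b'\x01' * 20) == nullid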
@@ -715,12 +721,33 @@ class hgtagsfnodescache(object):
         if not computemissing:
             return None
 
-        # Populate missing entry.
-        try:
-            fnode = ctx.filenode('.hgtags')
-        except error.LookupError:
-            # No .hgtags file on this revision.
-            fnode = nullid
+        fnode = None
+        cl = self._repo.changelog
+        p1rev, p2rev = cl._uncheckedparentrevs(rev)
+        p1node = cl.node(p1rev)
+        p1fnode = self.getfnode(p1node, computemissing=False)
+        if p2rev != nullrev:
+            # There are some non-merge changesets where p1 is null and p2
+            # is set. Processing them as merges is just slower, but still
+            # gives a good result.
+            p2node = cl.node(p2rev)
+            p2fnode = self.getfnode(p2node, computemissing=False)
+            if p1fnode != p2fnode:
+                # We cannot rely on readfast because we don't know against
+                # what parent the readfast delta is computed.
+                p1fnode = None
+        if p1fnode is not None:
+            mctx = ctx.manifestctx()
+            fnode = mctx.readfast().get('.hgtags')
+            if fnode is None:
+                fnode = p1fnode
+        if fnode is None:
+            # Populate missing entry.
+            try:
+                fnode = ctx.filenode('.hgtags')
+            except error.LookupError:
+                # No .hgtags file on this revision.
+                fnode = nullid
 
         self._writeentry(offset, properprefix, fnode)
         return fnode
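Condensing the final hunk: when a parent's .hgtags filenode is already cached (and both parents agree for merges), a fast manifest-delta read (readfast) can answer directly; if the delta does not mention .hgtags, the parent's filenode is inherited, and only otherwise does the code fall back to the full, always-correct filenode lookup. A hedged sketch with hypothetical callables in place of the real ctx/manifest APIs:

# fnode_for() distills the control flow above; readfast and full_lookup
# are stand-ins for mctx.readfast().get and ctx.filenode.
def fnode_for(parent_fnode, readfast, full_lookup):
    if parent_fnode is not None:
        fnode = readfast('.hgtags')      # cheap delta vs. a parent
        if fnode is None:                # .hgtags untouched here
            fnode = parent_fnode
        return fnode
    return full_lookup('.hgtags')        # slow path, always correct

# .hgtags untouched: inherit the parent's filenode without a full lookup.
assert fnode_for(b'p1', lambda f: None, lambda f: b'full') == b'p1'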
[The remaining files in this merge were truncated by the diff viewer. The only recoverable detail is three file copies: rust/hg-cpython/src/ancestors.rs to rust/hg-cpython/src/filepatterns.rs, tests/test-copies.t to tests/test-copies-unrelated.t, and tests/test-share.t to tests/test-share-bookmarks.t.]