merge with stable
Yuya Nishihara
r42912:863e9e7f merge default
@@ -1,549 +1,551 @@
1 1 # linux.py - Linux specific automation functionality
2 2 #
3 3 # Copyright 2019 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 # no-check-code because Python 3 native.
9 9
10 10 import os
11 11 import pathlib
12 12 import shlex
13 13 import subprocess
14 14 import tempfile
15 15
16 16 from .ssh import (
17 17 exec_command,
18 18 )
19 19
20 20
21 21 # Linux distributions that are supported.
22 22 DISTROS = {
23 23 'debian9',
24 24 'ubuntu18.04',
25 25 'ubuntu18.10',
26 26 'ubuntu19.04',
27 27 }
28 28
29 29 INSTALL_PYTHONS = r'''
30 30 PYENV2_VERSIONS="2.7.16 pypy2.7-7.1.1"
31 31 PYENV3_VERSIONS="3.5.7 3.6.8 3.7.3 3.8-dev pypy3.5-7.0.0 pypy3.6-7.1.1"
32 32
33 33 git clone https://github.com/pyenv/pyenv.git /hgdev/pyenv
34 34 pushd /hgdev/pyenv
35 35 git checkout 3faeda67bb33e07750d1a104271369a7384ca45c
36 36 popd
37 37
38 38 export PYENV_ROOT="/hgdev/pyenv"
39 39 export PATH="$PYENV_ROOT/bin:$PATH"
40 40
41 41 # pip 19.0.3.
42 42 PIP_SHA256=efe99298f3fbb1f56201ce6b81d2658067d2f7d7dfc2d412e0d3cacc9a397c61
43 43 wget -O get-pip.py --progress dot:mega https://github.com/pypa/get-pip/raw/fee32c376da1ff6496a798986d7939cd51e1644f/get-pip.py
44 44 echo "${PIP_SHA256} get-pip.py" | sha256sum --check -
45 45
46 46 VIRTUALENV_SHA256=984d7e607b0a5d1329425dd8845bd971b957424b5ba664729fab51ab8c11bc39
47 47 VIRTUALENV_TARBALL=virtualenv-16.4.3.tar.gz
48 48 wget -O ${VIRTUALENV_TARBALL} --progress dot:mega https://files.pythonhosted.org/packages/37/db/89d6b043b22052109da35416abc3c397655e4bd3cff031446ba02b9654fa/${VIRTUALENV_TARBALL}
49 49 echo "${VIRTUALENV_SHA256} ${VIRTUALENV_TARBALL}" | sha256sum --check -
50 50
51 51 for v in ${PYENV2_VERSIONS}; do
52 52 pyenv install -v ${v}
53 53 ${PYENV_ROOT}/versions/${v}/bin/python get-pip.py
54 54 ${PYENV_ROOT}/versions/${v}/bin/pip install ${VIRTUALENV_TARBALL}
55 55 ${PYENV_ROOT}/versions/${v}/bin/pip install -r /hgdev/requirements-py2.txt
56 56 done
57 57
58 58 for v in ${PYENV3_VERSIONS}; do
59 59 pyenv install -v ${v}
60 60 ${PYENV_ROOT}/versions/${v}/bin/python get-pip.py
61 61 ${PYENV_ROOT}/versions/${v}/bin/pip install -r /hgdev/requirements-py3.txt
62 62 done
63 63
64 64 pyenv global ${PYENV2_VERSIONS} ${PYENV3_VERSIONS} system
65 65 '''.lstrip().replace('\r\n', '\n')
66 66
67 67
68 68 BOOTSTRAP_VIRTUALENV = r'''
69 69 /usr/bin/virtualenv /hgdev/venv-bootstrap
70 70
71 71 HG_SHA256=1bdd21bb87d1e05fb5cd395d488d0e0cc2f2f90ce0fd248e31a03595da5ccb47
72 72 HG_TARBALL=mercurial-4.9.1.tar.gz
73 73
74 74 wget -O ${HG_TARBALL} --progress dot:mega https://www.mercurial-scm.org/release/${HG_TARBALL}
75 75 echo "${HG_SHA256} ${HG_TARBALL}" | sha256sum --check -
76 76
77 77 /hgdev/venv-bootstrap/bin/pip install ${HG_TARBALL}
78 78 '''.lstrip().replace('\r\n', '\n')
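
Both bootstrap scripts above pin an exact SHA-256 digest for every artifact they download and abort on mismatch. For reference, a minimal Python sketch of the same fetch-and-verify pattern, reusing the Mercurial tarball URL and digest from BOOTSTRAP_VIRTUALENV (the helper name is ours, not part of this module):

```python
import hashlib
import urllib.request

def fetch_verified(url, sha256, dest):
    """Download ``url`` to ``dest``, failing if the SHA-256 digest differs."""
    data = urllib.request.urlopen(url).read()
    digest = hashlib.sha256(data).hexdigest()
    if digest != sha256:
        raise ValueError('digest mismatch: got %s, want %s' % (digest, sha256))
    with open(dest, 'wb') as fh:
        fh.write(data)

fetch_verified(
    'https://www.mercurial-scm.org/release/mercurial-4.9.1.tar.gz',
    '1bdd21bb87d1e05fb5cd395d488d0e0cc2f2f90ce0fd248e31a03595da5ccb47',
    'mercurial-4.9.1.tar.gz')
```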
79 79
80 80
81 81 BOOTSTRAP_DEBIAN = r'''
82 82 #!/bin/bash
83 83
84 84 set -ex
85 85
86 86 DISTRO=`grep DISTRIB_ID /etc/lsb-release | awk -F= '{{print $2}}'`
87 87 DEBIAN_VERSION=`cat /etc/debian_version`
88 88 LSB_RELEASE=`lsb_release -cs`
89 89
90 90 sudo /usr/sbin/groupadd hg
91 91 sudo /usr/sbin/groupadd docker
92 92 sudo /usr/sbin/useradd -g hg -G sudo,docker -d /home/hg -m -s /bin/bash hg
93 93 sudo mkdir /home/hg/.ssh
94 94 sudo cp ~/.ssh/authorized_keys /home/hg/.ssh/authorized_keys
95 95 sudo chown -R hg:hg /home/hg/.ssh
96 96 sudo chmod 700 /home/hg/.ssh
97 97 sudo chmod 600 /home/hg/.ssh/authorized_keys
98 98
99 99 cat << EOF | sudo tee /etc/sudoers.d/90-hg
100 100 hg ALL=(ALL) NOPASSWD:ALL
101 101 EOF
102 102
103 103 sudo apt-get update
104 104 sudo DEBIAN_FRONTEND=noninteractive apt-get -yq dist-upgrade
105 105
106 106 # Install packages necessary to set up Docker Apt repo.
107 107 sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install --no-install-recommends \
108 108 apt-transport-https \
109 109 gnupg
110 110
111 111 cat > docker-apt-key << EOF
112 112 -----BEGIN PGP PUBLIC KEY BLOCK-----
113 113
114 114 mQINBFit2ioBEADhWpZ8/wvZ6hUTiXOwQHXMAlaFHcPH9hAtr4F1y2+OYdbtMuth
115 115 lqqwp028AqyY+PRfVMtSYMbjuQuu5byyKR01BbqYhuS3jtqQmljZ/bJvXqnmiVXh
116 116 38UuLa+z077PxyxQhu5BbqntTPQMfiyqEiU+BKbq2WmANUKQf+1AmZY/IruOXbnq
117 117 L4C1+gJ8vfmXQt99npCaxEjaNRVYfOS8QcixNzHUYnb6emjlANyEVlZzeqo7XKl7
118 118 UrwV5inawTSzWNvtjEjj4nJL8NsLwscpLPQUhTQ+7BbQXAwAmeHCUTQIvvWXqw0N
119 119 cmhh4HgeQscQHYgOJjjDVfoY5MucvglbIgCqfzAHW9jxmRL4qbMZj+b1XoePEtht
120 120 ku4bIQN1X5P07fNWzlgaRL5Z4POXDDZTlIQ/El58j9kp4bnWRCJW0lya+f8ocodo
121 121 vZZ+Doi+fy4D5ZGrL4XEcIQP/Lv5uFyf+kQtl/94VFYVJOleAv8W92KdgDkhTcTD
122 122 G7c0tIkVEKNUq48b3aQ64NOZQW7fVjfoKwEZdOqPE72Pa45jrZzvUFxSpdiNk2tZ
123 123 XYukHjlxxEgBdC/J3cMMNRE1F4NCA3ApfV1Y7/hTeOnmDuDYwr9/obA8t016Yljj
124 124 q5rdkywPf4JF8mXUW5eCN1vAFHxeg9ZWemhBtQmGxXnw9M+z6hWwc6ahmwARAQAB
125 125 tCtEb2NrZXIgUmVsZWFzZSAoQ0UgZGViKSA8ZG9ja2VyQGRvY2tlci5jb20+iQI3
126 126 BBMBCgAhBQJYrefAAhsvBQsJCAcDBRUKCQgLBRYCAwEAAh4BAheAAAoJEI2BgDwO
127 127 v82IsskP/iQZo68flDQmNvn8X5XTd6RRaUH33kXYXquT6NkHJciS7E2gTJmqvMqd
128 128 tI4mNYHCSEYxI5qrcYV5YqX9P6+Ko+vozo4nseUQLPH/ATQ4qL0Zok+1jkag3Lgk
129 129 jonyUf9bwtWxFp05HC3GMHPhhcUSexCxQLQvnFWXD2sWLKivHp2fT8QbRGeZ+d3m
130 130 6fqcd5Fu7pxsqm0EUDK5NL+nPIgYhN+auTrhgzhK1CShfGccM/wfRlei9Utz6p9P
131 131 XRKIlWnXtT4qNGZNTN0tR+NLG/6Bqd8OYBaFAUcue/w1VW6JQ2VGYZHnZu9S8LMc
132 132 FYBa5Ig9PxwGQOgq6RDKDbV+PqTQT5EFMeR1mrjckk4DQJjbxeMZbiNMG5kGECA8
133 133 g383P3elhn03WGbEEa4MNc3Z4+7c236QI3xWJfNPdUbXRaAwhy/6rTSFbzwKB0Jm
134 134 ebwzQfwjQY6f55MiI/RqDCyuPj3r3jyVRkK86pQKBAJwFHyqj9KaKXMZjfVnowLh
135 135 9svIGfNbGHpucATqREvUHuQbNnqkCx8VVhtYkhDb9fEP2xBu5VvHbR+3nfVhMut5
136 136 G34Ct5RS7Jt6LIfFdtcn8CaSas/l1HbiGeRgc70X/9aYx/V/CEJv0lIe8gP6uDoW
137 137 FPIZ7d6vH+Vro6xuWEGiuMaiznap2KhZmpkgfupyFmplh0s6knymuQINBFit2ioB
138 138 EADneL9S9m4vhU3blaRjVUUyJ7b/qTjcSylvCH5XUE6R2k+ckEZjfAMZPLpO+/tF
139 139 M2JIJMD4SifKuS3xck9KtZGCufGmcwiLQRzeHF7vJUKrLD5RTkNi23ydvWZgPjtx
140 140 Q+DTT1Zcn7BrQFY6FgnRoUVIxwtdw1bMY/89rsFgS5wwuMESd3Q2RYgb7EOFOpnu
141 141 w6da7WakWf4IhnF5nsNYGDVaIHzpiqCl+uTbf1epCjrOlIzkZ3Z3Yk5CM/TiFzPk
142 142 z2lLz89cpD8U+NtCsfagWWfjd2U3jDapgH+7nQnCEWpROtzaKHG6lA3pXdix5zG8
143 143 eRc6/0IbUSWvfjKxLLPfNeCS2pCL3IeEI5nothEEYdQH6szpLog79xB9dVnJyKJb
144 144 VfxXnseoYqVrRz2VVbUI5Blwm6B40E3eGVfUQWiux54DspyVMMk41Mx7QJ3iynIa
145 145 1N4ZAqVMAEruyXTRTxc9XW0tYhDMA/1GYvz0EmFpm8LzTHA6sFVtPm/ZlNCX6P1X
146 146 zJwrv7DSQKD6GGlBQUX+OeEJ8tTkkf8QTJSPUdh8P8YxDFS5EOGAvhhpMBYD42kQ
147 147 pqXjEC+XcycTvGI7impgv9PDY1RCC1zkBjKPa120rNhv/hkVk/YhuGoajoHyy4h7
148 148 ZQopdcMtpN2dgmhEegny9JCSwxfQmQ0zK0g7m6SHiKMwjwARAQABiQQ+BBgBCAAJ
149 149 BQJYrdoqAhsCAikJEI2BgDwOv82IwV0gBBkBCAAGBQJYrdoqAAoJEH6gqcPyc/zY
150 150 1WAP/2wJ+R0gE6qsce3rjaIz58PJmc8goKrir5hnElWhPgbq7cYIsW5qiFyLhkdp
151 151 YcMmhD9mRiPpQn6Ya2w3e3B8zfIVKipbMBnke/ytZ9M7qHmDCcjoiSmwEXN3wKYI
152 152 mD9VHONsl/CG1rU9Isw1jtB5g1YxuBA7M/m36XN6x2u+NtNMDB9P56yc4gfsZVES
153 153 KA9v+yY2/l45L8d/WUkUi0YXomn6hyBGI7JrBLq0CX37GEYP6O9rrKipfz73XfO7
154 154 JIGzOKZlljb/D9RX/g7nRbCn+3EtH7xnk+TK/50euEKw8SMUg147sJTcpQmv6UzZ
155 155 cM4JgL0HbHVCojV4C/plELwMddALOFeYQzTif6sMRPf+3DSj8frbInjChC3yOLy0
156 156 6br92KFom17EIj2CAcoeq7UPhi2oouYBwPxh5ytdehJkoo+sN7RIWua6P2WSmon5
157 157 U888cSylXC0+ADFdgLX9K2zrDVYUG1vo8CX0vzxFBaHwN6Px26fhIT1/hYUHQR1z
158 158 VfNDcyQmXqkOnZvvoMfz/Q0s9BhFJ/zU6AgQbIZE/hm1spsfgvtsD1frZfygXJ9f
159 159 irP+MSAI80xHSf91qSRZOj4Pl3ZJNbq4yYxv0b1pkMqeGdjdCYhLU+LZ4wbQmpCk
160 160 SVe2prlLureigXtmZfkqevRz7FrIZiu9ky8wnCAPwC7/zmS18rgP/17bOtL4/iIz
161 161 QhxAAoAMWVrGyJivSkjhSGx1uCojsWfsTAm11P7jsruIL61ZzMUVE2aM3Pmj5G+W
162 162 9AcZ58Em+1WsVnAXdUR//bMmhyr8wL/G1YO1V3JEJTRdxsSxdYa4deGBBY/Adpsw
163 163 24jxhOJR+lsJpqIUeb999+R8euDhRHG9eFO7DRu6weatUJ6suupoDTRWtr/4yGqe
164 164 dKxV3qQhNLSnaAzqW/1nA3iUB4k7kCaKZxhdhDbClf9P37qaRW467BLCVO/coL3y
165 165 Vm50dwdrNtKpMBh3ZpbB1uJvgi9mXtyBOMJ3v8RZeDzFiG8HdCtg9RvIt/AIFoHR
166 166 H3S+U79NT6i0KPzLImDfs8T7RlpyuMc4Ufs8ggyg9v3Ae6cN3eQyxcK3w0cbBwsh
167 167 /nQNfsA6uu+9H7NhbehBMhYnpNZyrHzCmzyXkauwRAqoCbGCNykTRwsur9gS41TQ
168 168 M8ssD1jFheOJf3hODnkKU+HKjvMROl1DK7zdmLdNzA1cvtZH/nCC9KPj1z8QC47S
169 169 xx+dTZSx4ONAhwbS/LN3PoKtn8LPjY9NP9uDWI+TWYquS2U+KHDrBDlsgozDbs/O
170 170 jCxcpDzNmXpWQHEtHU7649OXHP7UeNST1mCUCH5qdank0V1iejF6/CfTFU4MfcrG
171 171 YT90qFF93M3v01BbxP+EIY2/9tiIPbrd
172 172 =0YYh
173 173 -----END PGP PUBLIC KEY BLOCK-----
174 174 EOF
175 175
176 176 sudo apt-key add docker-apt-key
177 177
178 178 if [ "$DEBIAN_VERSION" = "9.8" ]; then
179 179 cat << EOF | sudo tee -a /etc/apt/sources.list
180 180 # Need backports for clang-format-6.0
181 181 deb http://deb.debian.org/debian stretch-backports main
182 182
183 183 # Sources are useful if we want to compile things locally.
184 184 deb-src http://deb.debian.org/debian stretch main
185 185 deb-src http://security.debian.org/debian-security stretch/updates main
186 186 deb-src http://deb.debian.org/debian stretch-updates main
187 187 deb-src http://deb.debian.org/debian stretch-backports main
188 188
189 189 deb [arch=amd64] https://download.docker.com/linux/debian stretch stable
190 190 EOF
191 191
192 192 elif [ "$DISTRO" = "Ubuntu" ]; then
193 193 cat << EOF | sudo tee -a /etc/apt/sources.list
194 194 deb [arch=amd64] https://download.docker.com/linux/ubuntu $LSB_RELEASE stable
195 195 EOF
196 196
197 197 fi
198 198
199 199 sudo apt-get update
200 200
201 201 PACKAGES="\
202 202 btrfs-progs \
203 203 build-essential \
204 204 bzr \
205 205 clang-format-6.0 \
206 206 cvs \
207 207 darcs \
208 208 debhelper \
209 209 devscripts \
210 210 dpkg-dev \
211 211 dstat \
212 212 emacs \
213 213 gettext \
214 214 git \
215 215 htop \
216 216 iotop \
217 217 jfsutils \
218 218 libbz2-dev \
219 219 libexpat1-dev \
220 220 libffi-dev \
221 221 libgdbm-dev \
222 222 liblzma-dev \
223 223 libncurses5-dev \
224 224 libnss3-dev \
225 225 libreadline-dev \
226 226 libsqlite3-dev \
227 227 libssl-dev \
228 228 netbase \
229 229 ntfs-3g \
230 230 nvme-cli \
231 231 pyflakes \
232 232 pyflakes3 \
233 233 pylint \
234 234 pylint3 \
235 235 python-all-dev \
236 236 python-dev \
237 237 python-docutils \
238 238 python-fuzzywuzzy \
239 239 python-pygments \
240 240 python-subversion \
241 241 python-vcr \
242 242 python3-dev \
243 243 python3-docutils \
244 244 python3-fuzzywuzzy \
245 245 python3-pygments \
246 246 python3-vcr \
247 247 rsync \
248 248 sqlite3 \
249 249 subversion \
250 250 tcl-dev \
251 251 tk-dev \
252 252 tla \
253 253 unzip \
254 254 uuid-dev \
255 255 vim \
256 256 virtualenv \
257 257 wget \
258 258 xfsprogs \
259 259 zip \
260 260 zlib1g-dev"
261 261
262 262 if [ "$DEBIAN_VERSION" = "9.8" ]; then
263 263 PACKAGES="$PACKAGES linux-perf"
264 264 elif [ "$DISTRO" = "Ubuntu" ]; then
265 265 PACKAGES="$PACKAGES linux-tools-common"
266 266 fi
267 267
268 268 # Ubuntu 19.04 removes monotone.
269 269 if [ "$LSB_RELEASE" != "disco" ]; then
270 270 PACKAGES="$PACKAGES monotone"
271 271 fi
272 272
273 273 # As of April 27, 2019, Docker hasn't published packages for
274 274 # Ubuntu 19.04 yet.
275 275 if [ "$LSB_RELEASE" != "disco" ]; then
276 276 PACKAGES="$PACKAGES docker-ce"
277 277 fi
278 278
279 279 sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install --no-install-recommends $PACKAGES
280 280
281 281 # Create clang-format symlink so test harness finds it.
282 282 sudo update-alternatives --install /usr/bin/clang-format clang-format \
283 283 /usr/bin/clang-format-6.0 1000
284 284
285 285 sudo mkdir /hgdev
286 286 # Will be normalized to hg:hg later.
287 287 sudo chown `whoami` /hgdev
288 288
289 289 cp requirements-py2.txt /hgdev/requirements-py2.txt
290 290 cp requirements-py3.txt /hgdev/requirements-py3.txt
291 291
292 292 # Disable the pip version check because it uses the network and can
293 293 # be annoying.
294 294 cat << EOF | sudo tee -a /etc/pip.conf
295 295 [global]
296 296 disable-pip-version-check = True
297 297 EOF
298 298
299 299 {install_pythons}
300 300 {bootstrap_virtualenv}
301 301
302 302 /hgdev/venv-bootstrap/bin/hg clone https://www.mercurial-scm.org/repo/hg /hgdev/src
303 303
304 304 # Mark the repo as non-publishing.
305 305 cat >> /hgdev/src/.hg/hgrc << EOF
306 306 [phases]
307 307 publish = false
308 308 EOF
309 309
310 310 sudo chown -R hg:hg /hgdev
311 311 '''.lstrip().format(
312 312 install_pythons=INSTALL_PYTHONS,
313 313 bootstrap_virtualenv=BOOTSTRAP_VIRTUALENV
314 314 ).replace('\r\n', '\n')
315 315
316 316
317 317 # Prepares /hgdev for operations.
318 318 PREPARE_HGDEV = '''
319 319 #!/bin/bash
320 320
321 321 set -e
322 322
323 323 FS=$1
324 324
325 325 ensure_device() {
326 326 if [ -z "${DEVICE}" ]; then
327 327 echo "could not find block device to format"
328 328 exit 1
329 329 fi
330 330 }
331 331
332 332 # Determine device to partition for extra filesystem.
333 333 # If only 1 volume is present, it will be the root volume and
334 334 # should be /dev/nvme0n1. If multiple volumes are present, the
335 335 # root volume could be nvme0 or nvme1. Use whichever one doesn't have
336 336 # a partition.
337 337 if [ -e /dev/nvme1n1 ]; then
338 338 if [ -e /dev/nvme0n1p1 ]; then
339 339 DEVICE=/dev/nvme1n1
340 340 else
341 341 DEVICE=/dev/nvme0n1
342 342 fi
343 343 else
344 344 DEVICE=
345 345 fi
346 346
347 347 sudo mkdir /hgwork
348 348
349 349 if [ "${FS}" != "default" -a "${FS}" != "tmpfs" ]; then
350 350 ensure_device
351 351 echo "creating ${FS} filesystem on ${DEVICE}"
352 352 fi
353 353
354 354 if [ "${FS}" = "default" ]; then
355 355 :
356 356
357 357 elif [ "${FS}" = "btrfs" ]; then
358 358 sudo mkfs.btrfs ${DEVICE}
359 359 sudo mount ${DEVICE} /hgwork
360 360
361 361 elif [ "${FS}" = "ext3" ]; then
362 362 # lazy_journal_init speeds up filesystem creation at the expense of
363 363 # integrity if things crash. We are an ephemeral instance, so we don't
364 364 # care about integrity.
365 365 sudo mkfs.ext3 -E lazy_journal_init=1 ${DEVICE}
366 366 sudo mount ${DEVICE} /hgwork
367 367
368 368 elif [ "${FS}" = "ext4" ]; then
369 369 sudo mkfs.ext4 -E lazy_journal_init=1 ${DEVICE}
370 370 sudo mount ${DEVICE} /hgwork
371 371
372 372 elif [ "${FS}" = "jfs" ]; then
373 373 sudo mkfs.jfs ${DEVICE}
374 374 sudo mount ${DEVICE} /hgwork
375 375
376 376 elif [ "${FS}" = "tmpfs" ]; then
377 377 echo "creating tmpfs volume in /hgwork"
378 378 sudo mount -t tmpfs -o size=1024M tmpfs /hgwork
379 379
380 380 elif [ "${FS}" = "xfs" ]; then
381 381 sudo mkfs.xfs ${DEVICE}
382 382 sudo mount ${DEVICE} /hgwork
383 383
384 384 else
385 385 echo "unsupported filesystem: ${FS}"
386 386 exit 1
387 387 fi
388 388
389 389 echo "/hgwork ready"
390 390
391 391 sudo chown hg:hg /hgwork
392 392 mkdir /hgwork/tmp
393 393 chown hg:hg /hgwork/tmp
394 394
395 395 rsync -a /hgdev/src /hgwork/
396 396 '''.lstrip().replace('\r\n', '\n')
397 397
398 398
399 399 HG_UPDATE_CLEAN = '''
400 400 set -ex
401 401
402 402 HG=/hgdev/venv-bootstrap/bin/hg
403 403
404 404 cd /hgwork/src
405 405 ${HG} --config extensions.purge= purge --all
406 406 ${HG} update -C $1
407 407 ${HG} log -r .
408 408 '''.lstrip().replace('\r\n', '\n')
409 409
410 410
411 411 def prepare_exec_environment(ssh_client, filesystem='default'):
412 412 """Prepare an EC2 instance to execute things.
413 413
414 414 The AMI has an ``/hgdev`` bootstrapped with various Python installs
415 415 and a clone of the Mercurial repo.
416 416
417 417 In EC2, EBS volumes launched from snapshots have wonky performance behavior.
418 418 Notably, blocks have to be copied on first access, which makes volume
419 419 I/O extremely slow on fresh volumes.
420 420
421 421 Furthermore, we may want to run operations, tests, etc. on alternative
422 422 filesystems so we can examine behavior on different filesystems.
423 423
424 424 This function is used to facilitate executing operations on alternate
425 425 volumes.
426 426 """
427 427 sftp = ssh_client.open_sftp()
428 428
429 429 with sftp.open('/hgdev/prepare-hgdev', 'wb') as fh:
430 430 fh.write(PREPARE_HGDEV)
431 431 fh.chmod(0o0777)
432 432
433 433 command = 'sudo /hgdev/prepare-hgdev %s' % filesystem
434 434 chan, stdin, stdout = exec_command(ssh_client, command)
435 435 stdin.close()
436 436
437 437 for line in stdout:
438 438 print(line, end='')
439 439
440 440 res = chan.recv_exit_status()
441 441
442 442 if res:
443 443 raise Exception('non-0 exit code preparing exec environment; %d'
444 444 % res)
445 445
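A usage sketch for prepare_exec_environment, assuming a connected paramiko SSHClient (the hostname is illustrative; the client API used here — open_sftp(), recv_exit_status() — matches paramiko's):

```python
import paramiko

client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect('203.0.113.10', username='hg')

# Format and mount /hgwork on ext4 before running tests there.
prepare_exec_environment(client, filesystem='ext4')
```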
446 446
447 447 def synchronize_hg(source_path: pathlib.Path, ec2_instance, revision: str=None):
448 448 """Synchronize a local Mercurial source path to remote EC2 instance."""
449 449
450 450 with tempfile.TemporaryDirectory() as temp_dir:
451 451 temp_dir = pathlib.Path(temp_dir)
452 452
453 453 ssh_dir = temp_dir / '.ssh'
454 454 ssh_dir.mkdir()
455 455 ssh_dir.chmod(0o0700)
456 456
457 457 public_ip = ec2_instance.public_ip_address
458 458
459 459 ssh_config = ssh_dir / 'config'
460 460
461 461 with ssh_config.open('w', encoding='utf-8') as fh:
462 462 fh.write('Host %s\n' % public_ip)
463 463 fh.write(' User hg\n')
464 464 fh.write(' StrictHostKeyChecking no\n')
465 465 fh.write(' UserKnownHostsFile %s\n' % (ssh_dir / 'known_hosts'))
466 466 fh.write(' IdentityFile %s\n' % ec2_instance.ssh_private_key_path)
467 467
468 468 if not (source_path / '.hg').is_dir():
469 469 raise Exception('%s is not a Mercurial repository; synchronization '
470 470 'not yet supported' % source_path)
471 471
472 472 env = dict(os.environ)
473 473 env['HGPLAIN'] = '1'
474 474 env['HGENCODING'] = 'utf-8'
475 475
476 476 hg_bin = source_path / 'hg'
477 477
478 478 res = subprocess.run(
479 479 ['python2.7', str(hg_bin), 'log', '-r', revision, '-T', '{node}'],
480 480 cwd=str(source_path), env=env, check=True, capture_output=True)
481 481
482 482 full_revision = res.stdout.decode('ascii')
483 483
484 484 args = [
485 485 'python2.7', str(hg_bin),
486 486 '--config', 'ui.ssh=ssh -F %s' % ssh_config,
487 487 '--config', 'ui.remotecmd=/hgdev/venv-bootstrap/bin/hg',
488 'push', '-f', '-r', full_revision,
488 # Also ensure .hgtags changes are present so auto version
489 # calculation works.
490 'push', '-f', '-r', full_revision, '-r', 'file(.hgtags)',
489 491 'ssh://%s//hgwork/src' % public_ip,
490 492 ]
491 493
492 494 res = subprocess.run(args, cwd=str(source_path), env=env)
493 495
494 496 # Allow 1 (no-op) to not trigger error.
495 497 if res.returncode not in (0, 1):
496 498 res.check_returncode()
497 499
498 500 # TODO support synchronizing dirty working directory.
499 501
500 502 sftp = ec2_instance.ssh_client.open_sftp()
501 503
502 504 with sftp.open('/hgdev/hgup', 'wb') as fh:
503 505 fh.write(HG_UPDATE_CLEAN)
504 506 fh.chmod(0o0700)
505 507
506 508 chan, stdin, stdout = exec_command(
507 509 ec2_instance.ssh_client, '/hgdev/hgup %s' % full_revision)
508 510 stdin.close()
509 511
510 512 for line in stdout:
511 513 print(line, end='')
512 514
513 515 res = chan.recv_exit_status()
514 516
515 517 if res:
516 518 raise Exception('non-0 exit code updating working directory; %d'
517 519 % res)
518 520
519 521
520 522 def run_tests(ssh_client, python_version, test_flags=None):
521 523 """Run tests on a remote Linux machine via an SSH client."""
522 524 test_flags = test_flags or []
523 525
524 526 print('running tests')
525 527
526 528 if python_version == 'system2':
527 529 python = '/usr/bin/python2'
528 530 elif python_version == 'system3':
529 531 python = '/usr/bin/python3'
530 532 elif python_version.startswith('pypy'):
531 533 python = '/hgdev/pyenv/shims/%s' % python_version
532 534 else:
533 535 python = '/hgdev/pyenv/shims/python%s' % python_version
534 536
535 537 test_flags = ' '.join(shlex.quote(a) for a in test_flags)
536 538
537 539 command = (
538 540 '/bin/sh -c "export TMPDIR=/hgwork/tmp; '
539 541 'cd /hgwork/src/tests && %s run-tests.py %s"' % (
540 542 python, test_flags))
541 543
542 544 chan, stdin, stdout = exec_command(ssh_client, command)
543 545
544 546 stdin.close()
545 547
546 548 for line in stdout:
547 549 print(line, end='')
548 550
549 551 return chan.recv_exit_status()
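
And a short sketch of driving run_tests with the same SSH client as above; the flag values are illustrative:

```python
# Runs /hgdev/pyenv/shims/python3.7 run-tests.py --jobs 4 on the remote,
# streaming its output; returns the remote exit status.
rc = run_tests(client, '3.7', test_flags=['--jobs', '4'])
if rc:
    print('tests exited non-0: %d' % rc)
```
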
@@ -1,296 +1,298 @@
1 1 # windows.py - Automation specific to Windows
2 2 #
3 3 # Copyright 2019 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 # no-check-code because Python 3 native.
9 9
10 10 import os
11 11 import pathlib
12 12 import re
13 13 import subprocess
14 14 import tempfile
15 15
16 16 from .winrm import (
17 17 run_powershell,
18 18 )
19 19
20 20
21 21 # PowerShell commands to activate a Visual Studio 2008 environment.
22 22 # This is essentially a port of vcvarsall.bat to PowerShell.
23 23 ACTIVATE_VC9_AMD64 = r'''
24 24 Write-Output "activating Visual Studio 2008 environment for AMD64"
25 25 $root = "$env:LOCALAPPDATA\Programs\Common\Microsoft\Visual C++ for Python\9.0"
26 26 $Env:VCINSTALLDIR = "${root}\VC\"
27 27 $Env:WindowsSdkDir = "${root}\WinSDK\"
28 28 $Env:PATH = "${root}\VC\Bin\amd64;${root}\WinSDK\Bin\x64;${root}\WinSDK\Bin;$Env:PATH"
29 29 $Env:INCLUDE = "${root}\VC\Include;${root}\WinSDK\Include;$Env:INCLUDE"
30 30 $Env:LIB = "${root}\VC\Lib\amd64;${root}\WinSDK\Lib\x64;$Env:LIB"
31 31 $Env:LIBPATH = "${root}\VC\Lib\amd64;${root}\WinSDK\Lib\x64;$Env:LIBPATH"
32 32 '''.lstrip()
33 33
34 34 ACTIVATE_VC9_X86 = r'''
35 35 Write-Output "activating Visual Studio 2008 environment for x86"
36 36 $root = "$env:LOCALAPPDATA\Programs\Common\Microsoft\Visual C++ for Python\9.0"
37 37 $Env:VCINSTALLDIR = "${root}\VC\"
38 38 $Env:WindowsSdkDir = "${root}\WinSDK\"
39 39 $Env:PATH = "${root}\VC\Bin;${root}\WinSDK\Bin;$Env:PATH"
40 40 $Env:INCLUDE = "${root}\VC\Include;${root}\WinSDK\Include;$Env:INCLUDE"
41 41 $Env:LIB = "${root}\VC\Lib;${root}\WinSDK\Lib;$Env:LIB"
42 42 $Env:LIBPATH = "${root}\VC\lib;${root}\WinSDK\Lib;$Env:LIBPATH"
43 43 '''.lstrip()
44 44
45 45 HG_PURGE = r'''
46 46 $Env:PATH = "C:\hgdev\venv-bootstrap\Scripts;$Env:PATH"
47 47 Set-Location C:\hgdev\src
48 48 hg.exe --config extensions.purge= purge --all
49 49 if ($LASTEXITCODE -ne 0) {
50 50 throw "process exited non-0: $LASTEXITCODE"
51 51 }
52 52 Write-Output "purged Mercurial repo"
53 53 '''
54 54
55 55 HG_UPDATE_CLEAN = r'''
56 56 $Env:PATH = "C:\hgdev\venv-bootstrap\Scripts;$Env:PATH"
57 57 Set-Location C:\hgdev\src
58 58 hg.exe --config extensions.purge= purge --all
59 59 if ($LASTEXITCODE -ne 0) {{
60 60 throw "process exited non-0: $LASTEXITCODE"
61 61 }}
62 62 hg.exe update -C {revision}
63 63 if ($LASTEXITCODE -ne 0) {{
64 64 throw "process exited non-0: $LASTEXITCODE"
65 65 }}
66 66 hg.exe log -r .
67 67 Write-Output "updated Mercurial working directory to {revision}"
68 68 '''.lstrip()
69 69
70 70 BUILD_INNO = r'''
71 71 Set-Location C:\hgdev\src
72 72 $python = "C:\hgdev\python27-{arch}\python.exe"
73 73 C:\hgdev\python37-x64\python.exe contrib\packaging\inno\build.py --python $python {extra_args}
74 74 if ($LASTEXITCODE -ne 0) {{
75 75 throw "process exited non-0: $LASTEXITCODE"
76 76 }}
77 77 '''.lstrip()
78 78
79 79 BUILD_WHEEL = r'''
80 80 Set-Location C:\hgdev\src
81 81 C:\hgdev\python27-{arch}\Scripts\pip.exe wheel --wheel-dir dist .
82 82 if ($LASTEXITCODE -ne 0) {{
83 83 throw "process exited non-0: $LASTEXITCODE"
84 84 }}
85 85 '''
86 86
87 87 BUILD_WIX = r'''
88 88 Set-Location C:\hgdev\src
89 89 $python = "C:\hgdev\python27-{arch}\python.exe"
90 90 C:\hgdev\python37-x64\python.exe contrib\packaging\wix\build.py --python $python {extra_args}
91 91 if ($LASTEXITCODE -ne 0) {{
92 92 throw "process exited non-0: $LASTEXITCODE"
93 93 }}
94 94 '''
95 95
96 96 RUN_TESTS = r'''
97 97 C:\hgdev\MinGW\msys\1.0\bin\sh.exe --login -c "cd /c/hgdev/src/tests && /c/hgdev/{python_path}/python.exe run-tests.py {test_flags}"
98 98 if ($LASTEXITCODE -ne 0) {{
99 99 throw "process exited non-0: $LASTEXITCODE"
100 100 }}
101 101 '''
102 102
103 103
104 104 def get_vc_prefix(arch):
105 105 if arch == 'x86':
106 106 return ACTIVATE_VC9_X86
107 107 elif arch == 'x64':
108 108 return ACTIVATE_VC9_AMD64
109 109 else:
110 110 raise ValueError('illegal arch: %s; must be x86 or x64' % arch)
111 111
112 112
113 113 def fix_authorized_keys_permissions(winrm_client, path):
114 114 commands = [
115 115 '$ErrorActionPreference = "Stop"',
116 116 'Repair-AuthorizedKeyPermission -FilePath %s -Confirm:$false' % path,
117 117 r'icacls %s /remove:g "NT Service\sshd"' % path,
118 118 ]
119 119
120 120 run_powershell(winrm_client, '\n'.join(commands))
121 121
122 122
123 123 def synchronize_hg(hg_repo: pathlib.Path, revision: str, ec2_instance):
124 124 """Synchronize local Mercurial repo to remote EC2 instance."""
125 125
126 126 winrm_client = ec2_instance.winrm_client
127 127
128 128 with tempfile.TemporaryDirectory() as temp_dir:
129 129 temp_dir = pathlib.Path(temp_dir)
130 130
131 131 ssh_dir = temp_dir / '.ssh'
132 132 ssh_dir.mkdir()
133 133 ssh_dir.chmod(0o0700)
134 134
135 135 # Generate SSH key to use for communication.
136 136 subprocess.run([
137 137 'ssh-keygen', '-t', 'rsa', '-b', '4096', '-N', '',
138 138 '-f', str(ssh_dir / 'id_rsa')],
139 139 check=True, capture_output=True)
140 140
141 141 # Add it to ~/.ssh/authorized_keys on remote.
142 142 # This assumes the file doesn't already exist.
143 143 authorized_keys = r'c:\Users\Administrator\.ssh\authorized_keys'
144 144 winrm_client.execute_cmd(r'mkdir c:\Users\Administrator\.ssh')
145 145 winrm_client.copy(str(ssh_dir / 'id_rsa.pub'), authorized_keys)
146 146 fix_authorized_keys_permissions(winrm_client, authorized_keys)
147 147
148 148 public_ip = ec2_instance.public_ip_address
149 149
150 150 ssh_config = temp_dir / '.ssh' / 'config'
151 151
152 152 with open(ssh_config, 'w', encoding='utf-8') as fh:
153 153 fh.write('Host %s\n' % public_ip)
154 154 fh.write(' User Administrator\n')
155 155 fh.write(' StrictHostKeyChecking no\n')
156 156 fh.write(' UserKnownHostsFile %s\n' % (ssh_dir / 'known_hosts'))
157 157 fh.write(' IdentityFile %s\n' % (ssh_dir / 'id_rsa'))
158 158
159 159 if not (hg_repo / '.hg').is_dir():
160 160 raise Exception('%s is not a Mercurial repository; '
161 161 'synchronization not yet supported' % hg_repo)
162 162
163 163 env = dict(os.environ)
164 164 env['HGPLAIN'] = '1'
165 165 env['HGENCODING'] = 'utf-8'
166 166
167 167 hg_bin = hg_repo / 'hg'
168 168
169 169 res = subprocess.run(
170 170 ['python2.7', str(hg_bin), 'log', '-r', revision, '-T', '{node}'],
171 171 cwd=str(hg_repo), env=env, check=True, capture_output=True)
172 172
173 173 full_revision = res.stdout.decode('ascii')
174 174
175 175 args = [
176 176 'python2.7', str(hg_bin),
177 177 '--config', 'ui.ssh=ssh -F %s' % ssh_config,
178 178 '--config', 'ui.remotecmd=c:/hgdev/venv-bootstrap/Scripts/hg.exe',
179 'push', '-f', '-r', full_revision,
179 # Also ensure .hgtags changes are present so auto version
180 # calculation works.
181 'push', '-f', '-r', full_revision, '-r', 'file(.hgtags)',
180 182 'ssh://%s/c:/hgdev/src' % public_ip,
181 183 ]
182 184
183 185 res = subprocess.run(args, cwd=str(hg_repo), env=env)
184 186
185 187 # Allow 1 (no-op) to not trigger error.
186 188 if res.returncode not in (0, 1):
187 189 res.check_returncode()
188 190
189 191 run_powershell(winrm_client,
190 192 HG_UPDATE_CLEAN.format(revision=full_revision))
191 193
192 194 # TODO detect dirty local working directory and synchronize accordingly.
193 195
194 196
195 197 def purge_hg(winrm_client):
196 198 """Purge the Mercurial source repository on an EC2 instance."""
197 199 run_powershell(winrm_client, HG_PURGE)
198 200
199 201
200 202 def find_latest_dist(winrm_client, pattern):
201 203 """Find path to newest file in dist/ directory matching a pattern."""
202 204
203 205 res = winrm_client.execute_ps(
204 206 r'$v = Get-ChildItem -Path C:\hgdev\src\dist -Filter "%s" '
205 207 '| Sort-Object LastWriteTime -Descending '
206 208 '| Select-Object -First 1\n'
207 209 '$v.name' % pattern
208 210 )
209 211 return res[0]
210 212
211 213
212 214 def copy_latest_dist(winrm_client, pattern, dest_path):
213 215 """Copy latest file matching pattern in dist/ directory.
214 216
215 217 Given a WinRM client and a file pattern, find the latest file on the remote
216 218 matching that pattern and copy it to the ``dest_path`` directory on the
217 219 local machine.
218 220 """
219 221 latest = find_latest_dist(winrm_client, pattern)
220 222 source = r'C:\hgdev\src\dist\%s' % latest
221 223 dest = dest_path / latest
222 224 print('copying %s to %s' % (source, dest))
223 225 winrm_client.fetch(source, str(dest))
224 226
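A usage sketch for copy_latest_dist, assuming a connected WinRM client of the kind exposed as ec2_instance.winrm_client above (the destination directory is illustrative):

```python
import pathlib

dest = pathlib.Path('dist')
dest.mkdir(exist_ok=True)

# Fetch the most recently built wheel from C:\hgdev\src\dist locally.
copy_latest_dist(winrm_client, '*.whl', dest)
```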
225 227
226 228 def build_inno_installer(winrm_client, arch: str, dest_path: pathlib.Path,
227 229 version=None):
228 230 """Build the Inno Setup installer on a remote machine.
229 231
230 232 Using a WinRM client, remote commands are executed to build
231 233 a Mercurial Inno Setup installer.
232 234 """
233 235 print('building Inno Setup installer for %s' % arch)
234 236
235 237 extra_args = []
236 238 if version:
237 239 extra_args.extend(['--version', version])
238 240
239 241 ps = get_vc_prefix(arch) + BUILD_INNO.format(arch=arch,
240 242 extra_args=' '.join(extra_args))
241 243 run_powershell(winrm_client, ps)
242 244 copy_latest_dist(winrm_client, '*.exe', dest_path)
243 245
244 246
245 247 def build_wheel(winrm_client, arch: str, dest_path: pathlib.Path):
246 248 """Build Python wheels on a remote machine.
247 249
248 250 Using a WinRM client, remote commands are executed to build a Python wheel
249 251 for Mercurial.
250 252 """
251 253 print('Building Windows wheel for %s' % arch)
252 254 ps = get_vc_prefix(arch) + BUILD_WHEEL.format(arch=arch)
253 255 run_powershell(winrm_client, ps)
254 256 copy_latest_dist(winrm_client, '*.whl', dest_path)
255 257
256 258
257 259 def build_wix_installer(winrm_client, arch: str, dest_path: pathlib.Path,
258 260 version=None):
259 261 """Build the WiX installer on a remote machine.
260 262
261 263 Using a WinRM client, remote commands are executed to build a WiX installer.
262 264 """
263 265 print('Building WiX installer for %s' % arch)
264 266 extra_args = []
265 267 if version:
266 268 extra_args.extend(['--version', version])
267 269
268 270 ps = get_vc_prefix(arch) + BUILD_WIX.format(arch=arch,
269 271 extra_args=' '.join(extra_args))
270 272 run_powershell(winrm_client, ps)
271 273 copy_latest_dist(winrm_client, '*.msi', dest_path)
272 274
273 275
274 276 def run_tests(winrm_client, python_version, arch, test_flags=''):
275 277 """Run tests on a remote Windows machine.
276 278
277 279 ``python_version`` is a ``X.Y`` string like ``2.7`` or ``3.7``.
278 280 ``arch`` is ``x86`` or ``x64``.
279 281 ``test_flags`` is a str representing extra arguments to pass to
280 282 ``run-tests.py``.
281 283 """
282 284 if not re.match(r'\d\.\d', python_version):
283 285 raise ValueError(r'python_version must be \d.\d; got %s' %
284 286 python_version)
285 287
286 288 if arch not in ('x86', 'x64'):
287 289 raise ValueError('arch must be x86 or x64; got %s' % arch)
288 290
289 291 python_path = 'python%s-%s' % (python_version.replace('.', ''), arch)
290 292
291 293 ps = RUN_TESTS.format(
292 294 python_path=python_path,
293 295 test_flags=test_flags or '',
294 296 )
295 297
296 298 run_powershell(winrm_client, ps)
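
To make the python_path computation concrete, a self-contained sketch mirroring the string formatting above:

```python
def python_dir(python_version, arch):
    # '3.7', 'x64' -> 'python37-x64', matching C:\hgdev\python37-x64.
    return 'python%s-%s' % (python_version.replace('.', ''), arch)

assert python_dir('2.7', 'x86') == 'python27-x86'
assert python_dir('3.7', 'x64') == 'python37-x64'
```
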
@@ -1,3253 +1,3302 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import os
13 13 import random
14 14 import sys
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 bin,
21 21 hex,
22 22 nullid,
23 23 nullrev,
24 24 short,
25 25 )
26 26 from . import (
27 27 bookmarks,
28 28 branchmap,
29 29 bundle2,
30 30 changegroup,
31 31 changelog,
32 32 color,
33 33 context,
34 34 dirstate,
35 35 dirstateguard,
36 36 discovery,
37 37 encoding,
38 38 error,
39 39 exchange,
40 40 extensions,
41 41 filelog,
42 42 hook,
43 43 lock as lockmod,
44 44 manifest,
45 45 match as matchmod,
46 46 merge as mergemod,
47 47 mergeutil,
48 48 namespaces,
49 49 narrowspec,
50 50 obsolete,
51 51 pathutil,
52 52 phases,
53 53 pushkey,
54 54 pycompat,
55 55 repository,
56 56 repoview,
57 57 revset,
58 58 revsetlang,
59 59 scmutil,
60 60 sparse,
61 61 store as storemod,
62 62 subrepoutil,
63 63 tags as tagsmod,
64 64 transaction,
65 65 txnutil,
66 66 util,
67 67 vfs as vfsmod,
68 68 )
69 69 from .utils import (
70 70 interfaceutil,
71 71 procutil,
72 72 stringutil,
73 73 )
74 74
75 75 from .revlogutils import (
76 76 constants as revlogconst,
77 77 )
78 78
79 79 release = lockmod.release
80 80 urlerr = util.urlerr
81 81 urlreq = util.urlreq
82 82
83 83 # set of (path, vfs-location) tuples. vfs-location is:
84 84 # - 'plain' for vfs relative paths
85 85 # - '' for svfs relative paths
86 86 _cachedfiles = set()
87 87
88 88 class _basefilecache(scmutil.filecache):
89 89 """All filecache usage on repo is done for logic that should be unfiltered
90 90 """
91 91 def __get__(self, repo, type=None):
92 92 if repo is None:
93 93 return self
94 94 # proxy to unfiltered __dict__ since filtered repo has no entry
95 95 unfi = repo.unfiltered()
96 96 try:
97 97 return unfi.__dict__[self.sname]
98 98 except KeyError:
99 99 pass
100 100 return super(_basefilecache, self).__get__(unfi, type)
101 101
102 102 def set(self, repo, value):
103 103 return super(_basefilecache, self).set(repo.unfiltered(), value)
104 104
105 105 class repofilecache(_basefilecache):
106 106 """filecache for files in .hg but outside of .hg/store"""
107 107 def __init__(self, *paths):
108 108 super(repofilecache, self).__init__(*paths)
109 109 for path in paths:
110 110 _cachedfiles.add((path, 'plain'))
111 111
112 112 def join(self, obj, fname):
113 113 return obj.vfs.join(fname)
114 114
115 115 class storecache(_basefilecache):
116 116 """filecache for files in the store"""
117 117 def __init__(self, *paths):
118 118 super(storecache, self).__init__(*paths)
119 119 for path in paths:
120 120 _cachedfiles.add((path, ''))
121 121
122 122 def join(self, obj, fname):
123 123 return obj.sjoin(fname)
124 124
125 125 class mixedrepostorecache(_basefilecache):
126 126 """filecache for a mix of files in .hg/store and outside"""
127 127 def __init__(self, *pathsandlocations):
128 128 # scmutil.filecache only uses the path for passing back into our
129 129 # join(), so we can safely pass a list of paths and locations
130 130 super(mixedrepostorecache, self).__init__(*pathsandlocations)
131 131 _cachedfiles.update(pathsandlocations)
132 132
133 133 def join(self, obj, fnameandlocation):
134 134 fname, location = fnameandlocation
135 135 if location == 'plain':
136 136 return obj.vfs.join(fname)
137 137 else:
138 138 if location != '':
139 139 raise error.ProgrammingError('unexpected location: %s' %
140 140 location)
141 141 return obj.sjoin(fname)
142 142
143 143 def isfilecached(repo, name):
144 144 """check if a repo has already cached the "name" filecache-ed property
145 145
146 146 This returns (cachedobj-or-None, iscached) tuple.
147 147 """
148 148 cacheentry = repo.unfiltered()._filecache.get(name, None)
149 149 if not cacheentry:
150 150 return None, False
151 151 return cacheentry.obj, True
152 152
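For example, given a repo object, a caller can probe for a cached property without forcing its (possibly expensive) computation; '_bookmarks' here stands in for any filecache-ed property name:

```python
obj, cached = isfilecached(repo, '_bookmarks')
if cached:
    print('bookmarks already loaded: %r' % obj)
```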
153 153 class unfilteredpropertycache(util.propertycache):
154 154 """propertycache that apply to unfiltered repo only"""
155 155
156 156 def __get__(self, repo, type=None):
157 157 unfi = repo.unfiltered()
158 158 if unfi is repo:
159 159 return super(unfilteredpropertycache, self).__get__(unfi)
160 160 return getattr(unfi, self.name)
161 161
162 162 class filteredpropertycache(util.propertycache):
163 163 """propertycache that must take filtering into account"""
164 164
165 165 def cachevalue(self, obj, value):
166 166 object.__setattr__(obj, self.name, value)
167 167
168 168
169 169 def hasunfilteredcache(repo, name):
170 170 """check if a repo has an unfilteredpropertycache value for <name>"""
171 171 return name in vars(repo.unfiltered())
172 172
173 173 def unfilteredmethod(orig):
174 174 """decorate method that always need to be run on unfiltered version"""
175 175 def wrapper(repo, *args, **kwargs):
176 176 return orig(repo.unfiltered(), *args, **kwargs)
177 177 return wrapper
178 178
179 179 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
180 180 'unbundle'}
181 181 legacycaps = moderncaps.union({'changegroupsubset'})
182 182
183 183 @interfaceutil.implementer(repository.ipeercommandexecutor)
184 184 class localcommandexecutor(object):
185 185 def __init__(self, peer):
186 186 self._peer = peer
187 187 self._sent = False
188 188 self._closed = False
189 189
190 190 def __enter__(self):
191 191 return self
192 192
193 193 def __exit__(self, exctype, excvalue, exctb):
194 194 self.close()
195 195
196 196 def callcommand(self, command, args):
197 197 if self._sent:
198 198 raise error.ProgrammingError('callcommand() cannot be used after '
199 199 'sendcommands()')
200 200
201 201 if self._closed:
202 202 raise error.ProgrammingError('callcommand() cannot be used after '
203 203 'close()')
204 204
205 205 # We don't need to support anything fancy. Just call the named
206 206 # method on the peer and return a resolved future.
207 207 fn = getattr(self._peer, pycompat.sysstr(command))
208 208
209 209 f = pycompat.futures.Future()
210 210
211 211 try:
212 212 result = fn(**pycompat.strkwargs(args))
213 213 except Exception:
214 214 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
215 215 else:
216 216 f.set_result(result)
217 217
218 218 return f
219 219
220 220 def sendcommands(self):
221 221 self._sent = True
222 222
223 223 def close(self):
224 224 self._closed = True
225 225
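The executor is consumed through the peer interface; a minimal sketch of the calling convention (the command name is illustrative, and ``peer`` is assumed to be any object exposing commandexecutor()):

```python
def remote_heads(peer):
    # callcommand() returns a future; sendcommands() marks the batch as
    # submitted. For the local executor the future is already resolved.
    with peer.commandexecutor() as executor:
        f = executor.callcommand(b'heads', {})
        executor.sendcommands()
    return f.result()
```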
226 226 @interfaceutil.implementer(repository.ipeercommands)
227 227 class localpeer(repository.peer):
228 228 '''peer for a local repo; reflects only the most recent API'''
229 229
230 230 def __init__(self, repo, caps=None):
231 231 super(localpeer, self).__init__()
232 232
233 233 if caps is None:
234 234 caps = moderncaps.copy()
235 235 self._repo = repo.filtered('served')
236 236 self.ui = repo.ui
237 237 self._caps = repo._restrictcapabilities(caps)
238 238
239 239 # Begin of _basepeer interface.
240 240
241 241 def url(self):
242 242 return self._repo.url()
243 243
244 244 def local(self):
245 245 return self._repo
246 246
247 247 def peer(self):
248 248 return self
249 249
250 250 def canpush(self):
251 251 return True
252 252
253 253 def close(self):
254 254 self._repo.close()
255 255
256 256 # End of _basepeer interface.
257 257
258 258 # Begin of _basewirecommands interface.
259 259
260 260 def branchmap(self):
261 261 return self._repo.branchmap()
262 262
263 263 def capabilities(self):
264 264 return self._caps
265 265
266 266 def clonebundles(self):
267 267 return self._repo.tryread('clonebundles.manifest')
268 268
269 269 def debugwireargs(self, one, two, three=None, four=None, five=None):
270 270 """Used to test argument passing over the wire"""
271 271 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
272 272 pycompat.bytestr(four),
273 273 pycompat.bytestr(five))
274 274
275 275 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
276 276 **kwargs):
277 277 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
278 278 common=common, bundlecaps=bundlecaps,
279 279 **kwargs)[1]
280 280 cb = util.chunkbuffer(chunks)
281 281
282 282 if exchange.bundle2requested(bundlecaps):
283 283 # When requesting a bundle2, getbundle returns a stream to make the
284 284 # wire level function happier. We need to build a proper object
285 285 # from it in local peer.
286 286 return bundle2.getunbundler(self.ui, cb)
287 287 else:
288 288 return changegroup.getunbundler('01', cb, None)
289 289
290 290 def heads(self):
291 291 return self._repo.heads()
292 292
293 293 def known(self, nodes):
294 294 return self._repo.known(nodes)
295 295
296 296 def listkeys(self, namespace):
297 297 return self._repo.listkeys(namespace)
298 298
299 299 def lookup(self, key):
300 300 return self._repo.lookup(key)
301 301
302 302 def pushkey(self, namespace, key, old, new):
303 303 return self._repo.pushkey(namespace, key, old, new)
304 304
305 305 def stream_out(self):
306 306 raise error.Abort(_('cannot perform stream clone against local '
307 307 'peer'))
308 308
309 309 def unbundle(self, bundle, heads, url):
310 310 """apply a bundle on a repo
311 311
312 312 This function handles the repo locking itself."""
313 313 try:
314 314 try:
315 315 bundle = exchange.readbundle(self.ui, bundle, None)
316 316 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
317 317 if util.safehasattr(ret, 'getchunks'):
318 318 # This is a bundle20 object, turn it into an unbundler.
319 319 # This little dance should be dropped eventually when the
320 320 # API is finally improved.
321 321 stream = util.chunkbuffer(ret.getchunks())
322 322 ret = bundle2.getunbundler(self.ui, stream)
323 323 return ret
324 324 except Exception as exc:
325 325 # If the exception contains output salvaged from a bundle2
326 326 # reply, we need to make sure it is printed before continuing
327 327 # to fail. So we build a bundle2 with such output and consume
328 328 # it directly.
329 329 #
330 330 # This is not very elegant but allows a "simple" solution for
331 331 # issue4594
332 332 output = getattr(exc, '_bundle2salvagedoutput', ())
333 333 if output:
334 334 bundler = bundle2.bundle20(self._repo.ui)
335 335 for out in output:
336 336 bundler.addpart(out)
337 337 stream = util.chunkbuffer(bundler.getchunks())
338 338 b = bundle2.getunbundler(self.ui, stream)
339 339 bundle2.processbundle(self._repo, b)
340 340 raise
341 341 except error.PushRaced as exc:
342 342 raise error.ResponseError(_('push failed:'),
343 343 stringutil.forcebytestr(exc))
344 344
345 345 # End of _basewirecommands interface.
346 346
347 347 # Begin of peer interface.
348 348
349 349 def commandexecutor(self):
350 350 return localcommandexecutor(self)
351 351
352 352 # End of peer interface.
353 353
354 354 @interfaceutil.implementer(repository.ipeerlegacycommands)
355 355 class locallegacypeer(localpeer):
356 356 '''peer extension which implements legacy methods too; used for tests with
357 357 restricted capabilities'''
358 358
359 359 def __init__(self, repo):
360 360 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
361 361
362 362 # Begin of baselegacywirecommands interface.
363 363
364 364 def between(self, pairs):
365 365 return self._repo.between(pairs)
366 366
367 367 def branches(self, nodes):
368 368 return self._repo.branches(nodes)
369 369
370 370 def changegroup(self, nodes, source):
371 371 outgoing = discovery.outgoing(self._repo, missingroots=nodes,
372 372 missingheads=self._repo.heads())
373 373 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
374 374
375 375 def changegroupsubset(self, bases, heads, source):
376 376 outgoing = discovery.outgoing(self._repo, missingroots=bases,
377 377 missingheads=heads)
378 378 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
379 379
380 380 # End of baselegacywirecommands interface.
381 381
382 382 # Increment the sub-version when the revlog v2 format changes to lock out old
383 383 # clients.
384 384 REVLOGV2_REQUIREMENT = 'exp-revlogv2.1'
385 385
386 386 # A repository with the sparserevlog feature will have delta chains that
387 387 # can spread over a larger span. Sparse reading cuts these large spans into
388 388 # pieces, so that each piece isn't too big.
389 389 # Without the sparserevlog capability, reading from the repository could use
390 390 # huge amounts of memory, because the whole span would be read at once,
391 391 # including all the intermediate revisions that aren't pertinent for the chain.
392 392 # This is why once a repository has enabled sparse-read, it becomes required.
393 393 SPARSEREVLOG_REQUIREMENT = 'sparserevlog'
394 394
395 395 # Functions receiving (ui, features) that extensions can register to impact
396 396 # the ability to load repositories with custom requirements. Only
397 397 # functions defined in loaded extensions are called.
398 398 #
399 399 # The function receives a set of requirement strings that the repository
400 400 # is capable of opening. Functions will typically add elements to the
401 401 # set to reflect that the extension knows how to handle those requirements.
402 402 featuresetupfuncs = set()
403 403
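An extension registers such a function from its setup hook; a minimal sketch (the requirement string is hypothetical):

```python
from mercurial import localrepo

def featuresetup(ui, supported):
    # Declare that this extension knows how to handle this requirement.
    supported.add(b'exp-myfeature')

def uisetup(ui):
    localrepo.featuresetupfuncs.add(featuresetup)
```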
404 404 def makelocalrepository(baseui, path, intents=None):
405 405 """Create a local repository object.
406 406
407 407 Given arguments needed to construct a local repository, this function
408 408 performs various early repository loading functionality (such as
409 409 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
410 410 the repository can be opened, derives a type suitable for representing
411 411 that repository, and returns an instance of it.
412 412
413 413 The returned object conforms to the ``repository.completelocalrepository``
414 414 interface.
415 415
416 416 The repository type is derived by calling a series of factory functions
417 417 for each aspect/interface of the final repository. These are defined by
418 418 ``REPO_INTERFACES``.
419 419
420 420 Each factory function is called to produce a type implementing a specific
421 421 interface. The cumulative list of returned types will be combined into a
422 422 new type and that type will be instantiated to represent the local
423 423 repository.
424 424
425 425 The factory functions each receive various state that may be consulted
426 426 as part of deriving a type.
427 427
428 428 Extensions should wrap these factory functions to customize repository type
429 429 creation. Note that an extension's wrapped function may be called even if
430 430 that extension is not loaded for the repo being constructed. Extensions
431 431 should check if their ``__name__`` appears in the
432 432 ``extensionmodulenames`` set passed to the factory function and no-op if
433 433 not.
434 434 """
435 435 ui = baseui.copy()
436 436 # Prevent copying repo configuration.
437 437 ui.copy = baseui.copy
438 438
439 439 # Working directory VFS rooted at repository root.
440 440 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
441 441
442 442 # Main VFS for .hg/ directory.
443 443 hgpath = wdirvfs.join(b'.hg')
444 444 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
445 445
446 446 # The .hg/ path should exist and should be a directory. All other
447 447 # cases are errors.
448 448 if not hgvfs.isdir():
449 449 try:
450 450 hgvfs.stat()
451 451 except OSError as e:
452 452 if e.errno != errno.ENOENT:
453 453 raise
454 454
455 455 raise error.RepoError(_(b'repository %s not found') % path)
456 456
457 457 # .hg/requires file contains a newline-delimited list of
458 458 # features/capabilities the opener (us) must have in order to use
459 459 # the repository. This file was introduced in Mercurial 0.9.2,
460 460 # which means very old repositories may not have one. We assume
461 461 # a missing file translates to no requirements.
462 462 try:
463 463 requirements = set(hgvfs.read(b'requires').splitlines())
464 464 except IOError as e:
465 465 if e.errno != errno.ENOENT:
466 466 raise
467 467 requirements = set()
468 468
469 469 # The .hg/hgrc file may load extensions or contain config options
470 470 # that influence repository construction. Attempt to load it and
471 471 # process any new extensions that it may have pulled in.
472 472 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
473 473 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
474 474 extensions.loadall(ui)
475 475 extensions.populateui(ui)
476 476
477 477 # Set of module names of extensions loaded for this repository.
478 478 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
479 479
480 480 supportedrequirements = gathersupportedrequirements(ui)
481 481
482 482 # We first validate the requirements are known.
483 483 ensurerequirementsrecognized(requirements, supportedrequirements)
484 484
485 485 # Then we validate that the known set is reasonable to use together.
486 486 ensurerequirementscompatible(ui, requirements)
487 487
488 488 # TODO there are unhandled edge cases related to opening repositories with
489 489 # shared storage. If storage is shared, we should also test for requirements
490 490 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
491 491 # that repo, as that repo may load extensions needed to open it. This is a
492 492 # bit complicated because we don't want the other hgrc to overwrite settings
493 493 # in this hgrc.
494 494 #
495 495 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
496 496 # file when sharing repos. But if a requirement is added after the share is
497 497 # performed, thereby introducing a new requirement for the opener, we
498 498 # will not see that and could encounter a run-time error interacting with
499 499 # that shared store since it has an unknown-to-us requirement.
500 500
501 501 # At this point, we know we should be capable of opening the repository.
502 502 # Now get on with doing that.
503 503
504 504 features = set()
505 505
506 506 # The "store" part of the repository holds versioned data. How it is
507 507 # accessed is determined by various requirements. The ``shared`` or
508 508 # ``relshared`` requirements indicate the store lives in the path contained
509 509 # in the ``.hg/sharedpath`` file. This is an absolute path for
510 510 # ``shared`` and relative to ``.hg/`` for ``relshared``.
511 511 if b'shared' in requirements or b'relshared' in requirements:
512 512 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
513 513 if b'relshared' in requirements:
514 514 sharedpath = hgvfs.join(sharedpath)
515 515
516 516 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
517 517
518 518 if not sharedvfs.exists():
519 519 raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
520 520 b'directory %s') % sharedvfs.base)
521 521
522 522 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
523 523
524 524 storebasepath = sharedvfs.base
525 525 cachepath = sharedvfs.join(b'cache')
526 526 else:
527 527 storebasepath = hgvfs.base
528 528 cachepath = hgvfs.join(b'cache')
529 529 wcachepath = hgvfs.join(b'wcache')
530 530
531 531
532 532 # The store has changed over time and the exact layout is dictated by
533 533 # requirements. The store interface abstracts differences across all
534 534 # of them.
535 535 store = makestore(requirements, storebasepath,
536 536 lambda base: vfsmod.vfs(base, cacheaudited=True))
537 537 hgvfs.createmode = store.createmode
538 538
539 539 storevfs = store.vfs
540 540 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
541 541
542 542 # The cache vfs is used to manage cache files.
543 543 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
544 544 cachevfs.createmode = store.createmode
545 545 # The cache vfs is used to manage cache files related to the working copy
546 546 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
547 547 wcachevfs.createmode = store.createmode
548 548
549 549 # Now resolve the type for the repository object. We do this by repeatedly
550 550 # calling a factory function to produce types for specific aspects of the
551 551 # repo's operation. The aggregate returned types are used as base classes
552 552 # for a dynamically-derived type, which will represent our new repository.
553 553
554 554 bases = []
555 555 extrastate = {}
556 556
557 557 for iface, fn in REPO_INTERFACES:
558 558 # We pass all potentially useful state to give extensions tons of
559 559 # flexibility.
560 560 typ = fn()(ui=ui,
561 561 intents=intents,
562 562 requirements=requirements,
563 563 features=features,
564 564 wdirvfs=wdirvfs,
565 565 hgvfs=hgvfs,
566 566 store=store,
567 567 storevfs=storevfs,
568 568 storeoptions=storevfs.options,
569 569 cachevfs=cachevfs,
570 570 wcachevfs=wcachevfs,
571 571 extensionmodulenames=extensionmodulenames,
572 572 extrastate=extrastate,
573 573 baseclasses=bases)
574 574
575 575 if not isinstance(typ, type):
576 576 raise error.ProgrammingError('unable to construct type for %s' %
577 577 iface)
578 578
579 579 bases.append(typ)
580 580
581 581 # type() allows you to use characters in type names that wouldn't be
582 582 # recognized as Python symbols in source code. We abuse that to add
583 583 # rich information about our constructed repo.
584 584 name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
585 585 wdirvfs.base,
586 586 b','.join(sorted(requirements))))
587 587
588 588 cls = type(name, tuple(bases), {})
589 589
590 590 return cls(
591 591 baseui=baseui,
592 592 ui=ui,
593 593 origroot=path,
594 594 wdirvfs=wdirvfs,
595 595 hgvfs=hgvfs,
596 596 requirements=requirements,
597 597 supportedrequirements=supportedrequirements,
598 598 sharedpath=storebasepath,
599 599 store=store,
600 600 cachevfs=cachevfs,
601 601 wcachevfs=wcachevfs,
602 602 features=features,
603 603 intents=intents)
604 604
605 605 def loadhgrc(ui, wdirvfs, hgvfs, requirements):
606 606 """Load hgrc files/content into a ui instance.
607 607
608 608 This is called during repository opening to load any additional
609 609 config files or settings relevant to the current repository.
610 610
611 611 Returns a bool indicating whether any additional configs were loaded.
612 612
613 613 Extensions should monkeypatch this function to modify how per-repo
614 614 configs are loaded. For example, an extension may wish to pull in
615 615 configs from alternate files or sources.
616 616 """
617 617 try:
618 618 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
619 619 return True
620 620 except IOError:
621 621 return False
622 622
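Per the docstring, an extension can wrap loadhgrc to pull in extra config sources; a sketch using extensions.wrapfunction (the extra file name is hypothetical):

```python
from mercurial import extensions, localrepo

def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements):
    loaded = orig(ui, wdirvfs, hgvfs, requirements)
    try:
        # Also honor a hypothetical secondary per-repo config file.
        ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
        return True
    except IOError:
        return loaded

def uisetup(ui):
    extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)
```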
623 623 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
624 624 """Perform additional actions after .hg/hgrc is loaded.
625 625
626 626 This function is called during repository loading immediately after
627 627 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
628 628
629 629 The function can be used to validate configs, automatically add
630 630 options (including extensions) based on requirements, etc.
631 631 """
632 632
633 633 # Map of requirements to list of extensions to load automatically when
634 634 # requirement is present.
635 635 autoextensions = {
636 636 b'largefiles': [b'largefiles'],
637 637 b'lfs': [b'lfs'],
638 638 }
639 639
640 640 for requirement, names in sorted(autoextensions.items()):
641 641 if requirement not in requirements:
642 642 continue
643 643
644 644 for name in names:
645 645 if not ui.hasconfig(b'extensions', name):
646 646 ui.setconfig(b'extensions', name, b'', source='autoload')
647 647
648 648 def gathersupportedrequirements(ui):
649 649 """Determine the complete set of recognized requirements."""
650 650 # Start with all requirements supported by this file.
651 651 supported = set(localrepository._basesupported)
652 652
653 653 # Execute ``featuresetupfuncs`` entries if they belong to an extension
654 654 # relevant to this ui instance.
655 655 modules = {m.__name__ for n, m in extensions.extensions(ui)}
656 656
657 657 for fn in featuresetupfuncs:
658 658 if fn.__module__ in modules:
659 659 fn(ui, supported)
660 660
661 661 # Add derived requirements from registered compression engines.
662 662 for name in util.compengines:
663 663 engine = util.compengines[name]
664 664 if engine.available() and engine.revlogheader():
665 665 supported.add(b'exp-compression-%s' % name)
666 666 if engine.name() == 'zstd':
667 667 supported.add(b'revlog-compression-zstd')
668 668
669 669 return supported
670 670
671 671 def ensurerequirementsrecognized(requirements, supported):
672 672 """Validate that a set of local requirements is recognized.
673 673
674 674 Receives a set of requirements. Raises an ``error.RepoError`` if there
675 675 exists any requirement in that set that currently loaded code doesn't
676 676 recognize.
677 677
678 678 Returns a set of supported requirements.
679 679 """
680 680 missing = set()
681 681
682 682 for requirement in requirements:
683 683 if requirement in supported:
684 684 continue
685 685
686 686 if not requirement or not requirement[0:1].isalnum():
687 687 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
688 688
689 689 missing.add(requirement)
690 690
691 691 if missing:
692 692 raise error.RequirementError(
693 693 _(b'repository requires features unknown to this Mercurial: %s') %
694 694 b' '.join(sorted(missing)),
695 695 hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
696 696 b'for more information'))
697 697
def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if b'exp-sparse' in requirements and not sparse.enabled:
        raise error.RepoError(_(b'repository is using sparse feature but '
                                b'sparse is not enabled; enable the '
                                b'"sparse" extension to access'))

def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if b'store' in requirements:
        if b'fncache' in requirements:
            return storemod.fncachestore(path, vfstype,
                                         b'dotencode' in requirements)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)

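# In practice the dispatch above means: repositories with both 'store' and
# 'fncache' requirements (the long-standing default, usually together with
# 'dotencode') get a fncachestore; 'store'-only repositories get an
# encodedstore; and ancient repositories without 'store' fall back to
# basicstore.
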
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if b'treemanifest' in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))

    return options

def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(b'storage',
                                     b'revlog.optimize-delta-parent-choice')
    options[b'deltabothparents'] = deltabothparents

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(b'storage',
                                      b'revlog.reuse-external-delta-parent')
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental',
                                        b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(ui.config(b'experimental',
                                     b'sparse-read.density-threshold'))
    srmingapsize = ui.configbytes(b'experimental',
                                  b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # We allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlogs seem to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one".
        prefix = r.startswith
        if prefix('revlog-compression-') or prefix('exp-compression-'):
            options[b'compengine'] = r.split('-', 2)[2]
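            # e.g. 'revlog-compression-zstd'.split('-', 2)[2] -> 'zstd'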

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _('invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _('invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if repository.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    return options

def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)

def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if repository.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage

# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]

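# Thanks to that lambda indirection, a wrapper installed after import time is
# still honored. A hypothetical sketch (``mymixin`` is illustrative only):
#
#   from mercurial import localrepo
#
#   origmakemain = localrepo.makemain
#
#   def makemain(**kwargs):
#       # derive a new main type that layers a mixin over the original
#       return type('wrappedrepo', (mymixin, origmakemain(**kwargs)), {})
#
#   localrepo.makemain = makemain  # seen via ``lambda: makemain`` above
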
@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        REVLOGV2_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
        'internal-phase'
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed, so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business; someone
        # should investigate it in depth at some point
        'cache/',
        # XXX shouldn't dirstate be covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }

    def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
                 supportedrequirements, sharedpath, store, cachevfs, wcachevfs,
                 features, intents=None):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` defining the path to the storage base directory. Points
           to a ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator).
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config="cache-vfs")
            if path.startswith('journal.') or path.startswith('undo.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=3, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=3, config='check-locks')
            return ret
        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=4)
            return ret
        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
                                                              role='client'))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested,
                                    realfs=False, cached=True)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example, calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and '%' not in name:
            name = name + '%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

    @mixedrepostorecache(('bookmarks', 'plain'), ('bookmarks.current', 'plain'),
                         ('bookmarks', ''), ('00changelog.i', ''))
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction update changelog to content B
        # 3) outside transaction update bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are dropped.
        #
        # Having this happen during read is not great, but it becomes worse
        # when it happens during write, because the bookmarks to the "unknown"
        # nodes will be dropped for good. However, writes happen within locks.
        # This locking makes it possible to have a race-free consistent read.
        # For this purpose, data read from disk before locking is
        # "invalidated" right after the locks are taken. These invalidations
        # are "light"; the `filecache` mechanism keeps the data in memory and
        # will reuse it if the underlying files did not change. Not parsing
        # the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong", because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would be
        # refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks` in
        # (4), the changelog file has already diverged from the content used
        # for loading `changelog` in (1).
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still happen
        # without the lock (with a narrower window), but it would no longer go
        # undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) we force the `changelog` filecache to be tested
        # 4) cachestat for `changelog` are captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as the
        # cache stat computed in (1). As a result, at locking time:
        # * if the changelog did not change since (1) -> we can reuse the data
        # * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if ('changelog' in vars(self) and self.currenttransaction() is None):
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in the filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    @storecache('00manifest.i')
    def manifestlog(self):
        rootstore = manifest.manifestrevlog(self.svfs)
        return manifest.manifestlog(self.svfs, self, rootstore,
                                    self._storenarrowmatch)

    @repofilecache('dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [self[i]
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == 'null':
                node = nullid
                rev = nullrev
            elif changeid == 'tip':
                node = self.changelog.tip()
                rev = self.changelog.rev(node)
            elif changeid == '.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid) # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (self.local()
                        and changeid in self.unfiltered().dirstate.parents()):
                        msg = _("working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid) # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    "unsupported changeid '%s' of type %s" %
                    (changeid, type(changeid)))

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(_("filtered revision '%s'")
                                                % pycompat.bytestr(changeid))
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _("unknown revision '%s'") % pycompat.bytestr(changeid))
        except error.WdirUnsupported:
            return context.workingctx(self)

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node
        prefix is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

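    # Illustrative use of the membership check above (a sketch; ``node`` is
    # any 20-byte binary changelog node, e.g. received from a peer):
    #
    #   if node in repo:
    #       ctx = repo[node]
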
    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

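    # A sketch of typical callers of ``revs()`` and ``set()`` above (the
    # argument values are illustrative only):
    #
    #   for rev in repo.revs('draft() and user(%s)', b'alice'):
    #       ...
    #   for ctx in repo.set('branch(%s) and head()', b'default'):
    #       ...
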
    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_("unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to parents manifests. Preserve them
                # only for entries added to first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

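    # The 'encode' and 'decode' tables loaded above come from hgrc sections of
    # the same names; a sketch (the filter commands are illustrative only):
    #
    #   [encode]
    #   *.txt = dos2unix    # applied to content read from the working copy
    #
    #   [decode]
    #   *.txt = unix2dos    # applied to content written to the working copy
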
    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns length of written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
            or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
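        # txnid is therefore of the form 'TXN:<40 hex sha1 chars>'; hooks see
        # it as HG_TXNID.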
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs, 'store': self.svfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current focus
        # is on the behavior of the feature, so we disable it by default. The
        # flag will be removed when we are happy with the performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file, as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, 'journal.narrowspec')
                narrowspec.restorewcbackup(self, 'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['origrepolen'] = len(self)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        tr.hookargs['txnname'] = desc
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.svfs, 'journal.narrowspec'),
                (self.vfs, 'journal.narrowspec.dirstate'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (bookmarks.bookmarksvfs(self), 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        narrowspec.savewcbackup(self, 'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, 'journal.narrowspec')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write("journal.bookmarks",
                           bookmarksvfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

2047 2096 @unfilteredmethod # Until we get smarter cache management
2048 2097 def _rollback(self, dryrun, force, dsguard):
2049 2098 ui = self.ui
2050 2099 try:
2051 2100 args = self.vfs.read('undo.desc').splitlines()
2052 2101 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2053 2102 if len(args) >= 3:
2054 2103 detail = args[2]
2055 2104 oldtip = oldlen - 1
2056 2105
2057 2106 if detail and ui.verbose:
2058 2107 msg = (_('repository tip rolled back to revision %d'
2059 2108 ' (undo %s: %s)\n')
2060 2109 % (oldtip, desc, detail))
2061 2110 else:
2062 2111 msg = (_('repository tip rolled back to revision %d'
2063 2112 ' (undo %s)\n')
2064 2113 % (oldtip, desc))
2065 2114 except IOError:
2066 2115 msg = _('rolling back unknown transaction\n')
2067 2116 desc = None
2068 2117
2069 2118 if not force and self['.'] != self['tip'] and desc == 'commit':
2070 2119 raise error.Abort(
2071 2120 _('rollback of last commit while not checked out '
2072 2121 'may lose data'), hint=_('use -f to force'))
2073 2122
2074 2123 ui.status(msg)
2075 2124 if dryrun:
2076 2125 return 0
2077 2126
2078 2127 parents = self.dirstate.parents()
2079 2128 self.destroying()
2080 2129 vfsmap = {'plain': self.vfs, '': self.svfs}
2081 2130 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
2082 2131 checkambigfiles=_cachedfiles)
2083 2132 bookmarksvfs = bookmarks.bookmarksvfs(self)
2084 2133 if bookmarksvfs.exists('undo.bookmarks'):
2085 2134 bookmarksvfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
2086 2135 if self.svfs.exists('undo.phaseroots'):
2087 2136 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
2088 2137 self.invalidate()
2089 2138
2090 2139 parentgone = any(p not in self.changelog.nodemap for p in parents)
2091 2140 if parentgone:
2092 2141 # prevent dirstateguard from overwriting already restored one
2093 2142 dsguard.close()
2094 2143
2095 2144 narrowspec.restorebackup(self, 'undo.narrowspec')
2096 2145 narrowspec.restorewcbackup(self, 'undo.narrowspec.dirstate')
2097 2146 self.dirstate.restorebackup(None, 'undo.dirstate')
2098 2147 try:
2099 2148 branch = self.vfs.read('undo.branch')
2100 2149 self.dirstate.setbranch(encoding.tolocal(branch))
2101 2150 except IOError:
2102 2151 ui.warn(_('named branch could not be reset: '
2103 2152 'current branch is still \'%s\'\n')
2104 2153 % self.dirstate.branch())
2105 2154
2106 2155 parents = tuple([p.rev() for p in self[None].parents()])
2107 2156 if len(parents) > 1:
2108 2157 ui.status(_('working directory now based on '
2109 2158 'revisions %d and %d\n') % parents)
2110 2159 else:
2111 2160 ui.status(_('working directory now based on '
2112 2161 'revision %d\n') % parents)
2113 2162 mergemod.mergestate.clean(self, self['.'].node())
2114 2163
2115 2164 # TODO: if we know which new heads may result from this rollback, pass
2116 2165 # them to destroy(), which will prevent the branchhead cache from being
2117 2166 # invalidated.
2118 2167 self.destroyed()
2119 2168 return 0
2120 2169
2121 2170 def _buildcacheupdater(self, newtransaction):
2122 2171 """called during transaction to build the callback updating cache
2123 2172
2124 2173 Lives on the repository to help extensions that might want to augment
2125 2174 this logic. For this purpose, the created transaction is passed to the
2126 2175 method.
2127 2176 """
2128 2177 # we must avoid cyclic reference between repo and transaction.
2129 2178 reporef = weakref.ref(self)
2130 2179 def updater(tr):
2131 2180 repo = reporef()
2132 2181 repo.updatecaches(tr)
2133 2182 return updater
2134 2183
2135 2184 @unfilteredmethod
2136 2185 def updatecaches(self, tr=None, full=False):
2137 2186 """warm appropriate caches
2138 2187
2139 2188 If this function is called after a transaction has closed, the transaction
2140 2189 will be available in the 'tr' argument. This can be used to selectively
2141 2190 update caches relevant to the changes in that transaction.
2142 2191
2143 2192 If 'full' is set, make sure all caches the function knows about have
2144 2193 up-to-date data, even the ones usually loaded more lazily.
2145 2194 """
2146 2195 if tr is not None and tr.hookargs.get('source') == 'strip':
2147 2196 # During strip, many caches are invalid, but a
2148 2197 # later call to `destroyed` will refresh them.
2149 2198 return
2150 2199
2151 2200 if tr is None or tr.changes['origrepolen'] < len(self):
2152 2201 # accessing the 'served' branchmap should refresh all the others,
2153 2202 self.ui.debug('updating the branch cache\n')
2154 2203 self.filtered('served').branchmap()
2155 2204 self.filtered('served.hidden').branchmap()
2156 2205
2157 2206 if full:
2158 2207 unfi = self.unfiltered()
2159 2208 rbc = unfi.revbranchcache()
2160 2209 for r in unfi.changelog:
2161 2210 rbc.branchinfo(r)
2162 2211 rbc.write()
2163 2212
2164 2213 # ensure the working copy parents are in the manifestfulltextcache
2165 2214 for ctx in self['.'].parents():
2166 2215 ctx.manifest() # accessing the manifest is enough
2167 2216
2168 2217 # accessing fnode cache warms the cache
2169 2218 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2170 2219 # accessing tags warms the cache
2171 2220 self.tags()
2172 2221 self.filtered('served').tags()
2173 2222
2174 2223 def invalidatecaches(self):
2175 2224
2176 2225 if r'_tagscache' in vars(self):
2177 2226 # can't use delattr on proxy
2178 2227 del self.__dict__[r'_tagscache']
2179 2228
2180 2229 self._branchcaches.clear()
2181 2230 self.invalidatevolatilesets()
2182 2231 self._sparsesignaturecache.clear()
2183 2232
2184 2233 def invalidatevolatilesets(self):
2185 2234 self.filteredrevcache.clear()
2186 2235 obsolete.clearobscaches(self)
2187 2236
2188 2237 def invalidatedirstate(self):
2189 2238 '''Invalidates the dirstate, causing the next call to dirstate
2190 2239 to check if it was modified since the last time it was read,
2191 2240 rereading it if it has.
2192 2241
2193 2242 This is different from dirstate.invalidate() in that it doesn't always
2194 2243 reread the dirstate. Use dirstate.invalidate() if you want to
2195 2244 explicitly read the dirstate again (i.e. restoring it to a previous
2196 2245 known good state).'''
2197 2246 if hasunfilteredcache(self, r'dirstate'):
2198 2247 for k in self.dirstate._filecache:
2199 2248 try:
2200 2249 delattr(self.dirstate, k)
2201 2250 except AttributeError:
2202 2251 pass
2203 2252 delattr(self.unfiltered(), r'dirstate')
2204 2253
2205 2254 def invalidate(self, clearfilecache=False):
2206 2255 '''Invalidates both store and non-store parts other than dirstate
2207 2256
2208 2257 If a transaction is running, invalidation of store is omitted,
2209 2258 because discarding in-memory changes might cause inconsistency
2210 2259 (e.g. an incomplete fncache causes unintentional failure, but
2211 2260 a redundant one doesn't).
2212 2261 '''
2213 2262 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2214 2263 for k in list(self._filecache.keys()):
2215 2264 # dirstate is invalidated separately in invalidatedirstate()
2216 2265 if k == 'dirstate':
2217 2266 continue
2218 2267 if (k == 'changelog' and
2219 2268 self.currenttransaction() and
2220 2269 self.changelog._delayed):
2221 2270 # The changelog object may store unwritten revisions. We don't
2222 2271 # want to lose them.
2223 2272 # TODO: Solve the problem instead of working around it.
2224 2273 continue
2225 2274
2226 2275 if clearfilecache:
2227 2276 del self._filecache[k]
2228 2277 try:
2229 2278 delattr(unfiltered, k)
2230 2279 except AttributeError:
2231 2280 pass
2232 2281 self.invalidatecaches()
2233 2282 if not self.currenttransaction():
2234 2283 # TODO: Changing contents of store outside transaction
2235 2284 # causes inconsistency. We should make in-memory store
2236 2285 # changes detectable, and abort if changed.
2237 2286 self.store.invalidatecaches()
2238 2287
2239 2288 def invalidateall(self):
2240 2289 '''Fully invalidates both store and non-store parts, causing the
2241 2290 subsequent operation to reread any outside changes.'''
2242 2291 # extension should hook this to invalidate its caches
2243 2292 self.invalidate()
2244 2293 self.invalidatedirstate()
2245 2294
2246 2295 @unfilteredmethod
2247 2296 def _refreshfilecachestats(self, tr):
2248 2297 """Reload stats of cached files so that they are flagged as valid"""
2249 2298 for k, ce in self._filecache.items():
2250 2299 k = pycompat.sysstr(k)
2251 2300 if k == r'dirstate' or k not in self.__dict__:
2252 2301 continue
2253 2302 ce.refresh()
2254 2303
2255 2304 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
2256 2305 inheritchecker=None, parentenvvar=None):
2257 2306 parentlock = None
2258 2307 # the contents of parentenvvar are used by the underlying lock to
2259 2308 # determine whether it can be inherited
2260 2309 if parentenvvar is not None:
2261 2310 parentlock = encoding.environ.get(parentenvvar)
2262 2311
2263 2312 timeout = 0
2264 2313 warntimeout = 0
2265 2314 if wait:
2266 2315 timeout = self.ui.configint("ui", "timeout")
2267 2316 warntimeout = self.ui.configint("ui", "timeout.warn")
2268 2317 # internal config: ui.signal-safe-lock
2269 2318 signalsafe = self.ui.configbool('ui', 'signal-safe-lock')
2270 2319
2271 2320 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
2272 2321 releasefn=releasefn,
2273 2322 acquirefn=acquirefn, desc=desc,
2274 2323 inheritchecker=inheritchecker,
2275 2324 parentlock=parentlock,
2276 2325 signalsafe=signalsafe)
2277 2326 return l
2278 2327
2279 2328 def _afterlock(self, callback):
2280 2329 """add a callback to be run when the repository is fully unlocked
2281 2330
2282 2331 The callback will be executed when the outermost lock is released
2283 2332 (with wlock being higher level than 'lock')."""
2284 2333 for ref in (self._wlockref, self._lockref):
2285 2334 l = ref and ref()
2286 2335 if l and l.held:
2287 2336 l.postrelease.append(callback)
2288 2337 break
2289 2338 else: # no lock has been found.
2290 2339 callback()
2291 2340
2292 2341 def lock(self, wait=True):
2293 2342 '''Lock the repository store (.hg/store) and return a weak reference
2294 2343 to the lock. Use this before modifying the store (e.g. committing or
2295 2344 stripping). If you are opening a transaction, get a lock as well.
2296 2345
2297 2346 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2298 2347 'wlock' first to avoid a dead-lock hazard.'''
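# A minimal sketch of the documented ordering contract (hypothetical
# caller); commit() later in this class follows the same pattern:
#
#     with repo.wlock(), repo.lock():
#         with repo.transaction('example'):
#             pass  # mutate the store under both locks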
2299 2348 l = self._currentlock(self._lockref)
2300 2349 if l is not None:
2301 2350 l.lock()
2302 2351 return l
2303 2352
2304 2353 l = self._lock(vfs=self.svfs,
2305 2354 lockname="lock",
2306 2355 wait=wait,
2307 2356 releasefn=None,
2308 2357 acquirefn=self.invalidate,
2309 2358 desc=_('repository %s') % self.origroot)
2310 2359 self._lockref = weakref.ref(l)
2311 2360 return l
2312 2361
2313 2362 def _wlockchecktransaction(self):
2314 2363 if self.currenttransaction() is not None:
2315 2364 raise error.LockInheritanceContractViolation(
2316 2365 'wlock cannot be inherited in the middle of a transaction')
2317 2366
2318 2367 def wlock(self, wait=True):
2319 2368 '''Lock the non-store parts of the repository (everything under
2320 2369 .hg except .hg/store) and return a weak reference to the lock.
2321 2370
2322 2371 Use this before modifying files in .hg.
2323 2372
2324 2373 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2325 2374 'wlock' first to avoid a dead-lock hazard.'''
2326 2375 l = self._wlockref and self._wlockref()
2327 2376 if l is not None and l.held:
2328 2377 l.lock()
2329 2378 return l
2330 2379
2331 2380 # We do not need to check for non-waiting lock acquisition. Such
2332 2381 # acquisition would not cause a dead-lock, as it would just fail.
2333 2382 if wait and (self.ui.configbool('devel', 'all-warnings')
2334 2383 or self.ui.configbool('devel', 'check-locks')):
2335 2384 if self._currentlock(self._lockref) is not None:
2336 2385 self.ui.develwarn('"wlock" acquired after "lock"')
2337 2386
2338 2387 def unlock():
2339 2388 if self.dirstate.pendingparentchange():
2340 2389 self.dirstate.invalidate()
2341 2390 else:
2342 2391 self.dirstate.write(None)
2343 2392
2344 2393 self._filecache['dirstate'].refresh()
2345 2394
2346 2395 l = self._lock(self.vfs, "wlock", wait, unlock,
2347 2396 self.invalidatedirstate, _('working directory of %s') %
2348 2397 self.origroot,
2349 2398 inheritchecker=self._wlockchecktransaction,
2350 2399 parentenvvar='HG_WLOCK_LOCKER')
2351 2400 self._wlockref = weakref.ref(l)
2352 2401 return l
2353 2402
2354 2403 def _currentlock(self, lockref):
2355 2404 """Returns the lock if it's held, or None if it's not."""
2356 2405 if lockref is None:
2357 2406 return None
2358 2407 l = lockref()
2359 2408 if l is None or not l.held:
2360 2409 return None
2361 2410 return l
2362 2411
2363 2412 def currentwlock(self):
2364 2413 """Returns the wlock if it's held, or None if it's not."""
2365 2414 return self._currentlock(self._wlockref)
2366 2415
2367 2416 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist,
2368 2417 includecopymeta):
2369 2418 """
2370 2419 commit an individual file as part of a larger transaction
2371 2420 """
2372 2421
2373 2422 fname = fctx.path()
2374 2423 fparent1 = manifest1.get(fname, nullid)
2375 2424 fparent2 = manifest2.get(fname, nullid)
2376 2425 if isinstance(fctx, context.filectx):
2377 2426 node = fctx.filenode()
2378 2427 if node in [fparent1, fparent2]:
2379 2428 self.ui.debug('reusing %s filelog entry\n' % fname)
2380 2429 if ((fparent1 != nullid and
2381 2430 manifest1.flags(fname) != fctx.flags()) or
2382 2431 (fparent2 != nullid and
2383 2432 manifest2.flags(fname) != fctx.flags())):
2384 2433 changelist.append(fname)
2385 2434 return node
2386 2435
2387 2436 flog = self.file(fname)
2388 2437 meta = {}
2389 2438 cfname = fctx.copysource()
2390 2439 if cfname and cfname != fname:
2391 2440 # Mark the new revision of this file as a copy of another
2392 2441 # file. This copy data will effectively act as a parent
2393 2442 # of this new revision. If this is a merge, the first
2394 2443 # parent will be the nullid (meaning "look up the copy data")
2395 2444 # and the second one will be the other parent. For example:
2396 2445 #
2397 2446 # 0 --- 1 --- 3 rev1 changes file foo
2398 2447 # \ / rev2 renames foo to bar and changes it
2399 2448 # \- 2 -/ rev3 should have bar with all changes and
2400 2449 # should record that bar descends from
2401 2450 # bar in rev2 and foo in rev1
2402 2451 #
2403 2452 # this allows this merge to succeed:
2404 2453 #
2405 2454 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
2406 2455 # \ / merging rev3 and rev4 should use bar@rev2
2407 2456 # \- 2 --- 4 as the merge base
2408 2457 #
2409 2458
2410 2459 cnode = manifest1.get(cfname)
2411 2460 newfparent = fparent2
2412 2461
2413 2462 if manifest2: # branch merge
2414 2463 if fparent2 == nullid or cnode is None: # copied on remote side
2415 2464 if cfname in manifest2:
2416 2465 cnode = manifest2[cfname]
2417 2466 newfparent = fparent1
2418 2467
2419 2468 # Here, we used to search backwards through history to try to find
2420 2469 # where the file copy came from if the source of a copy was not in
2421 2470 # the parent directory. However, this doesn't actually make sense to
2422 2471 # do (what does a copy from something not in your working copy even
2423 2472 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
2424 2473 # the user that copy information was dropped, so if they didn't
2425 2474 # expect this outcome it can be fixed, but this is the correct
2426 2475 # behavior in this circumstance.
2427 2476
2428 2477 if cnode:
2429 2478 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(cnode)))
2430 2479 if includecopymeta:
2431 2480 meta["copy"] = cfname
2432 2481 meta["copyrev"] = hex(cnode)
2433 2482 fparent1, fparent2 = nullid, newfparent
2434 2483 else:
2435 2484 self.ui.warn(_("warning: can't find ancestor for '%s' "
2436 2485 "copied from '%s'!\n") % (fname, cfname))
2437 2486
2438 2487 elif fparent1 == nullid:
2439 2488 fparent1, fparent2 = fparent2, nullid
2440 2489 elif fparent2 != nullid:
2441 2490 # is one parent an ancestor of the other?
2442 2491 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
2443 2492 if fparent1 in fparentancestors:
2444 2493 fparent1, fparent2 = fparent2, nullid
2445 2494 elif fparent2 in fparentancestors:
2446 2495 fparent2 = nullid
2447 2496
2448 2497 # is the file changed?
2449 2498 text = fctx.data()
2450 2499 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
2451 2500 changelist.append(fname)
2452 2501 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
2453 2502 # are just the flags changed during merge?
2454 2503 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
2455 2504 changelist.append(fname)
2456 2505
2457 2506 return fparent1
2458 2507
2459 2508 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
2460 2509 """check for commit arguments that aren't committable"""
2461 2510 if match.isexact() or match.prefix():
2462 2511 matched = set(status.modified + status.added + status.removed)
2463 2512
2464 2513 for f in match.files():
2465 2514 f = self.dirstate.normalize(f)
2466 2515 if f == '.' or f in matched or f in wctx.substate:
2467 2516 continue
2468 2517 if f in status.deleted:
2469 2518 fail(f, _('file not found!'))
2470 2519 if f in vdirs: # visited directory
2471 2520 d = f + '/'
2472 2521 for mf in matched:
2473 2522 if mf.startswith(d):
2474 2523 break
2475 2524 else:
2476 2525 fail(f, _("no match under directory!"))
2477 2526 elif f not in self.dirstate:
2478 2527 fail(f, _("file not tracked!"))
2479 2528
2480 2529 @unfilteredmethod
2481 2530 def commit(self, text="", user=None, date=None, match=None, force=False,
2482 2531 editor=False, extra=None):
2483 2532 """Add a new revision to current repository.
2484 2533
2485 2534 Revision information is gathered from the working directory,
2486 2535 match can be used to filter the committed files. If editor is
2487 2536 supplied, it is called to get a commit message.
2488 2537 """
2489 2538 if extra is None:
2490 2539 extra = {}
2491 2540
2492 2541 def fail(f, msg):
2493 2542 raise error.Abort('%s: %s' % (f, msg))
2494 2543
2495 2544 if not match:
2496 2545 match = matchmod.always()
2497 2546
2498 2547 if not force:
2499 2548 vdirs = []
2500 2549 match.explicitdir = vdirs.append
2501 2550 match.bad = fail
2502 2551
2503 2552 # lock() for recent changelog (see issue4368)
2504 2553 with self.wlock(), self.lock():
2505 2554 wctx = self[None]
2506 2555 merge = len(wctx.parents()) > 1
2507 2556
2508 2557 if not force and merge and not match.always():
2509 2558 raise error.Abort(_('cannot partially commit a merge '
2510 2559 '(do not specify files or patterns)'))
2511 2560
2512 2561 status = self.status(match=match, clean=force)
2513 2562 if force:
2514 2563 status.modified.extend(status.clean) # mq may commit clean files
2515 2564
2516 2565 # check subrepos
2517 2566 subs, commitsubs, newstate = subrepoutil.precommit(
2518 2567 self.ui, wctx, status, match, force=force)
2519 2568
2520 2569 # make sure all explicit patterns are matched
2521 2570 if not force:
2522 2571 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
2523 2572
2524 2573 cctx = context.workingcommitctx(self, status,
2525 2574 text, user, date, extra)
2526 2575
2527 2576 # internal config: ui.allowemptycommit
2528 2577 allowemptycommit = (wctx.branch() != wctx.p1().branch()
2529 2578 or extra.get('close') or merge or cctx.files()
2530 2579 or self.ui.configbool('ui', 'allowemptycommit'))
2531 2580 if not allowemptycommit:
2532 2581 return None
2533 2582
2534 2583 if merge and cctx.deleted():
2535 2584 raise error.Abort(_("cannot commit merge with missing files"))
2536 2585
2537 2586 ms = mergemod.mergestate.read(self)
2538 2587 mergeutil.checkunresolved(ms)
2539 2588
2540 2589 if editor:
2541 2590 cctx._text = editor(self, cctx, subs)
2542 2591 edited = (text != cctx._text)
2543 2592
2544 2593 # Save commit message in case this transaction gets rolled back
2545 2594 # (e.g. by a pretxncommit hook). Leave the content alone on
2546 2595 # the assumption that the user will use the same editor again.
2547 2596 msgfn = self.savecommitmessage(cctx._text)
2548 2597
2549 2598 # commit subs and write new state
2550 2599 if subs:
2551 2600 uipathfn = scmutil.getuipathfn(self)
2552 2601 for s in sorted(commitsubs):
2553 2602 sub = wctx.sub(s)
2554 2603 self.ui.status(_('committing subrepository %s\n') %
2555 2604 uipathfn(subrepoutil.subrelpath(sub)))
2556 2605 sr = sub.commit(cctx._text, user, date)
2557 2606 newstate[s] = (newstate[s][0], sr)
2558 2607 subrepoutil.writestate(self, newstate)
2559 2608
2560 2609 p1, p2 = self.dirstate.parents()
2561 2610 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
2562 2611 try:
2563 2612 self.hook("precommit", throw=True, parent1=hookp1,
2564 2613 parent2=hookp2)
2565 2614 with self.transaction('commit'):
2566 2615 ret = self.commitctx(cctx, True)
2567 2616 # update bookmarks, dirstate and mergestate
2568 2617 bookmarks.update(self, [p1, p2], ret)
2569 2618 cctx.markcommitted(ret)
2570 2619 ms.reset()
2571 2620 except: # re-raises
2572 2621 if edited:
2573 2622 self.ui.write(
2574 2623 _('note: commit message saved in %s\n') % msgfn)
2575 2624 raise
2576 2625
2577 2626 def commithook():
2578 2627 # hack for commands that use a temporary commit (eg: histedit):
2579 2628 # the temporary commit may have been stripped before the hook runs
2580 2629 if self.changelog.hasnode(ret):
2581 2630 self.hook("commit", node=hex(ret), parent1=hookp1,
2582 2631 parent2=hookp2)
2583 2632 self._afterlock(commithook)
2584 2633 return ret
2585 2634
2586 2635 @unfilteredmethod
2587 2636 def commitctx(self, ctx, error=False, origctx=None):
2588 2637 """Add a new revision to current repository.
2589 2638 Revision information is passed via the context argument.
2590 2639
2591 2640 ctx.files() should list all files involved in this commit, i.e.
2592 2641 modified/added/removed files. On merge, it may be wider than the
2593 2642 ctx.files() to be committed, since any file nodes derived directly
2594 2643 from p1 or p2 are excluded from the committed ctx.files().
2595 2644
2596 2645 origctx is for convert to work around the problem that bug
2597 2646 fixes to the files list in changesets change hashes. For
2598 2647 convert to be the identity, it can pass an origctx and this
2599 2648 function will use the same files list when it makes sense to
2600 2649 do so.
2601 2650 """
2602 2651
2603 2652 p1, p2 = ctx.p1(), ctx.p2()
2604 2653 user = ctx.user()
2605 2654
2606 2655 writecopiesto = self.ui.config('experimental', 'copies.write-to')
2607 2656 writefilecopymeta = writecopiesto != 'changeset-only'
2608 2657 writechangesetcopy = (writecopiesto in
2609 2658 ('changeset-only', 'compatibility'))
2610 2659 p1copies, p2copies = None, None
2611 2660 if writechangesetcopy:
2612 2661 p1copies = ctx.p1copies()
2613 2662 p2copies = ctx.p2copies()
2614 2663 filesadded, filesremoved = None, None
2615 2664 with self.lock(), self.transaction("commit") as tr:
2616 2665 trp = weakref.proxy(tr)
2617 2666
2618 2667 if ctx.manifestnode():
2619 2668 # reuse an existing manifest revision
2620 2669 self.ui.debug('reusing known manifest\n')
2621 2670 mn = ctx.manifestnode()
2622 2671 files = ctx.files()
2623 2672 if writechangesetcopy:
2624 2673 filesadded = ctx.filesadded()
2625 2674 filesremoved = ctx.filesremoved()
2626 2675 elif ctx.files():
2627 2676 m1ctx = p1.manifestctx()
2628 2677 m2ctx = p2.manifestctx()
2629 2678 mctx = m1ctx.copy()
2630 2679
2631 2680 m = mctx.read()
2632 2681 m1 = m1ctx.read()
2633 2682 m2 = m2ctx.read()
2634 2683
2635 2684 # check in files
2636 2685 added = []
2637 2686 changed = []
2638 2687 removed = list(ctx.removed())
2639 2688 linkrev = len(self)
2640 2689 self.ui.note(_("committing files:\n"))
2641 2690 uipathfn = scmutil.getuipathfn(self)
2642 2691 for f in sorted(ctx.modified() + ctx.added()):
2643 2692 self.ui.note(uipathfn(f) + "\n")
2644 2693 try:
2645 2694 fctx = ctx[f]
2646 2695 if fctx is None:
2647 2696 removed.append(f)
2648 2697 else:
2649 2698 added.append(f)
2650 2699 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2651 2700 trp, changed,
2652 2701 writefilecopymeta)
2653 2702 m.setflag(f, fctx.flags())
2654 2703 except OSError:
2655 2704 self.ui.warn(_("trouble committing %s!\n") %
2656 2705 uipathfn(f))
2657 2706 raise
2658 2707 except IOError as inst:
2659 2708 errcode = getattr(inst, 'errno', errno.ENOENT)
2660 2709 if error or errcode and errcode != errno.ENOENT:
2661 2710 self.ui.warn(_("trouble committing %s!\n") %
2662 2711 uipathfn(f))
2663 2712 raise
2664 2713
2665 2714 # update manifest
2666 2715 removed = [f for f in removed if f in m1 or f in m2]
2667 2716 drop = sorted([f for f in removed if f in m])
2668 2717 for f in drop:
2669 2718 del m[f]
2670 2719 if p2.rev() != nullrev:
2671 2720 @util.cachefunc
2672 2721 def mas():
2673 2722 p1n = p1.node()
2674 2723 p2n = p2.node()
2675 2724 cahs = self.changelog.commonancestorsheads(p1n, p2n)
2676 2725 if not cahs:
2677 2726 cahs = [nullrev]
2678 2727 return [self[r].manifest() for r in cahs]
2679 2728 def deletionfromparent(f):
2680 2729 # When a file is removed relative to p1 in a merge, this
2681 2730 # function determines whether the absence is due to a
2682 2731 # deletion from a parent, or whether the merge commit
2683 2732 # itself deletes the file. We decide this by doing a
2684 2733 # simplified three way merge of the manifest entry for
2685 2734 # the file. There are two ways we decide the merge
2686 2735 # itself didn't delete a file:
2687 2736 # - neither parent (nor the merge) contain the file
2688 2737 # - exactly one parent contains the file, and that
2689 2738 # parent has the same filelog entry as the merge
2690 2739 # ancestor (or all of them if there are two). In other
2691 2740 # words, that parent left the file unchanged while the
2692 2741 # other one deleted it.
2693 2742 # One way to think about this is that deleting a file is
2694 2743 # similar to emptying it, so the list of changed files
2695 2744 # should be similar either way. The computation
2696 2745 # described above is not done directly in _filecommit
2697 2746 # when creating the list of changed files, however
2698 2747 # it does something very similar by comparing filelog
2699 2748 # nodes.
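# An illustrative case (hypothetical file f): if p1 carries f with the
# same filelog node as every merge ancestor while f is absent from p2,
# then p2 deleted it and the merge itself did not, so f is dropped
# from 'removed' below.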
2700 2749 if f in m1:
2701 2750 return (f not in m2
2702 2751 and all(f in ma and ma.find(f) == m1.find(f)
2703 2752 for ma in mas()))
2704 2753 elif f in m2:
2705 2754 return all(f in ma and ma.find(f) == m2.find(f)
2706 2755 for ma in mas())
2707 2756 else:
2708 2757 return True
2709 2758 removed = [f for f in removed if not deletionfromparent(f)]
2710 2759
2711 2760 files = changed + removed
2712 2761 md = None
2713 2762 if not files:
2714 2763 # if no "files" actually changed in terms of the changelog,
2715 2764 # try hard to detect an unmodified manifest entry so that the
2716 2765 # exact same commit can be reproduced later on convert.
2717 2766 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
2718 2767 if not files and md:
2719 2768 self.ui.debug('not reusing manifest (no file change in '
2720 2769 'changelog, but manifest differs)\n')
2721 2770 if files or md:
2722 2771 self.ui.note(_("committing manifest\n"))
2723 2772 # we're using narrowmatch here since it's already applied at
2724 2773 # other stages (such as dirstate.walk), so we're already
2725 2774 # ignoring things outside of narrowspec in most cases. The
2726 2775 # one case where we might have files outside the narrowspec
2727 2776 # at this point is merges, and we already error out in the
2728 2777 # case where the merge has files outside of the narrowspec,
2729 2778 # so this is safe.
2730 2779 mn = mctx.write(trp, linkrev,
2731 2780 p1.manifestnode(), p2.manifestnode(),
2732 2781 added, drop, match=self.narrowmatch())
2733 2782
2734 2783 if writechangesetcopy:
2735 2784 filesadded = [f for f in changed
2736 2785 if not (f in m1 or f in m2)]
2737 2786 filesremoved = removed
2738 2787 else:
2739 2788 self.ui.debug('reusing manifest from p1 (listed files '
2740 2789 'actually unchanged)\n')
2741 2790 mn = p1.manifestnode()
2742 2791 else:
2743 2792 self.ui.debug('reusing manifest from p1 (no file change)\n')
2744 2793 mn = p1.manifestnode()
2745 2794 files = []
2746 2795
2747 2796 if writecopiesto == 'changeset-only':
2748 2797 # If writing only to changeset extras, use None to indicate that
2749 2798 # no entry should be written. If writing to both, write an empty
2750 2799 # entry to prevent the reader from falling back to reading
2751 2800 # filelogs.
2752 2801 p1copies = p1copies or None
2753 2802 p2copies = p2copies or None
2754 2803 filesadded = filesadded or None
2755 2804 filesremoved = filesremoved or None
2756 2805
2757 2806 if origctx and origctx.manifestnode() == mn:
2758 2807 files = origctx.files()
2759 2808
2760 2809 # update changelog
2761 2810 self.ui.note(_("committing changelog\n"))
2762 2811 self.changelog.delayupdate(tr)
2763 2812 n = self.changelog.add(mn, files, ctx.description(),
2764 2813 trp, p1.node(), p2.node(),
2765 2814 user, ctx.date(), ctx.extra().copy(),
2766 2815 p1copies, p2copies, filesadded, filesremoved)
2767 2816 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2768 2817 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2769 2818 parent2=xp2)
2770 2819 # set the new commit in its proper phase
2771 2820 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2772 2821 if targetphase:
2773 2822 # retracting the boundary does not alter parent changesets.
2774 2823 # if a parent has a higher phase, the resulting phase will
2775 2824 # be compliant anyway
2776 2825 #
2777 2826 # if minimal phase was 0 we don't need to retract anything
2778 2827 phases.registernew(self, tr, targetphase, [n])
2779 2828 return n
2780 2829
2781 2830 @unfilteredmethod
2782 2831 def destroying(self):
2783 2832 '''Inform the repository that nodes are about to be destroyed.
2784 2833 Intended for use by strip and rollback, so there's a common
2785 2834 place for anything that has to be done before destroying history.
2786 2835
2787 2836 This is mostly useful for saving state that is in memory and waiting
2788 2837 to be flushed when the current lock is released. Because a call to
2789 2838 destroyed is imminent, the repo will be invalidated causing those
2790 2839 changes to stay in memory (waiting for the next unlock), or vanish
2791 2840 completely.
2792 2841 '''
2793 2842 # When using the same lock to commit and strip, the phasecache is left
2794 2843 # dirty after committing. Then when we strip, the repo is invalidated,
2795 2844 # causing those changes to disappear.
2796 2845 if '_phasecache' in vars(self):
2797 2846 self._phasecache.write()
2798 2847
2799 2848 @unfilteredmethod
2800 2849 def destroyed(self):
2801 2850 '''Inform the repository that nodes have been destroyed.
2802 2851 Intended for use by strip and rollback, so there's a common
2803 2852 place for anything that has to be done after destroying history.
2804 2853 '''
2805 2854 # When one tries to:
2806 2855 # 1) destroy nodes thus calling this method (e.g. strip)
2807 2856 # 2) use phasecache somewhere (e.g. commit)
2808 2857 #
2809 2858 # then 2) will fail because the phasecache contains nodes that were
2810 2859 # removed. We can either remove phasecache from the filecache,
2811 2860 # causing it to reload next time it is accessed, or simply filter
2812 2861 # the removed nodes now and write the updated cache.
2813 2862 self._phasecache.filterunknown(self)
2814 2863 self._phasecache.write()
2815 2864
2816 2865 # refresh all repository caches
2817 2866 self.updatecaches()
2818 2867
2819 2868 # Ensure the persistent tag cache is updated. Doing it now
2820 2869 # means that the tag cache only has to worry about destroyed
2821 2870 # heads immediately after a strip/rollback. That in turn
2822 2871 # guarantees that "cachetip == currenttip" (comparing both rev
2823 2872 # and node) always means no nodes have been added or destroyed.
2824 2873
2825 2874 # XXX this is suboptimal when qrefresh'ing: we strip the current
2826 2875 # head, refresh the tag cache, then immediately add a new head.
2827 2876 # But I think doing it this way is necessary for the "instant
2828 2877 # tag cache retrieval" case to work.
2829 2878 self.invalidate()
2830 2879
2831 2880 def status(self, node1='.', node2=None, match=None,
2832 2881 ignored=False, clean=False, unknown=False,
2833 2882 listsubrepos=False):
2834 2883 '''a convenience method that calls node1.status(node2)'''
2835 2884 return self[node1].status(node2, match, ignored, clean, unknown,
2836 2885 listsubrepos)
2837 2886
2838 2887 def addpostdsstatus(self, ps):
2839 2888 """Add a callback to run within the wlock, at the point at which status
2840 2889 fixups happen.
2841 2890
2842 2891 On status completion, callback(wctx, status) will be called with the
2843 2892 wlock held, unless the dirstate has changed from underneath or the wlock
2844 2893 couldn't be grabbed.
2845 2894
2846 2895 Callbacks should not capture and use a cached copy of the dirstate --
2847 2896 it might change in the meantime. Instead, they should access the
2848 2897 dirstate via wctx.repo().dirstate.
2849 2898
2850 2899 This list is emptied out after each status run -- extensions should
2851 2900 make sure they add to this list each time dirstate.status is called.
2852 2901 Extensions should also make sure they don't call this for statuses
2853 2902 that don't involve the dirstate.
2854 2903 """
2855 2904
2856 2905 # The list is located here for uniqueness reasons -- it is actually
2857 2906 # managed by the workingctx, but that isn't unique per-repo.
2858 2907 self._postdsstatus.append(ps)
2859 2908
2860 2909 def postdsstatus(self):
2861 2910 """Used by workingctx to get the list of post-dirstate-status hooks."""
2862 2911 return self._postdsstatus
2863 2912
2864 2913 def clearpostdsstatus(self):
2865 2914 """Used by workingctx to clear post-dirstate-status hooks."""
2866 2915 del self._postdsstatus[:]
2867 2916
2868 2917 def heads(self, start=None):
2869 2918 if start is None:
2870 2919 cl = self.changelog
2871 2920 headrevs = reversed(cl.headrevs())
2872 2921 return [cl.node(rev) for rev in headrevs]
2873 2922
2874 2923 heads = self.changelog.heads(start)
2875 2924 # sort the output in rev descending order
2876 2925 return sorted(heads, key=self.changelog.rev, reverse=True)
2877 2926
2878 2927 def branchheads(self, branch=None, start=None, closed=False):
2879 2928 '''return a (possibly filtered) list of heads for the given branch
2880 2929
2881 2930 Heads are returned in topological order, from newest to oldest.
2882 2931 If branch is None, use the dirstate branch.
2883 2932 If start is not None, return only heads reachable from start.
2884 2933 If closed is True, return heads that are marked as closed as well.
2885 2934 '''
2886 2935 if branch is None:
2887 2936 branch = self[None].branch()
2888 2937 branches = self.branchmap()
2889 2938 if not branches.hasbranch(branch):
2890 2939 return []
2891 2940 # the cache returns heads ordered lowest to highest
2892 2941 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2893 2942 if start is not None:
2894 2943 # filter out the heads that cannot be reached from startrev
2895 2944 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2896 2945 bheads = [h for h in bheads if h in fbheads]
2897 2946 return bheads
2898 2947
2899 2948 def branches(self, nodes):
2900 2949 if not nodes:
2901 2950 nodes = [self.changelog.tip()]
2902 2951 b = []
2903 2952 for n in nodes:
2904 2953 t = n
2905 2954 while True:
2906 2955 p = self.changelog.parents(n)
2907 2956 if p[1] != nullid or p[0] == nullid:
2908 2957 b.append((t, n, p[0], p[1]))
2909 2958 break
2910 2959 n = p[0]
2911 2960 return b
2912 2961
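# between() below samples first-parent history: for each (top, bottom)
# pair it records the nodes at distances 1, 2, 4, 8, ... from 'top',
# stopping at 'bottom'. A sketch of the sampling (illustrative only;
# the doubling stride keeps the result to O(log n) nodes):
#
#     top -> dist 1 -> dist 2 -> dist 4 -> ... -> bottom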
2913 2962 def between(self, pairs):
2914 2963 r = []
2915 2964
2916 2965 for top, bottom in pairs:
2917 2966 n, l, i = top, [], 0
2918 2967 f = 1
2919 2968
2920 2969 while n != bottom and n != nullid:
2921 2970 p = self.changelog.parents(n)[0]
2922 2971 if i == f:
2923 2972 l.append(n)
2924 2973 f = f * 2
2925 2974 n = p
2926 2975 i += 1
2927 2976
2928 2977 r.append(l)
2929 2978
2930 2979 return r
2931 2980
2932 2981 def checkpush(self, pushop):
2933 2982 """Extensions can override this function if additional checks have
2934 2983 to be performed before pushing, or call it if they override push
2935 2984 command.
2936 2985 """
2937 2986
2938 2987 @unfilteredpropertycache
2939 2988 def prepushoutgoinghooks(self):
2940 2989 """Return util.hooks consists of a pushop with repo, remote, outgoing
2941 2990 methods, which are called before pushing changesets.
2942 2991 """
2943 2992 return util.hooks()
2944 2993
2945 2994 def pushkey(self, namespace, key, old, new):
2946 2995 try:
2947 2996 tr = self.currenttransaction()
2948 2997 hookargs = {}
2949 2998 if tr is not None:
2950 2999 hookargs.update(tr.hookargs)
2951 3000 hookargs = pycompat.strkwargs(hookargs)
2952 3001 hookargs[r'namespace'] = namespace
2953 3002 hookargs[r'key'] = key
2954 3003 hookargs[r'old'] = old
2955 3004 hookargs[r'new'] = new
2956 3005 self.hook('prepushkey', throw=True, **hookargs)
2957 3006 except error.HookAbort as exc:
2958 3007 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2959 3008 if exc.hint:
2960 3009 self.ui.write_err(_("(%s)\n") % exc.hint)
2961 3010 return False
2962 3011 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2963 3012 ret = pushkey.push(self, namespace, key, old, new)
2964 3013 def runhook():
2965 3014 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2966 3015 ret=ret)
2967 3016 self._afterlock(runhook)
2968 3017 return ret
2969 3018
2970 3019 def listkeys(self, namespace):
2971 3020 self.hook('prelistkeys', throw=True, namespace=namespace)
2972 3021 self.ui.debug('listing keys for "%s"\n' % namespace)
2973 3022 values = pushkey.list(self, namespace)
2974 3023 self.hook('listkeys', namespace=namespace, values=values)
2975 3024 return values
2976 3025
2977 3026 def debugwireargs(self, one, two, three=None, four=None, five=None):
2978 3027 '''used to test argument passing over the wire'''
2979 3028 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2980 3029 pycompat.bytestr(four),
2981 3030 pycompat.bytestr(five))
2982 3031
2983 3032 def savecommitmessage(self, text):
2984 3033 fp = self.vfs('last-message.txt', 'wb')
2985 3034 try:
2986 3035 fp.write(text)
2987 3036 finally:
2988 3037 fp.close()
2989 3038 return self.pathto(fp.name[len(self.root) + 1:])
2990 3039
2991 3040 # used to avoid circular references so destructors work
2992 3041 def aftertrans(files):
2993 3042 renamefiles = [tuple(t) for t in files]
2994 3043 def a():
2995 3044 for vfs, src, dest in renamefiles:
2996 3045 # if src and dest refer to the same file, vfs.rename is a no-op,
2997 3046 # leaving both src and dest on disk. delete dest to make sure
2998 3047 # the rename couldn't be such a no-op.
2999 3048 vfs.tryunlink(dest)
3000 3049 try:
3001 3050 vfs.rename(src, dest)
3002 3051 except OSError: # journal file does not yet exist
3003 3052 pass
3004 3053 return a
3005 3054
3006 3055 def undoname(fn):
3007 3056 base, name = os.path.split(fn)
3008 3057 assert name.startswith('journal')
3009 3058 return os.path.join(base, name.replace('journal', 'undo', 1))
3010 3059
3011 3060 def instance(ui, path, create, intents=None, createopts=None):
3012 3061 localpath = util.urllocalpath(path)
3013 3062 if create:
3014 3063 createrepository(ui, localpath, createopts=createopts)
3015 3064
3016 3065 return makelocalrepository(ui, localpath, intents=intents)
3017 3066
3018 3067 def islocal(path):
3019 3068 return True
3020 3069
3021 3070 def defaultcreateopts(ui, createopts=None):
3022 3071 """Populate the default creation options for a repository.
3023 3072
3024 3073 A dictionary of explicitly requested creation options can be passed
3025 3074 in. Missing keys will be populated.
3026 3075 """
3027 3076 createopts = dict(createopts or {})
3028 3077
3029 3078 if 'backend' not in createopts:
3030 3079 # experimental config: storage.new-repo-backend
3031 3080 createopts['backend'] = ui.config('storage', 'new-repo-backend')
3032 3081
3033 3082 return createopts
3034 3083
3035 3084 def newreporequirements(ui, createopts):
3036 3085 """Determine the set of requirements for a new local repository.
3037 3086
3038 3087 Extensions can wrap this function to specify custom requirements for
3039 3088 new repositories.
3040 3089 """
3041 3090 # If the repo is being created from a shared repository, we copy
3042 3091 # its requirements.
3043 3092 if 'sharedrepo' in createopts:
3044 3093 requirements = set(createopts['sharedrepo'].requirements)
3045 3094 if createopts.get('sharedrelative'):
3046 3095 requirements.add('relshared')
3047 3096 else:
3048 3097 requirements.add('shared')
3049 3098
3050 3099 return requirements
3051 3100
3052 3101 if 'backend' not in createopts:
3053 3102 raise error.ProgrammingError('backend key not present in createopts; '
3054 3103 'was defaultcreateopts() called?')
3055 3104
3056 3105 if createopts['backend'] != 'revlogv1':
3057 3106 raise error.Abort(_('unable to determine repository requirements for '
3058 3107 'storage backend: %s') % createopts['backend'])
3059 3108
3060 3109 requirements = {'revlogv1'}
3061 3110 if ui.configbool('format', 'usestore'):
3062 3111 requirements.add('store')
3063 3112 if ui.configbool('format', 'usefncache'):
3064 3113 requirements.add('fncache')
3065 3114 if ui.configbool('format', 'dotencode'):
3066 3115 requirements.add('dotencode')
3067 3116
3068 3117 compengine = ui.config('format', 'revlog-compression')
3069 3118 if compengine not in util.compengines:
3070 3119 raise error.Abort(_('compression engine %s defined by '
3071 3120 'format.revlog-compression not available') %
3072 3121 compengine,
3073 3122 hint=_('run "hg debuginstall" to list available '
3074 3123 'compression engines'))
3075 3124
3076 3125 # zlib is the historical default and doesn't need an explicit requirement.
3077 3126 elif compengine == 'zstd':
3078 3127 requirements.add('revlog-compression-zstd')
3079 3128 elif compengine != 'zlib':
3080 3129 requirements.add('exp-compression-%s' % compengine)
3081 3130
3082 3131 if scmutil.gdinitconfig(ui):
3083 3132 requirements.add('generaldelta')
3084 3133 if ui.configbool('format', 'sparse-revlog'):
3085 3134 requirements.add(SPARSEREVLOG_REQUIREMENT)
3086 3135 if ui.configbool('experimental', 'treemanifest'):
3087 3136 requirements.add('treemanifest')
3088 3137
3089 3138 revlogv2 = ui.config('experimental', 'revlogv2')
3090 3139 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
3091 3140 requirements.remove('revlogv1')
3092 3141 # generaldelta is implied by revlogv2.
3093 3142 requirements.discard('generaldelta')
3094 3143 requirements.add(REVLOGV2_REQUIREMENT)
3095 3144 # experimental config: format.internal-phase
3096 3145 if ui.configbool('format', 'internal-phase'):
3097 3146 requirements.add('internal-phase')
3098 3147
3099 3148 if createopts.get('narrowfiles'):
3100 3149 requirements.add(repository.NARROW_REQUIREMENT)
3101 3150
3102 3151 if createopts.get('lfs'):
3103 3152 requirements.add('lfs')
3104 3153
3105 3154 if ui.configbool('format', 'bookmarks-in-store'):
3106 3155 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3107 3156
3108 3157 return requirements
3109 3158
3110 3159 def filterknowncreateopts(ui, createopts):
3111 3160 """Filters a dict of repo creation options against options that are known.
3112 3161
3113 3162 Receives a dict of repo creation options and returns a dict of those
3114 3163 options that we don't know how to handle.
3115 3164
3116 3165 This function is called as part of repository creation. If the
3117 3166 returned dict contains any items, repository creation will not
3118 3167 be allowed, as it means there was a request to create a repository
3119 3168 with options not recognized by loaded code.
3120 3169
3121 3170 Extensions can wrap this function to filter out creation options
3122 3171 they know how to handle.
3123 3172 """
3124 3173 known = {
3125 3174 'backend',
3126 3175 'lfs',
3127 3176 'narrowfiles',
3128 3177 'sharedrepo',
3129 3178 'sharedrelative',
3130 3179 'shareditems',
3131 3180 'shallowfilestore',
3132 3181 }
3133 3182
3134 3183 return {k: v for k, v in createopts.items() if k not in known}
3135 3184
3136 3185 def createrepository(ui, path, createopts=None):
3137 3186 """Create a new repository in a vfs.
3138 3187
3139 3188 ``path`` path to the new repo's working directory.
3140 3189 ``createopts`` options for the new repository.
3141 3190
3142 3191 The following keys for ``createopts`` are recognized:
3143 3192
3144 3193 backend
3145 3194 The storage backend to use.
3146 3195 lfs
3147 3196 Repository will be created with ``lfs`` requirement. The lfs extension
3148 3197 will automatically be loaded when the repository is accessed.
3149 3198 narrowfiles
3150 3199 Set up repository to support narrow file storage.
3151 3200 sharedrepo
3152 3201 Repository object from which storage should be shared.
3153 3202 sharedrelative
3154 3203 Boolean indicating if the path to the shared repo should be
3155 3204 stored as relative. By default, the pointer to the "parent" repo
3156 3205 is stored as an absolute path.
3157 3206 shareditems
3158 3207 Set of items to share to the new repository (in addition to storage).
3159 3208 shallowfilestore
3160 3209 Indicates that storage for files should be shallow (not all ancestor
3161 3210 revisions are known).
3162 3211 """
3163 3212 createopts = defaultcreateopts(ui, createopts=createopts)
3164 3213
3165 3214 unknownopts = filterknowncreateopts(ui, createopts)
3166 3215
3167 3216 if not isinstance(unknownopts, dict):
3168 3217 raise error.ProgrammingError('filterknowncreateopts() did not return '
3169 3218 'a dict')
3170 3219
3171 3220 if unknownopts:
3172 3221 raise error.Abort(_('unable to create repository because of unknown '
3173 3222 'creation option: %s') %
3174 3223 ', '.join(sorted(unknownopts)),
3175 3224 hint=_('is a required extension not loaded?'))
3176 3225
3177 3226 requirements = newreporequirements(ui, createopts=createopts)
3178 3227
3179 3228 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3180 3229
3181 3230 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3182 3231 if hgvfs.exists():
3183 3232 raise error.RepoError(_('repository %s already exists') % path)
3184 3233
3185 3234 if 'sharedrepo' in createopts:
3186 3235 sharedpath = createopts['sharedrepo'].sharedpath
3187 3236
3188 3237 if createopts.get('sharedrelative'):
3189 3238 try:
3190 3239 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3191 3240 except (IOError, ValueError) as e:
3192 3241 # ValueError is raised on Windows if the drive letters differ
3193 3242 # on each path.
3194 3243 raise error.Abort(_('cannot calculate relative path'),
3195 3244 hint=stringutil.forcebytestr(e))
3196 3245
3197 3246 if not wdirvfs.exists():
3198 3247 wdirvfs.makedirs()
3199 3248
3200 3249 hgvfs.makedir(notindexed=True)
3201 3250 if 'sharedrepo' not in createopts:
3202 3251 hgvfs.mkdir(b'cache')
3203 3252 hgvfs.mkdir(b'wcache')
3204 3253
3205 3254 if b'store' in requirements and 'sharedrepo' not in createopts:
3206 3255 hgvfs.mkdir(b'store')
3207 3256
3208 3257 # We create an invalid changelog outside the store so very old
3209 3258 # Mercurial versions (which didn't know about the requirements
3210 3259 # file) encounter an error on reading the changelog. This
3211 3260 # effectively locks out old clients and prevents them from
3212 3261 # mucking with a repo in an unknown format.
3213 3262 #
3214 3263 # The revlog header has version 2, which won't be recognized by
3215 3264 # such old clients.
3216 3265 hgvfs.append(b'00changelog.i',
3217 3266 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3218 3267 b'layout')
3219 3268
3220 3269 scmutil.writerequires(hgvfs, requirements)
3221 3270
3222 3271 # Write out file telling readers where to find the shared store.
3223 3272 if 'sharedrepo' in createopts:
3224 3273 hgvfs.write(b'sharedpath', sharedpath)
3225 3274
3226 3275 if createopts.get('shareditems'):
3227 3276 shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
3228 3277 hgvfs.write(b'shared', shared)
3229 3278
3230 3279 def poisonrepository(repo):
3231 3280 """Poison a repository instance so it can no longer be used."""
3232 3281 # Perform any cleanup on the instance.
3233 3282 repo.close()
3234 3283
3235 3284 # Our strategy is to replace the type of the object with one that
3236 3285 # has all attribute lookups result in error.
3237 3286 #
3238 3287 # But we have to allow the close() method because some constructors
3239 3288 # of repos call close() on repo references.
3240 3289 class poisonedrepository(object):
3241 3290 def __getattribute__(self, item):
3242 3291 if item == r'close':
3243 3292 return object.__getattribute__(self, item)
3244 3293
3245 3294 raise error.ProgrammingError('repo instances should not be used '
3246 3295 'after unshare')
3247 3296
3248 3297 def close(self):
3249 3298 pass
3250 3299
3251 3300 # We may have a repoview, which intercepts __setattr__. So be sure
3252 3301 # we operate at the lowest level possible.
3253 3302 object.__setattr__(repo, r'__class__', poisonedrepository)
@@ -1,479 +1,482 b''
1 1 # repair.py - functions for repository repair for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 # Copyright 2007 Matt Mackall
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import hashlib
13 13
14 14 from .i18n import _
15 15 from .node import (
16 16 hex,
17 17 short,
18 18 )
19 19 from . import (
20 20 bundle2,
21 21 changegroup,
22 22 discovery,
23 23 error,
24 24 exchange,
25 25 obsolete,
26 26 obsutil,
27 27 phases,
28 28 pycompat,
29 29 util,
30 30 )
31 31 from .utils import (
32 32 stringutil,
33 33 )
34 34
35 35 def backupbundle(repo, bases, heads, node, suffix, compress=True,
36 36 obsolescence=True):
37 37 """create a bundle with the specified revisions as a backup"""
38 38
39 39 backupdir = "strip-backup"
40 40 vfs = repo.vfs
41 41 if not vfs.isdir(backupdir):
42 42 vfs.mkdir(backupdir)
43 43
44 44 # Include a hash of all the nodes in the filename for uniqueness
45 45 allcommits = repo.set('%ln::%ln', bases, heads)
46 46 allhashes = sorted(c.hex() for c in allcommits)
47 47 totalhash = hashlib.sha1(''.join(allhashes)).digest()
48 48 name = "%s/%s-%s-%s.hg" % (backupdir, short(node),
49 49 hex(totalhash[:4]), suffix)
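# the resulting name looks like (illustrative values only):
#   strip-backup/1234567890ab-89abcdef-backup.hg
# i.e. <short node>-<first 4 digest bytes as hex>-<suffix>.hg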
50 50
51 51 cgversion = changegroup.localversion(repo)
52 52 comp = None
53 53 if cgversion != '01':
54 54 bundletype = "HG20"
55 55 if compress:
56 56 comp = 'BZ'
57 57 elif compress:
58 58 bundletype = "HG10BZ"
59 59 else:
60 60 bundletype = "HG10UN"
61 61
62 62 outgoing = discovery.outgoing(repo, missingroots=bases, missingheads=heads)
63 63 contentopts = {
64 64 'cg.version': cgversion,
65 65 'obsolescence': obsolescence,
66 66 'phases': True,
67 67 }
68 68 return bundle2.writenewbundle(repo.ui, repo, 'strip', name, bundletype,
69 69 outgoing, contentopts, vfs, compression=comp)
70 70
71 71 def _collectfiles(repo, striprev):
72 72 """find out the filelogs affected by the strip"""
73 73 files = set()
74 74
75 75 for x in pycompat.xrange(striprev, len(repo)):
76 76 files.update(repo[x].files())
77 77
78 78 return sorted(files)
79 79
80 80 def _collectrevlog(revlog, striprev):
81 81 _, brokenset = revlog.getstrippoint(striprev)
82 82 return [revlog.linkrev(r) for r in brokenset]
83 83
84 84 def _collectmanifest(repo, striprev):
85 85 return _collectrevlog(repo.manifestlog.getstorage(b''), striprev)
86 86
87 87 def _collectbrokencsets(repo, files, striprev):
88 88 """return the changesets which will be broken by the truncation"""
89 89 s = set()
90 90
91 91 s.update(_collectmanifest(repo, striprev))
92 92 for fname in files:
93 93 s.update(_collectrevlog(repo.file(fname), striprev))
94 94
95 95 return s
96 96
97 97 def strip(ui, repo, nodelist, backup=True, topic='backup'):
98 98 # This function requires the caller to lock the repo, but it operates
99 99 # within a transaction of its own, and thus requires there to be no current
100 100 # transaction when it is called.
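# A minimal calling sketch honoring that contract (hypothetical caller):
#
#     with repo.lock():
#         repair.strip(ui, repo, [node], backup=True)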
101 101 if repo.currenttransaction() is not None:
102 102 raise error.ProgrammingError('cannot strip from inside a transaction')
103 103
104 104 # Simple way to maintain backwards compatibility for this
105 105 # argument.
106 106 if backup in ['none', 'strip']:
107 107 backup = False
108 108
109 109 repo = repo.unfiltered()
110 110 repo.destroying()
111 111 vfs = repo.vfs
112 # load bookmarks before the changelog to avoid side effects from an
113 # outdated changelog (see repo._refreshchangelog)
114 repo._bookmarks
112 115 cl = repo.changelog
113 116
114 117 # TODO handle undo of merge sets
115 118 if isinstance(nodelist, str):
116 119 nodelist = [nodelist]
117 120 striplist = [cl.rev(node) for node in nodelist]
118 121 striprev = min(striplist)
119 122
120 123 files = _collectfiles(repo, striprev)
121 124 saverevs = _collectbrokencsets(repo, files, striprev)
122 125
123 126 # Some revisions with rev > striprev may not be descendants of striprev.
124 127 # We have to find these revisions and put them in a bundle, so that
125 128 # we can restore them after the truncations.
126 129 # To create the bundle we use repo.changegroupsubset which requires
127 130 # the list of heads and bases of the set of interesting revisions.
128 131 # (head = revision in the set that has no descendant in the set;
129 132 # base = revision in the set that has no ancestor in the set)
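# A rough illustration (hypothetical history): stripping rev 2 from a
# linear 0-1-2-3-4 with a side branch 5 off rev 1 marks 2, 3 and 4 for
# removal; rev 5 survives, ends up in both saveheads and savebases, and
# is bundled and re-applied after the truncation.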
130 133 tostrip = set(striplist)
131 134 saveheads = set(saverevs)
132 135 for r in cl.revs(start=striprev + 1):
133 136 if any(p in tostrip for p in cl.parentrevs(r)):
134 137 tostrip.add(r)
135 138
136 139 if r not in tostrip:
137 140 saverevs.add(r)
138 141 saveheads.difference_update(cl.parentrevs(r))
139 142 saveheads.add(r)
140 143 saveheads = [cl.node(r) for r in saveheads]
141 144
142 145 # compute base nodes
143 146 if saverevs:
144 147 descendants = set(cl.descendants(saverevs))
145 148 saverevs.difference_update(descendants)
146 149 savebases = [cl.node(r) for r in saverevs]
147 150 stripbases = [cl.node(r) for r in tostrip]
148 151
149 152 stripobsidx = obsmarkers = ()
150 153 if repo.ui.configbool('devel', 'strip-obsmarkers'):
151 154 obsmarkers = obsutil.exclusivemarkers(repo, stripbases)
152 155 if obsmarkers:
153 156 stripobsidx = [i for i, m in enumerate(repo.obsstore)
154 157 if m in obsmarkers]
155 158
156 159 newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)
157 160
158 161 backupfile = None
159 162 node = nodelist[-1]
160 163 if backup:
161 164 backupfile = _createstripbackup(repo, stripbases, node, topic)
162 165 # create a changegroup for all the branches we need to keep
163 166 tmpbundlefile = None
164 167 if saveheads:
165 168 # do not compress temporary bundle if we remove it from disk later
166 169 #
167 170 # We do not include obsolescence, it might re-introduce prune markers
168 171 # we are trying to strip. This is harmless since the stripped markers
169 172 # are already backed up and we did not touch the markers for the
170 173 # saved changesets.
171 174 tmpbundlefile = backupbundle(repo, savebases, saveheads, node, 'temp',
172 175 compress=False, obsolescence=False)
173 176
174 177 with ui.uninterruptible():
175 178 try:
176 179 with repo.transaction("strip") as tr:
177 180 # TODO this code violates the interface abstraction of the
178 181 # transaction and makes assumptions that file storage is
179 182 # using append-only files. We'll need some kind of storage
180 183 # API to handle stripping for us.
181 184 offset = len(tr._entries)
182 185
183 186 tr.startgroup()
184 187 cl.strip(striprev, tr)
185 188 stripmanifest(repo, striprev, tr, files)
186 189
187 190 for fn in files:
188 191 repo.file(fn).strip(striprev, tr)
189 192 tr.endgroup()
190 193
191 194 for i in pycompat.xrange(offset, len(tr._entries)):
192 195 file, troffset, ignore = tr._entries[i]
193 196 with repo.svfs(file, 'a', checkambig=True) as fp:
194 197 fp.truncate(troffset)
195 198 if troffset == 0:
196 199 repo.store.markremoved(file)
197 200
198 201 deleteobsmarkers(repo.obsstore, stripobsidx)
199 202 del repo.obsstore
200 203 repo.invalidatevolatilesets()
201 204 repo._phasecache.filterunknown(repo)
202 205
203 206 if tmpbundlefile:
204 207 ui.note(_("adding branch\n"))
205 208 f = vfs.open(tmpbundlefile, "rb")
206 209 gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
207 210 if not repo.ui.verbose:
208 211 # silence internal shuffling chatter
209 212 repo.ui.pushbuffer()
210 213 tmpbundleurl = 'bundle:' + vfs.join(tmpbundlefile)
211 214 txnname = 'strip'
212 215 if not isinstance(gen, bundle2.unbundle20):
213 216 txnname = "strip\n%s" % util.hidepassword(tmpbundleurl)
214 217 with repo.transaction(txnname) as tr:
215 218 bundle2.applybundle(repo, gen, tr, source='strip',
216 219 url=tmpbundleurl)
217 220 if not repo.ui.verbose:
218 221 repo.ui.popbuffer()
219 222 f.close()
220 223
221 224 with repo.transaction('repair') as tr:
222 225 bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
223 226 repo._bookmarks.applychanges(repo, tr, bmchanges)
224 227
225 228 # remove undo files
226 229 for undovfs, undofile in repo.undofiles():
227 230 try:
228 231 undovfs.unlink(undofile)
229 232 except OSError as e:
230 233 if e.errno != errno.ENOENT:
231 234 ui.warn(_('error removing %s: %s\n') %
232 235 (undovfs.join(undofile),
233 236 stringutil.forcebytestr(e)))
234 237
235 238 except: # re-raises
236 239 if backupfile:
237 240 ui.warn(_("strip failed, backup bundle stored in '%s'\n")
238 241 % vfs.join(backupfile))
239 242 if tmpbundlefile:
240 243 ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
241 244 % vfs.join(tmpbundlefile))
242 245 ui.warn(_("(fix the problem, then recover the changesets with "
243 246 "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
244 247 raise
245 248 else:
246 249 if tmpbundlefile:
247 250 # Remove temporary bundle only if there were no exceptions
248 251 vfs.unlink(tmpbundlefile)
249 252
250 253 repo.destroyed()
251 254 # return the backup file path (or None if 'backup' was False) so
252 255 # extensions can use it
253 256 return backupfile
254 257
255 258 def softstrip(ui, repo, nodelist, backup=True, topic='backup'):
256 259 """perform a "soft" strip using the archived phase"""
257 260 tostrip = [c.node() for c in repo.set('sort(%ln::)', nodelist)]
258 261 if not tostrip:
259 262 return None
260 263
261 264 newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)
262 265 if backup:
263 266 node = tostrip[0]
264 267 backupfile = _createstripbackup(repo, tostrip, node, topic)
265 268
266 269 with repo.transaction('strip') as tr:
267 270 phases.retractboundary(repo, tr, phases.archived, tostrip)
268 271 bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
269 272 repo._bookmarks.applychanges(repo, tr, bmchanges)
270 273 return backupfile
271 274
272 275
273 276 def _bookmarkmovements(repo, tostrip):
274 277 # compute necessary bookmark movement
275 278 bm = repo._bookmarks
276 279 updatebm = []
277 280 for m in bm:
278 281 rev = repo[bm[m]].rev()
279 282 if rev in tostrip:
280 283 updatebm.append(m)
281 284 newbmtarget = None
282 285 # If we need to move bookmarks, compute bookmark
283 286 # targets. Otherwise we can skip doing this logic.
284 287 if updatebm:
285 288 # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)),
286 289 # but is much faster
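        # e.g. (hypothetical) for s = {2, 3} on a linear history 0-1-2-3,
        # parents(s) - s = {1} and heads(::s - s) = {1}: both forms pick
        # rev 1 as the new bookmark target.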
287 290 newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
288 291 if newbmtarget:
289 292 newbmtarget = repo[newbmtarget.first()].node()
290 293 else:
291 294 newbmtarget = '.'
292 295 return newbmtarget, updatebm
293 296
294 297 def _createstripbackup(repo, stripbases, node, topic):
295 298 # back up the changesets we are about to strip
296 299 vfs = repo.vfs
297 300 cl = repo.changelog
298 301 backupfile = backupbundle(repo, stripbases, cl.heads(), node, topic)
299 302 repo.ui.status(_("saved backup bundle to %s\n") %
300 303 vfs.join(backupfile))
301 304 repo.ui.log("backupbundle", "saved backup bundle to %s\n",
302 305 vfs.join(backupfile))
303 306 return backupfile
304 307
305 308 def safestriproots(ui, repo, nodes):
306 309 """return list of roots of nodes where descendants are covered by nodes"""
307 310 torev = repo.unfiltered().changelog.rev
308 311 revs = set(torev(n) for n in nodes)
309 312 # tostrip = wanted - unsafe = wanted - ancestors(orphaned)
310 313 # orphaned = affected - wanted
311 314 # affected = descendants(roots(wanted))
312 315 # wanted = revs
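    # Worked example (hypothetical, assuming no internal-phase changesets):
    # on a linear history 0-1-2 with nodes = {1}, affected = {1, 2} and
    # orphaned = {2}; ancestors({2}) covers rev 1, so tostrip ends up empty
    # and the warning below is issued for rev 1 instead of stripping it.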
313 316 revset = '%ld - ( ::( (roots(%ld):: and not _phase(%s)) -%ld) )'
314 317 tostrip = set(repo.revs(revset, revs, revs, phases.internal, revs))
315 318 notstrip = revs - tostrip
316 319 if notstrip:
317 320 nodestr = ', '.join(sorted(short(repo[n].node()) for n in notstrip))
318 321 ui.warn(_('warning: orphaned descendants detected, '
319 322 'not stripping %s\n') % nodestr)
320 323 return [c.node() for c in repo.set('roots(%ld)', tostrip)]
321 324
322 325 class stripcallback(object):
323 326 """used as a transaction postclose callback"""
324 327
325 328 def __init__(self, ui, repo, backup, topic):
326 329 self.ui = ui
327 330 self.repo = repo
328 331 self.backup = backup
329 332 self.topic = topic or 'backup'
330 333 self.nodelist = []
331 334
332 335 def addnodes(self, nodes):
333 336 self.nodelist.extend(nodes)
334 337
335 338 def __call__(self, tr):
336 339 roots = safestriproots(self.ui, self.repo, self.nodelist)
337 340 if roots:
338 341 strip(self.ui, self.repo, roots, self.backup, self.topic)
339 342
340 343 def delayedstrip(ui, repo, nodelist, topic=None, backup=True):
341 344 """like strip, but works inside transaction and won't strip irreverent revs
342 345
343 346 nodelist must explicitly contain all descendants. Otherwise a warning will
344 347 be printed that some nodes are not stripped.
345 348
346 349 Will do a backup if `backup` is True. The last non-None "topic" will be
347 350 used as the backup topic name. The default backup topic name is "backup".
348 351 """
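    # Minimal usage sketch (hypothetical extension code, not part of this
    # module), deferring the strip until the transaction closes:
    #
    #     with repo.transaction(b'rewrite'):
    #         repair.delayedstrip(ui, repo, nodes, topic=b'rewrite')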
349 352 tr = repo.currenttransaction()
350 353 if not tr:
351 354 nodes = safestriproots(ui, repo, nodelist)
352 355 return strip(ui, repo, nodes, backup=backup, topic=topic)
353 356 # transaction postclose callbacks are called in alphabetical order.
354 357 # use '\xff' as a prefix so we are likely to be called last.
355 358 callback = tr.getpostclose('\xffstrip')
356 359 if callback is None:
357 360 callback = stripcallback(ui, repo, backup=backup, topic=topic)
358 361 tr.addpostclose('\xffstrip', callback)
359 362 if topic:
360 363 callback.topic = topic
361 364 callback.addnodes(nodelist)
362 365
363 366 def stripmanifest(repo, striprev, tr, files):
364 367 revlog = repo.manifestlog.getstorage(b'')
365 368 revlog.strip(striprev, tr)
366 369 striptrees(repo, tr, striprev, files)
367 370
368 371 def striptrees(repo, tr, striprev, files):
369 372 if 'treemanifest' in repo.requirements:
370 373 # This logic is safe even if treemanifest isn't enabled, but it is
371 374 # also pointless in that case, so we skip it.
372 375 for unencoded, encoded, size in repo.store.datafiles():
373 376 if (unencoded.startswith('meta/') and
374 377 unencoded.endswith('00manifest.i')):
375 378 dir = unencoded[5:-12]
376 379 repo.manifestlog.getstorage(dir).strip(striprev, tr)
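                # For example, an entry 'meta/foo/bar/00manifest.i' above
                # (the 'foo/bar' path is hypothetical) yields
                # dir == 'foo/bar/', whose tree manifest revlog is stripped.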
377 380
378 381 def rebuildfncache(ui, repo):
379 382 """Rebuilds the fncache file from repo history.
380 383
381 384 Missing entries will be added. Extra entries will be removed.
382 385 """
383 386 repo = repo.unfiltered()
384 387
385 388 if 'fncache' not in repo.requirements:
386 389 ui.warn(_('(not rebuilding fncache because repository does not '
387 390 'support fncache)\n'))
388 391 return
389 392
390 393 with repo.lock():
391 394 fnc = repo.store.fncache
392 395 # Trigger load of fncache.
393 396 if 'irrelevant' in fnc:
394 397 pass
395 398
396 399 oldentries = set(fnc.entries)
397 400 newentries = set()
398 401 seenfiles = set()
399 402
400 403 progress = ui.makeprogress(_('rebuilding'), unit=_('changesets'),
401 404 total=len(repo))
402 405 for rev in repo:
403 406 progress.update(rev)
404 407
405 408 ctx = repo[rev]
406 409 for f in ctx.files():
407 410 # This is to minimize I/O.
408 411 if f in seenfiles:
409 412 continue
410 413 seenfiles.add(f)
411 414
412 415 i = 'data/%s.i' % f
413 416 d = 'data/%s.d' % f
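            # e.g. (hypothetical) f == 'foo/bar.txt' maps to
            # 'data/foo/bar.txt.i' and 'data/foo/bar.txt.d'; only paths
            # that actually exist in the store are added to the new
            # entries below.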
414 417
415 418 if repo.store._exists(i):
416 419 newentries.add(i)
417 420 if repo.store._exists(d):
418 421 newentries.add(d)
419 422
420 423 progress.complete()
421 424
422 425 if 'treemanifest' in repo.requirements:
423 426 # This logic is safe even if treemanifest isn't enabled, but it is
424 427 # also pointless in that case, so we skip it.
425 428 for dir in util.dirs(seenfiles):
426 429 i = 'meta/%s/00manifest.i' % dir
427 430 d = 'meta/%s/00manifest.d' % dir
428 431
429 432 if repo.store._exists(i):
430 433 newentries.add(i)
431 434 if repo.store._exists(d):
432 435 newentries.add(d)
433 436
434 437 addcount = len(newentries - oldentries)
435 438 removecount = len(oldentries - newentries)
436 439 for p in sorted(oldentries - newentries):
437 440 ui.write(_('removing %s\n') % p)
438 441 for p in sorted(newentries - oldentries):
439 442 ui.write(_('adding %s\n') % p)
440 443
441 444 if addcount or removecount:
442 445 ui.write(_('%d items added, %d removed from fncache\n') %
443 446 (addcount, removecount))
444 447 fnc.entries = newentries
445 448 fnc._dirty = True
446 449
447 450 with repo.transaction('fncache') as tr:
448 451 fnc.write(tr)
449 452 else:
450 453 ui.write(_('fncache already up to date\n'))
451 454
452 455 def deleteobsmarkers(obsstore, indices):
453 456 """Delete some obsmarkers from obsstore and return how many were deleted
454 457
455 458 'indices' is a list of ints which are the indices
456 459 of the markers to be deleted.
457 460
458 461 Every invocation of this function completely rewrites the obsstore file,
459 462 skipping the markers we want to be removed. The new temporary file is
460 463 created, remaining markers are written there and on .close() this file
461 464 gets atomically renamed to obsstore, thus guaranteeing consistency."""
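    # Hypothetical example: deleteobsmarkers(repo.obsstore, [0, 2]) rewrites
    # the obsstore without its first and third markers and returns 2.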
462 465 if not indices:
463 466 # we don't want to rewrite the obsstore with the same content
464 467 return
465 468
466 469 left = []
467 470 current = obsstore._all
468 471 n = 0
469 472 for i, m in enumerate(current):
470 473 if i in indices:
471 474 n += 1
472 475 continue
473 476 left.append(m)
474 477
475 478 newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
476 479 for bytes in obsolete.encodemarkers(left, True, obsstore._version):
477 480 newobsstorefile.write(bytes)
478 481 newobsstorefile.close()
479 482 return n
@@ -1,245 +1,246 b''
1 1 ================================
2 2 Test corner case around bookmark
3 3 ================================
4 4
5 5 This test file is meant to gather bookmark tests that are specific enough
6 6 not to fit anywhere else.
7 7
8 8 Test bookmark/changelog race condition
9 9 ======================================
10 10
11 11 The data from the bookmark file are filtered to only contain bookmarks whose
12 12 nodes are known to the changelog. If the cache invalidation between these two
13 13 reads goes wrong, bookmarks can be dropped.
14 14
15 15 global setup
16 16 ------------
17 17
18 18 $ cat >> $HGRCPATH << EOF
19 19 > [ui]
20 20 > ssh = "$PYTHON" "$TESTDIR/dummyssh"
21 21 > [server]
22 22 > concurrent-push-mode=check-related
23 23 > EOF
24 24
25 25 Setup
26 26 -----
27 27
28 28 initial repository setup
29 29
30 30 $ hg init bookrace-server
31 31 $ cd bookrace-server
32 32 $ echo a > a
33 33 $ hg add a
34 34 $ hg commit -m root
35 35 $ echo a >> a
36 36 $ hg bookmark book-A
37 37 $ hg commit -m A0
38 38 $ hg up 'desc(root)'
39 39 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
40 40 (leaving bookmark book-A)
41 41 $ echo b > b
42 42 $ hg add b
43 43 $ hg bookmark book-B
44 44 $ hg commit -m B0
45 45 created new head
46 46 $ hg up null
47 47 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
48 48 (leaving bookmark book-B)
49 49 $ hg phase --public --rev 'all()'
50 50 $ hg log -G
51 51 o changeset: 2:c79985706978
52 52 | bookmark: book-B
53 53 | tag: tip
54 54 | parent: 0:6569b5a81c7e
55 55 | user: test
56 56 | date: Thu Jan 01 00:00:00 1970 +0000
57 57 | summary: B0
58 58 |
59 59 | o changeset: 1:39c28d785860
60 60 |/ bookmark: book-A
61 61 | user: test
62 62 | date: Thu Jan 01 00:00:00 1970 +0000
63 63 | summary: A0
64 64 |
65 65 o changeset: 0:6569b5a81c7e
66 66 user: test
67 67 date: Thu Jan 01 00:00:00 1970 +0000
68 68 summary: root
69 69
70 70 $ hg book
71 71 book-A 1:39c28d785860
72 72 book-B 2:c79985706978
73 73 $ cd ..
74 74
75 75 Add a new changeset on each bookmark, in distinct clones
76 76
77 77 $ hg clone ssh://user@dummy/bookrace-server client-A
78 78 requesting all changes
79 79 adding changesets
80 80 adding manifests
81 81 adding file changes
82 82 added 3 changesets with 3 changes to 2 files (+1 heads)
83 83 new changesets 6569b5a81c7e:c79985706978
84 84 updating to branch default
85 85 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
86 86 $ hg -R client-A update book-A
87 87 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
88 88 (activating bookmark book-A)
89 89 $ echo a >> client-A/a
90 90 $ hg -R client-A commit -m A1
91 91 $ hg clone ssh://user@dummy/bookrace-server client-B
92 92 requesting all changes
93 93 adding changesets
94 94 adding manifests
95 95 adding file changes
96 96 added 3 changesets with 3 changes to 2 files (+1 heads)
97 97 new changesets 6569b5a81c7e:c79985706978
98 98 updating to branch default
99 99 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
100 100 $ hg -R client-B update book-B
101 101 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
102 102 (activating bookmark book-B)
103 103 $ echo b >> client-B/b
104 104 $ hg -R client-B commit -m B1
105 105
106 106 extension to reproduce the race
107 107 -------------------------------
108 108
109 109 If two processes are pushing, we want to make sure the following happens:
110 110
111 111 * process A reads the changelog
112 112 * process B does its full push
113 113 * process A reads the bookmarks
114 114 * process A proceeds with the rest of the push
115 115
116 116 We build a server-side extension for this purpose
117 117
118 118 $ cat > bookrace.py << EOF
119 119 > import atexit
120 120 > import os
121 121 > import time
122 122 > from mercurial import bookmarks, error, extensions
123 123 >
124 124 > def wait(repo):
125 125 > if not os.path.exists('push-A-started'):
126 126 > assert repo._currentlock(repo._lockref) is None
127 127 > assert repo._currentlock(repo._wlockref) is None
128 128 > repo.ui.status(b'setting raced push up\n')
129 129 > with open('push-A-started', 'w'):
130 130 > pass
131 131 > clock = 300
132 132 > while not os.path.exists('push-B-done'):
133 133 > clock -= 1
134 134 > if clock <= 0:
135 135 > raise error.Abort("race scenario timed out")
136 136 > time.sleep(0.1)
137 137 >
138 138 > def reposetup(ui, repo):
139 139 > class racedrepo(repo.__class__):
140 140 > @property
141 141 > def _bookmarks(self):
142 142 > wait(self)
143 143 > return super(racedrepo, self)._bookmarks
144 144 > repo.__class__ = racedrepo
145 145 >
146 146 > def e():
147 147 > with open('push-A-done', 'w'):
148 148 > pass
149 149 > atexit.register(e)
150 150 > EOF
151 151
152 152 Actual test
153 153 -----------
154 154
155 155 Start the raced push.
156 156
157 157 $ cat >> bookrace-server/.hg/hgrc << EOF
158 158 > [extensions]
159 159 > bookrace=$TESTTMP/bookrace.py
160 160 > EOF
161 161 $ hg push -R client-A -r book-A >push-output.txt 2>&1 &
162 162
163 163 Wait up to 30 seconds for that push to start.
164 164
165 165 $ clock=30
166 166 $ while [ ! -f push-A-started ] && [ $clock -gt 0 ] ; do
167 167 > clock=`expr $clock - 1`
168 168 > sleep 1
169 169 > done
170 170
171 171 Do the other push.
172 172
173 173 $ cat >> bookrace-server/.hg/hgrc << EOF
174 174 > [extensions]
175 175 > bookrace=!
176 176 > EOF
177 177
178 178 $ hg push -R client-B -r book-B
179 179 pushing to ssh://user@dummy/bookrace-server
180 180 searching for changes
181 181 remote: adding changesets
182 182 remote: adding manifests
183 183 remote: adding file changes
184 184 remote: added 1 changesets with 1 changes to 1 files
185 185 updating bookmark book-B
186 186
187 187 Signal the raced push that we are done (it waits up to 30 seconds).
188 188
189 189 $ touch push-B-done
190 190
191 191 Wait for the raced push to finish (with the remainder of the initial 30 seconds).
192 192
193 193 $ while [ ! -f push-A-done ] && [ $clock -gt 0 ] ; do
194 194 > clock=`expr $clock - 1`
195 195 > sleep 1
196 196 > done
197 197
198 198 Check raced push output.
199 199
200 200 $ cat push-output.txt
201 201 pushing to ssh://user@dummy/bookrace-server
202 202 searching for changes
203 remote has heads on branch 'default' that are not known locally: f26c3b5167d1
203 204 remote: setting raced push up
204 205 remote: adding changesets
205 206 remote: adding manifests
206 207 remote: adding file changes
207 208 remote: added 1 changesets with 1 changes to 1 files
208 209 updating bookmark book-A
209 210
210 211 Check result of the push.
211 212
212 213 $ hg -R bookrace-server log -G
213 214 o changeset: 4:9ce3b28c16de
214 215 | bookmark: book-A
215 216 | tag: tip
216 217 | parent: 1:39c28d785860
217 218 | user: test
218 219 | date: Thu Jan 01 00:00:00 1970 +0000
219 220 | summary: A1
220 221 |
221 222 | o changeset: 3:f26c3b5167d1
222 | | bookmark: book-B (false !)
223 | | bookmark: book-B
223 224 | | user: test
224 225 | | date: Thu Jan 01 00:00:00 1970 +0000
225 226 | | summary: B1
226 227 | |
227 228 | o changeset: 2:c79985706978
228 229 | | parent: 0:6569b5a81c7e
229 230 | | user: test
230 231 | | date: Thu Jan 01 00:00:00 1970 +0000
231 232 | | summary: B0
232 233 | |
233 234 o | changeset: 1:39c28d785860
234 235 |/ user: test
235 236 | date: Thu Jan 01 00:00:00 1970 +0000
236 237 | summary: A0
237 238 |
238 239 o changeset: 0:6569b5a81c7e
239 240 user: test
240 241 date: Thu Jan 01 00:00:00 1970 +0000
241 242 summary: root
242 243
243 244 $ hg -R bookrace-server book
244 245 book-A 4:9ce3b28c16de
245 book-B 3:f26c3b5167d1 (false !)
246 book-B 3:f26c3b5167d1
@@ -1,1354 +1,1395 b''
1 1 $ echo "[extensions]" >> $HGRCPATH
2 2 $ echo "strip=" >> $HGRCPATH
3 3 $ echo "drawdag=$TESTDIR/drawdag.py" >> $HGRCPATH
4 4
5 5 $ restore() {
6 6 > hg unbundle -q .hg/strip-backup/*
7 7 > rm .hg/strip-backup/*
8 8 > }
9 9 $ teststrip() {
10 10 > hg up -C $1
11 11 > echo % before update $1, strip $2
12 > hg parents
12 > hg log -G -T '{rev}:{node}'
13 13 > hg --traceback strip $2
14 14 > echo % after update $1, strip $2
15 > hg parents
15 > hg log -G -T '{rev}:{node}'
16 16 > restore
17 17 > }
18 18
19 19 $ hg init test
20 20 $ cd test
21 21
22 22 $ echo foo > bar
23 23 $ hg ci -Ama
24 24 adding bar
25 25
26 26 $ echo more >> bar
27 27 $ hg ci -Amb
28 28
29 29 $ echo blah >> bar
30 30 $ hg ci -Amc
31 31
32 32 $ hg up 1
33 33 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
34 34 $ echo blah >> bar
35 35 $ hg ci -Amd
36 36 created new head
37 37
38 38 $ echo final >> bar
39 39 $ hg ci -Ame
40 40
41 41 $ hg log
42 42 changeset: 4:443431ffac4f
43 43 tag: tip
44 44 user: test
45 45 date: Thu Jan 01 00:00:00 1970 +0000
46 46 summary: e
47 47
48 48 changeset: 3:65bd5f99a4a3
49 49 parent: 1:ef3a871183d7
50 50 user: test
51 51 date: Thu Jan 01 00:00:00 1970 +0000
52 52 summary: d
53 53
54 54 changeset: 2:264128213d29
55 55 user: test
56 56 date: Thu Jan 01 00:00:00 1970 +0000
57 57 summary: c
58 58
59 59 changeset: 1:ef3a871183d7
60 60 user: test
61 61 date: Thu Jan 01 00:00:00 1970 +0000
62 62 summary: b
63 63
64 64 changeset: 0:9ab35a2d17cb
65 65 user: test
66 66 date: Thu Jan 01 00:00:00 1970 +0000
67 67 summary: a
68 68
69 69
70 70 $ teststrip 4 4
71 71 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
72 72 % before update 4, strip 4
73 changeset: 4:443431ffac4f
74 tag: tip
75 user: test
76 date: Thu Jan 01 00:00:00 1970 +0000
77 summary: e
73 @ 4:443431ffac4f5b5a19b0b6c298a21b7ba736bcce
74 |
75 o 3:65bd5f99a4a376cdea23a1153f07856b0d881d64
76 |
77 | o 2:264128213d290d868c54642d13aeaa3675551a78
78 |/
79 o 1:ef3a871183d7199c541cc140218298bbfcc6c28a
80 |
81 o 0:9ab35a2d17cb64271241ea881efcc19dd953215b
78 82
79 83 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
80 84 saved backup bundle to $TESTTMP/test/.hg/strip-backup/*-backup.hg (glob)
81 85 % after update 4, strip 4
82 changeset: 3:65bd5f99a4a3
83 tag: tip
84 parent: 1:ef3a871183d7
85 user: test
86 date: Thu Jan 01 00:00:00 1970 +0000
87 summary: d
86 @ 3:65bd5f99a4a376cdea23a1153f07856b0d881d64
87 |
88 | o 2:264128213d290d868c54642d13aeaa3675551a78
89 |/
90 o 1:ef3a871183d7199c541cc140218298bbfcc6c28a
91 |
92 o 0:9ab35a2d17cb64271241ea881efcc19dd953215b
88 93
89 94 $ teststrip 4 3
90 95 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
91 96 % before update 4, strip 3
92 changeset: 4:443431ffac4f
93 tag: tip
94 user: test
95 date: Thu Jan 01 00:00:00 1970 +0000
96 summary: e
97 @ 4:443431ffac4f5b5a19b0b6c298a21b7ba736bcce
98 |
99 o 3:65bd5f99a4a376cdea23a1153f07856b0d881d64
100 |
101 | o 2:264128213d290d868c54642d13aeaa3675551a78
102 |/
103 o 1:ef3a871183d7199c541cc140218298bbfcc6c28a
104 |
105 o 0:9ab35a2d17cb64271241ea881efcc19dd953215b
97 106
98 107 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
99 108 saved backup bundle to $TESTTMP/test/.hg/strip-backup/*-backup.hg (glob)
100 109 % after update 4, strip 3
101 changeset: 1:ef3a871183d7
102 user: test
103 date: Thu Jan 01 00:00:00 1970 +0000
104 summary: b
110 o 2:264128213d290d868c54642d13aeaa3675551a78
111 |
112 @ 1:ef3a871183d7199c541cc140218298bbfcc6c28a
113 |
114 o 0:9ab35a2d17cb64271241ea881efcc19dd953215b
105 115
106 116 $ teststrip 1 4
107 117 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
108 118 % before update 1, strip 4
109 changeset: 1:ef3a871183d7
110 user: test
111 date: Thu Jan 01 00:00:00 1970 +0000
112 summary: b
119 o 4:443431ffac4f5b5a19b0b6c298a21b7ba736bcce
120 |
121 o 3:65bd5f99a4a376cdea23a1153f07856b0d881d64
122 |
123 | o 2:264128213d290d868c54642d13aeaa3675551a78
124 |/
125 @ 1:ef3a871183d7199c541cc140218298bbfcc6c28a
126 |
127 o 0:9ab35a2d17cb64271241ea881efcc19dd953215b
113 128
114 129 saved backup bundle to $TESTTMP/test/.hg/strip-backup/*-backup.hg (glob)
115 130 % after update 1, strip 4
116 changeset: 1:ef3a871183d7
117 user: test
118 date: Thu Jan 01 00:00:00 1970 +0000
119 summary: b
131 o 3:65bd5f99a4a376cdea23a1153f07856b0d881d64
132 |
133 | o 2:264128213d290d868c54642d13aeaa3675551a78
134 |/
135 @ 1:ef3a871183d7199c541cc140218298bbfcc6c28a
136 |
137 o 0:9ab35a2d17cb64271241ea881efcc19dd953215b
120 138
121 139 $ teststrip 4 2
122 140 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
123 141 % before update 4, strip 2
124 changeset: 4:443431ffac4f
125 tag: tip
126 user: test
127 date: Thu Jan 01 00:00:00 1970 +0000
128 summary: e
142 @ 4:443431ffac4f5b5a19b0b6c298a21b7ba736bcce
143 |
144 o 3:65bd5f99a4a376cdea23a1153f07856b0d881d64
145 |
146 | o 2:264128213d290d868c54642d13aeaa3675551a78
147 |/
148 o 1:ef3a871183d7199c541cc140218298bbfcc6c28a
149 |
150 o 0:9ab35a2d17cb64271241ea881efcc19dd953215b
129 151
130 152 saved backup bundle to $TESTTMP/test/.hg/strip-backup/*-backup.hg (glob)
131 153 % after update 4, strip 2
132 changeset: 3:443431ffac4f
133 tag: tip
134 user: test
135 date: Thu Jan 01 00:00:00 1970 +0000
136 summary: e
154 @ 3:443431ffac4f5b5a19b0b6c298a21b7ba736bcce
155 |
156 o 2:65bd5f99a4a376cdea23a1153f07856b0d881d64
157 |
158 o 1:ef3a871183d7199c541cc140218298bbfcc6c28a
159 |
160 o 0:9ab35a2d17cb64271241ea881efcc19dd953215b
137 161
138 162 $ teststrip 4 1
139 163 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
140 164 % before update 4, strip 1
141 changeset: 4:264128213d29
142 tag: tip
143 parent: 1:ef3a871183d7
144 user: test
145 date: Thu Jan 01 00:00:00 1970 +0000
146 summary: c
165 @ 4:264128213d290d868c54642d13aeaa3675551a78
166 |
167 | o 3:443431ffac4f5b5a19b0b6c298a21b7ba736bcce
168 | |
169 | o 2:65bd5f99a4a376cdea23a1153f07856b0d881d64
170 |/
171 o 1:ef3a871183d7199c541cc140218298bbfcc6c28a
172 |
173 o 0:9ab35a2d17cb64271241ea881efcc19dd953215b
147 174
148 175 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
149 176 saved backup bundle to $TESTTMP/test/.hg/strip-backup/*-backup.hg (glob)
150 177 % after update 4, strip 1
151 changeset: 0:9ab35a2d17cb
152 tag: tip
153 user: test
154 date: Thu Jan 01 00:00:00 1970 +0000
155 summary: a
178 @ 0:9ab35a2d17cb64271241ea881efcc19dd953215b
156 179
157 180 $ teststrip null 4
158 181 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
159 182 % before update null, strip 4
183 o 4:264128213d290d868c54642d13aeaa3675551a78
184 |
185 | o 3:443431ffac4f5b5a19b0b6c298a21b7ba736bcce
186 | |
187 | o 2:65bd5f99a4a376cdea23a1153f07856b0d881d64
188 |/
189 o 1:ef3a871183d7199c541cc140218298bbfcc6c28a
190 |
191 o 0:9ab35a2d17cb64271241ea881efcc19dd953215b
192
160 193 saved backup bundle to $TESTTMP/test/.hg/strip-backup/*-backup.hg (glob)
161 194 % after update null, strip 4
195 o 3:443431ffac4f5b5a19b0b6c298a21b7ba736bcce
196 |
197 o 2:65bd5f99a4a376cdea23a1153f07856b0d881d64
198 |
199 o 1:ef3a871183d7199c541cc140218298bbfcc6c28a
200 |
201 o 0:9ab35a2d17cb64271241ea881efcc19dd953215b
202
162 203
163 204 $ hg log
164 205 changeset: 4:264128213d29
165 206 tag: tip
166 207 parent: 1:ef3a871183d7
167 208 user: test
168 209 date: Thu Jan 01 00:00:00 1970 +0000
169 210 summary: c
170 211
171 212 changeset: 3:443431ffac4f
172 213 user: test
173 214 date: Thu Jan 01 00:00:00 1970 +0000
174 215 summary: e
175 216
176 217 changeset: 2:65bd5f99a4a3
177 218 user: test
178 219 date: Thu Jan 01 00:00:00 1970 +0000
179 220 summary: d
180 221
181 222 changeset: 1:ef3a871183d7
182 223 user: test
183 224 date: Thu Jan 01 00:00:00 1970 +0000
184 225 summary: b
185 226
186 227 changeset: 0:9ab35a2d17cb
187 228 user: test
188 229 date: Thu Jan 01 00:00:00 1970 +0000
189 230 summary: a
190 231
191 232 $ hg up -C 4
192 233 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
193 234 $ hg parents
194 235 changeset: 4:264128213d29
195 236 tag: tip
196 237 parent: 1:ef3a871183d7
197 238 user: test
198 239 date: Thu Jan 01 00:00:00 1970 +0000
199 240 summary: c
200 241
201 242
202 243 $ hg --traceback strip 4
203 244 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
204 245 saved backup bundle to $TESTTMP/test/.hg/strip-backup/264128213d29-0b39d6bf-backup.hg
205 246 $ hg parents
206 247 changeset: 1:ef3a871183d7
207 248 user: test
208 249 date: Thu Jan 01 00:00:00 1970 +0000
209 250 summary: b
210 251
211 252 $ hg debugbundle .hg/strip-backup/*
212 253 Stream params: {Compression: BZ}
213 254 changegroup -- {nbchanges: 1, version: 02} (mandatory: True)
214 255 264128213d290d868c54642d13aeaa3675551a78
215 256 cache:rev-branch-cache -- {} (mandatory: False)
216 257 phase-heads -- {} (mandatory: True)
217 258 264128213d290d868c54642d13aeaa3675551a78 draft
218 259 $ hg unbundle .hg/strip-backup/*
219 260 adding changesets
220 261 adding manifests
221 262 adding file changes
222 263 added 1 changesets with 0 changes to 1 files (+1 heads)
223 264 new changesets 264128213d29 (1 drafts)
224 265 (run 'hg heads' to see heads, 'hg merge' to merge)
225 266 $ rm .hg/strip-backup/*
226 267 $ hg log --graph
227 268 o changeset: 4:264128213d29
228 269 | tag: tip
229 270 | parent: 1:ef3a871183d7
230 271 | user: test
231 272 | date: Thu Jan 01 00:00:00 1970 +0000
232 273 | summary: c
233 274 |
234 275 | o changeset: 3:443431ffac4f
235 276 | | user: test
236 277 | | date: Thu Jan 01 00:00:00 1970 +0000
237 278 | | summary: e
238 279 | |
239 280 | o changeset: 2:65bd5f99a4a3
240 281 |/ user: test
241 282 | date: Thu Jan 01 00:00:00 1970 +0000
242 283 | summary: d
243 284 |
244 285 @ changeset: 1:ef3a871183d7
245 286 | user: test
246 287 | date: Thu Jan 01 00:00:00 1970 +0000
247 288 | summary: b
248 289 |
249 290 o changeset: 0:9ab35a2d17cb
250 291 user: test
251 292 date: Thu Jan 01 00:00:00 1970 +0000
252 293 summary: a
253 294
254 295 $ hg up -C 2
255 296 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
256 297 $ hg merge 4
257 298 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
258 299 (branch merge, don't forget to commit)
259 300
260 301 before strip of merge parent
261 302
262 303 $ hg parents
263 304 changeset: 2:65bd5f99a4a3
264 305 user: test
265 306 date: Thu Jan 01 00:00:00 1970 +0000
266 307 summary: d
267 308
268 309 changeset: 4:264128213d29
269 310 tag: tip
270 311 parent: 1:ef3a871183d7
271 312 user: test
272 313 date: Thu Jan 01 00:00:00 1970 +0000
273 314 summary: c
274 315
275 316 ##strip not allowed with merge in progress
276 317 $ hg strip 4
277 318 abort: outstanding uncommitted merge
278 319 (use 'hg commit' or 'hg merge --abort')
279 320 [255]
280 321 ##strip allowed --force with merge in progress
281 322 $ hg strip 4 --force
282 323 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
283 324 saved backup bundle to $TESTTMP/test/.hg/strip-backup/*-backup.hg (glob)
284 325
285 326 after strip of merge parent
286 327
287 328 $ hg parents
288 329 changeset: 1:ef3a871183d7
289 330 user: test
290 331 date: Thu Jan 01 00:00:00 1970 +0000
291 332 summary: b
292 333
293 334 $ restore
294 335
295 336 $ hg up
296 337 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
297 338 updated to "264128213d29: c"
298 339 1 other heads for branch "default"
299 340 $ hg log -G
300 341 @ changeset: 4:264128213d29
301 342 | tag: tip
302 343 | parent: 1:ef3a871183d7
303 344 | user: test
304 345 | date: Thu Jan 01 00:00:00 1970 +0000
305 346 | summary: c
306 347 |
307 348 | o changeset: 3:443431ffac4f
308 349 | | user: test
309 350 | | date: Thu Jan 01 00:00:00 1970 +0000
310 351 | | summary: e
311 352 | |
312 353 | o changeset: 2:65bd5f99a4a3
313 354 |/ user: test
314 355 | date: Thu Jan 01 00:00:00 1970 +0000
315 356 | summary: d
316 357 |
317 358 o changeset: 1:ef3a871183d7
318 359 | user: test
319 360 | date: Thu Jan 01 00:00:00 1970 +0000
320 361 | summary: b
321 362 |
322 363 o changeset: 0:9ab35a2d17cb
323 364 user: test
324 365 date: Thu Jan 01 00:00:00 1970 +0000
325 366 summary: a
326 367
327 368
328 369 2 is parent of 3, only one strip should happen
329 370
330 371 $ hg strip "roots(2)" 3
331 372 saved backup bundle to $TESTTMP/test/.hg/strip-backup/*-backup.hg (glob)
332 373 $ hg log -G
333 374 @ changeset: 2:264128213d29
334 375 | tag: tip
335 376 | user: test
336 377 | date: Thu Jan 01 00:00:00 1970 +0000
337 378 | summary: c
338 379 |
339 380 o changeset: 1:ef3a871183d7
340 381 | user: test
341 382 | date: Thu Jan 01 00:00:00 1970 +0000
342 383 | summary: b
343 384 |
344 385 o changeset: 0:9ab35a2d17cb
345 386 user: test
346 387 date: Thu Jan 01 00:00:00 1970 +0000
347 388 summary: a
348 389
349 390 $ restore
350 391 $ hg log -G
351 392 o changeset: 4:443431ffac4f
352 393 | tag: tip
353 394 | user: test
354 395 | date: Thu Jan 01 00:00:00 1970 +0000
355 396 | summary: e
356 397 |
357 398 o changeset: 3:65bd5f99a4a3
358 399 | parent: 1:ef3a871183d7
359 400 | user: test
360 401 | date: Thu Jan 01 00:00:00 1970 +0000
361 402 | summary: d
362 403 |
363 404 | @ changeset: 2:264128213d29
364 405 |/ user: test
365 406 | date: Thu Jan 01 00:00:00 1970 +0000
366 407 | summary: c
367 408 |
368 409 o changeset: 1:ef3a871183d7
369 410 | user: test
370 411 | date: Thu Jan 01 00:00:00 1970 +0000
371 412 | summary: b
372 413 |
373 414 o changeset: 0:9ab35a2d17cb
374 415 user: test
375 416 date: Thu Jan 01 00:00:00 1970 +0000
376 417 summary: a
377 418
378 419 Failed hook while applying "saveheads" bundle.
379 420
380 421 $ hg strip 2 --config hooks.pretxnchangegroup.bad=false
381 422 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
382 423 saved backup bundle to $TESTTMP/test/.hg/strip-backup/*-backup.hg (glob)
383 424 transaction abort!
384 425 rollback completed
385 426 strip failed, backup bundle stored in '$TESTTMP/test/.hg/strip-backup/*-backup.hg' (glob)
386 427 strip failed, unrecovered changes stored in '$TESTTMP/test/.hg/strip-backup/*-temp.hg' (glob)
387 428 (fix the problem, then recover the changesets with "hg unbundle '$TESTTMP/test/.hg/strip-backup/*-temp.hg'") (glob)
388 429 abort: pretxnchangegroup.bad hook exited with status 1
389 430 [255]
390 431 $ restore
391 432 $ hg log -G
392 433 o changeset: 4:443431ffac4f
393 434 | tag: tip
394 435 | user: test
395 436 | date: Thu Jan 01 00:00:00 1970 +0000
396 437 | summary: e
397 438 |
398 439 o changeset: 3:65bd5f99a4a3
399 440 | parent: 1:ef3a871183d7
400 441 | user: test
401 442 | date: Thu Jan 01 00:00:00 1970 +0000
402 443 | summary: d
403 444 |
404 445 | o changeset: 2:264128213d29
405 446 |/ user: test
406 447 | date: Thu Jan 01 00:00:00 1970 +0000
407 448 | summary: c
408 449 |
409 450 @ changeset: 1:ef3a871183d7
410 451 | user: test
411 452 | date: Thu Jan 01 00:00:00 1970 +0000
412 453 | summary: b
413 454 |
414 455 o changeset: 0:9ab35a2d17cb
415 456 user: test
416 457 date: Thu Jan 01 00:00:00 1970 +0000
417 458 summary: a
418 459
419 460
420 461 2 different branches: 2 strips
421 462
422 463 $ hg strip 2 4
423 464 saved backup bundle to $TESTTMP/test/.hg/strip-backup/*-backup.hg (glob)
424 465 $ hg log -G
425 466 o changeset: 2:65bd5f99a4a3
426 467 | tag: tip
427 468 | user: test
428 469 | date: Thu Jan 01 00:00:00 1970 +0000
429 470 | summary: d
430 471 |
431 472 @ changeset: 1:ef3a871183d7
432 473 | user: test
433 474 | date: Thu Jan 01 00:00:00 1970 +0000
434 475 | summary: b
435 476 |
436 477 o changeset: 0:9ab35a2d17cb
437 478 user: test
438 479 date: Thu Jan 01 00:00:00 1970 +0000
439 480 summary: a
440 481
441 482 $ restore
442 483
443 484 2 different branches and a common ancestor: 1 strip
444 485
445 486 $ hg strip 1 "2|4"
446 487 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
447 488 saved backup bundle to $TESTTMP/test/.hg/strip-backup/*-backup.hg (glob)
448 489 $ restore
449 490
450 491 verify fncache is kept up-to-date
451 492
452 493 $ touch a
453 494 $ hg ci -qAm a
454 495 #if repofncache
455 496 $ cat .hg/store/fncache | sort
456 497 data/a.i
457 498 data/bar.i
458 499 #endif
459 500
460 501 $ hg strip tip
461 502 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
462 503 saved backup bundle to $TESTTMP/test/.hg/strip-backup/*-backup.hg (glob)
463 504 #if repofncache
464 505 $ cat .hg/store/fncache
465 506 data/bar.i
466 507 #endif
467 508
468 509 stripping an empty revset
469 510
470 511 $ hg strip "1 and not 1"
471 512 abort: empty revision set
472 513 [255]
473 514
474 515 remove branchy history for qimport tests
475 516
476 517 $ hg strip 3
477 518 saved backup bundle to $TESTTMP/test/.hg/strip-backup/*-backup.hg (glob)
478 519
479 520
480 521 strip of applied mq should clean up the status file
481 522
482 523 $ echo "mq=" >> $HGRCPATH
483 524 $ hg up -C 3
484 525 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
485 526 $ echo fooagain >> bar
486 527 $ hg ci -mf
487 528 $ hg qimport -r tip:2
488 529
489 530 applied patches before strip
490 531
491 532 $ hg qapplied
492 533 d
493 534 e
494 535 f
495 536
496 537 stripping revision in queue
497 538
498 539 $ hg strip 3
499 540 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
500 541 saved backup bundle to $TESTTMP/test/.hg/strip-backup/*-backup.hg (glob)
501 542
502 543 applied patches after stripping rev in queue
503 544
504 545 $ hg qapplied
505 546 d
506 547
507 548 stripping ancestor of queue
508 549
509 550 $ hg strip 1
510 551 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
511 552 saved backup bundle to $TESTTMP/test/.hg/strip-backup/*-backup.hg (glob)
512 553
513 554 applied patches after stripping ancestor of queue
514 555
515 556 $ hg qapplied
516 557
517 558 Verify strip protects against stripping wc parent when there are uncommitted mods
518 559
519 560 $ echo b > b
520 561 $ echo bb > bar
521 562 $ hg add b
522 563 $ hg ci -m 'b'
523 564 $ hg log --graph
524 565 @ changeset: 1:76dcf9fab855
525 566 | tag: tip
526 567 | user: test
527 568 | date: Thu Jan 01 00:00:00 1970 +0000
528 569 | summary: b
529 570 |
530 571 o changeset: 0:9ab35a2d17cb
531 572 user: test
532 573 date: Thu Jan 01 00:00:00 1970 +0000
533 574 summary: a
534 575
535 576 $ hg up 0
536 577 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
537 578 $ echo c > bar
538 579 $ hg up -t false
539 580 merging bar
540 581 merging bar failed!
541 582 1 files updated, 0 files merged, 0 files removed, 1 files unresolved
542 583 use 'hg resolve' to retry unresolved file merges
543 584 [1]
544 585 $ hg sum
545 586 parent: 1:76dcf9fab855 tip
546 587 b
547 588 branch: default
548 589 commit: 1 modified, 1 unknown, 1 unresolved
549 590 update: (current)
550 591 phases: 2 draft
551 592 mq: 3 unapplied
552 593
553 594 $ echo c > b
554 595 $ hg strip tip
555 596 abort: uncommitted changes
556 597 [255]
557 598 $ hg strip tip --keep
558 599 saved backup bundle to $TESTTMP/test/.hg/strip-backup/*-backup.hg (glob)
559 600 $ hg log --graph
560 601 @ changeset: 0:9ab35a2d17cb
561 602 tag: tip
562 603 user: test
563 604 date: Thu Jan 01 00:00:00 1970 +0000
564 605 summary: a
565 606
566 607 $ hg status
567 608 M bar
568 609 ? b
569 610 ? bar.orig
570 611
571 612 $ rm bar.orig
572 613 $ hg sum
573 614 parent: 0:9ab35a2d17cb tip
574 615 a
575 616 branch: default
576 617 commit: 1 modified, 1 unknown
577 618 update: (current)
578 619 phases: 1 draft
579 620 mq: 3 unapplied
580 621
581 622 Strip adds, removes, modifies with --keep
582 623
583 624 $ touch b
584 625 $ hg add b
585 626 $ hg commit -mb
586 627 $ touch c
587 628
588 629 ... with a clean working dir
589 630
590 631 $ hg add c
591 632 $ hg rm bar
592 633 $ hg commit -mc
593 634 $ hg status
594 635 $ hg strip --keep tip
595 636 saved backup bundle to $TESTTMP/test/.hg/strip-backup/*-backup.hg (glob)
596 637 $ hg status
597 638 ! bar
598 639 ? c
599 640
600 641 ... with a dirty working dir
601 642
602 643 $ hg add c
603 644 $ hg rm bar
604 645 $ hg commit -mc
605 646 $ hg status
606 647 $ echo b > b
607 648 $ echo d > d
608 649 $ hg strip --keep tip
609 650 saved backup bundle to $TESTTMP/test/.hg/strip-backup/*-backup.hg (glob)
610 651 $ hg status
611 652 M b
612 653 ! bar
613 654 ? c
614 655 ? d
615 656
616 657 ... after updating the dirstate
617 658 $ hg add c
618 659 $ hg commit -mc
619 660 $ hg rm c
620 661 $ hg commit -mc
621 662 $ hg strip --keep '.^' -q
622 663 $ cd ..
623 664
624 665 stripping many nodes on a complex graph (issue3299)
625 666
626 667 $ hg init issue3299
627 668 $ cd issue3299
628 669 $ hg debugbuilddag '@a.:a@b.:b.:x<a@a.:a<b@b.:b<a@a.:a'
629 670 $ hg strip 'not ancestors(x)'
630 671 saved backup bundle to $TESTTMP/issue3299/.hg/strip-backup/*-backup.hg (glob)
631 672
632 673 test hg strip -B bookmark
633 674
634 675 $ cd ..
635 676 $ hg init bookmarks
636 677 $ cd bookmarks
637 678 $ hg debugbuilddag '..<2.*1/2:m<2+3:c<m+3:a<2.:b<m+2:d<2.:e<m+1:f'
638 679 $ hg bookmark -r 'a' 'todelete'
639 680 $ hg bookmark -r 'b' 'B'
640 681 $ hg bookmark -r 'b' 'nostrip'
641 682 $ hg bookmark -r 'c' 'delete'
642 683 $ hg bookmark -r 'd' 'multipledelete1'
643 684 $ hg bookmark -r 'e' 'multipledelete2'
644 685 $ hg bookmark -r 'f' 'singlenode1'
645 686 $ hg bookmark -r 'f' 'singlenode2'
646 687 $ hg up -C todelete
647 688 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
648 689 (activating bookmark todelete)
649 690 $ hg strip -B nostrip
650 691 bookmark 'nostrip' deleted
651 692 abort: empty revision set
652 693 [255]
653 694 $ hg strip -B todelete
654 695 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
655 696 saved backup bundle to $TESTTMP/bookmarks/.hg/strip-backup/*-backup.hg (glob)
656 697 bookmark 'todelete' deleted
657 698 $ hg id -ir dcbb326fdec2
658 699 abort: unknown revision 'dcbb326fdec2'!
659 700 [255]
660 701 $ hg id -ir d62d843c9a01
661 702 d62d843c9a01
662 703 $ hg bookmarks
663 704 B 9:ff43616e5d0f
664 705 delete 6:2702dd0c91e7
665 706 multipledelete1 11:e46a4836065c
666 707 multipledelete2 12:b4594d867745
667 708 singlenode1 13:43227190fef8
668 709 singlenode2 13:43227190fef8
669 710 $ hg strip -B multipledelete1 -B multipledelete2
670 711 saved backup bundle to $TESTTMP/bookmarks/.hg/strip-backup/e46a4836065c-89ec65c2-backup.hg
671 712 bookmark 'multipledelete1' deleted
672 713 bookmark 'multipledelete2' deleted
673 714 $ hg id -ir e46a4836065c
674 715 abort: unknown revision 'e46a4836065c'!
675 716 [255]
676 717 $ hg id -ir b4594d867745
677 718 abort: unknown revision 'b4594d867745'!
678 719 [255]
679 720 $ hg strip -B singlenode1 -B singlenode2
680 721 saved backup bundle to $TESTTMP/bookmarks/.hg/strip-backup/43227190fef8-8da858f2-backup.hg
681 722 bookmark 'singlenode1' deleted
682 723 bookmark 'singlenode2' deleted
683 724 $ hg id -ir 43227190fef8
684 725 abort: unknown revision '43227190fef8'!
685 726 [255]
686 727 $ hg strip -B unknownbookmark
687 728 abort: bookmark 'unknownbookmark' not found
688 729 [255]
689 730 $ hg strip -B unknownbookmark1 -B unknownbookmark2
690 731 abort: bookmark 'unknownbookmark1,unknownbookmark2' not found
691 732 [255]
692 733 $ hg strip -B delete -B unknownbookmark
693 734 abort: bookmark 'unknownbookmark' not found
694 735 [255]
695 736 $ hg strip -B delete
696 737 saved backup bundle to $TESTTMP/bookmarks/.hg/strip-backup/*-backup.hg (glob)
697 738 bookmark 'delete' deleted
698 739 $ hg id -ir 6:2702dd0c91e7
699 740 abort: unknown revision '2702dd0c91e7'!
700 741 [255]
701 742 $ hg update B
702 743 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
703 744 (activating bookmark B)
704 745 $ echo a > a
705 746 $ hg add a
706 747 $ hg strip -B B
707 748 abort: uncommitted changes
708 749 [255]
709 750 $ hg bookmarks
710 751 * B 6:ff43616e5d0f
711 752
712 753 Make sure no one adds back a -b option:
713 754
714 755 $ hg strip -b tip
715 756 hg strip: option -b not recognized
716 757 hg strip [-k] [-f] [-B bookmark] [-r] REV...
717 758
718 759 strip changesets and all their descendants from the repository
719 760
720 761 (use 'hg help -e strip' to show help for the strip extension)
721 762
722 763 options ([+] can be repeated):
723 764
724 765 -r --rev REV [+] strip specified revision (optional, can specify
725 766 revisions without this option)
726 767 -f --force force removal of changesets, discard uncommitted
727 768 changes (no backup)
728 769 --no-backup do not save backup bundle
729 770 -k --keep do not modify working directory during strip
730 771 -B --bookmark BOOKMARK [+] remove revs only reachable from given bookmark
731 772 --mq operate on patch repository
732 773
733 774 (use 'hg strip -h' to show more help)
734 775 [255]
735 776
736 777 $ cd ..
737 778
738 779 Verify bundles don't get overwritten:
739 780
740 781 $ hg init doublebundle
741 782 $ cd doublebundle
742 783 $ touch a
743 784 $ hg commit -Aqm a
744 785 $ touch b
745 786 $ hg commit -Aqm b
746 787 $ hg strip -r 0
747 788 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
748 789 saved backup bundle to $TESTTMP/doublebundle/.hg/strip-backup/3903775176ed-e68910bd-backup.hg
749 790 $ ls .hg/strip-backup
750 791 3903775176ed-e68910bd-backup.hg
751 792 #if repobundlerepo
752 793 $ hg pull -q -r 3903775176ed .hg/strip-backup/3903775176ed-e68910bd-backup.hg
753 794 $ hg strip -r 0
754 795 saved backup bundle to $TESTTMP/doublebundle/.hg/strip-backup/3903775176ed-54390173-backup.hg
755 796 $ ls .hg/strip-backup
756 797 3903775176ed-54390173-backup.hg
757 798 3903775176ed-e68910bd-backup.hg
758 799 #endif
759 800 $ cd ..
760 801
761 802 Test that we only bundle the stripped changesets (issue4736)
762 803 ------------------------------------------------------------
763 804
764 805 initialization (previous repo is empty anyway)
765 806
766 807 $ hg init issue4736
767 808 $ cd issue4736
768 809 $ echo a > a
769 810 $ hg add a
770 811 $ hg commit -m commitA
771 812 $ echo b > b
772 813 $ hg add b
773 814 $ hg commit -m commitB
774 815 $ echo c > c
775 816 $ hg add c
776 817 $ hg commit -m commitC
777 818 $ hg up 'desc(commitB)'
778 819 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
779 820 $ echo d > d
780 821 $ hg add d
781 822 $ hg commit -m commitD
782 823 created new head
783 824 $ hg up 'desc(commitC)'
784 825 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
785 826 $ hg merge 'desc(commitD)'
786 827 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
787 828 (branch merge, don't forget to commit)
788 829 $ hg ci -m 'mergeCD'
789 830 $ hg log -G
790 831 @ changeset: 4:d8db9d137221
791 832 |\ tag: tip
792 833 | | parent: 2:5c51d8d6557d
793 834 | | parent: 3:6625a5168474
794 835 | | user: test
795 836 | | date: Thu Jan 01 00:00:00 1970 +0000
796 837 | | summary: mergeCD
797 838 | |
798 839 | o changeset: 3:6625a5168474
799 840 | | parent: 1:eca11cf91c71
800 841 | | user: test
801 842 | | date: Thu Jan 01 00:00:00 1970 +0000
802 843 | | summary: commitD
803 844 | |
804 845 o | changeset: 2:5c51d8d6557d
805 846 |/ user: test
806 847 | date: Thu Jan 01 00:00:00 1970 +0000
807 848 | summary: commitC
808 849 |
809 850 o changeset: 1:eca11cf91c71
810 851 | user: test
811 852 | date: Thu Jan 01 00:00:00 1970 +0000
812 853 | summary: commitB
813 854 |
814 855 o changeset: 0:105141ef12d0
815 856 user: test
816 857 date: Thu Jan 01 00:00:00 1970 +0000
817 858 summary: commitA
818 859
819 860
820 861 Check bundle behavior:
821 862
822 863 $ hg bundle -r 'desc(mergeCD)' --base 'desc(commitC)' ../issue4736.hg
823 864 2 changesets found
824 865 #if repobundlerepo
825 866 $ hg log -r 'bundle()' -R ../issue4736.hg
826 867 changeset: 3:6625a5168474
827 868 parent: 1:eca11cf91c71
828 869 user: test
829 870 date: Thu Jan 01 00:00:00 1970 +0000
830 871 summary: commitD
831 872
832 873 changeset: 4:d8db9d137221
833 874 tag: tip
834 875 parent: 2:5c51d8d6557d
835 876 parent: 3:6625a5168474
836 877 user: test
837 878 date: Thu Jan 01 00:00:00 1970 +0000
838 879 summary: mergeCD
839 880
840 881 #endif
841 882
842 883 check strip behavior
843 884
844 885 $ hg --config extensions.strip= strip 'desc(commitD)' --debug
845 886 resolving manifests
846 887 branchmerge: False, force: True, partial: False
847 888 ancestor: d8db9d137221+, local: d8db9d137221+, remote: eca11cf91c71
848 889 c: other deleted -> r
849 890 removing c
850 891 d: other deleted -> r
851 892 removing d
852 893 starting 4 threads for background file closing (?)
853 894 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
854 895 2 changesets found
855 896 list of changesets:
856 897 6625a516847449b6f0fa3737b9ba56e9f0f3032c
857 898 d8db9d1372214336d2b5570f20ee468d2c72fa8b
858 899 bundle2-output-bundle: "HG20", (1 params) 3 parts total
859 900 bundle2-output-part: "changegroup" (params: 1 mandatory 1 advisory) streamed payload
860 901 bundle2-output-part: "cache:rev-branch-cache" (advisory) streamed payload
861 902 bundle2-output-part: "phase-heads" 24 bytes payload
862 903 saved backup bundle to $TESTTMP/issue4736/.hg/strip-backup/6625a5168474-345bb43d-backup.hg
863 904 updating the branch cache
864 905 invalid branch cache (served): tip differs
865 906 $ hg log -G
866 907 o changeset: 2:5c51d8d6557d
867 908 | tag: tip
868 909 | user: test
869 910 | date: Thu Jan 01 00:00:00 1970 +0000
870 911 | summary: commitC
871 912 |
872 913 @ changeset: 1:eca11cf91c71
873 914 | user: test
874 915 | date: Thu Jan 01 00:00:00 1970 +0000
875 916 | summary: commitB
876 917 |
877 918 o changeset: 0:105141ef12d0
878 919 user: test
879 920 date: Thu Jan 01 00:00:00 1970 +0000
880 921 summary: commitA
881 922
882 923
883 924 strip backup content
884 925
885 926 #if repobundlerepo
886 927 $ hg log -r 'bundle()' -R .hg/strip-backup/6625a5168474-*-backup.hg
887 928 changeset: 3:6625a5168474
888 929 parent: 1:eca11cf91c71
889 930 user: test
890 931 date: Thu Jan 01 00:00:00 1970 +0000
891 932 summary: commitD
892 933
893 934 changeset: 4:d8db9d137221
894 935 tag: tip
895 936 parent: 2:5c51d8d6557d
896 937 parent: 3:6625a5168474
897 938 user: test
898 939 date: Thu Jan 01 00:00:00 1970 +0000
899 940 summary: mergeCD
900 941
901 942
902 943 #endif
903 944
904 945 Check that the phase cache is properly invalidated after a strip with a bookmark.
905 946
906 947 $ cat > ../stripstalephasecache.py << EOF
907 948 > from mercurial import extensions, localrepo
908 949 > def transactioncallback(orig, repo, desc, *args, **kwargs):
909 950 > def test(transaction):
910 951 > # observe cache inconsistency
911 952 > try:
912 953 > [repo.changelog.node(r) for r in repo.revs(b"not public()")]
913 954 > except IndexError:
914 955 > repo.ui.status(b"Index error!\n")
915 956 > transaction = orig(repo, desc, *args, **kwargs)
916 957 > # warm up the phase cache
917 958 > list(repo.revs(b"not public()"))
918 959 > if desc != b'strip':
919 960 > transaction.addpostclose(b"phase invalidation test", test)
920 961 > return transaction
921 962 > def extsetup(ui):
922 963 > extensions.wrapfunction(localrepo.localrepository, b"transaction",
923 964 > transactioncallback)
924 965 > EOF
925 966 $ hg up -C 2
926 967 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
927 968 $ echo k > k
928 969 $ hg add k
929 970 $ hg commit -m commitK
930 971 $ echo l > l
931 972 $ hg add l
932 973 $ hg commit -m commitL
933 974 $ hg book -r tip blah
934 975 $ hg strip ".^" --config extensions.crash=$TESTTMP/stripstalephasecache.py
935 976 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
936 977 saved backup bundle to $TESTTMP/issue4736/.hg/strip-backup/8f0b4384875c-4fa10deb-backup.hg
937 978 $ hg up -C 1
938 979 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
939 980
940 981 Errors during the post-close callback of the strip transaction
941 982 (they should be gracefully handled and reported)
942 983
943 984 $ cat > ../crashstrip.py << EOF
944 985 > from mercurial import error
945 986 > def reposetup(ui, repo):
946 987 > class crashstriprepo(repo.__class__):
947 988 > def transaction(self, desc, *args, **kwargs):
948 989 > tr = super(crashstriprepo, self).transaction(desc, *args, **kwargs)
949 990 > if desc == b'strip':
950 991 > def crash(tra): raise error.Abort(b'boom')
951 992 > tr.addpostclose(b'crash', crash)
952 993 > return tr
953 994 > repo.__class__ = crashstriprepo
954 995 > EOF
955 996 $ hg strip tip --config extensions.crash=$TESTTMP/crashstrip.py
956 997 saved backup bundle to $TESTTMP/issue4736/.hg/strip-backup/5c51d8d6557d-70daef06-backup.hg
957 998 strip failed, backup bundle stored in '$TESTTMP/issue4736/.hg/strip-backup/5c51d8d6557d-70daef06-backup.hg'
958 999 abort: boom
959 1000 [255]
960 1001
961 1002 test that stripping a working directory parent doesn't switch named branches
962 1003
963 1004 $ hg log -G
964 1005 @ changeset: 1:eca11cf91c71
965 1006 | tag: tip
966 1007 | user: test
967 1008 | date: Thu Jan 01 00:00:00 1970 +0000
968 1009 | summary: commitB
969 1010 |
970 1011 o changeset: 0:105141ef12d0
971 1012 user: test
972 1013 date: Thu Jan 01 00:00:00 1970 +0000
973 1014 summary: commitA
974 1015
975 1016
976 1017 $ hg branch new-branch
977 1018 marked working directory as branch new-branch
978 1019 (branches are permanent and global, did you want a bookmark?)
979 1020 $ hg ci -m "start new branch"
980 1021 $ echo 'foo' > foo.txt
981 1022 $ hg ci -Aqm foo
982 1023 $ hg up default
983 1024 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
984 1025 $ echo 'bar' > bar.txt
985 1026 $ hg ci -Aqm bar
986 1027 $ hg up new-branch
987 1028 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
988 1029 $ hg merge default
989 1030 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
990 1031 (branch merge, don't forget to commit)
991 1032 $ hg log -G
992 1033 @ changeset: 4:35358f982181
993 1034 | tag: tip
994 1035 | parent: 1:eca11cf91c71
995 1036 | user: test
996 1037 | date: Thu Jan 01 00:00:00 1970 +0000
997 1038 | summary: bar
998 1039 |
999 1040 | @ changeset: 3:f62c6c09b707
1000 1041 | | branch: new-branch
1001 1042 | | user: test
1002 1043 | | date: Thu Jan 01 00:00:00 1970 +0000
1003 1044 | | summary: foo
1004 1045 | |
1005 1046 | o changeset: 2:b1d33a8cadd9
1006 1047 |/ branch: new-branch
1007 1048 | user: test
1008 1049 | date: Thu Jan 01 00:00:00 1970 +0000
1009 1050 | summary: start new branch
1010 1051 |
1011 1052 o changeset: 1:eca11cf91c71
1012 1053 | user: test
1013 1054 | date: Thu Jan 01 00:00:00 1970 +0000
1014 1055 | summary: commitB
1015 1056 |
1016 1057 o changeset: 0:105141ef12d0
1017 1058 user: test
1018 1059 date: Thu Jan 01 00:00:00 1970 +0000
1019 1060 summary: commitA
1020 1061
1021 1062
1022 1063 $ hg strip --force -r 35358f982181
1023 1064 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
1024 1065 saved backup bundle to $TESTTMP/issue4736/.hg/strip-backup/35358f982181-50d992d4-backup.hg
1025 1066 $ hg log -G
  @ changeset: 3:f62c6c09b707
  | branch: new-branch
  | tag: tip
  | user: test
  | date: Thu Jan 01 00:00:00 1970 +0000
  | summary: foo
  |
  o changeset: 2:b1d33a8cadd9
  | branch: new-branch
  | user: test
  | date: Thu Jan 01 00:00:00 1970 +0000
  | summary: start new branch
  |
  o changeset: 1:eca11cf91c71
  | user: test
  | date: Thu Jan 01 00:00:00 1970 +0000
  | summary: commitB
  |
  o changeset: 0:105141ef12d0
    user: test
    date: Thu Jan 01 00:00:00 1970 +0000
    summary: commitA


  $ hg up default
  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
  $ echo 'bar' > bar.txt
  $ hg ci -Aqm bar
  $ hg up new-branch
  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
  $ hg merge default
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  (branch merge, don't forget to commit)
  $ hg ci -m merge
  $ hg log -G
  @ changeset: 5:4cf5e92caec2
  |\ branch: new-branch
  | | tag: tip
  | | parent: 3:f62c6c09b707
  | | parent: 4:35358f982181
  | | user: test
  | | date: Thu Jan 01 00:00:00 1970 +0000
  | | summary: merge
  | |
  | o changeset: 4:35358f982181
  | | parent: 1:eca11cf91c71
  | | user: test
  | | date: Thu Jan 01 00:00:00 1970 +0000
  | | summary: bar
  | |
  o | changeset: 3:f62c6c09b707
  | | branch: new-branch
  | | user: test
  | | date: Thu Jan 01 00:00:00 1970 +0000
  | | summary: foo
  | |
  o | changeset: 2:b1d33a8cadd9
  |/ branch: new-branch
  | user: test
  | date: Thu Jan 01 00:00:00 1970 +0000
  | summary: start new branch
  |
  o changeset: 1:eca11cf91c71
  | user: test
  | date: Thu Jan 01 00:00:00 1970 +0000
  | summary: commitB
  |
  o changeset: 0:105141ef12d0
    user: test
    date: Thu Jan 01 00:00:00 1970 +0000
    summary: commitA


  $ hg strip -r 35358f982181
  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
  saved backup bundle to $TESTTMP/issue4736/.hg/strip-backup/35358f982181-a6f020aa-backup.hg
  $ hg log -G
  @ changeset: 3:f62c6c09b707
  | branch: new-branch
  | tag: tip
  | user: test
  | date: Thu Jan 01 00:00:00 1970 +0000
  | summary: foo
  |
  o changeset: 2:b1d33a8cadd9
  | branch: new-branch
  | user: test
  | date: Thu Jan 01 00:00:00 1970 +0000
  | summary: start new branch
  |
  o changeset: 1:eca11cf91c71
  | user: test
  | date: Thu Jan 01 00:00:00 1970 +0000
  | summary: commitB
  |
  o changeset: 0:105141ef12d0
    user: test
    date: Thu Jan 01 00:00:00 1970 +0000
    summary: commitA


  $ hg unbundle -u $TESTTMP/issue4736/.hg/strip-backup/35358f982181-a6f020aa-backup.hg
  adding changesets
  adding manifests
  adding file changes
  added 2 changesets with 1 changes to 1 files
  new changesets 35358f982181:4cf5e92caec2 (2 drafts)
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved

  $ hg strip -k -r 35358f982181
  saved backup bundle to $TESTTMP/issue4736/.hg/strip-backup/35358f982181-a6f020aa-backup.hg
  $ hg log -G
  @ changeset: 3:f62c6c09b707
  | branch: new-branch
  | tag: tip
  | user: test
  | date: Thu Jan 01 00:00:00 1970 +0000
  | summary: foo
  |
  o changeset: 2:b1d33a8cadd9
  | branch: new-branch
  | user: test
  | date: Thu Jan 01 00:00:00 1970 +0000
  | summary: start new branch
  |
  o changeset: 1:eca11cf91c71
  | user: test
  | date: Thu Jan 01 00:00:00 1970 +0000
  | summary: commitB
  |
  o changeset: 0:105141ef12d0
    user: test
    date: Thu Jan 01 00:00:00 1970 +0000
    summary: commitA

  $ hg diff
  diff -r f62c6c09b707 bar.txt
  --- /dev/null Thu Jan 01 00:00:00 1970 +0000
  +++ b/bar.txt Thu Jan 01 00:00:00 1970 +0000
  @@ -0,0 +1,1 @@
  +bar

Use delayedstrip to strip inside a transaction
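
For reference, repair.delayedstrip(ui, repo, nodelist, topic) does not strip
immediately: it registers a callback that fires when the enclosing transaction
closes, so several calls inside one transaction collapse into a single strip at
transaction close (the test below produces one backup bundle for two calls),
and nodes whose descendants would be orphaned are skipped with a warning
instead of being stripped. A minimal sketch of the calling pattern (the command
name and the revset are illustrative, not part of the test below):

    from mercurial import registrar, repair

    cmdtable = {}
    command = registrar.command(cmdtable)

    @command(b'minimaldelayedstrip')
    def minimaldelayedstrip(ui, repo):
        # Resolve a revset to binary nodes while holding both locks.
        nodes = [repo.changelog.node(r) for r in repo.revs(b'draft()')]
        with repo.wlock(), repo.lock(), repo.transaction(b'example'):
            # Nothing is stripped here; the strip runs once, when the
            # transaction closes, after any other work it contains.
            repair.delayedstrip(ui, repo, nodes, b'example')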

  $ cd $TESTTMP
  $ hg init delayedstrip
  $ cd delayedstrip
  $ hg debugdrawdag <<'EOS'
  >     D
  >     |
  >     C F H   # Commit on top of "I",
  >     | |/|   # Strip B+D+I+E+G+H+Z
  >   I B E G
  >    \|/
  >     A Z
  > EOS
  $ cp -R . ../scmutilcleanup

  $ hg up -C I
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ echo 3 >> I
  $ cat > $TESTTMP/delayedstrip.py <<EOF
  > from __future__ import absolute_import
  > from mercurial import commands, registrar, repair
  > cmdtable = {}
  > command = registrar.command(cmdtable)
  > @command(b'testdelayedstrip')
  > def testdelayedstrip(ui, repo):
  >     def getnodes(expr):
  >         return [repo.changelog.node(r) for r in repo.revs(expr)]
  >     with repo.wlock():
  >         with repo.lock():
  >             with repo.transaction(b'delayedstrip'):
  >                 repair.delayedstrip(ui, repo, getnodes(b'B+I+Z+D+E'), b'J')
  >                 repair.delayedstrip(ui, repo, getnodes(b'G+H+Z'), b'I')
  >                 commands.commit(ui, repo, message=b'J', date=b'0 0')
  > EOF
  $ hg testdelayedstrip --config extensions.t=$TESTTMP/delayedstrip.py
  warning: orphaned descendants detected, not stripping 08ebfeb61bac, 112478962961, 7fb047a69f22
  saved backup bundle to $TESTTMP/delayedstrip/.hg/strip-backup/f585351a92f8-17475721-I.hg

  $ hg log -G -T '{rev}:{node|short} {desc}' -r 'sort(all(), topo)'
  @ 6:2f2d51af6205 J
  |
  o 3:08ebfeb61bac I
  |
  | o 5:64a8289d2492 F
  | |
  | o 2:7fb047a69f22 E
  |/
  | o 4:26805aba1e60 C
  | |
  | o 1:112478962961 B
  |/
  o 0:426bada5c675 A

Test high-level scmutil.cleanupnodes API
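
scmutil.cleanupnodes(repo, replacements, operation) is the higher-level entry
point. 'replacements' is either a plain list of nodes to remove or a mapping
from each old node to a list of its successors; bookmarks pointing at removed
nodes move to the successors (or to a surviving ancestor when there is none,
as b-B, b-C and b-I do below), and the backend is picked automatically:
obsmarkers when evolution is enabled, a delayed strip otherwise. A minimal
sketch of the two call shapes, assuming old, new and pruned are binary
changelog nodes already looked up (the helper name is illustrative):

    from mercurial import scmutil

    def replace_and_prune(repo, old, new, pruned):
        with repo.wlock(), repo.lock(), repo.transaction(b'cleanup'):
            # 'old' was rewritten into 'new' ...
            scmutil.cleanupnodes(repo, {old: [new]}, b'replace')
            # ... and the nodes in 'pruned' are simply removed.
            scmutil.cleanupnodes(repo, pruned, b'replace')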

  $ cd $TESTTMP/scmutilcleanup
  $ hg debugdrawdag <<'EOS'
  > D2 F2 G2   # D2, F2, G2 are replacements for D, F, G
  > |  |  |
  > C  H  G
  > EOS
  $ for i in B C D F G I Z; do
  >   hg bookmark -i -r $i b-$i
  > done
  $ hg bookmark -i -r E 'b-F@divergent1'
  $ hg bookmark -i -r H 'b-F@divergent2'
  $ hg bookmark -i -r G 'b-F@divergent3'
  $ cp -R . ../scmutilcleanup.obsstore

  $ cat > $TESTTMP/scmutilcleanup.py <<EOF
  > from mercurial import registrar, scmutil
  > cmdtable = {}
  > command = registrar.command(cmdtable)
  > @command(b'testnodescleanup')
  > def testnodescleanup(ui, repo):
  >     def nodes(expr):
  >         return [repo.changelog.node(r) for r in repo.revs(expr)]
  >     def node(expr):
  >         return nodes(expr)[0]
  >     with repo.wlock():
  >         with repo.lock():
  >             with repo.transaction(b'delayedstrip'):
  >                 mapping = {node(b'F'): [node(b'F2')],
  >                            node(b'D'): [node(b'D2')],
  >                            node(b'G'): [node(b'G2')]}
  >                 scmutil.cleanupnodes(repo, mapping, b'replace')
  >                 scmutil.cleanupnodes(repo, nodes(b'((B::)+I+Z)-D2-obsolete()'),
  >                                      b'replace')
  > EOF
  $ hg testnodescleanup --config extensions.t=$TESTTMP/scmutilcleanup.py
  warning: orphaned descendants detected, not stripping 112478962961, 1fc8102cda62, 26805aba1e60
  saved backup bundle to $TESTTMP/scmutilcleanup/.hg/strip-backup/f585351a92f8-73fb7c03-replace.hg

  $ hg log -G -T '{rev}:{node|short} {desc} {bookmarks}' -r 'sort(all(), topo)'
  o 8:1473d4b996d1 G2 b-F@divergent3 b-G
  |
  | o 7:d11b3456a873 F2 b-F
  | |
  | o 5:5cb05ba470a7 H
  |/|
  | o 3:7fb047a69f22 E b-F@divergent1
  | |
  | | o 6:7c78f703e465 D2 b-D
  | | |
  | | o 4:26805aba1e60 C
  | | |
  | | o 2:112478962961 B
  | |/
  o | 1:1fc8102cda62 G
   /
  o 0:426bada5c675 A b-B b-C b-I

  $ hg bookmark
     b-B 0:426bada5c675
     b-C 0:426bada5c675
     b-D 6:7c78f703e465
     b-F 7:d11b3456a873
     b-F@divergent1 3:7fb047a69f22
     b-F@divergent3 8:1473d4b996d1
     b-G 8:1473d4b996d1
     b-I 0:426bada5c675
     b-Z -1:000000000000

Test the above using the obsstore as well; this is not directly related to
strip, but it lets us reuse the setup code from above
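
With evolution enabled the same command records obsolescence markers instead
of stripping: replaced changesets stay in the repository as obsolete ('x' in
the graph below) and their surviving descendants become orphans ('*'). A
minimal sketch for inspecting that state from Python, assuming it is run from
$TESTTMP so the repository path resolves (the path handling is illustrative):

    from mercurial import hg, node, obsutil, ui as uimod

    repo = hg.repository(uimod.ui.load(), b'scmutilcleanup.obsstore')
    # Visible changesets that were rewritten or pruned ('x' in the graph).
    for rev in repo.revs(b'obsolete()'):
        print(repo[rev].description().decode())
    # One marker per rewrite, as 'hg debugobsolete' prints below.
    for m in obsutil.getmarkers(repo):
        succs = [node.hex(s).decode() for s in m.succnodes()]
        print(node.hex(m.prednode()).decode(), succs)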

  $ cd $TESTTMP/scmutilcleanup.obsstore
  $ cat >> .hg/hgrc <<EOF
  > [experimental]
  > evolution=true
  > evolution.track-operation=1
  > EOF

  $ hg testnodescleanup --config extensions.t=$TESTTMP/scmutilcleanup.py
  4 new orphan changesets

  $ rm .hg/localtags
  $ hg log -G -T '{rev}:{node|short} {desc} {bookmarks}' -r 'sort(all(), topo)'
  * 12:1473d4b996d1 G2 b-F@divergent3 b-G
  |
  | * 11:d11b3456a873 F2 b-F
  | |
  | * 8:5cb05ba470a7 H
  |/|
  | o 4:7fb047a69f22 E b-F@divergent1
  | |
  | | * 10:7c78f703e465 D2 b-D
  | | |
  | | x 6:26805aba1e60 C
  | | |
  | | x 3:112478962961 B
  | |/
  x | 1:1fc8102cda62 G
   /
  o 0:426bada5c675 A b-B b-C b-I

  $ hg debugobsolete
  1fc8102cda6204549f031015641606ccf5513ec3 1473d4b996d1d1b121de6b39fab6a04fbf9d873e 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '13', 'operation': 'replace', 'user': 'test'}
  64a8289d249234b9886244d379f15e6b650b28e3 d11b3456a873daec7c7bc53e5622e8df6d741bd2 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '13', 'operation': 'replace', 'user': 'test'}
  f585351a92f85104bff7c284233c338b10eb1df7 7c78f703e465d73102cc8780667ce269c5208a40 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '9', 'operation': 'replace', 'user': 'test'}
  48b9aae0607f43ff110d84e6883c151942add5ab 0 {0000000000000000000000000000000000000000} (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '0', 'operation': 'replace', 'user': 'test'}
  112478962961147124edd43549aedd1a335e44bf 0 {426bada5c67598ca65036d57d9e4b64b0c1ce7a0} (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '0', 'operation': 'replace', 'user': 'test'}
  08ebfeb61bac6e3f12079de774d285a0d6689eba 0 {426bada5c67598ca65036d57d9e4b64b0c1ce7a0} (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '0', 'operation': 'replace', 'user': 'test'}
  26805aba1e600a82e93661149f2313866a221a7b 0 {112478962961147124edd43549aedd1a335e44bf} (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '0', 'operation': 'replace', 'user': 'test'}
  $ cd ..

Test that obsmarkers are restored even when not using generaldelta
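
The point of issue5678: the strip backup must carry the obsolescence markers
even when the repository does not use the generaldelta format, so unbundling
the backup restores them together with the changesets. A minimal check in the
same spirit, with an assumed repository path relative to $TESTTMP:

    from mercurial import hg, obsutil, ui as uimod

    repo = hg.repository(uimod.ui.load(), b'issue5678')
    # After the strip/unbundle round trip below, the amend marker must
    # still be present; an empty list here would mean it was lost.
    markers = list(obsutil.getmarkers(repo))
    assert markers, 'obsmarkers were lost by strip/unbundle'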

  $ hg --config format.usegeneraldelta=no init issue5678
  $ cd issue5678
  $ cat >> .hg/hgrc <<EOF
  > [experimental]
  > evolution=true
  > EOF
  $ echo a > a
  $ hg ci -Aqm a
  $ hg ci --amend -m a2
  $ hg debugobsolete
  cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b 489bac576828490c0bb8d45eac9e5e172e4ec0a8 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '1', 'operation': 'amend', 'user': 'test'}
  $ hg strip .
  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
  saved backup bundle to $TESTTMP/issue5678/.hg/strip-backup/489bac576828-bef27e14-backup.hg
  $ hg unbundle -q .hg/strip-backup/*
  $ hg debugobsolete
  cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b 489bac576828490c0bb8d45eac9e5e172e4ec0a8 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '1', 'operation': 'amend', 'user': 'test'}
  $ cd ..