branching: merge stable into default

Author: marmoute
Changeset: r51069:596a6b9b (merge, default branch)
@@ -0,0 +1,33 @@
#!/bin/bash

set -e
set -u

# Find the python3 setup that would run pytype
PYTYPE=`which pytype`
PYTHON3=`head -n1 ${PYTYPE} | sed -s 's/#!//'`

# Existing stubs that pytype processes live here
TYPESHED=$(${PYTHON3} -c "import pytype; print(pytype.__path__[0])")/typeshed/stubs
HG_STUBS=${TYPESHED}/mercurial

echo "Patching typeshed at $HG_STUBS"

rm -rf ${HG_STUBS}
mkdir -p ${HG_STUBS}

cat > ${HG_STUBS}/METADATA.toml <<EOF
version = "0.1"
EOF


mkdir -p ${HG_STUBS}/mercurial/cext ${HG_STUBS}/mercurial/thirdparty/attr

touch ${HG_STUBS}/mercurial/__init__.pyi
touch ${HG_STUBS}/mercurial/cext/__init__.pyi
touch ${HG_STUBS}/mercurial/thirdparty/__init__.pyi

ln -sf $(hg root)/mercurial/cext/*.{pyi,typed} \
   ${HG_STUBS}/mercurial/cext
ln -sf $(hg root)/mercurial/thirdparty/attr/*.{pyi,typed} \
   ${HG_STUBS}/mercurial/thirdparty/attr
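
Note: the script above symlinks Mercurial's own *.pyi stubs into the typeshed copy bundled with pytype, so pytype can resolve imports of mercurial.cext and mercurial.thirdparty.attr. A minimal usage sketch, assuming hg and pytype are on PATH; the script's real path is truncated in this diff, so the filename below is an assumption:

    # hypothetical invocation; the script path and target file are assumptions
    cd "$(hg root)"
    sh contrib/setup-pytype.sh     # patch pytype's bundled typeshed/stubs
    pytype mercurial/error.py      # imports of mercurial.* stubs now resolve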
@@ -1,13 +1,18 @@
 /assign_reviewer @mercurial.review
 
+
+<!--
+
 Welcome to the Mercurial Merge Request creation process:
 
 * Set a simple title for your MR,
 * All important information should be contained in your changesets' content or description,
 * You can add some workflow-relevant information here (eg: when this depends on another MR),
 * If your changes are not ready for review yet, click `Start the title with Draft:` under the title.
 
 More details here:
 
 * https://www.mercurial-scm.org/wiki/ContributingChanges
 * https://www.mercurial-scm.org/wiki/Heptapod
+
+-->
@@ -1,300 +1,305 @@
 # If you want to change PREFIX, do not just edit it below. The changed
 # value wont get passed on to recursive make calls. You should instead
 # override the variable on the command like:
 #
 # % make PREFIX=/opt/ install
 
 export PREFIX=/usr/local
 
 # Default to Python 3.
 #
 # Windows ships Python 3 as `python.exe`, which may not be on PATH. py.exe is.
 ifeq ($(OS),Windows_NT)
 PYTHON?=py -3
 else
 PYTHON?=python3
 endif
 
 PYOXIDIZER?=pyoxidizer
 
 $(eval HGROOT := $(shell pwd))
 HGPYTHONS ?= $(HGROOT)/build/pythons
 PURE=
 PYFILESCMD=find mercurial hgext doc -name '*.py'
 PYFILES:=$(shell $(PYFILESCMD))
 DOCFILES=mercurial/helptext/*.txt
 export LANGUAGE=C
 export LC_ALL=C
 TESTFLAGS ?= $(shell echo $$HGTESTFLAGS)
 OSXVERSIONFLAGS ?= $(shell echo $$OSXVERSIONFLAGS)
 CARGO = cargo
 
 # Set this to e.g. "mingw32" to use a non-default compiler.
 COMPILER=
 
 COMPILERFLAG_tmp_ =
 COMPILERFLAG_tmp_${COMPILER} ?= -c $(COMPILER)
 COMPILERFLAG=${COMPILERFLAG_tmp_${COMPILER}}
 
 help:
 	@echo 'Commonly used make targets:'
 	@echo '  all          - build program and documentation'
 	@echo '  install      - install program and man pages to $$PREFIX ($(PREFIX))'
 	@echo '  install-home - install with setup.py install --home=$$HOME ($(HOME))'
 	@echo '  local        - build for inplace usage'
 	@echo '  tests        - run all tests in the automatic test suite'
 	@echo '  test-foo     - run only specified tests (e.g. test-merge1.t)'
 	@echo '  dist         - run all tests and create a source tarball in dist/'
 	@echo '  clean        - remove files created by other targets'
 	@echo '                 (except installed files or dist source tarball)'
 	@echo '  update-pot   - update i18n/hg.pot'
 	@echo
 	@echo 'Example for a system-wide installation under /usr/local:'
 	@echo '  make all && su -c "make install" && hg version'
 	@echo
 	@echo 'Example for a local installation (usable in this directory):'
 	@echo '  make local && ./hg version'
 
 all: build doc
 
 local:
 	MERCURIAL_SETUP_MAKE_LOCAL=1 $(PYTHON) setup.py $(PURE) \
 	  build_py -c -d . \
 	  build_ext $(COMPILERFLAG) -i \
 	  build_hgexe $(COMPILERFLAG) -i \
 	  build_mo
 	env HGRCPATH= $(PYTHON) hg version
 
 build:
 	$(PYTHON) setup.py $(PURE) build $(COMPILERFLAG)
 
 build-chg:
 	make -C contrib/chg
 
 build-rhg:
 	(cd rust/rhg; cargo build --release)
 
 wheel:
 	FORCE_SETUPTOOLS=1 $(PYTHON) setup.py $(PURE) bdist_wheel $(COMPILERFLAG)
 
 doc:
 	$(MAKE) -C doc
 
 cleanbutpackages:
 	rm -f hg.exe
 	-$(PYTHON) setup.py clean --all # ignore errors from this command
 	find contrib doc hgext hgext3rd i18n mercurial tests hgdemandimport \
 		\( -name '*.py[cdo]' -o -name '*.so' \) -exec rm -f '{}' ';'
 	rm -f MANIFEST MANIFEST.in hgext/__index__.py tests/*.err
 	rm -f mercurial/__modulepolicy__.py
 	if test -d .hg; then rm -f mercurial/__version__.py; fi
 	rm -rf build mercurial/locale
 	$(MAKE) -C doc clean
 	$(MAKE) -C contrib/chg distclean
 	rm -rf rust/target
 	rm -f mercurial/rustext.so
 
 clean: cleanbutpackages
 	rm -rf packages
 
 install: install-bin install-doc
 
 install-bin: build
 	$(PYTHON) setup.py $(PURE) install --root="$(DESTDIR)/" --prefix="$(PREFIX)" --force
 
 install-chg: build-chg
 	make -C contrib/chg install PREFIX="$(PREFIX)"
 
 install-doc: doc
 	cd doc && $(MAKE) $(MFLAGS) install
 
 install-home: install-home-bin install-home-doc
 
 install-home-bin: build
 	$(PYTHON) setup.py $(PURE) install --home="$(HOME)" --prefix="" --force
 
 install-home-doc: doc
 	cd doc && $(MAKE) $(MFLAGS) PREFIX="$(HOME)" install
 
 install-rhg: build-rhg
 	install -m 755 rust/target/release/rhg "$(PREFIX)"/bin/
 
 MANIFEST-doc:
 	$(MAKE) -C doc MANIFEST
 
 MANIFEST.in: MANIFEST-doc
 	hg manifest | sed -e 's/^/include /' > MANIFEST.in
 	echo include mercurial/__version__.py >> MANIFEST.in
 	sed -e 's/^/include /' < doc/MANIFEST >> MANIFEST.in
 
 dist: tests dist-notests
 
 dist-notests: doc MANIFEST.in
 	TAR_OPTIONS="--owner=root --group=root --mode=u+w,go-w,a+rX-s" $(PYTHON) setup.py -q sdist
 
 check: tests
 
 tests:
 	# Run Rust tests if cargo is installed
 	if command -v $(CARGO) >/dev/null 2>&1; then \
 		$(MAKE) rust-tests; \
+		$(MAKE) cargo-clippy; \
 	fi
 	cd tests && $(PYTHON) run-tests.py $(TESTFLAGS)
 
 test-%:
 	cd tests && $(PYTHON) run-tests.py $(TESTFLAGS) $@
 
 testpy-%:
 	@echo Looking for Python $* in $(HGPYTHONS)
 	[ -e $(HGPYTHONS)/$*/bin/python ] || ( \
 	cd $$(mktemp --directory --tmpdir) && \
 	$(MAKE) -f $(HGROOT)/contrib/Makefile.python PYTHONVER=$* PREFIX=$(HGPYTHONS)/$* python )
 	cd tests && $(HGPYTHONS)/$*/bin/python run-tests.py $(TESTFLAGS)
 
 rust-tests:
-	cd $(HGROOT)/rust/hg-cpython \
+	cd $(HGROOT)/rust \
 	&& $(CARGO) test --quiet --all --features "$(HG_RUST_FEATURES)"
 
+cargo-clippy:
+	cd $(HGROOT)/rust \
+	&& $(CARGO) clippy --all --features "$(HG_RUST_FEATURES)" -- -D warnings
+
 check-code:
 	hg manifest | xargs python contrib/check-code.py
 
 format-c:
 	clang-format --style file -i \
 	  `hg files 'set:(**.c or **.cc or **.h) and not "listfile:contrib/clang-format-ignorelist"'`
 
 update-pot: i18n/hg.pot
 
 i18n/hg.pot: $(PYFILES) $(DOCFILES) i18n/posplit i18n/hggettext
 	$(PYTHON) i18n/hggettext mercurial/commands.py \
 	  hgext/*.py hgext/*/__init__.py \
 	  mercurial/fileset.py mercurial/revset.py \
 	  mercurial/templatefilters.py \
 	  mercurial/templatefuncs.py \
 	  mercurial/templatekw.py \
 	  mercurial/filemerge.py \
 	  mercurial/hgweb/webcommands.py \
 	  mercurial/util.py \
 	  $(DOCFILES) > i18n/hg.pot.tmp
 	# All strings marked for translation in Mercurial contain
 	# ASCII characters only. But some files contain string
 	# literals like this '\037\213'. xgettext thinks it has to
 	# parse them even though they are not marked for translation.
 	# Extracting with an explicit encoding of ISO-8859-1 will make
 	# xgettext "parse" and ignore them.
 	$(PYFILESCMD) | xargs \
 	  xgettext --package-name "Mercurial" \
 	  --msgid-bugs-address "<mercurial-devel@mercurial-scm.org>" \
 	  --copyright-holder "Olivia Mackall <olivia@selenic.com> and others" \
 	  --from-code ISO-8859-1 --join --sort-by-file --add-comments=i18n: \
 	  -d hg -p i18n -o hg.pot.tmp
 	$(PYTHON) i18n/posplit i18n/hg.pot.tmp
 	# The target file is not created before the last step. So it never is in
 	# an intermediate state.
 	mv -f i18n/hg.pot.tmp i18n/hg.pot
 
 %.po: i18n/hg.pot
 	# work on a temporary copy for never having a half completed target
 	cp $@ $@.tmp
 	msgmerge --no-location --update $@.tmp $^
 	mv -f $@.tmp $@
 
 # Packaging targets
 
 packaging_targets := \
   rhel7 \
   rhel8 \
   rhel9 \
   deb \
   docker-rhel7 \
   docker-rhel8 \
   docker-rhel9 \
   docker-debian-bullseye \
   docker-debian-buster \
   docker-debian-stretch \
   docker-fedora \
   docker-ubuntu-xenial \
   docker-ubuntu-xenial-ppa \
   docker-ubuntu-bionic \
   docker-ubuntu-bionic-ppa \
   docker-ubuntu-focal \
   docker-ubuntu-focal-ppa \
   fedora \
   linux-wheels \
   linux-wheels-x86_64 \
   linux-wheels-i686 \
   ppa
 
 # Forward packaging targets for convenience.
 $(packaging_targets):
 	$(MAKE) -C contrib/packaging $@
 
 osx:
 	rm -rf build/mercurial
 	/usr/bin/python2.7 setup.py install --optimize=1 \
 	  --root=build/mercurial/ --prefix=/usr/local/ \
 	  --install-lib=/Library/Python/2.7/site-packages/
 	make -C doc all install DESTDIR="$(PWD)/build/mercurial/"
 	# Place a bogon .DS_Store file in the target dir so we can be
 	# sure it doesn't get included in the final package.
 	touch build/mercurial/.DS_Store
 	make -C contrib/chg \
 	  HGPATH=/usr/local/bin/hg \
 	  PYTHON=/usr/bin/python2.7 \
 	  DESTDIR=../../build/mercurial \
 	  PREFIX=/usr/local \
 	  clean install
 	mkdir -p $${OUTPUTDIR:-dist}
 	HGVER=$$(python contrib/genosxversion.py $(OSXVERSIONFLAGS) build/mercurial/Library/Python/2.7/site-packages/mercurial/__version__.py) && \
 	OSXVER=$$(sw_vers -productVersion | cut -d. -f1,2) && \
 	pkgbuild --filter \\.DS_Store --root build/mercurial/ \
 	  --identifier org.mercurial-scm.mercurial \
 	  --version "$${HGVER}" \
 	  build/mercurial.pkg && \
 	productbuild --distribution contrib/packaging/macosx/distribution.xml \
 	  --package-path build/ \
 	  --version "$${HGVER}" \
 	  --resources contrib/packaging/macosx/ \
 	  "$${OUTPUTDIR:-dist/}"/Mercurial-"$${HGVER}"-macosx"$${OSXVER}".pkg
 
 pyoxidizer:
 	$(PYOXIDIZER) build --path ./rust/hgcli --release
 
 
 # a temporary target to setup all we need for run-tests.py --pyoxidizer
 # (should go away as the run-tests implementation improves
 pyoxidizer-windows-tests: PYOX_DIR=build/pyoxidizer/x86_64-pc-windows-msvc/release/app
 pyoxidizer-windows-tests: pyoxidizer
 	rm -rf $(PYOX_DIR)/templates
 	cp -ar $(PYOX_DIR)/lib/mercurial/templates $(PYOX_DIR)/templates
 	rm -rf $(PYOX_DIR)/helptext
 	cp -ar $(PYOX_DIR)/lib/mercurial/helptext $(PYOX_DIR)/helptext
 	rm -rf $(PYOX_DIR)/defaultrc
 	cp -ar $(PYOX_DIR)/lib/mercurial/defaultrc $(PYOX_DIR)/defaultrc
 	rm -rf $(PYOX_DIR)/contrib
 	cp -ar contrib $(PYOX_DIR)/contrib
 	rm -rf $(PYOX_DIR)/doc
 	cp -ar doc $(PYOX_DIR)/doc
 
 
 # a temporary target to setup all we need for run-tests.py --pyoxidizer
 # (should go away as the run-tests implementation improves
 pyoxidizer-macos-tests: PYOX_DIR=build/pyoxidizer/x86_64-apple-darwin/release/app
 pyoxidizer-macos-tests: pyoxidizer
 	rm -rf $(PYOX_DIR)/templates
 	cp -a mercurial/templates $(PYOX_DIR)/templates
 	rm -rf $(PYOX_DIR)/helptext
 	cp -a mercurial/helptext $(PYOX_DIR)/helptext
 	rm -rf $(PYOX_DIR)/defaultrc
 	cp -a mercurial/defaultrc $(PYOX_DIR)/defaultrc
 	rm -rf $(PYOX_DIR)/contrib
 	cp -a contrib $(PYOX_DIR)/contrib
 	rm -rf $(PYOX_DIR)/doc
 	cp -a doc $(PYOX_DIR)/doc
 
 
 .PHONY: help all local build doc cleanbutpackages clean install install-bin \
 	install-doc install-home install-home-bin install-home-doc \
 	dist dist-notests check tests rust-tests check-code format-c \
 	update-pot pyoxidizer pyoxidizer-windows-tests pyoxidizer-macos-tests \
 	$(packaging_targets) \
 	osx
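
The tests-target change above is the functional part of this hunk: when cargo is available, make tests now runs clippy with warnings promoted to errors alongside the Rust unit tests, and rust-tests runs from the whole rust/ workspace instead of rust/hg-cpython only. A sketch of the equivalent manual sequence:

    # mirrors what `make tests` now does; a sketch, not the Makefile itself
    if command -v cargo >/dev/null 2>&1; then
        make rust-tests      # `cargo test --quiet --all` from rust/
        make cargo-clippy    # new target: `cargo clippy ... -- -D warnings`
    fi
    cd tests && python3 run-tests.py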
@@ -1,1095 +1,1091
1 #!/usr/bin/env python3
1 #!/usr/bin/env python3
2 #
2 #
3 # check-code - a style and portability checker for Mercurial
3 # check-code - a style and portability checker for Mercurial
4 #
4 #
5 # Copyright 2010 Olivia Mackall <olivia@selenic.com>
5 # Copyright 2010 Olivia Mackall <olivia@selenic.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """style and portability checker for Mercurial
10 """style and portability checker for Mercurial
11
11
12 when a rule triggers wrong, do one of the following (prefer one from top):
12 when a rule triggers wrong, do one of the following (prefer one from top):
13 * do the work-around the rule suggests
13 * do the work-around the rule suggests
14 * doublecheck that it is a false match
14 * doublecheck that it is a false match
15 * improve the rule pattern
15 * improve the rule pattern
16 * add an ignore pattern to the rule (3rd arg) which matches your good line
16 * add an ignore pattern to the rule (3rd arg) which matches your good line
17 (you can append a short comment and match this, like: #re-raises)
17 (you can append a short comment and match this, like: #re-raises)
18 * change the pattern to a warning and list the exception in test-check-code-hg
18 * change the pattern to a warning and list the exception in test-check-code-hg
19 * ONLY use no--check-code for skipping entire files from external sources
19 * ONLY use no--check-code for skipping entire files from external sources
20 """
20 """
21
21
22 import glob
22 import glob
23 import keyword
23 import keyword
24 import optparse
24 import optparse
25 import os
25 import os
26 import re
26 import re
27 import sys
27 import sys
28
28
29 if sys.version_info[0] < 3:
29 if sys.version_info[0] < 3:
30 opentext = open
30 opentext = open
31 else:
31 else:
32
32
33 def opentext(f):
33 def opentext(f):
34 return open(f, encoding='latin1')
34 return open(f, encoding='latin1')
35
35
36
36
37 try:
37 try:
38 xrange
38 xrange
39 except NameError:
39 except NameError:
40 xrange = range
40 xrange = range
41 try:
41 try:
42 import re2
42 import re2
43 except ImportError:
43 except ImportError:
44 re2 = None
44 re2 = None
45
45
46 import testparseutil
46 import testparseutil
47
47
48
48
49 def compilere(pat, multiline=False):
49 def compilere(pat, multiline=False):
50 if multiline:
50 if multiline:
51 pat = '(?m)' + pat
51 pat = '(?m)' + pat
52 if re2:
52 if re2:
53 try:
53 try:
54 return re2.compile(pat)
54 return re2.compile(pat)
55 except re2.error:
55 except re2.error:
56 pass
56 pass
57 return re.compile(pat)
57 return re.compile(pat)
58
58
59
59
60 # check "rules depending on implementation of repquote()" in each
60 # check "rules depending on implementation of repquote()" in each
61 # patterns (especially pypats), before changing around repquote()
61 # patterns (especially pypats), before changing around repquote()
62 _repquotefixedmap = {
62 _repquotefixedmap = {
63 ' ': ' ',
63 ' ': ' ',
64 '\n': '\n',
64 '\n': '\n',
65 '.': 'p',
65 '.': 'p',
66 ':': 'q',
66 ':': 'q',
67 '%': '%',
67 '%': '%',
68 '\\': 'b',
68 '\\': 'b',
69 '*': 'A',
69 '*': 'A',
70 '+': 'P',
70 '+': 'P',
71 '-': 'M',
71 '-': 'M',
72 }
72 }
73
73
74
74
75 def _repquoteencodechr(i):
75 def _repquoteencodechr(i):
76 if i > 255:
76 if i > 255:
77 return 'u'
77 return 'u'
78 c = chr(i)
78 c = chr(i)
79 if c in _repquotefixedmap:
79 if c in _repquotefixedmap:
80 return _repquotefixedmap[c]
80 return _repquotefixedmap[c]
81 if c.isalpha():
81 if c.isalpha():
82 return 'x'
82 return 'x'
83 if c.isdigit():
83 if c.isdigit():
84 return 'n'
84 return 'n'
85 return 'o'
85 return 'o'
86
86
87
87
88 _repquotett = ''.join(_repquoteencodechr(i) for i in xrange(256))
88 _repquotett = ''.join(_repquoteencodechr(i) for i in xrange(256))
89
89
90
90
91 def repquote(m):
91 def repquote(m):
92 t = m.group('text')
92 t = m.group('text')
93 t = t.translate(_repquotett)
93 t = t.translate(_repquotett)
94 return m.group('quote') + t + m.group('quote')
94 return m.group('quote') + t + m.group('quote')
95
95
96
96
97 def reppython(m):
97 def reppython(m):
98 comment = m.group('comment')
98 comment = m.group('comment')
99 if comment:
99 if comment:
100 l = len(comment.rstrip())
100 l = len(comment.rstrip())
101 return "#" * l + comment[l:]
101 return "#" * l + comment[l:]
102 return repquote(m)
102 return repquote(m)
103
103
104
104
105 def repcomment(m):
105 def repcomment(m):
106 return m.group(1) + "#" * len(m.group(2))
106 return m.group(1) + "#" * len(m.group(2))
107
107
108
108
109 def repccomment(m):
109 def repccomment(m):
110 t = re.sub(r"((?<=\n) )|\S", "x", m.group(2))
110 t = re.sub(r"((?<=\n) )|\S", "x", m.group(2))
111 return m.group(1) + t + "*/"
111 return m.group(1) + t + "*/"
112
112
113
113
114 def repcallspaces(m):
114 def repcallspaces(m):
115 t = re.sub(r"\n\s+", "\n", m.group(2))
115 t = re.sub(r"\n\s+", "\n", m.group(2))
116 return m.group(1) + t
116 return m.group(1) + t
117
117
118
118
119 def repinclude(m):
119 def repinclude(m):
120 return m.group(1) + "<foo>"
120 return m.group(1) + "<foo>"
121
121
122
122
123 def rephere(m):
123 def rephere(m):
124 t = re.sub(r"\S", "x", m.group(2))
124 t = re.sub(r"\S", "x", m.group(2))
125 return m.group(1) + t
125 return m.group(1) + t
126
126
127
127
128 testpats = [
128 testpats = [
129 [
129 [
130 (r'\b(push|pop)d\b', "don't use 'pushd' or 'popd', use 'cd'"),
130 (r'\b(push|pop)d\b', "don't use 'pushd' or 'popd', use 'cd'"),
131 (r'\W\$?\(\([^\)\n]*\)\)', "don't use (()) or $(()), use 'expr'"),
131 (r'\W\$?\(\([^\)\n]*\)\)', "don't use (()) or $(()), use 'expr'"),
132 (r'grep.*-q', "don't use 'grep -q', redirect to /dev/null"),
132 (r'grep.*-q', "don't use 'grep -q', redirect to /dev/null"),
133 (r'(?<!hg )grep.* -a', "don't use 'grep -a', use in-line python"),
133 (r'(?<!hg )grep.* -a', "don't use 'grep -a', use in-line python"),
134 (r'sed.*-i', "don't use 'sed -i', use a temporary file"),
134 (r'sed.*-i', "don't use 'sed -i', use a temporary file"),
135 (r'\becho\b.*\\n', "don't use 'echo \\n', use printf"),
135 (r'\becho\b.*\\n', "don't use 'echo \\n', use printf"),
136 (r'echo -n', "don't use 'echo -n', use printf"),
136 (r'echo -n', "don't use 'echo -n', use printf"),
137 (r'(^|\|\s*)\bwc\b[^|]*$\n(?!.*\(re\))', "filter wc output"),
137 (r'(^|\|\s*)\bwc\b[^|]*$\n(?!.*\(re\))', "filter wc output"),
138 (r'head -c', "don't use 'head -c', use 'dd'"),
138 (r'head -c', "don't use 'head -c', use 'dd'"),
139 (r'tail -n', "don't use the '-n' option to tail, just use '-<num>'"),
139 (r'tail -n', "don't use the '-n' option to tail, just use '-<num>'"),
140 (r'sha1sum', "don't use sha1sum, use $TESTDIR/md5sum.py"),
140 (r'sha1sum', "don't use sha1sum, use $TESTDIR/md5sum.py"),
141 (r'\bls\b.*-\w*R', "don't use 'ls -R', use 'find'"),
141 (r'\bls\b.*-\w*R', "don't use 'ls -R', use 'find'"),
142 (r'printf.*[^\\]\\([1-9]|0\d)', r"don't use 'printf \NNN', use Python"),
142 (r'printf.*[^\\]\\([1-9]|0\d)', r"don't use 'printf \NNN', use Python"),
143 (r'printf.*[^\\]\\x', "don't use printf \\x, use Python"),
143 (r'printf.*[^\\]\\x', "don't use printf \\x, use Python"),
144 (r'rm -rf \*', "don't use naked rm -rf, target a directory"),
144 (r'rm -rf \*', "don't use naked rm -rf, target a directory"),
145 (
145 (
146 r'\[[^\]]+==',
146 r'\[[^\]]+==',
147 '[ foo == bar ] is a bashism, use [ foo = bar ] instead',
147 '[ foo == bar ] is a bashism, use [ foo = bar ] instead',
148 ),
148 ),
149 (
149 (
150 r'(^|\|\s*)grep (-\w\s+)*[^|]*[(|]\w',
150 r'(^|\|\s*)grep (-\w\s+)*[^|]*[(|]\w',
151 "use egrep for extended grep syntax",
151 "use egrep for extended grep syntax",
152 ),
152 ),
153 (r'(^|\|\s*)e?grep .*\\S', "don't use \\S in regular expression"),
153 (r'(^|\|\s*)e?grep .*\\S', "don't use \\S in regular expression"),
154 (r'(?<!!)/bin/', "don't use explicit paths for tools"),
154 (r'(?<!!)/bin/', "don't use explicit paths for tools"),
155 (r'#!.*/bash', "don't use bash in shebang, use sh"),
155 (r'#!.*/bash', "don't use bash in shebang, use sh"),
156 (r'[^\n]\Z', "no trailing newline"),
156 (r'[^\n]\Z', "no trailing newline"),
157 (r'export .*=', "don't export and assign at once"),
157 (r'export .*=', "don't export and assign at once"),
158 (r'^source\b', "don't use 'source', use '.'"),
158 (r'^source\b', "don't use 'source', use '.'"),
159 (r'touch -d', "don't use 'touch -d', use 'touch -t' instead"),
159 (r'touch -d', "don't use 'touch -d', use 'touch -t' instead"),
160 (r'\bls +[^|\n-]+ +-', "options to 'ls' must come before filenames"),
160 (r'\bls +[^|\n-]+ +-', "options to 'ls' must come before filenames"),
161 (r'[^>\n]>\s*\$HGRCPATH', "don't overwrite $HGRCPATH, append to it"),
161 (r'[^>\n]>\s*\$HGRCPATH', "don't overwrite $HGRCPATH, append to it"),
162 (r'^stop\(\)', "don't use 'stop' as a shell function name"),
162 (r'^stop\(\)', "don't use 'stop' as a shell function name"),
163 (r'(\[|\btest\b).*-e ', "don't use 'test -e', use 'test -f'"),
163 (r'(\[|\btest\b).*-e ', "don't use 'test -e', use 'test -f'"),
164 (r'\[\[\s+[^\]]*\]\]', "don't use '[[ ]]', use '[ ]'"),
164 (r'\[\[\s+[^\]]*\]\]', "don't use '[[ ]]', use '[ ]'"),
165 (r'^alias\b.*=', "don't use alias, use a function"),
165 (r'^alias\b.*=', "don't use alias, use a function"),
166 (r'if\s*!', "don't use '!' to negate exit status"),
166 (r'if\s*!', "don't use '!' to negate exit status"),
167 (r'/dev/u?random', "don't use entropy, use /dev/zero"),
167 (r'/dev/u?random', "don't use entropy, use /dev/zero"),
168 (r'do\s*true;\s*done', "don't use true as loop body, use sleep 0"),
168 (r'do\s*true;\s*done', "don't use true as loop body, use sleep 0"),
169 (
169 (
170 r'sed (-e )?\'(\d+|/[^/]*/)i(?!\\\n)',
170 r'sed (-e )?\'(\d+|/[^/]*/)i(?!\\\n)',
171 "put a backslash-escaped newline after sed 'i' command",
171 "put a backslash-escaped newline after sed 'i' command",
172 ),
172 ),
173 (r'^diff *-\w*[uU].*$\n(^ \$ |^$)', "prefix diff -u/-U with cmp"),
173 (r'^diff *-\w*[uU].*$\n(^ \$ |^$)', "prefix diff -u/-U with cmp"),
174 (r'^\s+(if)? diff *-\w*[uU]', "prefix diff -u/-U with cmp"),
174 (r'^\s+(if)? diff *-\w*[uU]', "prefix diff -u/-U with cmp"),
175 (r'[\s="`\']python\s(?!bindings)', "don't use 'python', use '$PYTHON'"),
175 (r'[\s="`\']python\s(?!bindings)', "don't use 'python', use '$PYTHON'"),
176 (r'seq ', "don't use 'seq', use $TESTDIR/seq.py"),
176 (r'seq ', "don't use 'seq', use $TESTDIR/seq.py"),
177 (r'\butil\.Abort\b', "directly use error.Abort"),
177 (r'\butil\.Abort\b', "directly use error.Abort"),
178 (r'\|&', "don't use |&, use 2>&1"),
178 (r'\|&', "don't use |&, use 2>&1"),
179 (r'\w = +\w', "only one space after = allowed"),
179 (r'\w = +\w', "only one space after = allowed"),
180 (
180 (
181 r'\bsed\b.*[^\\]\\n',
181 r'\bsed\b.*[^\\]\\n',
182 "don't use 'sed ... \\n', use a \\ and a newline",
182 "don't use 'sed ... \\n', use a \\ and a newline",
183 ),
183 ),
184 (r'env.*-u', "don't use 'env -u VAR', use 'unset VAR'"),
184 (r'env.*-u', "don't use 'env -u VAR', use 'unset VAR'"),
185 (r'cp.* -r ', "don't use 'cp -r', use 'cp -R'"),
185 (r'cp.* -r ', "don't use 'cp -r', use 'cp -R'"),
186 (r'grep.* -[ABC]', "don't use grep's context flags"),
186 (r'grep.* -[ABC]', "don't use grep's context flags"),
187 (
187 (
188 r'find.*-printf',
188 r'find.*-printf',
189 "don't use 'find -printf', it doesn't exist on BSD find(1)",
189 "don't use 'find -printf', it doesn't exist on BSD find(1)",
190 ),
190 ),
191 (r'\$RANDOM ', "don't use bash-only $RANDOM to generate random values"),
191 (r'\$RANDOM ', "don't use bash-only $RANDOM to generate random values"),
192 ],
192 ],
193 # warnings
193 # warnings
194 [
194 [
195 (r'^function', "don't use 'function', use old style"),
195 (r'^function', "don't use 'function', use old style"),
196 (r'^diff.*-\w*N', "don't use 'diff -N'"),
196 (r'^diff.*-\w*N', "don't use 'diff -N'"),
197 (r'\$PWD|\${PWD}', "don't use $PWD, use `pwd`", "no-pwd-check"),
197 (r'\$PWD|\${PWD}', "don't use $PWD, use `pwd`", "no-pwd-check"),
198 (r'^([^"\'\n]|("[^"\n]*")|(\'[^\'\n]*\'))*\^', "^ must be quoted"),
198 (r'^([^"\'\n]|("[^"\n]*")|(\'[^\'\n]*\'))*\^', "^ must be quoted"),
199 (r'kill (`|\$\()', "don't use kill, use killdaemons.py"),
199 (r'kill (`|\$\()', "don't use kill, use killdaemons.py"),
200 ],
200 ],
201 ]
201 ]
202
202
203 testfilters = [
203 testfilters = [
204 (r"( *)(#([^!][^\n]*\S)?)", repcomment),
204 (r"( *)(#([^!][^\n]*\S)?)", repcomment),
205 (r"<<(\S+)((.|\n)*?\n\1)", rephere),
205 (r"<<(\S+)((.|\n)*?\n\1)", rephere),
206 ]
206 ]
207
207
208 uprefix = r"^ \$ "
208 uprefix = r"^ \$ "
209 utestpats = [
209 utestpats = [
210 [
210 [
211 (r'^(\S.*|| [$>] \S.*)[ \t]\n', "trailing whitespace on non-output"),
211 (r'^(\S.*|| [$>] \S.*)[ \t]\n', "trailing whitespace on non-output"),
212 (
212 (
213 uprefix + r'.*\|\s*sed[^|>\n]*\n',
213 uprefix + r'.*\|\s*sed[^|>\n]*\n',
214 "use regex test output patterns instead of sed",
214 "use regex test output patterns instead of sed",
215 ),
215 ),
216 (uprefix + r'(true|exit 0)', "explicit zero exit unnecessary"),
216 (uprefix + r'(true|exit 0)', "explicit zero exit unnecessary"),
217 (
217 (
218 uprefix + r'.*\|\| echo.*(fail|error)',
218 uprefix + r'.*\|\| echo.*(fail|error)',
219 "explicit exit code checks unnecessary",
219 "explicit exit code checks unnecessary",
220 ),
220 ),
221 (uprefix + r'set -e', "don't use set -e"),
221 (uprefix + r'set -e', "don't use set -e"),
222 (uprefix + r'(\s|fi\b|done\b)', "use > for continued lines"),
222 (uprefix + r'(\s|fi\b|done\b)', "use > for continued lines"),
223 (
223 (
224 uprefix + r'.*:\.\S*/',
224 uprefix + r'.*:\.\S*/',
225 "x:.y in a path does not work on msys, rewrite "
225 "x:.y in a path does not work on msys, rewrite "
226 "as x://.y, or see `hg log -k msys` for alternatives",
226 "as x://.y, or see `hg log -k msys` for alternatives",
227 r'-\S+:\.|' '# no-msys', # -Rxxx
227 r'-\S+:\.|' '# no-msys', # -Rxxx
228 ), # in test-pull.t which is skipped on windows
228 ), # in test-pull.t which is skipped on windows
229 (
229 (
230 r'^ [^$>].*27\.0\.0\.1',
230 r'^ [^$>].*27\.0\.0\.1',
231 'use $LOCALIP not an explicit loopback address',
231 'use $LOCALIP not an explicit loopback address',
232 ),
232 ),
233 (
233 (
234 r'^ (?![>$] ).*\$LOCALIP.*[^)]$',
234 r'^ (?![>$] ).*\$LOCALIP.*[^)]$',
235 'mark $LOCALIP output lines with (glob) to help tests in BSD jails',
235 'mark $LOCALIP output lines with (glob) to help tests in BSD jails',
236 ),
236 ),
237 (
237 (
238 r'^ (cat|find): .*: \$ENOENT\$',
238 r'^ (cat|find): .*: \$ENOENT\$',
239 'use test -f to test for file existence',
239 'use test -f to test for file existence',
240 ),
240 ),
241 (
241 (
242 r'^ diff -[^ -]*p',
242 r'^ diff -[^ -]*p',
243 "don't use (external) diff with -p for portability",
243 "don't use (external) diff with -p for portability",
244 ),
244 ),
245 (r' readlink ', 'use readlink.py instead of readlink'),
245 (r' readlink ', 'use readlink.py instead of readlink'),
246 (
246 (
247 r'^ [-+][-+][-+] .* [-+]0000 \(glob\)',
247 r'^ [-+][-+][-+] .* [-+]0000 \(glob\)',
248 "glob timezone field in diff output for portability",
248 "glob timezone field in diff output for portability",
249 ),
249 ),
250 (
250 (
251 r'^ @@ -[0-9]+ [+][0-9]+,[0-9]+ @@',
251 r'^ @@ -[0-9]+ [+][0-9]+,[0-9]+ @@',
252 "use '@@ -N* +N,n @@ (glob)' style chunk header for portability",
252 "use '@@ -N* +N,n @@ (glob)' style chunk header for portability",
253 ),
253 ),
254 (
254 (
255 r'^ @@ -[0-9]+,[0-9]+ [+][0-9]+ @@',
255 r'^ @@ -[0-9]+,[0-9]+ [+][0-9]+ @@',
256 "use '@@ -N,n +N* @@ (glob)' style chunk header for portability",
256 "use '@@ -N,n +N* @@ (glob)' style chunk header for portability",
257 ),
257 ),
258 (
258 (
259 r'^ @@ -[0-9]+ [+][0-9]+ @@',
259 r'^ @@ -[0-9]+ [+][0-9]+ @@',
260 "use '@@ -N* +N* @@ (glob)' style chunk header for portability",
260 "use '@@ -N* +N* @@ (glob)' style chunk header for portability",
261 ),
261 ),
262 (
262 (
263 uprefix + r'hg( +-[^ ]+( +[^ ]+)?)* +extdiff'
263 uprefix + r'hg( +-[^ ]+( +[^ ]+)?)* +extdiff'
264 r'( +(-[^ po-]+|--(?!program|option)[^ ]+|[^-][^ ]*))*$',
264 r'( +(-[^ po-]+|--(?!program|option)[^ ]+|[^-][^ ]*))*$',
265 "use $RUNTESTDIR/pdiff via extdiff (or -o/-p for false-positives)",
265 "use $RUNTESTDIR/pdiff via extdiff (or -o/-p for false-positives)",
266 ),
266 ),
267 ],
267 ],
268 # warnings
268 # warnings
269 [
269 [
270 (
270 (
271 r'^ (?!.*\$LOCALIP)[^*?/\n]* \(glob\)$',
271 r'^ (?!.*\$LOCALIP)[^*?/\n]* \(glob\)$',
272 "glob match with no glob string (?, *, /, and $LOCALIP)",
272 "glob match with no glob string (?, *, /, and $LOCALIP)",
273 ),
273 ),
274 ],
274 ],
275 ]
275 ]
276
276
277 # transform plain test rules to unified test's
277 # transform plain test rules to unified test's
278 for i in [0, 1]:
278 for i in [0, 1]:
279 for tp in testpats[i]:
279 for tp in testpats[i]:
280 p = tp[0]
280 p = tp[0]
281 m = tp[1]
281 m = tp[1]
282 if p.startswith('^'):
282 if p.startswith('^'):
283 p = "^ [$>] (%s)" % p[1:]
283 p = "^ [$>] (%s)" % p[1:]
284 else:
284 else:
285 p = "^ [$>] .*(%s)" % p
285 p = "^ [$>] .*(%s)" % p
286 utestpats[i].append((p, m) + tp[2:])
286 utestpats[i].append((p, m) + tp[2:])
287
287
288 # don't transform the following rules:
288 # don't transform the following rules:
289 # " > \t" and " \t" should be allowed in unified tests
289 # " > \t" and " \t" should be allowed in unified tests
290 testpats[0].append((r'^( *)\t', "don't use tabs to indent"))
290 testpats[0].append((r'^( *)\t', "don't use tabs to indent"))
291 utestpats[0].append((r'^( ?)\t', "don't use tabs to indent"))
291 utestpats[0].append((r'^( ?)\t', "don't use tabs to indent"))
292
292
293 utestfilters = [
293 utestfilters = [
294 (r"<<(\S+)((.|\n)*?\n > \1)", rephere),
294 (r"<<(\S+)((.|\n)*?\n > \1)", rephere),
295 (r"( +)(#([^!][^\n]*\S)?)", repcomment),
295 (r"( +)(#([^!][^\n]*\S)?)", repcomment),
296 ]
296 ]
297
297
298 # common patterns to check *.py
298 # common patterns to check *.py
299 commonpypats = [
299 commonpypats = [
300 [
300 [
301 (r'\\$', 'Use () to wrap long lines in Python, not \\'),
301 (r'\\$', 'Use () to wrap long lines in Python, not \\'),
302 (
302 (
303 r'^\s*def\s*\w+\s*\(.*,\s*\(',
303 r'^\s*def\s*\w+\s*\(.*,\s*\(',
304 "tuple parameter unpacking not available in Python 3+",
304 "tuple parameter unpacking not available in Python 3+",
305 ),
305 ),
306 (
306 (
307 r'lambda\s*\(.*,.*\)',
307 r'lambda\s*\(.*,.*\)',
308 "tuple parameter unpacking not available in Python 3+",
308 "tuple parameter unpacking not available in Python 3+",
309 ),
309 ),
310 (r'(?<!def)\s+(cmp)\(', "cmp is not available in Python 3+"),
310 (r'(?<!def)\s+(cmp)\(', "cmp is not available in Python 3+"),
311 (r'(?<!\.)\breduce\s*\(.*', "reduce is not available in Python 3+"),
311 (r'(?<!\.)\breduce\s*\(.*', "reduce is not available in Python 3+"),
312 (
312 (
313 r'\bdict\(.*=',
313 r'\bdict\(.*=',
314 'dict() is different in Py2 and 3 and is slower than {}',
314 'dict() is different in Py2 and 3 and is slower than {}',
315 'dict-from-generator',
315 'dict-from-generator',
316 ),
316 ),
317 (r'\.has_key\b', "dict.has_key is not available in Python 3+"),
317 (r'\.has_key\b', "dict.has_key is not available in Python 3+"),
318 (r'\s<>\s', '<> operator is not available in Python 3+, use !='),
318 (r'\s<>\s', '<> operator is not available in Python 3+, use !='),
319 (r'^\s*\t', "don't use tabs"),
319 (r'^\s*\t', "don't use tabs"),
320 (r'\S;\s*\n', "semicolon"),
320 (r'\S;\s*\n', "semicolon"),
321 (r'[^_]_\([ \t\n]*(?:"[^"]+"[ \t\n+]*)+%', "don't use % inside _()"),
321 (r'[^_]_\([ \t\n]*(?:"[^"]+"[ \t\n+]*)+%', "don't use % inside _()"),
322 (r"[^_]_\([ \t\n]*(?:'[^']+'[ \t\n+]*)+%", "don't use % inside _()"),
322 (r"[^_]_\([ \t\n]*(?:'[^']+'[ \t\n+]*)+%", "don't use % inside _()"),
323 (r'(\w|\)),\w', "missing whitespace after ,"),
323 (r'(\w|\)),\w', "missing whitespace after ,"),
324 (r'(\w|\))[+/*\-<>]\w', "missing whitespace in expression"),
324 (r'(\w|\))[+/*\-<>]\w', "missing whitespace in expression"),
325 (r'\w\s=\s\s+\w', "gratuitous whitespace after ="),
325 (r'\w\s=\s\s+\w', "gratuitous whitespace after ="),
326 (
326 (
327 (
327 (
328 # a line ending with a colon, potentially with trailing comments
328 # a line ending with a colon, potentially with trailing comments
329 r':([ \t]*#[^\n]*)?\n'
329 r':([ \t]*#[^\n]*)?\n'
330 # one that is not a pass and not only a comment
330 # one that is not a pass and not only a comment
331 r'(?P<indent>[ \t]+)[^#][^\n]+\n'
331 r'(?P<indent>[ \t]+)[^#][^\n]+\n'
332 # more lines at the same indent level
332 # more lines at the same indent level
333 r'((?P=indent)[^\n]+\n)*'
333 r'((?P=indent)[^\n]+\n)*'
334 # a pass at the same indent level, which is bogus
334 # a pass at the same indent level, which is bogus
335 r'(?P=indent)pass[ \t\n#]'
335 r'(?P=indent)pass[ \t\n#]'
336 ),
336 ),
337 'omit superfluous pass',
337 'omit superfluous pass',
338 ),
338 ),
339 (r'[^\n]\Z', "no trailing newline"),
339 (r'[^\n]\Z', "no trailing newline"),
340 (r'(\S[ \t]+|^[ \t]+)\n', "trailing whitespace"),
340 (r'(\S[ \t]+|^[ \t]+)\n', "trailing whitespace"),
341 (
341 (
342 r'^\s*(if|while|def|class|except|try)\s[^[\n]*:\s*[^\\n]#\s]+',
342 r'^\s*(if|while|def|class|except|try)\s[^[\n]*:\s*[^\\n]#\s]+',
343 "linebreak after :",
343 "linebreak after :",
344 ),
344 ),
345 (
345 (
346 r'\b(%s)\('
346 r'\b(%s)\('
347 % '|'.join(k for k in keyword.kwlist if k not in ('print', 'exec')),
347 % '|'.join(k for k in keyword.kwlist if k not in ('print', 'exec')),
348 "Python keyword is not a function",
348 "Python keyword is not a function",
349 ),
349 ),
350 # (r'class\s[A-Z][^\(]*\((?!Exception)',
350 # (r'class\s[A-Z][^\(]*\((?!Exception)',
351 # "don't capitalize non-exception classes"),
351 # "don't capitalize non-exception classes"),
352 # (r'in range\(', "use xrange"),
352 # (r'in range\(', "use xrange"),
353 # (r'^\s*print\s+', "avoid using print in core and extensions"),
353 # (r'^\s*print\s+', "avoid using print in core and extensions"),
354 (r'[\x80-\xff]', "non-ASCII character literal"),
354 (r'[\x80-\xff]', "non-ASCII character literal"),
355 (r'("\')\.format\(', "str.format() has no bytes counterpart, use %"),
355 (r'("\')\.format\(', "str.format() has no bytes counterpart, use %"),
356 (
356 (
357 r'([\(\[][ \t]\S)|(\S[ \t][\)\]])',
357 r'([\(\[][ \t]\S)|(\S[ \t][\)\]])',
358 "gratuitous whitespace in () or []",
358 "gratuitous whitespace in () or []",
359 ),
359 ),
360 # (r'\s\s=', "gratuitous whitespace before ="),
360 # (r'\s\s=', "gratuitous whitespace before ="),
361 (
361 (
362 r'[^>< ](\+=|-=|!=|<>|<=|>=|<<=|>>=|%=)\S',
362 r'[^>< ](\+=|-=|!=|<>|<=|>=|<<=|>>=|%=)\S',
363 "missing whitespace around operator",
363 "missing whitespace around operator",
364 ),
364 ),
365 (
365 (
366 r'[^>< ](\+=|-=|!=|<>|<=|>=|<<=|>>=|%=)\s',
366 r'[^>< ](\+=|-=|!=|<>|<=|>=|<<=|>>=|%=)\s',
367 "missing whitespace around operator",
367 "missing whitespace around operator",
368 ),
368 ),
369 (
369 (
370 r'\s(\+=|-=|!=|<>|<=|>=|<<=|>>=|%=)\S',
370 r'\s(\+=|-=|!=|<>|<=|>=|<<=|>>=|%=)\S',
371 "missing whitespace around operator",
371 "missing whitespace around operator",
372 ),
372 ),
373 (r'[^^+=*/!<>&| %-](\s=|=\s)[^= ]', "wrong whitespace around ="),
373 (r'[^^+=*/!<>&| %-](\s=|=\s)[^= ]', "wrong whitespace around ="),
374 (
374 (
375 r'\([^()]*( =[^=]|[^<>!=]= )',
376 "no whitespace around = for named parameters",
377 ),
378 (
379 r'raise [^,(]+, (\([^\)]+\)|[^,\(\)]+)$',
375 r'raise [^,(]+, (\([^\)]+\)|[^,\(\)]+)$',
380 "don't use old-style two-argument raise, use Exception(message)",
376 "don't use old-style two-argument raise, use Exception(message)",
381 ),
377 ),
382 (r' is\s+(not\s+)?["\'0-9-]', "object comparison with literal"),
378 (r' is\s+(not\s+)?["\'0-9-]', "object comparison with literal"),
383 (
379 (
384 r' [=!]=\s+(True|False|None)',
380 r' [=!]=\s+(True|False|None)',
385 "comparison with singleton, use 'is' or 'is not' instead",
381 "comparison with singleton, use 'is' or 'is not' instead",
386 ),
382 ),
387 (
383 (
388 r'^\s*(while|if) [01]:',
384 r'^\s*(while|if) [01]:',
389 "use True/False for constant Boolean expression",
385 "use True/False for constant Boolean expression",
390 ),
386 ),
391 (r'^\s*if False(:| +and)', 'Remove code instead of using `if False`'),
387 (r'^\s*if False(:| +and)', 'Remove code instead of using `if False`'),
392 (
388 (
393 r'(?:(?<!def)\s+|\()hasattr\(',
389 r'(?:(?<!def)\s+|\()hasattr\(',
394 'hasattr(foo, bar) is broken on py2, use util.safehasattr(foo, bar) '
390 'hasattr(foo, bar) is broken on py2, use util.safehasattr(foo, bar) '
395 'instead',
391 'instead',
396 r'#.*hasattr-py3-only',
392 r'#.*hasattr-py3-only',
397 ),
393 ),
398 (r'opener\([^)]*\).read\(', "use opener.read() instead"),
394 (r'opener\([^)]*\).read\(', "use opener.read() instead"),
399 (r'opener\([^)]*\).write\(', "use opener.write() instead"),
395 (r'opener\([^)]*\).write\(', "use opener.write() instead"),
400 (r'(?i)descend[e]nt', "the proper spelling is descendAnt"),
396 (r'(?i)descend[e]nt', "the proper spelling is descendAnt"),
401 (r'\.debug\(\_', "don't mark debug messages for translation"),
397 (r'\.debug\(\_', "don't mark debug messages for translation"),
402 (r'\.strip\(\)\.split\(\)', "no need to strip before splitting"),
398 (r'\.strip\(\)\.split\(\)', "no need to strip before splitting"),
403 (r'^\s*except\s*:', "naked except clause", r'#.*re-raises'),
399 (r'^\s*except\s*:', "naked except clause", r'#.*re-raises'),
404 (
400 (
405 r'^\s*except\s([^\(,]+|\([^\)]+\))\s*,',
401 r'^\s*except\s([^\(,]+|\([^\)]+\))\s*,',
406 'legacy exception syntax; use "as" instead of ","',
402 'legacy exception syntax; use "as" instead of ","',
407 ),
403 ),
408 (r'release\(.*wlock, .*lock\)', "wrong lock release order"),
404 (r'release\(.*wlock, .*lock\)', "wrong lock release order"),
409 (r'\bdef\s+__bool__\b', "__bool__ should be __nonzero__ in Python 2"),
405 (r'\bdef\s+__bool__\b', "__bool__ should be __nonzero__ in Python 2"),
410 (
406 (
411 r'os\.path\.join\(.*, *(""|\'\')\)',
407 r'os\.path\.join\(.*, *(""|\'\')\)',
412 "use pathutil.normasprefix(path) instead of os.path.join(path, '')",
408 "use pathutil.normasprefix(path) instead of os.path.join(path, '')",
413 ),
409 ),
414 (r'\s0[0-7]+\b', 'legacy octal syntax; use "0o" prefix instead of "0"'),
410 (r'\s0[0-7]+\b', 'legacy octal syntax; use "0o" prefix instead of "0"'),
415 # XXX only catch mutable arguments on the first line of the definition
411 # XXX only catch mutable arguments on the first line of the definition
416 (r'def.*[( ]\w+=\{\}', "don't use mutable default arguments"),
412 (r'def.*[( ]\w+=\{\}', "don't use mutable default arguments"),
417 (r'\butil\.Abort\b', "directly use error.Abort"),
413 (r'\butil\.Abort\b', "directly use error.Abort"),
418 (
414 (
419 r'^@(\w*\.)?cachefunc',
415 r'^@(\w*\.)?cachefunc',
420 "module-level @cachefunc is risky, please avoid",
416 "module-level @cachefunc is risky, please avoid",
421 ),
417 ),
422 (
418 (
423 r'^(from|import) mercurial\.(cext|pure|cffi)',
419 r'^(from|import) mercurial\.(cext|pure|cffi)',
424 "use mercurial.policy.importmod instead",
420 "use mercurial.policy.importmod instead",
425 ),
421 ),
426 (r'\.next\(\)', "don't use .next(), use next(...)"),
422 (r'\.next\(\)', "don't use .next(), use next(...)"),
427 (
423 (
428 r'([a-z]*).revision\(\1\.node\(',
424 r'([a-z]*).revision\(\1\.node\(',
429 "don't convert rev to node before passing to revision(nodeorrev)",
425 "don't convert rev to node before passing to revision(nodeorrev)",
430 ),
426 ),
431 (r'platform\.system\(\)', "don't use platform.system(), use pycompat"),
427 (r'platform\.system\(\)', "don't use platform.system(), use pycompat"),
432 ],
428 ],
433 # warnings
429 # warnings
434 [],
430 [],
435 ]
431 ]
436
432
437 # patterns to check normal *.py files
433 # patterns to check normal *.py files
438 pypats = [
434 pypats = [
439 [
435 [
440 # Ideally, these should be placed in "commonpypats" for
436 # Ideally, these should be placed in "commonpypats" for
441 # consistency of coding rules in Mercurial source tree.
437 # consistency of coding rules in Mercurial source tree.
442 # But on the other hand, these are not so seriously required for
438 # But on the other hand, these are not so seriously required for
443 # python code fragments embedded in test scripts. Fixing test
439 # python code fragments embedded in test scripts. Fixing test
444 # scripts for these patterns requires many changes, and has less
440 # scripts for these patterns requires many changes, and has less
445 # profit than effort.
441 # profit than effort.
446 (r'raise Exception', "don't raise generic exceptions"),
442 (r'raise Exception', "don't raise generic exceptions"),
447 (r'[\s\(](open|file)\([^)]*\)\.read\(', "use util.readfile() instead"),
443 (r'[\s\(](open|file)\([^)]*\)\.read\(', "use util.readfile() instead"),
448 (
444 (
449 r'[\s\(](open|file)\([^)]*\)\.write\(',
445 r'[\s\(](open|file)\([^)]*\)\.write\(',
450 "use util.writefile() instead",
446 "use util.writefile() instead",
451 ),
447 ),
452 (
448 (
453 r'^[\s\(]*(open(er)?|file)\([^)]*\)(?!\.close\(\))',
449 r'^[\s\(]*(open(er)?|file)\([^)]*\)(?!\.close\(\))',
454 "always assign an opened file to a variable, and close it afterwards",
450 "always assign an opened file to a variable, and close it afterwards",
455 ),
451 ),
456 (
452 (
457 r'[\s\(](open|file)\([^)]*\)\.(?!close\(\))',
453 r'[\s\(](open|file)\([^)]*\)\.(?!close\(\))',
458 "always assign an opened file to a variable, and close it afterwards",
454 "always assign an opened file to a variable, and close it afterwards",
459 ),
455 ),
460 (r':\n( )*( ){1,3}[^ ]', "must indent 4 spaces"),
456 (r':\n( )*( ){1,3}[^ ]', "must indent 4 spaces"),
461 (r'^import atexit', "don't use atexit, use ui.atexit"),
457 (r'^import atexit', "don't use atexit, use ui.atexit"),
462 # rules depending on implementation of repquote()
458 # rules depending on implementation of repquote()
463 (
459 (
464 r' x+[xpqo%APM][\'"]\n\s+[\'"]x',
460 r' x+[xpqo%APM][\'"]\n\s+[\'"]x',
465 'string join across lines with no space',
461 'string join across lines with no space',
466 ),
462 ),
467 (
463 (
468 r'''(?x)ui\.(status|progress|write|note|warn)\(
464 r'''(?x)ui\.(status|progress|write|note|warn)\(
469 [ \t\n#]*
465 [ \t\n#]*
470 (?# any strings/comments might precede a string, which
466 (?# any strings/comments might precede a string, which
471 # contains translatable message)
467 # contains translatable message)
472 b?((['"]|\'\'\'|""")[ \npq%bAPMxno]*(['"]|\'\'\'|""")[ \t\n#]+)*
468 b?((['"]|\'\'\'|""")[ \npq%bAPMxno]*(['"]|\'\'\'|""")[ \t\n#]+)*
473 (?# sequence consisting of below might precede translatable message
469 (?# sequence consisting of below might precede translatable message
474 # - formatting string: "% 10s", "%05d", "% -3.2f", "%*s", "%%" ...
470 # - formatting string: "% 10s", "%05d", "% -3.2f", "%*s", "%%" ...
475 # - escaped character: "\\", "\n", "\0" ...
471 # - escaped character: "\\", "\n", "\0" ...
476 # - character other than '%', 'b' as '\', and 'x' as alphabet)
472 # - character other than '%', 'b' as '\', and 'x' as alphabet)
477 (['"]|\'\'\'|""")
473 (['"]|\'\'\'|""")
478 ((%([ n]?[PM]?([np]+|A))?x)|%%|b[bnx]|[ \nnpqAPMo])*x
474 ((%([ n]?[PM]?([np]+|A))?x)|%%|b[bnx]|[ \nnpqAPMo])*x
479 (?# this regexp can't use [^...] style,
475 (?# this regexp can't use [^...] style,
480 # because _preparepats forcibly adds "\n" into [^...],
476 # because _preparepats forcibly adds "\n" into [^...],
481 # even though this regexp wants to match it against "\n")''',
477 # even though this regexp wants to match it against "\n")''',
482 "missing _() in ui message (use () to hide false-positives)",
478 "missing _() in ui message (use () to hide false-positives)",
483 ),
479 ),
484 ]
480 ]
485 + commonpypats[0],
481 + commonpypats[0],
486 # warnings
482 # warnings
487 [
483 [
488 # rules depending on implementation of repquote()
484 # rules depending on implementation of repquote()
489 (r'(^| )pp +xxxxqq[ \n][^\n]', "add two newlines after '.. note::'"),
485 (r'(^| )pp +xxxxqq[ \n][^\n]', "add two newlines after '.. note::'"),
490 ]
486 ]
491 + commonpypats[1],
487 + commonpypats[1],
492 ]
488 ]
493
489
494 # patterns to check *.py for embedded ones in test script
490 # patterns to check *.py for embedded ones in test script
495 embeddedpypats = [
491 embeddedpypats = [
496 [] + commonpypats[0],
492 [] + commonpypats[0],
497 # warnings
493 # warnings
498 [] + commonpypats[1],
494 [] + commonpypats[1],
499 ]
495 ]
500
496
501 # common filters to convert *.py
497 # common filters to convert *.py
502 commonpyfilters = [
498 commonpyfilters = [
503 (
499 (
504 r"""(?msx)(?P<comment>\#.*?$)|
500 r"""(?msx)(?P<comment>\#.*?$)|
505 ((?P<quote>('''|\"\"\"|(?<!')'(?!')|(?<!")"(?!")))
501 ((?P<quote>('''|\"\"\"|(?<!')'(?!')|(?<!")"(?!")))
506 (?P<text>(([^\\]|\\.)*?))
502 (?P<text>(([^\\]|\\.)*?))
507 (?P=quote))""",
503 (?P=quote))""",
508 reppython,
504 reppython,
509 ),
505 ),
510 ]
506 ]
511
507
512 # pattern only for mercurial and extensions
508 # pattern only for mercurial and extensions
513 core_py_pats = [
509 core_py_pats = [
514 [
510 [
515 # Windows tends to get confused about the capitalization of the drive letter
511 # Windows tends to get confused about the capitalization of the drive letter
516 #
512 #
517 # see mercurial.windows.abspath for details
513 # see mercurial.windows.abspath for details
518 (
514 (
519 r'os\.path\.abspath',
515 r'os\.path\.abspath',
520 "use util.abspath instead (windows)",
516 "use util.abspath instead (windows)",
521 r'#.*re-exports',
517 r'#.*re-exports',
522 ),
518 ),
523 ],
519 ],
524 # warnings
520 # warnings
525 [],
521 [],
526 ]
522 ]
527
523
528 # filters to convert normal *.py files
524 # filters to convert normal *.py files
529 pyfilters = [] + commonpyfilters
525 pyfilters = [] + commonpyfilters
530
526
531 # non-filter patterns
527 # non-filter patterns
532 pynfpats = [
528 pynfpats = [
533 [
529 [
534 (r'pycompat\.osname\s*[=!]=\s*[\'"]nt[\'"]', "use pycompat.iswindows"),
530 (r'pycompat\.osname\s*[=!]=\s*[\'"]nt[\'"]', "use pycompat.iswindows"),
535 (r'pycompat\.osname\s*[=!]=\s*[\'"]posix[\'"]', "use pycompat.isposix"),
531 (r'pycompat\.osname\s*[=!]=\s*[\'"]posix[\'"]', "use pycompat.isposix"),
536 (
532 (
537 r'pycompat\.sysplatform\s*[!=]=\s*[\'"]darwin[\'"]',
533 r'pycompat\.sysplatform\s*[!=]=\s*[\'"]darwin[\'"]',
538 "use pycompat.isdarwin",
534 "use pycompat.isdarwin",
539 ),
535 ),
540 ],
536 ],
541 # warnings
537 # warnings
542 [],
538 [],
543 ]
539 ]
544
540
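# For illustration (hypothetical snippet, not from the Mercurial tree),
# the first non-filter pattern above would flag
#   if pycompat.osname == 'nt':
# and report "use pycompat.iswindows".
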
545 # filters to convert *.py for embedded ones in test script
541 # filters to convert *.py for embedded ones in test script
546 embeddedpyfilters = [] + commonpyfilters
542 embeddedpyfilters = [] + commonpyfilters
547
543
548 # extension non-filter patterns
544 # extension non-filter patterns
549 pyextnfpats = [
545 pyextnfpats = [
550 [(r'^"""\n?[A-Z]', "don't capitalize docstring title")],
546 [(r'^"""\n?[A-Z]', "don't capitalize docstring title")],
551 # warnings
547 # warnings
552 [],
548 [],
553 ]
549 ]
554
550
555 txtfilters = []
551 txtfilters = []
556
552
557 txtpats = [
553 txtpats = [
558 [
554 [
559 (r'\s$', 'trailing whitespace'),
555 (r'\s$', 'trailing whitespace'),
560 ('.. note::[ \n][^\n]', 'add two newlines after note::'),
556 ('.. note::[ \n][^\n]', 'add two newlines after note::'),
561 ],
557 ],
562 [],
558 [],
563 ]
559 ]
564
560
565 cpats = [
561 cpats = [
566 [
562 [
567 (r'//', "don't use //-style comments"),
563 (r'//', "don't use //-style comments"),
568 (r'\S\t', "don't use tabs except for indent"),
564 (r'\S\t', "don't use tabs except for indent"),
569 (r'(\S[ \t]+|^[ \t]+)\n', "trailing whitespace"),
565 (r'(\S[ \t]+|^[ \t]+)\n', "trailing whitespace"),
570 (r'(while|if|do|for)\(', "use space after while/if/do/for"),
566 (r'(while|if|do|for)\(', "use space after while/if/do/for"),
571 (r'return\(', "return is not a function"),
567 (r'return\(', "return is not a function"),
572 (r' ;', "no space before ;"),
568 (r' ;', "no space before ;"),
573 (r'[^;] \)', "no space before )"),
569 (r'[^;] \)', "no space before )"),
574 (r'[)][{]', "space between ) and {"),
570 (r'[)][{]', "space between ) and {"),
575 (r'\w+\* \w+', "use int *foo, not int* foo"),
571 (r'\w+\* \w+', "use int *foo, not int* foo"),
576 (r'\W\([^\)]+\) \w+', "use (int)foo, not (int) foo"),
572 (r'\W\([^\)]+\) \w+', "use (int)foo, not (int) foo"),
577 (r'\w+ (\+\+|--)', "use foo++, not foo ++"),
573 (r'\w+ (\+\+|--)', "use foo++, not foo ++"),
578 (r'\w,\w', "missing whitespace after ,"),
574 (r'\w,\w', "missing whitespace after ,"),
579 (r'^[^#]\w[+/*]\w', "missing whitespace in expression"),
575 (r'^[^#]\w[+/*]\w', "missing whitespace in expression"),
580 (r'\w\s=\s\s+\w', "gratuitous whitespace after ="),
576 (r'\w\s=\s\s+\w', "gratuitous whitespace after ="),
581 (r'^#\s+\w', "use #foo, not # foo"),
577 (r'^#\s+\w', "use #foo, not # foo"),
582 (r'[^\n]\Z', "no trailing newline"),
578 (r'[^\n]\Z', "no trailing newline"),
583 (r'^\s*#import\b', "use only #include in standard C code"),
579 (r'^\s*#import\b', "use only #include in standard C code"),
584 (r'strcpy\(', "don't use strcpy, use strlcpy or memcpy"),
580 (r'strcpy\(', "don't use strcpy, use strlcpy or memcpy"),
585 (r'strcat\(', "don't use strcat"),
581 (r'strcat\(', "don't use strcat"),
586 # rules depending on implementation of repquote()
582 # rules depending on implementation of repquote()
587 ],
583 ],
588 # warnings
584 # warnings
589 [
585 [
590 # rules depending on implementation of repquote()
586 # rules depending on implementation of repquote()
591 ],
587 ],
592 ]
588 ]
593
589
594 cfilters = [
590 cfilters = [
595 (r'(/\*)(((\*(?!/))|[^*])*)\*/', repccomment),
591 (r'(/\*)(((\*(?!/))|[^*])*)\*/', repccomment),
596 (r'''(?P<quote>(?<!")")(?P<text>([^"]|\\")+)"(?!")''', repquote),
592 (r'''(?P<quote>(?<!")")(?P<text>([^"]|\\")+)"(?!")''', repquote),
597 (r'''(#\s*include\s+<)([^>]+)>''', repinclude),
593 (r'''(#\s*include\s+<)([^>]+)>''', repinclude),
598 (r'(\()([^)]+\))', repcallspaces),
594 (r'(\()([^)]+\))', repcallspaces),
599 ]
595 ]
600
596
601 inutilpats = [
597 inutilpats = [
602 [
598 [
603 (r'\bui\.', "don't use ui in util"),
599 (r'\bui\.', "don't use ui in util"),
604 ],
600 ],
605 # warnings
601 # warnings
606 [],
602 [],
607 ]
603 ]
608
604
609 inrevlogpats = [
605 inrevlogpats = [
610 [
606 [
611 (r'\brepo\.', "don't use repo in revlog"),
607 (r'\brepo\.', "don't use repo in revlog"),
612 ],
608 ],
613 # warnings
609 # warnings
614 [],
610 [],
615 ]
611 ]
616
612
617 webtemplatefilters = []
613 webtemplatefilters = []
618
614
619 webtemplatepats = [
615 webtemplatepats = [
620 [],
616 [],
621 [
617 [
622 (
618 (
623 r'{desc(\|(?!websub|firstline)[^\|]*)+}',
619 r'{desc(\|(?!websub|firstline)[^\|]*)+}',
624 'follow desc keyword with either firstline or websub',
620 'follow desc keyword with either firstline or websub',
625 ),
621 ),
626 ],
622 ],
627 ]
623 ]
628
624
629 allfilesfilters = []
625 allfilesfilters = []
630
626
631 allfilespats = [
627 allfilespats = [
632 [
628 [
633 (
629 (
634 r'(http|https)://[a-zA-Z0-9./]*selenic.com/',
630 r'(http|https)://[a-zA-Z0-9./]*selenic.com/',
635 'use mercurial-scm.org domain URL',
631 'use mercurial-scm.org domain URL',
636 ),
632 ),
637 (
633 (
638 r'mercurial@selenic\.com',
634 r'mercurial@selenic\.com',
639 'use mercurial-scm.org domain for mercurial ML address',
635 'use mercurial-scm.org domain for mercurial ML address',
640 ),
636 ),
641 (
637 (
642 r'mercurial-devel@selenic\.com',
638 r'mercurial-devel@selenic\.com',
643 'use mercurial-scm.org domain for mercurial-devel ML address',
639 'use mercurial-scm.org domain for mercurial-devel ML address',
644 ),
640 ),
645 ],
641 ],
646 # warnings
642 # warnings
647 [],
643 [],
648 ]
644 ]
649
645
650 py3pats = [
646 py3pats = [
651 [
647 [
652 (
648 (
653 r'os\.environ',
649 r'os\.environ',
654 "use encoding.environ instead (py3)",
650 "use encoding.environ instead (py3)",
655 r'#.*re-exports',
651 r'#.*re-exports',
656 ),
652 ),
657 (r'os\.name', "use pycompat.osname instead (py3)"),
653 (r'os\.name', "use pycompat.osname instead (py3)"),
658 (r'os\.getcwd', "use encoding.getcwd instead (py3)", r'#.*re-exports'),
654 (r'os\.getcwd', "use encoding.getcwd instead (py3)", r'#.*re-exports'),
659 (r'os\.sep', "use pycompat.ossep instead (py3)"),
655 (r'os\.sep', "use pycompat.ossep instead (py3)"),
660 (r'os\.pathsep', "use pycompat.ospathsep instead (py3)"),
656 (r'os\.pathsep', "use pycompat.ospathsep instead (py3)"),
661 (r'os\.altsep', "use pycompat.osaltsep instead (py3)"),
657 (r'os\.altsep', "use pycompat.osaltsep instead (py3)"),
662 (r'sys\.platform', "use pycompat.sysplatform instead (py3)"),
658 (r'sys\.platform', "use pycompat.sysplatform instead (py3)"),
663 (r'getopt\.getopt', "use pycompat.getoptb instead (py3)"),
659 (r'getopt\.getopt', "use pycompat.getoptb instead (py3)"),
664 (r'os\.getenv', "use encoding.environ.get instead"),
660 (r'os\.getenv', "use encoding.environ.get instead"),
665 (r'os\.setenv', "modifying the environ dict is not preferred"),
661 (r'os\.setenv', "modifying the environ dict is not preferred"),
666 (r'(?<!pycompat\.)xrange', "use pycompat.xrange instead (py3)"),
662 (r'(?<!pycompat\.)xrange', "use pycompat.xrange instead (py3)"),
667 ],
663 ],
668 # warnings
664 # warnings
669 [],
665 [],
670 ]
666 ]
671
667
672 checks = [
668 checks = [
673 ('python', r'.*\.(py|cgi)$', r'^#!.*python', pyfilters, pypats),
669 ('python', r'.*\.(py|cgi)$', r'^#!.*python', pyfilters, pypats),
674 ('python', r'.*\.(py|cgi)$', r'^#!.*python', [], pynfpats),
670 ('python', r'.*\.(py|cgi)$', r'^#!.*python', [], pynfpats),
675 ('python', r'.*hgext.*\.py$', '', [], pyextnfpats),
671 ('python', r'.*hgext.*\.py$', '', [], pyextnfpats),
676 (
672 (
677 'python 3',
673 'python 3',
678 r'.*(hgext|mercurial)/(?!demandimport|policy|pycompat).*\.py',
674 r'.*(hgext|mercurial)/(?!demandimport|policy|pycompat).*\.py',
679 '',
675 '',
680 pyfilters,
676 pyfilters,
681 py3pats,
677 py3pats,
682 ),
678 ),
683 (
679 (
684 'core files',
680 'core files',
685 r'.*(hgext|mercurial)/(?!demandimport|policy|pycompat).*\.py',
681 r'.*(hgext|mercurial)/(?!demandimport|policy|pycompat).*\.py',
686 '',
682 '',
687 pyfilters,
683 pyfilters,
688 core_py_pats,
684 core_py_pats,
689 ),
685 ),
690 ('test script', r'(.*/)?test-[^.~]*$', '', testfilters, testpats),
686 ('test script', r'(.*/)?test-[^.~]*$', '', testfilters, testpats),
691 ('c', r'.*\.[ch]$', '', cfilters, cpats),
687 ('c', r'.*\.[ch]$', '', cfilters, cpats),
692 ('unified test', r'.*\.t$', '', utestfilters, utestpats),
688 ('unified test', r'.*\.t$', '', utestfilters, utestpats),
693 (
689 (
694 'layering violation repo in revlog',
690 'layering violation repo in revlog',
695 r'mercurial/revlog\.py',
691 r'mercurial/revlog\.py',
696 '',
692 '',
697 pyfilters,
693 pyfilters,
698 inrevlogpats,
694 inrevlogpats,
699 ),
695 ),
700 (
696 (
701 'layering violation ui in util',
697 'layering violation ui in util',
702 r'mercurial/util\.py',
698 r'mercurial/util\.py',
703 '',
699 '',
704 pyfilters,
700 pyfilters,
705 inutilpats,
701 inutilpats,
706 ),
702 ),
707 ('txt', r'.*\.txt$', '', txtfilters, txtpats),
703 ('txt', r'.*\.txt$', '', txtfilters, txtpats),
708 (
704 (
709 'web template',
705 'web template',
710 r'mercurial/templates/.*\.tmpl',
706 r'mercurial/templates/.*\.tmpl',
711 '',
707 '',
712 webtemplatefilters,
708 webtemplatefilters,
713 webtemplatepats,
709 webtemplatepats,
714 ),
710 ),
715 ('all except for .po', r'.*(?<!\.po)$', '', allfilesfilters, allfilespats),
711 ('all except for .po', r'.*(?<!\.po)$', '', allfilesfilters, allfilespats),
716 ]
712 ]
717
713
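# For reference, each entry of "checks" above is a 5-tuple consumed by
# checkfile() below as:
# (desc,
#  regexp matching target file paths,
#  regexp matched against the start of the file content (e.g. a shebang
#  line), or '' to rely on the path match alone,
#  list of patterns to convert target files,
#  list of patterns to detect errors/warnings)
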
718 # (desc,
714 # (desc,
719 # func to pick up embedded code fragments,
715 # func to pick up embedded code fragments,
720 # list of patterns to convert target files,
716 # list of patterns to convert target files,
721 # list of patterns to detect errors/warnings)
717 # list of patterns to detect errors/warnings)
722 embeddedchecks = [
718 embeddedchecks = [
723 (
719 (
724 'embedded python',
720 'embedded python',
725 testparseutil.pyembedded,
721 testparseutil.pyembedded,
726 embeddedpyfilters,
722 embeddedpyfilters,
727 embeddedpypats,
723 embeddedpypats,
728 )
724 )
729 ]
725 ]
730
726
731
727
732 def _preparepats():
728 def _preparepats():
733 def preparefailandwarn(failandwarn):
729 def preparefailandwarn(failandwarn):
734 for pats in failandwarn:
730 for pats in failandwarn:
735 for i, pseq in enumerate(pats):
731 for i, pseq in enumerate(pats):
736 # fix-up regexes for multi-line searches
732 # fix-up regexes for multi-line searches
737 p = pseq[0]
733 p = pseq[0]
738 # \s doesn't match \n (done in two steps)
734 # \s doesn't match \n (done in two steps)
739 # first, we replace \s that appears in a set already
735 # first, we replace \s that appears in a set already
740 p = re.sub(r'\[\\s', r'[ \\t', p)
736 p = re.sub(r'\[\\s', r'[ \\t', p)
741 # now we replace other \s instances.
737 # now we replace other \s instances.
742 p = re.sub(r'(?<!(\\|\[))\\s', r'[ \\t]', p)
738 p = re.sub(r'(?<!(\\|\[))\\s', r'[ \\t]', p)
743 # [^...] doesn't match newline
739 # [^...] doesn't match newline
744 p = re.sub(r'(?<!\\)\[\^', r'[^\\n', p)
740 p = re.sub(r'(?<!\\)\[\^', r'[^\\n', p)
745
741
746 pats[i] = (re.compile(p, re.MULTILINE),) + pseq[1:]
742 pats[i] = (re.compile(p, re.MULTILINE),) + pseq[1:]
747
743
748 def preparefilters(filters):
744 def preparefilters(filters):
749 for i, flt in enumerate(filters):
745 for i, flt in enumerate(filters):
750 filters[i] = re.compile(flt[0]), flt[1]
746 filters[i] = re.compile(flt[0]), flt[1]
751
747
752 for cs in (checks, embeddedchecks):
748 for cs in (checks, embeddedchecks):
753 for c in cs:
749 for c in cs:
754 failandwarn = c[-1]
750 failandwarn = c[-1]
755 preparefailandwarn(failandwarn)
751 preparefailandwarn(failandwarn)
756
752
757 filters = c[-2]
753 filters = c[-2]
758 preparefilters(filters)
754 preparefilters(filters)
759
755
760
756
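# A minimal sketch of the rewriting done by _preparepats() above, applied
# to the trailing-whitespace pattern r'\s$' from txtpats:
#
#   p = re.sub(r'\[\\s', r'[ \\t', r'\s$')         # \s inside [...]: no-op here
#   p = re.sub(r'(?<!(\\|\[))\\s', r'[ \\t]', p)   # bare \s -> [ \t]
#   # p is now r'[ \t]$': with re.MULTILINE it matches a trailing space
#   # or tab on any line, without ever matching the newline itself.
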
761 class norepeatlogger:
757 class norepeatlogger:
762 def __init__(self):
758 def __init__(self):
763 self._lastseen = None
759 self._lastseen = None
764
760
765 def log(self, fname, lineno, line, msg, blame):
761 def log(self, fname, lineno, line, msg, blame):
766 """print error related a to given line of a given file.
762 """print error related a to given line of a given file.
767
763
768 The faulty line will also be printed but only once in the case
764 The faulty line will also be printed but only once in the case
769 of multiple errors.
765 of multiple errors.
770
766
771 :fname: filename
767 :fname: filename
772 :lineno: line number
768 :lineno: line number
773 :line: actual content of the line
769 :line: actual content of the line
774 :msg: error message
770 :msg: error message
775 """
771 """
776 msgid = fname, lineno, line
772 msgid = fname, lineno, line
777 if msgid != self._lastseen:
773 if msgid != self._lastseen:
778 if blame:
774 if blame:
779 print("%s:%d (%s):" % (fname, lineno, blame))
775 print("%s:%d (%s):" % (fname, lineno, blame))
780 else:
776 else:
781 print("%s:%d:" % (fname, lineno))
777 print("%s:%d:" % (fname, lineno))
782 print(" > %s" % line)
778 print(" > %s" % line)
783 self._lastseen = msgid
779 self._lastseen = msgid
784 print(" " + msg)
780 print(" " + msg)
785
781
786
782
787 _defaultlogger = norepeatlogger()
783 _defaultlogger = norepeatlogger()
788
784
789
785
790 def getblame(f):
786 def getblame(f):
791 lines = []
787 lines = []
792 for l in os.popen('hg annotate -un %s' % f):
788 for l in os.popen('hg annotate -un %s' % f):
793 start, line = l.split(':', 1)
789 start, line = l.split(':', 1)
794 user, rev = start.split()
790 user, rev = start.split()
795 lines.append((line[1:-1], user, rev))
791 lines.append((line[1:-1], user, rev))
796 return lines
792 return lines
797
793
798
794
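# For reference, a sketch of how getblame() above parses one line of
# `hg annotate -un` output (user and revision number, then the line):
#
#   ' alice 1234: some line content\n'
#   -> start = ' alice 1234', line = ' some line content\n'
#   -> lines.append(('some line content', 'alice', '1234'))
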
799 def checkfile(
795 def checkfile(
800 f,
796 f,
801 logfunc=_defaultlogger.log,
797 logfunc=_defaultlogger.log,
802 maxerr=None,
798 maxerr=None,
803 warnings=False,
799 warnings=False,
804 blame=False,
800 blame=False,
805 debug=False,
801 debug=False,
806 lineno=True,
802 lineno=True,
807 ):
803 ):
808 """checks style and portability of a given file
804 """checks style and portability of a given file
809
805
810 :f: filepath
806 :f: filepath
811 :logfunc: function used to report error
807 :logfunc: function used to report error
812 logfunc(filename, linenumber, linecontent, errormessage, blame)
808 logfunc(filename, linenumber, linecontent, errormessage, blame)
813 :maxerr: number of errors to display before aborting.
809 :maxerr: number of errors to display before aborting.
814 Set to false (default) to report all errors
810 Set to false (default) to report all errors
815
811
816 return True if no error is found, False otherwise.
812 return True if no error is found, False otherwise.
817 """
813 """
818 result = True
814 result = True
819
815
820 try:
816 try:
821 with opentext(f) as fp:
817 with opentext(f) as fp:
822 try:
818 try:
823 pre = fp.read()
819 pre = fp.read()
824 except UnicodeDecodeError as e:
820 except UnicodeDecodeError as e:
825 print("%s while reading %s" % (e, f))
821 print("%s while reading %s" % (e, f))
826 return result
822 return result
827 except IOError as e:
823 except IOError as e:
828 print("Skipping %s, %s" % (f, str(e).split(':', 1)[0]))
824 print("Skipping %s, %s" % (f, str(e).split(':', 1)[0]))
829 return result
825 return result
830
826
831 # context information shared within a single checkfile() invocation
827 # context information shared within a single checkfile() invocation
832 context = {'blamecache': None}
828 context = {'blamecache': None}
833
829
834 for name, match, magic, filters, pats in checks:
830 for name, match, magic, filters, pats in checks:
835 if debug:
831 if debug:
836 print(name, f)
832 print(name, f)
837 if not (re.match(match, f) or (magic and re.search(magic, pre))):
833 if not (re.match(match, f) or (magic and re.search(magic, pre))):
838 if debug:
834 if debug:
839 print(
835 print(
840 "Skipping %s for %s it doesn't match %s" % (name, match, f)
836 "Skipping %s for %s it doesn't match %s" % (name, match, f)
841 )
837 )
842 continue
838 continue
843 if "no-" "check-code" in pre:
839 if "no-" "check-code" in pre:
844 # If you're looking at this line, it's because a file has:
840 # If you're looking at this line, it's because a file has:
845 # no- check- code
841 # no- check- code
846 # but the reason we output "skipping" is to make life easier
842 # but the reason we output "skipping" is to make life easier
847 # for tests. So, instead of writing it with a normal
843 # for tests. So, instead of writing it with a normal
848 # spelling, we write it with the expected spelling from
844 # spelling, we write it with the expected spelling from
849 # tests/test-check-code.t
845 # tests/test-check-code.t
850 print("Skipping %s it has no-che?k-code (glob)" % f)
846 print("Skipping %s it has no-che?k-code (glob)" % f)
851 return "Skip" # skip checking this file
847 return "Skip" # skip checking this file
852
848
853 fc = _checkfiledata(
849 fc = _checkfiledata(
854 name,
850 name,
855 f,
851 f,
856 pre,
852 pre,
857 filters,
853 filters,
858 pats,
854 pats,
859 context,
855 context,
860 logfunc,
856 logfunc,
861 maxerr,
857 maxerr,
862 warnings,
858 warnings,
863 blame,
859 blame,
864 debug,
860 debug,
865 lineno,
861 lineno,
866 )
862 )
867 if fc:
863 if fc:
868 result = False
864 result = False
869
865
870 if f.endswith('.t') and "no-" "check-code" not in pre:
866 if f.endswith('.t') and "no-" "check-code" not in pre:
871 if debug:
867 if debug:
872 print("Checking embedded code in %s" % f)
868 print("Checking embedded code in %s" % f)
873
869
874 prelines = pre.splitlines()
870 prelines = pre.splitlines()
875 embeddederrors = []
871 embeddederrors = []
876 for name, embedded, filters, pats in embeddedchecks:
872 for name, embedded, filters, pats in embeddedchecks:
877 # "reset curmax at each repetition" treats maxerr as "max
873 # "reset curmax at each repetition" treats maxerr as "max
878 # number of errors in an actual file per entry of
874 # number of errors in an actual file per entry of
879 # (embedded)checks"
875 # (embedded)checks"
880 curmaxerr = maxerr
876 curmaxerr = maxerr
881
877
882 for found in embedded(f, prelines, embeddederrors):
878 for found in embedded(f, prelines, embeddederrors):
883 filename, starts, ends, code = found
879 filename, starts, ends, code = found
884 fc = _checkfiledata(
880 fc = _checkfiledata(
885 name,
881 name,
886 f,
882 f,
887 code,
883 code,
888 filters,
884 filters,
889 pats,
885 pats,
890 context,
886 context,
891 logfunc,
887 logfunc,
892 curmaxerr,
888 curmaxerr,
893 warnings,
889 warnings,
894 blame,
890 blame,
895 debug,
891 debug,
896 lineno,
892 lineno,
897 offset=starts - 1,
893 offset=starts - 1,
898 )
894 )
899 if fc:
895 if fc:
900 result = False
896 result = False
901 if curmaxerr:
897 if curmaxerr:
902 if fc >= curmaxerr:
898 if fc >= curmaxerr:
903 break
899 break
904 curmaxerr -= fc
900 curmaxerr -= fc
905
901
906 return result
902 return result
907
903
908
904
909 def _checkfiledata(
905 def _checkfiledata(
910 name,
906 name,
911 f,
907 f,
912 filedata,
908 filedata,
913 filters,
909 filters,
914 pats,
910 pats,
915 context,
911 context,
916 logfunc,
912 logfunc,
917 maxerr,
913 maxerr,
918 warnings,
914 warnings,
919 blame,
915 blame,
920 debug,
916 debug,
921 lineno,
917 lineno,
922 offset=None,
918 offset=None,
923 ):
919 ):
924 """Execute actual error check for file data
920 """Execute actual error check for file data
925
921
926 :name: of the checking category
922 :name: of the checking category
927 :f: filepath
923 :f: filepath
928 :filedata: content of a file
924 :filedata: content of a file
929 :filters: to be applied before checking
925 :filters: to be applied before checking
930 :pats: to detect errors
926 :pats: to detect errors
931 :context: a dict of information shared within a single checkfile() invocation
927 :context: a dict of information shared within a single checkfile() invocation
932 Valid keys: 'blamecache'.
928 Valid keys: 'blamecache'.
933 :logfunc: function used to report error
929 :logfunc: function used to report error
934 logfunc(filename, linenumber, linecontent, errormessage, blame)
930 logfunc(filename, linenumber, linecontent, errormessage, blame)
935 :maxerr: number of errors to display before aborting, or False to
931 :maxerr: number of errors to display before aborting, or False to
936 report all errors
932 report all errors
937 :warnings: whether warning level checks should be applied
933 :warnings: whether warning level checks should be applied
938 :blame: whether blame information should be displayed at error reporting
934 :blame: whether blame information should be displayed at error reporting
939 :debug: whether debug information should be displayed
935 :debug: whether debug information should be displayed
940 :lineno: whether lineno should be displayed at error reporting
936 :lineno: whether lineno should be displayed at error reporting
941 :offset: line number offset of 'filedata' in 'f' for checking
937 :offset: line number offset of 'filedata' in 'f' for checking
942 an embedded code fragment, or None (offset=0 is different
938 an embedded code fragment, or None (offset=0 is different
943 from offset=None)
939 from offset=None)
944
940
945 returns number of detected errors.
941 returns number of detected errors.
946 """
942 """
947 blamecache = context['blamecache']
943 blamecache = context['blamecache']
948 if offset is None:
944 if offset is None:
949 lineoffset = 0
945 lineoffset = 0
950 else:
946 else:
951 lineoffset = offset
947 lineoffset = offset
952
948
953 fc = 0
949 fc = 0
954 pre = post = filedata
950 pre = post = filedata
955
951
956 if True: # TODO: get rid of this redundant 'if' block
952 if True: # TODO: get rid of this redundant 'if' block
957 for p, r in filters:
953 for p, r in filters:
958 post = re.sub(p, r, post)
954 post = re.sub(p, r, post)
959 nerrs = len(pats[0]) # the first nerrs patterns are errors
955 nerrs = len(pats[0]) # the first nerrs patterns are errors
960 if warnings:
956 if warnings:
961 pats = pats[0] + pats[1]
957 pats = pats[0] + pats[1]
962 else:
958 else:
963 pats = pats[0]
959 pats = pats[0]
964 # print(post) # uncomment to show filtered version
960 # print(post) # uncomment to show filtered version
965
961
966 if debug:
962 if debug:
967 print("Checking %s for %s" % (name, f))
963 print("Checking %s for %s" % (name, f))
968
964
969 prelines = None
965 prelines = None
970 errors = []
966 errors = []
971 for i, pat in enumerate(pats):
967 for i, pat in enumerate(pats):
972 if len(pat) == 3:
968 if len(pat) == 3:
973 p, msg, ignore = pat
969 p, msg, ignore = pat
974 else:
970 else:
975 p, msg = pat
971 p, msg = pat
976 ignore = None
972 ignore = None
977 if i >= nerrs:
973 if i >= nerrs:
978 msg = "warning: " + msg
974 msg = "warning: " + msg
979
975
980 pos = 0
976 pos = 0
981 n = 0
977 n = 0
982 for m in p.finditer(post):
978 for m in p.finditer(post):
983 if prelines is None:
979 if prelines is None:
984 prelines = pre.splitlines()
980 prelines = pre.splitlines()
985 postlines = post.splitlines(True)
981 postlines = post.splitlines(True)
986
982
987 start = m.start()
983 start = m.start()
988 while n < len(postlines):
984 while n < len(postlines):
989 step = len(postlines[n])
985 step = len(postlines[n])
990 if pos + step > start:
986 if pos + step > start:
991 break
987 break
992 pos += step
988 pos += step
993 n += 1
989 n += 1
994 l = prelines[n]
990 l = prelines[n]
995
991
996 if ignore and re.search(ignore, l, re.MULTILINE):
992 if ignore and re.search(ignore, l, re.MULTILINE):
997 if debug:
993 if debug:
998 print(
994 print(
999 "Skipping %s for %s:%s (ignore pattern)"
995 "Skipping %s for %s:%s (ignore pattern)"
1000 % (name, f, (n + lineoffset))
996 % (name, f, (n + lineoffset))
1001 )
997 )
1002 continue
998 continue
1003 bd = ""
999 bd = ""
1004 if blame:
1000 if blame:
1005 bd = 'working directory'
1001 bd = 'working directory'
1006 if blamecache is None:
1002 if blamecache is None:
1007 blamecache = getblame(f)
1003 blamecache = getblame(f)
1008 context['blamecache'] = blamecache
1004 context['blamecache'] = blamecache
1009 if (n + lineoffset) < len(blamecache):
1005 if (n + lineoffset) < len(blamecache):
1010 bl, bu, br = blamecache[(n + lineoffset)]
1006 bl, bu, br = blamecache[(n + lineoffset)]
1011 if offset is None and bl == l:
1007 if offset is None and bl == l:
1012 bd = '%s@%s' % (bu, br)
1008 bd = '%s@%s' % (bu, br)
1013 elif offset is not None and bl.endswith(l):
1009 elif offset is not None and bl.endswith(l):
1014 # "offset is not None" means "checking
1010 # "offset is not None" means "checking
1015 # embedded code fragment". In this case,
1011 # embedded code fragment". In this case,
1016 # "l" does not have information about the
1012 # "l" does not have information about the
1017 # beginning of an *original* line in the
1013 # beginning of an *original* line in the
1018 # file (e.g. ' > ').
1014 # file (e.g. ' > ').
1019 # Therefore, use "str.endswith()", and
1015 # Therefore, use "str.endswith()", and
1020 # show "maybe" for a little loose
1016 # show "maybe" for a little loose
1021 # examination.
1017 # examination.
1022 bd = '%s@%s, maybe' % (bu, br)
1018 bd = '%s@%s, maybe' % (bu, br)
1023
1019
1024 errors.append((f, lineno and (n + lineoffset + 1), l, msg, bd))
1020 errors.append((f, lineno and (n + lineoffset + 1), l, msg, bd))
1025
1021
1026 errors.sort()
1022 errors.sort()
1027 for e in errors:
1023 for e in errors:
1028 logfunc(*e)
1024 logfunc(*e)
1029 fc += 1
1025 fc += 1
1030 if maxerr and fc >= maxerr:
1026 if maxerr and fc >= maxerr:
1031 print(" (too many errors, giving up)")
1027 print(" (too many errors, giving up)")
1032 break
1028 break
1033
1029
1034 return fc
1030 return fc
1035
1031
1036
1032
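# A minimal usage sketch (hypothetical, not part of this script): run the
# checks on one file, collecting results instead of printing them.
#
#   results = []
#   def collect(fname, lineno, line, msg, blame):
#       results.append((fname, lineno, msg))
#   _preparepats()  # must be called once before checkfile()
#   ok = checkfile('mercurial/util.py', logfunc=collect, warnings=True)
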
1037 def main():
1033 def main():
1038 parser = optparse.OptionParser("%prog [options] [files | -]")
1034 parser = optparse.OptionParser("%prog [options] [files | -]")
1039 parser.add_option(
1035 parser.add_option(
1040 "-w",
1036 "-w",
1041 "--warnings",
1037 "--warnings",
1042 action="store_true",
1038 action="store_true",
1043 help="include warning-level checks",
1039 help="include warning-level checks",
1044 )
1040 )
1045 parser.add_option(
1041 parser.add_option(
1046 "-p", "--per-file", type="int", help="max warnings per file"
1042 "-p", "--per-file", type="int", help="max warnings per file"
1047 )
1043 )
1048 parser.add_option(
1044 parser.add_option(
1049 "-b",
1045 "-b",
1050 "--blame",
1046 "--blame",
1051 action="store_true",
1047 action="store_true",
1052 help="use annotate to generate blame info",
1048 help="use annotate to generate blame info",
1053 )
1049 )
1054 parser.add_option(
1050 parser.add_option(
1055 "", "--debug", action="store_true", help="show debug information"
1051 "", "--debug", action="store_true", help="show debug information"
1056 )
1052 )
1057 parser.add_option(
1053 parser.add_option(
1058 "",
1054 "",
1059 "--nolineno",
1055 "--nolineno",
1060 action="store_false",
1056 action="store_false",
1061 dest='lineno',
1057 dest='lineno',
1062 help="don't show line numbers",
1058 help="don't show line numbers",
1063 )
1059 )
1064
1060
1065 parser.set_defaults(
1061 parser.set_defaults(
1066 per_file=15, warnings=False, blame=False, debug=False, lineno=True
1062 per_file=15, warnings=False, blame=False, debug=False, lineno=True
1067 )
1063 )
1068 (options, args) = parser.parse_args()
1064 (options, args) = parser.parse_args()
1069
1065
1070 if len(args) == 0:
1066 if len(args) == 0:
1071 check = glob.glob("*")
1067 check = glob.glob("*")
1072 elif args == ['-']:
1068 elif args == ['-']:
1073 # read file list from stdin
1069 # read file list from stdin
1074 check = sys.stdin.read().splitlines()
1070 check = sys.stdin.read().splitlines()
1075 else:
1071 else:
1076 check = args
1072 check = args
1077
1073
1078 _preparepats()
1074 _preparepats()
1079
1075
1080 ret = 0
1076 ret = 0
1081 for f in check:
1077 for f in check:
1082 if not checkfile(
1078 if not checkfile(
1083 f,
1079 f,
1084 maxerr=options.per_file,
1080 maxerr=options.per_file,
1085 warnings=options.warnings,
1081 warnings=options.warnings,
1086 blame=options.blame,
1082 blame=options.blame,
1087 debug=options.debug,
1083 debug=options.debug,
1088 lineno=options.lineno,
1084 lineno=options.lineno,
1089 ):
1085 ):
1090 ret = 1
1086 ret = 1
1091 return ret
1087 return ret
1092
1088
1093
1089
1094 if __name__ == "__main__":
1090 if __name__ == "__main__":
1095 sys.exit(main())
1091 sys.exit(main())
@@ -1,72 +1,133
1 #!/bin/sh
1 #!/bin/sh
2
2
3 set -e
3 set -e
4 set -u
4 set -u
5
5
6 cd `hg root`
6 cd `hg root`
7
7
8 # Many of the individual files that are excluded here confuse pytype
8 # Many of the individual files that are excluded here confuse pytype
9 # because they do a mix of Python 2 and Python 3 things
9 # because they do a mix of Python 2 and Python 3 things
10 # conditionally. There's no good way to help it out with that as far as
10 # conditionally. There's no good way to help it out with that as far as
11 # I can tell, so let's just hide those files from it for now. We should
11 # I can tell, so let's just hide those files from it for now. We should
12 # endeavor to empty this list out over time, as some of these are
12 # endeavor to empty this list out over time, as some of these are
13 # probably hiding real problems.
13 # probably hiding real problems.
14 #
14 #
15 # hgext/absorb.py # [attribute-error]
16 # hgext/bugzilla.py # [pyi-error], [attribute-error]
17 # hgext/convert/bzr.py # [attribute-error]
18 # hgext/convert/cvs.py # [attribute-error], [wrong-arg-types]
19 # hgext/convert/cvsps.py # [attribute-error]
20 # hgext/convert/p4.py # [wrong-arg-types] (__file: mercurial.utils.procutil._pfile -> IO)
21 # hgext/convert/subversion.py # [attribute-error], [name-error], [pyi-error]
22 # hgext/fastannotate/context.py # no linelog.copyfrom()
23 # hgext/fastannotate/formatter.py # [unsupported-operands]
24 # hgext/fsmonitor/__init__.py # [name-error]
25 # hgext/git/__init__.py # [attribute-error]
26 # hgext/githelp.py # [attribute-error] [wrong-arg-types]
27 # hgext/hgk.py # [attribute-error]
28 # hgext/histedit.py # [attribute-error], [wrong-arg-types]
29 # hgext/infinitepush # using bytes for str literal; scheduled for removal
30 # hgext/keyword.py # [attribute-error]
31 # hgext/largefiles/storefactory.py # [attribute-error]
32 # hgext/lfs/__init__.py # [attribute-error]
33 # hgext/narrow/narrowbundle2.py # [attribute-error]
34 # hgext/narrow/narrowcommands.py # [attribute-error], [name-error]
35 # hgext/rebase.py # [attribute-error]
36 # hgext/remotefilelog/basepack.py # [attribute-error], [wrong-arg-count]
37 # hgext/remotefilelog/basestore.py # [attribute-error]
38 # hgext/remotefilelog/contentstore.py # [missing-parameter], [wrong-keyword-args], [attribute-error]
39 # hgext/remotefilelog/fileserverclient.py # [attribute-error]
40 # hgext/remotefilelog/shallowbundle.py # [attribute-error]
41 # hgext/remotefilelog/remotefilectx.py # [module-attr] (This is an actual bug)
42 # hgext/sqlitestore.py # [attribute-error]
43 # hgext/zeroconf/__init__.py # bytes vs str; tests fail on macOS
44 #
15 # mercurial/bundlerepo.py # no vfs and ui attrs on bundlerepo
45 # mercurial/bundlerepo.py # no vfs and ui attrs on bundlerepo
16 # mercurial/context.py # many [attribute-error]
46 # mercurial/context.py # many [attribute-error]
17 # mercurial/crecord.py # tons of [attribute-error], [module-attr]
47 # mercurial/crecord.py # tons of [attribute-error], [module-attr]
18 # mercurial/debugcommands.py # [wrong-arg-types]
48 # mercurial/debugcommands.py # [wrong-arg-types]
19 # mercurial/dispatch.py # initstdio: No attribute ... on TextIO [attribute-error]
49 # mercurial/dispatch.py # initstdio: No attribute ... on TextIO [attribute-error]
20 # mercurial/exchange.py # [attribute-error]
50 # mercurial/exchange.py # [attribute-error]
21 # mercurial/hgweb/hgweb_mod.py # [attribute-error], [name-error], [wrong-arg-types]
51 # mercurial/hgweb/hgweb_mod.py # [attribute-error], [name-error], [wrong-arg-types]
22 # mercurial/hgweb/server.py # [attribute-error], [name-error], [module-attr]
52 # mercurial/hgweb/server.py # [attribute-error], [name-error], [module-attr]
23 # mercurial/hgweb/wsgicgi.py # confused values in os.environ
53 # mercurial/hgweb/wsgicgi.py # confused values in os.environ
24 # mercurial/httppeer.py # [attribute-error], [wrong-arg-types]
54 # mercurial/httppeer.py # [attribute-error], [wrong-arg-types]
25 # mercurial/interfaces # No attribute 'capabilities' on peer [attribute-error]
55 # mercurial/interfaces # No attribute 'capabilities' on peer [attribute-error]
26 # mercurial/keepalive.py # [attribute-error]
56 # mercurial/keepalive.py # [attribute-error]
27 # mercurial/localrepo.py # [attribute-error]
57 # mercurial/localrepo.py # [attribute-error]
28 # mercurial/manifest.py # [unsupported-operands], [wrong-arg-types]
58 # mercurial/manifest.py # [unsupported-operands], [wrong-arg-types]
29 # mercurial/minirst.py # [unsupported-operands], [attribute-error]
59 # mercurial/minirst.py # [unsupported-operands], [attribute-error]
30 # mercurial/pure/osutil.py # [invalid-typevar], [not-callable]
60 # mercurial/pure/osutil.py # [invalid-typevar], [not-callable]
31 # mercurial/pure/parsers.py # [attribute-error]
61 # mercurial/pure/parsers.py # [attribute-error]
32 # mercurial/repoview.py # [attribute-error]
62 # mercurial/repoview.py # [attribute-error]
33 # mercurial/testing/storage.py # tons of [attribute-error]
63 # mercurial/testing/storage.py # tons of [attribute-error]
34 # mercurial/ui.py # [attribute-error], [wrong-arg-types]
35 # mercurial/unionrepo.py # ui, svfs, unfiltered [attribute-error]
64 # mercurial/unionrepo.py # ui, svfs, unfiltered [attribute-error]
36 # mercurial/win32.py # [not-callable]
65 # mercurial/win32.py # [not-callable]
37 # mercurial/wireprotoframing.py # [unsupported-operands], [attribute-error], [import-error]
66 # mercurial/wireprotoframing.py # [unsupported-operands], [attribute-error], [import-error]
38 # mercurial/wireprotov1peer.py # [attribute-error]
67 # mercurial/wireprotov1peer.py # [attribute-error]
39 # mercurial/wireprotov1server.py # BUG?: BundleValueError handler accesses subclass's attrs
68 # mercurial/wireprotov1server.py # BUG?: BundleValueError handler accesses subclass's attrs
40
69
41 # TODO: use --no-cache on test server? Caching the files locally helps during
70 # TODO: use --no-cache on test server? Caching the files locally helps during
42 # development, but may be a hindrance for CI testing.
71 # development, but may be a hindrance for CI testing.
43
72
44 # TODO: include hgext and hgext3rd
73 # TODO: include hgext and hgext3rd
45
74
46 pytype -V 3.7 --keep-going --jobs auto mercurial \
75 pytype -V 3.7 --keep-going --jobs auto \
76 doc/check-seclevel.py hgdemandimport hgext mercurial \
77 -x hgext/absorb.py \
78 -x hgext/bugzilla.py \
79 -x hgext/convert/bzr.py \
80 -x hgext/convert/cvs.py \
81 -x hgext/convert/cvsps.py \
82 -x hgext/convert/p4.py \
83 -x hgext/convert/subversion.py \
84 -x hgext/fastannotate/context.py \
85 -x hgext/fastannotate/formatter.py \
86 -x hgext/fsmonitor/__init__.py \
87 -x hgext/git/__init__.py \
88 -x hgext/githelp.py \
89 -x hgext/hgk.py \
90 -x hgext/histedit.py \
91 -x hgext/infinitepush \
92 -x hgext/keyword.py \
93 -x hgext/largefiles/storefactory.py \
94 -x hgext/lfs/__init__.py \
95 -x hgext/narrow/narrowbundle2.py \
96 -x hgext/narrow/narrowcommands.py \
97 -x hgext/rebase.py \
98 -x hgext/remotefilelog/basepack.py \
99 -x hgext/remotefilelog/basestore.py \
100 -x hgext/remotefilelog/contentstore.py \
101 -x hgext/remotefilelog/fileserverclient.py \
102 -x hgext/remotefilelog/remotefilectx.py \
103 -x hgext/remotefilelog/shallowbundle.py \
104 -x hgext/sqlitestore.py \
105 -x hgext/zeroconf/__init__.py \
47 -x mercurial/bundlerepo.py \
106 -x mercurial/bundlerepo.py \
48 -x mercurial/context.py \
107 -x mercurial/context.py \
49 -x mercurial/crecord.py \
108 -x mercurial/crecord.py \
50 -x mercurial/debugcommands.py \
109 -x mercurial/debugcommands.py \
51 -x mercurial/dispatch.py \
110 -x mercurial/dispatch.py \
52 -x mercurial/exchange.py \
111 -x mercurial/exchange.py \
53 -x mercurial/hgweb/hgweb_mod.py \
112 -x mercurial/hgweb/hgweb_mod.py \
54 -x mercurial/hgweb/server.py \
113 -x mercurial/hgweb/server.py \
55 -x mercurial/hgweb/wsgicgi.py \
114 -x mercurial/hgweb/wsgicgi.py \
56 -x mercurial/httppeer.py \
115 -x mercurial/httppeer.py \
57 -x mercurial/interfaces \
116 -x mercurial/interfaces \
58 -x mercurial/keepalive.py \
117 -x mercurial/keepalive.py \
59 -x mercurial/localrepo.py \
118 -x mercurial/localrepo.py \
60 -x mercurial/manifest.py \
119 -x mercurial/manifest.py \
61 -x mercurial/minirst.py \
120 -x mercurial/minirst.py \
62 -x mercurial/pure/osutil.py \
121 -x mercurial/pure/osutil.py \
63 -x mercurial/pure/parsers.py \
122 -x mercurial/pure/parsers.py \
64 -x mercurial/repoview.py \
123 -x mercurial/repoview.py \
65 -x mercurial/testing/storage.py \
124 -x mercurial/testing/storage.py \
66 -x mercurial/thirdparty \
125 -x mercurial/thirdparty \
67 -x mercurial/ui.py \
68 -x mercurial/unionrepo.py \
126 -x mercurial/unionrepo.py \
69 -x mercurial/win32.py \
127 -x mercurial/win32.py \
70 -x mercurial/wireprotoframing.py \
128 -x mercurial/wireprotoframing.py \
71 -x mercurial/wireprotov1peer.py \
129 -x mercurial/wireprotov1peer.py \
72 -x mercurial/wireprotov1server.py
130 -x mercurial/wireprotov1server.py
131
132 echo 'pytype crashed while generating the following type stubs:'
133 find .pytype/pyi -name '*.pyi' | xargs grep -l '# Caught error' | sort
@@ -1,61 +1,61
1 #include <Python.h>
1 #include <Python.h>
2 #include <assert.h>
2 #include <assert.h>
3 #include <stdlib.h>
3 #include <stdlib.h>
4 #include <unistd.h>
4 #include <unistd.h>
5
5
6 #include <string>
6 #include <string>
7
7
8 #include "pyutil.h"
8 #include "pyutil.h"
9
9
10 extern "C" {
10 extern "C" {
11
11
12 static PYCODETYPE *code;
12 static PYCODETYPE *code;
13
13
14 extern "C" int LLVMFuzzerInitialize(int *argc, char ***argv)
14 extern "C" int LLVMFuzzerInitialize(int *argc, char ***argv)
15 {
15 {
16 contrib::initpy(*argv[0]);
16 contrib::initpy(*argv[0]);
17 code = (PYCODETYPE *)Py_CompileString(R"py(
17 code = (PYCODETYPE *)Py_CompileString(R"py(
18 for inline in (True, False):
18 for inline in (True, False):
19 try:
19 try:
20 index, cache = parsers.parse_index2(data, inline)
20 index, cache = parsers.parse_index2(data, inline)
21 index.slicechunktodensity(list(range(len(index))), 0.5, 262144)
21 index.slicechunktodensity(list(range(len(index))), 0.5, 262144)
22 index.stats()
22 index.stats()
23 index.findsnapshots({}, 0)
23 index.findsnapshots({}, 0, len(index) - 1)
24 10 in index
24 10 in index
25 for rev in range(len(index)):
25 for rev in range(len(index)):
26 index.reachableroots(0, [len(index)-1], [rev])
26 index.reachableroots(0, [len(index)-1], [rev])
27 node = index[rev][7]
27 node = index[rev][7]
28 partial = index.shortest(node)
28 partial = index.shortest(node)
29 index.partialmatch(node[:partial])
29 index.partialmatch(node[:partial])
30 index.deltachain(rev, None, True)
30 index.deltachain(rev, None, True)
31 except Exception as e:
31 except Exception as e:
32 pass
32 pass
33 # uncomment this print if you're editing this Python code
33 # uncomment this print if you're editing this Python code
34 # to debug failures.
34 # to debug failures.
35 # print(e)
35 # print(e)
36 )py",
36 )py",
37 "fuzzer", Py_file_input);
37 "fuzzer", Py_file_input);
38 return 0;
38 return 0;
39 }
39 }
40
40
41 int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size)
41 int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size)
42 {
42 {
43 // Don't allow fuzzer inputs larger than 60k, since we'll just bog
43 // Don't allow fuzzer inputs larger than 60k, since we'll just bog
44 // down and not accomplish much.
44 // down and not accomplish much.
45 if (Size > 60000) {
45 if (Size > 60000) {
46 return 0;
46 return 0;
47 }
47 }
48 PyObject *text =
48 PyObject *text =
49 PyBytes_FromStringAndSize((const char *)Data, (Py_ssize_t)Size);
49 PyBytes_FromStringAndSize((const char *)Data, (Py_ssize_t)Size);
50 PyObject *locals = PyDict_New();
50 PyObject *locals = PyDict_New();
51 PyDict_SetItemString(locals, "data", text);
51 PyDict_SetItemString(locals, "data", text);
52 PyObject *res = PyEval_EvalCode(code, contrib::pyglobals(), locals);
52 PyObject *res = PyEval_EvalCode(code, contrib::pyglobals(), locals);
53 if (!res) {
53 if (!res) {
54 PyErr_Print();
54 PyErr_Print();
55 }
55 }
56 Py_XDECREF(res);
56 Py_XDECREF(res);
57 Py_DECREF(locals);
57 Py_DECREF(locals);
58 Py_DECREF(text);
58 Py_DECREF(text);
59 return 0; // Non-zero return values are reserved for future use.
59 return 0; // Non-zero return values are reserved for future use.
60 }
60 }
61 }
61 }
@@ -1,141 +1,143
1 stages:
1 stages:
2 - tests
2 - tests
3
3
4 image: registry.heptapod.net/mercurial/ci-images/mercurial-core:$HG_CI_IMAGE_TAG
4 image: registry.heptapod.net/mercurial/ci-images/mercurial-core:$HG_CI_IMAGE_TAG
5
5
6 variables:
6 variables:
7 PYTHON: python
7 PYTHON: python
8 TEST_HGMODULEPOLICY: "allow"
8 TEST_HGMODULEPOLICY: "allow"
9 HG_CI_IMAGE_TAG: "v1.0"
9 HG_CI_IMAGE_TAG: "v1.0"
10 TEST_HGTESTS_ALLOW_NETIO: "0"
10 TEST_HGTESTS_ALLOW_NETIO: "0"
11
11
12 .all_template: &all
12 .all_template: &all
13 when: on_success
13 when: on_success
14
14
15 .runtests_template: &runtests
15 .runtests_template: &runtests
16 <<: *all
16 <<: *all
17 stage: tests
17 stage: tests
18 # The runner made a clone as root.
18 # The runner made a clone as root.
19 # We make a new clone owned by the user running the step.
19 # We make a new clone owned by the user running the step.
20 before_script:
20 before_script:
21 - hg clone . /tmp/mercurial-ci/ --noupdate --config phases.publish=no
21 - hg clone . /tmp/mercurial-ci/ --noupdate --config phases.publish=no
22 - hg -R /tmp/mercurial-ci/ update `hg log --rev '.' --template '{node}'`
22 - hg -R /tmp/mercurial-ci/ update `hg log --rev '.' --template '{node}'`
23 - cd /tmp/mercurial-ci/
23 - cd /tmp/mercurial-ci/
24 - ls -1 tests/test-check-*.* > /tmp/check-tests.txt
24 - ls -1 tests/test-check-*.* > /tmp/check-tests.txt
25 - black --version
25 - black --version
26 - clang-format --version
26 - clang-format --version
27 script:
27 script:
28 - echo "python used, $PYTHON"
28 - echo "python used, $PYTHON"
29 - echo "$RUNTEST_ARGS"
29 - echo "$RUNTEST_ARGS"
30 - HGTESTS_ALLOW_NETIO="$TEST_HGTESTS_ALLOW_NETIO" HGMODULEPOLICY="$TEST_HGMODULEPOLICY" "$PYTHON" tests/run-tests.py --color=always $RUNTEST_ARGS
30 - HGTESTS_ALLOW_NETIO="$TEST_HGTESTS_ALLOW_NETIO" HGMODULEPOLICY="$TEST_HGMODULEPOLICY" "$PYTHON" tests/run-tests.py --color=always $RUNTEST_ARGS
31
31
32 checks:
32 checks:
33 <<: *runtests
33 <<: *runtests
34 variables:
34 variables:
35 RUNTEST_ARGS: "--time --test-list /tmp/check-tests.txt"
35 RUNTEST_ARGS: "--time --test-list /tmp/check-tests.txt"
36 PYTHON: python3
36 PYTHON: python3
37 CI_CLEVER_CLOUD_FLAVOR: S
37 CI_CLEVER_CLOUD_FLAVOR: S
38
38
39 rust-cargo-test:
39 rust-cargo-test:
40 <<: *all
40 <<: *all
41 stage: tests
41 stage: tests
42 script:
42 script:
43 - echo "python used, $PYTHON"
43 - echo "python used, $PYTHON"
44 - make rust-tests
44 - make rust-tests
45 - make cargo-clippy
45 variables:
46 variables:
46 PYTHON: python3
47 PYTHON: python3
47 CI_CLEVER_CLOUD_FLAVOR: S
48 CI_CLEVER_CLOUD_FLAVOR: S
48
49
49 test-c:
50 test-c:
50 <<: *runtests
51 <<: *runtests
51 variables:
52 variables:
52 RUNTEST_ARGS: " --no-rust --blacklist /tmp/check-tests.txt"
53 RUNTEST_ARGS: " --no-rust --blacklist /tmp/check-tests.txt"
53 PYTHON: python3
54 PYTHON: python3
54 TEST_HGMODULEPOLICY: "c"
55 TEST_HGMODULEPOLICY: "c"
55 TEST_HGTESTS_ALLOW_NETIO: "1"
56 TEST_HGTESTS_ALLOW_NETIO: "1"
56
57
57 test-pure:
58 test-pure:
58 <<: *runtests
59 <<: *runtests
59 variables:
60 variables:
60 RUNTEST_ARGS: "--pure --blacklist /tmp/check-tests.txt"
61 RUNTEST_ARGS: "--pure --blacklist /tmp/check-tests.txt"
61 PYTHON: python3
62 PYTHON: python3
62 TEST_HGMODULEPOLICY: "py"
63 TEST_HGMODULEPOLICY: "py"
63
64
64 test-rust:
65 test-rust:
65 <<: *runtests
66 <<: *runtests
66 variables:
67 variables:
67 HGWITHRUSTEXT: cpython
68 HGWITHRUSTEXT: cpython
68 RUNTEST_ARGS: "--rust --blacklist /tmp/check-tests.txt"
69 RUNTEST_ARGS: "--rust --blacklist /tmp/check-tests.txt"
69 PYTHON: python3
70 PYTHON: python3
70 TEST_HGMODULEPOLICY: "rust+c"
71 TEST_HGMODULEPOLICY: "rust+c"
71
72
72 test-rhg:
73 test-rhg:
73 <<: *runtests
74 <<: *runtests
74 variables:
75 variables:
75 HGWITHRUSTEXT: cpython
76 HGWITHRUSTEXT: cpython
76 RUNTEST_ARGS: "--rust --rhg --blacklist /tmp/check-tests.txt"
77 RUNTEST_ARGS: "--rust --rhg --blacklist /tmp/check-tests.txt"
77 PYTHON: python3
78 PYTHON: python3
78 TEST_HGMODULEPOLICY: "rust+c"
79 TEST_HGMODULEPOLICY: "rust+c"
79
80
80 test-chg:
81 test-chg:
81 <<: *runtests
82 <<: *runtests
82 variables:
83 variables:
83 PYTHON: python3
84 PYTHON: python3
84 RUNTEST_ARGS: "--blacklist /tmp/check-tests.txt --chg"
85 RUNTEST_ARGS: "--blacklist /tmp/check-tests.txt --chg"
85 TEST_HGMODULEPOLICY: "c"
86 TEST_HGMODULEPOLICY: "c"
86
87
87 check-pytype:
88 check-pytype:
88 extends: .runtests_template
89 extends: .runtests_template
89 before_script:
90 before_script:
90 - hg clone . /tmp/mercurial-ci/ --noupdate --config phases.publish=no
91 - hg clone . /tmp/mercurial-ci/ --noupdate --config phases.publish=no
91 - hg -R /tmp/mercurial-ci/ update `hg log --rev '.' --template '{node}'`
92 - hg -R /tmp/mercurial-ci/ update `hg log --rev '.' --template '{node}'`
92 - cd /tmp/mercurial-ci/
93 - cd /tmp/mercurial-ci/
93 - make local PYTHON=$PYTHON
94 - make local PYTHON=$PYTHON
94 - $PYTHON -m pip install --user -U libcst==0.3.20 pytype==2022.03.29
95 - $PYTHON -m pip install --user -U libcst==0.3.20 pytype==2022.11.18
96 - ./contrib/setup-pytype.sh
95 script:
97 script:
96 - echo "Entering script section"
98 - echo "Entering script section"
97 - sh contrib/check-pytype.sh
99 - sh contrib/check-pytype.sh
98 variables:
100 variables:
99 PYTHON: python3
101 PYTHON: python3
100
102
101 # `sh.exe --login` sets a couple of extra environment variables that are defined
103 # `sh.exe --login` sets a couple of extra environment variables that are defined
102 # in the MinGW shell, but switches CWD to /home/$username. The previous value
104 # in the MinGW shell, but switches CWD to /home/$username. The previous value
103 # is stored in OLDPWD. Of the added variables, MSYSTEM is crucial to running
105 # is stored in OLDPWD. Of the added variables, MSYSTEM is crucial to running
104 # run-tests.py - it is needed to make run-tests.py generate a `python3` script
106 # run-tests.py - it is needed to make run-tests.py generate a `python3` script
105 # that satisfies the various shebang lines and delegates to `py -3`.
107 # that satisfies the various shebang lines and delegates to `py -3`.
106 .window_runtests_template: &windows_runtests
108 .window_runtests_template: &windows_runtests
107 <<: *all
109 <<: *all
108 when: manual # we don't have any Windows runners anymore at the moment
110 when: manual # we don't have any Windows runners anymore at the moment
109 stage: tests
111 stage: tests
110 before_script:
112 before_script:
111 - C:/MinGW/msys/1.0/bin/sh.exe --login -c 'cd "$OLDPWD" && ls -1 tests/test-check-*.* > C:/Temp/check-tests.txt'
113 - C:/MinGW/msys/1.0/bin/sh.exe --login -c 'cd "$OLDPWD" && ls -1 tests/test-check-*.* > C:/Temp/check-tests.txt'
112 # TODO: find/install cvs, bzr, perforce, gpg, sqlite3
114 # TODO: find/install cvs, bzr, perforce, gpg, sqlite3
113
115
114 script:
116 script:
115 - echo "Entering script section"
117 - echo "Entering script section"
116 - echo "python used, $Env:PYTHON"
118 - echo "python used, $Env:PYTHON"
117 - Invoke-Expression "$Env:PYTHON -V"
119 - Invoke-Expression "$Env:PYTHON -V"
118 - Invoke-Expression "$Env:PYTHON -m black --version"
120 - Invoke-Expression "$Env:PYTHON -m black --version"
119 - echo "$Env:RUNTEST_ARGS"
121 - echo "$Env:RUNTEST_ARGS"
120 - echo "$Env:TMP"
122 - echo "$Env:TMP"
121 - echo "$Env:TEMP"
123 - echo "$Env:TEMP"
122
124
123 - C:/MinGW/msys/1.0/bin/sh.exe --login -c 'cd "$OLDPWD" && HGTESTS_ALLOW_NETIO="$TEST_HGTESTS_ALLOW_NETIO" HGMODULEPOLICY="$TEST_HGMODULEPOLICY" $PYTHON tests/run-tests.py --color=always $RUNTEST_ARGS'
125 - C:/MinGW/msys/1.0/bin/sh.exe --login -c 'cd "$OLDPWD" && HGTESTS_ALLOW_NETIO="$TEST_HGTESTS_ALLOW_NETIO" HGMODULEPOLICY="$TEST_HGMODULEPOLICY" $PYTHON tests/run-tests.py --color=always $RUNTEST_ARGS'
124
126
125 windows:
127 windows:
126 <<: *windows_runtests
128 <<: *windows_runtests
127 tags:
129 tags:
128 - windows
130 - windows
129 variables:
131 variables:
130 TEST_HGMODULEPOLICY: "c"
132 TEST_HGMODULEPOLICY: "c"
131 RUNTEST_ARGS: "--blacklist C:/Temp/check-tests.txt"
133 RUNTEST_ARGS: "--blacklist C:/Temp/check-tests.txt"
132 PYTHON: py -3
134 PYTHON: py -3
133
135
134 windows-pyox:
136 windows-pyox:
135 <<: *windows_runtests
137 <<: *windows_runtests
136 tags:
138 tags:
137 - windows
139 - windows
138 variables:
140 variables:
139 TEST_HGMODULEPOLICY: "c"
141 TEST_HGMODULEPOLICY: "c"
140 RUNTEST_ARGS: "--blacklist C:/Temp/check-tests.txt --pyoxidized"
142 RUNTEST_ARGS: "--blacklist C:/Temp/check-tests.txt --pyoxidized"
141 PYTHON: py -3
143 PYTHON: py -3
@@ -1,4230 +1,4245
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median average. If not set only the best timing is reported
12 worst, median, and average. If not set, only the best timing is reported
12 worst, median, and average. If not set, only the best timing is reported
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of second to wait before any group of runs (default: 1)
16 number of second to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
18 ``pre-run``
19 number of run to perform before starting measurement.
19 number of run to perform before starting measurement.
20
20
21 ``profile-benchmark``
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
22 Enable profiling for the benchmarked section.
23 (The first iteration is benchmarked)
23 (The first iteration is benchmarked)
24
24
25 ``run-limits``
25 ``run-limits``
26 Control the number of runs each benchmark will perform. The option value
26 Control the number of runs each benchmark will perform. The option value
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 conditions are considered in order with the following logic:
28 conditions are considered in order with the following logic:
29
29
30 If benchmark has been running for <time> seconds, and we have performed
30 If benchmark has been running for <time> seconds, and we have performed
31 <numberofrun> iterations, stop the benchmark,
31 <numberofrun> iterations, stop the benchmark,
32
32
33 The default value is: `3.0-100, 10.0-3`
33 The default value is: `3.0-100, 10.0-3`
34
34
35 ``stub``
35 ``stub``
36 When set, benchmarks will only be run once, useful for testing
36 When set, benchmarks will only be run once, useful for testing
37 (default: off)
37 (default: off)
38 '''
38 '''
39
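# For example (mirroring the documented default), run-limits can be set
# in an hgrc as:
#
#   [perf]
#   run-limits = 3.0-100, 10.0-3
#
# meaning: stop once 100 runs have been made after 3.0 seconds, or 3 runs
# after 10.0 seconds.
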
39
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
46 # - make historical perf commands work correctly with as wide a range
46 # - make historical perf commands work correctly with as wide a range
47 # of Mercurial versions as possible
47 # of Mercurial versions as possible
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
50 # - make recent perf commands for historical features work correctly
50 # - make recent perf commands for historical features work correctly
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
55 # Mercurial
56
56
57 import contextlib
57 import contextlib
58 import functools
58 import functools
59 import gc
59 import gc
60 import os
60 import os
61 import random
61 import random
62 import shutil
62 import shutil
63 import struct
63 import struct
64 import sys
64 import sys
65 import tempfile
65 import tempfile
66 import threading
66 import threading
67 import time
67 import time
68
68
69 import mercurial.revlog
69 import mercurial.revlog
70 from mercurial import (
70 from mercurial import (
71 changegroup,
71 changegroup,
72 cmdutil,
72 cmdutil,
73 commands,
73 commands,
74 copies,
74 copies,
75 error,
75 error,
76 extensions,
76 extensions,
77 hg,
77 hg,
78 mdiff,
78 mdiff,
79 merge,
79 merge,
80 util,
80 util,
81 )
81 )
82
82
83 # for "historical portability":
83 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
84 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
85 # failure, because these aren't available with early Mercurial
86 try:
86 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
88 except ImportError:
89 pass
89 pass
90 try:
90 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
92 except ImportError:
93 pass
93 pass
94 try:
94 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96
96
97 dir(registrar) # forcibly load it
97 dir(registrar) # forcibly load it
98 except ImportError:
98 except ImportError:
99 registrar = None
99 registrar = None
100 try:
100 try:
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 except ImportError:
102 except ImportError:
103 pass
103 pass
104 try:
104 try:
105 from mercurial.utils import repoviewutil # since 5.0
105 from mercurial.utils import repoviewutil # since 5.0
106 except ImportError:
106 except ImportError:
107 repoviewutil = None
107 repoviewutil = None
108 try:
108 try:
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 except ImportError:
110 except ImportError:
111 pass
111 pass
112 try:
112 try:
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 except ImportError:
114 except ImportError:
115 pass
115 pass
116
116
117 try:
117 try:
118 from mercurial import profiling
118 from mercurial import profiling
119 except ImportError:
119 except ImportError:
120 profiling = None
120 profiling = None
121
121
122 try:
122 try:
123 from mercurial.revlogutils import constants as revlog_constants
123 from mercurial.revlogutils import constants as revlog_constants
124
124
125 perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')
125 perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')
126
126
127 def revlog(opener, *args, **kwargs):
127 def revlog(opener, *args, **kwargs):
128 return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)
128 return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)
129
129
130
130
131 except (ImportError, AttributeError):
131 except (ImportError, AttributeError):
132 perf_rl_kind = None
132 perf_rl_kind = None
133
133
134 def revlog(opener, *args, **kwargs):
134 def revlog(opener, *args, **kwargs):
135 return mercurial.revlog.revlog(opener, *args, **kwargs)
135 return mercurial.revlog.revlog(opener, *args, **kwargs)
136
136
137
137
138 def identity(a):
138 def identity(a):
139 return a
139 return a
140
140
141
141
142 try:
142 try:
143 from mercurial import pycompat
143 from mercurial import pycompat
144
144
145 getargspec = pycompat.getargspec # added to module after 4.5
145 getargspec = pycompat.getargspec # added to module after 4.5
146 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
146 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
147 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
147 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
148 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
148 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
149 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
149 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
150 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
150 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
151 if pycompat.ispy3:
151 if pycompat.ispy3:
152 _maxint = sys.maxsize # per py3 docs for replacing maxint
152 _maxint = sys.maxsize # per py3 docs for replacing maxint
153 else:
153 else:
154 _maxint = sys.maxint
154 _maxint = sys.maxint
155 except (NameError, ImportError, AttributeError):
155 except (NameError, ImportError, AttributeError):
156 import inspect
156 import inspect
157
157
158 getargspec = inspect.getargspec
158 getargspec = inspect.getargspec
159 _byteskwargs = identity
159 _byteskwargs = identity
160 _bytestr = str
160 _bytestr = str
161 fsencode = identity # no py3 support
161 fsencode = identity # no py3 support
162 _maxint = sys.maxint # no py3 support
162 _maxint = sys.maxint # no py3 support
163 _sysstr = lambda x: x # no py3 support
163 _sysstr = lambda x: x # no py3 support
164 _xrange = xrange
164 _xrange = xrange
165
165
166 try:
166 try:
167 # 4.7+
167 # 4.7+
168 queue = pycompat.queue.Queue
168 queue = pycompat.queue.Queue
169 except (NameError, AttributeError, ImportError):
169 except (NameError, AttributeError, ImportError):
170 # <4.7.
170 # <4.7.
171 try:
171 try:
172 queue = pycompat.queue
172 queue = pycompat.queue
173 except (NameError, AttributeError, ImportError):
173 except (NameError, AttributeError, ImportError):
174 import Queue as queue
174 import Queue as queue
175
175
176 try:
176 try:
177 from mercurial import logcmdutil
177 from mercurial import logcmdutil
178
178
179 makelogtemplater = logcmdutil.maketemplater
179 makelogtemplater = logcmdutil.maketemplater
180 except (AttributeError, ImportError):
180 except (AttributeError, ImportError):
181 try:
181 try:
182 makelogtemplater = cmdutil.makelogtemplater
182 makelogtemplater = cmdutil.makelogtemplater
183 except (AttributeError, ImportError):
183 except (AttributeError, ImportError):
184 makelogtemplater = None
184 makelogtemplater = None
185
185
186 # for "historical portability":
186 # for "historical portability":
187 # define util.safehasattr forcibly, because util.safehasattr has been
187 # define util.safehasattr forcibly, because util.safehasattr has been
188 # available since 1.9.3 (or 94b200a11cf7)
188 # available since 1.9.3 (or 94b200a11cf7)
189 _undefined = object()
189 _undefined = object()
190
190
191
191
192 def safehasattr(thing, attr):
192 def safehasattr(thing, attr):
193 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
193 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
194
194
195
195
196 setattr(util, 'safehasattr', safehasattr)
196 setattr(util, 'safehasattr', safehasattr)
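
# Hedged illustration (hypothetical class, not part of perf.py): the
# sentinel-based lookup above accepts bytes attribute names via _sysstr() and
# treats an attribute whose value is None as present:
#
#     >>> class C:
#     ...     attr = None
#     >>> safehasattr(C, b'attr')
#     True
#     >>> safehasattr(C, b'missing')
#     False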

# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    util.timer = time.clock
else:
    util.timer = time.time

# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(
    cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
)

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(
    cmdutil,
    "debugrevlogopts",
    getattr(
        commands,
        "debugrevlogopts",
        [
            (b'c', b'changelog', False, b'open changelog'),
            (b'm', b'manifest', False, b'open manifest'),
            (b'', b'dir', False, b'open directory manifest'),
        ],
    ),
)

cmdtable = {}


# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    return cmd.split(b"|")


if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if 'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)


else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator

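# Hedged sketch of how the @command decorator resolved above is used by every
# benchmark in this file; `perf::example` is a hypothetical command name:
#
#     @command(b'perf::example|perfexample', formatteropts)
#     def perfexample(ui, repo, **opts):
#         opts = _byteskwargs(opts)
#         timer, fm = gettimer(ui, opts)
#         timer(lambda: len(repo))
#         fm.end()
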
try:
    import mercurial.registrar
    import mercurial.configitems

    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
except (ImportError, AttributeError):
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
    )

def getlen(ui):
    if ui.configbool(b"perf", b"stub", False):
        return lambda x: 1
    return len


class noop:
    """dummy context manager"""

    def __enter__(self):
        pass

    def __exit__(self, *args):
        pass


NOOPCTX = noop()

def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of the formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter:
            """Minimized composition of baseformatter and plainformatter"""

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm

def stub_timer(fm, func, setup=None, title=None):
    if setup is not None:
        setup()
    func()


@contextlib.contextmanager
def timeone():
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))

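# Hedged usage sketch for timeone(): the context manager appends exactly one
# (wallclock, user-cpu, system-cpu) triple to the yielded list when the block
# exits, so callers read it afterwards:
#
#     with timeone() as item:
#         func()
#     wall, user, system = item[0]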

# list of stop conditions (elapsed time, minimal run count)
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
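# Worked example of the defaults above: after each run, _timer() checks these
# pairs in order, so a fast benchmark stops once it has been running for 3.0s
# *and* completed 100 iterations, while a slow one stops after 10.0s provided
# at least 3 iterations have completed.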


def _timer(
    fm,
    func,
    setup=None,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    for i in range(prerun):
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r, displayall=displayall)


def formatone(fm, timings, title=None, result=None, displayall=False):
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)


# utilities for historical portability


def getint(ui, section, name, default):
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    v = ui.config(section, name, None)
    if v is None:
        return default
    try:
        return int(v)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, v)
        )


def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has the 'name' attribute before subsequent setattr

    This function aborts if 'obj' doesn't have the 'name' attribute at
    runtime. This avoids overlooking a future removal of the attribute,
    which would silently break the assumptions of performance measurement.

    This function returns an object that can (1) assign a new value to the
    attribute and (2) restore its original value.

    If 'ignoremissing' is true, a missing 'name' attribute doesn't cause
    an abort, and this function returns None. This is useful to examine
    an attribute which isn't ensured in all Mercurial versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(
            (
                b"missing attribute %s of %s might break assumption"
                b" of performance measurement"
            )
            % (name, obj)
        )

    origvalue = getattr(obj, _sysstr(name))

    class attrutil:
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()

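# Hedged usage sketch for safeattrsetter(), mirroring how gettimer() redirects
# output above; restore() puts the original attribute value back:
#
#     uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
#     if uifout:
#         uifout.set(ui.ferr)  # benchmark output goes to stderr
#         ...
#         uifout.restore()     # undo the redirection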

# utilities to examine internal API changes


def getbranchmapsubsettable():
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    # - repoviewutil since 5.0
    for mod in (branchmap, repoview, repoviewutil):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )


def getsvfs(repo):
    """Return appropriate object to access files under .hg/store"""
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    svfs = getattr(repo, 'svfs', None)
    if svfs:
        return svfs
    else:
        return getattr(repo, 'sopener')


def getvfs(repo):
    """Return appropriate object to access files under .hg"""
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    vfs = getattr(repo, 'vfs', None)
    if vfs:
        return vfs
    else:
        return getattr(repo, 'opener')


def repocleartagscachefunc(repo):
    """Return the function to clear the tags cache according to repo internal API"""
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # the correct way to clear the tags cache, because existing code
        # paths expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such a case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")


# utilities to clear cache


def clearfilecache(obj, attrname):
    unfiltered = getattr(obj, 'unfiltered', None)
    if unfiltered is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)


def clearchangelog(repo):
    if repo is not repo.unfiltered():
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')


# perf commands


@command(b'perf::walk|perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(
        lambda: len(
            list(
                repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
            )
        )
    )
    fm.end()


@command(b'perf::annotate|perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()


@command(
    b'perf::status|perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of tracked files is requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                 False))))
    timer, fm = gettimer(ui, opts)
    if opts[b'dirstate']:
        dirstate = repo.dirstate
        m = scmutil.matchall(repo)
        unknown = opts[b'unknown']

        def status_dirstate():
            s = dirstate.status(
                m, subrepos=[], ignored=False, clean=False, unknown=unknown
            )
            sum(map(bool, s))

        if util.safehasattr(dirstate, 'running_status'):
            with dirstate.running_status(repo):
                timer(status_dirstate)
            dirstate.invalidate()
        else:
            timer(status_dirstate)
    else:
        timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
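
# Example invocations (assuming this file is enabled as the `perf` extension):
#
#     $ hg perf::status
#     $ hg perf::status --unknown
#     $ hg perf::status --dirstate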


@command(b'perf::addremove|perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    try:
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        if 'uipathfn' in getargspec(scmutil.addremove).args:
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()


def clearcaches(cl):
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # <= hg-5.2
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None


@command(b'perf::heads|perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def s():
        clearcaches(cl)

    def d():
        len(cl.headrevs())

    timer(d, setup=s)
    fm.end()


@command(
    b'perf::tags|perftags',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perftags(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']

    def s():
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()

    def t():
        return len(repo.tags())

    timer(t, setup=s)
    fm.end()


@command(b'perf::ancestors|perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def d():
        for a in repo.changelog.ancestors(heads):
            pass

    timer(d)
    fm.end()


@command(b'perf::ancestorset|perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in s

    timer(d)
    fm.end()

@command(
    b'perf::delta-find',
    revlogopts + formatteropts,
    b'-c|-m|FILE REV',
)
def perf_delta_find(ui, repo, arg_1, arg_2=None, **opts):
    """benchmark the process of finding a valid delta for a revlog revision

    When a revlog receives a new revision (e.g. from a commit, or from an
    incoming bundle), it searches for a suitable delta-base to produce a delta.
    This perf command measures how much time we spend in this process. It
    operates on an already stored revision.

    See `hg help debug-delta-find` for another related command.
    """
    from mercurial import revlogutils
    import mercurial.revlogutils.deltas as deltautil

    opts = _byteskwargs(opts)
    if arg_2 is None:
        file_ = None
        rev = arg_1
    else:
        file_ = arg_1
        rev = arg_2

    repo = repo.unfiltered()

    timer, fm = gettimer(ui, opts)

    rev = int(rev)

    revlog = cmdutil.openrevlog(repo, b'perf::delta-find', file_, opts)

    deltacomputer = deltautil.deltacomputer(revlog)

    node = revlog.node(rev)
    p1r, p2r = revlog.parentrevs(rev)
    p1 = revlog.node(p1r)
    p2 = revlog.node(p2r)
    full_text = revlog.revision(rev)
    textlen = len(full_text)
    cachedelta = None
    flags = revlog.flags(rev)

    revinfo = revlogutils.revisioninfo(
        node,
        p1,
        p2,
        [full_text],  # btext
        textlen,
        cachedelta,
        flags,
    )

    # Note: we should probably purge the potential caches (like the full
    # manifest cache) between runs.
    def find_one():
        with revlog._datafp() as fh:
            deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)

    timer(find_one)
    fm.end()


@command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path"""
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)

    try:
        from mercurial.utils.urlutil import get_unique_pull_path_obj

        path = get_unique_pull_path_obj(b'perfdiscovery', ui, path)
    except ImportError:
        try:
            from mercurial.utils.urlutil import get_unique_pull_path

            path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
        except ImportError:
            path = ui.expandpath(path)

    def s():
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()

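# Example invocation (PATH can be any reachable peer; a sibling clone is
# assumed here):
#
#     $ hg perf::discovery ../some-clone
#
# The nested try/except above probes for the newest path API first and falls
# back one API generation at a time, the usual "historical portability"
# pattern in this file.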

@command(
    b'perf::bookmarks|perfbookmarks',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def s():
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')

    def d():
        repo._bookmarks

    timer(d, setup=s)
    fm.end()


@command(
    b'perf::bundle',
    [
        (
            b'r',
            b'rev',
            [],
            b'changesets to bundle',
            b'REV',
        ),
        (
            b't',
            b'type',
            b'none',
            b'bundlespec to use (see `hg help bundlespec`)',
            b'TYPE',
        ),
    ]
    + formatteropts,
    b'REVS',
)
def perfbundle(ui, repo, *revs, **opts):
    """benchmark the creation of a bundle from a repository

    For now, this only supports "none" compression.
    """
    try:
        from mercurial import bundlecaches

        parsebundlespec = bundlecaches.parsebundlespec
    except ImportError:
        from mercurial import exchange

        parsebundlespec = exchange.parsebundlespec

    from mercurial import discovery
    from mercurial import bundle2

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    cl = repo.changelog
    revs = list(revs)
    revs.extend(opts.get(b'rev', ()))
    revs = scmutil.revrange(repo, revs)
    if not revs:
        raise error.Abort(b"no revision specified")
    # make it a consistent set (i.e. without topological gaps)
    old_len = len(revs)
    revs = list(repo.revs(b"%ld::%ld", revs, revs))
    if old_len != len(revs):
        new_count = len(revs) - old_len
        msg = b"add %d new revisions to make it a consistent set\n"
        ui.write_err(msg % new_count)

    targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
    bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
    outgoing = discovery.outgoing(repo, bases, targets)

    bundle_spec = opts.get(b'type')

    bundle_spec = parsebundlespec(repo, bundle_spec, strict=False)

    cgversion = bundle_spec.params.get(b"cg.version")
    if cgversion is None:
        if bundle_spec.version == b'v1':
            cgversion = b'01'
        if bundle_spec.version == b'v2':
            cgversion = b'02'
    if cgversion not in changegroup.supportedoutgoingversions(repo):
        err = b"repository does not support bundle version %s"
        raise error.Abort(err % cgversion)

    if cgversion == b'01':  # bundle1
        bversion = b'HG10' + bundle_spec.wirecompression
        bcompression = None
    elif cgversion in (b'02', b'03'):
        bversion = b'HG20'
        bcompression = bundle_spec.wirecompression
    else:
        err = b'perf::bundle: unexpected changegroup version %s'
        raise error.ProgrammingError(err % cgversion)

    if bcompression is None:
        bcompression = b'UN'

    if bcompression != b'UN':
        err = b'perf::bundle: compression currently unsupported: %s'
        raise error.ProgrammingError(err % bcompression)

    def do_bundle():
        bundle2.writenewbundle(
            ui,
            repo,
            b'perf::bundle',
            os.devnull,
            bversion,
            outgoing,
            bundle_spec.params,
        )

    timer(do_bundle)
    fm.end()

1156
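# The revset dance above (`%ld::%ld`) deserves a small illustration. A
# minimal pure-Python sketch, independent of Mercurial's revset engine: a
# node is kept when it is a seed, or when it lies between two seeds (an
# ancestor of one and a descendant of another), which is what closes the
# topological gaps. The toy DAG encoding (node -> parents) and every name
# below are hypothetical.
def _example_consistent_set(parents, seeds):
    def ancestors(node):
        seen = set()
        stack = [node]
        while stack:
            for p in parents.get(stack.pop(), ()):
                if p not in seen:
                    seen.add(p)
                    stack.append(p)
        return seen

    seeds = set(seeds)
    return {
        n
        for n in parents
        if n in seeds
        or (seeds & ancestors(n) and any(n in ancestors(s) for s in seeds))
    }

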
@command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()


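# Every helper above that ends in `pass` exists only to drain an iterator so
# the underlying reads get exercised. A minimal generic sketch of the same
# "consume without storing" idiom, using only the standard library
# (hypothetical helper name):
from collections import deque


def _example_drain(iterator):
    deque(iterator, maxlen=0)  # exhaust the iterator, keeping nothing

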
@command(
    b'perf::changegroupchangelog|perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pulls).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()


@command(b'perf::dirs|perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate

    def d():
        dirstate.hasdir(b'a')
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    timer(d)
    fm.end()


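# The `del dirstate._map._dirs` dance above forces the lazily-computed
# directory cache to be rebuilt on the next `hasdir` call. A minimal generic
# sketch of that cache-busting idiom (hypothetical helper name):
def _example_clear_cached_attr(obj, name):
    try:
        delattr(obj, name)  # drop the memoized value, if any
    except AttributeError:
        pass  # the cache was never populated; nothing to clear

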
@command(
    b'perf::dirstate|perfdirstate',
    [
        (
            b'',
            b'iteration',
            None,
            b'benchmark a full iteration for the dirstate',
        ),
        (
            b'',
            b'contains',
            None,
            b'benchmark a large number of `nf in dirstate` calls',
        ),
    ]
    + formatteropts,
)
def perfdirstate(ui, repo, **opts):
    """benchmark the time of various dirstate operations

    By default benchmark the time necessary to load a dirstate from scratch.
    The dirstate is loaded to the point where a "contains" request can be
    answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate

    if opts[b'iteration'] and opts[b'contains']:
        msg = b'only specify one of --iteration or --contains'
        raise error.Abort(msg)

    if opts[b'iteration']:
        setup = None
        dirstate = repo.dirstate

        def d():
            for f in dirstate:
                pass

    elif opts[b'contains']:
        setup = None
        dirstate = repo.dirstate
        allfiles = list(dirstate)
        # also add file paths that will be "missing" from the dirstate
        allfiles.extend([f[::-1] for f in allfiles])

        def d():
            for f in allfiles:
                f in dirstate

    else:

        def setup():
            repo.dirstate.invalidate()

        def d():
            b"a" in repo.dirstate

    timer(d, setup=setup)
    fm.end()


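# The --contains benchmark above mixes guaranteed hits with near-guaranteed
# misses by probing each tracked path alongside its reversal. A minimal
# standalone sketch of that trick (hypothetical function name):
def _example_hits_and_misses(tracked):
    probes = list(tracked)
    probes.extend(p[::-1] for p in tracked)  # reversed paths: likely misses
    return probes

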
@command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo.dirstate.hasdir(b"a")

    def setup():
        try:
            del repo.dirstate._map._dirs
        except AttributeError:
            pass

    def d():
        repo.dirstate.hasdir(b"a")

    timer(d, setup=setup)
    fm.end()


@command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    dirstate._map.filefoldmap.get(b'a')

    def setup():
        del dirstate._map.filefoldmap

    def d():
        dirstate._map.filefoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()


@command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    dirstate._map.dirfoldmap.get(b'a')

    def setup():
        del dirstate._map.dirfoldmap
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    def d():
        dirstate._map.dirfoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()


@command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark the time it takes to write a dirstate on disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b"a" in ds

    def setup():
        ds._dirty = True

    def d():
        ds.write(repo.currenttransaction())

    with repo.wlock():
        timer(d, setup=setup)
    fm.end()


def _getmergerevs(repo, opts):
    """parse command arguments to return the revisions involved in a merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)


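# A minimal usage sketch for the helper above, mirroring the option defaults
# of the perf::merge* commands below (the literal option values here are
# hypothetical; a real call receives the parsed command options):
def _example_getmergerevs(repo):
    opts = {b'rev': b'.', b'from': b'', b'base': b''}
    wctx, rctx, ancestor = _getmergerevs(repo, opts)
    return wctx, rctx, ancestor

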
@command(
    b'perf::mergecalculate|perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(d)
    fm.end()


@command(
    b'perf::mergecopies|perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(d)
    fm.end()


@command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)

    def d():
        copies.pathcopies(ctx1, ctx2)

    timer(d)
    fm.end()


@command(
    b'perf::phases|perfphases',
    [
        (b'', b'full', False, b'include file reading time too'),
    ],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def d():
        phases = _phases
        if full:
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(d)
    fm.end()


@command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    if util.safehasattr(path, 'main_path'):
        path = path.get_push_variant()
        dest = path.loc
    else:
        dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    has_node = getattr(repo.changelog.index, 'has_node', None)
    if has_node is None:
        has_node = repo.changelog.nodemap.__contains__
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if has_node(node) and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()


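# A minimal sketch of the root counting above: given a `listkeys`-style phase
# map of {hex_node: phase_bytes}, count the known non-public roots. The
# `is_known` callable stands in for the has_node lookup and is hypothetical.
def _example_count_nonpublic(remotephases, is_known):
    count = 0
    for nhex, phase in remotephases.items():
        if nhex == b'publishing':  # metadata entry, not a root
            continue
        if is_known(nhex) and int(phase):  # int() accepts bytes digits
            count += 1
    return count

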
@command(
    b'perf::manifest|perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(d)
    fm.end()


@command(b'perf::changeset|perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    n = scmutil.revsingle(repo, rev).node()

    def d():
        repo.changelog.read(n)
        # repo.changelog._cache = None

    timer(d)
    fm.end()


@command(b'perf::ignore|perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operations related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()


@command(
    b'perf::index|perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not perform a revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matter. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matter.

    Examples of useful sets to test:

    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, check out the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        if opts['rev']:
            raise error.Abort('--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoids polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()


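# `unfi.__class__.changelog.func` above grabs the raw function wrapped by the
# filecache descriptor, so the benchmark can rebuild the changelog without
# going through cache bookkeeping. A minimal sketch of the same unwrapping on
# a plain property (hypothetical class and helper):
def _example_unwrap_property():
    class Box(object):
        @property
        def value(self):
            return 42

    raw = Box.value.fget  # the undecorated function behind the descriptor
    return raw(Box())  # -> 42, calling it directly on an instance

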
@command(
    b'perf::nodemap|perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revisions from a cold nodemap

    Depending on the implementation, the number and order of revisions we
    look up can vary. Examples of useful sets to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focuses on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoids polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        cl = makecl(unfi)
        if util.safehasattr(cl.index, 'get_rev'):
            nodeget[0] = cl.index.get_rev
        else:
            nodeget[0] = cl.nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:

        def setup():
            setnodeget()

    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()


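# `nodeget = [None]` above is a one-element list used as a mutable cell so
# the setup closure can hand a fresh lookup callable to `d()` on every run.
# A minimal standalone sketch of the same pattern (hypothetical names):
def _example_mutable_cell():
    cell = [None]

    def setup():
        cell[0] = len  # install the callable to measure

    def run(arg):
        return cell[0](arg)

    setup()
    return run(b"abc")  # -> 3

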
@command(b'perf::startup|perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        if os.name != 'nt':
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )
        else:
            os.environ['HGRCPATH'] = r' '
            os.system("%s version -q > NUL" % sys.argv[0])

    timer(d)
    fm.end()


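# A rough standalone sketch of the same cold-start measurement using
# subprocess instead of os.system: a single sample rather than a timer()
# run, POSIX-oriented, and every name here is hypothetical rather than part
# of this module's API.
def _example_startup_seconds(hg='hg'):
    import os
    import subprocess
    import time

    env = dict(os.environ, HGRCPATH='')  # ignore user/system config
    start = time.time()
    subprocess.call([hg, 'version', '-q'], stdout=subprocess.DEVNULL, env=env)
    return time.time() - start

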
@command(b'perf::parents|perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object
    layers from the repository object. The first N revisions will be used
    for this benchmark. N is controlled by the ``perf.parentscount`` config
    option (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nl = [repo.changelog.node(i) for i in _xrange(count)]

    def d():
        for n in nl:
            repo.changelog.parents(n)

    timer(d)
    fm.end()


@command(b'perf::ctxfiles|perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)

    def d():
        len(repo[x].files())

    timer(d)
    fm.end()


@command(b'perf::rawfiles|perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def d():
        len(cl.read(x)[3])

    timer(d)
    fm.end()


@command(b'perf::lookup|perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo.lookup(rev)))
    fm.end()


@command(
    b'perf::linelogedits|perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()


@command(b'perf::revrange|perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revrange = scmutil.revrange
    timer(lambda: len(revrange(repo, specs)))
    fm.end()


@command(b'perf::nodelookup|perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()

    try:
        cl = revlog(getsvfs(repo), radix=b"00changelog")
    except TypeError:
        cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")

    def d():
        cl.rev(n)
        clearcaches(cl)

    timer(d)
    fm.end()


@command(
    b'perf::log|perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    ui.pushbuffer()
    timer(
        lambda: commands.log(
            ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
        )
    )
    ui.popbuffer()
    fm.end()


@command(b'perf::moonwalk|perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def moonwalk():
        for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[i]
            ctx.branch()  # read changelog data (in addition to the index)

    timer(moonwalk)
    fm.end()


@command(
    b'perf::templating|perftemplating',
    [
        (b'r', b'rev', [], b'revisions to run the template on'),
    ]
    + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    nullui = ui.copy()
    nullui.fout = open(os.devnull, 'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (
        b'{date|shortdate} [{rev}:{node|short}]'
        b' {author|person}: {desc|firstline}\n'
    )
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)

    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()


def _displaystats(ui, opts, entries, data):
    # use a second formatter because the data are quite different, not sure
    # how it flies with the templater.
    fm = ui.formatter(b'perf-stats', opts)
    for key, title in entries:
        values = data[key]
        nbvalues = len(values)  # number of samples, not number of keys
        values.sort()
        stats = {
            'key': key,
            'title': title,
            'nbitems': len(values),
            'min': values[0][0],
            '10%': values[(nbvalues * 10) // 100][0],
            '25%': values[(nbvalues * 25) // 100][0],
            '50%': values[(nbvalues * 50) // 100][0],
            '75%': values[(nbvalues * 75) // 100][0],
            '80%': values[(nbvalues * 80) // 100][0],
            '85%': values[(nbvalues * 85) // 100][0],
            '90%': values[(nbvalues * 90) // 100][0],
            '95%': values[(nbvalues * 95) // 100][0],
            '99%': values[(nbvalues * 99) // 100][0],
            'max': values[-1][0],
        }
        fm.startitem()
        fm.data(**stats)
        # make node pretty for the human output
        fm.plain('### %s (%d items)\n' % (title, len(values)))
        lines = [
            'min',
            '10%',
            '25%',
            '50%',
            '75%',
            '80%',
            '85%',
            '90%',
            '95%',
            '99%',
            'max',
        ]
        for l in lines:
            fm.plain('%s: %s\n' % (l, stats[l]))
    fm.end()


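# A minimal self-contained sketch of the percentile indexing used above: for
# a sorted list `values`, the p-th percentile is approximated by
# values[(len(values) * p) // 100]. The sample data below is hypothetical
# and mirrors the (value,) tuple layout used by _displaystats.
def _example_percentiles():
    values = sorted([(5,), (1,), (4,), (2,), (3,)])
    n = len(values)
    return {p: values[(n * p) // 100][0] for p in (10, 50, 90)}

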
@command(
    b'perf::helper-mergecopies|perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistics about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command finds (base, p1, p2) triplets relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during a merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    roi = repo.revs('merge() and %ld', revs)
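    # "roi" (revisions of interest) keeps only actual merge commits within
    # the requested set; every common-ancestor head of each merge is then
    # tried as a copy-tracing base below.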
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed files, so let's count them.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revisions covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)


@command(
    b'perf::helper-pathcopies|perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistics about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perftracecopies`

    This command finds source-destination pairs relevant for copytracing
    testing. It reports values for some of the parameters that impact copy
    tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (
                            data['nbrevs'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                    alldata['nbmissingfiles'].append(
                        (
                            data['nbmissingfiles'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (
                                data['time'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                        alldata['nbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                fm.startitem()
                fm.data(**data)
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        entries = [
            ('nbrevs', 'number of revisions covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)


@command(b'perf::cca|perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
    fm.end()


@command(b'perf::fncacheload|perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store

    def d():
        s.fncache._load()

    timer(d)
    fm.end()


@command(b'perf::fncachewrite|perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    s.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')

    def d():
        s.fncache._dirty = True
        s.fncache.write(tr)

    timer(d)
    tr.close()
    lock.release()
    fm.end()


@command(b'perf::fncacheencode|perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    s.fncache._load()

    def d():
        for p in s.fncache.entries:
            s.encode(p)

    timer(d)
    fm.end()


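# Worker protocol for the threaded `perfbdiff` runs below: each worker
# drains (old, new) text pairs from the queue until it hits a None
# sentinel, then parks on the `ready` condition until the main thread
# either refills the queue for another run or sets `done` to shut it down.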
def _bdiffworker(q, blocks, xdiff, ready, done):
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        with ready:
            ready.wait()


def _manifestrevision(repo, mnode):
    ml = repo.manifestlog

    if util.safehasattr(ml, b'getstorage'):
        store = ml.getstorage(b'')
    else:
        store = ml._revlog

    return store.revision(mnode)


@command(
    b'perf::bdiff|perfbdiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
        (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
        (b'', b'blocks', False, b'test computing diffs into blocks'),
        (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV',
)
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between the requested revision and its
    delta parent.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:

        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)

    else:
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(
                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
            ).start()
        q.join()
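        # every worker has now consumed its priming sentinel and is parked
        # on `ready`; each timed run below refills the queue, wakes the
        # workers, and waits for the queue to drain again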

        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()


@command(
    b'perf::unbundle',
    formatteropts,
    b'BUNDLE_FILE',
)
def perf_unbundle(ui, repo, fname, **opts):
    """benchmark application of a bundle in a repository.

    This does not include the final transaction processing."""
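    # Example (illustrative): bundle revisions the target repository does
    # not have yet, then time applying them:
    #   $ hg -R ../other-repo bundle --base null --rev tip /tmp/all.hg
    #   $ hg perf::unbundle /tmp/all.hg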

    from mercurial import exchange
    from mercurial import bundle2
    from mercurial import transaction

    opts = _byteskwargs(opts)

    ### some compatibility hotfix
    #
    # the data attribute is dropped in 63edc384d3b7, a changeset introducing a
    # critical regression that breaks transaction rollback for files that are
    # de-inlined.
    method = transaction.transaction._addentry
    pre_63edc384d3b7 = "data" in getargspec(method).args
    # the `detailed_exit_code` attribute is introduced in 33c0c25d0b0f,
    # a changeset that is a close descendant of 18415fc918a1, the changeset
    # that concludes the fix run for the bug introduced in 63edc384d3b7.
    args = getargspec(error.Abort.__init__).args
    post_18415fc918a1 = "detailed_exit_code" in args

    old_max_inline = None
    try:
        if not (pre_63edc384d3b7 or post_18415fc918a1):
            # disable inlining
            old_max_inline = mercurial.revlog._maxinline
            # large enough to never happen
            mercurial.revlog._maxinline = 2 ** 50

        with repo.lock():
            bundle = [None, None]
            orig_quiet = repo.ui.quiet
            try:
                repo.ui.quiet = True
                with open(fname, mode="rb") as f:

                    def noop_report(*args, **kwargs):
                        pass

                    def setup():
                        gen, tr = bundle
                        if tr is not None:
                            tr.abort()
                        bundle[:] = [None, None]
                        f.seek(0)
                        bundle[0] = exchange.readbundle(ui, f, fname)
                        bundle[1] = repo.transaction(b'perf::unbundle')
                        # silence the transaction
                        bundle[1]._report = noop_report

                    def apply():
                        gen, tr = bundle
                        bundle2.applybundle(
                            repo,
                            gen,
                            tr,
                            source=b'perf::unbundle',
                            url=fname,
                        )

                    timer, fm = gettimer(ui, opts)
                    timer(apply, setup=setup)
                    fm.end()
            finally:
                repo.ui.quiet = orig_quiet
                gen, tr = bundle
                if tr is not None:
                    tr.abort()
    finally:
        if old_max_inline is not None:
            mercurial.revlog._maxinline = old_max_inline


@command(
    b'perf::unidiff|perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between the requested revision and its
    delta parent.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()


@command(b'perf::diffwd|perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }
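    # each timed pass below runs `hg diff` with one combination of these
    # whitespace options, e.g. 'wB' maps to ignore_all_space plus
    # ignore_blank_lines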

    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        opts = {options[c]: b'1' for c in diffopt}

        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **opts)
            ui.popbuffer()

        diffopt = diffopt.encode('ascii')
        title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
        timer(d, title=title)
    fm.end()


@command(
    b'perf::revlogindex|perfrevlogindex',
    revlogopts + formatteropts,
    b'-c|-m|FILE',
)
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    # compat with hg <= 5.8
    radix = getattr(rl, 'radix', None)
    indexfile = getattr(rl, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(rl, 'indexfile')
    data = opener.read(indexfile)

    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
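    # a v1 revlog index starts with a 4-byte big-endian header: the low 16
    # bits hold the format version and bit 16 flags an inline data file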
    if version == 1:
        inline = header & (1 << 16)
    else:
        raise error.Abort(b'unsupported revlog version: %d' % version)

    parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
    if parse_index_v1 is None:
        parse_index_v1 = mercurial.revlog.revlogio().parseindex

    rllen = len(rl)

    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        if radix is not None:
            revlog(opener, radix=radix)
        else:
            # hg <= 5.8
            revlog(opener, indexfile=indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        parse_index_v1(data, inline)

    def getentry(revornode):
        index = parse_index_v1(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = parse_index_v1(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        try:
            rev(node)
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        for i in range(count):
            for node in nodes:
                try:
                    rev(node)
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()


@command(
    b'perf::revlogrevisions|perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    if startrev < 0:
        startrev = rllen + startrev

    def d():
        rl.clearcaches()

        beginrev = startrev
        endrev = rllen
        dist = opts[b'dist']

        if reverse:
            beginrev, endrev = endrev - 1, beginrev - 1
            dist = -1 * dist

        for x in _xrange(beginrev, endrev, dist):
            # Old Mercurial versions don't support passing an int here.
            n = rl.node(x)
            rl.revision(n)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()


@command(
    b'perf::revlogwrite|perfrevlogwrite',
    revlogopts
    + formatteropts
    + [
        (b's', b'startrev', 1000, b'revision to start writing at'),
        (b'', b'stoprev', -1, b'last revision to write'),
        (b'', b'count', 3, b'number of passes to perform'),
        (b'', b'details', False, b'print timing for every revision tested'),
        (b'', b'source', b'full', b'the kind of data fed into the revlog'),
        (b'', b'lazydeltabase', True, b'try the provided delta first'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
    (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled, use --count instead
    """
3089 opts = _byteskwargs(opts)
3104 opts = _byteskwargs(opts)
3090
3105
3091 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
3106 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
3092 rllen = getlen(ui)(rl)
3107 rllen = getlen(ui)(rl)
3093 if startrev < 0:
3108 if startrev < 0:
3094 startrev = rllen + startrev
3109 startrev = rllen + startrev
3095 if stoprev < 0:
3110 if stoprev < 0:
3096 stoprev = rllen + stoprev
3111 stoprev = rllen + stoprev
3097
3112
3098 lazydeltabase = opts['lazydeltabase']
3113 lazydeltabase = opts['lazydeltabase']
3099 source = opts['source']
3114 source = opts['source']
3100 clearcaches = opts['clear_caches']
3115 clearcaches = opts['clear_caches']
3101 validsource = (
3116 validsource = (
3102 b'full',
3117 b'full',
3103 b'parent-1',
3118 b'parent-1',
3104 b'parent-2',
3119 b'parent-2',
3105 b'parent-smallest',
3120 b'parent-smallest',
3106 b'storage',
3121 b'storage',
3107 )
3122 )
3108 if source not in validsource:
3123 if source not in validsource:
3109 raise error.Abort('invalid source type: %s' % source)
3124 raise error.Abort('invalid source type: %s' % source)
3110
3125
3111 ### actually gather results
3126 ### actually gather results
3112 count = opts['count']
3127 count = opts['count']
3113 if count <= 0:
3128 if count <= 0:
3114 raise error.Abort('invalide run count: %d' % count)
3129 raise error.Abort('invalide run count: %d' % count)
3115 allresults = []
3130 allresults = []
3116 for c in range(count):
3131 for c in range(count):
3117 timing = _timeonewrite(
3132 timing = _timeonewrite(
3118 ui,
3133 ui,
3119 rl,
3134 rl,
3120 source,
3135 source,
3121 startrev,
3136 startrev,
3122 stoprev,
3137 stoprev,
3123 c + 1,
3138 c + 1,
3124 lazydeltabase=lazydeltabase,
3139 lazydeltabase=lazydeltabase,
3125 clearcaches=clearcaches,
3140 clearcaches=clearcaches,
3126 )
3141 )
3127 allresults.append(timing)
3142 allresults.append(timing)
3128
3143
3129 ### consolidate the results in a single list
3144 ### consolidate the results in a single list
3130 results = []
3145 results = []
3131 for idx, (rev, t) in enumerate(allresults[0]):
3146 for idx, (rev, t) in enumerate(allresults[0]):
3132 ts = [t]
3147 ts = [t]
3133 for other in allresults[1:]:
3148 for other in allresults[1:]:
3134 orev, ot = other[idx]
3149 orev, ot = other[idx]
3135 assert orev == rev
3150 assert orev == rev
3136 ts.append(ot)
3151 ts.append(ot)
3137 results.append((rev, ts))
3152 results.append((rev, ts))
3138 resultcount = len(results)
3153 resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sort results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
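    # e.g. with 200 results, "90%" picks results[180]: since the list was
    # just sorted by median time, that is the revision whose write time
    # sits at the 90th percentile.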
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many floats will not be very precise; we ignore this
    # fact for now
    totaltime = []
    for item in allresults:
        totaltime.append(
            (
                sum(x[1][0] for x in item),
                sum(x[1][1] for x in item),
                sum(x[1][2] for x in item),
            )
        )
    formatone(
        fm,
        totaltime,
        title="total time (%d revs)" % resultcount,
        displayall=displayall,
    )
    fm.end()


class _faketr:
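    # Minimal stand-in for a transaction object: the revlog write path only
    # needs a ``tr.add(...)`` callable here, so a no-op implementation keeps
    # the benchmark from paying for (or being skewed by) a real transaction.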
    def add(s, x, y, z=None):
        return None


def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)

            def updateprogress(pos):
                progress.update(pos)

            def completeprogress():
                progress.complete()

        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings


def _getrevisionseed(orig, rev, tr, source):
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        baserev = orig.rev(p1)
        cachedelta = (baserev, orig.revdiff(p1, rev))
    elif source == b'parent-2':
        parent = p2
        if p2 == nullid:
            parent = p1
        baserev = orig.rev(parent)
        cachedelta = (baserev, orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        p1diff = orig.revdiff(p1, rev)
        parent = p1
        diff = p1diff
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(p1diff) > len(p2diff):
                parent = p2
                diff = p2diff
        baserev = orig.rev(parent)
        cachedelta = (baserev, diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )
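
# note: each ``source`` mode handled by _getrevisionseed simulates a
# different kind of incoming data: 'full' passes only the fulltext, the
# 'parent-*' modes pass a delta against a parent (as data received from a
# client would), and 'storage' reuses the delta base already used on disk;
# addrawrevision() may then use the supplied cachedelta as its delta
# candidate.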


@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    indexfile = getattr(orig, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(orig, 'indexfile')
    origindexpath = orig.opener.join(indexfile)

    datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
    origdatapath = orig.opener.join(datafile)
    radix = b'revlog'
    indexname = b'revlog.i'
    dataname = b'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

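        # at this point the copies only contain revisions < truncaterev:
        # index entries are fixed-size, so cutting the index at
        # truncaterev * entry_size drops every later entry, and the data
        # file is cut at that revision's start offset.
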
        # instantiate a new revlog from the temporary copy
        ui.debug('instantiating revlog from the truncated copy\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        try:
            dest = revlog(vfs, radix=radix, **revlogkwargs)
        except TypeError:
            dest = revlog(
                vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
            )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)


@command(
    b'perf::revlogchunks|perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
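
    Example:

    # time chunk reads and recompression of the manifest, using only the
    # (always available) zlib engine
    $ hg perfrevlogchunks -m --engines zlib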
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = {e.strip() for e in engines.split(b',')}
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        if rl._inline:
            indexfile = getattr(rl, '_indexfile', None)
            if indexfile is None:
                # compatibility with <= hg-5.8
                indexfile = getattr(rl, 'indexfile')
            return getsvfs(repo)(indexfile)
        else:
            datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
            return getsvfs(repo)(datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()


@command(
    b'perf::revlogrevision|perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
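
    Example:

    # break down the cost of reconstructing manifest revision 0
    $ hg perfrevlogrevision -m 0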
    """
    opts = _byteskwargs(opts)

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        start = r.start
        length = r.length
        inline = r._inline
        try:
            iosize = r.index.entry_size
        except AttributeError:
            iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()


@command(
    b'perf::revset|perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building
    the volatile revision set caches on revset execution. The volatile
    caches hold data related to filtered revisions and obsolescence."""
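    # Example invocation (any revset expression works):
    #   $ hg perfrevset --contexts 'draft()'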
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def d():
        if clear:
            repo.invalidatevolatilesets()
        if contexts:
            for ctx in repo.set(expr):
                pass
        else:
            for r in repo.revs(expr):
                pass

    timer(d)
    fm.end()


@command(
    b'perf::volatilesets|perfvolatilesets',
    [
        (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
    ]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile sets

    Volatile sets compute elements related to filtering and obsolescence."""
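    # Example: benchmark only the 'obsolete' set, dropping the obsstore
    # between runs:
    #   $ hg perfvolatilesets --clear-obsstore obsolete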
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def getobs(name):
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, name)

        return d

    allobs = sorted(obsolete.cachefuncs)
    if names:
        allobs = [n for n in allobs if n in names]

    for name in allobs:
        timer(getobs(name), title=name)

    def getfiltered(name):
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, name)

        return d

    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [n for n in allfilter if n in names]

    for name in allfilter:
        timer(getfiltered(name), title=name)
    fm.end()


@command(
    b'perf::branchmap|perfbranchmap',
    [
        (b'f', b'full', False, b'Includes build time of subset'),
        (
            b'',
            b'clear-revbranch',
            False,
            b'purge the revbranch cache between computation',
        ),
    ]
    + formatteropts,
)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches

        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                view._branchcaches.clear()
            else:
                filtered.pop(filtername, None)
            view.branchmap()

        return d

    # add filters from smaller subsets to bigger subsets
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)
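    # `allfilters` now lists each filter after the subset it builds on, so
    # the cache-warming pass below can reuse previously computed branchmaps.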

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=printname)
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()


@command(
    b'perf::branchmapupdate|perfbranchmapupdate',
    [
        (b'', b'base', [], b'subset of revisions to start from'),
        (b'', b'target', [], b'subset of revisions to end with'),
        (b'', b'clear-caches', False, b'clear caches between each run'),
    ]
    + formatteropts,
)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closures

    # we use a `list` here to avoid possible side effects from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset was found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)
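        # `base` now covers exactly the --base revisions; each timed run
        # copies it and applies only the new revisions, so the measurement
        # isolates the incremental branchmap update.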

        def setup():
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)


@command(
    b'perf::branchmapload|perfbranchmapload',
    [
        (b'f', b'filter', b'', b'Specify repoview filter'),
        (b'', b'list', False, b'List branchmap filter caches'),
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ]
    + formatteropts,
)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
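    # Example: time loading the on-disk branchmap of the 'served' view:
    #   $ hg perfbranchmapload --filter served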
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(
                    b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
                )
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up-to-date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(
                b'No branchmap cached for %s repo' % (filter or b'unfiltered')
            )
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)

    def setup():
        if clearrevlogs:
            clearchangelog(repo)

    def bench():
        fromfile(repo)

    timer(bench, setup=setup)
    fm.end()


@command(b'perf::loadmarkers|perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)
    timer(lambda: len(obsolete.obsstore(repo, svfs)))
    fm.end()


@command(
    b'perf::lrucachedict|perflrucachedict',
    formatteropts
    + [
        (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
        (b'', b'mincost', 0, b'smallest cost of items in cache'),
        (b'', b'maxcost', 100, b'maximum cost of items in cache'),
        (b'', b'size', 4, b'size of cache'),
        (b'', b'gets', 10000, b'number of key lookups'),
        (b'', b'sets', 10000, b'number of key sets'),
        (b'', b'mixed', 10000, b'number of mixed mode operations'),
        (
            b'',
            b'mixedgetfreq',
            50,
            b'frequency of get vs set ops in mixed mode',
        ),
    ],
    norepo=True,
)
def perflrucache(
    ui,
    mincost=0,
    maxcost=100,
    costlimit=0,
    size=4,
    gets=10000,
    sets=10000,
    mixed=10000,
    mixedgetfreq=50,
    **opts
):
    opts = _byteskwargs(opts)

    def doinit():
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value  # silence pyflakes warning

    def dogetscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value  # silence pyflakes warning
            except KeyError:
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        mixedops.append(
            (op, random.randint(0, size * 2), random.choice(costrange))
        )
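    # each entry is (op, key, cost): op 0 is a lookup (roughly mixedgetfreq
    # percent of operations) and op 1 an insertion; keys span 0..2*size, so
    # many lookups miss and the eviction path stays exercised.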

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    if costlimit:
        benches.extend(
            [
                (dogetscost, b'gets w/ cost limit'),
                (doinsertscost, b'inserts w/ cost limit'),
                (domixedcost, b'mixed w/ cost limit'),
            ]
        )
    else:
        benches.extend(
            [
                (dogets, b'gets'),
                (doinserts, b'inserts'),
                (dosets, b'sets'),
                (domixed, b'mixed'),
            ]
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()


@command(
    b'perf::write|perfwrite',
    formatteropts
    + [
        (b'', b'write-method', b'write', b'ui write method'),
        (b'', b'nlines', 100, b'number of lines'),
        (b'', b'nitems', 100, b'number of items (per line)'),
        (b'', b'item', b'x', b'item that is written'),
        (b'', b'batch-line', None, b'pass whole line to write method at once'),
        (b'', b'flush-line', None, b'flush after each line'),
    ],
)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write (and others)"""
4160 opts = _byteskwargs(opts)
4175 opts = _byteskwargs(opts)
4161
4176
4162 write = getattr(ui, _sysstr(opts[b'write_method']))
4177 write = getattr(ui, _sysstr(opts[b'write_method']))
4163 nlines = int(opts[b'nlines'])
4178 nlines = int(opts[b'nlines'])
4164 nitems = int(opts[b'nitems'])
4179 nitems = int(opts[b'nitems'])
4165 item = opts[b'item']
4180 item = opts[b'item']
4166 batch_line = opts.get(b'batch_line')
4181 batch_line = opts.get(b'batch_line')
4167 flush_line = opts.get(b'flush_line')
4182 flush_line = opts.get(b'flush_line')
4168
4183
4169 if batch_line:
4184 if batch_line:
4170 line = item * nitems + b'\n'
4185 line = item * nitems + b'\n'
4171
4186
4172 def benchmark():
4187 def benchmark():
4173 for i in pycompat.xrange(nlines):
4188 for i in pycompat.xrange(nlines):
4174 if batch_line:
4189 if batch_line:
4175 write(line)
4190 write(line)
4176 else:
4191 else:
4177 for i in pycompat.xrange(nitems):
4192 for i in pycompat.xrange(nitems):
4178 write(item)
4193 write(item)
4179 write(b'\n')
4194 write(b'\n')
4180 if flush_line:
4195 if flush_line:
4181 ui.flush()
4196 ui.flush()
4182 ui.flush()
4197 ui.flush()
4183
4198
4184 timer, fm = gettimer(ui, opts)
4199 timer, fm = gettimer(ui, opts)
4185 timer(benchmark)
4200 timer(benchmark)
4186 fm.end()
4201 fm.end()
4187
4202
4188
4203
4189 def uisetup(ui):
4204 def uisetup(ui):
4190 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
4205 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
4191 commands, b'debugrevlogopts'
4206 commands, b'debugrevlogopts'
4192 ):
4207 ):
4193 # for "historical portability":
4208 # for "historical portability":
4194 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
4209 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
4195 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
4210 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
4196 # openrevlog() should cause failure, because it has been
4211 # openrevlog() should cause failure, because it has been
4197 # available since 3.5 (or 49c583ca48c4).
4212 # available since 3.5 (or 49c583ca48c4).
4198 def openrevlog(orig, repo, cmd, file_, opts):
4213 def openrevlog(orig, repo, cmd, file_, opts):
4199 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
4214 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
4200 raise error.Abort(
4215 raise error.Abort(
4201 b"This version doesn't support --dir option",
4216 b"This version doesn't support --dir option",
4202 hint=b"use 3.5 or later",
4217 hint=b"use 3.5 or later",
4203 )
4218 )
4204 return orig(repo, cmd, file_, opts)
4219 return orig(repo, cmd, file_, opts)
4205
4220
4206 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
4221 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
4207
4222
4208
4223
4209 @command(
4224 @command(
4210 b'perf::progress|perfprogress',
4225 b'perf::progress|perfprogress',
4211 formatteropts
4226 formatteropts
4212 + [
4227 + [
4213 (b'', b'topic', b'topic', b'topic for progress messages'),
4228 (b'', b'topic', b'topic', b'topic for progress messages'),
4214 (b'c', b'total', 1000000, b'total value we are progressing to'),
4229 (b'c', b'total', 1000000, b'total value we are progressing to'),
4215 ],
4230 ],
4216 norepo=True,
4231 norepo=True,
4217 )
4232 )
4218 def perfprogress(ui, topic=None, total=None, **opts):
4233 def perfprogress(ui, topic=None, total=None, **opts):
4219 """printing of progress bars"""
4234 """printing of progress bars"""
4220 opts = _byteskwargs(opts)
4235 opts = _byteskwargs(opts)
4221
4236
4222 timer, fm = gettimer(ui, opts)
4237 timer, fm = gettimer(ui, opts)
4223
4238
4224 def doprogress():
4239 def doprogress():
4225 with ui.makeprogress(topic, total=total) as progress:
4240 with ui.makeprogress(topic, total=total) as progress:
4226 for i in _xrange(total):
4241 for i in _xrange(total):
4227 progress.increment()
4242 progress.increment()
4228
4243
4229 timer(doprogress)
4244 timer(doprogress)
4230 fm.end()
4245 fm.end()
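
Note: the benchmarks above exercise Mercurial's util.lrucachedict, an LRU mapping that can also evict on a running cost total once maxcost is set. As a rough mental model only -- the TinyLRU class below is a standalone sketch written for this note, not Mercurial's implementation -- the insert/get/evict cycle being timed looks like this:

from collections import OrderedDict

class TinyLRU:
    """Sketch of an LRU dict with an optional total-cost ceiling."""

    def __init__(self, size, maxcost=0):
        self._d = OrderedDict()  # least recently used entry first
        self._size = size
        self._maxcost = maxcost
        self._totalcost = 0

    def insert(self, key, value, cost=0):
        if key in self._d:
            self._totalcost -= self._d.pop(key)[1]
        self._d[key] = (value, cost)
        self._totalcost += cost
        # evict least recently used entries while either limit is exceeded
        while self._d and (
            len(self._d) > self._size
            or (self._maxcost and self._totalcost > self._maxcost)
        ):
            _, (_, evictedcost) = self._d.popitem(last=False)
            self._totalcost -= evictedcost

    def __getitem__(self, key):
        value, _cost = self._d[key]
        self._d.move_to_end(key)  # a hit makes the entry most recent
        return value

cache = TinyLRU(2, maxcost=10)
cache.insert('a', 1, cost=4)
cache.insert('b', 2, cost=4)
cache.insert('c', 3, cost=4)  # size and cost both over limit: 'a' is evicted
assert 'a' not in cache._d and cache['b'] == 2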
@@ -1,667 +1,657
 # testparseutil.py - utilities to parse test script for check tools
 #
 # Copyright 2018 FUJIWARA Katsunori <foozy@lares.dti.ne.jp> and others
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.


 import abc
+import builtins
 import re
 import sys

 ####################
 # for Python3 compatibility (almost comes from mercurial/pycompat.py)

-ispy3 = sys.version_info[0] >= 3
-
-
 def identity(a):
     return a


 def _rapply(f, xs):
     if xs is None:
         # assume None means non-value of optional data
         return xs
     if isinstance(xs, (list, set, tuple)):
         return type(xs)(_rapply(f, x) for x in xs)
     if isinstance(xs, dict):
         return type(xs)((_rapply(f, k), _rapply(f, v)) for k, v in xs.items())
     return f(xs)


 def rapply(f, xs):
     if f is identity:
         # fast path mainly for py2
         return xs
     return _rapply(f, xs)


-if ispy3:
-    import builtins
-
-    def bytestr(s):
-        # tiny version of pycompat.bytestr
-        return s.encode('latin1')
-
-    def sysstr(s):
-        if isinstance(s, builtins.str):
-            return s
-        return s.decode('latin-1')
-
-    def opentext(f):
-        return open(f, 'r')
-
-
-else:
-    bytestr = str
-    sysstr = identity
-
-    opentext = open
+def bytestr(s):
+    # tiny version of pycompat.bytestr
+    return s.encode('latin1')
+
+
+def sysstr(s):
+    if isinstance(s, builtins.str):
+        return s
+    return s.decode('latin-1')
+
+
+def opentext(f):
+    return open(f, 'r')


 def b2s(x):
     # convert BYTES elements in "x" to SYSSTR recursively
     return rapply(sysstr, x)


 def writeout(data):
     # write "data" in BYTES into stdout
     sys.stdout.write(data)


 def writeerr(data):
     # write "data" in BYTES into stderr
     sys.stderr.write(data)


 ####################


 class embeddedmatcher:  # pytype: disable=ignored-metaclass
     """Base class to detect embedded code fragments in *.t test script"""

     __metaclass__ = abc.ABCMeta

     def __init__(self, desc):
         self.desc = desc

     @abc.abstractmethod
     def startsat(self, line):
         """Examine whether embedded code starts at line

         This can return arbitrary object, and it is used as 'ctx' for
         subsequent method invocations.
         """

     @abc.abstractmethod
     def endsat(self, ctx, line):
         """Examine whether embedded code ends at line"""

     @abc.abstractmethod
     def isinside(self, ctx, line):
         """Examine whether line is inside embedded code, if not yet endsat"""

     @abc.abstractmethod
     def ignores(self, ctx):
         """Examine whether detected embedded code should be ignored"""

     @abc.abstractmethod
     def filename(self, ctx):
         """Return filename of embedded code

         If filename isn't specified for embedded code explicitly, this
         returns None.
         """

     @abc.abstractmethod
     def codeatstart(self, ctx, line):
         """Return actual code at the start line of embedded code

         This might return None, if the start line doesn't contain
         actual code.
         """

     @abc.abstractmethod
     def codeatend(self, ctx, line):
         """Return actual code at the end line of embedded code

         This might return None, if the end line doesn't contain actual
         code.
         """

     @abc.abstractmethod
     def codeinside(self, ctx, line):
         """Return actual code at line inside embedded code"""


 def embedded(basefile, lines, errors, matchers):
     """pick embedded code fragments up from given lines

     This is common parsing logic, which examines specified matchers on
     given lines.

     :basefile: a name of a file, from which lines to be parsed come.
     :lines: to be parsed (might be a value returned by "open(basefile)")
     :errors: an array, into which messages for detected error are stored
     :matchers: an array of embeddedmatcher objects

     This function yields '(filename, starts, ends, code)' tuple.

     :filename: a name of embedded code, if it is explicitly specified
                (e.g. "foobar" of "cat >> foobar <<EOF").
                Otherwise, this is None
     :starts: line number (1-origin), at which embedded code starts (inclusive)
     :ends: line number (1-origin), at which embedded code ends (exclusive)
     :code: extracted embedded code, which is single-stringified

     >>> class ambigmatcher:
     ...     # mock matcher class to examine implementation of
     ...     # "ambiguous matching" corner case
     ...     def __init__(self, desc, matchfunc):
     ...         self.desc = desc
     ...         self.matchfunc = matchfunc
     ...     def startsat(self, line):
     ...         return self.matchfunc(line)
     >>> ambig1 = ambigmatcher('ambiguous #1',
     ...                       lambda l: l.startswith('  $ cat '))
     >>> ambig2 = ambigmatcher('ambiguous #2',
     ...                       lambda l: l.endswith('<< EOF\\n'))
     >>> lines = ['  $ cat > foo.py << EOF\\n']
     >>> errors = []
     >>> matchers = [ambig1, ambig2]
     >>> list(t for t in embedded('<dummy>', lines, errors, matchers))
     []
     >>> b2s(errors)
     ['<dummy>:1: ambiguous line for "ambiguous #1", "ambiguous #2"']

     """
     matcher = None
     ctx = filename = code = startline = None  # for pyflakes

     for lineno, line in enumerate(lines, 1):
         if not line.endswith('\n'):
             line += '\n'  # to normalize EOF line
         if matcher:  # now, inside embedded code
             if matcher.endsat(ctx, line):
                 codeatend = matcher.codeatend(ctx, line)
                 if codeatend is not None:
                     code.append(codeatend)
                 if not matcher.ignores(ctx):
                     yield (filename, startline, lineno, ''.join(code))
                 matcher = None
                 # DO NOT "continue", because line might start next fragment
             elif not matcher.isinside(ctx, line):
                 # this is an error of basefile
                 # (if matchers are implemented correctly)
                 errors.append(
                     '%s:%d: unexpected line for "%s"'
                     % (basefile, lineno, matcher.desc)
                 )
                 # stop extracting embedded code by current 'matcher',
                 # because appearance of unexpected line might mean
                 # that expected end-of-embedded-code line might never
                 # appear
                 matcher = None
                 # DO NOT "continue", because line might start next fragment
             else:
                 code.append(matcher.codeinside(ctx, line))
                 continue

         # examine whether current line starts embedded code or not
         assert not matcher

         matched = []
         for m in matchers:
             ctx = m.startsat(line)
             if ctx:
                 matched.append((m, ctx))
         if matched:
             if len(matched) > 1:
                 # this is an error of matchers, maybe
                 errors.append(
                     '%s:%d: ambiguous line for %s'
                     % (
                         basefile,
                         lineno,
                         ', '.join(['"%s"' % m.desc for m, c in matched]),
                     )
                 )
                 # omit extracting embedded code, because choosing
                 # arbitrary matcher from matched ones might fail to
                 # detect the end of embedded code as expected.
                 continue
             matcher, ctx = matched[0]
             filename = matcher.filename(ctx)
             code = []
             codeatstart = matcher.codeatstart(ctx, line)
             if codeatstart is not None:
                 code.append(codeatstart)
                 startline = lineno
             else:
                 startline = lineno + 1

     if matcher:
         # examine whether EOF ends embedded code, because embedded
         # code isn't yet ended explicitly
         if matcher.endsat(ctx, '\n'):
             codeatend = matcher.codeatend(ctx, '\n')
             if codeatend is not None:
                 code.append(codeatend)
             if not matcher.ignores(ctx):
                 yield (filename, startline, lineno + 1, ''.join(code))
         else:
             # this is an error of basefile
             # (if matchers are implemented correctly)
             errors.append(
                 '%s:%d: unexpected end of file for "%s"'
                 % (basefile, lineno, matcher.desc)
             )

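For reference, a small usage sketch of the generator above (the sample file name and input lines are invented for illustration; fileheredocmatcher is defined just below). Per the docstring, the yielded start line is inclusive and the end line exclusive, both 1-origin:

sample = [
    '  $ cat > setup.py << EOF\n',
    '  > version = 1\n',
    '  > EOF\n',
]
errs = []
pymatcher = fileheredocmatcher('heredoc .py file', r'[^<]+\.py')
for name, starts, ends, code in embedded('<sample>', sample, errs, [pymatcher]):
    print(name, starts, ends, repr(code))  # setup.py 2 3 'version = 1\n'
assert not errs  # well-formed input produces no error messages
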
 # heredoc limit mark to ignore embedded code at check-code.py or so
 heredocignorelimit = 'NO_CHECK_EOF'

 # the pattern to match against cases below, and to return a limit mark
 # string as 'lname' group
 #
 # - << LIMITMARK
 # - << "LIMITMARK"
 # - << 'LIMITMARK'
 heredoclimitpat = r'\s*<<\s*(?P<lquote>["\']?)(?P<limit>\w+)(?P=lquote)'


 class fileheredocmatcher(embeddedmatcher):
     """Detect "cat > FILE << LIMIT" style embedded code

     >>> matcher = fileheredocmatcher('heredoc .py file', r'[^<]+\\.py')
     >>> b2s(matcher.startsat('  $ cat > file.py << EOF\\n'))
     ('file.py', '  > EOF\\n')
     >>> b2s(matcher.startsat('  $ cat >>file.py <<EOF\\n'))
     ('file.py', '  > EOF\\n')
     >>> b2s(matcher.startsat('  $ cat> \\x27any file.py\\x27<< "EOF"\\n'))
     ('any file.py', '  > EOF\\n')
     >>> b2s(matcher.startsat("  $ cat > file.py << 'ANYLIMIT'\\n"))
     ('file.py', '  > ANYLIMIT\\n')
     >>> b2s(matcher.startsat('  $ cat<<ANYLIMIT>"file.py"\\n'))
     ('file.py', '  > ANYLIMIT\\n')
     >>> start = '  $ cat > file.py << EOF\\n'
     >>> ctx = matcher.startsat(start)
     >>> matcher.codeatstart(ctx, start)
     >>> b2s(matcher.filename(ctx))
     'file.py'
     >>> matcher.ignores(ctx)
     False
     >>> inside = '  > foo = 1\\n'
     >>> matcher.endsat(ctx, inside)
     False
     >>> matcher.isinside(ctx, inside)
     True
     >>> b2s(matcher.codeinside(ctx, inside))
     'foo = 1\\n'
     >>> end = '  > EOF\\n'
     >>> matcher.endsat(ctx, end)
     True
     >>> matcher.codeatend(ctx, end)
     >>> matcher.endsat(ctx, '  > EOFEOF\\n')
     False
     >>> ctx = matcher.startsat('  $ cat > file.py << NO_CHECK_EOF\\n')
     >>> matcher.ignores(ctx)
     True
     """

     _prefix = '  > '

     def __init__(self, desc, namepat):
         super(fileheredocmatcher, self).__init__(desc)

         # build the pattern to match against cases below (and ">>"
         # variants), and to return a target filename string as 'name'
         # group
         #
         # - > NAMEPAT
         # - > "NAMEPAT"
         # - > 'NAMEPAT'
         namepat = (
             r'\s*>>?\s*(?P<nquote>["\']?)(?P<name>%s)(?P=nquote)' % namepat
         )
         self._fileres = [
             # "cat > NAME << LIMIT" case
             re.compile(r' {2}\$ \s*cat' + namepat + heredoclimitpat),
             # "cat << LIMIT > NAME" case
             re.compile(r' {2}\$ \s*cat' + heredoclimitpat + namepat),
         ]

     def startsat(self, line):
         # ctx is (filename, END-LINE-OF-EMBEDDED-CODE) tuple
         for filere in self._fileres:
             matched = filere.match(line)
             if matched:
                 return (
                     matched.group('name'),
                     '  > %s\n' % matched.group('limit'),
                 )

     def endsat(self, ctx, line):
         return ctx[1] == line

     def isinside(self, ctx, line):
         return line.startswith(self._prefix)

     def ignores(self, ctx):
         return '  > %s\n' % heredocignorelimit == ctx[1]

     def filename(self, ctx):
         return ctx[0]

     def codeatstart(self, ctx, line):
         return None  # no embedded code at start line

     def codeatend(self, ctx, line):
         return None  # no embedded code at end line

     def codeinside(self, ctx, line):
         return line[len(self._prefix) :]  # strip prefix


 ####
 # for embedded python script


 class pydoctestmatcher(embeddedmatcher):
     """Detect ">>> code" style embedded python code

     >>> matcher = pydoctestmatcher()
     >>> startline = '  >>> foo = 1\\n'
     >>> matcher.startsat(startline)
     True
     >>> matcher.startsat('  ... foo = 1\\n')
     False
     >>> ctx = matcher.startsat(startline)
     >>> matcher.filename(ctx)
     >>> matcher.ignores(ctx)
     False
     >>> b2s(matcher.codeatstart(ctx, startline))
     'foo = 1\\n'
     >>> inside = '  >>> foo = 1\\n'
     >>> matcher.endsat(ctx, inside)
     False
     >>> matcher.isinside(ctx, inside)
     True
     >>> b2s(matcher.codeinside(ctx, inside))
     'foo = 1\\n'
     >>> inside = '  ... foo = 1\\n'
     >>> matcher.endsat(ctx, inside)
     False
     >>> matcher.isinside(ctx, inside)
     True
     >>> b2s(matcher.codeinside(ctx, inside))
     'foo = 1\\n'
     >>> inside = '  expected output\\n'
     >>> matcher.endsat(ctx, inside)
     False
     >>> matcher.isinside(ctx, inside)
     True
     >>> b2s(matcher.codeinside(ctx, inside))
     '\\n'
     >>> inside = '  \\n'
     >>> matcher.endsat(ctx, inside)
     False
     >>> matcher.isinside(ctx, inside)
     True
     >>> b2s(matcher.codeinside(ctx, inside))
     '\\n'
     >>> end = '  $ foo bar\\n'
     >>> matcher.endsat(ctx, end)
     True
     >>> matcher.codeatend(ctx, end)
     >>> end = '\\n'
     >>> matcher.endsat(ctx, end)
     True
     >>> matcher.codeatend(ctx, end)
     """

     _prefix = '  >>> '
     _prefixre = re.compile(r' {2}(>>>|\.\.\.) ')

     # If a line matches against not _prefixre but _outputre, that line
     # is "an expected output line" (= not a part of code fragment).
     #
     # Strictly speaking, a line matching against "(#if|#else|#endif)"
     # is also treated similarly in "inline python code" semantics by
     # run-tests.py. But "directive line inside inline python code"
     # should be rejected by Mercurial reviewers. Therefore, this
     # regexp does not match against such directive lines.
     _outputre = re.compile(r' {2}$| {2}[^$]')

     def __init__(self):
         super(pydoctestmatcher, self).__init__("doctest style python code")

     def startsat(self, line):
         # ctx is "True"
         return line.startswith(self._prefix)

     def endsat(self, ctx, line):
         return not (self._prefixre.match(line) or self._outputre.match(line))

     def isinside(self, ctx, line):
         return True  # always true, if not yet ended

     def ignores(self, ctx):
         return False  # should be checked always

     def filename(self, ctx):
         return None  # no filename

     def codeatstart(self, ctx, line):
         return line[len(self._prefix) :]  # strip prefix '  >>> '/'  ... '

     def codeatend(self, ctx, line):
         return None  # no embedded code at end line

     def codeinside(self, ctx, line):
         if self._prefixre.match(line):
             return line[len(self._prefix) :]  # strip prefix '  >>> '/'  ... '
         return '\n'  # an expected output line is treated as an empty line


 class pyheredocmatcher(embeddedmatcher):
     """Detect "python << LIMIT" style embedded python code

     >>> matcher = pyheredocmatcher()
     >>> b2s(matcher.startsat('  $ python << EOF\\n'))
     '  > EOF\\n'
     >>> b2s(matcher.startsat('  $ $PYTHON <<EOF\\n'))
     '  > EOF\\n'
     >>> b2s(matcher.startsat('  $ "$PYTHON"<< "EOF"\\n'))
     '  > EOF\\n'
     >>> b2s(matcher.startsat("  $ $PYTHON << 'ANYLIMIT'\\n"))
     '  > ANYLIMIT\\n'
     >>> matcher.startsat('  $ "$PYTHON" < EOF\\n')
     >>> start = '  $ python << EOF\\n'
     >>> ctx = matcher.startsat(start)
     >>> matcher.codeatstart(ctx, start)
     >>> matcher.filename(ctx)
     >>> matcher.ignores(ctx)
     False
     >>> inside = '  > foo = 1\\n'
     >>> matcher.endsat(ctx, inside)
     False
     >>> matcher.isinside(ctx, inside)
     True
     >>> b2s(matcher.codeinside(ctx, inside))
     'foo = 1\\n'
     >>> end = '  > EOF\\n'
     >>> matcher.endsat(ctx, end)
     True
     >>> matcher.codeatend(ctx, end)
     >>> matcher.endsat(ctx, '  > EOFEOF\\n')
     False
     >>> ctx = matcher.startsat('  $ python << NO_CHECK_EOF\\n')
     >>> matcher.ignores(ctx)
     True
     """

     _prefix = '  > '

     _startre = re.compile(
         r' {2}\$ (\$PYTHON|"\$PYTHON"|python).*' + heredoclimitpat
     )

     def __init__(self):
         super(pyheredocmatcher, self).__init__("heredoc python invocation")

     def startsat(self, line):
         # ctx is END-LINE-OF-EMBEDDED-CODE
         matched = self._startre.match(line)
         if matched:
             return '  > %s\n' % matched.group('limit')

     def endsat(self, ctx, line):
         return ctx == line

     def isinside(self, ctx, line):
         return line.startswith(self._prefix)

     def ignores(self, ctx):
         return '  > %s\n' % heredocignorelimit == ctx

     def filename(self, ctx):
         return None  # no filename

     def codeatstart(self, ctx, line):
         return None  # no embedded code at start line

     def codeatend(self, ctx, line):
         return None  # no embedded code at end line

     def codeinside(self, ctx, line):
         return line[len(self._prefix) :]  # strip prefix


 _pymatchers = [
     pydoctestmatcher(),
     pyheredocmatcher(),
     # use '[^<]+' instead of '\S+', in order to match against
     # paths including whitespaces
     fileheredocmatcher('heredoc .py file', r'[^<]+\.py'),
 ]


 def pyembedded(basefile, lines, errors):
     return embedded(basefile, lines, errors, _pymatchers)


 ####
 # for embedded shell script

 _shmatchers = [
     # use '[^<]+' instead of '\S+', in order to match against
     # paths including whitespaces
     fileheredocmatcher('heredoc .sh file', r'[^<]+\.sh'),
 ]


 def shembedded(basefile, lines, errors):
     return embedded(basefile, lines, errors, _shmatchers)


 ####
 # for embedded hgrc configuration

 _hgrcmatchers = [
     # use '[^<]+' instead of '\S+', in order to match against
     # paths including whitespaces
     fileheredocmatcher(
         'heredoc hgrc file', r'(([^/<]+/)+hgrc|\$HGRCPATH|\${HGRCPATH})'
     ),
 ]


 def hgrcembedded(basefile, lines, errors):
     return embedded(basefile, lines, errors, _hgrcmatchers)


 ####

 if __name__ == "__main__":
     import optparse
     import sys

     def showembedded(basefile, lines, embeddedfunc, opts):
         errors = []
         for name, starts, ends, code in embeddedfunc(basefile, lines, errors):
             if not name:
                 name = '<anonymous>'
             writeout("%s:%d: %s starts\n" % (basefile, starts, name))
             if opts.verbose and code:
                 writeout("  |%s\n" % "\n  |".join(l for l in code.splitlines()))
             writeout("%s:%d: %s ends\n" % (basefile, ends, name))
         for e in errors:
             writeerr("%s\n" % e)
         return len(errors)

     def applyembedded(args, embeddedfunc, opts):
         ret = 0
         if args:
             for f in args:
                 with opentext(f) as fp:
                     if showembedded(f, fp, embeddedfunc, opts):
                         ret = 1
         else:
             lines = [l for l in sys.stdin.readlines()]
             if showembedded('<stdin>', lines, embeddedfunc, opts):
                 ret = 1
         return ret

     commands = {}

     def command(name, desc):
         def wrap(func):
             commands[name] = (desc, func)

         return wrap

     @command("pyembedded", "detect embedded python script")
     def pyembeddedcmd(args, opts):
         return applyembedded(args, pyembedded, opts)

     @command("shembedded", "detect embedded shell script")
     def shembeddedcmd(args, opts):
         return applyembedded(args, shembedded, opts)

     @command("hgrcembedded", "detect embedded hgrc configuration")
     def hgrcembeddedcmd(args, opts):
         return applyembedded(args, hgrcembedded, opts)

     availablecommands = "\n".join(
         [" - %s: %s" % (key, value[0]) for key, value in commands.items()]
     )

     parser = optparse.OptionParser(
         """%prog COMMAND [file ...]

 Pick up embedded code fragments from given file(s) or stdin, and list
 up start/end lines of them in standard compiler format
 ("FILENAME:LINENO:").

 Available commands are:
 """
         + availablecommands
         + """
 """
     )
     parser.add_option(
         "-v",
         "--verbose",
         help="enable additional output (e.g. actual code)",
         action="store_true",
     )
     (opts, args) = parser.parse_args()

     if not args or args[0] not in commands:
         parser.print_help()
         sys.exit(255)

     sys.exit(commands[args[0]][1](args[1:], opts))
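
As with pyembedded() and shembedded(), everything funnels through the embedded() generator; a quick sketch of hgrcembedded() on invented input shows the $HGRCPATH alternative of the hgrc name pattern being matched:

sample = [
    '  $ cat >> $HGRCPATH << EOF\n',
    '  > [extensions]\n',
    '  > rebase =\n',
    '  > EOF\n',
]
errs = []
for name, starts, ends, code in hgrcembedded('<sample>', sample, errs):
    print(name, starts, ends)  # $HGRCPATH 2 4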
@@ -1,232 +1,242
 #!/usr/bin/env python3
 #
 # checkseclevel - checking section title levels in each online help document


 import optparse
 import os
 import sys

 # import from the live mercurial repo
 os.environ['HGMODULEPOLICY'] = 'py'
 sys.path.insert(0, os.path.abspath(".."))
 from mercurial import demandimport

 demandimport.enable()
 from mercurial import (
     commands,
     extensions,
     help,
     minirst,
     ui as uimod,
 )

 table = commands.table
 helptable = help.helptable

 level2mark = [b'"', b'=', b'-', b'.', b'#']
 reservedmarks = [b'"']

 mark2level = {}
 for m, l in zip(level2mark, range(len(level2mark))):
     if m not in reservedmarks:
         mark2level[m] = l

 initlevel_topic = 0
 initlevel_cmd = 1
 initlevel_ext = 1
 initlevel_ext_cmd = 3


 def showavailables(ui, initlevel):
     avail = '  available marks and order of them in this help: %s\n' % (
         ', '.join(['%r' % (m * 4) for m in level2mark[initlevel + 1 :]])
     )
     ui.warn(avail.encode('utf-8'))


 def checkseclevel(ui, doc, name, initlevel):
-    ui.notenoi18n('checking "%s"\n' % name)
+    ui.notenoi18n(('checking "%s"\n' % name).encode('utf-8'))
     if not isinstance(doc, bytes):
         doc = doc.encode('utf-8')
     blocks, pruned = minirst.parse(doc, 0, ['verbose'])
     errorcnt = 0
     curlevel = initlevel
     for block in blocks:
         if block[b'type'] != b'section':
             continue
         mark = block[b'underline']
         title = block[b'lines'][0]
         if (mark not in mark2level) or (mark2level[mark] <= initlevel):
             ui.warn(
                 (
                     'invalid section mark %r for "%s" of %s\n'
                     % (mark * 4, title, name)
                 ).encode('utf-8')
             )
             showavailables(ui, initlevel)
             errorcnt += 1
             continue
         nextlevel = mark2level[mark]
         if curlevel < nextlevel and curlevel + 1 != nextlevel:
             ui.warnnoi18n(
-                'gap of section level at "%s" of %s\n' % (title, name)
+                ('gap of section level at "%s" of %s\n' % (title, name)).encode(
+                    'utf-8'
+                )
             )
             showavailables(ui, initlevel)
             errorcnt += 1
             continue
         ui.notenoi18n(
-            'appropriate section level for "%s %s"\n'
-            % (mark * (nextlevel * 2), title)
+            (
+                'appropriate section level for "%s %s"\n'
+                % (mark * (nextlevel * 2), title)
+            ).encode('utf-8')
         )
         curlevel = nextlevel

     return errorcnt


 def checkcmdtable(ui, cmdtable, namefmt, initlevel):
     errorcnt = 0
     for k, entry in cmdtable.items():
         name = k.split(b"|")[0].lstrip(b"^")
         if not entry[0].__doc__:
             ui.notenoi18n(
-                'skip checking %s: no help document\n' % (namefmt % name)
+                (
+                    'skip checking %s: no help document\n' % (namefmt % name)
+                ).encode('utf-8')
             )
             continue
         errorcnt += checkseclevel(
             ui, entry[0].__doc__, namefmt % name, initlevel
         )
     return errorcnt


 def checkhghelps(ui):
     errorcnt = 0
     for h in helptable:
         names, sec, doc = h[0:3]
         if callable(doc):
             doc = doc(ui)
         errorcnt += checkseclevel(
             ui, doc, '%s help topic' % names[0], initlevel_topic
         )

     errorcnt += checkcmdtable(ui, table, '%s command', initlevel_cmd)

     for name in sorted(
         list(extensions.enabled()) + list(extensions.disabled())
     ):
         mod = extensions.load(ui, name, None)
         if not mod.__doc__:
             ui.notenoi18n(
-                'skip checking %s extension: no help document\n' % name
+                (
+                    'skip checking %s extension: no help document\n' % name
+                ).encode('utf-8')
             )
             continue
         errorcnt += checkseclevel(
             ui, mod.__doc__, '%s extension' % name, initlevel_ext
         )

         cmdtable = getattr(mod, 'cmdtable', None)
         if cmdtable:
             errorcnt += checkcmdtable(
                 ui,
                 cmdtable,
                 '%%s command of %s extension' % name,
                 initlevel_ext_cmd,
             )
     return errorcnt


 def checkfile(ui, filename, initlevel):
     if filename == '-':
         filename = 'stdin'
         doc = sys.stdin.read()
     else:
         with open(filename) as fp:
             doc = fp.read()

     ui.notenoi18n(
-        'checking input from %s with initlevel %d\n' % (filename, initlevel)
+        (
+            'checking input from %s with initlevel %d\n' % (filename, initlevel)
+        ).encode('utf-8')
     )
     return checkseclevel(ui, doc, 'input from %s' % filename, initlevel)


 def main():
     optparser = optparse.OptionParser(
         """%prog [options]

 This checks all help documents of Mercurial (topics, commands,
 extensions and commands of them), if no file is specified by --file
 option.
 """
     )
     optparser.add_option(
         "-v", "--verbose", help="enable additional output", action="store_true"
     )
     optparser.add_option(
         "-d", "--debug", help="debug mode", action="store_true"
     )
     optparser.add_option(
         "-f",
         "--file",
         help="filename to read in (or '-' for stdin)",
         action="store",
         default="",
     )

     optparser.add_option(
         "-t",
         "--topic",
         help="parse file as help topic",
         action="store_const",
         dest="initlevel",
         const=0,
     )
     optparser.add_option(
         "-c",
         "--command",
         help="parse file as help of core command",
         action="store_const",
         dest="initlevel",
         const=1,
     )
     optparser.add_option(
         "-e",
         "--extension",
         help="parse file as help of extension",
         action="store_const",
         dest="initlevel",
         const=1,
     )
     optparser.add_option(
         "-C",
         "--extension-command",
         help="parse file as help of extension command",
         action="store_const",
         dest="initlevel",
         const=3,
     )

     optparser.add_option(
         "-l",
         "--initlevel",
         help="set initial section level manually",
         action="store",
         type="int",
         default=0,
     )

     (options, args) = optparser.parse_args()

     ui = uimod.ui.load()
     ui.setconfig(b'ui', b'verbose', options.verbose, b'--verbose')
     ui.setconfig(b'ui', b'debug', options.debug, b'--debug')

     if options.file:
         if checkfile(ui, options.file, options.initlevel):
             sys.exit(1)
     else:
         if checkhghelps(ui):
             sys.exit(1)


 if __name__ == "__main__":
     main()
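
The rule being enforced above is that a section may descend only one level at a time relative to its predecessor. Stripped of the ui plumbing, the check reduces to the standalone sketch below (no Mercurial imports; the has_gap helper name is ours, not the script's):

level2mark = [b'"', b'=', b'-', b'.', b'#']
mark2level = {m: l for l, m in enumerate(level2mark) if m != b'"'}  # b'"' is reserved

def has_gap(marks, initlevel):
    # True if some section skips a level relative to the one before it
    cur = initlevel
    for mark in marks:
        nxt = mark2level[mark]
        if cur < nxt and cur + 1 != nxt:
            return True
        cur = nxt
    return False

assert not has_gap([b'=', b'-', b'.'], initlevel=0)  # levels 1, 2, 3: fine
assert has_gap([b'=', b'.'], initlevel=0)            # level 1 then 3: gap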
@@ -1,174 +1,178
 # demandimportpy3 - global demand-loading of modules for Mercurial
 #
 # Copyright 2017 Facebook Inc.
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 """Lazy loading for Python 3.6 and above.

 This uses the new importlib finder/loader functionality available in Python 3.5
 and up. The code reuses most of the mechanics implemented inside importlib.util,
 but with a few additions:

 * Allow excluding certain modules from lazy imports.
 * Expose an interface that's substantially the same as demandimport for
   Python 2.

 This also has some limitations compared to the Python 2 implementation:

 * Much of the logic is per-package, not per-module, so any packages loaded
   before demandimport is enabled will not be lazily imported in the future. In
   practice, we only expect builtins to be loaded before demandimport is
   enabled.
 """

-# This line is unnecessary, but it satisfies test-check-py3-compat.t.
-
 import contextlib
 import importlib.util
 import sys

 from . import tracing

 _deactivated = False


 class _lazyloaderex(importlib.util.LazyLoader):
     """This is a LazyLoader except it also follows the _deactivated global and
     the ignore list.
     """

+    _HAS_DYNAMIC_ATTRIBUTES = True  # help pytype not flag self.loader
+
     def exec_module(self, module):
         """Make the module load lazily."""
         with tracing.log('demandimport %s', module):
             if _deactivated or module.__name__ in ignores:
+                # Reset the loader on the module as super() does (issue6725)
+                module.__spec__.loader = self.loader
+                module.__loader__ = self.loader
+
                 self.loader.exec_module(module)
             else:
                 super().exec_module(module)

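For orientation, a hedged sketch (the module's actual enable() code lies outside this hunk, and the helper name below is hypothetical): a LazyFinder is meant to wrap each entry on sys.meta_path, so that every spec those finders return comes back carrying the lazy loader proxy. Installing the wrappers would look roughly like:

import sys

def _install_lazy_finders():
    # Wrap each meta-path finder once; LazyFinder proxies everything else
    # through __getattribute__, so wrapped finders keep their behavior.
    sys.meta_path[:] = [
        f if isinstance(f, LazyFinder) else LazyFinder(f) for f in sys.meta_path
    ]
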
 class LazyFinder:
     """A wrapper around a ``MetaPathFinder`` that makes loaders lazy.

     ``sys.meta_path`` finders have their ``find_spec()`` called to locate a
     module. This returns a ``ModuleSpec`` if found or ``None``. The
     ``ModuleSpec`` has a ``loader`` attribute, which is called to actually
     load a module.

     Our class wraps an existing finder and overloads its ``find_spec()`` to
     replace the ``loader`` with our lazy loader proxy.

     We have to use __getattribute__ to proxy the instance because some meta
     path finders don't support monkeypatching.
     """

     __slots__ = ("_finder",)

     def __init__(self, finder):
         object.__setattr__(self, "_finder", finder)

     def __repr__(self):
         return "<LazyFinder for %r>" % object.__getattribute__(self, "_finder")

     # __bool__ is canonical Python 3. But check-code insists on __nonzero__
     # being defined via `def`.
     def __nonzero__(self):
         return bool(object.__getattribute__(self, "_finder"))

     __bool__ = __nonzero__

     def __getattribute__(self, name):
         if name in ("_finder", "find_spec"):
             return object.__getattribute__(self, name)

         return getattr(object.__getattribute__(self, "_finder"), name)

     def __delattr__(self, name):
         return delattr(object.__getattribute__(self, "_finder"), name)

     def __setattr__(self, name, value):
         return setattr(object.__getattribute__(self, "_finder"), name, value)

     def find_spec(self, fullname, path, target=None):
         finder = object.__getattribute__(self, "_finder")
         try:
             find_spec = finder.find_spec
         except AttributeError:
             loader = finder.find_module(fullname, path)
             if loader is None:
                 spec = None
             else:
                 spec = importlib.util.spec_from_loader(fullname, loader)
         else:
             spec = find_spec(fullname, path, target)

         # Lazy loader requires exec_module().
107 if (
111 if (
108 spec is not None
112 spec is not None
109 and spec.loader is not None
113 and spec.loader is not None
110 and getattr(spec.loader, "exec_module", None)
114 and getattr(spec.loader, "exec_module", None)
111 ):
115 ):
112 spec.loader = _lazyloaderex(spec.loader)
116 spec.loader = _lazyloaderex(spec.loader)
113
117
114 return spec
118 return spec
115
119
116
120
117 ignores = set()
121 ignores = set()
118
122
119
123
120 def init(ignoreset):
124 def init(ignoreset):
121 global ignores
125 global ignores
122 ignores = ignoreset
126 ignores = ignoreset
123
127
124
128
125 def isenabled():
129 def isenabled():
126 return not _deactivated and any(
130 return not _deactivated and any(
127 isinstance(finder, LazyFinder) for finder in sys.meta_path
131 isinstance(finder, LazyFinder) for finder in sys.meta_path
128 )
132 )
129
133
130
134
131 def disable():
135 def disable():
132 new_finders = []
136 new_finders = []
133 for finder in sys.meta_path:
137 for finder in sys.meta_path:
134 new_finders.append(
138 new_finders.append(
135 finder._finder if isinstance(finder, LazyFinder) else finder
139 finder._finder if isinstance(finder, LazyFinder) else finder
136 )
140 )
137 sys.meta_path[:] = new_finders
141 sys.meta_path[:] = new_finders
138
142
139
143
140 def enable():
144 def enable():
141 new_finders = []
145 new_finders = []
142 for finder in sys.meta_path:
146 for finder in sys.meta_path:
143 new_finders.append(
147 new_finders.append(
144 LazyFinder(finder) if not isinstance(finder, LazyFinder) else finder
148 LazyFinder(finder) if not isinstance(finder, LazyFinder) else finder
145 )
149 )
146 sys.meta_path[:] = new_finders
150 sys.meta_path[:] = new_finders
147
151
148
152
149 @contextlib.contextmanager
153 @contextlib.contextmanager
150 def deactivated():
154 def deactivated():
151 # This implementation is a bit different from Python 2's. Python 3
155 # This implementation is a bit different from Python 2's. Python 3
152 # maintains a per-package finder cache in sys.path_importer_cache (see
156 # maintains a per-package finder cache in sys.path_importer_cache (see
153 # PEP 302). This means that we can't just call disable + enable.
157 # PEP 302). This means that we can't just call disable + enable.
154 # If we do that, in situations like:
158 # If we do that, in situations like:
155 #
159 #
156 # demandimport.enable()
160 # demandimport.enable()
157 # ...
161 # ...
158 # from foo.bar import mod1
162 # from foo.bar import mod1
159 # with demandimport.deactivated():
163 # with demandimport.deactivated():
160 # from foo.bar import mod2
164 # from foo.bar import mod2
161 #
165 #
162 # mod2 will be imported lazily. (The converse also holds -- whatever finder
166 # mod2 will be imported lazily. (The converse also holds -- whatever finder
163 # first gets cached will be used.)
167 # first gets cached will be used.)
164 #
168 #
165 # Instead, have a global flag the LazyLoader can use.
169 # Instead, have a global flag the LazyLoader can use.
166 global _deactivated
170 global _deactivated
167 demandenabled = isenabled()
171 demandenabled = isenabled()
168 if demandenabled:
172 if demandenabled:
169 _deactivated = True
173 _deactivated = True
170 try:
174 try:
171 yield
175 yield
172 finally:
176 finally:
173 if demandenabled:
177 if demandenabled:
174 _deactivated = False
178 _deactivated = False
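# Caller-side usage sketch for the API above (import path assumed, not
# verified against this repository's layout):
#
#   from hgdemandimport import demandimportpy3 as demandimport
#
#   demandimport.init(ignoreset={'some_fragile_module'})
#   demandimport.enable()
#   with demandimport.deactivated():
#       import othermodule  # imported eagerly, bypassing the lazy loader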
@@ -1,1165 +1,1165
1 # absorb.py
1 # absorb.py
2 #
2 #
3 # Copyright 2016 Facebook, Inc.
3 # Copyright 2016 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """apply working directory changes to changesets (EXPERIMENTAL)
8 """apply working directory changes to changesets (EXPERIMENTAL)
9
9
10 The absorb extension provides a command to use annotate information to
10 The absorb extension provides a command to use annotate information to
11 amend modified chunks into the corresponding non-public changesets.
11 amend modified chunks into the corresponding non-public changesets.
12
12
13 ::
13 ::
14
14
15 [absorb]
15 [absorb]
16 # check at most 50 recent non-public changesets
16 # check at most 50 recent non-public changesets
17 max-stack-size = 50
17 max-stack-size = 50
18 # whether to add noise to new commits to avoid obsolescence cycle
18 # whether to add noise to new commits to avoid obsolescence cycle
19 add-noise = 1
19 add-noise = 1
20 # make `amend --correlated` a shortcut to the main command
20 # make `amend --correlated` a shortcut to the main command
21 amend-flag = correlated
21 amend-flag = correlated
22
22
23 [color]
23 [color]
24 absorb.description = yellow
24 absorb.description = yellow
25 absorb.node = blue bold
25 absorb.node = blue bold
26 absorb.path = bold
26 absorb.path = bold
27 """
27 """
28
28
29 # TODO:
29 # TODO:
30 # * Rename config items to [commands] namespace
30 # * Rename config items to [commands] namespace
31 # * Converge getdraftstack() with other code in core
31 # * Converge getdraftstack() with other code in core
32 # * move many attributes on fixupstate to be private
32 # * move many attributes on fixupstate to be private
33
33
34
34
35 import collections
35 import collections
36
36
37 from mercurial.i18n import _
37 from mercurial.i18n import _
38 from mercurial.node import (
38 from mercurial.node import (
39 hex,
39 hex,
40 short,
40 short,
41 )
41 )
42 from mercurial import (
42 from mercurial import (
43 cmdutil,
43 cmdutil,
44 commands,
44 commands,
45 context,
45 context,
46 crecord,
46 crecord,
47 error,
47 error,
48 linelog,
48 linelog,
49 mdiff,
49 mdiff,
50 obsolete,
50 obsolete,
51 patch,
51 patch,
52 phases,
52 phases,
53 pycompat,
53 pycompat,
54 registrar,
54 registrar,
55 rewriteutil,
55 rewriteutil,
56 scmutil,
56 scmutil,
57 util,
57 util,
58 )
58 )
59 from mercurial.utils import stringutil
59 from mercurial.utils import stringutil
60
60
61 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
61 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
62 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
62 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
63 # be specifying the version(s) of Mercurial they are tested with, or
63 # be specifying the version(s) of Mercurial they are tested with, or
64 # leave the attribute unspecified.
64 # leave the attribute unspecified.
65 testedwith = b'ships-with-hg-core'
65 testedwith = b'ships-with-hg-core'
66
66
67 cmdtable = {}
67 cmdtable = {}
68 command = registrar.command(cmdtable)
68 command = registrar.command(cmdtable)
69
69
70 configtable = {}
70 configtable = {}
71 configitem = registrar.configitem(configtable)
71 configitem = registrar.configitem(configtable)
72
72
73 configitem(b'absorb', b'add-noise', default=True)
73 configitem(b'absorb', b'add-noise', default=True)
74 configitem(b'absorb', b'amend-flag', default=None)
74 configitem(b'absorb', b'amend-flag', default=None)
75 configitem(b'absorb', b'max-stack-size', default=50)
75 configitem(b'absorb', b'max-stack-size', default=50)
76
76
77 colortable = {
77 colortable = {
78 b'absorb.description': b'yellow',
78 b'absorb.description': b'yellow',
79 b'absorb.node': b'blue bold',
79 b'absorb.node': b'blue bold',
80 b'absorb.path': b'bold',
80 b'absorb.path': b'bold',
81 }
81 }
82
82
83 defaultdict = collections.defaultdict
83 defaultdict = collections.defaultdict
84
84
85
85
86 class nullui:
86 class nullui:
87 """blank ui object doing nothing"""
87 """blank ui object doing nothing"""
88
88
89 debugflag = False
89 debugflag = False
90 verbose = False
90 verbose = False
91 quiet = True
91 quiet = True
92
92
93 def __getitem__(self, name):
93 def __getitem__(self, name):
94 def nullfunc(*args, **kwds):
94 def nullfunc(*args, **kwds):
95 return
95 return
96
96
97 return nullfunc
97 return nullfunc
98
98
99
99
100 class emptyfilecontext:
100 class emptyfilecontext:
101 """minimal filecontext representing an empty file"""
101 """minimal filecontext representing an empty file"""
102
102
103 def __init__(self, repo):
103 def __init__(self, repo):
104 self._repo = repo
104 self._repo = repo
105
105
106 def data(self):
106 def data(self):
107 return b''
107 return b''
108
108
109 def node(self):
109 def node(self):
110 return self._repo.nullid
110 return self._repo.nullid
111
111
112
112
113 def uniq(lst):
113 def uniq(lst):
114 """list -> list. remove duplicated items without changing the order"""
114 """list -> list. remove duplicated items without changing the order"""
115 seen = set()
115 seen = set()
116 result = []
116 result = []
117 for x in lst:
117 for x in lst:
118 if x not in seen:
118 if x not in seen:
119 seen.add(x)
119 seen.add(x)
120 result.append(x)
120 result.append(x)
121 return result
121 return result
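# e.g. uniq([3, 1, 3, 2, 1]) == [3, 1, 2] -- first occurrence wins, order kept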
122
122
123
123
124 def getdraftstack(headctx, limit=None):
124 def getdraftstack(headctx, limit=None):
125 """(ctx, int?) -> [ctx]. get a linear stack of non-public changesets.
125 """(ctx, int?) -> [ctx]. get a linear stack of non-public changesets.
126
126
127 changesets are sorted in topo order, oldest first.
127 changesets are sorted in topo order, oldest first.
128 return at most limit items, if limit is a positive number.
128 return at most limit items, if limit is a positive number.
129
129
130 merges are considered non-draft as well, i.e. every commit
130 merges are considered non-draft as well, i.e. every commit
131 returned has exactly one parent.
131 returned has exactly one parent.
132 """
132 """
133 ctx = headctx
133 ctx = headctx
134 result = []
134 result = []
135 while ctx.phase() != phases.public:
135 while ctx.phase() != phases.public:
136 if limit and len(result) >= limit:
136 if limit and len(result) >= limit:
137 break
137 break
138 parents = ctx.parents()
138 parents = ctx.parents()
139 if len(parents) != 1:
139 if len(parents) != 1:
140 break
140 break
141 result.append(ctx)
141 result.append(ctx)
142 ctx = parents[0]
142 ctx = parents[0]
143 result.reverse()
143 result.reverse()
144 return result
144 return result
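# e.g. for a linear history public--d1--d2--d3 (d* drafts, names made up),
# getdraftstack(repo[d3]) returns [d1, d2, d3]; a merge or a public parent
# stops the walk.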
145
145
146
146
147 def getfilestack(stack, path, seenfctxs=None):
147 def getfilestack(stack, path, seenfctxs=None):
148 """([ctx], str, set) -> [fctx], {ctx: fctx}
148 """([ctx], str, set) -> [fctx], {ctx: fctx}
149
149
150 stack is a list of contexts, from old to new. usually they are what
150 stack is a list of contexts, from old to new. usually they are what
151 "getdraftstack" returns.
151 "getdraftstack" returns.
152
152
153 follows renames, but not copies.
153 follows renames, but not copies.
154
154
155 seenfctxs is a set of filecontexts that will be considered "immutable".
155 seenfctxs is a set of filecontexts that will be considered "immutable".
156 they are usually what this function returned in earlier calls, useful
156 they are usually what this function returned in earlier calls, useful
157 to avoid problems when a file was "moved" to multiple places and then
157 to avoid problems when a file was "moved" to multiple places and then
158 modified differently, like: "a" was copied to "b", "a" was also copied to
158 modified differently, like: "a" was copied to "b", "a" was also copied to
159 "c" and then "a" was deleted, then both "b" and "c" were "moved" from "a"
159 "c" and then "a" was deleted, then both "b" and "c" were "moved" from "a"
160 and we allow only one of them to affect "a"'s content.
160 and we allow only one of them to affect "a"'s content.
161
161
162 return an empty list and an empty dict, if the specified path does not
162 return an empty list and an empty dict, if the specified path does not
163 exist in stack[-1] (the top of the stack).
163 exist in stack[-1] (the top of the stack).
164
164
165 otherwise, return a list of de-duplicated filecontexts, and the map to
165 otherwise, return a list of de-duplicated filecontexts, and the map to
166 convert a ctx in the stack to its fctx, for possibly mutable fctxs. the first item
166 convert a ctx in the stack to its fctx, for possibly mutable fctxs. the first item
167 of the list would be outside the stack and should be considered immutable.
167 of the list would be outside the stack and should be considered immutable.
168 the remaining items are within the stack.
168 the remaining items are within the stack.
169
169
170 for example, given the following changelog and corresponding filelog
170 for example, given the following changelog and corresponding filelog
171 revisions:
171 revisions:
172
172
173 changelog: 3----4----5----6----7
173 changelog: 3----4----5----6----7
174 filelog: x 0----1----1----2 (x: no such file yet)
174 filelog: x 0----1----1----2 (x: no such file yet)
175
175
176 - if stack = [5, 6, 7], returns ([0, 1, 2], {5: 1, 6: 1, 7: 2})
176 - if stack = [5, 6, 7], returns ([0, 1, 2], {5: 1, 6: 1, 7: 2})
177 - if stack = [3, 4, 5], returns ([e, 0, 1], {4: 0, 5: 1}), where "e" is a
177 - if stack = [3, 4, 5], returns ([e, 0, 1], {4: 0, 5: 1}), where "e" is a
178 dummy empty filecontext.
178 dummy empty filecontext.
179 - if stack = [2], returns ([], {})
179 - if stack = [2], returns ([], {})
180 - if stack = [7], returns ([1, 2], {7: 2})
180 - if stack = [7], returns ([1, 2], {7: 2})
181 - if stack = [6, 7], returns ([1, 2], {6: 1, 7: 2}), although {6: 1} can be
181 - if stack = [6, 7], returns ([1, 2], {6: 1, 7: 2}), although {6: 1} can be
182 removed, since 1 is immutable.
182 removed, since 1 is immutable.
183 """
183 """
184 if seenfctxs is None:
184 if seenfctxs is None:
185 seenfctxs = set()
185 seenfctxs = set()
186 assert stack
186 assert stack
187
187
188 if path not in stack[-1]:
188 if path not in stack[-1]:
189 return [], {}
189 return [], {}
190
190
191 fctxs = []
191 fctxs = []
192 fctxmap = {}
192 fctxmap = {}
193
193
194 pctx = stack[0].p1() # the public (immutable) ctx we stop at
194 pctx = stack[0].p1() # the public (immutable) ctx we stop at
195 for ctx in reversed(stack):
195 for ctx in reversed(stack):
196 if path not in ctx: # the file is added in the next commit
196 if path not in ctx: # the file is added in the next commit
197 pctx = ctx
197 pctx = ctx
198 break
198 break
199 fctx = ctx[path]
199 fctx = ctx[path]
200 fctxs.append(fctx)
200 fctxs.append(fctx)
201 if fctx in seenfctxs: # treat fctx as the immutable one
201 if fctx in seenfctxs: # treat fctx as the immutable one
202 pctx = None # do not add another immutable fctx
202 pctx = None # do not add another immutable fctx
203 break
203 break
204 fctxmap[ctx] = fctx # only for mutable fctxs
204 fctxmap[ctx] = fctx # only for mutable fctxs
205 copy = fctx.copysource()
205 copy = fctx.copysource()
206 if copy:
206 if copy:
207 path = copy # follow rename
207 path = copy # follow rename
208 if path in ctx: # but do not follow copy
208 if path in ctx: # but do not follow copy
209 pctx = ctx.p1()
209 pctx = ctx.p1()
210 break
210 break
211
211
212 if pctx is not None: # need an extra immutable fctx
212 if pctx is not None: # need an extra immutable fctx
213 if path in pctx:
213 if path in pctx:
214 fctxs.append(pctx[path])
214 fctxs.append(pctx[path])
215 else:
215 else:
216 fctxs.append(emptyfilecontext(pctx.repo()))
216 fctxs.append(emptyfilecontext(pctx.repo()))
217
217
218 fctxs.reverse()
218 fctxs.reverse()
219 # note: we rely on a property of hg: filerev is not reused for linear
219 # note: we rely on a property of hg: filerev is not reused for linear
220 # history. i.e. it's impossible to have:
220 # history. i.e. it's impossible to have:
221 # changelog: 4----5----6 (linear, no merges)
221 # changelog: 4----5----6 (linear, no merges)
222 # filelog: 1----2----1
222 # filelog: 1----2----1
223 # ^ reuse filerev (impossible)
223 # ^ reuse filerev (impossible)
224 # because parents are part of the hash. if that's not true, we need to
224 # because parents are part of the hash. if that's not true, we need to
225 # remove uniq and find a different way to identify fctxs.
225 # remove uniq and find a different way to identify fctxs.
226 return uniq(fctxs), fctxmap
226 return uniq(fctxs), fctxmap
227
227
228
228
229 class overlaystore(patch.filestore):
229 class overlaystore(patch.filestore):
230 """read-only, hybrid store based on a dict and ctx.
230 """read-only, hybrid store based on a dict and ctx.
231 memworkingcopy: {path: content}, overrides file contents.
231 memworkingcopy: {path: content}, overrides file contents.
232 """
232 """
233
233
234 def __init__(self, basectx, memworkingcopy):
234 def __init__(self, basectx, memworkingcopy):
235 self.basectx = basectx
235 self.basectx = basectx
236 self.memworkingcopy = memworkingcopy
236 self.memworkingcopy = memworkingcopy
237
237
238 def getfile(self, path):
238 def getfile(self, path):
239 """comply with mercurial.patch.filestore.getfile"""
239 """comply with mercurial.patch.filestore.getfile"""
240 if path not in self.basectx:
240 if path not in self.basectx:
241 return None, None, None
241 return None, None, None
242 fctx = self.basectx[path]
242 fctx = self.basectx[path]
243 if path in self.memworkingcopy:
243 if path in self.memworkingcopy:
244 content = self.memworkingcopy[path]
244 content = self.memworkingcopy[path]
245 else:
245 else:
246 content = fctx.data()
246 content = fctx.data()
247 mode = (fctx.islink(), fctx.isexec())
247 mode = (fctx.islink(), fctx.isexec())
248 copy = fctx.copysource()
248 copy = fctx.copysource()
249 return content, mode, copy
249 return content, mode, copy
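# Sketch: override one file's content on top of a context (the path and
# bytes are made up; the path must exist in ctx or getfile returns Nones):
#
#   store = overlaystore(ctx, {b'a.txt': b'new content\n'})
#   content, mode, copy = store.getfile(b'a.txt')  # -> b'new content\n', ...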
250
250
251
251
252 def overlaycontext(memworkingcopy, ctx, parents=None, extra=None, desc=None):
252 def overlaycontext(memworkingcopy, ctx, parents=None, extra=None, desc=None):
253 """({path: content}, ctx, (p1node, p2node)?, {}?) -> memctx
253 """({path: content}, ctx, (p1node, p2node)?, {}?) -> memctx
254 memworkingcopy overrides file contents.
254 memworkingcopy overrides file contents.
255 """
255 """
256 # parents must contain 2 items: (node1, node2)
256 # parents must contain 2 items: (node1, node2)
257 if parents is None:
257 if parents is None:
258 parents = ctx.repo().changelog.parents(ctx.node())
258 parents = ctx.repo().changelog.parents(ctx.node())
259 if extra is None:
259 if extra is None:
260 extra = ctx.extra()
260 extra = ctx.extra()
261 if desc is None:
261 if desc is None:
262 desc = ctx.description()
262 desc = ctx.description()
263 date = ctx.date()
263 date = ctx.date()
264 user = ctx.user()
264 user = ctx.user()
265 files = set(ctx.files()).union(memworkingcopy)
265 files = set(ctx.files()).union(memworkingcopy)
266 store = overlaystore(ctx, memworkingcopy)
266 store = overlaystore(ctx, memworkingcopy)
267 return context.memctx(
267 return context.memctx(
268 repo=ctx.repo(),
268 repo=ctx.repo(),
269 parents=parents,
269 parents=parents,
270 text=desc,
270 text=desc,
271 files=files,
271 files=files,
272 filectxfn=store,
272 filectxfn=store,
273 user=user,
273 user=user,
274 date=date,
274 date=date,
275 branch=None,
275 branch=None,
276 extra=extra,
276 extra=extra,
277 )
277 )
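# Sketch: amend one file of ctx in memory, then commit the resulting memctx
# (path and content are made up; commitctx is the stock repo API):
#
#   memctx = overlaycontext({b'f.txt': b'fixed\n'}, ctx)
#   newnode = ctx.repo().commitctx(memctx)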
278
278
279
279
280 class filefixupstate:
280 class filefixupstate:
281 """state needed to apply fixups to a single file
281 """state needed to apply fixups to a single file
282
282
283 internally, it keeps file contents of several revisions and a linelog.
283 internally, it keeps file contents of several revisions and a linelog.
284
284
285 the linelog uses odd revision numbers for original contents (fctxs passed
285 the linelog uses odd revision numbers for original contents (fctxs passed
286 to __init__), and even revision numbers for fixups, like:
286 to __init__), and even revision numbers for fixups, like:
287
287
288 linelog rev 1: self.fctxs[0] (from an immutable "public" changeset)
288 linelog rev 1: self.fctxs[0] (from an immutable "public" changeset)
289 linelog rev 2: fixups made to self.fctxs[0]
289 linelog rev 2: fixups made to self.fctxs[0]
290 linelog rev 3: self.fctxs[1] (a child of fctxs[0])
290 linelog rev 3: self.fctxs[1] (a child of fctxs[0])
291 linelog rev 4: fixups made to self.fctxs[1]
291 linelog rev 4: fixups made to self.fctxs[1]
292 ...
292 ...
293
293
294 a typical use is like:
294 a typical use is like:
295
295
296 1. call diffwith, to calculate self.fixups
296 1. call diffwith, to calculate self.fixups
297 2. (optionally), present self.fixups to the user, or change it
297 2. (optionally), present self.fixups to the user, or change it
298 3. call apply, to apply changes
298 3. call apply, to apply changes
299 4. read results from "finalcontents", or call getfinalcontent
299 4. read results from "finalcontents", or call getfinalcontent
300 """
300 """
301
301
302 def __init__(self, fctxs, path, ui=None, opts=None):
302 def __init__(self, fctxs, path, ui=None, opts=None):
303 """([fctx], ui or None) -> None
303 """([fctx], ui or None) -> None
304
304
305 fctxs should be linear, and sorted by topo order - oldest first.
305 fctxs should be linear, and sorted by topo order - oldest first.
306 fctxs[0] will be considered as "immutable" and will not be changed.
306 fctxs[0] will be considered as "immutable" and will not be changed.
307 """
307 """
308 self.fctxs = fctxs
308 self.fctxs = fctxs
309 self.path = path
309 self.path = path
310 self.ui = ui or nullui()
310 self.ui = ui or nullui()
311 self.opts = opts or {}
311 self.opts = opts or {}
312
312
313 # following fields are built from fctxs. they exist for perf reasons
313 # following fields are built from fctxs. they exist for perf reasons
314 self.contents = [f.data() for f in fctxs]
314 self.contents = [f.data() for f in fctxs]
315 self.contentlines = pycompat.maplist(mdiff.splitnewlines, self.contents)
315 self.contentlines = pycompat.maplist(mdiff.splitnewlines, self.contents)
316 self.linelog = self._buildlinelog()
316 self.linelog = self._buildlinelog()
317 if self.ui.debugflag:
317 if self.ui.debugflag:
318 assert self._checkoutlinelog() == self.contents
318 assert self._checkoutlinelog() == self.contents
319
319
320 # following fields will be filled later
320 # following fields will be filled later
321 self.chunkstats = [0, 0] # [adopted, total : int]
321 self.chunkstats = [0, 0] # [adopted, total : int]
322 self.targetlines = [] # [str]
322 self.targetlines = [] # [str]
323 self.fixups = [] # [(linelog rev, a1, a2, b1, b2)]
323 self.fixups = [] # [(linelog rev, a1, a2, b1, b2)]
324 self.finalcontents = [] # [str]
324 self.finalcontents = [] # [str]
325 self.ctxaffected = set()
325 self.ctxaffected = set()
326
326
327 def diffwith(self, targetfctx, fm=None):
327 def diffwith(self, targetfctx, fm=None):
328 """calculate fixups needed by examining the differences between
328 """calculate fixups needed by examining the differences between
329 self.fctxs[-1] and targetfctx, chunk by chunk.
329 self.fctxs[-1] and targetfctx, chunk by chunk.
330
330
331 targetfctx is the target state we move towards. we may or may not be
331 targetfctx is the target state we move towards. we may or may not be
332 able to get there because not all modified chunks can be amended into
332 able to get there because not all modified chunks can be amended into
333 a non-public fctx unambiguously.
333 a non-public fctx unambiguously.
334
334
335 call this only once, before apply().
335 call this only once, before apply().
336
336
337 update self.fixups, self.chunkstats, and self.targetlines.
337 update self.fixups, self.chunkstats, and self.targetlines.
338 """
338 """
339 a = self.contents[-1]
339 a = self.contents[-1]
340 alines = self.contentlines[-1]
340 alines = self.contentlines[-1]
341 b = targetfctx.data()
341 b = targetfctx.data()
342 blines = mdiff.splitnewlines(b)
342 blines = mdiff.splitnewlines(b)
343 self.targetlines = blines
343 self.targetlines = blines
344
344
345 self.linelog.annotate(self.linelog.maxrev)
345 self.linelog.annotate(self.linelog.maxrev)
346 annotated = self.linelog.annotateresult # [(linelog rev, linenum)]
346 annotated = self.linelog.annotateresult # [(linelog rev, linenum)]
347 assert len(annotated) == len(alines)
347 assert len(annotated) == len(alines)
348 # add a dummy end line to make insertion at the end easier
348 # add a dummy end line to make insertion at the end easier
349 if annotated:
349 if annotated:
350 dummyendline = (annotated[-1][0], annotated[-1][1] + 1)
350 dummyendline = (annotated[-1][0], annotated[-1][1] + 1)
351 annotated.append(dummyendline)
351 annotated.append(dummyendline)
352
352
353 # analyse diff blocks
353 # analyse diff blocks
354 for chunk in self._alldiffchunks(a, b, alines, blines):
354 for chunk in self._alldiffchunks(a, b, alines, blines):
355 newfixups = self._analysediffchunk(chunk, annotated)
355 newfixups = self._analysediffchunk(chunk, annotated)
356 self.chunkstats[0] += bool(newfixups) # 1 or 0
356 self.chunkstats[0] += bool(newfixups) # 1 or 0
357 self.chunkstats[1] += 1
357 self.chunkstats[1] += 1
358 self.fixups += newfixups
358 self.fixups += newfixups
359 if fm is not None:
359 if fm is not None:
360 self._showchanges(fm, alines, blines, chunk, newfixups)
360 self._showchanges(fm, alines, blines, chunk, newfixups)
361
361
362 def apply(self):
362 def apply(self):
363 """apply self.fixups. update self.linelog, self.finalcontents.
363 """apply self.fixups. update self.linelog, self.finalcontents.
364
364
365 call this only once, before getfinalcontent(), after diffwith().
365 call this only once, before getfinalcontent(), after diffwith().
366 """
366 """
367 # the following is unnecessary, as it's done by "diffwith":
367 # the following is unnecessary, as it's done by "diffwith":
368 # self.linelog.annotate(self.linelog.maxrev)
368 # self.linelog.annotate(self.linelog.maxrev)
369 for rev, a1, a2, b1, b2 in reversed(self.fixups):
369 for rev, a1, a2, b1, b2 in reversed(self.fixups):
370 blines = self.targetlines[b1:b2]
370 blines = self.targetlines[b1:b2]
371 if self.ui.debugflag:
371 if self.ui.debugflag:
372 idx = (max(rev - 1, 0)) // 2
372 idx = (max(rev - 1, 0)) // 2
373 self.ui.write(
373 self.ui.write(
374 _(b'%s: chunk %d:%d -> %d lines\n')
374 _(b'%s: chunk %d:%d -> %d lines\n')
375 % (short(self.fctxs[idx].node()), a1, a2, len(blines))
375 % (short(self.fctxs[idx].node()), a1, a2, len(blines))
376 )
376 )
377 self.linelog.replacelines(rev, a1, a2, b1, b2)
377 self.linelog.replacelines(rev, a1, a2, b1, b2)
378 if self.opts.get(b'edit_lines', False):
378 if self.opts.get(b'edit_lines', False):
379 self.finalcontents = self._checkoutlinelogwithedits()
379 self.finalcontents = self._checkoutlinelogwithedits()
380 else:
380 else:
381 self.finalcontents = self._checkoutlinelog()
381 self.finalcontents = self._checkoutlinelog()
382
382
383 def getfinalcontent(self, fctx):
383 def getfinalcontent(self, fctx):
384 """(fctx) -> str. get modified file content for a given filecontext"""
384 """(fctx) -> str. get modified file content for a given filecontext"""
385 idx = self.fctxs.index(fctx)
385 idx = self.fctxs.index(fctx)
386 return self.finalcontents[idx]
386 return self.finalcontents[idx]
387
387
388 def _analysediffchunk(self, chunk, annotated):
388 def _analysediffchunk(self, chunk, annotated):
389 """analyse a different chunk and return new fixups found
389 """analyse a different chunk and return new fixups found
390
390
391 return [] if no lines from the chunk can be safely applied.
391 return [] if no lines from the chunk can be safely applied.
392
392
393 the chunk (or lines) cannot be safely applied, if, for example:
393 the chunk (or lines) cannot be safely applied, if, for example:
394 - the modified (deleted) lines belong to a public changeset
394 - the modified (deleted) lines belong to a public changeset
395 (self.fctxs[0])
395 (self.fctxs[0])
396 - the chunk is a pure insertion and the adjacent lines (at most 2
396 - the chunk is a pure insertion and the adjacent lines (at most 2
397 lines) belong to different non-public changesets, or do not belong
397 lines) belong to different non-public changesets, or do not belong
398 to any non-public changesets.
398 to any non-public changesets.
399 - the chunk is modifying lines from different changesets.
399 - the chunk is modifying lines from different changesets.
400 in this case, if the number of lines deleted equals the number
400 in this case, if the number of lines deleted equals the number
401 of lines added, assume it's a simple 1:1 map (could be wrong).
401 of lines added, assume it's a simple 1:1 map (could be wrong).
402 otherwise, give up.
402 otherwise, give up.
403 - the chunk is modifying lines from a single non-public changeset,
403 - the chunk is modifying lines from a single non-public changeset,
404 but other revisions touch the area as well. i.e. the lines are
404 but other revisions touch the area as well. i.e. the lines are
405 not continuous as seen from the linelog.
405 not continuous as seen from the linelog.
406 """
406 """
407 a1, a2, b1, b2 = chunk
407 a1, a2, b1, b2 = chunk
408 # find involved indexes from annotate result
408 # find involved indexes from annotate result
409 involved = annotated[a1:a2]
409 involved = annotated[a1:a2]
410 if not involved and annotated: # a1 == a2 and a is not empty
410 if not involved and annotated: # a1 == a2 and a is not empty
411 # pure insertion, check nearby lines. ignore lines belonging
411 # pure insertion, check nearby lines. ignore lines belonging
412 # to the public (first) changeset (i.e. annotated[i][0] == 1)
412 # to the public (first) changeset (i.e. annotated[i][0] == 1)
413 nearbylinenums = {a2, max(0, a1 - 1)}
413 nearbylinenums = {a2, max(0, a1 - 1)}
414 involved = [
414 involved = [
415 annotated[i] for i in nearbylinenums if annotated[i][0] != 1
415 annotated[i] for i in nearbylinenums if annotated[i][0] != 1
416 ]
416 ]
417 involvedrevs = list({r for r, l in involved})
417 involvedrevs = list({r for r, l in involved})
418 newfixups = []
418 newfixups = []
419 if len(involvedrevs) == 1 and self._iscontinuous(a1, a2 - 1, True):
419 if len(involvedrevs) == 1 and self._iscontinuous(a1, a2 - 1, True):
420 # chunk belongs to a single revision
420 # chunk belongs to a single revision
421 rev = involvedrevs[0]
421 rev = involvedrevs[0]
422 if rev > 1:
422 if rev > 1:
423 fixuprev = rev + 1
423 fixuprev = rev + 1
424 newfixups.append((fixuprev, a1, a2, b1, b2))
424 newfixups.append((fixuprev, a1, a2, b1, b2))
425 elif a2 - a1 == b2 - b1 or b1 == b2:
425 elif a2 - a1 == b2 - b1 or b1 == b2:
426 # 1:1 line mapping, or chunk was deleted
426 # 1:1 line mapping, or chunk was deleted
427 for i in range(a1, a2):
427 for i in range(a1, a2):
428 rev, linenum = annotated[i]
428 rev, linenum = annotated[i]
429 if rev > 1:
429 if rev > 1:
430 if b1 == b2: # deletion, simply remove that single line
430 if b1 == b2: # deletion, simply remove that single line
431 nb1 = nb2 = 0
431 nb1 = nb2 = 0
432 else: # 1:1 line mapping, change the corresponding rev
432 else: # 1:1 line mapping, change the corresponding rev
433 nb1 = b1 + i - a1
433 nb1 = b1 + i - a1
434 nb2 = nb1 + 1
434 nb2 = nb1 + 1
435 fixuprev = rev + 1
435 fixuprev = rev + 1
436 newfixups.append((fixuprev, i, i + 1, nb1, nb2))
436 newfixups.append((fixuprev, i, i + 1, nb1, nb2))
437 return self._optimizefixups(newfixups)
437 return self._optimizefixups(newfixups)
438
438
439 @staticmethod
439 @staticmethod
440 def _alldiffchunks(a, b, alines, blines):
440 def _alldiffchunks(a, b, alines, blines):
441 """like mdiff.allblocks, but only care about differences"""
441 """like mdiff.allblocks, but only care about differences"""
442 blocks = mdiff.allblocks(a, b, lines1=alines, lines2=blines)
442 blocks = mdiff.allblocks(a, b, lines1=alines, lines2=blines)
443 for chunk, btype in blocks:
443 for chunk, btype in blocks:
444 if btype != b'!':
444 if btype != b'!':
445 continue
445 continue
446 yield chunk
446 yield chunk
447
447
448 def _buildlinelog(self):
448 def _buildlinelog(self):
449 """calculate the initial linelog based on self.content{,line}s.
449 """calculate the initial linelog based on self.content{,line}s.
450 this is similar to running a partial "annotate".
450 this is similar to running a partial "annotate".
451 """
451 """
452 llog = linelog.linelog()
452 llog = linelog.linelog()
453 a, alines = b'', []
453 a, alines = b'', []
454 for i in range(len(self.contents)):
454 for i in range(len(self.contents)):
455 b, blines = self.contents[i], self.contentlines[i]
455 b, blines = self.contents[i], self.contentlines[i]
456 llrev = i * 2 + 1
456 llrev = i * 2 + 1
457 chunks = self._alldiffchunks(a, b, alines, blines)
457 chunks = self._alldiffchunks(a, b, alines, blines)
458 for a1, a2, b1, b2 in reversed(list(chunks)):
458 for a1, a2, b1, b2 in reversed(list(chunks)):
459 llog.replacelines(llrev, a1, a2, b1, b2)
459 llog.replacelines(llrev, a1, a2, b1, b2)
460 a, alines = b, blines
460 a, alines = b, blines
461 return llog
461 return llog
462
462
463 def _checkoutlinelog(self):
463 def _checkoutlinelog(self):
464 """() -> [str]. check out file contents from linelog"""
464 """() -> [str]. check out file contents from linelog"""
465 contents = []
465 contents = []
466 for i in range(len(self.contents)):
466 for i in range(len(self.contents)):
467 rev = (i + 1) * 2
467 rev = (i + 1) * 2
468 self.linelog.annotate(rev)
468 self.linelog.annotate(rev)
469 content = b''.join(map(self._getline, self.linelog.annotateresult))
469 content = b''.join(map(self._getline, self.linelog.annotateresult))
470 contents.append(content)
470 contents.append(content)
471 return contents
471 return contents
472
472
473 def _checkoutlinelogwithedits(self):
473 def _checkoutlinelogwithedits(self):
474 """() -> [str]. prompt all lines for edit"""
474 """() -> [str]. prompt all lines for edit"""
475 alllines = self.linelog.getalllines()
475 alllines = self.linelog.getalllines()
476 # header
476 # header
477 editortext = (
477 editortext = (
478 _(
478 _(
479 b'HG: editing %s\nHG: "y" means the line to the right '
479 b'HG: editing %s\nHG: "y" means the line to the right '
480 b'exists in the changeset to the top\nHG:\n'
480 b'exists in the changeset to the top\nHG:\n'
481 )
481 )
482 % self.fctxs[-1].path()
482 % self.fctxs[-1].path()
483 )
483 )
484 # [(idx, fctx)]. hide the dummy emptyfilecontext
484 # [(idx, fctx)]. hide the dummy emptyfilecontext
485 visiblefctxs = [
485 visiblefctxs = [
486 (i, f)
486 (i, f)
487 for i, f in enumerate(self.fctxs)
487 for i, f in enumerate(self.fctxs)
488 if not isinstance(f, emptyfilecontext)
488 if not isinstance(f, emptyfilecontext)
489 ]
489 ]
490 for i, (j, f) in enumerate(visiblefctxs):
490 for i, (j, f) in enumerate(visiblefctxs):
491 editortext += _(b'HG: %s/%s %s %s\n') % (
491 editortext += _(b'HG: %s/%s %s %s\n') % (
492 b'|' * i,
492 b'|' * i,
493 b'-' * (len(visiblefctxs) - i + 1),
493 b'-' * (len(visiblefctxs) - i + 1),
494 short(f.node()),
494 short(f.node()),
495 f.description().split(b'\n', 1)[0],
495 f.description().split(b'\n', 1)[0],
496 )
496 )
497 editortext += _(b'HG: %s\n') % (b'|' * len(visiblefctxs))
497 editortext += _(b'HG: %s\n') % (b'|' * len(visiblefctxs))
498 # figure out the lifetime of a line; this is relatively inefficient,
498 # figure out the lifetime of a line; this is relatively inefficient,
499 # but probably fine
499 # but probably fine
500 lineset = defaultdict(lambda: set()) # {(llrev, linenum): {llrev}}
500 lineset = defaultdict(lambda: set()) # {(llrev, linenum): {llrev}}
501 for i, f in visiblefctxs:
501 for i, f in visiblefctxs:
502 self.linelog.annotate((i + 1) * 2)
502 self.linelog.annotate((i + 1) * 2)
503 for l in self.linelog.annotateresult:
503 for l in self.linelog.annotateresult:
504 lineset[l].add(i)
504 lineset[l].add(i)
505 # append lines
505 # append lines
506 for l in alllines:
506 for l in alllines:
507 editortext += b' %s : %s' % (
507 editortext += b' %s : %s' % (
508 b''.join(
508 b''.join(
509 [
509 [
510 (b'y' if i in lineset[l] else b' ')
510 (b'y' if i in lineset[l] else b' ')
511 for i, _f in visiblefctxs
511 for i, _f in visiblefctxs
512 ]
512 ]
513 ),
513 ),
514 self._getline(l),
514 self._getline(l),
515 )
515 )
516 # run editor
516 # run editor
517 editedtext = self.ui.edit(editortext, b'', action=b'absorb')
517 editedtext = self.ui.edit(editortext, b'', action=b'absorb')
518 if not editedtext:
518 if not editedtext:
519 raise error.InputError(_(b'empty editor text'))
519 raise error.InputError(_(b'empty editor text'))
520 # parse edited result
520 # parse edited result
521 contents = [b''] * len(self.fctxs)
521 contents = [b''] * len(self.fctxs)
522 leftpadpos = 4
522 leftpadpos = 4
523 colonpos = leftpadpos + len(visiblefctxs) + 1
523 colonpos = leftpadpos + len(visiblefctxs) + 1
524 for l in mdiff.splitnewlines(editedtext):
524 for l in mdiff.splitnewlines(editedtext):
525 if l.startswith(b'HG:'):
525 if l.startswith(b'HG:'):
526 continue
526 continue
527 if l[colonpos - 1 : colonpos + 2] != b' : ':
527 if l[colonpos - 1 : colonpos + 2] != b' : ':
528 raise error.InputError(_(b'malformed line: %s') % l)
528 raise error.InputError(_(b'malformed line: %s') % l)
529 linecontent = l[colonpos + 2 :]
529 linecontent = l[colonpos + 2 :]
530 for i, ch in enumerate(
530 for i, ch in enumerate(
531 pycompat.bytestr(l[leftpadpos : colonpos - 1])
531 pycompat.bytestr(l[leftpadpos : colonpos - 1])
532 ):
532 ):
533 if ch == b'y':
533 if ch == b'y':
534 contents[visiblefctxs[i][0]] += linecontent
534 contents[visiblefctxs[i][0]] += linecontent
535 # chunkstats is hard to calculate if anything changes, therefore
535 # chunkstats is hard to calculate if anything changes, therefore
536 # set them to just a simple value (1, 1).
536 # set them to just a simple value (1, 1).
537 if editedtext != editortext:
537 if editedtext != editortext:
538 self.chunkstats = [1, 1]
538 self.chunkstats = [1, 1]
539 return contents
539 return contents
540
540
541 def _getline(self, lineinfo):
541 def _getline(self, lineinfo):
542 """((rev, linenum)) -> str. convert rev+line number to line content"""
542 """((rev, linenum)) -> str. convert rev+line number to line content"""
543 rev, linenum = lineinfo
543 rev, linenum = lineinfo
544 if rev & 1: # odd: original line taken from fctxs
544 if rev & 1: # odd: original line taken from fctxs
545 return self.contentlines[rev // 2][linenum]
545 return self.contentlines[rev // 2][linenum]
546 else: # even: fixup line from targetfctx
546 else: # even: fixup line from targetfctx
547 return self.targetlines[linenum]
547 return self.targetlines[linenum]
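# e.g. _getline((3, 5)) -> self.contentlines[1][5], a line of fctxs[1]'s
# original content; _getline((4, 5)) -> self.targetlines[5], a fixup line.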
548
548
549 def _iscontinuous(self, a1, a2, closedinterval=False):
549 def _iscontinuous(self, a1, a2, closedinterval=False):
550 """(a1, a2 : int) -> bool
550 """(a1, a2 : int) -> bool
551
551
552 check if these lines are continuous. i.e. no other insertions or
552 check if these lines are continuous. i.e. no other insertions or
553 deletions (from other revisions) among these lines.
553 deletions (from other revisions) among these lines.
554
554
555 closedinterval decides whether a2 should be included or not. i.e. is
555 closedinterval decides whether a2 should be included or not. i.e. is
556 it [a1, a2), or [a1, a2] ?
556 it [a1, a2), or [a1, a2] ?
557 """
557 """
558 if a1 >= a2:
558 if a1 >= a2:
559 return True
559 return True
560 llog = self.linelog
560 llog = self.linelog
561 offset1 = llog.getoffset(a1)
561 offset1 = llog.getoffset(a1)
562 offset2 = llog.getoffset(a2) + int(closedinterval)
562 offset2 = llog.getoffset(a2) + int(closedinterval)
563 linesinbetween = llog.getalllines(offset1, offset2)
563 linesinbetween = llog.getalllines(offset1, offset2)
564 return len(linesinbetween) == a2 - a1 + int(closedinterval)
564 return len(linesinbetween) == a2 - a1 + int(closedinterval)
565
565
566 def _optimizefixups(self, fixups):
566 def _optimizefixups(self, fixups):
567 """[(rev, a1, a2, b1, b2)] -> [(rev, a1, a2, b1, b2)].
567 """[(rev, a1, a2, b1, b2)] -> [(rev, a1, a2, b1, b2)].
568 merge adjacent fixups to make them less fragmented.
568 merge adjacent fixups to make them less fragmented.
569 """
569 """
570 result = []
570 result = []
571 pcurrentchunk = [[-1, -1, -1, -1, -1]]
571 pcurrentchunk = [[-1, -1, -1, -1, -1]]
572
572
573 def pushchunk():
573 def pushchunk():
574 if pcurrentchunk[0][0] != -1:
574 if pcurrentchunk[0][0] != -1:
575 result.append(tuple(pcurrentchunk[0]))
575 result.append(tuple(pcurrentchunk[0]))
576
576
577 for i, chunk in enumerate(fixups):
577 for i, chunk in enumerate(fixups):
578 rev, a1, a2, b1, b2 = chunk
578 rev, a1, a2, b1, b2 = chunk
579 lastrev = pcurrentchunk[0][0]
579 lastrev = pcurrentchunk[0][0]
580 lasta2 = pcurrentchunk[0][2]
580 lasta2 = pcurrentchunk[0][2]
581 lastb2 = pcurrentchunk[0][4]
581 lastb2 = pcurrentchunk[0][4]
582 if (
582 if (
583 a1 == lasta2
583 a1 == lasta2
584 and b1 == lastb2
584 and b1 == lastb2
585 and rev == lastrev
585 and rev == lastrev
586 and self._iscontinuous(max(a1 - 1, 0), a1)
586 and self._iscontinuous(max(a1 - 1, 0), a1)
587 ):
587 ):
588 # merge into currentchunk
588 # merge into currentchunk
589 pcurrentchunk[0][2] = a2
589 pcurrentchunk[0][2] = a2
590 pcurrentchunk[0][4] = b2
590 pcurrentchunk[0][4] = b2
591 else:
591 else:
592 pushchunk()
592 pushchunk()
593 pcurrentchunk[0] = list(chunk)
593 pcurrentchunk[0] = list(chunk)
594 pushchunk()
594 pushchunk()
595 return result
595 return result
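# e.g. [(2, 0, 2, 0, 2), (2, 2, 4, 2, 4)] collapses to [(2, 0, 4, 0, 4)],
# provided _iscontinuous(1, 2) holds (same rev, touching a/b ranges).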
596
596
597 def _showchanges(self, fm, alines, blines, chunk, fixups):
597 def _showchanges(self, fm, alines, blines, chunk, fixups):
598 def trim(line):
598 def trim(line):
599 if line.endswith(b'\n'):
599 if line.endswith(b'\n'):
600 line = line[:-1]
600 line = line[:-1]
601 return line
601 return line
602
602
603 # this is not optimized for perf but _showchanges only gets executed
603 # this is not optimized for perf but _showchanges only gets executed
604 # with an extra command-line flag.
604 # with an extra command-line flag.
605 a1, a2, b1, b2 = chunk
605 a1, a2, b1, b2 = chunk
606 aidxs, bidxs = [0] * (a2 - a1), [0] * (b2 - b1)
606 aidxs, bidxs = [0] * (a2 - a1), [0] * (b2 - b1)
607 for idx, fa1, fa2, fb1, fb2 in fixups:
607 for idx, fa1, fa2, fb1, fb2 in fixups:
608 for i in range(fa1, fa2):
608 for i in range(fa1, fa2):
609 aidxs[i - a1] = (max(idx, 1) - 1) // 2
609 aidxs[i - a1] = (max(idx, 1) - 1) // 2
610 for i in range(fb1, fb2):
610 for i in range(fb1, fb2):
611 bidxs[i - b1] = (max(idx, 1) - 1) // 2
611 bidxs[i - b1] = (max(idx, 1) - 1) // 2
612
612
613 fm.startitem()
613 fm.startitem()
614 fm.write(
614 fm.write(
615 b'hunk',
615 b'hunk',
616 b' %s\n',
616 b' %s\n',
617 b'@@ -%d,%d +%d,%d @@' % (a1, a2 - a1, b1, b2 - b1),
617 b'@@ -%d,%d +%d,%d @@' % (a1, a2 - a1, b1, b2 - b1),
618 label=b'diff.hunk',
618 label=b'diff.hunk',
619 )
619 )
620 fm.data(path=self.path, linetype=b'hunk')
620 fm.data(path=self.path, linetype=b'hunk')
621
621
622 def writeline(idx, diffchar, line, linetype, linelabel):
622 def writeline(idx, diffchar, line, linetype, linelabel):
623 fm.startitem()
623 fm.startitem()
624 node = b''
624 node = b''
625 if idx:
625 if idx:
626 ctx = self.fctxs[idx]
626 ctx = self.fctxs[idx]
627 fm.context(fctx=ctx)
627 fm.context(fctx=ctx)
628 node = ctx.hex()
628 node = ctx.hex()
629 self.ctxaffected.add(ctx.changectx())
629 self.ctxaffected.add(ctx.changectx())
630 fm.write(b'node', b'%-7.7s ', node, label=b'absorb.node')
630 fm.write(b'node', b'%-7.7s ', node, label=b'absorb.node')
631 fm.write(
631 fm.write(
632 b'diffchar ' + linetype,
632 b'diffchar ' + linetype,
633 b'%s%s\n',
633 b'%s%s\n',
634 diffchar,
634 diffchar,
635 line,
635 line,
636 label=linelabel,
636 label=linelabel,
637 )
637 )
638 fm.data(path=self.path, linetype=linetype)
638 fm.data(path=self.path, linetype=linetype)
639
639
640 for i in range(a1, a2):
640 for i in range(a1, a2):
641 writeline(
641 writeline(
642 aidxs[i - a1],
642 aidxs[i - a1],
643 b'-',
643 b'-',
644 trim(alines[i]),
644 trim(alines[i]),
645 b'deleted',
645 b'deleted',
646 b'diff.deleted',
646 b'diff.deleted',
647 )
647 )
648 for i in range(b1, b2):
648 for i in range(b1, b2):
649 writeline(
649 writeline(
650 bidxs[i - b1],
650 bidxs[i - b1],
651 b'+',
651 b'+',
652 trim(blines[i]),
652 trim(blines[i]),
653 b'inserted',
653 b'inserted',
654 b'diff.inserted',
654 b'diff.inserted',
655 )
655 )
656
656
657
657
658 class fixupstate:
658 class fixupstate:
659 """state needed to run absorb
659 """state needed to run absorb
660
660
661 internally, it keeps paths and filefixupstates.
661 internally, it keeps paths and filefixupstates.
662
662
663 a typical use is like filefixupstate's:
663 a typical use is like filefixupstate's:
664
664
665 1. call diffwith, to calculate fixups
665 1. call diffwith, to calculate fixups
666 2. (optionally), present fixups to the user, or edit fixups
666 2. (optionally), present fixups to the user, or edit fixups
667 3. call apply, to apply changes to memory
667 3. call apply, to apply changes to memory
668 4. call commit, to commit changes to hg database
668 4. call commit, to commit changes to hg database
669 """
669 """
670
670
671 def __init__(self, stack, ui=None, opts=None):
671 def __init__(self, stack, ui=None, opts=None):
672 """([ctx], ui or None) -> None
672 """([ctx], ui or None) -> None
673
673
674 stack: should be linear, and sorted by topo order - oldest first.
674 stack: should be linear, and sorted by topo order - oldest first.
675 all commits in stack are considered mutable.
675 all commits in stack are considered mutable.
676 """
676 """
677 assert stack
677 assert stack
678 self.ui = ui or nullui()
678 self.ui = ui or nullui()
679 self.opts = opts or {}
679 self.opts = opts or {}
680 self.stack = stack
680 self.stack = stack
681 self.repo = stack[-1].repo().unfiltered()
681 self.repo = stack[-1].repo().unfiltered()
682
682
683 # following fields will be filled later
683 # following fields will be filled later
684 self.paths = [] # [str]
684 self.paths = [] # [str]
685 self.status = None # ctx.status output
685 self.status = None # ctx.status output
686 self.fctxmap = {} # {path: {ctx: fctx}}
686 self.fctxmap = {} # {path: {ctx: fctx}}
687 self.fixupmap = {} # {path: filefixupstate}
687 self.fixupmap = {} # {path: filefixupstate}
688 self.replacemap = {} # {oldnode: newnode or None}
688 self.replacemap = {} # {oldnode: newnode or None}
689 self.finalnode = None # head after all fixups
689 self.finalnode = None # head after all fixups
690 self.ctxaffected = set() # ctx that will be absorbed into
690 self.ctxaffected = set() # ctx that will be absorbed into
691
691
692 def diffwith(self, targetctx, match=None, fm=None):
692 def diffwith(self, targetctx, match=None, fm=None):
693 """diff and prepare fixups. update self.fixupmap, self.paths"""
693 """diff and prepare fixups. update self.fixupmap, self.paths"""
694 # only care about modified files
694 # only care about modified files
695 self.status = self.stack[-1].status(targetctx, match)
695 self.status = self.stack[-1].status(targetctx, match)
696 self.paths = []
696 self.paths = []
697 # but if --edit-lines is used, the user may want to edit files
697 # but if --edit-lines is used, the user may want to edit files
698 # even if they are not modified
698 # even if they are not modified
699 editopt = self.opts.get(b'edit_lines')
699 editopt = self.opts.get(b'edit_lines')
700 if not self.status.modified and editopt and match:
700 if not self.status.modified and editopt and match:
701 interestingpaths = match.files()
701 interestingpaths = match.files()
702 else:
702 else:
703 interestingpaths = self.status.modified
703 interestingpaths = self.status.modified
704 # prepare the filefixupstate
704 # prepare the filefixupstate
705 seenfctxs = set()
705 seenfctxs = set()
706 # sorting is necessary to eliminate ambiguity for the "double move"
706 # sorting is necessary to eliminate ambiguity for the "double move"
707 # case: "hg cp A B; hg cp A C; hg rm A", then only "B" can affect "A".
707 # case: "hg cp A B; hg cp A C; hg rm A", then only "B" can affect "A".
708 for path in sorted(interestingpaths):
708 for path in sorted(interestingpaths):
709 self.ui.debug(b'calculating fixups for %s\n' % path)
709 self.ui.debug(b'calculating fixups for %s\n' % path)
710 targetfctx = targetctx[path]
710 targetfctx = targetctx[path]
711 fctxs, ctx2fctx = getfilestack(self.stack, path, seenfctxs)
711 fctxs, ctx2fctx = getfilestack(self.stack, path, seenfctxs)
712 # ignore symbolic links, binary files, or unchanged files
712 # ignore symbolic links, binary files, or unchanged files
713 if any(
713 if any(
714 f.islink() or stringutil.binary(f.data())
714 f.islink() or stringutil.binary(f.data())
715 for f in [targetfctx] + fctxs
715 for f in [targetfctx] + fctxs
716 if not isinstance(f, emptyfilecontext)
716 if not isinstance(f, emptyfilecontext)
717 ):
717 ):
718 continue
718 continue
719 if targetfctx.data() == fctxs[-1].data() and not editopt:
719 if targetfctx.data() == fctxs[-1].data() and not editopt:
720 continue
720 continue
721 seenfctxs.update(fctxs[1:])
721 seenfctxs.update(fctxs[1:])
722 self.fctxmap[path] = ctx2fctx
722 self.fctxmap[path] = ctx2fctx
723 fstate = filefixupstate(fctxs, path, ui=self.ui, opts=self.opts)
723 fstate = filefixupstate(fctxs, path, ui=self.ui, opts=self.opts)
724 if fm is not None:
724 if fm is not None:
725 fm.startitem()
725 fm.startitem()
726 fm.plain(b'showing changes for ')
726 fm.plain(b'showing changes for ')
727 fm.write(b'path', b'%s\n', path, label=b'absorb.path')
727 fm.write(b'path', b'%s\n', path, label=b'absorb.path')
728 fm.data(linetype=b'path')
728 fm.data(linetype=b'path')
729 fstate.diffwith(targetfctx, fm)
729 fstate.diffwith(targetfctx, fm)
730 self.fixupmap[path] = fstate
730 self.fixupmap[path] = fstate
731 self.paths.append(path)
731 self.paths.append(path)
732 self.ctxaffected.update(fstate.ctxaffected)
732 self.ctxaffected.update(fstate.ctxaffected)
733
733
734 def apply(self):
734 def apply(self):
735 """apply fixups to individual filefixupstates"""
735 """apply fixups to individual filefixupstates"""
736 for path, state in self.fixupmap.items():
736 for path, state in self.fixupmap.items():
737 if self.ui.debugflag:
737 if self.ui.debugflag:
738 self.ui.write(_(b'applying fixups to %s\n') % path)
738 self.ui.write(_(b'applying fixups to %s\n') % path)
739 state.apply()
739 state.apply()
740
740
741 @property
741 @property
742 def chunkstats(self):
742 def chunkstats(self):
743 """-> {path: chunkstats}. collect chunkstats from filefixupstates"""
743 """-> {path: chunkstats}. collect chunkstats from filefixupstates"""
744 return {path: state.chunkstats for path, state in self.fixupmap.items()}
744 return {path: state.chunkstats for path, state in self.fixupmap.items()}
745
745
746 def commit(self):
746 def commit(self):
747 """commit changes. update self.finalnode, self.replacemap"""
747 """commit changes. update self.finalnode, self.replacemap"""
748 with self.repo.transaction(b'absorb') as tr:
748 with self.repo.transaction(b'absorb') as tr:
749 self._commitstack()
749 self._commitstack()
750 self._movebookmarks(tr)
750 self._movebookmarks(tr)
751 if self.repo[b'.'].node() in self.replacemap:
751 if self.repo[b'.'].node() in self.replacemap:
752 self._moveworkingdirectoryparent()
752 self._moveworkingdirectoryparent()
753 self._cleanupoldcommits()
753 self._cleanupoldcommits()
754 return self.finalnode
754 return self.finalnode
755
755
756 def printchunkstats(self):
756 def printchunkstats(self):
757 """print things like '1 of 2 chunk(s) applied'"""
757 """print things like '1 of 2 chunk(s) applied'"""
758 ui = self.ui
758 ui = self.ui
759 chunkstats = self.chunkstats
759 chunkstats = self.chunkstats
760 if ui.verbose:
760 if ui.verbose:
761 # chunkstats for each file
761 # chunkstats for each file
762 for path, stat in chunkstats.items():
762 for path, stat in chunkstats.items():
763 if stat[0]:
763 if stat[0]:
764 ui.write(
764 ui.write(
765 _(b'%s: %d of %d chunk(s) applied\n')
765 _(b'%s: %d of %d chunk(s) applied\n')
766 % (path, stat[0], stat[1])
766 % (path, stat[0], stat[1])
767 )
767 )
768 elif not ui.quiet:
768 elif not ui.quiet:
769 # a summary for all files
769 # a summary for all files
770 stats = chunkstats.values()
770 stats = chunkstats.values()
771 applied, total = (sum(s[i] for s in stats) for i in (0, 1))
771 applied, total = (sum(s[i] for s in stats) for i in (0, 1))
772 ui.write(_(b'%d of %d chunk(s) applied\n') % (applied, total))
772 ui.write(_(b'%d of %d chunk(s) applied\n') % (applied, total))
773
773
774 def _commitstack(self):
774 def _commitstack(self):
775 """make new commits. update self.finalnode, self.replacemap.
775 """make new commits. update self.finalnode, self.replacemap.
776 it is split from "commit" to avoid too much indentation.
776 it is split from "commit" to avoid too much indentation.
777 """
777 """
778 # last node (20-char) committed by us
778 # last node (20-char) committed by us
779 lastcommitted = None
779 lastcommitted = None
780 # p1 which overrides the parent of the next commit, "None" means use
780 # p1 which overrides the parent of the next commit, "None" means use
781 # the original parent unchanged
781 # the original parent unchanged
782 nextp1 = None
782 nextp1 = None
783 for ctx in self.stack:
783 for ctx in self.stack:
784 memworkingcopy = self._getnewfilecontents(ctx)
784 memworkingcopy = self._getnewfilecontents(ctx)
785 if not memworkingcopy and not lastcommitted:
785 if not memworkingcopy and not lastcommitted:
786 # nothing changed, nothing commited
786 # nothing changed, nothing commited
787 nextp1 = ctx
787 nextp1 = ctx
788 continue
788 continue
789 willbecomenoop = ctx.files() and self._willbecomenoop(
789 willbecomenoop = ctx.files() and self._willbecomenoop(
790 memworkingcopy, ctx, nextp1
790 memworkingcopy, ctx, nextp1
791 )
791 )
792 if self.skip_empty_successor and willbecomenoop:
792 if self.skip_empty_successor and willbecomenoop:
793 # changeset is no longer necessary
793 # changeset is no longer necessary
794 self.replacemap[ctx.node()] = None
794 self.replacemap[ctx.node()] = None
795 msg = _(b'became empty and was dropped')
795 msg = _(b'became empty and was dropped')
796 else:
796 else:
797 # changeset needs re-commit
797 # changeset needs re-commit
798 nodestr = self._commitsingle(memworkingcopy, ctx, p1=nextp1)
798 nodestr = self._commitsingle(memworkingcopy, ctx, p1=nextp1)
799 lastcommitted = self.repo[nodestr]
799 lastcommitted = self.repo[nodestr]
800 nextp1 = lastcommitted
800 nextp1 = lastcommitted
801 self.replacemap[ctx.node()] = lastcommitted.node()
801 self.replacemap[ctx.node()] = lastcommitted.node()
802 if memworkingcopy:
802 if memworkingcopy:
803 if willbecomenoop:
803 if willbecomenoop:
804 msg = _(b'%d file(s) changed, became empty as %s')
804 msg = _(b'%d file(s) changed, became empty as %s')
805 else:
805 else:
806 msg = _(b'%d file(s) changed, became %s')
806 msg = _(b'%d file(s) changed, became %s')
807 msg = msg % (
807 msg = msg % (
808 len(memworkingcopy),
808 len(memworkingcopy),
809 self._ctx2str(lastcommitted),
809 self._ctx2str(lastcommitted),
810 )
810 )
811 else:
811 else:
812 msg = _(b'became %s') % self._ctx2str(lastcommitted)
812 msg = _(b'became %s') % self._ctx2str(lastcommitted)
813 if self.ui.verbose and msg:
813 if self.ui.verbose and msg:
814 self.ui.write(_(b'%s: %s\n') % (self._ctx2str(ctx), msg))
814 self.ui.write(_(b'%s: %s\n') % (self._ctx2str(ctx), msg))
815 self.finalnode = lastcommitted and lastcommitted.node()
815 self.finalnode = lastcommitted and lastcommitted.node()
816
816
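    # Rough illustration of the loop above: given a draft stack A-B-C
    # where only B receives fixups, A is left untouched (nextp1 = A),
    # B is re-committed as B' with the fixed content, and C is then
    # re-committed as C' on top of B' even though C's own content did
    # not change, keeping the rewritten stack linear.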
    def _ctx2str(self, ctx):
        if self.ui.debugflag:
            return b'%d:%s' % (ctx.rev(), ctx.hex())
        else:
            return b'%d:%s' % (ctx.rev(), short(ctx.node()))

    def _getnewfilecontents(self, ctx):
        """(ctx) -> {path: str}

        fetch file contents from filefixupstates.
        return the working copy overrides - files different from ctx.
        """
        result = {}
        for path in self.paths:
            ctx2fctx = self.fctxmap[path]  # {ctx: fctx}
            if ctx not in ctx2fctx:
                continue
            fctx = ctx2fctx[ctx]
            content = fctx.data()
            newcontent = self.fixupmap[path].getfinalcontent(fctx)
            if content != newcontent:
                result[fctx.path()] = newcontent
        return result

    def _movebookmarks(self, tr):
        repo = self.repo
        needupdate = [
            (name, self.replacemap[hsh])
            for name, hsh in repo._bookmarks.items()
            if hsh in self.replacemap
        ]
        changes = []
        for name, hsh in needupdate:
            if hsh:
                changes.append((name, hsh))
                if self.ui.verbose:
                    self.ui.write(
                        _(b'moving bookmark %s to %s\n') % (name, hex(hsh))
                    )
            else:
                changes.append((name, None))
                if self.ui.verbose:
                    self.ui.write(_(b'deleting bookmark %s\n') % name)
        repo._bookmarks.applychanges(repo, tr, changes)

    def _moveworkingdirectoryparent(self):
        if not self.finalnode:
            # Find the latest not-{obsoleted,stripped} parent.
            revs = self.repo.revs(b'max(::. - %ln)', self.replacemap.keys())
            ctx = self.repo[revs.first()]
            self.finalnode = ctx.node()
        else:
            ctx = self.repo[self.finalnode]

        dirstate = self.repo.dirstate
        # dirstate.rebuild invalidates fsmonitorstate, causing "hg status" to
        # be slow. in absorb's case, no need to invalidate fsmonitorstate.
        noop = lambda: 0
        restore = noop
        if util.safehasattr(dirstate, '_fsmonitorstate'):
            bak = dirstate._fsmonitorstate.invalidate

            def restore():
                dirstate._fsmonitorstate.invalidate = bak

            dirstate._fsmonitorstate.invalidate = noop
        try:
            with dirstate.changing_parents(self.repo):
                dirstate.rebuild(ctx.node(), ctx.manifest(), self.paths)
        finally:
            restore()

    @staticmethod
    def _willbecomenoop(memworkingcopy, ctx, pctx=None):
        """({path: content}, ctx, ctx) -> bool. test if a commit will be noop

        if it will become an empty commit (does not change anything, after the
        memworkingcopy overrides), return True. otherwise return False.
        """
        if not pctx:
            parents = ctx.parents()
            if len(parents) != 1:
                return False
            pctx = parents[0]
        if ctx.branch() != pctx.branch():
            return False
        if ctx.extra().get(b'close'):
            return False
        # ctx changes more files (not a subset of memworkingcopy)
        if not set(ctx.files()).issubset(set(memworkingcopy)):
            return False
        for path, content in memworkingcopy.items():
            if path not in pctx or path not in ctx:
                return False
            fctx = ctx[path]
            pfctx = pctx[path]
            if pfctx.flags() != fctx.flags():
                return False
            if pfctx.data() != content:
                return False
        return True

    def _commitsingle(self, memworkingcopy, ctx, p1=None):
        """({path: content}, ctx, node) -> node. make a single commit

        the commit is a clone from ctx, with an (optionally) different p1, and
        different file contents replaced by memworkingcopy.
        """
        parents = p1 and (p1, self.repo.nullid)
        extra = ctx.extra()
        if self._useobsolete and self.ui.configbool(b'absorb', b'add-noise'):
            extra[b'absorb_source'] = ctx.hex()

        desc = rewriteutil.update_hash_refs(
            ctx.repo(),
            ctx.description(),
            {
                oldnode: [newnode]
                for oldnode, newnode in self.replacemap.items()
            },
        )
        mctx = overlaycontext(
            memworkingcopy, ctx, parents, extra=extra, desc=desc
        )
        return mctx.commit()

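    # Note on the `parents = p1 and (p1, self.repo.nullid)` idiom above:
    # when no override p1 is given, `parents` stays falsy and
    # overlaycontext appears to fall back to ctx's original parents;
    # otherwise the clone is explicitly re-parented onto (p1, null).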
    @util.propertycache
    def _useobsolete(self):
        """() -> bool"""
        return obsolete.isenabled(self.repo, obsolete.createmarkersopt)

    def _cleanupoldcommits(self):
        replacements = {
            k: ([v] if v is not None else [])
            for k, v in self.replacemap.items()
        }
        if replacements:
            scmutil.cleanupnodes(
                self.repo, replacements, operation=b'absorb', fixphase=True
            )

    @util.propertycache
    def skip_empty_successor(self):
        return rewriteutil.skip_empty_successor(self.ui, b'absorb')


def _parsechunk(hunk):
    """(crecord.uihunk or patch.recordhunk) -> (path, (a1, a2, [bline]))"""
    if type(hunk) not in (crecord.uihunk, patch.recordhunk):
        return None, None
    path = hunk.header.filename()
    a1 = hunk.fromline + len(hunk.before) - 1
    # remove before and after context
    hunk.before = hunk.after = []
    buf = util.stringio()
    hunk.write(buf)
    patchlines = mdiff.splitnewlines(buf.getvalue())
    # hunk.prettystr() will update hunk.removed
    a2 = a1 + hunk.removed
    blines = [l[1:] for l in patchlines[1:] if not l.startswith(b'-')]
    return path, (a1, a2, blines)

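# Example of the returned line range: for a hunk that replaces old-file
# lines 3-4 (1-based) with a single new line and carries no context,
# _parsechunk() yields (path, (2, 4, [b'new line\n'])) -- a1/a2 are
# 0-based, end-exclusive indices into the old file's line list.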
def overlaydiffcontext(ctx, chunks):
    """(ctx, [crecord.uihunk]) -> memctx

    return a memctx with some [1] patches (chunks) applied to ctx.
    [1]: modifications are handled. renames, mode changes, etc. are ignored.
    """
    # sadly the applying-patch logic is hardly reusable, and messy:
    # 1. the core logic "_applydiff" is too heavy - it writes .rej files, it
    #    needs a file stream of a patch and will re-parse it, while we have
    #    structured hunk objects at hand.
    # 2. a lot of different implementations about "chunk" (patch.hunk,
    #    patch.recordhunk, crecord.uihunk)
    # as we only care about applying changes to modified files, no mode
    # change, no binary diff, and no renames, it's probably okay to
    # re-invent the logic using much simpler code here.
    memworkingcopy = {}  # {path: content}
    patchmap = defaultdict(lambda: [])  # {path: [(a1, a2, [bline])]}
    for path, info in map(_parsechunk, chunks):
        if not path or not info:
            continue
        patchmap[path].append(info)
    for path, patches in patchmap.items():
        if path not in ctx or not patches:
            continue
        patches.sort(reverse=True)
        lines = mdiff.splitnewlines(ctx[path].data())
        for a1, a2, blines in patches:
            lines[a1:a2] = blines
        memworkingcopy[path] = b''.join(lines)
    return overlaycontext(memworkingcopy, ctx)

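# Note: patches.sort(reverse=True) above applies hunks bottom-up, so
# splicing lines[a1:a2] for one hunk cannot shift the offsets of the
# hunks applied after it (they all lie earlier in the file).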
def absorb(ui, repo, stack=None, targetctx=None, pats=None, opts=None):
    """pick fixup chunks from targetctx, apply them to stack.

    if targetctx is None, the working copy context will be used.
    if stack is None, the current draft stack will be used.
    return fixupstate.
    """
    if stack is None:
        limit = ui.configint(b'absorb', b'max-stack-size')
        headctx = repo[b'.']
        if len(headctx.parents()) > 1:
            raise error.InputError(_(b'cannot absorb into a merge'))
        stack = getdraftstack(headctx, limit)
        if limit and len(stack) >= limit:
            ui.warn(
                _(
                    b'absorb: only the recent %d changesets will '
                    b'be analysed\n'
                )
                % limit
            )
    if not stack:
        raise error.InputError(_(b'no mutable changeset to change'))
    if targetctx is None:  # default to working copy
        targetctx = repo[None]
    if pats is None:
        pats = ()
    if opts is None:
        opts = {}
    state = fixupstate(stack, ui=ui, opts=opts)
    matcher = scmutil.match(targetctx, pats, opts)
    if opts.get(b'interactive'):
        diff = patch.diff(repo, stack[-1].node(), targetctx.node(), matcher)
        origchunks = patch.parsepatch(diff)
        chunks = cmdutil.recordfilter(ui, origchunks, matcher)[0]
        targetctx = overlaydiffcontext(stack[-1], chunks)
    if opts.get(b'edit_lines'):
        # If we're going to open the editor, don't ask the user to confirm
        # first
        opts[b'apply_changes'] = True
    fm = None
    if opts.get(b'print_changes') or not opts.get(b'apply_changes'):
        fm = ui.formatter(b'absorb', opts)
    state.diffwith(targetctx, matcher, fm)
    if fm is not None:
        fm.startitem()
        fm.write(
            b"count", b"\n%d changesets affected\n", len(state.ctxaffected)
        )
        fm.data(linetype=b'summary')
        for ctx in reversed(stack):
            if ctx not in state.ctxaffected:
                continue
            fm.startitem()
            fm.context(ctx=ctx)
            fm.data(linetype=b'changeset')
            fm.write(b'node', b'%-7.7s ', ctx.hex(), label=b'absorb.node')
            descfirstline = stringutil.firstline(ctx.description())
            fm.write(
                b'descfirstline',
                b'%s\n',
                descfirstline,
                label=b'absorb.description',
            )
        fm.end()
    if not opts.get(b'dry_run'):
        if (
            not opts.get(b'apply_changes')
            and state.ctxaffected
            and ui.promptchoice(
                b"apply changes (y/N)? $$ &Yes $$ &No", default=1
            )
        ):
            raise error.CanceledError(_(b'absorb cancelled\n'))

        state.apply()
        if state.commit():
            state.printchunkstats()
        elif not ui.quiet:
            ui.write(_(b'nothing applied\n'))
    return state


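# Note on the interactive path above: the chunks the user selects via
# recordfilter() are never written to disk; they are overlaid onto
# stack[-1] with overlaydiffcontext() to build a synthetic targetctx,
# and absorb then proceeds exactly as in the non-interactive case.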
@command(
    b'absorb',
    [
        (
            b'a',
            b'apply-changes',
            None,
            _(b'apply changes without prompting for confirmation'),
        ),
        (
            b'p',
            b'print-changes',
            None,
            _(b'always print which changesets are modified by which changes'),
        ),
        (
            b'i',
            b'interactive',
            None,
            _(b'interactively select which chunks to apply'),
        ),
        (
            b'e',
            b'edit-lines',
            None,
            _(
                b'edit what lines belong to which changesets before commit '
                b'(EXPERIMENTAL)'
            ),
        ),
    ]
    + commands.dryrunopts
    + commands.templateopts
    + commands.walkopts,
    _(b'hg absorb [OPTION] [FILE]...'),
    helpcategory=command.CATEGORY_COMMITTING,
    helpbasic=True,
)
def absorbcmd(ui, repo, *pats, **opts):
    """incorporate corrections into the stack of draft changesets

    absorb analyzes each change in your working directory and attempts to
    amend the changed lines into the changesets in your stack that first
    introduced those lines.

    If absorb cannot find an unambiguous changeset to amend for a change,
    that change will be left in the working directory, untouched. Such
    changes can be observed by :hg:`status` or :hg:`diff` afterwards. In
    other words, absorb does not write to the working directory.

    Changesets outside the revset `::. and not public() and not merge()` will
    not be changed.

    Changesets that become empty after applying the changes will be deleted.

    By default, absorb will show what it plans to do and prompt for
    confirmation. If you are confident that the changes will be absorbed
    to the correct place, run :hg:`absorb -a` to apply the changes
    immediately.

    Returns 0 on success, 1 if all chunks were ignored and nothing amended.
    """
    opts = pycompat.byteskwargs(opts)

    with repo.wlock(), repo.lock():
        if not opts[b'dry_run']:
            cmdutil.checkunfinished(repo)

        state = absorb(ui, repo, pats=pats, opts=opts)
        if sum(s[0] for s in state.chunkstats.values()) == 0:
            return 1
@@ -1,73 +1,75
# amend.py - provide the amend command
#
# Copyright 2017 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""provide the amend command (EXPERIMENTAL)

This extension provides an ``amend`` command that is similar to
``commit --amend`` but does not open an editor.
"""


from mercurial.i18n import _
from mercurial import (
    cmdutil,
    commands,
    registrar,
)

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'

cmdtable = {}
command = registrar.command(cmdtable)


@command(
    b'amend',
    [
        (
            b'A',
            b'addremove',
            None,
            _(b'mark new/missing files as added/removed before committing'),
        ),
        (b'e', b'edit', None, _(b'invoke editor on commit messages')),
        (b'i', b'interactive', None, _(b'use interactive mode')),
        (
            b'',
            b'close-branch',
            None,
            _(b'mark a branch as closed, hiding it from the branch list'),
        ),
        (b's', b'secret', None, _(b'use the secret phase for committing')),
        (b'', b'draft', None, _(b'use the draft phase for committing')),
        (b'n', b'note', b'', _(b'store a note on the amend')),
    ]
    + cmdutil.walkopts
    + cmdutil.commitopts
    + cmdutil.commitopts2
    + cmdutil.commitopts3,
    _(b'[OPTION]... [FILE]...'),
    helpcategory=command.CATEGORY_COMMITTING,
    inferrepo=True,
)
def amend(ui, repo, *pats, **opts):
    """amend the working copy parent with all or specified outstanding changes

    Similar to :hg:`commit --amend`, but reuses the commit message without
    invoking the editor, unless ``--edit`` was set.

    See :hg:`help commit` for more details.
    """
    cmdutil.check_at_most_one_arg(opts, 'draft', 'secret')
    cmdutil.check_note_size(opts)

    with repo.wlock(), repo.lock():
        if not opts.get('logfile'):
            opts['message'] = opts.get('message') or repo[b'.'].description()
        opts['amend'] = True
        return commands._docommit(ui, repo, *pats, **opts)
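
# Note: with the new --draft flag, --draft and --secret are mutually
# exclusive; check_at_most_one_arg() aborts when both are supplied.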
@@ -1,118 +1,126
# automv.py
#
# Copyright 2013-2016 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""check for unrecorded moves at commit time (EXPERIMENTAL)

This extension checks at commit/amend time whether any of the committed files
comes from an unrecorded mv.

The threshold at which a file is considered a move can be set with the
``automv.similarity`` config option. This option takes a percentage between 0
(disabled) and 100 (files must be identical); the default is 95.

"""

# Using 95 as a default similarity is based on an analysis of the mercurial
# repositories of the cpython, mozilla-central & mercurial repositories, as
# well as 2 very large facebook repositories. At 95, 50% of all potential
# missed moves would be caught, and the threshold corresponds with 87% of
# all explicitly marked moves. Overall, 80% of moved files are 95% similar
# or more.
#
# See http://markmail.org/thread/5pxnljesvufvom57 for context.


from mercurial.i18n import _
from mercurial import (
    commands,
    copies,
    error,
    extensions,
    pycompat,
    registrar,
    scmutil,
    similar,
)

configtable = {}
configitem = registrar.configitem(configtable)

configitem(
    b'automv',
    b'similarity',
    default=95,
)


def extsetup(ui):
    entry = extensions.wrapcommand(commands.table, b'commit', mvcheck)
    entry[1].append(
        (b'', b'no-automv', None, _(b'disable automatic file move detection'))
    )


def mvcheck(orig, ui, repo, *pats, **opts):
    """Hook to check for moves at commit time"""
    opts = pycompat.byteskwargs(opts)
    renames = None
    disabled = opts.pop(b'no_automv', False)
    with repo.wlock():
        if not disabled:
            threshold = ui.configint(b'automv', b'similarity')
            if not 0 <= threshold <= 100:
                raise error.Abort(
                    _(b'automv.similarity must be between 0 and 100')
                )
            if threshold > 0:
                match = scmutil.match(repo[None], pats, opts)
                added, removed = _interestingfiles(repo, match)
                uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
                renames = _findrenames(
                    repo, uipathfn, added, removed, threshold / 100.0
                )

        if renames is not None:
            with repo.dirstate.changing_files(repo):
                # XXX this should be wider and integrated with the commit
                # transaction, at the same time as we do the `addremove`
                # logic for commit. However we can't really do better with
                # the current extension structure, and this is not worse
                # than what happened before.
                scmutil._markchanges(repo, (), (), renames)
    return orig(ui, repo, *pats, **pycompat.strkwargs(opts))

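# Note: the detection and _markchanges() now both run under a single
# wlock, and the copy records are written inside a
# dirstate.changing_files context, so the wrapped commit sees a
# consistent dirstate.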
def _interestingfiles(repo, matcher):
    """Find what files were added or removed in this commit.

    Returns a tuple of two lists: (added, removed). Only files not *already*
    marked as moved are included in the added list.
    """
    stat = repo.status(match=matcher)
    added = stat.added
    removed = stat.removed

    copy = copies.pathcopies(repo[b'.'], repo[None], matcher)
    # remove the copy files for which we already have copy info
    added = [f for f in added if f not in copy]

    return added, removed


def _findrenames(repo, uipathfn, added, removed, similarity):
    """Find what files in added are really moved files.

    Any file named in removed that is at least similarity% similar to a file
    in added is seen as a rename.
    """
    renames = {}
    if similarity > 0:
        for src, dst, score in similar.findrenames(
            repo, added, removed, similarity
        ):
            if repo.ui.verbose:
                repo.ui.status(
                    _(b'detected move of %s as %s (%d%% similar)\n')
                    % (uipathfn(src), uipathfn(dst), score * 100)
                )
            renames[dst] = src
    if renames:
        repo.ui.status(_(b'detected move of %d files\n') % len(renames))
    return renames
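
# Example: with the default automv.similarity of 95, mvcheck passes
# 0.95 here (threshold / 100.0), so a removed/added pair must be at
# least 95% similar to be recorded as a rename.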
@@ -1,240 +1,242
# blackbox.py - log repository events to a file for post-mortem debugging
#
# Copyright 2010 Nicolas Dumazet
# Copyright 2013 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""log repository events to a blackbox for debugging

Logs event information to .hg/blackbox.log to help debug and diagnose problems.
The events that get logged can be configured via the blackbox.track and
blackbox.ignore config keys.

Examples::

  [blackbox]
  track = *
  ignore = pythonhook
  # dirty is *EXPENSIVE* (slow);
  # each log entry indicates `+` if the repository is dirty, like :hg:`id`.
  dirty = True
  # record the source of log messages
  logsource = True

  [blackbox]
  track = command, commandfinish, commandexception, exthook, pythonhook

  [blackbox]
  track = incoming

  [blackbox]
  # limit the size of a log file
  maxsize = 1.5 MB
  # rotate up to N log files when the current one gets too big
  maxfiles = 3

  [blackbox]
  # Include microseconds in log entries with %f (see Python function
  # datetime.datetime.strftime)
  date-format = %Y-%m-%d @ %H:%M:%S.%f

"""


import re

from mercurial.i18n import _
from mercurial.node import hex

from mercurial import (
    encoding,
    loggingutil,
    registrar,
)
from mercurial.utils import (
    dateutil,
    procutil,
)

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'

cmdtable = {}
command = registrar.command(cmdtable)

configtable = {}
configitem = registrar.configitem(configtable)

configitem(
    b'blackbox',
    b'dirty',
    default=False,
)
configitem(
    b'blackbox',
    b'maxsize',
    default=b'1 MB',
)
configitem(
    b'blackbox',
    b'logsource',
    default=False,
)
configitem(
    b'blackbox',
    b'maxfiles',
    default=7,
)
configitem(
    b'blackbox',
    b'track',
    default=lambda: [b'*'],
)
configitem(
    b'blackbox',
    b'ignore',
    default=lambda: [b'chgserver', b'cmdserver', b'extension'],
)
configitem(b'blackbox', b'date-format', default=b'')

_lastlogger = loggingutil.proxylogger()


class blackboxlogger:
    def __init__(self, ui, repo):
        self._repo = repo
        self._trackedevents = set(ui.configlist(b'blackbox', b'track'))
        self._ignoredevents = set(ui.configlist(b'blackbox', b'ignore'))
        self._maxfiles = ui.configint(b'blackbox', b'maxfiles')
        self._maxsize = ui.configbytes(b'blackbox', b'maxsize')
        self._inlog = False

    def tracked(self, event):
        return (
            b'*' in self._trackedevents and event not in self._ignoredevents
        ) or event in self._trackedevents

    def log(self, ui, event, msg, opts):
        # self._log() -> ctx.dirty() may create a new subrepo instance whose
        # ui is derived from baseui. So the recursion guard in ui.log()
        # doesn't work, as it's local to the ui instance.
        if self._inlog:
            return
        self._inlog = True
        try:
            self._log(ui, event, msg, opts)
        finally:
            self._inlog = False

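    # Example of tracked(): with track=* and ignore=chgserver, 'command'
    # is logged but 'chgserver' is not; an event listed explicitly in
    # blackbox.track is logged even if it also appears in blackbox.ignore.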
    def _log(self, ui, event, msg, opts):
        default = ui.configdate(b'devel', b'default-date')
        dateformat = ui.config(b'blackbox', b'date-format')
        if dateformat:
            date = dateutil.datestr(default, dateformat)
        else:
            # We want to display milliseconds (more precision seems
            # unnecessary). Since %.3f is not supported, use %f and truncate
            # microseconds.
            date = dateutil.datestr(default, b'%Y-%m-%d %H:%M:%S.%f')[:-3]
        user = procutil.getuser()
        pid = b'%d' % procutil.getpid()
        changed = b''
        ctx = self._repo[None]
        parents = ctx.parents()
        rev = b'+'.join([hex(p.node()) for p in parents])
        if ui.configbool(b'blackbox', b'dirty') and ctx.dirty(
            missing=True, merge=False, branch=False
        ):
            changed = b'+'
        if ui.configbool(b'blackbox', b'logsource'):
            src = b' [%s]' % event
        else:
            src = b''
        try:
            fmt = b'%s %s @%s%s (%s)%s> %s'
            args = (date, user, rev, changed, pid, src, msg)
            with loggingutil.openlogfile(
                ui,
                self._repo.vfs,
                name=b'blackbox.log',
                maxfiles=self._maxfiles,
                maxsize=self._maxsize,
            ) as fp:
                fp.write(fmt % args)
        except (IOError, OSError) as err:
            # deactivate this to avoid failed logging again
            self._trackedevents.clear()
            ui.debug(
                b'warning: cannot write to blackbox.log: %s\n'
                % encoding.strtolocal(err.strerror)
            )
            return
        _lastlogger.logger = self
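    # A resulting log entry looks roughly like:
    #   2013-01-23 19:13:36.000 alice @<parent-hex>+ (1234) [command]> log
    # i.e. date, user, @parent revision(s), an optional '+' dirty marker,
    # (pid), an optional [event] source, and finally the message.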


def uipopulate(ui):
    ui.setlogger(b'blackbox', _lastlogger)


def reposetup(ui, repo):
    # During 'hg pull' a httppeer repo is created to represent the remote repo.
    # It doesn't have a .hg directory to put a blackbox in, so we don't do
    # the blackbox setup for it.
    if not repo.local():
        return

    # Since blackbox.log is stored in the repo directory, the logger should be
    # instantiated per repository.
    logger = blackboxlogger(ui, repo)
    ui.setlogger(b'blackbox', logger)

    # Set _lastlogger even if ui.log is not called. This gives blackbox a
    # fallback place to log.
    if _lastlogger.logger is None:
        _lastlogger.logger = logger

    repo._wlockfreeprefix.add(b'blackbox.log')


@command(
    b'blackbox',
    [
        (b'l', b'limit', 10, _(b'the number of events to show')),
    ],
    _(b'hg blackbox [OPTION]...'),
    helpcategory=command.CATEGORY_MAINTENANCE,
    helpbasic=True,
)
def blackbox(ui, repo, *revs, **opts):
    """view the recent repository events"""

    if not repo.vfs.exists(b'blackbox.log'):
        return

    limit = opts.get('limit')
    assert limit is not None  # help pytype

    fp = repo.vfs(b'blackbox.log', b'r')
    lines = fp.read().split(b'\n')

    count = 0
    output = []
    for line in reversed(lines):
        if count >= limit:
            break

        # count the commands by matching lines like:
        # 2013/01/23 19:13:36 root>
        # 2013/01/23 19:13:36 root (1234)>
        # 2013/01/23 19:13:36 root @0000000000000000000000000000000000000000 (1234)>
        # 2013-01-23 19:13:36.000 root @0000000000000000000000000000000000000000 (1234)>
        if re.match(
            br'^\d{4}[-/]\d{2}[-/]\d{2} \d{2}:\d{2}:\d{2}(.\d*)? .*> .*', line
        ):
            count += 1
        output.append(line)

    ui.status(b'\n'.join(reversed(output)))
@@ -1,343 +1,346
# bzr.py - bzr support for the convert extension
#
# Copyright 2008, 2009 Marek Kubica <marek@xivilization.net> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

# This module is for handling Breezy imports or `brz`, but it's also compatible
# with Bazaar or `bzr`, which was formerly known as Bazaar-NG;
# it cannot access `baz` repositories, but they were never used very much.

import os

from mercurial.i18n import _
from mercurial import (
    demandimport,
    error,
    util,
)
from . import common


# these do not work with demandimport, blacklist
demandimport.IGNORES.update(
    [
        'breezy.transactions',
        'breezy.urlutils',
        'ElementPath',
    ]
)

try:
    # bazaar imports
    # pytype: disable=import-error
    import breezy.bzr.bzrdir
    import breezy.errors
    import breezy.revision
    import breezy.revisionspec

    # pytype: enable=import-error

    bzrdir = breezy.bzr.bzrdir
    errors = breezy.errors
    revision = breezy.revision
    revisionspec = breezy.revisionspec
    revisionspec.RevisionSpec

    try:
        # brz 3.3.0 (revno: 7614.2.2)
        from breezy.transport import NoSuchFile
    except ImportError:
        from breezy.errors import NoSuchFile
except ImportError:
    pass

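# Note: the pytype disable/enable pragmas above silence import-error for
# the optional breezy modules. If breezy is not installed, the enclosing
# try/except leaves the names undefined, and bzr_source.__init__ below
# turns the resulting NameError into "Bazaar modules could not be loaded".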
supportedkinds = ('file', 'symlink')


class bzr_source(common.converter_source):
    """Reads Bazaar repositories by using the Bazaar Python libraries"""

    def __init__(self, ui, repotype, path, revs=None):
        super(bzr_source, self).__init__(ui, repotype, path, revs=revs)

        if not os.path.exists(os.path.join(path, b'.bzr')):
            raise common.NoRepo(
                _(b'%s does not look like a Bazaar repository') % path
            )

        try:
            # access breezy stuff
            bzrdir
        except NameError:
            raise common.NoRepo(_(b'Bazaar modules could not be loaded'))

        path = util.abspath(path)
        self._checkrepotype(path)
        try:
            bzr_dir = bzrdir.BzrDir.open(path.decode())
            self.sourcerepo = bzr_dir.open_repository()
        except errors.NoRepositoryPresent:
            raise common.NoRepo(
                _(b'%s does not look like a Bazaar repository') % path
            )
        self._parentids = {}
        self._saverev = ui.configbool(b'convert', b'bzr.saverev')

    def _checkrepotype(self, path):
        # Lightweight checkouts detection is informational but probably
        # fragile at API level. It should not terminate the conversion.
        try:
            dir = bzrdir.BzrDir.open_containing(path.decode())[0]
            try:
                tree = dir.open_workingtree(recommend_upgrade=False)
                branch = tree.branch
            except (errors.NoWorkingTree, errors.NotLocalUrl):
                tree = None
                branch = dir.open_branch()
            if (
                tree is not None
                and tree.controldir.root_transport.base
                != branch.controldir.root_transport.base
            ):
                self.ui.warn(
                    _(
                        b'warning: lightweight checkouts may cause '
                        b'conversion failures, try with a regular '
                        b'branch instead.\n'
                    )
                )
        except Exception:
            self.ui.note(_(b'bzr source type could not be determined\n'))

    def before(self):
        """Before the conversion begins, acquire a read lock
        for all the operations that might need it. Fortunately
        read locks don't block other reads or writes to the
        repository, so this shouldn't have any impact on the usage of
        the source repository.

        The alternative would be locking on every operation that
        needs locks (there are currently two: getting the file and
        getting the parent map) and releasing immediately after,
        but this approach can take up to 40% longer."""
        self.sourcerepo.lock_read()

    def after(self):
        self.sourcerepo.unlock()

    def _bzrbranches(self):
        return self.sourcerepo.find_branches(using=True)

    def getheads(self):
        if not self.revs:
            # Set using=True to avoid nested repositories (see issue3254)
            heads = sorted([b.last_revision() for b in self._bzrbranches()])
        else:
            revid = None
            for branch in self._bzrbranches():
                try:
                    revspec = self.revs[0].decode()
                    r = revisionspec.RevisionSpec.from_string(revspec)
                    info = r.in_history(branch)
                except errors.BzrError:
                    pass
                revid = info.rev_id
            if revid is None:
                raise error.Abort(
                    _(b'%s is not a valid revision') % self.revs[0]
                )
            heads = [revid]
        # Empty repositories return 'null:', which cannot be retrieved
        heads = [h for h in heads if h != b'null:']
        return heads

    def getfile(self, name, rev):
        name = name.decode()
        revtree = self.sourcerepo.revision_tree(rev)

        try:
            kind = revtree.kind(name)
        except NoSuchFile:
            return None, None
        if kind not in supportedkinds:
            # the file is not available anymore - was deleted
            return None, None
        mode = self._modecache[(name.encode(), rev)]
        if kind == 'symlink':
            target = revtree.get_symlink_target(name)
            if target is None:
                raise error.Abort(
                    _(b'%s.%s symlink has no target') % (name, rev)
                )
            return target.encode(), mode
        else:
            sio = revtree.get_file(name)
            return sio.read(), mode

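getfile() above can only answer the mode lookup because getchanges() and getchangedfiles() populate self._modecache first; the convert driver always asks for the changed-file list of a revision before it fetches contents. A minimal sketch of that handshake, with hypothetical path and revision ids:

# Sketch (hypothetical data): the tree diff records each changed path's
# mode, and a later content fetch for the same (path, revid) reads it back.
_modecache = {}
_modecache[(b'bin/tool', b'rev-1')] = b'x'   # recorded while diffing trees
mode = _modecache[(b'bin/tool', b'rev-1')]   # looked up when content is fetched
assert mode == b'x'
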
    def getchanges(self, version, full):
        if full:
            raise error.Abort(_(b"convert from bzr does not support --full"))
        self._modecache = {}
        self._revtree = self.sourcerepo.revision_tree(version)
        # get the parentids from the cache
        parentids = self._parentids.pop(version)
        # only diff against first parent id
        prevtree = self.sourcerepo.revision_tree(parentids[0])
        files, changes = self._gettreechanges(self._revtree, prevtree)
        return files, changes, set()

    def getcommit(self, version):
        rev = self.sourcerepo.get_revision(version)
        # populate parent id cache
        if not rev.parent_ids:
            parents = []
            self._parentids[version] = (revision.NULL_REVISION,)
        else:
            parents = self._filterghosts(rev.parent_ids)
            self._parentids[version] = parents

        branch = rev.properties.get('branch-nick', 'default')
        if branch == 'trunk':
            branch = 'default'
        return common.commit(
            parents=parents,
            date=b'%d %d' % (rev.timestamp, -rev.timezone),
            author=self.recode(rev.committer),
            desc=self.recode(rev.message),
            branch=branch.encode('utf8'),
            rev=version,
            saverev=self._saverev,
        )

    def gettags(self):
        bytetags = {}
        for branch in self._bzrbranches():
            if not branch.supports_tags():
                return {}
            tagdict = branch.tags.get_tag_dict()
            for name, rev in tagdict.items():
                bytetags[self.recode(name)] = rev
        return bytetags

    def getchangedfiles(self, rev, i):
        self._modecache = {}
        curtree = self.sourcerepo.revision_tree(rev)
        if i is not None:
            parentid = self._parentids[rev][i]
        else:
            # no parent id, get the empty revision
            parentid = revision.NULL_REVISION

        prevtree = self.sourcerepo.revision_tree(parentid)
        changes = [e[0] for e in self._gettreechanges(curtree, prevtree)[0]]
        return changes

    def _gettreechanges(self, current, origin):
        revid = current._revision_id
        changes = []
        renames = {}
        seen = set()

        # Fall back to the deprecated attribute for legacy installations.
        try:
            inventory = origin.root_inventory
        except AttributeError:
            inventory = origin.inventory

        # Process the entries by reverse lexicographic name order to
        # handle nested renames correctly, most specific first.

        def key(c):
            return c.path[0] or c.path[1] or ""

        curchanges = sorted(
            current.iter_changes(origin),
            key=key,
            reverse=True,
        )
        for change in curchanges:
            paths = change.path
            kind = change.kind
            executable = change.executable
            if paths[0] == u'' or paths[1] == u'':
                # ignore changes to tree root
                continue

            # bazaar tracks directories, mercurial does not, so
            # we have to rename the directory contents
            if kind[1] == 'directory':
                if kind[0] not in (None, 'directory'):
                    # Replacing 'something' with a directory, record it
                    # so it can be removed.
                    changes.append((self.recode(paths[0]), revid))

                if kind[0] == 'directory' and None not in paths:
                    renaming = paths[0] != paths[1]
                    # neither an add nor a delete - a move
                    # rename all directory contents manually
                    subdir = inventory.path2id(paths[0])
                    # get all child-entries of the directory
                    for name, entry in inventory.iter_entries(subdir):
                        # hg does not track directory renames
                        if entry.kind == 'directory':
                            continue
                        frompath = self.recode(paths[0] + '/' + name)
                        if frompath in seen:
                            # Already handled by a more specific change entry
                            # This is important when you have:
                            # a => b
                            # a/c => a/c
                            # Here a/c must not be renamed into b/c
                            continue
                        seen.add(frompath)
                        if not renaming:
                            continue
                        topath = self.recode(paths[1] + '/' + name)
                        # register the files as changed
                        changes.append((frompath, revid))
                        changes.append((topath, revid))
                        # add to mode cache
                        mode = (
                            (entry.executable and b'x')
                            or (entry.kind == 'symlink' and b's')
                            or b''
                        )
                        self._modecache[(topath, revid)] = mode
                        # register the change as move
                        renames[topath] = frompath

                # no further changes, go to the next change
                continue

            # we got unicode paths, need to convert them
            path, topath = paths
            if path is not None:
                path = self.recode(path)
            if topath is not None:
                topath = self.recode(topath)
            seen.add(path or topath)

            if topath is None:
                # file deleted
                changes.append((path, revid))
                continue

            # renamed
            if path and path != topath:
                renames[topath] = path
                changes.append((path, revid))

            # populate the mode cache
            kind, executable = [e[1] for e in (kind, executable)]
            mode = (executable and b'x') or (kind == 'symlink' and b'l') or b''
            self._modecache[(topath, revid)] = mode
            changes.append((topath, revid))

        return changes, renames

    def _filterghosts(self, ids):
        """Filters out ghost revisions which hg does not support, see
        <http://bazaar-vcs.org/GhostRevision>
        """
        parentmap = self.sourcerepo.get_parent_map(ids)
        parents = tuple([parent for parent in ids if parent in parentmap])
        return parents
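
_filterghosts() works because get_parent_map() only returns entries for revisions actually present in the repository; anything absent is a ghost and simply drops out. A toy illustration with made-up ids:

# Made-up ids: b'ghost-b' has no entry in the parent map, so it is filtered.
parentmap = {b'rev-a': (b'rev-0',)}
ids = [b'rev-a', b'ghost-b']
assert tuple(p for p in ids if p in parentmap) == (b'rev-a',)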
@@ -1,731 +1,734 @@
# hg.py - hg backend for convert extension
#
# Copyright 2005-2009 Olivia Mackall <olivia@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

# Notes for hg->hg conversion:
#
# * Old versions of Mercurial didn't trim the whitespace from the ends
#   of commit messages, but new versions do. Changesets created by
#   those older versions, then converted, may thus have different
#   hashes for changesets that are otherwise identical.
#
# * Using "--config convert.hg.saverev=true" will cause the source
#   identifier to be stored in the converted revision. This will cause
#   the converted revision to have a different identity than the
#   source.

import os
import re
import time

from mercurial.i18n import _
from mercurial.pycompat import open
from mercurial.node import (
    bin,
    hex,
    sha1nodeconstants,
)
from mercurial import (
    bookmarks,
    context,
    error,
    exchange,
    hg,
    lock as lockmod,
    logcmdutil,
    merge as mergemod,
    mergestate,
    phases,
    util,
)
from mercurial.utils import dateutil

stringio = util.stringio

from . import common

mapfile = common.mapfile
NoRepo = common.NoRepo

sha1re = re.compile(br'\b[0-9a-f]{12,40}\b')


class mercurial_sink(common.converter_sink):
    def __init__(self, ui, repotype, path):
        common.converter_sink.__init__(self, ui, repotype, path)
        self.branchnames = ui.configbool(b'convert', b'hg.usebranchnames')
        self.clonebranches = ui.configbool(b'convert', b'hg.clonebranches')
        self.tagsbranch = ui.config(b'convert', b'hg.tagsbranch')
        self.lastbranch = None
        if os.path.isdir(path) and len(os.listdir(path)) > 0:
            try:
                self.repo = hg.repository(self.ui, path)
                if not self.repo.local():
                    raise NoRepo(
                        _(b'%s is not a local Mercurial repository') % path
                    )
            except error.RepoError as err:
                ui.traceback()
                raise NoRepo(err.args[0])
        else:
            try:
                ui.status(_(b'initializing destination %s repository\n') % path)
                self.repo = hg.repository(self.ui, path, create=True)
                if not self.repo.local():
                    raise NoRepo(
                        _(b'%s is not a local Mercurial repository') % path
                    )
                self.created.append(path)
            except error.RepoError:
                ui.traceback()
                raise NoRepo(
                    _(b"could not create hg repository %s as sink") % path
                )
        self.lock = None
        self.wlock = None
        self.filemapmode = False
        self.subrevmaps = {}

    def before(self):
        self.ui.debug(b'run hg sink pre-conversion action\n')
        self.wlock = self.repo.wlock()
        self.lock = self.repo.lock()

    def after(self):
        self.ui.debug(b'run hg sink post-conversion action\n')
        if self.lock:
            self.lock.release()
        if self.wlock:
            self.wlock.release()

    def revmapfile(self):
        return self.repo.vfs.join(b"shamap")

    def authorfile(self):
        return self.repo.vfs.join(b"authormap")

    def setbranch(self, branch, pbranches):
        if not self.clonebranches:
            return

        setbranch = branch != self.lastbranch
        self.lastbranch = branch
        if not branch:
            branch = b'default'
        pbranches = [(b[0], b[1] and b[1] or b'default') for b in pbranches]

        branchpath = os.path.join(self.path, branch)
        if setbranch:
            self.after()
            try:
                self.repo = hg.repository(self.ui, branchpath)
            except Exception:
                self.repo = hg.repository(self.ui, branchpath, create=True)
            self.before()

        # pbranches may bring revisions from other branches (merge parents)
        # Make sure we have them, or pull them.
        missings = {}
        for b in pbranches:
            try:
                self.repo.lookup(b[0])
            except Exception:
                missings.setdefault(b[1], []).append(b[0])

        if missings:
            self.after()
            for pbranch, heads in sorted(missings.items()):
                pbranchpath = os.path.join(self.path, pbranch)
                prepo = hg.peer(self.ui, {}, pbranchpath)
                self.ui.note(
                    _(b'pulling from %s into %s\n') % (pbranch, branch)
                )
                exchange.pull(
                    self.repo, prepo, heads=[prepo.lookup(h) for h in heads]
                )
            self.before()

    def _rewritetags(self, source, revmap, data):
        fp = stringio()
        for line in data.splitlines():
            s = line.split(b' ', 1)
            if len(s) != 2:
                self.ui.warn(_(b'invalid tag entry: "%s"\n') % line)
                fp.write(b'%s\n' % line)  # Bogus, but keep for hash stability
                continue
            revid = revmap.get(source.lookuprev(s[0]))
            if not revid:
                if s[0] == sha1nodeconstants.nullhex:
                    revid = s[0]
                else:
                    # missing, but keep for hash stability
                    self.ui.warn(_(b'missing tag entry: "%s"\n') % line)
                    fp.write(b'%s\n' % line)
                    continue
            fp.write(b'%s %s\n' % (revid, s[1]))
        return fp.getvalue()

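The .hgtags rewrite above keeps unparsable or unmapped lines verbatim (for hash stability) and swaps mapped nodes for their converted counterparts. A self-contained sketch with fake 40-hex nodes:

revmap = {b'a' * 40: b'b' * 40}  # source node -> converted node (fake data)
line = b'%s v1.0' % (b'a' * 40)
node, name = line.split(b' ', 1)
assert b'%s %s\n' % (revmap[node], name) == b'%s v1.0\n' % (b'b' * 40)
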
    def _rewritesubstate(self, source, data):
        fp = stringio()
        for line in data.splitlines():
            s = line.split(b' ', 1)
            if len(s) != 2:
                continue

            revid = s[0]
            subpath = s[1]
            if revid != sha1nodeconstants.nullhex:
                revmap = self.subrevmaps.get(subpath)
                if revmap is None:
                    revmap = mapfile(
                        self.ui, self.repo.wjoin(subpath, b'.hg/shamap')
                    )
                    self.subrevmaps[subpath] = revmap

                    # It is reasonable that one or more of the subrepos don't
                    # need to be converted, in which case they can be cloned
                    # into place instead of converted. Therefore, only warn
                    # once.
                    msg = _(b'no ".hgsubstate" updates will be made for "%s"\n')
                    if len(revmap) == 0:
                        sub = self.repo.wvfs.reljoin(subpath, b'.hg')

                        if self.repo.wvfs.exists(sub):
                            self.ui.warn(msg % subpath)

                newid = revmap.get(revid)
                if not newid:
                    if len(revmap) > 0:
                        self.ui.warn(
                            _(b"%s is missing from %s/.hg/shamap\n")
                            % (revid, subpath)
                        )
                else:
                    revid = newid

            fp.write(b'%s %s\n' % (revid, subpath))

        return fp.getvalue()

    def _calculatemergedfiles(self, source, p1ctx, p2ctx):
        """Calculates the files from p2 that we need to pull in when merging p1
        and p2, given that the merge is coming from the given source.

        This prevents us from losing files that only exist in the target p2 and
        that don't come from the source repo (like if you're merging multiple
        repositories together).
        """
        anc = [p1ctx.ancestor(p2ctx)]
        # Calculate what files are coming from p2
        # TODO: mresult.commitinfo might be able to get that info
        mresult = mergemod.calculateupdates(
            self.repo,
            p1ctx,
            p2ctx,
            anc,
            branchmerge=True,
            force=True,
            acceptremote=False,
            followcopies=False,
        )

        for file, (action, info, msg) in mresult.filemap():
            if source.targetfilebelongstosource(file):
                # If the file belongs to the source repo, ignore the p2
                # since it will be covered by the existing fileset.
                continue

            # If the file requires actual merging, abort. We don't have enough
            # context to resolve merges correctly.
            if action in mergestate.CONVERT_MERGE_ACTIONS:
                raise error.Abort(
                    _(
                        b"unable to convert merge commit "
                        b"since target parents do not merge cleanly (file "
                        b"%s, parents %s and %s)"
                    )
                    % (file, p1ctx, p2ctx)
                )
            elif action == mergestate.ACTION_KEEP:
                # 'keep' means nothing changed from p1
                continue
            else:
                # Any other change means we want to take the p2 version
                yield file

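The action filtering in _calculatemergedfiles() reduces to a small decision table. A pure-logic sketch, with hypothetical action names standing in for the mergestate constants:

# Hypothetical action names; only the classification logic is illustrated.
MERGE_ACTIONS = {'merge'}   # would require a real content merge -> abort
KEEP = 'keep'               # unchanged from p1 -> skip

def wanted_from_p2(filemap, belongs_to_source):
    for f, action in filemap.items():
        if belongs_to_source(f) or action == KEEP:
            continue
        if action in MERGE_ACTIONS:
            raise RuntimeError('parents do not merge cleanly: %s' % f)
        yield f  # any other change means taking the p2 version
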
    def putcommit(
        self, files, copies, parents, commit, source, revmap, full, cleanp2
    ):
        files = dict(files)

        def getfilectx(repo, memctx, f):
            if p2ctx and f in p2files and f not in copies:
                self.ui.debug(b'reusing %s from p2\n' % f)
                try:
                    return p2ctx[f]
                except error.ManifestLookupError:
                    # If the file doesn't exist in p2, then we're syncing a
                    # delete, so just return None.
                    return None
            try:
                v = files[f]
            except KeyError:
                return None
            data, mode = source.getfile(f, v)
            if data is None:
                return None
            if f == b'.hgtags':
                data = self._rewritetags(source, revmap, data)
            if f == b'.hgsubstate':
                data = self._rewritesubstate(source, data)
            return context.memfilectx(
                self.repo,
                memctx,
                f,
                data,
                b'l' in mode,
                b'x' in mode,
                copies.get(f),
            )

        pl = []
        for p in parents:
            if p not in pl:
                pl.append(p)
        parents = pl
        nparents = len(parents)
        if self.filemapmode and nparents == 1:
            m1node = self.repo.changelog.read(bin(parents[0]))[0]
            parent = parents[0]

        if len(parents) < 2:
            parents.append(self.repo.nullid)
        if len(parents) < 2:
            parents.append(self.repo.nullid)
        p2 = parents.pop(0)

        text = commit.desc

        sha1s = re.findall(sha1re, text)
        for sha1 in sha1s:
            oldrev = source.lookuprev(sha1)
            newrev = revmap.get(oldrev)
            if newrev is not None:
                text = text.replace(sha1, newrev[: len(sha1)])

        extra = commit.extra.copy()

        sourcename = self.repo.ui.config(b'convert', b'hg.sourcename')
        if sourcename:
            extra[b'convert_source'] = sourcename

        for label in (
            b'source',
            b'transplant_source',
            b'rebase_source',
            b'intermediate-source',
        ):
            node = extra.get(label)

            if node is None:
                continue

            # Only transplant stores its reference in binary
            if label == b'transplant_source':
                node = hex(node)

            newrev = revmap.get(node)
            if newrev is not None:
                if label == b'transplant_source':
                    newrev = bin(newrev)

                extra[label] = newrev

        if self.branchnames and commit.branch:
            extra[b'branch'] = commit.branch
        if commit.rev and commit.saverev:
            extra[b'convert_revision'] = commit.rev

        while parents:
            p1 = p2
            p2 = parents.pop(0)
            p1ctx = self.repo[p1]
            p2ctx = None
            if p2 != self.repo.nullid:
                p2ctx = self.repo[p2]
            fileset = set(files)
            if full:
                fileset.update(self.repo[p1])
                fileset.update(self.repo[p2])

            if p2ctx:
                p2files = set(cleanp2)
                for file in self._calculatemergedfiles(source, p1ctx, p2ctx):
                    p2files.add(file)
                    fileset.add(file)

            ctx = context.memctx(
                self.repo,
                (p1, p2),
                text,
                fileset,
                getfilectx,
                commit.author,
                commit.date,
                extra,
            )

            # We won't know if the conversion changes the node until after the
            # commit, so copy the source's phase for now.
            self.repo.ui.setconfig(
                b'phases',
                b'new-commit',
                phases.phasenames[commit.phase],
                b'convert',
            )

            with self.repo.transaction(b"convert") as tr:
                if self.repo.ui.config(b'convert', b'hg.preserve-hash'):
                    origctx = commit.ctx
                else:
                    origctx = None
                node = hex(self.repo.commitctx(ctx, origctx=origctx))

                # If the node value has changed, but the phase is lower than
                # draft, set it back to draft since it hasn't been exposed
                # anywhere.
                if commit.rev != node:
                    ctx = self.repo[node]
                    if ctx.phase() < phases.draft:
                        phases.registernew(
                            self.repo, tr, phases.draft, [ctx.rev()]
                        )

            text = b"(octopus merge fixup)\n"
            p2 = node

        if self.filemapmode and nparents == 1:
            man = self.repo.manifestlog.getstorage(b'')
            mnode = self.repo.changelog.read(bin(p2))[0]
            closed = b'close' in commit.extra
            if not closed and not man.cmp(m1node, man.revision(mnode)):
                self.ui.status(_(b"filtering out empty revision\n"))
                self.repo.rollback(force=True)
                return parent
        return p2

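One subtle step in putcommit() above is rewriting changeset hashes that appear in commit messages while preserving the abbreviation length the author used. A runnable sketch with fake nodes and a stand-in for source.lookuprev():

import re

sha1re = re.compile(br'\b[0-9a-f]{12,40}\b')
revmap = {b'c' * 40: b'd' * 40}  # old full node -> new full node (fake data)

def lookuprev(prefix):
    # stand-in for source.lookuprev(): expand a prefix to the full old node
    return next((k for k in revmap if k.startswith(prefix)), None)

text = b'backed out changeset %s' % (b'c' * 12)
for sha1 in sha1re.findall(text):
    newrev = revmap.get(lookuprev(sha1))
    if newrev is not None:
        # keep the abbreviation length the author used
        text = text.replace(sha1, newrev[: len(sha1)])
assert text == b'backed out changeset %s' % (b'd' * 12)
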
    def puttags(self, tags):
        tagparent = self.repo.branchtip(self.tagsbranch, ignoremissing=True)
        tagparent = tagparent or self.repo.nullid

        oldlines = set()
        for branch, heads in self.repo.branchmap().items():
            for h in heads:
                if b'.hgtags' in self.repo[h]:
                    oldlines.update(
                        set(self.repo[h][b'.hgtags'].data().splitlines(True))
                    )
        oldlines = sorted(list(oldlines))

        newlines = sorted([(b"%s %s\n" % (tags[tag], tag)) for tag in tags])
        if newlines == oldlines:
            return None, None

        # if the old and new tags match, then there is nothing to update
        oldtags = set()
        newtags = set()
        for line in oldlines:
            s = line.strip().split(b' ', 1)
            if len(s) != 2:
                continue
            oldtags.add(s[1])
        for line in newlines:
            s = line.strip().split(b' ', 1)
            if len(s) != 2:
                continue
            if s[1] not in oldtags:
                newtags.add(s[1].strip())

        if not newtags:
            return None, None

        data = b"".join(newlines)

        def getfilectx(repo, memctx, f):
            return context.memfilectx(repo, memctx, f, data, False, False, None)

        self.ui.status(_(b"updating tags\n"))
        date = b"%d 0" % int(time.mktime(time.gmtime()))
        extra = {b'branch': self.tagsbranch}
        ctx = context.memctx(
            self.repo,
            (tagparent, None),
            b"update tags",
            [b".hgtags"],
            getfilectx,
            b"convert-repo",
            date,
            extra,
        )
        node = self.repo.commitctx(ctx)
        return hex(node), hex(tagparent)

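puttags() only commits when some tag name is genuinely new; identical tag files, or lines that merely moved, produce no commit. With toy data, the comparison boils down to:

# Toy tag files: one existing tag, one new one.
oldlines = sorted({b'%s tip-tag\n' % (b'e' * 40)})
newlines = sorted([b'%s tip-tag\n' % (b'e' * 40), b'%s v2.0\n' % (b'f' * 40)])
oldtags = {l.strip().split(b' ', 1)[1] for l in oldlines}
newtags = {l.strip().split(b' ', 1)[1] for l in newlines} - oldtags
assert newtags == {b'v2.0'}  # only v2.0 triggers an .hgtags commit
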
    def setfilemapmode(self, active):
        self.filemapmode = active

    def putbookmarks(self, updatedbookmark):
        if not len(updatedbookmark):
            return
        wlock = lock = tr = None
        try:
            wlock = self.repo.wlock()
            lock = self.repo.lock()
            tr = self.repo.transaction(b'bookmark')
            self.ui.status(_(b"updating bookmarks\n"))
            destmarks = self.repo._bookmarks
            changes = [
                (bookmark, bin(updatedbookmark[bookmark]))
                for bookmark in updatedbookmark
            ]
            destmarks.applychanges(self.repo, tr, changes)
            tr.close()
        finally:
            lockmod.release(lock, wlock, tr)

    def hascommitfrommap(self, rev):
        # the exact semantics of clonebranches is unclear so we can't say no
        return rev in self.repo or self.clonebranches

    def hascommitforsplicemap(self, rev):
        if rev not in self.repo and self.clonebranches:
            raise error.Abort(
                _(
                    b'revision %s not found in destination '
                    b'repository (lookups with clonebranches=true '
                    b'are not implemented)'
                )
                % rev
            )
        return rev in self.repo


class mercurial_source(common.converter_source):
    def __init__(self, ui, repotype, path, revs=None):
        common.converter_source.__init__(self, ui, repotype, path, revs)
        self.ignoreerrors = ui.configbool(b'convert', b'hg.ignoreerrors')
        self.ignored = set()
        self.saverev = ui.configbool(b'convert', b'hg.saverev')
        try:
            self.repo = hg.repository(self.ui, path)
            # try to provoke an exception if this isn't really a hg
            # repo, but some other bogus compatible-looking url
            if not self.repo.local():
                raise error.RepoError
        except error.RepoError:
            ui.traceback()
            raise NoRepo(_(b"%s is not a local Mercurial repository") % path)
        self.lastrev = None
        self.lastctx = None
        self._changescache = None, None
        self.convertfp = None
        # Restrict converted revisions to startrev descendants
        startnode = ui.config(b'convert', b'hg.startrev')
        hgrevs = ui.config(b'convert', b'hg.revs')
        if hgrevs is None:
            if startnode is not None:
                try:
                    startnode = self.repo.lookup(startnode)
                except error.RepoError:
                    raise error.Abort(
                        _(b'%s is not a valid start revision') % startnode
                    )
                startrev = self.repo.changelog.rev(startnode)
                children = {startnode: 1}
                for r in self.repo.changelog.descendants([startrev]):
                    children[self.repo.changelog.node(r)] = 1
                self.keep = children.__contains__
            else:
                self.keep = util.always
            if revs:
                self._heads = [self.repo.lookup(r) for r in revs]
            else:
                self._heads = self.repo.heads()
        else:
            if revs or startnode is not None:
                raise error.Abort(
                    _(
                        b'hg.revs cannot be combined with '
                        b'hg.startrev or --rev'
                    )
                )
            nodes = set()
            parents = set()
            for r in logcmdutil.revrange(self.repo, [hgrevs]):
                ctx = self.repo[r]
                nodes.add(ctx.node())
                parents.update(p.node() for p in ctx.parents())
            self.keep = nodes.__contains__
            self._heads = nodes - parents

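The hg.startrev branch above builds a membership set once and then uses plain containment as the keep predicate; a toy version over a hypothetical three-node DAG:

# Hypothetical mini-DAG: restrict to the start node and its descendants.
children = {b'start': 1}
dag = {b'start': [b'kid'], b'kid': [b'grandkid']}
stack = [b'start']
while stack:
    for child in dag.get(stack.pop(), []):
        children[child] = 1
        stack.append(child)
keep = children.__contains__
assert keep(b'grandkid') and not keep(b'unrelated')
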
    def _changectx(self, rev):
        if self.lastrev != rev:
            self.lastctx = self.repo[rev]
            self.lastrev = rev
        return self.lastctx

    def _parents(self, ctx):
        return [p for p in ctx.parents() if p and self.keep(p.node())]

    def getheads(self):
        return [hex(h) for h in self._heads if self.keep(h)]

    def getfile(self, name, rev):
        try:
            fctx = self._changectx(rev)[name]
            return fctx.data(), fctx.flags()
        except error.LookupError:
            return None, None

    def _changedfiles(self, ctx1, ctx2):
        ma, r = [], []
        maappend = ma.append
        rappend = r.append
        d = ctx1.manifest().diff(ctx2.manifest())
        for f, ((node1, flag1), (node2, flag2)) in d.items():
            if node2 is None:
                rappend(f)
            else:
                maappend(f)
        return ma, r

    def getchanges(self, rev, full):
        ctx = self._changectx(rev)
        parents = self._parents(ctx)
        if full or not parents:
            files = copyfiles = ctx.manifest()
        if parents:
            if self._changescache[0] == rev:
                # TODO: add type hints to avoid this warning, instead of
                # suppressing it:
                #     No attribute '__iter__' on None [attribute-error]
                ma, r = self._changescache[1]  # pytype: disable=attribute-error
            else:
                ma, r = self._changedfiles(parents[0], ctx)
            if not full:
                files = ma + r
                copyfiles = ma
        # _getcopies() is also run for roots and before filtering so missing
        # revlogs are detected early
        copies = self._getcopies(ctx, parents, copyfiles)
        cleanp2 = set()
        if len(parents) == 2:
            d = parents[1].manifest().diff(ctx.manifest(), clean=True)
            for f, value in d.items():
                if value is None:
                    cleanp2.add(f)
        changes = [(f, rev) for f in files if f not in self.ignored]
        changes.sort()
        return changes, copies, cleanp2

    def _getcopies(self, ctx, parents, files):
        copies = {}
        for name in files:
            if name in self.ignored:
                continue
            try:
                copysource = ctx.filectx(name).copysource()
                if copysource in self.ignored:
                    continue
                # Ignore copy sources not in parent revisions
                if not any(copysource in p for p in parents):
                    continue
                copies[name] = copysource
            except TypeError:
                pass
            except error.LookupError as e:
                if not self.ignoreerrors:
                    raise
                self.ignored.add(name)
                self.ui.warn(_(b'ignoring: %s\n') % e)
        return copies

    def getcommit(self, rev):
        ctx = self._changectx(rev)
        _parents = self._parents(ctx)
        parents = [p.hex() for p in _parents]
        optparents = [p.hex() for p in ctx.parents() if p and p not in _parents]
        crev = rev

        return common.commit(
            author=ctx.user(),
            date=dateutil.datestr(ctx.date(), b'%Y-%m-%d %H:%M:%S %1%2'),
            desc=ctx.description(),
            rev=crev,
            parents=parents,
            optparents=optparents,
            branch=ctx.branch(),
            extra=ctx.extra(),
            sortkey=ctx.rev(),
            saverev=self.saverev,
            phase=ctx.phase(),
            ctx=ctx,
        )

    def numcommits(self):
        return len(self.repo)

    def gettags(self):
        # This will get written to .hgtags, filter non global tags out.
        tags = [
            t
            for t in self.repo.tagslist()
            if self.repo.tagtype(t[0]) == b'global'
        ]
        return {name: hex(node) for name, node in tags if self.keep(node)}

    def getchangedfiles(self, rev, i):
        ctx = self._changectx(rev)
        parents = self._parents(ctx)
        if not parents and i is None:
            i = 0
            ma, r = ctx.manifest().keys(), []
        else:
            i = i or 0
            ma, r = self._changedfiles(parents[i], ctx)
        ma, r = [[f for f in l if f not in self.ignored] for l in (ma, r)]

        if i == 0:
            self._changescache = (rev, (ma, r))

        return ma + r

    def converted(self, rev, destrev):
        if self.convertfp is None:
            self.convertfp = open(self.repo.vfs.join(b'shamap'), b'ab')
        self.convertfp.write(util.tonativeeol(b'%s %s\n' % (destrev, rev)))
        self.convertfp.flush()

    def before(self):
        self.ui.debug(b'run hg source pre-conversion action\n')

    def after(self):
        self.ui.debug(b'run hg source post-conversion action\n')

    def hasnativeorder(self):
        return True

    def hasnativeclose(self):
        return True

    def lookuprev(self, rev):
        try:
            return hex(self.repo.lookup(rev))
        except (error.RepoError, error.LookupError):
            return None

    def getbookmarks(self):
        return bookmarks.listbookmarks(self.repo)

    def checkrevformat(self, revstr, mapname=b'splicemap'):
        """Mercurial, revision string is a 40 byte hex"""
        self.checkhexformat(revstr, mapname)
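
checkhexformat() (defined in common.py) is what ultimately validates splicemap entries; for Mercurial it amounts to a 40-hex-character check, roughly:

import re

def looks_like_hg_rev(revstr):
    # sketch only; the real check lives in common.checkhexformat()
    return re.fullmatch(b'[0-9a-fA-F]{40}', revstr) is not None

assert looks_like_hg_rev(b'0' * 40)
assert not looks_like_hg_rev(b'not-a-node')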
@@ -1,410 +1,411 @@
# monotone.py - monotone support for the convert extension
#
# Copyright 2008, 2009 Mikkel Fahnoe Jorgensen <mikkel@dvide.com> and
# others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

import os
import re

from mercurial.i18n import _
from mercurial.pycompat import open
from mercurial import (
    error,
    pycompat,
)
from mercurial.utils import dateutil

from . import common


class monotone_source(common.converter_source, common.commandline):
    def __init__(self, ui, repotype, path=None, revs=None):
        common.converter_source.__init__(self, ui, repotype, path, revs)
        if revs and len(revs) > 1:
            raise error.Abort(
                _(
                    b'monotone source does not support specifying '
                    b'multiple revs'
                )
            )
        common.commandline.__init__(self, ui, b'mtn')

        self.ui = ui
        self.path = path
        self.automatestdio = False
        self.revs = revs

        norepo = common.NoRepo(
            _(b"%s does not look like a monotone repository") % path
        )
        if not os.path.exists(os.path.join(path, b'_MTN')):
            # Could be a monotone repository (SQLite db file)
            try:
                f = open(path, b'rb')
                header = f.read(16)
                f.close()
            except IOError:
                header = b''
            if header != b'SQLite format 3\x00':
                raise norepo

        # regular expressions for parsing monotone output
        space = br'\s*'
        name = br'\s+"((?:\\"|[^"])*)"\s*'
        value = name
        revision = br'\s+\[(\w+)\]\s*'
        lines = br'(?:.|\n)+'

        self.dir_re = re.compile(space + b"dir" + name)
        self.file_re = re.compile(
            space + b"file" + name + b"content" + revision
        )
        self.add_file_re = re.compile(
            space + b"add_file" + name + b"content" + revision
        )
        self.patch_re = re.compile(
            space + b"patch" + name + b"from" + revision + b"to" + revision
        )
        self.rename_re = re.compile(space + b"rename" + name + b"to" + name)
        self.delete_re = re.compile(space + b"delete" + name)
        self.tag_re = re.compile(space + b"tag" + name + b"revision" + revision)
        self.cert_re = re.compile(
            lines + space + b"name" + name + b"value" + value
        )

        attr = space + b"file" + lines + space + b"attr" + space
        self.attr_execute_re = re.compile(
            attr + b'"mtn:execute"' + space + b'"true"'
        )

83 # cached data
83 # cached data
84 self.manifest_rev = None
84 self.manifest_rev = None
85 self.manifest = None
85 self.manifest = None
86 self.files = None
86 self.files = None
87 self.dirs = None
87 self.dirs = None
88
88
89 common.checktool(b'mtn', abort=False)
89 common.checktool(b'mtn', abort=False)
90
90
91 def mtnrun(self, *args, **kwargs):
91 def mtnrun(self, *args, **kwargs):
92 if self.automatestdio:
92 if self.automatestdio:
93 return self.mtnrunstdio(*args, **kwargs)
93 return self.mtnrunstdio(*args, **kwargs)
94 else:
94 else:
95 return self.mtnrunsingle(*args, **kwargs)
95 return self.mtnrunsingle(*args, **kwargs)
96
96
97 def mtnrunsingle(self, *args, **kwargs):
97 def mtnrunsingle(self, *args, **kwargs):
98 kwargs['d'] = self.path
98 kwargs['d'] = self.path
99 return self.run0(b'automate', *args, **kwargs)
99 return self.run0(b'automate', *args, **kwargs)
100
100
101 def mtnrunstdio(self, *args, **kwargs):
101 def mtnrunstdio(self, *args, **kwargs):
102 # Prepare the command in automate stdio format
102 # Prepare the command in automate stdio format
103 kwargs = pycompat.byteskwargs(kwargs)
103 kwargs = pycompat.byteskwargs(kwargs)
104 command = []
104 command = []
105 for k, v in kwargs.items():
105 for k, v in kwargs.items():
106 command.append(b"%d:%s" % (len(k), k))
106 command.append(b"%d:%s" % (len(k), k))
107 if v:
107 if v:
108 command.append(b"%d:%s" % (len(v), v))
108 command.append(b"%d:%s" % (len(v), v))
109 if command:
109 if command:
110 command.insert(0, b'o')
110 command.insert(0, b'o')
111 command.append(b'e')
111 command.append(b'e')
112
112
113 command.append(b'l')
113 command.append(b'l')
114 for arg in args:
114 for arg in args:
115 command.append(b"%d:%s" % (len(arg), arg))
115 command.append(b"%d:%s" % (len(arg), arg))
116 command.append(b'e')
116 command.append(b'e')
117 command = b''.join(command)
117 command = b''.join(command)
118
118
119 self.ui.debug(b"mtn: sending '%s'\n" % command)
119 self.ui.debug(b"mtn: sending '%s'\n" % command)
120 self.mtnwritefp.write(command)
120 self.mtnwritefp.write(command)
121 self.mtnwritefp.flush()
121 self.mtnwritefp.flush()
122
122
123 return self.mtnstdioreadcommandoutput(command)
123 return self.mtnstdioreadcommandoutput(command)
124
124
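A worked trace of the framing built above, for a hypothetical call mtnrunstdio(b'get_file_of', b'foo', r=b'abc') (the revision id is invented):

# the pieces joined are
#   ['o', '1:r', '3:abc', 'e', 'l', '11:get_file_of', '3:foo', 'e']
# giving b'o1:r3:abcel11:get_file_of3:fooe' on the wire: an 'o ... e'
# option block followed by an 'l ... e' command block, every token
# prefixed with its byte length and a colon.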
125 def mtnstdioreadpacket(self):
125 def mtnstdioreadpacket(self):
126 read = None
126 read = None
127 commandnbr = b''
127 commandnbr = b''
128 while read != b':':
128 while read != b':':
129 read = self.mtnreadfp.read(1)
129 read = self.mtnreadfp.read(1)
130 if not read:
130 if not read:
131 raise error.Abort(_(b'bad mtn packet - no end of commandnbr'))
131 raise error.Abort(_(b'bad mtn packet - no end of commandnbr'))
132 commandnbr += read
132 commandnbr += read
133 commandnbr = commandnbr[:-1]
133 commandnbr = commandnbr[:-1]
134
134
135 stream = self.mtnreadfp.read(1)
135 stream = self.mtnreadfp.read(1)
136 if stream not in b'mewptl':
136 if stream not in b'mewptl':
137 raise error.Abort(
137 raise error.Abort(
138 _(b'bad mtn packet - bad stream type %s') % stream
138 _(b'bad mtn packet - bad stream type %s') % stream
139 )
139 )
140
140
141 read = self.mtnreadfp.read(1)
141 read = self.mtnreadfp.read(1)
142 if read != b':':
142 if read != b':':
143 raise error.Abort(_(b'bad mtn packet - no divider before size'))
143 raise error.Abort(_(b'bad mtn packet - no divider before size'))
144
144
145 read = None
145 read = None
146 lengthstr = b''
146 lengthstr = b''
147 while read != b':':
147 while read != b':':
148 read = self.mtnreadfp.read(1)
148 read = self.mtnreadfp.read(1)
149 if not read:
149 if not read:
150 raise error.Abort(_(b'bad mtn packet - no end of packet size'))
150 raise error.Abort(_(b'bad mtn packet - no end of packet size'))
151 lengthstr += read
151 lengthstr += read
152 try:
152 try:
153 length = int(lengthstr[:-1])
153 length = int(lengthstr[:-1])
154 except TypeError:
154 except TypeError:
155 raise error.Abort(
155 raise error.Abort(
156 _(b'bad mtn packet - bad packet size %s') % lengthstr
156 _(b'bad mtn packet - bad packet size %s') % lengthstr
157 )
157 )
158
158
159 read = self.mtnreadfp.read(length)
159 read = self.mtnreadfp.read(length)
160 if len(read) != length:
160 if len(read) != length:
161 raise error.Abort(
161 raise error.Abort(
162 _(
162 _(
163 b"bad mtn packet - unable to read full packet "
163 b"bad mtn packet - unable to read full packet "
164 b"read %s of %s"
164 b"read %s of %s"
165 )
165 )
166 % (len(read), length)
166 % (len(read), length)
167 )
167 )
168
168
169 return (commandnbr, stream, length, read)
169 return (commandnbr, stream, length, read)
170
170
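The packets parsed byte-by-byte above have the shape '<commandnbr>:<stream>:<size>:<payload>', with the stream byte drawn from 'mewptl'. The same framing decoded in one shot from a complete buffer, as a sketch (the sample packet is fabricated):

def parse_packet(buf):
    # '<commandnbr>:<stream>:<size>:<payload>'
    commandnbr, rest = buf.split(b':', 1)
    stream, rest = rest[:1], rest[1:]
    assert rest[:1] == b':', 'missing divider before size'
    size, rest = rest[1:].split(b':', 1)
    return commandnbr, stream, int(size), rest[: int(size)]

assert parse_packet(b'0:m:5:hello') == (b'0', b'm', 5, b'hello')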
171 def mtnstdioreadcommandoutput(self, command):
171 def mtnstdioreadcommandoutput(self, command):
172 retval = []
172 retval = []
173 while True:
173 while True:
174 commandnbr, stream, length, output = self.mtnstdioreadpacket()
174 commandnbr, stream, length, output = self.mtnstdioreadpacket()
175 self.ui.debug(
175 self.ui.debug(
176 b'mtn: read packet %s:%s:%d\n' % (commandnbr, stream, length)
176 b'mtn: read packet %s:%s:%d\n' % (commandnbr, stream, length)
177 )
177 )
178
178
179 if stream == b'l':
179 if stream == b'l':
180 # End of command
180 # End of command
181 if output != b'0':
181 if output != b'0':
182 raise error.Abort(
182 raise error.Abort(
183 _(b"mtn command '%s' returned %s") % (command, output)
183 _(b"mtn command '%s' returned %s") % (command, output)
184 )
184 )
185 break
185 break
186 elif stream in b'ew':
186 elif stream in b'ew':
187 # Error, warning output
187 # Error, warning output
188 self.ui.warn(_(b'%s error:\n') % self.command)
188 self.ui.warn(_(b'%s error:\n') % self.command)
189 self.ui.warn(output)
189 self.ui.warn(output)
190 elif stream == b'p':
190 elif stream == b'p':
191 # Progress messages
191 # Progress messages
192 self.ui.debug(b'mtn: ' + output)
192 self.ui.debug(b'mtn: ' + output)
193 elif stream == b'm':
193 elif stream == b'm':
194 # Main stream - command output
194 # Main stream - command output
195 retval.append(output)
195 retval.append(output)
196
196
197 return b''.join(retval)
197 return b''.join(retval)
198
198
199 def mtnloadmanifest(self, rev):
199 def mtnloadmanifest(self, rev):
200 if self.manifest_rev == rev:
200 if self.manifest_rev == rev:
201 return
201 return
202 self.manifest = self.mtnrun(b"get_manifest_of", rev).split(b"\n\n")
202 self.manifest = self.mtnrun(b"get_manifest_of", rev).split(b"\n\n")
203 self.manifest_rev = rev
203 self.manifest_rev = rev
204 self.files = {}
204 self.files = {}
205 self.dirs = {}
205 self.dirs = {}
206
206
207 for e in self.manifest:
207 for e in self.manifest:
208 m = self.file_re.match(e)
208 m = self.file_re.match(e)
209 if m:
209 if m:
210 attr = b""
210 attr = b""
211 name = m.group(1)
211 name = m.group(1)
212 node = m.group(2)
212 node = m.group(2)
213 if self.attr_execute_re.match(e):
213 if self.attr_execute_re.match(e):
214 attr += b"x"
214 attr += b"x"
215 self.files[name] = (node, attr)
215 self.files[name] = (node, attr)
216 m = self.dir_re.match(e)
216 m = self.dir_re.match(e)
217 if m:
217 if m:
218 self.dirs[m.group(1)] = True
218 self.dirs[m.group(1)] = True
219
219
220 def mtnisfile(self, name, rev):
220 def mtnisfile(self, name, rev):
221 # a non-file could be a directory or a deleted or renamed file
221 # a non-file could be a directory or a deleted or renamed file
222 self.mtnloadmanifest(rev)
222 self.mtnloadmanifest(rev)
223 return name in self.files
223 return name in self.files
224
224
225 def mtnisdir(self, name, rev):
225 def mtnisdir(self, name, rev):
226 self.mtnloadmanifest(rev)
226 self.mtnloadmanifest(rev)
227 return name in self.dirs
227 return name in self.dirs
228
228
229 def mtngetcerts(self, rev):
229 def mtngetcerts(self, rev):
230 certs = {
230 certs = {
231 b"author": b"<missing>",
231 b"author": b"<missing>",
232 b"date": b"<missing>",
232 b"date": b"<missing>",
233 b"changelog": b"<missing>",
233 b"changelog": b"<missing>",
234 b"branch": b"<missing>",
234 b"branch": b"<missing>",
235 }
235 }
236 certlist = self.mtnrun(b"certs", rev)
236 certlist = self.mtnrun(b"certs", rev)
237 # mtn < 0.45:
237 # mtn < 0.45:
238 # key "test@selenic.com"
238 # key "test@selenic.com"
239 # mtn >= 0.45:
239 # mtn >= 0.45:
240 # key [ff58a7ffb771907c4ff68995eada1c4da068d328]
240 # key [ff58a7ffb771907c4ff68995eada1c4da068d328]
241 certlist = re.split(br'\n\n {6}key ["\[]', certlist)
241 certlist = re.split(br'\n\n {6}key ["\[]', certlist)
242 for e in certlist:
242 for e in certlist:
243 m = self.cert_re.match(e)
243 m = self.cert_re.match(e)
244 if m:
244 if m:
245 name, value = m.groups()
245 name, value = m.groups()
246 assert value is not None # help pytype
246 value = value.replace(br'\"', b'"')
247 value = value.replace(br'\"', b'"')
247 value = value.replace(br'\\', b'\\')
248 value = value.replace(br'\\', b'\\')
248 certs[name] = value
249 certs[name] = value
249 # Monotone may have subsecond dates: 2005-02-05T09:39:12.364306
250 # Monotone may have subsecond dates: 2005-02-05T09:39:12.364306
250 # and all times are stored in UTC
251 # and all times are stored in UTC
251 certs[b"date"] = certs[b"date"].split(b'.')[0] + b" UTC"
252 certs[b"date"] = certs[b"date"].split(b'.')[0] + b" UTC"
252 return certs
253 return certs
253
254
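The unescaping and date normalisation above, traced on literal bytes (the cert value is invented):

value = br'say \"hi\" and \\'
value = value.replace(br'\"', b'"').replace(br'\\', b'\\')
# -> say "hi" and \   (the escaped quote and backslash collapse)
date = b'2005-02-05T09:39:12.364306'
date = date.split(b'.')[0] + b' UTC'  # b'2005-02-05T09:39:12 UTC'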
254 # implement the converter_source interface:
255 # implement the converter_source interface:
255
256
256 def getheads(self):
257 def getheads(self):
257 if not self.revs:
258 if not self.revs:
258 return self.mtnrun(b"leaves").splitlines()
259 return self.mtnrun(b"leaves").splitlines()
259 else:
260 else:
260 return self.revs
261 return self.revs
261
262
262 def getchanges(self, rev, full):
263 def getchanges(self, rev, full):
263 if full:
264 if full:
264 raise error.Abort(
265 raise error.Abort(
265 _(b"convert from monotone does not support --full")
266 _(b"convert from monotone does not support --full")
266 )
267 )
267 revision = self.mtnrun(b"get_revision", rev).split(b"\n\n")
268 revision = self.mtnrun(b"get_revision", rev).split(b"\n\n")
268 files = {}
269 files = {}
269 ignoremove = {}
270 ignoremove = {}
270 renameddirs = []
271 renameddirs = []
271 copies = {}
272 copies = {}
272 for e in revision:
273 for e in revision:
273 m = self.add_file_re.match(e)
274 m = self.add_file_re.match(e)
274 if m:
275 if m:
275 files[m.group(1)] = rev
276 files[m.group(1)] = rev
276 ignoremove[m.group(1)] = rev
277 ignoremove[m.group(1)] = rev
277 m = self.patch_re.match(e)
278 m = self.patch_re.match(e)
278 if m:
279 if m:
279 files[m.group(1)] = rev
280 files[m.group(1)] = rev
280 # Delete/rename is handled later when the convert engine
281 # Delete/rename is handled later when the convert engine
281 # discovers an IOError exception from getfile,
282 # discovers an IOError exception from getfile,
282 # but only if we add the "from" file to the list of changes.
283 # but only if we add the "from" file to the list of changes.
283 m = self.delete_re.match(e)
284 m = self.delete_re.match(e)
284 if m:
285 if m:
285 files[m.group(1)] = rev
286 files[m.group(1)] = rev
286 m = self.rename_re.match(e)
287 m = self.rename_re.match(e)
287 if m:
288 if m:
288 toname = m.group(2)
289 toname = m.group(2)
289 fromname = m.group(1)
290 fromname = m.group(1)
290 if self.mtnisfile(toname, rev):
291 if self.mtnisfile(toname, rev):
291 ignoremove[toname] = 1
292 ignoremove[toname] = 1
292 copies[toname] = fromname
293 copies[toname] = fromname
293 files[toname] = rev
294 files[toname] = rev
294 files[fromname] = rev
295 files[fromname] = rev
295 elif self.mtnisdir(toname, rev):
296 elif self.mtnisdir(toname, rev):
296 renameddirs.append((fromname, toname))
297 renameddirs.append((fromname, toname))
297
298
298 # Directory renames can be handled only once we have recorded
299 # Directory renames can be handled only once we have recorded
299 # all new files
300 # all new files
300 for fromdir, todir in renameddirs:
301 for fromdir, todir in renameddirs:
301 renamed = {}
302 renamed = {}
302 for tofile in self.files:
303 for tofile in self.files:
303 if tofile in ignoremove:
304 if tofile in ignoremove:
304 continue
305 continue
305 if tofile.startswith(todir + b'/'):
306 if tofile.startswith(todir + b'/'):
306 renamed[tofile] = fromdir + tofile[len(todir) :]
307 renamed[tofile] = fromdir + tofile[len(todir) :]
307 # Avoid chained moves like:
308 # Avoid chained moves like:
308 # d1(/a) => d3/d1(/a)
309 # d1(/a) => d3/d1(/a)
309 # d2 => d3
310 # d2 => d3
310 ignoremove[tofile] = 1
311 ignoremove[tofile] = 1
311 for tofile, fromfile in renamed.items():
312 for tofile, fromfile in renamed.items():
312 self.ui.debug(
313 self.ui.debug(
313 b"copying file in renamed directory from '%s' to '%s'"
314 b"copying file in renamed directory from '%s' to '%s'"
314 % (fromfile, tofile),
315 % (fromfile, tofile),
315 b'\n',
316 b'\n',
316 )
317 )
317 files[tofile] = rev
318 files[tofile] = rev
318 copies[tofile] = fromfile
319 copies[tofile] = fromfile
319 for fromfile in renamed.values():
320 for fromfile in renamed.values():
320 files[fromfile] = rev
321 files[fromfile] = rev
321
322
322 return (files.items(), copies, set())
323 return (files.items(), copies, set())
323
324
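The slice arithmetic in the rename loop above, shown in isolation with hypothetical paths:

fromdir, todir = b'old', b'new'
tofile = b'new/sub/a.txt'
assert tofile.startswith(todir + b'/')
fromfile = fromdir + tofile[len(todir):]
assert fromfile == b'old/sub/a.txt'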
324 def getfile(self, name, rev):
325 def getfile(self, name, rev):
325 if not self.mtnisfile(name, rev):
326 if not self.mtnisfile(name, rev):
326 return None, None
327 return None, None
327 try:
328 try:
328 data = self.mtnrun(b"get_file_of", name, r=rev)
329 data = self.mtnrun(b"get_file_of", name, r=rev)
329 except Exception:
330 except Exception:
330 return None, None
331 return None, None
331 self.mtnloadmanifest(rev)
332 self.mtnloadmanifest(rev)
332 node, attr = self.files.get(name, (None, b""))
333 node, attr = self.files.get(name, (None, b""))
333 return data, attr
334 return data, attr
334
335
335 def getcommit(self, rev):
336 def getcommit(self, rev):
336 extra = {}
337 extra = {}
337 certs = self.mtngetcerts(rev)
338 certs = self.mtngetcerts(rev)
338 if certs.get(b'suspend') == certs[b"branch"]:
339 if certs.get(b'suspend') == certs[b"branch"]:
339 extra[b'close'] = b'1'
340 extra[b'close'] = b'1'
340 dateformat = b"%Y-%m-%dT%H:%M:%S"
341 dateformat = b"%Y-%m-%dT%H:%M:%S"
341 return common.commit(
342 return common.commit(
342 author=certs[b"author"],
343 author=certs[b"author"],
343 date=dateutil.datestr(dateutil.strdate(certs[b"date"], dateformat)),
344 date=dateutil.datestr(dateutil.strdate(certs[b"date"], dateformat)),
344 desc=certs[b"changelog"],
345 desc=certs[b"changelog"],
345 rev=rev,
346 rev=rev,
346 parents=self.mtnrun(b"parents", rev).splitlines(),
347 parents=self.mtnrun(b"parents", rev).splitlines(),
347 branch=certs[b"branch"],
348 branch=certs[b"branch"],
348 extra=extra,
349 extra=extra,
349 )
350 )
350
351
351 def gettags(self):
352 def gettags(self):
352 tags = {}
353 tags = {}
353 for e in self.mtnrun(b"tags").split(b"\n\n"):
354 for e in self.mtnrun(b"tags").split(b"\n\n"):
354 m = self.tag_re.match(e)
355 m = self.tag_re.match(e)
355 if m:
356 if m:
356 tags[m.group(1)] = m.group(2)
357 tags[m.group(1)] = m.group(2)
357 return tags
358 return tags
358
359
359 def getchangedfiles(self, rev, i):
360 def getchangedfiles(self, rev, i):
360 # This function is only needed to support --filemap
361 # This function is only needed to support --filemap
361 # ... and we don't support that
362 # ... and we don't support that
362 raise NotImplementedError
363 raise NotImplementedError
363
364
364 def before(self):
365 def before(self):
365 # Check if we have a new enough version to use automate stdio
366 # Check if we have a new enough version to use automate stdio
366 try:
367 try:
367 versionstr = self.mtnrunsingle(b"interface_version")
368 versionstr = self.mtnrunsingle(b"interface_version")
368 version = float(versionstr)
369 version = float(versionstr)
369 except Exception:
370 except Exception:
370 raise error.Abort(
371 raise error.Abort(
371 _(b"unable to determine mtn automate interface version")
372 _(b"unable to determine mtn automate interface version")
372 )
373 )
373
374
374 if version >= 12.0:
375 if version >= 12.0:
375 self.automatestdio = True
376 self.automatestdio = True
376 self.ui.debug(
377 self.ui.debug(
377 b"mtn automate version %f - using automate stdio\n" % version
378 b"mtn automate version %f - using automate stdio\n" % version
378 )
379 )
379
380
380 # launch the long-running automate stdio process
381 # launch the long-running automate stdio process
381 self.mtnwritefp, self.mtnreadfp = self._run2(
382 self.mtnwritefp, self.mtnreadfp = self._run2(
382 b'automate', b'stdio', b'-d', self.path
383 b'automate', b'stdio', b'-d', self.path
383 )
384 )
384 # read the headers
385 # read the headers
385 read = self.mtnreadfp.readline()
386 read = self.mtnreadfp.readline()
386 if read != b'format-version: 2\n':
387 if read != b'format-version: 2\n':
387 raise error.Abort(
388 raise error.Abort(
388 _(b'mtn automate stdio header unexpected: %s') % read
389 _(b'mtn automate stdio header unexpected: %s') % read
389 )
390 )
390 while read != b'\n':
391 while read != b'\n':
391 read = self.mtnreadfp.readline()
392 read = self.mtnreadfp.readline()
392 if not read:
393 if not read:
393 raise error.Abort(
394 raise error.Abort(
394 _(
395 _(
395 b"failed to reach end of mtn automate "
396 b"failed to reach end of mtn automate "
396 b"stdio headers"
397 b"stdio headers"
397 )
398 )
398 )
399 )
399 else:
400 else:
400 self.ui.debug(
401 self.ui.debug(
401 b"mtn automate version %s - not using automate stdio "
402 b"mtn automate version %s - not using automate stdio "
402 b"(automate >= 12.0 - mtn >= 0.46 is needed)\n" % version
403 b"(automate >= 12.0 - mtn >= 0.46 is needed)\n" % version
403 )
404 )
404
405
405 def after(self):
406 def after(self):
406 if self.automatestdio:
407 if self.automatestdio:
407 self.mtnwritefp.close()
408 self.mtnwritefp.close()
408 self.mtnwritefp = None
409 self.mtnwritefp = None
409 self.mtnreadfp.close()
410 self.mtnreadfp.close()
410 self.mtnreadfp = None
411 self.mtnreadfp = None
@@ -1,1721 +1,1730
1 # Subversion 1.4/1.5 Python API backend
1 # Subversion 1.4/1.5 Python API backend
2 #
2 #
3 # Copyright(C) 2007 Daniel Holth et al
3 # Copyright(C) 2007 Daniel Holth et al
4
4
5 import codecs
5 import codecs
6 import locale
6 import locale
7 import os
7 import os
8 import pickle
8 import pickle
9 import re
9 import re
10 import xml.dom.minidom
10 import xml.dom.minidom
11
11
12 from mercurial.i18n import _
12 from mercurial.i18n import _
13 from mercurial.pycompat import open
13 from mercurial.pycompat import open
14 from mercurial import (
14 from mercurial import (
15 encoding,
15 encoding,
16 error,
16 error,
17 pycompat,
17 pycompat,
18 util,
18 util,
19 vfs as vfsmod,
19 vfs as vfsmod,
20 )
20 )
21 from mercurial.utils import (
21 from mercurial.utils import (
22 dateutil,
22 dateutil,
23 procutil,
23 procutil,
24 stringutil,
24 stringutil,
25 )
25 )
26
26
27 from . import common
27 from . import common
28
28
29 stringio = util.stringio
29 stringio = util.stringio
30 propertycache = util.propertycache
30 propertycache = util.propertycache
31 urlerr = util.urlerr
31 urlerr = util.urlerr
32 urlreq = util.urlreq
32 urlreq = util.urlreq
33
33
34 commandline = common.commandline
34 commandline = common.commandline
35 commit = common.commit
35 commit = common.commit
36 converter_sink = common.converter_sink
36 converter_sink = common.converter_sink
37 converter_source = common.converter_source
37 converter_source = common.converter_source
38 decodeargs = common.decodeargs
38 decodeargs = common.decodeargs
39 encodeargs = common.encodeargs
39 encodeargs = common.encodeargs
40 makedatetimestamp = common.makedatetimestamp
40 makedatetimestamp = common.makedatetimestamp
41 mapfile = common.mapfile
41 mapfile = common.mapfile
42 MissingTool = common.MissingTool
42 MissingTool = common.MissingTool
43 NoRepo = common.NoRepo
43 NoRepo = common.NoRepo
44
44
45 # Subversion stuff. Works best with very recent Python SVN bindings
45 # Subversion stuff. Works best with very recent Python SVN bindings
46 # e.g. SVN 1.5 or backports. Thanks to the bzr folks for enhancing
46 # e.g. SVN 1.5 or backports. Thanks to the bzr folks for enhancing
47 # these bindings.
47 # these bindings.
48
48
49 try:
49 try:
50 # pytype: disable=import-error
50 import svn
51 import svn
51 import svn.client
52 import svn.client
52 import svn.core
53 import svn.core
53 import svn.ra
54 import svn.ra
54 import svn.delta
55 import svn.delta
56
57 # pytype: enable=import-error
55 from . import transport
58 from . import transport
56 import warnings
59 import warnings
57
60
58 warnings.filterwarnings(
61 warnings.filterwarnings(
59 'ignore', module='svn.core', category=DeprecationWarning
62 'ignore', module='svn.core', category=DeprecationWarning
60 )
63 )
61 svn.core.SubversionException # trigger import to catch error
64 svn.core.SubversionException # trigger import to catch error
62
65
63 except ImportError:
66 except ImportError:
64 svn = None
67 svn = None
65
68
66
69
67 # In Subversion, paths and URLs are Unicode (encoded as UTF-8), which
70 # In Subversion, paths and URLs are Unicode (encoded as UTF-8), which
68 # Subversion converts to and from native strings when interfacing with the OS.
71 # Subversion converts to and from native strings when interfacing with the OS.
69 # When passing paths and URLs to Subversion, we have to recode them such that
72 # When passing paths and URLs to Subversion, we have to recode them such that
70 # it round-trips with what Subversion is doing.
73 # it round-trips with what Subversion is doing.
71
74
72 fsencoding = None
75 fsencoding = None
73
76
74
77
75 def init_fsencoding():
78 def init_fsencoding():
76 global fsencoding, fsencoding_is_utf8
79 global fsencoding, fsencoding_is_utf8
77 if fsencoding is not None:
80 if fsencoding is not None:
78 return
81 return
79 if pycompat.iswindows:
82 if pycompat.iswindows:
80 # On Windows, filenames are Unicode, but we store them using the MBCS
83 # On Windows, filenames are Unicode, but we store them using the MBCS
81 # encoding.
84 # encoding.
82 fsencoding = 'mbcs'
85 fsencoding = 'mbcs'
83 else:
86 else:
84 # This is the encoding used to convert UTF-8 back to natively-encoded
87 # This is the encoding used to convert UTF-8 back to natively-encoded
85 # strings in Subversion 1.14.0 or earlier with APR 1.7.0 or earlier.
88 # strings in Subversion 1.14.0 or earlier with APR 1.7.0 or earlier.
86 with util.with_lc_ctype():
89 with util.with_lc_ctype():
87 fsencoding = locale.nl_langinfo(locale.CODESET) or 'ISO-8859-1'
90 fsencoding = locale.nl_langinfo(locale.CODESET) or 'ISO-8859-1'
88 fsencoding = codecs.lookup(fsencoding).name
91 fsencoding = codecs.lookup(fsencoding).name
89 fsencoding_is_utf8 = fsencoding == codecs.lookup('utf-8').name
92 fsencoding_is_utf8 = fsencoding == codecs.lookup('utf-8').name
90
93
91
94
92 def fs2svn(s):
95 def fs2svn(s):
93 if fsencoding_is_utf8:
96 if fsencoding_is_utf8:
94 return s
97 return s
95 else:
98 else:
96 return s.decode(fsencoding).encode('utf-8')
99 return s.decode(fsencoding).encode('utf-8')
97
100
98
101
99 def formatsvndate(date):
102 def formatsvndate(date):
100 return dateutil.datestr(date, b'%Y-%m-%dT%H:%M:%S.000000Z')
103 return dateutil.datestr(date, b'%Y-%m-%dT%H:%M:%S.000000Z')
101
104
102
105
103 def parsesvndate(s):
106 def parsesvndate(s):
104 # Example SVN datetime. Includes microseconds.
107 # Example SVN datetime. Includes microseconds.
105 # ISO-8601 conformant
108 # ISO-8601 conformant
106 # '2007-01-04T17:35:00.902377Z'
109 # '2007-01-04T17:35:00.902377Z'
107 return dateutil.parsedate(s[:19] + b' UTC', [b'%Y-%m-%dT%H:%M:%S'])
110 return dateutil.parsedate(s[:19] + b' UTC', [b'%Y-%m-%dT%H:%M:%S'])
108
111
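A quick trace of the truncation above: only the first 19 bytes of the SVN timestamp survive, and the result parses as UTC, so the returned offset should be 0 (assuming the usual (unixtime, offset) shape of parsedate's result):

# parsesvndate(b'2007-01-04T17:35:00.902377Z')
#   -> dateutil.parsedate(b'2007-01-04T17:35:00 UTC', [b'%Y-%m-%dT%H:%M:%S'])
#   -> (unix timestamp, 0)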
109
112
110 class SvnPathNotFound(Exception):
113 class SvnPathNotFound(Exception):
111 pass
114 pass
112
115
113
116
114 def revsplit(rev):
117 def revsplit(rev):
115 """Parse a revision string and return (uuid, path, revnum).
118 """Parse a revision string and return (uuid, path, revnum).
116 >>> revsplit(b'svn:a2147622-4a9f-4db4-a8d3-13562ff547b2'
119 >>> revsplit(b'svn:a2147622-4a9f-4db4-a8d3-13562ff547b2'
117 ... b'/proj%20B/mytrunk/mytrunk@1')
120 ... b'/proj%20B/mytrunk/mytrunk@1')
118 ('a2147622-4a9f-4db4-a8d3-13562ff547b2', '/proj%20B/mytrunk/mytrunk', 1)
121 ('a2147622-4a9f-4db4-a8d3-13562ff547b2', '/proj%20B/mytrunk/mytrunk', 1)
119 >>> revsplit(b'svn:8af66a51-67f5-4354-b62c-98d67cc7be1d@1')
122 >>> revsplit(b'svn:8af66a51-67f5-4354-b62c-98d67cc7be1d@1')
120 ('', '', 1)
123 ('', '', 1)
121 >>> revsplit(b'@7')
124 >>> revsplit(b'@7')
122 ('', '', 7)
125 ('', '', 7)
123 >>> revsplit(b'7')
126 >>> revsplit(b'7')
124 ('', '', 0)
127 ('', '', 0)
125 >>> revsplit(b'bad')
128 >>> revsplit(b'bad')
126 ('', '', 0)
129 ('', '', 0)
127 """
130 """
128 parts = rev.rsplit(b'@', 1)
131 parts = rev.rsplit(b'@', 1)
129 revnum = 0
132 revnum = 0
130 if len(parts) > 1:
133 if len(parts) > 1:
131 revnum = int(parts[1])
134 revnum = int(parts[1])
132 parts = parts[0].split(b'/', 1)
135 parts = parts[0].split(b'/', 1)
133 uuid = b''
136 uuid = b''
134 mod = b''
137 mod = b''
135 if len(parts) > 1 and parts[0].startswith(b'svn:'):
138 if len(parts) > 1 and parts[0].startswith(b'svn:'):
136 uuid = parts[0][4:]
139 uuid = parts[0][4:]
137 mod = b'/' + parts[1]
140 mod = b'/' + parts[1]
138 return uuid, mod, revnum
141 return uuid, mod, revnum
139
142
140
143
141 def quote(s):
144 def quote(s):
142 # As of svn 1.7, many svn calls expect "canonical" paths. In
145 # As of svn 1.7, many svn calls expect "canonical" paths. In
143 # theory, we should call svn.core.*canonicalize() on all paths
146 # theory, we should call svn.core.*canonicalize() on all paths
144 # before passing them to the API. Instead, we assume the base url
147 # before passing them to the API. Instead, we assume the base url
145 # is canonical and copy the behaviour of svn URL encoding function
148 # is canonical and copy the behaviour of svn URL encoding function
146 # so we can extend it safely with new components. The "safe"
149 # so we can extend it safely with new components. The "safe"
147 # characters were taken from the "svn_uri__char_validity" table in
150 # characters were taken from the "svn_uri__char_validity" table in
148 # libsvn_subr/path.c.
151 # libsvn_subr/path.c.
149 return urlreq.quote(s, b"!$&'()*+,-./:=@_~")
152 return urlreq.quote(s, b"!$&'()*+,-./:=@_~")
150
153
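For example, a module path containing a space is percent-encoded while the svn-safe characters pass through untouched:

# quote(b'/proj B/trunk') -> b'/proj%20B/trunk'
# '/', ':' and the rest of b"!$&'()*+,-./:=@_~" are never escaped.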
151
154
152 def geturl(path):
155 def geturl(path):
153 """Convert path or URL to a SVN URL, encoded in UTF-8.
156 """Convert path or URL to a SVN URL, encoded in UTF-8.
154
157
155 This can raise UnicodeDecodeError if the path or URL can't be converted to
158 This can raise UnicodeDecodeError if the path or URL can't be converted to
156 unicode using `fsencoding`.
159 unicode using `fsencoding`.
157 """
160 """
158 try:
161 try:
159 return svn.client.url_from_path(
162 return svn.client.url_from_path(
160 svn.core.svn_path_canonicalize(fs2svn(path))
163 svn.core.svn_path_canonicalize(fs2svn(path))
161 )
164 )
162 except svn.core.SubversionException:
165 except svn.core.SubversionException:
163 # svn.client.url_from_path() fails with local repositories
166 # svn.client.url_from_path() fails with local repositories
164 pass
167 pass
165 if os.path.isdir(path):
168 if os.path.isdir(path):
166 path = os.path.normpath(util.abspath(path))
169 path = os.path.normpath(util.abspath(path))
167 if pycompat.iswindows:
170 if pycompat.iswindows:
168 path = b'/' + util.normpath(path)
171 path = b'/' + util.normpath(path)
169 # Module URL is later compared with the repository URL returned
172 # Module URL is later compared with the repository URL returned
170 # by svn API, which is UTF-8.
173 # by svn API, which is UTF-8.
171 path = fs2svn(path)
174 path = fs2svn(path)
172 path = b'file://%s' % quote(path)
175 path = b'file://%s' % quote(path)
173 return svn.core.svn_path_canonicalize(path)
176 return svn.core.svn_path_canonicalize(path)
174
177
175
178
176 def optrev(number):
179 def optrev(number):
177 optrev = svn.core.svn_opt_revision_t()
180 optrev = svn.core.svn_opt_revision_t()
178 optrev.kind = svn.core.svn_opt_revision_number
181 optrev.kind = svn.core.svn_opt_revision_number
179 optrev.value.number = number
182 optrev.value.number = number
180 return optrev
183 return optrev
181
184
182
185
183 class changedpath:
186 class changedpath:
184 def __init__(self, p):
187 def __init__(self, p):
185 self.copyfrom_path = p.copyfrom_path
188 self.copyfrom_path = p.copyfrom_path
186 self.copyfrom_rev = p.copyfrom_rev
189 self.copyfrom_rev = p.copyfrom_rev
187 self.action = p.action
190 self.action = p.action
188
191
189
192
190 def get_log_child(
193 def get_log_child(
191 fp,
194 fp,
192 url,
195 url,
193 paths,
196 paths,
194 start,
197 start,
195 end,
198 end,
196 limit=0,
199 limit=0,
197 discover_changed_paths=True,
200 discover_changed_paths=True,
198 strict_node_history=False,
201 strict_node_history=False,
199 ):
202 ):
200 protocol = -1
203 protocol = -1
201
204
202 def receiver(orig_paths, revnum, author, date, message, pool):
205 def receiver(orig_paths, revnum, author, date, message, pool):
203 paths = {}
206 paths = {}
204 if orig_paths is not None:
207 if orig_paths is not None:
205 for k, v in orig_paths.items():
208 for k, v in orig_paths.items():
206 paths[k] = changedpath(v)
209 paths[k] = changedpath(v)
207 pickle.dump((paths, revnum, author, date, message), fp, protocol)
210 pickle.dump((paths, revnum, author, date, message), fp, protocol)
208
211
209 try:
212 try:
210 # Use an ra of our own so that our parent can consume
213 # Use an ra of our own so that our parent can consume
211 # our results without confusing the server.
214 # our results without confusing the server.
212 t = transport.SvnRaTransport(url=url)
215 t = transport.SvnRaTransport(url=url)
213 svn.ra.get_log(
216 svn.ra.get_log(
214 t.ra,
217 t.ra,
215 paths,
218 paths,
216 start,
219 start,
217 end,
220 end,
218 limit,
221 limit,
219 discover_changed_paths,
222 discover_changed_paths,
220 strict_node_history,
223 strict_node_history,
221 receiver,
224 receiver,
222 )
225 )
223 except IOError:
226 except IOError:
224 # Caller may interrupt the iteration
227 # Caller may interrupt the iteration
225 pickle.dump(None, fp, protocol)
228 pickle.dump(None, fp, protocol)
226 except Exception as inst:
229 except Exception as inst:
227 pickle.dump(stringutil.forcebytestr(inst), fp, protocol)
230 pickle.dump(stringutil.forcebytestr(inst), fp, protocol)
228 else:
231 else:
229 pickle.dump(None, fp, protocol)
232 pickle.dump(None, fp, protocol)
230 fp.flush()
233 fp.flush()
231 # With a large history, the cleanup process goes crazy and suddenly
234 # With a large history, the cleanup process goes crazy and suddenly
232 # consumes a *huge* amount of memory. The output file being closed,
235 # consumes a *huge* amount of memory. The output file being closed,
233 # there is no need for clean termination.
236 # there is no need for clean termination.
234 os._exit(0)
237 os._exit(0)
235
238
236
239
237 def debugsvnlog(ui, **opts):
240 def debugsvnlog(ui, **opts):
238 """Fetch SVN log in a subprocess and channel them back to parent to
241 """Fetch SVN log in a subprocess and channel them back to parent to
239 avoid memory collection issues.
242 avoid memory collection issues.
240 """
243 """
241 with util.with_lc_ctype():
244 with util.with_lc_ctype():
242 if svn is None:
245 if svn is None:
243 raise error.Abort(
246 raise error.Abort(
244 _(b'debugsvnlog could not load Subversion python bindings')
247 _(b'debugsvnlog could not load Subversion python bindings')
245 )
248 )
246
249
247 args = decodeargs(ui.fin.read())
250 args = decodeargs(ui.fin.read())
248 get_log_child(ui.fout, *args)
251 get_log_child(ui.fout, *args)
249
252
250
253
251 class logstream:
254 class logstream:
252 """Interruptible revision log iterator."""
255 """Interruptible revision log iterator."""
253
256
254 def __init__(self, stdout):
257 def __init__(self, stdout):
255 self._stdout = stdout
258 self._stdout = stdout
256
259
257 def __iter__(self):
260 def __iter__(self):
258 while True:
261 while True:
259 try:
262 try:
260 entry = pickle.load(self._stdout)
263 entry = pickle.load(self._stdout)
261 except EOFError:
264 except EOFError:
262 raise error.Abort(
265 raise error.Abort(
263 _(
266 _(
264 b'Mercurial failed to run itself, check'
267 b'Mercurial failed to run itself, check'
265 b' hg executable is in PATH'
268 b' hg executable is in PATH'
266 )
269 )
267 )
270 )
268 try:
271 try:
269 orig_paths, revnum, author, date, message = entry
272 orig_paths, revnum, author, date, message = entry
270 except (TypeError, ValueError):
273 except (TypeError, ValueError):
271 if entry is None:
274 if entry is None:
272 break
275 break
273 raise error.Abort(_(b"log stream exception '%s'") % entry)
276 raise error.Abort(_(b"log stream exception '%s'") % entry)
274 yield entry
277 yield entry
275
278
276 def close(self):
279 def close(self):
277 if self._stdout:
280 if self._stdout:
278 self._stdout.close()
281 self._stdout.close()
279 self._stdout = None
282 self._stdout = None
280
283
281
284
282 class directlogstream(list):
285 class directlogstream(list):
283 """Direct revision log iterator.
286 """Direct revision log iterator.
284 This can be used for debugging and development but it will probably leak
287 This can be used for debugging and development but it will probably leak
285 memory and is not suitable for real conversions."""
288 memory and is not suitable for real conversions."""
286
289
287 def __init__(
290 def __init__(
288 self,
291 self,
289 url,
292 url,
290 paths,
293 paths,
291 start,
294 start,
292 end,
295 end,
293 limit=0,
296 limit=0,
294 discover_changed_paths=True,
297 discover_changed_paths=True,
295 strict_node_history=False,
298 strict_node_history=False,
296 ):
299 ):
297 def receiver(orig_paths, revnum, author, date, message, pool):
300 def receiver(orig_paths, revnum, author, date, message, pool):
298 paths = {}
301 paths = {}
299 if orig_paths is not None:
302 if orig_paths is not None:
300 for k, v in orig_paths.items():
303 for k, v in orig_paths.items():
301 paths[k] = changedpath(v)
304 paths[k] = changedpath(v)
302 self.append((paths, revnum, author, date, message))
305 self.append((paths, revnum, author, date, message))
303
306
304 # Use an ra of our own so that our parent can consume
307 # Use an ra of our own so that our parent can consume
305 # our results without confusing the server.
308 # our results without confusing the server.
306 t = transport.SvnRaTransport(url=url)
309 t = transport.SvnRaTransport(url=url)
307 svn.ra.get_log(
310 svn.ra.get_log(
308 t.ra,
311 t.ra,
309 paths,
312 paths,
310 start,
313 start,
311 end,
314 end,
312 limit,
315 limit,
313 discover_changed_paths,
316 discover_changed_paths,
314 strict_node_history,
317 strict_node_history,
315 receiver,
318 receiver,
316 )
319 )
317
320
318 def close(self):
321 def close(self):
319 pass
322 pass
320
323
321
324
322 # Check to see if the given path is a local Subversion repo. Verify this by
325 # Check to see if the given path is a local Subversion repo. Verify this by
323 # looking for several svn-specific files and directories in the given
326 # looking for several svn-specific files and directories in the given
324 # directory.
327 # directory.
325 def filecheck(ui, path, proto):
328 def filecheck(ui, path, proto):
326 for x in (b'locks', b'hooks', b'format', b'db'):
329 for x in (b'locks', b'hooks', b'format', b'db'):
327 if not os.path.exists(os.path.join(path, x)):
330 if not os.path.exists(os.path.join(path, x)):
328 return False
331 return False
329 return True
332 return True
330
333
331
334
332 # Check to see if a given path is the root of an svn repo over http. We verify
335 # Check to see if a given path is the root of an svn repo over http. We verify
333 # this by requesting a version-controlled URL we know can't exist and looking
336 # this by requesting a version-controlled URL we know can't exist and looking
334 # for the svn-specific "not found" XML.
337 # for the svn-specific "not found" XML.
335 def httpcheck(ui, path, proto):
338 def httpcheck(ui, path, proto):
336 try:
339 try:
337 opener = urlreq.buildopener()
340 opener = urlreq.buildopener()
338 rsp = opener.open(
341 rsp = opener.open(
339 pycompat.strurl(b'%s://%s/!svn/ver/0/.svn' % (proto, path)), b'rb'
342 pycompat.strurl(b'%s://%s/!svn/ver/0/.svn' % (proto, path)), b'rb'
340 )
343 )
341 data = rsp.read()
344 data = rsp.read()
342 except urlerr.httperror as inst:
345 except urlerr.httperror as inst:
343 if inst.code != 404:
346 if inst.code != 404:
344 # Except for 404 we cannot know for sure this is not an svn repo
347 # Except for 404 we cannot know for sure this is not an svn repo
345 ui.warn(
348 ui.warn(
346 _(
349 _(
347 b'svn: cannot probe remote repository, assume it could '
350 b'svn: cannot probe remote repository, assume it could '
348 b'be a subversion repository. Use --source-type if you '
351 b'be a subversion repository. Use --source-type if you '
349 b'know better.\n'
352 b'know better.\n'
350 )
353 )
351 )
354 )
352 return True
355 return True
353 data = inst.fp.read()
356 data = inst.fp.read()
354 except Exception:
357 except Exception:
355 # Could be urlerr.urlerror if the URL is invalid or anything else.
358 # Could be urlerr.urlerror if the URL is invalid or anything else.
356 return False
359 return False
357 return b'<m:human-readable errcode="160013">' in data
360 return b'<m:human-readable errcode="160013">' in data
358
361
359
362
360 protomap = {
363 protomap = {
361 b'http': httpcheck,
364 b'http': httpcheck,
362 b'https': httpcheck,
365 b'https': httpcheck,
363 b'file': filecheck,
366 b'file': filecheck,
364 }
367 }
365
368
366
369
367 def issvnurl(ui, url):
370 def issvnurl(ui, url):
368 try:
371 try:
369 proto, path = url.split(b'://', 1)
372 proto, path = url.split(b'://', 1)
370 if proto == b'file':
373 if proto == b'file':
371 if (
374 if (
372 pycompat.iswindows
375 pycompat.iswindows
373 and path[:1] == b'/'
376 and path[:1] == b'/'
374 and path[1:2].isalpha()
377 and path[1:2].isalpha()
375 and path[2:6].lower() == b'%3a/'
378 and path[2:6].lower() == b'%3a/'
376 ):
379 ):
377 path = path[:2] + b':/' + path[6:]
380 path = path[:2] + b':/' + path[6:]
378 try:
381 try:
379 unicodepath = path.decode(fsencoding)
382 unicodepath = path.decode(fsencoding)
380 except UnicodeDecodeError:
383 except UnicodeDecodeError:
381 ui.warn(
384 ui.warn(
382 _(
385 _(
383 b'Subversion requires that file URLs can be converted '
386 b'Subversion requires that file URLs can be converted '
384 b'to Unicode using the current locale encoding (%s)\n'
387 b'to Unicode using the current locale encoding (%s)\n'
385 )
388 )
386 % pycompat.sysbytes(fsencoding)
389 % pycompat.sysbytes(fsencoding)
387 )
390 )
388 return False
391 return False
389
392
390 # Subversion paths are Unicode. Since it does percent-decoding on
393 # Subversion paths are Unicode. Since it does percent-decoding on
391 # UTF-8-encoded strings, percent-encoded bytes are interpreted as
394 # UTF-8-encoded strings, percent-encoded bytes are interpreted as
392 # UTF-8.
395 # UTF-8.
393 # On Python 3, we have to pass unicode to urlreq.url2pathname().
396 # On Python 3, we have to pass unicode to urlreq.url2pathname().
394 # Percent-decoded bytes get decoded using UTF-8 and the 'replace'
397 # Percent-decoded bytes get decoded using UTF-8 and the 'replace'
395 # error handler.
398 # error handler.
396 unicodepath = urlreq.url2pathname(unicodepath)
399 unicodepath = urlreq.url2pathname(unicodepath)
397 if u'\N{REPLACEMENT CHARACTER}' in unicodepath:
400 if u'\N{REPLACEMENT CHARACTER}' in unicodepath:
398 ui.warn(
401 ui.warn(
399 _(
402 _(
400 b'Subversion does not support non-UTF-8 '
403 b'Subversion does not support non-UTF-8 '
401 b'percent-encoded bytes in file URLs\n'
404 b'percent-encoded bytes in file URLs\n'
402 )
405 )
403 )
406 )
404 return False
407 return False
405
408
406 # Below, we approximate how Subversion checks the path. On Unix, we
409 # Below, we approximate how Subversion checks the path. On Unix, we
407 # should therefore convert the path to bytes using `fsencoding`
410 # should therefore convert the path to bytes using `fsencoding`
408 # (like Subversion does). On Windows, the right thing would
411 # (like Subversion does). On Windows, the right thing would
409 # actually be to leave the path as unicode. For now, we restrict
412 # actually be to leave the path as unicode. For now, we restrict
410 # the path to MBCS.
413 # the path to MBCS.
411 path = unicodepath.encode(fsencoding)
414 path = unicodepath.encode(fsencoding)
412 except ValueError:
415 except ValueError:
413 proto = b'file'
416 proto = b'file'
414 path = util.abspath(url)
417 path = util.abspath(url)
415 try:
418 try:
416 path.decode(fsencoding)
419 path.decode(fsencoding)
417 except UnicodeDecodeError:
420 except UnicodeDecodeError:
418 ui.warn(
421 ui.warn(
419 _(
422 _(
420 b'Subversion requires that paths can be converted to '
423 b'Subversion requires that paths can be converted to '
421 b'Unicode using the current locale encoding (%s)\n'
424 b'Unicode using the current locale encoding (%s)\n'
422 )
425 )
423 % pycompat.sysbytes(fsencoding)
426 % pycompat.sysbytes(fsencoding)
424 )
427 )
425 return False
428 return False
426 if proto == b'file':
429 if proto == b'file':
427 path = util.pconvert(path)
430 path = util.pconvert(path)
428 elif proto in (b'http', b'https'):
431 elif proto in (b'http', b'https'):
429 if not encoding.isasciistr(path):
432 if not encoding.isasciistr(path):
430 ui.warn(
433 ui.warn(
431 _(
434 _(
432 b"Subversion sources don't support non-ASCII characters in "
435 b"Subversion sources don't support non-ASCII characters in "
433 b"HTTP(S) URLs. Please percent-encode them.\n"
436 b"HTTP(S) URLs. Please percent-encode them.\n"
434 )
437 )
435 )
438 )
436 return False
439 return False
437 check = protomap.get(proto, lambda *args: False)
440 check = protomap.get(proto, lambda *args: False)
438 while b'/' in path:
441 while b'/' in path:
439 if check(ui, path, proto):
442 if check(ui, path, proto):
440 return True
443 return True
441 path = path.rsplit(b'/', 1)[0]
444 path = path.rsplit(b'/', 1)[0]
442 return False
445 return False
443
446
444
447
445 # SVN conversion code stolen from bzr-svn and tailor
448 # SVN conversion code stolen from bzr-svn and tailor
446 #
449 #
447 # Subversion looks like a versioned filesystem, branches structures
450 # Subversion looks like a versioned filesystem, branches structures
448 # are defined by conventions and not enforced by the tool. First,
451 # are defined by conventions and not enforced by the tool. First,
449 # we define the potential branches (modules) as "trunk" and "branches"
452 # we define the potential branches (modules) as "trunk" and "branches"
450 # children directories. Revisions are then identified by their
453 # children directories. Revisions are then identified by their
451 # module and revision number (and a repository identifier).
454 # module and revision number (and a repository identifier).
452 #
455 #
453 # The revision graph is really a tree (or a forest). By default, a
456 # The revision graph is really a tree (or a forest). By default, a
454 # revision parent is the previous revision in the same module. If the
457 # revision parent is the previous revision in the same module. If the
455 # module directory is copied/moved from another module then the
458 # module directory is copied/moved from another module then the
456 # revision is the module root and its parent the source revision in
459 # revision is the module root and its parent the source revision in
457 # the parent module. A revision has at most one parent.
460 # the parent module. A revision has at most one parent.
458 #
461 #
459 class svn_source(converter_source):
462 class svn_source(converter_source):
460 def __init__(self, ui, repotype, url, revs=None):
463 def __init__(self, ui, repotype, url, revs=None):
461 super(svn_source, self).__init__(ui, repotype, url, revs=revs)
464 super(svn_source, self).__init__(ui, repotype, url, revs=revs)
462
465
463 init_fsencoding()
466 init_fsencoding()
464 if not (
467 if not (
465 url.startswith(b'svn://')
468 url.startswith(b'svn://')
466 or url.startswith(b'svn+ssh://')
469 or url.startswith(b'svn+ssh://')
467 or (
470 or (
468 os.path.exists(url)
471 os.path.exists(url)
469 and os.path.exists(os.path.join(url, b'.svn'))
472 and os.path.exists(os.path.join(url, b'.svn'))
470 )
473 )
471 or issvnurl(ui, url)
474 or issvnurl(ui, url)
472 ):
475 ):
473 raise NoRepo(
476 raise NoRepo(
474 _(b"%s does not look like a Subversion repository") % url
477 _(b"%s does not look like a Subversion repository") % url
475 )
478 )
476 if svn is None:
479 if svn is None:
477 raise MissingTool(_(b'could not load Subversion python bindings'))
480 raise MissingTool(_(b'could not load Subversion python bindings'))
478
481
479 try:
482 try:
480 version = svn.core.SVN_VER_MAJOR, svn.core.SVN_VER_MINOR
483 version = svn.core.SVN_VER_MAJOR, svn.core.SVN_VER_MINOR
481 if version < (1, 4):
484 if version < (1, 4):
482 raise MissingTool(
485 raise MissingTool(
483 _(
486 _(
484 b'Subversion python bindings %d.%d found, '
487 b'Subversion python bindings %d.%d found, '
485 b'1.4 or later required'
488 b'1.4 or later required'
486 )
489 )
487 % version
490 % version
488 )
491 )
489 except AttributeError:
492 except AttributeError:
490 raise MissingTool(
493 raise MissingTool(
491 _(
494 _(
492 b'Subversion python bindings are too old, 1.4 '
495 b'Subversion python bindings are too old, 1.4 '
493 b'or later required'
496 b'or later required'
494 )
497 )
495 )
498 )
496
499
497 self.lastrevs = {}
500 self.lastrevs = {}
498
501
499 latest = None
502 latest = None
500 try:
503 try:
501 # Support file://path@rev syntax. Useful e.g. to convert
504 # Support file://path@rev syntax. Useful e.g. to convert
502 # deleted branches.
505 # deleted branches.
503 at = url.rfind(b'@')
506 at = url.rfind(b'@')
504 if at >= 0:
507 if at >= 0:
505 latest = int(url[at + 1 :])
508 latest = int(url[at + 1 :])
506 url = url[:at]
509 url = url[:at]
507 except ValueError:
510 except ValueError:
508 pass
511 pass
509 self.url = geturl(url)
512 self.url = geturl(url)
510 self.encoding = b'UTF-8' # Subversion is always nominal UTF-8
513 self.encoding = b'UTF-8' # Subversion is always nominal UTF-8
511 try:
514 try:
512 with util.with_lc_ctype():
515 with util.with_lc_ctype():
513 self.transport = transport.SvnRaTransport(url=self.url)
516 self.transport = transport.SvnRaTransport(url=self.url)
514 self.ra = self.transport.ra
517 self.ra = self.transport.ra
515 self.ctx = self.transport.client
518 self.ctx = self.transport.client
516 self.baseurl = svn.ra.get_repos_root(self.ra)
519 self.baseurl = svn.ra.get_repos_root(self.ra)
517 # Module is either empty or a repository path starting with
520 # Module is either empty or a repository path starting with
518 # a slash and not ending with a slash.
521 # a slash and not ending with a slash.
519 self.module = urlreq.unquote(self.url[len(self.baseurl) :])
522 self.module = urlreq.unquote(self.url[len(self.baseurl) :])
520 self.prevmodule = None
523 self.prevmodule = None
521 self.rootmodule = self.module
524 self.rootmodule = self.module
522 self.commits = {}
525 self.commits = {}
523 self.paths = {}
526 self.paths = {}
524 self.uuid = svn.ra.get_uuid(self.ra)
527 self.uuid = svn.ra.get_uuid(self.ra)
525 except svn.core.SubversionException:
528 except svn.core.SubversionException:
526 ui.traceback()
529 ui.traceback()
527 svnversion = b'%d.%d.%d' % (
530 svnversion = b'%d.%d.%d' % (
528 svn.core.SVN_VER_MAJOR,
531 svn.core.SVN_VER_MAJOR,
529 svn.core.SVN_VER_MINOR,
532 svn.core.SVN_VER_MINOR,
530 svn.core.SVN_VER_MICRO,
533 svn.core.SVN_VER_MICRO,
531 )
534 )
532 raise NoRepo(
535 raise NoRepo(
533 _(
536 _(
534 b"%s does not look like a Subversion repository "
537 b"%s does not look like a Subversion repository "
535 b"to libsvn version %s"
538 b"to libsvn version %s"
536 )
539 )
537 % (self.url, svnversion)
540 % (self.url, svnversion)
538 )
541 )
539
542
540 if revs:
543 if revs:
541 if len(revs) > 1:
544 if len(revs) > 1:
542 raise error.Abort(
545 raise error.Abort(
543 _(
546 _(
544 b'subversion source does not support '
547 b'subversion source does not support '
545 b'specifying multiple revisions'
548 b'specifying multiple revisions'
546 )
549 )
547 )
550 )
548 try:
551 try:
549 latest = int(revs[0])
552 latest = int(revs[0])
550 except ValueError:
553 except ValueError:
551 raise error.Abort(
554 raise error.Abort(
552 _(b'svn: revision %s is not an integer') % revs[0]
555 _(b'svn: revision %s is not an integer') % revs[0]
553 )
556 )
554
557
555 trunkcfg = self.ui.config(b'convert', b'svn.trunk')
558 trunkcfg = self.ui.config(b'convert', b'svn.trunk')
556 if trunkcfg is None:
559 if trunkcfg is None:
557 trunkcfg = b'trunk'
560 trunkcfg = b'trunk'
558 self.trunkname = trunkcfg.strip(b'/')
561 self.trunkname = trunkcfg.strip(b'/')
559 self.startrev = self.ui.config(b'convert', b'svn.startrev')
562 self.startrev = self.ui.config(b'convert', b'svn.startrev')
560 try:
563 try:
561 self.startrev = int(self.startrev)
564 self.startrev = int(self.startrev)
562 if self.startrev < 0:
565 if self.startrev < 0:
563 self.startrev = 0
566 self.startrev = 0
564 except ValueError:
567 except ValueError:
565 raise error.Abort(
568 raise error.Abort(
566 _(b'svn: start revision %s is not an integer') % self.startrev
569 _(b'svn: start revision %s is not an integer') % self.startrev
567 )
570 )
568
571
569 try:
572 try:
570 with util.with_lc_ctype():
573 with util.with_lc_ctype():
571 self.head = self.latest(self.module, latest)
574 self.head = self.latest(self.module, latest)
572 except SvnPathNotFound:
575 except SvnPathNotFound:
573 self.head = None
576 self.head = None
574 if not self.head:
577 if not self.head:
575 raise error.Abort(
578 raise error.Abort(
576 _(b'no revision found in module %s') % self.module
579 _(b'no revision found in module %s') % self.module
577 )
580 )
578 self.last_changed = self.revnum(self.head)
581 self.last_changed = self.revnum(self.head)
579
582
580 self._changescache = (None, None)
583 self._changescache = (None, None)
581
584
582 if os.path.exists(os.path.join(url, b'.svn/entries')):
585 if os.path.exists(os.path.join(url, b'.svn/entries')):
583 self.wc = url
586 self.wc = url
584 else:
587 else:
585 self.wc = None
588 self.wc = None
586 self.convertfp = None
589 self.convertfp = None
587
590
588 def before(self):
591 def before(self):
589 self.with_lc_ctype = util.with_lc_ctype()
592 self.with_lc_ctype = util.with_lc_ctype()
590 self.with_lc_ctype.__enter__()
593 self.with_lc_ctype.__enter__()
591
594
592 def after(self):
595 def after(self):
593 self.with_lc_ctype.__exit__(None, None, None)
596 self.with_lc_ctype.__exit__(None, None, None)
594
597
595 def setrevmap(self, revmap):
598 def setrevmap(self, revmap):
596 lastrevs = {}
599 lastrevs = {}
597 for revid in revmap:
600 for revid in revmap:
598 uuid, module, revnum = revsplit(revid)
601 uuid, module, revnum = revsplit(revid)
599 lastrevnum = lastrevs.setdefault(module, revnum)
602 lastrevnum = lastrevs.setdefault(module, revnum)
600 if revnum > lastrevnum:
603 if revnum > lastrevnum:
601 lastrevs[module] = revnum
604 lastrevs[module] = revnum
602 self.lastrevs = lastrevs
605 self.lastrevs = lastrevs
603
606
604 def exists(self, path, optrev):
607 def exists(self, path, optrev):
605 try:
608 try:
606 svn.client.ls(
609 svn.client.ls(
607 self.url.rstrip(b'/') + b'/' + quote(path),
610 self.url.rstrip(b'/') + b'/' + quote(path),
608 optrev,
611 optrev,
609 False,
612 False,
610 self.ctx,
613 self.ctx,
611 )
614 )
612 return True
615 return True
613 except svn.core.SubversionException:
616 except svn.core.SubversionException:
614 return False
617 return False
615
618
    def getheads(self):
        def isdir(path, revnum):
            kind = self._checkpath(path, revnum)
            return kind == svn.core.svn_node_dir

        def getcfgpath(name, rev):
            cfgpath = self.ui.config(b'convert', b'svn.' + name)
            if cfgpath is not None and cfgpath.strip() == b'':
                return None
            path = (cfgpath or name).strip(b'/')
            if not self.exists(path, rev):
                if self.module.endswith(path) and name == b'trunk':
                    # we are converting from inside this directory
                    return None
                if cfgpath:
                    raise error.Abort(
                        _(b'expected %s to be at %r, but not found')
                        % (name, path)
                    )
                return None
            self.ui.note(
                _(b'found %s at %r\n') % (name, pycompat.bytestr(path))
            )
            return path

        rev = optrev(self.last_changed)
        oldmodule = b''
        trunk = getcfgpath(b'trunk', rev)
        self.tags = getcfgpath(b'tags', rev)
        branches = getcfgpath(b'branches', rev)

        # If the project has a trunk or branches, we will extract heads
        # from them. We keep the project root otherwise.
        if trunk:
            oldmodule = self.module or b''
            self.module += b'/' + trunk
            self.head = self.latest(self.module, self.last_changed)
            if not self.head:
                raise error.Abort(
                    _(b'no revision found in module %s') % self.module
                )

        # First head in the list is the module's head
        self.heads = [self.head]
        if self.tags is not None:
            self.tags = b'%s/%s' % (oldmodule, (self.tags or b'tags'))

        # Check if branches bring a few more heads to the list
        if branches:
            rpath = self.url.strip(b'/')
            branchnames = svn.client.ls(
                rpath + b'/' + quote(branches), rev, False, self.ctx
            )
            for branch in sorted(branchnames):
                module = b'%s/%s/%s' % (oldmodule, branches, branch)
                if not isdir(module, self.last_changed):
                    continue
                brevid = self.latest(module, self.last_changed)
                if not brevid:
                    self.ui.note(_(b'ignoring empty branch %s\n') % branch)
                    continue
                self.ui.note(
                    _(b'found branch %s at %d\n')
                    % (branch, self.revnum(brevid))
                )
                self.heads.append(brevid)

        if self.startrev and self.heads:
            if len(self.heads) > 1:
                raise error.Abort(
                    _(
                        b'svn: start revision is not supported '
                        b'with more than one branch'
                    )
                )
            revnum = self.revnum(self.heads[0])
            if revnum < self.startrev:
                raise error.Abort(
                    _(b'svn: no revision found after start revision %d')
                    % self.startrev
                )

        return self.heads

    def _getchanges(self, rev, full):
        (paths, parents) = self.paths[rev]
        copies = {}
        if parents:
            files, self.removed, copies = self.expandpaths(rev, paths, parents)
        if full or not parents:
            # Perform a full checkout on roots
            uuid, module, revnum = revsplit(rev)
            entries = svn.client.ls(
                self.baseurl + quote(module), optrev(revnum), True, self.ctx
            )
            files = [
                n
                for n, e in entries.items()
                if e.kind == svn.core.svn_node_file
            ]
            self.removed = set()

        files.sort()
        files = pycompat.ziplist(files, [rev] * len(files))
        return (files, copies)

    def getchanges(self, rev, full):
        # reuse cache from getchangedfiles
        if self._changescache[0] == rev and not full:
            # TODO: add type hints to avoid this warning, instead of
            # suppressing it:
            #   No attribute '__iter__' on None [attribute-error]

            # pytype: disable=attribute-error
            (files, copies) = self._changescache[1]
            # pytype: enable=attribute-error
        else:
            (files, copies) = self._getchanges(rev, full)
        # caller caches the result, so free it here to release memory
        del self.paths[rev]
        return (files, copies, set())

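    # A possible way to drop the pragma above, as the TODO suggests (a
    # sketch, not the project's code): narrow the Optional away explicitly
    # before unpacking, since the rev comparison guarantees the cached
    # tuple is populated:
    #
    #   cached = self._changescache[1]
    #   assert cached is not None
    #   (files, copies) = cached
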
    def getchangedfiles(self, rev, i):
        # called from filemap - cache computed values for reuse in getchanges
        (files, copies) = self._getchanges(rev, False)
        self._changescache = (rev, (files, copies))
        return [f[0] for f in files]

    def getcommit(self, rev):
        if rev not in self.commits:
            uuid, module, revnum = revsplit(rev)
            self.module = module
            self.reparent(module)
            # We assume that:
            # - requests for revisions after "stop" come from the
            #   revision graph backward traversal. Cache all of them
            #   down to stop, they will be used eventually.
            # - requests for revisions before "stop" come to get
            #   isolated branches parents. Just fetch what is needed.
            stop = self.lastrevs.get(module, 0)
            if revnum < stop:
                stop = revnum + 1
            self._fetch_revisions(revnum, stop)
            if rev not in self.commits:
                raise error.Abort(_(b'svn: revision %s not found') % revnum)
        revcommit = self.commits[rev]
        # caller caches the result, so free it here to release memory
        del self.commits[rev]
        return revcommit

    def checkrevformat(self, revstr, mapname=b'splicemap'):
        """fail if the revision format does not match the expected format"""
        if not re.match(
            br'svn:[0-9a-f]{8,8}-[0-9a-f]{4,4}-'
            br'[0-9a-f]{4,4}-[0-9a-f]{4,4}-[0-9a-f]'
            br'{12,12}(.*)@[0-9]+$',
            revstr,
        ):
            raise error.Abort(
                _(b'%s entry %s is not a valid revision identifier')
                % (mapname, revstr)
            )

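    # For reference, an identifier accepted by the pattern above looks
    # like (hypothetical UUID and path):
    #
    #   svn:a2147622-4a9f-4db4-a8d3-13562ff547b2/trunk@42
    #
    # i.e. an 8-4-4-4-12 hex UUID, an optional module path and a decimal
    # revision number after '@'.
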
    def numcommits(self):
        return int(self.head.rsplit(b'@', 1)[1]) - self.startrev

    def gettags(self):
        tags = {}
        if self.tags is None:
            return tags

        # svn tags are just a convention, project branches left in a
        # 'tags' directory. There is no other relationship than
        # ancestry, which is expensive to discover and makes them hard
        # to update incrementally. Worse, past revisions may be
        # referenced by tags far away in the future, requiring a deep
        # history traversal on every calculation. Current code
        # performs a single backward traversal, tracking moves within
        # the tags directory (tag renaming) and recording a new tag
        # every time a project is copied from outside the tags
        # directory. It also lists deleted tags; this behaviour may
        # change in the future.
        pendings = []
        tagspath = self.tags
        start = svn.ra.get_latest_revnum(self.ra)
        stream = self._getlog([self.tags], start, self.startrev)
        try:
            for entry in stream:
                origpaths, revnum, author, date, message = entry
                if not origpaths:
                    origpaths = {}
                copies = [
                    (e.copyfrom_path, e.copyfrom_rev, p)
                    for p, e in origpaths.items()
                    if e.copyfrom_path
                ]
                # Apply moves/copies from more specific to general
                copies.sort(reverse=True)

                srctagspath = tagspath
                if copies and copies[-1][2] == tagspath:
                    # Track tags directory moves
                    srctagspath = copies.pop()[0]

                for source, sourcerev, dest in copies:
                    if not dest.startswith(tagspath + b'/'):
                        continue
                    for tag in pendings:
                        if tag[0].startswith(dest):
                            tagpath = source + tag[0][len(dest) :]
                            tag[:2] = [tagpath, sourcerev]
                            break
                    else:
                        pendings.append([source, sourcerev, dest])

                # Filter out tags with children coming from different
                # parts of the repository like:
                # /tags/tag.1 (from /trunk:10)
                # /tags/tag.1/foo (from /branches/foo:12)
                # Here /tags/tag.1 is discarded as well as its children.
                # It happens with tools like cvs2svn. Such tags cannot
                # be represented in mercurial.
                addeds = {
                    p: e.copyfrom_path
                    for p, e in origpaths.items()
                    if e.action == b'A' and e.copyfrom_path
                }
                badroots = set()
                for destroot in addeds:
                    for source, sourcerev, dest in pendings:
                        if not dest.startswith(
                            destroot + b'/'
                        ) or source.startswith(addeds[destroot] + b'/'):
                            continue
                        badroots.add(destroot)
                        break

                for badroot in badroots:
                    pendings = [
                        p
                        for p in pendings
                        if p[2] != badroot
                        and not p[2].startswith(badroot + b'/')
                    ]

                # Tell tag renamings from tag creations
                renamings = []
                for source, sourcerev, dest in pendings:
                    tagname = dest.split(b'/')[-1]
                    if source.startswith(srctagspath):
                        renamings.append([source, sourcerev, tagname])
                        continue
                    if tagname in tags:
                        # Keep the latest tag value
                        continue
                    # From revision may be fake, get one with changes
                    try:
                        tagid = self.latest(source, sourcerev)
                        if tagid and tagname not in tags:
                            tags[tagname] = tagid
                    except SvnPathNotFound:
                        # It happens when we are following directories
                        # we assumed were copied with their parents
                        # but were really created in the tag
                        # directory.
                        pass
                pendings = renamings
                tagspath = srctagspath
        finally:
            stream.close()
        return tags

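    # Illustration of the backward walk above (hypothetical paths): a
    # revision copying /tags/1.0 from /trunk@18 enters pendings as
    # [b'/trunk', 18, b'/tags/1.0']; if an older revision is then found
    # that created /tags by copying /oldtags, srctagspath redirects the
    # walk to /oldtags, so tags survive a rename of the tags directory
    # itself.
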
    def converted(self, rev, destrev):
        if not self.wc:
            return
        if self.convertfp is None:
            self.convertfp = open(
                os.path.join(self.wc, b'.svn', b'hg-shamap'), b'ab'
            )
        self.convertfp.write(
            util.tonativeeol(b'%s %d\n' % (destrev, self.revnum(rev)))
        )
        self.convertfp.flush()

    def revid(self, revnum, module=None):
        return b'svn:%s%s@%d' % (self.uuid, module or self.module, revnum)

    def revnum(self, rev):
        return int(rev.split(b'@')[-1])

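    # Example (hypothetical values): the two helpers above are inverses
    # over the numeric part of an identifier:
    #
    #   rev = self.revid(42)   # b'svn:<uuid><module>@42'
    #   self.revnum(rev)       # 42
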
    def latest(self, path, stop=None):
        """Find the latest revid affecting path, up to stop revision
        number. If stop is None, default to repository latest
        revision. It may return a revision in a different module,
        since a branch may be moved without a change being
        reported. Return None if computed module does not belong to
        rootmodule subtree.
        """

        def findchanges(path, start, stop=None):
            stream = self._getlog([path], start, stop or 1)
            try:
                for entry in stream:
                    paths, revnum, author, date, message = entry
                    if stop is None and paths:
                        # We do not know the latest changed revision,
                        # keep the first one with changed paths.
                        break
                    if stop is not None and revnum <= stop:
                        break

                    for p in paths:
                        if not path.startswith(p) or not paths[p].copyfrom_path:
                            continue
                        newpath = paths[p].copyfrom_path + path[len(p) :]
                        self.ui.debug(
                            b"branch renamed from %s to %s at %d\n"
                            % (path, newpath, revnum)
                        )
                        path = newpath
                        break
                if not paths:
                    revnum = None
                return revnum, path
            finally:
                stream.close()

        if not path.startswith(self.rootmodule):
            # Requests on foreign branches may be forbidden at server level
            self.ui.debug(b'ignoring foreign branch %r\n' % path)
            return None

        if stop is None:
            stop = svn.ra.get_latest_revnum(self.ra)
        try:
            prevmodule = self.reparent(b'')
            dirent = svn.ra.stat(self.ra, path.strip(b'/'), stop)
            self.reparent(prevmodule)
        except svn.core.SubversionException:
            dirent = None
        if not dirent:
            raise SvnPathNotFound(
                _(b'%s not found up to revision %d') % (path, stop)
            )

        # stat() gives us the previous revision on this line of
        # development, but it might be in *another module*. Fetch the
        # log and detect renames down to the latest revision.
        revnum, realpath = findchanges(path, stop, dirent.created_rev)
        if revnum is None:
            # Tools like svnsync can create empty revisions when
            # synchronizing only a subtree, for instance. These empty
            # revisions keep their original created_rev values despite
            # all changes having disappeared, and can be returned by
            # ra.stat(), at least when stating the root module. In that
            # case, do not trust created_rev and scan the whole history.
            revnum, realpath = findchanges(path, stop)
            if revnum is None:
                self.ui.debug(b'ignoring empty branch %r\n' % realpath)
                return None

        if not realpath.startswith(self.rootmodule):
            self.ui.debug(b'ignoring foreign branch %r\n' % realpath)
            return None
        return self.revid(revnum, realpath)

    def reparent(self, module):
        """Reparent the svn transport and return the previous parent."""
        if self.prevmodule == module:
            return module
        svnurl = self.baseurl + quote(module)
        prevmodule = self.prevmodule
        if prevmodule is None:
            prevmodule = b''
        self.ui.debug(b"reparent to %s\n" % svnurl)
        svn.ra.reparent(self.ra, svnurl)
        self.prevmodule = module
        return prevmodule

989
998
990 def expandpaths(self, rev, paths, parents):
999 def expandpaths(self, rev, paths, parents):
991 changed, removed = set(), set()
1000 changed, removed = set(), set()
992 copies = {}
1001 copies = {}
993
1002
994 new_module, revnum = revsplit(rev)[1:]
1003 new_module, revnum = revsplit(rev)[1:]
995 if new_module != self.module:
1004 if new_module != self.module:
996 self.module = new_module
1005 self.module = new_module
997 self.reparent(self.module)
1006 self.reparent(self.module)
998
1007
999 progress = self.ui.makeprogress(
1008 progress = self.ui.makeprogress(
1000 _(b'scanning paths'), unit=_(b'paths'), total=len(paths)
1009 _(b'scanning paths'), unit=_(b'paths'), total=len(paths)
1001 )
1010 )
1002 for i, (path, ent) in enumerate(paths):
1011 for i, (path, ent) in enumerate(paths):
1003 progress.update(i, item=path)
1012 progress.update(i, item=path)
1004 entrypath = self.getrelpath(path)
1013 entrypath = self.getrelpath(path)
1005
1014
1006 kind = self._checkpath(entrypath, revnum)
1015 kind = self._checkpath(entrypath, revnum)
1007 if kind == svn.core.svn_node_file:
1016 if kind == svn.core.svn_node_file:
1008 changed.add(self.recode(entrypath))
1017 changed.add(self.recode(entrypath))
1009 if not ent.copyfrom_path or not parents:
1018 if not ent.copyfrom_path or not parents:
1010 continue
1019 continue
1011 # Copy sources not in parent revisions cannot be
1020 # Copy sources not in parent revisions cannot be
1012 # represented, ignore their origin for now
1021 # represented, ignore their origin for now
1013 pmodule, prevnum = revsplit(parents[0])[1:]
1022 pmodule, prevnum = revsplit(parents[0])[1:]
1014 if ent.copyfrom_rev < prevnum:
1023 if ent.copyfrom_rev < prevnum:
1015 continue
1024 continue
1016 copyfrom_path = self.getrelpath(ent.copyfrom_path, pmodule)
1025 copyfrom_path = self.getrelpath(ent.copyfrom_path, pmodule)
1017 if not copyfrom_path:
1026 if not copyfrom_path:
1018 continue
1027 continue
1019 self.ui.debug(
1028 self.ui.debug(
1020 b"copied to %s from %s@%d\n"
1029 b"copied to %s from %s@%d\n"
1021 % (entrypath, copyfrom_path, ent.copyfrom_rev)
1030 % (entrypath, copyfrom_path, ent.copyfrom_rev)
1022 )
1031 )
1023 copies[self.recode(entrypath)] = self.recode(copyfrom_path)
1032 copies[self.recode(entrypath)] = self.recode(copyfrom_path)
1024 elif kind == 0: # gone, but had better be a deleted *file*
1033 elif kind == 0: # gone, but had better be a deleted *file*
1025 self.ui.debug(b"gone from %d\n" % ent.copyfrom_rev)
1034 self.ui.debug(b"gone from %d\n" % ent.copyfrom_rev)
1026 pmodule, prevnum = revsplit(parents[0])[1:]
1035 pmodule, prevnum = revsplit(parents[0])[1:]
1027 parentpath = pmodule + b"/" + entrypath
1036 parentpath = pmodule + b"/" + entrypath
1028 fromkind = self._checkpath(entrypath, prevnum, pmodule)
1037 fromkind = self._checkpath(entrypath, prevnum, pmodule)
1029
1038
1030 if fromkind == svn.core.svn_node_file:
1039 if fromkind == svn.core.svn_node_file:
1031 removed.add(self.recode(entrypath))
1040 removed.add(self.recode(entrypath))
1032 elif fromkind == svn.core.svn_node_dir:
1041 elif fromkind == svn.core.svn_node_dir:
1033 oroot = parentpath.strip(b'/')
1042 oroot = parentpath.strip(b'/')
1034 nroot = path.strip(b'/')
1043 nroot = path.strip(b'/')
1035 children = self._iterfiles(oroot, prevnum)
1044 children = self._iterfiles(oroot, prevnum)
1036 for childpath in children:
1045 for childpath in children:
1037 childpath = childpath.replace(oroot, nroot)
1046 childpath = childpath.replace(oroot, nroot)
1038 childpath = self.getrelpath(b"/" + childpath, pmodule)
1047 childpath = self.getrelpath(b"/" + childpath, pmodule)
1039 if childpath:
1048 if childpath:
1040 removed.add(self.recode(childpath))
1049 removed.add(self.recode(childpath))
1041 else:
1050 else:
1042 self.ui.debug(
1051 self.ui.debug(
1043 b'unknown path in revision %d: %s\n' % (revnum, path)
1052 b'unknown path in revision %d: %s\n' % (revnum, path)
1044 )
1053 )
1045 elif kind == svn.core.svn_node_dir:
1054 elif kind == svn.core.svn_node_dir:
1046 if ent.action == b'M':
1055 if ent.action == b'M':
1047 # If the directory just had a prop change,
1056 # If the directory just had a prop change,
1048 # then we shouldn't need to look for its children.
1057 # then we shouldn't need to look for its children.
1049 continue
1058 continue
1050 if ent.action == b'R' and parents:
1059 if ent.action == b'R' and parents:
1051 # If a directory is replacing a file, mark the previous
1060 # If a directory is replacing a file, mark the previous
1052 # file as deleted
1061 # file as deleted
1053 pmodule, prevnum = revsplit(parents[0])[1:]
1062 pmodule, prevnum = revsplit(parents[0])[1:]
1054 pkind = self._checkpath(entrypath, prevnum, pmodule)
1063 pkind = self._checkpath(entrypath, prevnum, pmodule)
1055 if pkind == svn.core.svn_node_file:
1064 if pkind == svn.core.svn_node_file:
1056 removed.add(self.recode(entrypath))
1065 removed.add(self.recode(entrypath))
1057 elif pkind == svn.core.svn_node_dir:
1066 elif pkind == svn.core.svn_node_dir:
1058 # We do not know what files were kept or removed,
1067 # We do not know what files were kept or removed,
1059 # mark them all as changed.
1068 # mark them all as changed.
1060 for childpath in self._iterfiles(pmodule, prevnum):
1069 for childpath in self._iterfiles(pmodule, prevnum):
1061 childpath = self.getrelpath(b"/" + childpath)
1070 childpath = self.getrelpath(b"/" + childpath)
1062 if childpath:
1071 if childpath:
1063 changed.add(self.recode(childpath))
1072 changed.add(self.recode(childpath))
1064
1073
1065 for childpath in self._iterfiles(path, revnum):
1074 for childpath in self._iterfiles(path, revnum):
1066 childpath = self.getrelpath(b"/" + childpath)
1075 childpath = self.getrelpath(b"/" + childpath)
1067 if childpath:
1076 if childpath:
1068 changed.add(self.recode(childpath))
1077 changed.add(self.recode(childpath))
1069
1078
1070 # Handle directory copies
1079 # Handle directory copies
1071 if not ent.copyfrom_path or not parents:
1080 if not ent.copyfrom_path or not parents:
1072 continue
1081 continue
1073 # Copy sources not in parent revisions cannot be
1082 # Copy sources not in parent revisions cannot be
1074 # represented, ignore their origin for now
1083 # represented, ignore their origin for now
1075 pmodule, prevnum = revsplit(parents[0])[1:]
1084 pmodule, prevnum = revsplit(parents[0])[1:]
1076 if ent.copyfrom_rev < prevnum:
1085 if ent.copyfrom_rev < prevnum:
1077 continue
1086 continue
1078 copyfrompath = self.getrelpath(ent.copyfrom_path, pmodule)
1087 copyfrompath = self.getrelpath(ent.copyfrom_path, pmodule)
1079 if not copyfrompath:
1088 if not copyfrompath:
1080 continue
1089 continue
1081 self.ui.debug(
1090 self.ui.debug(
1082 b"mark %s came from %s:%d\n"
1091 b"mark %s came from %s:%d\n"
1083 % (path, copyfrompath, ent.copyfrom_rev)
1092 % (path, copyfrompath, ent.copyfrom_rev)
1084 )
1093 )
1085 children = self._iterfiles(ent.copyfrom_path, ent.copyfrom_rev)
1094 children = self._iterfiles(ent.copyfrom_path, ent.copyfrom_rev)
1086 for childpath in children:
1095 for childpath in children:
1087 childpath = self.getrelpath(b"/" + childpath, pmodule)
1096 childpath = self.getrelpath(b"/" + childpath, pmodule)
1088 if not childpath:
1097 if not childpath:
1089 continue
1098 continue
1090 copytopath = path + childpath[len(copyfrompath) :]
1099 copytopath = path + childpath[len(copyfrompath) :]
1091 copytopath = self.getrelpath(copytopath)
1100 copytopath = self.getrelpath(copytopath)
1092 copies[self.recode(copytopath)] = self.recode(childpath)
1101 copies[self.recode(copytopath)] = self.recode(childpath)
1093
1102
1094 progress.complete()
1103 progress.complete()
1095 changed.update(removed)
1104 changed.update(removed)
1096 return (list(changed), removed, copies)
1105 return (list(changed), removed, copies)
1097
1106
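    # Note on the returned triple: `changed` is updated with `removed`
    # just before returning, so a hypothetical result could be
    #
    #   ([b'a.txt', b'b.txt'], {b'b.txt'}, {b'a.txt': b'old/a.txt'})
    #
    # where b.txt is reported both as changed and removed, and a.txt is
    # recorded as copied from old/a.txt.
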
    def _fetch_revisions(self, from_revnum, to_revnum):
        if from_revnum < to_revnum:
            from_revnum, to_revnum = to_revnum, from_revnum

        self.child_cset = None

        def parselogentry(orig_paths, revnum, author, date, message):
            """Return the parsed commit object or None, and True if
            the revision is a branch root.
            """
            self.ui.debug(
                b"parsing revision %d (%d changes)\n"
                % (revnum, len(orig_paths))
            )

            branched = False
            rev = self.revid(revnum)
            # branch log might return entries for a parent we already have

            if rev in self.commits or revnum < to_revnum:
                return None, branched

            parents = []
            # check whether this revision is the start of a branch or part
            # of a branch renaming
            orig_paths = sorted(orig_paths.items())
            root_paths = [
                (p, e) for p, e in orig_paths if self.module.startswith(p)
            ]
            if root_paths:
                path, ent = root_paths[-1]
                if ent.copyfrom_path:
                    branched = True
                    newpath = ent.copyfrom_path + self.module[len(path) :]
                    # ent.copyfrom_rev may not be the actual last revision
                    previd = self.latest(newpath, ent.copyfrom_rev)
                    if previd is not None:
                        prevmodule, prevnum = revsplit(previd)[1:]
                        if prevnum >= self.startrev:
                            parents = [previd]
                            self.ui.note(
                                _(b'found parent of branch %s at %d: %s\n')
                                % (self.module, prevnum, prevmodule)
                            )
            else:
                self.ui.debug(b"no copyfrom path, don't know what to do.\n")

            paths = []
            # filter out unrelated paths
            for path, ent in orig_paths:
                if self.getrelpath(path) is None:
                    continue
                paths.append((path, ent))

            date = parsesvndate(date)
            if self.ui.configbool(b'convert', b'localtimezone'):
                date = makedatetimestamp(date[0])

            if message:
                log = self.recode(message)
            else:
                log = b''

            if author:
                author = self.recode(author)
            else:
                author = b''

            try:
                branch = self.module.split(b"/")[-1]
                if branch == self.trunkname:
                    branch = None
            except IndexError:
                branch = None

            cset = commit(
                author=author,
                date=dateutil.datestr(date, b'%Y-%m-%d %H:%M:%S %1%2'),
                desc=log,
                parents=parents,
                branch=branch,
                rev=rev,
            )

            self.commits[rev] = cset
            # The parents list is *shared* among self.paths and the
            # commit object. Both will be updated below.
            self.paths[rev] = (paths, cset.parents)
            if self.child_cset and not self.child_cset.parents:
                self.child_cset.parents[:] = [rev]
            self.child_cset = cset
            return cset, branched

        self.ui.note(
            _(b'fetching revision log for "%s" from %d to %d\n')
            % (self.module, from_revnum, to_revnum)
        )

        try:
            firstcset = None
            lastonbranch = False
            stream = self._getlog([self.module], from_revnum, to_revnum)
            try:
                for entry in stream:
                    paths, revnum, author, date, message = entry
                    if revnum < self.startrev:
                        lastonbranch = True
                        break
                    if not paths:
                        self.ui.debug(b'revision %d has no entries\n' % revnum)
                        # If we ever leave the loop on an empty
                        # revision, do not try to get a parent branch
                        lastonbranch = lastonbranch or revnum == 0
                        continue
                    cset, lastonbranch = parselogentry(
                        paths, revnum, author, date, message
                    )
                    if cset:
                        firstcset = cset
                    if lastonbranch:
                        break
            finally:
                stream.close()

            if not lastonbranch and firstcset and not firstcset.parents:
                # The first revision of the sequence (the last fetched one)
                # has invalid parents if not a branch root. Find the parent
                # revision now, if any.
                try:
                    firstrevnum = self.revnum(firstcset.rev)
                    if firstrevnum > 1:
                        latest = self.latest(self.module, firstrevnum - 1)
                        if latest:
                            firstcset.parents.append(latest)
                except SvnPathNotFound:
                    pass
        except svn.core.SubversionException as e:
            (inst, num) = e.args
            if num == svn.core.SVN_ERR_FS_NO_SUCH_REVISION:
                raise error.Abort(
                    _(b'svn: branch has no revision %s') % to_revnum
                )
            raise

    def getfile(self, file, rev):
        # TODO: ra.get_file transmits the whole file instead of diffs.
        if file in self.removed:
            return None, None
        try:
            new_module, revnum = revsplit(rev)[1:]
            if self.module != new_module:
                self.module = new_module
                self.reparent(self.module)
            io = stringio()
            info = svn.ra.get_file(self.ra, file, revnum, io)
            data = io.getvalue()
            # ra.get_file() seems to keep a reference on the input buffer
            # preventing collection. Release it explicitly.
            io.close()
            if isinstance(info, list):
                info = info[-1]
            mode = b'x' if b"svn:executable" in info else b''
            mode = b'l' if b"svn:special" in info else mode
        except svn.core.SubversionException as e:
            notfound = (
                svn.core.SVN_ERR_FS_NOT_FOUND,
                svn.core.SVN_ERR_RA_DAV_PATH_NOT_FOUND,
            )
            if e.apr_err in notfound:  # File not found
                return None, None
            raise
        if mode == b'l':
            link_prefix = b"link "
            if data.startswith(link_prefix):
                data = data[len(link_prefix) :]
        return data, mode

    def _iterfiles(self, path, revnum):
        """Enumerate all files in path at revnum, recursively."""
        path = path.strip(b'/')
        pool = svn.core.Pool()
        rpath = b'/'.join([self.baseurl, quote(path)]).strip(b'/')
        entries = svn.client.ls(rpath, optrev(revnum), True, self.ctx, pool)
        if path:
            path += b'/'
        return (
            (path + p)
            for p, e in entries.items()
            if e.kind == svn.core.svn_node_file
        )

    def getrelpath(self, path, module=None):
        if module is None:
            module = self.module
        # Given the repository url of this wc, say
        # "http://server/plone/CMFPlone/branches/Plone-2_0-branch"
        # extract the "entry" portion (a relative path) from what
        # svn log --xml says, i.e.
        # "/CMFPlone/branches/Plone-2_0-branch/tests/PloneTestCase.py"
        # that is to say "tests/PloneTestCase.py"
        if path.startswith(module):
            relative = path.rstrip(b'/')[len(module) :]
            if relative.startswith(b'/'):
                return relative[1:]
            elif relative == b'':
                return relative

        # The path is outside our tracked tree...
        self.ui.debug(
            b'%r is not under %r, ignoring\n'
            % (pycompat.bytestr(path), pycompat.bytestr(module))
        )
        return None

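    # Worked example, reusing the URL from the comment above:
    #
    #   >>> self.getrelpath(
    #   ...     b'/CMFPlone/branches/Plone-2_0-branch/tests/PloneTestCase.py',
    #   ...     module=b'/CMFPlone/branches/Plone-2_0-branch')
    #   b'tests/PloneTestCase.py'
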
    def _checkpath(self, path, revnum, module=None):
        if module is not None:
            prevmodule = self.reparent(b'')
            path = module + b'/' + path
        try:
            # ra.check_path does not like leading slashes very much, it leads
            # to PROPFIND subversion errors
            return svn.ra.check_path(self.ra, path.strip(b'/'), revnum)
        finally:
            if module is not None:
                self.reparent(prevmodule)

    def _getlog(
        self,
        paths,
        start,
        end,
        limit=0,
        discover_changed_paths=True,
        strict_node_history=False,
    ):
        # Normalize path names, svn >= 1.5 only wants paths relative to
        # supplied URL
        relpaths = []
        for p in paths:
            if not p.startswith(b'/'):
                p = self.module + b'/' + p
            relpaths.append(p.strip(b'/'))
        args = [
            self.baseurl,
            relpaths,
            start,
            end,
            limit,
            discover_changed_paths,
            strict_node_history,
        ]
        # developer config: convert.svn.debugsvnlog
        if not self.ui.configbool(b'convert', b'svn.debugsvnlog'):
            return directlogstream(*args)
        arg = encodeargs(args)
        hgexe = procutil.hgexecutable()
        cmd = b'%s debugsvnlog' % procutil.shellquote(hgexe)
        stdin, stdout = procutil.popen2(cmd)
        stdin.write(arg)
        try:
            stdin.close()
        except IOError:
            raise error.Abort(
                _(
                    b'Mercurial failed to run itself, check'
                    b' hg executable is in PATH'
                )
            )
        return logstream(stdout)


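# Note on _getlog() above: when the developer config
# convert.svn.debugsvnlog is false, the log is read in-process through
# directlogstream(); otherwise hg re-executes itself as `hg debugsvnlog`,
# ships the encoded argument list over the child's stdin and parses the
# entries back with logstream(), keeping the Subversion bindings in a
# separate process.  A hypothetical invocation of the in-process path:
#
#   hg convert --config convert.svn.debugsvnlog=0 svn-repo hg-repo
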
pre_revprop_change_template = b'''#!/bin/sh

REPOS="$1"
REV="$2"
USER="$3"
PROPNAME="$4"
ACTION="$5"

%(rules)s

echo "Changing prohibited revision property" >&2
exit 1
'''


def gen_pre_revprop_change_hook(prop_actions_allowed):
    rules = []
    for action, propname in prop_actions_allowed:
        rules.append(
            (
                b'if [ "$ACTION" = "%s" -a "$PROPNAME" = "%s" ]; '
                b'then exit 0; fi'
            )
            % (action, propname)
        )
    return pre_revprop_change_template % {b'rules': b'\n'.join(rules)}


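# Illustration: for the sink's default allow-list below
# ((b'M', b'svn:log'), (b'A', b'hg:convert-branch'), (b'A', b'hg:convert-rev')),
# the rules rendered into the template expand to:
#
#   if [ "$ACTION" = "M" -a "$PROPNAME" = "svn:log" ]; then exit 0; fi
#   if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-branch" ]; then exit 0; fi
#   if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-rev" ]; then exit 0; fi
#
# with every other revprop change falling through to the final exit 1.

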
class svn_sink(converter_sink, commandline):
    commit_re = re.compile(br'Committed revision (\d+).', re.M)
    uuid_re = re.compile(br'Repository UUID:\s*(\S+)', re.M)

    def prerun(self):
        if self.wc:
            os.chdir(self.wc)

    def postrun(self):
        if self.wc:
            os.chdir(self.cwd)

    def join(self, name):
        return os.path.join(self.wc, b'.svn', name)

    def revmapfile(self):
        return self.join(b'hg-shamap')

    def authorfile(self):
        return self.join(b'hg-authormap')

    def __init__(self, ui, repotype, path):
        converter_sink.__init__(self, ui, repotype, path)
        commandline.__init__(self, ui, b'svn')
        self.delete = []
        self.setexec = []
        self.delexec = []
        self.copies = []
        self.wc = None
        self.cwd = encoding.getcwd()

        created = False
        if os.path.isfile(os.path.join(path, b'.svn', b'entries')):
            self.wc = os.path.realpath(path)
            self.run0(b'update')
        else:
            if not re.search(br'^(file|http|https|svn|svn\+ssh)://', path):
                path = os.path.realpath(path)
                if os.path.isdir(os.path.dirname(path)):
                    if not os.path.exists(
                        os.path.join(path, b'db', b'fs-type')
                    ):
                        ui.status(
                            _(b"initializing svn repository '%s'\n")
                            % os.path.basename(path)
                        )
                        commandline(ui, b'svnadmin').run0(b'create', path)
                        created = path
                    path = util.normpath(path)
                    if not path.startswith(b'/'):
                        path = b'/' + path
                    path = b'file://' + path

            wcpath = os.path.join(
                encoding.getcwd(), os.path.basename(path) + b'-wc'
            )
            ui.status(
                _(b"initializing svn working copy '%s'\n")
                % os.path.basename(wcpath)
            )
            self.run0(b'checkout', path, wcpath)

            self.wc = wcpath
        self.opener = vfsmod.vfs(self.wc)
        self.wopener = vfsmod.vfs(self.wc)
        self.childmap = mapfile(ui, self.join(b'hg-childmap'))
        if util.checkexec(self.wc):
            self.is_exec = util.isexec
        else:
            self.is_exec = None

        if created:
            prop_actions_allowed = [
                (b'M', b'svn:log'),
                (b'A', b'hg:convert-branch'),
                (b'A', b'hg:convert-rev'),
            ]

            if self.ui.configbool(
                b'convert', b'svn.dangerous-set-commit-dates'
            ):
                prop_actions_allowed.append((b'M', b'svn:date'))

            hook = os.path.join(created, b'hooks', b'pre-revprop-change')
            fp = open(hook, b'wb')
            fp.write(gen_pre_revprop_change_hook(prop_actions_allowed))
            fp.close()
            util.setflags(hook, False, True)

        output = self.run0(b'info')
        self.uuid = self.uuid_re.search(output).group(1).strip()

    def wjoin(self, *names):
        return os.path.join(self.wc, *names)

    @propertycache
    def manifest(self):
        # As of svn 1.7, the "add" command fails when receiving
        # already tracked entries, so we have to track and filter them
        # ourselves.
        m = set()
        output = self.run0(b'ls', recursive=True, xml=True)
        doc = xml.dom.minidom.parseString(output)
        for e in doc.getElementsByTagName('entry'):
            for n in e.childNodes:
                if n.nodeType != n.ELEMENT_NODE or n.tagName != 'name':
                    continue
                name = ''.join(
                    c.data for c in n.childNodes if c.nodeType == c.TEXT_NODE
                )
                # Entries are compared with names coming from
                # mercurial, so they are bytes with undefined
                # encoding. Our best bet is to assume they are in
                # local encoding. They will be passed to command line
                # calls later anyway, so they had better be.
                m.add(encoding.unitolocal(name))
                break
        return m

    def putfile(self, filename, flags, data):
        if b'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            try:
                if os.path.islink(self.wjoin(filename)):
                    os.unlink(filename)
            except OSError:
                pass

            if self.is_exec:
                # We need to check executability of the file before the
                # change, because `vfs.write` is able to reset exec bit.
                wasexec = False
                if os.path.exists(self.wjoin(filename)):
                    wasexec = self.is_exec(self.wjoin(filename))

            self.wopener.write(filename, data)

            if self.is_exec:
                if wasexec:
                    if b'x' not in flags:
                        self.delexec.append(filename)
                else:
                    if b'x' in flags:
                        self.setexec.append(filename)
                util.setflags(self.wjoin(filename), False, b'x' in flags)

    def _copyfile(self, source, dest):
        # SVN's copy command pukes if the destination file exists, but
        # our copyfile method expects to record a copy that has
        # already occurred. Cross the semantic gap.
        wdest = self.wjoin(dest)
        exists = os.path.lexists(wdest)
        if exists:
            fd, tempname = pycompat.mkstemp(
                prefix=b'hg-copy-', dir=os.path.dirname(wdest)
            )
            os.close(fd)
            os.unlink(tempname)
            os.rename(wdest, tempname)
        try:
            self.run0(b'copy', source, dest)
        finally:
            self.manifest.add(dest)
            if exists:
                try:
                    os.unlink(wdest)
                except OSError:
                    pass
                os.rename(tempname, wdest)

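    # Note on the dance above: `svn copy` refuses to overwrite an existing
    # destination, so the current file is parked under a temporary name,
    # svn records the copy, and the parked content is then restored over
    # the svn-produced file; the copy is recorded without changing what
    # the working copy finally contains.
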
    def dirs_of(self, files):
        dirs = set()
        for f in files:
            if os.path.isdir(self.wjoin(f)):
                dirs.add(f)
            i = len(f)
            for i in iter(lambda: f.rfind(b'/', 0, i), -1):
                dirs.add(f[:i])
        return dirs

    def add_dirs(self, files):
        add_dirs = [
            d for d in sorted(self.dirs_of(files)) if d not in self.manifest
        ]
        if add_dirs:
            self.manifest.update(add_dirs)
            self.xargs(add_dirs, b'add', non_recursive=True, quiet=True)
        return add_dirs

    def add_files(self, files):
        files = [f for f in files if f not in self.manifest]
        if files:
            self.manifest.update(files)
            self.xargs(files, b'add', quiet=True)
        return files

    def addchild(self, parent, child):
        self.childmap[parent] = child

    def revid(self, rev):
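        # e.g. an (illustrative) repository uuid b'3c6e8f...' and rev
        # b'42' produce the stable identifier b'svn:3c6e8f...@42'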
1599 return b"svn:%s@%s" % (self.uuid, rev)
1608 return b"svn:%s@%s" % (self.uuid, rev)

    def putcommit(
        self, files, copies, parents, commit, source, revmap, full, cleanp2
    ):
        for parent in parents:
            try:
                return self.revid(self.childmap[parent])
            except KeyError:
                pass

        # Apply changes to working copy
        for f, v in files:
            data, mode = source.getfile(f, v)
            if data is None:
                self.delete.append(f)
            else:
                self.putfile(f, mode, data)
                if f in copies:
                    self.copies.append([copies[f], f])
        if full:
            self.delete.extend(sorted(self.manifest.difference(files)))
        files = [f[0] for f in files]

        entries = set(self.delete)
        files = frozenset(files)
        entries.update(self.add_dirs(files.difference(entries)))
        if self.copies:
            for s, d in self.copies:
                self._copyfile(s, d)
            self.copies = []
        if self.delete:
            self.xargs(self.delete, b'delete')
            for f in self.delete:
                self.manifest.remove(f)
            self.delete = []
        entries.update(self.add_files(files.difference(entries)))
        if self.delexec:
            self.xargs(self.delexec, b'propdel', b'svn:executable')
            self.delexec = []
        if self.setexec:
            self.xargs(self.setexec, b'propset', b'svn:executable', b'*')
            self.setexec = []

        fd, messagefile = pycompat.mkstemp(prefix=b'hg-convert-')
        fp = os.fdopen(fd, 'wb')
        fp.write(util.tonativeeol(commit.desc))
        fp.close()
        try:
            output = self.run0(
                b'commit',
                username=stringutil.shortuser(commit.author),
                file=messagefile,
                encoding=b'utf-8',
            )
            try:
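                # commit_re (defined earlier on this class) is expected to
                # pick the new revision number out of svn's usual
                # 'Committed revision N.' output; anything else is handled
                # as unexpected output below.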
                rev = self.commit_re.search(output).group(1)
            except AttributeError:
                if not files:
                    return parents[0] if parents else b'None'
                self.ui.warn(_(b'unexpected svn output:\n'))
                self.ui.warn(output)
                raise error.Abort(_(b'unable to cope with svn output'))
            if commit.rev:
                self.run(
                    b'propset',
                    b'hg:convert-rev',
                    commit.rev,
                    revprop=True,
                    revision=rev,
                )
            if commit.branch and commit.branch != b'default':
                self.run(
                    b'propset',
                    b'hg:convert-branch',
                    commit.branch,
                    revprop=True,
                    revision=rev,
                )

            if self.ui.configbool(
                b'convert', b'svn.dangerous-set-commit-dates'
            ):
                # Subversion always uses UTC to represent date and time
                date = dateutil.parsedate(commit.date)
                date = (date[0], 0)

                # The only way to set the date and time of an svn commit is
                # to propset svn:date after the commit has been made
                self.run(
                    b'propset',
                    b'svn:date',
                    formatsvndate(date),
                    revprop=True,
                    revision=rev,
                )

            for parent in parents:
                self.addchild(parent, rev)
            return self.revid(rev)
        finally:
            os.unlink(messagefile)

    def puttags(self, tags):
        self.ui.warn(_(b'writing Subversion tags is not yet implemented\n'))
        return None, None

    def hascommitfrommap(self, rev):
        # We trust that revisions referenced in a map are still present
        # TODO: implement something better if necessary and feasible
        return True

    def hascommitforsplicemap(self, rev):
        # This is not correct as one can convert to an existing subversion
        # repository and childmap would not list all revisions. Too bad.
        if rev in self.childmap:
            return True
        raise error.Abort(
            _(
                b'splice map revision %s not found in subversion '
                b'child map (revision lookups are not implemented)'
            )
            % rev
        )
@@ -1,157 +1,160
# -*- coding: utf-8 -*-

# Copyright (C) 2007 Daniel Holth <dholth@fastmail.fm>
# This is a stripped-down version of the original bzr-svn transport.py,
# Copyright (C) 2006 Jelmer Vernooij <jelmer@samba.org>

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.

# pytype: disable=import-error
import svn.client
import svn.core
import svn.ra

# pytype: enable=import-error

Pool = svn.core.Pool
SubversionException = svn.core.SubversionException

from mercurial.pycompat import getattr
from mercurial import util

# Some older versions of the Python bindings need to be
# explicitly initialized. But what we want to do probably
# won't work worth a darn against those libraries anyway!
svn.ra.initialize()

svn_config = None


def _create_auth_baton(pool):
    """Create a Subversion authentication baton."""
    import svn.client  # pytype: disable=import-error

    # Give the client context baton a suite of authentication
    # providers
    providers = [
        svn.client.get_simple_provider(pool),
        svn.client.get_username_provider(pool),
        svn.client.get_ssl_client_cert_file_provider(pool),
        svn.client.get_ssl_client_cert_pw_file_provider(pool),
        svn.client.get_ssl_server_trust_file_provider(pool),
    ]
    # Platform-dependent authentication methods
    getprovider = getattr(
        svn.core, 'svn_auth_get_platform_specific_provider', None
    )
    if getprovider:
        # Available in svn >= 1.6
        for name in (b'gnome_keyring', b'keychain', b'kwallet', b'windows'):
            for type in (b'simple', b'ssl_client_cert_pw', b'ssl_server_trust'):
                p = getprovider(name, type, pool)
                if p:
                    providers.append(p)
    else:
        if util.safehasattr(svn.client, b'get_windows_simple_provider'):
            providers.append(svn.client.get_windows_simple_provider(pool))

    return svn.core.svn_auth_open(providers, pool)


class NotBranchError(SubversionException):
    pass


class SvnRaTransport:
    """
    Open an ra connection to a Subversion repository.
    """

    def __init__(self, url=b"", ra=None):
        self.pool = Pool()
        self.svn_url = url
        self.username = b''
        self.password = b''

        # Only Subversion 1.4 has reparent()
        if ra is None or not util.safehasattr(svn.ra, b'reparent'):
            self.client = svn.client.create_context(self.pool)
            ab = _create_auth_baton(self.pool)
            self.client.auth_baton = ab
            global svn_config
            if svn_config is None:
                svn_config = svn.core.svn_config_get_config(None)
            self.client.config = svn_config
            try:
                self.ra = svn.client.open_ra_session(
                    self.svn_url, self.client, self.pool
                )
            except SubversionException as xxx_todo_changeme:
                (inst, num) = xxx_todo_changeme.args
                if num in (
                    svn.core.SVN_ERR_RA_ILLEGAL_URL,
                    svn.core.SVN_ERR_RA_LOCAL_REPOS_OPEN_FAILED,
                    svn.core.SVN_ERR_BAD_URL,
                ):
                    raise NotBranchError(url)
                raise
        else:
            self.ra = ra
            svn.ra.reparent(self.ra, self.svn_url.encode('utf8'))

    class Reporter:
        def __init__(self, reporter_data):
            self._reporter, self._baton = reporter_data

        def set_path(self, path, revnum, start_empty, lock_token, pool=None):
            svn.ra.reporter2_invoke_set_path(
                self._reporter,
                self._baton,
                path,
                revnum,
                start_empty,
                lock_token,
                pool,
            )

        def delete_path(self, path, pool=None):
            svn.ra.reporter2_invoke_delete_path(
                self._reporter, self._baton, path, pool
            )

        def link_path(
            self, path, url, revision, start_empty, lock_token, pool=None
        ):
            svn.ra.reporter2_invoke_link_path(
                self._reporter,
                self._baton,
                path,
                url,
                revision,
                start_empty,
                lock_token,
                pool,
            )

        def finish_report(self, pool=None):
            svn.ra.reporter2_invoke_finish_report(
                self._reporter, self._baton, pool
            )

        def abort_report(self, pool=None):
            svn.ra.reporter2_invoke_abort_report(
                self._reporter, self._baton, pool
            )

    def do_update(self, revnum, path, *args, **kwargs):
        return self.Reporter(
            svn.ra.do_update(self.ra, revnum, path, *args, **kwargs)
        )
@@ -1,479 +1,480
1 """automatically manage newlines in repository files
1 """automatically manage newlines in repository files
2
2
3 This extension allows you to manage the type of line endings (CRLF or
3 This extension allows you to manage the type of line endings (CRLF or
4 LF) that are used in the repository and in the local working
4 LF) that are used in the repository and in the local working
5 directory. That way you can get CRLF line endings on Windows and LF on
5 directory. That way you can get CRLF line endings on Windows and LF on
6 Unix/Mac, thereby letting everybody use their OS native line endings.
6 Unix/Mac, thereby letting everybody use their OS native line endings.
7
7
8 The extension reads its configuration from a versioned ``.hgeol``
8 The extension reads its configuration from a versioned ``.hgeol``
9 configuration file found in the root of the working directory. The
9 configuration file found in the root of the working directory. The
10 ``.hgeol`` file use the same syntax as all other Mercurial
10 ``.hgeol`` file use the same syntax as all other Mercurial
11 configuration files. It uses two sections, ``[patterns]`` and
11 configuration files. It uses two sections, ``[patterns]`` and
12 ``[repository]``.
12 ``[repository]``.
13
13
14 The ``[patterns]`` section specifies how line endings should be
14 The ``[patterns]`` section specifies how line endings should be
15 converted between the working directory and the repository. The format is
15 converted between the working directory and the repository. The format is
16 specified by a file pattern. The first match is used, so put more
16 specified by a file pattern. The first match is used, so put more
17 specific patterns first. The available line endings are ``LF``,
17 specific patterns first. The available line endings are ``LF``,
18 ``CRLF``, and ``BIN``.
18 ``CRLF``, and ``BIN``.
19
19
20 Files with the declared format of ``CRLF`` or ``LF`` are always
20 Files with the declared format of ``CRLF`` or ``LF`` are always
21 checked out and stored in the repository in that format and files
21 checked out and stored in the repository in that format and files
22 declared to be binary (``BIN``) are left unchanged. Additionally,
22 declared to be binary (``BIN``) are left unchanged. Additionally,
23 ``native`` is an alias for checking out in the platform's default line
23 ``native`` is an alias for checking out in the platform's default line
24 ending: ``LF`` on Unix (including Mac OS X) and ``CRLF`` on
24 ending: ``LF`` on Unix (including Mac OS X) and ``CRLF`` on
25 Windows. Note that ``BIN`` (do nothing to line endings) is Mercurial's
25 Windows. Note that ``BIN`` (do nothing to line endings) is Mercurial's
26 default behavior; it is only needed if you need to override a later,
26 default behavior; it is only needed if you need to override a later,
27 more general pattern.
27 more general pattern.
28
28
29 The optional ``[repository]`` section specifies the line endings to
29 The optional ``[repository]`` section specifies the line endings to
30 use for files stored in the repository. It has a single setting,
30 use for files stored in the repository. It has a single setting,
31 ``native``, which determines the storage line endings for files
31 ``native``, which determines the storage line endings for files
32 declared as ``native`` in the ``[patterns]`` section. It can be set to
32 declared as ``native`` in the ``[patterns]`` section. It can be set to
33 ``LF`` or ``CRLF``. The default is ``LF``. For example, this means
33 ``LF`` or ``CRLF``. The default is ``LF``. For example, this means
34 that on Windows, files configured as ``native`` (``CRLF`` by default)
34 that on Windows, files configured as ``native`` (``CRLF`` by default)
35 will be converted to ``LF`` when stored in the repository. Files
35 will be converted to ``LF`` when stored in the repository. Files
36 declared as ``LF``, ``CRLF``, or ``BIN`` in the ``[patterns]`` section
36 declared as ``LF``, ``CRLF``, or ``BIN`` in the ``[patterns]`` section
37 are always stored as-is in the repository.
37 are always stored as-is in the repository.
38
38
39 Example versioned ``.hgeol`` file::
39 Example versioned ``.hgeol`` file::
40
40
41 [patterns]
41 [patterns]
42 **.py = native
42 **.py = native
43 **.vcproj = CRLF
43 **.vcproj = CRLF
44 **.txt = native
44 **.txt = native
45 Makefile = LF
45 Makefile = LF
46 **.jpg = BIN
46 **.jpg = BIN
47
47
48 [repository]
48 [repository]
49 native = LF
49 native = LF
50
50
51 .. note::
51 .. note::
52
52
53 The rules will first apply when files are touched in the working
53 The rules will first apply when files are touched in the working
54 directory, e.g. by updating to null and back to tip to touch all files.
54 directory, e.g. by updating to null and back to tip to touch all files.
55
55
56 The extension uses an optional ``[eol]`` section read from both the
56 The extension uses an optional ``[eol]`` section read from both the
57 normal Mercurial configuration files and the ``.hgeol`` file, with the
57 normal Mercurial configuration files and the ``.hgeol`` file, with the
58 latter overriding the former. You can use that section to control the
58 latter overriding the former. You can use that section to control the
59 overall behavior. There are three settings:
59 overall behavior. There are three settings:
60
60
61 - ``eol.native`` (default ``os.linesep``) can be set to ``LF`` or
61 - ``eol.native`` (default ``os.linesep``) can be set to ``LF`` or
62 ``CRLF`` to override the default interpretation of ``native`` for
62 ``CRLF`` to override the default interpretation of ``native`` for
63 checkout. This can be used with :hg:`archive` on Unix, say, to
63 checkout. This can be used with :hg:`archive` on Unix, say, to
64 generate an archive where files have line endings for Windows.
64 generate an archive where files have line endings for Windows.
65
65
66 - ``eol.only-consistent`` (default True) can be set to False to make
66 - ``eol.only-consistent`` (default True) can be set to False to make
67 the extension convert files with inconsistent EOLs. Inconsistent
67 the extension convert files with inconsistent EOLs. Inconsistent
68 means that there is both ``CRLF`` and ``LF`` present in the file.
68 means that there is both ``CRLF`` and ``LF`` present in the file.
69 Such files are normally not touched under the assumption that they
69 Such files are normally not touched under the assumption that they
70 have mixed EOLs on purpose.
70 have mixed EOLs on purpose.
71
71
72 - ``eol.fix-trailing-newline`` (default False) can be set to True to
72 - ``eol.fix-trailing-newline`` (default False) can be set to True to
73 ensure that converted files end with a EOL character (either ``\\n``
73 ensure that converted files end with a EOL character (either ``\\n``
74 or ``\\r\\n`` as per the configured patterns).
74 or ``\\r\\n`` as per the configured patterns).
75
75
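For instance, a user ``hgrc`` that overrides all three settings might
look like this (the values are purely illustrative)::

  [eol]
  native = CRLF
  only-consistent = False
  fix-trailing-newline = True
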
The extension provides ``cleverencode:`` and ``cleverdecode:`` filters
like the deprecated win32text extension does. This means that you can
disable win32text and enable eol and your filters will still work. You
only need these filters until you have prepared a ``.hgeol`` file.

The ``win32text.forbid*`` hooks provided by the win32text extension
have been unified into a single hook named ``eol.checkheadshook``. The
hook will look up the expected line endings from the ``.hgeol`` file,
which means you must migrate to a ``.hgeol`` file first before using
the hook. ``eol.checkheadshook`` only checks heads; intermediate
invalid revisions may still be pushed. To forbid them completely, use
the ``eol.checkallhook`` hook. These hooks are best used as
``pretxnchangegroup`` hooks.
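
For example, a server could enable the stricter check with a hook like
the following (a sketch; the ``.eol`` suffix on the hook name is
arbitrary)::

  [hooks]
  pretxnchangegroup.eol = python:hgext.eol.checkallhook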

See :hg:`help patterns` for more information about the glob patterns
used.
"""


import os
import re
from mercurial.i18n import _
from mercurial import (
    config,
    error as errormod,
    extensions,
    match,
    pycompat,
    registrar,
    scmutil,
    util,
)
from mercurial.utils import stringutil

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'

configtable = {}
configitem = registrar.configitem(configtable)

configitem(
    b'eol',
    b'fix-trailing-newline',
    default=False,
)
configitem(
    b'eol',
    b'native',
    default=pycompat.oslinesep,
)
configitem(
    b'eol',
    b'only-consistent',
    default=True,
)

# Matches a lone LF, i.e., one that is not part of CRLF.
singlelf = re.compile(b'(^|[^\r])\n')
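# Illustrative check: singlelf.search(b'a\r\nb') finds nothing, while
# singlelf.search(b'a\nb') matches the bare LF.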


def inconsistenteol(data):
    return b'\r\n' in data and singlelf.search(data)


def tolf(s, params, ui, **kwargs):
    """Filter to convert to LF EOLs."""
    if stringutil.binary(s):
        return s
    if ui.configbool(b'eol', b'only-consistent') and inconsistenteol(s):
        return s
    if (
        ui.configbool(b'eol', b'fix-trailing-newline')
        and s
        and not s.endswith(b'\n')
    ):
        s = s + b'\n'
    return util.tolf(s)


def tocrlf(s, params, ui, **kwargs):
    """Filter to convert to CRLF EOLs."""
    if stringutil.binary(s):
        return s
    if ui.configbool(b'eol', b'only-consistent') and inconsistenteol(s):
        return s
    if (
        ui.configbool(b'eol', b'fix-trailing-newline')
        and s
        and not s.endswith(b'\n')
    ):
        s = s + b'\n'
    return util.tocrlf(s)


def isbinary(s, params, ui, **kwargs):
    """Filter to do nothing with the file."""
    return s


filters = {
    b'to-lf': tolf,
    b'to-crlf': tocrlf,
    b'is-binary': isbinary,
    # The following provide backwards compatibility with win32text
    b'cleverencode:': tolf,
    b'cleverdecode:': tocrlf,
}


class eolfile:
    def __init__(self, ui, root, data):
        self._decode = {
            b'LF': b'to-lf',
            b'CRLF': b'to-crlf',
            b'BIN': b'is-binary',
        }
        self._encode = {
            b'LF': b'to-lf',
            b'CRLF': b'to-crlf',
            b'BIN': b'is-binary',
        }

        self.cfg = config.config()
        # Our files should not be touched. The pattern must be
        # inserted first to override a '** = native' pattern.
        self.cfg.set(b'patterns', b'.hg*', b'BIN', b'eol')
        # We can then parse the user's patterns.
        self.cfg.parse(b'.hgeol', data)

        isrepolf = self.cfg.get(b'repository', b'native') != b'CRLF'
        self._encode[b'NATIVE'] = isrepolf and b'to-lf' or b'to-crlf'
        iswdlf = ui.config(b'eol', b'native') in (b'LF', b'\n')
        self._decode[b'NATIVE'] = iswdlf and b'to-lf' or b'to-crlf'

        include = []
        exclude = []
        self.patterns = []
        for pattern, style in self.cfg.items(b'patterns'):
            key = style.upper()
            if key == b'BIN':
                exclude.append(pattern)
            else:
                include.append(pattern)
            m = match.match(root, b'', [pattern])
            self.patterns.append((pattern, key, m))
        # This will match the files for which we need to care
        # about inconsistent newlines.
        self.match = match.match(root, b'', [], include, exclude)

    def copytoui(self, ui):
        newpatterns = {pattern for pattern, key, m in self.patterns}
        for section in (b'decode', b'encode'):
            for oldpattern, _filter in ui.configitems(section):
                if oldpattern not in newpatterns:
                    if ui.configsource(section, oldpattern) == b'eol':
                        ui.setconfig(section, oldpattern, b'!', b'eol')
        for pattern, key, m in self.patterns:
            try:
                ui.setconfig(b'decode', pattern, self._decode[key], b'eol')
                ui.setconfig(b'encode', pattern, self._encode[key], b'eol')
            except KeyError:
                ui.warn(
                    _(b"ignoring unknown EOL style '%s' from %s\n")
                    % (key, self.cfg.source(b'patterns', pattern))
                )
        # eol.only-consistent can be specified in ~/.hgrc or .hgeol
        for k, v in self.cfg.items(b'eol'):
            ui.setconfig(b'eol', k, v, b'eol')

    def checkrev(self, repo, ctx, files):
        failed = []
        for f in files or ctx.files():
            if f not in ctx:
                continue
            for pattern, key, m in self.patterns:
                if not m(f):
                    continue
                target = self._encode[key]
                data = ctx[f].data()
                if (
                    target == b"to-lf"
                    and b"\r\n" in data
                    or target == b"to-crlf"
                    and singlelf.search(data)
                ):
                    failed.append((f, target, bytes(ctx)))
                break
        return failed


def parseeol(ui, repo, nodes):
    try:
        for node in nodes:
            try:
                if node is None:
                    # Cannot use workingctx.data() since it would load
                    # and cache the filters before we configure them.
                    data = repo.wvfs(b'.hgeol').read()
                else:
                    data = repo[node][b'.hgeol'].data()
                return eolfile(ui, repo.root, data)
            except (IOError, LookupError):
                pass
    except errormod.ConfigError as inst:
        ui.warn(
            _(
                b"warning: ignoring .hgeol file due to parse error "
                b"at %s: %s\n"
            )
            % (inst.location, inst.message)
        )
    return None


def ensureenabled(ui):
294 """make sure the extension is enabled when used as hook
294 """make sure the extension is enabled when used as hook
295
295
296 When eol is used through hooks, the extension is never formally loaded and
296 When eol is used through hooks, the extension is never formally loaded and
297 enabled. This has some side effect, for example the config declaration is
297 enabled. This has some side effect, for example the config declaration is
298 never loaded. This function ensure the extension is enabled when running
298 never loaded. This function ensure the extension is enabled when running
299 hooks.
299 hooks.
300 """
300 """
    if b'eol' in ui._knownconfig:
        return
    ui.setconfig(b'extensions', b'eol', b'', source=b'internal')
    extensions.loadall(ui, [b'eol'])


def _checkhook(ui, repo, node, headsonly):
    # Get revisions to check and touched files at the same time
    ensureenabled(ui)
    files = set()
    revs = set()
    for rev in range(repo[node].rev(), len(repo)):
        revs.add(rev)
        if headsonly:
            ctx = repo[rev]
            files.update(ctx.files())
            for pctx in ctx.parents():
                revs.discard(pctx.rev())
    failed = []
    for rev in revs:
        ctx = repo[rev]
        eol = parseeol(ui, repo, [ctx.node()])
        if eol:
            failed.extend(eol.checkrev(repo, ctx, files))

    if failed:
        eols = {b'to-lf': b'CRLF', b'to-crlf': b'LF'}
        msgs = []
        for f, target, node in sorted(failed):
            msgs.append(
                _(b" %s in %s should not have %s line endings")
                % (f, node, eols[target])
            )
        raise errormod.Abort(
            _(b"end-of-line check failed:\n") + b"\n".join(msgs)
        )


def checkallhook(ui, repo, node, hooktype, **kwargs):
    """verify that files have expected EOLs"""
    _checkhook(ui, repo, node, False)


def checkheadshook(ui, repo, node, hooktype, **kwargs):
    """verify that files have expected EOLs"""
    _checkhook(ui, repo, node, True)


# "checkheadshook" used to be called "hook"
hook = checkheadshook


def preupdate(ui, repo, hooktype, parent1, parent2):
    p1node = scmutil.resolvehexnodeidprefix(repo, parent1)
    repo.loadeol([p1node])
    return False


def uisetup(ui):
    ui.setconfig(b'hooks', b'preupdate.eol', preupdate, b'eol')


def extsetup(ui):
    try:
        extensions.find(b'win32text')
        ui.warn(
            _(
                b"the eol extension is incompatible with the "
                b"win32text extension\n"
            )
        )
    except KeyError:
        pass


def reposetup(ui, repo):
    uisetup(repo.ui)

    if not repo.local():
        return
    for name, fn in filters.items():
        repo.adddatafilter(name, fn)

    ui.setconfig(b'patch', b'eol', b'auto', b'eol')

    class eolrepo(repo.__class__):
        def loadeol(self, nodes):
            eol = parseeol(self.ui, self, nodes)
            if eol is None:
                return None
            eol.copytoui(self.ui)
            return eol.match

        def _hgcleardirstate(self):
            self._eolmatch = self.loadeol([None])
            if not self._eolmatch:
                self._eolmatch = util.never
                return

            oldeol = None
            try:
                cachemtime = os.path.getmtime(self.vfs.join(b"eol.cache"))
            except OSError:
                cachemtime = 0
            else:
                olddata = self.vfs.read(b"eol.cache")
                if olddata:
                    oldeol = eolfile(self.ui, self.root, olddata)

            try:
                eolmtime = os.path.getmtime(self.wjoin(b".hgeol"))
            except OSError:
                eolmtime = 0

            if eolmtime >= cachemtime and eolmtime > 0:
                self.ui.debug(b"eol: detected change in .hgeol\n")

                hgeoldata = self.wvfs.read(b'.hgeol')
                neweol = eolfile(self.ui, self.root, hgeoldata)

                wlock = None
                try:
                    wlock = self.wlock()
                    with self.dirstate.changing_files(self):
                        for f in self.dirstate:
                            if not self.dirstate.get_entry(f).maybe_clean:
                                continue
                            if oldeol is not None:
                                if not oldeol.match(f) and not neweol.match(f):
                                    continue
                                oldkey = None
                                for pattern, key, m in oldeol.patterns:
                                    if m(f):
                                        oldkey = key
                                        break
                                newkey = None
                                for pattern, key, m in neweol.patterns:
                                    if m(f):
                                        newkey = key
                                        break
                                if oldkey == newkey:
                                    continue
                            # all normal files need to be looked at again since
                            # the new .hgeol file specifies a different filter
                            self.dirstate.set_possibly_dirty(f)
                        # Write the cache to update mtime and cache .hgeol
                        with self.vfs(b"eol.cache", b"w") as f:
                            f.write(hgeoldata)
                except errormod.LockUnavailable:
                    # If we cannot lock the repository and clear the
                    # dirstate, then a commit might not see all files
                    # as modified. But if we cannot lock the
                    # repository, then we can also not make a commit,
                    # so ignore the error.
                    pass
                finally:
                    if wlock is not None:
                        wlock.release()

        def commitctx(self, ctx, error=False, origctx=None):
            for f in sorted(ctx.added() + ctx.modified()):
                if not self._eolmatch(f):
                    continue
                fctx = ctx[f]
                if fctx is None:
                    continue
                data = fctx.data()
                if stringutil.binary(data):
                    # We should not abort here, since the user should
                    # be able to say "** = native" to automatically
                    # have all non-binary files taken care of.
                    continue
                if inconsistenteol(data):
                    raise errormod.Abort(
                        _(b"inconsistent newline style in %s\n") % f
                    )
            return super(eolrepo, self).commitctx(ctx, error, origctx)

    repo.__class__ = eolrepo
    repo._hgcleardirstate()
@@ -1,259 +1,262
# Copyright 2016-present Facebook. All Rights Reserved.
#
# protocol: logic for a server providing fastannotate support
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

import contextlib
import os

from mercurial.i18n import _
from mercurial.pycompat import open
from mercurial import (
    error,
    extensions,
    hg,
    util,
    wireprotov1peer,
    wireprotov1server,
)
from mercurial.utils import (
    urlutil,
)
from . import context

# common


def _getmaster(ui):
    """get the mainbranch, and enforce it is set"""
    master = ui.config(b'fastannotate', b'mainbranch')
    if not master:
        raise error.Abort(
            _(
                b'fastannotate.mainbranch is required '
                b'for both the client and the server'
            )
        )
    return master


# server-side


def _capabilities(orig, repo, proto):
    result = orig(repo, proto)
    result.append(b'getannotate')
    return result


def _getannotate(repo, proto, path, lastnode):
    # output:
    # FILE := vfspath + '\0' + str(size) + '\0' + content
    # OUTPUT := '' | FILE + OUTPUT
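    # e.g. a single 5-byte file would be sent as (hypothetical values)
    #   b'fastannotate/revmap/foo.l\x005\x00hello'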
    result = b''
    buildondemand = repo.ui.configbool(
        b'fastannotate', b'serverbuildondemand', True
    )
    with context.annotatecontext(repo, path) as actx:
        if buildondemand:
            # update before responding to the client
            master = _getmaster(repo.ui)
            try:
                if not actx.isuptodate(master):
                    actx.annotate(master, master)
            except Exception:
                # non-fast-forward move or corrupted. rebuild automatically.
                actx.rebuild()
                try:
                    actx.annotate(master, master)
                except Exception:
                    actx.rebuild()  # delete files
            finally:
                # although the "with" context will also do a close/flush, we
                # need to do it early so we can send the correct response to
                # the client.
                actx.close()
        # send back the full content of revmap and linelog, in the future we
        # may want to do some rsync-like fancy updating.
        # the lastnode check is not necessary if the client and the server
        # agree where the main branch is.
        if actx.lastnode != lastnode:
            for p in [actx.revmappath, actx.linelogpath]:
                if not os.path.exists(p):
                    continue
                with open(p, b'rb') as f:
                    content = f.read()
                vfsbaselen = len(repo.vfs.base + b'/')
                relpath = p[vfsbaselen:]
                result += b'%s\0%d\0%s' % (relpath, len(content), content)
    return result


def _registerwireprotocommand():
    if b'getannotate' in wireprotov1server.commands:
        return
    wireprotov1server.wireprotocommand(b'getannotate', b'path lastnode')(
        _getannotate
    )


def serveruisetup(ui):
    _registerwireprotocommand()
    extensions.wrapfunction(wireprotov1server, b'_capabilities', _capabilities)


# client-side


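# A worked example (illustrative): _parseresponse(b'path\x003\x00abc')
# returns {b'path': b'abc'} -- the vfspath is read up to the first NUL,
# the decimal size up to the second, then that many content bytes are
# sliced out.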
def _parseresponse(payload):
    result = {}
    i = 0
    l = len(payload) - 1
    state = 0  # 0: vfspath, 1: size
    vfspath = size = b''
    while i < l:
        ch = payload[i : i + 1]
        if ch == b'\0':
            if state == 1:
                result[vfspath] = payload[i + 1 : i + 1 + int(size)]
                i += int(size)
                state = 0
                vfspath = size = b''
            elif state == 0:
                state = 1
        else:
            if state == 1:
                size += ch
            elif state == 0:
                vfspath += ch
        i += 1
    return result


def peersetup(ui, peer):
    class fastannotatepeer(peer.__class__):
        @wireprotov1peer.batchable
        def getannotate(self, path, lastnode=None):
            if not self.capable(b'getannotate'):
                ui.warn(_(b'remote peer cannot provide annotate cache\n'))
                return None, None
            else:
                args = {b'path': path, b'lastnode': lastnode or b''}
                return args, _parseresponse

    peer.__class__ = fastannotatepeer


@contextlib.contextmanager
def annotatepeer(repo):
    ui = repo.ui

    remotedest = ui.config(b'fastannotate', b'remotepath', b'default')
    remotepath = urlutil.get_unique_pull_path_obj(
        b'fastannotate',
        ui,
        remotedest,
    )
    peer = hg.peer(ui, {}, remotepath)

    try:
        yield peer
    finally:
        peer.close()


def clientfetch(repo, paths, lastnodemap=None, peer=None):
    """download annotate cache from the server for paths"""
    if not paths:
        return

    if peer is None:
        with annotatepeer(repo) as peer:
            return clientfetch(repo, paths, lastnodemap, peer)

    if lastnodemap is None:
        lastnodemap = {}

    ui = repo.ui
    results = []
    with peer.commandexecutor() as batcher:
        ui.debug(b'fastannotate: requesting %d files\n' % len(paths))
        for p in paths:
            results.append(
                batcher.callcommand(
                    b'getannotate',
                    {b'path': p, b'lastnode': lastnodemap.get(p)},
                )
            )

        for result in results:
            r = result.result()
            # TODO: pconvert these paths on the server?
            r = {util.pconvert(p): v for p, v in r.items()}
            for path in sorted(r):
                # ignore malicious paths
194 if not path.startswith(b'fastannotate/') or b'/../' in (
197 if not path.startswith(b'fastannotate/') or b'/../' in (
195 path + b'/'
198 path + b'/'
196 ):
199 ):
197 ui.debug(
200 ui.debug(
198 b'fastannotate: ignored malicious path %s\n' % path
201 b'fastannotate: ignored malicious path %s\n' % path
199 )
202 )
200 continue
203 continue
201 content = r[path]
204 content = r[path]
202 if ui.debugflag:
205 if ui.debugflag:
203 ui.debug(
206 ui.debug(
204 b'fastannotate: writing %d bytes to %s\n'
207 b'fastannotate: writing %d bytes to %s\n'
205 % (len(content), path)
208 % (len(content), path)
206 )
209 )
207 repo.vfs.makedirs(os.path.dirname(path))
210 repo.vfs.makedirs(os.path.dirname(path))
208 with repo.vfs(path, b'wb') as f:
211 with repo.vfs(path, b'wb') as f:
209 f.write(content)
212 f.write(content)
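
# A minimal usage sketch (hypothetical values): prefetch the annotate cache
# for two files, telling the server which node we already have for one:
#
#     clientfetch(repo, [b'a.py', b'b.py'],
#                 lastnodemap={b'a.py': knownnode})
#
# When "peer" is omitted, a peer is opened (and closed) via annotatepeer().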


def _filterfetchpaths(repo, paths):
    """return the subset of paths whose history is long enough that the
    linelog should be fetched from the server. Works with remotefilelog and
    non-remotefilelog repos.
    """
    threshold = repo.ui.configint(b'fastannotate', b'clientfetchthreshold', 10)
    if threshold <= 0:
        return paths

    result = []
    for path in paths:
        try:
            if len(repo.file(path)) >= threshold:
                result.append(path)
        except Exception:  # file not found etc.
            result.append(path)

    return result
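
# The threshold used above can be tuned via configuration; a value of 0 or
# less disables the filter so every requested path is fetched:
#
#     [fastannotate]
#     clientfetchthreshold = 20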


def localreposetup(ui, repo):
    class fastannotaterepo(repo.__class__):
        def prefetchfastannotate(self, paths, peer=None):
            master = _getmaster(self.ui)
            needupdatepaths = []
            lastnodemap = {}
            try:
                for path in _filterfetchpaths(self, paths):
                    with context.annotatecontext(self, path) as actx:
                        if not actx.isuptodate(master, strict=False):
                            needupdatepaths.append(path)
                            lastnodemap[path] = actx.lastnode
                if needupdatepaths:
                    clientfetch(self, needupdatepaths, lastnodemap, peer)
            except Exception as ex:
                # could be a directory that isn't writable or similar; not fatal
                self.ui.debug(b'fastannotate: prefetch failed: %r\n' % ex)

    repo.__class__ = fastannotaterepo


def clientreposetup(ui, repo):
    _registerwireprotocommand()
    if repo.local():
        localreposetup(ui, repo)
    # TODO: this mutates global state, but only if at least one repo
    # has the extension enabled. This is probably bad for hgweb.
    if peersetup not in hg.wirepeersetupfuncs:
        hg.wirepeersetupfuncs.append(peersetup)
@@ -1,198 +1,198
# fetch.py - pull and merge remote changes
#
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''pull, update and merge in one command (DEPRECATED)'''


from mercurial.i18n import _
from mercurial.node import short
from mercurial import (
    cmdutil,
    error,
    exchange,
    hg,
    lock,
    pycompat,
    registrar,
)
from mercurial.utils import (
    dateutil,
    urlutil,
)

release = lock.release
cmdtable = {}
command = registrar.command(cmdtable)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'


@command(
    b'fetch',
    [
        (
            b'r',
            b'rev',
            [],
            _(b'a specific revision you would like to pull'),
            _(b'REV'),
        ),
        (b'', b'edit', None, _(b'invoke editor on commit messages')),
        (b'', b'force-editor', None, _(b'edit commit message (DEPRECATED)')),
        (b'', b'switch-parent', None, _(b'switch parents when merging')),
    ]
    + cmdutil.commitopts
    + cmdutil.commitopts2
    + cmdutil.remoteopts,
    _(b'hg fetch [SOURCE]'),
    helpcategory=command.CATEGORY_REMOTE_REPO_MANAGEMENT,
)
def fetch(ui, repo, source=b'default', **opts):
58 """pull changes from a remote repository, merge new changes if needed.
58 """pull changes from a remote repository, merge new changes if needed.
59
59
60 This finds all changes from the repository at the specified path
60 This finds all changes from the repository at the specified path
61 or URL and adds them to the local repository.
61 or URL and adds them to the local repository.
62
62
63 If the pulled changes add a new branch head, the head is
63 If the pulled changes add a new branch head, the head is
64 automatically merged, and the result of the merge is committed.
64 automatically merged, and the result of the merge is committed.
65 Otherwise, the working directory is updated to include the new
65 Otherwise, the working directory is updated to include the new
66 changes.
66 changes.
67
67
68 When a merge is needed, the working directory is first updated to
68 When a merge is needed, the working directory is first updated to
69 the newly pulled changes. Local changes are then merged into the
69 the newly pulled changes. Local changes are then merged into the
70 pulled changes. To switch the merge order, use --switch-parent.
70 pulled changes. To switch the merge order, use --switch-parent.
71
71
72 See :hg:`help dates` for a list of formats valid for -d/--date.
72 See :hg:`help dates` for a list of formats valid for -d/--date.
73
73
74 Returns 0 on success.
74 Returns 0 on success.
75 """
75 """

    opts = pycompat.byteskwargs(opts)
    date = opts.get(b'date')
    if date:
        opts[b'date'] = dateutil.parsedate(date)

    parent = repo.dirstate.p1()
    branch = repo.dirstate.branch()
    try:
        branchnode = repo.branchtip(branch)
    except error.RepoLookupError:
        branchnode = None
    if parent != branchnode:
        raise error.Abort(
            _(b'working directory not at branch tip'),
            hint=_(b"use 'hg update' to check out branch tip"),
        )

    wlock = lock = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()

        cmdutil.bailifchanged(repo)

        bheads = repo.branchheads(branch)
        bheads = [head for head in bheads if len(repo[head].children()) == 0]
        if len(bheads) > 1:
            raise error.Abort(
                _(
                    b'multiple heads in this branch '
                    b'(use "hg heads ." and "hg merge" to merge)'
                )
            )

-        path = urlutil.get_unique_pull_path(b'fetch', repo, ui, source)[0]
+        path = urlutil.get_unique_pull_path_obj(b'fetch', ui, source)
        other = hg.peer(repo, opts, path)
-        ui.status(_(b'pulling from %s\n') % urlutil.hidepassword(path))
+        ui.status(_(b'pulling from %s\n') % urlutil.hidepassword(path.loc))
        revs = None
        if opts[b'rev']:
            try:
                revs = [other.lookup(rev) for rev in opts[b'rev']]
            except error.CapabilityError:
                err = _(
                    b"other repository doesn't support revision lookup, "
                    b"so a rev cannot be specified."
                )
                raise error.Abort(err)

        # Are there any changes at all?
        modheads = exchange.pull(repo, other, heads=revs).cgresult
        if modheads == 0:
            return 0

        # Is this a simple fast-forward along the current branch?
        newheads = repo.branchheads(branch)
        newchildren = repo.changelog.nodesbetween([parent], newheads)[2]
        if len(newheads) == 1 and len(newchildren):
            if newchildren[0] != parent:
                return hg.update(repo, newchildren[0])
            else:
                return 0

        # Is there more than one additional branch head?
        newchildren = [n for n in newchildren if n != parent]
        newparent = parent
        if newchildren:
            newparent = newchildren[0]
            hg.clean(repo, newparent)
        newheads = [n for n in newheads if n != newparent]
        if len(newheads) > 1:
            ui.status(
                _(
                    b'not merging with %d other new branch heads '
                    b'(use "hg heads ." and "hg merge" to merge them)\n'
                )
                % (len(newheads) - 1)
            )
            return 1

        if not newheads:
            return 0

        # Otherwise, let's merge.
        err = False
        if newheads:
            # By default, we consider the repository we're pulling
            # *from* as authoritative, so we merge our changes into
            # theirs.
            if opts[b'switch_parent']:
                firstparent, secondparent = newparent, newheads[0]
            else:
                firstparent, secondparent = newheads[0], newparent
            ui.status(
                _(b'updating to %d:%s\n')
                % (repo.changelog.rev(firstparent), short(firstparent))
            )
            hg.clean(repo, firstparent)
            p2ctx = repo[secondparent]
            ui.status(
                _(b'merging with %d:%s\n') % (p2ctx.rev(), short(secondparent))
            )
            err = hg.merge(p2ctx, remind=False)

        if not err:
            # we don't translate commit messages
            message = cmdutil.logmessage(ui, opts) or (
                b'Automated merge with %s' % urlutil.removeauth(other.url())
            )
            editopt = opts.get(b'edit') or opts.get(b'force_editor')
            editor = cmdutil.getcommiteditor(edit=editopt, editform=b'fetch')
            n = repo.commit(
                message, opts[b'user'], opts[b'date'], editor=editor
            )
            ui.status(
                _(b'new changeset %d:%s merges remote changes with local\n')
                % (repo.changelog.rev(n), short(n))
            )

        return err

    finally:
        release(lock, wlock)
@@ -1,958 +1,958
# fix - rewrite file content in changesets and working copy
#
# Copyright 2018 Google LLC.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""rewrite file content in changesets or working copy (EXPERIMENTAL)

Provides a command that runs configured tools on the contents of modified files,
writing back any fixes to the working copy or replacing changesets.

Here is an example configuration that causes :hg:`fix` to apply automatic
formatting fixes to modified lines in C++ code::

  [fix]
  clang-format:command=clang-format --assume-filename={rootpath}
  clang-format:linerange=--lines={first}:{last}
  clang-format:pattern=set:**.cpp or **.hpp

The :command suboption forms the first part of the shell command that will be
used to fix a file. The content of the file is passed on standard input, and the
fixed file content is expected on standard output. Any output on standard error
will be displayed as a warning. If the exit status is not zero, the file will
not be affected. A placeholder warning is displayed if there is a non-zero exit
status but no standard error output. Some values may be substituted into the
command::

  {rootpath}  The path of the file being fixed, relative to the repo root
  {basename}  The name of the file being fixed, without the directory path

If the :linerange suboption is set, the tool will only be run if there are
changed lines in a file. The value of this suboption is appended to the shell
command once for every range of changed lines in the file. Some values may be
substituted into the command::

  {first}  The 1-based line number of the first line in the modified range
  {last}   The 1-based line number of the last line in the modified range

Deleted sections of a file will be ignored by :linerange, because there is no
corresponding line range in the version being fixed.

By default, tools that set :linerange will only be executed if there is at least
one changed line range. This is meant to prevent accidents like running a code
formatter in such a way that it unexpectedly reformats the whole file. If such a
tool needs to operate on unchanged files, it should set the :skipclean suboption
to false.
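
For example, a hypothetical formatter "myformatter" that is safe to run on
unchanged files could be configured as::

  [fix]
  myformatter:command = myformatter
  myformatter:pattern = set:**.py
  myformatter:skipclean = false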

The :pattern suboption determines which files will be passed through each
configured tool. See :hg:`help patterns` for possible values. However, all
patterns are relative to the repo root, even if that text says they are relative
to the current working directory. If there are file arguments to :hg:`fix`, the
intersection of these patterns is used.

There is also a configurable limit for the maximum size of a file that will be
processed by :hg:`fix`::

  [fix]
  maxfilesize = 2MB

Normally, execution of configured tools will continue after a failure (indicated
by a non-zero exit status). It can also be configured to abort after the first
such failure, so that no files will be affected if any tool fails. This abort
will also cause :hg:`fix` to exit with a non-zero status::

  [fix]
  failure = abort

When multiple tools are configured to affect a file, they execute in an order
defined by the :priority suboption. The priority suboption has a default value
of zero for each tool. Tools are executed in order of descending priority. The
execution order of tools with equal priority is unspecified. For example, you
could use the 'sort' and 'head' utilities to keep only the 10 smallest numbers
in a text file by ensuring that 'sort' runs before 'head'::

  [fix]
  sort:command = sort -n
  head:command = head -n 10
  sort:pattern = numbers.txt
  head:pattern = numbers.txt
  sort:priority = 2
  head:priority = 1

To account for changes made by each tool, the line numbers used for incremental
formatting are recomputed before executing the next tool. So, each tool may see
different values for the arguments added by the :linerange suboption.

Each fixer tool is allowed to return some metadata in addition to the fixed file
content. The metadata must be placed before the file content on stdout,
separated from the file content by a zero byte. The metadata is parsed as a JSON
value (so, it should be UTF-8 encoded and contain no zero bytes). A fixer tool
is expected to produce this metadata encoding if and only if the :metadata
suboption is true::

  [fix]
  tool:command = tool --prepend-json-metadata
  tool:metadata = true

The metadata values are passed to hooks, which can be used to print summaries or
perform other post-fixing work. The supported hooks are::

  "postfixfile"
    Run once for each file in each revision where any fixer tools made changes
    to the file content. Provides "$HG_REV" and "$HG_PATH" to identify the file,
    and "$HG_METADATA" with a map of fixer names to metadata values from fixer
    tools that affected the file. Fixer tools that didn't affect the file have a
    value of None. Only fixer tools that executed are present in the metadata.

  "postfix"
    Run once after all files and revisions have been handled. Provides
    "$HG_REPLACEMENTS" with information about what revisions were created and
    made obsolete. Provides a boolean "$HG_WDIRWRITTEN" to indicate whether any
    files in the working copy were updated. Provides a list "$HG_METADATA"
    mapping fixer tool names to lists of metadata values returned from
    executions that modified a file. This aggregates the same metadata
    previously passed to the "postfixfile" hook.

Fixer tools are run in the repository's root directory. This allows them to read
configuration files from the working copy, or even write to the working copy.
The working copy is not updated to match the revision being fixed. In fact,
several revisions may be fixed in parallel. Writes to the working copy are not
amended into the revision being fixed; fixer tools should always write fixed
file content back to stdout as documented above.
"""


import collections
import itertools
import os
import re
import subprocess

from mercurial.i18n import _
from mercurial.node import (
    nullid,
    nullrev,
    wdirrev,
)

from mercurial.utils import procutil

from mercurial import (
    cmdutil,
    context,
    copies,
    error,
    logcmdutil,
    match as matchmod,
    mdiff,
    merge,
    mergestate as mergestatemod,
    pycompat,
    registrar,
    rewriteutil,
    scmutil,
    util,
    worker,
)

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'

cmdtable = {}
command = registrar.command(cmdtable)

configtable = {}
configitem = registrar.configitem(configtable)

# Register the suboptions allowed for each configured fixer, and default values.
FIXER_ATTRS = {
    b'command': None,
    b'linerange': None,
    b'pattern': None,
    b'priority': 0,
    b'metadata': False,
    b'skipclean': True,
    b'enabled': True,
}

for key, default in FIXER_ATTRS.items():
    configitem(b'fix', b'.*:%s$' % key, default=default, generic=True)

# A good default size allows most source code files to be fixed, but avoids
# letting fixer tools choke on huge inputs, which could be surprising to the
# user.
configitem(b'fix', b'maxfilesize', default=b'2MB')

# Allow fix commands to exit non-zero if an executed fixer tool exits non-zero.
# This helps users write shell scripts that stop when a fixer tool signals a
# problem.
configitem(b'fix', b'failure', default=b'continue')


def checktoolfailureaction(ui, message, hint=None):
    """Abort with 'message' if fix.failure=abort"""
    action = ui.config(b'fix', b'failure')
    if action not in (b'continue', b'abort'):
        raise error.Abort(
            _(b'unknown fix.failure action: %s') % (action,),
            hint=_(b'use "continue" or "abort"'),
        )
    if action == b'abort':
        raise error.Abort(message, hint=hint)


allopt = (b'', b'all', False, _(b'fix all non-public non-obsolete revisions'))
baseopt = (
    b'',
    b'base',
    [],
    _(
        b'revisions to diff against (overrides automatic '
        b'selection, and applies to every revision being '
        b'fixed)'
    ),
    _(b'REV'),
)
revopt = (b'r', b'rev', [], _(b'revisions to fix (ADVANCED)'), _(b'REV'))
sourceopt = (
    b's',
    b'source',
    [],
    _(b'fix the specified revisions and their descendants'),
    _(b'REV'),
)
wdiropt = (b'w', b'working-dir', False, _(b'fix the working directory'))
wholeopt = (b'', b'whole', False, _(b'always fix every line of a file'))
usage = _(b'[OPTION]... [FILE]...')


@command(
    b'fix',
    [allopt, baseopt, revopt, sourceopt, wdiropt, wholeopt],
    usage,
    helpcategory=command.CATEGORY_FILE_CONTENTS,
)
def fix(ui, repo, *pats, **opts):
    """rewrite file content in changesets or working directory

    Runs any configured tools to fix the content of files. Only affects files
    with changes, unless file arguments are provided. Only affects changed lines
    of files, unless the --whole flag is used. Some tools may always affect the
    whole file regardless of --whole.

    If --working-dir is used, files with uncommitted changes in the working copy
    will be fixed. Note that no backups are made.

    If revisions are specified with --source, those revisions and their
    descendants will be checked, and they may be replaced with new revisions
    that have fixed file content. By automatically including the descendants,
    no merging, rebasing, or evolution will be required. If an ancestor of the
    working copy is included, then the working copy itself will also be fixed,
    and the working copy will be updated to the fixed parent.

    When determining what lines of each file to fix at each revision, the whole
    set of revisions being fixed is considered, so that fixes to earlier
    revisions are not forgotten in later ones. The --base flag can be used to
    override this default behavior, though it is not usually desirable to do so.
    """
    opts = pycompat.byteskwargs(opts)
    cmdutil.check_at_most_one_arg(opts, b'all', b'source', b'rev')
    cmdutil.check_incompatible_arguments(
        opts, b'working_dir', [b'all', b'source']
    )

    with repo.wlock(), repo.lock(), repo.transaction(b'fix'):
        revstofix = getrevstofix(ui, repo, opts)
        basectxs = getbasectxs(repo, opts, revstofix)
        workqueue, numitems = getworkqueue(
            ui, repo, pats, opts, revstofix, basectxs
        )
        basepaths = getbasepaths(repo, opts, workqueue, basectxs)
        fixers = getfixers(ui)

        # Rather than letting each worker independently fetch the files
        # (which also would add complications for shared/keepalive
        # connections), prefetch them all first.
        _prefetchfiles(repo, workqueue, basepaths)

        # There are no data dependencies between the workers fixing each file
        # revision, so we can use all available parallelism.
        def getfixes(items):
            for srcrev, path, dstrevs in items:
                ctx = repo[srcrev]
                olddata = ctx[path].data()
                metadata, newdata = fixfile(
                    ui,
                    repo,
                    opts,
                    fixers,
                    ctx,
                    path,
                    basepaths,
                    basectxs[srcrev],
                )
                # We ungroup the work items now, because the code that consumes
                # these results has to handle each dstrev separately, and in
                # topological order. Because these are handled in topological
                # order, it's important that we pass around references to
                # "newdata" instead of copying it. Otherwise, we would be
                # keeping more copies of file content in memory at a time than
                # if we hadn't bothered to group/deduplicate the work items.
                data = newdata if newdata != olddata else None
                for dstrev in dstrevs:
                    yield (dstrev, path, metadata, data)

        results = worker.worker(
            ui, 1.0, getfixes, tuple(), workqueue, threadsafe=False
        )

        # We have to hold on to the data for each successor revision in memory
        # until all its parents are committed. We ensure this by committing and
        # freeing memory for the revisions in some topological order. This
        # leaves a little bit of memory efficiency on the table, but also makes
        # the tests deterministic. It might also be considered a feature since
        # it makes the results more easily reproducible.
        filedata = collections.defaultdict(dict)
        aggregatemetadata = collections.defaultdict(list)
        replacements = {}
        wdirwritten = False
        commitorder = sorted(revstofix, reverse=True)
        with ui.makeprogress(
            topic=_(b'fixing'), unit=_(b'files'), total=sum(numitems.values())
        ) as progress:
            for rev, path, filerevmetadata, newdata in results:
                progress.increment(item=path)
                for fixername, fixermetadata in filerevmetadata.items():
                    aggregatemetadata[fixername].append(fixermetadata)
                if newdata is not None:
                    filedata[rev][path] = newdata
                    hookargs = {
                        b'rev': rev,
                        b'path': path,
                        b'metadata': filerevmetadata,
                    }
                    repo.hook(
                        b'postfixfile',
                        throw=False,
                        **pycompat.strkwargs(hookargs)
                    )
                numitems[rev] -= 1
                # Apply the fixes for this and any other revisions that are
                # ready and sitting at the front of the queue. Using a loop here
                # prevents the queue from being blocked by the first revision to
                # be ready out of order.
                while commitorder and not numitems[commitorder[-1]]:
                    rev = commitorder.pop()
                    ctx = repo[rev]
                    if rev == wdirrev:
                        writeworkingdir(repo, ctx, filedata[rev], replacements)
                        wdirwritten = bool(filedata[rev])
                    else:
                        replacerev(ui, repo, ctx, filedata[rev], replacements)
                    del filedata[rev]

        cleanup(repo, replacements, wdirwritten)
        hookargs = {
            b'replacements': replacements,
            b'wdirwritten': wdirwritten,
            b'metadata': aggregatemetadata,
        }
        repo.hook(b'postfix', throw=True, **pycompat.strkwargs(hookargs))


def cleanup(repo, replacements, wdirwritten):
    """Calls scmutil.cleanupnodes() with the given replacements.

    "replacements" is a dict from nodeid to nodeid, with one key and one value
    for every revision that was affected by fixing. This is slightly different
    from cleanupnodes().

    "wdirwritten" is a bool which tells whether the working copy was affected by
    fixing, since it has no entry in "replacements".

    Useful as a hook point for extending "hg fix" with output summarizing the
    effects of the command, though we choose not to output anything here.
    """
    replacements = {prec: [succ] for prec, succ in replacements.items()}
    scmutil.cleanupnodes(repo, replacements, b'fix', fixphase=True)


def getworkqueue(ui, repo, pats, opts, revstofix, basectxs):
    """Constructs a list of files to fix and which revisions each fix applies to

    To avoid duplicating work, there is usually only one work item for each file
    revision that might need to be fixed. There can be multiple work items per
    file revision if the same file needs to be fixed in multiple changesets with
    different baserevs. Each work item also contains a list of changesets where
    the file's data should be replaced with the fixed data. The work items for
    earlier changesets come earlier in the work queue, to improve pipelining by
    allowing the first changeset to be replaced while fixes are still being
    computed for later changesets.

    Also returned is a map from changesets to the count of work items that might
    affect each changeset. This is used later to count when all of a changeset's
    work items have been finished, without having to inspect the remaining work
    queue in each worker subprocess.

    The example work item (1, "foo/bar.txt", (1, 2, 3)) means that the data of
    bar.txt should be read from revision 1, then fixed, and written back to
    revisions 1, 2 and 3. Revision 1 is called the "srcrev" and the list of
    revisions is called the "dstrevs". In practice the srcrev is always one of
    the dstrevs, and we make that choice when constructing the work item so that
    the choice can't be made inconsistently later on. The dstrevs should all
    have the same file revision for the given path, so the choice of srcrev is
    arbitrary. The wdirrev can be a dstrev and a srcrev.
    """
    dstrevmap = collections.defaultdict(list)
    numitems = collections.defaultdict(int)
    maxfilesize = ui.configbytes(b'fix', b'maxfilesize')
    for rev in sorted(revstofix):
        fixctx = repo[rev]
        match = scmutil.match(fixctx, pats, opts)
        for path in sorted(
            pathstofix(ui, repo, pats, opts, match, basectxs[rev], fixctx)
        ):
            fctx = fixctx[path]
            if fctx.islink():
                continue
            if fctx.size() > maxfilesize:
                ui.warn(
                    _(b'ignoring file larger than %s: %s\n')
                    % (util.bytecount(maxfilesize), path)
                )
                continue
            baserevs = tuple(ctx.rev() for ctx in basectxs[rev])
            dstrevmap[(fctx.filerev(), baserevs, path)].append(rev)
            numitems[rev] += 1
    workqueue = [
        (min(dstrevs), path, dstrevs)
        for (_filerev, _baserevs, path), dstrevs in dstrevmap.items()
    ]
    # Move work items for earlier changesets to the front of the queue, so we
    # might be able to replace those changesets (in topological order) while
    # we're still processing later work items. Note the min() in the previous
    # expression, which means we don't need a custom comparator here. The path
    # is also important in the sort order to make the output order stable. There
    # are some situations where this doesn't help much, but some situations
    # where it lets us buffer O(1) files instead of O(n) files.
    workqueue.sort()
    return workqueue, numitems
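
# Illustration only: for two revisions 1 and 2 that share the same file
# revision of "foo.py" (and the same baserevs), getworkqueue() would produce
# roughly
#
#   workqueue = [(1, b'foo.py', [1, 2])]
#   numitems  = {1: 1, 2: 1}
#
# i.e. one work item whose fixed data is written back to both changesets.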


def getrevstofix(ui, repo, opts):
    """Returns the set of revision numbers that should be fixed"""
    if opts[b'all']:
        revs = repo.revs(b'(not public() and not obsolete()) or wdir()')
    elif opts[b'source']:
        source_revs = logcmdutil.revrange(repo, opts[b'source'])
        revs = set(repo.revs(b'(%ld::) - obsolete()', source_revs))
        if wdirrev in source_revs:
            # `wdir()::` is currently empty, so manually add wdir
            revs.add(wdirrev)
        if repo[b'.'].rev() in revs:
            revs.add(wdirrev)
    else:
        revs = set(logcmdutil.revrange(repo, opts[b'rev']))
        if opts.get(b'working_dir'):
            revs.add(wdirrev)
    # Allow fixing only wdir() even if there's an unfinished operation
    if not (len(revs) == 1 and wdirrev in revs):
        cmdutil.checkunfinished(repo)
        rewriteutil.precheck(repo, revs, b'fix')
    if (
        wdirrev in revs
        and mergestatemod.mergestate.read(repo).unresolvedcount()
    ):
        raise error.Abort(b'unresolved conflicts', hint=b"use 'hg resolve'")
    if not revs:
        raise error.Abort(
            b'no changesets specified', hint=b'use --source or --working-dir'
        )
    return revs


def pathstofix(ui, repo, pats, opts, match, basectxs, fixctx):
    """Returns the set of files that should be fixed in a context

    The result depends on the base contexts; we include any file that has
    changed relative to any of the base contexts. Base contexts should be
    ancestors of the context being fixed.
    """
    files = set()
    for basectx in basectxs:
        stat = basectx.status(
            fixctx, match=match, listclean=bool(pats), listunknown=bool(pats)
        )
        files.update(
            set(
                itertools.chain(
                    stat.added, stat.modified, stat.clean, stat.unknown
                )
            )
        )
    return files


def lineranges(opts, path, basepaths, basectxs, fixctx, content2):
    """Returns the set of line ranges that should be fixed in a file

    Of the form [(10, 20), (30, 40)].

    This depends on the given base contexts; we must consider lines that have
    changed versus any of the base contexts, and whether the file has been
    renamed versus any of them.

    Another way to understand this is that we exclude line ranges that are
    common to the file in all base contexts.
    """
    if opts.get(b'whole'):
        # Return a range containing all lines. Rely on the diff implementation's
        # idea of how many lines are in the file, instead of reimplementing it.
        return difflineranges(b'', content2)

    rangeslist = []
    for basectx in basectxs:
        basepath = basepaths.get((basectx.rev(), fixctx.rev(), path), path)

        if basepath in basectx:
            content1 = basectx[basepath].data()
        else:
            content1 = b''
        rangeslist.extend(difflineranges(content1, content2))
    return unionranges(rangeslist)
527
527
528
528
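# A stand-alone illustration of the union step above, with hand-written
# stand-ins for difflineranges() output (hypothetical data): only lines
# that are clean relative to *every* base escape fixing.
#
#   ranges_vs_base_a = [(2, 2), (10, 14)]
#   ranges_vs_base_b = [(12, 20)]
#   unionranges(ranges_vs_base_a + ranges_vs_base_b)  # -> [(2, 2), (10, 20)]
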
def getbasepaths(repo, opts, workqueue, basectxs):
    if opts.get(b'whole'):
        # Base paths will never be fetched for line range determination.
        return {}

    basepaths = {}
    for srcrev, path, _dstrevs in workqueue:
        fixctx = repo[srcrev]
        for basectx in basectxs[srcrev]:
            basepath = copies.pathcopies(basectx, fixctx).get(path, path)
            if basepath in basectx:
                basepaths[(basectx.rev(), fixctx.rev(), path)] = basepath
    return basepaths

def unionranges(rangeslist):
    """Return the union of some closed intervals

    >>> unionranges([])
    []
    >>> unionranges([(1, 100)])
    [(1, 100)]
    >>> unionranges([(1, 100), (1, 100)])
    [(1, 100)]
    >>> unionranges([(1, 100), (2, 100)])
    [(1, 100)]
    >>> unionranges([(1, 99), (1, 100)])
    [(1, 100)]
    >>> unionranges([(1, 100), (40, 60)])
    [(1, 100)]
    >>> unionranges([(1, 49), (50, 100)])
    [(1, 100)]
    >>> unionranges([(1, 48), (50, 100)])
    [(1, 48), (50, 100)]
    >>> unionranges([(1, 2), (3, 4), (5, 6)])
    [(1, 6)]
    """
    rangeslist = sorted(set(rangeslist))
    unioned = []
    if rangeslist:
        unioned, rangeslist = [rangeslist[0]], rangeslist[1:]
    for a, b in rangeslist:
        c, d = unioned[-1]
        if a > d + 1:
            unioned.append((a, b))
        else:
            unioned[-1] = (c, max(b, d))
    return unioned

def difflineranges(content1, content2):
    """Return list of line number ranges in content2 that differ from content1.

    Line numbers are 1-based. The numbers are the first and last line contained
    in the range. Single-line ranges have the same line number for the first and
    last line. Excludes any empty ranges that result from lines that are only
    present in content1. Relies on mdiff's idea of where the line endings are in
    the string.

    >>> from mercurial import pycompat
    >>> lines = lambda s: b'\\n'.join([c for c in pycompat.iterbytestr(s)])
    >>> difflineranges2 = lambda a, b: difflineranges(lines(a), lines(b))
    >>> difflineranges2(b'', b'')
    []
    >>> difflineranges2(b'a', b'')
    []
    >>> difflineranges2(b'', b'A')
    [(1, 1)]
    >>> difflineranges2(b'a', b'a')
    []
    >>> difflineranges2(b'a', b'A')
    [(1, 1)]
    >>> difflineranges2(b'ab', b'')
    []
    >>> difflineranges2(b'', b'AB')
    [(1, 2)]
    >>> difflineranges2(b'abc', b'ac')
    []
    >>> difflineranges2(b'ab', b'aCb')
    [(2, 2)]
    >>> difflineranges2(b'abc', b'aBc')
    [(2, 2)]
    >>> difflineranges2(b'ab', b'AB')
    [(1, 2)]
    >>> difflineranges2(b'abcde', b'aBcDe')
    [(2, 2), (4, 4)]
    >>> difflineranges2(b'abcde', b'aBCDe')
    [(2, 4)]
    """
    ranges = []
    for lines, kind in mdiff.allblocks(content1, content2):
        firstline, lastline = lines[2:4]
        if kind == b'!' and firstline != lastline:
            ranges.append((firstline + 1, lastline))
    return ranges

def getbasectxs(repo, opts, revstofix):
    """Returns a map of the base contexts for each revision

    The base contexts determine which lines are considered modified when we
    attempt to fix just the modified lines in a file. It also determines which
    files we attempt to fix, so it is important to compute this even when
    --whole is used.
    """
    # The --base flag overrides the usual logic, and we give every revision
    # exactly the set of baserevs that the user specified.
    if opts.get(b'base'):
        baserevs = set(logcmdutil.revrange(repo, opts.get(b'base')))
        if not baserevs:
            baserevs = {nullrev}
        basectxs = {repo[rev] for rev in baserevs}
        return {rev: basectxs for rev in revstofix}

    # Proceed in topological order so that we can easily determine each
    # revision's baserevs by looking at its parents and their baserevs.
    basectxs = collections.defaultdict(set)
    for rev in sorted(revstofix):
        ctx = repo[rev]
        for pctx in ctx.parents():
            if pctx.rev() in basectxs:
                basectxs[rev].update(basectxs[pctx.rev()])
            else:
                basectxs[rev].add(pctx)
    return basectxs

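# Stand-alone sketch of the propagation above over a toy DAG (hypothetical
# data; revs map to parent revs). A fixed revision inherits the bases of any
# parent that is also being fixed; a parent outside the fixed set is itself
# a base. In a linear history 0-1-2-3 with revstofix = {2, 3}, both 2 and 3
# end up based on 1, so 3 is diffed against 1 rather than against the
# about-to-be-replaced 2.
#
#   parents = {2: [1], 3: [2]}            # toy rev -> parent revs
#   bases = collections.defaultdict(set)
#   for rev in sorted({2, 3}):            # revstofix, in topological order
#       for p in parents[rev]:
#           if p in bases:                # parent is also being fixed
#               bases[rev].update(bases[p])
#           else:                         # parent untouched: it is the base
#               bases[rev].add(p)
#   # bases == {2: {1}, 3: {1}}
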
def _prefetchfiles(repo, workqueue, basepaths):
    toprefetch = set()

    # Prefetch the files that will be fixed.
    for srcrev, path, _dstrevs in workqueue:
        if srcrev == wdirrev:
            continue
        toprefetch.add((srcrev, path))

    # Prefetch the base contents for lineranges().
    for (baserev, fixrev, path), basepath in basepaths.items():
        toprefetch.add((baserev, basepath))

    if toprefetch:
        scmutil.prefetchfiles(
            repo,
            [
                (rev, scmutil.matchfiles(repo, [path]))
                for rev, path in toprefetch
            ],
        )

def fixfile(ui, repo, opts, fixers, fixctx, path, basepaths, basectxs):
    """Run any configured fixers that should affect the file in this context

    Returns the file content that results from applying the fixers in some order
    starting with the file's content in the fixctx. Fixers that support line
    ranges will affect lines that have changed relative to any of the basectxs
    (i.e. they will only avoid lines that are common to all basectxs).

    A fixer tool's stdout will become the file's new content if and only if it
    exits with code zero. The fixer tool's working directory is the repository's
    root.
    """
    metadata = {}
    newdata = fixctx[path].data()
    for fixername, fixer in fixers.items():
        if fixer.affects(opts, fixctx, path):
            ranges = lineranges(
                opts, path, basepaths, basectxs, fixctx, newdata
            )
            command = fixer.command(ui, path, ranges)
            if command is None:
                continue
            msg = b'fixing: %s - %s - %s\n'
            msg %= (fixctx, fixername, path)
            ui.debug(msg)
            ui.debug(b'subprocess: %s\n' % (command,))
            proc = subprocess.Popen(
                procutil.tonativestr(command),
                shell=True,
                cwd=procutil.tonativestr(repo.root),
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )
            stdout, stderr = proc.communicate(newdata)
            if stderr:
                showstderr(ui, fixctx.rev(), fixername, stderr)
            newerdata = stdout
            if fixer.shouldoutputmetadata():
                try:
                    metadatajson, newerdata = stdout.split(b'\0', 1)
                    metadata[fixername] = pycompat.json_loads(metadatajson)
                except ValueError:
                    ui.warn(
                        _(b'ignored invalid output from fixer tool: %s\n')
                        % (fixername,)
                    )
                    continue
            else:
                metadata[fixername] = None
            if proc.returncode == 0:
                newdata = newerdata
            else:
                if not stderr:
                    message = _(b'exited with status %d\n') % (proc.returncode,)
                    showstderr(ui, fixctx.rev(), fixername, message)
                checktoolfailureaction(
                    ui,
                    _(b'no fixes will be applied'),
                    hint=_(
                        b'use --config fix.failure=continue to apply any '
                        b'successful fixes anyway'
                    ),
                )
    return metadata, newdata

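# A minimal fixer tool satisfying the protocol fixfile() drives (a sketch,
# not part of the extension; any executable with this behavior works): read
# the file content on stdin, write the fixed content on stdout, and exit 0
# for the output to be kept. With the :metadata suboption enabled, stdout
# must start with a JSON object followed by a NUL byte, then the content.
#
#   #!/usr/bin/env python3
#   import json, sys
#   data = sys.stdin.buffer.read()
#   fixed = data.replace(b'\t', b'    ')        # toy "fix": expand tabs
#   meta = json.dumps({'tabs': data.count(b'\t')}).encode('ascii')
#   sys.stdout.buffer.write(meta + b'\0' + fixed)
#   sys.exit(0)
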
def showstderr(ui, rev, fixername, stderr):
    """Writes the lines of the stderr string as warnings on the ui

    Uses the revision number and fixername to give more context to each line of
    the error message. Doesn't include file names, since those take up a lot of
    space and would tend to be included in the error message if they were
    relevant.
    """
    for line in re.split(b'[\r\n]+', stderr):
        if line:
            ui.warn(b'[')
            if rev is None:
                ui.warn(_(b'wdir'), label=b'evolve.rev')
            else:
                ui.warn(b'%d' % rev, label=b'evolve.rev')
            ui.warn(b'] %s: %s\n' % (fixername, line))

def writeworkingdir(repo, ctx, filedata, replacements):
    """Write new content to the working copy and check out the new p1 if any

    We check out a new revision if and only if we fixed something in both the
    working directory and its parent revision. This avoids the need for a full
    update/merge, and means that the working directory simply isn't affected
    unless the --working-dir flag is given.

    Directly updates the dirstate for the affected files.
    """
    for path, data in filedata.items():
        fctx = ctx[path]
        fctx.write(data, fctx.flags())

    oldp1 = repo.dirstate.p1()
    newp1 = replacements.get(oldp1, oldp1)
    if newp1 != oldp1:
        assert repo.dirstate.p2() == nullid
        with repo.dirstate.changing_parents(repo):
            scmutil.movedirstate(repo, repo[newp1])

def replacerev(ui, repo, ctx, filedata, replacements):
    """Commit a new revision like the given one, but with file content changes

    "ctx" is the original revision to be replaced by a modified one.

    "filedata" is a dict that maps paths to their new file content. All other
    paths will be recreated from the original revision without changes.
    "filedata" may contain paths that didn't exist in the original revision;
    they will be added.

    "replacements" is a dict that maps a single node to a single node, and it is
    updated to indicate the original revision is replaced by the newly created
    one. No entry is added if the replacement's node already exists.

    The new revision has the same parents as the old one, unless those parents
    have already been replaced, in which case those replacements are the parents
    of this new revision. Thus, if revisions are replaced in topological order,
    there is no need to rebase them into the original topology later.
    """

    p1rev, p2rev = repo.changelog.parentrevs(ctx.rev())
    p1ctx, p2ctx = repo[p1rev], repo[p2rev]
    newp1node = replacements.get(p1ctx.node(), p1ctx.node())
    newp2node = replacements.get(p2ctx.node(), p2ctx.node())

    # We don't want to create a revision that has no changes from the original,
    # but we should if the original revision's parent has been replaced.
    # Otherwise, we would produce an orphan that needs no actual human
    # intervention to evolve. We can't rely on commit() to avoid creating the
    # un-needed revision because the extra field added below produces a new hash
    # regardless of file content changes.
    if (
        not filedata
        and p1ctx.node() not in replacements
        and p2ctx.node() not in replacements
    ):
        return

    extra = ctx.extra().copy()
    extra[b'fix_source'] = ctx.hex()

    wctx = context.overlayworkingctx(repo)
    wctx.setbase(repo[newp1node])
    merge.revert_to(ctx, wc=wctx)
    copies.graftcopies(wctx, ctx, ctx.p1())

    for path in filedata.keys():
        fctx = ctx[path]
        copysource = fctx.copysource()
        wctx.write(path, filedata[path], flags=fctx.flags())
        if copysource:
            wctx.markcopied(path, copysource)

    desc = rewriteutil.update_hash_refs(
        repo,
        ctx.description(),
        {oldnode: [newnode] for oldnode, newnode in replacements.items()},
    )

    memctx = wctx.tomemctx(
        text=desc,
        branch=ctx.branch(),
        extra=extra,
        date=ctx.date(),
        parents=(newp1node, newp2node),
        user=ctx.user(),
    )

    sucnode = memctx.commit()
    prenode = ctx.node()
    if prenode == sucnode:
        ui.debug(b'node %s already existed\n' % (ctx.hex()))
    else:
        replacements[ctx.node()] = sucnode

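# Sketch of how "replacements" chains rewrites when revisions are handled in
# topological order (toy single-letter nodes, hypothetical data): each new
# commit's parents are looked up through the map first, so descendants are
# committed on top of the already-rewritten ancestors and no later rebase is
# needed.
#
#   replacements, newparents = {}, {}
#   for node, parent in [('A', '0'), ('B', 'A'), ('C', 'B')]:
#       newparents[node] = replacements.get(parent, parent)
#       replacements[node] = node.lower()     # stand-in for the new commit
#   # newparents == {'A': '0', 'B': 'a', 'C': 'b'}
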
def getfixers(ui):
    """Returns a map of configured fixer tools indexed by their names

    Each value is a Fixer object with methods that implement the behavior of the
    fixer's config suboptions. Does not validate the config values.
    """
    fixers = {}
    for name in fixernames(ui):
        enabled = ui.configbool(b'fix', name + b':enabled')
        command = ui.config(b'fix', name + b':command')
        pattern = ui.config(b'fix', name + b':pattern')
        linerange = ui.config(b'fix', name + b':linerange')
        priority = ui.configint(b'fix', name + b':priority')
        metadata = ui.configbool(b'fix', name + b':metadata')
        skipclean = ui.configbool(b'fix', name + b':skipclean')
        # Don't use a fixer if it has no pattern configured. It would be
        # dangerous to let it affect all files. It would be pointless to let it
        # affect no files. There is no reasonable subset of files to use as the
        # default.
        if command is None:
            ui.warn(
                _(b'fixer tool has no command configuration: %s\n') % (name,)
            )
        elif pattern is None:
            ui.warn(
                _(b'fixer tool has no pattern configuration: %s\n') % (name,)
            )
        elif not enabled:
            ui.debug(b'ignoring disabled fixer tool: %s\n' % (name,))
        else:
            fixers[name] = Fixer(
                command, pattern, linerange, priority, metadata, skipclean
            )
    return collections.OrderedDict(
        sorted(fixers.items(), key=lambda item: item[1]._priority, reverse=True)
    )

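# Example [fix] configuration consumed above (the clang-format shape from the
# extension's documentation; any tool with a similar command line fits):
#
#   [fix]
#   clang-format:command = clang-format --assume-filename={rootpath}
#   clang-format:linerange = --lines={first}:{last}
#   clang-format:pattern = set:**.cpp or **.hpp
#   clang-format:priority = 1
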
def fixernames(ui):
    """Returns the names of [fix] config options that have suboptions"""
    names = set()
    for k, v in ui.configitems(b'fix'):
        if b':' in k:
            names.add(k.split(b':', 1)[0])
    return names

class Fixer:
    """Wraps the raw config values for a fixer with methods"""

    def __init__(
        self, command, pattern, linerange, priority, metadata, skipclean
    ):
        self._command = command
        self._pattern = pattern
        self._linerange = linerange
        self._priority = priority
        self._metadata = metadata
        self._skipclean = skipclean

    def affects(self, opts, fixctx, path):
        """Should this fixer run on the file at the given path and context?"""
        repo = fixctx.repo()
        matcher = matchmod.match(
            repo.root, repo.root, [self._pattern], ctx=fixctx
        )
        return matcher(path)

    def shouldoutputmetadata(self):
        """Should the stdout of this fixer start with JSON and a null byte?"""
        return self._metadata

    def command(self, ui, path, ranges):
        """A shell command to use to invoke this fixer on the given file/lines

        May return None if there is no appropriate command to run for the given
        parameters.
        """
        expand = cmdutil.rendercommandtemplate
        parts = [
            expand(
                ui,
                self._command,
                {b'rootpath': path, b'basename': os.path.basename(path)},
            )
        ]
        if self._linerange:
            if self._skipclean and not ranges:
                # No line ranges to fix, so don't run the fixer.
                return None
            for first, last in ranges:
                parts.append(
                    expand(
                        ui, self._linerange, {b'first': first, b'last': last}
                    )
                )
        return b' '.join(parts)
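
# Sketch of the command Fixer.command() would render for the clang-format
# configuration shown after getfixers() above, given path b'src/foo.cpp' and
# ranges [(1, 10), (30, 40)] (template expansion written out by hand; the
# real code delegates to cmdutil.rendercommandtemplate):
#
#   b'clang-format --assume-filename=src/foo.cpp --lines=1:10 --lines=30:40'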

[diff truncated by the viewer: the remaining files in this commit were elided]
file renamed from mercurial/thirdparty/attr/LICENSE.txt to mercurial/thirdparty/attr/LICENSE
file renamed from rust/hg-core/src/config.rs to rust/hg-core/src/config/mod.rs
file renamed from rust/hg-core/src/revlog.rs to rust/hg-core/src/revlog/mod.rs
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: file was removed
NO CONTENT: file was removed
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: file was removed
NO CONTENT: file was removed
The requested commit or file is too big and content was truncated. Show full diff
General Comments 0
You need to be logged in to leave comments. Login now