The requested changes are too big and content was truncated.

@@ -1,127 +1,141 @@
 stages:
   - tests
   - phabricator
 
 image: registry.heptapod.net/mercurial/ci-images/mercurial-core:$HG_CI_IMAGE_TAG
 
 variables:
   PYTHON: python
   TEST_HGMODULEPOLICY: "allow"
   HG_CI_IMAGE_TAG: "latest"
   TEST_HGTESTS_ALLOW_NETIO: "0"
 
 .runtests_template: &runtests
   stage: tests
   # The runner made a clone as root.
   # We make a new clone owned by user used to run the step.
   before_script:
     - hg clone . /tmp/mercurial-ci/ --noupdate --config phases.publish=no
     - hg -R /tmp/mercurial-ci/ update `hg log --rev '.' --template '{node}'`
     - cd /tmp/mercurial-ci/
     - ls -1 tests/test-check-*.* > /tmp/check-tests.txt
     - black --version
     - clang-format --version
   script:
     - echo "python used, $PYTHON"
     - echo "$RUNTEST_ARGS"
     - HGTESTS_ALLOW_NETIO="$TEST_HGTESTS_ALLOW_NETIO" HGMODULEPOLICY="$TEST_HGMODULEPOLICY" "$PYTHON" tests/run-tests.py --color=always $RUNTEST_ARGS
 
 checks-py2:
   <<: *runtests
   variables:
     RUNTEST_ARGS: "--time --test-list /tmp/check-tests.txt"
 
 checks-py3:
   <<: *runtests
   variables:
     RUNTEST_ARGS: "--time --test-list /tmp/check-tests.txt"
     PYTHON: python3
 
 rust-cargo-test-py2: &rust_cargo_test
   stage: tests
   script:
     - echo "python used, $PYTHON"
     - make rust-tests
 
 rust-cargo-test-py3:
   stage: tests
   <<: *rust_cargo_test
   variables:
     PYTHON: python3
 
 phabricator-refresh:
   stage: phabricator
   variables:
     DEFAULT_COMMENT: ":white_check_mark: refresh by Heptapod after a successful CI run (:octopus: :green_heart:)"
     STABLE_COMMENT: ":white_check_mark: refresh by Heptapod after a successful CI run (:octopus: :green_heart:)\n⚠ This patch is intended for stable ⚠\n{image https://media.giphy.com/media/nYI8SmmChYXK0/source.gif}"
   script:
     - |
       if [ `hg branch` == "stable" ]; then
           ./contrib/phab-refresh-stack.sh --comment "$STABLE_COMMENT";
       else
           ./contrib/phab-refresh-stack.sh --comment "$DEFAULT_COMMENT";
       fi
 
 test-py2:
   <<: *runtests
   variables:
     RUNTEST_ARGS: " --no-rust --blacklist /tmp/check-tests.txt"
     TEST_HGMODULEPOLICY: "c"
     TEST_HGTESTS_ALLOW_NETIO: "1"
 
 test-py3:
   <<: *runtests
   variables:
     RUNTEST_ARGS: " --no-rust --blacklist /tmp/check-tests.txt"
     PYTHON: python3
     TEST_HGMODULEPOLICY: "c"
     TEST_HGTESTS_ALLOW_NETIO: "1"
 
 test-py2-pure:
   <<: *runtests
   variables:
     RUNTEST_ARGS: "--pure --blacklist /tmp/check-tests.txt"
     TEST_HGMODULEPOLICY: "py"
 
 test-py3-pure:
   <<: *runtests
   variables:
     RUNTEST_ARGS: "--pure --blacklist /tmp/check-tests.txt"
     PYTHON: python3
     TEST_HGMODULEPOLICY: "py"
 
 test-py2-rust:
   <<: *runtests
   variables:
     HGWITHRUSTEXT: cpython
     RUNTEST_ARGS: "--rust --blacklist /tmp/check-tests.txt"
     TEST_HGMODULEPOLICY: "rust+c"
 
 test-py3-rust:
   <<: *runtests
   variables:
     HGWITHRUSTEXT: cpython
     RUNTEST_ARGS: "--rust --blacklist /tmp/check-tests.txt"
     PYTHON: python3
     TEST_HGMODULEPOLICY: "rust+c"
 
 test-py3-rhg:
   <<: *runtests
   variables:
     HGWITHRUSTEXT: cpython
     RUNTEST_ARGS: "--rust --rhg --blacklist /tmp/check-tests.txt"
     PYTHON: python3
     TEST_HGMODULEPOLICY: "rust+c"
 
 test-py2-chg:
   <<: *runtests
   variables:
     RUNTEST_ARGS: "--blacklist /tmp/check-tests.txt --chg"
     TEST_HGMODULEPOLICY: "c"
 
 test-py3-chg:
   <<: *runtests
   variables:
     PYTHON: python3
     RUNTEST_ARGS: "--blacklist /tmp/check-tests.txt --chg"
     TEST_HGMODULEPOLICY: "c"
+
+check-pytype-py3:
+  extends: .runtests_template
+  when: manual
+  before_script:
+    - hg clone . /tmp/mercurial-ci/ --noupdate --config phases.publish=no
+    - hg -R /tmp/mercurial-ci/ update `hg log --rev '.' --template '{node}'`
+    - cd /tmp/mercurial-ci/
+    - make local PYTHON=$PYTHON
+    - $PYTHON -m pip install --user -U pytype==2021.04.15
+  variables:
+    RUNTEST_ARGS: " --allow-slow-tests tests/test-check-pytype.t"
+    PYTHON: python3
+    TEST_HGMODULEPOLICY: "c"
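
The jobs above share their definition through the YAML anchor &runtests and the merge key "<<: *runtests"; the added check-pytype-py3 job uses GitLab's "extends:" instead, which lets it override the template's before_script wholesale. A minimal sketch of how the merge key composes mappings, assuming PyYAML is installed; the job and values below are abbreviated stand-ins, not lines from the file:

import yaml  # PyYAML's loader resolves YAML 1.1 merge keys ("<<")

doc = """
.runtests_template: &runtests
  stage: tests
  script:
    - run-tests

test-py3:
  <<: *runtests
  variables:
    PYTHON: python3
"""

job = yaml.safe_load(doc)["test-py3"]
# The merge is shallow: keys come from the anchor unless the job sets
# them itself, and a key the job does set is taken as-is.
assert job["stage"] == "tests"
assert job["script"] == ["run-tests"]
assert job["variables"] == {"PYTHON": "python3"}
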
@@ -1,405 +1,403 @@
 # Perforce source for convert extension.
 #
 # Copyright 2009, Frank Kingswood <frank@kingswood-consulting.co.uk>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 from __future__ import absolute_import
 
 import marshal
 import re
 
 from mercurial.i18n import _
 from mercurial import (
     error,
     util,
 )
 from mercurial.utils import (
     dateutil,
     procutil,
     stringutil,
 )
 
 from . import common
 
 
 def loaditer(f):
     """Yield the dictionary objects generated by p4"""
     try:
         while True:
             d = marshal.load(f)
             if not d:
                 break
             yield d
     except EOFError:
         pass
 
 
 def decodefilename(filename):
     """Perforce escapes special characters @, #, *, or %
     with %40, %23, %2A, or %25 respectively
 
     >>> decodefilename(b'portable-net45%252Bnetcore45%252Bwp8%252BMonoAndroid')
     'portable-net45%2Bnetcore45%2Bwp8%2BMonoAndroid'
     >>> decodefilename(b'//Depot/Directory/%2525/%2523/%23%40.%2A')
     '//Depot/Directory/%25/%23/#@.*'
     """
     replacements = [
         (b'%2A', b'*'),
         (b'%23', b'#'),
         (b'%40', b'@'),
         (b'%25', b'%'),
     ]
     for k, v in replacements:
         filename = filename.replace(k, v)
     return filename
 
 
 class p4_source(common.converter_source):
     def __init__(self, ui, repotype, path, revs=None):
         # avoid import cycle
         from . import convcmd
 
         super(p4_source, self).__init__(ui, repotype, path, revs=revs)
 
         if b"/" in path and not path.startswith(b'//'):
             raise common.NoRepo(
                 _(b'%s does not look like a P4 repository') % path
             )
 
         common.checktool(b'p4', abort=False)
 
         self.revmap = {}
         self.encoding = self.ui.config(
             b'convert', b'p4.encoding', convcmd.orig_encoding
         )
         self.re_type = re.compile(
             br"([a-z]+)?(text|binary|symlink|apple|resource|unicode|utf\d+)"
             br"(\+\w+)?$"
         )
         self.re_keywords = re.compile(
             br"\$(Id|Header|Date|DateTime|Change|File|Revision|Author)"
             br":[^$\n]*\$"
         )
         self.re_keywords_old = re.compile(br"\$(Id|Header):[^$\n]*\$")
 
         if revs and len(revs) > 1:
             raise error.Abort(
                 _(
                     b"p4 source does not support specifying "
                     b"multiple revisions"
                 )
             )
 
     def setrevmap(self, revmap):
         """Sets the parsed revmap dictionary.
 
         Revmap stores mappings from a source revision to a target revision.
         It is set in convertcmd.convert and provided by the user as a file
         on the commandline.
 
         Revisions in the map are considered beeing present in the
         repository and ignored during _parse(). This allows for incremental
         imports if a revmap is provided.
         """
         self.revmap = revmap
 
     def _parse_view(self, path):
         """Read changes affecting the path"""
         cmd = b'p4 -G changes -s submitted %s' % procutil.shellquote(path)
         stdout = procutil.popen(cmd, mode=b'rb')
         p4changes = {}
         for d in loaditer(stdout):
             c = d.get(b"change", None)
             if c:
                 p4changes[c] = True
         return p4changes
 
     def _parse(self, ui, path):
         """Prepare list of P4 filenames and revisions to import"""
         p4changes = {}
         changeset = {}
         files_map = {}
         copies_map = {}
         localname = {}
         depotname = {}
         heads = []
 
         ui.status(_(b'reading p4 views\n'))
 
         # read client spec or view
         if b"/" in path:
             p4changes.update(self._parse_view(path))
             if path.startswith(b"//") and path.endswith(b"/..."):
                 views = {path[:-3]: b""}
             else:
                 views = {b"//": b""}
         else:
             cmd = b'p4 -G client -o %s' % procutil.shellquote(path)
             clientspec = marshal.load(procutil.popen(cmd, mode=b'rb'))
 
             views = {}
             for client in clientspec:
                 if client.startswith(b"View"):
                     sview, cview = clientspec[client].split()
                     p4changes.update(self._parse_view(sview))
                     if sview.endswith(b"...") and cview.endswith(b"..."):
                         sview = sview[:-3]
                         cview = cview[:-3]
                     cview = cview[2:]
                     cview = cview[cview.find(b"/") + 1 :]
                     views[sview] = cview
 
         # list of changes that affect our source files
-        p4changes = p4changes.keys()
-        p4changes.sort(key=int)
+        p4changes = sorted(p4changes.keys(), key=int)
 
         # list with depot pathnames, longest first
-        vieworder = views.keys()
-        vieworder.sort(key=len, reverse=True)
+        vieworder = sorted(views.keys(), key=len, reverse=True)
 
         # handle revision limiting
         startrev = self.ui.config(b'convert', b'p4.startrev')
 
         # now read the full changelists to get the list of file revisions
         ui.status(_(b'collecting p4 changelists\n'))
         lastid = None
         for change in p4changes:
             if startrev and int(change) < int(startrev):
                 continue
             if self.revs and int(change) > int(self.revs[0]):
                 continue
             if change in self.revmap:
                 # Ignore already present revisions, but set the parent pointer.
                 lastid = change
                 continue
 
             if lastid:
                 parents = [lastid]
             else:
                 parents = []
 
             d = self._fetch_revision(change)
             c = self._construct_commit(d, parents)
 
             descarr = c.desc.splitlines(True)
             if len(descarr) > 0:
                 shortdesc = descarr[0].rstrip(b'\r\n')
             else:
                 shortdesc = b'**empty changelist description**'
 
             t = b'%s %s' % (c.rev, shortdesc)
             ui.status(stringutil.ellipsis(t, 80) + b'\n')
 
             files = []
             copies = {}
             copiedfiles = []
             i = 0
             while (b"depotFile%d" % i) in d and (b"rev%d" % i) in d:
                 oldname = d[b"depotFile%d" % i]
                 filename = None
                 for v in vieworder:
                     if oldname.lower().startswith(v.lower()):
                         filename = decodefilename(views[v] + oldname[len(v) :])
                         break
                 if filename:
                     files.append((filename, d[b"rev%d" % i]))
                     depotname[filename] = oldname
                     if d.get(b"action%d" % i) == b"move/add":
                         copiedfiles.append(filename)
                     localname[oldname] = filename
                 i += 1
 
             # Collect information about copied files
             for filename in copiedfiles:
                 oldname = depotname[filename]
 
                 flcmd = b'p4 -G filelog %s' % procutil.shellquote(oldname)
                 flstdout = procutil.popen(flcmd, mode=b'rb')
 
                 copiedfilename = None
                 for d in loaditer(flstdout):
                     copiedoldname = None
 
                     i = 0
                     while (b"change%d" % i) in d:
                         if (
                             d[b"change%d" % i] == change
                             and d[b"action%d" % i] == b"move/add"
                         ):
                             j = 0
                             while (b"file%d,%d" % (i, j)) in d:
                                 if d[b"how%d,%d" % (i, j)] == b"moved from":
                                     copiedoldname = d[b"file%d,%d" % (i, j)]
                                     break
                                 j += 1
                         i += 1
 
                     if copiedoldname and copiedoldname in localname:
                         copiedfilename = localname[copiedoldname]
                         break
 
                 if copiedfilename:
                     copies[filename] = copiedfilename
                 else:
                     ui.warn(
                         _(b"cannot find source for copied file: %s@%s\n")
                         % (filename, change)
                     )
 
             changeset[change] = c
             files_map[change] = files
             copies_map[change] = copies
             lastid = change
 
         if lastid and len(changeset) > 0:
             heads = [lastid]
 
         return {
             b'changeset': changeset,
             b'files': files_map,
             b'copies': copies_map,
             b'heads': heads,
             b'depotname': depotname,
         }
 
     @util.propertycache
     def _parse_once(self):
         return self._parse(self.ui, self.path)
 
     @util.propertycache
     def copies(self):
         return self._parse_once[b'copies']
 
     @util.propertycache
     def files(self):
         return self._parse_once[b'files']
 
     @util.propertycache
     def changeset(self):
         return self._parse_once[b'changeset']
 
     @util.propertycache
     def heads(self):
         return self._parse_once[b'heads']
 
     @util.propertycache
     def depotname(self):
         return self._parse_once[b'depotname']
 
     def getheads(self):
         return self.heads
 
     def getfile(self, name, rev):
         cmd = b'p4 -G print %s' % procutil.shellquote(
             b"%s#%s" % (self.depotname[name], rev)
         )
 
         lasterror = None
         while True:
             stdout = procutil.popen(cmd, mode=b'rb')
 
             mode = None
             contents = []
             keywords = None
 
             for d in loaditer(stdout):
                 code = d[b"code"]
                 data = d.get(b"data")
 
                 if code == b"error":
                     # if this is the first time error happened
                     # re-attempt getting the file
                     if not lasterror:
                         lasterror = IOError(d[b"generic"], data)
                         # this will exit inner-most for-loop
                         break
                     else:
                         raise lasterror
 
                 elif code == b"stat":
                     action = d.get(b"action")
                     if action in [b"purge", b"delete", b"move/delete"]:
                         return None, None
                     p4type = self.re_type.match(d[b"type"])
                     if p4type:
                         mode = b""
                         flags = (p4type.group(1) or b"") + (
                             p4type.group(3) or b""
                         )
                         if b"x" in flags:
                             mode = b"x"
                         if p4type.group(2) == b"symlink":
                             mode = b"l"
                         if b"ko" in flags:
                             keywords = self.re_keywords_old
                         elif b"k" in flags:
                             keywords = self.re_keywords
 
                 elif code == b"text" or code == b"binary":
                     contents.append(data)
 
                 lasterror = None
 
             if not lasterror:
                 break
 
         if mode is None:
             return None, None
 
         contents = b''.join(contents)
 
         if keywords:
             contents = keywords.sub(b"$\\1$", contents)
         if mode == b"l" and contents.endswith(b"\n"):
             contents = contents[:-1]
 
         return contents, mode
 
     def getchanges(self, rev, full):
         if full:
             raise error.Abort(_(b"convert from p4 does not support --full"))
         return self.files[rev], self.copies[rev], set()
 
     def _construct_commit(self, obj, parents=None):
         """
         Constructs a common.commit object from an unmarshalled
         `p4 describe` output
         """
         desc = self.recode(obj.get(b"desc", b""))
         date = (int(obj[b"time"]), 0)  # timezone not set
         if parents is None:
             parents = []
 
         return common.commit(
             author=self.recode(obj[b"user"]),
             date=dateutil.datestr(date, b'%Y-%m-%d %H:%M:%S %1%2'),
             parents=parents,
             desc=desc,
             branch=None,
             rev=obj[b'change'],
             extra={b"p4": obj[b'change'], b"convert_revision": obj[b'change']},
         )
 
     def _fetch_revision(self, rev):
         """Return an output of `p4 describe` including author, commit date as
         a dictionary."""
         cmd = b"p4 -G describe -s %s" % rev
         stdout = procutil.popen(cmd, mode=b'rb')
         return marshal.load(stdout)
 
     def getcommit(self, rev):
         if rev in self.changeset:
             return self.changeset[rev]
         elif rev in self.revmap:
             d = self._fetch_revision(rev)
             return self._construct_commit(d, parents=None)
         raise error.Abort(
             _(b"cannot find %s in the revmap or parsed changesets") % rev
         )
 
     def gettags(self):
         return {}
 
     def getchangedfiles(self, rev, i):
         return sorted([x[0] for x in self.files[rev]])
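
The only substantive hunks in this file replace the Python 2 idiom of sorting a list returned by dict.keys() with sorted(), since dict.keys() returns an unsortable view object on Python 3. A self-contained sketch of that fix together with the marshalled record stream that loaditer() consumes; the record contents below are invented for illustration, as real "p4 -G" output carries many more fields:

import io
import marshal

def loaditer(f):
    """Yield the dictionary objects generated by p4 (same as above)."""
    try:
        while True:
            d = marshal.load(f)
            if not d:
                break
            yield d
    except EOFError:
        pass

# Fake "p4 -G changes" output: a concatenation of marshalled dicts,
# which is exactly what the p4 subprocess writes to stdout.
records = [{b"code": b"stat", b"change": b"3"},
           {b"code": b"stat", b"change": b"12"},
           {b"code": b"stat", b"change": b"105"}]
stream = io.BytesIO(b"".join(marshal.dumps(r) for r in records))

p4changes = {d[b"change"]: True for d in loaditer(stream)}

# On Python 3, "keys = p4changes.keys(); keys.sort(key=int)" raises
# AttributeError; sorted() works on both. int() accepts bytes, so the
# change numbers sort numerically rather than lexicographically:
assert sorted(p4changes.keys(), key=int) == [b"3", b"12", b"105"]
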
@@ -1,1599 +1,1599 b'' | |||||
1 | # hg.py - repository classes for mercurial |
|
1 | # hg.py - repository classes for mercurial | |
2 | # |
|
2 | # | |
3 | # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com> |
|
3 | # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com> | |
4 | # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> |
|
4 | # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> | |
5 | # |
|
5 | # | |
6 | # This software may be used and distributed according to the terms of the |
|
6 | # This software may be used and distributed according to the terms of the | |
7 | # GNU General Public License version 2 or any later version. |
|
7 | # GNU General Public License version 2 or any later version. | |
8 |
|
8 | |||
9 | from __future__ import absolute_import |
|
9 | from __future__ import absolute_import | |
10 |
|
10 | |||
11 | import errno |
|
11 | import errno | |
12 | import os |
|
12 | import os | |
13 | import shutil |
|
13 | import shutil | |
14 | import stat |
|
14 | import stat | |
15 |
|
15 | |||
16 | from .i18n import _ |
|
16 | from .i18n import _ | |
17 | from .node import ( |
|
17 | from .node import ( | |
18 | hex, |
|
18 | hex, | |
19 | sha1nodeconstants, |
|
19 | sha1nodeconstants, | |
20 | short, |
|
20 | short, | |
21 | ) |
|
21 | ) | |
22 | from .pycompat import getattr |
|
22 | from .pycompat import getattr | |
23 |
|
23 | |||
24 | from . import ( |
|
24 | from . import ( | |
25 | bookmarks, |
|
25 | bookmarks, | |
26 | bundlerepo, |
|
26 | bundlerepo, | |
27 | cacheutil, |
|
27 | cacheutil, | |
28 | cmdutil, |
|
28 | cmdutil, | |
29 | destutil, |
|
29 | destutil, | |
30 | discovery, |
|
30 | discovery, | |
31 | error, |
|
31 | error, | |
32 | exchange, |
|
32 | exchange, | |
33 | extensions, |
|
33 | extensions, | |
34 | graphmod, |
|
34 | graphmod, | |
35 | httppeer, |
|
35 | httppeer, | |
36 | localrepo, |
|
36 | localrepo, | |
37 | lock, |
|
37 | lock, | |
38 | logcmdutil, |
|
38 | logcmdutil, | |
39 | logexchange, |
|
39 | logexchange, | |
40 | merge as mergemod, |
|
40 | merge as mergemod, | |
41 | mergestate as mergestatemod, |
|
41 | mergestate as mergestatemod, | |
42 | narrowspec, |
|
42 | narrowspec, | |
43 | phases, |
|
43 | phases, | |
44 | requirements, |
|
44 | requirements, | |
45 | scmutil, |
|
45 | scmutil, | |
46 | sshpeer, |
|
46 | sshpeer, | |
47 | statichttprepo, |
|
47 | statichttprepo, | |
48 | ui as uimod, |
|
48 | ui as uimod, | |
49 | unionrepo, |
|
49 | unionrepo, | |
50 | url, |
|
50 | url, | |
51 | util, |
|
51 | util, | |
52 | verify as verifymod, |
|
52 | verify as verifymod, | |
53 | vfs as vfsmod, |
|
53 | vfs as vfsmod, | |
54 | ) |
|
54 | ) | |
55 | from .utils import ( |
|
55 | from .utils import ( | |
56 | hashutil, |
|
56 | hashutil, | |
57 | stringutil, |
|
57 | stringutil, | |
58 | urlutil, |
|
58 | urlutil, | |
59 | ) |
|
59 | ) | |
60 |
|
60 | |||
61 |
|
61 | |||
62 | release = lock.release |
|
62 | release = lock.release | |
63 |
|
63 | |||
64 | # shared features |
|
64 | # shared features | |
65 | sharedbookmarks = b'bookmarks' |
|
65 | sharedbookmarks = b'bookmarks' | |
66 |
|
66 | |||
67 |
|
67 | |||
68 | def _local(path): |
|
68 | def _local(path): | |
69 | path = util.expandpath(urlutil.urllocalpath(path)) |
|
69 | path = util.expandpath(urlutil.urllocalpath(path)) | |
70 |
|
70 | |||
71 | try: |
|
71 | try: | |
72 | # we use os.stat() directly here instead of os.path.isfile() |
|
72 | # we use os.stat() directly here instead of os.path.isfile() | |
73 | # because the latter started returning `False` on invalid path |
|
73 | # because the latter started returning `False` on invalid path | |
74 | # exceptions starting in 3.8 and we care about handling |
|
74 | # exceptions starting in 3.8 and we care about handling | |
75 | # invalid paths specially here. |
|
75 | # invalid paths specially here. | |
76 | st = os.stat(path) |
|
76 | st = os.stat(path) | |
77 | isfile = stat.S_ISREG(st.st_mode) |
|
77 | isfile = stat.S_ISREG(st.st_mode) | |
78 | # Python 2 raises TypeError, Python 3 ValueError. |
|
78 | # Python 2 raises TypeError, Python 3 ValueError. | |
79 | except (TypeError, ValueError) as e: |
|
79 | except (TypeError, ValueError) as e: | |
80 | raise error.Abort( |
|
80 | raise error.Abort( | |
81 | _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e)) |
|
81 | _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e)) | |
82 | ) |
|
82 | ) | |
83 | except OSError: |
|
83 | except OSError: | |
84 | isfile = False |
|
84 | isfile = False | |
85 |
|
85 | |||
86 | return isfile and bundlerepo or localrepo |
|
86 | return isfile and bundlerepo or localrepo | |
87 |
|
87 | |||
88 |
|
88 | |||
89 | def addbranchrevs(lrepo, other, branches, revs): |
|
89 | def addbranchrevs(lrepo, other, branches, revs): | |
90 | peer = other.peer() # a courtesy to callers using a localrepo for other |
|
90 | peer = other.peer() # a courtesy to callers using a localrepo for other | |
91 | hashbranch, branches = branches |
|
91 | hashbranch, branches = branches | |
92 | if not hashbranch and not branches: |
|
92 | if not hashbranch and not branches: | |
93 | x = revs or None |
|
93 | x = revs or None | |
94 | if revs: |
|
94 | if revs: | |
95 | y = revs[0] |
|
95 | y = revs[0] | |
96 | else: |
|
96 | else: | |
97 | y = None |
|
97 | y = None | |
98 | return x, y |
|
98 | return x, y | |
99 | if revs: |
|
99 | if revs: | |
100 | revs = list(revs) |
|
100 | revs = list(revs) | |
101 | else: |
|
101 | else: | |
102 | revs = [] |
|
102 | revs = [] | |
103 |
|
103 | |||
104 | if not peer.capable(b'branchmap'): |
|
104 | if not peer.capable(b'branchmap'): | |
105 | if branches: |
|
105 | if branches: | |
106 | raise error.Abort(_(b"remote branch lookup not supported")) |
|
106 | raise error.Abort(_(b"remote branch lookup not supported")) | |
107 | revs.append(hashbranch) |
|
107 | revs.append(hashbranch) | |
108 | return revs, revs[0] |
|
108 | return revs, revs[0] | |
109 |
|
109 | |||
110 | with peer.commandexecutor() as e: |
|
110 | with peer.commandexecutor() as e: | |
111 | branchmap = e.callcommand(b'branchmap', {}).result() |
|
111 | branchmap = e.callcommand(b'branchmap', {}).result() | |
112 |
|
112 | |||
113 | def primary(branch): |
|
113 | def primary(branch): | |
114 | if branch == b'.': |
|
114 | if branch == b'.': | |
115 | if not lrepo: |
|
115 | if not lrepo: | |
116 | raise error.Abort(_(b"dirstate branch not accessible")) |
|
116 | raise error.Abort(_(b"dirstate branch not accessible")) | |
117 | branch = lrepo.dirstate.branch() |
|
117 | branch = lrepo.dirstate.branch() | |
118 | if branch in branchmap: |
|
118 | if branch in branchmap: | |
119 | revs.extend(hex(r) for r in reversed(branchmap[branch])) |
|
119 | revs.extend(hex(r) for r in reversed(branchmap[branch])) | |
120 | return True |
|
120 | return True | |
121 | else: |
|
121 | else: | |
122 | return False |
|
122 | return False | |
123 |
|
123 | |||
124 | for branch in branches: |
|
124 | for branch in branches: | |
125 | if not primary(branch): |
|
125 | if not primary(branch): | |
126 | raise error.RepoLookupError(_(b"unknown branch '%s'") % branch) |
|
126 | raise error.RepoLookupError(_(b"unknown branch '%s'") % branch) | |
127 | if hashbranch: |
|
127 | if hashbranch: | |
128 | if not primary(hashbranch): |
|
128 | if not primary(hashbranch): | |
129 | revs.append(hashbranch) |
|
129 | revs.append(hashbranch) | |
130 | return revs, revs[0] |
|
130 | return revs, revs[0] | |
131 |
|
131 | |||
132 |
|
132 | |||
133 | def parseurl(path, branches=None): |
|
133 | def parseurl(path, branches=None): | |
134 | '''parse url#branch, returning (url, (branch, branches))''' |
|
134 | '''parse url#branch, returning (url, (branch, branches))''' | |
135 | msg = b'parseurl(...) moved to mercurial.utils.urlutil' |
|
135 | msg = b'parseurl(...) moved to mercurial.utils.urlutil' | |
136 | util.nouideprecwarn(msg, b'6.0', stacklevel=2) |
|
136 | util.nouideprecwarn(msg, b'6.0', stacklevel=2) | |
137 | return urlutil.parseurl(path, branches=branches) |
|
137 | return urlutil.parseurl(path, branches=branches) | |
138 |
|
138 | |||
139 |
|
139 | |||
140 | schemes = { |
|
140 | schemes = { | |
141 | b'bundle': bundlerepo, |
|
141 | b'bundle': bundlerepo, | |
142 | b'union': unionrepo, |
|
142 | b'union': unionrepo, | |
143 | b'file': _local, |
|
143 | b'file': _local, | |
144 | b'http': httppeer, |
|
144 | b'http': httppeer, | |
145 | b'https': httppeer, |
|
145 | b'https': httppeer, | |
146 | b'ssh': sshpeer, |
|
146 | b'ssh': sshpeer, | |
147 | b'static-http': statichttprepo, |
|
147 | b'static-http': statichttprepo, | |
148 | } |
|
148 | } | |
149 |
|
149 | |||
150 |
|
150 | |||
151 | def _peerlookup(path): |
|
151 | def _peerlookup(path): | |
152 | u = urlutil.url(path) |
|
152 | u = urlutil.url(path) | |
153 | scheme = u.scheme or b'file' |
|
153 | scheme = u.scheme or b'file' | |
154 | thing = schemes.get(scheme) or schemes[b'file'] |
|
154 | thing = schemes.get(scheme) or schemes[b'file'] | |
155 | try: |
|
155 | try: | |
156 | return thing(path) |
|
156 | return thing(path) | |
157 | except TypeError: |
|
157 | except TypeError: | |
158 | # we can't test callable(thing) because 'thing' can be an unloaded |
|
158 | # we can't test callable(thing) because 'thing' can be an unloaded | |
159 | # module that implements __call__ |
|
159 | # module that implements __call__ | |
160 | if not util.safehasattr(thing, b'instance'): |
|
160 | if not util.safehasattr(thing, b'instance'): | |
161 | raise |
|
161 | raise | |
162 | return thing |
|
162 | return thing | |
163 |
|
163 | |||
164 |
|
164 | |||
165 | def islocal(repo): |
|
165 | def islocal(repo): | |
166 | '''return true if repo (or path pointing to repo) is local''' |
|
166 | '''return true if repo (or path pointing to repo) is local''' | |
167 | if isinstance(repo, bytes): |
|
167 | if isinstance(repo, bytes): | |
168 | try: |
|
168 | try: | |
169 | return _peerlookup(repo).islocal(repo) |
|
169 | return _peerlookup(repo).islocal(repo) | |
170 | except AttributeError: |
|
170 | except AttributeError: | |
171 | return False |
|
171 | return False | |
172 | return repo.local() |
|
172 | return repo.local() | |
173 |
|
173 | |||
174 |
|
174 | |||
175 | def openpath(ui, path, sendaccept=True): |
|
175 | def openpath(ui, path, sendaccept=True): | |
176 | '''open path with open if local, url.open if remote''' |
|
176 | '''open path with open if local, url.open if remote''' | |
177 | pathurl = urlutil.url(path, parsequery=False, parsefragment=False) |
|
177 | pathurl = urlutil.url(path, parsequery=False, parsefragment=False) | |
178 | if pathurl.islocal(): |
|
178 | if pathurl.islocal(): | |
179 | return util.posixfile(pathurl.localpath(), b'rb') |
|
179 | return util.posixfile(pathurl.localpath(), b'rb') | |
180 | else: |
|
180 | else: | |
181 | return url.open(ui, path, sendaccept=sendaccept) |
|
181 | return url.open(ui, path, sendaccept=sendaccept) | |
182 |
|
182 | |||
183 |
|
183 | |||
184 | # a list of (ui, repo) functions called for wire peer initialization |
|
184 | # a list of (ui, repo) functions called for wire peer initialization | |
185 | wirepeersetupfuncs = [] |
|
185 | wirepeersetupfuncs = [] | |
186 |
|
186 | |||
187 |
|
187 | |||
188 | def _peerorrepo( |
|
188 | def _peerorrepo( | |
189 | ui, path, create=False, presetupfuncs=None, intents=None, createopts=None |
|
189 | ui, path, create=False, presetupfuncs=None, intents=None, createopts=None | |
190 | ): |
|
190 | ): | |
191 | """return a repository object for the specified path""" |
|
191 | """return a repository object for the specified path""" | |
192 | obj = _peerlookup(path).instance( |
|
192 | obj = _peerlookup(path).instance( | |
193 | ui, path, create, intents=intents, createopts=createopts |
|
193 | ui, path, create, intents=intents, createopts=createopts | |
194 | ) |
|
194 | ) | |
195 | ui = getattr(obj, "ui", ui) |
|
195 | ui = getattr(obj, "ui", ui) | |
196 | for f in presetupfuncs or []: |
|
196 | for f in presetupfuncs or []: | |
197 | f(ui, obj) |
|
197 | f(ui, obj) | |
198 | ui.log(b'extension', b'- executing reposetup hooks\n') |
|
198 | ui.log(b'extension', b'- executing reposetup hooks\n') | |
199 | with util.timedcm('all reposetup') as allreposetupstats: |
|
199 | with util.timedcm('all reposetup') as allreposetupstats: | |
200 | for name, module in extensions.extensions(ui): |
|
200 | for name, module in extensions.extensions(ui): | |
201 | ui.log(b'extension', b' - running reposetup for %s\n', name) |
|
201 | ui.log(b'extension', b' - running reposetup for %s\n', name) | |
202 | hook = getattr(module, 'reposetup', None) |
|
202 | hook = getattr(module, 'reposetup', None) | |
203 | if hook: |
|
203 | if hook: | |
204 | with util.timedcm('reposetup %r', name) as stats: |
|
204 | with util.timedcm('reposetup %r', name) as stats: | |
205 | hook(ui, obj) |
|
205 | hook(ui, obj) | |
206 | ui.log( |
|
206 | ui.log( | |
207 | b'extension', b' > reposetup for %s took %s\n', name, stats |
|
207 | b'extension', b' > reposetup for %s took %s\n', name, stats | |
208 | ) |
|
208 | ) | |
209 | ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats) |
|
209 | ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats) | |
210 | if not obj.local(): |
|
210 | if not obj.local(): | |
211 | for f in wirepeersetupfuncs: |
|
211 | for f in wirepeersetupfuncs: | |
212 | f(ui, obj) |
|
212 | f(ui, obj) | |
213 | return obj |
|
213 | return obj | |
214 |
|
214 | |||
215 |
|
215 | |||
216 | def repository( |
|
216 | def repository( | |
217 | ui, |
|
217 | ui, | |
218 | path=b'', |
|
218 | path=b'', | |
219 | create=False, |
|
219 | create=False, | |
220 | presetupfuncs=None, |
|
220 | presetupfuncs=None, | |
221 | intents=None, |
|
221 | intents=None, | |
222 | createopts=None, |
|
222 | createopts=None, | |
223 | ): |
|
223 | ): | |
224 | """return a repository object for the specified path""" |
|
224 | """return a repository object for the specified path""" | |
225 | peer = _peerorrepo( |
|
225 | peer = _peerorrepo( | |
226 | ui, |
|
226 | ui, | |
227 | path, |
|
227 | path, | |
228 | create, |
|
228 | create, | |
229 | presetupfuncs=presetupfuncs, |
|
229 | presetupfuncs=presetupfuncs, | |
230 | intents=intents, |
|
230 | intents=intents, | |
231 | createopts=createopts, |
|
231 | createopts=createopts, | |
232 | ) |
|
232 | ) | |
233 | repo = peer.local() |
|
233 | repo = peer.local() | |
234 | if not repo: |
|
234 | if not repo: | |
235 | raise error.Abort( |
|
235 | raise error.Abort( | |
236 | _(b"repository '%s' is not local") % (path or peer.url()) |
|
236 | _(b"repository '%s' is not local") % (path or peer.url()) | |
237 | ) |
|
237 | ) | |
238 | return repo.filtered(b'visible') |
|
238 | return repo.filtered(b'visible') | |
239 |
|
239 | |||
240 |
|
240 | |||
241 | def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None): |
|
241 | def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None): | |
242 | '''return a repository peer for the specified path''' |
|
242 | '''return a repository peer for the specified path''' | |
243 | rui = remoteui(uiorrepo, opts) |
|
243 | rui = remoteui(uiorrepo, opts) | |
244 | return _peerorrepo( |
|
244 | return _peerorrepo( | |
245 | rui, path, create, intents=intents, createopts=createopts |
|
245 | rui, path, create, intents=intents, createopts=createopts | |
246 | ).peer() |
|
246 | ).peer() | |
247 |
|
247 | |||
248 |
|
248 | |||
249 | def defaultdest(source): |
|
249 | def defaultdest(source): | |
250 | """return default destination of clone if none is given |
|
250 | """return default destination of clone if none is given | |
251 |
|
251 | |||
252 | >>> defaultdest(b'foo') |
|
252 | >>> defaultdest(b'foo') | |
253 | 'foo' |
|
253 | 'foo' | |
254 | >>> defaultdest(b'/foo/bar') |
|
254 | >>> defaultdest(b'/foo/bar') | |
255 | 'bar' |
|
255 | 'bar' | |
256 | >>> defaultdest(b'/') |
|
256 | >>> defaultdest(b'/') | |
257 | '' |
|
257 | '' | |
258 | >>> defaultdest(b'') |
|
258 | >>> defaultdest(b'') | |
259 | '' |
|
259 | '' | |
260 | >>> defaultdest(b'http://example.org/') |
|
260 | >>> defaultdest(b'http://example.org/') | |
261 | '' |
|
261 | '' | |
262 | >>> defaultdest(b'http://example.org/foo/') |
|
262 | >>> defaultdest(b'http://example.org/foo/') | |
263 | 'foo' |
|
263 | 'foo' | |
264 | """ |
|
264 | """ | |
265 | path = urlutil.url(source).path |
|
265 | path = urlutil.url(source).path | |
266 | if not path: |
|
266 | if not path: | |
267 | return b'' |
|
267 | return b'' | |
268 | return os.path.basename(os.path.normpath(path)) |
|
268 | return os.path.basename(os.path.normpath(path)) | |
269 |
|
269 | |||
270 |
|
270 | |||
271 | def sharedreposource(repo): |
|
271 | def sharedreposource(repo): | |
272 | """Returns repository object for source repository of a shared repo. |
|
272 | """Returns repository object for source repository of a shared repo. | |
273 |
|
273 | |||
274 | If repo is not a shared repository, returns None. |
|
274 | If repo is not a shared repository, returns None. | |
275 | """ |
|
275 | """ | |
276 | if repo.sharedpath == repo.path: |
|
276 | if repo.sharedpath == repo.path: | |
277 | return None |
|
277 | return None | |
278 |
|
278 | |||
279 | if util.safehasattr(repo, b'srcrepo') and repo.srcrepo: |
|
279 | if util.safehasattr(repo, b'srcrepo') and repo.srcrepo: | |
280 | return repo.srcrepo |
|
280 | return repo.srcrepo | |
281 |
|
281 | |||
282 | # the sharedpath always ends in the .hg; we want the path to the repo |
|
282 | # the sharedpath always ends in the .hg; we want the path to the repo | |
283 | source = repo.vfs.split(repo.sharedpath)[0] |
|
283 | source = repo.vfs.split(repo.sharedpath)[0] | |
284 | srcurl, branches = urlutil.parseurl(source) |
|
284 | srcurl, branches = urlutil.parseurl(source) | |
285 | srcrepo = repository(repo.ui, srcurl) |
|
285 | srcrepo = repository(repo.ui, srcurl) | |
286 | repo.srcrepo = srcrepo |
|
286 | repo.srcrepo = srcrepo | |
287 | return srcrepo |
|
287 | return srcrepo | |
288 |
|
288 | |||
289 |
|
289 | |||
290 | def share( |
|
290 | def share( | |
291 | ui, |
|
291 | ui, | |
292 | source, |
|
292 | source, | |
293 | dest=None, |
|
293 | dest=None, | |
294 | update=True, |
|
294 | update=True, | |
295 | bookmarks=True, |
|
295 | bookmarks=True, | |
296 | defaultpath=None, |
|
296 | defaultpath=None, | |
297 | relative=False, |
|
297 | relative=False, | |
298 | ): |
|
298 | ): | |
299 | '''create a shared repository''' |
|
299 | '''create a shared repository''' | |
300 |
|
300 | |||
301 | if not islocal(source): |
|
301 | if not islocal(source): | |
302 | raise error.Abort(_(b'can only share local repositories')) |
|
302 | raise error.Abort(_(b'can only share local repositories')) | |
303 |
|
303 | |||
304 | if not dest: |
|
304 | if not dest: | |
305 | dest = defaultdest(source) |
|
305 | dest = defaultdest(source) | |
306 | else: |
|
306 | else: | |
307 | dest = urlutil.get_clone_path(ui, dest)[1] |
|
307 | dest = urlutil.get_clone_path(ui, dest)[1] | |
308 |
|
308 | |||
309 | if isinstance(source, bytes): |
|
309 | if isinstance(source, bytes): | |
310 | origsource, source, branches = urlutil.get_clone_path(ui, source) |
|
310 | origsource, source, branches = urlutil.get_clone_path(ui, source) | |
311 | srcrepo = repository(ui, source) |
|
311 | srcrepo = repository(ui, source) | |
312 | rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None) |
|
312 | rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None) | |
313 | else: |
|
313 | else: | |
314 | srcrepo = source.local() |
|
314 | srcrepo = source.local() | |
315 | checkout = None |
|
315 | checkout = None | |
316 |
|
316 | |||
317 | shareditems = set() |
|
317 | shareditems = set() | |
318 | if bookmarks: |
|
318 | if bookmarks: | |
319 | shareditems.add(sharedbookmarks) |
|
319 | shareditems.add(sharedbookmarks) | |
320 |
|
320 | |||
321 | r = repository( |
|
321 | r = repository( | |
322 | ui, |
|
322 | ui, | |
323 | dest, |
|
323 | dest, | |
324 | create=True, |
|
324 | create=True, | |
325 | createopts={ |
|
325 | createopts={ | |
326 | b'sharedrepo': srcrepo, |
|
326 | b'sharedrepo': srcrepo, | |
327 | b'sharedrelative': relative, |
|
327 | b'sharedrelative': relative, | |
328 | b'shareditems': shareditems, |
|
328 | b'shareditems': shareditems, | |
329 | }, |
|
329 | }, | |
330 | ) |
|
330 | ) | |
331 |
|
331 | |||
332 | postshare(srcrepo, r, defaultpath=defaultpath) |
|
332 | postshare(srcrepo, r, defaultpath=defaultpath) | |
333 | r = repository(ui, dest) |
|
333 | r = repository(ui, dest) | |
334 | _postshareupdate(r, update, checkout=checkout) |
|
334 | _postshareupdate(r, update, checkout=checkout) | |
335 | return r |
|
335 | return r | |
336 |
|
336 | |||
337 |
|
337 | |||
338 | def _prependsourcehgrc(repo): |
|
338 | def _prependsourcehgrc(repo): | |
339 | """copies the source repo config and prepend it in current repo .hg/hgrc |
|
339 | """copies the source repo config and prepend it in current repo .hg/hgrc | |
340 | on unshare. This is only done if the share was perfomed using share safe |
|
340 | on unshare. This is only done if the share was perfomed using share safe | |
341 | method where we share config of source in shares""" |
|
341 | method where we share config of source in shares""" | |
342 | srcvfs = vfsmod.vfs(repo.sharedpath) |
|
342 | srcvfs = vfsmod.vfs(repo.sharedpath) | |
343 | dstvfs = vfsmod.vfs(repo.path) |
|
343 | dstvfs = vfsmod.vfs(repo.path) | |
344 |
|
344 | |||
345 | if not srcvfs.exists(b'hgrc'): |
|
345 | if not srcvfs.exists(b'hgrc'): | |
346 | return |
|
346 | return | |
347 |
|
347 | |||
348 | currentconfig = b'' |
|
348 | currentconfig = b'' | |
349 | if dstvfs.exists(b'hgrc'): |
|
349 | if dstvfs.exists(b'hgrc'): | |
350 | currentconfig = dstvfs.read(b'hgrc') |
|
350 | currentconfig = dstvfs.read(b'hgrc') | |
351 |
|
351 | |||
352 | with dstvfs(b'hgrc', b'wb') as fp: |
|
352 | with dstvfs(b'hgrc', b'wb') as fp: | |
353 | sourceconfig = srcvfs.read(b'hgrc') |
|
353 | sourceconfig = srcvfs.read(b'hgrc') | |
354 | fp.write(b"# Config copied from shared source\n") |
|
354 | fp.write(b"# Config copied from shared source\n") | |
355 | fp.write(sourceconfig) |
|
355 | fp.write(sourceconfig) | |
356 | fp.write(b'\n') |
|
356 | fp.write(b'\n') | |
357 | fp.write(currentconfig) |
|
357 | fp.write(currentconfig) | |
358 |
|
358 | |||
359 |
|
359 | |||
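To make the write sequence above concrete, here is a pure-Python mirror of the resulting file layout (an illustrative sketch, not part of this module; the function name and config snippets are invented):

    def prepend_source_hgrc(source_cfg, dest_cfg):
        # mirrors the write sequence in _prependsourcehgrc above:
        # header, source config, separator newline, then the old contents
        return (
            b"# Config copied from shared source\n"
            + source_cfg
            + b"\n"
            + dest_cfg
        )

    assert prepend_source_hgrc(b"[hooks]\n", b"[paths]\n") == (
        b"# Config copied from shared source\n[hooks]\n\n[paths]\n"
    )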
360 | def unshare(ui, repo): |
|
360 | def unshare(ui, repo): | |
361 | """convert a shared repository to a normal one |
|
361 | """convert a shared repository to a normal one | |
362 |
|
362 | |||
363 | Copy the store data to the repo and remove the sharedpath data. |
|
363 | Copy the store data to the repo and remove the sharedpath data. | |
364 |
|
364 | |||
365 | Returns a new repository object representing the unshared repository. |
|
365 | Returns a new repository object representing the unshared repository. | |
366 |
|
366 | |||
367 | The passed repository object is not usable after this function is |
|
367 | The passed repository object is not usable after this function is | |
368 | called. |
|
368 | called. | |
369 | """ |
|
369 | """ | |
370 |
|
370 | |||
371 | with repo.lock(): |
|
371 | with repo.lock(): | |
372 | # we use locks here because if we race with commit, we |
|
372 | # we use locks here because if we race with commit, we | |
373 | # can end up with extra data in the cloned revlogs that's |
|
373 | # can end up with extra data in the cloned revlogs that's | |
374 | # not pointed to by changesets, thus causing verify to |
|
374 | # not pointed to by changesets, thus causing verify to | |
375 | # fail |
|
375 | # fail | |
376 | destlock = copystore(ui, repo, repo.path) |
|
376 | destlock = copystore(ui, repo, repo.path) | |
377 | with destlock or util.nullcontextmanager(): |
|
377 | with destlock or util.nullcontextmanager(): | |
378 | if requirements.SHARESAFE_REQUIREMENT in repo.requirements: |
|
378 | if requirements.SHARESAFE_REQUIREMENT in repo.requirements: | |
379 | # we were sharing .hg/hgrc of the share source with the current |
|
379 | # we were sharing .hg/hgrc of the share source with the current | |
380 | # repo. We need to copy that while unsharing otherwise it can |
|
380 | # repo. We need to copy that while unsharing otherwise it can | |
381 | # disable hooks and other checks |
|
381 | # disable hooks and other checks | |
382 | _prependsourcehgrc(repo) |
|
382 | _prependsourcehgrc(repo) | |
383 |
|
383 | |||
384 | sharefile = repo.vfs.join(b'sharedpath') |
|
384 | sharefile = repo.vfs.join(b'sharedpath') | |
385 | util.rename(sharefile, sharefile + b'.old') |
|
385 | util.rename(sharefile, sharefile + b'.old') | |
386 |
|
386 | |||
387 | repo.requirements.discard(requirements.SHARED_REQUIREMENT) |
|
387 | repo.requirements.discard(requirements.SHARED_REQUIREMENT) | |
388 | repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT) |
|
388 | repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT) | |
389 | scmutil.writereporequirements(repo) |
|
389 | scmutil.writereporequirements(repo) | |
390 |
|
390 | |||
391 | # Removing share changes some fundamental properties of the repo instance. |
|
391 | # Removing share changes some fundamental properties of the repo instance. | |
392 | # So we instantiate a new repo object and operate on it rather than |
|
392 | # So we instantiate a new repo object and operate on it rather than | |
393 | # try to keep the existing repo usable. |
|
393 | # try to keep the existing repo usable. | |
394 | newrepo = repository(repo.baseui, repo.root, create=False) |
|
394 | newrepo = repository(repo.baseui, repo.root, create=False) | |
395 |
|
395 | |||
396 | # TODO: figure out how to access subrepos that exist, but were previously |
|
396 | # TODO: figure out how to access subrepos that exist, but were previously | |
397 | # removed from .hgsub |
|
397 | # removed from .hgsub | |
398 | c = newrepo[b'.'] |
|
398 | c = newrepo[b'.'] | |
399 | subs = c.substate |
|
399 | subs = c.substate | |
400 | for s in sorted(subs): |
|
400 | for s in sorted(subs): | |
401 | c.sub(s).unshare() |
|
401 | c.sub(s).unshare() | |
402 |
|
402 | |||
403 | localrepo.poisonrepository(repo) |
|
403 | localrepo.poisonrepository(repo) | |
404 |
|
404 | |||
405 | return newrepo |
|
405 | return newrepo | |
406 |
|
406 | |||
407 |
|
407 | |||
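A short usage sketch, assuming a hypothetical shared checkout path; only the returned object may be used afterwards, since the old one is poisoned below:

    from mercurial import hg, ui as uimod

    ui_ = uimod.ui.load()
    repo = hg.repository(ui_, b'/srv/checkouts/main')  # hypothetical share
    repo = hg.unshare(ui_, repo)  # keep only the returned repository object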
408 | def postshare(sourcerepo, destrepo, defaultpath=None): |
|
408 | def postshare(sourcerepo, destrepo, defaultpath=None): | |
409 | """Called after a new shared repo is created. |
|
409 | """Called after a new shared repo is created. | |
410 |
|
410 | |||
411 | The new repo only has a requirements file and a pointer to the source. |
|
411 | The new repo only has a requirements file and a pointer to the source. | |
412 | This function configures additional shared data. |
|
412 | This function configures additional shared data. | |
413 |
|
413 | |||
414 | Extensions can wrap this function and write additional entries to |
|
414 | Extensions can wrap this function and write additional entries to | |
415 | destrepo/.hg/shared to indicate additional pieces of data to be shared. |
|
415 | destrepo/.hg/shared to indicate additional pieces of data to be shared. | |
416 | """ |
|
416 | """ | |
417 | default = defaultpath or sourcerepo.ui.config(b'paths', b'default') |
|
417 | default = defaultpath or sourcerepo.ui.config(b'paths', b'default') | |
418 | if default: |
|
418 | if default: | |
419 | template = b'[paths]\ndefault = %s\n' |
|
419 | template = b'[paths]\ndefault = %s\n' | |
420 | destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default)) |
|
420 | destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default)) | |
421 | if requirements.NARROW_REQUIREMENT in sourcerepo.requirements: |
|
421 | if requirements.NARROW_REQUIREMENT in sourcerepo.requirements: | |
422 | with destrepo.wlock(): |
|
422 | with destrepo.wlock(): | |
423 | narrowspec.copytoworkingcopy(destrepo) |
|
423 | narrowspec.copytoworkingcopy(destrepo) | |
424 |
|
424 | |||
425 |
|
425 | |||
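Since the docstring invites extensions to wrap postshare(), here is a hedged sketch of what such a wrapper could look like; the b'myitem' entry is invented for illustration:

    from mercurial import extensions, hg

    def _wrappedpostshare(orig, sourcerepo, destrepo, **kwargs):
        orig(sourcerepo, destrepo, **kwargs)
        # record one extra (hypothetical) item in destrepo/.hg/shared
        with destrepo.vfs(b'shared', b'ab') as fp:
            fp.write(b'myitem\n')

    def extsetup(ui):
        extensions.wrapfunction(hg, 'postshare', _wrappedpostshare)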
426 | def _postshareupdate(repo, update, checkout=None): |
|
426 | def _postshareupdate(repo, update, checkout=None): | |
427 | """Maybe perform a working directory update after a shared repo is created. |
|
427 | """Maybe perform a working directory update after a shared repo is created. | |
428 |
|
428 | |||
429 | ``update`` can be a boolean or a revision to update to. |
|
429 | ``update`` can be a boolean or a revision to update to. | |
430 | """ |
|
430 | """ | |
431 | if not update: |
|
431 | if not update: | |
432 | return |
|
432 | return | |
433 |
|
433 | |||
434 | repo.ui.status(_(b"updating working directory\n")) |
|
434 | repo.ui.status(_(b"updating working directory\n")) | |
435 | if update is not True: |
|
435 | if update is not True: | |
436 | checkout = update |
|
436 | checkout = update | |
437 | for test in (checkout, b'default', b'tip'): |
|
437 | for test in (checkout, b'default', b'tip'): | |
438 | if test is None: |
|
438 | if test is None: | |
439 | continue |
|
439 | continue | |
440 | try: |
|
440 | try: | |
441 | uprev = repo.lookup(test) |
|
441 | uprev = repo.lookup(test) | |
442 | break |
|
442 | break | |
443 | except error.RepoLookupError: |
|
443 | except error.RepoLookupError: | |
444 | continue |
|
444 | continue | |
445 | _update(repo, uprev) |
|
445 | _update(repo, uprev) | |
446 |
|
446 | |||
447 |
|
447 | |||
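Callers reach this through share(); passing a revision instead of a boolean as ``update`` picks the checkout, with the b'default' and b'tip' fallbacks coded above. A sketch with hypothetical paths and revision name:

    from mercurial import hg, ui as uimod

    ui_ = uimod.ui.load()
    # b'stable' is a hypothetical revision; if it cannot be resolved, the
    # checkout falls back to b'default' and then b'tip'
    repo = hg.share(ui_, b'/srv/repos/main', dest=b'/tmp/wc', update=b'stable')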
448 | def copystore(ui, srcrepo, destpath): |
|
448 | def copystore(ui, srcrepo, destpath): | |
449 | """copy files from store of srcrepo in destpath |
|
449 | """copy files from store of srcrepo in destpath | |
450 |
|
450 | |||
451 | returns destlock |
|
451 | returns destlock | |
452 | """ |
|
452 | """ | |
453 | destlock = None |
|
453 | destlock = None | |
454 | try: |
|
454 | try: | |
455 | hardlink = None |
|
455 | hardlink = None | |
456 | topic = _(b'linking') if hardlink else _(b'copying') |
|
456 | topic = _(b'linking') if hardlink else _(b'copying') | |
457 | with ui.makeprogress(topic, unit=_(b'files')) as progress: |
|
457 | with ui.makeprogress(topic, unit=_(b'files')) as progress: | |
458 | num = 0 |
|
458 | num = 0 | |
459 | srcpublishing = srcrepo.publishing() |
|
459 | srcpublishing = srcrepo.publishing() | |
460 | srcvfs = vfsmod.vfs(srcrepo.sharedpath) |
|
460 | srcvfs = vfsmod.vfs(srcrepo.sharedpath) | |
461 | dstvfs = vfsmod.vfs(destpath) |
|
461 | dstvfs = vfsmod.vfs(destpath) | |
462 | for f in srcrepo.store.copylist(): |
|
462 | for f in srcrepo.store.copylist(): | |
463 | if srcpublishing and f.endswith(b'phaseroots'): |
|
463 | if srcpublishing and f.endswith(b'phaseroots'): | |
464 | continue |
|
464 | continue | |
465 | dstbase = os.path.dirname(f) |
|
465 | dstbase = os.path.dirname(f) | |
466 | if dstbase and not dstvfs.exists(dstbase): |
|
466 | if dstbase and not dstvfs.exists(dstbase): | |
467 | dstvfs.mkdir(dstbase) |
|
467 | dstvfs.mkdir(dstbase) | |
468 | if srcvfs.exists(f): |
|
468 | if srcvfs.exists(f): | |
469 | if f.endswith(b'data'): |
|
469 | if f.endswith(b'data'): | |
470 | # 'dstbase' may be empty (e.g. revlog format 0) |
|
470 | # 'dstbase' may be empty (e.g. revlog format 0) | |
471 | lockfile = os.path.join(dstbase, b"lock") |
|
471 | lockfile = os.path.join(dstbase, b"lock") | |
472 | # lock to avoid premature writing to the target |
|
472 | # lock to avoid premature writing to the target | |
473 | destlock = lock.lock(dstvfs, lockfile) |
|
473 | destlock = lock.lock(dstvfs, lockfile) | |
474 | hardlink, n = util.copyfiles( |
|
474 | hardlink, n = util.copyfiles( | |
475 | srcvfs.join(f), dstvfs.join(f), hardlink, progress |
|
475 | srcvfs.join(f), dstvfs.join(f), hardlink, progress | |
476 | ) |
|
476 | ) | |
477 | num += n |
|
477 | num += n | |
478 | if hardlink: |
|
478 | if hardlink: | |
479 | ui.debug(b"linked %d files\n" % num) |
|
479 | ui.debug(b"linked %d files\n" % num) | |
480 | else: |
|
480 | else: | |
481 | ui.debug(b"copied %d files\n" % num) |
|
481 | ui.debug(b"copied %d files\n" % num) | |
482 | return destlock |
|
482 | return destlock | |
483 | except: # re-raises |
|
483 | except: # re-raises | |
484 | release(destlock) |
|
484 | release(destlock) | |
485 | raise |
|
485 | raise | |
486 |
|
486 | |||
487 |
|
487 | |||
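A hedged sketch of calling copystore() directly, as clone() does further down; the paths are hypothetical, and any returned lock must be released:

    from mercurial import hg, lock as lockmod, ui as uimod

    ui_ = uimod.ui.load()
    srcrepo = hg.repository(ui_, b'/srv/repos/main')  # hypothetical source
    destlock = hg.copystore(ui_, srcrepo, b'/tmp/copy/.hg')
    try:
        pass  # finish populating the destination while it is still locked
    finally:
        lockmod.release(destlock)  # release() tolerates a None lock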
488 | def clonewithshare( |
|
488 | def clonewithshare( | |
489 | ui, |
|
489 | ui, | |
490 | peeropts, |
|
490 | peeropts, | |
491 | sharepath, |
|
491 | sharepath, | |
492 | source, |
|
492 | source, | |
493 | srcpeer, |
|
493 | srcpeer, | |
494 | dest, |
|
494 | dest, | |
495 | pull=False, |
|
495 | pull=False, | |
496 | rev=None, |
|
496 | rev=None, | |
497 | update=True, |
|
497 | update=True, | |
498 | stream=False, |
|
498 | stream=False, | |
499 | ): |
|
499 | ): | |
500 | """Perform a clone using a shared repo. |
|
500 | """Perform a clone using a shared repo. | |
501 |
|
501 | |||
502 | The store for the repository will be located at <sharepath>/.hg. The |
|
502 | The store for the repository will be located at <sharepath>/.hg. The | |
503 | specified revisions will be cloned or pulled from "source". A shared repo |
|
503 | specified revisions will be cloned or pulled from "source". A shared repo | |
504 | will be created at "dest" and a working copy will be created if "update" is |
|
504 | will be created at "dest" and a working copy will be created if "update" is | |
505 | True. |
|
505 | True. | |
506 | """ |
|
506 | """ | |
507 | revs = None |
|
507 | revs = None | |
508 | if rev: |
|
508 | if rev: | |
509 | if not srcpeer.capable(b'lookup'): |
|
509 | if not srcpeer.capable(b'lookup'): | |
510 | raise error.Abort( |
|
510 | raise error.Abort( | |
511 | _( |
|
511 | _( | |
512 | b"src repository does not support " |
|
512 | b"src repository does not support " | |
513 | b"revision lookup and so doesn't " |
|
513 | b"revision lookup and so doesn't " | |
514 | b"support clone by revision" |
|
514 | b"support clone by revision" | |
515 | ) |
|
515 | ) | |
516 | ) |
|
516 | ) | |
517 |
|
517 | |||
518 | # TODO this is batchable. |
|
518 | # TODO this is batchable. | |
519 | remoterevs = [] |
|
519 | remoterevs = [] | |
520 | for r in rev: |
|
520 | for r in rev: | |
521 | with srcpeer.commandexecutor() as e: |
|
521 | with srcpeer.commandexecutor() as e: | |
522 | remoterevs.append( |
|
522 | remoterevs.append( | |
523 | e.callcommand( |
|
523 | e.callcommand( | |
524 | b'lookup', |
|
524 | b'lookup', | |
525 | { |
|
525 | { | |
526 | b'key': r, |
|
526 | b'key': r, | |
527 | }, |
|
527 | }, | |
528 | ).result() |
|
528 | ).result() | |
529 | ) |
|
529 | ) | |
530 | revs = remoterevs |
|
530 | revs = remoterevs | |
531 |
|
531 | |||
532 | # Obtain a lock before checking for or cloning the pooled repo; otherwise |
|
532 | # Obtain a lock before checking for or cloning the pooled repo; otherwise | |
533 | # two clients may race creating or populating it. |
|
533 | # two clients may race creating or populating it. | |
534 | pooldir = os.path.dirname(sharepath) |
|
534 | pooldir = os.path.dirname(sharepath) | |
535 | # lock class requires the directory to exist. |
|
535 | # lock class requires the directory to exist. | |
536 | try: |
|
536 | try: | |
537 | util.makedir(pooldir, False) |
|
537 | util.makedir(pooldir, False) | |
538 | except OSError as e: |
|
538 | except OSError as e: | |
539 | if e.errno != errno.EEXIST: |
|
539 | if e.errno != errno.EEXIST: | |
540 | raise |
|
540 | raise | |
541 |
|
541 | |||
542 | poolvfs = vfsmod.vfs(pooldir) |
|
542 | poolvfs = vfsmod.vfs(pooldir) | |
543 | basename = os.path.basename(sharepath) |
|
543 | basename = os.path.basename(sharepath) | |
544 |
|
544 | |||
545 | with lock.lock(poolvfs, b'%s.lock' % basename): |
|
545 | with lock.lock(poolvfs, b'%s.lock' % basename): | |
546 | if os.path.exists(sharepath): |
|
546 | if os.path.exists(sharepath): | |
547 | ui.status( |
|
547 | ui.status( | |
548 | _(b'(sharing from existing pooled repository %s)\n') % basename |
|
548 | _(b'(sharing from existing pooled repository %s)\n') % basename | |
549 | ) |
|
549 | ) | |
550 | else: |
|
550 | else: | |
551 | ui.status( |
|
551 | ui.status( | |
552 | _(b'(sharing from new pooled repository %s)\n') % basename |
|
552 | _(b'(sharing from new pooled repository %s)\n') % basename | |
553 | ) |
|
553 | ) | |
554 | # Always use pull mode because hardlinks in share mode don't work |
|
554 | # Always use pull mode because hardlinks in share mode don't work | |
555 | # well. Never update because working copies aren't necessary in |
|
555 | # well. Never update because working copies aren't necessary in | |
556 | # share mode. |
|
556 | # share mode. | |
557 | clone( |
|
557 | clone( | |
558 | ui, |
|
558 | ui, | |
559 | peeropts, |
|
559 | peeropts, | |
560 | source, |
|
560 | source, | |
561 | dest=sharepath, |
|
561 | dest=sharepath, | |
562 | pull=True, |
|
562 | pull=True, | |
563 | revs=rev, |
|
563 | revs=rev, | |
564 | update=False, |
|
564 | update=False, | |
565 | stream=stream, |
|
565 | stream=stream, | |
566 | ) |
|
566 | ) | |
567 |
|
567 | |||
568 | # Resolve the value to put in [paths] section for the source. |
|
568 | # Resolve the value to put in [paths] section for the source. | |
569 | if islocal(source): |
|
569 | if islocal(source): | |
570 | defaultpath = os.path.abspath(urlutil.urllocalpath(source)) |
|
570 | defaultpath = os.path.abspath(urlutil.urllocalpath(source)) | |
571 | else: |
|
571 | else: | |
572 | defaultpath = source |
|
572 | defaultpath = source | |
573 |
|
573 | |||
574 | sharerepo = repository(ui, path=sharepath) |
|
574 | sharerepo = repository(ui, path=sharepath) | |
575 | destrepo = share( |
|
575 | destrepo = share( | |
576 | ui, |
|
576 | ui, | |
577 | sharerepo, |
|
577 | sharerepo, | |
578 | dest=dest, |
|
578 | dest=dest, | |
579 | update=False, |
|
579 | update=False, | |
580 | bookmarks=False, |
|
580 | bookmarks=False, | |
581 | defaultpath=defaultpath, |
|
581 | defaultpath=defaultpath, | |
582 | ) |
|
582 | ) | |
583 |
|
583 | |||
584 | # We need to perform a pull against the dest repo to fetch bookmarks |
|
584 | # We need to perform a pull against the dest repo to fetch bookmarks | |
585 | # and other non-store data that isn't shared by default. In the case of |
|
585 | # and other non-store data that isn't shared by default. In the case of | |
586 | # a non-existent shared repo, this means we pull from the remote twice. This |
|
586 | # a non-existent shared repo, this means we pull from the remote twice. This | |
587 | # is a bit weird. But at the time it was implemented, there wasn't an easy |
|
587 | # is a bit weird. But at the time it was implemented, there wasn't an easy | |
588 | # way to pull just non-changegroup data. |
|
588 | # way to pull just non-changegroup data. | |
589 | exchange.pull(destrepo, srcpeer, heads=revs) |
|
589 | exchange.pull(destrepo, srcpeer, heads=revs) | |
590 |
|
590 | |||
591 | _postshareupdate(destrepo, update) |
|
591 | _postshareupdate(destrepo, update) | |
592 |
|
592 | |||
593 | return srcpeer, peer(ui, peeropts, dest) |
|
593 | return srcpeer, peer(ui, peeropts, dest) | |
594 |
|
594 | |||
595 |
|
595 | |||
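Callers normally reach clonewithshare() through clone() with ``shareopts``, e.g. (hypothetical URL and pool directory):

    from mercurial import hg, ui as uimod

    ui_ = uimod.ui.load()
    srcpeer, destpeer = hg.clone(
        ui_,
        {},                              # peeropts
        b'https://hg.example.com/repo',  # hypothetical remote
        dest=b'repo-wc',
        shareopts={b'pool': b'/srv/hgpool', b'mode': b'identity'},
    )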
596 | # Recomputing caches is often slow on big repos, so copy them. |
|
596 | # Recomputing caches is often slow on big repos, so copy them. | |
597 | def _copycache(srcrepo, dstcachedir, fname): |
|
597 | def _copycache(srcrepo, dstcachedir, fname): | |
598 | """copy a cache from srcrepo to destcachedir (if it exists)""" |
|
598 | """copy a cache from srcrepo to destcachedir (if it exists)""" | |
599 | srcfname = srcrepo.cachevfs.join(fname) |
|
599 | srcfname = srcrepo.cachevfs.join(fname) | |
600 | dstfname = os.path.join(dstcachedir, fname) |
|
600 | dstfname = os.path.join(dstcachedir, fname) | |
601 | if os.path.exists(srcfname): |
|
601 | if os.path.exists(srcfname): | |
602 | if not os.path.exists(dstcachedir): |
|
602 | if not os.path.exists(dstcachedir): | |
603 | os.mkdir(dstcachedir) |
|
603 | os.mkdir(dstcachedir) | |
604 | util.copyfile(srcfname, dstfname) |
|
604 | util.copyfile(srcfname, dstfname) | |
605 |
|
605 | |||
606 |
|
606 | |||
607 | def clone( |
|
607 | def clone( | |
608 | ui, |
|
608 | ui, | |
609 | peeropts, |
|
609 | peeropts, | |
610 | source, |
|
610 | source, | |
611 | dest=None, |
|
611 | dest=None, | |
612 | pull=False, |
|
612 | pull=False, | |
613 | revs=None, |
|
613 | revs=None, | |
614 | update=True, |
|
614 | update=True, | |
615 | stream=False, |
|
615 | stream=False, | |
616 | branch=None, |
|
616 | branch=None, | |
617 | shareopts=None, |
|
617 | shareopts=None, | |
618 | storeincludepats=None, |
|
618 | storeincludepats=None, | |
619 | storeexcludepats=None, |
|
619 | storeexcludepats=None, | |
620 | depth=None, |
|
620 | depth=None, | |
621 | ): |
|
621 | ): | |
622 | """Make a copy of an existing repository. |
|
622 | """Make a copy of an existing repository. | |
623 |
|
623 | |||
624 | Create a copy of an existing repository in a new directory. The |
|
624 | Create a copy of an existing repository in a new directory. The | |
625 | source and destination are URLs, as passed to the repository |
|
625 | source and destination are URLs, as passed to the repository | |
626 | function. Returns a pair of repository peers, the source and |
|
626 | function. Returns a pair of repository peers, the source and | |
627 | newly created destination. |
|
627 | newly created destination. | |
628 |
|
628 | |||
629 | The location of the source is added to the new repository's |
|
629 | The location of the source is added to the new repository's | |
630 | .hg/hgrc file, as the default to be used for future pulls and |
|
630 | .hg/hgrc file, as the default to be used for future pulls and | |
631 | pushes. |
|
631 | pushes. | |
632 |
|
632 | |||
633 | If an exception is raised, the partly cloned/updated destination |
|
633 | If an exception is raised, the partly cloned/updated destination | |
634 | repository will be deleted. |
|
634 | repository will be deleted. | |
635 |
|
635 | |||
636 | Arguments: |
|
636 | Arguments: | |
637 |
|
637 | |||
638 | source: repository object or URL |
|
638 | source: repository object or URL | |
639 |
|
639 | |||
640 | dest: URL of destination repository to create (defaults to base |
|
640 | dest: URL of destination repository to create (defaults to base | |
641 | name of source repository) |
|
641 | name of source repository) | |
642 |
|
642 | |||
643 | pull: always pull from the source repository, even in the local case or if the |
|
643 | pull: always pull from the source repository, even in the local case or if the | |
644 | server prefers streaming |
|
644 | server prefers streaming | |
645 |
|
645 | |||
646 | stream: stream raw data uncompressed from repository (fast over |
|
646 | stream: stream raw data uncompressed from repository (fast over | |
647 | LAN, slow over WAN) |
|
647 | LAN, slow over WAN) | |
648 |
|
648 | |||
649 | revs: revisions to clone up to (implies pull=True) |
|
649 | revs: revisions to clone up to (implies pull=True) | |
650 |
|
650 | |||
651 | update: update working directory after clone completes, if |
|
651 | update: update working directory after clone completes, if | |
652 | destination is local repository (True means update to default rev, |
|
652 | destination is local repository (True means update to default rev, | |
653 | anything else is treated as a revision) |
|
653 | anything else is treated as a revision) | |
654 |
|
654 | |||
655 | branch: branches to clone |
|
655 | branch: branches to clone | |
656 |
|
656 | |||
657 | shareopts: dict of options to control auto sharing behavior. The "pool" key |
|
657 | shareopts: dict of options to control auto sharing behavior. The "pool" key | |
658 | activates auto sharing mode and defines the directory for stores. The |
|
658 | activates auto sharing mode and defines the directory for stores. The | |
659 | "mode" key determines how to construct the directory name of the shared |
|
659 | "mode" key determines how to construct the directory name of the shared | |
660 | repository. "identity" means the name is derived from the node of the first |
|
660 | repository. "identity" means the name is derived from the node of the first | |
661 | changeset in the repository. "remote" means the name is derived from the |
|
661 | changeset in the repository. "remote" means the name is derived from the | |
662 | remote's path/URL. Defaults to "identity." |
|
662 | remote's path/URL. Defaults to "identity." | |
663 |
|
663 | |||
664 | storeincludepats and storeexcludepats: sets of file patterns to include and |
|
664 | storeincludepats and storeexcludepats: sets of file patterns to include and | |
665 | exclude in the repository copy, respectively. If not defined, all files |
|
665 | exclude in the repository copy, respectively. If not defined, all files | |
666 | will be included (a "full" clone). Otherwise a "narrow" clone containing |
|
666 | will be included (a "full" clone). Otherwise a "narrow" clone containing | |
667 | only the requested files will be performed. If ``storeincludepats`` is not |
|
667 | only the requested files will be performed. If ``storeincludepats`` is not | |
668 | defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be |
|
668 | defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be | |
669 | ``path:.``. If both are empty sets, no files will be cloned. |
|
669 | ``path:.``. If both are empty sets, no files will be cloned. | |
670 | """ |
|
670 | """ | |
671 |
|
671 | |||
672 | if isinstance(source, bytes): |
|
672 | if isinstance(source, bytes): | |
673 | src = urlutil.get_clone_path(ui, source, branch) |
|
673 | src = urlutil.get_clone_path(ui, source, branch) | |
674 | origsource, source, branches = src |
|
674 | origsource, source, branches = src | |
675 | srcpeer = peer(ui, peeropts, source) |
|
675 | srcpeer = peer(ui, peeropts, source) | |
676 | else: |
|
676 | else: | |
677 | srcpeer = source.peer() # in case we were called with a localrepo |
|
677 | srcpeer = source.peer() # in case we were called with a localrepo | |
678 | branches = (None, branch or []) |
|
678 | branches = (None, branch or []) | |
679 | origsource = source = srcpeer.url() |
|
679 | origsource = source = srcpeer.url() | |
680 | srclock = destlock = cleandir = None |
|
680 | srclock = destlock = cleandir = None | |
681 | destpeer = None |
|
681 | destpeer = None | |
682 | try: |
|
682 | try: | |
683 | revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs) |
|
683 | revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs) | |
684 |
|
684 | |||
685 | if dest is None: |
|
685 | if dest is None: | |
686 | dest = defaultdest(source) |
|
686 | dest = defaultdest(source) | |
687 | if dest: |
|
687 | if dest: | |
688 | ui.status(_(b"destination directory: %s\n") % dest) |
|
688 | ui.status(_(b"destination directory: %s\n") % dest) | |
689 | else: |
|
689 | else: | |
690 | dest = urlutil.get_clone_path(ui, dest)[0] |
|
690 | dest = urlutil.get_clone_path(ui, dest)[0] | |
691 |
|
691 | |||
692 | dest = urlutil.urllocalpath(dest) |
|
692 | dest = urlutil.urllocalpath(dest) | |
693 | source = urlutil.urllocalpath(source) |
|
693 | source = urlutil.urllocalpath(source) | |
694 |
|
694 | |||
695 | if not dest: |
|
695 | if not dest: | |
696 | raise error.InputError(_(b"empty destination path is not valid")) |
|
696 | raise error.InputError(_(b"empty destination path is not valid")) | |
697 |
|
697 | |||
698 | destvfs = vfsmod.vfs(dest, expandpath=True) |
|
698 | destvfs = vfsmod.vfs(dest, expandpath=True) | |
699 | if destvfs.lexists(): |
|
699 | if destvfs.lexists(): | |
700 | if not destvfs.isdir(): |
|
700 | if not destvfs.isdir(): | |
701 | raise error.InputError( |
|
701 | raise error.InputError( | |
702 | _(b"destination '%s' already exists") % dest |
|
702 | _(b"destination '%s' already exists") % dest | |
703 | ) |
|
703 | ) | |
704 | elif destvfs.listdir(): |
|
704 | elif destvfs.listdir(): | |
705 | raise error.InputError( |
|
705 | raise error.InputError( | |
706 | _(b"destination '%s' is not empty") % dest |
|
706 | _(b"destination '%s' is not empty") % dest | |
707 | ) |
|
707 | ) | |
708 |
|
708 | |||
709 | createopts = {} |
|
709 | createopts = {} | |
710 | narrow = False |
|
710 | narrow = False | |
711 |
|
711 | |||
712 | if storeincludepats is not None: |
|
712 | if storeincludepats is not None: | |
713 | narrowspec.validatepatterns(storeincludepats) |
|
713 | narrowspec.validatepatterns(storeincludepats) | |
714 | narrow = True |
|
714 | narrow = True | |
715 |
|
715 | |||
716 | if storeexcludepats is not None: |
|
716 | if storeexcludepats is not None: | |
717 | narrowspec.validatepatterns(storeexcludepats) |
|
717 | narrowspec.validatepatterns(storeexcludepats) | |
718 | narrow = True |
|
718 | narrow = True | |
719 |
|
719 | |||
720 | if narrow: |
|
720 | if narrow: | |
721 | # Include everything by default if only exclusion patterns defined. |
|
721 | # Include everything by default if only exclusion patterns defined. | |
722 | if storeexcludepats and not storeincludepats: |
|
722 | if storeexcludepats and not storeincludepats: | |
723 | storeincludepats = {b'path:.'} |
|
723 | storeincludepats = {b'path:.'} | |
724 |
|
724 | |||
725 | createopts[b'narrowfiles'] = True |
|
725 | createopts[b'narrowfiles'] = True | |
726 |
|
726 | |||
727 | if depth: |
|
727 | if depth: | |
728 | createopts[b'shallowfilestore'] = True |
|
728 | createopts[b'shallowfilestore'] = True | |
729 |
|
729 | |||
730 | if srcpeer.capable(b'lfs-serve'): |
|
730 | if srcpeer.capable(b'lfs-serve'): | |
731 | # Repository creation honors the config if it disabled the extension, so |
|
731 | # Repository creation honors the config if it disabled the extension, so | |
732 | # we can't just announce that lfs will be enabled. This check avoids |
|
732 | # we can't just announce that lfs will be enabled. This check avoids | |
733 | # saying that lfs will be enabled, and then saying it's an unknown |
|
733 | # saying that lfs will be enabled, and then saying it's an unknown | |
734 | # feature. The lfs creation option is set in either case so that a |
|
734 | # feature. The lfs creation option is set in either case so that a | |
735 | # requirement is added. If the extension is explicitly disabled but the |
|
735 | # requirement is added. If the extension is explicitly disabled but the | |
736 | # requirement is set, the clone aborts early, before transferring any |
|
736 | # requirement is set, the clone aborts early, before transferring any | |
737 | # data. |
|
737 | # data. | |
738 | createopts[b'lfs'] = True |
|
738 | createopts[b'lfs'] = True | |
739 |
|
739 | |||
740 | if extensions.disabled_help(b'lfs'): |
|
740 | if extensions.disabled_help(b'lfs'): | |
741 | ui.status( |
|
741 | ui.status( | |
742 | _( |
|
742 | _( | |
743 | b'(remote is using large file support (lfs), but it is ' |
|
743 | b'(remote is using large file support (lfs), but it is ' | |
744 | b'explicitly disabled in the local configuration)\n' |
|
744 | b'explicitly disabled in the local configuration)\n' | |
745 | ) |
|
745 | ) | |
746 | ) |
|
746 | ) | |
747 | else: |
|
747 | else: | |
748 | ui.status( |
|
748 | ui.status( | |
749 | _( |
|
749 | _( | |
750 | b'(remote is using large file support (lfs); lfs will ' |
|
750 | b'(remote is using large file support (lfs); lfs will ' | |
751 | b'be enabled for this repository)\n' |
|
751 | b'be enabled for this repository)\n' | |
752 | ) |
|
752 | ) | |
753 | ) |
|
753 | ) | |
754 |
|
754 | |||
755 | shareopts = shareopts or {} |
|
755 | shareopts = shareopts or {} | |
756 | sharepool = shareopts.get(b'pool') |
|
756 | sharepool = shareopts.get(b'pool') | |
757 | sharenamemode = shareopts.get(b'mode') |
|
757 | sharenamemode = shareopts.get(b'mode') | |
758 | if sharepool and islocal(dest): |
|
758 | if sharepool and islocal(dest): | |
759 | sharepath = None |
|
759 | sharepath = None | |
760 | if sharenamemode == b'identity': |
|
760 | if sharenamemode == b'identity': | |
761 | # Resolve the name from the initial changeset in the remote |
|
761 | # Resolve the name from the initial changeset in the remote | |
762 | # repository. This returns nullid when the remote is empty. It |
|
762 | # repository. This returns nullid when the remote is empty. It | |
763 | # raises RepoLookupError if revision 0 is filtered or otherwise |
|
763 | # raises RepoLookupError if revision 0 is filtered or otherwise | |
764 | # not available. If we fail to resolve, sharing is not enabled. |
|
764 | # not available. If we fail to resolve, sharing is not enabled. | |
765 | try: |
|
765 | try: | |
766 | with srcpeer.commandexecutor() as e: |
|
766 | with srcpeer.commandexecutor() as e: | |
767 | rootnode = e.callcommand( |
|
767 | rootnode = e.callcommand( | |
768 | b'lookup', |
|
768 | b'lookup', | |
769 | { |
|
769 | { | |
770 | b'key': b'0', |
|
770 | b'key': b'0', | |
771 | }, |
|
771 | }, | |
772 | ).result() |
|
772 | ).result() | |
773 |
|
773 | |||
774 | if rootnode != sha1nodeconstants.nullid: |
|
774 | if rootnode != sha1nodeconstants.nullid: | |
775 | sharepath = os.path.join(sharepool, hex(rootnode)) |
|
775 | sharepath = os.path.join(sharepool, hex(rootnode)) | |
776 | else: |
|
776 | else: | |
777 | ui.status( |
|
777 | ui.status( | |
778 | _( |
|
778 | _( | |
779 | b'(not using pooled storage: ' |
|
779 | b'(not using pooled storage: ' | |
780 | b'remote appears to be empty)\n' |
|
780 | b'remote appears to be empty)\n' | |
781 | ) |
|
781 | ) | |
782 | ) |
|
782 | ) | |
783 | except error.RepoLookupError: |
|
783 | except error.RepoLookupError: | |
784 | ui.status( |
|
784 | ui.status( | |
785 | _( |
|
785 | _( | |
786 | b'(not using pooled storage: ' |
|
786 | b'(not using pooled storage: ' | |
787 | b'unable to resolve identity of remote)\n' |
|
787 | b'unable to resolve identity of remote)\n' | |
788 | ) |
|
788 | ) | |
789 | ) |
|
789 | ) | |
790 | elif sharenamemode == b'remote': |
|
790 | elif sharenamemode == b'remote': | |
791 | sharepath = os.path.join( |
|
791 | sharepath = os.path.join( | |
792 | sharepool, hex(hashutil.sha1(source).digest()) |
|
792 | sharepool, hex(hashutil.sha1(source).digest()) | |
793 | ) |
|
793 | ) | |
794 | else: |
|
794 | else: | |
795 | raise error.Abort( |
|
795 | raise error.Abort( | |
796 | _(b'unknown share naming mode: %s') % sharenamemode |
|
796 | _(b'unknown share naming mode: %s') % sharenamemode | |
797 | ) |
|
797 | ) | |
798 |
|
798 | |||
799 | # TODO this is a somewhat arbitrary restriction. |
|
799 | # TODO this is a somewhat arbitrary restriction. | |
800 | if narrow: |
|
800 | if narrow: | |
801 | ui.status( |
|
801 | ui.status( | |
802 | _(b'(pooled storage not supported for narrow clones)\n') |
|
802 | _(b'(pooled storage not supported for narrow clones)\n') | |
803 | ) |
|
803 | ) | |
804 | sharepath = None |
|
804 | sharepath = None | |
805 |
|
805 | |||
806 | if sharepath: |
|
806 | if sharepath: | |
807 | return clonewithshare( |
|
807 | return clonewithshare( | |
808 | ui, |
|
808 | ui, | |
809 | peeropts, |
|
809 | peeropts, | |
810 | sharepath, |
|
810 | sharepath, | |
811 | source, |
|
811 | source, | |
812 | srcpeer, |
|
812 | srcpeer, | |
813 | dest, |
|
813 | dest, | |
814 | pull=pull, |
|
814 | pull=pull, | |
815 | rev=revs, |
|
815 | rev=revs, | |
816 | update=update, |
|
816 | update=update, | |
817 | stream=stream, |
|
817 | stream=stream, | |
818 | ) |
|
818 | ) | |
819 |
|
819 | |||
820 | srcrepo = srcpeer.local() |
|
820 | srcrepo = srcpeer.local() | |
821 |
|
821 | |||
822 | abspath = origsource |
|
822 | abspath = origsource | |
823 | if islocal(origsource): |
|
823 | if islocal(origsource): | |
824 | abspath = os.path.abspath(urlutil.urllocalpath(origsource)) |
|
824 | abspath = os.path.abspath(urlutil.urllocalpath(origsource)) | |
825 |
|
825 | |||
826 | if islocal(dest): |
|
826 | if islocal(dest): | |
827 | cleandir = dest |
|
827 | cleandir = dest | |
828 |
|
828 | |||
829 | copy = False |
|
829 | copy = False | |
830 | if ( |
|
830 | if ( | |
831 | srcrepo |
|
831 | srcrepo | |
832 | and srcrepo.cancopy() |
|
832 | and srcrepo.cancopy() | |
833 | and islocal(dest) |
|
833 | and islocal(dest) | |
834 | and not phases.hassecret(srcrepo) |
|
834 | and not phases.hassecret(srcrepo) | |
835 | ): |
|
835 | ): | |
836 | copy = not pull and not revs |
|
836 | copy = not pull and not revs | |
837 |
|
837 | |||
838 | # TODO this is a somewhat arbitrary restriction. |
|
838 | # TODO this is a somewhat arbitrary restriction. | |
839 | if narrow: |
|
839 | if narrow: | |
840 | copy = False |
|
840 | copy = False | |
841 |
|
841 | |||
842 | if copy: |
|
842 | if copy: | |
843 | try: |
|
843 | try: | |
844 | # we use a lock here because if we race with commit, we |
|
844 | # we use a lock here because if we race with commit, we | |
845 | # can end up with extra data in the cloned revlogs that's |
|
845 | # can end up with extra data in the cloned revlogs that's | |
846 | # not pointed to by changesets, thus causing verify to |
|
846 | # not pointed to by changesets, thus causing verify to | |
847 | # fail |
|
847 | # fail | |
848 | srclock = srcrepo.lock(wait=False) |
|
848 | srclock = srcrepo.lock(wait=False) | |
849 | except error.LockError: |
|
849 | except error.LockError: | |
850 | copy = False |
|
850 | copy = False | |
851 |
|
851 | |||
852 | if copy: |
|
852 | if copy: | |
853 | srcrepo.hook(b'preoutgoing', throw=True, source=b'clone') |
|
853 | srcrepo.hook(b'preoutgoing', throw=True, source=b'clone') | |
854 | hgdir = os.path.realpath(os.path.join(dest, b".hg")) |
|
854 | hgdir = os.path.realpath(os.path.join(dest, b".hg")) | |
855 | if not os.path.exists(dest): |
|
855 | if not os.path.exists(dest): | |
856 | util.makedirs(dest) |
|
856 | util.makedirs(dest) | |
857 | else: |
|
857 | else: | |
858 | # only clean up directories we create ourselves |
|
858 | # only clean up directories we create ourselves | |
859 | cleandir = hgdir |
|
859 | cleandir = hgdir | |
860 | try: |
|
860 | try: | |
861 | destpath = hgdir |
|
861 | destpath = hgdir | |
862 | util.makedir(destpath, notindexed=True) |
|
862 | util.makedir(destpath, notindexed=True) | |
863 | except OSError as inst: |
|
863 | except OSError as inst: | |
864 | if inst.errno == errno.EEXIST: |
|
864 | if inst.errno == errno.EEXIST: | |
865 | cleandir = None |
|
865 | cleandir = None | |
866 | raise error.Abort( |
|
866 | raise error.Abort( | |
867 | _(b"destination '%s' already exists") % dest |
|
867 | _(b"destination '%s' already exists") % dest | |
868 | ) |
|
868 | ) | |
869 | raise |
|
869 | raise | |
870 |
|
870 | |||
871 | destlock = copystore(ui, srcrepo, destpath) |
|
871 | destlock = copystore(ui, srcrepo, destpath) | |
872 | # copy bookmarks over |
|
872 | # copy bookmarks over | |
873 | srcbookmarks = srcrepo.vfs.join(b'bookmarks') |
|
873 | srcbookmarks = srcrepo.vfs.join(b'bookmarks') | |
874 | dstbookmarks = os.path.join(destpath, b'bookmarks') |
|
874 | dstbookmarks = os.path.join(destpath, b'bookmarks') | |
875 | if os.path.exists(srcbookmarks): |
|
875 | if os.path.exists(srcbookmarks): | |
876 | util.copyfile(srcbookmarks, dstbookmarks) |
|
876 | util.copyfile(srcbookmarks, dstbookmarks) | |
877 |
|
877 | |||
878 | dstcachedir = os.path.join(destpath, b'cache') |
|
878 | dstcachedir = os.path.join(destpath, b'cache') | |
879 | for cache in cacheutil.cachetocopy(srcrepo): |
|
879 | for cache in cacheutil.cachetocopy(srcrepo): | |
880 | _copycache(srcrepo, dstcachedir, cache) |
|
880 | _copycache(srcrepo, dstcachedir, cache) | |
881 |
|
881 | |||
882 | # we need to re-init the repo after manually copying the data |
|
882 | # we need to re-init the repo after manually copying the data | |
883 | # into it |
|
883 | # into it | |
884 | destpeer = peer(srcrepo, peeropts, dest) |
|
884 | destpeer = peer(srcrepo, peeropts, dest) | |
885 | srcrepo.hook( |
|
885 | srcrepo.hook( | |
886 | b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex |
|
886 | b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex | |
887 | ) |
|
887 | ) | |
888 | else: |
|
888 | else: | |
889 | try: |
|
889 | try: | |
890 | # only pass ui when no srcrepo |
|
890 | # only pass ui when no srcrepo | |
891 | destpeer = peer( |
|
891 | destpeer = peer( | |
892 | srcrepo or ui, |
|
892 | srcrepo or ui, | |
893 | peeropts, |
|
893 | peeropts, | |
894 | dest, |
|
894 | dest, | |
895 | create=True, |
|
895 | create=True, | |
896 | createopts=createopts, |
|
896 | createopts=createopts, | |
897 | ) |
|
897 | ) | |
898 | except OSError as inst: |
|
898 | except OSError as inst: | |
899 | if inst.errno == errno.EEXIST: |
|
899 | if inst.errno == errno.EEXIST: | |
900 | cleandir = None |
|
900 | cleandir = None | |
901 | raise error.Abort( |
|
901 | raise error.Abort( | |
902 | _(b"destination '%s' already exists") % dest |
|
902 | _(b"destination '%s' already exists") % dest | |
903 | ) |
|
903 | ) | |
904 | raise |
|
904 | raise | |
905 |
|
905 | |||
906 | if revs: |
|
906 | if revs: | |
907 | if not srcpeer.capable(b'lookup'): |
|
907 | if not srcpeer.capable(b'lookup'): | |
908 | raise error.Abort( |
|
908 | raise error.Abort( | |
909 | _( |
|
909 | _( | |
910 | b"src repository does not support " |
|
910 | b"src repository does not support " | |
911 | b"revision lookup and so doesn't " |
|
911 | b"revision lookup and so doesn't " | |
912 | b"support clone by revision" |
|
912 | b"support clone by revision" | |
913 | ) |
|
913 | ) | |
914 | ) |
|
914 | ) | |
915 |
|
915 | |||
916 | # TODO this is batchable. |
|
916 | # TODO this is batchable. | |
917 | remoterevs = [] |
|
917 | remoterevs = [] | |
918 | for rev in revs: |
|
918 | for rev in revs: | |
919 | with srcpeer.commandexecutor() as e: |
|
919 | with srcpeer.commandexecutor() as e: | |
920 | remoterevs.append( |
|
920 | remoterevs.append( | |
921 | e.callcommand( |
|
921 | e.callcommand( | |
922 | b'lookup', |
|
922 | b'lookup', | |
923 | { |
|
923 | { | |
924 | b'key': rev, |
|
924 | b'key': rev, | |
925 | }, |
|
925 | }, | |
926 | ).result() |
|
926 | ).result() | |
927 | ) |
|
927 | ) | |
928 | revs = remoterevs |
|
928 | revs = remoterevs | |
929 |
|
929 | |||
930 | checkout = revs[0] |
|
930 | checkout = revs[0] | |
931 | else: |
|
931 | else: | |
932 | revs = None |
|
932 | revs = None | |
933 | local = destpeer.local() |
|
933 | local = destpeer.local() | |
934 | if local: |
|
934 | if local: | |
935 | if narrow: |
|
935 | if narrow: | |
936 | with local.wlock(), local.lock(): |
|
936 | with local.wlock(), local.lock(): | |
937 | local.setnarrowpats(storeincludepats, storeexcludepats) |
|
937 | local.setnarrowpats(storeincludepats, storeexcludepats) | |
938 | narrowspec.copytoworkingcopy(local) |
|
938 | narrowspec.copytoworkingcopy(local) | |
939 |
|
939 | |||
940 | u = urlutil.url(abspath) |
|
940 | u = urlutil.url(abspath) | |
941 | defaulturl = bytes(u) |
|
941 | defaulturl = bytes(u) | |
942 | local.ui.setconfig(b'paths', b'default', defaulturl, b'clone') |
|
942 | local.ui.setconfig(b'paths', b'default', defaulturl, b'clone') | |
943 | if not stream: |
|
943 | if not stream: | |
944 | if pull: |
|
944 | if pull: | |
945 | stream = False |
|
945 | stream = False | |
946 | else: |
|
946 | else: | |
947 | stream = None |
|
947 | stream = None | |
948 | # internal config: ui.quietbookmarkmove |
|
948 | # internal config: ui.quietbookmarkmove | |
949 | overrides = {(b'ui', b'quietbookmarkmove'): True} |
|
949 | overrides = {(b'ui', b'quietbookmarkmove'): True} | |
950 | with local.ui.configoverride(overrides, b'clone'): |
|
950 | with local.ui.configoverride(overrides, b'clone'): | |
951 | exchange.pull( |
|
951 | exchange.pull( | |
952 | local, |
|
952 | local, | |
953 | srcpeer, |
|
953 | srcpeer, | |
954 | revs, |
|
954 | revs, | |
955 | streamclonerequested=stream, |
|
955 | streamclonerequested=stream, | |
956 | includepats=storeincludepats, |
|
956 | includepats=storeincludepats, | |
957 | excludepats=storeexcludepats, |
|
957 | excludepats=storeexcludepats, | |
958 | depth=depth, |
|
958 | depth=depth, | |
959 | ) |
|
959 | ) | |
960 | elif srcrepo: |
|
960 | elif srcrepo: | |
961 | # TODO lift restriction once exchange.push() accepts narrow |
|
961 | # TODO lift restriction once exchange.push() accepts narrow | |
962 | # push. |
|
962 | # push. | |
963 | if narrow: |
|
963 | if narrow: | |
964 | raise error.Abort( |
|
964 | raise error.Abort( | |
965 | _( |
|
965 | _( | |
966 | b'narrow clone not available for ' |
|
966 | b'narrow clone not available for ' | |
967 | b'remote destinations' |
|
967 | b'remote destinations' | |
968 | ) |
|
968 | ) | |
969 | ) |
|
969 | ) | |
970 |
|
970 | |||
971 | exchange.push( |
|
971 | exchange.push( | |
972 | srcrepo, |
|
972 | srcrepo, | |
973 | destpeer, |
|
973 | destpeer, | |
974 | revs=revs, |
|
974 | revs=revs, | |
975 | bookmarks=srcrepo._bookmarks.keys(), |
|
975 | bookmarks=srcrepo._bookmarks.keys(), | |
976 | ) |
|
976 | ) | |
977 | else: |
|
977 | else: | |
978 | raise error.Abort( |
|
978 | raise error.Abort( | |
979 | _(b"clone from remote to remote not supported") |
|
979 | _(b"clone from remote to remote not supported") | |
980 | ) |
|
980 | ) | |
981 |
|
981 | |||
982 | cleandir = None |
|
982 | cleandir = None | |
983 |
|
983 | |||
984 | destrepo = destpeer.local() |
|
984 | destrepo = destpeer.local() | |
985 | if destrepo: |
|
985 | if destrepo: | |
986 | template = uimod.samplehgrcs[b'cloned'] |
|
986 | template = uimod.samplehgrcs[b'cloned'] | |
987 | u = urlutil.url(abspath) |
|
987 | u = urlutil.url(abspath) | |
988 | u.passwd = None |
|
988 | u.passwd = None | |
989 | defaulturl = bytes(u) |
|
989 | defaulturl = bytes(u) | |
990 | destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl)) |
|
990 | destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl)) | |
991 | destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone') |
|
991 | destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone') | |
992 |
|
992 | |||
993 | if ui.configbool(b'experimental', b'remotenames'): |
|
993 | if ui.configbool(b'experimental', b'remotenames'): | |
994 | logexchange.pullremotenames(destrepo, srcpeer) |
|
994 | logexchange.pullremotenames(destrepo, srcpeer) | |
995 |
|
995 | |||
996 | if update: |
|
996 | if update: | |
997 | if update is not True: |
|
997 | if update is not True: | |
998 | with srcpeer.commandexecutor() as e: |
|
998 | with srcpeer.commandexecutor() as e: | |
999 | checkout = e.callcommand( |
|
999 | checkout = e.callcommand( | |
1000 | b'lookup', |
|
1000 | b'lookup', | |
1001 | { |
|
1001 | { | |
1002 | b'key': update, |
|
1002 | b'key': update, | |
1003 | }, |
|
1003 | }, | |
1004 | ).result() |
|
1004 | ).result() | |
1005 |
|
1005 | |||
1006 | uprev = None |
|
1006 | uprev = None | |
1007 | status = None |
|
1007 | status = None | |
1008 | if checkout is not None: |
|
1008 | if checkout is not None: | |
1009 | # Some extensions (at least hg-git and hg-subversion) have |
|
1009 | # Some extensions (at least hg-git and hg-subversion) have | |
1010 | # a peer.lookup() implementation that returns a name instead |
|
1010 | # a peer.lookup() implementation that returns a name instead | |
1011 | # of a nodeid. We work around it here until we've figured |
|
1011 | # of a nodeid. We work around it here until we've figured | |
1012 | # out a better solution. |
|
1012 | # out a better solution. | |
1013 | if len(checkout) == 20 and checkout in destrepo: |
|
1013 | if len(checkout) == 20 and checkout in destrepo: | |
1014 | uprev = checkout |
|
1014 | uprev = checkout | |
1015 | elif scmutil.isrevsymbol(destrepo, checkout): |
|
1015 | elif scmutil.isrevsymbol(destrepo, checkout): | |
1016 | uprev = scmutil.revsymbol(destrepo, checkout).node() |
|
1016 | uprev = scmutil.revsymbol(destrepo, checkout).node() | |
1017 | else: |
|
1017 | else: | |
1018 | if update is not True: |
|
1018 | if update is not True: | |
1019 | try: |
|
1019 | try: | |
1020 | uprev = destrepo.lookup(update) |
|
1020 | uprev = destrepo.lookup(update) | |
1021 | except error.RepoLookupError: |
|
1021 | except error.RepoLookupError: | |
1022 | pass |
|
1022 | pass | |
1023 | if uprev is None: |
|
1023 | if uprev is None: | |
1024 | try: |
|
1024 | try: | |
1025 | if destrepo._activebookmark: |
|
1025 | if destrepo._activebookmark: | |
1026 | uprev = destrepo.lookup(destrepo._activebookmark) |
|
1026 | uprev = destrepo.lookup(destrepo._activebookmark) | |
1027 | update = destrepo._activebookmark |
|
1027 | update = destrepo._activebookmark | |
1028 | else: |
|
1028 | else: | |
1029 | uprev = destrepo._bookmarks[b'@'] |
|
1029 | uprev = destrepo._bookmarks[b'@'] | |
1030 | update = b'@' |
|
1030 | update = b'@' | |
1031 | bn = destrepo[uprev].branch() |
|
1031 | bn = destrepo[uprev].branch() | |
1032 | if bn == b'default': |
|
1032 | if bn == b'default': | |
1033 | status = _(b"updating to bookmark %s\n" % update) |
|
1033 | status = _(b"updating to bookmark %s\n" % update) | |
1034 | else: |
|
1034 | else: | |
1035 | status = ( |
|
1035 | status = ( | |
1036 | _(b"updating to bookmark %s on branch %s\n") |
|
1036 | _(b"updating to bookmark %s on branch %s\n") | |
1037 | ) % (update, bn) |
|
1037 | ) % (update, bn) | |
1038 | except KeyError: |
|
1038 | except KeyError: | |
1039 | try: |
|
1039 | try: | |
1040 | uprev = destrepo.branchtip(b'default') |
|
1040 | uprev = destrepo.branchtip(b'default') | |
1041 | except error.RepoLookupError: |
|
1041 | except error.RepoLookupError: | |
1042 | uprev = destrepo.lookup(b'tip') |
|
1042 | uprev = destrepo.lookup(b'tip') | |
1043 | if not status: |
|
1043 | if not status: | |
1044 | bn = destrepo[uprev].branch() |
|
1044 | bn = destrepo[uprev].branch() | |
1045 | status = _(b"updating to branch %s\n") % bn |
|
1045 | status = _(b"updating to branch %s\n") % bn | |
1046 | destrepo.ui.status(status) |
|
1046 | destrepo.ui.status(status) | |
1047 | _update(destrepo, uprev) |
|
1047 | _update(destrepo, uprev) | |
1048 | if update in destrepo._bookmarks: |
|
1048 | if update in destrepo._bookmarks: | |
1049 | bookmarks.activate(destrepo, update) |
|
1049 | bookmarks.activate(destrepo, update) | |
1050 | if destlock is not None: |
|
1050 | if destlock is not None: | |
1051 | release(destlock) |
|
1051 | release(destlock) | |
1052 | # here is a tiny window where someone could end up writing to the |
|
1052 | # here is a tiny window where someone could end up writing to the | |
1053 | # repository before the caches are sure to be warm. This is "fine" |
|
1053 | # repository before the caches are sure to be warm. This is "fine" | |
1054 | # as the only "bad" outcome would be some slowness. That potential |
|
1054 | # as the only "bad" outcome would be some slowness. That potential | |
1055 | # slowness already affects readers. |
|
1055 | # slowness already affects readers. | |
1056 | with destrepo.lock(): |
|
1056 | with destrepo.lock(): | |
1057 | destrepo.updatecaches(full=True) |
|
1057 | destrepo.updatecaches(full=b"post-clone") | |
1058 | finally: |
|
1058 | finally: | |
1059 | release(srclock, destlock) |
|
1059 | release(srclock, destlock) | |
1060 | if cleandir is not None: |
|
1060 | if cleandir is not None: | |
1061 | shutil.rmtree(cleandir, True) |
|
1061 | shutil.rmtree(cleandir, True) | |
1062 | if srcpeer is not None: |
|
1062 | if srcpeer is not None: | |
1063 | srcpeer.close() |
|
1063 | srcpeer.close() | |
1064 | if destpeer and destpeer.local() is None: |
|
1064 | if destpeer and destpeer.local() is None: | |
1065 | destpeer.close() |
|
1065 | destpeer.close() | |
1066 | return srcpeer, destpeer |
|
1066 | return srcpeer, destpeer | |
1067 |
|
1067 | |||
1068 |
|
1068 | |||
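A minimal sketch of a plain local clone through this API, with hypothetical paths and empty peer options:

    from mercurial import hg, ui as uimod

    ui_ = uimod.ui.load()
    srcpeer, destpeer = hg.clone(
        ui_, {}, b'/srv/repos/main', dest=b'/tmp/clone', update=True
    )
    destrepo = destpeer.local()  # non-None here because the destination is local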
1069 | def _showstats(repo, stats, quietempty=False): |
|
1069 | def _showstats(repo, stats, quietempty=False): | |
1070 | if quietempty and stats.isempty(): |
|
1070 | if quietempty and stats.isempty(): | |
1071 | return |
|
1071 | return | |
1072 | repo.ui.status( |
|
1072 | repo.ui.status( | |
1073 | _( |
|
1073 | _( | |
1074 | b"%d files updated, %d files merged, " |
|
1074 | b"%d files updated, %d files merged, " | |
1075 | b"%d files removed, %d files unresolved\n" |
|
1075 | b"%d files removed, %d files unresolved\n" | |
1076 | ) |
|
1076 | ) | |
1077 | % ( |
|
1077 | % ( | |
1078 | stats.updatedcount, |
|
1078 | stats.updatedcount, | |
1079 | stats.mergedcount, |
|
1079 | stats.mergedcount, | |
1080 | stats.removedcount, |
|
1080 | stats.removedcount, | |
1081 | stats.unresolvedcount, |
|
1081 | stats.unresolvedcount, | |
1082 | ) |
|
1082 | ) | |
1083 | ) |
|
1083 | ) | |
1084 |
|
1084 | |||
1085 |
|
1085 | |||
1086 | def updaterepo(repo, node, overwrite, updatecheck=None): |
|
1086 | def updaterepo(repo, node, overwrite, updatecheck=None): | |
1087 | """Update the working directory to node. |
|
1087 | """Update the working directory to node. | |
1088 |
|
1088 | |||
1089 | When overwrite is set, working copy changes are clobbered; otherwise they are merged |
|
1089 | When overwrite is set, working copy changes are clobbered; otherwise they are merged | |
1090 |
|
1090 | |||
1091 | returns stats (see pydoc mercurial.merge.applyupdates)""" |
|
1091 | returns stats (see pydoc mercurial.merge.applyupdates)""" | |
1092 | repo.ui.deprecwarn( |
|
1092 | repo.ui.deprecwarn( | |
1093 | b'prefer merge.update() or merge.clean_update() over hg.updaterepo()', |
|
1093 | b'prefer merge.update() or merge.clean_update() over hg.updaterepo()', | |
1094 | b'5.7', |
|
1094 | b'5.7', | |
1095 | ) |
|
1095 | ) | |
1096 | return mergemod._update( |
|
1096 | return mergemod._update( | |
1097 | repo, |
|
1097 | repo, | |
1098 | node, |
|
1098 | node, | |
1099 | branchmerge=False, |
|
1099 | branchmerge=False, | |
1100 | force=overwrite, |
|
1100 | force=overwrite, | |
1101 | labels=[b'working copy', b'destination'], |
|
1101 | labels=[b'working copy', b'destination'], | |
1102 | updatecheck=updatecheck, |
|
1102 | updatecheck=updatecheck, | |
1103 | ) |
|
1103 | ) | |
1104 |
|
1104 | |||
1105 |
|
1105 | |||
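The deprecation warning above names the replacements; a sketch of both, assuming a hypothetical local repository:

    from mercurial import hg, merge as mergemod, ui as uimod

    ui_ = uimod.ui.load()
    repo = hg.repository(ui_, b'/tmp/clone')  # hypothetical local repo
    node = repo[b'tip'].node()

    stats = mergemod.update(repo[node])        # the overwrite=False case
    stats = mergemod.clean_update(repo[node])  # the overwrite=True case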
1106 | def update(repo, node, quietempty=False, updatecheck=None): |
|
1106 | def update(repo, node, quietempty=False, updatecheck=None): | |
1107 | """update the working directory to node""" |
|
1107 | """update the working directory to node""" | |
1108 | stats = mergemod.update(repo[node], updatecheck=updatecheck) |
|
1108 | stats = mergemod.update(repo[node], updatecheck=updatecheck) | |
1109 | _showstats(repo, stats, quietempty) |
|
1109 | _showstats(repo, stats, quietempty) | |
1110 | if stats.unresolvedcount: |
|
1110 | if stats.unresolvedcount: | |
1111 | repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n")) |
|
1111 | repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n")) | |
1112 | return stats.unresolvedcount > 0 |
|
1112 | return stats.unresolvedcount > 0 | |
1113 |
|
1113 | |||
1114 |
|
1114 | |||
1115 | # naming conflict in clone() |
|
1115 | # naming conflict in clone() | |
1116 | _update = update |
|
1116 | _update = update | |
1117 |
|
1117 | |||
1118 |
|
1118 | |||
1119 | def clean(repo, node, show_stats=True, quietempty=False): |
|
1119 | def clean(repo, node, show_stats=True, quietempty=False): | |
1120 | """forcibly switch the working directory to node, clobbering changes""" |
|
1120 | """forcibly switch the working directory to node, clobbering changes""" | |
1121 | stats = mergemod.clean_update(repo[node]) |
|
1121 | stats = mergemod.clean_update(repo[node]) | |
1122 | assert stats.unresolvedcount == 0 |
|
1122 | assert stats.unresolvedcount == 0 | |
1123 | if show_stats: |
|
1123 | if show_stats: | |
1124 | _showstats(repo, stats, quietempty) |
|
1124 | _showstats(repo, stats, quietempty) | |
1125 | return False |
|
1125 | return False | |
1126 |
|
1126 | |||
1127 |
|
1127 | |||
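The two entry points above differ only in how local changes are treated; a sketch with a hypothetical repository and revision:

    from mercurial import hg, ui as uimod

    ui_ = uimod.ui.load()
    repo = hg.repository(ui_, b'/tmp/clone')  # hypothetical local repo
    node = repo[b'default'].node()

    had_conflicts = hg.update(repo, node)  # merges; True if files stay unresolved
    hg.clean(repo, node)                   # discards changes; always ends clean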
1128 | # naming conflict in updatetotally() |
|
1128 | # naming conflict in updatetotally() | |
1129 | _clean = clean |
|
1129 | _clean = clean | |
1130 |
|
1130 | |||
1131 | _VALID_UPDATECHECKS = { |
|
1131 | _VALID_UPDATECHECKS = { | |
1132 | mergemod.UPDATECHECK_ABORT, |
|
1132 | mergemod.UPDATECHECK_ABORT, | |
1133 | mergemod.UPDATECHECK_NONE, |
|
1133 | mergemod.UPDATECHECK_NONE, | |
1134 | mergemod.UPDATECHECK_LINEAR, |
|
1134 | mergemod.UPDATECHECK_LINEAR, | |
1135 | mergemod.UPDATECHECK_NO_CONFLICT, |
|
1135 | mergemod.UPDATECHECK_NO_CONFLICT, | |
1136 | } |
|
1136 | } | |
1137 |
|
1137 | |||
1138 |
|
1138 | |||
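The config lookup in updatetotally() below compares ``commands.update.check`` directly against this set, and a caller can also pass one of the constants explicitly; a sketch with a hypothetical repository:

    from mercurial import hg, merge as mergemod, ui as uimod

    ui_ = uimod.ui.load()
    repo = hg.repository(ui_, b'/tmp/clone')  # hypothetical local repo

    conflicts = hg.updatetotally(
        repo.ui,
        repo,
        repo[b'tip'].node(),  # checkout destination
        None,                 # brev: no bookmark to (de)activate
        updatecheck=mergemod.UPDATECHECK_NO_CONFLICT,
    )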
1139 | def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None): |
|
1139 | def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None): | |
1140 | """Update the working directory with extra care for non-file components |
|
1140 | """Update the working directory with extra care for non-file components | |
1141 |
|
1141 | |||
1142 | This takes care of non-file components below: |
|
1142 | This takes care of non-file components below: | |
1143 |
|
1143 | |||
1144 | :bookmark: might be advanced or (in)activated |
|
1144 | :bookmark: might be advanced or (in)activated | |
1145 |
|
1145 | |||
1146 | This takes arguments below: |
|
1146 | This takes arguments below: | |
1147 |
|
1147 | |||
1148 | :checkout: to which revision the working directory is updated |
|
1148 | :checkout: to which revision the working directory is updated | |
1149 | :brev: a name, which might be a bookmark to be activated after updating |
|
1149 | :brev: a name, which might be a bookmark to be activated after updating | |
1150 | :clean: whether changes in the working directory can be discarded |
|
1150 | :clean: whether changes in the working directory can be discarded | |
1151 | :updatecheck: how to deal with a dirty working directory |
|
1151 | :updatecheck: how to deal with a dirty working directory | |
1152 |
|
1152 | |||
1153 | Valid values for updatecheck are the UPDATECHECK_* constants |
|
1153 | Valid values for updatecheck are the UPDATECHECK_* constants | |
1154 | defined in the merge module. Passing `None` will result in using the |
|
1154 | defined in the merge module. Passing `None` will result in using the | |
1155 | configured default. |
|
1155 | configured default. | |
1156 |
|
1156 | |||
1157 | * ABORT: abort if the working directory is dirty |
|
1157 | * ABORT: abort if the working directory is dirty | |
1158 | * NONE: don't check (merge working directory changes into destination) |
|
1158 | * NONE: don't check (merge working directory changes into destination) | |
1159 | * LINEAR: check that update is linear before merging working directory |
|
1159 | * LINEAR: check that update is linear before merging working directory | |
1160 | changes into destination |
|
1160 | changes into destination | |
1161 | * NO_CONFLICT: check that the update does not result in file merges |
|
1161 | * NO_CONFLICT: check that the update does not result in file merges | |
1162 |
|
1162 | |||
1163 | This returns whether conflict is detected at updating or not. |
|
1163 | This returns whether conflict is detected at updating or not. | |
1164 | """ |
|
1164 | """ | |
1165 | if updatecheck is None: |
|
1165 | if updatecheck is None: | |
1166 | updatecheck = ui.config(b'commands', b'update.check') |
|
1166 | updatecheck = ui.config(b'commands', b'update.check') | |
1167 | if updatecheck not in _VALID_UPDATECHECKS: |
|
1167 | if updatecheck not in _VALID_UPDATECHECKS: | |
1168 | # If not configured, or invalid value configured |
|
1168 | # If not configured, or invalid value configured | |
1169 | updatecheck = mergemod.UPDATECHECK_LINEAR |
|
1169 | updatecheck = mergemod.UPDATECHECK_LINEAR | |
1170 | if updatecheck not in _VALID_UPDATECHECKS: |
|
1170 | if updatecheck not in _VALID_UPDATECHECKS: | |
1171 | raise ValueError( |
|
1171 | raise ValueError( | |
1172 | r'Invalid updatecheck value %r (can accept %r)' |
|
1172 | r'Invalid updatecheck value %r (can accept %r)' | |
1173 | % (updatecheck, _VALID_UPDATECHECKS) |
|
1173 | % (updatecheck, _VALID_UPDATECHECKS) | |
1174 | ) |
|
1174 | ) | |
1175 | with repo.wlock(): |
|
1175 | with repo.wlock(): | |
1176 | movemarkfrom = None |
|
1176 | movemarkfrom = None | |
1177 | warndest = False |
|
1177 | warndest = False | |
1178 | if checkout is None: |
|
1178 | if checkout is None: | |
1179 | updata = destutil.destupdate(repo, clean=clean) |
|
1179 | updata = destutil.destupdate(repo, clean=clean) | |
1180 | checkout, movemarkfrom, brev = updata |
|
1180 | checkout, movemarkfrom, brev = updata | |
1181 | warndest = True |
|
1181 | warndest = True | |
1182 |
|
1182 | |||
1183 | if clean: |
|
1183 | if clean: | |
1184 | ret = _clean(repo, checkout) |
|
1184 | ret = _clean(repo, checkout) | |
1185 | else: |
|
1185 | else: | |
1186 | if updatecheck == mergemod.UPDATECHECK_ABORT: |
|
1186 | if updatecheck == mergemod.UPDATECHECK_ABORT: | |
1187 | cmdutil.bailifchanged(repo, merge=False) |
|
1187 | cmdutil.bailifchanged(repo, merge=False) | |
1188 | updatecheck = mergemod.UPDATECHECK_NONE |
|
1188 | updatecheck = mergemod.UPDATECHECK_NONE | |
1189 | ret = _update(repo, checkout, updatecheck=updatecheck) |
|
1189 | ret = _update(repo, checkout, updatecheck=updatecheck) | |
1190 |
|
1190 | |||
1191 | if not ret and movemarkfrom: |
|
1191 | if not ret and movemarkfrom: | |
1192 | if movemarkfrom == repo[b'.'].node(): |
|
1192 | if movemarkfrom == repo[b'.'].node(): | |
1193 | pass # no-op update |
|
1193 | pass # no-op update | |
1194 | elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()): |
|
1194 | elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()): | |
1195 | b = ui.label(repo._activebookmark, b'bookmarks.active') |
|
1195 | b = ui.label(repo._activebookmark, b'bookmarks.active') | |
1196 | ui.status(_(b"updating bookmark %s\n") % b) |
|
1196 | ui.status(_(b"updating bookmark %s\n") % b) | |
1197 | else: |
|
1197 | else: | |
1198 | # this can happen with a non-linear update |
|
1198 | # this can happen with a non-linear update | |
1199 | b = ui.label(repo._activebookmark, b'bookmarks') |
|
1199 | b = ui.label(repo._activebookmark, b'bookmarks') | |
1200 | ui.status(_(b"(leaving bookmark %s)\n") % b) |
|
1200 | ui.status(_(b"(leaving bookmark %s)\n") % b) | |
1201 | bookmarks.deactivate(repo) |
|
1201 | bookmarks.deactivate(repo) | |
1202 | elif brev in repo._bookmarks: |
|
1202 | elif brev in repo._bookmarks: | |
1203 | if brev != repo._activebookmark: |
|
1203 | if brev != repo._activebookmark: | |
1204 | b = ui.label(brev, b'bookmarks.active') |
|
1204 | b = ui.label(brev, b'bookmarks.active') | |
1205 | ui.status(_(b"(activating bookmark %s)\n") % b) |
|
1205 | ui.status(_(b"(activating bookmark %s)\n") % b) | |
1206 | bookmarks.activate(repo, brev) |
|
1206 | bookmarks.activate(repo, brev) | |
1207 | elif brev: |
|
1207 | elif brev: | |
1208 | if repo._activebookmark: |
|
1208 | if repo._activebookmark: | |
1209 | b = ui.label(repo._activebookmark, b'bookmarks') |
|
1209 | b = ui.label(repo._activebookmark, b'bookmarks') | |
1210 | ui.status(_(b"(leaving bookmark %s)\n") % b) |
|
1210 | ui.status(_(b"(leaving bookmark %s)\n") % b) | |
1211 | bookmarks.deactivate(repo) |
|
1211 | bookmarks.deactivate(repo) | |
1212 |
|
1212 | |||
1213 | if warndest: |
|
1213 | if warndest: | |
1214 | destutil.statusotherdests(ui, repo) |
|
1214 | destutil.statusotherdests(ui, repo) | |
1215 |
|
1215 | |||
1216 | return ret |
|
1216 | return ret | |
1217 |
|
1217 | |||
1218 |
|
1218 | |||
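# The block below is an illustrative, self-contained sketch, not part of
# hg.py: it shows how the 'commands.update.check' value resolved above falls
# back to the LINEAR behaviour when unset or invalid. The byte values mirror
# what the merge module is assumed to define; resolve_updatecheck is a
# hypothetical helper name used only for this example.
UPDATECHECK_ABORT = b'abort'
UPDATECHECK_NONE = b'none'
UPDATECHECK_LINEAR = b'linear'
UPDATECHECK_NO_CONFLICT = b'noconflict'
_VALID = {
    UPDATECHECK_ABORT,
    UPDATECHECK_NONE,
    UPDATECHECK_LINEAR,
    UPDATECHECK_NO_CONFLICT,
}


def resolve_updatecheck(configured):
    # unset (None) or unknown values fall back to 'linear', matching the
    # two-step validation at the top of updatetotally()
    if configured not in _VALID:
        return UPDATECHECK_LINEAR
    return configured


assert resolve_updatecheck(None) == UPDATECHECK_LINEAR
assert resolve_updatecheck(b'noconflict') == UPDATECHECK_NO_CONFLICT
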
def merge(
    ctx,
    force=False,
    remind=True,
    labels=None,
):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    if stats.unresolvedcount:
        repo.ui.status(
            _(
                b"use 'hg resolve' to retry unresolved file merges "
                b"or 'hg merge --abort' to abandon\n"
            )
        )
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return stats.unresolvedcount > 0


def abortmerge(ui, repo):
    ms = mergestatemod.mergestate.read(repo)
    if ms.active():
        # there were conflicts
        node = ms.localctx.hex()
    else:
        # there were no conflicts, mergestate was not stored
        node = repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)


def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    srcs = urlutil.get_pull_paths(repo, ui, [source], opts.get(b'branch'))
    srcs = list(srcs)
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    source, branches = srcs[0]
    if subpath is not None:
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            source = bytes(subpath)
        else:
            p = urlutil.url(source)
            p.path = os.path.normpath(b'%s/%s' % (p.path, subpath))
            source = bytes(p)
    other = peer(repo, opts, source)
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes


def incoming(ui, repo, source, opts, subpath=None):
    def subreporecurse():
        ret = 1
        if opts.get(b'subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [
                p for p in other.changelog.parents(n) if p != repo.nullid
            ]
            if opts.get(b'no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )


def _outgoing(ui, repo, dests, opts, subpath=None):
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.pushloc or path.loc
        if subpath is not None:
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                dest = bytes(subpath)
            else:
                p = urlutil.url(dest)
                p.path = os.path.normpath(b'%s/%s' % (p.path, subpath))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            others.append(other)
        except:  # re-raises
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others


def _outgoing_recurse(ui, repo, dests, opts):
    ret = 1
    if opts.get(b'subrepos'):
        ctx = repo[None]
        for subpath in sorted(ctx.substate):
            sub = ctx.sub(subpath)
            ret = min(ret, sub.outgoing(ui, dests, opts))
    return ret


def _outgoing_filter(repo, revs, opts):
    """apply revision filtering/ordering option for outgoing"""
    limit = logcmdutil.getlimit(opts)
    no_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    if limit is None and not no_merges:
        for r in revs:
            yield r
        return

    count = 0
    cl = repo.changelog
    for n in revs:
        if limit is not None and count >= limit:
            break
        parents = [p for p in cl.parents(n) if p != repo.nullid]
        if no_merges and len(parents) == 2:
            continue
        count += 1
        yield n

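# A small self-contained sketch (hypothetical names, plain integers standing
# in for nodes, not hg.py API) of the generator pattern _outgoing_filter()
# uses above: a fast path when no limit and no merge filtering apply,
# otherwise counting only the items actually yielded against the limit.
def filter_revs(revs, limit=None, skip_even=False):
    # fast path: nothing to filter, yield everything
    if limit is None and not skip_even:
        for r in revs:
            yield r
        return
    count = 0
    for r in revs:
        if limit is not None and count >= limit:
            break
        if skip_even and r % 2 == 0:
            continue  # skipped items do not count against the limit
        count += 1
        yield r


assert list(filter_revs(range(6), limit=2, skip_even=True)) == [1, 3]
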
def outgoing(ui, repo, dests, opts, subpath=None):
    if opts.get(b'graph'):
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if o:
            ret = 0

            if opts.get(b'graph'):
                revdag = logcmdutil.graphrevs(repo, o, opts)
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(
                    ui, repo, opts, buffered=True
                )
                logcmdutil.displaygraph(
                    ui, repo, revdag, displayer, graphmod.asciiedges
                )
            else:
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
                for n in _outgoing_filter(repo, o, opts):
                    displayer.show(repo[n])
                displayer.close()
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, o)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret  # exit code is zero since we found outgoing changes
    finally:
        for oth in others:
            oth.close()


def verify(repo, level=None):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret


def remoteui(src, opts):
    """build a remote ui from ui or repo and opts"""
    if util.safehasattr(src, b'baseui'):  # looks like a repository
        dst = src.baseui.copy()  # drop repo-specific config
        src = src.ui  # copy target options from repo
    else:  # assume it's a global ui object
        dst = src.copy()  # keep all global options

    # copy ssh-specific options
    for o in b'ssh', b'remotecmd':
        v = opts.get(o) or src.config(b'ui', o)
        if v:
            dst.setconfig(b"ui", o, v, b'copied')

    # copy bundle-specific options
    r = src.config(b'bundle', b'mainreporoot')
    if r:
        dst.setconfig(b'bundle', b'mainreporoot', r, b'copied')

    # copy selected local settings to the remote ui
    for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, b'copied')
    v = src.config(b'web', b'cacerts')
    if v:
        dst.setconfig(b'web', b'cacerts', util.expandpath(v), b'copied')

    return dst

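# Illustrative sketch only (plain dicts, hypothetical names; not the ui API):
# the selective-copy pattern remoteui() applies above, where explicit options
# win over the source config and only whitelisted sections are propagated.
def copy_remote_config(src, opts, sections=('auth', 'hostsecurity')):
    dst = {}
    # explicit command-line options take precedence over configured values
    for key in ('ssh', 'remotecmd'):
        value = opts.get(key) or src.get(key)
        if value:
            dst[key] = value
    # copy whole whitelisted sections verbatim
    for sect in sections:
        if sect in src:
            dst[sect] = dict(src[sect])
    return dst


cfg = copy_remote_config({'ssh': 'ssh -C', 'auth': {'x.prefix': '*'}}, {})
assert cfg['ssh'] == 'ssh -C' and cfg['auth'] == {'x.prefix': '*'}
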
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]


class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        c._state = self._state
        c.mtime = self.mtime
        return c
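
# A runnable sketch (standard library only, hypothetical helper names) of the
# staleness heuristic cachedlocalrepo uses: snapshot (mtime, size) for a set
# of files and treat any difference in the snapshot as "the repo changed".
import os
import stat


def snapshot_state(paths):
    state = []
    for p in paths:
        st = os.stat(p)
        # mtime granularity and equal-size rewrites make this heuristic,
        # not exact, as the fetch() docstring above warns
        state.append((st[stat.ST_MTIME], st.st_size))
    return tuple(state)


def has_changed(paths, previous_state):
    return snapshot_state(paths) != previous_state
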
@@ -1,3763 +1,3769 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import functools
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullrev,
    sha1nodeconstants,
    short,
)
from .pycompat import (
    delattr,
    getattr,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    bundlecaches,
    changegroup,
    color,
    commit,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    match as matchmod,
    mergestate as mergestatemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    rcutil,
    repoview,
    requirements as requirementsmod,
    revlog,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
    wireprototypes,
)

from .interfaces import (
    repository,
    util as interfaceutil,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
    urlutil,
)

from .revlogutils import (
    concurrency_checker as revlogchecker,
    constants as revlogconst,
    sidedata as sidedatamod,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()


class _basefilecache(scmutil.filecache):
    """All filecache usage on a repo is done for logic that should be unfiltered"""

    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        return super(_basefilecache, self).set(repo.unfiltered(), value)


class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""

    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)


class storecache(_basefilecache):
    """filecache for files in the store"""

    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b''))

    def join(self, obj, fname):
        return obj.sjoin(fname)


class mixedrepostorecache(_basefilecache):
    """filecache for a mix of files in .hg/store and outside"""

    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        fname, location = fnameandlocation
        if location == b'plain':
            return obj.vfs.join(fname)
        else:
            if location != b'':
                raise error.ProgrammingError(
                    b'unexpected location: %s' % location
                )
            return obj.sjoin(fname)


def isfilecached(repo, name):
    """check if a repo has already cached "name" filecache-ed property

    This returns (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True


class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)


class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())


def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""

    @functools.wraps(orig)
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)

    return wrapper

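# Self-contained sketch (hypothetical names, not localrepo.py API) of the
# decorator pattern unfilteredmethod uses above: functools.wraps preserves the
# wrapped function's metadata while the wrapper normalizes the first argument
# before delegating, just as the repo is swapped for repo.unfiltered().
def with_normalized_first_arg(normalize):
    def decorator(orig):
        @functools.wraps(orig)
        def wrapper(first, *args, **kwargs):
            return orig(normalize(first), *args, **kwargs)

        return wrapper

    return decorator


@with_normalized_first_arg(str.lower)
def greet(name):
    """say hello"""
    return 'hello ' + name


assert greet('WORLD') == 'hello world'
assert greet.__doc__ == 'say hello'  # metadata survives thanks to wraps
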
moderncaps = {
    b'lookup',
    b'branchmap',
    b'pushkey',
    b'known',
    b'getbundle',
    b'unbundle',
}
legacycaps = moderncaps.union({b'changegroupsubset'})


@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after sendcommands()'
            )

        if self._closed:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after close()'
            )

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True

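# Runnable sketch of the "resolved future" idiom callcommand() uses above,
# written against the standard concurrent.futures API directly (pycompat is
# assumed here to be a thin shim over it on Python 3); call_resolved is a
# hypothetical helper name for illustration only.
from concurrent.futures import Future


def call_resolved(fn, *args, **kwargs):
    # run synchronously, but hand back a Future so callers can use one API
    # for both local and truly asynchronous peers
    f = Future()
    try:
        result = fn(*args, **kwargs)
    except Exception as exc:
        f.set_exception(exc)  # consumers see the error on f.result()
    else:
        f.set_result(result)
    return f


assert call_resolved(lambda x: x + 1, 41).result() == 42
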
268 | @interfaceutil.implementer(repository.ipeercommands) |
|
268 | @interfaceutil.implementer(repository.ipeercommands) | |
269 | class localpeer(repository.peer): |
|
269 | class localpeer(repository.peer): | |
270 | '''peer for a local repo; reflects only the most recent API''' |
|
270 | '''peer for a local repo; reflects only the most recent API''' | |
271 |
|
271 | |||
272 | def __init__(self, repo, caps=None): |
|
272 | def __init__(self, repo, caps=None): | |
273 | super(localpeer, self).__init__() |
|
273 | super(localpeer, self).__init__() | |
274 |
|
274 | |||
275 | if caps is None: |
|
275 | if caps is None: | |
276 | caps = moderncaps.copy() |
|
276 | caps = moderncaps.copy() | |
277 | self._repo = repo.filtered(b'served') |
|
277 | self._repo = repo.filtered(b'served') | |
278 | self.ui = repo.ui |
|
278 | self.ui = repo.ui | |
279 |
|
279 | |||
280 | if repo._wanted_sidedata: |
|
280 | if repo._wanted_sidedata: | |
281 | formatted = bundle2.format_remote_wanted_sidedata(repo) |
|
281 | formatted = bundle2.format_remote_wanted_sidedata(repo) | |
282 | caps.add(b'exp-wanted-sidedata=' + formatted) |
|
282 | caps.add(b'exp-wanted-sidedata=' + formatted) | |
283 |
|
283 | |||
284 | self._caps = repo._restrictcapabilities(caps) |
|
284 | self._caps = repo._restrictcapabilities(caps) | |
285 |
|
285 | |||
286 | # Begin of _basepeer interface. |
|
286 | # Begin of _basepeer interface. | |
287 |
|
287 | |||
288 | def url(self): |
|
288 | def url(self): | |
289 | return self._repo.url() |
|
289 | return self._repo.url() | |
290 |
|
290 | |||
291 | def local(self): |
|
291 | def local(self): | |
292 | return self._repo |
|
292 | return self._repo | |
293 |
|
293 | |||
294 | def peer(self): |
|
294 | def peer(self): | |
295 | return self |
|
295 | return self | |
296 |
|
296 | |||
297 | def canpush(self): |
|
297 | def canpush(self): | |
298 | return True |
|
298 | return True | |
299 |
|
299 | |||
300 | def close(self): |
|
300 | def close(self): | |
301 | self._repo.close() |
|
301 | self._repo.close() | |
302 |
|
302 | |||
303 | # End of _basepeer interface. |
|
303 | # End of _basepeer interface. | |
304 |
|
304 | |||
305 | # Begin of _basewirecommands interface. |
|
305 | # Begin of _basewirecommands interface. | |
306 |
|
306 | |||
307 | def branchmap(self): |
|
307 | def branchmap(self): | |
308 | return self._repo.branchmap() |
|
308 | return self._repo.branchmap() | |
309 |
|
309 | |||
310 | def capabilities(self): |
|
310 | def capabilities(self): | |
311 | return self._caps |
|
311 | return self._caps | |
312 |
|
312 | |||
313 | def clonebundles(self): |
|
313 | def clonebundles(self): | |
314 | return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE) |
|
314 | return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE) | |
315 |
|
315 | |||
316 | def debugwireargs(self, one, two, three=None, four=None, five=None): |
|
316 | def debugwireargs(self, one, two, three=None, four=None, five=None): | |
317 | """Used to test argument passing over the wire""" |
|
317 | """Used to test argument passing over the wire""" | |
318 | return b"%s %s %s %s %s" % ( |
|
318 | return b"%s %s %s %s %s" % ( | |
319 | one, |
|
319 | one, | |
320 | two, |
|
320 | two, | |
321 | pycompat.bytestr(three), |
|
321 | pycompat.bytestr(three), | |
322 | pycompat.bytestr(four), |
|
322 | pycompat.bytestr(four), | |
323 | pycompat.bytestr(five), |
|
323 | pycompat.bytestr(five), | |
324 | ) |
|
324 | ) | |
325 |
|
325 | |||
326 | def getbundle( |
|
326 | def getbundle( | |
327 | self, |
|
327 | self, | |
328 | source, |
|
328 | source, | |
329 | heads=None, |
|
329 | heads=None, | |
330 | common=None, |
|
330 | common=None, | |
331 | bundlecaps=None, |
|
331 | bundlecaps=None, | |
332 | remote_sidedata=None, |
|
332 | remote_sidedata=None, | |
333 | **kwargs |
|
333 | **kwargs | |
334 | ): |
|
334 | ): | |
335 | chunks = exchange.getbundlechunks( |
|
335 | chunks = exchange.getbundlechunks( | |
336 | self._repo, |
|
336 | self._repo, | |
337 | source, |
|
337 | source, | |
338 | heads=heads, |
|
338 | heads=heads, | |
339 | common=common, |
|
339 | common=common, | |
340 | bundlecaps=bundlecaps, |
|
340 | bundlecaps=bundlecaps, | |
341 | remote_sidedata=remote_sidedata, |
|
341 | remote_sidedata=remote_sidedata, | |
342 | **kwargs |
|
342 | **kwargs | |
343 | )[1] |
|
343 | )[1] | |
344 | cb = util.chunkbuffer(chunks) |
|
344 | cb = util.chunkbuffer(chunks) | |
345 |
|
345 | |||
346 | if exchange.bundle2requested(bundlecaps): |
|
346 | if exchange.bundle2requested(bundlecaps): | |
347 | # When requesting a bundle2, getbundle returns a stream to make the |
|
347 | # When requesting a bundle2, getbundle returns a stream to make the | |
348 | # wire level function happier. We need to build a proper object |
|
348 | # wire level function happier. We need to build a proper object | |
349 | # from it in local peer. |
|
349 | # from it in local peer. | |
350 | return bundle2.getunbundler(self.ui, cb) |
|
350 | return bundle2.getunbundler(self.ui, cb) | |
351 | else: |
|
351 | else: | |
352 | return changegroup.getunbundler(b'01', cb, None) |
|
352 | return changegroup.getunbundler(b'01', cb, None) | |
353 |
|
353 | |||
354 | def heads(self): |
|
354 | def heads(self): | |
355 | return self._repo.heads() |
|
355 | return self._repo.heads() | |
356 |
|
356 | |||
357 | def known(self, nodes): |
|
357 | def known(self, nodes): | |
358 | return self._repo.known(nodes) |
|
358 | return self._repo.known(nodes) | |
359 |
|
359 | |||
360 | def listkeys(self, namespace): |
|
    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                if util.safehasattr(ret, b'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(
                _(b'push failed:'), stringutil.forcebytestr(exc)
            )

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.


@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    """peer extension which implements legacy methods too; used for tests with
    restricted capabilities"""

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=bases, ancestorsof=heads
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    # End of baselegacywirecommands interface.

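# A minimal sketch of how callers typically drive the peer interface above,
# using the command executor pattern; the peer variable and command name are
# illustrative only:
#
#     with peer.commandexecutor() as e:
#         f = e.callcommand(b'lookup', {b'key': b'tip'})
#         node = f.result()
#
# For a localpeer, localcommandexecutor resolves such futures by calling
# directly into the underlying repository object.
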
# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle that requirement.
featuresetupfuncs = set()

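# A minimal sketch of how an extension would hook into ``featuresetupfuncs``.
# The extension module and requirement name are hypothetical; note that only
# functions defined in loaded extensions are invoked (see
# gathersupportedrequirements() below):
#
#     # myext.py (hypothetical extension)
#     from mercurial import localrepo
#
#     def featuresetup(ui, supported):
#         # declare that we know how to open repos with this requirement
#         supported.add(b'exp-myext-storage')
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)
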
def _getsharedvfs(hgvfs, requirements):
    """returns the vfs object pointing to root of shared source
    repo for a shared repository

    hgvfs is vfs pointing at .hg/ of current repo (shared one)
    requirements is a set of requirements of current repo (shared one)
    """
    # The ``shared`` or ``relshared`` requirements indicate the
    # store lives in the path contained in the ``.hg/sharedpath`` file.
    # This is an absolute path for ``shared`` and relative to
    # ``.hg/`` for ``relshared``.
    sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
    if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
        sharedpath = util.normpath(hgvfs.join(sharedpath))

    sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

    if not sharedvfs.exists():
        raise error.RepoError(
            _(b'.hg/sharedpath points to nonexistent directory %s')
            % sharedvfs.base
        )
    return sharedvfs

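# Illustrative ``.hg/sharedpath`` contents for the two cases handled above
# (paths are made up):
#
#     shared:    /home/alice/src/main-repo/.hg
#     relshared: ../../main-repo/.hg    (resolved relative to this .hg/)
#
# Either way, the resulting vfs must point at an existing directory or
# repository opening aborts with a RepoError.
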
def _readrequires(vfs, allowmissing):
    """reads the requires file present at root of this vfs
    and returns a set of requirements

    If allowmissing is True, we suppress ENOENT if raised"""
    # requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(vfs.read(b'requires').splitlines())
    except IOError as e:
        if not (allowmissing and e.errno == errno.ENOENT):
            raise
        requirements = set()
    return requirements

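# A typical ``.hg/requires`` file and the set it parses to (the exact
# contents vary by repository format; this sample is illustrative):
#
#     dotencode
#     fncache
#     generaldelta
#     revlogv1
#     store
#
#     _readrequires(hgvfs, True)
#     -> {b'dotencode', b'fncache', b'generaldelta', b'revlogv1', b'store'}
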
def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
    # Whether this repository is a shared one or not
    shared = False
    # If this repository is shared, vfs pointing to the shared repo
    sharedvfs = None

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        except ValueError as e:
            # Can be raised on Python 3.8 when path is invalid.
            raise error.Abort(
                _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
            )

        raise error.RepoError(_(b'repository %s not found') % path)

    requirements = _readrequires(hgvfs, True)
    shared = (
        requirementsmod.SHARED_REQUIREMENT in requirements
        or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
    )
    storevfs = None
    if shared:
        # This is a shared repo
        sharedvfs = _getsharedvfs(hgvfs, requirements)
        storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
    else:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'))

    # If .hg/requires contains the sharesafe requirement, it means
    # there exists a `.hg/store/requires` too and we should read it.
    # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store
    # requirement is present. We never write SHARESAFE_REQUIREMENT for a
    # repo if store is not present; see checkrequirementscompat() for that.
    #
    # However, if SHARESAFE_REQUIREMENT is not present, it means that the
    # repository was shared the old way. We check the share source
    # .hg/requires for SHARESAFE_REQUIREMENT to detect whether the current
    # repository needs to be reshared.
    hint = _(b"see `hg help config.format.use-share-safe` for more information")
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements:

        if (
            shared
            and requirementsmod.SHARESAFE_REQUIREMENT
            not in _readrequires(sharedvfs, True)
        ):
            mismatch_warn = ui.configbool(
                b'share', b'safe-mismatch.source-not-safe.warn'
            )
            mismatch_config = ui.config(
                b'share', b'safe-mismatch.source-not-safe'
            )
            if mismatch_config in (
                b'downgrade-allow',
                b'allow',
                b'downgrade-abort',
            ):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.downgrade_share_to_non_safe(
                    ui,
                    hgvfs,
                    sharedvfs,
                    requirements,
                    mismatch_config,
                    mismatch_warn,
                )
            elif mismatch_config == b'abort':
                raise error.Abort(
                    _(b"share source does not support share-safe requirement"),
                    hint=hint,
                )
            else:
                raise error.Abort(
                    _(
                        b"share-safe mismatch with source.\nUnrecognized"
                        b" value '%s' of `share.safe-mismatch.source-not-safe`"
                        b" set."
                    )
                    % mismatch_config,
                    hint=hint,
                )
        else:
            requirements |= _readrequires(storevfs, False)
    elif shared:
        sourcerequires = _readrequires(sharedvfs, False)
        if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
            mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
            mismatch_warn = ui.configbool(
                b'share', b'safe-mismatch.source-safe.warn'
            )
            if mismatch_config in (
                b'upgrade-allow',
                b'allow',
                b'upgrade-abort',
            ):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.upgrade_share_to_safe(
                    ui,
                    hgvfs,
                    storevfs,
                    requirements,
                    mismatch_config,
                    mismatch_warn,
                )
            elif mismatch_config == b'abort':
                raise error.Abort(
                    _(
                        b'version mismatch: source uses share-safe'
                        b' functionality while the current share does not'
                    ),
                    hint=hint,
                )
            else:
                raise error.Abort(
                    _(
                        b"share-safe mismatch with source.\nUnrecognized"
                        b" value '%s' of `share.safe-mismatch.source-safe` set."
                    )
                    % mismatch_config,
                    hint=hint,
                )

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. If the `shared` or
    # `relshared` requirements are present, this indicates the current
    # repository is a share and the store exists in the path mentioned in
    # `.hg/sharedpath`
    if shared:
        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
        features.add(repository.REPO_FEATURE_SHARED_STORAGE)
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(
        requirements,
        storebasepath,
        lambda base: vfsmod.vfs(base, cacheaudited=True),
    )
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
        features.add(repository.REPO_FEATURE_SIDE_DATA)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The cache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(
            ui=ui,
            intents=intents,
            requirements=requirements,
            features=features,
            wdirvfs=wdirvfs,
            hgvfs=hgvfs,
            store=store,
            storevfs=storevfs,
            storeoptions=storevfs.options,
            cachevfs=cachevfs,
            wcachevfs=wcachevfs,
            extensionmodulenames=extensionmodulenames,
            extrastate=extrastate,
            baseclasses=bases,
        )

        if not isinstance(typ, type):
            raise error.ProgrammingError(
                b'unable to construct type for %s' % iface
            )

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(
        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
    )

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents,
    )

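# A minimal sketch of the extension hook described in the docstring above:
# wrap one of the REPO_INTERFACES factory functions so the derived repository
# type gains an extra base class. ``makemain`` is assumed here to be the
# factory for the main repository interface defined elsewhere in this module;
# the extension and mixin names are hypothetical:
#
#     # myext.py (hypothetical extension)
#     from mercurial import extensions, localrepo
#
#     class fancymixin(object):
#         def hello(self):
#             return b'hello from myext'
#
#     def wrappedmakemain(orig, **kwargs):
#         # no-op unless this extension is loaded for the repo being built
#         if __name__ not in kwargs['extensionmodulenames']:
#             return orig(**kwargs)
#         base = orig(**kwargs)
#         return type('fancyrepo', (fancymixin, base), {})
#
#     def uisetup(ui):
#         extensions.wrapfunction(localrepo, 'makemain', wrappedmakemain)
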
def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.

    sharedvfs is a vfs object pointing to the source repo if the current
    one is a shared one
    """
    if not rcutil.use_repo_hgrc():
        return False

    ret = False
    # first load config from shared source if we have to
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
        try:
            ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
            ret = True
        except IOError:
            pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    return ret

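# A minimal sketch of the monkeypatching hook mentioned in the docstring
# above; the extra config file name is hypothetical:
#
#     # myext.py (hypothetical extension)
#     from mercurial import extensions, localrepo
#
#     def wrappedloadhgrc(orig, ui, wdirvfs, hgvfs, requirements, *args):
#         ret = orig(ui, wdirvfs, hgvfs, requirements, *args)
#         try:
#             # pull in one extra per-repo config file
#             ui.readconfig(hgvfs.join(b'hgrc-myext'), root=wdirvfs.base)
#             ret = True
#         except IOError:
#             pass
#         return ret
#
#     def uisetup(ui):
#         extensions.wrapfunction(localrepo, 'loadhgrc', wrappedloadhgrc)
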
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'git': [b'git'],
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source=b'autoload')

def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(b'revlog-compression-zstd')

    return supported

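# Illustration of the derived names produced above: with the zstd engine
# available and revlog-capable, the supported set gains both
# b'exp-compression-zstd' and b'revlog-compression-zstd', so repositories
# recorded under either spelling of the requirement remain openable.
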
def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(missing)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )

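# Behavior sketch with illustrative values:
#
#     ensurerequirementsrecognized(
#         {b'revlogv1', b'store', b'exp-unknown-feature'},
#         {b'revlogv1', b'store'},
#     )
#     -> error.RequirementError: repository requires features unknown to
#        this Mercurial: exp-unknown-feature
#
# Empty entries or entries not starting with an alphanumeric character
# instead abort with the ".hg/requires file is corrupt" error.
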
def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if (
        requirementsmod.SPARSE_REQUIREMENT in requirements
        and not sparse.enabled
    ):
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extension to access'
            )
        )

def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if requirementsmod.STORE_REQUIREMENT in requirements:
        if requirementsmod.FNCACHE_REQUIREMENT in requirements:
            dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
            return storemod.fncachestore(path, vfstype, dotencode)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)

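# Requirement-to-store mapping implemented above:
#
#     store + fncache -> fncachestore (with ``dotencode`` additionally
#                        encoding paths that begin with a period or space)
#     store only      -> encodedstore
#     neither         -> basicstore (very old repository layouts)
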
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if (
        requirementsmod.REVLOGV1_REQUIREMENT in requirements
        or requirementsmod.REVLOGV2_REQUIREMENT in requirements
    ):
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:  # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        copiesextramode = (b'changeset-only', b'compatibility')
        if writecopiesto in copiesextramode:
            options[b'copies-storage'] = b'extra'

    return options

1012 | def resolverevlogstorevfsoptions(ui, requirements, features): |
|
1012 | def resolverevlogstorevfsoptions(ui, requirements, features): | |
1013 | """Resolve opener options specific to revlogs.""" |
|
1013 | """Resolve opener options specific to revlogs.""" | |
1014 |
|
1014 | |||
1015 | options = {} |
|
1015 | options = {} | |
1016 | options[b'flagprocessors'] = {} |
|
1016 | options[b'flagprocessors'] = {} | |
1017 |
|
1017 | |||
1018 | if requirementsmod.REVLOGV1_REQUIREMENT in requirements: |
|
1018 | if requirementsmod.REVLOGV1_REQUIREMENT in requirements: | |
1019 | options[b'revlogv1'] = True |
|
1019 | options[b'revlogv1'] = True | |
1020 | if requirementsmod.REVLOGV2_REQUIREMENT in requirements: |
|
1020 | if requirementsmod.REVLOGV2_REQUIREMENT in requirements: | |
1021 | options[b'revlogv2'] = True |
|
1021 | options[b'revlogv2'] = True | |
1022 |
|
1022 | |||
1023 | if requirementsmod.GENERALDELTA_REQUIREMENT in requirements: |
|
1023 | if requirementsmod.GENERALDELTA_REQUIREMENT in requirements: | |
1024 | options[b'generaldelta'] = True |
|
1024 | options[b'generaldelta'] = True | |
1025 |
|
1025 | |||
1026 | # experimental config: format.chunkcachesize |
|
1026 | # experimental config: format.chunkcachesize | |
1027 | chunkcachesize = ui.configint(b'format', b'chunkcachesize') |
|
1027 | chunkcachesize = ui.configint(b'format', b'chunkcachesize') | |
1028 | if chunkcachesize is not None: |
|
1028 | if chunkcachesize is not None: | |
1029 | options[b'chunkcachesize'] = chunkcachesize |
|
1029 | options[b'chunkcachesize'] = chunkcachesize | |
1030 |
|
1030 | |||
1031 | deltabothparents = ui.configbool( |
|
1031 | deltabothparents = ui.configbool( | |
1032 | b'storage', b'revlog.optimize-delta-parent-choice' |
|
1032 | b'storage', b'revlog.optimize-delta-parent-choice' | |
1033 | ) |
|
1033 | ) | |
1034 | options[b'deltabothparents'] = deltabothparents |
|
1034 | options[b'deltabothparents'] = deltabothparents | |
1035 |
|
1035 | |||
1036 | lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta') |
|
1036 | lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta') | |
1037 | lazydeltabase = False |
|
1037 | lazydeltabase = False | |
1038 | if lazydelta: |
|
1038 | if lazydelta: | |
1039 | lazydeltabase = ui.configbool( |
|
1039 | lazydeltabase = ui.configbool( | |
1040 | b'storage', b'revlog.reuse-external-delta-parent' |
|
1040 | b'storage', b'revlog.reuse-external-delta-parent' | |
1041 | ) |
|
1041 | ) | |
1042 | if lazydeltabase is None: |
|
1042 | if lazydeltabase is None: | |
1043 | lazydeltabase = not scmutil.gddeltaconfig(ui) |
|
1043 | lazydeltabase = not scmutil.gddeltaconfig(ui) | |
1044 | options[b'lazydelta'] = lazydelta |
|
1044 | options[b'lazydelta'] = lazydelta | |
1045 | options[b'lazydeltabase'] = lazydeltabase |
|
1045 | options[b'lazydeltabase'] = lazydeltabase | |
1046 |
|
1046 | |||
1047 | chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan') |
|
1047 | chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan') | |
1048 | if 0 <= chainspan: |
|
1048 | if 0 <= chainspan: | |
1049 | options[b'maxdeltachainspan'] = chainspan |
|
1049 | options[b'maxdeltachainspan'] = chainspan | |
1050 |
|
1050 | |||
1051 | mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold') |
|
1051 | mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold') | |
1052 | if mmapindexthreshold is not None: |
|
1052 | if mmapindexthreshold is not None: | |
1053 | options[b'mmapindexthreshold'] = mmapindexthreshold |
|
1053 | options[b'mmapindexthreshold'] = mmapindexthreshold | |
1054 |
|
1054 | |||
1055 | withsparseread = ui.configbool(b'experimental', b'sparse-read') |
|
1055 | withsparseread = ui.configbool(b'experimental', b'sparse-read') | |
1056 | srdensitythres = float( |
|
1056 | srdensitythres = float( | |
1057 | ui.config(b'experimental', b'sparse-read.density-threshold') |
|
1057 | ui.config(b'experimental', b'sparse-read.density-threshold') | |
1058 | ) |
|
1058 | ) | |
1059 | srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size') |
|
1059 | srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size') | |
1060 | options[b'with-sparse-read'] = withsparseread |
|
1060 | options[b'with-sparse-read'] = withsparseread | |
1061 | options[b'sparse-read-density-threshold'] = srdensitythres |
|
1061 | options[b'sparse-read-density-threshold'] = srdensitythres | |
1062 | options[b'sparse-read-min-gap-size'] = srmingapsize |
|
1062 | options[b'sparse-read-min-gap-size'] = srmingapsize | |
1063 |
|
1063 | |||
1064 | sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements |
|
1064 | sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements | |
1065 | options[b'sparse-revlog'] = sparserevlog |
|
1065 | options[b'sparse-revlog'] = sparserevlog | |
1066 | if sparserevlog: |
|
1066 | if sparserevlog: | |
1067 | options[b'generaldelta'] = True |
|
1067 | options[b'generaldelta'] = True | |
1068 |
|
1068 | |||
1069 | maxchainlen = None |
|
1069 | maxchainlen = None | |
1070 | if sparserevlog: |
|
1070 | if sparserevlog: | |
1071 | maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH |
|
1071 | maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH | |
1072 | # experimental config: format.maxchainlen |
|
1072 | # experimental config: format.maxchainlen | |
1073 | maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen) |
|
1073 | maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen) | |
1074 | if maxchainlen is not None: |
|
1074 | if maxchainlen is not None: | |
1075 | options[b'maxchainlen'] = maxchainlen |
|
1075 | options[b'maxchainlen'] = maxchainlen | |
1076 |
|
1076 | |||
1077 | for r in requirements: |
|
1077 | for r in requirements: | |
1078 | # we allow multiple compression engine requirement to co-exist because |
|
1078 | # we allow multiple compression engine requirement to co-exist because | |
1079 | # strickly speaking, revlog seems to support mixed compression style. |
|
1079 | # strickly speaking, revlog seems to support mixed compression style. | |
1080 | # |
|
1080 | # | |
1081 | # The compression used for new entries will be "the last one" |
|
1081 | # The compression used for new entries will be "the last one" | |
1082 | prefix = r.startswith |
|
1082 | prefix = r.startswith | |
1083 | if prefix(b'revlog-compression-') or prefix(b'exp-compression-'): |
|
1083 | if prefix(b'revlog-compression-') or prefix(b'exp-compression-'): | |
1084 | options[b'compengine'] = r.split(b'-', 2)[2] |
|
1084 | options[b'compengine'] = r.split(b'-', 2)[2] | |
1085 |
|
1085 | |||
1086 | options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level') |
|
1086 | options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level') | |
1087 | if options[b'zlib.level'] is not None: |
|
1087 | if options[b'zlib.level'] is not None: | |
1088 | if not (0 <= options[b'zlib.level'] <= 9): |
|
1088 | if not (0 <= options[b'zlib.level'] <= 9): | |
1089 | msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d') |
|
1089 | msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d') | |
1090 | raise error.Abort(msg % options[b'zlib.level']) |
|
1090 | raise error.Abort(msg % options[b'zlib.level']) | |
1091 | options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level') |
|
1091 | options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level') | |
1092 | if options[b'zstd.level'] is not None: |
|
1092 | if options[b'zstd.level'] is not None: | |
1093 | if not (0 <= options[b'zstd.level'] <= 22): |
|
1093 | if not (0 <= options[b'zstd.level'] <= 22): | |
1094 | msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d') |
|
1094 | msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d') | |
1095 | raise error.Abort(msg % options[b'zstd.level']) |
|
1095 | raise error.Abort(msg % options[b'zstd.level']) | |
1096 |
|
1096 | |||
    if requirementsmod.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True
    if requirementsmod.NODEMAP_REQUIREMENT in requirements:
        slow_path = ui.config(
            b'storage', b'revlog.persistent-nodemap.slow-path'
        )
        if slow_path not in (b'allow', b'warn', b'abort'):
            default = ui.config_default(
                b'storage', b'revlog.persistent-nodemap.slow-path'
            )
            msg = _(
                b'unknown value for config '
                b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
            )
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `persistent-nodemap` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-persistent-nodemap` "
            b"for details"
        )
        if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
        options[b'persistent-nodemap'] = True
        if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
            options[b'persistent-nodemap.mmap'] = True
        if ui.configbool(b'devel', b'persistent-nodemap'):
            options[b'devel-force-nodemap'] = True

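    # Illustrative hgrc usage of the knob validated above (assuming a
    # repository with the persistent nodemap requirement):
    #
    #   [storage]
    #   revlog.persistent-nodemap.slow-path = warn
    #
    # 'allow' stays silent, 'warn' emits the message built above, and
    # 'abort' refuses to open the repo without a fast implementation.
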
    return options


def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        return filelog.filelog(self.svfs, path)


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage


# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]


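# A minimal sketch (hypothetical extension code, not part of this module)
# of why the factories are captured behind lambdas: an extension can wrap
# the module-level function and the wrapper is still picked up when the
# repository type is derived.
#
#   from mercurial import extensions, localrepo
#
#   def wrapped_makefilestorage(orig, requirements, features, **kwargs):
#       storage = orig(requirements, features, **kwargs)
#       # ...inspect or substitute the returned storage type here...
#       return storage
#
#   extensions.wrapfunction(
#       localrepo, 'makefilestorage', wrapped_makefilestorage
#   )

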
@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

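    # A minimal usage sketch (assuming an existing repository on disk; the
    # path is a placeholder):
    #
    #   from mercurial import hg, ui as uimod
    #   repo = hg.repository(uimod.ui.load(), b'/path/to/repo')
    #   tip_ctx = repo[b'tip']
    #
    # ``hg.repository()`` is the recommended entry point, as noted in the
    # docstring above.
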
    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        requirementsmod.REVLOGV1_REQUIREMENT,
        requirementsmod.GENERALDELTA_REQUIREMENT,
        requirementsmod.TREEMANIFEST_REQUIREMENT,
        requirementsmod.COPIESSDC_REQUIREMENT,
        requirementsmod.REVLOGV2_REQUIREMENT,
        requirementsmod.SPARSEREVLOG_REQUIREMENT,
        requirementsmod.NODEMAP_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
        requirementsmod.SHARESAFE_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        requirementsmod.STORE_REQUIREMENT,
        requirementsmod.FNCACHE_REQUIREMENT,
        requirementsmod.SHARED_REQUIREMENT,
        requirementsmod.RELATIVE_SHARED_REQUIREMENT,
        requirementsmod.DOTENCODE_REQUIREMENT,
        requirementsmod.SPARSE_REQUIREMENT,
        requirementsmod.INTERNAL_PHASE_REQUIREMENT,
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX shouldn't dirstate be covered by the wlock?
        b'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }

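    # Hypothetical extension usage (illustrative only): an extension that
    # maintains its own lock-free state file under .hg/ could register it
    # with
    #
    #   localrepo.localrepository._wlockfreeprefix.add(b'myext-state')
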
    def __init__(
        self,
        baseui,
        ui,
        origroot,
        wdirvfs,
        hgvfs,
        requirements,
        supportedrequirements,
        sharedpath,
        store,
        cachevfs,
        wcachevfs,
        features,
        intents=None,
    ):
1272 | """Create a new local repository instance. |
|
1272 | """Create a new local repository instance. | |
1273 |
|
1273 | |||
1274 | Most callers should use ``hg.repository()``, ``localrepo.instance()``, |
|
1274 | Most callers should use ``hg.repository()``, ``localrepo.instance()``, | |
1275 | or ``localrepo.makelocalrepository()`` for obtaining a new repository |
|
1275 | or ``localrepo.makelocalrepository()`` for obtaining a new repository | |
1276 | object. |
|
1276 | object. | |
1277 |
|
1277 | |||
1278 | Arguments: |
|
1278 | Arguments: | |
1279 |
|
1279 | |||
1280 | baseui |
|
1280 | baseui | |
1281 | ``ui.ui`` instance that ``ui`` argument was based off of. |
|
1281 | ``ui.ui`` instance that ``ui`` argument was based off of. | |
1282 |
|
1282 | |||
1283 | ui |
|
1283 | ui | |
1284 | ``ui.ui`` instance for use by the repository. |
|
1284 | ``ui.ui`` instance for use by the repository. | |
1285 |
|
1285 | |||
1286 | origroot |
|
1286 | origroot | |
1287 | ``bytes`` path to working directory root of this repository. |
|
1287 | ``bytes`` path to working directory root of this repository. | |
1288 |
|
1288 | |||
1289 | wdirvfs |
|
1289 | wdirvfs | |
1290 | ``vfs.vfs`` rooted at the working directory. |
|
1290 | ``vfs.vfs`` rooted at the working directory. | |
1291 |
|
1291 | |||
1292 | hgvfs |
|
1292 | hgvfs | |
1293 | ``vfs.vfs`` rooted at .hg/ |
|
1293 | ``vfs.vfs`` rooted at .hg/ | |
1294 |
|
1294 | |||
1295 | requirements |
|
1295 | requirements | |
1296 | ``set`` of bytestrings representing repository opening requirements. |
|
1296 | ``set`` of bytestrings representing repository opening requirements. | |
1297 |
|
1297 | |||
1298 | supportedrequirements |
|
1298 | supportedrequirements | |
1299 | ``set`` of bytestrings representing repository requirements that we |
|
1299 | ``set`` of bytestrings representing repository requirements that we | |
1300 | know how to open. May be a supetset of ``requirements``. |
|
1300 | know how to open. May be a supetset of ``requirements``. | |
1301 |
|
1301 | |||
1302 | sharedpath |
|
1302 | sharedpath | |
1303 | ``bytes`` Defining path to storage base directory. Points to a |
|
1303 | ``bytes`` Defining path to storage base directory. Points to a | |
1304 | ``.hg/`` directory somewhere. |
|
1304 | ``.hg/`` directory somewhere. | |
1305 |
|
1305 | |||
1306 | store |
|
1306 | store | |
1307 | ``store.basicstore`` (or derived) instance providing access to |
|
1307 | ``store.basicstore`` (or derived) instance providing access to | |
1308 | versioned storage. |
|
1308 | versioned storage. | |
1309 |
|
1309 | |||
1310 | cachevfs |
|
1310 | cachevfs | |
1311 | ``vfs.vfs`` used for cache files. |
|
1311 | ``vfs.vfs`` used for cache files. | |
1312 |
|
1312 | |||
1313 | wcachevfs |
|
1313 | wcachevfs | |
1314 | ``vfs.vfs`` used for cache files related to the working copy. |
|
1314 | ``vfs.vfs`` used for cache files related to the working copy. | |
1315 |
|
1315 | |||
1316 | features |
|
1316 | features | |
1317 | ``set`` of bytestrings defining features/capabilities of this |
|
1317 | ``set`` of bytestrings defining features/capabilities of this | |
1318 | instance. |
|
1318 | instance. | |
1319 |
|
1319 | |||
1320 | intents |
|
1320 | intents | |
1321 | ``set`` of system strings indicating what this repo will be used |
|
1321 | ``set`` of system strings indicating what this repo will be used | |
1322 | for. |
|
1322 | for. | |
1323 | """ |
|
1323 | """ | |
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.nodeconstants = sha1nodeconstants
        self.nullid = self.nodeconstants.nullid
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

        self._wanted_sidedata = set()
        self._sidedata_computers = {}
        sidedatamod.set_sidedata_spec_for_repo(self)

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            # path prefixes covered by 'lock'
            vfs_path_prefixes = (
                b'journal.',
                b'undo.',
                b'strip-backup/',
                b'cache/',
            )
            if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
                elif repo._currentlock(repo._wlockref) is None:
                    # rest of vfs files are covered by 'wlock'
                    #
                    # exclude special files
                    for prefix in self._wlockfreeprefix:
                        if path.startswith(prefix):
                            return
                    repo.ui.develwarn(
                        b'write with no wlock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            return ret

        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        if self.ui.configbool(b'experimental', b'narrow'):
            caps.add(wireprototypes.NARROWCAP)
        return caps

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

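    # Illustrative behaviour (assuming the working copy's .hgsub declares a
    # subrepository ``sub``): ``repo._checknested(repo.root + b'/sub')``
    # returns True, while a path outside any subrepo returns False.
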
    def peer(self):
        return localpeer(self)  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

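    # For instance (illustrative): ``repo.filtered(b'served')`` always builds
    # its view from the unfiltered repository, so chaining ``filtered()``
    # calls never stacks more than one level of filtering.
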
    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction update changelog to content B
        # 3) outside transaction update bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are dropped.
        #
        # Having this happen during read is not great, but it becomes worse
        # when it happens during write, because the bookmarks to the "unknown"
        # nodes will be dropped for good. However, writes happen within locks.
        # This locking makes it possible to have a race-free consistent read.
        # For this purpose, data read from disk before locking is
        # "invalidated" right after the locks are taken. These invalidations
        # are "light": the `filecache` mechanism keeps the data in memory and
        # will reuse it if the underlying files did not change. Not parsing
        # the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would be
        # refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks` in
        # (4), the changelog file has already diverged from the content used
        # for loading `changelog` in (1)
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still happen
        # without the lock (with a narrower window), but it would no longer go
        # undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) We force `changelog` filecache to be tested
        # 4) cachestat for `changelog` are captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as the
        # cache stat computed in (1). As a result, at locking time:
        # * if the changelog did not change since (1) -> we can reuse the data
        # * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache(b'00changelog.i')
    def changelog(self):
        # load dirstate before changelog to avoid race see issue6303
        self.dirstate.prefetch_parents()
        return self.store.changelog(
            txnutil.mayhavepending(self.root),
            concurrencychecker=revlogchecker.get_checker(self.ui, b'changelog'),
        )

    @storecache(b'00manifest.i')
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @repofilecache(b'dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(
            self.vfs,
            self.ui,
            self.root,
            self._dirstatevalidate,
            sparsematchfn,
            self.nodeconstants,
        )

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return self.nullid

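    # A minimal wrapping sketch (hypothetical extension code): the extension
    # point above is typically overridden by subclassing in ``reposetup``,
    # e.g.
    #
    #   def reposetup(ui, repo):
    #       class customrepo(repo.__class__):
    #           def _makedirstate(self):
    #               ds = super(customrepo, self)._makedirstate()
    #               # ...customize the dirstate here...
    #               return ds
    #
    #       repo.__class__ = customrepo
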
    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, self.nodeconstants.nullid),
            nullrev: (nullrev, self.nodeconstants.nullid),
            self.nullid: (nullrev, self.nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast path access to the working copy parents
        # however, only do it for filters that ensure wc is visible.
        quick = self._quick_access_changeid_null.copy()
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == self.nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                # skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != self.nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains the symbols we can recognize right away without
        further processing.
        """
        if self.filtername in repoview.filter_has_wc:
            return self._quick_access_changeid_wc
        return self._quick_access_changeid_null

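    # A rough sketch of what the quick-access mapping above holds (the
    # numeric values are illustrative only):
    #
    #     {
    #         b'null':     (nullrev, repo.nullid),
    #         nullrev:     (nullrev, repo.nullid),
    #         repo.nullid: (nullrev, repo.nullid),
    #         # plus, for filters listed in repoview.filter_has_wc:
    #         42:          (42, p1node),
    #         p1node:      (42, p1node),
    #         b'.':        (42, p1node),
    #     }
    #
    # so that __getitem__ below can resolve these identifiers with a single
    # dict lookup instead of a changelog query.
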
    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in pycompat.xrange(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == self.nodeconstants.nodelen:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 2 * self.nodeconstants.nodelen:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)

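    # Illustrative lookups handled by the __getitem__ above (a sketch,
    # assuming an existing `repo` object):
    #
    #     repo[None]     # workingctx for the working directory
    #     repo[0]        # changectx for revision number 0
    #     repo[b'.']     # first parent of the working directory
    #     repo[b'tip']   # most recently added changeset
    #     repo[node]     # 20-byte binary node or 40-byte hex node
    #     repo[0:3]      # list of changectx, filtered revisions skipped
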
    def __contains__(self, changeid):
        """True if the given changeid exists"""
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        """Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        """
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)

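    # A sketch of revs() with %-formatting (the exact conversions are
    # documented in revsetlang.formatspec; b'%d' takes an integer revision):
    #
    #     for rev in repo.revs(b'%d:: and not public()', 0):
    #         ...
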
    def set(self, expr, *args):
        """Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        """
        for r in self.revs(expr, *args):
            yield self[r]

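    # set() is the changectx-yielding counterpart of revs(); a minimal
    # usage sketch (`ui` stands in for any ui object):
    #
    #     for ctx in repo.set(b'heads(all())'):
    #         ui.write(ctx.hex() + b'\n')
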
    def anyrevs(self, specs, user=False, localalias=None):
        """Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        """
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

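    # anyrevs() with a local alias overriding any user-level definition
    # (the alias name is made up for illustration):
    #
    #     revs = repo.anyrevs(
    #         [b'mine()'],
    #         user=True,
    #         localalias={b'mine': b'draft() and not obsolete()'},
    #     )
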
    def url(self):
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

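    # Minimal hook() call sketch (hook name and extra argument are
    # illustrative); for external shell hooks the keyword arguments are
    # exposed as HG_* environment variables:
    #
    #     repo.hook(b'myhook', throw=True, sample=b'value')
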
    @filteredpropertycache
    def _tagscache(self):
        """Returns a tagscache object that contains various tags-related
        caches."""

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in pycompat.iteritems(tags):
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        """Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object."""

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in pycompat.iteritems(alltags):
            if node != self.nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value
            for (name, value) in pycompat.iteritems(tagtypes)
        }
        return (tags, tagtypes)

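    # Shape of the pair returned by _findtags() (entries are made up):
    #
    #     tags     = {b'tip': tipnode, b'v1.0': somenode, b'wip': othernode}
    #     tagtypes = {b'v1.0': b'global', b'wip': b'local'}
    #
    # note that b'tip' is injected above and therefore carries no tagtypes
    # entry.
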
    def tagtype(self, tagname):
        """
        return the type of the given tag. result can be:

        'local' : a local tag
        'global' : a global tag
        None : tag does not exist
        """

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in pycompat.iteritems(self.tags()):
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in pycompat.iteritems(self._tagscache.tags):
                nodetagscache.setdefault(n, []).append(t)
            for tags in pycompat.itervalues(nodetagscache):
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        """returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number"""
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def register_changeset(self, rev, changelogrevision):
        self.revbranchcache().setdata(rev, changelogrevision)

    def branchtip(self, branch, ignoremissing=False):
        """return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        """
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

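    # branchtip() behavior sketch (branch names are illustrative):
    #
    #     repo.branchtip(b'default')                   # node, or raises
    #     repo.branchtip(b'gone', ignoremissing=True)  # falls through, None
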
    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

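    # known() answers positionally, one bool per input node; a sketch:
    #
    #     repo.known([present_node, missing_node, filtered_node])
    #     # -> [True, False, False]
    #
    # filtered revisions report False even though the node is stored.
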
    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=None):
        if p2 is None:
            p2 = self.nullid
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in pycompat.iteritems(self._datafilters):
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

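    # The patterns above come from the hgrc section named by `filter`
    # (b'encode' or b'decode'); a configuration sketch along the lines of
    # the hgrc documentation:
    #
    #     [encode]
    #     *.gz = pipe: gunzip
    #
    #     [decode]
    #     *.gz = pipe: gzip
    #
    # a value of '!' disables filtering for that pattern, and a value
    # starting with a name registered via adddatafilter() dispatches to
    # that Python function instead of an external command.
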
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

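    # wwrite() flag handling in short: b'l' writes a symlink whose target
    # is `data`, b'x' sets the executable bit; a sketch (filenames are
    # illustrative):
    #
    #     repo.wwrite(b'script.sh', b'#!/bin/sh\n', b'x')
    #     repo.wwrite(b'alias', b'real/target', b'l')
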
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

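    # Because only a weakref is kept, the caller owns the transaction's
    # lifetime; the canonical usage of the transaction() below is (sketch):
    #
    #     tr = repo.transaction(b'my-operation')
    #     try:
    #         ...  # mutate the store
    #         tr.close()
    #     finally:
    #         tr.release()
    #
    # (nested callers transparently reuse a running transaction via
    # tr.nest(), as the first lines of transaction() show.)
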
    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature so we disable it by default.
        # The flag will be removed when we are happy with the performance
        # impact.
        #
        # Once this feature is no longer experimental move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                assert repo is not None  # help pytype
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            assert repo is not None  # help pytype

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                if singleheadsub.get(b"public-changes-only", False):
                    filtername = b"immutable"
                else:
                    filtername = b"visible"
                scmutil.enforcesinglehead(
                    repo, tr2, desc, accountclosed, filtername
                )
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args)
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out when the
                # transaction closes, if tr.addfilegenerator (via
                # dirstate.write or so) wasn't invoked while the
                # transaction was running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, b'journal.narrowspec')
                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, b'journal.dirstate')

            repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            aftertrans(renames),
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As fncache is used for streaming clones,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run"""
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                assert repo is not None  # help pytype

                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args)
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            repo = reporef()
            assert repo is not None  # help pytype
            repo._afterlock(hookfunc)

2521 | tr.addfinalize(b'txnclose-hook', txnclosehook) |
|
2521 | tr.addfinalize(b'txnclose-hook', txnclosehook) | |
2522 | # Include a leading "-" to make it happen before the transaction summary |
|
2522 | # Include a leading "-" to make it happen before the transaction summary | |
2523 | # reports registered via scmutil.registersummarycallback() whose names |
|
2523 | # reports registered via scmutil.registersummarycallback() whose names | |
2524 | # are 00-txnreport etc. That way, the caches will be warm when the |
|
2524 | # are 00-txnreport etc. That way, the caches will be warm when the | |
2525 | # callbacks run. |
|
2525 | # callbacks run. | |
2526 | tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr)) |
|
2526 | tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr)) | |
2527 |
|
2527 | |||
2528 | def txnaborthook(tr2): |
|
2528 | def txnaborthook(tr2): | |
2529 | """To be run if transaction is aborted""" |
|
2529 | """To be run if transaction is aborted""" | |
2530 | repo = reporef() |
|
2530 | repo = reporef() | |
2531 | assert repo is not None # help pytype |
|
2531 | assert repo is not None # help pytype | |
2532 | repo.hook( |
|
2532 | repo.hook( | |
2533 | b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs) |
|
2533 | b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs) | |
2534 | ) |
|
2534 | ) | |
2535 |
|
2535 | |||
2536 | tr.addabort(b'txnabort-hook', txnaborthook) |
|
2536 | tr.addabort(b'txnabort-hook', txnaborthook) | |
2537 | # avoid eager cache invalidation. in-memory data should be identical |
|
2537 | # avoid eager cache invalidation. in-memory data should be identical | |
2538 | # to stored data if transaction has no error. |
|
2538 | # to stored data if transaction has no error. | |
2539 | tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats) |
|
2539 | tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats) | |
2540 | self._transref = weakref.ref(tr) |
|
2540 | self._transref = weakref.ref(tr) | |
2541 | scmutil.registersummarycallback(self, tr, desc) |
|
2541 | scmutil.registersummarycallback(self, tr, desc) | |
2542 | return tr |
|
2542 | return tr | |
2543 |
|
2543 | |||
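txnclosehook and hookfunc only ever reach the repository through reporef, a weakref.ref taken earlier in this method, so the transaction's callback table never keeps the repository object alive. A minimal, self-contained sketch of that pattern (generic names, not Mercurial API):

    import weakref

    class Repo:
        def run_close_hooks(self):
            print('running close hooks')

    repo = Repo()
    reporef = weakref.ref(repo)  # weak: does not keep repo alive

    def callback():
        repo = reporef()  # returns None once the repo is collected
        if repo is not None:
            repo.run_close_hooks()

    callback()  # prints 'running close hooks'
    del repo    # drop the last strong reference
    callback()  # silently does nothing; no cycle ever pinned the repo
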
    def _journalfiles(self):
        return (
            (self.svfs, b'journal'),
            (self.svfs, b'journal.narrowspec'),
            (self.vfs, b'journal.narrowspec.dirstate'),
            (self.vfs, b'journal.dirstate'),
            (self.vfs, b'journal.branch'),
            (self.vfs, b'journal.desc'),
            (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
            (self.svfs, b'journal.phaseroots'),
        )

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

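undofiles maps each journal file to its post-transaction counterpart via undoname. A sketch of what that helper amounts to, assuming (as the names above suggest) it simply swaps the journal prefix for undo:

    import os

    def undoname(fn):
        # b'journal.dirstate' -> b'undo.dirstate', and so on
        base, name = os.path.split(fn)
        assert name.startswith(b'journal')
        return os.path.join(base, name.replace(b'journal', b'undo', 1))

    assert undoname(b'journal.phaseroots') == b'undo.phaseroots'
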
    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, b'journal.dirstate')
        narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, b'journal.narrowspec')
        self.vfs.write(
            b"journal.branch", encoding.fromlocal(self.dirstate.branch())
        )
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write(
            b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
        )
        self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))

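The journal.desc payload written above is the exact string that _rollback later parses back out of undo.desc. A runnable round-trip of that format:

    desc = b'commit'
    repolen = 42  # len(self): the revision count at journal time

    data = b"%d\n%s\n" % (repolen, desc)      # what _writejournal stores
    args = data.splitlines()                  # [b'42', b'commit']
    oldlen, desc2 = int(args[0]), args[1]     # int() accepts ASCII bytes
    detail = args[2] if len(args) >= 3 else None
    assert (oldlen, desc2, detail) == (42, b'commit', None)
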
    def recover(self):
        with self.lock():
            if self.svfs.exists(b"journal"):
                self.ui.status(_(b"rolling back interrupted transaction\n"))
                vfsmap = {
                    b'': self.svfs,
                    b'plain': self.vfs,
                }
                transaction.rollback(
                    self.svfs,
                    vfsmap,
                    b"journal",
                    self.ui.warn,
                    checkambigfiles=_cachedfiles,
                )
                self.invalidate()
                return True
            else:
                self.ui.warn(_(b"no interrupted transaction available\n"))
                return False

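The vfsmap handed to transaction.rollback routes each journaled entry to the right root: the empty key is the store, b'plain' the .hg directory itself. A toy illustration of that dispatch (the Vfs class here is a hypothetical stand-in, not Mercurial's vfs):

    class Vfs:
        def __init__(self, root):
            self.root = root

        def path(self, name):
            return self.root + b'/' + name

    svfs, vfs = Vfs(b'.hg/store'), Vfs(b'.hg')
    vfsmap = {b'': svfs, b'plain': vfs}

    def resolve(location, name):
        # pick the backing directory from the entry's location key
        return vfsmap[location].path(name)

    assert resolve(b'', b'00changelog.i') == b'.hg/store/00changelog.i'
    assert resolve(b'plain', b'branch') == b'.hg/branch'
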
    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                dsguard = dirstateguard.dirstateguard(self, b'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

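Note the acquisition order: wlock before lock, matching the warning in the lock() and wlock() docstrings further down. A sketch of the same ordering with context managers, assuming repo is an already-loaded localrepository (this is the shape commit() uses later in this file):

    with repo.wlock(), repo.lock():
        # both the working-copy metadata and .hg/store may now be
        # modified; the locks are released in reverse order on exit
        pass
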
    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
        except IOError:
            msg = _(b'rolling back unknown transaction\n')
            desc = None

        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {b'plain': self.vfs, b'': self.svfs}
        transaction.rollback(
            self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
        )
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists(b'undo.bookmarks'):
            bookmarksvfs.rename(
                b'undo.bookmarks', b'bookmarks', checkambig=True
            )
        if self.svfs.exists(b'undo.phaseroots'):
            self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
        self.invalidate()

        has_node = self.changelog.index.has_node
        parentgone = any(not has_node(p) for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, b'undo.narrowspec')
            narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, b'undo.dirstate')
            try:
                branch = self.vfs.read(b'undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(
                    _(
                        b'named branch could not be reset: '
                        b'current branch is still \'%s\'\n'
                    )
                    % self.dirstate.branch()
                )

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            mergestatemod.mergestate.clean(self)

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)

        def updater(tr):
            repo = reporef()
            assert repo is not None  # help pytype
            repo.updatecaches(tr)

        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.

        The `full` argument can take a special "post-clone" value. In this case
        the cache warming is made after a clone and some of the slower caches
        might be skipped, namely the `.fnodetags` one. This argument is
        5.8-specific as we plan for a cleaner way to deal with this for 5.9.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes[b'origrepolen'] < len(self):
            # accessing the 'served' branchmap should refresh all the others,
            self.ui.debug(b'updating the branch cache\n')
            self.filtered(b'served').branchmap()
            self.filtered(b'served.hidden').branchmap()

        if full:
            unfi = self.unfiltered()

            self.changelog.update_caches(transaction=tr)
            self.manifestlog.update_caches(transaction=tr)

            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

            if full != b"post-clone":
                # accessing fnode cache warms the cache
                tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
                # accessing tags warms the cache
                self.tags()
                self.filtered(b'served').tags()

            # The `full` arg is documented as updating even the lazily-loaded
            # caches immediately, so we're forcing a write to cause these
            # caches to be warmed up even if they haven't explicitly been
            # requested yet (if they've never been used by hg, they won't ever
            # have been written, even if they're a subset of another kind of
            # cache that *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)

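A sketch of how a caller uses the new value, assuming repo is a freshly cloned localrepository; this is the 5.8-era escape hatch the docstring describes:

    # warm everything except the slow tags-fnode work:
    repo.updatecaches(full=b"post-clone")

    # a plain full run, by contrast, also warms the tags caches:
    repo.updatecaches(full=True)
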
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()

    def invalidatedirstate(self):
        """Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state)."""
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        """Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        """
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == b'dirstate':
                continue
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        """Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes."""
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

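Since the comment invites extensions to hook invalidateall, here is a sketch of that wiring using Mercurial's usual wrapfunction mechanism (the extension module and _mycache are hypothetical):

    from mercurial import extensions, localrepo

    _mycache = {}

    def _invalidateall(orig, repo):
        _mycache.clear()  # drop the extension's own cached data too
        return orig(repo)

    def uisetup(ui):
        extensions.wrapfunction(
            localrepo.localrepository, 'invalidateall', _invalidateall
        )
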
    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
    ):
        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            signalsafe=signalsafe,
        )
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found.
            callback(True)

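A sketch of registering such a callback, assuming repo currently holds a lock; commit() below does exactly this with its commithook. The callback receives True when the locks were released cleanly:

    def after(success):
        if success:
            repo.ui.status(b'all repository locks released\n')

    repo._afterlock(after)  # runs immediately if no lock is held
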
    def lock(self, wait=True):
        """Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        """Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._wlockref() if self._wlockref else None
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache[b'dirstate'].refresh()

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
        )
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))

|
3005 | @unfilteredmethod | |
3000 | def commit( |
|
3006 | def commit( | |
3001 | self, |
|
3007 | self, | |
3002 | text=b"", |
|
3008 | text=b"", | |
3003 | user=None, |
|
3009 | user=None, | |
3004 | date=None, |
|
3010 | date=None, | |
3005 | match=None, |
|
3011 | match=None, | |
3006 | force=False, |
|
3012 | force=False, | |
3007 | editor=None, |
|
3013 | editor=None, | |
3008 | extra=None, |
|
3014 | extra=None, | |
3009 | ): |
|
3015 | ): | |
3010 | """Add a new revision to current repository. |
|
3016 | """Add a new revision to current repository. | |
3011 |
|
3017 | |||
3012 | Revision information is gathered from the working directory, |
|
3018 | Revision information is gathered from the working directory, | |
3013 | match can be used to filter the committed files. If editor is |
|
3019 | match can be used to filter the committed files. If editor is | |
3014 | supplied, it is called to get a commit message. |
|
3020 | supplied, it is called to get a commit message. | |
3015 | """ |
|
3021 | """ | |
3016 | if extra is None: |
|
3022 | if extra is None: | |
3017 | extra = {} |
|
3023 | extra = {} | |
3018 |
|
3024 | |||
3019 | def fail(f, msg): |
|
3025 | def fail(f, msg): | |
3020 | raise error.InputError(b'%s: %s' % (f, msg)) |
|
3026 | raise error.InputError(b'%s: %s' % (f, msg)) | |
3021 |
|
3027 | |||
3022 | if not match: |
|
3028 | if not match: | |
3023 | match = matchmod.always() |
|
3029 | match = matchmod.always() | |
3024 |
|
3030 | |||
3025 | if not force: |
|
3031 | if not force: | |
3026 | match.bad = fail |
|
3032 | match.bad = fail | |
3027 |
|
3033 | |||
3028 | # lock() for recent changelog (see issue4368) |
|
3034 | # lock() for recent changelog (see issue4368) | |
3029 | with self.wlock(), self.lock(): |
|
3035 | with self.wlock(), self.lock(): | |
3030 | wctx = self[None] |
|
3036 | wctx = self[None] | |
3031 | merge = len(wctx.parents()) > 1 |
|
3037 | merge = len(wctx.parents()) > 1 | |
3032 |
|
3038 | |||
3033 | if not force and merge and not match.always(): |
|
3039 | if not force and merge and not match.always(): | |
3034 | raise error.Abort( |
|
3040 | raise error.Abort( | |
3035 | _( |
|
3041 | _( | |
3036 | b'cannot partially commit a merge ' |
|
3042 | b'cannot partially commit a merge ' | |
3037 | b'(do not specify files or patterns)' |
|
3043 | b'(do not specify files or patterns)' | |
3038 | ) |
|
3044 | ) | |
3039 | ) |
|
3045 | ) | |
3040 |
|
3046 | |||
3041 | status = self.status(match=match, clean=force) |
|
3047 | status = self.status(match=match, clean=force) | |
3042 | if force: |
|
3048 | if force: | |
3043 | status.modified.extend( |
|
3049 | status.modified.extend( | |
3044 | status.clean |
|
3050 | status.clean | |
3045 | ) # mq may commit clean files |
|
3051 | ) # mq may commit clean files | |
3046 |
|
3052 | |||
3047 | # check subrepos |
|
3053 | # check subrepos | |
3048 | subs, commitsubs, newstate = subrepoutil.precommit( |
|
3054 | subs, commitsubs, newstate = subrepoutil.precommit( | |
3049 | self.ui, wctx, status, match, force=force |
|
3055 | self.ui, wctx, status, match, force=force | |
3050 | ) |
|
3056 | ) | |
3051 |
|
3057 | |||
3052 | # make sure all explicit patterns are matched |
|
3058 | # make sure all explicit patterns are matched | |
3053 | if not force: |
|
3059 | if not force: | |
3054 | self.checkcommitpatterns(wctx, match, status, fail) |
|
3060 | self.checkcommitpatterns(wctx, match, status, fail) | |
3055 |
|
3061 | |||
3056 | cctx = context.workingcommitctx( |
|
3062 | cctx = context.workingcommitctx( | |
3057 | self, status, text, user, date, extra |
|
3063 | self, status, text, user, date, extra | |
3058 | ) |
|
3064 | ) | |
3059 |
|
3065 | |||
3060 | ms = mergestatemod.mergestate.read(self) |
|
3066 | ms = mergestatemod.mergestate.read(self) | |
3061 | mergeutil.checkunresolved(ms) |
|
3067 | mergeutil.checkunresolved(ms) | |
3062 |
|
3068 | |||
3063 | # internal config: ui.allowemptycommit |
|
3069 | # internal config: ui.allowemptycommit | |
3064 | if cctx.isempty() and not self.ui.configbool( |
|
3070 | if cctx.isempty() and not self.ui.configbool( | |
3065 | b'ui', b'allowemptycommit' |
|
3071 | b'ui', b'allowemptycommit' | |
3066 | ): |
|
3072 | ): | |
3067 | self.ui.debug(b'nothing to commit, clearing merge state\n') |
|
3073 | self.ui.debug(b'nothing to commit, clearing merge state\n') | |
3068 | ms.reset() |
|
3074 | ms.reset() | |
3069 | return None |
|
3075 | return None | |
3070 |
|
3076 | |||
3071 | if merge and cctx.deleted(): |
|
3077 | if merge and cctx.deleted(): | |
3072 | raise error.Abort(_(b"cannot commit merge with missing files")) |
|
3078 | raise error.Abort(_(b"cannot commit merge with missing files")) | |
3073 |
|
3079 | |||
3074 | if editor: |
|
3080 | if editor: | |
3075 | cctx._text = editor(self, cctx, subs) |
|
3081 | cctx._text = editor(self, cctx, subs) | |
3076 | edited = text != cctx._text |
|
3082 | edited = text != cctx._text | |
3077 |
|
3083 | |||
3078 | # Save commit message in case this transaction gets rolled back |
|
3084 | # Save commit message in case this transaction gets rolled back | |
3079 | # (e.g. by a pretxncommit hook). Leave the content alone on |
|
3085 | # (e.g. by a pretxncommit hook). Leave the content alone on | |
3080 | # the assumption that the user will use the same editor again. |
|
3086 | # the assumption that the user will use the same editor again. | |
3081 | msgfn = self.savecommitmessage(cctx._text) |
|
3087 | msgfn = self.savecommitmessage(cctx._text) | |
3082 |
|
3088 | |||
3083 | # commit subs and write new state |
|
3089 | # commit subs and write new state | |
3084 | if subs: |
|
3090 | if subs: | |
3085 | uipathfn = scmutil.getuipathfn(self) |
|
3091 | uipathfn = scmutil.getuipathfn(self) | |
3086 | for s in sorted(commitsubs): |
|
3092 | for s in sorted(commitsubs): | |
3087 | sub = wctx.sub(s) |
|
3093 | sub = wctx.sub(s) | |
3088 | self.ui.status( |
|
3094 | self.ui.status( | |
3089 | _(b'committing subrepository %s\n') |
|
3095 | _(b'committing subrepository %s\n') | |
3090 | % uipathfn(subrepoutil.subrelpath(sub)) |
|
3096 | % uipathfn(subrepoutil.subrelpath(sub)) | |
3091 | ) |
|
3097 | ) | |
3092 | sr = sub.commit(cctx._text, user, date) |
|
3098 | sr = sub.commit(cctx._text, user, date) | |
3093 | newstate[s] = (newstate[s][0], sr) |
|
3099 | newstate[s] = (newstate[s][0], sr) | |
3094 | subrepoutil.writestate(self, newstate) |
|
3100 | subrepoutil.writestate(self, newstate) | |
3095 |
|
3101 | |||
3096 | p1, p2 = self.dirstate.parents() |
|
3102 | p1, p2 = self.dirstate.parents() | |
3097 | hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'') |
|
3103 | hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'') | |
3098 | try: |
|
3104 | try: | |
3099 | self.hook( |
|
3105 | self.hook( | |
3100 | b"precommit", throw=True, parent1=hookp1, parent2=hookp2 |
|
3106 | b"precommit", throw=True, parent1=hookp1, parent2=hookp2 | |
3101 | ) |
|
3107 | ) | |
3102 | with self.transaction(b'commit'): |
|
3108 | with self.transaction(b'commit'): | |
3103 | ret = self.commitctx(cctx, True) |
|
3109 | ret = self.commitctx(cctx, True) | |
3104 | # update bookmarks, dirstate and mergestate |
|
3110 | # update bookmarks, dirstate and mergestate | |
3105 | bookmarks.update(self, [p1, p2], ret) |
|
3111 | bookmarks.update(self, [p1, p2], ret) | |
3106 | cctx.markcommitted(ret) |
|
3112 | cctx.markcommitted(ret) | |
3107 | ms.reset() |
|
3113 | ms.reset() | |
3108 | except: # re-raises |
|
3114 | except: # re-raises | |
3109 | if edited: |
|
3115 | if edited: | |
3110 | self.ui.write( |
|
3116 | self.ui.write( | |
3111 | _(b'note: commit message saved in %s\n') % msgfn |
|
3117 | _(b'note: commit message saved in %s\n') % msgfn | |
3112 | ) |
|
3118 | ) | |
3113 | self.ui.write( |
|
3119 | self.ui.write( | |
3114 | _( |
|
3120 | _( | |
3115 | b"note: use 'hg commit --logfile " |
|
3121 | b"note: use 'hg commit --logfile " | |
3116 | b".hg/last-message.txt --edit' to reuse it\n" |
|
3122 | b".hg/last-message.txt --edit' to reuse it\n" | |
3117 | ) |
|
3123 | ) | |
3118 | ) |
|
3124 | ) | |
3119 | raise |
|
3125 | raise | |
3120 |
|
3126 | |||
3121 | def commithook(unused_success): |
|
3127 | def commithook(unused_success): | |
3122 | # hack for command that use a temporary commit (eg: histedit) |
|
3128 | # hack for command that use a temporary commit (eg: histedit) | |
3123 | # temporary commit got stripped before hook release |
|
3129 | # temporary commit got stripped before hook release | |
3124 | if self.changelog.hasnode(ret): |
|
3130 | if self.changelog.hasnode(ret): | |
3125 | self.hook( |
|
3131 | self.hook( | |
3126 | b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2 |
|
3132 | b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2 | |
3127 | ) |
|
3133 | ) | |
3128 |
|
3134 | |||
3129 | self._afterlock(commithook) |
|
3135 | self._afterlock(commithook) | |
3130 | return ret |
|
3136 | return ret | |
3131 |
|
3137 | |||
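A sketch of driving commit() programmatically, assuming repo is a writable localrepository with pending working-directory changes (text and user here are made-up values):

    node = repo.commit(
        text=b"example: adjust frobnication",
        user=b"Jane Doe <jane@example.com>",
    )
    if node is None:
        repo.ui.status(b'nothing to commit\n')  # empty-commit short-circuit
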
    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        return commit.commitctx(self, ctx, error=error, origctx=origctx)

    @unfilteredmethod
    def destroying(self):
        """Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        """
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        """Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        """
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )

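A sketch of the convenience call, assuming a loaded repo; the returned object carries the modified/added/removed/deleted/clean lists that commit() consumed earlier in this file:

    st = repo.status(clean=True)  # compare '.' against the working copy
    for f in st.modified:
        repo.ui.write(b'M %s\n' % f)
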
    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the
        wlock couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

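A sketch of registering a fixup callback, assuming extension code running while a status is in flight; the callback signature follows the docstring above:

    def poststatus(wctx, status):
        repo = wctx.repo()  # always go through wctx for the live dirstate
        repo.ui.note(b'%d files modified\n' % len(status.modified))

    repo.addpostdsstatus(poststatus)
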
    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        """return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        """
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

3262 | def branches(self, nodes): |
|
3268 | def branches(self, nodes): | |
3263 | if not nodes: |
|
3269 | if not nodes: | |
3264 | nodes = [self.changelog.tip()] |
|
3270 | nodes = [self.changelog.tip()] | |
3265 | b = [] |
|
3271 | b = [] | |
3266 | for n in nodes: |
|
3272 | for n in nodes: | |
3267 | t = n |
|
3273 | t = n | |
3268 | while True: |
|
3274 | while True: | |
3269 | p = self.changelog.parents(n) |
|
3275 | p = self.changelog.parents(n) | |
3270 | if p[1] != self.nullid or p[0] == self.nullid: |
|
3276 | if p[1] != self.nullid or p[0] == self.nullid: | |
3271 | b.append((t, n, p[0], p[1])) |
|
3277 | b.append((t, n, p[0], p[1])) | |
3272 | break |
|
3278 | break | |
3273 | n = p[0] |
|
3279 | n = p[0] | |
3274 | return b |
|
3280 | return b | |
3275 |
|
3281 | |||
3276 | def between(self, pairs): |
|
3282 | def between(self, pairs): | |
3277 | r = [] |
|
3283 | r = [] | |
3278 |
|
3284 | |||
3279 | for top, bottom in pairs: |
|
3285 | for top, bottom in pairs: | |
3280 | n, l, i = top, [], 0 |
|
3286 | n, l, i = top, [], 0 | |
3281 | f = 1 |
|
3287 | f = 1 | |
3282 |
|
3288 | |||
3283 | while n != bottom and n != self.nullid: |
|
3289 | while n != bottom and n != self.nullid: | |
3284 | p = self.changelog.parents(n)[0] |
|
3290 | p = self.changelog.parents(n)[0] | |
3285 | if i == f: |
|
3291 | if i == f: | |
3286 | l.append(n) |
|
3292 | l.append(n) | |
3287 | f = f * 2 |
|
3293 | f = f * 2 | |
3288 | n = p |
|
3294 | n = p | |
3289 | i += 1 |
|
3295 | i += 1 | |
3290 |
|
3296 | |||
3291 | r.append(l) |
|
3297 | r.append(l) | |
3292 |
|
3298 | |||
3293 | return r |
|
3299 | return r | |
3294 |
|
3300 | |||
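    # Illustration only: ``between`` samples nodes at exponentially growing
    # distances from ``top`` (1, 2, 4, 8, ... steps away), which let the
    # legacy discovery protocol narrow down a common ancestor in O(log n)
    # round trips.  On a linear chain 10 -> 9 -> ... -> 0, a pair
    # (top=10, bottom=0) yields the sample [9, 8, 6, 2].
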
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object consisting of a pushop with repo,
        remote, and outgoing methods, which are called before pushing
        changesets.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret

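    # Illustration only: bookmarks are one of the namespaces exposed over
    # pushkey, so creating a bookmark through this layer looks roughly like
    # (names hypothetical):
    #
    #     ok = repo.pushkey(b'bookmarks', b'my-bookmark', b'', hexnode)
    #
    # where an empty ``old`` value asks for creation, ``hexnode`` is the hex
    # changeset id, and a falsy return means the update was rejected, e.g.
    # by a prepushkey hook.
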
    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])

    def register_wanted_sidedata(self, category):
        if requirementsmod.REVLOGV2_REQUIREMENT not in self.requirements:
            # Only revlogv2 repos can want sidedata.
            return
        self._wanted_sidedata.add(pycompat.bytestr(category))

    def register_sidedata_computer(
        self, kind, category, keys, computer, flags, replace=False
    ):
        if kind not in revlogconst.ALL_KINDS:
            msg = _(b"unexpected revlog kind '%s'.")
            raise error.ProgrammingError(msg % kind)
        category = pycompat.bytestr(category)
        already_registered = category in self._sidedata_computers.get(kind, [])
        if already_registered and not replace:
            msg = _(
                b"cannot register a sidedata computer twice for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        if replace and not already_registered:
            msg = _(
                b"cannot replace a sidedata computer that isn't registered "
                b"for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        self._sidedata_computers.setdefault(kind, {})
        self._sidedata_computers[kind][category] = (keys, computer, flags)


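def _example_register_sidedata(repo):
    """Illustration only, not part of this module: how an extension might
    register a sidedata computer for changelog revisions.  The category
    name and the computer's exact callback contract are assumptions made
    for this sketch; see mercurial/revlogutils/sidedata.py for the real
    computers.
    """

    def compute(repo, store, rev, prev_sidedata):
        # assumed contract: return (sidedata mapping, (flags to add,
        # flags to remove)) for the given revision
        return {}, (0, 0)

    repo.register_wanted_sidedata(b'exp-example')
    repo.register_sidedata_computer(
        kind=revlogconst.KIND_CHANGELOG,
        category=b'exp-example',
        keys=[b'exp-example'],
        computer=compute,
        flags=0,
    )

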
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]

    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError:  # journal file does not yet exist
                pass

    return a


def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))


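# Illustration only: undoname(b'.hg/store/journal') returns
# b'.hg/store/undo', and undoname(b'journal.bookmarks') returns
# b'undo.bookmarks'.

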
def instance(ui, path, create, intents=None, createopts=None):
    localpath = urlutil.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)


def islocal(path):
    return True


def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts


def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if b'sharedrepo' in createopts:
        requirements = set(createopts[b'sharedrepo'].requirements)
        if createopts.get(b'sharedrelative'):
            requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
        else:
            requirements.add(requirementsmod.SHARED_REQUIREMENT)

        return requirements

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(requirementsmod.STORE_REQUIREMENT)
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)

    compengines = ui.configlist(b'format', b'revlog-compression')
    for compengine in compengines:
        if compengine in util.compengines:
            engine = util.compengines[compengine]
            if engine.available() and engine.revlogheader():
                break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
        requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'internal-phase'):
        requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)

    if createopts.get(b'narrowfiles'):
        requirements.add(requirementsmod.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

    if ui.configbool(b'format', b'use-persistent-nodemap'):
        requirements.add(requirementsmod.NODEMAP_REQUIREMENT)

    # if share-safe is enabled, let's create the new repository with the new
    # requirement
    if ui.configbool(b'format', b'use-share-safe'):
        requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return requirements


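# Illustration only: with stock defaults (usestore, usefncache, dotencode,
# generaldelta and sparse-revlog enabled, zlib compression), the set computed
# above comes out roughly as:
#
#     {b'revlogv1', b'store', b'fncache', b'dotencode', b'generaldelta',
#      b'sparserevlog'}

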
def checkrequirementscompat(ui, requirements):
    """Checks compatibility of repository requirements enabled and disabled.

    Returns a set of requirements which need to be dropped because dependent
    requirements are not enabled. Also warns users about it."""

    dropped = set()

    if requirementsmod.STORE_REQUIREMENT not in requirements:
        if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b'ignoring enabled \'format.bookmarks-in-store\' config '
                    b'because it is incompatible with disabled '
                    b'\'format.usestore\' config\n'
                )
            )
            dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

        if (
            requirementsmod.SHARED_REQUIREMENT in requirements
            or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
        ):
            raise error.Abort(
                _(
                    b"cannot create shared repository as source was created"
                    b" with 'format.usestore' config disabled"
                )
            )

        if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b"ignoring enabled 'format.use-share-safe' config because "
                    b"it is incompatible with disabled 'format.usestore'"
                    b" config\n"
                )
            )
            dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return dropped


def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}


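def _example_wrap_filterknowncreateopts():
    """Illustration only, not part of this module: an extension that handles
    a custom creation option would strip it from the returned "unknown"
    dict along these lines.  The option name is hypothetical.
    """
    from mercurial import extensions, localrepo

    def wrapped(orig, ui, createopts):
        unknown = orig(ui, createopts)
        unknown.pop(b'exp-my-custom-opt', None)
        return unknown

    extensions.wrapfunction(localrepo, 'filterknowncreateopts', wrapped)

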
def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    backend
        The storage backend to use.
    lfs
        Repository will be created with ``lfs`` requirement. The lfs extension
        will automatically be loaded when the repository is accessed.
    narrowfiles
        Set up repository to support narrow file storage.
    sharedrepo
        Repository object from which storage should be shared.
    sharedrelative
        Boolean indicating if the path to the shared repo should be
        stored as relative. By default, the pointer to the "parent" repo
        is stored as an absolute path.
    shareditems
        Set of items to share to the new repository (in addition to storage).
    shallowfilestore
        Indicates that storage for files should be shallow (not all ancestor
        revisions are known).
    """
    createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError(
            b'filterknowncreateopts() did not return a dict'
        )

    if unknownopts:
        raise error.Abort(
            _(
                b'unable to create repository because of unknown '
                b'creation option: %s'
            )
            % b', '.join(sorted(unknownopts)),
            hint=_(b'is a required extension not loaded?'),
        )

    requirements = newreporequirements(ui, createopts=createopts)
    requirements -= checkrequirementscompat(ui, requirements)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
                sharedpath = util.pconvert(sharedpath)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
        hgvfs.mkdir(b'wcache')

    has_store = requirementsmod.STORE_REQUIREMENT in requirements
    if has_store and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 65535, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
            b'layout',
        )

    # Filter the requirements into working copy and store ones
    wcreq, storereq = scmutil.filterrequirements(requirements)
    # write working copy ones
    scmutil.writerequires(hgvfs, wcreq)
    # If there are store requirements and the current repository
    # is not a shared one, write store requirements.
    # For a new shared repository, we don't need to write the store
    # requirements as they are already present in the shared source's store.
    if storereq and b'sharedrepo' not in createopts:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
        scmutil.writerequires(storevfs, storereq)

    # Write out file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)


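# Illustration only: a caller creating a repository with the ``lfs``
# requirement pre-declared would do something like (path hypothetical):
#
#     createrepository(ui, b'/tmp/example-repo', createopts={b'lfs': True})

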
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError(
                b'repo instances should not be used after unshare'
            )

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, '__class__', poisonedrepository)

@@ -1,783 +1,787 @@
# posix.py - Posix utility function implementations for Mercurial
#
# Copyright 2005-2009 Olivia Mackall <olivia@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import fcntl
import getpass
import grp
import os
import pwd
import re
import select
import stat
import sys
import tempfile
import unicodedata

from .i18n import _
from .pycompat import (
    getattr,
    open,
)
from . import (
    encoding,
    error,
    policy,
    pycompat,
)

osutil = policy.importmod('osutil')

normpath = os.path.normpath
samestat = os.path.samestat
try:
    oslink = os.link
except AttributeError:
    # Some platforms build Python without os.link on systems that are
    # vaguely unix-like but don't have hardlink support. For those
    # poor souls, just say we tried and that it failed so we fall back
    # to copies.
    def oslink(src, dst):
        raise OSError(
            errno.EINVAL, b'hardlinks not supported: %s to %s' % (src, dst)
        )


readlink = os.readlink
unlink = os.unlink
rename = os.rename
removedirs = os.removedirs
expandglobs = False

umask = os.umask(0)
os.umask(umask)

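# Note: POSIX offers no way to read the umask without setting it, so the two
# lines above set it to 0 and immediately restore the original value in order
# to capture it.
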
if not pycompat.ispy3:

    def posixfile(name, mode='r', buffering=-1):
        fp = open(name, mode=mode, buffering=buffering)
        # The position when opening in append mode is implementation defined,
        # so make it consistent by always seeking to the end.
        if 'a' in mode:
            fp.seek(0, os.SEEK_END)
        return fp


else:
    # The underlying file object seeks as required in Python 3:
    # https://github.com/python/cpython/blob/v3.7.3/Modules/_io/fileio.c#L474
    posixfile = open

def split(p):
    """Same as posixpath.split, but faster

    >>> import posixpath
    >>> for f in [b'/absolute/path/to/file',
    ...           b'relative/path/to/file',
    ...           b'file_alone',
    ...           b'path/to/directory/',
    ...           b'/multiple/path//separators',
    ...           b'/file_at_root',
    ...           b'///multiple_leading_separators_at_root',
    ...           b'']:
    ...     assert split(f) == posixpath.split(f), f
    """
    ht = p.rsplit(b'/', 1)
    if len(ht) == 1:
        return b'', p
    nh = ht[0].rstrip(b'/')
    if nh:
        return nh, ht[1]
    return ht[0] + b'/', ht[1]


def openhardlinks():
    '''return true if it is safe to hold open file handles to hardlinks'''
    return True


def nlinks(name):
    '''return number of hardlinks for the given file'''
    return os.lstat(name).st_nlink


def parsepatchoutput(output_line):
    """parses the output produced by patch and returns the filename"""
    pf = output_line[14:]
    if pycompat.sysplatform == b'OpenVMS':
        if pf[0] == b'`':
            pf = pf[1:-1]  # Remove the quotes
    else:
        if pf.startswith(b"'") and pf.endswith(b"'") and b" " in pf:
            pf = pf[1:-1]  # Remove the quotes
    return pf


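# Illustration only: given GNU patch output such as
#     b"patching file 'my file.txt'"
# the [14:] slice drops the 14-byte b"patching file " prefix, and the quote
# handling above yields b'my file.txt'.

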
def sshargs(sshcmd, host, user, port):
    '''Build argument list for ssh'''
    args = user and (b"%s@%s" % (user, host)) or host
    if b'-' in args[:1]:
        raise error.Abort(
            _(b'illegal ssh hostname or username starting with -: %s') % args
        )
    args = shellquote(args)
    if port:
        args = b'-p %s %s' % (shellquote(port), args)
    return args


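# Illustration only:
#     sshargs(b'ssh', b'example.com', b'alice', b'2222')
# returns roughly b"-p 2222 'alice@example.com'" (the exact form depends on
# shellquote), while a host or user starting with '-' is rejected to block
# option injection into ssh.

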
def isexec(f):
    """check whether a file is executable"""
    return os.lstat(f).st_mode & 0o100 != 0


def setflags(f, l, x):
    st = os.lstat(f)
    s = st.st_mode
    if l:
        if not stat.S_ISLNK(s):
            # switch file to link
            with open(f, b'rb') as fp:
                data = fp.read()
            unlink(f)
            try:
                os.symlink(data, f)
            except OSError:
                # failed to make a link, rewrite file
                with open(f, b"wb") as fp:
                    fp.write(data)

        # no chmod needed at this point
        return
    if stat.S_ISLNK(s):
        # switch link to file
        data = os.readlink(f)
        unlink(f)
        with open(f, b"wb") as fp:
            fp.write(data)
        s = 0o666 & ~umask  # avoid restatting for chmod

    sx = s & 0o100
    if st.st_nlink > 1 and bool(x) != bool(sx):
        # the file is a hardlink, break it
        with open(f, b"rb") as fp:
            data = fp.read()
        unlink(f)
        with open(f, b"wb") as fp:
            fp.write(data)

    if x and not sx:
        # Turn on +x for every +r bit when making a file executable
        # and obey umask.
        os.chmod(f, s | (s & 0o444) >> 2 & ~umask)
    elif not x and sx:
        # Turn off all +x bits
        os.chmod(f, s & 0o666)


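# Illustration only: in setflags(), the expression
#     s | (s & 0o444) >> 2 & ~umask
# copies each read bit to the matching exec bit, subject to the umask: with
# s = 0o644 and umask = 0o022, (s & 0o444) >> 2 is 0o111 and the result is
# 0o644 | 0o111 = 0o755; a umask that masks exec bits (e.g. 0o023) would
# keep the other-exec bit off.

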
def copymode(src, dst, mode=None, enforcewritable=False):
    """Copy the file mode from the file at path src to dst.
    If src doesn't exist, we're using mode instead. If mode is None, we're
    using umask."""
    try:
        st_mode = os.lstat(src).st_mode & 0o777
    except OSError as inst:
        if inst.errno != errno.ENOENT:
            raise
        st_mode = mode
        if st_mode is None:
            st_mode = ~umask
        st_mode &= 0o666

    new_mode = st_mode

    if enforcewritable:
        new_mode |= stat.S_IWUSR

    os.chmod(dst, new_mode)


207 | def checkexec(path): |
|
207 | def checkexec(path): | |
208 | """ |
|
208 | """ | |
209 | Check whether the given path is on a filesystem with UNIX-like exec flags |
|
209 | Check whether the given path is on a filesystem with UNIX-like exec flags | |
210 |
|
210 | |||
211 | Requires a directory (like /foo/.hg) |
|
211 | Requires a directory (like /foo/.hg) | |
212 | """ |
|
212 | """ | |
213 |
|
213 | |||
214 | # VFAT on some Linux versions can flip mode but it doesn't persist |
|
214 | # VFAT on some Linux versions can flip mode but it doesn't persist | |
215 | # a FS remount. Frequently we can detect it if files are created |
|
215 | # a FS remount. Frequently we can detect it if files are created | |
216 | # with exec bit on. |
|
216 | # with exec bit on. | |
217 |
|
217 | |||
218 | try: |
|
218 | try: | |
219 | EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH |
|
219 | EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH | |
220 | basedir = os.path.join(path, b'.hg') |
|
220 | basedir = os.path.join(path, b'.hg') | |
221 | cachedir = os.path.join(basedir, b'wcache') |
|
221 | cachedir = os.path.join(basedir, b'wcache') | |
222 | storedir = os.path.join(basedir, b'store') |
|
222 | storedir = os.path.join(basedir, b'store') | |
223 | if not os.path.exists(cachedir): |
|
223 | if not os.path.exists(cachedir): | |
224 | try: |
|
224 | try: | |
225 | # we want to create the 'cache' directory, not the '.hg' one. |
|
225 | # we want to create the 'cache' directory, not the '.hg' one. | |
226 | # Automatically creating '.hg' directory could silently spawn |
|
226 | # Automatically creating '.hg' directory could silently spawn | |
227 | # invalid Mercurial repositories. That seems like a bad idea. |
|
227 | # invalid Mercurial repositories. That seems like a bad idea. | |
228 | os.mkdir(cachedir) |
|
228 | os.mkdir(cachedir) | |
229 | if os.path.exists(storedir): |
|
229 | if os.path.exists(storedir): | |
230 | copymode(storedir, cachedir) |
|
230 | copymode(storedir, cachedir) | |
231 | else: |
|
231 | else: | |
232 | copymode(basedir, cachedir) |
|
232 | copymode(basedir, cachedir) | |
233 | except (IOError, OSError): |
|
233 | except (IOError, OSError): | |
234 | # we other fallback logic triggers |
|
234 | # we other fallback logic triggers | |
235 | pass |
|
235 | pass | |
236 | if os.path.isdir(cachedir): |
|
236 | if os.path.isdir(cachedir): | |
237 | checkisexec = os.path.join(cachedir, b'checkisexec') |
|
237 | checkisexec = os.path.join(cachedir, b'checkisexec') | |
238 | checknoexec = os.path.join(cachedir, b'checknoexec') |
|
238 | checknoexec = os.path.join(cachedir, b'checknoexec') | |
239 |
|
239 | |||
240 | try: |
|
240 | try: | |
241 | m = os.stat(checkisexec).st_mode |
|
241 | m = os.stat(checkisexec).st_mode | |
242 | except OSError as e: |
|
242 | except OSError as e: | |
243 | if e.errno != errno.ENOENT: |
|
243 | if e.errno != errno.ENOENT: | |
244 | raise |
|
244 | raise | |
245 | # checkisexec does not exist - fall through ... |
|
245 | # checkisexec does not exist - fall through ... | |
246 | else: |
|
246 | else: | |
247 | # checkisexec exists, check if it actually is exec |
|
247 | # checkisexec exists, check if it actually is exec | |
248 | if m & EXECFLAGS != 0: |
|
248 | if m & EXECFLAGS != 0: | |
249 | # ensure checknoexec exists, check it isn't exec |
|
249 | # ensure checknoexec exists, check it isn't exec | |
250 | try: |
|
250 | try: | |
251 | m = os.stat(checknoexec).st_mode |
|
251 | m = os.stat(checknoexec).st_mode | |
252 | except OSError as e: |
|
252 | except OSError as e: | |
253 | if e.errno != errno.ENOENT: |
|
253 | if e.errno != errno.ENOENT: | |
254 | raise |
|
254 | raise | |
255 | open(checknoexec, b'w').close() # might fail |
|
255 | open(checknoexec, b'w').close() # might fail | |
256 | m = os.stat(checknoexec).st_mode |
|
256 | m = os.stat(checknoexec).st_mode | |
257 | if m & EXECFLAGS == 0: |
|
257 | if m & EXECFLAGS == 0: | |
258 | # check-exec is exec and check-no-exec is not exec |
|
258 | # check-exec is exec and check-no-exec is not exec | |
259 | return True |
|
259 | return True | |
260 | # checknoexec exists but is exec - delete it |
|
260 | # checknoexec exists but is exec - delete it | |
261 | unlink(checknoexec) |
|
261 | unlink(checknoexec) | |
262 | # checkisexec exists but is not exec - delete it |
|
262 | # checkisexec exists but is not exec - delete it | |
263 | unlink(checkisexec) |
|
263 | unlink(checkisexec) | |
264 |
|
264 | |||
265 | # check using one file, leave it as checkisexec |
|
265 | # check using one file, leave it as checkisexec | |
266 | checkdir = cachedir |
|
266 | checkdir = cachedir | |
267 | else: |
|
267 | else: | |
268 | # check directly in path and don't leave checkisexec behind |
|
268 | # check directly in path and don't leave checkisexec behind | |
269 | checkdir = path |
|
269 | checkdir = path | |
270 | checkisexec = None |
|
270 | checkisexec = None | |
271 | fh, fn = pycompat.mkstemp(dir=checkdir, prefix=b'hg-checkexec-') |
|
271 | fh, fn = pycompat.mkstemp(dir=checkdir, prefix=b'hg-checkexec-') | |
272 | try: |
|
272 | try: | |
273 | os.close(fh) |
|
273 | os.close(fh) | |
274 | m = os.stat(fn).st_mode |
|
274 | m = os.stat(fn).st_mode | |
275 | if m & EXECFLAGS == 0: |
|
275 | if m & EXECFLAGS == 0: | |
276 | os.chmod(fn, m & 0o777 | EXECFLAGS) |
|
276 | os.chmod(fn, m & 0o777 | EXECFLAGS) | |
277 | if os.stat(fn).st_mode & EXECFLAGS != 0: |
|
277 | if os.stat(fn).st_mode & EXECFLAGS != 0: | |
278 | if checkisexec is not None: |
|
278 | if checkisexec is not None: | |
279 | os.rename(fn, checkisexec) |
|
279 | os.rename(fn, checkisexec) | |
280 | fn = None |
|
280 | fn = None | |
281 | return True |
|
281 | return True | |
282 | finally: |
|
282 | finally: | |
283 | if fn is not None: |
|
283 | if fn is not None: | |
284 | unlink(fn) |
|
284 | unlink(fn) | |
285 | except (IOError, OSError): |
|
285 | except (IOError, OSError): | |
286 | # we don't care, the user probably won't be able to commit anyway |
|
286 | # we don't care, the user probably won't be able to commit anyway | |
287 | return False |
|
287 | return False | |
288 |
|
288 | |||
289 |
|
289 | |||
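Note: the probe above reduces to a simple technique -- create a scratch file, set its exec bit, and re-stat to see whether the filesystem kept it. A minimal standalone sketch of that idea, assuming only a writable POSIX directory (names here are illustrative, not Mercurial API):

    import os, stat, tempfile

    def fs_supports_exec(dirpath):
        # create a scratch file, flip the user exec bit, verify it sticks
        fd, fn = tempfile.mkstemp(dir=dirpath, prefix='exec-probe-')
        try:
            os.close(fd)
            mode = os.stat(fn).st_mode
            os.chmod(fn, mode | stat.S_IXUSR)
            return bool(os.stat(fn).st_mode & stat.S_IXUSR)
        finally:
            os.unlink(fn)

The checkisexec/checknoexec cache files exist only to skip this stat dance on repeat runs.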
290 | def checklink(path): |
|
290 | def checklink(path): | |
291 | """check whether the given path is on a symlink-capable filesystem""" |
|
291 | """check whether the given path is on a symlink-capable filesystem""" | |
292 | # mktemp is not racy because symlink creation will fail if the |
|
292 | # mktemp is not racy because symlink creation will fail if the | |
293 | # file already exists |
|
293 | # file already exists | |
294 | while True: |
|
294 | while True: | |
295 | cachedir = os.path.join(path, b'.hg', b'wcache') |
|
295 | cachedir = os.path.join(path, b'.hg', b'wcache') | |
296 | checklink = os.path.join(cachedir, b'checklink') |
|
296 | checklink = os.path.join(cachedir, b'checklink') | |
297 | # try fast path, read only |
|
297 | # try fast path, read only | |
298 | if os.path.islink(checklink): |
|
298 | if os.path.islink(checklink): | |
299 | return True |
|
299 | return True | |
300 | if os.path.isdir(cachedir): |
|
300 | if os.path.isdir(cachedir): | |
301 | checkdir = cachedir |
|
301 | checkdir = cachedir | |
302 | else: |
|
302 | else: | |
303 | checkdir = path |
|
303 | checkdir = path | |
304 | cachedir = None |
|
304 | cachedir = None | |
305 | name = tempfile.mktemp( |
|
305 | name = tempfile.mktemp( | |
306 | dir=pycompat.fsdecode(checkdir), prefix=r'checklink-' |
|
306 | dir=pycompat.fsdecode(checkdir), prefix=r'checklink-' | |
307 | ) |
|
307 | ) | |
308 | name = pycompat.fsencode(name) |
|
308 | name = pycompat.fsencode(name) | |
309 | try: |
|
309 | try: | |
310 | fd = None |
|
310 | fd = None | |
311 | if cachedir is None: |
|
311 | if cachedir is None: | |
312 | fd = pycompat.namedtempfile( |
|
312 | fd = pycompat.namedtempfile( | |
313 | dir=checkdir, prefix=b'hg-checklink-' |
|
313 | dir=checkdir, prefix=b'hg-checklink-' | |
314 | ) |
|
314 | ) | |
315 | target = os.path.basename(fd.name) |
|
315 | target = os.path.basename(fd.name) | |
316 | else: |
|
316 | else: | |
317 | # create a fixed file to link to; doesn't matter if it |
|
317 | # create a fixed file to link to; doesn't matter if it | |
318 | # already exists. |
|
318 | # already exists. | |
319 | target = b'checklink-target' |
|
319 | target = b'checklink-target' | |
320 | try: |
|
320 | try: | |
321 | fullpath = os.path.join(cachedir, target) |
|
321 | fullpath = os.path.join(cachedir, target) | |
322 | open(fullpath, b'w').close() |
|
322 | open(fullpath, b'w').close() | |
323 | except IOError as inst: |
|
323 | except IOError as inst: | |
324 | # pytype: disable=unsupported-operands |
|
324 | # pytype: disable=unsupported-operands | |
325 | if inst[0] == errno.EACCES: |
|
325 | if inst[0] == errno.EACCES: | |
326 | # pytype: enable=unsupported-operands |
|
326 | # pytype: enable=unsupported-operands | |
327 |
|
327 | |||
328 | # If we can't write to cachedir, just pretend |
|
328 | # If we can't write to cachedir, just pretend | |
329 | # that the fs is readonly and by association |
|
329 | # that the fs is readonly and by association | |
330 | # that the fs won't support symlinks. This |
|
330 | # that the fs won't support symlinks. This | |
331 | # seems like the least dangerous way to avoid |
|
331 | # seems like the least dangerous way to avoid | |
332 | # data loss. |
|
332 | # data loss. | |
333 | return False |
|
333 | return False | |
334 | raise |
|
334 | raise | |
335 | try: |
|
335 | try: | |
336 | os.symlink(target, name) |
|
336 | os.symlink(target, name) | |
337 | if cachedir is None: |
|
337 | if cachedir is None: | |
338 | unlink(name) |
|
338 | unlink(name) | |
339 | else: |
|
339 | else: | |
340 | try: |
|
340 | try: | |
341 | os.rename(name, checklink) |
|
341 | os.rename(name, checklink) | |
342 | except OSError: |
|
342 | except OSError: | |
343 | unlink(name) |
|
343 | unlink(name) | |
344 | return True |
|
344 | return True | |
345 | except OSError as inst: |
|
345 | except OSError as inst: | |
346 | # link creation might race, try again |
|
346 | # link creation might race, try again | |
347 | if inst.errno == errno.EEXIST: |
|
347 | if inst.errno == errno.EEXIST: | |
348 | continue |
|
348 | continue | |
349 | raise |
|
349 | raise | |
350 | finally: |
|
350 | finally: | |
351 | if fd is not None: |
|
351 | if fd is not None: | |
352 | fd.close() |
|
352 | fd.close() | |
353 | except AttributeError: |
|
353 | except AttributeError: | |
354 | return False |
|
354 | return False | |
355 | except OSError as inst: |
|
355 | except OSError as inst: | |
356 | # sshfs might report failure while successfully creating the link |
|
356 | # sshfs might report failure while successfully creating the link | |
357 | if inst.errno == errno.EIO and os.path.exists(name): |
|
357 | if inst.errno == errno.EIO and os.path.exists(name): | |
358 | unlink(name) |
|
358 | unlink(name) | |
359 | return False |
|
359 | return False | |
360 |
|
360 | |||
361 |
|
361 | |||
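The same probe idea, applied to symlinks: attempt to create one and treat failure as lack of support. A hedged standalone sketch that skips the caching and EEXIST-retry handling above:

    import os

    def fs_supports_symlink(dirpath):
        target = os.path.join(dirpath, 'probe-target')
        link = os.path.join(dirpath, 'probe-link')
        open(target, 'w').close()
        try:
            os.symlink(target, link)   # fails on FSes without symlinks
            return True
        except OSError:
            return False
        finally:
            for p in (link, target):
                try:
                    os.unlink(p)
                except OSError:
                    pass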
362 | def checkosfilename(path): |
|
362 | def checkosfilename(path): | |
363 | """Check that the base-relative path is a valid filename on this platform. |
|
363 | """Check that the base-relative path is a valid filename on this platform. | |
364 | Returns None if the path is ok, or a UI string describing the problem.""" |
|
364 | Returns None if the path is ok, or a UI string describing the problem.""" | |
365 | return None # on posix platforms, every path is ok |
|
365 | return None # on posix platforms, every path is ok | |
366 |
|
366 | |||
367 |
|
367 | |||
368 | def getfsmountpoint(dirpath): |
|
368 | def getfsmountpoint(dirpath): | |
369 | """Get the filesystem mount point from a directory (best-effort) |
|
369 | """Get the filesystem mount point from a directory (best-effort) | |
370 |
|
370 | |||
371 | Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc. |
|
371 | Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc. | |
372 | """ |
|
372 | """ | |
373 | return getattr(osutil, 'getfsmountpoint', lambda x: None)(dirpath) |
|
373 | return getattr(osutil, 'getfsmountpoint', lambda x: None)(dirpath) | |
374 |
|
374 | |||
375 |
|
375 | |||
376 | def getfstype(dirpath): |
|
376 | def getfstype(dirpath): | |
377 | """Get the filesystem type name from a directory (best-effort) |
|
377 | """Get the filesystem type name from a directory (best-effort) | |
378 |
|
378 | |||
379 | Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc. |
|
379 | Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc. | |
380 | """ |
|
380 | """ | |
381 | return getattr(osutil, 'getfstype', lambda x: None)(dirpath) |
|
381 | return getattr(osutil, 'getfstype', lambda x: None)(dirpath) | |
382 |
|
382 | |||
383 |
|
383 | |||
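Both helpers use the same dispatch trick: look up an optional function on the compiled osutil module and fall back to a no-op lambda when it is absent, so call sites never need an explicit feature test. The pattern in isolation (illustrative names):

    def optional_call(module, name, default=None):
        # getattr with a callable fallback keeps the call site unconditional
        return getattr(module, name, lambda *args: default)

    # optional_call(osutil, 'getfstype')(b'/srv/repo') yields None when the
    # C extension does not provide getfstype, instead of raising.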
|
384 | def get_password(): | |||
|
385 | return encoding.strtolocal(getpass.getpass('')) | |||
|
386 | ||||
|
387 | ||||
384 | def setbinary(fd): |
|
388 | def setbinary(fd): | |
385 | pass |
|
389 | pass | |
386 |
|
390 | |||
387 |
|
391 | |||
388 | def pconvert(path): |
|
392 | def pconvert(path): | |
389 | return path |
|
393 | return path | |
390 |
|
394 | |||
391 |
|
395 | |||
392 | def localpath(path): |
|
396 | def localpath(path): | |
393 | return path |
|
397 | return path | |
394 |
|
398 | |||
395 |
|
399 | |||
396 | def samefile(fpath1, fpath2): |
|
400 | def samefile(fpath1, fpath2): | |
397 | """Returns whether path1 and path2 refer to the same file. This is only |
|
401 | """Returns whether path1 and path2 refer to the same file. This is only | |
398 | guaranteed to work for files, not directories.""" |
|
402 | guaranteed to work for files, not directories.""" | |
399 | return os.path.samefile(fpath1, fpath2) |
|
403 | return os.path.samefile(fpath1, fpath2) | |
400 |
|
404 | |||
401 |
|
405 | |||
402 | def samedevice(fpath1, fpath2): |
|
406 | def samedevice(fpath1, fpath2): | |
403 | """Returns whether fpath1 and fpath2 are on the same device. This is only |
|
407 | """Returns whether fpath1 and fpath2 are on the same device. This is only | |
404 | guaranteed to work for files, not directories.""" |
|
408 | guaranteed to work for files, not directories.""" | |
405 | st1 = os.lstat(fpath1) |
|
409 | st1 = os.lstat(fpath1) | |
406 | st2 = os.lstat(fpath2) |
|
410 | st2 = os.lstat(fpath2) | |
407 | return st1.st_dev == st2.st_dev |
|
411 | return st1.st_dev == st2.st_dev | |
408 |
|
412 | |||
409 |
|
413 | |||
410 | # os.path.normcase is a no-op, which doesn't help us on non-native filesystems |
|
414 | # os.path.normcase is a no-op, which doesn't help us on non-native filesystems | |
411 | def normcase(path): |
|
415 | def normcase(path): | |
412 | return path.lower() |
|
416 | return path.lower() | |
413 |
|
417 | |||
414 |
|
418 | |||
415 | # what normcase does to ASCII strings |
|
419 | # what normcase does to ASCII strings | |
416 | normcasespec = encoding.normcasespecs.lower |
|
420 | normcasespec = encoding.normcasespecs.lower | |
417 | # fallback normcase function for non-ASCII strings |
|
421 | # fallback normcase function for non-ASCII strings | |
418 | normcasefallback = normcase |
|
422 | normcasefallback = normcase | |
419 |
|
423 | |||
420 | if pycompat.isdarwin: |
|
424 | if pycompat.isdarwin: | |
421 |
|
425 | |||
422 | def normcase(path): |
|
426 | def normcase(path): | |
423 | """ |
|
427 | """ | |
424 | Normalize a filename for OS X-compatible comparison: |
|
428 | Normalize a filename for OS X-compatible comparison: | |
425 | - escape-encode invalid characters |
|
429 | - escape-encode invalid characters | |
426 | - decompose to NFD |
|
430 | - decompose to NFD | |
427 | - lowercase |
|
431 | - lowercase | |
428 | - omit ignored characters [200c-200f, 202a-202e, 206a-206f, feff] |
|
432 | - omit ignored characters [200c-200f, 202a-202e, 206a-206f, feff] | |
429 |
|
433 | |||
430 | >>> normcase(b'UPPER') |
|
434 | >>> normcase(b'UPPER') | |
431 | 'upper' |
|
435 | 'upper' | |
432 | >>> normcase(b'Caf\\xc3\\xa9') |
|
436 | >>> normcase(b'Caf\\xc3\\xa9') | |
433 | 'cafe\\xcc\\x81' |
|
437 | 'cafe\\xcc\\x81' | |
434 | >>> normcase(b'\\xc3\\x89') |
|
438 | >>> normcase(b'\\xc3\\x89') | |
435 | 'e\\xcc\\x81' |
|
439 | 'e\\xcc\\x81' | |
436 | >>> normcase(b'\\xb8\\xca\\xc3\\xca\\xbe\\xc8.JPG') # issue3918 |
|
440 | >>> normcase(b'\\xb8\\xca\\xc3\\xca\\xbe\\xc8.JPG') # issue3918 | |
437 | '%b8%ca%c3\\xca\\xbe%c8.jpg' |
|
441 | '%b8%ca%c3\\xca\\xbe%c8.jpg' | |
438 | """ |
|
442 | """ | |
439 |
|
443 | |||
440 | try: |
|
444 | try: | |
441 | return encoding.asciilower(path) # exception for non-ASCII |
|
445 | return encoding.asciilower(path) # exception for non-ASCII | |
442 | except UnicodeDecodeError: |
|
446 | except UnicodeDecodeError: | |
443 | return normcasefallback(path) |
|
447 | return normcasefallback(path) | |
444 |
|
448 | |||
445 | normcasespec = encoding.normcasespecs.lower |
|
449 | normcasespec = encoding.normcasespecs.lower | |
446 |
|
450 | |||
447 | def normcasefallback(path): |
|
451 | def normcasefallback(path): | |
448 | try: |
|
452 | try: | |
449 | u = path.decode('utf-8') |
|
453 | u = path.decode('utf-8') | |
450 | except UnicodeDecodeError: |
|
454 | except UnicodeDecodeError: | |
451 | # OS X percent-encodes any bytes that aren't valid utf-8 |
|
455 | # OS X percent-encodes any bytes that aren't valid utf-8 | |
452 | s = b'' |
|
456 | s = b'' | |
453 | pos = 0 |
|
457 | pos = 0 | |
454 | l = len(path) |
|
458 | l = len(path) | |
455 | while pos < l: |
|
459 | while pos < l: | |
456 | try: |
|
460 | try: | |
457 | c = encoding.getutf8char(path, pos) |
|
461 | c = encoding.getutf8char(path, pos) | |
458 | pos += len(c) |
|
462 | pos += len(c) | |
459 | except ValueError: |
|
463 | except ValueError: | |
460 | c = b'%%%02X' % ord(path[pos : pos + 1]) |
|
464 | c = b'%%%02X' % ord(path[pos : pos + 1]) | |
461 | pos += 1 |
|
465 | pos += 1 | |
462 | s += c |
|
466 | s += c | |
463 |
|
467 | |||
464 | u = s.decode('utf-8') |
|
468 | u = s.decode('utf-8') | |
465 |
|
469 | |||
466 | # Decompose then lowercase (HFS+ technote specifies lower) |
|
470 | # Decompose then lowercase (HFS+ technote specifies lower) | |
467 | enc = unicodedata.normalize('NFD', u).lower().encode('utf-8') |
|
471 | enc = unicodedata.normalize('NFD', u).lower().encode('utf-8') | |
468 | # drop HFS+ ignored characters |
|
472 | # drop HFS+ ignored characters | |
469 | return encoding.hfsignoreclean(enc) |
|
473 | return encoding.hfsignoreclean(enc) | |
470 |
|
474 | |||
471 |
|
475 | |||
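The Darwin fallback can be reproduced with the standard library alone; HFS+ stores names NFD-decomposed, which is why plain lowercasing is insufficient. A small sketch that ignores the percent-encoding of invalid UTF-8 and the ignored-character stripping:

    import unicodedata

    def mac_normcase(name):
        # decompose to NFD first, then lowercase, per the HFS+ technote
        return unicodedata.normalize('NFD', name).lower()

    assert mac_normcase('Caf\u00e9') == 'cafe\u0301'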
472 | if pycompat.sysplatform == b'cygwin': |
|
476 | if pycompat.sysplatform == b'cygwin': | |
473 | # workaround for cygwin, in which mount point part of path is |
|
477 | # workaround for cygwin, in which mount point part of path is | |
474 | # treated as case sensitive, even though underlying NTFS is case |
|
478 | # treated as case sensitive, even though underlying NTFS is case | |
475 | # insensitive. |
|
479 | # insensitive. | |
476 |
|
480 | |||
477 | # default mount points |
|
481 | # default mount points | |
478 | cygwinmountpoints = sorted( |
|
482 | cygwinmountpoints = sorted( | |
479 | [ |
|
483 | [ | |
480 | b"/usr/bin", |
|
484 | b"/usr/bin", | |
481 | b"/usr/lib", |
|
485 | b"/usr/lib", | |
482 | b"/cygdrive", |
|
486 | b"/cygdrive", | |
483 | ], |
|
487 | ], | |
484 | reverse=True, |
|
488 | reverse=True, | |
485 | ) |
|
489 | ) | |
486 |
|
490 | |||
487 | # use uppercasing for normcase, the same as the NTFS workaround |
|
491 | # use uppercasing for normcase, the same as the NTFS workaround | |
488 | def normcase(path): |
|
492 | def normcase(path): | |
489 | pathlen = len(path) |
|
493 | pathlen = len(path) | |
490 | if (pathlen == 0) or (path[0] != pycompat.ossep): |
|
494 | if (pathlen == 0) or (path[0] != pycompat.ossep): | |
491 | # treat as relative |
|
495 | # treat as relative | |
492 | return encoding.upper(path) |
|
496 | return encoding.upper(path) | |
493 |
|
497 | |||
494 | # to preserve case of mountpoint part |
|
498 | # to preserve case of mountpoint part | |
495 | for mp in cygwinmountpoints: |
|
499 | for mp in cygwinmountpoints: | |
496 | if not path.startswith(mp): |
|
500 | if not path.startswith(mp): | |
497 | continue |
|
501 | continue | |
498 |
|
502 | |||
499 | mplen = len(mp) |
|
503 | mplen = len(mp) | |
500 | if mplen == pathlen: # mount point itself |
|
504 | if mplen == pathlen: # mount point itself | |
501 | return mp |
|
505 | return mp | |
502 | if path[mplen] == pycompat.ossep: |
|
506 | if path[mplen] == pycompat.ossep: | |
503 | return mp + encoding.upper(path[mplen:]) |
|
507 | return mp + encoding.upper(path[mplen:]) | |
504 |
|
508 | |||
505 | return encoding.upper(path) |
|
509 | return encoding.upper(path) | |
506 |
|
510 | |||
507 | normcasespec = encoding.normcasespecs.other |
|
511 | normcasespec = encoding.normcasespecs.other | |
508 | normcasefallback = normcase |
|
512 | normcasefallback = normcase | |
509 |
|
513 | |||
510 | # Cygwin translates native ACLs to POSIX permissions, |
|
514 | # Cygwin translates native ACLs to POSIX permissions, | |
511 | # but these translations are not supported by native |
|
515 | # but these translations are not supported by native | |
512 | # tools, so the exec bit tends to be set erroneously. |
|
516 | # tools, so the exec bit tends to be set erroneously. | |
513 | # Therefore, disable executable bit access on Cygwin. |
|
517 | # Therefore, disable executable bit access on Cygwin. | |
514 | def checkexec(path): |
|
518 | def checkexec(path): | |
515 | return False |
|
519 | return False | |
516 |
|
520 | |||
517 | # Similarly, Cygwin's symlink emulation is likely to create |
|
521 | # Similarly, Cygwin's symlink emulation is likely to create | |
518 | # problems when Mercurial is used from both Cygwin and native |
|
522 | # problems when Mercurial is used from both Cygwin and native | |
519 | # Windows, with other native tools, or on shared volumes |
|
523 | # Windows, with other native tools, or on shared volumes | |
520 | def checklink(path): |
|
524 | def checklink(path): | |
521 | return False |
|
525 | return False | |
522 |
|
526 | |||
523 |
|
527 | |||
524 | _needsshellquote = None |
|
528 | _needsshellquote = None | |
525 |
|
529 | |||
526 |
|
530 | |||
527 | def shellquote(s): |
|
531 | def shellquote(s): | |
528 | if pycompat.sysplatform == b'OpenVMS': |
|
532 | if pycompat.sysplatform == b'OpenVMS': | |
529 | return b'"%s"' % s |
|
533 | return b'"%s"' % s | |
530 | global _needsshellquote |
|
534 | global _needsshellquote | |
531 | if _needsshellquote is None: |
|
535 | if _needsshellquote is None: | |
532 | _needsshellquote = re.compile(br'[^a-zA-Z0-9._/+-]').search |
|
536 | _needsshellquote = re.compile(br'[^a-zA-Z0-9._/+-]').search | |
533 | if s and not _needsshellquote(s): |
|
537 | if s and not _needsshellquote(s): | |
534 | # "s" shouldn't have to be quoted |
|
538 | # "s" shouldn't have to be quoted | |
535 | return s |
|
539 | return s | |
536 | else: |
|
540 | else: | |
537 | return b"'%s'" % s.replace(b"'", b"'\\''") |
|
541 | return b"'%s'" % s.replace(b"'", b"'\\''") | |
538 |
|
542 | |||
539 |
|
543 | |||
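The quoting rule above is the classic POSIX one: wrap the string in single quotes and turn each embedded single quote into '\'' (close quote, escaped literal quote, reopen quote). A standalone sketch using the same bytes regex:

    import re

    _unsafe = re.compile(br'[^a-zA-Z0-9._/+-]').search

    def quote(s):
        if s and not _unsafe(s):
            return s                  # nothing shell-special, pass through
        return b"'%s'" % s.replace(b"'", b"'\\''")

    assert quote(b"it's") == b"'it'\\''s'"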
540 | def shellsplit(s): |
|
544 | def shellsplit(s): | |
541 | """Parse a command string in POSIX shell way (best-effort)""" |
|
545 | """Parse a command string in POSIX shell way (best-effort)""" | |
542 | return pycompat.shlexsplit(s, posix=True) |
|
546 | return pycompat.shlexsplit(s, posix=True) | |
543 |
|
547 | |||
544 |
|
548 | |||
545 | def testpid(pid): |
|
549 | def testpid(pid): | |
546 | '''return False if pid dead, True if running or not sure''' |
|
550 | '''return False if pid dead, True if running or not sure''' | |
547 | if pycompat.sysplatform == b'OpenVMS': |
|
551 | if pycompat.sysplatform == b'OpenVMS': | |
548 | return True |
|
552 | return True | |
549 | try: |
|
553 | try: | |
550 | os.kill(pid, 0) |
|
554 | os.kill(pid, 0) | |
551 | return True |
|
555 | return True | |
552 | except OSError as inst: |
|
556 | except OSError as inst: | |
553 | return inst.errno != errno.ESRCH |
|
557 | return inst.errno != errno.ESRCH | |
554 |
|
558 | |||
555 |
|
559 | |||
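Signal number 0 makes os.kill deliver nothing; the kernel performs only the existence and permission checks, which is what makes it usable as a liveness probe. Sketch:

    import errno, os

    def pid_alive(pid):
        try:
            os.kill(pid, 0)           # no signal is actually sent
            return True
        except OSError as e:
            # EPERM means the pid exists but is owned by another user
            return e.errno != errno.ESRCH

    print(pid_alive(os.getpid()))     # True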
556 | def isowner(st): |
|
560 | def isowner(st): | |
557 | """Return True if the stat object st is from the current user.""" |
|
561 | """Return True if the stat object st is from the current user.""" | |
558 | return st.st_uid == os.getuid() |
|
562 | return st.st_uid == os.getuid() | |
559 |
|
563 | |||
560 |
|
564 | |||
561 | def findexe(command): |
|
565 | def findexe(command): | |
562 | """Find executable for command searching like which does. |
|
566 | """Find executable for command searching like which does. | |
563 | If command is a basename then PATH is searched for command. |
|
567 | If command is a basename then PATH is searched for command. | |
564 | PATH isn't searched if command is an absolute or relative path. |
|
568 | PATH isn't searched if command is an absolute or relative path. | |
565 | If command isn't found None is returned.""" |
|
569 | If command isn't found None is returned.""" | |
566 | if pycompat.sysplatform == b'OpenVMS': |
|
570 | if pycompat.sysplatform == b'OpenVMS': | |
567 | return command |
|
571 | return command | |
568 |
|
572 | |||
569 | def findexisting(executable): |
|
573 | def findexisting(executable): | |
570 | b'Will return executable if existing file' |
|
574 | b'Will return executable if existing file' | |
571 | if os.path.isfile(executable) and os.access(executable, os.X_OK): |
|
575 | if os.path.isfile(executable) and os.access(executable, os.X_OK): | |
572 | return executable |
|
576 | return executable | |
573 | return None |
|
577 | return None | |
574 |
|
578 | |||
575 | if pycompat.ossep in command: |
|
579 | if pycompat.ossep in command: | |
576 | return findexisting(command) |
|
580 | return findexisting(command) | |
577 |
|
581 | |||
578 | if pycompat.sysplatform == b'plan9': |
|
582 | if pycompat.sysplatform == b'plan9': | |
579 | return findexisting(os.path.join(b'/bin', command)) |
|
583 | return findexisting(os.path.join(b'/bin', command)) | |
580 |
|
584 | |||
581 | for path in encoding.environ.get(b'PATH', b'').split(pycompat.ospathsep): |
|
585 | for path in encoding.environ.get(b'PATH', b'').split(pycompat.ospathsep): | |
582 | executable = findexisting(os.path.join(path, command)) |
|
586 | executable = findexisting(os.path.join(path, command)) | |
583 | if executable is not None: |
|
587 | if executable is not None: | |
584 | return executable |
|
588 | return executable | |
585 | return None |
|
589 | return None | |
586 |
|
590 | |||
587 |
|
591 | |||
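Usage sketch for the lookup above (illustrative inputs); for str paths, shutil.which in the standard library performs the equivalent search:

    # findexe(b'sh')          -> e.g. b'/bin/sh', found via $PATH
    # findexe(b'./run.sh')    -> checked directly, $PATH is skipped
    # findexe(b'no-such-cmd') -> None
    import shutil
    print(shutil.which('sh'))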
588 | def setsignalhandler(): |
|
592 | def setsignalhandler(): | |
589 | pass |
|
593 | pass | |
590 |
|
594 | |||
591 |
|
595 | |||
592 | _wantedkinds = {stat.S_IFREG, stat.S_IFLNK} |
|
596 | _wantedkinds = {stat.S_IFREG, stat.S_IFLNK} | |
593 |
|
597 | |||
594 |
|
598 | |||
595 | def statfiles(files): |
|
599 | def statfiles(files): | |
596 | """Stat each file in files. Yield each stat, or None if a file does not |
|
600 | """Stat each file in files. Yield each stat, or None if a file does not | |
597 | exist or has a type we don't care about.""" |
|
601 | exist or has a type we don't care about.""" | |
598 | lstat = os.lstat |
|
602 | lstat = os.lstat | |
599 | getkind = stat.S_IFMT |
|
603 | getkind = stat.S_IFMT | |
600 | for nf in files: |
|
604 | for nf in files: | |
601 | try: |
|
605 | try: | |
602 | st = lstat(nf) |
|
606 | st = lstat(nf) | |
603 | if getkind(st.st_mode) not in _wantedkinds: |
|
607 | if getkind(st.st_mode) not in _wantedkinds: | |
604 | st = None |
|
608 | st = None | |
605 | except OSError as err: |
|
609 | except OSError as err: | |
606 | if err.errno not in (errno.ENOENT, errno.ENOTDIR): |
|
610 | if err.errno not in (errno.ENOENT, errno.ENOTDIR): | |
607 | raise |
|
611 | raise | |
608 | st = None |
|
612 | st = None | |
609 | yield st |
|
613 | yield st | |
610 |
|
614 | |||
611 |
|
615 | |||
612 | def getuser(): |
|
616 | def getuser(): | |
613 | '''return name of current user''' |
|
617 | '''return name of current user''' | |
614 | return pycompat.fsencode(getpass.getuser()) |
|
618 | return pycompat.fsencode(getpass.getuser()) | |
615 |
|
619 | |||
616 |
|
620 | |||
617 | def username(uid=None): |
|
621 | def username(uid=None): | |
618 | """Return the name of the user with the given uid. |
|
622 | """Return the name of the user with the given uid. | |
619 |
|
623 | |||
620 | If uid is None, return the name of the current user.""" |
|
624 | If uid is None, return the name of the current user.""" | |
621 |
|
625 | |||
622 | if uid is None: |
|
626 | if uid is None: | |
623 | uid = os.getuid() |
|
627 | uid = os.getuid() | |
624 | try: |
|
628 | try: | |
625 | return pycompat.fsencode(pwd.getpwuid(uid)[0]) |
|
629 | return pycompat.fsencode(pwd.getpwuid(uid)[0]) | |
626 | except KeyError: |
|
630 | except KeyError: | |
627 | return b'%d' % uid |
|
631 | return b'%d' % uid | |
628 |
|
632 | |||
629 |
|
633 | |||
630 | def groupname(gid=None): |
|
634 | def groupname(gid=None): | |
631 | """Return the name of the group with the given gid. |
|
635 | """Return the name of the group with the given gid. | |
632 |
|
636 | |||
633 | If gid is None, return the name of the current group.""" |
|
637 | If gid is None, return the name of the current group.""" | |
634 |
|
638 | |||
635 | if gid is None: |
|
639 | if gid is None: | |
636 | gid = os.getgid() |
|
640 | gid = os.getgid() | |
637 | try: |
|
641 | try: | |
638 | return pycompat.fsencode(grp.getgrgid(gid)[0]) |
|
642 | return pycompat.fsencode(grp.getgrgid(gid)[0]) | |
639 | except KeyError: |
|
643 | except KeyError: | |
640 | return pycompat.bytestr(gid) |
|
644 | return pycompat.bytestr(gid) | |
641 |
|
645 | |||
642 |
|
646 | |||
643 | def groupmembers(name): |
|
647 | def groupmembers(name): | |
644 | """Return the list of members of the group with the given |
|
648 | """Return the list of members of the group with the given | |
645 | name; raises KeyError if the group does not exist. |
|
649 | name; raises KeyError if the group does not exist. | |
646 | """ |
|
650 | """ | |
647 | name = pycompat.fsdecode(name) |
|
651 | name = pycompat.fsdecode(name) | |
648 | return pycompat.rapply(pycompat.fsencode, list(grp.getgrnam(name).gr_mem)) |
|
652 | return pycompat.rapply(pycompat.fsencode, list(grp.getgrnam(name).gr_mem)) | |
649 |
|
653 | |||
650 |
|
654 | |||
651 | def spawndetached(args): |
|
655 | def spawndetached(args): | |
652 | return os.spawnvp(os.P_NOWAIT | getattr(os, 'P_DETACH', 0), args[0], args) |
|
656 | return os.spawnvp(os.P_NOWAIT | getattr(os, 'P_DETACH', 0), args[0], args) | |
653 |
|
657 | |||
654 |
|
658 | |||
655 | def gethgcmd(): |
|
659 | def gethgcmd(): | |
656 | return sys.argv[:1] |
|
660 | return sys.argv[:1] | |
657 |
|
661 | |||
658 |
|
662 | |||
659 | def makedir(path, notindexed): |
|
663 | def makedir(path, notindexed): | |
660 | os.mkdir(path) |
|
664 | os.mkdir(path) | |
661 |
|
665 | |||
662 |
|
666 | |||
663 | def lookupreg(key, name=None, scope=None): |
|
667 | def lookupreg(key, name=None, scope=None): | |
664 | return None |
|
668 | return None | |
665 |
|
669 | |||
666 |
|
670 | |||
667 | def hidewindow(): |
|
671 | def hidewindow(): | |
668 | """Hide current shell window. |
|
672 | """Hide current shell window. | |
669 |
|
673 | |||
670 | Used to hide the window opened when starting an asynchronous |
|
674 | Used to hide the window opened when starting an asynchronous | |
671 | child process under Windows; unneeded on other systems. |
|
675 | child process under Windows; unneeded on other systems. | |
672 | """ |
|
676 | """ | |
673 | pass |
|
677 | pass | |
674 |
|
678 | |||
675 |
|
679 | |||
676 | class cachestat(object): |
|
680 | class cachestat(object): | |
677 | def __init__(self, path): |
|
681 | def __init__(self, path): | |
678 | self.stat = os.stat(path) |
|
682 | self.stat = os.stat(path) | |
679 |
|
683 | |||
680 | def cacheable(self): |
|
684 | def cacheable(self): | |
681 | return bool(self.stat.st_ino) |
|
685 | return bool(self.stat.st_ino) | |
682 |
|
686 | |||
683 | __hash__ = object.__hash__ |
|
687 | __hash__ = object.__hash__ | |
684 |
|
688 | |||
685 | def __eq__(self, other): |
|
689 | def __eq__(self, other): | |
686 | try: |
|
690 | try: | |
687 | # Only dev, ino, size, mtime and atime are likely to change. Out |
|
691 | # Only dev, ino, size, mtime and atime are likely to change. Out | |
688 | # of these, we shouldn't compare atime but should compare the |
|
692 | # of these, we shouldn't compare atime but should compare the | |
689 | # rest. However, one of the other fields changing indicates |
|
693 | # rest. However, one of the other fields changing indicates | |
690 | # something fishy going on, so return False if anything but atime |
|
694 | # something fishy going on, so return False if anything but atime | |
691 | # changes. |
|
695 | # changes. | |
692 | return ( |
|
696 | return ( | |
693 | self.stat.st_mode == other.stat.st_mode |
|
697 | self.stat.st_mode == other.stat.st_mode | |
694 | and self.stat.st_ino == other.stat.st_ino |
|
698 | and self.stat.st_ino == other.stat.st_ino | |
695 | and self.stat.st_dev == other.stat.st_dev |
|
699 | and self.stat.st_dev == other.stat.st_dev | |
696 | and self.stat.st_nlink == other.stat.st_nlink |
|
700 | and self.stat.st_nlink == other.stat.st_nlink | |
697 | and self.stat.st_uid == other.stat.st_uid |
|
701 | and self.stat.st_uid == other.stat.st_uid | |
698 | and self.stat.st_gid == other.stat.st_gid |
|
702 | and self.stat.st_gid == other.stat.st_gid | |
699 | and self.stat.st_size == other.stat.st_size |
|
703 | and self.stat.st_size == other.stat.st_size | |
700 | and self.stat[stat.ST_MTIME] == other.stat[stat.ST_MTIME] |
|
704 | and self.stat[stat.ST_MTIME] == other.stat[stat.ST_MTIME] | |
701 | and self.stat[stat.ST_CTIME] == other.stat[stat.ST_CTIME] |
|
705 | and self.stat[stat.ST_CTIME] == other.stat[stat.ST_CTIME] | |
702 | ) |
|
706 | ) | |
703 | except AttributeError: |
|
707 | except AttributeError: | |
704 | return False |
|
708 | return False | |
705 |
|
709 | |||
706 | def __ne__(self, other): |
|
710 | def __ne__(self, other): | |
707 | return not self == other |
|
711 | return not self == other | |
708 |
|
712 | |||
709 |
|
713 | |||
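A cachestat snapshot is typically paired with a cached computation: stat at fill time, stat again at lookup time, rebuild on inequality. A hedged usage sketch built on the class above; the cache layout is illustrative, not Mercurial's scmutil machinery:

    def cached_read(path, _cache={}):
        # rebuild the cached bytes only when the stat snapshot changed
        st = cachestat(path)
        entry = _cache.get(path)
        if entry is None or entry[0] != st:
            with open(path, 'rb') as f:
                _cache[path] = (st, f.read())
        return _cache[path][1]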
710 | def statislink(st): |
|
714 | def statislink(st): | |
711 | '''check whether a stat result is a symlink''' |
|
715 | '''check whether a stat result is a symlink''' | |
712 | return st and stat.S_ISLNK(st.st_mode) |
|
716 | return st and stat.S_ISLNK(st.st_mode) | |
713 |
|
717 | |||
714 |
|
718 | |||
715 | def statisexec(st): |
|
719 | def statisexec(st): | |
716 | '''check whether a stat result is an executable file''' |
|
720 | '''check whether a stat result is an executable file''' | |
717 | return st and (st.st_mode & 0o100 != 0) |
|
721 | return st and (st.st_mode & 0o100 != 0) | |
718 |
|
722 | |||
719 |
|
723 | |||
720 | def poll(fds): |
|
724 | def poll(fds): | |
721 | """block until something happens on any file descriptor |
|
725 | """block until something happens on any file descriptor | |
722 |
|
726 | |||
723 | This is a generic helper that will check for any activity |
|
727 | This is a generic helper that will check for any activity | |
724 | (read, write, exception) and return the list of touched files. |
|
728 | (read, write, exception) and return the list of touched files. | |
725 |
|
729 | |||
726 | In unsupported cases, it will raise a NotImplementedError""" |
|
730 | In unsupported cases, it will raise a NotImplementedError""" | |
727 | try: |
|
731 | try: | |
728 | while True: |
|
732 | while True: | |
729 | try: |
|
733 | try: | |
730 | res = select.select(fds, fds, fds) |
|
734 | res = select.select(fds, fds, fds) | |
731 | break |
|
735 | break | |
732 | except select.error as inst: |
|
736 | except select.error as inst: | |
733 | if inst.args[0] == errno.EINTR: |
|
737 | if inst.args[0] == errno.EINTR: | |
734 | continue |
|
738 | continue | |
735 | raise |
|
739 | raise | |
736 | except ValueError: # out of range file descriptor |
|
740 | except ValueError: # out of range file descriptor | |
737 | raise NotImplementedError() |
|
741 | raise NotImplementedError() | |
738 | return sorted(list(set(sum(res, [])))) |
|
742 | return sorted(list(set(sum(res, [])))) | |
739 |
|
743 | |||
740 |
|
744 | |||
741 | def readpipe(pipe): |
|
745 | def readpipe(pipe): | |
742 | """Read all available data from a pipe.""" |
|
746 | """Read all available data from a pipe.""" | |
743 | # We can't fstat() a pipe because Linux will always report 0. |
|
747 | # We can't fstat() a pipe because Linux will always report 0. | |
744 | # So, we set the pipe to non-blocking mode and read everything |
|
748 | # So, we set the pipe to non-blocking mode and read everything | |
745 | # that's available. |
|
749 | # that's available. | |
746 | flags = fcntl.fcntl(pipe, fcntl.F_GETFL) |
|
750 | flags = fcntl.fcntl(pipe, fcntl.F_GETFL) | |
747 | flags |= os.O_NONBLOCK |
|
751 | flags |= os.O_NONBLOCK | |
748 | oldflags = fcntl.fcntl(pipe, fcntl.F_SETFL, flags) |
|
752 | oldflags = fcntl.fcntl(pipe, fcntl.F_SETFL, flags) | |
749 |
|
753 | |||
750 | try: |
|
754 | try: | |
751 | chunks = [] |
|
755 | chunks = [] | |
752 | while True: |
|
756 | while True: | |
753 | try: |
|
757 | try: | |
754 | s = pipe.read() |
|
758 | s = pipe.read() | |
755 | if not s: |
|
759 | if not s: | |
756 | break |
|
760 | break | |
757 | chunks.append(s) |
|
761 | chunks.append(s) | |
758 | except IOError: |
|
762 | except IOError: | |
759 | break |
|
763 | break | |
760 |
|
764 | |||
761 | return b''.join(chunks) |
|
765 | return b''.join(chunks) | |
762 | finally: |
|
766 | finally: | |
763 | fcntl.fcntl(pipe, fcntl.F_SETFL, oldflags) |
|
767 | fcntl.fcntl(pipe, fcntl.F_SETFL, oldflags) | |
764 |
|
768 | |||
765 |
|
769 | |||
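The fcntl dance is the standard way to drain a pipe without blocking: OR O_NONBLOCK into the descriptor flags, read until empty, then restore. One pitfall worth knowing: fcntl(F_SETFL) typically returns 0 on success, so the original flags are safest captured from F_GETFL before modifying. Standalone sketch of the toggle:

    import fcntl, os

    def set_nonblocking(fd, enable=True):
        old = fcntl.fcntl(fd, fcntl.F_GETFL)   # snapshot before changing
        new = old | os.O_NONBLOCK if enable else old & ~os.O_NONBLOCK
        fcntl.fcntl(fd, fcntl.F_SETFL, new)
        return old                             # caller restores via F_SETFL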
766 | def bindunixsocket(sock, path): |
|
770 | def bindunixsocket(sock, path): | |
767 | """Bind the UNIX domain socket to the specified path""" |
|
771 | """Bind the UNIX domain socket to the specified path""" | |
768 | # use relative path instead of full path at bind() if possible, since |
|
772 | # use relative path instead of full path at bind() if possible, since | |
769 | # AF_UNIX path has very small length limit (107 chars) on common |
|
773 | # AF_UNIX path has very small length limit (107 chars) on common | |
770 | # platforms (see sys/un.h) |
|
774 | # platforms (see sys/un.h) | |
771 | dirname, basename = os.path.split(path) |
|
775 | dirname, basename = os.path.split(path) | |
772 | bakwdfd = None |
|
776 | bakwdfd = None | |
773 |
|
777 | |||
774 | try: |
|
778 | try: | |
775 | if dirname: |
|
779 | if dirname: | |
776 | bakwdfd = os.open(b'.', os.O_DIRECTORY) |
|
780 | bakwdfd = os.open(b'.', os.O_DIRECTORY) | |
777 | os.chdir(dirname) |
|
781 | os.chdir(dirname) | |
778 | sock.bind(basename) |
|
782 | sock.bind(basename) | |
779 | if bakwdfd: |
|
783 | if bakwdfd: | |
780 | os.fchdir(bakwdfd) |
|
784 | os.fchdir(bakwdfd) | |
781 | finally: |
|
785 | finally: | |
782 | if bakwdfd: |
|
786 | if bakwdfd: | |
783 | os.close(bakwdfd) |
|
787 | os.close(bakwdfd) |
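Why the chdir trick works: sun_path in sockaddr_un is only about 107 bytes on common platforms, so binding a long absolute path fails even though the resulting socket is perfectly legal. Binding the basename from inside its directory keeps the bound name short. A POSIX-only standalone sketch:

    import os, socket

    def bind_unix(sock, path):
        dirname, basename = os.path.split(path)
        oldcwd = os.open('.', os.O_DIRECTORY)  # remember where we were
        try:
            if dirname:
                os.chdir(dirname)
            sock.bind(basename)                # short, relative name
        finally:
            os.fchdir(oldcwd)
            os.close(oldcwd)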
@@ -1,2230 +1,2229 b'' | |||||
1 | # ui.py - user interface bits for mercurial |
|
1 | # ui.py - user interface bits for mercurial | |
2 | # |
|
2 | # | |
3 | # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com> |
|
3 | # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | from __future__ import absolute_import |
|
8 | from __future__ import absolute_import | |
9 |
|
9 | |||
10 | import collections |
|
10 | import collections | |
11 | import contextlib |
|
11 | import contextlib | |
12 | import datetime |
|
12 | import datetime | |
13 | import errno |
|
13 | import errno | |
14 | import getpass |
|
|||
15 | import inspect |
|
14 | import inspect | |
16 | import os |
|
15 | import os | |
17 | import re |
|
16 | import re | |
18 | import signal |
|
17 | import signal | |
19 | import socket |
|
18 | import socket | |
20 | import subprocess |
|
19 | import subprocess | |
21 | import sys |
|
20 | import sys | |
22 | import traceback |
|
21 | import traceback | |
23 |
|
22 | |||
24 | from .i18n import _ |
|
23 | from .i18n import _ | |
25 | from .node import hex |
|
24 | from .node import hex | |
26 | from .pycompat import ( |
|
25 | from .pycompat import ( | |
27 | getattr, |
|
26 | getattr, | |
28 | open, |
|
27 | open, | |
29 | ) |
|
28 | ) | |
30 |
|
29 | |||
31 | from . import ( |
|
30 | from . import ( | |
32 | color, |
|
31 | color, | |
33 | config, |
|
32 | config, | |
34 | configitems, |
|
33 | configitems, | |
35 | encoding, |
|
34 | encoding, | |
36 | error, |
|
35 | error, | |
37 | formatter, |
|
36 | formatter, | |
38 | loggingutil, |
|
37 | loggingutil, | |
39 | progress, |
|
38 | progress, | |
40 | pycompat, |
|
39 | pycompat, | |
41 | rcutil, |
|
40 | rcutil, | |
42 | scmutil, |
|
41 | scmutil, | |
43 | util, |
|
42 | util, | |
44 | ) |
|
43 | ) | |
45 | from .utils import ( |
|
44 | from .utils import ( | |
46 | dateutil, |
|
45 | dateutil, | |
47 | procutil, |
|
46 | procutil, | |
48 | resourceutil, |
|
47 | resourceutil, | |
49 | stringutil, |
|
48 | stringutil, | |
50 | urlutil, |
|
49 | urlutil, | |
51 | ) |
|
50 | ) | |
52 |
|
51 | |||
53 | urlreq = util.urlreq |
|
52 | urlreq = util.urlreq | |
54 |
|
53 | |||
55 | # for use with str.translate(None, _keepalnum), to keep just alphanumerics |
|
54 | # for use with str.translate(None, _keepalnum), to keep just alphanumerics | |
56 | _keepalnum = b''.join( |
|
55 | _keepalnum = b''.join( | |
57 | c for c in map(pycompat.bytechr, range(256)) if not c.isalnum() |
|
56 | c for c in map(pycompat.bytechr, range(256)) if not c.isalnum() | |
58 | ) |
|
57 | ) | |
59 |
|
58 | |||
60 | # The config knobs that will be altered (if unset) by ui.tweakdefaults. |
|
59 | # The config knobs that will be altered (if unset) by ui.tweakdefaults. | |
61 | tweakrc = b""" |
|
60 | tweakrc = b""" | |
62 | [ui] |
|
61 | [ui] | |
63 | # The rollback command is dangerous. As a rule, don't use it. |
|
62 | # The rollback command is dangerous. As a rule, don't use it. | |
64 | rollback = False |
|
63 | rollback = False | |
65 | # Make `hg status` report copy information |
|
64 | # Make `hg status` report copy information | |
66 | statuscopies = yes |
|
65 | statuscopies = yes | |
67 | # Prefer curses UIs when available. Revert to plain-text with `text`. |
|
66 | # Prefer curses UIs when available. Revert to plain-text with `text`. | |
68 | interface = curses |
|
67 | interface = curses | |
69 | # Make compatible commands emit cwd-relative paths by default. |
|
68 | # Make compatible commands emit cwd-relative paths by default. | |
70 | relative-paths = yes |
|
69 | relative-paths = yes | |
71 |
|
70 | |||
72 | [commands] |
|
71 | [commands] | |
73 | # Grep working directory by default. |
|
72 | # Grep working directory by default. | |
74 | grep.all-files = True |
|
73 | grep.all-files = True | |
75 | # Refuse to perform an `hg update` that would cause a file content merge |
|
74 | # Refuse to perform an `hg update` that would cause a file content merge | |
76 | update.check = noconflict |
|
75 | update.check = noconflict | |
77 | # Show conflicts information in `hg status` |
|
76 | # Show conflicts information in `hg status` | |
78 | status.verbose = True |
|
77 | status.verbose = True | |
79 | # Make `hg resolve` with no action (like `-m`) fail instead of re-merging. |
|
78 | # Make `hg resolve` with no action (like `-m`) fail instead of re-merging. | |
80 | resolve.explicit-re-merge = True |
|
79 | resolve.explicit-re-merge = True | |
81 |
|
80 | |||
82 | [diff] |
|
81 | [diff] | |
83 | git = 1 |
|
82 | git = 1 | |
84 | showfunc = 1 |
|
83 | showfunc = 1 | |
85 | word-diff = 1 |
|
84 | word-diff = 1 | |
86 | """ |
|
85 | """ | |
87 |
|
86 | |||
88 | samplehgrcs = { |
|
87 | samplehgrcs = { | |
89 | b'user': b"""# example user config (see 'hg help config' for more info) |
|
88 | b'user': b"""# example user config (see 'hg help config' for more info) | |
90 | [ui] |
|
89 | [ui] | |
91 | # name and email, e.g. |
|
90 | # name and email, e.g. | |
92 | # username = Jane Doe <jdoe@example.com> |
|
91 | # username = Jane Doe <jdoe@example.com> | |
93 | username = |
|
92 | username = | |
94 |
|
93 | |||
95 | # We recommend enabling tweakdefaults to get slight improvements to |
|
94 | # We recommend enabling tweakdefaults to get slight improvements to | |
96 | # the UI over time. Make sure to set HGPLAIN in the environment when |
|
95 | # the UI over time. Make sure to set HGPLAIN in the environment when | |
97 | # writing scripts! |
|
96 | # writing scripts! | |
98 | # tweakdefaults = True |
|
97 | # tweakdefaults = True | |
99 |
|
98 | |||
100 | # uncomment to disable color in command output |
|
99 | # uncomment to disable color in command output | |
101 | # (see 'hg help color' for details) |
|
100 | # (see 'hg help color' for details) | |
102 | # color = never |
|
101 | # color = never | |
103 |
|
102 | |||
104 | # uncomment to disable command output pagination |
|
103 | # uncomment to disable command output pagination | |
105 | # (see 'hg help pager' for details) |
|
104 | # (see 'hg help pager' for details) | |
106 | # paginate = never |
|
105 | # paginate = never | |
107 |
|
106 | |||
108 | [extensions] |
|
107 | [extensions] | |
109 | # uncomment the lines below to enable some popular extensions |
|
108 | # uncomment the lines below to enable some popular extensions | |
110 | # (see 'hg help extensions' for more info) |
|
109 | # (see 'hg help extensions' for more info) | |
111 | # |
|
110 | # | |
112 | # histedit = |
|
111 | # histedit = | |
113 | # rebase = |
|
112 | # rebase = | |
114 | # uncommit = |
|
113 | # uncommit = | |
115 | """, |
|
114 | """, | |
116 | b'cloned': b"""# example repository config (see 'hg help config' for more info) |
|
115 | b'cloned': b"""# example repository config (see 'hg help config' for more info) | |
117 | [paths] |
|
116 | [paths] | |
118 | default = %s |
|
117 | default = %s | |
119 |
|
118 | |||
120 | # path aliases to other clones of this repo in URLs or filesystem paths |
|
119 | # path aliases to other clones of this repo in URLs or filesystem paths | |
121 | # (see 'hg help config.paths' for more info) |
|
120 | # (see 'hg help config.paths' for more info) | |
122 | # |
|
121 | # | |
123 | # default:pushurl = ssh://jdoe@example.net/hg/jdoes-fork |
|
122 | # default:pushurl = ssh://jdoe@example.net/hg/jdoes-fork | |
124 | # my-fork = ssh://jdoe@example.net/hg/jdoes-fork |
|
123 | # my-fork = ssh://jdoe@example.net/hg/jdoes-fork | |
125 | # my-clone = /home/jdoe/jdoes-clone |
|
124 | # my-clone = /home/jdoe/jdoes-clone | |
126 |
|
125 | |||
127 | [ui] |
|
126 | [ui] | |
128 | # name and email (local to this repository, optional), e.g. |
|
127 | # name and email (local to this repository, optional), e.g. | |
129 | # username = Jane Doe <jdoe@example.com> |
|
128 | # username = Jane Doe <jdoe@example.com> | |
130 | """, |
|
129 | """, | |
131 | b'local': b"""# example repository config (see 'hg help config' for more info) |
|
130 | b'local': b"""# example repository config (see 'hg help config' for more info) | |
132 | [paths] |
|
131 | [paths] | |
133 | # path aliases to other clones of this repo in URLs or filesystem paths |
|
132 | # path aliases to other clones of this repo in URLs or filesystem paths | |
134 | # (see 'hg help config.paths' for more info) |
|
133 | # (see 'hg help config.paths' for more info) | |
135 | # |
|
134 | # | |
136 | # default = http://example.com/hg/example-repo |
|
135 | # default = http://example.com/hg/example-repo | |
137 | # default:pushurl = ssh://jdoe@example.net/hg/jdoes-fork |
|
136 | # default:pushurl = ssh://jdoe@example.net/hg/jdoes-fork | |
138 | # my-fork = ssh://jdoe@example.net/hg/jdoes-fork |
|
137 | # my-fork = ssh://jdoe@example.net/hg/jdoes-fork | |
139 | # my-clone = /home/jdoe/jdoes-clone |
|
138 | # my-clone = /home/jdoe/jdoes-clone | |
140 |
|
139 | |||
141 | [ui] |
|
140 | [ui] | |
142 | # name and email (local to this repository, optional), e.g. |
|
141 | # name and email (local to this repository, optional), e.g. | |
143 | # username = Jane Doe <jdoe@example.com> |
|
142 | # username = Jane Doe <jdoe@example.com> | |
144 | """, |
|
143 | """, | |
145 | b'global': b"""# example system-wide hg config (see 'hg help config' for more info) |
|
144 | b'global': b"""# example system-wide hg config (see 'hg help config' for more info) | |
146 |
|
145 | |||
147 | [ui] |
|
146 | [ui] | |
148 | # uncomment to disable color in command output |
|
147 | # uncomment to disable color in command output | |
149 | # (see 'hg help color' for details) |
|
148 | # (see 'hg help color' for details) | |
150 | # color = never |
|
149 | # color = never | |
151 |
|
150 | |||
152 | # uncomment to disable command output pagination |
|
151 | # uncomment to disable command output pagination | |
153 | # (see 'hg help pager' for details) |
|
152 | # (see 'hg help pager' for details) | |
154 | # paginate = never |
|
153 | # paginate = never | |
155 |
|
154 | |||
156 | [extensions] |
|
155 | [extensions] | |
157 | # uncomment the lines below to enable some popular extensions |
|
156 | # uncomment the lines below to enable some popular extensions | |
158 | # (see 'hg help extensions' for more info) |
|
157 | # (see 'hg help extensions' for more info) | |
159 | # |
|
158 | # | |
160 | # blackbox = |
|
159 | # blackbox = | |
161 | # churn = |
|
160 | # churn = | |
162 | """, |
|
161 | """, | |
163 | } |
|
162 | } | |
164 |
|
163 | |||
165 |
|
164 | |||
166 | def _maybestrurl(maybebytes): |
|
165 | def _maybestrurl(maybebytes): | |
167 | return pycompat.rapply(pycompat.strurl, maybebytes) |
|
166 | return pycompat.rapply(pycompat.strurl, maybebytes) | |
168 |
|
167 | |||
169 |
|
168 | |||
170 | def _maybebytesurl(maybestr): |
|
169 | def _maybebytesurl(maybestr): | |
171 | return pycompat.rapply(pycompat.bytesurl, maybestr) |
|
170 | return pycompat.rapply(pycompat.bytesurl, maybestr) | |
172 |
|
171 | |||
173 |
|
172 | |||
174 | class httppasswordmgrdbproxy(object): |
|
173 | class httppasswordmgrdbproxy(object): | |
175 | """Delays loading urllib2 until it's needed.""" |
|
174 | """Delays loading urllib2 until it's needed.""" | |
176 |
|
175 | |||
177 | def __init__(self): |
|
176 | def __init__(self): | |
178 | self._mgr = None |
|
177 | self._mgr = None | |
179 |
|
178 | |||
180 | def _get_mgr(self): |
|
179 | def _get_mgr(self): | |
181 | if self._mgr is None: |
|
180 | if self._mgr is None: | |
182 | self._mgr = urlreq.httppasswordmgrwithdefaultrealm() |
|
181 | self._mgr = urlreq.httppasswordmgrwithdefaultrealm() | |
183 | return self._mgr |
|
182 | return self._mgr | |
184 |
|
183 | |||
185 | def add_password(self, realm, uris, user, passwd): |
|
184 | def add_password(self, realm, uris, user, passwd): | |
186 | return self._get_mgr().add_password( |
|
185 | return self._get_mgr().add_password( | |
187 | _maybestrurl(realm), |
|
186 | _maybestrurl(realm), | |
188 | _maybestrurl(uris), |
|
187 | _maybestrurl(uris), | |
189 | _maybestrurl(user), |
|
188 | _maybestrurl(user), | |
190 | _maybestrurl(passwd), |
|
189 | _maybestrurl(passwd), | |
191 | ) |
|
190 | ) | |
192 |
|
191 | |||
193 | def find_user_password(self, realm, uri): |
|
192 | def find_user_password(self, realm, uri): | |
194 | mgr = self._get_mgr() |
|
193 | mgr = self._get_mgr() | |
195 | return _maybebytesurl( |
|
194 | return _maybebytesurl( | |
196 | mgr.find_user_password(_maybestrurl(realm), _maybestrurl(uri)) |
|
195 | mgr.find_user_password(_maybestrurl(realm), _maybestrurl(uri)) | |
197 | ) |
|
196 | ) | |
198 |
|
197 | |||
199 |
|
198 | |||
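httppasswordmgrdbproxy is a lazy-initialization proxy: constructing urllib's password manager is deferred until the first credential call, which keeps startup cheap when no HTTP auth happens. The core of the pattern, sketched generically (not Mercurial API):

    class LazyProxy(object):
        def __init__(self, factory):
            self._factory = factory
            self._obj = None

        def _get(self):
            if self._obj is None:
                self._obj = self._factory()  # deferred construction
            return self._obj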
200 | def _catchterm(*args): |
|
199 | def _catchterm(*args): | |
201 | raise error.SignalInterrupt |
|
200 | raise error.SignalInterrupt | |
202 |
|
201 | |||
203 |
|
202 | |||
204 | # unique object used to detect no default value has been provided when |
|
203 | # unique object used to detect no default value has been provided when | |
205 | # retrieving configuration value. |
|
204 | # retrieving configuration value. | |
206 | _unset = object() |
|
205 | _unset = object() | |
207 |
|
206 | |||
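_unset works because a fresh object() is identity-unique: no caller can accidentally pass an equal value, so `default is _unset` cleanly separates "no default supplied" from "default=None". The pattern in isolation (illustrative names):

    _MISSING = object()

    def getvalue(mapping, key, default=_MISSING):
        # `default is _MISSING` means the caller supplied no default at all
        if key in mapping:
            return mapping[key]
        if default is _MISSING:
            raise KeyError(key)
        return default

    getvalue({}, 'x', default=None)   # returns None instead of raising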
208 | # _reqexithandlers: callbacks run at the end of a request |
|
207 | # _reqexithandlers: callbacks run at the end of a request | |
209 | _reqexithandlers = [] |
|
208 | _reqexithandlers = [] | |
210 |
|
209 | |||
211 |
|
210 | |||
212 | class ui(object): |
|
211 | class ui(object): | |
213 | def __init__(self, src=None): |
|
212 | def __init__(self, src=None): | |
214 | """Create a fresh new ui object if no src given |
|
213 | """Create a fresh new ui object if no src given | |
215 |
|
214 | |||
216 | Use uimod.ui.load() to create a ui which knows global and user configs. |
|
215 | Use uimod.ui.load() to create a ui which knows global and user configs. | |
217 | In most cases, you should use ui.copy() to create a copy of an existing |
|
216 | In most cases, you should use ui.copy() to create a copy of an existing | |
218 | ui object. |
|
217 | ui object. | |
219 | """ |
|
218 | """ | |
220 | # _buffers: used for temporary capture of output |
|
219 | # _buffers: used for temporary capture of output | |
221 | self._buffers = [] |
|
220 | self._buffers = [] | |
222 | # 3-tuple describing how each buffer in the stack behaves. |
|
221 | # 3-tuple describing how each buffer in the stack behaves. | |
223 | # Values are (capture stderr, capture subprocesses, apply labels). |
|
222 | # Values are (capture stderr, capture subprocesses, apply labels). | |
224 | self._bufferstates = [] |
|
223 | self._bufferstates = [] | |
225 | # When a buffer is active, defines whether we are expanding labels. |
|
224 | # When a buffer is active, defines whether we are expanding labels. | |
226 | # This exists to prevent an extra list lookup. |
|
225 | # This exists to prevent an extra list lookup. | |
227 | self._bufferapplylabels = None |
|
226 | self._bufferapplylabels = None | |
228 | self.quiet = self.verbose = self.debugflag = self.tracebackflag = False |
|
227 | self.quiet = self.verbose = self.debugflag = self.tracebackflag = False | |
229 | self._reportuntrusted = True |
|
228 | self._reportuntrusted = True | |
230 | self._knownconfig = configitems.coreitems |
|
229 | self._knownconfig = configitems.coreitems | |
231 | self._ocfg = config.config() # overlay |
|
230 | self._ocfg = config.config() # overlay | |
232 | self._tcfg = config.config() # trusted |
|
231 | self._tcfg = config.config() # trusted | |
233 | self._ucfg = config.config() # untrusted |
|
232 | self._ucfg = config.config() # untrusted | |
234 | self._trustusers = set() |
|
233 | self._trustusers = set() | |
235 | self._trustgroups = set() |
|
234 | self._trustgroups = set() | |
236 | self.callhooks = True |
|
235 | self.callhooks = True | |
237 | # Insecure server connections requested. |
|
236 | # Insecure server connections requested. | |
238 | self.insecureconnections = False |
|
237 | self.insecureconnections = False | |
239 | # Blocked time |
|
238 | # Blocked time | |
240 | self.logblockedtimes = False |
|
239 | self.logblockedtimes = False | |
241 | # color mode: see mercurial/color.py for possible value |
|
240 | # color mode: see mercurial/color.py for possible value | |
242 | self._colormode = None |
|
241 | self._colormode = None | |
243 | self._terminfoparams = {} |
|
242 | self._terminfoparams = {} | |
244 | self._styles = {} |
|
243 | self._styles = {} | |
245 | self._uninterruptible = False |
|
244 | self._uninterruptible = False | |
246 | self.showtimestamp = False |
|
245 | self.showtimestamp = False | |
247 |
|
246 | |||
248 | if src: |
|
247 | if src: | |
249 | self._fout = src._fout |
|
248 | self._fout = src._fout | |
250 | self._ferr = src._ferr |
|
249 | self._ferr = src._ferr | |
251 | self._fin = src._fin |
|
250 | self._fin = src._fin | |
252 | self._fmsg = src._fmsg |
|
251 | self._fmsg = src._fmsg | |
253 | self._fmsgout = src._fmsgout |
|
252 | self._fmsgout = src._fmsgout | |
254 | self._fmsgerr = src._fmsgerr |
|
253 | self._fmsgerr = src._fmsgerr | |
255 | self._finoutredirected = src._finoutredirected |
|
254 | self._finoutredirected = src._finoutredirected | |
256 | self._loggers = src._loggers.copy() |
|
            self.pageractive = src.pageractive
            self._disablepager = src._disablepager
            self._tweaked = src._tweaked

            self._tcfg = src._tcfg.copy()
            self._ucfg = src._ucfg.copy()
            self._ocfg = src._ocfg.copy()
            self._trustusers = src._trustusers.copy()
            self._trustgroups = src._trustgroups.copy()
            self.environ = src.environ
            self.callhooks = src.callhooks
            self.insecureconnections = src.insecureconnections
            self._colormode = src._colormode
            self._terminfoparams = src._terminfoparams.copy()
            self._styles = src._styles.copy()

            self.fixconfig()

            self.httppasswordmgrdb = src.httppasswordmgrdb
            self._blockedtimes = src._blockedtimes
        else:
            self._fout = procutil.stdout
            self._ferr = procutil.stderr
            self._fin = procutil.stdin
            self._fmsg = None
            self._fmsgout = self.fout  # configurable
            self._fmsgerr = self.ferr  # configurable
            self._finoutredirected = False
            self._loggers = {}
            self.pageractive = False
            self._disablepager = False
            self._tweaked = False

            # shared read-only environment
            self.environ = encoding.environ

            self.httppasswordmgrdb = httppasswordmgrdbproxy()
            self._blockedtimes = collections.defaultdict(int)

        allowed = self.configlist(b'experimental', b'exportableenviron')
        if b'*' in allowed:
            self._exportableenviron = self.environ
        else:
            self._exportableenviron = {}
            for k in allowed:
                if k in self.environ:
                    self._exportableenviron[k] = self.environ[k]

    def _new_source(self):
        self._ocfg.new_source()
        self._tcfg.new_source()
        self._ucfg.new_source()

    @classmethod
    def load(cls):
        """Create a ui and load global and user configs"""
        u = cls()
        # we always trust global config files and environment variables
        for t, f in rcutil.rccomponents():
            if t == b'path':
                u.readconfig(f, trust=True)
            elif t == b'resource':
                u.read_resource_config(f, trust=True)
            elif t == b'items':
                u._new_source()
                sections = set()
                for section, name, value, source in f:
                    # do not set u._ocfg
                    # XXX clean this up once immutable config object is a thing
                    u._tcfg.set(section, name, value, source)
                    u._ucfg.set(section, name, value, source)
                    sections.add(section)
                for section in sections:
                    u.fixconfig(section=section)
            else:
                raise error.ProgrammingError(b'unknown rctype: %s' % t)
        u._maybetweakdefaults()
        u._new_source()  # anything after that is a different level
        return u

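    # Illustrative usage sketch (not part of upstream ui.py): command-line
    # entry points typically build their ui this way, then layer repo-level
    # and command-line overrides on top of the loaded global/user config.
    # The option names below are examples only.
    #
    #     u = ui.load()
    #     u.setconfig(b'ui', b'interactive', b'off', b'commandline')
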
    def _maybetweakdefaults(self):
        if not self.configbool(b'ui', b'tweakdefaults'):
            return
        if self._tweaked or self.plain(b'tweakdefaults'):
            return

        # Note: it is SUPER IMPORTANT that you set self._tweaked to
        # True *before* any calls to setconfig(), otherwise you'll get
        # infinite recursion between setconfig and this method.
        #
        # TODO: We should extract an inner method in setconfig() to
        # avoid this weirdness.
        self._tweaked = True
        tmpcfg = config.config()
        tmpcfg.parse(b'<tweakdefaults>', tweakrc)
        for section in tmpcfg:
            for name, value in tmpcfg.items(section):
                if not self.hasconfig(section, name):
                    self.setconfig(section, name, value, b"<tweakdefaults>")

    def copy(self):
        return self.__class__(self)

    def resetstate(self):
        """Clear internal state that shouldn't persist across commands"""
        if self._progbar:
            self._progbar.resetstate()  # reset last-print time of progress bar
        self.httppasswordmgrdb = httppasswordmgrdbproxy()

    @contextlib.contextmanager
    def timeblockedsection(self, key):
        # this is open-coded below - search for timeblockedsection to find them
        starttime = util.timer()
        try:
            yield
        finally:
            self._blockedtimes[key + b'_blocked'] += (
                util.timer() - starttime
            ) * 1000

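    # Illustrative usage sketch (not part of upstream ui.py): callers wrap
    # operations that may block on I/O so the elapsed time is accounted in
    # _blockedtimes under "<key>_blocked", in milliseconds. The key name
    # below is an example only.
    #
    #     with u.timeblockedsection(b'stdio'):
    #         data = u.fin.read()
    #     # u._blockedtimes[b'stdio_blocked'] now includes the wait time
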
    @contextlib.contextmanager
    def uninterruptible(self):
        """Mark an operation as unsafe.

        Most operations on a repository are safe to interrupt, but a
        few are risky (for example repair.strip). This context manager
        lets you advise Mercurial that something risky is happening so
        that control-C etc can be blocked if desired.
        """
        enabled = self.configbool(b'experimental', b'nointerrupt')
        if enabled and self.configbool(
            b'experimental', b'nointerrupt-interactiveonly'
        ):
            enabled = self.interactive()
        if self._uninterruptible or not enabled:
            # if nointerrupt support is turned off, the process isn't
            # interactive, or we're already in an uninterruptible
            # block, do nothing.
            yield
            return

        def warn():
            self.warn(_(b"shutting down cleanly\n"))
            self.warn(
                _(b"press ^C again to terminate immediately (dangerous)\n")
            )
            return True

        with procutil.uninterruptible(warn):
            try:
                self._uninterruptible = True
                yield
            finally:
                self._uninterruptible = False

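    # Illustrative usage sketch (not part of upstream ui.py): guard a risky
    # repository mutation so that, while the nointerrupt experiment is
    # enabled, a first ^C only prints the warning above. The `repo` and
    # `nodes` names are assumptions for the sketch.
    #
    #     with repo.ui.uninterruptible():
    #         repair.strip(repo.ui, repo, nodes)
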
    def formatter(self, topic, opts):
        return formatter.formatter(self, self, topic, opts)

    def _trusted(self, fp, f):
        st = util.fstat(fp)
        if util.isowner(st):
            return True

        tusers, tgroups = self._trustusers, self._trustgroups
        if b'*' in tusers or b'*' in tgroups:
            return True

        user = util.username(st.st_uid)
        group = util.groupname(st.st_gid)
        if user in tusers or group in tgroups or user == util.username():
            return True

        if self._reportuntrusted:
            self.warn(
                _(
                    b'not trusting file %s from untrusted '
                    b'user %s, group %s\n'
                )
                % (f, user, group)
            )
        return False

    def read_resource_config(
        self, name, root=None, trust=False, sections=None, remap=None
    ):
        try:
            fp = resourceutil.open_resource(name[0], name[1])
        except IOError:
            if not sections:  # ignore unless we were looking for something
                return
            raise

        self._readconfig(
            b'resource:%s.%s' % name, fp, root, trust, sections, remap
        )

    def readconfig(
        self, filename, root=None, trust=False, sections=None, remap=None
    ):
        try:
            fp = open(filename, 'rb')
        except IOError:
            if not sections:  # ignore unless we were looking for something
                return
            raise

        self._readconfig(filename, fp, root, trust, sections, remap)

    def _readconfig(
        self, filename, fp, root=None, trust=False, sections=None, remap=None
    ):
        with fp:
            cfg = config.config()
            trusted = sections or trust or self._trusted(fp, filename)

            try:
                cfg.read(filename, fp, sections=sections, remap=remap)
            except error.ConfigError as inst:
                if trusted:
                    raise
                self.warn(
                    _(b'ignored %s: %s\n') % (inst.location, inst.message)
                )

        self._applyconfig(cfg, trusted, root)

    def applyconfig(self, configitems, source=b"", root=None):
        """Add configitems from a non-file source. Unlike with ``setconfig()``,
        they can be overridden by subsequent config file reads. The items are
        in the same format as ``configoverride()``, namely a dict of the
        following structures: {(section, name) : value}

        Typically this is used by extensions that inject themselves into the
        config file load procedure by monkeypatching ``localrepo.loadhgrc()``.
        """
        cfg = config.config()

        for (section, name), value in configitems.items():
            cfg.set(section, name, value, source)

        self._applyconfig(cfg, True, root)

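    # Illustrative usage sketch (not part of upstream ui.py): an extension
    # feeding items into the config as if they had come from a config-file
    # source, using the {(section, name): value} format documented above.
    # The section/name/value below are examples only.
    #
    #     u.applyconfig(
    #         {(b'ui', b'username'): b'Alice <alice@example.org>'},
    #         source=b'myextension',
    #     )
    #     # unlike setconfig(), a later readconfig() may override this value
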
    def _applyconfig(self, cfg, trusted, root):
        if self.plain():
            for k in (
                b'debug',
                b'fallbackencoding',
                b'quiet',
                b'slash',
                b'logtemplate',
                b'message-output',
                b'statuscopies',
                b'style',
                b'traceback',
                b'verbose',
            ):
                if k in cfg[b'ui']:
                    del cfg[b'ui'][k]
            for k, v in cfg.items(b'defaults'):
                del cfg[b'defaults'][k]
            for k, v in cfg.items(b'commands'):
                del cfg[b'commands'][k]
            for k, v in cfg.items(b'command-templates'):
                del cfg[b'command-templates'][k]
        # Don't remove aliases from the configuration if in the exceptionlist
        if self.plain(b'alias'):
            for k, v in cfg.items(b'alias'):
                del cfg[b'alias'][k]
        if self.plain(b'revsetalias'):
            for k, v in cfg.items(b'revsetalias'):
                del cfg[b'revsetalias'][k]
        if self.plain(b'templatealias'):
            for k, v in cfg.items(b'templatealias'):
                del cfg[b'templatealias'][k]

        if trusted:
            self._tcfg.update(cfg)
            self._tcfg.update(self._ocfg)
        self._ucfg.update(cfg)
        self._ucfg.update(self._ocfg)

        if root is None:
            root = os.path.expanduser(b'~')
        self.fixconfig(root=root)

    def fixconfig(self, root=None, section=None):
        if section in (None, b'paths'):
            # expand vars and ~
            # translate paths relative to root (or home) into absolute paths
            root = root or encoding.getcwd()
            for c in self._tcfg, self._ucfg, self._ocfg:
                for n, p in c.items(b'paths'):
                    # Ignore sub-options.
                    if b':' in n:
                        continue
                    if not p:
                        continue
                    if b'%%' in p:
                        s = self.configsource(b'paths', n) or b'none'
                        self.warn(
                            _(b"(deprecated '%%' in path %s=%s from %s)\n")
                            % (n, p, s)
                        )
                        p = p.replace(b'%%', b'%')
                    p = util.expandpath(p)
                    if not urlutil.hasscheme(p) and not os.path.isabs(p):
                        p = os.path.normpath(os.path.join(root, p))
                    c.alter(b"paths", n, p)

        if section in (None, b'ui'):
            # update ui options
            self._fmsgout, self._fmsgerr = _selectmsgdests(self)
            self.debugflag = self.configbool(b'ui', b'debug')
            self.verbose = self.debugflag or self.configbool(b'ui', b'verbose')
            self.quiet = not self.debugflag and self.configbool(b'ui', b'quiet')
            if self.verbose and self.quiet:
                self.quiet = self.verbose = False
            self._reportuntrusted = self.debugflag or self.configbool(
                b"ui", b"report_untrusted"
            )
            self.showtimestamp = self.configbool(b'ui', b'timestamp-output')
            self.tracebackflag = self.configbool(b'ui', b'traceback')
            self.logblockedtimes = self.configbool(b'ui', b'logblockedtimes')

        if section in (None, b'trusted'):
            # update trust information
            self._trustusers.update(self.configlist(b'trusted', b'users'))
            self._trustgroups.update(self.configlist(b'trusted', b'groups'))

        if section in (None, b'devel', b'ui') and self.debugflag:
            tracked = set()
            if self.configbool(b'devel', b'debug.extensions'):
                tracked.add(b'extension')
            if tracked:
                logger = loggingutil.fileobjectlogger(self._ferr, tracked)
                self.setlogger(b'debug', logger)

    def backupconfig(self, section, item):
        return (
            self._ocfg.backup(section, item),
            self._tcfg.backup(section, item),
            self._ucfg.backup(section, item),
        )

    def restoreconfig(self, data):
        self._ocfg.restore(data[0])
        self._tcfg.restore(data[1])
        self._ucfg.restore(data[2])

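    # Illustrative usage sketch (not part of upstream ui.py): temporarily
    # overriding one option and restoring the previous state afterwards.
    # The option chosen below is an example only.
    #
    #     backup = u.backupconfig(b'ui', b'quiet')
    #     try:
    #         u.setconfig(b'ui', b'quiet', b'yes', b'example')
    #         ...  # run something with the override in place
    #     finally:
    #         u.restoreconfig(backup)
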
    def setconfig(self, section, name, value, source=b''):
        for cfg in (self._ocfg, self._tcfg, self._ucfg):
            cfg.set(section, name, value, source)
        self.fixconfig(section=section)
        self._maybetweakdefaults()

    def _data(self, untrusted):
        return untrusted and self._ucfg or self._tcfg

    def configsource(self, section, name, untrusted=False):
        return self._data(untrusted).source(section, name)

    def config(self, section, name, default=_unset, untrusted=False):
        """return the plain string version of a config"""
        value = self._config(
            section, name, default=default, untrusted=untrusted
        )
        if value is _unset:
            return None
        return value

    def _config(self, section, name, default=_unset, untrusted=False):
        value = itemdefault = default
        item = self._knownconfig.get(section, {}).get(name)
        alternates = [(section, name)]

        if item is not None:
            alternates.extend(item.alias)
            if callable(item.default):
                itemdefault = item.default()
            else:
                itemdefault = item.default
        else:
            msg = b"accessing unregistered config item: '%s.%s'"
            msg %= (section, name)
            self.develwarn(msg, 2, b'warn-config-unknown')

        if default is _unset:
            if item is None:
                value = default
            elif item.default is configitems.dynamicdefault:
                value = None
                msg = b"config item requires an explicit default value: '%s.%s'"
                msg %= (section, name)
                self.develwarn(msg, 2, b'warn-config-default')
            else:
                value = itemdefault
        elif (
            item is not None
            and item.default is not configitems.dynamicdefault
            and default != itemdefault
        ):
            msg = (
                b"specifying a mismatched default value for a registered "
                b"config item: '%s.%s' '%s'"
            )
            msg %= (section, name, pycompat.bytestr(default))
            self.develwarn(msg, 2, b'warn-config-default')

        candidates = []
        config = self._data(untrusted)
        for s, n in alternates:
            candidate = config.get(s, n, None)
            if candidate is not None:
                candidates.append((s, n, candidate))
        if candidates:

            def level(x):
                return config.level(x[0], x[1])

            value = max(candidates, key=level)[2]

        if self.debugflag and not untrusted and self._reportuntrusted:
            for s, n in alternates:
                uvalue = self._ucfg.get(s, n)
                if uvalue is not None and uvalue != value:
                    self.debug(
                        b"ignoring untrusted configuration option "
                        b"%s.%s = %s\n" % (s, n, uvalue)
                    )
        return value

    def config_default(self, section, name):
        """return the default value for a config option

        The default is returned "raw"; for example, if it is a callable, the
        callable is not called.
        """
        item = self._knownconfig.get(section, {}).get(name)

        if item is None:
            raise KeyError((section, name))
        return item.default

    def configsuboptions(self, section, name, default=_unset, untrusted=False):
        """Get a config option and all sub-options.

        Some config options have sub-options that are declared with the
        format "key:opt = value". This method is used to return the main
        option and all its declared sub-options.

        Returns a 2-tuple of ``(option, sub-options)``, where ``sub-options``
        is a dict of defined sub-options where keys and values are strings.
        """
        main = self.config(section, name, default, untrusted=untrusted)
        data = self._data(untrusted)
        sub = {}
        prefix = b'%s:' % name
        for k, v in data.items(section):
            if k.startswith(prefix):
                sub[k[len(prefix) :]] = v

        if self.debugflag and not untrusted and self._reportuntrusted:
            for k, v in sub.items():
                uvalue = self._ucfg.get(section, b'%s:%s' % (name, k))
                if uvalue is not None and uvalue != v:
                    self.debug(
                        b'ignoring untrusted configuration option '
                        b'%s:%s.%s = %s\n' % (section, name, k, uvalue)
                    )

        return main, sub

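    # Illustrative sketch (not part of upstream ui.py): given a config such
    # as
    #
    #     [paths]
    #     default = https://example.org/repo
    #     default:pushurl = ssh://example.org/repo
    #
    # the sub-option is returned alongside the main value:
    #
    #     main, sub = u.configsuboptions(b'paths', b'default')
    #     # main == b'https://example.org/repo'
    #     # sub == {b'pushurl': b'ssh://example.org/repo'}
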
    def configpath(self, section, name, default=_unset, untrusted=False):
        """get a path config item, expanded relative to repo root or config
        file"""
        v = self.config(section, name, default, untrusted)
        if v is None:
            return None
        if not os.path.isabs(v) or b"://" not in v:
            src = self.configsource(section, name, untrusted)
            if b':' in src:
                base = os.path.dirname(src.rsplit(b':')[0])
                v = os.path.join(base, os.path.expanduser(v))
        return v

    def configbool(self, section, name, default=_unset, untrusted=False):
        """parse a configuration element as a boolean

        >>> u = ui(); s = b'foo'
        >>> u.setconfig(s, b'true', b'yes')
        >>> u.configbool(s, b'true')
        True
        >>> u.setconfig(s, b'false', b'no')
        >>> u.configbool(s, b'false')
        False
        >>> u.configbool(s, b'unknown')
        False
        >>> u.configbool(s, b'unknown', True)
        True
        >>> u.setconfig(s, b'invalid', b'somevalue')
        >>> u.configbool(s, b'invalid')
        Traceback (most recent call last):
            ...
        ConfigError: foo.invalid is not a boolean ('somevalue')
        """

        v = self._config(section, name, default, untrusted=untrusted)
        if v is None:
            return v
        if v is _unset:
            if default is _unset:
                return False
            return default
        if isinstance(v, bool):
            return v
        b = stringutil.parsebool(v)
        if b is None:
            raise error.ConfigError(
                _(b"%s.%s is not a boolean ('%s')") % (section, name, v)
            )
        return b

    def configwith(
        self, convert, section, name, default=_unset, desc=None, untrusted=False
    ):
        """parse a configuration element with a conversion function

        >>> u = ui(); s = b'foo'
        >>> u.setconfig(s, b'float1', b'42')
        >>> u.configwith(float, s, b'float1')
        42.0
        >>> u.setconfig(s, b'float2', b'-4.25')
        >>> u.configwith(float, s, b'float2')
        -4.25
        >>> u.configwith(float, s, b'unknown', 7)
        7.0
        >>> u.setconfig(s, b'invalid', b'somevalue')
        >>> u.configwith(float, s, b'invalid')
        Traceback (most recent call last):
            ...
        ConfigError: foo.invalid is not a valid float ('somevalue')
        >>> u.configwith(float, s, b'invalid', desc=b'womble')
        Traceback (most recent call last):
            ...
        ConfigError: foo.invalid is not a valid womble ('somevalue')
        """

        v = self.config(section, name, default, untrusted)
        if v is None:
            return v  # do not attempt to convert None
        try:
            return convert(v)
        except (ValueError, error.ParseError):
            if desc is None:
                desc = pycompat.sysbytes(convert.__name__)
            raise error.ConfigError(
                _(b"%s.%s is not a valid %s ('%s')") % (section, name, desc, v)
            )

    def configint(self, section, name, default=_unset, untrusted=False):
        """parse a configuration element as an integer

        >>> u = ui(); s = b'foo'
        >>> u.setconfig(s, b'int1', b'42')
        >>> u.configint(s, b'int1')
        42
        >>> u.setconfig(s, b'int2', b'-42')
        >>> u.configint(s, b'int2')
        -42
        >>> u.configint(s, b'unknown', 7)
        7
        >>> u.setconfig(s, b'invalid', b'somevalue')
        >>> u.configint(s, b'invalid')
        Traceback (most recent call last):
            ...
        ConfigError: foo.invalid is not a valid integer ('somevalue')
        """

        return self.configwith(
            int, section, name, default, b'integer', untrusted
        )

    def configbytes(self, section, name, default=_unset, untrusted=False):
        """parse a configuration element as a quantity in bytes

        Units can be specified as b (bytes), k or kb (kilobytes), m or
        mb (megabytes), g or gb (gigabytes).

        >>> u = ui(); s = b'foo'
        >>> u.setconfig(s, b'val1', b'42')
        >>> u.configbytes(s, b'val1')
        42
        >>> u.setconfig(s, b'val2', b'42.5 kb')
        >>> u.configbytes(s, b'val2')
        43520
        >>> u.configbytes(s, b'unknown', b'7 MB')
        7340032
        >>> u.setconfig(s, b'invalid', b'somevalue')
        >>> u.configbytes(s, b'invalid')
        Traceback (most recent call last):
            ...
        ConfigError: foo.invalid is not a byte quantity ('somevalue')
        """

        value = self._config(section, name, default, untrusted)
        if value is _unset:
            if default is _unset:
                default = 0
            value = default
        if not isinstance(value, bytes):
            return value
        try:
            return util.sizetoint(value)
        except error.ParseError:
            raise error.ConfigError(
                _(b"%s.%s is not a byte quantity ('%s')")
                % (section, name, value)
            )

    def configlist(self, section, name, default=_unset, untrusted=False):
        """parse a configuration element as a list of comma/space separated
        strings

        >>> u = ui(); s = b'foo'
        >>> u.setconfig(s, b'list1', b'this,is "a small" ,test')
        >>> u.configlist(s, b'list1')
        ['this', 'is', 'a small', 'test']
        >>> u.setconfig(s, b'list2', b'this, is "a small" , test ')
        >>> u.configlist(s, b'list2')
        ['this', 'is', 'a small', 'test']
        """
        # default is not always a list
        v = self.configwith(
            stringutil.parselist, section, name, default, b'list', untrusted
        )
        if isinstance(v, bytes):
            return stringutil.parselist(v)
        elif v is None:
            return []
        return v

    def configdate(self, section, name, default=_unset, untrusted=False):
        """parse a configuration element as a tuple of ints

        >>> u = ui(); s = b'foo'
        >>> u.setconfig(s, b'date', b'0 0')
        >>> u.configdate(s, b'date')
        (0, 0)
        """
        if self.config(section, name, default, untrusted):
            return self.configwith(
                dateutil.parsedate, section, name, default, b'date', untrusted
            )
        if default is _unset:
            return None
        return default

    def configdefault(self, section, name):
        """returns the default value of the config item"""
        item = self._knownconfig.get(section, {}).get(name)
        itemdefault = None
        if item is not None:
            if callable(item.default):
                itemdefault = item.default()
            else:
                itemdefault = item.default
        return itemdefault

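    # Illustrative sketch (not part of upstream ui.py): unlike
    # config_default() above, this resolves callable defaults and returns
    # None for unregistered items instead of raising KeyError. The item
    # names and resulting values below are examples/assumptions only.
    #
    #     u.configdefault(b'ui', b'quiet')   # -> False (registered default)
    #     u.configdefault(b'foo', b'bar')    # -> None (unregistered item)
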
    def hasconfig(self, section, name, untrusted=False):
        return self._data(untrusted).hasitem(section, name)

    def has_section(self, section, untrusted=False):
        '''tell whether section exists in config.'''
        return section in self._data(untrusted)

    def configitems(self, section, untrusted=False, ignoresub=False):
        items = self._data(untrusted).items(section)
        if ignoresub:
            items = [i for i in items if b':' not in i[0]]
        if self.debugflag and not untrusted and self._reportuntrusted:
            for k, v in self._ucfg.items(section):
                if self._tcfg.get(section, k) != v:
                    self.debug(
                        b"ignoring untrusted configuration option "
                        b"%s.%s = %s\n" % (section, k, v)
                    )
        return items

    def walkconfig(self, untrusted=False):
        cfg = self._data(untrusted)
        for section in cfg.sections():
            for name, value in self.configitems(section, untrusted):
                yield section, name, value

    def plain(self, feature=None):
        """is plain mode active?

        Plain mode means that all configuration variables which affect
        the behavior and output of Mercurial should be
        ignored. Additionally, the output should be stable,
        reproducible and suitable for use in scripts or applications.

        The only way to trigger plain mode is by setting either the
        `HGPLAIN' or `HGPLAINEXCEPT' environment variables.

        The return value can either be
        - False if HGPLAIN is not set, or feature is in HGPLAINEXCEPT
        - False if feature is disabled by default and not included in HGPLAIN
        - True otherwise
        """
        if (
            b'HGPLAIN' not in encoding.environ
            and b'HGPLAINEXCEPT' not in encoding.environ
        ):
            return False
        exceptions = (
            encoding.environ.get(b'HGPLAINEXCEPT', b'').strip().split(b',')
        )
        # TODO: add support for HGPLAIN=+feature,-feature syntax
        if b'+strictflags' not in encoding.environ.get(b'HGPLAIN', b'').split(
            b','
        ):
            exceptions.append(b'strictflags')
        if feature and exceptions:
            return feature not in exceptions
        return True

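    # Illustrative sketch (not part of upstream ui.py) of how the
    # environment drives plain():
    #
    #     HGPLAIN=1            -> u.plain() is True and u.plain(b'alias')
    #                             is True, so [alias] entries are dropped
    #     HGPLAINEXCEPT=alias  -> u.plain() is True, but u.plain(b'alias')
    #                             is False, so [alias] entries are kept
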
    def username(self, acceptempty=False):
        """Return default username to be used in commits.

        Searched in this order: $HGUSER, [ui] section of hgrcs, $EMAIL;
        the search stops at the first of these that is set.
        If not found and acceptempty is True, returns None.
        If not found and ui.askusername is True, ask the user, else use
        ($LOGNAME or $USER or $LNAME or $USERNAME) + "@full.hostname".
        If no username could be found, raise an Abort error.
        """
        user = encoding.environ.get(b"HGUSER")
        if user is None:
            user = self.config(b"ui", b"username")
            if user is not None:
                user = os.path.expandvars(user)
        if user is None:
            user = encoding.environ.get(b"EMAIL")
        if user is None and acceptempty:
            return user
        if user is None and self.configbool(b"ui", b"askusername"):
            user = self.prompt(_(b"enter a commit username:"), default=None)
        if user is None and not self.interactive():
            try:
                user = b'%s@%s' % (
                    procutil.getuser(),
                    encoding.strtolocal(socket.getfqdn()),
                )
                self.warn(_(b"no username found, using '%s' instead\n") % user)
            except KeyError:
                pass
        if not user:
            raise error.Abort(
                _(b'no username supplied'),
                hint=_(b"use 'hg config --edit' " b'to set your username'),
            )
        if b"\n" in user:
            raise error.Abort(
                _(b"username %r contains a newline\n") % pycompat.bytestr(user)
            )
        return user

    def shortuser(self, user):
        """Return a short representation of a user name or email address."""
        if not self.verbose:
            user = stringutil.shortuser(user)
        return user

    def expandpath(self, loc, default=None):
        """Return repository location relative to cwd or from [paths]"""
        msg = b'ui.expandpath is deprecated, use `get_*` functions from urlutil'
        self.deprecwarn(msg, b'6.0')
        try:
            p = self.getpath(loc)
            if p:
                return p.rawloc
        except error.RepoError:
            pass

        if default:
            try:
                p = self.getpath(default)
                if p:
                    return p.rawloc
            except error.RepoError:
                pass

        return loc

    @util.propertycache
    def paths(self):
        return urlutil.paths(self)

    def getpath(self, *args, **kwargs):
        """see paths.getpath for details

        This method exists because `getpath` needs a ui for potential warning
        messages.
        """
        msg = b'ui.getpath is deprecated, use `get_*` functions from urlutil'
        self.deprecwarn(msg, b'6.0')
        return self.paths.getpath(self, *args, **kwargs)

    @property
    def fout(self):
        return self._fout

    @fout.setter
    def fout(self, f):
        self._fout = f
        self._fmsgout, self._fmsgerr = _selectmsgdests(self)

    @property
    def ferr(self):
        return self._ferr

    @ferr.setter
    def ferr(self, f):
        self._ferr = f
        self._fmsgout, self._fmsgerr = _selectmsgdests(self)

    @property
    def fin(self):
        return self._fin

    @fin.setter
    def fin(self, f):
        self._fin = f

    @property
    def fmsg(self):
        """Stream dedicated for status/error messages; may be None if
        fout/ferr are used"""
        return self._fmsg

    @fmsg.setter
    def fmsg(self, f):
        self._fmsg = f
        self._fmsgout, self._fmsgerr = _selectmsgdests(self)

    def pushbuffer(self, error=False, subproc=False, labeled=False):
        """install a buffer to capture standard output of the ui object

        If error is True, the error output will be captured too.

        If subproc is True, output from subprocesses (typically hooks) will be
        captured too.

        If labeled is True, any labels associated with buffered
        output will be handled. By default, this has no effect
        on the output returned, but extensions and GUI tools may
        handle this argument and return styled output. If output
        is being buffered so it can be captured and parsed or
        processed, labeled should not be set to True.
        """
        self._buffers.append([])
        self._bufferstates.append((error, subproc, labeled))
        self._bufferapplylabels = labeled

    def popbuffer(self):
        '''pop the last buffer and return the buffered output'''
        self._bufferstates.pop()
        if self._bufferstates:
            self._bufferapplylabels = self._bufferstates[-1][2]
        else:
            self._bufferapplylabels = None

        return b"".join(self._buffers.pop())

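    # Usage sketch (illustrative only, not used by the code itself):
    # capturing command output through the buffer stack, assuming `ui` is a
    # configured instance of this class:
    #
    #   ui.pushbuffer()
    #   ui.write(b'hello\n')
    #   data = ui.popbuffer()  # data == b'hello\n'
    #
    # `labeled` is left at False so the captured bytes stay free of color
    # codes and remain safe to parse.
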
    def _isbuffered(self, dest):
        if dest is self._fout:
            return bool(self._buffers)
        if dest is self._ferr:
            return bool(self._bufferstates and self._bufferstates[-1][0])
        return False

    def canwritewithoutlabels(self):
        '''check if write skips the label'''
        if self._buffers and not self._bufferapplylabels:
            return True
        return self._colormode is None

    def canbatchlabeledwrites(self):
        '''check if write calls with labels are batchable'''
        # Windows color printing is special, see ``write``.
        return self._colormode != b'win32'

    def write(self, *args, **opts):
        """write args to output

        By default, this method simply writes to the buffer or stdout.
        Color mode can be set on the UI class to have the output decorated
        with color modifiers before being written to stdout.

        The color used is controlled by an optional keyword argument, "label".
        This should be a string containing label names separated by space.
        Label names take the form of "topic.type". For example, ui.debug()
        issues a label of "ui.debug".

        Progress reports via stderr are normally cleared before writing as
        stdout and stderr go to the same terminal. This can be skipped with
        the optional keyword argument "keepprogressbar". The progress bar
        will continue to occupy a partial line on stderr in that case.
        This functionality is intended for use when Mercurial acts as a data
        source in a pipe.

        When labeling output for a specific command, a label of
        "cmdname.type" is recommended. For example, status issues
        a label of "status.modified" for modified files.
        """
        dest = self._fout

        # inlined _write() for speed
        if self._buffers:
            label = opts.get('label', b'')
            if label and self._bufferapplylabels:
                self._buffers[-1].extend(self.label(a, label) for a in args)
            else:
                self._buffers[-1].extend(args)
            return

        # inlined _writenobuf() for speed
        if not opts.get('keepprogressbar', False):
            self._progclear()
        msg = b''.join(args)

        # opencode timeblockedsection because this is a critical path
        starttime = util.timer()
        try:
            if self._colormode == b'win32':
                # windows color printing is its own can of crab, defer to
                # the color module and that is it.
                color.win32print(self, dest.write, msg, **opts)
            else:
                if self._colormode is not None:
                    label = opts.get('label', b'')
                    msg = self.label(msg, label)
                dest.write(msg)
        except IOError as err:
            raise error.StdioError(err)
        finally:
            self._blockedtimes[b'stdio_blocked'] += (
                util.timer() - starttime
            ) * 1000

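    # Usage sketch (illustrative only): labeled writes, assuming `ui` is an
    # instance of this class. The label only changes the bytes written when a
    # color mode is active; otherwise they pass through unchanged:
    #
    #   ui.write(b'M file.txt\n', label=b'status.modified')
    #   ui.write(b'plain text\n')  # no label, never decorated
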
    def write_err(self, *args, **opts):
        self._write(self._ferr, *args, **opts)

    def _write(self, dest, *args, **opts):
        # update write() as well if you touch this code
        if self._isbuffered(dest):
            label = opts.get('label', b'')
            if label and self._bufferapplylabels:
                self._buffers[-1].extend(self.label(a, label) for a in args)
            else:
                self._buffers[-1].extend(args)
        else:
            self._writenobuf(dest, *args, **opts)

    def _writenobuf(self, dest, *args, **opts):
        # update write() as well if you touch this code
        if not opts.get('keepprogressbar', False):
            self._progclear()
        msg = b''.join(args)

        # opencode timeblockedsection because this is a critical path
        starttime = util.timer()
        try:
            if dest is self._ferr and not getattr(self._fout, 'closed', False):
                self._fout.flush()
            if getattr(dest, 'structured', False):
                # channel for machine-readable output with metadata, where
                # no extra colorization is necessary.
                dest.write(msg, **opts)
            elif self._colormode == b'win32':
                # windows color printing is its own can of crab, defer to
                # the color module and that is it.
                color.win32print(self, dest.write, msg, **opts)
            else:
                if self._colormode is not None:
                    label = opts.get('label', b'')
                    msg = self.label(msg, label)
                dest.write(msg)
            # stderr may be buffered under win32 when redirected to files,
            # including stdout.
            if dest is self._ferr and not getattr(dest, 'closed', False):
                dest.flush()
        except IOError as err:
            if dest is self._ferr and err.errno in (
                errno.EPIPE,
                errno.EIO,
                errno.EBADF,
            ):
                # no way to report the error, so ignore it
                return
            raise error.StdioError(err)
        finally:
            self._blockedtimes[b'stdio_blocked'] += (
                util.timer() - starttime
            ) * 1000

    def _writemsg(self, dest, *args, **opts):
        timestamp = self.showtimestamp and opts.get('type') in {
            b'debug',
            b'error',
            b'note',
            b'status',
            b'warning',
        }
        if timestamp:
            args = (
                b'[%s] '
                % pycompat.bytestr(datetime.datetime.now().isoformat()),
            ) + args
        _writemsgwith(self._write, dest, *args, **opts)
        if timestamp:
            dest.flush()

    def _writemsgnobuf(self, dest, *args, **opts):
        _writemsgwith(self._writenobuf, dest, *args, **opts)

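    # With timestamping enabled (self.showtimestamp; upstream wires this to
    # the `ui.timestamp-output` config option), a message of one of the types
    # above is prefixed with datetime.datetime.now().isoformat(), e.g.:
    #
    #   [2021-08-31T14:27:53.123456] pulling from https://example.org/repo
    #
    # (The timestamp value shown is, of course, only an illustration.)
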
    def flush(self):
        # opencode timeblockedsection because this is a critical path
        starttime = util.timer()
        try:
            try:
                self._fout.flush()
            except IOError as err:
                if err.errno not in (errno.EPIPE, errno.EIO, errno.EBADF):
                    raise error.StdioError(err)
            finally:
                try:
                    self._ferr.flush()
                except IOError as err:
                    if err.errno not in (errno.EPIPE, errno.EIO, errno.EBADF):
                        raise error.StdioError(err)
        finally:
            self._blockedtimes[b'stdio_blocked'] += (
                util.timer() - starttime
            ) * 1000

    def _isatty(self, fh):
        if self.configbool(b'ui', b'nontty'):
            return False
        return procutil.isatty(fh)

    def protectfinout(self):
        """Duplicate ui streams and redirect original if they are stdio

        Returns (fin, fout) which point to the original ui fds, but may be
        copies of them. The returned streams can be considered "owned" in
        that print(), exec(), etc. never reach them.
        """
        if self._finoutredirected:
            # if already redirected, protectstdio() would just create another
            # nullfd pair, which is equivalent to returning self._fin/_fout.
            return self._fin, self._fout
        fin, fout = procutil.protectstdio(self._fin, self._fout)
        self._finoutredirected = (fin, fout) != (self._fin, self._fout)
        return fin, fout

    def restorefinout(self, fin, fout):
        """Restore ui streams from possibly duplicated (fin, fout)"""
        if (fin, fout) == (self._fin, self._fout):
            return
        procutil.restorestdio(self._fin, self._fout, fin, fout)
        # protectfinout() won't create more than one pair of duplicated
        # streams, so we can just turn the redirection flag off.
        self._finoutredirected = False

    @contextlib.contextmanager
    def protectedfinout(self):
        """Run code block with protected standard streams"""
        fin, fout = self.protectfinout()
        try:
            yield fin, fout
        finally:
            self.restorefinout(fin, fout)

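    # Usage sketch (illustrative only): running code that must own the
    # process's real stdio, assuming `ui` is an instance of this class:
    #
    #   with ui.protectedfinout() as (fin, fout):
    #       fout.write(b'raw protocol bytes\n')
    #
    # Inside the block, stray print() or exec() output cannot corrupt the
    # protocol stream, because the original fds have been redirected away.
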
    def disablepager(self):
        self._disablepager = True

    def pager(self, command):
        """Start a pager for subsequent command output.

        Commands which produce a long stream of output should call
        this function to activate the user's preferred pagination
        mechanism (which may be no pager). Calling this function
        precludes any future use of interactive functionality, such as
        prompting the user or activating curses.

        Args:
          command: The full, non-aliased name of the command. That is, "log"
                   not "history", "summary" not "summ", etc.
        """
        if self._disablepager or self.pageractive:
            # how the pager should behave is already determined
            return

        if not command.startswith(b'internal-always-') and (
            # explicit --pager=on (= 'internal-always-' prefix) should
            # take precedence over disabling factors below
            command in self.configlist(b'pager', b'ignore')
            or not self.configbool(b'ui', b'paginate')
            or not self.configbool(b'pager', b'attend-' + command, True)
            or encoding.environ.get(b'TERM') == b'dumb'
            # TODO: if we want to allow HGPLAINEXCEPT=pager,
            # formatted() will need some adjustment.
            or not self.formatted()
            or self.plain()
            or self._buffers
            # TODO: expose debugger-enabled on the UI object
            or b'--debugger' in pycompat.sysargv
        ):
            # We only want to paginate if the ui appears to be
            # interactive, the user didn't say HGPLAIN or
            # HGPLAINEXCEPT=pager, and the user didn't specify --debug.
            return

        pagercmd = self.config(b'pager', b'pager', rcutil.fallbackpager)
        if not pagercmd:
            return

        pagerenv = {}
        for name, value in rcutil.defaultpagerenv().items():
            if name not in encoding.environ:
                pagerenv[name] = value

        self.debug(
            b'starting pager for command %s\n' % stringutil.pprint(command)
        )
        self.flush()

        wasformatted = self.formatted()
        if util.safehasattr(signal, b"SIGPIPE"):
            signal.signal(signal.SIGPIPE, _catchterm)
        if self._runpager(pagercmd, pagerenv):
            self.pageractive = True
            # Preserve the formatted-ness of the UI. This is important
            # because we mess with stdout, which might confuse
            # auto-detection of things being formatted.
            self.setconfig(b'ui', b'formatted', wasformatted, b'pager')
            self.setconfig(b'ui', b'interactive', False, b'pager')

            # If pagermode differs from color.mode, reconfigure color now that
            # pageractive is set.
            cm = self._colormode
            if cm != self.config(b'color', b'pagermode', cm):
                color.setup(self)
        else:
            # If the pager can't be spawned in dispatch when --pager=on is
            # given, don't try again when the command runs, to avoid a
            # duplicate warning about a missing pager command.
            self.disablepager()

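    # The checks above correspond to user configuration along these lines
    # (a sketch; the option names are taken from the lookups in pager()):
    #
    #   [ui]
    #   paginate = yes
    #
    #   [pager]
    #   pager = less -FRX
    #   ignore = version, help
    #   attend-log = no
    #
    # An explicit --pager=on reaches this code as an 'internal-always-'
    # prefixed command name and overrides every disabling factor above.
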
    def _runpager(self, command, env=None):
        """Actually start the pager and set up file descriptors.

        This is separate in part so that extensions (like chg) can
        override how a pager is invoked.
        """
        if command == b'cat':
            # Save ourselves some work.
            return False
        # If the command doesn't contain any of these characters, we
        # assume it's a binary and exec it directly. This means for
        # simple pager command configurations, we can degrade
        # gracefully and tell the user about their broken pager.
        shell = any(c in command for c in b"|&;<>()$`\\\"' \t\n*?[#~=%")

        if pycompat.iswindows and not shell:
            # Window's built-in `more` cannot be invoked with shell=False, but
            # its `more.com` can. Hide this implementation detail from the
            # user so we can also get sane bad PAGER behavior. MSYS has
            # `more.exe`, so do a cmd.exe style resolution of the executable to
            # determine which one to use.
            fullcmd = procutil.findexe(command)
            if not fullcmd:
                self.warn(
                    _(b"missing pager command '%s', skipping pager\n") % command
                )
                return False

            command = fullcmd

        try:
            pager = subprocess.Popen(
                procutil.tonativestr(command),
                shell=shell,
                bufsize=-1,
                close_fds=procutil.closefds,
                stdin=subprocess.PIPE,
                stdout=procutil.stdout,
                stderr=procutil.stderr,
                env=procutil.tonativeenv(procutil.shellenviron(env)),
            )
        except OSError as e:
            if e.errno == errno.ENOENT and not shell:
                self.warn(
                    _(b"missing pager command '%s', skipping pager\n") % command
                )
                return False
            raise

        # back up original file descriptors
        stdoutfd = os.dup(procutil.stdout.fileno())
        stderrfd = os.dup(procutil.stderr.fileno())

        os.dup2(pager.stdin.fileno(), procutil.stdout.fileno())
        if self._isatty(procutil.stderr):
            os.dup2(pager.stdin.fileno(), procutil.stderr.fileno())

        @self.atexit
        def killpager():
            if util.safehasattr(signal, b"SIGINT"):
                signal.signal(signal.SIGINT, signal.SIG_IGN)
            # restore original fds, closing pager.stdin copies in the process
            os.dup2(stdoutfd, procutil.stdout.fileno())
            os.dup2(stderrfd, procutil.stderr.fileno())
            pager.stdin.close()
            pager.wait()

        return True

    @property
    def _exithandlers(self):
        return _reqexithandlers

    def atexit(self, func, *args, **kwargs):
        """register a function to run after dispatching a request

        Handlers do not stay registered across request boundaries."""
        self._exithandlers.append((func, args, kwargs))
        return func

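    # Example: the decorator form used by _runpager() above to register a
    # cleanup hook that runs once the current request has been dispatched:
    #
    #   @ui.atexit
    #   def cleanup():
    #       pass  # ... release resources ...
    #
    # Arguments can instead be bound explicitly with
    # ui.atexit(func, *args, **kwargs); either way the handler list is
    # cleared between requests.
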
    def interface(self, feature):
        """what interface to use for interactive console features?

        The interface is controlled by the value of `ui.interface` but also by
        the value of feature-specific configuration. For example:

        ui.interface.histedit = text
        ui.interface.chunkselector = curses

        Here the features are "histedit" and "chunkselector".

        The configuration above means that the default interface for commands
        is curses, the interface for histedit is text, and the interface for
        selecting chunks is crecord (the best curses interface available).

        Consider the following example:
        ui.interface = curses
        ui.interface.histedit = text

        Then histedit will use the text interface and chunkselector will use
        the default curses interface (crecord at the moment).
        """
        alldefaults = frozenset([b"text", b"curses"])

        featureinterfaces = {
            b"chunkselector": [
                b"text",
                b"curses",
            ],
            b"histedit": [
                b"text",
                b"curses",
            ],
        }

        # Feature-specific interface
        if feature not in featureinterfaces.keys():
            # Programming error, not user error
            raise ValueError(b"Unknown feature requested %s" % feature)

        availableinterfaces = frozenset(featureinterfaces[feature])
        if alldefaults > availableinterfaces:
            # Programming error, not user error. We need a use case to
            # define the right thing to do here.
            raise ValueError(
                b"Feature %s does not handle all default interfaces" % feature
            )

        if self.plain() or encoding.environ.get(b'TERM') == b'dumb':
            return b"text"

        # Default interface for all the features
        defaultinterface = b"text"
        i = self.config(b"ui", b"interface")
        if i in alldefaults:
            defaultinterface = i

        choseninterface = defaultinterface
        f = self.config(b"ui", b"interface.%s" % feature)
        if f in availableinterfaces:
            choseninterface = f

        if i is not None and defaultinterface != i:
            if f is not None:
                self.warn(_(b"invalid value for ui.interface: %s\n") % (i,))
            else:
                self.warn(
                    _(b"invalid value for ui.interface: %s (using %s)\n")
                    % (i, choseninterface)
                )
        if f is not None and choseninterface != f:
            self.warn(
                _(b"invalid value for ui.interface.%s: %s (using %s)\n")
                % (feature, f, choseninterface)
            )

        return choseninterface

    def interactive(self):
        """is interactive input allowed?

        An interactive session is a session where input can be reasonably read
        from `sys.stdin'. If this function returns false, any attempt to read
        from stdin should fail with an error, unless a sensible default has
        been specified.

        Interactiveness is triggered by the value of the `ui.interactive'
        configuration variable or - if it is unset - when `sys.stdin' points
        to a terminal device.

        This function refers to input only; for output, see `ui.formatted()'.
        """
        i = self.configbool(b"ui", b"interactive")
        if i is None:
            # some environments replace stdin without implementing isatty
            # usually those are non-interactive
            return self._isatty(self._fin)

        return i

    def termwidth(self):
        """how wide is the terminal in columns?"""
        if b'COLUMNS' in encoding.environ:
            try:
                return int(encoding.environ[b'COLUMNS'])
            except ValueError:
                pass
        return scmutil.termsize(self)[0]

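    # A minimal sketch of the COLUMNS override above: invoking Mercurial as
    #
    #   COLUMNS=120 hg log
    #
    # makes termwidth() report 120 regardless of the real terminal size,
    # falling back to scmutil.termsize() probing only when the variable is
    # unset or not an integer.
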
    def formatted(self):
        """should formatted output be used?

        It is often desirable to format the output to suit the output medium.
        Examples of this are truncating long lines or colorizing messages.
        However, this is often not desirable when piping output into other
        utilities, e.g. `grep'.

        Formatted output is triggered by the value of the `ui.formatted'
        configuration variable or - if it is unset - when `sys.stdout' points
        to a terminal device. Please note that `ui.formatted' should be
        considered an implementation detail; it is not intended for use
        outside Mercurial or its extensions.

        This function refers to output only; for input, see `ui.interactive()'.
        This function always returns false when in plain mode, see `ui.plain()'.
        """
        if self.plain():
            return False

        i = self.configbool(b"ui", b"formatted")
        if i is None:
            # some environments replace stdout without implementing isatty
            # usually those are non-interactive
            return self._isatty(self._fout)

        return i

    def _readline(self, prompt=b' ', promptopts=None):
        # Replacing stdin/stdout temporarily is a hard problem on Python 3
        # because they have to be text streams with *no buffering*. Instead,
        # we use rawinput() only if call_readline() will be invoked by
        # PyOS_Readline(), so no I/O will be made at Python layer.
        usereadline = (
            self._isatty(self._fin)
            and self._isatty(self._fout)
            and procutil.isstdin(self._fin)
            and procutil.isstdout(self._fout)
        )
        if usereadline:
            try:
                # magically add command line editing support, where
                # available
                import readline

                # force demandimport to really load the module
                readline.read_history_file
                # windows sometimes raises something other than ImportError
            except Exception:
                usereadline = False

        if self._colormode == b'win32' or not usereadline:
            if not promptopts:
                promptopts = {}
            self._writemsgnobuf(
                self._fmsgout, prompt, type=b'prompt', **promptopts
            )
            self.flush()
            prompt = b' '
        else:
            prompt = self.label(prompt, b'ui.prompt') + b' '

        # prompt ' ' must exist; otherwise readline may delete entire line
        # - http://bugs.python.org/issue12833
        with self.timeblockedsection(b'stdio'):
            if usereadline:
                self.flush()
                prompt = encoding.strfromlocal(prompt)
                line = encoding.strtolocal(pycompat.rawinput(prompt))
                # When stdin is in binary mode on Windows, it can cause
                # raw_input() to emit an extra trailing carriage return
                if pycompat.oslinesep == b'\r\n' and line.endswith(b'\r'):
                    line = line[:-1]
            else:
                self._fout.write(pycompat.bytestr(prompt))
                self._fout.flush()
                line = self._fin.readline()
                if not line:
                    raise EOFError
                line = line.rstrip(pycompat.oslinesep)

        return line

    def prompt(self, msg, default=b"y"):
        """Prompt user with msg, read response.
        If ui is not interactive, the default is returned.
        """
        return self._prompt(msg, default=default)

    def _prompt(self, msg, **opts):
        default = opts['default']
        if not self.interactive():
            self._writemsg(self._fmsgout, msg, b' ', type=b'prompt', **opts)
            self._writemsg(
                self._fmsgout, default or b'', b"\n", type=b'promptecho'
            )
            return default
        try:
            r = self._readline(prompt=msg, promptopts=opts)
            if not r:
                r = default
            if self.configbool(b'ui', b'promptecho'):
                self._writemsg(
                    self._fmsgout, r or b'', b"\n", type=b'promptecho'
                )
            return r
        except EOFError:
            raise error.ResponseExpected()

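    # Usage sketch (illustrative only): a free-form prompt, assuming `ui` is
    # an instance of this class:
    #
    #   name = ui.prompt(b'branch name:', default=b'default')
    #
    # Non-interactive sessions skip reading stdin and echo/return the
    # default instead.
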
    @staticmethod
    def extractchoices(prompt):
        """Extract prompt message and list of choices from specified prompt.

        This returns a tuple "(message, choices)", where "choices" is the
        list of tuples "(response character, text without &)".

        >>> ui.extractchoices(b"awake? $$ &Yes $$ &No")
        ('awake? ', [('y', 'Yes'), ('n', 'No')])
        >>> ui.extractchoices(b"line\\nbreak? $$ &Yes $$ &No")
        ('line\\nbreak? ', [('y', 'Yes'), ('n', 'No')])
        >>> ui.extractchoices(b"want lots of $$money$$?$$Ye&s$$N&o")
        ('want lots of $$money$$?', [('s', 'Yes'), ('o', 'No')])
        """

        # Sadly, the prompt string may have been built with a filename
        # containing "$$" so let's try to find the first valid-looking
        # prompt to start parsing. Sadly, we also can't rely on
        # choices containing spaces, ASCII, or basically anything
        # except an ampersand followed by a character.
        m = re.match(br'(?s)(.+?)\$\$([^$]*&[^ $].*)', prompt)
        msg = m.group(1)
        choices = [p.strip(b' ') for p in m.group(2).split(b'$$')]

        def choicetuple(s):
            ampidx = s.index(b'&')
            return s[ampidx + 1 : ampidx + 2].lower(), s.replace(b'&', b'', 1)

        return (msg, [choicetuple(s) for s in choices])

    def promptchoice(self, prompt, default=0):
        """Prompt user with a message, read response, and ensure it matches
        one of the provided choices. The prompt is formatted as follows:

           "would you like fries with that (Yn)? $$ &Yes $$ &No"

        The index of the choice is returned. Responses are case
        insensitive. If ui is not interactive, the default is
        returned.
        """

        msg, choices = self.extractchoices(prompt)
        resps = [r for r, t in choices]
        while True:
            r = self._prompt(msg, default=resps[default], choices=choices)
            if r.lower() in resps:
                return resps.index(r.lower())
            # TODO: shouldn't it be a warning?
            self._writemsg(self._fmsgout, _(b"unrecognized response\n"))

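    # Usage sketch (illustrative only): a yes/no choice, assuming `ui` is an
    # instance of this class:
    #
    #   idx = ui.promptchoice(b'apply change (Yn)? $$ &Yes $$ &No')
    #   if idx == 0:  # 0 -> 'y' (the default), 1 -> 'n'
    #       pass      # ... apply ...
    #
    # Unrecognized responses re-prompt; non-interactive sessions return
    # `default` (here 0, i.e. "Yes") immediately.
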
1765 | def getpass(self, prompt=None, default=None): |
|
1764 | def getpass(self, prompt=None, default=None): | |
1766 | if not self.interactive(): |
|
1765 | if not self.interactive(): | |
1767 | return default |
|
1766 | return default | |
1768 | try: |
|
1767 | try: | |
1769 | self._writemsg( |
|
1768 | self._writemsg( | |
1770 | self._fmsgerr, |
|
1769 | self._fmsgerr, | |
1771 | prompt or _(b'password: '), |
|
1770 | prompt or _(b'password: '), | |
1772 | type=b'prompt', |
|
1771 | type=b'prompt', | |
1773 | password=True, |
|
1772 | password=True, | |
1774 | ) |
|
1773 | ) | |
1775 | # disable getpass() only if explicitly specified. it's still valid |
|
1774 | # disable getpass() only if explicitly specified. it's still valid | |
1776 | # to interact with tty even if fin is not a tty. |
|
1775 | # to interact with tty even if fin is not a tty. | |
1777 | with self.timeblockedsection(b'stdio'): |
|
1776 | with self.timeblockedsection(b'stdio'): | |
1778 | if self.configbool(b'ui', b'nontty'): |
|
1777 | if self.configbool(b'ui', b'nontty'): | |
1779 | l = self._fin.readline() |
|
1778 | l = self._fin.readline() | |
1780 | if not l: |
|
1779 | if not l: | |
1781 | raise EOFError |
|
1780 | raise EOFError | |
1782 | return l.rstrip(b'\n') |
|
1781 | return l.rstrip(b'\n') | |
1783 | else: |
|
1782 | else: | |
1784 | return getpass.getpass('') |
|
1783 | return util.get_password() | |
1785 | except EOFError: |
|
1784 | except EOFError: | |
1786 | raise error.ResponseExpected() |
|
1785 | raise error.ResponseExpected() | |
1787 |
|
1786 | |||
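
The only functional change in this hunk swaps the direct getpass call for the new util.get_password(), so the prompt reliably yields bytes. A minimal sketch of what such a platform wrapper can look like, assuming a POSIX-style implementation with a hard-coded encoding (the real code lives in the platform modules and uses hg's local encoding):

    import getpass

    def get_password():
        # getpass.getpass() returns str on Python 3; hg's ui layer wants bytes
        return getpass.getpass('').encode('utf-8', 'replace')
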
1788 | def status(self, *msg, **opts): |
|
1787 | def status(self, *msg, **opts): | |
1789 | """write status message to output (if ui.quiet is False) |
|
1788 | """write status message to output (if ui.quiet is False) | |
1790 |
|
1789 | |||
1791 | This adds an output label of "ui.status". |
|
1790 | This adds an output label of "ui.status". | |
1792 | """ |
|
1791 | """ | |
1793 | if not self.quiet: |
|
1792 | if not self.quiet: | |
1794 | self._writemsg(self._fmsgout, type=b'status', *msg, **opts) |
|
1793 | self._writemsg(self._fmsgout, type=b'status', *msg, **opts) | |
1795 |
|
1794 | |||
1796 | def warn(self, *msg, **opts): |
|
1795 | def warn(self, *msg, **opts): | |
1797 | """write warning message to output (stderr) |
|
1796 | """write warning message to output (stderr) | |
1798 |
|
1797 | |||
1799 | This adds an output label of "ui.warning". |
|
1798 | This adds an output label of "ui.warning". | |
1800 | """ |
|
1799 | """ | |
1801 | self._writemsg(self._fmsgerr, type=b'warning', *msg, **opts) |
|
1800 | self._writemsg(self._fmsgerr, type=b'warning', *msg, **opts) | |
1802 |
|
1801 | |||
1803 | def error(self, *msg, **opts): |
|
1802 | def error(self, *msg, **opts): | |
1804 | """write error message to output (stderr) |
|
1803 | """write error message to output (stderr) | |
1805 |
|
1804 | |||
1806 | This adds an output label of "ui.error". |
|
1805 | This adds an output label of "ui.error". | |
1807 | """ |
|
1806 | """ | |
1808 | self._writemsg(self._fmsgerr, type=b'error', *msg, **opts) |
|
1807 | self._writemsg(self._fmsgerr, type=b'error', *msg, **opts) | |
1809 |
|
1808 | |||
1810 | def note(self, *msg, **opts): |
|
1809 | def note(self, *msg, **opts): | |
1811 | """write note to output (if ui.verbose is True) |
|
1810 | """write note to output (if ui.verbose is True) | |
1812 |
|
1811 | |||
1813 | This adds an output label of "ui.note". |
|
1812 | This adds an output label of "ui.note". | |
1814 | """ |
|
1813 | """ | |
1815 | if self.verbose: |
|
1814 | if self.verbose: | |
1816 | self._writemsg(self._fmsgout, type=b'note', *msg, **opts) |
|
1815 | self._writemsg(self._fmsgout, type=b'note', *msg, **opts) | |
1817 |
|
1816 | |||
1818 | def debug(self, *msg, **opts): |
|
1817 | def debug(self, *msg, **opts): | |
1819 | """write debug message to output (if ui.debugflag is True) |
|
1818 | """write debug message to output (if ui.debugflag is True) | |
1820 |
|
1819 | |||
1821 | This adds an output label of "ui.debug". |
|
1820 | This adds an output label of "ui.debug". | |
1822 | """ |
|
1821 | """ | |
1823 | if self.debugflag: |
|
1822 | if self.debugflag: | |
1824 | self._writemsg(self._fmsgout, type=b'debug', *msg, **opts) |
|
1823 | self._writemsg(self._fmsgout, type=b'debug', *msg, **opts) | |
1825 | self.log(b'debug', b'%s', b''.join(msg)) |
|
1824 | self.log(b'debug', b'%s', b''.join(msg)) | |
1826 |
|
1825 | |||
1827 | # Aliases to defeat check-code. |
|
1826 | # Aliases to defeat check-code. | |
1828 | statusnoi18n = status |
|
1827 | statusnoi18n = status | |
1829 | notenoi18n = note |
|
1828 | notenoi18n = note | |
1830 | warnnoi18n = warn |
|
1829 | warnnoi18n = warn | |
1831 | writenoi18n = write |
|
1830 | writenoi18n = write | |
1832 |
|
1831 | |||
1833 | def edit( |
|
1832 | def edit( | |
1834 | self, |
|
1833 | self, | |
1835 | text, |
|
1834 | text, | |
1836 | user, |
|
1835 | user, | |
1837 | extra=None, |
|
1836 | extra=None, | |
1838 | editform=None, |
|
1837 | editform=None, | |
1839 | pending=None, |
|
1838 | pending=None, | |
1840 | repopath=None, |
|
1839 | repopath=None, | |
1841 | action=None, |
|
1840 | action=None, | |
1842 | ): |
|
1841 | ): | |
1843 | if action is None: |
|
1842 | if action is None: | |
1844 | self.develwarn( |
|
1843 | self.develwarn( | |
1845 | b'action is None but will soon be a required ' |
|
1844 | b'action is None but will soon be a required ' | |
1846 | b'parameter to ui.edit()' |
|
1845 | b'parameter to ui.edit()' | |
1847 | ) |
|
1846 | ) | |
1848 | extra_defaults = { |
|
1847 | extra_defaults = { | |
1849 | b'prefix': b'editor', |
|
1848 | b'prefix': b'editor', | |
1850 | b'suffix': b'.txt', |
|
1849 | b'suffix': b'.txt', | |
1851 | } |
|
1850 | } | |
1852 | if extra is not None: |
|
1851 | if extra is not None: | |
1853 | if extra.get(b'suffix') is not None: |
|
1852 | if extra.get(b'suffix') is not None: | |
1854 | self.develwarn( |
|
1853 | self.develwarn( | |
1855 | b'extra.suffix is not None but will soon be ' |
|
1854 | b'extra.suffix is not None but will soon be ' | |
1856 | b'ignored by ui.edit()' |
|
1855 | b'ignored by ui.edit()' | |
1857 | ) |
|
1856 | ) | |
1858 | extra_defaults.update(extra) |
|
1857 | extra_defaults.update(extra) | |
1859 | extra = extra_defaults |
|
1858 | extra = extra_defaults | |
1860 |
|
1859 | |||
1861 | if action == b'diff': |
|
1860 | if action == b'diff': | |
1862 | suffix = b'.diff' |
|
1861 | suffix = b'.diff' | |
1863 | elif action: |
|
1862 | elif action: | |
1864 | suffix = b'.%s.hg.txt' % action |
|
1863 | suffix = b'.%s.hg.txt' % action | |
1865 | else: |
|
1864 | else: | |
1866 | suffix = extra[b'suffix'] |
|
1865 | suffix = extra[b'suffix'] | |
1867 |
|
1866 | |||
1868 | rdir = None |
|
1867 | rdir = None | |
1869 | if self.configbool(b'experimental', b'editortmpinhg'): |
|
1868 | if self.configbool(b'experimental', b'editortmpinhg'): | |
1870 | rdir = repopath |
|
1869 | rdir = repopath | |
1871 | (fd, name) = pycompat.mkstemp( |
|
1870 | (fd, name) = pycompat.mkstemp( | |
1872 | prefix=b'hg-' + extra[b'prefix'] + b'-', suffix=suffix, dir=rdir |
|
1871 | prefix=b'hg-' + extra[b'prefix'] + b'-', suffix=suffix, dir=rdir | |
1873 | ) |
|
1872 | ) | |
1874 | try: |
|
1873 | try: | |
1875 | with os.fdopen(fd, 'wb') as f: |
|
1874 | with os.fdopen(fd, 'wb') as f: | |
1876 | f.write(util.tonativeeol(text)) |
|
1875 | f.write(util.tonativeeol(text)) | |
1877 |
|
1876 | |||
1878 | environ = {b'HGUSER': user} |
|
1877 | environ = {b'HGUSER': user} | |
1879 | if b'transplant_source' in extra: |
|
1878 | if b'transplant_source' in extra: | |
1880 | environ.update( |
|
1879 | environ.update( | |
1881 | {b'HGREVISION': hex(extra[b'transplant_source'])} |
|
1880 | {b'HGREVISION': hex(extra[b'transplant_source'])} | |
1882 | ) |
|
1881 | ) | |
1883 | for label in (b'intermediate-source', b'source', b'rebase_source'): |
|
1882 | for label in (b'intermediate-source', b'source', b'rebase_source'): | |
1884 | if label in extra: |
|
1883 | if label in extra: | |
1885 | environ.update({b'HGREVISION': extra[label]}) |
|
1884 | environ.update({b'HGREVISION': extra[label]}) | |
1886 | break |
|
1885 | break | |
1887 | if editform: |
|
1886 | if editform: | |
1888 | environ.update({b'HGEDITFORM': editform}) |
|
1887 | environ.update({b'HGEDITFORM': editform}) | |
1889 | if pending: |
|
1888 | if pending: | |
1890 | environ.update({b'HG_PENDING': pending}) |
|
1889 | environ.update({b'HG_PENDING': pending}) | |
1891 |
|
1890 | |||
1892 | editor = self.geteditor() |
|
1891 | editor = self.geteditor() | |
1893 |
|
1892 | |||
1894 | self.system( |
|
1893 | self.system( | |
1895 | b"%s \"%s\"" % (editor, name), |
|
1894 | b"%s \"%s\"" % (editor, name), | |
1896 | environ=environ, |
|
1895 | environ=environ, | |
1897 | onerr=error.CanceledError, |
|
1896 | onerr=error.CanceledError, | |
1898 | errprefix=_(b"edit failed"), |
|
1897 | errprefix=_(b"edit failed"), | |
1899 | blockedtag=b'editor', |
|
1898 | blockedtag=b'editor', | |
1900 | ) |
|
1899 | ) | |
1901 |
|
1900 | |||
1902 | with open(name, 'rb') as f: |
|
1901 | with open(name, 'rb') as f: | |
1903 | t = util.fromnativeeol(f.read()) |
|
1902 | t = util.fromnativeeol(f.read()) | |
1904 | finally: |
|
1903 | finally: | |
1905 | os.unlink(name) |
|
1904 | os.unlink(name) | |
1906 |
|
1905 | |||
1907 | return t |
|
1906 | return t | |
1908 |
|
1907 | |||
1909 | def system( |
|
1908 | def system( | |
1910 | self, |
|
1909 | self, | |
1911 | cmd, |
|
1910 | cmd, | |
1912 | environ=None, |
|
1911 | environ=None, | |
1913 | cwd=None, |
|
1912 | cwd=None, | |
1914 | onerr=None, |
|
1913 | onerr=None, | |
1915 | errprefix=None, |
|
1914 | errprefix=None, | |
1916 | blockedtag=None, |
|
1915 | blockedtag=None, | |
1917 | ): |
|
1916 | ): | |
1918 | """execute shell command with appropriate output stream. command |
|
1917 | """execute shell command with appropriate output stream. command | |
1919 | output will be redirected if fout is not stdout. |
|
1918 | output will be redirected if fout is not stdout. | |
1920 |
|
1919 | |||
1921 | if command fails and onerr is None, return status, else raise onerr |
|
1920 | if command fails and onerr is None, return status, else raise onerr | |
1922 | object as exception. |
|
1921 | object as exception. | |
1923 | """ |
|
1922 | """ | |
1924 | if blockedtag is None: |
|
1923 | if blockedtag is None: | |
1925 | # Long cmds tend to be because of an absolute path on cmd. Keep |
|
1924 | # Long cmds tend to be because of an absolute path on cmd. Keep | |
1926 | # the tail end instead |
|
1925 | # the tail end instead | |
1927 | cmdsuffix = cmd.translate(None, _keepalnum)[-85:] |
|
1926 | cmdsuffix = cmd.translate(None, _keepalnum)[-85:] | |
1928 | blockedtag = b'unknown_system_' + cmdsuffix |
|
1927 | blockedtag = b'unknown_system_' + cmdsuffix | |
1929 | out = self._fout |
|
1928 | out = self._fout | |
1930 | if any(s[1] for s in self._bufferstates): |
|
1929 | if any(s[1] for s in self._bufferstates): | |
1931 | out = self |
|
1930 | out = self | |
1932 | with self.timeblockedsection(blockedtag): |
|
1931 | with self.timeblockedsection(blockedtag): | |
1933 | rc = self._runsystem(cmd, environ=environ, cwd=cwd, out=out) |
|
1932 | rc = self._runsystem(cmd, environ=environ, cwd=cwd, out=out) | |
1934 | if rc and onerr: |
|
1933 | if rc and onerr: | |
1935 | errmsg = b'%s %s' % ( |
|
1934 | errmsg = b'%s %s' % ( | |
1936 | procutil.shellsplit(cmd)[0], |
|
1935 | procutil.shellsplit(cmd)[0], | |
1937 | procutil.explainexit(rc), |
|
1936 | procutil.explainexit(rc), | |
1938 | ) |
|
1937 | ) | |
1939 | if errprefix: |
|
1938 | if errprefix: | |
1940 | errmsg = b'%s: %s' % (errprefix, errmsg) |
|
1939 | errmsg = b'%s: %s' % (errprefix, errmsg) | |
1941 | raise onerr(errmsg) |
|
1940 | raise onerr(errmsg) | |
1942 | return rc |
|
1941 | return rc | |
1943 |
|
1942 | |||
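
When system() is called without a blockedtag, the tag is derived from the command itself: every non-alphanumeric byte is deleted and at most the last 85 characters are kept, so an absolute command path still yields a short, stable tag. A standalone sketch (the command string and the _keepalnum stand-in are illustrative; the real deletion table is built elsewhere in this module):

    _keepalnum = bytes(c for c in range(256) if not bytes([c]).isalnum())

    cmd = b'/usr/local/bin/my-editor --wait'           # hypothetical command
    cmdsuffix = cmd.translate(None, _keepalnum)[-85:]  # keep the tail end
    print(b'unknown_system_' + cmdsuffix)
    # b'unknown_system_usrlocalbinmyeditorwait'
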
1944 | def _runsystem(self, cmd, environ, cwd, out): |
|
1943 | def _runsystem(self, cmd, environ, cwd, out): | |
1945 | """actually execute the given shell command (can be overridden by |
|
1944 | """actually execute the given shell command (can be overridden by | |
1946 | extensions like chg)""" |
|
1945 | extensions like chg)""" | |
1947 | return procutil.system(cmd, environ=environ, cwd=cwd, out=out) |
|
1946 | return procutil.system(cmd, environ=environ, cwd=cwd, out=out) | |
1948 |
|
1947 | |||
1949 | def traceback(self, exc=None, force=False): |
|
1948 | def traceback(self, exc=None, force=False): | |
1950 | """print exception traceback if traceback printing enabled or forced. |
|
1949 | """print exception traceback if traceback printing enabled or forced. | |
1951 | only to be called in an exception handler. returns true if traceback |
|
1950 | only to be called in an exception handler. returns true if traceback | |
1952 | printed.""" |
|
1951 | printed.""" | |
1953 | if self.tracebackflag or force: |
|
1952 | if self.tracebackflag or force: | |
1954 | if exc is None: |
|
1953 | if exc is None: | |
1955 | exc = sys.exc_info() |
|
1954 | exc = sys.exc_info() | |
1956 | cause = getattr(exc[1], 'cause', None) |
|
1955 | cause = getattr(exc[1], 'cause', None) | |
1957 |
|
1956 | |||
1958 | if cause is not None: |
|
1957 | if cause is not None: | |
1959 | causetb = traceback.format_tb(cause[2]) |
|
1958 | causetb = traceback.format_tb(cause[2]) | |
1960 | exctb = traceback.format_tb(exc[2]) |
|
1959 | exctb = traceback.format_tb(exc[2]) | |
1961 | exconly = traceback.format_exception_only(cause[0], cause[1]) |
|
1960 | exconly = traceback.format_exception_only(cause[0], cause[1]) | |
1962 |
|
1961 | |||
1963 | # exclude frame where 'exc' was chained and rethrown from exctb |
|
1962 | # exclude frame where 'exc' was chained and rethrown from exctb | |
1964 | self.write_err( |
|
1963 | self.write_err( | |
1965 | b'Traceback (most recent call last):\n', |
|
1964 | b'Traceback (most recent call last):\n', | |
1966 | encoding.strtolocal(''.join(exctb[:-1])), |
|
1965 | encoding.strtolocal(''.join(exctb[:-1])), | |
1967 | encoding.strtolocal(''.join(causetb)), |
|
1966 | encoding.strtolocal(''.join(causetb)), | |
1968 | encoding.strtolocal(''.join(exconly)), |
|
1967 | encoding.strtolocal(''.join(exconly)), | |
1969 | ) |
|
1968 | ) | |
1970 | else: |
|
1969 | else: | |
1971 | output = traceback.format_exception(exc[0], exc[1], exc[2]) |
|
1970 | output = traceback.format_exception(exc[0], exc[1], exc[2]) | |
1972 | self.write_err(encoding.strtolocal(''.join(output))) |
|
1971 | self.write_err(encoding.strtolocal(''.join(output))) | |
1973 | return self.tracebackflag or force |
|
1972 | return self.tracebackflag or force | |
1974 |
|
1973 | |||
1975 | def geteditor(self): |
|
1974 | def geteditor(self): | |
1976 | '''return editor to use''' |
|
1975 | '''return editor to use''' | |
1977 | if pycompat.sysplatform == b'plan9': |
|
1976 | if pycompat.sysplatform == b'plan9': | |
1978 | # vi is the MIPS instruction simulator on Plan 9. We |
|
1977 | # vi is the MIPS instruction simulator on Plan 9. We | |
1979 | # instead default to E to plumb commit messages to |
|
1978 | # instead default to E to plumb commit messages to | |
1980 | # avoid confusion. |
|
1979 | # avoid confusion. | |
1981 | editor = b'E' |
|
1980 | editor = b'E' | |
1982 | elif pycompat.isdarwin: |
|
1981 | elif pycompat.isdarwin: | |
1983 | # vi on darwin is POSIX compatible to a fault, and that includes |
|
1982 | # vi on darwin is POSIX compatible to a fault, and that includes | |
1984 | # exiting non-zero if you make any mistake when running an ex |
|
1983 | # exiting non-zero if you make any mistake when running an ex | |
1985 | # command. Proof: `vi -c ':unknown' -c ':qa'; echo $?` produces 1, |
|
1984 | # command. Proof: `vi -c ':unknown' -c ':qa'; echo $?` produces 1, | |
1986 | # while s/vi/vim/ doesn't. |
|
1985 | # while s/vi/vim/ doesn't. | |
1987 | editor = b'vim' |
|
1986 | editor = b'vim' | |
1988 | else: |
|
1987 | else: | |
1989 | editor = b'vi' |
|
1988 | editor = b'vi' | |
1990 | return encoding.environ.get(b"HGEDITOR") or self.config( |
|
1989 | return encoding.environ.get(b"HGEDITOR") or self.config( | |
1991 | b"ui", b"editor", editor |
|
1990 | b"ui", b"editor", editor | |
1992 | ) |
|
1991 | ) | |
1993 |
|
1992 | |||
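
geteditor() resolves the editor in a fixed order: $HGEDITOR, then the ui.editor config, then a platform default. Restated as a tiny function (pick_editor and config_value are hypothetical names, not hg API):

    import os
    import sys

    def pick_editor(config_value=None):
        # 'E' on plan9, 'vim' on macOS (stock vi exits non-zero on any ex
        # mistake, as the comment above notes), plain 'vi' elsewhere
        fallback = 'vim' if sys.platform == 'darwin' else 'vi'
        return os.environ.get('HGEDITOR') or config_value or fallback
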
1994 | @util.propertycache |
|
1993 | @util.propertycache | |
1995 | def _progbar(self): |
|
1994 | def _progbar(self): | |
1996 | """setup the progbar singleton to the ui object""" |
|
1995 | """setup the progbar singleton to the ui object""" | |
1997 | if ( |
|
1996 | if ( | |
1998 | self.quiet |
|
1997 | self.quiet | |
1999 | or self.debugflag |
|
1998 | or self.debugflag | |
2000 | or self.configbool(b'progress', b'disable') |
|
1999 | or self.configbool(b'progress', b'disable') | |
2001 | or not progress.shouldprint(self) |
|
2000 | or not progress.shouldprint(self) | |
2002 | ): |
|
2001 | ): | |
2003 | return None |
|
2002 | return None | |
2004 | return getprogbar(self) |
|
2003 | return getprogbar(self) | |
2005 |
|
2004 | |||
2006 | def _progclear(self): |
|
2005 | def _progclear(self): | |
2007 | """clear progress bar output if any. use it before any output""" |
|
2006 | """clear progress bar output if any. use it before any output""" | |
2008 | if not haveprogbar(): # nothing loaded yet |
|
2007 | if not haveprogbar(): # nothing loaded yet | |
2009 | return |
|
2008 | return | |
2010 | if self._progbar is not None and self._progbar.printed: |
|
2009 | if self._progbar is not None and self._progbar.printed: | |
2011 | self._progbar.clear() |
|
2010 | self._progbar.clear() | |
2012 |
|
2011 | |||
2013 | def makeprogress(self, topic, unit=b"", total=None): |
|
2012 | def makeprogress(self, topic, unit=b"", total=None): | |
2014 | """Create a progress helper for the specified topic""" |
|
2013 | """Create a progress helper for the specified topic""" | |
2015 | if getattr(self._fmsgerr, 'structured', False): |
|
2014 | if getattr(self._fmsgerr, 'structured', False): | |
2016 | # channel for machine-readable output with metadata, just send |
|
2015 | # channel for machine-readable output with metadata, just send | |
2017 | # raw information |
|
2016 | # raw information | |
2018 | # TODO: consider porting some useful information (e.g. estimated |
|
2017 | # TODO: consider porting some useful information (e.g. estimated | |
2019 | # time) from progbar. we might want to support update delay to |
|
2018 | # time) from progbar. we might want to support update delay to | |
2020 | # reduce the cost of transferring progress messages. |
|
2019 | # reduce the cost of transferring progress messages. | |
2021 | def updatebar(topic, pos, item, unit, total): |
|
2020 | def updatebar(topic, pos, item, unit, total): | |
2022 | self._fmsgerr.write( |
|
2021 | self._fmsgerr.write( | |
2023 | None, |
|
2022 | None, | |
2024 | type=b'progress', |
|
2023 | type=b'progress', | |
2025 | topic=topic, |
|
2024 | topic=topic, | |
2026 | pos=pos, |
|
2025 | pos=pos, | |
2027 | item=item, |
|
2026 | item=item, | |
2028 | unit=unit, |
|
2027 | unit=unit, | |
2029 | total=total, |
|
2028 | total=total, | |
2030 | ) |
|
2029 | ) | |
2031 |
|
2030 | |||
2032 | elif self._progbar is not None: |
|
2031 | elif self._progbar is not None: | |
2033 | updatebar = self._progbar.progress |
|
2032 | updatebar = self._progbar.progress | |
2034 | else: |
|
2033 | else: | |
2035 |
|
2034 | |||
2036 | def updatebar(topic, pos, item, unit, total): |
|
2035 | def updatebar(topic, pos, item, unit, total): | |
2037 | pass |
|
2036 | pass | |
2038 |
|
2037 | |||
2039 | return scmutil.progress(self, updatebar, topic, unit, total) |
|
2038 | return scmutil.progress(self, updatebar, topic, unit, total) | |
2040 |
|
2039 | |||
2041 | def getlogger(self, name): |
|
2040 | def getlogger(self, name): | |
2042 | """Returns a logger of the given name; or None if not registered""" |
|
2041 | """Returns a logger of the given name; or None if not registered""" | |
2043 | return self._loggers.get(name) |
|
2042 | return self._loggers.get(name) | |
2044 |
|
2043 | |||
2045 | def setlogger(self, name, logger): |
|
2044 | def setlogger(self, name, logger): | |
2046 | """Install logger which can be identified later by the given name |
|
2045 | """Install logger which can be identified later by the given name | |
2047 |
|
2046 | |||
2048 | More than one logger can be registered. Use extension or module |
|
2047 | More than one logger can be registered. Use extension or module | |
2049 | name to uniquely identify the logger instance. |
|
2048 | name to uniquely identify the logger instance. | |
2050 | """ |
|
2049 | """ | |
2051 | self._loggers[name] = logger |
|
2050 | self._loggers[name] = logger | |
2052 |
|
2051 | |||
2053 | def log(self, event, msgfmt, *msgargs, **opts): |
|
2052 | def log(self, event, msgfmt, *msgargs, **opts): | |
2054 | """hook for logging facility extensions |
|
2053 | """hook for logging facility extensions | |
2055 |
|
2054 | |||
2056 | event should be a readily-identifiable subsystem, which will |
|
2055 | event should be a readily-identifiable subsystem, which will | |
2057 | allow filtering. |
|
2056 | allow filtering. | |
2058 |
|
2057 | |||
2059 | msgfmt should be a newline-terminated format string to log, and |
|
2058 | msgfmt should be a newline-terminated format string to log, and | |
2060 | *msgargs are %-formatted into it. |
|
2059 | *msgargs are %-formatted into it. | |
2061 |
|
2060 | |||
2062 | **opts currently has no defined meanings. |
|
2061 | **opts currently has no defined meanings. | |
2063 | """ |
|
2062 | """ | |
2064 | if not self._loggers: |
|
2063 | if not self._loggers: | |
2065 | return |
|
2064 | return | |
2066 | activeloggers = [ |
|
2065 | activeloggers = [ | |
2067 | l for l in pycompat.itervalues(self._loggers) if l.tracked(event) |
|
2066 | l for l in pycompat.itervalues(self._loggers) if l.tracked(event) | |
2068 | ] |
|
2067 | ] | |
2069 | if not activeloggers: |
|
2068 | if not activeloggers: | |
2070 | return |
|
2069 | return | |
2071 | msg = msgfmt % msgargs |
|
2070 | msg = msgfmt % msgargs | |
2072 | opts = pycompat.byteskwargs(opts) |
|
2071 | opts = pycompat.byteskwargs(opts) | |
2073 | # guard against recursion from e.g. ui.debug() |
|
2072 | # guard against recursion from e.g. ui.debug() | |
2074 | registeredloggers = self._loggers |
|
2073 | registeredloggers = self._loggers | |
2075 | self._loggers = {} |
|
2074 | self._loggers = {} | |
2076 | try: |
|
2075 | try: | |
2077 | for logger in activeloggers: |
|
2076 | for logger in activeloggers: | |
2078 | logger.log(self, event, msg, opts) |
|
2077 | logger.log(self, event, msg, opts) | |
2079 | finally: |
|
2078 | finally: | |
2080 | self._loggers = registeredloggers |
|
2079 | self._loggers = registeredloggers | |
2081 |
|
2080 | |||
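
The recursion guard in log() works by detaching the registered loggers while they run: a logger that itself writes through ui (and hence calls log() again) hits an empty registry and returns immediately. The same pattern in a runnable miniature (Bus and its logger are made up for the demo):

    class Bus:
        def __init__(self):
            self._loggers = {}

        def log(self, msg):
            if not self._loggers:
                return
            registered, self._loggers = self._loggers, {}
            try:
                for logger in registered.values():
                    logger(self, msg)
            finally:
                self._loggers = registered

    bus = Bus()
    bus._loggers['echo'] = lambda b, m: (print(m), b.log('again'))
    bus.log('hello')  # prints 'hello' once; the nested log() is a no-op
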
2082 | def label(self, msg, label): |
|
2081 | def label(self, msg, label): | |
2083 | """style msg based on supplied label |
|
2082 | """style msg based on supplied label | |
2084 |
|
2083 | |||
2085 | If some color mode is enabled, this will add the necessary control |
|
2084 | If some color mode is enabled, this will add the necessary control | |
2086 | characters to apply such color. In addition, 'debug' color mode adds |
|
2085 | characters to apply such color. In addition, 'debug' color mode adds | |
2087 | markup showing which label affects a piece of text. |
|
2086 | markup showing which label affects a piece of text. | |
2088 |
|
2087 | |||
2089 | ui.write(s, 'label') is equivalent to |
|
2088 | ui.write(s, 'label') is equivalent to | |
2090 | ui.write(ui.label(s, 'label')). |
|
2089 | ui.write(ui.label(s, 'label')). | |
2091 | """ |
|
2090 | """ | |
2092 | if self._colormode is not None: |
|
2091 | if self._colormode is not None: | |
2093 | return color.colorlabel(self, msg, label) |
|
2092 | return color.colorlabel(self, msg, label) | |
2094 | return msg |
|
2093 | return msg | |
2095 |
|
2094 | |||
2096 | def develwarn(self, msg, stacklevel=1, config=None): |
|
2095 | def develwarn(self, msg, stacklevel=1, config=None): | |
2097 | """issue a developer warning message |
|
2096 | """issue a developer warning message | |
2098 |
|
2097 | |||
2099 | Use 'stacklevel' to report the offender some layers further up in the |
|
2098 | Use 'stacklevel' to report the offender some layers further up in the | |
2100 | stack. |
|
2099 | stack. | |
2101 | """ |
|
2100 | """ | |
2102 | if not self.configbool(b'devel', b'all-warnings'): |
|
2101 | if not self.configbool(b'devel', b'all-warnings'): | |
2103 | if config is None or not self.configbool(b'devel', config): |
|
2102 | if config is None or not self.configbool(b'devel', config): | |
2104 | return |
|
2103 | return | |
2105 | msg = b'devel-warn: ' + msg |
|
2104 | msg = b'devel-warn: ' + msg | |
2106 | stacklevel += 1 # get in develwarn |
|
2105 | stacklevel += 1 # get in develwarn | |
2107 | if self.tracebackflag: |
|
2106 | if self.tracebackflag: | |
2108 | util.debugstacktrace(msg, stacklevel, self._ferr, self._fout) |
|
2107 | util.debugstacktrace(msg, stacklevel, self._ferr, self._fout) | |
2109 | self.log( |
|
2108 | self.log( | |
2110 | b'develwarn', |
|
2109 | b'develwarn', | |
2111 | b'%s at:\n%s' |
|
2110 | b'%s at:\n%s' | |
2112 | % (msg, b''.join(util.getstackframes(stacklevel))), |
|
2111 | % (msg, b''.join(util.getstackframes(stacklevel))), | |
2113 | ) |
|
2112 | ) | |
2114 | else: |
|
2113 | else: | |
2115 | curframe = inspect.currentframe() |
|
2114 | curframe = inspect.currentframe() | |
2116 | calframe = inspect.getouterframes(curframe, 2) |
|
2115 | calframe = inspect.getouterframes(curframe, 2) | |
2117 | fname, lineno, fmsg = calframe[stacklevel][1:4] |
|
2116 | fname, lineno, fmsg = calframe[stacklevel][1:4] | |
2118 | fname, fmsg = pycompat.sysbytes(fname), pycompat.sysbytes(fmsg) |
|
2117 | fname, fmsg = pycompat.sysbytes(fname), pycompat.sysbytes(fmsg) | |
2119 | self.write_err(b'%s at: %s:%d (%s)\n' % (msg, fname, lineno, fmsg)) |
|
2118 | self.write_err(b'%s at: %s:%d (%s)\n' % (msg, fname, lineno, fmsg)) | |
2120 | self.log( |
|
2119 | self.log( | |
2121 | b'develwarn', b'%s at: %s:%d (%s)\n', msg, fname, lineno, fmsg |
|
2120 | b'develwarn', b'%s at: %s:%d (%s)\n', msg, fname, lineno, fmsg | |
2122 | ) |
|
2121 | ) | |
2123 |
|
2122 | |||
2124 | # avoid cycles |
|
2123 | # avoid cycles | |
2125 | del curframe |
|
2124 | del curframe | |
2126 | del calframe |
|
2125 | del calframe | |
2127 |
|
2126 | |||
2128 | def deprecwarn(self, msg, version, stacklevel=2): |
|
2127 | def deprecwarn(self, msg, version, stacklevel=2): | |
2129 | """issue a deprecation warning |
|
2128 | """issue a deprecation warning | |
2130 |
|
2129 | |||
2131 | - msg: message explaining what is deprecated and how to upgrade, |
|
2130 | - msg: message explaining what is deprecated and how to upgrade, | |
2132 | - version: last version where the API will be supported, |
|
2131 | - version: last version where the API will be supported, | |
2133 | """ |
|
2132 | """ | |
2134 | if not ( |
|
2133 | if not ( | |
2135 | self.configbool(b'devel', b'all-warnings') |
|
2134 | self.configbool(b'devel', b'all-warnings') | |
2136 | or self.configbool(b'devel', b'deprec-warn') |
|
2135 | or self.configbool(b'devel', b'deprec-warn') | |
2137 | ): |
|
2136 | ): | |
2138 | return |
|
2137 | return | |
2139 | msg += ( |
|
2138 | msg += ( | |
2140 | b"\n(compatibility will be dropped after Mercurial-%s," |
|
2139 | b"\n(compatibility will be dropped after Mercurial-%s," | |
2141 | b" update your code.)" |
|
2140 | b" update your code.)" | |
2142 | ) % version |
|
2141 | ) % version | |
2143 | self.develwarn(msg, stacklevel=stacklevel, config=b'deprec-warn') |
|
2142 | self.develwarn(msg, stacklevel=stacklevel, config=b'deprec-warn') | |
2144 |
|
2143 | |||
2145 | def exportableenviron(self): |
|
2144 | def exportableenviron(self): | |
2146 | """The environment variables that are safe to export, e.g. through |
|
2145 | """The environment variables that are safe to export, e.g. through | |
2147 | hgweb. |
|
2146 | hgweb. | |
2148 | """ |
|
2147 | """ | |
2149 | return self._exportableenviron |
|
2148 | return self._exportableenviron | |
2150 |
|
2149 | |||
2151 | @contextlib.contextmanager |
|
2150 | @contextlib.contextmanager | |
2152 | def configoverride(self, overrides, source=b""): |
|
2151 | def configoverride(self, overrides, source=b""): | |
2153 | """Context manager for temporary config overrides |
|
2152 | """Context manager for temporary config overrides | |
2154 | `overrides` must be a dict of the following structure: |
|
2153 | `overrides` must be a dict of the following structure: | |
2155 | {(section, name) : value}""" |
|
2154 | {(section, name) : value}""" | |
2156 | backups = {} |
|
2155 | backups = {} | |
2157 | try: |
|
2156 | try: | |
2158 | for (section, name), value in overrides.items(): |
|
2157 | for (section, name), value in overrides.items(): | |
2159 | backups[(section, name)] = self.backupconfig(section, name) |
|
2158 | backups[(section, name)] = self.backupconfig(section, name) | |
2160 | self.setconfig(section, name, value, source) |
|
2159 | self.setconfig(section, name, value, source) | |
2161 | yield |
|
2160 | yield | |
2162 | finally: |
|
2161 | finally: | |
2163 | for __, backup in backups.items(): |
|
2162 | for __, backup in backups.items(): | |
2164 | self.restoreconfig(backup) |
|
2163 | self.restoreconfig(backup) | |
2165 | # just restoring ui.quiet config to the previous value is not enough |
|
2164 | # just restoring ui.quiet config to the previous value is not enough | |
2166 | # as it does not update ui.quiet class member |
|
2165 | # as it does not update ui.quiet class member | |
2167 | if (b'ui', b'quiet') in overrides: |
|
2166 | if (b'ui', b'quiet') in overrides: | |
2168 | self.fixconfig(section=b'ui') |
|
2167 | self.fixconfig(section=b'ui') | |
2169 |
|
2168 | |||
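
configoverride() backs up each touched value, applies the override, and restores everything in the finally block even if the body raises; ui.quiet needs the extra fixconfig() because it is cached as a class member. The backup/restore shape, reduced to a plain dict (override() is a demo helper, not hg code):

    import contextlib

    @contextlib.contextmanager
    def override(d, updates):
        missing = object()  # sentinel for keys that did not exist before
        backups = {k: d.get(k, missing) for k in updates}
        try:
            d.update(updates)
            yield
        finally:
            for k, old in backups.items():
                if old is missing:
                    d.pop(k, None)
                else:
                    d[k] = old

    cfg = {(b'ui', b'quiet'): b'no'}
    with override(cfg, {(b'ui', b'quiet'): b'yes'}):
        assert cfg[(b'ui', b'quiet')] == b'yes'
    assert cfg[(b'ui', b'quiet')] == b'no'
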
2170 | def estimatememory(self): |
|
2169 | def estimatememory(self): | |
2171 | """Provide an estimate for the available system memory in bytes. |
|
2170 | """Provide an estimate for the available system memory in bytes. | |
2172 |
|
2171 | |||
2173 | This can be overridden via ui.available-memory. It returns None if |
|
2172 | This can be overridden via ui.available-memory. It returns None if | |
2174 | no estimate can be computed. |
|
2173 | no estimate can be computed. | |
2175 | """ |
|
2174 | """ | |
2176 | value = self.config(b'ui', b'available-memory') |
|
2175 | value = self.config(b'ui', b'available-memory') | |
2177 | if value is not None: |
|
2176 | if value is not None: | |
2178 | try: |
|
2177 | try: | |
2179 | return util.sizetoint(value) |
|
2178 | return util.sizetoint(value) | |
2180 | except error.ParseError: |
|
2179 | except error.ParseError: | |
2181 | raise error.ConfigError( |
|
2180 | raise error.ConfigError( | |
2182 | _(b"ui.available-memory value is invalid ('%s')") % value |
|
2181 | _(b"ui.available-memory value is invalid ('%s')") % value | |
2183 | ) |
|
2182 | ) | |
2184 | return util._estimatememory() |
|
2183 | return util._estimatememory() | |
2185 |
|
2184 | |||
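
estimatememory() lets ui.available-memory short-circuit the platform probe; the configured string goes through util.sizetoint(), which is not part of this hunk. A toy stand-in for that parser, only to show the expected shape of the value (an assumption for illustration; hg's real parser accepts more spellings):

    def sizetoint(s):
        units = {b'k': 1024, b'm': 1024 ** 2, b'g': 1024 ** 3}
        t = s.strip().lower().rstrip(b'b')
        mult = 1
        if t[-1:] in units:
            mult = units[t[-1:]]
            t = t[:-1]
        return int(float(t) * mult)

    assert sizetoint(b'4GB') == 4 * 1024 ** 3
    assert sizetoint(b'512') == 512
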
2186 |
|
2185 | |||
2187 | # we instantiate one globally shared progress bar to avoid |
|
2186 | # we instantiate one globally shared progress bar to avoid | |
2188 | # competing progress bars when multiple UI objects get created |
|
2187 | # competing progress bars when multiple UI objects get created | |
2189 | _progresssingleton = None |
|
2188 | _progresssingleton = None | |
2190 |
|
2189 | |||
2191 |
|
2190 | |||
2192 | def getprogbar(ui): |
|
2191 | def getprogbar(ui): | |
2193 | global _progresssingleton |
|
2192 | global _progresssingleton | |
2194 | if _progresssingleton is None: |
|
2193 | if _progresssingleton is None: | |
2195 | # passing 'ui' object to the singleton is fishy, |
|
2194 | # passing 'ui' object to the singleton is fishy, | |
2196 | # this is how the extension used to work but feel free to rework it. |
|
2195 | # this is how the extension used to work but feel free to rework it. | |
2197 | _progresssingleton = progress.progbar(ui) |
|
2196 | _progresssingleton = progress.progbar(ui) | |
2198 | return _progresssingleton |
|
2197 | return _progresssingleton | |
2199 |
|
2198 | |||
2200 |
|
2199 | |||
2201 | def haveprogbar(): |
|
2200 | def haveprogbar(): | |
2202 | return _progresssingleton is not None |
|
2201 | return _progresssingleton is not None | |
2203 |
|
2202 | |||
2204 |
|
2203 | |||
2205 | def _selectmsgdests(ui): |
|
2204 | def _selectmsgdests(ui): | |
2206 | name = ui.config(b'ui', b'message-output') |
|
2205 | name = ui.config(b'ui', b'message-output') | |
2207 | if name == b'channel': |
|
2206 | if name == b'channel': | |
2208 | if ui.fmsg: |
|
2207 | if ui.fmsg: | |
2209 | return ui.fmsg, ui.fmsg |
|
2208 | return ui.fmsg, ui.fmsg | |
2210 | else: |
|
2209 | else: | |
2211 | # fall back to ferr if channel isn't ready so that status/error |
|
2210 | # fall back to ferr if channel isn't ready so that status/error | |
2212 | # messages can be printed |
|
2211 | # messages can be printed | |
2213 | return ui.ferr, ui.ferr |
|
2212 | return ui.ferr, ui.ferr | |
2214 | if name == b'stdio': |
|
2213 | if name == b'stdio': | |
2215 | return ui.fout, ui.ferr |
|
2214 | return ui.fout, ui.ferr | |
2216 | if name == b'stderr': |
|
2215 | if name == b'stderr': | |
2217 | return ui.ferr, ui.ferr |
|
2216 | return ui.ferr, ui.ferr | |
2218 | raise error.Abort(b'invalid ui.message-output destination: %s' % name) |
|
2217 | raise error.Abort(b'invalid ui.message-output destination: %s' % name) | |
2219 |
|
2218 | |||
2220 |
|
2219 | |||
2221 | def _writemsgwith(write, dest, *args, **opts): |
|
2220 | def _writemsgwith(write, dest, *args, **opts): | |
2222 | """Write ui message with the given ui._write*() function |
|
2221 | """Write ui message with the given ui._write*() function | |
2223 |
|
2222 | |||
2224 | The specified message type is translated to 'ui.<type>' label if the dest |
|
2223 | The specified message type is translated to 'ui.<type>' label if the dest | |
2225 | isn't a structured channel, so that the message will be colorized. |
|
2224 | isn't a structured channel, so that the message will be colorized. | |
2226 | """ |
|
2225 | """ | |
2227 | # TODO: maybe change 'type' to a mandatory option |
|
2226 | # TODO: maybe change 'type' to a mandatory option | |
2228 | if 'type' in opts and not getattr(dest, 'structured', False): |
|
2227 | if 'type' in opts and not getattr(dest, 'structured', False): | |
2229 | opts['label'] = opts.get('label', b'') + b' ui.%s' % opts.pop('type') |
|
2228 | opts['label'] = opts.get('label', b'') + b' ui.%s' % opts.pop('type') | |
2230 | write(dest, *args, **opts) |
|
2229 | write(dest, *args, **opts) |
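
To close out the ui.py hunk: _writemsgwith() is where a message 'type' becomes a color label on non-structured outputs. The translation in isolation (labelize is a hypothetical name):

    def labelize(opts, structured=False):
        opts = dict(opts)
        if 'type' in opts and not structured:
            opts['label'] = opts.get('label', b'') + b' ui.%s' % opts.pop('type')
        return opts

    print(labelize({'type': b'status'}))  # {'label': b' ui.status'}
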
@@ -1,3379 +1,3380 b'' | |||||
1 | # util.py - Mercurial utility functions and platform specific implementations |
|
1 | # util.py - Mercurial utility functions and platform specific implementations | |
2 | # |
|
2 | # | |
3 | # Copyright 2005 K. Thananchayan <thananck@yahoo.com> |
|
3 | # Copyright 2005 K. Thananchayan <thananck@yahoo.com> | |
4 | # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com> |
|
4 | # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com> | |
5 | # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> |
|
5 | # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> | |
6 | # |
|
6 | # | |
7 | # This software may be used and distributed according to the terms of the |
|
7 | # This software may be used and distributed according to the terms of the | |
8 | # GNU General Public License version 2 or any later version. |
|
8 | # GNU General Public License version 2 or any later version. | |
9 |
|
9 | |||
10 | """Mercurial utility functions and platform specific implementations. |
|
10 | """Mercurial utility functions and platform specific implementations. | |
11 |
|
11 | |||
12 | This contains helper routines that are independent of the SCM core and |
|
12 | This contains helper routines that are independent of the SCM core and | |
13 | hide platform-specific details from the core. |
|
13 | hide platform-specific details from the core. | |
14 | """ |
|
14 | """ | |
15 |
|
15 | |||
16 | from __future__ import absolute_import, print_function |
|
16 | from __future__ import absolute_import, print_function | |
17 |
|
17 | |||
18 | import abc |
|
18 | import abc | |
19 | import collections |
|
19 | import collections | |
20 | import contextlib |
|
20 | import contextlib | |
21 | import errno |
|
21 | import errno | |
22 | import gc |
|
22 | import gc | |
23 | import hashlib |
|
23 | import hashlib | |
24 | import itertools |
|
24 | import itertools | |
25 | import locale |
|
25 | import locale | |
26 | import mmap |
|
26 | import mmap | |
27 | import os |
|
27 | import os | |
28 | import platform as pyplatform |
|
28 | import platform as pyplatform | |
29 | import re as remod |
|
29 | import re as remod | |
30 | import shutil |
|
30 | import shutil | |
31 | import stat |
|
31 | import stat | |
32 | import sys |
|
32 | import sys | |
33 | import time |
|
33 | import time | |
34 | import traceback |
|
34 | import traceback | |
35 | import warnings |
|
35 | import warnings | |
36 |
|
36 | |||
37 | from .node import hex |
|
37 | from .node import hex | |
38 | from .thirdparty import attr |
|
38 | from .thirdparty import attr | |
39 | from .pycompat import ( |
|
39 | from .pycompat import ( | |
40 | delattr, |
|
40 | delattr, | |
41 | getattr, |
|
41 | getattr, | |
42 | open, |
|
42 | open, | |
43 | setattr, |
|
43 | setattr, | |
44 | ) |
|
44 | ) | |
45 | from .node import hex |
|
45 | from .node import hex | |
46 | from hgdemandimport import tracing |
|
46 | from hgdemandimport import tracing | |
47 | from . import ( |
|
47 | from . import ( | |
48 | encoding, |
|
48 | encoding, | |
49 | error, |
|
49 | error, | |
50 | i18n, |
|
50 | i18n, | |
51 | policy, |
|
51 | policy, | |
52 | pycompat, |
|
52 | pycompat, | |
53 | urllibcompat, |
|
53 | urllibcompat, | |
54 | ) |
|
54 | ) | |
55 | from .utils import ( |
|
55 | from .utils import ( | |
56 | compression, |
|
56 | compression, | |
57 | hashutil, |
|
57 | hashutil, | |
58 | procutil, |
|
58 | procutil, | |
59 | stringutil, |
|
59 | stringutil, | |
60 | urlutil, |
|
60 | urlutil, | |
61 | ) |
|
61 | ) | |
62 |
|
62 | |||
63 | if pycompat.TYPE_CHECKING: |
|
63 | if pycompat.TYPE_CHECKING: | |
64 | from typing import ( |
|
64 | from typing import ( | |
65 | Iterator, |
|
65 | Iterator, | |
66 | List, |
|
66 | List, | |
67 | Optional, |
|
67 | Optional, | |
68 | Tuple, |
|
68 | Tuple, | |
69 | ) |
|
69 | ) | |
70 |
|
70 | |||
71 |
|
71 | |||
72 | base85 = policy.importmod('base85') |
|
72 | base85 = policy.importmod('base85') | |
73 | osutil = policy.importmod('osutil') |
|
73 | osutil = policy.importmod('osutil') | |
74 |
|
74 | |||
75 | b85decode = base85.b85decode |
|
75 | b85decode = base85.b85decode | |
76 | b85encode = base85.b85encode |
|
76 | b85encode = base85.b85encode | |
77 |
|
77 | |||
78 | cookielib = pycompat.cookielib |
|
78 | cookielib = pycompat.cookielib | |
79 | httplib = pycompat.httplib |
|
79 | httplib = pycompat.httplib | |
80 | pickle = pycompat.pickle |
|
80 | pickle = pycompat.pickle | |
81 | safehasattr = pycompat.safehasattr |
|
81 | safehasattr = pycompat.safehasattr | |
82 | socketserver = pycompat.socketserver |
|
82 | socketserver = pycompat.socketserver | |
83 | bytesio = pycompat.bytesio |
|
83 | bytesio = pycompat.bytesio | |
84 | # TODO deprecate stringio name, as it is a lie on Python 3. |
|
84 | # TODO deprecate stringio name, as it is a lie on Python 3. | |
85 | stringio = bytesio |
|
85 | stringio = bytesio | |
86 | xmlrpclib = pycompat.xmlrpclib |
|
86 | xmlrpclib = pycompat.xmlrpclib | |
87 |
|
87 | |||
88 | httpserver = urllibcompat.httpserver |
|
88 | httpserver = urllibcompat.httpserver | |
89 | urlerr = urllibcompat.urlerr |
|
89 | urlerr = urllibcompat.urlerr | |
90 | urlreq = urllibcompat.urlreq |
|
90 | urlreq = urllibcompat.urlreq | |
91 |
|
91 | |||
92 | # workaround for win32mbcs |
|
92 | # workaround for win32mbcs | |
93 | _filenamebytestr = pycompat.bytestr |
|
93 | _filenamebytestr = pycompat.bytestr | |
94 |
|
94 | |||
95 | if pycompat.iswindows: |
|
95 | if pycompat.iswindows: | |
96 | from . import windows as platform |
|
96 | from . import windows as platform | |
97 | else: |
|
97 | else: | |
98 | from . import posix as platform |
|
98 | from . import posix as platform | |
99 |
|
99 | |||
100 | _ = i18n._ |
|
100 | _ = i18n._ | |
101 |
|
101 | |||
102 | bindunixsocket = platform.bindunixsocket |
|
102 | bindunixsocket = platform.bindunixsocket | |
103 | cachestat = platform.cachestat |
|
103 | cachestat = platform.cachestat | |
104 | checkexec = platform.checkexec |
|
104 | checkexec = platform.checkexec | |
105 | checklink = platform.checklink |
|
105 | checklink = platform.checklink | |
106 | copymode = platform.copymode |
|
106 | copymode = platform.copymode | |
107 | expandglobs = platform.expandglobs |
|
107 | expandglobs = platform.expandglobs | |
108 | getfsmountpoint = platform.getfsmountpoint |
|
108 | getfsmountpoint = platform.getfsmountpoint | |
109 | getfstype = platform.getfstype |
|
109 | getfstype = platform.getfstype | |
|
110 | get_password = platform.get_password | |||
110 | groupmembers = platform.groupmembers |
|
111 | groupmembers = platform.groupmembers | |
111 | groupname = platform.groupname |
|
112 | groupname = platform.groupname | |
112 | isexec = platform.isexec |
|
113 | isexec = platform.isexec | |
113 | isowner = platform.isowner |
|
114 | isowner = platform.isowner | |
114 | listdir = osutil.listdir |
|
115 | listdir = osutil.listdir | |
115 | localpath = platform.localpath |
|
116 | localpath = platform.localpath | |
116 | lookupreg = platform.lookupreg |
|
117 | lookupreg = platform.lookupreg | |
117 | makedir = platform.makedir |
|
118 | makedir = platform.makedir | |
118 | nlinks = platform.nlinks |
|
119 | nlinks = platform.nlinks | |
119 | normpath = platform.normpath |
|
120 | normpath = platform.normpath | |
120 | normcase = platform.normcase |
|
121 | normcase = platform.normcase | |
121 | normcasespec = platform.normcasespec |
|
122 | normcasespec = platform.normcasespec | |
122 | normcasefallback = platform.normcasefallback |
|
123 | normcasefallback = platform.normcasefallback | |
123 | openhardlinks = platform.openhardlinks |
|
124 | openhardlinks = platform.openhardlinks | |
124 | oslink = platform.oslink |
|
125 | oslink = platform.oslink | |
125 | parsepatchoutput = platform.parsepatchoutput |
|
126 | parsepatchoutput = platform.parsepatchoutput | |
126 | pconvert = platform.pconvert |
|
127 | pconvert = platform.pconvert | |
127 | poll = platform.poll |
|
128 | poll = platform.poll | |
128 | posixfile = platform.posixfile |
|
129 | posixfile = platform.posixfile | |
129 | readlink = platform.readlink |
|
130 | readlink = platform.readlink | |
130 | rename = platform.rename |
|
131 | rename = platform.rename | |
131 | removedirs = platform.removedirs |
|
132 | removedirs = platform.removedirs | |
132 | samedevice = platform.samedevice |
|
133 | samedevice = platform.samedevice | |
133 | samefile = platform.samefile |
|
134 | samefile = platform.samefile | |
134 | samestat = platform.samestat |
|
135 | samestat = platform.samestat | |
135 | setflags = platform.setflags |
|
136 | setflags = platform.setflags | |
136 | split = platform.split |
|
137 | split = platform.split | |
137 | statfiles = getattr(osutil, 'statfiles', platform.statfiles) |
|
138 | statfiles = getattr(osutil, 'statfiles', platform.statfiles) | |
138 | statisexec = platform.statisexec |
|
139 | statisexec = platform.statisexec | |
139 | statislink = platform.statislink |
|
140 | statislink = platform.statislink | |
140 | umask = platform.umask |
|
141 | umask = platform.umask | |
141 | unlink = platform.unlink |
|
142 | unlink = platform.unlink | |
142 | username = platform.username |
|
143 | username = platform.username | |
143 |
|
144 | |||
144 |
|
145 | |||
145 | def setumask(val): |
|
146 | def setumask(val): | |
146 | # type: (int) -> None |
|
147 | # type: (int) -> None | |
147 | '''updates the umask. used by chg server''' |
|
148 | '''updates the umask. used by chg server''' | |
148 | if pycompat.iswindows: |
|
149 | if pycompat.iswindows: | |
149 | return |
|
150 | return | |
150 | os.umask(val) |
|
151 | os.umask(val) | |
151 | global umask |
|
152 | global umask | |
152 | platform.umask = umask = val & 0o777 |
|
153 | platform.umask = umask = val & 0o777 | |
153 |
|
154 | |||
154 |
|
155 | |||
155 | # small compat layer |
|
156 | # small compat layer | |
156 | compengines = compression.compengines |
|
157 | compengines = compression.compengines | |
157 | SERVERROLE = compression.SERVERROLE |
|
158 | SERVERROLE = compression.SERVERROLE | |
158 | CLIENTROLE = compression.CLIENTROLE |
|
159 | CLIENTROLE = compression.CLIENTROLE | |
159 |
|
160 | |||
160 | try: |
|
161 | try: | |
161 | recvfds = osutil.recvfds |
|
162 | recvfds = osutil.recvfds | |
162 | except AttributeError: |
|
163 | except AttributeError: | |
163 | pass |
|
164 | pass | |
164 |
|
165 | |||
165 | # Python compatibility |
|
166 | # Python compatibility | |
166 |
|
167 | |||
167 | _notset = object() |
|
168 | _notset = object() | |
168 |
|
169 | |||
169 |
|
170 | |||
170 | def bitsfrom(container): |
|
171 | def bitsfrom(container): | |
171 | bits = 0 |
|
172 | bits = 0 | |
172 | for bit in container: |
|
173 | for bit in container: | |
173 | bits |= bit |
|
174 | bits |= bit | |
174 | return bits |
|
175 | return bits | |
175 |
|
176 | |||
176 |
|
177 | |||
177 | # python 2.6 still has deprecation warnings enabled by default. We do not want |
|
178 | # python 2.6 still has deprecation warnings enabled by default. We do not want | |
178 | # to display anything to the standard user, so detect if we are running tests and |
|
179 | # to display anything to the standard user, so detect if we are running tests and | |
179 | # only use python deprecation warnings in this case. |
|
180 | # only use python deprecation warnings in this case. | |
180 | _dowarn = bool(encoding.environ.get(b'HGEMITWARNINGS')) |
|
181 | _dowarn = bool(encoding.environ.get(b'HGEMITWARNINGS')) | |
181 | if _dowarn: |
|
182 | if _dowarn: | |
182 | # explicitly unfilter our warning for python 2.7 |
|
183 | # explicitly unfilter our warning for python 2.7 | |
183 | # |
|
184 | # | |
184 | # The option of setting PYTHONWARNINGS in the test runner was investigated. |
|
185 | # The option of setting PYTHONWARNINGS in the test runner was investigated. | |
185 | # However, module name set through PYTHONWARNINGS was exactly matched, so |
|
186 | # However, module name set through PYTHONWARNINGS was exactly matched, so | |
186 | # we cannot set 'mercurial' and have it match eg: 'mercurial.scmutil'. This |
|
187 | # we cannot set 'mercurial' and have it match eg: 'mercurial.scmutil'. This | |
187 | # makes the whole PYTHONWARNINGS thing useless for our usecase. |
|
188 | # makes the whole PYTHONWARNINGS thing useless for our usecase. | |
188 | warnings.filterwarnings('default', '', DeprecationWarning, 'mercurial') |
|
189 | warnings.filterwarnings('default', '', DeprecationWarning, 'mercurial') | |
189 | warnings.filterwarnings('default', '', DeprecationWarning, 'hgext') |
|
190 | warnings.filterwarnings('default', '', DeprecationWarning, 'hgext') | |
190 | warnings.filterwarnings('default', '', DeprecationWarning, 'hgext3rd') |
|
191 | warnings.filterwarnings('default', '', DeprecationWarning, 'hgext3rd') | |
191 | if _dowarn and pycompat.ispy3: |
|
192 | if _dowarn and pycompat.ispy3: | |
192 | # silence warning emitted by passing user string to re.sub() |
|
193 | # silence warning emitted by passing user string to re.sub() | |
193 | warnings.filterwarnings( |
|
194 | warnings.filterwarnings( | |
194 | 'ignore', 'bad escape', DeprecationWarning, 'mercurial' |
|
195 | 'ignore', 'bad escape', DeprecationWarning, 'mercurial' | |
195 | ) |
|
196 | ) | |
196 | warnings.filterwarnings( |
|
197 | warnings.filterwarnings( | |
197 | 'ignore', 'invalid escape sequence', DeprecationWarning, 'mercurial' |
|
198 | 'ignore', 'invalid escape sequence', DeprecationWarning, 'mercurial' | |
198 | ) |
|
199 | ) | |
199 | # TODO: reinvent imp.is_frozen() |
|
200 | # TODO: reinvent imp.is_frozen() | |
200 | warnings.filterwarnings( |
|
201 | warnings.filterwarnings( | |
201 | 'ignore', |
|
202 | 'ignore', | |
202 | 'the imp module is deprecated', |
|
203 | 'the imp module is deprecated', | |
203 | DeprecationWarning, |
|
204 | DeprecationWarning, | |
204 | 'mercurial', |
|
205 | 'mercurial', | |
205 | ) |
|
206 | ) | |
206 |
|
207 | |||
207 |
|
208 | |||
208 | def nouideprecwarn(msg, version, stacklevel=1): |
|
209 | def nouideprecwarn(msg, version, stacklevel=1): | |
209 | """Issue a python native deprecation warning |
|
210 | """Issue a python native deprecation warning | |
210 |
|
211 | |||
211 | This is a noop outside of tests, use 'ui.deprecwarn' when possible. |
|
212 | This is a noop outside of tests, use 'ui.deprecwarn' when possible. | |
212 | """ |
|
213 | """ | |
213 | if _dowarn: |
|
214 | if _dowarn: | |
214 | msg += ( |
|
215 | msg += ( | |
215 | b"\n(compatibility will be dropped after Mercurial-%s," |
|
216 | b"\n(compatibility will be dropped after Mercurial-%s," | |
216 | b" update your code.)" |
|
217 | b" update your code.)" | |
217 | ) % version |
|
218 | ) % version | |
218 | warnings.warn(pycompat.sysstr(msg), DeprecationWarning, stacklevel + 1) |
|
219 | warnings.warn(pycompat.sysstr(msg), DeprecationWarning, stacklevel + 1) | |
219 | # on python 3 with chg, we will need to explicitly flush the output |
|
220 | # on python 3 with chg, we will need to explicitly flush the output | |
220 | sys.stderr.flush() |
|
221 | sys.stderr.flush() | |
221 |
|
222 | |||
222 |
|
223 | |||
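
nouideprecwarn() only fires when HGEMITWARNINGS is set, and stacklevel + 1 makes the warning point at the helper's caller rather than the helper itself. What the emitted warning looks like (message and version are made up):

    import warnings

    msg = "frobnicate() is deprecated"  # hypothetical API
    msg += (
        "\n(compatibility will be dropped after Mercurial-6.1,"
        " update your code.)"
    )
    warnings.warn(msg, DeprecationWarning, stacklevel=2)
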
223 | DIGESTS = { |
|
224 | DIGESTS = { | |
224 | b'md5': hashlib.md5, |
|
225 | b'md5': hashlib.md5, | |
225 | b'sha1': hashutil.sha1, |
|
226 | b'sha1': hashutil.sha1, | |
226 | b'sha512': hashlib.sha512, |
|
227 | b'sha512': hashlib.sha512, | |
227 | } |
|
228 | } | |
228 | # List of digest types from strongest to weakest |
|
229 | # List of digest types from strongest to weakest | |
229 | DIGESTS_BY_STRENGTH = [b'sha512', b'sha1', b'md5'] |
|
230 | DIGESTS_BY_STRENGTH = [b'sha512', b'sha1', b'md5'] | |
230 |
|
231 | |||
231 | for k in DIGESTS_BY_STRENGTH: |
|
232 | for k in DIGESTS_BY_STRENGTH: | |
232 | assert k in DIGESTS |
|
233 | assert k in DIGESTS | |
233 |
|
234 | |||
234 |
|
235 | |||
235 | class digester(object): |
|
236 | class digester(object): | |
236 | """helper to compute digests. |
|
237 | """helper to compute digests. | |
237 |
|
238 | |||
238 | This helper can be used to compute one or more digests given their name. |
|
239 | This helper can be used to compute one or more digests given their name. | |
239 |
|
240 | |||
240 | >>> d = digester([b'md5', b'sha1']) |
|
241 | >>> d = digester([b'md5', b'sha1']) | |
241 | >>> d.update(b'foo') |
|
242 | >>> d.update(b'foo') | |
242 | >>> [k for k in sorted(d)] |
|
243 | >>> [k for k in sorted(d)] | |
243 | ['md5', 'sha1'] |
|
244 | ['md5', 'sha1'] | |
244 | >>> d[b'md5'] |
|
245 | >>> d[b'md5'] | |
245 | 'acbd18db4cc2f85cedef654fccc4a4d8' |
|
246 | 'acbd18db4cc2f85cedef654fccc4a4d8' | |
246 | >>> d[b'sha1'] |
|
247 | >>> d[b'sha1'] | |
247 | '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33' |
|
248 | '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33' | |
248 | >>> digester.preferred([b'md5', b'sha1']) |
|
249 | >>> digester.preferred([b'md5', b'sha1']) | |
249 | 'sha1' |
|
250 | 'sha1' | |
250 | """ |
|
251 | """ | |
251 |
|
252 | |||
252 | def __init__(self, digests, s=b''): |
|
253 | def __init__(self, digests, s=b''): | |
253 | self._hashes = {} |
|
254 | self._hashes = {} | |
254 | for k in digests: |
|
255 | for k in digests: | |
255 | if k not in DIGESTS: |
|
256 | if k not in DIGESTS: | |
256 | raise error.Abort(_(b'unknown digest type: %s') % k) |
|
257 | raise error.Abort(_(b'unknown digest type: %s') % k) | |
257 | self._hashes[k] = DIGESTS[k]() |
|
258 | self._hashes[k] = DIGESTS[k]() | |
258 | if s: |
|
259 | if s: | |
259 | self.update(s) |
|
260 | self.update(s) | |
260 |
|
261 | |||
261 | def update(self, data): |
|
262 | def update(self, data): | |
262 | for h in self._hashes.values(): |
|
263 | for h in self._hashes.values(): | |
263 | h.update(data) |
|
264 | h.update(data) | |
264 |
|
265 | |||
265 | def __getitem__(self, key): |
|
266 | def __getitem__(self, key): | |
266 | if key not in DIGESTS: |
|
267 | if key not in DIGESTS: | |
267 | raise error.Abort(_(b'unknown digest type: %s') % k) |
|
268 | raise error.Abort(_(b'unknown digest type: %s') % k) | |
268 | return hex(self._hashes[key].digest()) |
|
269 | return hex(self._hashes[key].digest()) | |
269 |
|
270 | |||
270 | def __iter__(self): |
|
271 | def __iter__(self): | |
271 | return iter(self._hashes) |
|
272 | return iter(self._hashes) | |
272 |
|
273 | |||
273 | @staticmethod |
|
274 | @staticmethod | |
274 | def preferred(supported): |
|
275 | def preferred(supported): | |
275 | """returns the strongest digest type in both supported and DIGESTS.""" |
|
276 | """returns the strongest digest type in both supported and DIGESTS.""" | |
276 |
|
277 | |||
277 | for k in DIGESTS_BY_STRENGTH: |
|
278 | for k in DIGESTS_BY_STRENGTH: | |
278 | if k in supported: |
|
279 | if k in supported: | |
279 | return k |
|
280 | return k | |
280 | return None |
|
281 | return None | |
281 |
|
282 | |||
282 |
|
283 | |||
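
digester.preferred() relies on DIGESTS_BY_STRENGTH being ordered strongest-first: the first name both sides support wins. The same selection in a runnable miniature:

    DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']  # strongest to weakest

    def preferred(supported):
        for name in DIGESTS_BY_STRENGTH:
            if name in supported:
                return name
        return None

    assert preferred(['md5', 'sha1']) == 'sha1'
    assert preferred(['crc32']) is None
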
283 | class digestchecker(object): |
|
284 | class digestchecker(object): | |
284 | """file handle wrapper that additionally checks content against a given |
|
285 | """file handle wrapper that additionally checks content against a given | |
285 | size and digests. |
|
286 | size and digests. | |
286 |
|
287 | |||
287 | d = digestchecker(fh, size, {'md5': '...'}) |
|
288 | d = digestchecker(fh, size, {'md5': '...'}) | |
288 |
|
289 | |||
289 | When multiple digests are given, all of them are validated. |
|
290 | When multiple digests are given, all of them are validated. | |
290 | """ |
|
291 | """ | |
291 |
|
292 | |||
292 | def __init__(self, fh, size, digests): |
|
293 | def __init__(self, fh, size, digests): | |
293 | self._fh = fh |
|
294 | self._fh = fh | |
294 | self._size = size |
|
295 | self._size = size | |
295 | self._got = 0 |
|
296 | self._got = 0 | |
296 | self._digests = dict(digests) |
|
297 | self._digests = dict(digests) | |
297 | self._digester = digester(self._digests.keys()) |
|
298 | self._digester = digester(self._digests.keys()) | |
298 |
|
299 | |||
299 | def read(self, length=-1): |
|
300 | def read(self, length=-1): | |
300 | content = self._fh.read(length) |
|
301 | content = self._fh.read(length) | |
301 | self._digester.update(content) |
|
302 | self._digester.update(content) | |
302 | self._got += len(content) |
|
303 | self._got += len(content) | |
303 | return content |
|
304 | return content | |
304 |
|
305 | |||
305 | def validate(self): |
|
306 | def validate(self): | |
306 | if self._size != self._got: |
|
307 | if self._size != self._got: | |
307 | raise error.Abort( |
|
308 | raise error.Abort( | |
308 | _(b'size mismatch: expected %d, got %d') |
|
309 | _(b'size mismatch: expected %d, got %d') | |
309 | % (self._size, self._got) |
|
310 | % (self._size, self._got) | |
310 | ) |
|
311 | ) | |
311 | for k, v in self._digests.items(): |
|
312 | for k, v in self._digests.items(): | |
312 | if v != self._digester[k]: |
|
313 | if v != self._digester[k]: | |
313 | # i18n: first parameter is a digest name |
|
314 | # i18n: first parameter is a digest name | |
314 | raise error.Abort( |
|
315 | raise error.Abort( | |
315 | _(b'%s mismatch: expected %s, got %s') |
|
316 | _(b'%s mismatch: expected %s, got %s') | |
316 | % (k, v, self._digester[k]) |
|
317 | % (k, v, self._digester[k]) | |
317 | ) |
|
318 | ) | |
318 |
|
319 | |||
319 |
|
320 | |||
320 | try: |
|
321 | try: | |
321 | buffer = buffer # pytype: disable=name-error |
|
322 | buffer = buffer # pytype: disable=name-error | |
322 | except NameError: |
|
323 | except NameError: | |
323 |
|
324 | |||
324 | def buffer(sliceable, offset=0, length=None): |
|
325 | def buffer(sliceable, offset=0, length=None): | |
325 | if length is not None: |
|
326 | if length is not None: | |
326 | return memoryview(sliceable)[offset : offset + length] |
|
327 | return memoryview(sliceable)[offset : offset + length] | |
327 | return memoryview(sliceable)[offset:] |
|
328 | return memoryview(sliceable)[offset:] | |
328 |
|
329 | |||
329 |
|
330 | |||
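
On Python 3 the buffer builtin is gone, so the fallback above builds zero-copy slices from memoryview. Its observable behavior:

    data = b'abcdefgh'
    assert bytes(memoryview(data)[2 : 2 + 4]) == b'cdef'  # buffer(data, 2, 4)
    assert bytes(memoryview(data)[2:]) == b'cdefgh'       # buffer(data, 2)
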
330 | _chunksize = 4096 |
|
331 | _chunksize = 4096 | |
331 |
|
332 | |||
332 |
|
333 | |||
333 | class bufferedinputpipe(object): |
|
334 | class bufferedinputpipe(object): | |
334 | """a manually buffered input pipe |
|
335 | """a manually buffered input pipe | |
335 |
|
336 | |||
336 | Python will not let us use buffered IO and lazy reading with 'polling' at |
|
337 | Python will not let us use buffered IO and lazy reading with 'polling' at | |
337 | the same time. We cannot probe the buffer state and select will not detect |
|
338 | the same time. We cannot probe the buffer state and select will not detect | |
338 | that data are ready to read if they are already buffered. |
|
339 | that data are ready to read if they are already buffered. | |
339 |
|
340 | |||
340 | This class lets us work around that by implementing its own buffering |
|
341 | This class lets us work around that by implementing its own buffering | |
341 | (allowing efficient readline) while offering a way to know if the buffer is |
|
342 | (allowing efficient readline) while offering a way to know if the buffer is | |
342 | empty from the output (allowing collaboration of the buffer with polling). |
|
343 | empty from the output (allowing collaboration of the buffer with polling). | |
343 |
|
344 | |||
344 | This class lives in the 'util' module because it makes use of the 'os' |
|
345 | This class lives in the 'util' module because it makes use of the 'os' | |
345 | module from the python stdlib. |
|
346 | module from the python stdlib. | |
346 | """ |
|
347 | """ | |
347 |
|
348 | |||
348 | def __new__(cls, fh): |
|
349 | def __new__(cls, fh): | |
349 | # If we receive a fileobjectproxy, we need to use a variation of this |
|
350 | # If we receive a fileobjectproxy, we need to use a variation of this | |
350 | # class that notifies observers about activity. |
|
351 | # class that notifies observers about activity. | |
351 | if isinstance(fh, fileobjectproxy): |
|
352 | if isinstance(fh, fileobjectproxy): | |
352 | cls = observedbufferedinputpipe |
|
353 | cls = observedbufferedinputpipe | |
353 |
|
354 | |||
354 | return super(bufferedinputpipe, cls).__new__(cls) |
|
355 | return super(bufferedinputpipe, cls).__new__(cls) | |
355 |
|
356 | |||
356 | def __init__(self, input): |
|
357 | def __init__(self, input): | |
357 | self._input = input |
|
358 | self._input = input | |
358 | self._buffer = [] |
|
359 | self._buffer = [] | |
359 | self._eof = False |
|
360 | self._eof = False | |
360 | self._lenbuf = 0 |
|
361 | self._lenbuf = 0 | |
361 |
|
362 | |||
362 | @property |
|
363 | @property | |
363 | def hasbuffer(self): |
|
364 | def hasbuffer(self): | |
364 | """True is any data is currently buffered |
|
365 | """True is any data is currently buffered | |
365 |
|
366 | |||
366 | This will be used externally a pre-step for polling IO. If there is |
|
367 | This will be used externally a pre-step for polling IO. If there is | |
367 | already data then no polling should be set in place.""" |
|
368 | already data then no polling should be set in place.""" | |
368 | return bool(self._buffer) |
|
369 | return bool(self._buffer) | |
369 |
|
370 | |||
370 | @property |
|
371 | @property | |
371 | def closed(self): |
|
372 | def closed(self): | |
372 | return self._input.closed |
|
373 | return self._input.closed | |
373 |
|
374 | |||
374 | def fileno(self): |
|
375 | def fileno(self): | |
375 | return self._input.fileno() |
|
376 | return self._input.fileno() | |
376 |
|
377 | |||
377 | def close(self): |
|
378 | def close(self): | |
378 | return self._input.close() |
|
379 | return self._input.close() | |
379 |
|
380 | |||
380 | def read(self, size): |
|
381 | def read(self, size): | |
381 | while (not self._eof) and (self._lenbuf < size): |
|
382 | while (not self._eof) and (self._lenbuf < size): | |
382 | self._fillbuffer() |
|
383 | self._fillbuffer() | |
383 | return self._frombuffer(size) |
|
384 | return self._frombuffer(size) | |
384 |
|
385 | |||
385 | def unbufferedread(self, size): |
|
386 | def unbufferedread(self, size): | |
386 | if not self._eof and self._lenbuf == 0: |
|
387 | if not self._eof and self._lenbuf == 0: | |
387 | self._fillbuffer(max(size, _chunksize)) |
|
388 | self._fillbuffer(max(size, _chunksize)) | |
388 | return self._frombuffer(min(self._lenbuf, size)) |
|
389 | return self._frombuffer(min(self._lenbuf, size)) | |
389 |
|
390 | |||
390 | def readline(self, *args, **kwargs): |
|
391 | def readline(self, *args, **kwargs): | |
391 | if len(self._buffer) > 1: |
|
392 | if len(self._buffer) > 1: | |
392 | # this should not happen because both read and readline end with a |
|
393 | # this should not happen because both read and readline end with a | |
393 | # _frombuffer call that collapse it. |
|
394 | # _frombuffer call that collapse it. | |
394 | self._buffer = [b''.join(self._buffer)] |
|
395 | self._buffer = [b''.join(self._buffer)] | |
395 | self._lenbuf = len(self._buffer[0]) |
|
396 | self._lenbuf = len(self._buffer[0]) | |
396 | lfi = -1 |
|
397 | lfi = -1 | |
397 | if self._buffer: |
|
398 | if self._buffer: | |
398 | lfi = self._buffer[-1].find(b'\n') |
|
399 | lfi = self._buffer[-1].find(b'\n') | |
399 | while (not self._eof) and lfi < 0: |
|
400 | while (not self._eof) and lfi < 0: | |
400 | self._fillbuffer() |
|
401 | self._fillbuffer() | |
401 | if self._buffer: |
|
402 | if self._buffer: | |
402 | lfi = self._buffer[-1].find(b'\n') |
|
403 | lfi = self._buffer[-1].find(b'\n') | |
403 | size = lfi + 1 |
|
404 | size = lfi + 1 | |
404 | if lfi < 0: # end of file |
|
405 | if lfi < 0: # end of file | |
405 | size = self._lenbuf |
|
406 | size = self._lenbuf | |
406 | elif len(self._buffer) > 1: |
|
407 | elif len(self._buffer) > 1: | |
407 | # we need to take previous chunks into account |
|
408 | # we need to take previous chunks into account | |
408 | size += self._lenbuf - len(self._buffer[-1]) |
|
409 | size += self._lenbuf - len(self._buffer[-1]) | |
409 | return self._frombuffer(size) |
|
410 | return self._frombuffer(size) | |
410 |
|
411 | |||
411 | def _frombuffer(self, size): |
|
412 | def _frombuffer(self, size): | |
412 | """return at most 'size' data from the buffer |
|
413 | """return at most 'size' data from the buffer | |
413 |
|
414 | |||
414 | The data are removed from the buffer.""" |
|
415 | The data are removed from the buffer.""" | |
415 | if size == 0 or not self._buffer: |
|
416 | if size == 0 or not self._buffer: | |
416 | return b'' |
|
417 | return b'' | |
417 | buf = self._buffer[0] |
|
418 | buf = self._buffer[0] | |
418 | if len(self._buffer) > 1: |
|
419 | if len(self._buffer) > 1: | |
419 | buf = b''.join(self._buffer) |
|
420 | buf = b''.join(self._buffer) | |
420 |
|
421 | |||
421 | data = buf[:size] |
|
422 | data = buf[:size] | |
422 | buf = buf[len(data) :] |
|
423 | buf = buf[len(data) :] | |
423 | if buf: |
|
424 | if buf: | |
424 | self._buffer = [buf] |
|
425 | self._buffer = [buf] | |
425 | self._lenbuf = len(buf) |
|
426 | self._lenbuf = len(buf) | |
426 | else: |
|
427 | else: | |
427 | self._buffer = [] |
|
428 | self._buffer = [] | |
428 | self._lenbuf = 0 |
|
429 | self._lenbuf = 0 | |
429 | return data |
|
430 | return data | |
430 |
|
431 | |||
431 | def _fillbuffer(self, size=_chunksize): |
|
432 | def _fillbuffer(self, size=_chunksize): | |
432 | """read data to the buffer""" |
|
433 | """read data to the buffer""" | |
433 | data = os.read(self._input.fileno(), size) |
|
434 | data = os.read(self._input.fileno(), size) | |
434 | if not data: |
|
435 | if not data: | |
435 | self._eof = True |
|
436 | self._eof = True | |
436 | else: |
|
437 | else: | |
437 | self._lenbuf += len(data) |
|
438 | self._lenbuf += len(data) | |
438 | self._buffer.append(data) |
|
439 | self._buffer.append(data) | |
439 |
|
440 | |||
440 | return data |
|
441 | return data | |
441 |
|
442 | |||
442 |
|
443 | |||
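# Illustrative sketch (not part of the module): the intended collaboration
# between ``hasbuffer`` and ``select``-based polling.  ``pipe_fh`` is a
# hypothetical readable pipe file object.
#
#   import select
#
#   p = bufferedinputpipe(pipe_fh)
#   while not p.closed:
#       if not p.hasbuffer:
#           # Only block in select() when our own buffer is empty;
#           # select() cannot see data already sitting in the buffer.
#           select.select([p.fileno()], [], [])
#       line = p.readline()
#       if not line:
#           break
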
def mmapread(fp, size=None):
    if size == 0:
        # size of 0 to mmap.mmap() means "all data"
        # rather than "zero bytes", so special case that.
        return b''
    elif size is None:
        size = 0
    try:
        fd = getattr(fp, 'fileno', lambda: fp)()
        return mmap.mmap(fd, size, access=mmap.ACCESS_READ)
    except ValueError:
        # Empty files cannot be mmapped, but mmapread should still work. Check
        # if the file is empty, and if so, return an empty buffer.
        if os.fstat(fd).st_size == 0:
            return b''
        raise

class fileobjectproxy(object):
    """A proxy around file objects that tells a watcher when events occur.

    This type is intended to only be used for testing purposes. Think hard
    before using it in important code.
    """

    __slots__ = (
        '_orig',
        '_observer',
    )

    def __init__(self, fh, observer):
        object.__setattr__(self, '_orig', fh)
        object.__setattr__(self, '_observer', observer)

    def __getattribute__(self, name):
        ours = {
            '_observer',
            # IOBase
            'close',
            # closed if a property
            'fileno',
            'flush',
            'isatty',
            'readable',
            'readline',
            'readlines',
            'seek',
            'seekable',
            'tell',
            'truncate',
            'writable',
            'writelines',
            # RawIOBase
            'read',
            'readall',
            'readinto',
            'write',
            # BufferedIOBase
            # raw is a property
            'detach',
            # read defined above
            'read1',
            # readinto defined above
            # write defined above
        }

        # We only observe some methods.
        if name in ours:
            return object.__getattribute__(self, name)

        return getattr(object.__getattribute__(self, '_orig'), name)

    def __nonzero__(self):
        return bool(object.__getattribute__(self, '_orig'))

    __bool__ = __nonzero__

    def __delattr__(self, name):
        return delattr(object.__getattribute__(self, '_orig'), name)

    def __setattr__(self, name, value):
        return setattr(object.__getattribute__(self, '_orig'), name, value)

    def __iter__(self):
        return object.__getattribute__(self, '_orig').__iter__()

    def _observedcall(self, name, *args, **kwargs):
        # Call the original object.
        orig = object.__getattribute__(self, '_orig')
        res = getattr(orig, name)(*args, **kwargs)

        # Call a method on the observer of the same name with arguments
        # so it can react, log, etc.
        observer = object.__getattribute__(self, '_observer')
        fn = getattr(observer, name, None)
        if fn:
            fn(res, *args, **kwargs)

        return res

    def close(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'close', *args, **kwargs
        )

    def fileno(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'fileno', *args, **kwargs
        )

    def flush(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'flush', *args, **kwargs
        )

    def isatty(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'isatty', *args, **kwargs
        )

    def readable(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'readable', *args, **kwargs
        )

    def readline(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'readline', *args, **kwargs
        )

    def readlines(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'readlines', *args, **kwargs
        )

    def seek(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'seek', *args, **kwargs
        )

    def seekable(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'seekable', *args, **kwargs
        )

    def tell(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'tell', *args, **kwargs
        )

    def truncate(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'truncate', *args, **kwargs
        )

    def writable(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'writable', *args, **kwargs
        )

    def writelines(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'writelines', *args, **kwargs
        )

    def read(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'read', *args, **kwargs
        )

    def readall(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'readall', *args, **kwargs
        )

    def readinto(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'readinto', *args, **kwargs
        )

    def write(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'write', *args, **kwargs
        )

    def detach(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'detach', *args, **kwargs
        )

    def read1(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'read1', *args, **kwargs
        )

class observedbufferedinputpipe(bufferedinputpipe):
    """A variation of bufferedinputpipe that is aware of fileobjectproxy.

    ``bufferedinputpipe`` makes low-level calls to ``os.read()`` that
    bypass ``fileobjectproxy``. Because of this, we need to make
    ``bufferedinputpipe`` aware of these operations.

    This variation of ``bufferedinputpipe`` can notify observers about
    ``os.read()`` events. It also re-publishes other events, such as
    ``read()`` and ``readline()``.
    """

    def _fillbuffer(self):
        res = super(observedbufferedinputpipe, self)._fillbuffer()

        fn = getattr(self._input._observer, 'osread', None)
        if fn:
            fn(res, _chunksize)

        return res

    # We use different observer methods because the operation isn't
    # performed on the actual file object but on us.
    def read(self, size):
        res = super(observedbufferedinputpipe, self).read(size)

        fn = getattr(self._input._observer, 'bufferedread', None)
        if fn:
            fn(res, size)

        return res

    def readline(self, *args, **kwargs):
        res = super(observedbufferedinputpipe, self).readline(*args, **kwargs)

        fn = getattr(self._input._observer, 'bufferedreadline', None)
        if fn:
            fn(res)

        return res

PROXIED_SOCKET_METHODS = {
    'makefile',
    'recv',
    'recvfrom',
    'recvfrom_into',
    'recv_into',
    'send',
    'sendall',
    'sendto',
    'setblocking',
    'settimeout',
    'gettimeout',
    'setsockopt',
}

class socketproxy(object):
    """A proxy around a socket that tells a watcher when events occur.

    This is like ``fileobjectproxy`` except for sockets.

    This type is intended to only be used for testing purposes. Think hard
    before using it in important code.
    """

    __slots__ = (
        '_orig',
        '_observer',
    )

    def __init__(self, sock, observer):
        object.__setattr__(self, '_orig', sock)
        object.__setattr__(self, '_observer', observer)

    def __getattribute__(self, name):
        if name in PROXIED_SOCKET_METHODS:
            return object.__getattribute__(self, name)

        return getattr(object.__getattribute__(self, '_orig'), name)

    def __delattr__(self, name):
        return delattr(object.__getattribute__(self, '_orig'), name)

    def __setattr__(self, name, value):
        return setattr(object.__getattribute__(self, '_orig'), name, value)

    def __nonzero__(self):
        return bool(object.__getattribute__(self, '_orig'))

    __bool__ = __nonzero__

    def _observedcall(self, name, *args, **kwargs):
        # Call the original object.
        orig = object.__getattribute__(self, '_orig')
        res = getattr(orig, name)(*args, **kwargs)

        # Call a method on the observer of the same name with arguments
        # so it can react, log, etc.
        observer = object.__getattribute__(self, '_observer')
        fn = getattr(observer, name, None)
        if fn:
            fn(res, *args, **kwargs)

        return res

    def makefile(self, *args, **kwargs):
        res = object.__getattribute__(self, '_observedcall')(
            'makefile', *args, **kwargs
        )

        # The file object may be used for I/O. So we turn it into a
        # proxy using our observer.
        observer = object.__getattribute__(self, '_observer')
        return makeloggingfileobject(
            observer.fh,
            res,
            observer.name,
            reads=observer.reads,
            writes=observer.writes,
            logdata=observer.logdata,
            logdataapis=observer.logdataapis,
        )

    def recv(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'recv', *args, **kwargs
        )

    def recvfrom(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'recvfrom', *args, **kwargs
        )

    def recvfrom_into(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'recvfrom_into', *args, **kwargs
        )

    def recv_into(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'recv_into', *args, **kwargs
        )

    def send(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'send', *args, **kwargs
        )

    def sendall(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'sendall', *args, **kwargs
        )

    def sendto(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'sendto', *args, **kwargs
        )

    def setblocking(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'setblocking', *args, **kwargs
        )

    def settimeout(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'settimeout', *args, **kwargs
        )

    def gettimeout(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'gettimeout', *args, **kwargs
        )

    def setsockopt(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'setsockopt', *args, **kwargs
        )

class baseproxyobserver(object):
    def __init__(self, fh, name, logdata, logdataapis):
        self.fh = fh
        self.name = name
        self.logdata = logdata
        self.logdataapis = logdataapis

    def _writedata(self, data):
        if not self.logdata:
            if self.logdataapis:
                self.fh.write(b'\n')
                self.fh.flush()
            return

        # Simple case writes all data on a single line.
        if b'\n' not in data:
            if self.logdataapis:
                self.fh.write(b': %s\n' % stringutil.escapestr(data))
            else:
                self.fh.write(
                    b'%s> %s\n' % (self.name, stringutil.escapestr(data))
                )
            self.fh.flush()
            return

        # Data with newlines is written to multiple lines.
        if self.logdataapis:
            self.fh.write(b':\n')

        lines = data.splitlines(True)
        for line in lines:
            self.fh.write(
                b'%s> %s\n' % (self.name, stringutil.escapestr(line))
            )
        self.fh.flush()

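# What ``_writedata`` produces, assuming a hypothetical observer with
# ``name=b'dev'``, ``logdataapis=True`` and ``logdata=True``: data without a
# newline stays on the API line; multi-line data is split into one prefixed,
# escaped line per chunk.  E.g. for reads of b'hello' and b'a\nb\n':
#
#   dev> read(5) -> 5: hello
#   dev> read(4) -> 4:
#   dev> a\n
#   dev> b\n
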
class fileobjectobserver(baseproxyobserver):
    """Logs file object activity."""

    def __init__(
        self, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
    ):
        super(fileobjectobserver, self).__init__(fh, name, logdata, logdataapis)
        self.reads = reads
        self.writes = writes

    def read(self, res, size=-1):
        if not self.reads:
            return
        # Python 3 can return None from reads at EOF instead of empty strings.
        if res is None:
            res = b''

        if size == -1 and res == b'':
            # Suppress pointless read(-1) calls that return
            # nothing. These happen _a lot_ on Python 3, and there
            # doesn't seem to be a better workaround to have matching
            # Python 2 and 3 behavior. :(
            return

        if self.logdataapis:
            self.fh.write(b'%s> read(%d) -> %d' % (self.name, size, len(res)))

        self._writedata(res)

    def readline(self, res, limit=-1):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(b'%s> readline() -> %d' % (self.name, len(res)))

        self._writedata(res)

    def readinto(self, res, dest):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> readinto(%d) -> %r' % (self.name, len(dest), res)
            )

        data = dest[0:res] if res is not None else b''

        # _writedata() uses "in" operator and is confused by memoryview because
        # characters are ints on Python 3.
        if isinstance(data, memoryview):
            data = data.tobytes()

        self._writedata(data)

    def write(self, res, data):
        if not self.writes:
            return

        # Python 2 returns None from some write() calls. Python 3 (reasonably)
        # returns the integer bytes written.
        if res is None and data:
            res = len(data)

        if self.logdataapis:
            self.fh.write(b'%s> write(%d) -> %r' % (self.name, len(data), res))

        self._writedata(data)

    def flush(self, res):
        if not self.writes:
            return

        self.fh.write(b'%s> flush() -> %r\n' % (self.name, res))

    # For observedbufferedinputpipe.
    def bufferedread(self, res, size):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> bufferedread(%d) -> %d' % (self.name, size, len(res))
            )

        self._writedata(res)

    def bufferedreadline(self, res):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> bufferedreadline() -> %d' % (self.name, len(res))
            )

        self._writedata(res)

def makeloggingfileobject(
    logh, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
):
    """Turn a file object into a logging file object."""

    observer = fileobjectobserver(
        logh,
        name,
        reads=reads,
        writes=writes,
        logdata=logdata,
        logdataapis=logdataapis,
    )
    return fileobjectproxy(fh, observer)

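# Usage sketch: wrap a file object so every read/write is mirrored into a log
# handle (``makeloggingsocket`` below is the socket analogue).  Both handles
# here are in-memory binary streams for illustration.
#
#   import io
#
#   logh = io.BytesIO()
#   fh = makeloggingfileobject(logh, io.BytesIO(b'data'), b'src',
#                              logdata=True)
#   fh.read(4)
#   # logh.getvalue() now contains: b'src> read(4) -> 4: data\n'
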
class socketobserver(baseproxyobserver):
    """Logs socket activity."""

    def __init__(
        self,
        fh,
        name,
        reads=True,
        writes=True,
        states=True,
        logdata=False,
        logdataapis=True,
    ):
        super(socketobserver, self).__init__(fh, name, logdata, logdataapis)
        self.reads = reads
        self.writes = writes
        self.states = states

    def makefile(self, res, mode=None, bufsize=None):
        if not self.states:
            return

        self.fh.write(b'%s> makefile(%r, %r)\n' % (self.name, mode, bufsize))

    def recv(self, res, size, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> recv(%d, %d) -> %d' % (self.name, size, flags, len(res))
            )
        self._writedata(res)

    def recvfrom(self, res, size, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> recvfrom(%d, %d) -> %d'
                % (self.name, size, flags, len(res[0]))
            )

        self._writedata(res[0])

    def recvfrom_into(self, res, buf, size, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> recvfrom_into(%d, %d) -> %d'
                % (self.name, size, flags, res[0])
            )

        self._writedata(buf[0 : res[0]])

    def recv_into(self, res, buf, size=0, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> recv_into(%d, %d) -> %d' % (self.name, size, flags, res)
            )

        self._writedata(buf[0:res])

    def send(self, res, data, flags=0):
        if not self.writes:
            return

        # socket.send() returns the number of bytes sent as an int.
        self.fh.write(
            b'%s> send(%d, %d) -> %d' % (self.name, len(data), flags, res)
        )
        self._writedata(data)

    def sendall(self, res, data, flags=0):
        if not self.writes:
            return

        if self.logdataapis:
            # Returns None on success. So don't bother reporting return value.
            self.fh.write(
                b'%s> sendall(%d, %d)' % (self.name, len(data), flags)
            )

        self._writedata(data)

    def sendto(self, res, data, flagsoraddress, address=None):
        if not self.writes:
            return

        if address:
            flags = flagsoraddress
        else:
            flags = 0

        if self.logdataapis:
            self.fh.write(
                b'%s> sendto(%d, %d, %r) -> %d'
                % (self.name, len(data), flags, address, res)
            )

        self._writedata(data)

    def setblocking(self, res, flag):
        if not self.states:
            return

        self.fh.write(b'%s> setblocking(%r)\n' % (self.name, flag))

    def settimeout(self, res, value):
        if not self.states:
            return

        self.fh.write(b'%s> settimeout(%r)\n' % (self.name, value))

    def gettimeout(self, res):
        if not self.states:
            return

        self.fh.write(b'%s> gettimeout() -> %f\n' % (self.name, res))

    def setsockopt(self, res, level, optname, value):
        if not self.states:
            return

        self.fh.write(
            b'%s> setsockopt(%r, %r, %r) -> %r\n'
            % (self.name, level, optname, value, res)
        )

def makeloggingsocket(
    logh,
    fh,
    name,
    reads=True,
    writes=True,
    states=True,
    logdata=False,
    logdataapis=True,
):
    """Turn a socket into a logging socket."""

    observer = socketobserver(
        logh,
        name,
        reads=reads,
        writes=writes,
        states=states,
        logdata=logdata,
        logdataapis=logdataapis,
    )
    return socketproxy(fh, observer)

def version():
    """Return version information if available."""
    try:
        from . import __version__

        return __version__.version
    except ImportError:
        return b'unknown'

def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = b'3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple(b'3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = b'3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)

    >>> v = b'3.9-rc'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc')

    >>> v = b'3.9-rc+2-02a8fea4289b'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc+2-02a8fea4289b')

    >>> versiontuple(b'4.6rc0')
    (4, 6, None, 'rc0')
    >>> versiontuple(b'4.6rc0+12-425d55e54f98')
    (4, 6, None, 'rc0+12-425d55e54f98')
    >>> versiontuple(b'.1.2.3')
    (None, None, None, '.1.2.3')
    >>> versiontuple(b'12.34..5')
    (12, 34, None, '..5')
    >>> versiontuple(b'1.2.3.4.5.6')
    (1, 2, 3, '.4.5.6')
    """
    if not v:
        v = version()
    m = remod.match(br'(\d+(?:\.\d+){,2})[+-]?(.*)', v)
    if not m:
        vparts, extra = b'', v
    elif m.group(2):
        vparts, extra = m.groups()
    else:
        vparts, extra = m.group(1), None

    assert vparts is not None  # help pytype

    vints = []
    for i in vparts.split(b'.'):
        try:
            vints.append(int(i))
        except ValueError:
            break
    # (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)

def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keyword args
    if func.__code__.co_argcount == 0:
        listcache = []

        def f():
            if len(listcache) == 0:
                listcache.append(func())
            return listcache[0]

        return f
    cache = {}
    if func.__code__.co_argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]

    else:

        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f


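# A minimal usage sketch for cachefunc (illustrative only; the decorated
# function below is hypothetical, not part of this module). Results are
# memoized by positional arguments forever, so it suits pure functions
# with a bounded argument space:
#
#   @cachefunc
#   def ancestordepth(node):           # hypothetical pure helper
#       return expensivewalk(node)     # computed once per distinct node
#
#   ancestordepth(n)  # first call computes
#   ancestordepth(n)  # second call returns the cached value

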
class cow(object):
    """helper class to make copy-on-write easier

    Call preparewrite before doing any writes.
    """

    def preparewrite(self):
        """call this before writes, return self or a copied new object"""
        if getattr(self, '_copied', 0):
            self._copied -= 1
            # Function cow.__init__ expects 1 arg(s), got 2 [wrong-arg-count]
            return self.__class__(self)  # pytype: disable=wrong-arg-count
        return self

    def copy(self):
        """always do a cheap copy"""
        self._copied = getattr(self, '_copied', 0) + 1
        return self


class sortdict(collections.OrderedDict):
    """a simple sorted dictionary

    >>> d1 = sortdict([(b'a', 0), (b'b', 1)])
    >>> d2 = d1.copy()
    >>> d2
    sortdict([('a', 0), ('b', 1)])
    >>> d2.update([(b'a', 2)])
    >>> list(d2.keys()) # should still be in last-set order
    ['b', 'a']
    >>> d1.insert(1, b'a.5', 0.5)
    >>> d1
    sortdict([('a', 0), ('a.5', 0.5), ('b', 1)])
    """

    def __setitem__(self, key, value):
        if key in self:
            del self[key]
        super(sortdict, self).__setitem__(key, value)

    if pycompat.ispypy:
        # __setitem__() isn't called as of PyPy 5.8.0
        def update(self, src, **f):
            if isinstance(src, dict):
                src = pycompat.iteritems(src)
            for k, v in src:
                self[k] = v
            for k in f:
                self[k] = f[k]

    def insert(self, position, key, value):
        for (i, (k, v)) in enumerate(list(self.items())):
            if i == position:
                self[key] = value
            if i >= position:
                del self[k]
                self[k] = v


class cowdict(cow, dict):
    """copy-on-write dict

    Be sure to call d = d.preparewrite() before writing to d.

    >>> a = cowdict()
    >>> a is a.preparewrite()
    True
    >>> b = a.copy()
    >>> b is a
    True
    >>> c = b.copy()
    >>> c is a
    True
    >>> a = a.preparewrite()
    >>> b is a
    False
    >>> a is a.preparewrite()
    True
    >>> c = c.preparewrite()
    >>> b is c
    False
    >>> b is b.preparewrite()
    True
    """


class cowsortdict(cow, sortdict):
    """copy-on-write sortdict

    Be sure to call d = d.preparewrite() before writing to d.
    """


class transactional(object):  # pytype: disable=ignored-metaclass
    """Base class for making a transactional type into a context manager."""

    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def close(self):
        """Successfully closes the transaction."""

    @abc.abstractmethod
    def release(self):
        """Marks the end of the transaction.

        If the transaction has not been closed, it will be aborted.
        """

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        try:
            if exc_type is None:
                self.close()
        finally:
            self.release()


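# A minimal sketch of a transactional subclass (illustrative; the class below
# is hypothetical, not from this module). Implementing close() and release()
# is all that is needed to inherit context-manager behavior:
#
#   class demotxn(transactional):
#       def close(self):
#           pass  # commit side effects here
#
#       def release(self):
#           pass  # roll back here if close() was never reached
#
#   with demotxn() as txn:
#       pass  # close() runs on success; release() runs unconditionally

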
@contextlib.contextmanager
def acceptintervention(tr=None):
    """A context manager that closes the transaction on InterventionRequired

    If no transaction was provided, this simply runs the body and returns
    """
    if not tr:
        yield
        return
    try:
        yield
        tr.close()
    except error.InterventionRequired:
        tr.close()
        raise
    finally:
        tr.release()


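# Usage sketch for acceptintervention (illustrative; tr stands for any object
# honoring the transactional close()/release() contract). The notable part is
# that InterventionRequired commits the transaction instead of aborting it:
#
#   with acceptintervention(tr):
#       dostep()  # raising error.InterventionRequired still calls tr.close()

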
@contextlib.contextmanager
def nullcontextmanager(enter_result=None):
    yield enter_result


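# nullcontextmanager mirrors contextlib.nullcontext (available since Python
# 3.7): it lets callers keep a single `with` statement whether or not a real
# resource is in play. A hypothetical illustration:
#
#   ctx = lock() if need_lock else nullcontextmanager()
#   with ctx:
#       do_work()

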
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """

    __slots__ = ('next', 'prev', 'key', 'value', 'cost')

    def __init__(self):
        self.next = self
        self.prev = self

        self.key = _notset
        self.value = None
        self.cost = 0

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
        self.value = None
        self.cost = 0


class lrucachedict(object):
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.

    Items in the cache can be inserted with an optional "cost" value. This is
    simply an integer that is specified by the caller. The cache can be queried
    for the total cost of all items presently in the cache.

    The cache can also define a maximum cost. If a cache insertion would
    cause the total cost of the cache to go beyond the maximum cost limit,
    nodes will be evicted to make room for the new item. This can be used
    to e.g. set a max memory limit and associate an estimated bytes size
    cost to each item in the cache. By default, no maximum cost is enforced.
    """

    def __init__(self, max, maxcost=0):
        self._cache = {}

        self._head = _lrucachenode()
        self._size = 1
        self.capacity = max
        self.totalcost = 0
        self.maxcost = maxcost

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def insert(self, k, v, cost=0):
        """Insert a new item in the cache with optional cost value."""
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            self.totalcost -= node.cost
            node.value = v
            node.cost = cost
            self.totalcost += cost
            self._movetohead(node)

            if self.maxcost:
                self._enforcecostlimit()

            return

        if self._size < self.capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

            # At capacity. Kill the old entry.
            if node.key is not _notset:
                self.totalcost -= node.cost
                del self._cache[node.key]

        node.key = k
        node.value = v
        node.cost = cost
        self.totalcost += cost
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

        if self.maxcost:
            self._enforcecostlimit()

    def __setitem__(self, k, v):
        self.insert(k, v)

    def __delitem__(self, k):
        self.pop(k)

    def pop(self, k, default=_notset):
        try:
            node = self._cache.pop(k)
        except KeyError:
            if default is _notset:
                raise
            return default

        assert node is not None  # help pytype
        value = node.value
        self.totalcost -= node.cost
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

        return value

    # Additional dict methods.

    def get(self, k, default=None):
        try:
            return self.__getitem__(k)
        except KeyError:
            return default

    def peek(self, k, default=_notset):
        """Get the specified item without moving it to the head

        Unlike get(), this doesn't mutate the internal state. But be aware
        that it doesn't mean peek() is thread safe.
        """
        try:
            node = self._cache[k]
            assert node is not None  # help pytype
            return node.value
        except KeyError:
            if default is _notset:
                raise
            return default

    def clear(self):
        n = self._head
        while n.key is not _notset:
            self.totalcost -= n.cost
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self, capacity=None, maxcost=0):
        """Create a new cache as a copy of the current one.

        By default, the new cache has the same capacity as the existing one.
        But, the cache capacity can be changed as part of performing the
        copy.

        Items in the copy have an insertion/access order matching this
        instance.
        """

        capacity = capacity or self.capacity
        maxcost = maxcost or self.maxcost
        result = lrucachedict(capacity, maxcost=maxcost)

        # We copy entries by iterating in oldest-to-newest order so the copy
        # has the correct ordering.

        # Find the first non-empty entry.
        n = self._head.prev
        while n.key is _notset and n is not self._head:
            n = n.prev

        # We could potentially skip the first N items when decreasing capacity.
        # But let's keep it simple unless it is a performance problem.
        for i in range(len(self._cache)):
            result.insert(n.key, n.value, cost=n.cost)
            n = n.prev

        return result

    def popoldest(self):
        """Remove the oldest item from the cache.

        Returns the (key, value) describing the removed cache entry.
        """
        if not self._cache:
            return

        # Walk the linked list backwards starting at tail node until we hit
        # a non-empty node.
        n = self._head.prev

        assert n is not None  # help pytype

        while n.key is _notset:
            n = n.prev

        assert n is not None  # help pytype

        key, value = n.key, n.value

        # And remove it from the cache and mark it as empty.
        del self._cache[n.key]
        self.totalcost -= n.cost
        n.markempty()

        return key, value

    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node

    def _enforcecostlimit(self):
        # This should run after an insertion. It should only be called if total
        # cost limits are being enforced.
        # The most recently inserted node is never evicted.
        if len(self) <= 1 or self.totalcost <= self.maxcost:
            return

        # This is logically equivalent to calling popoldest() until we
        # free up enough cost. We don't do that since popoldest() needs
        # to walk the linked list and doing this in a loop would be
        # quadratic. So we find the first non-empty node and then
        # walk nodes until we free up enough capacity.
        #
        # If we only removed the minimum number of nodes to free enough
        # cost at insert time, chances are high that the next insert would
        # also require pruning. This would effectively constitute quadratic
        # behavior for insert-heavy workloads. To mitigate this, we set a
        # target cost that is a percentage of the max cost. This will tend
        # to free more nodes when the high water mark is reached, which
        # lowers the chances of needing to prune on the subsequent insert.
        targetcost = int(self.maxcost * 0.75)

        n = self._head.prev
        while n.key is _notset:
            n = n.prev

        while len(self) > 1 and self.totalcost > targetcost:
            del self._cache[n.key]
            self.totalcost -= n.cost
            n.markempty()
            n = n.prev


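# A small usage sketch for lrucachedict (illustrative values). With a maximum
# cost set, an insert that overflows the limit evicts oldest entries until
# the total cost drops to 75% of maxcost, so back-to-back inserts rarely
# re-trigger pruning:
#
#   d = lrucachedict(4, maxcost=100)
#   d.insert(b'a', b'data', cost=60)
#   d.insert(b'b', b'data', cost=60)  # evicts b'a'; totalcost falls to 60
#   d[b'b']                           # access marks b'b' as the newest entry

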
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    order = collections.deque()
    if func.__code__.co_argcount == 1:

        def f(arg):
            if arg not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            else:
                order.remove(arg)
            order.append(arg)
            return cache[arg]

    else:

        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f


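# Sketch of the difference from cachefunc (illustrative; expensive_stat is
# hypothetical): lrucachefunc keeps only roughly the 20 most recent argument
# values, evicting the least recently used, so it is safe for unbounded key
# spaces:
#
#   getflags = lrucachefunc(lambda path: expensive_stat(path))
#   getflags(b'a')  # cached; a 21st distinct path evicts the oldest entry

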
class propertycache(object):
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        result = self.func(obj)
        self.cachevalue(obj, result)
        return result

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value


def clearcachedproperty(obj, prop):
    '''clear a cached property value, if one has been set'''
    prop = pycompat.sysstr(prop)
    if prop in obj.__dict__:
        del obj.__dict__[prop]


def increasingchunks(source, min=1024, max=65536):
    """return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max"""

    def log2(x):
        if not x:
            return 0
        i = 0
        while x:
            x >>= 1
            i += 1
        return i - 1

    buf = []
    blen = 0
    for chunk in source:
        buf.append(chunk)
        blen += len(chunk)
        if blen >= min:
            if min < max:
                min = min << 1
                nmin = 1 << log2(blen)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield b''.join(buf)
            blen = 0
            buf = []
    if buf:
        yield b''.join(buf)


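# Illustrative trace of increasingchunks (values assumed, not from the
# original module): with the default min=1024, successive yields happen at
# roughly 1KB, 2KB, 4KB, ... up to the 64KB cap, amortizing per-chunk
# overhead for large streams:
#
#   for buf in increasingchunks(iter([b'x' * 700] * 200)):
#       pass  # early buffers are ~1.4KB; later ones grow toward 64KB

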
def always(fn):
    return True


def never(fn):
    return False


def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue has been fixed in 2.7. But it still affects
    CPython's performance.
    """

    def wrapper(*args, **kwargs):
        gcenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            if gcenabled:
                gc.enable()

    return wrapper


if pycompat.ispypy:
    # PyPy runs slower with gc disabled
    nogc = lambda x: x


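# Usage sketch for nogc (illustrative; buildbigindex is hypothetical).
# The decorator only pauses collection for the duration of the call and
# restores the collector's prior state afterwards:
#
#   @nogc
#   def buildbigindex(entries):
#       return {e.key: e for e in entries}  # no GC pauses mid-build

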
def pathto(root, n1, n2):
    # type: (bytes, bytes, bytes) -> bytes
    """return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    """
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            return os.path.join(root, localpath(n2))
        n2 = b'/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split(b'/')
    a.reverse()
    b.reverse()
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    return pycompat.ossep.join(([b'..'] * len(a)) + b) or b'.'


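# Worked example for pathto (illustrative, assuming a POSIX os.sep of '/'):
# from n1=b'sub/dir' it takes b'../..' to climb back to root, then descends
# to n2=b'other/file'. Both legs fall out of the common-prefix strip above:
#
#   pathto(b'/repo', b'sub/dir', b'other/file')  # -> b'../../other/file'

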
def checksignature(func, depth=1):
    '''wrap a function with code to check for calling errors'''

    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            if len(traceback.extract_tb(sys.exc_info()[2])) == depth:
                raise error.SignatureError
            raise

    return check


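# Sketch of checksignature in use (illustrative): a TypeError raised by the
# wrapped call itself (a traceback exactly `depth` frames deep) becomes a
# SignatureError, while TypeErrors from deeper frames propagate unchanged:
#
#   checked = checksignature(lambda x: x)
#   checked(1, 2)  # raises error.SignatureError instead of a bare TypeError

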
# a whitelist of known filesystems where hardlinks work reliably
_hardlinkfswhitelist = {
    b'apfs',
    b'btrfs',
    b'ext2',
    b'ext3',
    b'ext4',
    b'hfs',
    b'jfs',
    b'NTFS',
    b'reiserfs',
    b'tmpfs',
    b'ufs',
    b'xfs',
    b'zfs',
}


def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
    """copy a file, preserving mode and optionally other stat info like
    atime/mtime

    checkambig argument is used with filestat, and is useful only if
    destination file is guarded by any lock (e.g. repo.lock or
    repo.wlock).

    copystat and checkambig should be exclusive.
    """
    assert not (copystat and checkambig)
    oldstat = None
    if os.path.lexists(dest):
        if checkambig:
            oldstat = checkambig and filestat.frompath(dest)
        unlink(dest)
    if hardlink:
        # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
        # unless we are confident that dest is on a whitelisted filesystem.
        try:
            fstype = getfstype(os.path.dirname(dest))
        except OSError:
            fstype = None
        if fstype not in _hardlinkfswhitelist:
            hardlink = False
    if hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass  # fall back to normal copy
    if os.path.islink(src):
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't needed
        # for them anyway
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
                if oldstat and oldstat.stat:
                    newstat = filestat.frompath(dest)
                    if newstat.isambig(oldstat):
                        # stat of copied file is ambiguous to original one
                        advanced = (
                            oldstat.stat[stat.ST_MTIME] + 1
                        ) & 0x7FFFFFFF
                        os.utime(dest, (advanced, advanced))
        except shutil.Error as inst:
            raise error.Abort(stringutil.forcebytestr(inst))


def copyfiles(src, dst, hardlink=None, progress=None):
    """Copy a directory tree using hardlinks if possible."""
    num = 0

    def settopic():
        if progress:
            progress.topic = _(b'linking') if hardlink else _(b'copying')

    if os.path.isdir(src):
        if hardlink is None:
            hardlink = (
                os.stat(src).st_dev == os.stat(os.path.dirname(dst)).st_dev
            )
        settopic()
        os.mkdir(dst)
        for name, kind in listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress)
            num += n
    else:
        if hardlink is None:
            hardlink = (
                os.stat(os.path.dirname(src)).st_dev
                == os.stat(os.path.dirname(dst)).st_dev
            )
        settopic()

        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        if progress:
            progress.increment()

    return hardlink, num


_winreservednames = {
    b'con',
    b'prn',
    b'aux',
    b'nul',
    b'com1',
    b'com2',
    b'com3',
    b'com4',
    b'com5',
    b'com6',
    b'com7',
    b'com8',
    b'com9',
    b'lpt1',
    b'lpt2',
    b'lpt3',
    b'lpt4',
    b'lpt5',
    b'lpt6',
    b'lpt7',
    b'lpt8',
    b'lpt9',
}
_winreservedchars = b':*?"<>|'


def checkwinfilename(path):
    # type: (bytes) -> Optional[bytes]
    r"""Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename(b"just/a/normal/path")
    >>> checkwinfilename(b"foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename(b"foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/xml.con")
    >>> checkwinfilename(b"foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename(b"foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename(b"../bar")
    >>> checkwinfilename(b"foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename(b"foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    """
    if path.endswith(b'\\'):
        return _(b"filename ends with '\\', which is invalid on Windows")
    if b'\\/' in path:
        return _(b"directory name ends with '\\', which is invalid on Windows")
    for n in path.replace(b'\\', b'/').split(b'/'):
        if not n:
            continue
        for c in _filenamebytestr(n):
            if c in _winreservedchars:
                return (
                    _(
                        b"filename contains '%s', which is reserved "
                        b"on Windows"
                    )
                    % c
                )
            if ord(c) <= 31:
                return _(
                    b"filename contains '%s', which is invalid on Windows"
                ) % stringutil.escapestr(c)
        base = n.split(b'.')[0]
        if base and base.lower() in _winreservednames:
            return (
                _(b"filename contains '%s', which is reserved on Windows")
                % base
            )
        t = n[-1:]
        if t in b'. ' and n not in b'..':
            return (
                _(
                    b"filename ends with '%s', which is not allowed "
                    b"on Windows"
                )
                % t
            )


timer = getattr(time, "perf_counter", None)

if pycompat.iswindows:
    checkosfilename = checkwinfilename
    if not timer:
        timer = time.clock
else:
    # mercurial.windows doesn't have platform.checkosfilename
    checkosfilename = platform.checkosfilename  # pytype: disable=module-attr
    if not timer:
        timer = time.time


def makelock(info, pathname):
    """Create a lock file atomically if possible

    This may leave a stale lock file if symlink isn't supported and signal
    interrupt is enabled.
    """
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        if why.errno == errno.EEXIST:
            raise
    except AttributeError:  # no symlink in os
        pass

    flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL | getattr(os, 'O_BINARY', 0)
    ld = os.open(pathname, flags)
    os.write(ld, info)
    os.close(ld)


2131 | def readlock(pathname): |
|
2132 | def readlock(pathname): | |
2132 | # type: (bytes) -> bytes |
|
2133 | # type: (bytes) -> bytes | |
2133 | try: |
|
2134 | try: | |
2134 | return readlink(pathname) |
|
2135 | return readlink(pathname) | |
2135 | except OSError as why: |
|
2136 | except OSError as why: | |
2136 | if why.errno not in (errno.EINVAL, errno.ENOSYS): |
|
2137 | if why.errno not in (errno.EINVAL, errno.ENOSYS): | |
2137 | raise |
|
2138 | raise | |
2138 | except AttributeError: # no symlink in os |
|
2139 | except AttributeError: # no symlink in os | |
2139 | pass |
|
2140 | pass | |
2140 | with posixfile(pathname, b'rb') as fp: |
|
2141 | with posixfile(pathname, b'rb') as fp: | |
2141 | return fp.read() |
|
2142 | return fp.read() | |
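

# Illustrative sketch (not part of the original module): the lock content
# round-trips through either a symlink target or a plain file, so readlock()
# recovers whatever bytes makelock() stored.  The lock path is hypothetical.
#
#   >>> makelock(b'myhost:12345', b'/tmp/example.lock')
#   >>> readlock(b'/tmp/example.lock')
#   b'myhost:12345'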


def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        return os.fstat(fp.fileno())
    except AttributeError:
        return os.stat(fp.name)


# File system features


def fscasesensitive(path):
    # type: (bytes) -> bool
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    s1 = os.lstat(path)
    d, b = os.path.split(path)
    b2 = b.upper()
    if b == b2:
        b2 = b.lower()
        if b == b2:
            return True  # no evidence against case sensitivity
    p2 = os.path.join(d, b2)
    try:
        s2 = os.lstat(p2)
        if s2 == s1:
            return False
        return True
    except OSError:
        return True
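

# Illustrative sketch (not from the original module): the probe lstats the
# path and a case-swapped variant of its final component; identical stat
# results mean the filesystem folded the case.  The path is hypothetical.
#
#   >>> fscasesensitive(b'/repo/.hg')  # typically True on Linux, False on macOS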


_re2_input = lambda x: x
try:
    import re2  # pytype: disable=import-error

    _re2 = None
except ImportError:
    _re2 = False


class _re(object):
    def _checkre2(self):
        global _re2
        global _re2_input

        check_pattern = br'\[([^\[]+)\]'
        check_input = b'[ui]'
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(check_pattern, check_input))
        except ImportError:
            _re2 = False
        except TypeError:
            # the `pyre-2` project provides a re2 module that accepts bytes
            # the `fb-re2` project provides a re2 module that accepts sysstr
            check_pattern = pycompat.sysstr(check_pattern)
            check_input = pycompat.sysstr(check_input)
            _re2 = bool(re2.match(check_pattern, check_input))
            _re2_input = pycompat.sysstr

    def compile(self, pat, flags=0):
        """Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE."""
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            if flags & remod.IGNORECASE:
                pat = b'(?i)' + pat
            if flags & remod.MULTILINE:
                pat = b'(?m)' + pat
            try:
                return re2.compile(_re2_input(pat))
            except re2.error:
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        """Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        """
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape


re = _re()
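

# Illustrative usage (not from the original module): callers go through the
# module-level ``re`` instance above, transparently getting re2 when it is
# importable and falling back to the stdlib ``re`` otherwise.
#
#   >>> pat = re.compile(b'[ui]')
#   >>> bool(pat.match(b'u'))
#   True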

_fspathcache = {}


def fspath(name, root):
    # type: (bytes, bytes) -> bytes
    """Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    """

    def _makefspathcacheentry(dir):
        return {normcase(n): n for n in os.listdir(dir)}

    seps = pycompat.ossep
    if pycompat.osaltsep:
        seps = seps + pycompat.osaltsep
    # Protect backslashes. This gets silly very quickly.
    seps.replace(b'\\', b'\\\\')
    pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patch of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return b''.join(result)
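

# Illustrative sketch (not from the original module): given a normcased
# relative name, fspath() restores the on-disk spelling on a
# case-insensitive filesystem.  The repository layout is hypothetical.
#
#   >>> fspath(b'readme.txt', b'/repo')   # file stored as README.txt
#   b'README.txt'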


def checknlink(testfile):
    # type: (bytes) -> bool
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1, f2, fp = None, None, None
    try:
        fd, f1 = pycompat.mkstemp(
            prefix=b'.%s-' % os.path.basename(testfile),
            suffix=b'1~',
            dir=os.path.dirname(testfile),
        )
        os.close(fd)
        f2 = b'%s2~' % f1[:-2]

        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fp = posixfile(f2)
        return nlinks(f2) > 1
    except OSError:
        return False
    finally:
        if fp is not None:
            fp.close()
        for f in (f1, f2):
            try:
                if f is not None:
                    os.unlink(f)
            except OSError:
                pass


def endswithsep(path):
    # type: (bytes) -> bool
    '''Check path ends with os.sep or os.altsep.'''
    return bool(  # help pytype
        path.endswith(pycompat.ossep)
        or pycompat.osaltsep
        and path.endswith(pycompat.osaltsep)
    )


def splitpath(path):
    # type: (bytes) -> List[bytes]
    """Split path by os.sep.
    Note that this function does not use os.altsep because this is
    an alternative of simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if needed."""
    return path.split(pycompat.ossep)
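

# Illustrative examples (not from the original module), assuming a POSIX
# os.sep of b'/':
#
#   >>> splitpath(b'foo/bar/baz')
#   [b'foo', b'bar', b'baz']
#   >>> endswithsep(b'foo/')
#   True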


def mktempcopy(name, emptyok=False, createmode=None, enforcewritable=False):
    """Create a temporary file with the same contents as name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    fd, temp = pycompat.mkstemp(prefix=b'.%s-' % fn, suffix=b'~', dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode, enforcewritable)

    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, b"rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, b"wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except:  # re-raises
        try:
            os.unlink(temp)
        except OSError:
            pass
        raise
    return temp


class filestat(object):
    """helper to exactly detect changes of a file

    The 'stat' attribute is the result of 'os.stat()' if the specified
    'path' exists. Otherwise, it is None. This can avoid a preparative
    'exists()' examination on the client side of this class.
    """

    def __init__(self, stat):
        self.stat = stat

    @classmethod
    def frompath(cls, path):
        try:
            stat = os.stat(path)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            stat = None
        return cls(stat)

    @classmethod
    def fromfp(cls, fp):
        stat = os.fstat(fp.fileno())
        return cls(stat)

    __hash__ = object.__hash__

    def __eq__(self, old):
        try:
            # if ambiguity between stat of new and old file is
            # avoided, comparison of size, ctime and mtime is enough
            # to exactly detect change of a file regardless of platform
            return (
                self.stat.st_size == old.stat.st_size
                and self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
                and self.stat[stat.ST_MTIME] == old.stat[stat.ST_MTIME]
            )
        except AttributeError:
            pass
        try:
            return self.stat is None and old.stat is None
        except AttributeError:
            return False

    def isambig(self, old):
        """Examine whether new (= self) stat is ambiguous against old one

        "S[N]" below means stat of a file at N-th change:

        - S[n-1].ctime  < S[n].ctime: can detect change of a file
        - S[n-1].ctime == S[n].ctime
          - S[n-1].ctime  < S[n].mtime: means natural advancing (*1)
          - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
          - S[n-1].ctime  > S[n].mtime: never occurs naturally (don't care)
        - S[n-1].ctime  > S[n].ctime: never occurs naturally (don't care)

        Case (*2) above means that a file was changed twice or more in
        the same second (= S[n-1].ctime), and comparison of timestamps
        is ambiguous.

        The basic idea to avoid such ambiguity is "advance mtime 1 sec,
        if timestamp is ambiguous".

        But advancing mtime only in case (*2) doesn't work as
        expected, because naturally advanced S[n].mtime in case (*1)
        might be equal to manually advanced S[n-1 or earlier].mtime.

        Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
        treated as ambiguous regardless of mtime, to avoid overlooking
        a conflict between such mtimes.

        Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
        S[n].mtime", even if the size of a file isn't changed.
        """
        try:
            return self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
        except AttributeError:
            return False

    def avoidambig(self, path, old):
        """Change file stat of specified path to avoid ambiguity

        'old' should be previous filestat of 'path'.

        This skips avoiding ambiguity, if a process doesn't have
        appropriate privileges for 'path'. This returns False in this
        case.

        Otherwise, this returns True, as "ambiguity is avoided".
        """
        advanced = (old.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
        try:
            os.utime(path, (advanced, advanced))
        except OSError as inst:
            if inst.errno == errno.EPERM:
                # utime() on the file created by another user causes EPERM,
                # if a process doesn't have appropriate privileges
                return False
            raise
        return True

    def __ne__(self, other):
        return not self == other
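

# Illustrative sketch (not from the original module): detecting whether a
# file changed between two points in time, with mtime-ambiguity handling.
# The path is hypothetical.
#
#   >>> old = filestat.frompath(b'some-file')
#   >>> # ... another process may rewrite some-file here ...
#   >>> new = filestat.frompath(b'some-file')
#   >>> if new == old:
#   ...     pass  # unchanged
#   >>> if new.isambig(old):
#   ...     new.avoidambig(b'some-file', old)  # bump mtime by one second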


class atomictempfile(object):
    """writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.

    checkambig argument of constructor is used with filestat, and is
    useful only if target file is guarded by any lock (e.g. repo.lock
    or repo.wlock).
    """

    def __init__(self, name, mode=b'w+b', createmode=None, checkambig=False):
        self.__name = name  # permanent name
        self._tempname = mktempcopy(
            name,
            emptyok=(b'w' in mode),
            createmode=createmode,
            enforcewritable=(b'w' in mode),
        )

        self._fp = posixfile(self._tempname, mode)
        self._checkambig = checkambig

        # delegated methods
        self.read = self._fp.read
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        if not self._fp.closed:
            self._fp.close()
            filename = localpath(self.__name)
            oldstat = self._checkambig and filestat.frompath(filename)
            if oldstat and oldstat.stat:
                rename(self._tempname, filename)
                newstat = filestat.frompath(filename)
                if newstat.isambig(oldstat):
                    # stat of changed file is ambiguous to original one
                    advanced = (oldstat.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
                    os.utime(filename, (advanced, advanced))
            else:
                rename(self._tempname, filename)

    def discard(self):
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'):  # constructor actually did something
            self.discard()

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, traceback):
        if exctype is not None:
            self.discard()
        else:
            self.close()
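

# Illustrative usage (not from the original module): writes land in a temp
# copy and only replace the target on a clean exit; an exception inside the
# with-block discards them.  The filename is hypothetical.
#
#   with atomictempfile(b'config-file', mode=b'wb') as fp:
#       fp.write(b'new contents\n')
#   # 'config-file' now has the new contents, updated atomically.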


def unlinkpath(f, ignoremissing=False, rmdir=True):
    # type: (bytes, bool, bool) -> None
    """unlink and remove the directory if it is empty"""
    if ignoremissing:
        tryunlink(f)
    else:
        unlink(f)
    if rmdir:
        # try removing directories that might now be empty
        try:
            removedirs(os.path.dirname(f))
        except OSError:
            pass


def tryunlink(f):
    # type: (bytes) -> None
    """Attempt to remove a file, ignoring ENOENT errors."""
    try:
        unlink(f)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise


def makedirs(name, mode=None, notindexed=False):
    # type: (bytes, Optional[int], bool) -> None
    """recursive directory creation with parent mode inheritance

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST:
            return
        if err.errno != errno.ENOENT or not name:
            raise
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            raise
        makedirs(parent, mode, notindexed)
        try:
            makedir(name, notindexed)
        except OSError as err:
            # Catch EEXIST to handle races
            if err.errno == errno.EEXIST:
                return
            raise
    if mode is not None:
        os.chmod(name, mode)


def readfile(path):
    # type: (bytes) -> bytes
    with open(path, b'rb') as fp:
        return fp.read()


def writefile(path, text):
    # type: (bytes, bytes) -> None
    with open(path, b'wb') as fp:
        fp.write(text)


def appendfile(path, text):
    # type: (bytes, bytes) -> None
    with open(path, b'ab') as fp:
        fp.write(text)


class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks."""

        def splitbig(chunks):
            for chunk in chunks:
                if len(chunk) > 2 ** 20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk

        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return b''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                target = 2 ** 18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset : offset + left])
                self._chunkoffset += left
                left -= chunkremaining

        return b''.join(buf)
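

# Illustrative usage (not from the original module): re-chunking an iterator
# of unevenly sized byte strings into fixed-size reads.
#
#   >>> buf = chunkbuffer(iter([b'abc', b'defg', b'h']))
#   >>> buf.read(5)
#   b'abcde'
#   >>> buf.read(5)
#   b'fgh'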


def filechunkiter(f, size=131072, limit=None):
    """Create a generator that produces the data in the file size
    (default 131072) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        if limit is None:
            nbytes = size
        else:
            nbytes = min(limit, size)
        s = nbytes and f.read(nbytes)
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s
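

# Illustrative usage (not from the original module): streaming a file in
# bounded chunks instead of loading it whole.  The filename and 'process'
# callback are hypothetical.
#
#   with posixfile(b'big-file', b'rb') as fp:
#       for chunk in filechunkiter(fp, size=65536, limit=1 << 20):
#           process(chunk)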


class cappedreader(object):
    """A file object proxy that allows reading up to N bytes.

    Given a source file object, instances of this type allow reading up to
    N bytes from that source file object. Attempts to read past the allowed
    limit are treated as EOF.

    It is assumed that I/O is not performed on the original file object
    in addition to I/O that is performed by this instance. If there is,
    state tracking will get out of sync and unexpected results will ensue.
    """

    def __init__(self, fh, limit):
        """Allow reading up to <limit> bytes from <fh>."""
        self._fh = fh
        self._left = limit

    def read(self, n=-1):
        if not self._left:
            return b''

        if n < 0:
            n = self._left

        data = self._fh.read(min(n, self._left))
        self._left -= len(data)
        assert self._left >= 0

        return data

    def readinto(self, b):
        res = self.read(len(b))
        if res is None:
            return None

        b[0 : len(res)] = res
        return len(res)
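

# Illustrative usage (not from the original module): capping reads from an
# in-memory stream; reads beyond the cap behave like EOF.
#
#   >>> import io
#   >>> capped = cappedreader(io.BytesIO(b'0123456789'), 4)
#   >>> capped.read()
#   b'0123'
#   >>> capped.read()
#   b''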


def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def go(count):
        for multiplier, divisor, format in unittable:
            if abs(count) >= divisor * multiplier:
                return format % (count / float(divisor))
        return unittable[-1][2] % count

    return go


def processlinerange(fromline, toline):
    # type: (int, int) -> Tuple[int, int]
    """Check that linerange <fromline>:<toline> makes sense and return a
    0-based range.

    >>> processlinerange(10, 20)
    (9, 20)
    >>> processlinerange(2, 1)
    Traceback (most recent call last):
        ...
    ParseError: line range must be positive
    >>> processlinerange(0, 5)
    Traceback (most recent call last):
        ...
    ParseError: fromline must be strictly positive
    """
    if toline - fromline < 0:
        raise error.ParseError(_(b"line range must be positive"))
    if fromline < 1:
        raise error.ParseError(_(b"fromline must be strictly positive"))
    return fromline - 1, toline


bytecount = unitcountfn(
    (100, 1 << 30, _(b'%.0f GB')),
    (10, 1 << 30, _(b'%.1f GB')),
    (1, 1 << 30, _(b'%.2f GB')),
    (100, 1 << 20, _(b'%.0f MB')),
    (10, 1 << 20, _(b'%.1f MB')),
    (1, 1 << 20, _(b'%.2f MB')),
    (100, 1 << 10, _(b'%.0f KB')),
    (10, 1 << 10, _(b'%.1f KB')),
    (1, 1 << 10, _(b'%.2f KB')),
    (1, 1, _(b'%.0f bytes')),
)
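

# Illustrative examples (not from the original module): the table above
# picks the first row whose threshold (multiplier * divisor) the count
# reaches, so precision shrinks as magnitude grows.
#
#   >>> bytecount(500)
#   b'500 bytes'
#   >>> bytecount(1 << 10)
#   b'1.00 KB'
#   >>> bytecount(150 * (1 << 20))
#   b'150 MB'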


class transformingwriter(object):
    """Writable file wrapper to transform data by function"""

    def __init__(self, fp, encode):
        self._fp = fp
        self._encode = encode

    def close(self):
        self._fp.close()

    def flush(self):
        self._fp.flush()

    def write(self, data):
        return self._fp.write(self._encode(data))


# Matches a single EOL which can either be a CRLF where repeated CR
# are removed or a LF. We do not care about old Macintosh files, so a
# stray CR is an error.
_eolre = remod.compile(br'\r*\n')


def tolf(s):
    # type: (bytes) -> bytes
    return _eolre.sub(b'\n', s)


def tocrlf(s):
    # type: (bytes) -> bytes
    return _eolre.sub(b'\r\n', s)


def _crlfwriter(fp):
    return transformingwriter(fp, tocrlf)


if pycompat.oslinesep == b'\r\n':
    tonativeeol = tocrlf
    fromnativeeol = tolf
    nativeeolwriter = _crlfwriter
else:
    tonativeeol = pycompat.identity
    fromnativeeol = pycompat.identity
    nativeeolwriter = pycompat.identity
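

# Illustrative examples (not from the original module): the _eolre pattern
# collapses any run of CRs before a LF, so mixed line endings normalize
# cleanly in both directions.
#
#   >>> tolf(b'a\r\nb\r\r\nc\n')
#   b'a\nb\nc\n'
#   >>> tocrlf(b'a\nb\r\n')
#   b'a\r\nb\r\n'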

if pyplatform.python_implementation() == b'CPython' and sys.version_info < (
    3,
    0,
):
    # There is an issue in CPython that some IO methods do not handle EINTR
    # correctly. The following table shows what CPython version (and functions)
    # are affected (buggy: has the EINTR bug, okay: otherwise):
    #
    #             | < 2.7.4 | 2.7.4 to 2.7.12 | >= 3.0
    # --------------------------------------------------
    # fp.__iter__ | buggy   | buggy           | okay
    # fp.read*    | buggy   | okay [1]        | okay
    #
    # [1]: fixed by changeset 67dc99a989cd in the cpython hg repo.
    #
    # Here we work around the EINTR issue for fileobj.__iter__. Other methods
    # like "read*" work fine, as we do not support Python < 2.7.4.
    #
    # Although we can work around the EINTR issue for fp.__iter__, it is
    # slower: "for x in fp" is 4x faster than "for x in iter(fp.readline, '')"
    # in CPython 2, because CPython 2 maintains an internal readahead buffer
    # for fp.__iter__ but not other fp.read* methods.
    #
    # On modern systems like Linux, the "read" syscall cannot be interrupted
    # when reading "fast" files like on-disk files. So the EINTR issue only
    # affects things like pipes, sockets, ttys etc. We treat "normal" (S_ISREG)
    # files approximately as "fast" files and use the fast (unsafe) code path,
    # to minimize the performance impact.

    def iterfile(fp):
        fastpath = True
        if type(fp) is file:
            fastpath = stat.S_ISREG(os.fstat(fp.fileno()).st_mode)
        if fastpath:
            return fp
        else:
            # fp.readline deals with EINTR correctly, use it as a workaround.
            return iter(fp.readline, b'')


else:
    # PyPy and CPython 3 do not have the EINTR issue thus no workaround needed.
    def iterfile(fp):
        return fp
2923 |
|
2924 | |||
2924 |
|
2925 | |||
2925 | def iterlines(iterator): |
|
2926 | def iterlines(iterator): | |
2926 | # type: (Iterator[bytes]) -> Iterator[bytes] |
|
2927 | # type: (Iterator[bytes]) -> Iterator[bytes] | |
2927 | for chunk in iterator: |
|
2928 | for chunk in iterator: | |
2928 | for line in chunk.splitlines(): |
|
2929 | for line in chunk.splitlines(): | |
2929 | yield line |
|
2930 | yield line | |
2930 |
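One caller-facing subtlety worth noting here: each chunk is split independently, so a line spanning a chunk boundary is yielded in pieces:

    list(iterlines([b'one\ntw', b'o\n']))
    # -> [b'one', b'tw', b'o']; the pieces of 'two' are not rejoined across
    # chunks, so chunk boundaries should coincide with line boundaries when
    # exact lines matter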
|
2931 | |||
2931 |
|
2932 | |||
2932 | def expandpath(path): |
|
2933 | def expandpath(path): | |
2933 | # type: (bytes) -> bytes |
|
2934 | # type: (bytes) -> bytes | |
2934 | return os.path.expanduser(os.path.expandvars(path)) |
|
2935 | return os.path.expanduser(os.path.expandvars(path)) | |
2935 |
|
2936 | |||
2936 |
|
2937 | |||
2937 | def interpolate(prefix, mapping, s, fn=None, escape_prefix=False): |
|
2938 | def interpolate(prefix, mapping, s, fn=None, escape_prefix=False): | |
2938 | """Return the result of interpolating items in the mapping into string s. |
|
2939 | """Return the result of interpolating items in the mapping into string s. | |
2939 |
|
2940 | |||
2940 | prefix is a single character string, or a two character string with |
|
2941 | prefix is a single character string, or a two character string with | |
2941 | a backslash as the first character if the prefix needs to be escaped in |
|
2942 | a backslash as the first character if the prefix needs to be escaped in | |
2942 | a regular expression. |
|
2943 | a regular expression. | |
2943 |
|
2944 | |||
2944 | fn is an optional function that will be applied to the replacement text |
|
2945 | fn is an optional function that will be applied to the replacement text | |
2945 | just before replacement. |
|
2946 | just before replacement. | |
2946 |
|
2947 | |||
2947 | escape_prefix is an optional flag that allows escaping the prefix by |

2948 | escape_prefix is an optional flag that allows escaping the prefix by | |
2948 | doubling it. |

2949 | doubling it. | |
2949 | """ |
|
2950 | """ | |
2950 | fn = fn or (lambda s: s) |
|
2951 | fn = fn or (lambda s: s) | |
2951 | patterns = b'|'.join(mapping.keys()) |
|
2952 | patterns = b'|'.join(mapping.keys()) | |
2952 | if escape_prefix: |
|
2953 | if escape_prefix: | |
2953 | patterns += b'|' + prefix |
|
2954 | patterns += b'|' + prefix | |
2954 | if len(prefix) > 1: |
|
2955 | if len(prefix) > 1: | |
2955 | prefix_char = prefix[1:] |
|
2956 | prefix_char = prefix[1:] | |
2956 | else: |
|
2957 | else: | |
2957 | prefix_char = prefix |
|
2958 | prefix_char = prefix | |
2958 | mapping[prefix_char] = prefix_char |
|
2959 | mapping[prefix_char] = prefix_char | |
2959 | r = remod.compile(br'%s(%s)' % (prefix, patterns)) |
|
2960 | r = remod.compile(br'%s(%s)' % (prefix, patterns)) | |
2960 | return r.sub(lambda x: fn(mapping[x.group()[1:]]), s) |
|
2961 | return r.sub(lambda x: fn(mapping[x.group()[1:]]), s) | |
2961 |
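Illustrative calls (the mapping values are invented here):

    mapping = {b'user': b'alice'}
    interpolate(b'%', mapping, b'hi %user')
    # -> b'hi alice'
    interpolate(br'\$', mapping, b'hi $user $$x', escape_prefix=True)
    # -> b'hi alice $x'; with escape_prefix=True the doubled prefix
    # collapses to a single literal prefix character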
|
2962 | |||
2962 |
|
2963 | |||
2963 | def getport(*args, **kwargs): |
|
2964 | def getport(*args, **kwargs): | |
2964 | msg = b'getport(...) moved to mercurial.utils.urlutil' |
|
2965 | msg = b'getport(...) moved to mercurial.utils.urlutil' | |
2965 | nouideprecwarn(msg, b'6.0', stacklevel=2) |
|
2966 | nouideprecwarn(msg, b'6.0', stacklevel=2) | |
2966 | return urlutil.getport(*args, **kwargs) |
|
2967 | return urlutil.getport(*args, **kwargs) | |
2967 |
|
2968 | |||
2968 |
|
2969 | |||
2969 | def url(*args, **kwargs): |
|
2970 | def url(*args, **kwargs): | |
2970 | msg = b'url(...) moved to mercurial.utils.urlutil' |
|
2971 | msg = b'url(...) moved to mercurial.utils.urlutil' | |
2971 | nouideprecwarn(msg, b'6.0', stacklevel=2) |
|
2972 | nouideprecwarn(msg, b'6.0', stacklevel=2) | |
2972 | return urlutil.url(*args, **kwargs) |
|
2973 | return urlutil.url(*args, **kwargs) | |
2973 |
|
2974 | |||
2974 |
|
2975 | |||
2975 | def hasscheme(*args, **kwargs): |
|
2976 | def hasscheme(*args, **kwargs): | |
2976 | msg = b'hasscheme(...) moved to mercurial.utils.urlutil' |
|
2977 | msg = b'hasscheme(...) moved to mercurial.utils.urlutil' | |
2977 | nouideprecwarn(msg, b'6.0', stacklevel=2) |
|
2978 | nouideprecwarn(msg, b'6.0', stacklevel=2) | |
2978 | return urlutil.hasscheme(*args, **kwargs) |
|
2979 | return urlutil.hasscheme(*args, **kwargs) | |
2979 |
|
2980 | |||
2980 |
|
2981 | |||
2981 | def hasdriveletter(*args, **kwargs): |
|
2982 | def hasdriveletter(*args, **kwargs): | |
2982 | msg = b'hasdriveletter(...) moved to mercurial.utils.urlutil' |
|
2983 | msg = b'hasdriveletter(...) moved to mercurial.utils.urlutil' | |
2983 | nouideprecwarn(msg, b'6.0', stacklevel=2) |
|
2984 | nouideprecwarn(msg, b'6.0', stacklevel=2) | |
2984 | return urlutil.hasdriveletter(*args, **kwargs) |
|
2985 | return urlutil.hasdriveletter(*args, **kwargs) | |
2985 |
|
2986 | |||
2986 |
|
2987 | |||
2987 | def urllocalpath(*args, **kwargs): |
|
2988 | def urllocalpath(*args, **kwargs): | |
2988 | msg = b'urllocalpath(...) moved to mercurial.utils.urlutil' |
|
2989 | msg = b'urllocalpath(...) moved to mercurial.utils.urlutil' | |
2989 | nouideprecwarn(msg, b'6.0', stacklevel=2) |
|
2990 | nouideprecwarn(msg, b'6.0', stacklevel=2) | |
2990 | return urlutil.urllocalpath(*args, **kwargs) |
|
2991 | return urlutil.urllocalpath(*args, **kwargs) | |
2991 |
|
2992 | |||
2992 |
|
2993 | |||
2993 | def checksafessh(*args, **kwargs): |
|
2994 | def checksafessh(*args, **kwargs): | |
2994 | msg = b'checksafessh(...) moved to mercurial.utils.urlutil' |
|
2995 | msg = b'checksafessh(...) moved to mercurial.utils.urlutil' | |
2995 | nouideprecwarn(msg, b'6.0', stacklevel=2) |
|
2996 | nouideprecwarn(msg, b'6.0', stacklevel=2) | |
2996 | return urlutil.checksafessh(*args, **kwargs) |
|
2997 | return urlutil.checksafessh(*args, **kwargs) | |
2997 |
|
2998 | |||
2998 |
|
2999 | |||
2999 | def hidepassword(*args, **kwargs): |
|
3000 | def hidepassword(*args, **kwargs): | |
3000 | msg = b'hidepassword(...) moved to mercurial.utils.urlutil' |
|
3001 | msg = b'hidepassword(...) moved to mercurial.utils.urlutil' | |
3001 | nouideprecwarn(msg, b'6.0', stacklevel=2) |
|
3002 | nouideprecwarn(msg, b'6.0', stacklevel=2) | |
3002 | return urlutil.hidepassword(*args, **kwargs) |
|
3003 | return urlutil.hidepassword(*args, **kwargs) | |
3003 |
|
3004 | |||
3004 |
|
3005 | |||
3005 | def removeauth(*args, **kwargs): |
|
3006 | def removeauth(*args, **kwargs): | |
3006 | msg = b'removeauth(...) moved to mercurial.utils.urlutil' |
|
3007 | msg = b'removeauth(...) moved to mercurial.utils.urlutil' | |
3007 | nouideprecwarn(msg, b'6.0', stacklevel=2) |
|
3008 | nouideprecwarn(msg, b'6.0', stacklevel=2) | |
3008 | return urlutil.removeauth(*args, **kwargs) |
|
3009 | return urlutil.removeauth(*args, **kwargs) | |
3009 |
|
3010 | |||
3010 |
|
3011 | |||
3011 | timecount = unitcountfn( |
|
3012 | timecount = unitcountfn( | |
3012 | (1, 1e3, _(b'%.0f s')), |
|
3013 | (1, 1e3, _(b'%.0f s')), | |
3013 | (100, 1, _(b'%.1f s')), |
|
3014 | (100, 1, _(b'%.1f s')), | |
3014 | (10, 1, _(b'%.2f s')), |
|
3015 | (10, 1, _(b'%.2f s')), | |
3015 | (1, 1, _(b'%.3f s')), |
|
3016 | (1, 1, _(b'%.3f s')), | |
3016 | (100, 0.001, _(b'%.1f ms')), |
|
3017 | (100, 0.001, _(b'%.1f ms')), | |
3017 | (10, 0.001, _(b'%.2f ms')), |
|
3018 | (10, 0.001, _(b'%.2f ms')), | |
3018 | (1, 0.001, _(b'%.3f ms')), |
|
3019 | (1, 0.001, _(b'%.3f ms')), | |
3019 | (100, 0.000001, _(b'%.1f us')), |
|
3020 | (100, 0.000001, _(b'%.1f us')), | |
3020 | (10, 0.000001, _(b'%.2f us')), |
|
3021 | (10, 0.000001, _(b'%.2f us')), | |
3021 | (1, 0.000001, _(b'%.3f us')), |
|
3022 | (1, 0.000001, _(b'%.3f us')), | |
3022 | (100, 0.000000001, _(b'%.1f ns')), |
|
3023 | (100, 0.000000001, _(b'%.1f ns')), | |
3023 | (10, 0.000000001, _(b'%.2f ns')), |
|
3024 | (10, 0.000000001, _(b'%.2f ns')), | |
3024 | (1, 0.000000001, _(b'%.3f ns')), |
|
3025 | (1, 0.000000001, _(b'%.3f ns')), | |
3025 | ) |
|
3026 | ) | |
3026 |
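Assuming unitcountfn keeps its usual semantics here (the first row whose threshold the value meets wins), this renders for example:

    timecount(42)      # -> b'42.00 s'  (the (10, 1) row)
    timecount(0.0042)  # -> b'4.200 ms' (the (1, 0.001) row)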
|
3027 | |||
3027 |
|
3028 | |||
3028 | @attr.s |
|
3029 | @attr.s | |
3029 | class timedcmstats(object): |
|
3030 | class timedcmstats(object): | |
3030 | """Stats information produced by the timedcm context manager on entering.""" |
|
3031 | """Stats information produced by the timedcm context manager on entering.""" | |
3031 |
|
3032 | |||
3032 | # the starting value of the timer as a float (meaning and resolution are |

3033 | # the starting value of the timer as a float (meaning and resolution are | |
3033 | # platform dependent, see util.timer) |
|
3034 | # platform dependent, see util.timer) | |
3034 | start = attr.ib(default=attr.Factory(lambda: timer())) |
|
3035 | start = attr.ib(default=attr.Factory(lambda: timer())) | |
3035 | # the number of seconds as a floating point value; starts at 0, updated when |
|
3036 | # the number of seconds as a floating point value; starts at 0, updated when | |
3036 | # the context is exited. |
|
3037 | # the context is exited. | |
3037 | elapsed = attr.ib(default=0) |
|
3038 | elapsed = attr.ib(default=0) | |
3038 | # the number of nested timedcm context managers. |
|
3039 | # the number of nested timedcm context managers. | |
3039 | level = attr.ib(default=1) |
|
3040 | level = attr.ib(default=1) | |
3040 |
|
3041 | |||
3041 | def __bytes__(self): |
|
3042 | def __bytes__(self): | |
3042 | return timecount(self.elapsed) if self.elapsed else b'<unknown>' |
|
3043 | return timecount(self.elapsed) if self.elapsed else b'<unknown>' | |
3043 |
|
3044 | |||
3044 | __str__ = encoding.strmethod(__bytes__) |
|
3045 | __str__ = encoding.strmethod(__bytes__) | |
3045 |
|
3046 | |||
3046 |
|
3047 | |||
3047 | @contextlib.contextmanager |
|
3048 | @contextlib.contextmanager | |
3048 | def timedcm(whencefmt, *whenceargs): |
|
3049 | def timedcm(whencefmt, *whenceargs): | |
3049 | """A context manager that produces timing information for a given context. |
|
3050 | """A context manager that produces timing information for a given context. | |
3050 |
|
3051 | |||
3051 | On entering a timedcmstats instance is produced. |
|
3052 | On entering a timedcmstats instance is produced. | |
3052 |
|
3053 | |||
3053 | This context manager is reentrant. |
|
3054 | This context manager is reentrant. | |
3054 |
|
3055 | |||
3055 | """ |
|
3056 | """ | |
3056 | # track nested context managers |
|
3057 | # track nested context managers | |
3057 | timedcm._nested += 1 |
|
3058 | timedcm._nested += 1 | |
3058 | timing_stats = timedcmstats(level=timedcm._nested) |
|
3059 | timing_stats = timedcmstats(level=timedcm._nested) | |
3059 | try: |
|
3060 | try: | |
3060 | with tracing.log(whencefmt, *whenceargs): |
|
3061 | with tracing.log(whencefmt, *whenceargs): | |
3061 | yield timing_stats |
|
3062 | yield timing_stats | |
3062 | finally: |
|
3063 | finally: | |
3063 | timing_stats.elapsed = timer() - timing_stats.start |
|
3064 | timing_stats.elapsed = timer() - timing_stats.start | |
3064 | timedcm._nested -= 1 |
|
3065 | timedcm._nested -= 1 | |
3065 |
|
3066 | |||
3066 |
|
3067 | |||
3067 | timedcm._nested = 0 |
|
3068 | timedcm._nested = 0 | |
3068 |
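Usage sketch: the stats object is produced on entry, but elapsed is only filled in when the block exits, so read it afterwards (the timed call is a placeholder):

    with timedcm(b'load %s', b'manifest') as stats:
        load_manifest()
    print(bytes(stats))  # e.g. b'1.234 ms'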
|
3069 | |||
3069 |
|
3070 | |||
3070 | def timed(func): |
|
3071 | def timed(func): | |
3071 | """Report the execution time of a function call to stderr. |
|
3072 | """Report the execution time of a function call to stderr. | |
3072 |
|
3073 | |||
3073 | During development, use as a decorator when you need to measure |
|
3074 | During development, use as a decorator when you need to measure | |
3074 | the cost of a function, e.g. as follows: |
|
3075 | the cost of a function, e.g. as follows: | |
3075 |
|
3076 | |||
3076 | @util.timed |
|
3077 | @util.timed | |
3077 | def foo(a, b, c): |
|
3078 | def foo(a, b, c): | |
3078 | pass |
|
3079 | pass | |
3079 | """ |
|
3080 | """ | |
3080 |
|
3081 | |||
3081 | def wrapper(*args, **kwargs): |
|
3082 | def wrapper(*args, **kwargs): | |
3082 | with timedcm(pycompat.bytestr(func.__name__)) as time_stats: |
|
3083 | with timedcm(pycompat.bytestr(func.__name__)) as time_stats: | |
3083 | result = func(*args, **kwargs) |
|
3084 | result = func(*args, **kwargs) | |
3084 | stderr = procutil.stderr |
|
3085 | stderr = procutil.stderr | |
3085 | stderr.write( |
|
3086 | stderr.write( | |
3086 | b'%s%s: %s\n' |
|
3087 | b'%s%s: %s\n' | |
3087 | % ( |
|
3088 | % ( | |
3088 | b' ' * time_stats.level * 2, |
|
3089 | b' ' * time_stats.level * 2, | |
3089 | pycompat.bytestr(func.__name__), |
|
3090 | pycompat.bytestr(func.__name__), | |
3090 | time_stats, |
|
3091 | time_stats, | |
3091 | ) |
|
3092 | ) | |
3092 | ) |
|
3093 | ) | |
3093 | return result |
|
3094 | return result | |
3094 |
|
3095 | |||
3095 | return wrapper |
|
3096 | return wrapper | |
3096 |
|
3097 | |||
3097 |
|
3098 | |||
3098 | _sizeunits = ( |
|
3099 | _sizeunits = ( | |
3099 | (b'm', 2 ** 20), |
|
3100 | (b'm', 2 ** 20), | |
3100 | (b'k', 2 ** 10), |
|
3101 | (b'k', 2 ** 10), | |
3101 | (b'g', 2 ** 30), |
|
3102 | (b'g', 2 ** 30), | |
3102 | (b'kb', 2 ** 10), |
|
3103 | (b'kb', 2 ** 10), | |
3103 | (b'mb', 2 ** 20), |
|
3104 | (b'mb', 2 ** 20), | |
3104 | (b'gb', 2 ** 30), |
|
3105 | (b'gb', 2 ** 30), | |
3105 | (b'b', 1), |
|
3106 | (b'b', 1), | |
3106 | ) |
|
3107 | ) | |
3107 |
|
3108 | |||
3108 |
|
3109 | |||
3109 | def sizetoint(s): |
|
3110 | def sizetoint(s): | |
3110 | # type: (bytes) -> int |
|
3111 | # type: (bytes) -> int | |
3111 | """Convert a space specifier to a byte count. |
|
3112 | """Convert a space specifier to a byte count. | |
3112 |
|
3113 | |||
3113 | >>> sizetoint(b'30') |
|
3114 | >>> sizetoint(b'30') | |
3114 | 30 |
|
3115 | 30 | |
3115 | >>> sizetoint(b'2.2kb') |
|
3116 | >>> sizetoint(b'2.2kb') | |
3116 | 2252 |
|
3117 | 2252 | |
3117 | >>> sizetoint(b'6M') |
|
3118 | >>> sizetoint(b'6M') | |
3118 | 6291456 |
|
3119 | 6291456 | |
3119 | """ |
|
3120 | """ | |
3120 | t = s.strip().lower() |
|
3121 | t = s.strip().lower() | |
3121 | try: |
|
3122 | try: | |
3122 | for k, u in _sizeunits: |
|
3123 | for k, u in _sizeunits: | |
3123 | if t.endswith(k): |
|
3124 | if t.endswith(k): | |
3124 | return int(float(t[: -len(k)]) * u) |
|
3125 | return int(float(t[: -len(k)]) * u) | |
3125 | return int(t) |
|
3126 | return int(t) | |
3126 | except ValueError: |
|
3127 | except ValueError: | |
3127 | raise error.ParseError(_(b"couldn't parse size: %s") % s) |
|
3128 | raise error.ParseError(_(b"couldn't parse size: %s") % s) | |
3128 |
|
3129 | |||
3129 |
|
3130 | |||
3130 | class hooks(object): |
|
3131 | class hooks(object): | |
3131 | """A collection of hook functions that can be used to extend a |
|
3132 | """A collection of hook functions that can be used to extend a | |
3132 | function's behavior. Hooks are called in lexicographic order, |
|
3133 | function's behavior. Hooks are called in lexicographic order, | |
3133 | based on the names of their sources.""" |
|
3134 | based on the names of their sources.""" | |
3134 |
|
3135 | |||
3135 | def __init__(self): |
|
3136 | def __init__(self): | |
3136 | self._hooks = [] |
|
3137 | self._hooks = [] | |
3137 |
|
3138 | |||
3138 | def add(self, source, hook): |
|
3139 | def add(self, source, hook): | |
3139 | self._hooks.append((source, hook)) |
|
3140 | self._hooks.append((source, hook)) | |
3140 |
|
3141 | |||
3141 | def __call__(self, *args): |
|
3142 | def __call__(self, *args): | |
3142 | self._hooks.sort(key=lambda x: x[0]) |
|
3143 | self._hooks.sort(key=lambda x: x[0]) | |
3143 | results = [] |
|
3144 | results = [] | |
3144 | for source, hook in self._hooks: |
|
3145 | for source, hook in self._hooks: | |
3145 | results.append(hook(*args)) |
|
3146 | results.append(hook(*args)) | |
3146 | return results |
|
3147 | return results | |
3147 |
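For example (the source names are invented for illustration):

    h = hooks()
    h.add(b'zz-ext', lambda v: v * 2)
    h.add(b'aa-ext', lambda v: v + 1)
    h(3)  # -> [4, 6]: b'aa-ext' runs first because sources sort lexicographically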
|
3148 | |||
3148 |
|
3149 | |||
3149 | def getstackframes(skip=0, line=b' %-*s in %s\n', fileline=b'%s:%d', depth=0): |
|
3150 | def getstackframes(skip=0, line=b' %-*s in %s\n', fileline=b'%s:%d', depth=0): | |
3150 | """Yields lines for a nicely formatted stacktrace. |
|
3151 | """Yields lines for a nicely formatted stacktrace. | |
3151 | Skips the 'skip' last entries, then returns the last 'depth' entries. |

3152 | Skips the 'skip' last entries, then returns the last 'depth' entries. | |
3152 | Each file+linenumber is formatted according to fileline. |
|
3153 | Each file+linenumber is formatted according to fileline. | |
3153 | Each line is formatted according to line. |
|
3154 | Each line is formatted according to line. | |
3154 | If line is None, it yields: |
|
3155 | If line is None, it yields: | |
3155 | length of longest filepath+line number, |
|
3156 | length of longest filepath+line number, | |
3156 | filepath+linenumber, |
|
3157 | filepath+linenumber, | |
3157 | function |
|
3158 | function | |
3158 |
|
3159 | |||
3159 | Not to be used in production code, but very convenient while developing. |

3160 | Not to be used in production code, but very convenient while developing. | |
3160 | """ |
|
3161 | """ | |
3161 | entries = [ |
|
3162 | entries = [ | |
3162 | (fileline % (pycompat.sysbytes(fn), ln), pycompat.sysbytes(func)) |
|
3163 | (fileline % (pycompat.sysbytes(fn), ln), pycompat.sysbytes(func)) | |
3163 | for fn, ln, func, _text in traceback.extract_stack()[: -skip - 1] |
|
3164 | for fn, ln, func, _text in traceback.extract_stack()[: -skip - 1] | |
3164 | ][-depth:] |
|
3165 | ][-depth:] | |
3165 | if entries: |
|
3166 | if entries: | |
3166 | fnmax = max(len(entry[0]) for entry in entries) |
|
3167 | fnmax = max(len(entry[0]) for entry in entries) | |
3167 | for fnln, func in entries: |
|
3168 | for fnln, func in entries: | |
3168 | if line is None: |
|
3169 | if line is None: | |
3169 | yield (fnmax, fnln, func) |
|
3170 | yield (fnmax, fnln, func) | |
3170 | else: |
|
3171 | else: | |
3171 | yield line % (fnmax, fnln, func) |
|
3172 | yield line % (fnmax, fnln, func) | |
3172 |
|
3173 | |||
3173 |
|
3174 | |||
3174 | def debugstacktrace( |
|
3175 | def debugstacktrace( | |
3175 | msg=b'stacktrace', |
|
3176 | msg=b'stacktrace', | |
3176 | skip=0, |
|
3177 | skip=0, | |
3177 | f=procutil.stderr, |
|
3178 | f=procutil.stderr, | |
3178 | otherf=procutil.stdout, |
|
3179 | otherf=procutil.stdout, | |
3179 | depth=0, |
|
3180 | depth=0, | |
3180 | prefix=b'', |
|
3181 | prefix=b'', | |
3181 | ): |
|
3182 | ): | |
3182 | """Writes a message to f (stderr) with a nicely formatted stacktrace. |
|
3183 | """Writes a message to f (stderr) with a nicely formatted stacktrace. | |
3183 | Skips the 'skip' entries closest to the call, then shows 'depth' entries. |

3184 | Skips the 'skip' entries closest to the call, then shows 'depth' entries. | |
3184 | By default it will flush stdout first. |
|
3185 | By default it will flush stdout first. | |
3185 | It can be used everywhere and intentionally does not require a ui object. |

3186 | It can be used everywhere and intentionally does not require a ui object. | |
3186 | Not to be used in production code, but very convenient while developing. |

3187 | Not to be used in production code, but very convenient while developing. | |
3187 | """ |
|
3188 | """ | |
3188 | if otherf: |
|
3189 | if otherf: | |
3189 | otherf.flush() |
|
3190 | otherf.flush() | |
3190 | f.write(b'%s%s at:\n' % (prefix, msg.rstrip())) |
|
3191 | f.write(b'%s%s at:\n' % (prefix, msg.rstrip())) | |
3191 | for line in getstackframes(skip + 1, depth=depth): |
|
3192 | for line in getstackframes(skip + 1, depth=depth): | |
3192 | f.write(prefix + line) |
|
3193 | f.write(prefix + line) | |
3193 | f.flush() |
|
3194 | f.flush() | |
3194 |
|
3195 | |||
3195 |
|
3196 | |||
3196 | # convenient shortcut |
|
3197 | # convenient shortcut | |
3197 | dst = debugstacktrace |
|
3198 | dst = debugstacktrace | |
3198 |
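Typical development use, dropped at a suspect call site (the message is arbitrary; callers outside this module would spell it util.dst):

    dst(b'entered fast path', depth=4)
    # writes 'entered fast path at:' plus the four innermost frames to stderr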
|
3199 | |||
3199 |
|
3200 | |||
3200 | def safename(f, tag, ctx, others=None): |
|
3201 | def safename(f, tag, ctx, others=None): | |
3201 | """ |
|
3202 | """ | |
3202 | Generate a name that it is safe to rename f to in the given context. |
|
3203 | Generate a name that it is safe to rename f to in the given context. | |
3203 |
|
3204 | |||
3204 | f: filename to rename |
|
3205 | f: filename to rename | |
3205 | tag: a string tag that will be included in the new name |
|
3206 | tag: a string tag that will be included in the new name | |
3206 | ctx: a context, in which the new name must not exist |
|
3207 | ctx: a context, in which the new name must not exist | |
3207 | others: a set of other filenames that the new name must not be in |
|
3208 | others: a set of other filenames that the new name must not be in | |
3208 |
|
3209 | |||
3209 | Returns a file name of the form oldname~tag[~number] which does not exist |
|
3210 | Returns a file name of the form oldname~tag[~number] which does not exist | |
3210 | in the provided context and is not in the set of other names. |
|
3211 | in the provided context and is not in the set of other names. | |
3211 | """ |
|
3212 | """ | |
3212 | if others is None: |
|
3213 | if others is None: | |
3213 | others = set() |
|
3214 | others = set() | |
3214 |
|
3215 | |||
3215 | fn = b'%s~%s' % (f, tag) |
|
3216 | fn = b'%s~%s' % (f, tag) | |
3216 | if fn not in ctx and fn not in others: |
|
3217 | if fn not in ctx and fn not in others: | |
3217 | return fn |
|
3218 | return fn | |
3218 | for n in itertools.count(1): |
|
3219 | for n in itertools.count(1): | |
3219 | fn = b'%s~%s~%s' % (f, tag, n) |
|
3220 | fn = b'%s~%s~%s' % (f, tag, n) | |
3220 | if fn not in ctx and fn not in others: |
|
3221 | if fn not in ctx and fn not in others: | |
3221 | return fn |
|
3222 | return fn | |
3222 |
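A self-contained sketch (fakectx stands in for a real changectx; anything supporting `in` works as the context here):

    class fakectx(object):
        def __contains__(self, f):
            return f == b'foo~bak'

    safename(b'foo', b'bak', fakectx(), others={b'foo~bak~1'})
    # -> b'foo~bak~2': the plain name is taken in the context and the ~1
    # variant is excluded via `others`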
|
3223 | |||
3223 |
|
3224 | |||
3224 | def readexactly(stream, n): |
|
3225 | def readexactly(stream, n): | |
3225 | '''read n bytes from stream.read and abort if fewer were available''' |

3226 | '''read n bytes from stream.read and abort if fewer were available''' | |
3226 | s = stream.read(n) |
|
3227 | s = stream.read(n) | |
3227 | if len(s) < n: |
|
3228 | if len(s) < n: | |
3228 | raise error.Abort( |
|
3229 | raise error.Abort( | |
3229 | _(b"stream ended unexpectedly (got %d bytes, expected %d)") |
|
3230 | _(b"stream ended unexpectedly (got %d bytes, expected %d)") | |
3230 | % (len(s), n) |
|
3231 | % (len(s), n) | |
3231 | ) |
|
3232 | ) | |
3232 | return s |
|
3233 | return s | |
3233 |
|
3234 | |||
3234 |
|
3235 | |||
3235 | def uvarintencode(value): |
|
3236 | def uvarintencode(value): | |
3236 | """Encode an unsigned integer value to a varint. |
|
3237 | """Encode an unsigned integer value to a varint. | |
3237 |
|
3238 | |||
3238 | A varint is a variable length integer of 1 or more bytes. Each byte |
|
3239 | A varint is a variable length integer of 1 or more bytes. Each byte | |
3239 | except the last has the most significant bit set. The lower 7 bits of |
|
3240 | except the last has the most significant bit set. The lower 7 bits of | |
3240 | each byte store the 2's complement representation, least significant group |
|
3241 | each byte store the 2's complement representation, least significant group | |
3241 | first. |
|
3242 | first. | |
3242 |
|
3243 | |||
3243 | >>> uvarintencode(0) |
|
3244 | >>> uvarintencode(0) | |
3244 | '\\x00' |
|
3245 | '\\x00' | |
3245 | >>> uvarintencode(1) |
|
3246 | >>> uvarintencode(1) | |
3246 | '\\x01' |
|
3247 | '\\x01' | |
3247 | >>> uvarintencode(127) |
|
3248 | >>> uvarintencode(127) | |
3248 | '\\x7f' |
|
3249 | '\\x7f' | |
3249 | >>> uvarintencode(1337) |
|
3250 | >>> uvarintencode(1337) | |
3250 | '\\xb9\\n' |
|
3251 | '\\xb9\\n' | |
3251 | >>> uvarintencode(65536) |
|
3252 | >>> uvarintencode(65536) | |
3252 | '\\x80\\x80\\x04' |
|
3253 | '\\x80\\x80\\x04' | |
3253 | >>> uvarintencode(-1) |
|
3254 | >>> uvarintencode(-1) | |
3254 | Traceback (most recent call last): |
|
3255 | Traceback (most recent call last): | |
3255 | ... |
|
3256 | ... | |
3256 | ProgrammingError: negative value for uvarint: -1 |
|
3257 | ProgrammingError: negative value for uvarint: -1 | |
3257 | """ |
|
3258 | """ | |
3258 | if value < 0: |
|
3259 | if value < 0: | |
3259 | raise error.ProgrammingError(b'negative value for uvarint: %d' % value) |
|
3260 | raise error.ProgrammingError(b'negative value for uvarint: %d' % value) | |
3260 | bits = value & 0x7F |
|
3261 | bits = value & 0x7F | |
3261 | value >>= 7 |
|
3262 | value >>= 7 | |
3262 | bytes = [] |
|
3263 | bytes = [] | |
3263 | while value: |
|
3264 | while value: | |
3264 | bytes.append(pycompat.bytechr(0x80 | bits)) |
|
3265 | bytes.append(pycompat.bytechr(0x80 | bits)) | |
3265 | bits = value & 0x7F |
|
3266 | bits = value & 0x7F | |
3266 | value >>= 7 |
|
3267 | value >>= 7 | |
3267 | bytes.append(pycompat.bytechr(bits)) |
|
3268 | bytes.append(pycompat.bytechr(bits)) | |
3268 |
|
3269 | |||
3269 | return b''.join(bytes) |
|
3270 | return b''.join(bytes) | |
3270 |
|
3271 | |||
3271 |
|
3272 | |||
3272 | def uvarintdecodestream(fh): |
|
3273 | def uvarintdecodestream(fh): | |
3273 | """Decode an unsigned variable length integer from a stream. |
|
3274 | """Decode an unsigned variable length integer from a stream. | |
3274 |
|
3275 | |||
3275 | The passed argument is anything that has a ``.read(N)`` method. |
|
3276 | The passed argument is anything that has a ``.read(N)`` method. | |
3276 |
|
3277 | |||
3277 | >>> try: |
|
3278 | >>> try: | |
3278 | ... from StringIO import StringIO as BytesIO |
|
3279 | ... from StringIO import StringIO as BytesIO | |
3279 | ... except ImportError: |
|
3280 | ... except ImportError: | |
3280 | ... from io import BytesIO |
|
3281 | ... from io import BytesIO | |
3281 | >>> uvarintdecodestream(BytesIO(b'\\x00')) |
|
3282 | >>> uvarintdecodestream(BytesIO(b'\\x00')) | |
3282 | 0 |
|
3283 | 0 | |
3283 | >>> uvarintdecodestream(BytesIO(b'\\x01')) |
|
3284 | >>> uvarintdecodestream(BytesIO(b'\\x01')) | |
3284 | 1 |
|
3285 | 1 | |
3285 | >>> uvarintdecodestream(BytesIO(b'\\x7f')) |
|
3286 | >>> uvarintdecodestream(BytesIO(b'\\x7f')) | |
3286 | 127 |
|
3287 | 127 | |
3287 | >>> uvarintdecodestream(BytesIO(b'\\xb9\\n')) |
|
3288 | >>> uvarintdecodestream(BytesIO(b'\\xb9\\n')) | |
3288 | 1337 |
|
3289 | 1337 | |
3289 | >>> uvarintdecodestream(BytesIO(b'\\x80\\x80\\x04')) |
|
3290 | >>> uvarintdecodestream(BytesIO(b'\\x80\\x80\\x04')) | |
3290 | 65536 |
|
3291 | 65536 | |
3291 | >>> uvarintdecodestream(BytesIO(b'\\x80')) |
|
3292 | >>> uvarintdecodestream(BytesIO(b'\\x80')) | |
3292 | Traceback (most recent call last): |
|
3293 | Traceback (most recent call last): | |
3293 | ... |
|
3294 | ... | |
3294 | Abort: stream ended unexpectedly (got 0 bytes, expected 1) |
|
3295 | Abort: stream ended unexpectedly (got 0 bytes, expected 1) | |
3295 | """ |
|
3296 | """ | |
3296 | result = 0 |
|
3297 | result = 0 | |
3297 | shift = 0 |
|
3298 | shift = 0 | |
3298 | while True: |
|
3299 | while True: | |
3299 | byte = ord(readexactly(fh, 1)) |
|
3300 | byte = ord(readexactly(fh, 1)) | |
3300 | result |= (byte & 0x7F) << shift |
|
3301 | result |= (byte & 0x7F) << shift | |
3301 | if not (byte & 0x80): |
|
3302 | if not (byte & 0x80): | |
3302 | return result |
|
3303 | return result | |
3303 | shift += 7 |
|
3304 | shift += 7 | |
3304 |
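The two functions round-trip, which makes for a compact self-check:

    from io import BytesIO

    for value in (0, 1, 127, 1337, 65536):
        assert uvarintdecodestream(BytesIO(uvarintencode(value))) == value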
|
3305 | |||
3305 |
|
3306 | |||
3306 | # Passing the '' locale means that the locale should be set according to the |
|
3307 | # Passing the '' locale means that the locale should be set according to the | |
3307 | # user settings (environment variables). |
|
3308 | # user settings (environment variables). | |
3308 | # Python sometimes avoids setting the global locale settings. When interfacing |
|
3309 | # Python sometimes avoids setting the global locale settings. When interfacing | |
3309 | # with C code (e.g. the curses module or the Subversion bindings), the global |
|
3310 | # with C code (e.g. the curses module or the Subversion bindings), the global | |
3310 | # locale settings must be initialized correctly. Python 2 does not initialize |
|
3311 | # locale settings must be initialized correctly. Python 2 does not initialize | |
3311 | # the global locale settings on interpreter startup. Python 3 sometimes |
|
3312 | # the global locale settings on interpreter startup. Python 3 sometimes | |
3312 | # initializes LC_CTYPE, but not consistently at least on Windows. Therefore we |
|
3313 | # initializes LC_CTYPE, but not consistently at least on Windows. Therefore we | |
3313 | # explicitly initialize it to get consistent behavior if it's not already |
|
3314 | # explicitly initialize it to get consistent behavior if it's not already | |
3314 | # initialized. Since CPython commit 177d921c8c03d30daa32994362023f777624b10d, |
|
3315 | # initialized. Since CPython commit 177d921c8c03d30daa32994362023f777624b10d, | |
3315 | # LC_CTYPE is always initialized. If we require Python 3.8+, we should re-check |
|
3316 | # LC_CTYPE is always initialized. If we require Python 3.8+, we should re-check | |
3316 | # if we can remove this code. |
|
3317 | # if we can remove this code. | |
3317 | @contextlib.contextmanager |
|
3318 | @contextlib.contextmanager | |
3318 | def with_lc_ctype(): |
|
3319 | def with_lc_ctype(): | |
3319 | oldloc = locale.setlocale(locale.LC_CTYPE, None) |
|
3320 | oldloc = locale.setlocale(locale.LC_CTYPE, None) | |
3320 | if oldloc == 'C': |
|
3321 | if oldloc == 'C': | |
3321 | try: |
|
3322 | try: | |
3322 | try: |
|
3323 | try: | |
3323 | locale.setlocale(locale.LC_CTYPE, '') |
|
3324 | locale.setlocale(locale.LC_CTYPE, '') | |
3324 | except locale.Error: |
|
3325 | except locale.Error: | |
3325 | # The likely case is that the locale from the environment |
|
3326 | # The likely case is that the locale from the environment | |
3326 | # variables is unknown. |
|
3327 | # variables is unknown. | |
3327 | pass |
|
3328 | pass | |
3328 | yield |
|
3329 | yield | |
3329 | finally: |
|
3330 | finally: | |
3330 | locale.setlocale(locale.LC_CTYPE, oldloc) |
|
3331 | locale.setlocale(locale.LC_CTYPE, oldloc) | |
3331 | else: |
|
3332 | else: | |
3332 | yield |
|
3333 | yield | |
3333 |
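The intended use is wrapping calls into LC_CTYPE-sensitive C code, e.g. (illustrative only; curses is just one such consumer):

    with with_lc_ctype():
        import curses
        curses.setupterm()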
|
3334 | |||
3334 |
|
3335 | |||
3335 | def _estimatememory(): |
|
3336 | def _estimatememory(): | |
3336 | # type: () -> Optional[int] |
|
3337 | # type: () -> Optional[int] | |
3337 | """Provide an estimate for the available system memory in Bytes. |
|
3338 | """Provide an estimate for the available system memory in Bytes. | |
3338 |
|
3339 | |||
3339 | If no estimate can be provided on the platform, returns None. |
|
3340 | If no estimate can be provided on the platform, returns None. | |
3340 | """ |
|
3341 | """ | |
3341 | if pycompat.sysplatform.startswith(b'win'): |
|
3342 | if pycompat.sysplatform.startswith(b'win'): | |
3342 | # On Windows, use the GlobalMemoryStatusEx kernel function directly. |
|
3343 | # On Windows, use the GlobalMemoryStatusEx kernel function directly. | |
3343 | from ctypes import c_long as DWORD, c_ulonglong as DWORDLONG |
|
3344 | from ctypes import c_long as DWORD, c_ulonglong as DWORDLONG | |
3344 | from ctypes import ( # not importable from ctypes.wintypes on Python 3 |

3345 | from ctypes import ( # not importable from ctypes.wintypes on Python 3 | |
3345 | Structure, |
|
3346 | Structure, | |
3346 | byref, |
|
3347 | byref, | |
3347 | sizeof, |
|
3348 | sizeof, | |
3348 | windll, |
|
3349 | windll, | |
3349 | ) |
|
3350 | ) | |
3350 |
|
3351 | |||
3351 | class MEMORYSTATUSEX(Structure): |
|
3352 | class MEMORYSTATUSEX(Structure): | |
3352 | _fields_ = [ |
|
3353 | _fields_ = [ | |
3353 | ('dwLength', DWORD), |
|
3354 | ('dwLength', DWORD), | |
3354 | ('dwMemoryLoad', DWORD), |
|
3355 | ('dwMemoryLoad', DWORD), | |
3355 | ('ullTotalPhys', DWORDLONG), |
|
3356 | ('ullTotalPhys', DWORDLONG), | |
3356 | ('ullAvailPhys', DWORDLONG), |
|
3357 | ('ullAvailPhys', DWORDLONG), | |
3357 | ('ullTotalPageFile', DWORDLONG), |
|
3358 | ('ullTotalPageFile', DWORDLONG), | |
3358 | ('ullAvailPageFile', DWORDLONG), |
|
3359 | ('ullAvailPageFile', DWORDLONG), | |
3359 | ('ullTotalVirtual', DWORDLONG), |
|
3360 | ('ullTotalVirtual', DWORDLONG), | |
3360 | ('ullAvailVirtual', DWORDLONG), |
|
3361 | ('ullAvailVirtual', DWORDLONG), | |
3361 | ('ullExtendedVirtual', DWORDLONG), |
|
3362 | ('ullExtendedVirtual', DWORDLONG), | |
3362 | ] |
|
3363 | ] | |
3363 |
|
3364 | |||
3364 | x = MEMORYSTATUSEX() |
|
3365 | x = MEMORYSTATUSEX() | |
3365 | x.dwLength = sizeof(x) |
|
3366 | x.dwLength = sizeof(x) | |
3366 | windll.kernel32.GlobalMemoryStatusEx(byref(x)) |
|
3367 | windll.kernel32.GlobalMemoryStatusEx(byref(x)) | |
3367 | return x.ullAvailPhys |
|
3368 | return x.ullAvailPhys | |
3368 |
|
3369 | |||
3369 | # On newer Unix-like systems and Mac OS X, the sysconf interface |

3370 | # On newer Unix-like systems and Mac OS X, the sysconf interface | |
3370 | # can be used. _SC_PAGE_SIZE is part of POSIX; _SC_PHYS_PAGES |
|
3371 | # can be used. _SC_PAGE_SIZE is part of POSIX; _SC_PHYS_PAGES | |
3371 | # seems to be implemented on most systems. |
|
3372 | # seems to be implemented on most systems. | |
3372 | try: |
|
3373 | try: | |
3373 | pagesize = os.sysconf(os.sysconf_names['SC_PAGE_SIZE']) |
|
3374 | pagesize = os.sysconf(os.sysconf_names['SC_PAGE_SIZE']) | |
3374 | pages = os.sysconf(os.sysconf_names['SC_PHYS_PAGES']) |
|
3375 | pages = os.sysconf(os.sysconf_names['SC_PHYS_PAGES']) | |
3375 | return pagesize * pages |
|
3376 | return pagesize * pages | |
3376 | except OSError: # sysconf can fail |
|
3377 | except OSError: # sysconf can fail | |
3377 | pass |
|
3378 | pass | |
3378 | except KeyError: # unknown parameter |
|
3379 | except KeyError: # unknown parameter | |
3379 | pass |
|
3380 | pass |
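Caller-side sketch (the ui object here is an assumption, not part of this hunk; any debug-style sink works):

    mem = _estimatememory()
    if mem is not None:
        ui.debug(b'estimated available memory: %d bytes\n' % mem)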
@@ -1,691 +1,713 b'' | |||||
1 | # windows.py - Windows utility function implementations for Mercurial |
|
1 | # windows.py - Windows utility function implementations for Mercurial | |
2 | # |
|
2 | # | |
3 | # Copyright 2005-2009 Olivia Mackall <olivia@selenic.com> and others |
|
3 | # Copyright 2005-2009 Olivia Mackall <olivia@selenic.com> and others | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | from __future__ import absolute_import |
|
8 | from __future__ import absolute_import | |
9 |
|
9 | |||
10 | import errno |
|
10 | import errno | |
11 | import getpass |
|
11 | import getpass | |
12 | import msvcrt |
|
12 | import msvcrt | |
13 | import os |
|
13 | import os | |
14 | import re |
|
14 | import re | |
15 | import stat |
|
15 | import stat | |
16 | import string |
|
16 | import string | |
17 | import sys |
|
17 | import sys | |
18 |
|
18 | |||
19 | from .i18n import _ |
|
19 | from .i18n import _ | |
20 | from .pycompat import getattr |
|
20 | from .pycompat import getattr | |
21 | from . import ( |
|
21 | from . import ( | |
22 | encoding, |
|
22 | encoding, | |
23 | error, |
|
23 | error, | |
24 | policy, |
|
24 | policy, | |
25 | pycompat, |
|
25 | pycompat, | |
26 | win32, |
|
26 | win32, | |
27 | ) |
|
27 | ) | |
28 |
|
28 | |||
29 | try: |
|
29 | try: | |
30 | import _winreg as winreg # pytype: disable=import-error |
|
30 | import _winreg as winreg # pytype: disable=import-error | |
31 |
|
31 | |||
32 | winreg.CloseKey |
|
32 | winreg.CloseKey | |
33 | except ImportError: |
|
33 | except ImportError: | |
34 | # py2 only |
|
34 | # py2 only | |
35 | import winreg # pytype: disable=import-error |
|
35 | import winreg # pytype: disable=import-error | |
36 |
|
36 | |||
37 | osutil = policy.importmod('osutil') |
|
37 | osutil = policy.importmod('osutil') | |
38 |
|
38 | |||
39 | getfsmountpoint = win32.getvolumename |
|
39 | getfsmountpoint = win32.getvolumename | |
40 | getfstype = win32.getfstype |
|
40 | getfstype = win32.getfstype | |
41 | getuser = win32.getuser |
|
41 | getuser = win32.getuser | |
42 | hidewindow = win32.hidewindow |
|
42 | hidewindow = win32.hidewindow | |
43 | makedir = win32.makedir |
|
43 | makedir = win32.makedir | |
44 | nlinks = win32.nlinks |
|
44 | nlinks = win32.nlinks | |
45 | oslink = win32.oslink |
|
45 | oslink = win32.oslink | |
46 | samedevice = win32.samedevice |
|
46 | samedevice = win32.samedevice | |
47 | samefile = win32.samefile |
|
47 | samefile = win32.samefile | |
48 | setsignalhandler = win32.setsignalhandler |
|
48 | setsignalhandler = win32.setsignalhandler | |
49 | spawndetached = win32.spawndetached |
|
49 | spawndetached = win32.spawndetached | |
50 | split = os.path.split |
|
50 | split = os.path.split | |
51 | testpid = win32.testpid |
|
51 | testpid = win32.testpid | |
52 | unlink = win32.unlink |
|
52 | unlink = win32.unlink | |
53 |
|
53 | |||
54 | umask = 0o022 |
|
54 | umask = 0o022 | |
55 |
|
55 | |||
56 |
|
56 | |||
57 | class mixedfilemodewrapper(object): |
|
57 | class mixedfilemodewrapper(object): | |
58 | """Wraps a file handle when it is opened in read/write mode. |
|
58 | """Wraps a file handle when it is opened in read/write mode. | |
59 |
|
59 | |||
60 | fopen() and fdopen() on Windows have a specific-to-Windows requirement |
|
60 | fopen() and fdopen() on Windows have a specific-to-Windows requirement | |
61 | that files opened with mode r+, w+, or a+ make a call to a file positioning |
|
61 | that files opened with mode r+, w+, or a+ make a call to a file positioning | |
62 | function when switching between reads and writes. Without this extra call, |
|
62 | function when switching between reads and writes. Without this extra call, | |
63 | Python will raise a not very intuitive "IOError: [Errno 0] Error." |
|
63 | Python will raise a not very intuitive "IOError: [Errno 0] Error." | |
64 |
|
64 | |||
65 | This class wraps posixfile instances when the file is opened in read/write |
|
65 | This class wraps posixfile instances when the file is opened in read/write | |
66 | mode and automatically adds checks or inserts appropriate file positioning |
|
66 | mode and automatically adds checks or inserts appropriate file positioning | |
67 | calls when necessary. |
|
67 | calls when necessary. | |
68 | """ |
|
68 | """ | |
69 |
|
69 | |||
70 | OPNONE = 0 |
|
70 | OPNONE = 0 | |
71 | OPREAD = 1 |
|
71 | OPREAD = 1 | |
72 | OPWRITE = 2 |
|
72 | OPWRITE = 2 | |
73 |
|
73 | |||
74 | def __init__(self, fp): |
|
74 | def __init__(self, fp): | |
75 | object.__setattr__(self, '_fp', fp) |
|
75 | object.__setattr__(self, '_fp', fp) | |
76 | object.__setattr__(self, '_lastop', 0) |
|
76 | object.__setattr__(self, '_lastop', 0) | |
77 |
|
77 | |||
78 | def __enter__(self): |
|
78 | def __enter__(self): | |
79 | self._fp.__enter__() |
|
79 | self._fp.__enter__() | |
80 | return self |
|
80 | return self | |
81 |
|
81 | |||
82 | def __exit__(self, exc_type, exc_val, exc_tb): |
|
82 | def __exit__(self, exc_type, exc_val, exc_tb): | |
83 | self._fp.__exit__(exc_type, exc_val, exc_tb) |
|
83 | self._fp.__exit__(exc_type, exc_val, exc_tb) | |
84 |
|
84 | |||
85 | def __getattr__(self, name): |
|
85 | def __getattr__(self, name): | |
86 | return getattr(self._fp, name) |
|
86 | return getattr(self._fp, name) | |
87 |
|
87 | |||
88 | def __setattr__(self, name, value): |
|
88 | def __setattr__(self, name, value): | |
89 | return self._fp.__setattr__(name, value) |
|
89 | return self._fp.__setattr__(name, value) | |
90 |
|
90 | |||
91 | def _noopseek(self): |
|
91 | def _noopseek(self): | |
92 | self._fp.seek(0, os.SEEK_CUR) |
|
92 | self._fp.seek(0, os.SEEK_CUR) | |
93 |
|
93 | |||
94 | def seek(self, *args, **kwargs): |
|
94 | def seek(self, *args, **kwargs): | |
95 | object.__setattr__(self, '_lastop', self.OPNONE) |
|
95 | object.__setattr__(self, '_lastop', self.OPNONE) | |
96 | return self._fp.seek(*args, **kwargs) |
|
96 | return self._fp.seek(*args, **kwargs) | |
97 |
|
97 | |||
98 | def write(self, d): |
|
98 | def write(self, d): | |
99 | if self._lastop == self.OPREAD: |
|
99 | if self._lastop == self.OPREAD: | |
100 | self._noopseek() |
|
100 | self._noopseek() | |
101 |
|
101 | |||
102 | object.__setattr__(self, '_lastop', self.OPWRITE) |
|
102 | object.__setattr__(self, '_lastop', self.OPWRITE) | |
103 | return self._fp.write(d) |
|
103 | return self._fp.write(d) | |
104 |
|
104 | |||
105 | def writelines(self, *args, **kwargs): |
|
105 | def writelines(self, *args, **kwargs): | |
106 | if self._lastop == self.OPREAD: |
|
106 | if self._lastop == self.OPREAD: | |
107 | self._noopseek() |

107 | self._noopseek() | |
108 |
|
108 | |||
109 | object.__setattr__(self, '_lastop', self.OPWRITE) |
|
109 | object.__setattr__(self, '_lastop', self.OPWRITE) | |
110 | return self._fp.writelines(*args, **kwargs) |
|
110 | return self._fp.writelines(*args, **kwargs) | |
111 |
|
111 | |||
112 | def read(self, *args, **kwargs): |
|
112 | def read(self, *args, **kwargs): | |
113 | if self._lastop == self.OPWRITE: |
|
113 | if self._lastop == self.OPWRITE: | |
114 | self._noopseek() |
|
114 | self._noopseek() | |
115 |
|
115 | |||
116 | object.__setattr__(self, '_lastop', self.OPREAD) |
|
116 | object.__setattr__(self, '_lastop', self.OPREAD) | |
117 | return self._fp.read(*args, **kwargs) |
|
117 | return self._fp.read(*args, **kwargs) | |
118 |
|
118 | |||
119 | def readline(self, *args, **kwargs): |
|
119 | def readline(self, *args, **kwargs): | |
120 | if self._lastop == self.OPWRITE: |
|
120 | if self._lastop == self.OPWRITE: | |
121 | self._noopseek() |
|
121 | self._noopseek() | |
122 |
|
122 | |||
123 | object.__setattr__(self, '_lastop', self.OPREAD) |
|
123 | object.__setattr__(self, '_lastop', self.OPREAD) | |
124 | return self._fp.readline(*args, **kwargs) |
|
124 | return self._fp.readline(*args, **kwargs) | |
125 |
|
125 | |||
126 | def readlines(self, *args, **kwargs): |
|
126 | def readlines(self, *args, **kwargs): | |
127 | if self._lastop == self.OPWRITE: |
|
127 | if self._lastop == self.OPWRITE: | |
128 | self._noopseek() |
|
128 | self._noopseek() | |
129 |
|
129 | |||
130 | object.__setattr__(self, '_lastop', self.OPREAD) |
|
130 | object.__setattr__(self, '_lastop', self.OPREAD) | |
131 | return self._fp.readlines(*args, **kwargs) |
|
131 | return self._fp.readlines(*args, **kwargs) | |
132 |
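For reference, the failure mode being papered over (CPython 2 on Windows; the file name is invented):

    fp = open('data.bin', 'r+b')
    fp.read(4)
    fp.write(b'!')  # IOError: [Errno 0] Error -- no seek between read and write
    # the wrapper inserts the equivalent of fp.seek(0, os.SEEK_CUR) in between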
|
132 | |||
133 |
|
133 | |||
134 | class fdproxy(object): |
|
134 | class fdproxy(object): | |
135 | """Wraps osutil.posixfile() to override the name attribute to reflect the |
|
135 | """Wraps osutil.posixfile() to override the name attribute to reflect the | |
136 | underlying file name. |
|
136 | underlying file name. | |
137 | """ |
|
137 | """ | |
138 |
|
138 | |||
139 | def __init__(self, name, fp): |
|
139 | def __init__(self, name, fp): | |
140 | self.name = name |
|
140 | self.name = name | |
141 | self._fp = fp |
|
141 | self._fp = fp | |
142 |
|
142 | |||
143 | def __enter__(self): |
|
143 | def __enter__(self): | |
144 | self._fp.__enter__() |
|
144 | self._fp.__enter__() | |
145 | # Return this wrapper for the context manager so that the name is |
|
145 | # Return this wrapper for the context manager so that the name is | |
146 | # still available. |
|
146 | # still available. | |
147 | return self |
|
147 | return self | |
148 |
|
148 | |||
149 | def __exit__(self, exc_type, exc_value, traceback): |
|
149 | def __exit__(self, exc_type, exc_value, traceback): | |
150 | self._fp.__exit__(exc_type, exc_value, traceback) |
|
150 | self._fp.__exit__(exc_type, exc_value, traceback) | |
151 |
|
151 | |||
152 | def __iter__(self): |
|
152 | def __iter__(self): | |
153 | return iter(self._fp) |
|
153 | return iter(self._fp) | |
154 |
|
154 | |||
155 | def __getattr__(self, name): |
|
155 | def __getattr__(self, name): | |
156 | return getattr(self._fp, name) |
|
156 | return getattr(self._fp, name) | |
157 |
|
157 | |||
158 |
|
158 | |||
159 | def posixfile(name, mode=b'r', buffering=-1): |
|
159 | def posixfile(name, mode=b'r', buffering=-1): | |
160 | '''Open a file with even more POSIX-like semantics''' |
|
160 | '''Open a file with even more POSIX-like semantics''' | |
161 | try: |
|
161 | try: | |
162 | fp = osutil.posixfile(name, mode, buffering) # may raise WindowsError |
|
162 | fp = osutil.posixfile(name, mode, buffering) # may raise WindowsError | |
163 |
|
163 | |||
164 | # PyFile_FromFd() ignores the name, and seems to report fp.name as the |
|
164 | # PyFile_FromFd() ignores the name, and seems to report fp.name as the | |
165 | # underlying file descriptor. |
|
165 | # underlying file descriptor. | |
166 | if pycompat.ispy3: |
|
166 | if pycompat.ispy3: | |
167 | fp = fdproxy(name, fp) |
|
167 | fp = fdproxy(name, fp) | |
168 |
|
168 | |||
169 | # The position when opening in append mode is implementation defined, so |
|
169 | # The position when opening in append mode is implementation defined, so | |
170 | # make it consistent with other platforms, which position at EOF. |
|
170 | # make it consistent with other platforms, which position at EOF. | |
171 | if b'a' in mode: |
|
171 | if b'a' in mode: | |
172 | fp.seek(0, os.SEEK_END) |
|
172 | fp.seek(0, os.SEEK_END) | |
173 |
|
173 | |||
174 | if b'+' in mode: |
|
174 | if b'+' in mode: | |
175 | return mixedfilemodewrapper(fp) |
|
175 | return mixedfilemodewrapper(fp) | |
176 |
|
176 | |||
177 | return fp |
|
177 | return fp | |
178 | except WindowsError as err: |
|
178 | except WindowsError as err: | |
179 | # convert to a friendlier exception |
|
179 | # convert to a friendlier exception | |
180 | raise IOError( |
|
180 | raise IOError( | |
181 | err.errno, '%s: %s' % (encoding.strfromlocal(name), err.strerror) |
|
181 | err.errno, '%s: %s' % (encoding.strfromlocal(name), err.strerror) | |
182 | ) |
|
182 | ) | |
183 |
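Usage sketch (file name invented): '+' modes come back wrapped, so mixed reads and writes just work:

    with posixfile(b'state.bin', b'r+b') as fp:
        magic = fp.read(4)
        fp.seek(0, os.SEEK_END)
        fp.write(b'trailer')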
|
183 | |||
184 |
|
184 | |||
185 | # may be wrapped by win32mbcs extension |
|
185 | # may be wrapped by win32mbcs extension | |
186 | listdir = osutil.listdir |
|
186 | listdir = osutil.listdir | |
187 |
|
187 | |||
188 |
|
188 | |||
189 | # copied from .utils.procutil, remove once Python 2 support is dropped |

189 | # copied from .utils.procutil, remove once Python 2 support is dropped | |
190 | def _isatty(fp): |
|
190 | def _isatty(fp): | |
191 | try: |
|
191 | try: | |
192 | return fp.isatty() |
|
192 | return fp.isatty() | |
193 | except AttributeError: |
|
193 | except AttributeError: | |
194 | return False |
|
194 | return False | |
195 |
|
195 | |||
196 |
|
196 | |||
|
197 | def get_password(): | |||
|
198 | """Prompt for password with echo off, using Windows getch(). | |||
|
199 | ||||
|
200 | This shouldn't be called directly; use ``ui.getpass()`` instead, which | |
|
201 | checks if the session is interactive first. | |||
|
202 | """ | |||
|
203 | pw = "" | |||
|
204 | while True: | |||
|
205 | c = msvcrt.getwch() | |||
|
206 | if c == '\r' or c == '\n': | |||
|
207 | break | |||
|
208 | if c == '\003': | |||
|
209 | raise KeyboardInterrupt | |||
|
210 | if c == '\b': | |||
|
211 | pw = pw[:-1] | |||
|
212 | else: | |||
|
213 | pw = pw + c | |||
|
214 | msvcrt.putwch('\r') | |||
|
215 | msvcrt.putwch('\n') | |||
|
216 | return encoding.strtolocal(pw) | |||
|
217 | ||||
|
218 | ||||
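Behavioural sketch of the new helper (assumed wiring; ui.getpass() is the supported entry point):

    pw = get_password()  # echoes nothing; Enter ends input
    # Backspace trims the buffer and Ctrl-C ('\003') raises KeyboardInterrupt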
197 | class winstdout(object): |
|
219 | class winstdout(object): | |
198 | """Some files on Windows misbehave. |
|
220 | """Some files on Windows misbehave. | |
199 |
|
221 | |||
200 | When writing to a broken pipe, EINVAL instead of EPIPE may be raised. |
|
222 | When writing to a broken pipe, EINVAL instead of EPIPE may be raised. | |
201 |
|
223 | |||
202 | When writing too many bytes to a console at the same time, a "Not enough space" |

224 | When writing too many bytes to a console at the same time, a "Not enough space" | |
203 | error may happen. Python 3 already works around that. |
|
225 | error may happen. Python 3 already works around that. | |
204 | """ |
|
226 | """ | |
205 |
|
227 | |||
206 | def __init__(self, fp): |
|
228 | def __init__(self, fp): | |
207 | self.fp = fp |
|
229 | self.fp = fp | |
208 | self.throttle = not pycompat.ispy3 and _isatty(fp) |
|
230 | self.throttle = not pycompat.ispy3 and _isatty(fp) | |
209 |
|
231 | |||
210 | def __getattr__(self, key): |
|
232 | def __getattr__(self, key): | |
211 | return getattr(self.fp, key) |
|
233 | return getattr(self.fp, key) | |
212 |
|
234 | |||
213 | def close(self): |
|
235 | def close(self): | |
214 | try: |
|
236 | try: | |
215 | self.fp.close() |
|
237 | self.fp.close() | |
216 | except IOError: |
|
238 | except IOError: | |
217 | pass |
|
239 | pass | |
218 |
|
240 | |||
219 | def write(self, s): |
|
241 | def write(self, s): | |
220 | try: |
|
242 | try: | |
221 | if not self.throttle: |
|
243 | if not self.throttle: | |
222 | return self.fp.write(s) |
|
244 | return self.fp.write(s) | |
223 | # This is a workaround for the "Not enough space" error on |

245 | # This is a workaround for the "Not enough space" error on | |
224 | # writing a large amount of data to the console. |

246 | # writing a large amount of data to the console. | |
225 | limit = 16000 |
|
247 | limit = 16000 | |
226 | l = len(s) |
|
248 | l = len(s) | |
227 | start = 0 |
|
249 | start = 0 | |
228 | while start < l: |
|
250 | while start < l: | |
229 | end = start + limit |
|
251 | end = start + limit | |
230 | self.fp.write(s[start:end]) |
|
252 | self.fp.write(s[start:end]) | |
231 | start = end |
|
253 | start = end | |
232 | except IOError as inst: |
|
254 | except IOError as inst: | |
233 | if inst.errno != 0 and not win32.lasterrorwaspipeerror(inst): |
|
255 | if inst.errno != 0 and not win32.lasterrorwaspipeerror(inst): | |
234 | raise |
|
256 | raise | |
235 | self.close() |
|
257 | self.close() | |
236 | raise IOError(errno.EPIPE, 'Broken pipe') |
|
258 | raise IOError(errno.EPIPE, 'Broken pipe') | |
237 |
|
259 | |||
238 | def flush(self): |
|
260 | def flush(self): | |
239 | try: |
|
261 | try: | |
240 | return self.fp.flush() |
|
262 | return self.fp.flush() | |
241 | except IOError as inst: |
|
263 | except IOError as inst: | |
242 | if not win32.lasterrorwaspipeerror(inst): |
|
264 | if not win32.lasterrorwaspipeerror(inst): | |
243 | raise |
|
265 | raise | |
244 | raise IOError(errno.EPIPE, 'Broken pipe') |
|
266 | raise IOError(errno.EPIPE, 'Broken pipe') | |
245 |
|
267 | |||
246 |
|
268 | |||
247 | def openhardlinks(): |
|
269 | def openhardlinks(): | |
248 | return True |
|
270 | return True | |
249 |
|
271 | |||
250 |
|
272 | |||
251 | def parsepatchoutput(output_line): |
|
273 | def parsepatchoutput(output_line): | |
252 | """parses the output produced by patch and returns the filename""" |
|
274 | """parses the output produced by patch and returns the filename""" | |
253 | pf = output_line[14:] |
|
275 | pf = output_line[14:] | |
254 | if pf[0] == b'`': |
|
276 | if pf[0] == b'`': | |
255 | pf = pf[1:-1] # Remove the quotes |
|
277 | pf = pf[1:-1] # Remove the quotes | |
256 | return pf |
|
278 | return pf | |
257 |
|
279 | |||
258 |
|
280 | |||
259 | def sshargs(sshcmd, host, user, port): |
|
281 | def sshargs(sshcmd, host, user, port): | |
260 | '''Build argument list for ssh or Plink''' |
|
282 | '''Build argument list for ssh or Plink''' | |
261 | pflag = b'plink' in sshcmd.lower() and b'-P' or b'-p' |
|
283 | pflag = b'plink' in sshcmd.lower() and b'-P' or b'-p' | |
262 | args = user and (b"%s@%s" % (user, host)) or host |
|
284 | args = user and (b"%s@%s" % (user, host)) or host | |
263 | if args.startswith(b'-') or args.startswith(b'/'): |
|
285 | if args.startswith(b'-') or args.startswith(b'/'): | |
264 | raise error.Abort( |
|
286 | raise error.Abort( | |
265 | _(b'illegal ssh hostname or username starting with - or /: %s') |
|
287 | _(b'illegal ssh hostname or username starting with - or /: %s') | |
266 | % args |
|
288 | % args | |
267 | ) |
|
289 | ) | |
268 | args = shellquote(args) |
|
290 | args = shellquote(args) | |
269 | if port: |
|
291 | if port: | |
270 | args = b'%s %s %s' % (pflag, shellquote(port), args) |
|
292 | args = b'%s %s %s' % (pflag, shellquote(port), args) | |
271 | return args |
|
293 | return args | |
272 |
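For example (the exact result depends on shellquote()'s quoting rules):

    sshargs(b'ssh', b'example.com', b'hguser', b'2222')
    # -> roughly b'-p 2222 hguser@example.com'; a plink sshcmd gets b'-P'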
|
294 | |||
273 |
|
295 | |||
274 | def setflags(f, l, x): |
|
296 | def setflags(f, l, x): | |
275 | pass |
|
297 | pass | |
276 |
|
298 | |||
277 |
|
299 | |||
278 | def copymode(src, dst, mode=None, enforcewritable=False): |
|
300 | def copymode(src, dst, mode=None, enforcewritable=False): | |
279 | pass |
|
301 | pass | |
280 |
|
302 | |||
281 |
|
303 | |||
282 | def checkexec(path): |
|
304 | def checkexec(path): | |
283 | return False |
|
305 | return False | |
284 |
|
306 | |||
285 |
|
307 | |||
286 | def checklink(path): |
|
308 | def checklink(path): | |
287 | return False |
|
309 | return False | |
288 |
|
310 | |||
289 |
|
311 | |||
290 | def setbinary(fd): |
|
312 | def setbinary(fd): | |
291 | # When run without console, pipes may expose invalid |
|
313 | # When run without console, pipes may expose invalid | |
292 | # fileno(), usually set to -1. |
|
314 | # fileno(), usually set to -1. | |
293 | fno = getattr(fd, 'fileno', None) |
|
315 | fno = getattr(fd, 'fileno', None) | |
294 | if fno is not None and fno() >= 0: |
|
316 | if fno is not None and fno() >= 0: | |
295 | msvcrt.setmode(fno(), os.O_BINARY) # pytype: disable=module-attr |
|
317 | msvcrt.setmode(fno(), os.O_BINARY) # pytype: disable=module-attr | |
296 |
|
318 | |||
297 |
|
319 | |||
298 | def pconvert(path): |
|
320 | def pconvert(path): | |
299 | return path.replace(pycompat.ossep, b'/') |
|
321 | return path.replace(pycompat.ossep, b'/') | |
300 |
|
322 | |||
301 |
|
323 | |||
302 | def localpath(path): |
|
324 | def localpath(path): | |
303 | return path.replace(b'/', b'\\') |
|
325 | return path.replace(b'/', b'\\') | |
304 |
|
326 | |||
305 |
|
327 | |||
306 | def normpath(path): |
|
328 | def normpath(path): | |
307 | return pconvert(os.path.normpath(path)) |
|
329 | return pconvert(os.path.normpath(path)) | |
308 |
|
330 | |||
309 |
|
331 | |||
310 | def normcase(path): |
|
332 | def normcase(path): | |
311 | return encoding.upper(path) # NTFS compares via upper() |
|
333 | return encoding.upper(path) # NTFS compares via upper() | |
312 |
|
334 | |||
313 |
|
335 | |||
314 | # see posix.py for definitions |
|
336 | # see posix.py for definitions | |
315 | normcasespec = encoding.normcasespecs.upper |
|
337 | normcasespec = encoding.normcasespecs.upper | |
316 | normcasefallback = encoding.upperfallback |
|
338 | normcasefallback = encoding.upperfallback | |
317 |
|
339 | |||
318 |
|
340 | |||
319 | def samestat(s1, s2): |
|
341 | def samestat(s1, s2): | |
320 | return False |
|
342 | return False | |
321 |
|
343 | |||
322 |
|
344 | |||
323 | def shelltocmdexe(path, env): |
|
345 | def shelltocmdexe(path, env): | |
324 | r"""Convert shell variables in the form $var and ${var} inside ``path`` |
|
346 | r"""Convert shell variables in the form $var and ${var} inside ``path`` | |
325 | to %var% form. Existing Windows style variables are left unchanged. |
|
347 | to %var% form. Existing Windows style variables are left unchanged. | |
326 |
|
348 | |||
327 | The variables are limited to the given environment. Unknown variables are |
|
349 | The variables are limited to the given environment. Unknown variables are | |
328 | left unchanged. |
|
350 | left unchanged. | |
329 |
|
351 | |||
330 | >>> e = {b'var1': b'v1', b'var2': b'v2', b'var3': b'v3'} |
|
352 | >>> e = {b'var1': b'v1', b'var2': b'v2', b'var3': b'v3'} | |
331 | >>> # Only valid values are expanded |
|
353 | >>> # Only valid values are expanded | |
332 | >>> shelltocmdexe(b'cmd $var1 ${var2} %var3% $missing ${missing} %missing%', |
|
354 | >>> shelltocmdexe(b'cmd $var1 ${var2} %var3% $missing ${missing} %missing%', | |
333 | ... e) |
|
355 | ... e) | |
334 | 'cmd %var1% %var2% %var3% $missing ${missing} %missing%' |
|
356 | 'cmd %var1% %var2% %var3% $missing ${missing} %missing%' | |
335 | >>> # Single quote prevents expansion, as does \$ escaping |
|
357 | >>> # Single quote prevents expansion, as does \$ escaping | |
336 | >>> shelltocmdexe(b"cmd '$var1 ${var2} %var3%' \$var1 \${var2} \\", e) |
|
358 | >>> shelltocmdexe(b"cmd '$var1 ${var2} %var3%' \$var1 \${var2} \\", e) | |
337 | 'cmd "$var1 ${var2} %var3%" $var1 ${var2} \\' |
|
359 | 'cmd "$var1 ${var2} %var3%" $var1 ${var2} \\' | |
338 | >>> # $$ is not special. %% is not special either, but can be the end and |
|
360 | >>> # $$ is not special. %% is not special either, but can be the end and | |
339 | >>> # start of consecutive variables |
|
361 | >>> # start of consecutive variables | |
340 | >>> shelltocmdexe(b"cmd $$ %% %var1%%var2%", e) |
|
362 | >>> shelltocmdexe(b"cmd $$ %% %var1%%var2%", e) | |
341 | 'cmd $$ %% %var1%%var2%' |
|
363 | 'cmd $$ %% %var1%%var2%' | |
342 | >>> # No double substitution |
|
364 | >>> # No double substitution | |
343 | >>> shelltocmdexe(b"$var1 %var1%", {b'var1': b'%var2%', b'var2': b'boom'}) |
|
365 | >>> shelltocmdexe(b"$var1 %var1%", {b'var1': b'%var2%', b'var2': b'boom'}) | |
344 | '%var1% %var1%' |
|
366 | '%var1% %var1%' | |
345 | >>> # Tilde expansion |
|
367 | >>> # Tilde expansion | |
346 | >>> shelltocmdexe(b"~/dir ~\dir2 ~tmpfile \~/", {}) |
|
368 | >>> shelltocmdexe(b"~/dir ~\dir2 ~tmpfile \~/", {}) | |
347 | '%USERPROFILE%/dir %USERPROFILE%\\dir2 ~tmpfile ~/' |
|
369 | '%USERPROFILE%/dir %USERPROFILE%\\dir2 ~tmpfile ~/' | |
348 | """ |
|
370 | """ | |
349 | if not any(c in path for c in b"$'~"): |
|
371 | if not any(c in path for c in b"$'~"): | |
350 | return path |
|
372 | return path | |
351 |
|
373 | |||
352 | varchars = pycompat.sysbytes(string.ascii_letters + string.digits) + b'_-' |
|
374 | varchars = pycompat.sysbytes(string.ascii_letters + string.digits) + b'_-' | |
353 |
|
375 | |||
354 | res = b'' |
|
376 | res = b'' | |
355 | index = 0 |
|
377 | index = 0 | |
356 | pathlen = len(path) |
|
378 | pathlen = len(path) | |
357 | while index < pathlen: |
|
379 | while index < pathlen: | |
358 | c = path[index : index + 1] |
|
380 | c = path[index : index + 1] | |
359 | if c == b'\'': # no expansion within single quotes |
|
381 | if c == b'\'': # no expansion within single quotes | |
360 | path = path[index + 1 :] |
|
382 | path = path[index + 1 :] | |
361 | pathlen = len(path) |
|
383 | pathlen = len(path) | |
362 | try: |
|
384 | try: | |
363 | index = path.index(b'\'') |
|
385 | index = path.index(b'\'') | |
364 | res += b'"' + path[:index] + b'"' |
|
386 | res += b'"' + path[:index] + b'"' | |
365 | except ValueError: |
|
387 | except ValueError: | |
366 | res += c + path |
|
388 | res += c + path | |
367 | index = pathlen - 1 |
|
389 | index = pathlen - 1 | |
368 | elif c == b'%': # variable |
|
390 | elif c == b'%': # variable | |
369 | path = path[index + 1 :] |
|
391 | path = path[index + 1 :] | |
370 | pathlen = len(path) |
|
392 | pathlen = len(path) | |
371 | try: |
|
393 | try: | |
372 | index = path.index(b'%') |
|
394 | index = path.index(b'%') | |
373 | except ValueError: |
|
395 | except ValueError: | |
374 | res += b'%' + path |
|
396 | res += b'%' + path | |
375 | index = pathlen - 1 |
|
397 | index = pathlen - 1 | |
376 | else: |
|
398 | else: | |
377 | var = path[:index] |
|
399 | var = path[:index] | |
378 | res += b'%' + var + b'%' |
|
400 | res += b'%' + var + b'%' | |
379 | elif c == b'$': # variable |
|
401 | elif c == b'$': # variable | |
380 | if path[index + 1 : index + 2] == b'{': |
|
402 | if path[index + 1 : index + 2] == b'{': | |
381 | path = path[index + 2 :] |
|
403 | path = path[index + 2 :] | |
382 | pathlen = len(path) |
|
404 | pathlen = len(path) | |
383 | try: |
|
405 | try: | |
384 | index = path.index(b'}') |
|
406 | index = path.index(b'}') | |
385 | var = path[:index] |
|
407 | var = path[:index] | |
386 |
|
408 | |||
387 | # See below for why empty variables are handled specially |
|
409 | # See below for why empty variables are handled specially | |
388 | if env.get(var, b'') != b'': |
|
410 | if env.get(var, b'') != b'': | |
389 | res += b'%' + var + b'%' |
|
411 | res += b'%' + var + b'%' | |
390 | else: |
|
412 | else: | |
391 | res += b'${' + var + b'}' |
|
413 | res += b'${' + var + b'}' | |
392 | except ValueError: |
|
414 | except ValueError: | |
393 | res += b'${' + path |
|
415 | res += b'${' + path | |
394 | index = pathlen - 1 |
|
416 | index = pathlen - 1 | |
395 | else: |
|
417 | else: | |
396 | var = b'' |
|
418 | var = b'' | |
397 | index += 1 |
|
419 | index += 1 | |
398 | c = path[index : index + 1] |
|
420 | c = path[index : index + 1] | |
399 | while c != b'' and c in varchars: |
|
421 | while c != b'' and c in varchars: | |
400 | var += c |
|
422 | var += c | |
401 | index += 1 |
|
423 | index += 1 | |
402 | c = path[index : index + 1] |
|
424 | c = path[index : index + 1] | |
403 | # Some variables (like HG_OLDNODE) may be defined, but have an |
|
425 | # Some variables (like HG_OLDNODE) may be defined, but have an | |
404 | # empty value. Those need to be skipped because when spawning |
|
426 | # empty value. Those need to be skipped because when spawning | |
405 | # cmd.exe to run the hook, it doesn't replace %VAR% for an empty |
|
427 | # cmd.exe to run the hook, it doesn't replace %VAR% for an empty | |
406 | # VAR, and that really confuses things like revset expressions. |
|
428 | # VAR, and that really confuses things like revset expressions. | |
407 | # OTOH, if it's left in Unix format and the hook runs sh.exe, it |
|
429 | # OTOH, if it's left in Unix format and the hook runs sh.exe, it | |
408 | # will substitute to an empty string, and everything is happy. |
|
430 | # will substitute to an empty string, and everything is happy. | |
409 | if env.get(var, b'') != b'': |
|
431 | if env.get(var, b'') != b'': | |
410 | res += b'%' + var + b'%' |
|
432 | res += b'%' + var + b'%' | |
411 | else: |
|
433 | else: | |
412 | res += b'$' + var |
|
434 | res += b'$' + var | |
413 |
|
435 | |||
414 | if c != b'': |
|
436 | if c != b'': | |
415 | index -= 1 |
|
437 | index -= 1 | |
416 | elif ( |
|
438 | elif ( | |
417 | c == b'~' |
|
439 | c == b'~' | |
418 | and index + 1 < pathlen |
|
440 | and index + 1 < pathlen | |
419 | and path[index + 1 : index + 2] in (b'\\', b'/') |
|
441 | and path[index + 1 : index + 2] in (b'\\', b'/') | |
420 | ): |
|
442 | ): | |
421 | res += b"%USERPROFILE%" |
|
443 | res += b"%USERPROFILE%" | |
422 | elif ( |
|
444 | elif ( | |
423 | c == b'\\' |
|
445 | c == b'\\' | |
424 | and index + 1 < pathlen |
|
446 | and index + 1 < pathlen | |
425 | and path[index + 1 : index + 2] in (b'$', b'~') |
|
447 | and path[index + 1 : index + 2] in (b'$', b'~') | |
426 | ): |
|
448 | ): | |
427 | # Skip '\', but only if it is escaping $ or ~ |
|
449 | # Skip '\', but only if it is escaping $ or ~ | |
428 | res += path[index + 1 : index + 2] |
|
450 | res += path[index + 1 : index + 2] | |
429 | index += 1 |
|
451 | index += 1 | |
430 | else: |
|
452 | else: | |
431 | res += c |
|
453 | res += c | |
432 |
|
454 | |||
433 | index += 1 |
|
455 | index += 1 | |
434 | return res |
|
456 | return res | |
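
As a quick illustration of the rule above, here is a condensed, standalone
re-implementation of just the $var/${var} cases (a sketch: plain str and a toy
env dict, where the real function works on bytes and also handles quoting,
~ and \ escapes):

    import re

    def sh_to_cmd_vars(path, env):
        def repl(m):
            var = m.group(1) or m.group(2)
            # Empty or undefined variables keep their sh spelling, mirroring
            # the empty-value rule explained in the comments above.
            return '%' + var + '%' if env.get(var) else m.group(0)
        return re.sub(r'\$\{(\w+)\}|\$(\w+)', repl, path)

    print(sh_to_cmd_vars('$HG_NODE and ${HG_URL} and $missing',
                         {'HG_NODE': 'abc123', 'HG_URL': 'http://x/'}))
    # -> %HG_NODE% and %HG_URL% and $missing
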
435 |
|
457 | |||
436 |
|
458 | |||
437 | # A sequence of backslashes is special iff it precedes a double quote: |
|
459 | # A sequence of backslashes is special iff it precedes a double quote: | |
438 | # - if there's an even number of backslashes, the double quote is not |
|
460 | # - if there's an even number of backslashes, the double quote is not | |
439 | # quoted (i.e. it ends the quoted region) |
|
461 | # quoted (i.e. it ends the quoted region) | |
440 | # - if there's an odd number of backslashes, the double quote is quoted |
|
462 | # - if there's an odd number of backslashes, the double quote is quoted | |
441 | # - in both cases, every pair of backslashes is unquoted into a single |
|
463 | # - in both cases, every pair of backslashes is unquoted into a single | |
442 | # backslash |
|
464 | # backslash | |
443 | # (See http://msdn2.microsoft.com/en-us/library/a1y7w461.aspx ) |
|
465 | # (See http://msdn2.microsoft.com/en-us/library/a1y7w461.aspx ) | |
444 | # So, to quote a string, we must surround it in double quotes, double |
|
466 | # So, to quote a string, we must surround it in double quotes, double | |
445 | # the number of backslashes that precede double quotes and add another |
|
467 | # the number of backslashes that precede double quotes and add another | |
446 | # backslash before every double quote (being careful with the double |
|
468 | # backslash before every double quote (being careful with the double | |
447 | # quote we've appended to the end) |
|
469 | # quote we've appended to the end) | |
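
Python's subprocess.list2cmdline implements this same MSVCRT rule, so it can
serve as a quick sanity check of the description (illustrative only, not part
of this module):

    import subprocess

    # A trailing backslash inside a quoted argument is doubled so it cannot
    # escape the closing quote that quoting appends.
    print(subprocess.list2cmdline(['a b\\']))     # -> "a b\\"

    # An embedded double quote is escaped with a backslash.
    print(subprocess.list2cmdline(['say "hi"']))  # -> "say \"hi\""
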
448 | _quotere = None |
|
470 | _quotere = None | |
449 | _needsshellquote = None |
|
471 | _needsshellquote = None | |
450 |
|
472 | |||
451 |
|
473 | |||
452 | def shellquote(s): |
|
474 | def shellquote(s): | |
453 | r""" |
|
475 | r""" | |
454 | >>> shellquote(br'C:\Users\xyz') |
|
476 | >>> shellquote(br'C:\Users\xyz') | |
455 | '"C:\\Users\\xyz"' |
|
477 | '"C:\\Users\\xyz"' | |
456 | >>> shellquote(br'C:\Users\xyz/mixed') |
|
478 | >>> shellquote(br'C:\Users\xyz/mixed') | |
457 | '"C:\\Users\\xyz/mixed"' |
|
479 | '"C:\\Users\\xyz/mixed"' | |
458 | >>> # Would be safe not to quote too, since it is all double backslashes |
|
480 | >>> # Would be safe not to quote too, since it is all double backslashes | |
459 | >>> shellquote(br'C:\\Users\\xyz') |
|
481 | >>> shellquote(br'C:\\Users\\xyz') | |
460 | '"C:\\\\Users\\\\xyz"' |
|
482 | '"C:\\\\Users\\\\xyz"' | |
461 | >>> # But this must be quoted |
|
483 | >>> # But this must be quoted | |
462 | >>> shellquote(br'C:\\Users\\xyz/abc') |
|
484 | >>> shellquote(br'C:\\Users\\xyz/abc') | |
463 | '"C:\\\\Users\\\\xyz/abc"' |
|
485 | '"C:\\\\Users\\\\xyz/abc"' | |
464 | """ |
|
486 | """ | |
465 | global _quotere |
|
487 | global _quotere | |
466 | if _quotere is None: |
|
488 | if _quotere is None: | |
467 | _quotere = re.compile(br'(\\*)("|\\$)') |
|
489 | _quotere = re.compile(br'(\\*)("|\\$)') | |
468 | global _needsshellquote |
|
490 | global _needsshellquote | |
469 | if _needsshellquote is None: |
|
491 | if _needsshellquote is None: | |
470 | # ":" is also treated as "safe character", because it is used as a part |
|
492 | # ":" is also treated as "safe character", because it is used as a part | |
471 | # of a path name on Windows. "\" is also part of a path name, but isn't |
|
493 | # of a path name on Windows. "\" is also part of a path name, but isn't | |
472 | # safe because shlex.split() (kind of) treats it as an escape char and |
|
494 | # safe because shlex.split() (kind of) treats it as an escape char and | |
473 | # drops it. It will leave the next character, even if it is another |
|
495 | # drops it. It will leave the next character, even if it is another | |
474 | # "\". |
|
496 | # "\". | |
475 | _needsshellquote = re.compile(br'[^a-zA-Z0-9._:/-]').search |
|
497 | _needsshellquote = re.compile(br'[^a-zA-Z0-9._:/-]').search | |
476 | if s and not _needsshellquote(s) and not _quotere.search(s): |
|
498 | if s and not _needsshellquote(s) and not _quotere.search(s): | |
477 | # "s" shouldn't have to be quoted |
|
499 | # "s" shouldn't have to be quoted | |
478 | return s |
|
500 | return s | |
479 | return b'"%s"' % _quotere.sub(br'\1\1\\\2', s) |
|
501 | return b'"%s"' % _quotere.sub(br'\1\1\\\2', s) | |
480 |
|
502 | |||
481 |
|
503 | |||
482 | def _unquote(s): |
|
504 | def _unquote(s): | |
483 | if s.startswith(b'"') and s.endswith(b'"'): |
|
505 | if s.startswith(b'"') and s.endswith(b'"'): | |
484 | return s[1:-1] |
|
506 | return s[1:-1] | |
485 | return s |
|
507 | return s | |
486 |
|
508 | |||
487 |
|
509 | |||
488 | def shellsplit(s): |
|
510 | def shellsplit(s): | |
489 | """Parse a command string in cmd.exe way (best-effort)""" |
|
511 | """Parse a command string in cmd.exe way (best-effort)""" | |
490 | return pycompat.maplist(_unquote, pycompat.shlexsplit(s, posix=False)) |
|
512 | return pycompat.maplist(_unquote, pycompat.shlexsplit(s, posix=False)) | |
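
A self-contained sketch of the split-then-unquote pipeline, using stdlib shlex
directly (str here, where the real helpers take bytes):

    import shlex

    def unquote(s):
        # mirrors _unquote() above
        if s.startswith('"') and s.endswith('"'):
            return s[1:-1]
        return s

    cmd = '"C:\\Program Files\\hg" status --rev "1 2"'
    print([unquote(p) for p in shlex.split(cmd, posix=False)])
    # -> ['C:\\Program Files\\hg', 'status', '--rev', '1 2']
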
491 |
|
513 | |||
492 |
|
514 | |||
493 | # if you change this stub into a real check, please try to implement the |
|
515 | # if you change this stub into a real check, please try to implement the | |
494 | # username and groupname functions above, too. |
|
516 | # username and groupname functions above, too. | |
495 | def isowner(st): |
|
517 | def isowner(st): | |
496 | return True |
|
518 | return True | |
497 |
|
519 | |||
498 |
|
520 | |||
499 | def findexe(command): |
|
521 | def findexe(command): | |
500 | """Find executable for command searching like cmd.exe does. |
|
522 | """Find executable for command searching like cmd.exe does. | |
501 | If command is a basename then PATH is searched for command. |
|
523 | If command is a basename then PATH is searched for command. | |
502 | PATH isn't searched if command is an absolute or relative path. |
|
524 | PATH isn't searched if command is an absolute or relative path. | |
503 | An extension from PATHEXT is found and added if not present. |
|
525 | An extension from PATHEXT is found and added if not present. | |
504 | If command isn't found None is returned.""" |
|
526 | If command isn't found None is returned.""" | |
505 | pathext = encoding.environ.get(b'PATHEXT', b'.COM;.EXE;.BAT;.CMD') |
|
527 | pathext = encoding.environ.get(b'PATHEXT', b'.COM;.EXE;.BAT;.CMD') | |
506 | pathexts = [ext for ext in pathext.lower().split(pycompat.ospathsep)] |
|
528 | pathexts = [ext for ext in pathext.lower().split(pycompat.ospathsep)] | |
507 | if os.path.splitext(command)[1].lower() in pathexts: |
|
529 | if os.path.splitext(command)[1].lower() in pathexts: | |
508 | pathexts = [b''] |
|
530 | pathexts = [b''] | |
509 |
|
531 | |||
510 | def findexisting(pathcommand): |
|
532 | def findexisting(pathcommand): | |
511 | """Will append extension (if needed) and return existing file""" |
|
533 | """Will append extension (if needed) and return existing file""" | |
512 | for ext in pathexts: |
|
534 | for ext in pathexts: | |
513 | executable = pathcommand + ext |
|
535 | executable = pathcommand + ext | |
514 | if os.path.exists(executable): |
|
536 | if os.path.exists(executable): | |
515 | return executable |
|
537 | return executable | |
516 | return None |
|
538 | return None | |
517 |
|
539 | |||
518 | if pycompat.ossep in command: |
|
540 | if pycompat.ossep in command: | |
519 | return findexisting(command) |
|
541 | return findexisting(command) | |
520 |
|
542 | |||
521 | for path in encoding.environ.get(b'PATH', b'').split(pycompat.ospathsep): |
|
543 | for path in encoding.environ.get(b'PATH', b'').split(pycompat.ospathsep): | |
522 | executable = findexisting(os.path.join(path, command)) |
|
544 | executable = findexisting(os.path.join(path, command)) | |
523 | if executable is not None: |
|
545 | if executable is not None: | |
524 | return executable |
|
546 | return executable | |
525 | return findexisting(os.path.expanduser(os.path.expandvars(command))) |
|
547 | return findexisting(os.path.expanduser(os.path.expandvars(command))) | |
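
The PATHEXT handling reduces to the following sketch (plain str and a fixed
default extension list; the real code reads the live environment as bytes):

    import os

    def candidates(command, pathext='.COM;.EXE;.BAT;.CMD'):
        exts = pathext.lower().split(';')  # os.pathsep is ';' on Windows
        # A command that already names a recognized extension is searched
        # for verbatim instead of having extensions appended.
        if os.path.splitext(command)[1].lower() in exts:
            exts = ['']
        return [command + e for e in exts]

    print(candidates('hg'))      # -> ['hg.com', 'hg.exe', 'hg.bat', 'hg.cmd']
    print(candidates('hg.exe'))  # -> ['hg.exe']
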
526 |
|
548 | |||
527 |
|
549 | |||
528 | _wantedkinds = {stat.S_IFREG, stat.S_IFLNK} |
|
550 | _wantedkinds = {stat.S_IFREG, stat.S_IFLNK} | |
529 |
|
551 | |||
530 |
|
552 | |||
531 | def statfiles(files): |
|
553 | def statfiles(files): | |
532 | """Stat each file in files. Yield each stat, or None if a file |
|
554 | """Stat each file in files. Yield each stat, or None if a file | |
533 | does not exist or has a type we don't care about. |
|
555 | does not exist or has a type we don't care about. | |
534 |
|
556 | |||
535 | Cluster and cache stat per directory to minimize the number of OS stat calls.""" |
|
557 | Cluster and cache stat per directory to minimize the number of OS stat calls.""" | |
536 | dircache = {} # dirname -> filename -> status | None if file does not exist |
|
558 | dircache = {} # dirname -> filename -> status | None if file does not exist | |
537 | getkind = stat.S_IFMT |
|
559 | getkind = stat.S_IFMT | |
538 | for nf in files: |
|
560 | for nf in files: | |
539 | nf = normcase(nf) |
|
561 | nf = normcase(nf) | |
540 | dir, base = os.path.split(nf) |
|
562 | dir, base = os.path.split(nf) | |
541 | if not dir: |
|
563 | if not dir: | |
542 | dir = b'.' |
|
564 | dir = b'.' | |
543 | cache = dircache.get(dir, None) |
|
565 | cache = dircache.get(dir, None) | |
544 | if cache is None: |
|
566 | if cache is None: | |
545 | try: |
|
567 | try: | |
546 | dmap = { |
|
568 | dmap = { | |
547 | normcase(n): s |
|
569 | normcase(n): s | |
548 | for n, k, s in listdir(dir, True) |
|
570 | for n, k, s in listdir(dir, True) | |
549 | if getkind(s.st_mode) in _wantedkinds |
|
571 | if getkind(s.st_mode) in _wantedkinds | |
550 | } |
|
572 | } | |
551 | except OSError as err: |
|
573 | except OSError as err: | |
552 | # Python >= 2.5 returns ENOENT and adds winerror field |
|
574 | # Python >= 2.5 returns ENOENT and adds winerror field | |
553 | # EINVAL is raised if dir is not a directory. |
|
575 | # EINVAL is raised if dir is not a directory. | |
554 | if err.errno not in (errno.ENOENT, errno.EINVAL, errno.ENOTDIR): |
|
576 | if err.errno not in (errno.ENOENT, errno.EINVAL, errno.ENOTDIR): | |
555 | raise |
|
577 | raise | |
556 | dmap = {} |
|
578 | dmap = {} | |
557 | cache = dircache.setdefault(dir, dmap) |
|
579 | cache = dircache.setdefault(dir, dmap) | |
558 | yield cache.get(base, None) |
|
580 | yield cache.get(base, None) | |
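
The point of the clustering is that one directory listing replaces a stat()
per file. A rough stdlib equivalent (os.scandir instead of this module's
listdir, and without the normcase/file-kind filtering of the real code):

    import os

    def statfiles_sketch(files):
        dircache = {}  # dirname -> {basename: stat result}
        for f in files:
            d, base = os.path.split(f)
            d = d or '.'
            if d not in dircache:
                try:
                    with os.scandir(d) as it:
                        dircache[d] = {
                            e.name: e.stat(follow_symlinks=False) for e in it
                        }
                except OSError:
                    dircache[d] = {}
            yield dircache[d].get(base)
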
559 |
|
581 | |||
560 |
|
582 | |||
561 | def username(uid=None): |
|
583 | def username(uid=None): | |
562 | """Return the name of the user with the given uid. |
|
584 | """Return the name of the user with the given uid. | |
563 |
|
585 | |||
564 | If uid is None, return the name of the current user.""" |
|
586 | If uid is None, return the name of the current user.""" | |
565 | if not uid: |
|
587 | if not uid: | |
566 | return pycompat.fsencode(getpass.getuser()) |
|
588 | return pycompat.fsencode(getpass.getuser()) | |
567 | return None |
|
589 | return None | |
568 |
|
590 | |||
569 |
|
591 | |||
570 | def groupname(gid=None): |
|
592 | def groupname(gid=None): | |
571 | """Return the name of the group with the given gid. |
|
593 | """Return the name of the group with the given gid. | |
572 |
|
594 | |||
573 | If gid is None, return the name of the current group.""" |
|
595 | If gid is None, return the name of the current group.""" | |
574 | return None |
|
596 | return None | |
575 |
|
597 | |||
576 |
|
598 | |||
577 | def readlink(pathname): |
|
599 | def readlink(pathname): | |
578 | return pycompat.fsencode(os.readlink(pycompat.fsdecode(pathname))) |
|
600 | return pycompat.fsencode(os.readlink(pycompat.fsdecode(pathname))) | |
579 |
|
601 | |||
580 |
|
602 | |||
581 | def removedirs(name): |
|
603 | def removedirs(name): | |
582 | """special version of os.removedirs that does not remove symlinked |
|
604 | """special version of os.removedirs that does not remove symlinked | |
583 | directories or junction points if they actually contain files""" |
|
605 | directories or junction points if they actually contain files""" | |
584 | if listdir(name): |
|
606 | if listdir(name): | |
585 | return |
|
607 | return | |
586 | os.rmdir(name) |
|
608 | os.rmdir(name) | |
587 | head, tail = os.path.split(name) |
|
609 | head, tail = os.path.split(name) | |
588 | if not tail: |
|
610 | if not tail: | |
589 | head, tail = os.path.split(head) |
|
611 | head, tail = os.path.split(head) | |
590 | while head and tail: |
|
612 | while head and tail: | |
591 | try: |
|
613 | try: | |
592 | if listdir(head): |
|
614 | if listdir(head): | |
593 | return |
|
615 | return | |
594 | os.rmdir(head) |
|
616 | os.rmdir(head) | |
595 | except (ValueError, OSError): |
|
617 | except (ValueError, OSError): | |
596 | break |
|
618 | break | |
597 | head, tail = os.path.split(head) |
|
619 | head, tail = os.path.split(head) | |
598 |
|
620 | |||
599 |
|
621 | |||
600 | def rename(src, dst): |
|
622 | def rename(src, dst): | |
601 | '''atomically rename file src to dst, replacing dst if it exists''' |
|
623 | '''atomically rename file src to dst, replacing dst if it exists''' | |
602 | try: |
|
624 | try: | |
603 | os.rename(src, dst) |
|
625 | os.rename(src, dst) | |
604 | except OSError as e: |
|
626 | except OSError as e: | |
605 | if e.errno != errno.EEXIST: |
|
627 | if e.errno != errno.EEXIST: | |
606 | raise |
|
628 | raise | |
607 | unlink(dst) |
|
629 | unlink(dst) | |
608 | os.rename(src, dst) |
|
630 | os.rename(src, dst) | |
609 |
|
631 | |||
610 |
|
632 | |||
611 | def gethgcmd(): |
|
633 | def gethgcmd(): | |
612 | return [encoding.strtolocal(arg) for arg in [sys.executable] + sys.argv[:1]] |
|
634 | return [encoding.strtolocal(arg) for arg in [sys.executable] + sys.argv[:1]] | |
613 |
|
635 | |||
614 |
|
636 | |||
615 | def groupmembers(name): |
|
637 | def groupmembers(name): | |
616 | # Don't support groups on Windows for now |
|
638 | # Don't support groups on Windows for now | |
617 | raise KeyError |
|
639 | raise KeyError | |
618 |
|
640 | |||
619 |
|
641 | |||
620 | def isexec(f): |
|
642 | def isexec(f): | |
621 | return False |
|
643 | return False | |
622 |
|
644 | |||
623 |
|
645 | |||
624 | class cachestat(object): |
|
646 | class cachestat(object): | |
625 | def __init__(self, path): |
|
647 | def __init__(self, path): | |
626 | pass |
|
648 | pass | |
627 |
|
649 | |||
628 | def cacheable(self): |
|
650 | def cacheable(self): | |
629 | return False |
|
651 | return False | |
630 |
|
652 | |||
631 |
|
653 | |||
632 | def lookupreg(key, valname=None, scope=None): |
|
654 | def lookupreg(key, valname=None, scope=None): | |
633 | """Look up a key/value name in the Windows registry. |
|
655 | """Look up a key/value name in the Windows registry. | |
634 |
|
656 | |||
635 | valname: value name. If unspecified, the default value for the key |
|
657 | valname: value name. If unspecified, the default value for the key | |
636 | is used. |
|
658 | is used. | |
637 | scope: optionally specify scope for registry lookup, this can be |
|
659 | scope: optionally specify scope for registry lookup, this can be | |
638 | a sequence of scopes to look up in order. Default (CURRENT_USER, |
|
660 | a sequence of scopes to look up in order. Default (CURRENT_USER, | |
639 | LOCAL_MACHINE). |
|
661 | LOCAL_MACHINE). | |
640 | """ |
|
662 | """ | |
641 | if scope is None: |
|
663 | if scope is None: | |
642 | scope = (winreg.HKEY_CURRENT_USER, winreg.HKEY_LOCAL_MACHINE) |
|
664 | scope = (winreg.HKEY_CURRENT_USER, winreg.HKEY_LOCAL_MACHINE) | |
643 | elif not isinstance(scope, (list, tuple)): |
|
665 | elif not isinstance(scope, (list, tuple)): | |
644 | scope = (scope,) |
|
666 | scope = (scope,) | |
645 | for s in scope: |
|
667 | for s in scope: | |
646 | try: |
|
668 | try: | |
647 | with winreg.OpenKey(s, encoding.strfromlocal(key)) as hkey: |
|
669 | with winreg.OpenKey(s, encoding.strfromlocal(key)) as hkey: | |
648 | name = valname and encoding.strfromlocal(valname) or valname |
|
670 | name = valname and encoding.strfromlocal(valname) or valname | |
649 | val = winreg.QueryValueEx(hkey, name)[0] |
|
671 | val = winreg.QueryValueEx(hkey, name)[0] | |
650 | # never let a Unicode string escape into the wild |
|
672 | # never let a Unicode string escape into the wild | |
651 | return encoding.unitolocal(val) |
|
673 | return encoding.unitolocal(val) | |
652 | except EnvironmentError: |
|
674 | except EnvironmentError: | |
653 | pass |
|
675 | pass | |
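
A minimal stdlib rendering of the same lookup order (Windows-only; the key and
value below are well-known examples, not something this module necessarily
queries):

    import sys

    if sys.platform == 'win32':
        import winreg

        # Same default scope order as lookupreg() above.
        for scope in (winreg.HKEY_CURRENT_USER, winreg.HKEY_LOCAL_MACHINE):
            try:
                with winreg.OpenKey(
                    scope, r'SOFTWARE\Microsoft\Windows\CurrentVersion'
                ) as hkey:
                    print(winreg.QueryValueEx(hkey, 'ProgramFilesDir')[0])
                    break
            except OSError:
                pass
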
654 |
|
676 | |||
655 |
|
677 | |||
656 | expandglobs = True |
|
678 | expandglobs = True | |
657 |
|
679 | |||
658 |
|
680 | |||
659 | def statislink(st): |
|
681 | def statislink(st): | |
660 | '''check whether a stat result is a symlink''' |
|
682 | '''check whether a stat result is a symlink''' | |
661 | return False |
|
683 | return False | |
662 |
|
684 | |||
663 |
|
685 | |||
664 | def statisexec(st): |
|
686 | def statisexec(st): | |
665 | '''check whether a stat result is an executable file''' |
|
687 | '''check whether a stat result is an executable file''' | |
666 | return False |
|
688 | return False | |
667 |
|
689 | |||
668 |
|
690 | |||
669 | def poll(fds): |
|
691 | def poll(fds): | |
670 | # see posix.py for description |
|
692 | # see posix.py for description | |
671 | raise NotImplementedError() |
|
693 | raise NotImplementedError() | |
672 |
|
694 | |||
673 |
|
695 | |||
674 | def readpipe(pipe): |
|
696 | def readpipe(pipe): | |
675 | """Read all available data from a pipe.""" |
|
697 | """Read all available data from a pipe.""" | |
676 | chunks = [] |
|
698 | chunks = [] | |
677 | while True: |
|
699 | while True: | |
678 | size = win32.peekpipe(pipe) |
|
700 | size = win32.peekpipe(pipe) | |
679 | if not size: |
|
701 | if not size: | |
680 | break |
|
702 | break | |
681 |
|
703 | |||
682 | s = pipe.read(size) |
|
704 | s = pipe.read(size) | |
683 | if not s: |
|
705 | if not s: | |
684 | break |
|
706 | break | |
685 | chunks.append(s) |
|
707 | chunks.append(s) | |
686 |
|
708 | |||
687 | return b''.join(chunks) |
|
709 | return b''.join(chunks) | |
688 |
|
710 | |||
689 |
|
711 | |||
690 | def bindunixsocket(sock, path): |
|
712 | def bindunixsocket(sock, path): | |
691 | raise NotImplementedError('unsupported platform') |
|
713 | raise NotImplementedError('unsupported platform') |
@@ -1,1120 +1,1123 b'' | |||||
1 | from __future__ import absolute_import, print_function |
|
1 | from __future__ import absolute_import, print_function | |
2 |
|
2 | |||
3 | import distutils.version |
|
3 | import distutils.version | |
4 | import os |
|
4 | import os | |
5 | import re |
|
5 | import re | |
6 | import socket |
|
6 | import socket | |
7 | import stat |
|
7 | import stat | |
8 | import subprocess |
|
8 | import subprocess | |
9 | import sys |
|
9 | import sys | |
10 | import tempfile |
|
10 | import tempfile | |
11 |
|
11 | |||
12 | tempprefix = 'hg-hghave-' |
|
12 | tempprefix = 'hg-hghave-' | |
13 |
|
13 | |||
14 | checks = { |
|
14 | checks = { | |
15 | "true": (lambda: True, "yak shaving"), |
|
15 | "true": (lambda: True, "yak shaving"), | |
16 | "false": (lambda: False, "nail clipper"), |
|
16 | "false": (lambda: False, "nail clipper"), | |
17 | "known-bad-output": (lambda: True, "use for currently known bad output"), |
|
17 | "known-bad-output": (lambda: True, "use for currently known bad output"), | |
18 | "missing-correct-output": (lambda: False, "use for missing good output"), |
|
18 | "missing-correct-output": (lambda: False, "use for missing good output"), | |
19 | } |
|
19 | } | |
20 |
|
20 | |||
21 | try: |
|
21 | try: | |
22 | import msvcrt |
|
22 | import msvcrt | |
23 |
|
23 | |||
24 | msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) |
|
24 | msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) | |
25 | msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY) |
|
25 | msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY) | |
26 | except ImportError: |
|
26 | except ImportError: | |
27 | pass |
|
27 | pass | |
28 |
|
28 | |||
29 | stdout = getattr(sys.stdout, 'buffer', sys.stdout) |
|
29 | stdout = getattr(sys.stdout, 'buffer', sys.stdout) | |
30 | stderr = getattr(sys.stderr, 'buffer', sys.stderr) |
|
30 | stderr = getattr(sys.stderr, 'buffer', sys.stderr) | |
31 |
|
31 | |||
32 | if sys.version_info[0] >= 3: |
|
32 | if sys.version_info[0] >= 3: | |
33 |
|
33 | |||
34 | def _sys2bytes(p): |
|
34 | def _sys2bytes(p): | |
35 | if p is None: |
|
35 | if p is None: | |
36 | return p |
|
36 | return p | |
37 | return p.encode('utf-8') |
|
37 | return p.encode('utf-8') | |
38 |
|
38 | |||
39 | def _bytes2sys(p): |
|
39 | def _bytes2sys(p): | |
40 | if p is None: |
|
40 | if p is None: | |
41 | return p |
|
41 | return p | |
42 | return p.decode('utf-8') |
|
42 | return p.decode('utf-8') | |
43 |
|
43 | |||
44 |
|
44 | |||
45 | else: |
|
45 | else: | |
46 |
|
46 | |||
47 | def _sys2bytes(p): |
|
47 | def _sys2bytes(p): | |
48 | return p |
|
48 | return p | |
49 |
|
49 | |||
50 | _bytes2sys = _sys2bytes |
|
50 | _bytes2sys = _sys2bytes | |
51 |
|
51 | |||
52 |
|
52 | |||
53 | def check(name, desc): |
|
53 | def check(name, desc): | |
54 | """Registers a check function for a feature.""" |
|
54 | """Registers a check function for a feature.""" | |
55 |
|
55 | |||
56 | def decorator(func): |
|
56 | def decorator(func): | |
57 | checks[name] = (func, desc) |
|
57 | checks[name] = (func, desc) | |
58 | return func |
|
58 | return func | |
59 |
|
59 | |||
60 | return decorator |
|
60 | return decorator | |
61 |
|
61 | |||
62 |
|
62 | |||
63 | def checkvers(name, desc, vers): |
|
63 | def checkvers(name, desc, vers): | |
64 | """Registers a check function for each of a series of versions. |
|
64 | """Registers a check function for each of a series of versions. | |
65 |
|
65 | |||
66 | vers can be a list or an iterator. |
|
66 | vers can be a list or an iterator. | |
67 |
|
67 | |||
68 | Produces a series of feature checks that have the form <name><vers> without |
|
68 | Produces a series of feature checks that have the form <name><vers> without | |
69 | any punctuation (even if there's punctuation in 'vers'; i.e. this produces |
|
69 | any punctuation (even if there's punctuation in 'vers'; i.e. this produces | |
70 | 'py38', not 'py3.8' or 'py-38').""" |
|
70 | 'py38', not 'py3.8' or 'py-38').""" | |
71 |
|
71 | |||
72 | def decorator(func): |
|
72 | def decorator(func): | |
73 | def funcv(v): |
|
73 | def funcv(v): | |
74 | def f(): |
|
74 | def f(): | |
75 | return func(v) |
|
75 | return func(v) | |
76 |
|
76 | |||
77 | return f |
|
77 | return f | |
78 |
|
78 | |||
79 | for v in vers: |
|
79 | for v in vers: | |
80 | v = str(v) |
|
80 | v = str(v) | |
81 | f = funcv(v) |
|
81 | f = funcv(v) | |
82 | checks['%s%s' % (name, v.replace('.', ''))] = (f, desc % v) |
|
82 | checks['%s%s' % (name, v.replace('.', ''))] = (f, desc % v) | |
83 | return func |
|
83 | return func | |
84 |
|
84 | |||
85 | return decorator |
|
85 | return decorator | |
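
For instance, a registration like the following (illustrative, not part of the
patch) would add the feature names 'py38' and 'py39' to the checks dict, with
the punctuation stripped as the docstring says:

    import sys

    @checkvers("py", "Python >= %s", ("3.8", "3.9"))
    def has_py_range(v):
        major, minor = v.split('.')[0:2]
        return sys.version_info >= (int(major), int(minor))

    assert 'py38' in checks and 'py39' in checks
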
86 |
|
86 | |||
87 |
|
87 | |||
88 | def checkfeatures(features): |
|
88 | def checkfeatures(features): | |
89 | result = { |
|
89 | result = { | |
90 | 'error': [], |
|
90 | 'error': [], | |
91 | 'missing': [], |
|
91 | 'missing': [], | |
92 | 'skipped': [], |
|
92 | 'skipped': [], | |
93 | } |
|
93 | } | |
94 |
|
94 | |||
95 | for feature in features: |
|
95 | for feature in features: | |
96 | negate = feature.startswith('no-') |
|
96 | negate = feature.startswith('no-') | |
97 | if negate: |
|
97 | if negate: | |
98 | feature = feature[3:] |
|
98 | feature = feature[3:] | |
99 |
|
99 | |||
100 | if feature not in checks: |
|
100 | if feature not in checks: | |
101 | result['missing'].append(feature) |
|
101 | result['missing'].append(feature) | |
102 | continue |
|
102 | continue | |
103 |
|
103 | |||
104 | check, desc = checks[feature] |
|
104 | check, desc = checks[feature] | |
105 | try: |
|
105 | try: | |
106 | available = check() |
|
106 | available = check() | |
107 | except Exception: |
|
107 | except Exception: | |
108 | result['error'].append('hghave check failed: %s' % feature) |
|
108 | result['error'].append('hghave check failed: %s' % feature) | |
109 | continue |
|
109 | continue | |
110 |
|
110 | |||
111 | if not negate and not available: |
|
111 | if not negate and not available: | |
112 | result['skipped'].append('missing feature: %s' % desc) |
|
112 | result['skipped'].append('missing feature: %s' % desc) | |
113 | elif negate and available: |
|
113 | elif negate and available: | |
114 | result['skipped'].append('system supports %s' % desc) |
|
114 | result['skipped'].append('system supports %s' % desc) | |
115 |
|
115 | |||
116 | return result |
|
116 | return result | |
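
Worked example using the built-in "true"/"false" entries registered at the top
of this file:

    result = checkfeatures(['true', 'no-true', 'false', 'nonsense'])
    # 'true'     -> available and not negated: no entry anywhere
    # 'no-true'  -> negated but available: skipped, "system supports yak shaving"
    # 'false'    -> not available: skipped, "missing feature: nail clipper"
    # 'nonsense' -> not registered: missing
    assert result['missing'] == ['nonsense']
    assert len(result['skipped']) == 2
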
117 |
|
117 | |||
118 |
|
118 | |||
119 | def require(features): |
|
119 | def require(features): | |
120 | """Require that features are available, exiting if not.""" |
|
120 | """Require that features are available, exiting if not.""" | |
121 | result = checkfeatures(features) |
|
121 | result = checkfeatures(features) | |
122 |
|
122 | |||
123 | for missing in result['missing']: |
|
123 | for missing in result['missing']: | |
124 | stderr.write( |
|
124 | stderr.write( | |
125 | ('skipped: unknown feature: %s\n' % missing).encode('utf-8') |
|
125 | ('skipped: unknown feature: %s\n' % missing).encode('utf-8') | |
126 | ) |
|
126 | ) | |
127 | for msg in result['skipped']: |
|
127 | for msg in result['skipped']: | |
128 | stderr.write(('skipped: %s\n' % msg).encode('utf-8')) |
|
128 | stderr.write(('skipped: %s\n' % msg).encode('utf-8')) | |
129 | for msg in result['error']: |
|
129 | for msg in result['error']: | |
130 | stderr.write(('%s\n' % msg).encode('utf-8')) |
|
130 | stderr.write(('%s\n' % msg).encode('utf-8')) | |
131 |
|
131 | |||
132 | if result['missing']: |
|
132 | if result['missing']: | |
133 | sys.exit(2) |
|
133 | sys.exit(2) | |
134 |
|
134 | |||
135 | if result['skipped'] or result['error']: |
|
135 | if result['skipped'] or result['error']: | |
136 | sys.exit(1) |
|
136 | sys.exit(1) | |
137 |
|
137 | |||
138 |
|
138 | |||
139 | def matchoutput(cmd, regexp, ignorestatus=False): |
|
139 | def matchoutput(cmd, regexp, ignorestatus=False): | |
140 | """Return the match object if cmd executes successfully and its output |
|
140 | """Return the match object if cmd executes successfully and its output | |
141 | is matched by the supplied regular expression. |
|
141 | is matched by the supplied regular expression. | |
142 | """ |
|
142 | """ | |
143 |
|
143 | |||
144 | # Tests on Windows have to fake USERPROFILE to point to the test area so |
|
144 | # Tests on Windows have to fake USERPROFILE to point to the test area so | |
145 | # that `~` is properly expanded on py3.8+. However, some tools like black |
|
145 | # that `~` is properly expanded on py3.8+. However, some tools like black | |
146 | # make calls that need the real USERPROFILE in order to run `foo --version`. |
|
146 | # make calls that need the real USERPROFILE in order to run `foo --version`. | |
147 | env = os.environ |
|
147 | env = os.environ | |
148 | if os.name == 'nt': |
|
148 | if os.name == 'nt': | |
149 | env = os.environ.copy() |
|
149 | env = os.environ.copy() | |
150 | env['USERPROFILE'] = env['REALUSERPROFILE'] |
|
150 | env['USERPROFILE'] = env['REALUSERPROFILE'] | |
151 |
|
151 | |||
152 | r = re.compile(regexp) |
|
152 | r = re.compile(regexp) | |
153 | p = subprocess.Popen( |
|
153 | p = subprocess.Popen( | |
154 | cmd, |
|
154 | cmd, | |
155 | shell=True, |
|
155 | shell=True, | |
156 | stdout=subprocess.PIPE, |
|
156 | stdout=subprocess.PIPE, | |
157 | stderr=subprocess.STDOUT, |
|
157 | stderr=subprocess.STDOUT, | |
158 | env=env, |
|
158 | env=env, | |
159 | ) |
|
159 | ) | |
160 | s = p.communicate()[0] |
|
160 | s = p.communicate()[0] | |
161 | ret = p.returncode |
|
161 | ret = p.returncode | |
162 | return (ignorestatus or not ret) and r.search(s) |
|
162 | return (ignorestatus or not ret) and r.search(s) | |
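
A standalone equivalent of a typical probe such as has_git() below (the
pattern is bytes because the child's output is read as bytes):

    import re
    import subprocess

    p = subprocess.Popen(
        'git --version 2>&1',
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )
    out = p.communicate()[0]
    print(bool(p.returncode == 0 and re.search(br'^git version', out)))
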
163 |
|
163 | |||
164 |
|
164 | |||
165 | @check("baz", "GNU Arch baz client") |
|
165 | @check("baz", "GNU Arch baz client") | |
166 | def has_baz(): |
|
166 | def has_baz(): | |
167 | return matchoutput('baz --version 2>&1', br'baz Bazaar version') |
|
167 | return matchoutput('baz --version 2>&1', br'baz Bazaar version') | |
168 |
|
168 | |||
169 |
|
169 | |||
170 | @check("bzr", "Canonical's Bazaar client") |
|
170 | @check("bzr", "Canonical's Bazaar client") | |
171 | def has_bzr(): |
|
171 | def has_bzr(): | |
172 | try: |
|
172 | try: | |
173 | import bzrlib |
|
173 | import bzrlib | |
174 | import bzrlib.bzrdir |
|
174 | import bzrlib.bzrdir | |
175 | import bzrlib.errors |
|
175 | import bzrlib.errors | |
176 | import bzrlib.revision |
|
176 | import bzrlib.revision | |
177 | import bzrlib.revisionspec |
|
177 | import bzrlib.revisionspec | |
178 |
|
178 | |||
179 | bzrlib.revisionspec.RevisionSpec |
|
179 | bzrlib.revisionspec.RevisionSpec | |
180 | return bzrlib.__doc__ is not None |
|
180 | return bzrlib.__doc__ is not None | |
181 | except (AttributeError, ImportError): |
|
181 | except (AttributeError, ImportError): | |
182 | return False |
|
182 | return False | |
183 |
|
183 | |||
184 |
|
184 | |||
185 | @checkvers("bzr", "Canonical's Bazaar client >= %s", (1.14,)) |
|
185 | @checkvers("bzr", "Canonical's Bazaar client >= %s", (1.14,)) | |
186 | def has_bzr_range(v): |
|
186 | def has_bzr_range(v): | |
187 | major, minor = v.split('rc')[0].split('.')[0:2] |
|
187 | major, minor = v.split('rc')[0].split('.')[0:2] | |
188 | try: |
|
188 | try: | |
189 | import bzrlib |
|
189 | import bzrlib | |
190 |
|
190 | |||
191 | return bzrlib.__doc__ is not None and bzrlib.version_info[:2] >= ( |
|
191 | return bzrlib.__doc__ is not None and bzrlib.version_info[:2] >= ( | |
192 | int(major), |
|
192 | int(major), | |
193 | int(minor), |
|
193 | int(minor), | |
194 | ) |
|
194 | ) | |
195 | except ImportError: |
|
195 | except ImportError: | |
196 | return False |
|
196 | return False | |
197 |
|
197 | |||
198 |
|
198 | |||
199 | @check("chg", "running with chg") |
|
199 | @check("chg", "running with chg") | |
200 | def has_chg(): |
|
200 | def has_chg(): | |
201 | return 'CHGHG' in os.environ |
|
201 | return 'CHGHG' in os.environ | |
202 |
|
202 | |||
203 |
|
203 | |||
204 | @check("rhg", "running with rhg as 'hg'") |
|
204 | @check("rhg", "running with rhg as 'hg'") | |
205 | def has_rhg(): |
|
205 | def has_rhg(): | |
206 | return 'RHG_INSTALLED_AS_HG' in os.environ |
|
206 | return 'RHG_INSTALLED_AS_HG' in os.environ | |
207 |
|
207 | |||
208 |
|
208 | |||
209 | @check("cvs", "cvs client/server") |
|
209 | @check("cvs", "cvs client/server") | |
210 | def has_cvs(): |
|
210 | def has_cvs(): | |
211 | re = br'Concurrent Versions System.*?server' |
|
211 | re = br'Concurrent Versions System.*?server' | |
212 | return matchoutput('cvs --version 2>&1', re) and not has_msys() |
|
212 | return matchoutput('cvs --version 2>&1', re) and not has_msys() | |
213 |
|
213 | |||
214 |
|
214 | |||
215 | @check("cvs112", "cvs client/server 1.12.* (not cvsnt)") |
|
215 | @check("cvs112", "cvs client/server 1.12.* (not cvsnt)") | |
216 | def has_cvs112(): |
|
216 | def has_cvs112(): | |
217 | re = br'Concurrent Versions System \(CVS\) 1.12.*?server' |
|
217 | re = br'Concurrent Versions System \(CVS\) 1.12.*?server' | |
218 | return matchoutput('cvs --version 2>&1', re) and not has_msys() |
|
218 | return matchoutput('cvs --version 2>&1', re) and not has_msys() | |
219 |
|
219 | |||
220 |
|
220 | |||
221 | @check("cvsnt", "cvsnt client/server") |
|
221 | @check("cvsnt", "cvsnt client/server") | |
222 | def has_cvsnt(): |
|
222 | def has_cvsnt(): | |
223 | re = br'Concurrent Versions System \(CVSNT\) (\d+).(\d+).*\(client/server\)' |
|
223 | re = br'Concurrent Versions System \(CVSNT\) (\d+).(\d+).*\(client/server\)' | |
224 | return matchoutput('cvsnt --version 2>&1', re) |
|
224 | return matchoutput('cvsnt --version 2>&1', re) | |
225 |
|
225 | |||
226 |
|
226 | |||
227 | @check("darcs", "darcs client") |
|
227 | @check("darcs", "darcs client") | |
228 | def has_darcs(): |
|
228 | def has_darcs(): | |
229 | return matchoutput('darcs --version', br'\b2\.([2-9]|\d{2})', True) |
|
229 | return matchoutput('darcs --version', br'\b2\.([2-9]|\d{2})', True) | |
230 |
|
230 | |||
231 |
|
231 | |||
232 | @check("mtn", "monotone client (>= 1.0)") |
|
232 | @check("mtn", "monotone client (>= 1.0)") | |
233 | def has_mtn(): |
|
233 | def has_mtn(): | |
234 | return matchoutput('mtn --version', br'monotone', True) and not matchoutput( |
|
234 | return matchoutput('mtn --version', br'monotone', True) and not matchoutput( | |
235 | 'mtn --version', br'monotone 0\.', True |
|
235 | 'mtn --version', br'monotone 0\.', True | |
236 | ) |
|
236 | ) | |
237 |
|
237 | |||
238 |
|
238 | |||
239 | @check("eol-in-paths", "end-of-lines in paths") |
|
239 | @check("eol-in-paths", "end-of-lines in paths") | |
240 | def has_eol_in_paths(): |
|
240 | def has_eol_in_paths(): | |
241 | try: |
|
241 | try: | |
242 | fd, path = tempfile.mkstemp(dir='.', prefix=tempprefix, suffix='\n\r') |
|
242 | fd, path = tempfile.mkstemp(dir='.', prefix=tempprefix, suffix='\n\r') | |
243 | os.close(fd) |
|
243 | os.close(fd) | |
244 | os.remove(path) |
|
244 | os.remove(path) | |
245 | return True |
|
245 | return True | |
246 | except (IOError, OSError): |
|
246 | except (IOError, OSError): | |
247 | return False |
|
247 | return False | |
248 |
|
248 | |||
249 |
|
249 | |||
250 | @check("execbit", "executable bit") |
|
250 | @check("execbit", "executable bit") | |
251 | def has_executablebit(): |
|
251 | def has_executablebit(): | |
252 | try: |
|
252 | try: | |
253 | EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH |
|
253 | EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH | |
254 | fh, fn = tempfile.mkstemp(dir='.', prefix=tempprefix) |
|
254 | fh, fn = tempfile.mkstemp(dir='.', prefix=tempprefix) | |
255 | try: |
|
255 | try: | |
256 | os.close(fh) |
|
256 | os.close(fh) | |
257 | m = os.stat(fn).st_mode & 0o777 |
|
257 | m = os.stat(fn).st_mode & 0o777 | |
258 | new_file_has_exec = m & EXECFLAGS |
|
258 | new_file_has_exec = m & EXECFLAGS | |
259 | os.chmod(fn, m ^ EXECFLAGS) |
|
259 | os.chmod(fn, m ^ EXECFLAGS) | |
260 | exec_flags_cannot_flip = (os.stat(fn).st_mode & 0o777) == m |
|
260 | exec_flags_cannot_flip = (os.stat(fn).st_mode & 0o777) == m | |
261 | finally: |
|
261 | finally: | |
262 | os.unlink(fn) |
|
262 | os.unlink(fn) | |
263 | except (IOError, OSError): |
|
263 | except (IOError, OSError): | |
264 | # we don't care, the user probably won't be able to commit anyway |
|
264 | # we don't care, the user probably won't be able to commit anyway | |
265 | return False |
|
265 | return False | |
266 | return not (new_file_has_exec or exec_flags_cannot_flip) |
|
266 | return not (new_file_has_exec or exec_flags_cannot_flip) | |
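
The XOR flip in isolation: toggling the three exec bits of a typical
fresh-file mode, and back again:

    import stat

    EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH  # 0o111
    print(oct(0o644 ^ EXECFLAGS))  # -> 0o755
    print(oct(0o755 ^ EXECFLAGS))  # -> 0o644
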
267 |
|
267 | |||
268 |
|
268 | |||
269 | @check("icasefs", "case insensitive file system") |
|
269 | @check("icasefs", "case insensitive file system") | |
270 | def has_icasefs(): |
|
270 | def has_icasefs(): | |
271 | # Stolen from mercurial.util |
|
271 | # Stolen from mercurial.util | |
272 | fd, path = tempfile.mkstemp(dir='.', prefix=tempprefix) |
|
272 | fd, path = tempfile.mkstemp(dir='.', prefix=tempprefix) | |
273 | os.close(fd) |
|
273 | os.close(fd) | |
274 | try: |
|
274 | try: | |
275 | s1 = os.stat(path) |
|
275 | s1 = os.stat(path) | |
276 | d, b = os.path.split(path) |
|
276 | d, b = os.path.split(path) | |
277 | p2 = os.path.join(d, b.upper()) |
|
277 | p2 = os.path.join(d, b.upper()) | |
278 | if path == p2: |
|
278 | if path == p2: | |
279 | p2 = os.path.join(d, b.lower()) |
|
279 | p2 = os.path.join(d, b.lower()) | |
280 | try: |
|
280 | try: | |
281 | s2 = os.stat(p2) |
|
281 | s2 = os.stat(p2) | |
282 | return s2 == s1 |
|
282 | return s2 == s1 | |
283 | except OSError: |
|
283 | except OSError: | |
284 | return False |
|
284 | return False | |
285 | finally: |
|
285 | finally: | |
286 | os.remove(path) |
|
286 | os.remove(path) | |
287 |
|
287 | |||
288 |
|
288 | |||
289 | @check("fifo", "named pipes") |
|
289 | @check("fifo", "named pipes") | |
290 | def has_fifo(): |
|
290 | def has_fifo(): | |
291 | if getattr(os, "mkfifo", None) is None: |
|
291 | if getattr(os, "mkfifo", None) is None: | |
292 | return False |
|
292 | return False | |
293 | name = tempfile.mktemp(dir='.', prefix=tempprefix) |
|
293 | name = tempfile.mktemp(dir='.', prefix=tempprefix) | |
294 | try: |
|
294 | try: | |
295 | os.mkfifo(name) |
|
295 | os.mkfifo(name) | |
296 | os.unlink(name) |
|
296 | os.unlink(name) | |
297 | return True |
|
297 | return True | |
298 | except OSError: |
|
298 | except OSError: | |
299 | return False |
|
299 | return False | |
300 |
|
300 | |||
301 |
|
301 | |||
302 | @check("killdaemons", 'killdaemons.py support') |
|
302 | @check("killdaemons", 'killdaemons.py support') | |
303 | def has_killdaemons(): |
|
303 | def has_killdaemons(): | |
304 | return True |
|
304 | return True | |
305 |
|
305 | |||
306 |
|
306 | |||
307 | @check("cacheable", "cacheable filesystem") |
|
307 | @check("cacheable", "cacheable filesystem") | |
308 | def has_cacheable_fs(): |
|
308 | def has_cacheable_fs(): | |
309 | from mercurial import util |
|
309 | from mercurial import util | |
310 |
|
310 | |||
311 | fd, path = tempfile.mkstemp(dir='.', prefix=tempprefix) |
|
311 | fd, path = tempfile.mkstemp(dir='.', prefix=tempprefix) | |
312 | os.close(fd) |
|
312 | os.close(fd) | |
313 | try: |
|
313 | try: | |
314 | return util.cachestat(path).cacheable() |
|
314 | return util.cachestat(path).cacheable() | |
315 | finally: |
|
315 | finally: | |
316 | os.remove(path) |
|
316 | os.remove(path) | |
317 |
|
317 | |||
318 |
|
318 | |||
319 | @check("lsprof", "python lsprof module") |
|
319 | @check("lsprof", "python lsprof module") | |
320 | def has_lsprof(): |
|
320 | def has_lsprof(): | |
321 | try: |
|
321 | try: | |
322 | import _lsprof |
|
322 | import _lsprof | |
323 |
|
323 | |||
324 | _lsprof.Profiler # silence unused import warning |
|
324 | _lsprof.Profiler # silence unused import warning | |
325 | return True |
|
325 | return True | |
326 | except ImportError: |
|
326 | except ImportError: | |
327 | return False |
|
327 | return False | |
328 |
|
328 | |||
329 |
|
329 | |||
330 | def _gethgversion(): |
|
330 | def _gethgversion(): | |
331 | m = matchoutput('hg --version --quiet 2>&1', br'(\d+)\.(\d+)') |
|
331 | m = matchoutput('hg --version --quiet 2>&1', br'(\d+)\.(\d+)') | |
332 | if not m: |
|
332 | if not m: | |
333 | return (0, 0) |
|
333 | return (0, 0) | |
334 | return (int(m.group(1)), int(m.group(2))) |
|
334 | return (int(m.group(1)), int(m.group(2))) | |
335 |
|
335 | |||
336 |
|
336 | |||
337 | _hgversion = None |
|
337 | _hgversion = None | |
338 |
|
338 | |||
339 |
|
339 | |||
340 | def gethgversion(): |
|
340 | def gethgversion(): | |
341 | global _hgversion |
|
341 | global _hgversion | |
342 | if _hgversion is None: |
|
342 | if _hgversion is None: | |
343 | _hgversion = _gethgversion() |
|
343 | _hgversion = _gethgversion() | |
344 | return _hgversion |
|
344 | return _hgversion | |
345 |
|
345 | |||
346 |
|
346 | |||
347 | @checkvers( |
|
347 | @checkvers( | |
348 | "hg", "Mercurial >= %s", list([(1.0 * x) / 10 for x in range(9, 99)]) |
|
348 | "hg", "Mercurial >= %s", list([(1.0 * x) / 10 for x in range(9, 99)]) | |
349 | ) |
|
349 | ) | |
350 | def has_hg_range(v): |
|
350 | def has_hg_range(v): | |
351 | major, minor = v.split('.')[0:2] |
|
351 | major, minor = v.split('.')[0:2] | |
352 | return gethgversion() >= (int(major), int(minor)) |
|
352 | return gethgversion() >= (int(major), int(minor)) | |
353 |
|
353 | |||
354 |
|
354 | |||
355 | @check("rust", "Using the Rust extensions") |
|
355 | @check("rust", "Using the Rust extensions") | |
356 | def has_rust(): |
|
356 | def has_rust(): | |
357 | """Check is the mercurial currently running is using some rust code""" |
|
357 | """Check is the mercurial currently running is using some rust code""" | |
358 | cmd = 'hg debuginstall --quiet 2>&1' |
|
358 | cmd = 'hg debuginstall --quiet 2>&1' | |
359 | match = br'checking module policy \(([^)]+)\)' |
|
359 | match = br'checking module policy \(([^)]+)\)' | |
360 | policy = matchoutput(cmd, match) |
|
360 | policy = matchoutput(cmd, match) | |
361 | if not policy: |
|
361 | if not policy: | |
362 | return False |
|
362 | return False | |
363 | return b'rust' in policy.group(1) |
|
363 | return b'rust' in policy.group(1) | |
364 |
|
364 | |||
365 |
|
365 | |||
366 | @check("hg08", "Mercurial >= 0.8") |
|
366 | @check("hg08", "Mercurial >= 0.8") | |
367 | def has_hg08(): |
|
367 | def has_hg08(): | |
368 | if checks["hg09"][0](): |
|
368 | if checks["hg09"][0](): | |
369 | return True |
|
369 | return True | |
370 | return matchoutput('hg help annotate 2>&1', br'--date') |
|
370 | return matchoutput('hg help annotate 2>&1', br'--date') | |
371 |
|
371 | |||
372 |
|
372 | |||
373 | @check("hg07", "Mercurial >= 0.7") |
|
373 | @check("hg07", "Mercurial >= 0.7") | |
374 | def has_hg07(): |
|
374 | def has_hg07(): | |
375 | if checks["hg08"][0](): |
|
375 | if checks["hg08"][0](): | |
376 | return True |
|
376 | return True | |
377 | return matchoutput('hg --version --quiet 2>&1', br'Mercurial Distributed SCM') |
|
377 | return matchoutput('hg --version --quiet 2>&1', br'Mercurial Distributed SCM') | |
378 |
|
378 | |||
379 |
|
379 | |||
380 | @check("hg06", "Mercurial >= 0.6") |
|
380 | @check("hg06", "Mercurial >= 0.6") | |
381 | def has_hg06(): |
|
381 | def has_hg06(): | |
382 | if checks["hg07"][0](): |
|
382 | if checks["hg07"][0](): | |
383 | return True |
|
383 | return True | |
384 | return matchoutput('hg --version --quiet 2>&1', br'Mercurial version') |
|
384 | return matchoutput('hg --version --quiet 2>&1', br'Mercurial version') | |
385 |
|
385 | |||
386 |
|
386 | |||
387 | @check("gettext", "GNU Gettext (msgfmt)") |
|
387 | @check("gettext", "GNU Gettext (msgfmt)") | |
388 | def has_gettext(): |
|
388 | def has_gettext(): | |
389 | return matchoutput('msgfmt --version', br'GNU gettext-tools') |
|
389 | return matchoutput('msgfmt --version', br'GNU gettext-tools') | |
390 |
|
390 | |||
391 |
|
391 | |||
392 | @check("git", "git command line client") |
|
392 | @check("git", "git command line client") | |
393 | def has_git(): |
|
393 | def has_git(): | |
394 | return matchoutput('git --version 2>&1', br'^git version') |
|
394 | return matchoutput('git --version 2>&1', br'^git version') | |
395 |
|
395 | |||
396 |
|
396 | |||
397 | def getgitversion(): |
|
397 | def getgitversion(): | |
398 | m = matchoutput('git --version 2>&1', br'git version (\d+)\.(\d+)') |
|
398 | m = matchoutput('git --version 2>&1', br'git version (\d+)\.(\d+)') | |
399 | if not m: |
|
399 | if not m: | |
400 | return (0, 0) |
|
400 | return (0, 0) | |
401 | return (int(m.group(1)), int(m.group(2))) |
|
401 | return (int(m.group(1)), int(m.group(2))) | |
402 |
|
402 | |||
403 |
|
403 | |||
404 | @check("pygit2", "pygit2 Python library") |
|
404 | @check("pygit2", "pygit2 Python library") | |
405 | def has_pygit2(): |
|
405 | def has_pygit2(): | |
406 | try: |
|
406 | try: | |
407 | import pygit2 |
|
407 | import pygit2 | |
408 |
|
408 | |||
409 | pygit2.Oid # silence unused import |
|
409 | pygit2.Oid # silence unused import | |
410 | return True |
|
410 | return True | |
411 | except ImportError: |
|
411 | except ImportError: | |
412 | return False |
|
412 | return False | |
413 |
|
413 | |||
414 |
|
414 | |||
415 | # https://github.com/git-lfs/lfs-test-server |
|
415 | # https://github.com/git-lfs/lfs-test-server | |
416 | @check("lfs-test-server", "git-lfs test server") |
|
416 | @check("lfs-test-server", "git-lfs test server") | |
417 | def has_lfsserver(): |
|
417 | def has_lfsserver(): | |
418 | exe = 'lfs-test-server' |
|
418 | exe = 'lfs-test-server' | |
419 | if has_windows(): |
|
419 | if has_windows(): | |
420 | exe = 'lfs-test-server.exe' |
|
420 | exe = 'lfs-test-server.exe' | |
421 | return any( |
|
421 | return any( | |
422 | os.access(os.path.join(path, exe), os.X_OK) |
|
422 | os.access(os.path.join(path, exe), os.X_OK) | |
423 | for path in os.environ["PATH"].split(os.pathsep) |
|
423 | for path in os.environ["PATH"].split(os.pathsep) | |
424 | ) |
|
424 | ) | |
425 |
|
425 | |||
426 |
|
426 | |||
427 | @checkvers("git", "git client (with ext::sh support) version >= %s", (1.9,)) |
|
427 | @checkvers("git", "git client (with ext::sh support) version >= %s", (1.9,)) | |
428 | def has_git_range(v): |
|
428 | def has_git_range(v): | |
429 | major, minor = v.split('.')[0:2] |
|
429 | major, minor = v.split('.')[0:2] | |
430 | return getgitversion() >= (int(major), int(minor)) |
|
430 | return getgitversion() >= (int(major), int(minor)) | |
431 |
|
431 | |||
432 |
|
432 | |||
433 | @check("docutils", "Docutils text processing library") |
|
433 | @check("docutils", "Docutils text processing library") | |
434 | def has_docutils(): |
|
434 | def has_docutils(): | |
435 | try: |
|
435 | try: | |
436 | import docutils.core |
|
436 | import docutils.core | |
437 |
|
437 | |||
438 | docutils.core.publish_cmdline # silence unused import |
|
438 | docutils.core.publish_cmdline # silence unused import | |
439 | return True |
|
439 | return True | |
440 | except ImportError: |
|
440 | except ImportError: | |
441 | return False |
|
441 | return False | |
442 |
|
442 | |||
443 |
|
443 | |||
444 | def getsvnversion(): |
|
444 | def getsvnversion(): | |
445 | m = matchoutput('svn --version --quiet 2>&1', br'^(\d+)\.(\d+)') |
|
445 | m = matchoutput('svn --version --quiet 2>&1', br'^(\d+)\.(\d+)') | |
446 | if not m: |
|
446 | if not m: | |
447 | return (0, 0) |
|
447 | return (0, 0) | |
448 | return (int(m.group(1)), int(m.group(2))) |
|
448 | return (int(m.group(1)), int(m.group(2))) | |
449 |
|
449 | |||
450 |
|
450 | |||
451 | @checkvers("svn", "subversion client and admin tools >= %s", (1.3, 1.5)) |
|
451 | @checkvers("svn", "subversion client and admin tools >= %s", (1.3, 1.5)) | |
452 | def has_svn_range(v): |
|
452 | def has_svn_range(v): | |
453 | major, minor = v.split('.')[0:2] |
|
453 | major, minor = v.split('.')[0:2] | |
454 | return getsvnversion() >= (int(major), int(minor)) |
|
454 | return getsvnversion() >= (int(major), int(minor)) | |
455 |
|
455 | |||
456 |
|
456 | |||
457 | @check("svn", "subversion client and admin tools") |
|
457 | @check("svn", "subversion client and admin tools") | |
458 | def has_svn(): |
|
458 | def has_svn(): | |
459 | return matchoutput('svn --version 2>&1', br'^svn, version') and matchoutput( |
|
459 | return matchoutput('svn --version 2>&1', br'^svn, version') and matchoutput( | |
460 | 'svnadmin --version 2>&1', br'^svnadmin, version' |
|
460 | 'svnadmin --version 2>&1', br'^svnadmin, version' | |
461 | ) |
|
461 | ) | |
462 |
|
462 | |||
463 |
|
463 | |||
464 | @check("svn-bindings", "subversion python bindings") |
|
464 | @check("svn-bindings", "subversion python bindings") | |
465 | def has_svn_bindings(): |
|
465 | def has_svn_bindings(): | |
466 | try: |
|
466 | try: | |
467 | import svn.core |
|
467 | import svn.core | |
468 |
|
468 | |||
469 | version = svn.core.SVN_VER_MAJOR, svn.core.SVN_VER_MINOR |
|
469 | version = svn.core.SVN_VER_MAJOR, svn.core.SVN_VER_MINOR | |
470 | if version < (1, 4): |
|
470 | if version < (1, 4): | |
471 | return False |
|
471 | return False | |
472 | return True |
|
472 | return True | |
473 | except ImportError: |
|
473 | except ImportError: | |
474 | return False |
|
474 | return False | |
475 |
|
475 | |||
476 |
|
476 | |||
477 | @check("p4", "Perforce server and client") |
|
477 | @check("p4", "Perforce server and client") | |
478 | def has_p4(): |
|
478 | def has_p4(): | |
479 | return matchoutput('p4 -V', br'Rev\. P4/') and matchoutput( |
|
479 | return matchoutput('p4 -V', br'Rev\. P4/') and matchoutput( | |
480 | 'p4d -V', br'Rev\. P4D/' |
|
480 | 'p4d -V', br'Rev\. P4D/' | |
481 | ) |
|
481 | ) | |
482 |
|
482 | |||
483 |
|
483 | |||
484 | @check("symlink", "symbolic links") |
|
484 | @check("symlink", "symbolic links") | |
485 | def has_symlink(): |
|
485 | def has_symlink(): | |
486 | # mercurial.windows.checklink() is a hard 'no' at the moment |
|
486 | # mercurial.windows.checklink() is a hard 'no' at the moment | |
487 | if os.name == 'nt' or getattr(os, "symlink", None) is None: |
|
487 | if os.name == 'nt' or getattr(os, "symlink", None) is None: | |
488 | return False |
|
488 | return False | |
489 | name = tempfile.mktemp(dir='.', prefix=tempprefix) |
|
489 | name = tempfile.mktemp(dir='.', prefix=tempprefix) | |
490 | try: |
|
490 | try: | |
491 | os.symlink(".", name) |
|
491 | os.symlink(".", name) | |
492 | os.unlink(name) |
|
492 | os.unlink(name) | |
493 | return True |
|
493 | return True | |
494 | except (OSError, AttributeError): |
|
494 | except (OSError, AttributeError): | |
495 | return False |
|
495 | return False | |
496 |
|
496 | |||
497 |
|
497 | |||
498 | @check("hardlink", "hardlinks") |
|
498 | @check("hardlink", "hardlinks") | |
499 | def has_hardlink(): |
|
499 | def has_hardlink(): | |
500 | from mercurial import util |
|
500 | from mercurial import util | |
501 |
|
501 | |||
502 | fh, fn = tempfile.mkstemp(dir='.', prefix=tempprefix) |
|
502 | fh, fn = tempfile.mkstemp(dir='.', prefix=tempprefix) | |
503 | os.close(fh) |
|
503 | os.close(fh) | |
504 | name = tempfile.mktemp(dir='.', prefix=tempprefix) |
|
504 | name = tempfile.mktemp(dir='.', prefix=tempprefix) | |
505 | try: |
|
505 | try: | |
506 | util.oslink(_sys2bytes(fn), _sys2bytes(name)) |
|
506 | util.oslink(_sys2bytes(fn), _sys2bytes(name)) | |
507 | os.unlink(name) |
|
507 | os.unlink(name) | |
508 | return True |
|
508 | return True | |
509 | except OSError: |
|
509 | except OSError: | |
510 | return False |
|
510 | return False | |
511 | finally: |
|
511 | finally: | |
512 | os.unlink(fn) |
|
512 | os.unlink(fn) | |
513 |
|
513 | |||
514 |
|
514 | |||
@check("hardlink-whitelisted", "hardlinks on whitelisted filesystems")
def has_hardlink_whitelisted():
    from mercurial import util

    try:
        fstype = util.getfstype(b'.')
    except OSError:
        return False
    return fstype in util._hardlinkfswhitelist


@check("rmcwd", "can remove current working directory")
def has_rmcwd():
    ocwd = os.getcwd()
    temp = tempfile.mkdtemp(dir='.', prefix=tempprefix)
    try:
        os.chdir(temp)
        # On Linux, 'rmdir .' isn't allowed, but the other names are okay.
        # On Solaris and Windows, the cwd can't be removed by any names.
        os.rmdir(os.getcwd())
        return True
    except OSError:
        return False
    finally:
        os.chdir(ocwd)
        # clean up temp dir on platforms where cwd can't be removed
        try:
            os.rmdir(temp)
        except OSError:
            pass


@check("tla", "GNU Arch tla client")
def has_tla():
    return matchoutput('tla --version 2>&1', br'The GNU Arch Revision')


@check("gpg", "gpg client")
def has_gpg():
    return matchoutput('gpg --version 2>&1', br'GnuPG')


@check("gpg2", "gpg client v2")
def has_gpg2():
    return matchoutput('gpg --version 2>&1', br'GnuPG[^0-9]+2\.')


@check("gpg21", "gpg client v2.1+")
def has_gpg21():
    return matchoutput('gpg --version 2>&1', br'GnuPG[^0-9]+2\.(?!0)')


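# The file below is created with the default mode 0o666, so after applying
# the umask the observed permission bits must equal ~umask & 0o666.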
@check("unix-permissions", "unix-style permissions")
def has_unix_permissions():
    d = tempfile.mkdtemp(dir='.', prefix=tempprefix)
    try:
        fname = os.path.join(d, 'foo')
        for umask in (0o77, 0o07, 0o22):
            os.umask(umask)
            f = open(fname, 'w')
            f.close()
            mode = os.stat(fname).st_mode
            os.unlink(fname)
            if mode & 0o777 != ~umask & 0o666:
                return False
        return True
    finally:
        os.rmdir(d)


@check("unix-socket", "AF_UNIX socket family")
def has_unix_socket():
    return getattr(socket, 'AF_UNIX', None) is not None


@check("root", "root permissions")
def has_root():
    return getattr(os, 'geteuid', None) and os.geteuid() == 0


@check("pyflakes", "Pyflakes python linter")
def has_pyflakes():
    try:
        import pyflakes

        pyflakes.__version__
    except ImportError:
        return False
    else:
        return True


@check("pylint", "Pylint python linter")
def has_pylint():
    return matchoutput("pylint --help", br"Usage:[ ]+pylint", True)


@check("clang-format", "clang-format C code formatter (>= 11)")
def has_clang_format():
    m = matchoutput('clang-format --version', br'clang-format version (\d+)')
    # style changed somewhere between 10.x and 11.x
    return m and int(m.group(1)) >= 11


@check("jshint", "JSHint static code analysis tool")
def has_jshint():
    return matchoutput("jshint --version 2>&1", br"jshint v")


@check("pygments", "Pygments source highlighting library")
def has_pygments():
    try:
        import pygments

        pygments.highlight  # silence unused import warning
        return True
    except ImportError:
        return False


@check("pygments25", "Pygments version >= 2.5")
def pygments25():
    try:
        import pygments

        v = pygments.__version__
    except ImportError:
        return False

    parts = v.split(".")
    major = int(parts[0])
    minor = int(parts[1])

    return (major, minor) >= (2, 5)


@check("outer-repo", "outer repo")
def has_outer_repo():
    # failing for any reason other than 'no repo' implies there is a repo
    return not matchoutput('hg root 2>&1', br'abort: no repository found', True)


@check("ssl", "ssl module available")
def has_ssl():
    try:
        import ssl

        ssl.CERT_NONE
        return True
    except ImportError:
        return False


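# Build an SSL context the way sslutil would: prefer the CA file it selects,
# fall back to the interpreter's default trust store, then check whether any
# CA certificates were actually loaded.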
@check("defaultcacertsloaded", "detected presence of loaded system CA certs")
def has_defaultcacertsloaded():
    import ssl
    from mercurial import sslutil, ui as uimod

    ui = uimod.ui.load()
    cafile = sslutil._defaultcacerts(ui)
    ctx = ssl.create_default_context()
    if cafile:
        ctx.load_verify_locations(cafile=cafile)
    else:
        ctx.load_default_certs()

    return len(ctx.get_ca_certs()) > 0


@check("tls1.2", "TLS 1.2 protocol support")
def has_tls1_2():
    from mercurial import sslutil

    return b'tls1.2' in sslutil.supportedprotocols


@check("windows", "Windows")
def has_windows():
    return os.name == 'nt'


@check("system-sh", "system() uses sh")
def has_system_sh():
    return os.name != 'nt'


@check("serve", "platform and python can manage 'hg serve -d'")
def has_serve():
    return True


@check("setprocname", "whether osutil.setprocname is available or not")
def has_setprocname():
    try:
        from mercurial.utils import procutil

        procutil.setprocname
        return True
    except AttributeError:
        return False


@check("test-repo", "running tests from repository")
def has_test_repo():
    t = os.environ["TESTDIR"]
    return os.path.isdir(os.path.join(t, "..", ".hg"))


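# Tests touching third-party services are opt-in; they only run when the
# environment sets HGTESTS_ALLOW_NETIO to "1".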
@check("network-io", "whether tests are allowed to access 3rd party services")
def has_network_io():
    t = os.environ.get("HGTESTS_ALLOW_NETIO")
    return t == "1"


@check("curses", "terminfo compiler and curses module")
def has_curses():
    try:
        import curses

        curses.COLOR_BLUE

        # Windows doesn't have a `tic` executable, but the windows_curses
        # package is sufficient to run the tests without it.
        if os.name == 'nt':
            return True

        return has_tic()

    except (ImportError, AttributeError):
        return False


@check("tic", "terminfo compiler")
def has_tic():
    return matchoutput('test -x "`which tic`"', br'')


@check("xz", "xz compression utility")
def has_xz():
    # When Windows invokes a subprocess in shell mode, it uses `cmd.exe`, which
    # only knows `where`, not `which`. So invoke MSYS shell explicitly.
    return matchoutput("sh -c 'test -x \"`which xz`\"'", b'')


@check("msys", "Windows with MSYS")
def has_msys():
    return os.getenv('MSYSTEM')


@check("aix", "AIX")
def has_aix():
    return sys.platform.startswith("aix")


@check("osx", "OS X")
def has_osx():
    return sys.platform == 'darwin'


@check("osxpackaging", "OS X packaging tools")
def has_osxpackaging():
    try:
        return (
            matchoutput('pkgbuild', br'Usage: pkgbuild ', ignorestatus=1)
            and matchoutput(
                'productbuild', br'Usage: productbuild ', ignorestatus=1
            )
            and matchoutput('lsbom', br'Usage: lsbom', ignorestatus=1)
            and matchoutput('xar --help', br'Usage: xar', ignorestatus=1)
        )
    except ImportError:
        return False


@check('linuxormacos', 'Linux or MacOS')
def has_linuxormacos():
    # This isn't a perfect test for MacOS. But it is sufficient for our needs.
    return sys.platform.startswith(('linux', 'darwin'))


@check("docker", "docker support")
def has_docker():
    pat = br'A self-sufficient runtime for'
    if matchoutput('docker --help', pat):
        if 'linux' not in sys.platform:
            # TODO: in theory we should be able to test docker-based
            # package creation on non-linux using boot2docker, but in
            # practice that requires extra coordination to make sure
            # $TESTTEMP is going to be visible at the same path to the
            # boot2docker VM. If we figure out how to verify that, we
            # can use the following instead of just saying False:
            # return 'DOCKER_HOST' in os.environ
            return False

        return True
    return False


@check("debhelper", "debian packaging tools")
def has_debhelper():
    # Some versions of dpkg say `dpkg', some say 'dpkg' (` vs ' on the first
    # quote), so just accept anything in that spot.
    dpkg = matchoutput(
        'dpkg --version', br"Debian .dpkg' package management program"
    )
    dh = matchoutput(
        'dh --help', br'dh is a part of debhelper.', ignorestatus=True
    )
    dh_py2 = matchoutput(
        'dh_python2 --help', br'other supported Python versions'
    )
    # debuild comes from the 'devscripts' package, though you might want
    # the 'build-debs' package instead, which has a dependency on devscripts.
    debuild = matchoutput(
        'debuild --help', br'to run debian/rules with given parameter'
    )
    return dpkg and dh and dh_py2 and debuild


@check(
    "debdeps", "debian build dependencies (run dpkg-checkbuilddeps in contrib/)"
)
def has_debdeps():
    # just check exit status (ignoring output)
    path = '%s/../contrib/packaging/debian/control' % os.environ['TESTDIR']
    return matchoutput('dpkg-checkbuilddeps %s' % path, br'')


@check("demandimport", "demandimport enabled")
def has_demandimport():
    # chg disables demandimport intentionally for performance wins.
    return (not has_chg()) and os.environ.get('HGDEMANDIMPORT') != 'disable'


# Add "py27", "py35", ... as possible feature checks. Note that there's no
# punctuation here.
@checkvers("py", "Python >= %s", (2.7, 3.5, 3.6, 3.7, 3.8, 3.9))
def has_python_range(v):
    major, minor = v.split('.')[0:2]
    py_major, py_minor = sys.version_info.major, sys.version_info.minor

    return (py_major, py_minor) >= (int(major), int(minor))


@check("py3", "running with Python 3.x")
def has_py3():
    return 3 == sys.version_info[0]


@check("py3exe", "a Python 3.x interpreter is available")
def has_python3exe():
    py = 'python3'
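    # Windows installs typically provide the 'py' launcher rather than a
    # 'python3' executable on PATH, hence the different probe below.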
    if os.name == 'nt':
        py = 'py -3'
    return matchoutput('%s -V' % py, br'^Python 3.(5|6|7|8|9)')


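# "pure" means no compiled extensions: either HGMODULEPOLICY pins the
# pure-Python modules, or run-tests.py was invoked with --pure (mirrored
# in HGTEST_RUN_TESTS_PURE).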
@check("pure", "running with pure Python code")
def has_pure():
    return any(
        [
            os.environ.get("HGMODULEPOLICY") == "py",
            os.environ.get("HGTEST_RUN_TESTS_PURE") == "--pure",
        ]
    )


@check("slow", "allow slow tests (use --allow-slow-tests)")
def has_slow():
    return os.environ.get('HGTEST_SLOW') == 'slow'


@check("hypothesis", "Hypothesis automated test generation")
def has_hypothesis():
    try:
        import hypothesis

        hypothesis.given
        return True
    except ImportError:
        return False


@check("unziplinks", "unzip(1) understands and extracts symlinks")
def unzip_understands_symlinks():
    return matchoutput('unzip --help', br'Info-ZIP')


@check("zstd", "zstd Python module available")
def has_zstd():
    try:
        import mercurial.zstd

        mercurial.zstd.__version__
        return True
    except ImportError:
        return False


@check("devfull", "/dev/full special file")
def has_dev_full():
    return os.path.exists('/dev/full')


@check("ensurepip", "ensurepip module")
def has_ensurepip():
    try:
        import ensurepip

        ensurepip.bootstrap
        return True
    except ImportError:
        return False


@check("virtualenv", "virtualenv support")
def has_virtualenv():
    try:
        import virtualenv

        # --no-site-package became the default in 1.7 (Nov 2011), and the
        # argument was removed in 20.0 (Feb 2020). Rather than make the
        # script complicated, just ignore ancient versions.
        return int(virtualenv.__version__.split('.')[0]) > 1
    except (AttributeError, ImportError, IndexError):
        return False


@check("fsmonitor", "running tests with fsmonitor")
def has_fsmonitor():
    return 'HGFSMONITOR_TESTS' in os.environ


@check("fuzzywuzzy", "Fuzzy string matching library")
def has_fuzzywuzzy():
    try:
        import fuzzywuzzy

        fuzzywuzzy.__version__
        return True
    except ImportError:
        return False


@check("clang-libfuzzer", "clang new enough to include libfuzzer")
def has_clang_libfuzzer():
    mat = matchoutput('clang --version', br'clang version (\d)')
    if mat:
        # libfuzzer is new in clang 6
        return int(mat.group(1)) > 5
    return False


@check("clang-6.0", "clang 6.0 with version suffix (libfuzzer included)")
def has_clang60():
    return matchoutput('clang-6.0 --version', br'clang version 6\.')


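# The compiled bdiff module only provides xdiffblocks() when built with
# xdiff support, so probing two empty inputs is enough to detect it.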
@check("xdiff", "xdiff algorithm")
def has_xdiff():
    try:
        from mercurial import policy

        bdiff = policy.importmod('bdiff')
        return bdiff.xdiffblocks(b'', b'') == [(0, 0, 0, 0)]
    except (ImportError, AttributeError):
        return False


@check('extraextensions', 'whether tests are running with extra extensions')
def has_extraextensions():
    return 'HGTESTEXTRAEXTENSIONS' in os.environ


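# Example: HGREPOFEATURES="simplestore" adds 'simplestore' and, through the
# 'implies' table below, removes the default revlog-based features.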
def getrepofeatures():
    """Obtain set of repository features in use.

    HGREPOFEATURES can be used to define or remove features. It contains
    a space-delimited list of feature strings. Strings beginning with ``-``
    mean to remove.
    """
    # Default list provided by core.
    features = {
        'bundlerepo',
        'revlogstore',
        'fncache',
    }

    # Features that imply other features.
    implies = {
        'simplestore': ['-revlogstore', '-bundlerepo', '-fncache'],
    }

    for override in os.environ.get('HGREPOFEATURES', '').split(' '):
        if not override:
            continue

        if override.startswith('-'):
            if override[1:] in features:
                features.remove(override[1:])
        else:
            features.add(override)

            for imply in implies.get(override, []):
                if imply.startswith('-'):
                    if imply[1:] in features:
                        features.remove(imply[1:])
                else:
                    features.add(imply)

    return features


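# The repo* checks below let tests require (or exclude) specific storage
# features computed by getrepofeatures().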
@check('reporevlogstore', 'repository using the default revlog store')
def has_reporevlogstore():
    return 'revlogstore' in getrepofeatures()


@check('reposimplestore', 'repository using simple storage extension')
def has_reposimplestore():
    return 'simplestore' in getrepofeatures()


@check('repobundlerepo', 'whether we can open bundle files as repos')
def has_repobundlerepo():
    return 'bundlerepo' in getrepofeatures()


@check('repofncache', 'repository has an fncache')
def has_repofncache():
    return 'fncache' in getrepofeatures()


@check('sqlite', 'sqlite3 module and matching cli is available')
def has_sqlite():
    try:
        import sqlite3

        version = sqlite3.sqlite_version_info
    except ImportError:
        return False

    if version < (3, 8, 3):
        # WITH clause not supported
        return False

    return matchoutput('sqlite3 -version', br'^3\.\d+')


@check('vcr', 'vcr http mocking library (pytest-vcr)')
def has_vcr():
    try:
        import vcr

        vcr.VCR
        return True
    except (ImportError, AttributeError):
        pass
    return False


@check('emacs', 'GNU Emacs')
def has_emacs():
    # Our emacs lisp uses `with-eval-after-load` which is new in emacs
    # 24.4, so we allow emacs 24.4, 24.5, and 25+ (24.5 was the last
    # 24 release)
    return matchoutput('emacs --version', b'GNU Emacs 2(4.4|4.5|5|6|7|8|9)')


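# distutils' StrictVersion understands pre-release suffixes such as
# "20.8b1", which is why the version comparisons below use it rather than
# plain string comparison.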
@check('black', 'the black formatter for python (>= 20.8b1)')
def has_black():
    blackcmd = 'black --version'
    version_regex = b'black, version ([0-9a-b.]+)'
    version = matchoutput(blackcmd, version_regex)
    sv = distutils.version.StrictVersion
    return version and sv(_bytes2sys(version.group(1))) >= sv('20.8b1')


@check('pytype', 'the pytype type checker')
def has_pytype():
    pytypecmd = 'pytype --version'
    version = matchoutput(pytypecmd, b'[0-9a-b.]+')
    sv = distutils.version.StrictVersion
    return version and sv(_bytes2sys(version.group(0))) >= sv('2019.10.17')


@check("rustfmt", "rustfmt tool at version nightly-2020-10-04")
def has_rustfmt():
    # We use Nightly's rustfmt due to current unstable config options.
    return matchoutput(
        '`rustup which --toolchain nightly-2020-10-04 rustfmt` --version',
        b'rustfmt',
    )


@check("cargo", "cargo tool")
def has_cargo():
    return matchoutput('`rustup which cargo` --version', b'cargo')


@check("lzma", "python lzma module")
def has_lzma():
    try:
        import _lzma

        _lzma.FORMAT_XZ
        return True
    except ImportError:
        return False