@@ -1,49 +1,53 @@
 # status.py - Type annotations for status related objects
 #
 # Copyright Matt Harbison <mharbison72@gmail.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import annotations

+import abc
+
 from typing import (
     Iterator,
     Protocol,
 )


 class Status(Protocol):
     """Struct with a list of files per status.

     The 'deleted', 'unknown' and 'ignored' properties are only
     relevant to the working copy.
     """

     modified: list[bytes]
     """The list of files with modifications."""

     added: list[bytes]
     """The list of files that started being tracked."""

     removed: list[bytes]
     """The list of files that stopped being tracked."""

     deleted: list[bytes]
     """The list of files in the working directory that are deleted from the
     file system (but not in the removed state)."""

     unknown: list[bytes]
     """The list of files in the working directory that are not tracked."""

     ignored: list[bytes]
     """The list of files in the working directory that are ignored."""

     clean: list[bytes]
     """The list of files that are not in any other state."""

+    @abc.abstractmethod
     def __iter__(self) -> Iterator[list[bytes]]:
         """Iterates over each of the categories of file lists."""

+    @abc.abstractmethod
     def __repr__(self) -> str:
         """Creates a string representation of the file lists."""
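Aside (not part of the patch): the sketch below illustrates how the new Status protocol is consumed. Any object with the attributes and methods declared above satisfies it structurally, without inheriting from it; scmutil.status additionally subclasses it explicitly in the next hunk. FakeStatus, count_files, and the sample data are invented for this example, and the import path is inferred from the "from .interfaces import status as istatus" line added below.

from __future__ import annotations

from typing import Iterator

from mercurial.interfaces.status import Status  # module introduced by this patch


class FakeStatus:
    """Throwaway object that satisfies Status structurally (no inheritance)."""

    def __init__(self) -> None:
        self.modified: list[bytes] = [b'a.txt']
        self.added: list[bytes] = []
        self.removed: list[bytes] = []
        self.deleted: list[bytes] = []
        self.unknown: list[bytes] = []
        self.ignored: list[bytes] = []
        self.clean: list[bytes] = []

    def __iter__(self) -> Iterator[list[bytes]]:
        yield from (self.modified, self.added, self.removed, self.deleted,
                    self.unknown, self.ignored, self.clean)

    def __repr__(self) -> str:
        return '<FakeStatus modified=%r>' % (self.modified,)


def count_files(st: Status) -> int:
    # A type checker accepts any object matching the protocol here,
    # including mercurial.scmutil.status.
    return sum(len(group) for group in st)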
@@ -1,2527 +1,2529 @@
 # scmutil.py - Mercurial core utility functions
 #
 # Copyright Olivia Mackall <olivia@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import annotations

 import binascii
 import errno
 import glob
 import os
 import posixpath
 import re
 import subprocess
 import typing
 import weakref

 from typing import (
     Callable,
     Dict,
     Iterable,
     Iterator,
     List,
     Optional,
     Set,
     Tuple,
 )

 from .i18n import _
 from .node import (
     bin,
     hex,
     nullrev,
     short,
     wdirrev,
 )
 from .thirdparty import attr

 # Force pytype to use the non-vendored package
 if typing.TYPE_CHECKING:
     # noinspection PyPackageRequirements
     import attr

 from . import (
     copies as copiesmod,
     encoding,
     error,
     match as matchmod,
     obsolete,
     obsutil,
     pathutil,
     phases,
     policy,
     pycompat,
     requirements as requirementsmod,
     revsetlang,
     similar,
     smartset,
     typelib,
     url,
     util,
     vfs,
 )

+from .interfaces import status as istatus
+
 from .utils import (
     hashutil,
     procutil,
     stringutil,
 )

 if pycompat.iswindows:
     from . import scmwindows as scmplatform
 else:
     from . import scmposix as scmplatform

 if typing.TYPE_CHECKING:
     from . import (
         ui as uimod,
     )

 parsers = policy.importmod('parsers')
 rustrevlog = policy.importrust('revlog')

 termsize = scmplatform.termsize


 @attr.s(slots=True, repr=False)
-class status:
+class status(istatus.Status):
     """Struct with a list of files per status.

     The 'deleted', 'unknown' and 'ignored' properties are only
     relevant to the working copy.
     """

     modified = attr.ib(default=attr.Factory(list), type=List[bytes])
     added = attr.ib(default=attr.Factory(list), type=List[bytes])
     removed = attr.ib(default=attr.Factory(list), type=List[bytes])
     deleted = attr.ib(default=attr.Factory(list), type=List[bytes])
     unknown = attr.ib(default=attr.Factory(list), type=List[bytes])
     ignored = attr.ib(default=attr.Factory(list), type=List[bytes])
     clean = attr.ib(default=attr.Factory(list), type=List[bytes])

     def __iter__(self) -> Iterator[List[bytes]]:
         yield self.modified
         yield self.added
         yield self.removed
         yield self.deleted
         yield self.unknown
         yield self.ignored
         yield self.clean

     def __repr__(self) -> str:
         return (
             r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
             r'unknown=%s, ignored=%s, clean=%s>'
         ) % tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)


 def itersubrepos(ctx1, ctx2):
     """find subrepos in ctx1 or ctx2"""
     # Create a (subpath, ctx) mapping where we prefer subpaths from
     # ctx1. The subpaths from ctx2 are important when the .hgsub file
     # has been modified (in ctx2) but not yet committed (in ctx1).
     subpaths = dict.fromkeys(ctx2.substate, ctx2)
     subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

     missing = set()

     for subpath in ctx2.substate:
         if subpath not in ctx1.substate:
             del subpaths[subpath]
             missing.add(subpath)

     for subpath, ctx in sorted(subpaths.items()):
         yield subpath, ctx.sub(subpath)

     # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
     # status and diff will have an accurate result when it does
     # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
     # against itself.
     for subpath in missing:
         yield subpath, ctx2.nullsub(subpath, ctx1)


 def nochangesfound(ui: "uimod.ui", repo, excluded=None) -> None:
     """Report no changes for push/pull, excluded is None or a list of
     nodes excluded from the push/pull.
     """
     secretlist = []
     if excluded:
         for n in excluded:
             ctx = repo[n]
             if ctx.phase() >= phases.secret and not ctx.extinct():
                 secretlist.append(n)

     if secretlist:
         ui.status(
             _(b"no changes found (ignored %d secret changesets)\n")
             % len(secretlist)
         )
     else:
         ui.status(_(b"no changes found\n"))


 def callcatch(ui: "uimod.ui", func: Callable[[], int]) -> int:
     """call func() with global exception handling

     return func() if no exception happens. otherwise do some error handling
     and return an exit code accordingly. does not handle all exceptions.
     """
     coarse_exit_code = -1
     detailed_exit_code = -1
     try:
         try:
             return func()
         except:  # re-raises
             ui.traceback()
             raise
     # Global exception handling, alphabetically
     # Mercurial-specific first, followed by built-in and library exceptions
     except error.LockHeld as inst:
         detailed_exit_code = 20
         if inst.errno == errno.ETIMEDOUT:
             reason = _(b'timed out waiting for lock held by %r') % (
                 pycompat.bytestr(inst.locker)
             )
         else:
             reason = _(b'lock held by %r') % inst.locker
         ui.error(
             _(b"abort: %s: %s\n")
             % (inst.desc or stringutil.forcebytestr(inst.filename), reason)
         )
         if not inst.locker:
             ui.error(_(b"(lock might be very busy)\n"))
     except error.LockUnavailable as inst:
         detailed_exit_code = 20
         ui.error(
             _(b"abort: could not lock %s: %s\n")
             % (
                 inst.desc or stringutil.forcebytestr(inst.filename),
                 encoding.strtolocal(inst.strerror),
             )
         )
     except error.RepoError as inst:
         if isinstance(inst, error.RepoLookupError):
             detailed_exit_code = 10
         ui.error(_(b"abort: %s\n") % inst)
         if inst.hint:
             ui.error(_(b"(%s)\n") % inst.hint)
     except error.ResponseError as inst:
         ui.error(_(b"abort: %s") % inst.args[0])
         msg = inst.args[1]
         if isinstance(msg, type(u'')):
             msg = pycompat.sysbytes(msg)
         if msg is None:
             ui.error(b"\n")
         elif not isinstance(msg, bytes):
             ui.error(b" %r\n" % (msg,))
         elif not msg:
             ui.error(_(b" empty string\n"))
         else:
             ui.error(b"\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
     except error.CensoredNodeError as inst:
         ui.error(_(b"abort: file censored %s\n") % inst)
     except error.WdirUnsupported:
         ui.error(_(b"abort: working directory revision cannot be specified\n"))
     except error.Error as inst:
         if inst.detailed_exit_code is not None:
             detailed_exit_code = inst.detailed_exit_code
         if inst.coarse_exit_code is not None:
             coarse_exit_code = inst.coarse_exit_code
         ui.error(inst.format())
     except error.WorkerError as inst:
         # Don't print a message -- the worker already should have
         return inst.status_code
     except ImportError as inst:
         ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
         m = stringutil.forcebytestr(inst).split()[-1]
         if m in b"mpatch bdiff".split():
             ui.error(_(b"(did you forget to compile extensions?)\n"))
         elif m in b"zlib".split():
             ui.error(_(b"(is your Python install correct?)\n"))
     except util.urlerr.httperror as inst:
         detailed_exit_code = 100
         ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
     except util.urlerr.urlerror as inst:
         detailed_exit_code = 100
         try:  # usually it is in the form (errno, strerror)
             reason = inst.reason.args[1]
         except (AttributeError, IndexError):
             # it might be anything, for example a string
             reason = inst.reason
         if isinstance(reason, str):
             # SSLError of Python 2.7.9 contains a unicode
             reason = encoding.unitolocal(reason)
         ui.error(_(b"abort: error: %s\n") % stringutil.forcebytestr(reason))
     except (IOError, OSError) as inst:
         if hasattr(inst, "args") and inst.args and inst.args[0] == errno.EPIPE:
             pass
         elif getattr(inst, "strerror", None):  # common IOError or OSError
             if getattr(inst, "filename", None) is not None:
                 ui.error(
                     _(b"abort: %s: '%s'\n")
                     % (
                         encoding.strtolocal(inst.strerror),
                         stringutil.forcebytestr(inst.filename),
                     )
                 )
             else:
                 ui.error(_(b"abort: %s\n") % encoding.strtolocal(inst.strerror))
         else:  # suspicious IOError
             raise
     except MemoryError:
         ui.error(_(b"abort: out of memory\n"))
     except SystemExit as inst:
         # Commands shouldn't sys.exit directly, but give a return code.
         # Just in case catch this and and pass exit code to caller.
         detailed_exit_code = 254
         coarse_exit_code = inst.code

     if ui.configbool(b'ui', b'detailed-exit-code'):
         return detailed_exit_code
     else:
         return coarse_exit_code


 def checknewlabel(repo, lbl: bytes, kind) -> None:
     # Do not use the "kind" parameter in ui output.
     # It makes strings difficult to translate.
     if lbl in [b'tip', b'.', b'null']:
         raise error.InputError(_(b"the name '%s' is reserved") % lbl)
     for c in (b':', b'\0', b'\n', b'\r'):
         if c in lbl:
             raise error.InputError(
                 _(b"%r cannot be used in a name") % pycompat.bytestr(c)
             )
     try:
         int(lbl)
         if b'_' in lbl:
             # If label contains underscores, Python might consider it an
             # integer (with "_" as visual separators), but we do not.
             # See PEP 515 - Underscores in Numeric Literals.
             raise ValueError
         raise error.InputError(_(b"cannot use an integer as a name"))
     except ValueError:
         pass
     if lbl.strip() != lbl:
         raise error.InputError(
             _(b"leading or trailing whitespace in name %r") % lbl
         )


 def checkfilename(f: bytes) -> None:
     '''Check that the filename f is an acceptable filename for a tracked file'''
     if b'\r' in f or b'\n' in f:
         raise error.InputError(
             _(b"'\\n' and '\\r' disallowed in filenames: %r")
             % pycompat.bytestr(f)
         )


 def checkportable(ui: "uimod.ui", f: bytes) -> None:
     '''Check if filename f is portable and warn or abort depending on config'''
     checkfilename(f)
     abort, warn = checkportabilityalert(ui)
     if abort or warn:
         msg = util.checkwinfilename(f)
         if msg:
             msg = b"%s: %s" % (msg, procutil.shellquote(f))
             if abort:
                 raise error.InputError(msg)
             ui.warn(_(b"warning: %s\n") % msg)


 def checkportabilityalert(ui: "uimod.ui") -> Tuple[bool, bool]:
     """check if the user's config requests nothing, a warning, or abort for
     non-portable filenames"""
     val = ui.config(b'ui', b'portablefilenames')
     lval = val.lower()
     bval = stringutil.parsebool(val)
     abort = pycompat.iswindows or lval == b'abort'
     warn = bval or lval == b'warn'
     if bval is None and not (warn or abort or lval == b'ignore'):
         raise error.ConfigError(
             _(b"ui.portablefilenames value is invalid ('%s')") % val
         )
     return abort, warn


 class casecollisionauditor:
     def __init__(self, ui: "uimod.ui", abort: bool, dirstate) -> None:
         self._ui = ui
         self._abort = abort
         allfiles = b'\0'.join(dirstate)
         self._loweredfiles = set(encoding.lower(allfiles).split(b'\0'))
         self._dirstate = dirstate
         # The purpose of _newfiles is so that we don't complain about
         # case collisions if someone were to call this object with the
         # same filename twice.
         self._newfiles = set()

     def __call__(self, f: bytes) -> None:
         if f in self._newfiles:
             return
         fl = encoding.lower(f)
         if fl in self._loweredfiles and f not in self._dirstate:
             msg = _(b'possible case-folding collision for %s') % f
             if self._abort:
                 raise error.StateError(msg)
             self._ui.warn(_(b"warning: %s\n") % msg)
         self._loweredfiles.add(fl)
         self._newfiles.add(f)


 def combined_filtered_and_obsolete_hash(
     repo, maxrev, needobsolete: bool = False
 ):
     """build hash of filtered revisions in the current repoview.

     Multiple caches perform up-to-date validation by checking that the
     tiprev and tipnode stored in the cache file match the current repository.
     However, this is not sufficient for validating repoviews because the set
     of revisions in the view may change without the repository tiprev and
     tipnode changing.

     This function hashes all the revs filtered from the view (and, optionally,
     all obsolete revs) up to maxrev and returns that SHA-1 digest.
     """
     cl = repo.changelog
     if needobsolete:
         obsrevs = obsolete.getrevs(repo, b'obsolete')
         if not cl.filteredrevs and not obsrevs:
             return None
         key = (maxrev, hash(cl.filteredrevs), hash(obsrevs))
     else:
         if not cl.filteredrevs:
             return None
         key = maxrev
         obsrevs = frozenset()

     result = cl._filteredrevs_hashcache.get(key)
     if not result:
         revs, obs_revs = _filtered_and_obs_revs(repo, maxrev)
         if needobsolete:
             revs = revs | obs_revs
         revs = sorted(revs)
         if revs:
             result = _hash_revs(revs)
             cl._filteredrevs_hashcache[key] = result
     return result


 def filtered_and_obsolete_hash(repo, maxrev):
     """build hashs of filtered and obsolete revisions in the current repoview.

     Multiple caches perform up-to-date validation by checking that the
     tiprev and tipnode stored in the cache file match the current repository.
     However, this is not sufficient for validating repoviews because the set
     of revisions in the view may change without the repository tiprev and
     tipnode changing.

     This function hashes all the revs filtered from the view up to maxrev and
     returns that SHA-1 digest. The obsolete revisions hashed are only the
     non-filtered one.
     """
     cl = repo.changelog
     obs_set = obsolete.getrevs(repo, b'obsolete')
     key = (maxrev, hash(cl.filteredrevs), hash(obs_set))

     result = cl._filteredrevs_hashcache.get(key)
     if result is None:
         filtered_hash = None
         obs_hash = None
         filtered_revs, obs_revs = _filtered_and_obs_revs(repo, maxrev)
         if filtered_revs:
             filtered_hash = _hash_revs(filtered_revs)
         if obs_revs:
             obs_hash = _hash_revs(obs_revs)
         result = (filtered_hash, obs_hash)
         cl._filteredrevs_hashcache[key] = result
     return result


 def _filtered_and_obs_revs(repo, max_rev):
     """return the set of filtered and non-filtered obsolete revision"""
     cl = repo.changelog
     obs_set = obsolete.getrevs(repo, b'obsolete')
     filtered_set = cl.filteredrevs
     if cl.filteredrevs:
         obs_set = obs_set - cl.filteredrevs
     if max_rev < (len(cl) - 1):
         # there might be revision to filter out
         filtered_set = set(r for r in filtered_set if r <= max_rev)
         obs_set = set(r for r in obs_set if r <= max_rev)
     return (filtered_set, obs_set)


 def _hash_revs(revs: Iterable[int]) -> bytes:
     """return a hash from a list of revision numbers"""
     s = hashutil.sha1()
     for rev in revs:
         s.update(b'%d;' % rev)
     return s.digest()


 def walkrepos(
     path,
     followsym: bool = False,
     seen_dirs: Optional[List[bytes]] = None,
     recurse: bool = False,
 ) -> Iterable[bytes]:
     """yield every hg repository under path, always recursively.
     The recurse flag will only control recursion into repo working dirs"""

     def errhandler(err):
         if err.filename == path:
             raise err

     samestat = getattr(os.path, 'samestat', None)
     if followsym and samestat is not None:

         def adddir(dirlst, dirname):
             dirstat = os.stat(dirname)
             match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
             if not match:
                 dirlst.append(dirstat)
             return not match

     else:
         followsym = False

     if (seen_dirs is None) and followsym:
         seen_dirs = []
         adddir(seen_dirs, path)
     for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
         dirs.sort()
         if b'.hg' in dirs:
             yield root  # found a repository
             qroot = os.path.join(root, b'.hg', b'patches')
             if os.path.isdir(os.path.join(qroot, b'.hg')):
                 yield qroot  # we have a patch queue repo here
             if recurse:
                 # avoid recursing inside the .hg directory
                 dirs.remove(b'.hg')
             else:
                 dirs[:] = []  # don't descend further
         elif followsym:
             newdirs = []
             for d in dirs:
                 fname = os.path.join(root, d)
                 if adddir(seen_dirs, fname):
                     if os.path.islink(fname):
                         for hgname in walkrepos(fname, True, seen_dirs):
                             yield hgname
                     else:
                         newdirs.append(d)
             dirs[:] = newdirs


522 | def binnode(ctx) -> bytes: |
|
524 | def binnode(ctx) -> bytes: | |
523 | """Return binary node id for a given basectx""" |
|
525 | """Return binary node id for a given basectx""" | |
524 | node = ctx.node() |
|
526 | node = ctx.node() | |
525 | if node is None: |
|
527 | if node is None: | |
526 | return ctx.repo().nodeconstants.wdirid |
|
528 | return ctx.repo().nodeconstants.wdirid | |
527 | return node |
|
529 | return node | |
528 |
|
530 | |||
529 |
|
531 | |||
530 | def intrev(ctx) -> int: |
|
532 | def intrev(ctx) -> int: | |
531 | """Return integer for a given basectx that can be used in comparison or |
|
533 | """Return integer for a given basectx that can be used in comparison or | |
532 | arithmetic operation""" |
|
534 | arithmetic operation""" | |
533 | rev = ctx.rev() |
|
535 | rev = ctx.rev() | |
534 | if rev is None: |
|
536 | if rev is None: | |
535 | return wdirrev |
|
537 | return wdirrev | |
536 | return rev |
|
538 | return rev | |
537 |
|
539 | |||
538 |
|
540 | |||
539 | def formatchangeid(ctx) -> bytes: |
|
541 | def formatchangeid(ctx) -> bytes: | |
540 | """Format changectx as '{rev}:{node|formatnode}', which is the default |
|
542 | """Format changectx as '{rev}:{node|formatnode}', which is the default | |
541 | template provided by logcmdutil.changesettemplater""" |
|
543 | template provided by logcmdutil.changesettemplater""" | |
542 | repo = ctx.repo() |
|
544 | repo = ctx.repo() | |
543 | return formatrevnode(repo.ui, intrev(ctx), binnode(ctx)) |
|
545 | return formatrevnode(repo.ui, intrev(ctx), binnode(ctx)) | |
544 |
|
546 | |||
545 |
|
547 | |||
546 | def formatrevnode(ui: "uimod.ui", rev: int, node: bytes) -> bytes: |
|
548 | def formatrevnode(ui: "uimod.ui", rev: int, node: bytes) -> bytes: | |
547 | """Format given revision and node depending on the current verbosity""" |
|
549 | """Format given revision and node depending on the current verbosity""" | |
548 | if ui.debugflag: |
|
550 | if ui.debugflag: | |
549 | hexfunc = hex |
|
551 | hexfunc = hex | |
550 | else: |
|
552 | else: | |
551 | hexfunc = short |
|
553 | hexfunc = short | |
552 | return b'%d:%s' % (rev, hexfunc(node)) |
|
554 | return b'%d:%s' % (rev, hexfunc(node)) | |
553 |
|
555 | |||
554 |
|
556 | |||
555 | def resolvehexnodeidprefix(repo, prefix: bytes): |
|
557 | def resolvehexnodeidprefix(repo, prefix: bytes): | |
556 | if prefix.startswith(b'x'): |
|
558 | if prefix.startswith(b'x'): | |
557 | prefix = prefix[1:] |
|
559 | prefix = prefix[1:] | |
558 | try: |
|
560 | try: | |
559 | # Uses unfiltered repo because it's faster when prefix is ambiguous/ |
|
561 | # Uses unfiltered repo because it's faster when prefix is ambiguous/ | |
560 | # This matches the shortesthexnodeidprefix() function below. |
|
562 | # This matches the shortesthexnodeidprefix() function below. | |
561 | node = repo.unfiltered().changelog._partialmatch(prefix) |
|
563 | node = repo.unfiltered().changelog._partialmatch(prefix) | |
562 | except error.AmbiguousPrefixLookupError: |
|
564 | except error.AmbiguousPrefixLookupError: | |
563 | revset = repo.ui.config( |
|
565 | revset = repo.ui.config( | |
564 | b'experimental', b'revisions.disambiguatewithin' |
|
566 | b'experimental', b'revisions.disambiguatewithin' | |
565 | ) |
|
567 | ) | |
566 | if revset: |
|
568 | if revset: | |
567 | # Clear config to avoid infinite recursion |
|
569 | # Clear config to avoid infinite recursion | |
568 | configoverrides = { |
|
570 | configoverrides = { | |
569 | (b'experimental', b'revisions.disambiguatewithin'): None |
|
571 | (b'experimental', b'revisions.disambiguatewithin'): None | |
570 | } |
|
572 | } | |
571 | with repo.ui.configoverride(configoverrides): |
|
573 | with repo.ui.configoverride(configoverrides): | |
572 | revs = repo.anyrevs([revset], user=True) |
|
574 | revs = repo.anyrevs([revset], user=True) | |
573 | matches = [] |
|
575 | matches = [] | |
574 | for rev in revs: |
|
576 | for rev in revs: | |
575 | node = repo.changelog.node(rev) |
|
577 | node = repo.changelog.node(rev) | |
576 | if hex(node).startswith(prefix): |
|
578 | if hex(node).startswith(prefix): | |
577 | matches.append(node) |
|
579 | matches.append(node) | |
578 | if len(matches) == 1: |
|
580 | if len(matches) == 1: | |
579 | return matches[0] |
|
581 | return matches[0] | |
580 | raise |
|
582 | raise | |
581 | if node is None: |
|
583 | if node is None: | |
582 | return |
|
584 | return | |
583 | repo.changelog.rev(node) # make sure node isn't filtered |
|
585 | repo.changelog.rev(node) # make sure node isn't filtered | |
584 | return node |
|
586 | return node | |
585 |
|
587 | |||
586 |
|
588 | |||
587 | def mayberevnum(repo, prefix: bytes) -> bool: |
|
589 | def mayberevnum(repo, prefix: bytes) -> bool: | |
588 | """Checks if the given prefix may be mistaken for a revision number""" |
|
590 | """Checks if the given prefix may be mistaken for a revision number""" | |
589 | try: |
|
591 | try: | |
590 | i = int(prefix) |
|
592 | i = int(prefix) | |
591 | # if we are a pure int, then starting with zero will not be |
|
593 | # if we are a pure int, then starting with zero will not be | |
592 | # confused as a rev; or, obviously, if the int is larger |
|
594 | # confused as a rev; or, obviously, if the int is larger | |
593 | # than the value of the tip rev. We still need to disambiguate if |
|
595 | # than the value of the tip rev. We still need to disambiguate if | |
594 | # prefix == '0', since that *is* a valid revnum. |
|
596 | # prefix == '0', since that *is* a valid revnum. | |
595 | if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo): |
|
597 | if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo): | |
596 | return False |
|
598 | return False | |
597 | return True |
|
599 | return True | |
598 | except ValueError: |
|
600 | except ValueError: | |
599 | return False |
|
601 | return False | |
600 |
|
602 | |||
601 |
|
603 | |||
602 | def shortesthexnodeidprefix(repo, node: bytes, minlength: int = 1, cache=None): |
|
604 | def shortesthexnodeidprefix(repo, node: bytes, minlength: int = 1, cache=None): | |
603 | """Find the shortest unambiguous prefix that matches hexnode. |
|
605 | """Find the shortest unambiguous prefix that matches hexnode. | |
604 |
|
606 | |||
605 | If "cache" is not None, it must be a dictionary that can be used for |
|
607 | If "cache" is not None, it must be a dictionary that can be used for | |
606 | caching between calls to this method. |
|
608 | caching between calls to this method. | |
607 | """ |
|
609 | """ | |
608 | # _partialmatch() of filtered changelog could take O(len(repo)) time, |
|
610 | # _partialmatch() of filtered changelog could take O(len(repo)) time, | |
609 | # which would be unacceptably slow. so we look for hash collision in |
|
611 | # which would be unacceptably slow. so we look for hash collision in | |
610 | # unfiltered space, which means some hashes may be slightly longer. |
|
612 | # unfiltered space, which means some hashes may be slightly longer. | |
611 |
|
613 | |||
612 | minlength = max(minlength, 1) |
|
614 | minlength = max(minlength, 1) | |
613 |
|
615 | |||
614 | def disambiguate(prefix: bytes): |
|
616 | def disambiguate(prefix: bytes): | |
615 | """Disambiguate against revnums.""" |
|
617 | """Disambiguate against revnums.""" | |
616 | if repo.ui.configbool(b'experimental', b'revisions.prefixhexnode'): |
|
618 | if repo.ui.configbool(b'experimental', b'revisions.prefixhexnode'): | |
617 | if mayberevnum(repo, prefix): |
|
619 | if mayberevnum(repo, prefix): | |
618 | return b'x' + prefix |
|
620 | return b'x' + prefix | |
619 | else: |
|
621 | else: | |
620 | return prefix |
|
622 | return prefix | |
621 |
|
623 | |||
622 | hexnode = hex(node) |
|
624 | hexnode = hex(node) | |
623 | for length in range(len(prefix), len(hexnode) + 1): |
|
625 | for length in range(len(prefix), len(hexnode) + 1): | |
624 | prefix = hexnode[:length] |
|
626 | prefix = hexnode[:length] | |
625 | if not mayberevnum(repo, prefix): |
|
627 | if not mayberevnum(repo, prefix): | |
626 | return prefix |
|
628 | return prefix | |
627 |
|
629 | |||
628 | cl = repo.unfiltered().changelog |
|
630 | cl = repo.unfiltered().changelog | |
629 | revset = repo.ui.config(b'experimental', b'revisions.disambiguatewithin') |
|
631 | revset = repo.ui.config(b'experimental', b'revisions.disambiguatewithin') | |
630 | if revset: |
|
632 | if revset: | |
631 | revs = None |
|
633 | revs = None | |
632 | if cache is not None: |
|
634 | if cache is not None: | |
633 | revs = cache.get(b'disambiguationrevset') |
|
635 | revs = cache.get(b'disambiguationrevset') | |
634 | if revs is None: |
|
636 | if revs is None: | |
635 | revs = repo.anyrevs([revset], user=True) |
|
637 | revs = repo.anyrevs([revset], user=True) | |
636 | if cache is not None: |
|
638 | if cache is not None: | |
637 | cache[b'disambiguationrevset'] = revs |
|
639 | cache[b'disambiguationrevset'] = revs | |
638 | if cl.rev(node) in revs: |
|
640 | if cl.rev(node) in revs: | |
639 | hexnode = hex(node) |
|
641 | hexnode = hex(node) | |
640 | nodetree = None |
|
642 | nodetree = None | |
641 | if cache is not None: |
|
643 | if cache is not None: | |
642 | nodetree = cache.get(b'disambiguationnodetree') |
|
644 | nodetree = cache.get(b'disambiguationnodetree') | |
643 | is_invalidated = getattr(nodetree, 'is_invalidated', lambda: False) |
|
645 | is_invalidated = getattr(nodetree, 'is_invalidated', lambda: False) | |
644 | if is_invalidated(): |
|
646 | if is_invalidated(): | |
645 | nodetree = None |
|
647 | nodetree = None | |
646 | if not nodetree: |
|
648 | if not nodetree: | |
647 | if hasattr(parsers, 'nodetree') and isinstance( |
|
649 | if hasattr(parsers, 'nodetree') and isinstance( | |
648 | cl.index, parsers.index |
|
650 | cl.index, parsers.index | |
649 | ): |
|
651 | ): | |
650 | index = cl.index |
|
652 | index = cl.index | |
651 | nodetree = parsers.nodetree(index, len(revs)) |
|
653 | nodetree = parsers.nodetree(index, len(revs)) | |
652 | elif getattr(cl.index, 'is_rust', False): |
|
654 | elif getattr(cl.index, 'is_rust', False): | |
653 | nodetree = rustrevlog.NodeTree(cl.index) |
|
655 | nodetree = rustrevlog.NodeTree(cl.index) | |
654 |
|
656 | |||
655 | if nodetree is not None: |
|
657 | if nodetree is not None: | |
656 | for r in revs: |
|
658 | for r in revs: | |
657 | nodetree.insert(r) |
|
659 | nodetree.insert(r) | |
658 | if cache is not None: |
|
660 | if cache is not None: | |
659 | cache[b'disambiguationnodetree'] = nodetree |
|
661 | cache[b'disambiguationnodetree'] = nodetree | |
660 | length = max(nodetree.shortest(node), minlength) |
|
662 | length = max(nodetree.shortest(node), minlength) | |
661 | prefix = hexnode[:length] |
|
663 | prefix = hexnode[:length] | |
662 | return disambiguate(prefix) |
|
664 | return disambiguate(prefix) | |
663 | for length in range(minlength, len(hexnode) + 1): |
|
665 | for length in range(minlength, len(hexnode) + 1): | |
664 | matches = [] |
|
666 | matches = [] | |
665 | prefix = hexnode[:length] |
|
667 | prefix = hexnode[:length] | |
666 | for rev in revs: |
|
668 | for rev in revs: | |
667 | otherhexnode = repo[rev].hex() |
|
669 | otherhexnode = repo[rev].hex() | |
668 | if prefix == otherhexnode[:length]: |
|
670 | if prefix == otherhexnode[:length]: | |
669 | matches.append(otherhexnode) |
|
671 | matches.append(otherhexnode) | |
670 | if len(matches) == 1: |
|
672 | if len(matches) == 1: | |
671 | return disambiguate(prefix) |
|
673 | return disambiguate(prefix) | |
672 |
|
674 | |||
673 | try: |
|
675 | try: | |
674 | return disambiguate(cl.shortest(node, minlength)) |
|
676 | return disambiguate(cl.shortest(node, minlength)) | |
675 | except error.LookupError: |
|
677 | except error.LookupError: | |
676 | raise error.RepoLookupError() |
|
678 | raise error.RepoLookupError() | |
677 |
|
679 | |||
678 |
|
680 | |||
679 | def isrevsymbol(repo, symbol: bytes) -> bool: |
|
681 | def isrevsymbol(repo, symbol: bytes) -> bool: | |
680 | """Checks if a symbol exists in the repo. |
|
682 | """Checks if a symbol exists in the repo. | |
681 |
|
683 | |||
682 | See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the |
|
684 | See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the | |
683 | symbol is an ambiguous nodeid prefix. |
|
685 | symbol is an ambiguous nodeid prefix. | |
684 | """ |
|
686 | """ | |
685 | try: |
|
687 | try: | |
686 | revsymbol(repo, symbol) |
|
688 | revsymbol(repo, symbol) | |
687 | return True |
|
689 | return True | |
688 | except error.RepoLookupError: |
|
690 | except error.RepoLookupError: | |
689 | return False |
|
691 | return False | |
690 |
|
692 | |||
691 |
|
693 | |||
def revsymbol(repo, symbol: bytes):
    """Returns a context given a single revision symbol (as string).

    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".
    """
    if not isinstance(symbol, bytes):
        msg = (
            b"symbol (%s of type %s) was not a string, did you mean "
            b"repo[symbol]?" % (symbol, type(symbol))
        )
        raise error.ProgrammingError(msg)
    try:
        if symbol in (b'.', b'tip', b'null'):
            return repo[symbol]

        try:
            r = int(symbol)
            if b'%d' % r != symbol:
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                r += l
            if r < 0 or r >= l and r != wdirrev:
                raise ValueError
            return repo[r]
        except error.FilteredIndexError:
            raise
        except (ValueError, OverflowError, IndexError):
            pass

        if len(symbol) == 2 * repo.nodeconstants.nodelen:
            try:
                node = bin(symbol)
                rev = repo.changelog.rev(node)
                return repo[rev]
            except error.FilteredLookupError:
                raise
            except (binascii.Error, LookupError):
                pass

        # look up bookmarks through the name interface
        try:
            node = repo.names.singlenode(repo, symbol)
            rev = repo.changelog.rev(node)
            return repo[rev]
        except KeyError:
            pass

        node = resolvehexnodeidprefix(repo, symbol)
        if node is not None:
            rev = repo.changelog.rev(node)
            return repo[rev]

        raise error.RepoLookupError(_(b"unknown revision '%s'") % symbol)

    except error.WdirUnsupported:
        return repo[None]
    except (
        error.FilteredIndexError,
        error.FilteredLookupError,
        error.FilteredRepoLookupError,
    ):
        raise _filterederror(repo, symbol)


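# Illustrative sketch (not part of upstream scmutil.py): how a caller might
# combine isrevsymbol() and revsymbol() to turn a user-supplied symbol into a
# changeset hash.  The 'repo' object and the b'tip' default are assumptions
# made for this example only.
def _example_lookup_symbol(repo, symbol=b'tip'):
    if not isrevsymbol(repo, symbol):
        return None
    return revsymbol(repo, symbol).hex()

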
def _filterederror(repo, changeid: bytes) -> error.FilteredRepoLookupError:
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if repo.filtername.startswith(b'visible'):
        # Check if the changeset is obsolete
        unfilteredrepo = repo.unfiltered()
        ctx = revsymbol(unfilteredrepo, changeid)

        # If the changeset is obsolete, enrich the message with the reason
        # that made this changeset not visible
        if ctx.obsolete():
            msg = obsutil._getfilteredreason(repo, changeid, ctx)
        else:
            msg = _(b"hidden revision '%s'") % changeid

        hint = _(b'use --hidden to access hidden revisions')

        return error.FilteredRepoLookupError(msg, hint=hint)
    msg = _(b"filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)


def revsingle(repo, revspec, default=b'.', localalias=None):
    if not revspec and revspec != 0:
        return repo[default]

    l = revrange(repo, [revspec], localalias=localalias)
    if not l:
        raise error.InputError(_(b'empty revision set'))
    return repo[l.last()]


def _pairspec(revspec) -> bool:
    tree = revsetlang.parse(revspec)
    return tree and tree[0] in (
        b'range',
        b'rangepre',
        b'rangepost',
        b'rangeall',
    )


def revpair(repo, revs):
    if not revs:
        return repo[b'.'], repo[None]

    l = revrange(repo, revs)

    if not l:
        raise error.InputError(_(b'empty revision range'))

    first = l.first()
    second = l.last()

    if (
        first == second
        and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)
    ):
        raise error.InputError(_(b'empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo[first], repo[None]

    return repo[first], repo[second]


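# Illustrative sketch (not part of upstream scmutil.py): revpair() is the
# helper that commands such as 'hg diff --rev A --rev B' use to obtain the two
# endpoint contexts.  The revision strings below are assumptions for the
# example.
def _example_diff_endpoints(repo, revs=(b'.^', b'.')):
    ctx1, ctx2 = revpair(repo, list(revs))
    return ctx1.rev(), ctx2.rev()

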
def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``smartset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    allspecs = []
    for spec in specs:
        if isinstance(spec, int):
            spec = revsetlang.formatspec(b'%d', spec)
        allspecs.append(spec)
    return repo.anyrevs(allspecs, user=True, localalias=localalias)


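# Illustrative sketch (not part of upstream scmutil.py): pre-formatting an
# argument with revsetlang.formatspec() before handing the result to
# revrange(), as the docstring above recommends.  The bookmark name is an
# assumption for the example.
def _example_revs_local_to_bookmark(repo, book=b'my-bookmark'):
    spec = revsetlang.formatspec(b'only(., %s)', book)
    return list(revrange(repo, [spec]))

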
def increasingwindows(
    windowsize: int = 8, sizelimit: int = 512
) -> Iterable[int]:
    while True:
        yield windowsize
        if windowsize < sizelimit:
            windowsize *= 2


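# Illustrative sketch (not part of upstream scmutil.py): with the default
# arguments the generator yields 8, 16, 32, ... doubling until the 512 limit
# is reached, after which 512 repeats forever.
def _example_window_sizes(count=8):
    import itertools

    return list(itertools.islice(increasingwindows(), count))

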
def walkchangerevs(repo, revs, makefilematcher, prepare):
    """Iterate over files and the revs in a "windowed" way.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order."""

    if not revs:
        return []
    change = repo.__getitem__

    def iterate():
        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            nrevs = []
            for i in range(windowsize):
                rev = next(it, None)
                if rev is None:
                    stopiteration = True
                    break
                nrevs.append(rev)
            for rev in sorted(nrevs):
                ctx = change(rev)
                prepare(ctx, makefilematcher(ctx))
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()


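# Illustrative sketch (not part of upstream scmutil.py): a minimal caller of
# walkchangerevs() that records the description of every walked revision.
# The 'repo' and 'revs' arguments are assumptions for the example.
def _example_collect_descriptions(repo, revs):
    descriptions = []

    def prepare(ctx, filematcher):
        descriptions.append(ctx.description())

    for ctx in walkchangerevs(repo, revs, lambda ctx: matchall(repo), prepare):
        pass
    return descriptions

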
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        return parents
    if repo.ui.debugflag:
        return [parents[0], repo[nullrev]]
    if parents[0].rev() >= intrev(ctx) - 1:
        return []
    return parents


def getuipathfn(
    repo,
    legacyrelativevalue: bool = False,
    forcerelativevalue: Optional[bool] = None,
) -> typelib.UiPathFn:
    """Return a function that produces paths for presenting to the user.

    The returned function takes a repo-relative path and produces a path
    that can be presented in the UI.

    Depending on the value of ui.relative-paths, either a repo-relative or
    cwd-relative path will be produced.

    legacyrelativevalue is the value to use if ui.relative-paths=legacy

    If forcerelativevalue is not None, then that value will be used regardless
    of what ui.relative-paths is set to.
    """
    if forcerelativevalue is not None:
        relative = forcerelativevalue
    else:
        config = repo.ui.config(b'ui', b'relative-paths')
        if config == b'legacy':
            relative = legacyrelativevalue
        else:
            relative = stringutil.parsebool(config)
            if relative is None:
                raise error.ConfigError(
                    _(b"ui.relative-paths is not a boolean ('%s')") % config
                )

    if relative:
        cwd = repo.getcwd()
        if cwd != b'':
            # this branch would work even if cwd == b'' (i.e. cwd == repo
            # root), but its generality makes the returned function slower
            pathto = repo.pathto
            return lambda f: pathto(f, cwd)
    if repo.ui.configbool(b'ui', b'slash'):
        return lambda f: f
    else:
        return util.localpath


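# Illustrative sketch (not part of upstream scmutil.py): formatting
# repo-relative paths for display while honouring ui.relative-paths.  The
# file names are assumptions for the example.
def _example_print_paths(repo, files=(b'a.txt', b'dir/b.txt')):
    uipathfn = getuipathfn(repo, legacyrelativevalue=True)
    for f in files:
        repo.ui.write(b'%s\n' % uipathfn(f))

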
def subdiruipathfn(
    subpath: bytes, uipathfn: typelib.UiPathFn
) -> typelib.UiPathFn:
    '''Create a new uipathfn that treats the file as relative to subpath.'''
    return lambda f: uipathfn(posixpath.join(subpath, f))


def anypats(pats, opts) -> bool:
    """Checks if any patterns, including --include and --exclude were given.

    Some commands (e.g. addremove) use this condition for deciding whether to
    print absolute or relative paths.
    """
    return bool(pats or opts.get(b'include') or opts.get(b'exclude'))


def expandpats(pats: Iterable[bytes]) -> List[bytes]:
    """Expand bare globs when running on windows.
    On posix we assume it has already been done by sh."""
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is None:
            try:
                globbed = glob.glob(pat)
            except re.error:
                globbed = [pat]
            if globbed:
                ret.extend(globbed)
                continue
        ret.append(kindpat)
    return ret


def matchandpats(
    ctx,
    pats=(),
    opts=None,
    globbed: bool = False,
    default: bytes = b'relpath',
    badfn=None,
):
    """Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided."""
    if opts is None:
        opts = {}
    if not globbed and default == b'relpath':
        pats = expandpats(pats or [])

    uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)

    def bad(f, msg):
        ctx.repo().ui.warn(b"%s: %s\n" % (uipathfn(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(
        pats,
        opts.get(b'include'),
        opts.get(b'exclude'),
        default,
        listsubrepos=opts.get(b'subrepos'),
        badfn=badfn,
    )

    if m.always():
        pats = []
    return m, pats


def match(
    ctx,
    pats=(),
    opts=None,
    globbed: bool = False,
    default: bytes = b'relpath',
    badfn=None,
):
    '''Return a matcher that will warn about bad matches.'''
    return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]


def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    return matchmod.always()


def matchfiles(repo, files, badfn=None) -> matchmod.exactmatcher:
    '''Return a matcher that will efficiently match exactly these files.'''
    return matchmod.exact(files, badfn=badfn)


def parsefollowlinespattern(repo, rev, pat: bytes, msg: bytes) -> bytes:
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.
    """
    if not matchmod.patkind(pat):
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    else:
        ctx = repo[rev]
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
        files = [f for f in ctx if m(f)]
        if len(files) != 1:
            raise error.ParseError(msg)
        return files[0]


def getorigvfs(ui: "uimod.ui", repo):
    """return a vfs suitable for saving 'orig' files

    return None if no special directory is configured"""
    origbackuppath = ui.config(b'ui', b'origbackuppath')
    if not origbackuppath:
        return None
    return vfs.vfs(repo.wvfs.join(origbackuppath))


def backuppath(ui: "uimod.ui", repo, filepath: bytes) -> bytes:
    """customize where working copy backup files (.orig files) are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified

    filepath is repo-relative

    Returns an absolute path
    """
    origvfs = getorigvfs(ui, repo)
    if origvfs is None:
        return repo.wjoin(filepath + b".orig")

    origbackupdir = origvfs.dirname(filepath)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_(b'creating directory: %s\n') % origvfs.join(origbackupdir))

        # Remove any files that conflict with the backup file's path
        for f in reversed(list(pathutil.finddirs(filepath))):
            if origvfs.isfileorlink(f):
                ui.note(_(b'removing conflicting file: %s\n') % origvfs.join(f))
                origvfs.unlink(f)
                break

        origvfs.makedirs(origbackupdir)

    if origvfs.isdir(filepath) and not origvfs.islink(filepath):
        ui.note(
            _(b'removing conflicting directory: %s\n') % origvfs.join(filepath)
        )
        origvfs.rmtree(filepath, forcibly=True)

    return origvfs.join(filepath)


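# Illustrative sketch (not part of upstream scmutil.py): computing the backup
# location for a working-copy file before overwriting it.  The path literal
# and the explicit copy step are assumptions for the example.
def _example_backup_before_overwrite(ui, repo, path=b'foo/bar.txt'):
    dest = backuppath(ui, repo, path)
    util.copyfile(repo.wjoin(path), dest)
    return dest

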
class _containsnode:
    """proxy __contains__(node) to container.__contains__ which accepts revs"""

    def __init__(self, repo, revcontainer):
        self._torev = repo.changelog.rev
        self._revcontains = revcontainer.__contains__

    def __contains__(self, node) -> bool:
        return self._revcontains(self._torev(node))


def cleanupnodes(
    repo,
    replacements,
    operation,
    moves=None,
    metadata=None,
    fixphase=False,
    targetphase=None,
    backup=True,
) -> None:
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.

    replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".

    metadata is a dictionary containing metadata to be stored in obsmarker if
    obsolescence is enabled.
    """
    assert fixphase or targetphase is None
    if not replacements and not moves:
        return

    # translate mapping's other forms
    if not hasattr(replacements, 'items'):
        replacements = {(n,): () for n in replacements}
    else:
        # upgrading non tuple "source" to tuple ones for BC
        repls = {}
        for key, value in replacements.items():
            if not isinstance(key, tuple):
                key = (key,)
            repls[key] = value
        replacements = repls

    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()

    # Calculate bookmark movements
    if moves is None:
        moves = {}
    for oldnodes, newnodes in replacements.items():
        for oldnode in oldnodes:
            if oldnode in moves:
                continue
            if len(newnodes) > 1:
                # usually a split, take the one with biggest rev number
                newnode = next(unfi.set(b'max(%ln)', newnodes)).node()
            elif len(newnodes) == 0:
                # move bookmark backwards
                allreplaced = []
                for rep in replacements:
                    allreplaced.extend(rep)
                roots = list(
                    unfi.set(b'max((::%n) - %ln)', oldnode, allreplaced)
                )
                if roots:
                    newnode = roots[0].node()
                else:
                    newnode = repo.nullid
            else:
                newnode = newnodes[0]
            moves[oldnode] = newnode

    allnewnodes = [n for ns in replacements.values() for n in ns]
    toretract = {}
    toadvance = {}
    if fixphase:
        precursors = {}
        for oldnodes, newnodes in replacements.items():
            for oldnode in oldnodes:
                for newnode in newnodes:
                    precursors.setdefault(newnode, []).append(oldnode)

        allnewnodes.sort(key=lambda n: unfi[n].rev())
        newphases = {}

        def phase(ctx):
            return newphases.get(ctx.node(), ctx.phase())

        for newnode in allnewnodes:
            ctx = unfi[newnode]
            parentphase = max(phase(p) for p in ctx.parents())
            if targetphase is None:
                oldphase = max(
                    unfi[oldnode].phase() for oldnode in precursors[newnode]
                )
                newphase = max(oldphase, parentphase)
            else:
                newphase = max(targetphase, parentphase)
            newphases[newnode] = newphase
            if newphase > ctx.phase():
                toretract.setdefault(newphase, []).append(newnode)
            elif newphase < ctx.phase():
                toadvance.setdefault(newphase, []).append(newnode)

    with repo.transaction(b'cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks  # avoid import cycle

            repo.ui.debug(
                b'moving bookmarks %r from %s to %s\n'
                % (
                    pycompat.rapply(pycompat.maybebytestr, oldbmarks),
                    hex(oldnode),
                    hex(newnode),
                )
            )
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs(
                b'parents(roots(%ln & (::%n))) - parents(%n)',
                allnewnodes,
                newnode,
                oldnode,
            )
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    bmarkchanges.append((b, None))

        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)

        for phase, nodes in toretract.items():
            phases.retractboundary(repo, tr, phase, nodes)
        for phase, nodes in toadvance.items():
            phases.advanceboundary(repo, tr, phase, nodes)

        mayusearchived = repo.ui.config(b'experimental', b'cleanup-as-archived')
        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obsolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the nodes in topological order; that might be useful
            # for some obsstore logic.
            # NOTE: the sorting might belong to createmarkers.
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0][0])
            rels = []
            for ns, s in sorted(replacements.items(), key=sortfunc):
                rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
                rels.append(rel)
            if rels:
                obsolete.createmarkers(
                    repo, rels, operation=operation, metadata=metadata
                )
        elif phases.supportarchived(repo) and mayusearchived:
            # this assumes we do not have "unstable" nodes above the cleaned ones
            allreplaced = set()
            for ns in replacements.keys():
                allreplaced.update(ns)
            if backup:
                from . import repair  # avoid import cycle

                node = min(allreplaced, key=repo.changelog.rev)
                repair.backupbundle(
                    repo, allreplaced, allreplaced, node, operation
                )
            phases.retractboundary(repo, tr, phases.archived, allreplaced)
        else:
            from . import repair  # avoid import cycle

            tostrip = list(n for ns in replacements for n in ns)
            if tostrip:
                repair.delayedstrip(
                    repo.ui, repo, tostrip, operation, backup=backup
                )


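# Illustrative sketch (not part of upstream scmutil.py): the shape of the
# 'replacements' mapping accepted by cleanupnodes().  The node values are
# assumptions; real callers obtain them from a history-rewriting operation
# such as rebase or amend.
def _example_record_rewrite(repo, oldnode, newnode):
    cleanupnodes(repo, {oldnode: [newnode]}, operation=b'amend')

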
def addremove(
    repo,
    matcher,
    prefix: bytes,
    uipathfn: typelib.UiPathFn,
    opts=None,
    open_tr=None,
) -> int:
    if opts is None:
        opts = {}
    m = matcher
    dry_run = opts.get(b'dry_run')
    try:
        similarity = float(opts.get(b'similarity') or 0)
    except ValueError:
        raise error.InputError(_(b'similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise error.InputError(_(b'similarity must be between 0 and 100'))
    similarity /= 100.0

    ret = 0

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get(b'subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            subprefix = repo.wvfs.reljoin(prefix, subpath)
            subuipathfn = subdiruipathfn(subpath, uipathfn)
            try:
                if sub.addremove(submatch, subprefix, subuipathfn, opts):
                    ret = 1
            except error.LookupError:
                repo.ui.status(
                    _(b"skipping missing subrepository: %s\n")
                    % uipathfn(subpath)
                )

    rejected = []

    def badfn(f: bytes, msg: bytes) -> None:
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(
        repo, badmatch
    )

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _(b'adding %s\n') % uipathfn(abs)
                label = b'ui.addremove.added'
            else:
                status = _(b'removing %s\n') % uipathfn(abs)
                label = b'ui.addremove.removed'
            repo.ui.status(status, label=label)

    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    if not dry_run and (unknown or forgotten or deleted or renames):
        if open_tr is not None:
            open_tr()
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret


def marktouched(repo, files, similarity: float = 0.0) -> int:
    """Assert that files have somehow been operated upon. files are relative to
    the repo root."""
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _(b'adding %s\n') % abs
            else:
                status = _(b'removing %s\n') % abs
            repo.ui.status(status)

    # TODO: We should probably have the caller pass in uipathfn and apply it to
    # the messages above too. legacyrelativevalue=True is consistent with how
    # it used to work.
    uipathfn = getuipathfn(repo, legacyrelativevalue=True)
    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0


def _interestingfiles(
    repo, matcher
) -> Tuple[List[bytes], List[bytes], List[bytes], List[bytes], List[bytes]]:
    """Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean."""
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    matcher = repo.narrowmatch(matcher, includeexact=True)
    walkresults = dirstate.walk(
        matcher,
        subrepos=sorted(ctx.substate),
        unknown=True,
        ignored=False,
        full=False,
    )
    for abs, st in walkresults.items():
        entry = dirstate.get_entry(abs)
        if (not entry.any_tracked) and audit_path.check(abs):
            unknown.append(abs)
        elif (not entry.removed) and not st:
            deleted.append(abs)
        elif entry.removed and st:
            forgotten.append(abs)
        # for finding renames
        elif entry.removed and not st:
            removed.append(abs)
        elif entry.added:
            added.append(abs)

    return added, unknown, deleted, removed, forgotten


def _findrenames(
    repo, matcher, added, removed, similarity, uipathfn: typelib.UiPathFn
) -> Dict[bytes, bytes]:
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(
            repo, added, removed, similarity
        ):
            if (
                repo.ui.verbose
                or not matcher.exact(old)
                or not matcher.exact(new)
            ):
                repo.ui.status(
                    _(
                        b'recording removal of %s as rename to %s '
                        b'(%d%% similar)\n'
                    )
                    % (uipathfn(old), uipathfn(new), score * 100)
                )
            renames[new] = old
    return renames


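# Illustrative sketch (not part of upstream scmutil.py): how an addremove
# style caller feeds _findrenames().  A similarity of 0.75 corresponds to the
# user passing --similarity 75; the file lists are assumptions.
def _example_guess_renames(repo, added, removed, similarity=0.75):
    uipathfn = getuipathfn(repo, legacyrelativevalue=True)
    return _findrenames(
        repo, matchall(repo), added, removed, similarity, uipathfn
    )

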
1498 | def _markchanges(repo, unknown, deleted, renames) -> None: |
|
1500 | def _markchanges(repo, unknown, deleted, renames) -> None: | |
1499 | """Marks the files in unknown as added, the files in deleted as removed, |
|
1501 | """Marks the files in unknown as added, the files in deleted as removed, | |
1500 | and the files in renames as copied.""" |
|
1502 | and the files in renames as copied.""" | |
1501 | wctx = repo[None] |
|
1503 | wctx = repo[None] | |
1502 | with repo.wlock(): |
|
1504 | with repo.wlock(): | |
1503 | wctx.forget(deleted) |
|
1505 | wctx.forget(deleted) | |
1504 | wctx.add(unknown) |
|
1506 | wctx.add(unknown) | |
1505 | for new, old in renames.items(): |
|
1507 | for new, old in renames.items(): | |
1506 | wctx.copy(old, new) |
|
1508 | wctx.copy(old, new) | |
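A minimal sketch of how these two helpers are meant to be combined in an addremove-style flow; the surrounding variables (matcher, uipathfn, and the file lists) are assumed to come from the calling command, and the similarity threshold is illustrative, not canonical.

# Sketch: detect renames among the removed/added files, then record everything.
renames = _findrenames(repo, matcher, added, removed, 0.75, uipathfn)
_markchanges(repo, unknown, deleted, renames)  # renames maps new name -> old name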
1507 |
|
1509 | |||
1508 |
|
1510 | |||
1509 | def getrenamedfn(repo, endrev=None): |
|
1511 | def getrenamedfn(repo, endrev=None): | |
1510 | if copiesmod.usechangesetcentricalgo(repo): |
|
1512 | if copiesmod.usechangesetcentricalgo(repo): | |
1511 |
|
1513 | |||
1512 | def getrenamed(fn, rev): |
|
1514 | def getrenamed(fn, rev): | |
1513 | ctx = repo[rev] |
|
1515 | ctx = repo[rev] | |
1514 | p1copies = ctx.p1copies() |
|
1516 | p1copies = ctx.p1copies() | |
1515 | if fn in p1copies: |
|
1517 | if fn in p1copies: | |
1516 | return p1copies[fn] |
|
1518 | return p1copies[fn] | |
1517 | p2copies = ctx.p2copies() |
|
1519 | p2copies = ctx.p2copies() | |
1518 | if fn in p2copies: |
|
1520 | if fn in p2copies: | |
1519 | return p2copies[fn] |
|
1521 | return p2copies[fn] | |
1520 | return None |
|
1522 | return None | |
1521 |
|
1523 | |||
1522 | return getrenamed |
|
1524 | return getrenamed | |
1523 |
|
1525 | |||
1524 | rcache = {} |
|
1526 | rcache = {} | |
1525 | if endrev is None: |
|
1527 | if endrev is None: | |
1526 | endrev = len(repo) |
|
1528 | endrev = len(repo) | |
1527 |
|
1529 | |||
1528 | def getrenamed(fn, rev): |
|
1530 | def getrenamed(fn, rev): | |
1529 | """looks up all renames for a file (up to endrev) the first |
|
1531 | """looks up all renames for a file (up to endrev) the first | |
1530 | time the file is given. It indexes on the changerev and only |
|
1532 | time the file is given. It indexes on the changerev and only | |
1531 | parses the manifest if linkrev != changerev. |
|
1533 | parses the manifest if linkrev != changerev. | |
1532 | Returns rename info for fn at changerev rev.""" |
|
1534 | Returns rename info for fn at changerev rev.""" | |
1533 | if fn not in rcache: |
|
1535 | if fn not in rcache: | |
1534 | rcache[fn] = {} |
|
1536 | rcache[fn] = {} | |
1535 | fl = repo.file(fn) |
|
1537 | fl = repo.file(fn) | |
1536 | for i in fl: |
|
1538 | for i in fl: | |
1537 | lr = fl.linkrev(i) |
|
1539 | lr = fl.linkrev(i) | |
1538 | renamed = fl.renamed(fl.node(i)) |
|
1540 | renamed = fl.renamed(fl.node(i)) | |
1539 | rcache[fn][lr] = renamed and renamed[0] |
|
1541 | rcache[fn][lr] = renamed and renamed[0] | |
1540 | if lr >= endrev: |
|
1542 | if lr >= endrev: | |
1541 | break |
|
1543 | break | |
1542 | if rev in rcache[fn]: |
|
1544 | if rev in rcache[fn]: | |
1543 | return rcache[fn][rev] |
|
1545 | return rcache[fn][rev] | |
1544 |
|
1546 | |||
1545 | # If linkrev != rev (i.e. rev not found in rcache) fallback to |
|
1547 | # If linkrev != rev (i.e. rev not found in rcache) fallback to | |
1546 | # filectx logic. |
|
1548 | # filectx logic. | |
1547 | try: |
|
1549 | try: | |
1548 | return repo[rev][fn].copysource() |
|
1550 | return repo[rev][fn].copysource() | |
1549 | except error.LookupError: |
|
1551 | except error.LookupError: | |
1550 | return None |
|
1552 | return None | |
1551 |
|
1553 | |||
1552 | return getrenamed |
|
1554 | return getrenamed | |
1553 |
|
1555 | |||
1554 |
|
1556 | |||
1555 | def getcopiesfn(repo, endrev=None): |
|
1557 | def getcopiesfn(repo, endrev=None): | |
1556 | if copiesmod.usechangesetcentricalgo(repo): |
|
1558 | if copiesmod.usechangesetcentricalgo(repo): | |
1557 |
|
1559 | |||
1558 | def copiesfn(ctx): |
|
1560 | def copiesfn(ctx): | |
1559 | if ctx.p2copies(): |
|
1561 | if ctx.p2copies(): | |
1560 | allcopies = ctx.p1copies().copy() |
|
1562 | allcopies = ctx.p1copies().copy() | |
1561 | # There should be no overlap |
|
1563 | # There should be no overlap | |
1562 | allcopies.update(ctx.p2copies()) |
|
1564 | allcopies.update(ctx.p2copies()) | |
1563 | return sorted(allcopies.items()) |
|
1565 | return sorted(allcopies.items()) | |
1564 | else: |
|
1566 | else: | |
1565 | return sorted(ctx.p1copies().items()) |
|
1567 | return sorted(ctx.p1copies().items()) | |
1566 |
|
1568 | |||
1567 | else: |
|
1569 | else: | |
1568 | getrenamed = getrenamedfn(repo, endrev) |
|
1570 | getrenamed = getrenamedfn(repo, endrev) | |
1569 |
|
1571 | |||
1570 | def copiesfn(ctx): |
|
1572 | def copiesfn(ctx): | |
1571 | copies = [] |
|
1573 | copies = [] | |
1572 | for fn in ctx.files(): |
|
1574 | for fn in ctx.files(): | |
1573 | rename = getrenamed(fn, ctx.rev()) |
|
1575 | rename = getrenamed(fn, ctx.rev()) | |
1574 | if rename: |
|
1576 | if rename: | |
1575 | copies.append((fn, rename)) |
|
1577 | copies.append((fn, rename)) | |
1576 | return copies |
|
1578 | return copies | |
1577 |
|
1579 | |||
1578 | return copiesfn |
|
1580 | return copiesfn | |
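A hedged usage sketch: both factories return a callable so the choice between the changeset-centric and filelog-based copy algorithms is made once, outside the per-revision loop. The revset and output formatting below are illustrative.

# Sketch: resolve copies for a range of revisions.
copiesfn = getcopiesfn(repo, endrev=len(repo))
for rev in repo.revs(b'draft()'):
    ctx = repo[rev]
    for dst, src in copiesfn(ctx):  # sorted list of (destination, source) pairs
        repo.ui.write(b'%s <- %s\n' % (dst, src))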
1579 |
|
1581 | |||
1580 |
|
1582 | |||
1581 | def dirstatecopy( |
|
1583 | def dirstatecopy( | |
1582 | ui: "uimod.ui", |
|
1584 | ui: "uimod.ui", | |
1583 | repo, |
|
1585 | repo, | |
1584 | wctx, |
|
1586 | wctx, | |
1585 | src, |
|
1587 | src, | |
1586 | dst, |
|
1588 | dst, | |
1587 | dryrun: bool = False, |
|
1589 | dryrun: bool = False, | |
1588 | cwd: Optional[bytes] = None, |
|
1590 | cwd: Optional[bytes] = None, | |
1589 | ) -> None: |
|
1591 | ) -> None: | |
1590 | """Update the dirstate to reflect the intent of copying src to dst. For |
|
1592 | """Update the dirstate to reflect the intent of copying src to dst. For | |
1591 | different reasons it might not end up with dst being marked as copied from src. |
|
1593 | different reasons it might not end up with dst being marked as copied from src. | |
1592 | """ |
|
1594 | """ | |
1593 | origsrc = repo.dirstate.copied(src) or src |
|
1595 | origsrc = repo.dirstate.copied(src) or src | |
1594 | if dst == origsrc: # copying back a copy? |
|
1596 | if dst == origsrc: # copying back a copy? | |
1595 | entry = repo.dirstate.get_entry(dst) |
|
1597 | entry = repo.dirstate.get_entry(dst) | |
1596 | if (entry.added or not entry.tracked) and not dryrun: |
|
1598 | if (entry.added or not entry.tracked) and not dryrun: | |
1597 | repo.dirstate.set_tracked(dst) |
|
1599 | repo.dirstate.set_tracked(dst) | |
1598 | else: |
|
1600 | else: | |
1599 | if repo.dirstate.get_entry(origsrc).added and origsrc == src: |
|
1601 | if repo.dirstate.get_entry(origsrc).added and origsrc == src: | |
1600 | if not ui.quiet: |
|
1602 | if not ui.quiet: | |
1601 | ui.warn( |
|
1603 | ui.warn( | |
1602 | _( |
|
1604 | _( | |
1603 | b"%s has not been committed yet, so no copy " |
|
1605 | b"%s has not been committed yet, so no copy " | |
1604 | b"data will be stored for %s.\n" |
|
1606 | b"data will be stored for %s.\n" | |
1605 | ) |
|
1607 | ) | |
1606 | % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)) |
|
1608 | % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)) | |
1607 | ) |
|
1609 | ) | |
1608 | if not repo.dirstate.get_entry(dst).tracked and not dryrun: |
|
1610 | if not repo.dirstate.get_entry(dst).tracked and not dryrun: | |
1609 | wctx.add([dst]) |
|
1611 | wctx.add([dst]) | |
1610 | elif not dryrun: |
|
1612 | elif not dryrun: | |
1611 | wctx.copy(origsrc, dst) |
|
1613 | wctx.copy(origsrc, dst) | |
1612 |
|
1614 | |||
1613 |
|
1615 | |||
1614 | def movedirstate(repo, newctx, match=None) -> None: |
|
1616 | def movedirstate(repo, newctx, match=None) -> None: | |
1615 | """Move the dirstate to newctx and adjust it as necessary. |
|
1617 | """Move the dirstate to newctx and adjust it as necessary. | |
1616 |
|
1618 | |||
1617 | A matcher can be provided as an optimization. It is probably a bug to pass |
|
1619 | A matcher can be provided as an optimization. It is probably a bug to pass | |
1618 | a matcher that doesn't match all the differences between the parent of the |
|
1620 | a matcher that doesn't match all the differences between the parent of the | |
1619 | working copy and newctx. |
|
1621 | working copy and newctx. | |
1620 | """ |
|
1622 | """ | |
1621 | oldctx = repo[b'.'] |
|
1623 | oldctx = repo[b'.'] | |
1622 | ds = repo.dirstate |
|
1624 | ds = repo.dirstate | |
1623 | copies = dict(ds.copies()) |
|
1625 | copies = dict(ds.copies()) | |
1624 | ds.setparents(newctx.node(), repo.nullid) |
|
1626 | ds.setparents(newctx.node(), repo.nullid) | |
1625 | s = newctx.status(oldctx, match=match) |
|
1627 | s = newctx.status(oldctx, match=match) | |
1626 |
|
1628 | |||
1627 | for f in s.modified: |
|
1629 | for f in s.modified: | |
1628 | ds.update_file_p1(f, p1_tracked=True) |
|
1630 | ds.update_file_p1(f, p1_tracked=True) | |
1629 |
|
1631 | |||
1630 | for f in s.added: |
|
1632 | for f in s.added: | |
1631 | ds.update_file_p1(f, p1_tracked=False) |
|
1633 | ds.update_file_p1(f, p1_tracked=False) | |
1632 |
|
1634 | |||
1633 | for f in s.removed: |
|
1635 | for f in s.removed: | |
1634 | ds.update_file_p1(f, p1_tracked=True) |
|
1636 | ds.update_file_p1(f, p1_tracked=True) | |
1635 |
|
1637 | |||
1636 | # Merge old parent and old working dir copies |
|
1638 | # Merge old parent and old working dir copies | |
1637 | oldcopies = copiesmod.pathcopies(newctx, oldctx, match) |
|
1639 | oldcopies = copiesmod.pathcopies(newctx, oldctx, match) | |
1638 | oldcopies.update(copies) |
|
1640 | oldcopies.update(copies) | |
1639 | copies = {dst: oldcopies.get(src, src) for dst, src in oldcopies.items()} |
|
1641 | copies = {dst: oldcopies.get(src, src) for dst, src in oldcopies.items()} | |
1640 | # Adjust the dirstate copies |
|
1642 | # Adjust the dirstate copies | |
1641 | for dst, src in copies.items(): |
|
1643 | for dst, src in copies.items(): | |
1642 | if src not in newctx or dst in newctx or not ds.get_entry(dst).added: |
|
1644 | if src not in newctx or dst in newctx or not ds.get_entry(dst).added: | |
1643 | src = None |
|
1645 | src = None | |
1644 | ds.copy(src, dst) |
|
1646 | ds.copy(src, dst) | |
1645 | repo._quick_access_changeid_invalidate() |
|
1647 | repo._quick_access_changeid_invalidate() | |
1646 |
|
1648 | |||
1647 |
|
1649 | |||
1648 | def filterrequirements(requirements): |
|
1650 | def filterrequirements(requirements): | |
1649 | """filters the requirements into two sets: |
|
1651 | """filters the requirements into two sets: | |
1650 |
|
1652 | |||
1651 | wcreq: requirements which should be written in .hg/requires |
|
1653 | wcreq: requirements which should be written in .hg/requires | |
1652 | storereq: which should be written in .hg/store/requires |
|
1654 | storereq: which should be written in .hg/store/requires | |
1653 |
|
1655 | |||
1654 | Returns (wcreq, storereq) |
|
1656 | Returns (wcreq, storereq) | |
1655 | """ |
|
1657 | """ | |
1656 | if requirementsmod.SHARESAFE_REQUIREMENT in requirements: |
|
1658 | if requirementsmod.SHARESAFE_REQUIREMENT in requirements: | |
1657 | wc, store = set(), set() |
|
1659 | wc, store = set(), set() | |
1658 | for r in requirements: |
|
1660 | for r in requirements: | |
1659 | if r in requirementsmod.WORKING_DIR_REQUIREMENTS: |
|
1661 | if r in requirementsmod.WORKING_DIR_REQUIREMENTS: | |
1660 | wc.add(r) |
|
1662 | wc.add(r) | |
1661 | else: |
|
1663 | else: | |
1662 | store.add(r) |
|
1664 | store.add(r) | |
1663 | return wc, store |
|
1665 | return wc, store | |
1664 | return requirements, None |
|
1666 | return requirements, None | |
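An illustrative call, assuming the literal requirement strings below stand in for the constants in the requirements module (the share-safe requirement is spelled b'share-safe' in current Mercurial, but treat the exact names as assumptions):

# Sketch: with share-safe present the set is split, otherwise it is returned whole.
wcreq, storereq = filterrequirements({b'revlogv1', b'store', b'share-safe'})
# wcreq holds the working-dir requirements (e.g. b'share-safe'),
# storereq holds the rest (e.g. b'revlogv1', b'store')
wcreq, storereq = filterrequirements({b'revlogv1', b'store'})
# without share-safe: the full set comes back as-is and storereq is None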
1665 |
|
1667 | |||
1666 |
|
1668 | |||
1667 | def istreemanifest(repo) -> bool: |
|
1669 | def istreemanifest(repo) -> bool: | |
1668 | """returns whether the repository is using treemanifest or not""" |
|
1670 | """returns whether the repository is using treemanifest or not""" | |
1669 | return requirementsmod.TREEMANIFEST_REQUIREMENT in repo.requirements |
|
1671 | return requirementsmod.TREEMANIFEST_REQUIREMENT in repo.requirements | |
1670 |
|
1672 | |||
1671 |
|
1673 | |||
1672 | def writereporequirements(repo, requirements=None, maywritestore=True) -> None: |
|
1674 | def writereporequirements(repo, requirements=None, maywritestore=True) -> None: | |
1673 | """writes requirements for the repo |
|
1675 | """writes requirements for the repo | |
1674 |
|
1676 | |||
1675 | Requirements are written to .hg/requires and .hg/store/requires based |
|
1677 | Requirements are written to .hg/requires and .hg/store/requires based | |
1676 | on whether share-safe mode is enabled and which requirements are wdir |
|
1678 | on whether share-safe mode is enabled and which requirements are wdir | |
1677 | requirements and which are store requirements |
|
1679 | requirements and which are store requirements | |
1678 | """ |
|
1680 | """ | |
1679 | if requirements: |
|
1681 | if requirements: | |
1680 | repo.requirements = requirements |
|
1682 | repo.requirements = requirements | |
1681 | wcreq, storereq = filterrequirements(repo.requirements) |
|
1683 | wcreq, storereq = filterrequirements(repo.requirements) | |
1682 | if wcreq is not None: |
|
1684 | if wcreq is not None: | |
1683 | writerequires(repo.vfs, wcreq) |
|
1685 | writerequires(repo.vfs, wcreq) | |
1684 | if storereq is not None: |
|
1686 | if storereq is not None: | |
1685 | writerequires(repo.svfs, storereq, maywrite=maywritestore) |
|
1687 | writerequires(repo.svfs, storereq, maywrite=maywritestore) | |
1686 | elif repo.ui.configbool(b'format', b'usestore'): |
|
1688 | elif repo.ui.configbool(b'format', b'usestore'): | |
1687 | # only remove store requires if we are using store |
|
1689 | # only remove store requires if we are using store | |
1688 | if maywritestore: |
|
1690 | if maywritestore: | |
1689 | repo.svfs.tryunlink(b'requires') |
|
1691 | repo.svfs.tryunlink(b'requires') | |
1690 |
|
1692 | |||
1691 |
|
1693 | |||
1692 | def readrequires(vfs, allowmissing): |
|
1694 | def readrequires(vfs, allowmissing): | |
1693 | """reads the require file present at root of this vfs |
|
1695 | """reads the require file present at root of this vfs | |
1694 | and return a set of requirements |
|
1696 | and return a set of requirements | |
1695 |
|
1697 | |||
1696 | If allowmissing is True, we suppress FileNotFoundError if raised""" |
|
1698 | If allowmissing is True, we suppress FileNotFoundError if raised""" | |
1697 | # requires file contains a newline-delimited list of |
|
1699 | # requires file contains a newline-delimited list of | |
1698 | # features/capabilities the opener (us) must have in order to use |
|
1700 | # features/capabilities the opener (us) must have in order to use | |
1699 | # the repository. This file was introduced in Mercurial 0.9.2, |
|
1701 | # the repository. This file was introduced in Mercurial 0.9.2, | |
1700 | # which means very old repositories may not have one. We assume |
|
1702 | # which means very old repositories may not have one. We assume | |
1701 | # a missing file translates to no requirements. |
|
1703 | # a missing file translates to no requirements. | |
1702 | read = vfs.tryread if allowmissing else vfs.read |
|
1704 | read = vfs.tryread if allowmissing else vfs.read | |
1703 | return set(read(b'requires').splitlines()) |
|
1705 | return set(read(b'requires').splitlines()) | |
1704 |
|
1706 | |||
1705 |
|
1707 | |||
1706 | def writerequires(opener, requirements, maywrite=True) -> None: |
|
1708 | def writerequires(opener, requirements, maywrite=True) -> None: | |
1707 | on_disk = readrequires(opener, True) |
|
1709 | on_disk = readrequires(opener, True) | |
1708 | if not (on_disk == set(requirements)): |
|
1710 | if not (on_disk == set(requirements)): | |
1709 | if not maywrite: |
|
1711 | if not maywrite: | |
1710 | raise error.Abort(_(b"store requirements are not as expected")) |
|
1712 | raise error.Abort(_(b"store requirements are not as expected")) | |
1711 | with opener(b'requires', b'w', atomictemp=True) as fp: |
|
1713 | with opener(b'requires', b'w', atomictemp=True) as fp: | |
1712 | for r in sorted(requirements): |
|
1714 | for r in sorted(requirements): | |
1713 | fp.write(b"%s\n" % r) |
|
1715 | fp.write(b"%s\n" % r) | |
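A small sketch of the intended round trip; the added requirement name is hypothetical.

# Sketch: read, extend and rewrite the requires file of a repository's vfs.
reqs = readrequires(repo.vfs, allowmissing=True)  # empty set if the file is absent
reqs.add(b'my-experimental-requirement')          # hypothetical requirement name
writerequires(repo.vfs, reqs)                     # rewritten atomically, sorted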
1714 |
|
1716 | |||
1715 |
|
1717 | |||
1716 | class filecachesubentry: |
|
1718 | class filecachesubentry: | |
1717 | _cacheable: Optional[bool] = None |
|
1719 | _cacheable: Optional[bool] = None | |
1718 |
|
1720 | |||
1719 | def __init__(self, path, stat: bool): |
|
1721 | def __init__(self, path, stat: bool): | |
1720 | self.path = path |
|
1722 | self.path = path | |
1721 | self.cachestat = None |
|
1723 | self.cachestat = None | |
1722 | self._cacheable = None |
|
1724 | self._cacheable = None | |
1723 |
|
1725 | |||
1724 | if stat: |
|
1726 | if stat: | |
1725 | self.cachestat = filecachesubentry.stat(self.path) |
|
1727 | self.cachestat = filecachesubentry.stat(self.path) | |
1726 |
|
1728 | |||
1727 | if self.cachestat: |
|
1729 | if self.cachestat: | |
1728 | self._cacheable = self.cachestat.cacheable() |
|
1730 | self._cacheable = self.cachestat.cacheable() | |
1729 | else: |
|
1731 | else: | |
1730 | # None means we don't know yet |
|
1732 | # None means we don't know yet | |
1731 | self._cacheable = None |
|
1733 | self._cacheable = None | |
1732 |
|
1734 | |||
1733 | def refresh(self) -> None: |
|
1735 | def refresh(self) -> None: | |
1734 | if self.cacheable(): |
|
1736 | if self.cacheable(): | |
1735 | self.cachestat = filecachesubentry.stat(self.path) |
|
1737 | self.cachestat = filecachesubentry.stat(self.path) | |
1736 |
|
1738 | |||
1737 | def cacheable(self) -> bool: |
|
1739 | def cacheable(self) -> bool: | |
1738 | if self._cacheable is not None: |
|
1740 | if self._cacheable is not None: | |
1739 | return self._cacheable |
|
1741 | return self._cacheable | |
1740 |
|
1742 | |||
1741 | # we don't know yet, assume it is for now |
|
1743 | # we don't know yet, assume it is for now | |
1742 | return True |
|
1744 | return True | |
1743 |
|
1745 | |||
1744 | def changed(self) -> bool: |
|
1746 | def changed(self) -> bool: | |
1745 | # no point in going further if we can't cache it |
|
1747 | # no point in going further if we can't cache it | |
1746 | if not self.cacheable(): |
|
1748 | if not self.cacheable(): | |
1747 | return True |
|
1749 | return True | |
1748 |
|
1750 | |||
1749 | newstat = filecachesubentry.stat(self.path) |
|
1751 | newstat = filecachesubentry.stat(self.path) | |
1750 |
|
1752 | |||
1751 | # we may not know if it's cacheable yet, check again now |
|
1753 | # we may not know if it's cacheable yet, check again now | |
1752 | if newstat and self._cacheable is None: |
|
1754 | if newstat and self._cacheable is None: | |
1753 | self._cacheable = newstat.cacheable() |
|
1755 | self._cacheable = newstat.cacheable() | |
1754 |
|
1756 | |||
1755 | # check again |
|
1757 | # check again | |
1756 | if not self._cacheable: |
|
1758 | if not self._cacheable: | |
1757 | return True |
|
1759 | return True | |
1758 |
|
1760 | |||
1759 | if self.cachestat != newstat: |
|
1761 | if self.cachestat != newstat: | |
1760 | self.cachestat = newstat |
|
1762 | self.cachestat = newstat | |
1761 | return True |
|
1763 | return True | |
1762 | else: |
|
1764 | else: | |
1763 | return False |
|
1765 | return False | |
1764 |
|
1766 | |||
1765 | @staticmethod |
|
1767 | @staticmethod | |
1766 | def stat(path: bytes) -> Optional[typelib.CacheStat]: |
|
1768 | def stat(path: bytes) -> Optional[typelib.CacheStat]: | |
1767 | # TODO have a cleaner approach on httpstaticrepo side |
|
1769 | # TODO have a cleaner approach on httpstaticrepo side | |
1768 | if path.startswith(b'https://') or path.startswith(b'http://'): |
|
1770 | if path.startswith(b'https://') or path.startswith(b'http://'): | |
1769 | return util.uncacheable_cachestat() |
|
1771 | return util.uncacheable_cachestat() | |
1770 | try: |
|
1772 | try: | |
1771 | return util.cachestat(path) |
|
1773 | return util.cachestat(path) | |
1772 | except FileNotFoundError: |
|
1774 | except FileNotFoundError: | |
1773 | return None |
|
1775 | return None | |
1774 |
|
1776 | |||
1775 |
|
1777 | |||
1776 | class filecacheentry: |
|
1778 | class filecacheentry: | |
1777 | def __init__(self, paths, stat: bool = True) -> None: |
|
1779 | def __init__(self, paths, stat: bool = True) -> None: | |
1778 | self._entries = [] |
|
1780 | self._entries = [] | |
1779 | for path in paths: |
|
1781 | for path in paths: | |
1780 | self._entries.append(filecachesubentry(path, stat)) |
|
1782 | self._entries.append(filecachesubentry(path, stat)) | |
1781 |
|
1783 | |||
1782 | def changed(self) -> bool: |
|
1784 | def changed(self) -> bool: | |
1783 | '''true if any entry has changed''' |
|
1785 | '''true if any entry has changed''' | |
1784 | for entry in self._entries: |
|
1786 | for entry in self._entries: | |
1785 | if entry.changed(): |
|
1787 | if entry.changed(): | |
1786 | return True |
|
1788 | return True | |
1787 | return False |
|
1789 | return False | |
1788 |
|
1790 | |||
1789 | def refresh(self) -> None: |
|
1791 | def refresh(self) -> None: | |
1790 | for entry in self._entries: |
|
1792 | for entry in self._entries: | |
1791 | entry.refresh() |
|
1793 | entry.refresh() | |
1792 |
|
1794 | |||
1793 |
|
1795 | |||
1794 | class filecache: |
|
1796 | class filecache: | |
1795 | """A property like decorator that tracks files under .hg/ for updates. |
|
1797 | """A property like decorator that tracks files under .hg/ for updates. | |
1796 |
|
1798 | |||
1797 | On first access, the files defined as arguments are stat()ed and the |
|
1799 | On first access, the files defined as arguments are stat()ed and the | |
1798 | results cached. The decorated function is called. The results are stashed |
|
1800 | results cached. The decorated function is called. The results are stashed | |
1799 | away in a ``_filecache`` dict on the object whose method is decorated. |
|
1801 | away in a ``_filecache`` dict on the object whose method is decorated. | |
1800 |
|
1802 | |||
1801 | On subsequent access, the cached result is used as it is set to the |
|
1803 | On subsequent access, the cached result is used as it is set to the | |
1802 | instance dictionary. |
|
1804 | instance dictionary. | |
1803 |
|
1805 | |||
1804 | On external property set/delete operations, the caller must update the |
|
1806 | On external property set/delete operations, the caller must update the | |
1805 | corresponding _filecache entry appropriately. Use __class__.<attr>.set() |
|
1807 | corresponding _filecache entry appropriately. Use __class__.<attr>.set() | |
1806 | instead of directly setting <attr>. |
|
1808 | instead of directly setting <attr>. | |
1807 |
|
1809 | |||
1808 | When using the property API, the cached data is always used if available. |
|
1810 | When using the property API, the cached data is always used if available. | |
1809 | No stat() is performed to check if the file has changed. |
|
1811 | No stat() is performed to check if the file has changed. | |
1810 |
|
1812 | |||
1811 | Others can muck about with the state of the ``_filecache`` dict. e.g. they |
|
1813 | Others can muck about with the state of the ``_filecache`` dict. e.g. they | |
1812 | can populate an entry before the property's getter is called. In this case, |
|
1814 | can populate an entry before the property's getter is called. In this case, | |
1813 | entries in ``_filecache`` will be used during property operations, |
|
1815 | entries in ``_filecache`` will be used during property operations, | |
1814 | if available. If the underlying file changes, it is up to external callers |
|
1816 | if available. If the underlying file changes, it is up to external callers | |
1815 | to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached |
|
1817 | to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached | |
1816 | method result as well as possibly calling ``del obj._filecache[attr]`` to |
|
1818 | method result as well as possibly calling ``del obj._filecache[attr]`` to | |
1817 | remove the ``filecacheentry``. |
|
1819 | remove the ``filecacheentry``. | |
1818 | """ |
|
1820 | """ | |
1819 |
|
1821 | |||
1820 | paths: Tuple[bytes, ...] |
|
1822 | paths: Tuple[bytes, ...] | |
1821 |
|
1823 | |||
1822 | def __init__(self, *paths: bytes) -> None: |
|
1824 | def __init__(self, *paths: bytes) -> None: | |
1823 | self.paths = paths |
|
1825 | self.paths = paths | |
1824 |
|
1826 | |||
1825 | def tracked_paths(self, obj): |
|
1827 | def tracked_paths(self, obj): | |
1826 | return [self.join(obj, path) for path in self.paths] |
|
1828 | return [self.join(obj, path) for path in self.paths] | |
1827 |
|
1829 | |||
1828 | def join(self, obj, fname: bytes): |
|
1830 | def join(self, obj, fname: bytes): | |
1829 | """Used to compute the runtime path of a cached file. |
|
1831 | """Used to compute the runtime path of a cached file. | |
1830 |
|
1832 | |||
1831 | Users should subclass filecache and provide their own version of this |
|
1833 | Users should subclass filecache and provide their own version of this | |
1832 | function to call the appropriate join function on 'obj' (an instance |
|
1834 | function to call the appropriate join function on 'obj' (an instance | |
1833 | of the class whose member function was decorated). |
|
1835 | of the class whose member function was decorated). | |
1834 | """ |
|
1836 | """ | |
1835 | raise NotImplementedError |
|
1837 | raise NotImplementedError | |
1836 |
|
1838 | |||
1837 | def __call__(self, func): |
|
1839 | def __call__(self, func): | |
1838 | self.func = func |
|
1840 | self.func = func | |
1839 | self.sname = func.__name__ |
|
1841 | self.sname = func.__name__ | |
1840 | # XXX We should be using a unicode string instead of bytes for the main |
|
1842 | # XXX We should be using a unicode string instead of bytes for the main | |
1841 | # name (and the _filecache key). The fact we use bytes is a remnant |
|
1843 | # name (and the _filecache key). The fact we use bytes is a remnant | |
1842 | # from Python 2; since the name is derived from an attribute name, a |
|
1844 | # from Python 2; since the name is derived from an attribute name, a | |
1843 | # `str` is a better fit now that we support Python 3 only |
|
1845 | # `str` is a better fit now that we support Python 3 only | |
1844 | self.name = pycompat.sysbytes(self.sname) |
|
1846 | self.name = pycompat.sysbytes(self.sname) | |
1845 | return self |
|
1847 | return self | |
1846 |
|
1848 | |||
1847 | def __get__(self, obj, type=None): |
|
1849 | def __get__(self, obj, type=None): | |
1848 | # if accessed on the class, return the descriptor itself. |
|
1850 | # if accessed on the class, return the descriptor itself. | |
1849 | if obj is None: |
|
1851 | if obj is None: | |
1850 | return self |
|
1852 | return self | |
1851 |
|
1853 | |||
1852 | assert self.sname not in obj.__dict__ |
|
1854 | assert self.sname not in obj.__dict__ | |
1853 |
|
1855 | |||
1854 | entry = obj._filecache.get(self.name) |
|
1856 | entry = obj._filecache.get(self.name) | |
1855 |
|
1857 | |||
1856 | if entry: |
|
1858 | if entry: | |
1857 | if entry.changed(): |
|
1859 | if entry.changed(): | |
1858 | entry.obj = self.func(obj) |
|
1860 | entry.obj = self.func(obj) | |
1859 | else: |
|
1861 | else: | |
1860 | paths = self.tracked_paths(obj) |
|
1862 | paths = self.tracked_paths(obj) | |
1861 |
|
1863 | |||
1862 | # We stat -before- creating the object so our cache doesn't lie if |
|
1864 | # We stat -before- creating the object so our cache doesn't lie if | |
1863 | # a writer modified it between the time we read and stat |
|
1865 | # a writer modified it between the time we read and stat | |
1864 | entry = filecacheentry(paths, True) |
|
1866 | entry = filecacheentry(paths, True) | |
1865 | entry.obj = self.func(obj) |
|
1867 | entry.obj = self.func(obj) | |
1866 |
|
1868 | |||
1867 | obj._filecache[self.name] = entry |
|
1869 | obj._filecache[self.name] = entry | |
1868 |
|
1870 | |||
1869 | obj.__dict__[self.sname] = entry.obj |
|
1871 | obj.__dict__[self.sname] = entry.obj | |
1870 | return entry.obj |
|
1872 | return entry.obj | |
1871 |
|
1873 | |||
1872 | # don't implement __set__(), which would make __dict__ lookup as slow as |
|
1874 | # don't implement __set__(), which would make __dict__ lookup as slow as | |
1873 | # function call. |
|
1875 | # function call. | |
1874 |
|
1876 | |||
1875 | def set(self, obj, value): |
|
1877 | def set(self, obj, value): | |
1876 | if self.name not in obj._filecache: |
|
1878 | if self.name not in obj._filecache: | |
1877 | # we add an entry for the missing value because X in __dict__ |
|
1879 | # we add an entry for the missing value because X in __dict__ | |
1878 | # implies X in _filecache |
|
1880 | # implies X in _filecache | |
1879 | paths = self.tracked_paths(obj) |
|
1881 | paths = self.tracked_paths(obj) | |
1880 | ce = filecacheentry(paths, False) |
|
1882 | ce = filecacheentry(paths, False) | |
1881 | obj._filecache[self.name] = ce |
|
1883 | obj._filecache[self.name] = ce | |
1882 | else: |
|
1884 | else: | |
1883 | ce = obj._filecache[self.name] |
|
1885 | ce = obj._filecache[self.name] | |
1884 |
|
1886 | |||
1885 | ce.obj = value # update cached copy |
|
1887 | ce.obj = value # update cached copy | |
1886 | obj.__dict__[self.sname] = value # update copy returned by obj.x |
|
1888 | obj.__dict__[self.sname] = value # update copy returned by obj.x | |
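A minimal usage sketch, assuming the decorated object exposes a vfs; this is not Mercurial's actual repofilecache, and parse_bookmarks is a hypothetical parser.

# Sketch: subclass filecache so join() can resolve paths, then decorate a method.
class vfsfilecache(filecache):
    def join(self, obj, fname):
        return obj.vfs.join(fname)  # assumes the decorated object has a vfs


class bookkeeper:
    def __init__(self, vfs):
        self.vfs = vfs
        self._filecache = {}  # required: __get__ stores its filecacheentry here

    @vfsfilecache(b'bookmarks')
    def bookmarks(self):
        # recomputed after invalidation (delattr) if the file changed on disk
        return parse_bookmarks(self.vfs.tryread(b'bookmarks'))  # hypothetical parser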
1887 |
|
1889 | |||
1888 |
|
1890 | |||
1889 | def extdatasource(repo, source: bytes): |
|
1891 | def extdatasource(repo, source: bytes): | |
1890 | """Gather a map of rev -> value dict from the specified source |
|
1892 | """Gather a map of rev -> value dict from the specified source | |
1891 |
|
1893 | |||
1892 | A source spec is treated as a URL, with a special case shell: type |
|
1894 | A source spec is treated as a URL, with a special case shell: type | |
1893 | for parsing the output from a shell command. |
|
1895 | for parsing the output from a shell command. | |
1894 |
|
1896 | |||
1895 | The data is parsed as a series of newline-separated records where |
|
1897 | The data is parsed as a series of newline-separated records where | |
1896 | each record is a revision specifier optionally followed by a space |
|
1898 | each record is a revision specifier optionally followed by a space | |
1897 | and a freeform string value. If the revision is known locally, it |
|
1899 | and a freeform string value. If the revision is known locally, it | |
1898 | is converted to a rev, otherwise the record is skipped. |
|
1900 | is converted to a rev, otherwise the record is skipped. | |
1899 |
|
1901 | |||
1900 | Note that both key and value are treated as UTF-8 and converted to |
|
1902 | Note that both key and value are treated as UTF-8 and converted to | |
1901 | the local encoding. This allows uniformity between local and |
|
1903 | the local encoding. This allows uniformity between local and | |
1902 | remote data sources. |
|
1904 | remote data sources. | |
1903 | """ |
|
1905 | """ | |
1904 |
|
1906 | |||
1905 | spec = repo.ui.config(b"extdata", source) |
|
1907 | spec = repo.ui.config(b"extdata", source) | |
1906 | if not spec: |
|
1908 | if not spec: | |
1907 | raise error.Abort(_(b"unknown extdata source '%s'") % source) |
|
1909 | raise error.Abort(_(b"unknown extdata source '%s'") % source) | |
1908 |
|
1910 | |||
1909 | data = {} |
|
1911 | data = {} | |
1910 | src = proc = None |
|
1912 | src = proc = None | |
1911 | try: |
|
1913 | try: | |
1912 | if spec.startswith(b"shell:"): |
|
1914 | if spec.startswith(b"shell:"): | |
1913 | # external commands should be run relative to the repo root |
|
1915 | # external commands should be run relative to the repo root | |
1914 | cmd = spec[6:] |
|
1916 | cmd = spec[6:] | |
1915 | proc = subprocess.Popen( |
|
1917 | proc = subprocess.Popen( | |
1916 | procutil.tonativestr(cmd), |
|
1918 | procutil.tonativestr(cmd), | |
1917 | shell=True, |
|
1919 | shell=True, | |
1918 | bufsize=-1, |
|
1920 | bufsize=-1, | |
1919 | close_fds=procutil.closefds, |
|
1921 | close_fds=procutil.closefds, | |
1920 | stdout=subprocess.PIPE, |
|
1922 | stdout=subprocess.PIPE, | |
1921 | cwd=procutil.tonativestr(repo.root), |
|
1923 | cwd=procutil.tonativestr(repo.root), | |
1922 | ) |
|
1924 | ) | |
1923 | src = proc.stdout |
|
1925 | src = proc.stdout | |
1924 | else: |
|
1926 | else: | |
1925 | # treat as a URL or file |
|
1927 | # treat as a URL or file | |
1926 | src = url.open(repo.ui, spec) |
|
1928 | src = url.open(repo.ui, spec) | |
1927 | for l in src: |
|
1929 | for l in src: | |
1928 | if b" " in l: |
|
1930 | if b" " in l: | |
1929 | k, v = l.strip().split(b" ", 1) |
|
1931 | k, v = l.strip().split(b" ", 1) | |
1930 | else: |
|
1932 | else: | |
1931 | k, v = l.strip(), b"" |
|
1933 | k, v = l.strip(), b"" | |
1932 |
|
1934 | |||
1933 | k = encoding.tolocal(k) |
|
1935 | k = encoding.tolocal(k) | |
1934 | try: |
|
1936 | try: | |
1935 | data[revsingle(repo, k).rev()] = encoding.tolocal(v) |
|
1937 | data[revsingle(repo, k).rev()] = encoding.tolocal(v) | |
1936 | except (error.LookupError, error.RepoLookupError, error.InputError): |
|
1938 | except (error.LookupError, error.RepoLookupError, error.InputError): | |
1937 | pass # we ignore data for nodes that don't exist locally |
|
1939 | pass # we ignore data for nodes that don't exist locally | |
1938 | finally: |
|
1940 | finally: | |
1939 | if proc: |
|
1941 | if proc: | |
1940 | try: |
|
1942 | try: | |
1941 | proc.communicate() |
|
1943 | proc.communicate() | |
1942 | except ValueError: |
|
1944 | except ValueError: | |
1943 | # This happens if we started iterating src and then |
|
1945 | # This happens if we started iterating src and then | |
1944 | # get a parse error on a line. It should be safe to ignore. |
|
1946 | # get a parse error on a line. It should be safe to ignore. | |
1945 | pass |
|
1947 | pass | |
1946 | if src: |
|
1948 | if src: | |
1947 | src.close() |
|
1949 | src.close() | |
1948 | if proc and proc.returncode != 0: |
|
1950 | if proc and proc.returncode != 0: | |
1949 | raise error.Abort( |
|
1951 | raise error.Abort( | |
1950 | _(b"extdata command '%s' failed: %s") |
|
1952 | _(b"extdata command '%s' failed: %s") | |
1951 | % (cmd, procutil.explainexit(proc.returncode)) |
|
1953 | % (cmd, procutil.explainexit(proc.returncode)) | |
1952 | ) |
|
1954 | ) | |
1953 |
|
1955 | |||
1954 | return data |
|
1956 | return data | |
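A hedged example of how a source might be configured and queried; the section name and shell command are illustrative, not canonical.

# Sketch: an [extdata] entry such as the following hgrc snippet
#
#   [extdata]
#   ticket = shell:cat .hg/ticket-map
#
# where each output line is "<revision-spec> <free-form value>", can then be
# read back as a rev -> value mapping:
data = extdatasource(repo, b'ticket')
for rev, value in sorted(data.items()):
    repo.ui.write(b'%d: %s\n' % (rev, value))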
1955 |
|
1957 | |||
1956 |
|
1958 | |||
1957 | class progress: |
|
1959 | class progress: | |
1958 | ui: "uimod.ui" |
|
1960 | ui: "uimod.ui" | |
1959 | pos: Optional[int] # None once complete |
|
1961 | pos: Optional[int] # None once complete | |
1960 | topic: bytes |
|
1962 | topic: bytes | |
1961 | unit: bytes |
|
1963 | unit: bytes | |
1962 | total: Optional[int] |
|
1964 | total: Optional[int] | |
1963 | debug: bool |
|
1965 | debug: bool | |
1964 |
|
1966 | |||
1965 | def __init__( |
|
1967 | def __init__( | |
1966 | self, |
|
1968 | self, | |
1967 | ui: "uimod.ui", |
|
1969 | ui: "uimod.ui", | |
1968 | updatebar, |
|
1970 | updatebar, | |
1969 | topic: bytes, |
|
1971 | topic: bytes, | |
1970 | unit: bytes = b"", |
|
1972 | unit: bytes = b"", | |
1971 | total: Optional[int] = None, |
|
1973 | total: Optional[int] = None, | |
1972 | ) -> None: |
|
1974 | ) -> None: | |
1973 | self.ui = ui |
|
1975 | self.ui = ui | |
1974 | self.pos = 0 |
|
1976 | self.pos = 0 | |
1975 | self.topic = topic |
|
1977 | self.topic = topic | |
1976 | self.unit = unit |
|
1978 | self.unit = unit | |
1977 | self.total = total |
|
1979 | self.total = total | |
1978 | self.debug = ui.configbool(b'progress', b'debug') |
|
1980 | self.debug = ui.configbool(b'progress', b'debug') | |
1979 | self._updatebar = updatebar |
|
1981 | self._updatebar = updatebar | |
1980 |
|
1982 | |||
1981 | def __enter__(self): |
|
1983 | def __enter__(self): | |
1982 | return self |
|
1984 | return self | |
1983 |
|
1985 | |||
1984 | def __exit__(self, exc_type, exc_value, exc_tb): |
|
1986 | def __exit__(self, exc_type, exc_value, exc_tb): | |
1985 | self.complete() |
|
1987 | self.complete() | |
1986 |
|
1988 | |||
1987 | def update( |
|
1989 | def update( | |
1988 | self, pos: int, item: bytes = b"", total: Optional[int] = None |
|
1990 | self, pos: int, item: bytes = b"", total: Optional[int] = None | |
1989 | ) -> None: |
|
1991 | ) -> None: | |
1990 | assert pos is not None |
|
1992 | assert pos is not None | |
1991 | if total: |
|
1993 | if total: | |
1992 | self.total = total |
|
1994 | self.total = total | |
1993 | self.pos = pos |
|
1995 | self.pos = pos | |
1994 | self._updatebar(self.topic, self.pos, item, self.unit, self.total) |
|
1996 | self._updatebar(self.topic, self.pos, item, self.unit, self.total) | |
1995 | if self.debug: |
|
1997 | if self.debug: | |
1996 | self._printdebug(item) |
|
1998 | self._printdebug(item) | |
1997 |
|
1999 | |||
1998 | def increment( |
|
2000 | def increment( | |
1999 | self, step: int = 1, item: bytes = b"", total: Optional[int] = None |
|
2001 | self, step: int = 1, item: bytes = b"", total: Optional[int] = None | |
2000 | ) -> None: |
|
2002 | ) -> None: | |
2001 | self.update(self.pos + step, item, total) |
|
2003 | self.update(self.pos + step, item, total) | |
2002 |
|
2004 | |||
2003 | def complete(self) -> None: |
|
2005 | def complete(self) -> None: | |
2004 | self.pos = None |
|
2006 | self.pos = None | |
2005 | self.unit = b"" |
|
2007 | self.unit = b"" | |
2006 | self.total = None |
|
2008 | self.total = None | |
2007 | self._updatebar(self.topic, self.pos, b"", self.unit, self.total) |
|
2009 | self._updatebar(self.topic, self.pos, b"", self.unit, self.total) | |
2008 |
|
2010 | |||
2009 | def _printdebug(self, item: bytes) -> None: |
|
2011 | def _printdebug(self, item: bytes) -> None: | |
2010 | unit = b'' |
|
2012 | unit = b'' | |
2011 | if self.unit: |
|
2013 | if self.unit: | |
2012 | unit = b' ' + self.unit |
|
2014 | unit = b' ' + self.unit | |
2013 | if item: |
|
2015 | if item: | |
2014 | item = b' ' + item |
|
2016 | item = b' ' + item | |
2015 |
|
2017 | |||
2016 | if self.total: |
|
2018 | if self.total: | |
2017 | pct = 100.0 * self.pos / self.total |
|
2019 | pct = 100.0 * self.pos / self.total | |
2018 | self.ui.debug( |
|
2020 | self.ui.debug( | |
2019 | b'%s:%s %d/%d%s (%4.2f%%)\n' |
|
2021 | b'%s:%s %d/%d%s (%4.2f%%)\n' | |
2020 | % (self.topic, item, self.pos, self.total, unit, pct) |
|
2022 | % (self.topic, item, self.pos, self.total, unit, pct) | |
2021 | ) |
|
2023 | ) | |
2022 | else: |
|
2024 | else: | |
2023 | self.ui.debug(b'%s:%s %d%s\n' % (self.topic, item, self.pos, unit)) |
|
2025 | self.ui.debug(b'%s:%s %d%s\n' % (self.topic, item, self.pos, unit)) | |
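In practice instances are usually obtained through ui.makeprogress(), which supplies the updatebar callback; a usage sketch where 'items' and 'process' are placeholders for whatever is being iterated:

# Sketch: drive a progress bar over a collection of work items.
with repo.ui.makeprogress(b'scanning', unit=b'files', total=len(items)) as prog:
    for item in items:
        prog.increment(item=item)  # advances pos by 1 and redraws the bar
        process(item)              # placeholder for the actual per-item work
# leaving the with-block calls complete(), which clears the progress bar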
2024 |
|
2026 | |||
2025 |
|
2027 | |||
2026 | def gdinitconfig(ui: "uimod.ui"): |
|
2028 | def gdinitconfig(ui: "uimod.ui"): | |
2027 | """helper function to know if a repo should be created as general delta""" |
|
2029 | """helper function to know if a repo should be created as general delta""" | |
2028 | # experimental config: format.generaldelta |
|
2030 | # experimental config: format.generaldelta | |
2029 | return ui.configbool(b'format', b'generaldelta') or ui.configbool( |
|
2031 | return ui.configbool(b'format', b'generaldelta') or ui.configbool( | |
2030 | b'format', b'usegeneraldelta' |
|
2032 | b'format', b'usegeneraldelta' | |
2031 | ) |
|
2033 | ) | |
2032 |
|
2034 | |||
2033 |
|
2035 | |||
2034 | def gddeltaconfig(ui: "uimod.ui"): |
|
2036 | def gddeltaconfig(ui: "uimod.ui"): | |
2035 | """helper function to know if incoming deltas should be optimized |
|
2037 | """helper function to know if incoming deltas should be optimized | |
2036 |
|
2038 | |||
2037 | The `format.generaldelta` config is an old form of the config that also |
|
2039 | The `format.generaldelta` config is an old form of the config that also | |
2038 | implies that incoming delta-bases should never be trusted. This function |
|
2040 | implies that incoming delta-bases should never be trusted. This function | |
2039 | exists for this purpose. |
|
2041 | exists for this purpose. | |
2040 | """ |
|
2042 | """ | |
2041 | # experimental config: format.generaldelta |
|
2043 | # experimental config: format.generaldelta | |
2042 | return ui.configbool(b'format', b'generaldelta') |
|
2044 | return ui.configbool(b'format', b'generaldelta') | |
2043 |
|
2045 | |||
2044 |
|
2046 | |||
2045 | class simplekeyvaluefile: |
|
2047 | class simplekeyvaluefile: | |
2046 | """A simple file with key=value lines |
|
2048 | """A simple file with key=value lines | |
2047 |
|
2049 | |||
2048 | Keys must be alphanumerics and start with a letter, values must not |
|
2050 | Keys must be alphanumerics and start with a letter, values must not | |
2049 | contain '\n' characters""" |
|
2051 | contain '\n' characters""" | |
2050 |
|
2052 | |||
2051 | firstlinekey = b'__firstline' |
|
2053 | firstlinekey = b'__firstline' | |
2052 |
|
2054 | |||
2053 | def __init__(self, vfs, path: bytes, keys=None) -> None: |
|
2055 | def __init__(self, vfs, path: bytes, keys=None) -> None: | |
2054 | self.vfs = vfs |
|
2056 | self.vfs = vfs | |
2055 | self.path = path |
|
2057 | self.path = path | |
2056 |
|
2058 | |||
2057 | def read(self, firstlinenonkeyval: bool = False): |
|
2059 | def read(self, firstlinenonkeyval: bool = False): | |
2058 | """Read the contents of a simple key-value file |
|
2060 | """Read the contents of a simple key-value file | |
2059 |
|
2061 | |||
2060 | 'firstlinenonkeyval' indicates whether the first line of the file should |
|
2062 | 'firstlinenonkeyval' indicates whether the first line of the file should | |
2061 | be treated as a key-value pair or returned fully under the |
|
2063 | be treated as a key-value pair or returned fully under the | |
2062 | __firstline key.""" |
|
2064 | __firstline key.""" | |
2063 | lines = self.vfs.readlines(self.path) |
|
2065 | lines = self.vfs.readlines(self.path) | |
2064 | d = {} |
|
2066 | d = {} | |
2065 | if firstlinenonkeyval: |
|
2067 | if firstlinenonkeyval: | |
2066 | if not lines: |
|
2068 | if not lines: | |
2067 | e = _(b"empty simplekeyvalue file") |
|
2069 | e = _(b"empty simplekeyvalue file") | |
2068 | raise error.CorruptedState(e) |
|
2070 | raise error.CorruptedState(e) | |
2069 | # we don't want to include '\n' in the __firstline |
|
2071 | # we don't want to include '\n' in the __firstline | |
2070 | d[self.firstlinekey] = lines[0][:-1] |
|
2072 | d[self.firstlinekey] = lines[0][:-1] | |
2071 | del lines[0] |
|
2073 | del lines[0] | |
2072 |
|
2074 | |||
2073 | try: |
|
2075 | try: | |
2074 | # the 'if line.strip()' part prevents us from failing on empty |
|
2076 | # the 'if line.strip()' part prevents us from failing on empty | |
2075 | # lines which only contain '\n' and therefore are not skipped |
|
2077 | # lines which only contain '\n' and therefore are not skipped | |
2076 | # by 'if line' |
|
2078 | # by 'if line' | |
2077 | updatedict = dict( |
|
2079 | updatedict = dict( | |
2078 | line[:-1].split(b'=', 1) for line in lines if line.strip() |
|
2080 | line[:-1].split(b'=', 1) for line in lines if line.strip() | |
2079 | ) |
|
2081 | ) | |
2080 | if self.firstlinekey in updatedict: |
|
2082 | if self.firstlinekey in updatedict: | |
2081 | e = _(b"%r can't be used as a key") |
|
2083 | e = _(b"%r can't be used as a key") | |
2082 | raise error.CorruptedState(e % self.firstlinekey) |
|
2084 | raise error.CorruptedState(e % self.firstlinekey) | |
2083 | d.update(updatedict) |
|
2085 | d.update(updatedict) | |
2084 | except ValueError as e: |
|
2086 | except ValueError as e: | |
2085 | raise error.CorruptedState(stringutil.forcebytestr(e)) |
|
2087 | raise error.CorruptedState(stringutil.forcebytestr(e)) | |
2086 | return d |
|
2088 | return d | |
2087 |
|
2089 | |||
2088 | def write(self, data, firstline: Optional[bytes] = None) -> None: |
|
2090 | def write(self, data, firstline: Optional[bytes] = None) -> None: | |
2089 | """Write key=>value mapping to a file |
|
2091 | """Write key=>value mapping to a file | |
2090 | data is a dict. Keys must be alphanumerical and start with a letter. |
|
2092 | data is a dict. Keys must be alphanumerical and start with a letter. | |
2091 | Values must not contain newline characters. |
|
2093 | Values must not contain newline characters. | |
2092 |
|
2094 | |||
2093 | If 'firstline' is not None, it is written to file before |
|
2095 | If 'firstline' is not None, it is written to file before | |
2094 | everything else, as it is, not in a key=value form""" |
|
2096 | everything else, as it is, not in a key=value form""" | |
2095 | lines = [] |
|
2097 | lines = [] | |
2096 | if firstline is not None: |
|
2098 | if firstline is not None: | |
2097 | lines.append(b'%s\n' % firstline) |
|
2099 | lines.append(b'%s\n' % firstline) | |
2098 |
|
2100 | |||
2099 | for k, v in data.items(): |
|
2101 | for k, v in data.items(): | |
2100 | if k == self.firstlinekey: |
|
2102 | if k == self.firstlinekey: | |
2101 | e = b"key name '%s' is reserved" % self.firstlinekey |
|
2103 | e = b"key name '%s' is reserved" % self.firstlinekey | |
2102 | raise error.ProgrammingError(e) |
|
2104 | raise error.ProgrammingError(e) | |
2103 | if not k[0:1].isalpha(): |
|
2105 | if not k[0:1].isalpha(): | |
2104 | e = b"keys must start with a letter in a key-value file" |
|
2106 | e = b"keys must start with a letter in a key-value file" | |
2105 | raise error.ProgrammingError(e) |
|
2107 | raise error.ProgrammingError(e) | |
2106 | if not k.isalnum(): |
|
2108 | if not k.isalnum(): | |
2107 | e = b"invalid key name in a simple key-value file" |
|
2109 | e = b"invalid key name in a simple key-value file" | |
2108 | raise error.ProgrammingError(e) |
|
2110 | raise error.ProgrammingError(e) | |
2109 | if b'\n' in v: |
|
2111 | if b'\n' in v: | |
2110 | e = b"invalid value in a simple key-value file" |
|
2112 | e = b"invalid value in a simple key-value file" | |
2111 | raise error.ProgrammingError(e) |
|
2113 | raise error.ProgrammingError(e) | |
2112 | lines.append(b"%s=%s\n" % (k, v)) |
|
2114 | lines.append(b"%s=%s\n" % (k, v)) | |
2113 | with self.vfs(self.path, mode=b'wb', atomictemp=True) as fp: |
|
2115 | with self.vfs(self.path, mode=b'wb', atomictemp=True) as fp: | |
2114 | fp.write(b''.join(lines)) |
|
2116 | fp.write(b''.join(lines)) | |
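A small usage sketch; the file name and keys are hypothetical.

# Sketch: persist a tiny state file under .hg/ and read it back.
skv = simplekeyvaluefile(repo.vfs, b'examplestate')  # hypothetical path
skv.write({b'version': b'1', b'step': b'apply'}, firstline=b'v1')
state = skv.read(firstlinenonkeyval=True)
# state == {b'__firstline': b'v1', b'version': b'1', b'step': b'apply'}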
2115 |
|
2117 | |||
2116 |
|
2118 | |||
2117 | _reportobsoletedsource: List[bytes] = [ |
|
2119 | _reportobsoletedsource: List[bytes] = [ | |
2118 | b'debugobsolete', |
|
2120 | b'debugobsolete', | |
2119 | b'pull', |
|
2121 | b'pull', | |
2120 | b'push', |
|
2122 | b'push', | |
2121 | b'serve', |
|
2123 | b'serve', | |
2122 | b'unbundle', |
|
2124 | b'unbundle', | |
2123 | ] |
|
2125 | ] | |
2124 |
|
2126 | |||
2125 | _reportnewcssource: List[bytes] = [ |
|
2127 | _reportnewcssource: List[bytes] = [ | |
2126 | b'pull', |
|
2128 | b'pull', | |
2127 | b'unbundle', |
|
2129 | b'unbundle', | |
2128 | ] |
|
2130 | ] | |
2129 |
|
2131 | |||
2130 |
|
2132 | |||
2131 | def prefetchfiles(repo, revmatches) -> None: |
|
2133 | def prefetchfiles(repo, revmatches) -> None: | |
2132 | """Invokes the registered file prefetch functions, allowing extensions to |
|
2134 | """Invokes the registered file prefetch functions, allowing extensions to | |
2133 | ensure the corresponding files are available locally, before the command |
|
2135 | ensure the corresponding files are available locally, before the command | |
2134 | uses them. |
|
2136 | uses them. | |
2135 |
|
2137 | |||
2136 | Args: |
|
2138 | Args: | |
2137 | revmatches: a list of (revision, match) tuples to indicate the files to |
|
2139 | revmatches: a list of (revision, match) tuples to indicate the files to | |
2138 | fetch at each revision. If any of the match elements is None, it matches |
|
2140 | fetch at each revision. If any of the match elements is None, it matches | |
2139 | all files. |
|
2141 | all files. | |
2140 | """ |
|
2142 | """ | |
2141 |
|
2143 | |||
2142 | def _matcher(m): |
|
2144 | def _matcher(m): | |
2143 | if m: |
|
2145 | if m: | |
2144 | assert isinstance(m, matchmod.basematcher) |
|
2146 | assert isinstance(m, matchmod.basematcher) | |
2145 | # The command itself will complain about files that don't exist, so |
|
2147 | # The command itself will complain about files that don't exist, so | |
2146 | # don't duplicate the message. |
|
2148 | # don't duplicate the message. | |
2147 | return matchmod.badmatch(m, lambda fn, msg: None) |
|
2149 | return matchmod.badmatch(m, lambda fn, msg: None) | |
2148 | else: |
|
2150 | else: | |
2149 | return matchall(repo) |
|
2151 | return matchall(repo) | |
2150 |
|
2152 | |||
2151 | revbadmatches = [(rev, _matcher(match)) for (rev, match) in revmatches] |
|
2153 | revbadmatches = [(rev, _matcher(match)) for (rev, match) in revmatches] | |
2152 |
|
2154 | |||
2153 | fileprefetchhooks(repo, revbadmatches) |
|
2155 | fileprefetchhooks(repo, revbadmatches) | |
2154 |
|
2156 | |||
2155 |
|
2157 | |||
2156 | # a list of (repo, revs, match) prefetch functions |
|
2158 | # a list of (repo, revs, match) prefetch functions | |
2157 | fileprefetchhooks = util.hooks() |
|
2159 | fileprefetchhooks = util.hooks() | |
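A hedged sketch of how an extension might hook in; the extension name and the fetch helper are hypothetical.

# Sketch: register a prefetch function so prefetchfiles() can warm a remote store.
def _prefetch(repo, revmatches):
    for rev, match in revmatches:
        wanted = [f for f in repo[rev].manifest() if match(f)]
        fetch_from_remote_store(repo, rev, wanted)  # hypothetical helper

fileprefetchhooks.add(b'myextension', _prefetch)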
2158 |
|
2160 | |||
2159 | # A marker that tells the evolve extension to suppress its own reporting |
|
2161 | # A marker that tells the evolve extension to suppress its own reporting | |
2160 | _reportstroubledchangesets: bool = True |
|
2162 | _reportstroubledchangesets: bool = True | |
2161 |
|
2163 | |||
2162 |
|
2164 | |||
2163 | def registersummarycallback( |
|
2165 | def registersummarycallback( | |
2164 | repo, otr, txnname: bytes = b'', as_validator: bool = False |
|
2166 | repo, otr, txnname: bytes = b'', as_validator: bool = False | |
2165 | ) -> None: |
|
2167 | ) -> None: | |
2166 | """register a callback to issue a summary after the transaction is closed |
|
2168 | """register a callback to issue a summary after the transaction is closed | |
2167 |
|
2169 | |||
2168 | If as_validator is true, then the callbacks are registered as transaction |
|
2170 | If as_validator is true, then the callbacks are registered as transaction | |
2169 | validators instead |
|
2171 | validators instead | |
2170 | """ |
|
2172 | """ | |
2171 |
|
2173 | |||
2172 | def txmatch(sources): |
|
2174 | def txmatch(sources): | |
2173 | return any(txnname.startswith(source) for source in sources) |
|
2175 | return any(txnname.startswith(source) for source in sources) | |
2174 |
|
2176 | |||
2175 | categories = [] |
|
2177 | categories = [] | |
2176 |
|
2178 | |||
2177 | def reportsummary(func): |
|
2179 | def reportsummary(func): | |
2178 | """decorator for report callbacks.""" |
|
2180 | """decorator for report callbacks.""" | |
2179 | # The repoview life cycle is shorter than the one of the actual |
|
2181 | # The repoview life cycle is shorter than the one of the actual | |
2180 | # underlying repository. So the filtered object can die before the |
|
2182 | # underlying repository. So the filtered object can die before the | |
2181 | # weakref is used leading to troubles. We keep a reference to the |
|
2183 | # weakref is used leading to troubles. We keep a reference to the | |
2182 | # unfiltered object and restore the filtering when retrieving the |
|
2184 | # unfiltered object and restore the filtering when retrieving the | |
2183 | # repository through the weakref. |
|
2185 | # repository through the weakref. | |
2184 | filtername = repo.filtername |
|
2186 | filtername = repo.filtername | |
2185 | reporef = weakref.ref(repo.unfiltered()) |
|
2187 | reporef = weakref.ref(repo.unfiltered()) | |
2186 |
|
2188 | |||
2187 | def wrapped(tr): |
|
2189 | def wrapped(tr): | |
2188 | repo = reporef() |
|
2190 | repo = reporef() | |
2189 | if filtername: |
|
2191 | if filtername: | |
2190 | assert repo is not None # help pytype |
|
2192 | assert repo is not None # help pytype | |
2191 | repo = repo.filtered(filtername) |
|
2193 | repo = repo.filtered(filtername) | |
2192 | func(repo, tr) |
|
2194 | func(repo, tr) | |
2193 |
|
2195 | |||
2194 | newcat = b'%02i-txnreport' % len(categories) |
|
2196 | newcat = b'%02i-txnreport' % len(categories) | |
2195 | if as_validator: |
|
2197 | if as_validator: | |
2196 | otr.addvalidator(newcat, wrapped) |
|
2198 | otr.addvalidator(newcat, wrapped) | |
2197 | else: |
|
2199 | else: | |
2198 | otr.addpostclose(newcat, wrapped) |
|
2200 | otr.addpostclose(newcat, wrapped) | |
2199 | categories.append(newcat) |
|
2201 | categories.append(newcat) | |
2200 | return wrapped |
|
2202 | return wrapped | |
2201 |
|
2203 | |||
2202 | @reportsummary |
|
2204 | @reportsummary | |
2203 | def reportchangegroup(repo, tr): |
|
2205 | def reportchangegroup(repo, tr): | |
2204 | cgchangesets = tr.changes.get(b'changegroup-count-changesets', 0) |
|
2206 | cgchangesets = tr.changes.get(b'changegroup-count-changesets', 0) | |
2205 | cgrevisions = tr.changes.get(b'changegroup-count-revisions', 0) |
|
2207 | cgrevisions = tr.changes.get(b'changegroup-count-revisions', 0) | |
2206 | cgfiles = tr.changes.get(b'changegroup-count-files', 0) |
|
2208 | cgfiles = tr.changes.get(b'changegroup-count-files', 0) | |
2207 | cgheads = tr.changes.get(b'changegroup-count-heads', 0) |
|
2209 | cgheads = tr.changes.get(b'changegroup-count-heads', 0) | |
2208 | if cgchangesets or cgrevisions or cgfiles: |
|
2210 | if cgchangesets or cgrevisions or cgfiles: | |
2209 | htext = b"" |
|
2211 | htext = b"" | |
2210 | if cgheads: |
|
2212 | if cgheads: | |
2211 | htext = _(b" (%+d heads)") % cgheads |
|
2213 | htext = _(b" (%+d heads)") % cgheads | |
2214 |             msg = _(b"added %d changesets with %d changes to %d files%s\n")
2215 |             if as_validator:
2216 |                 msg = _(b"adding %d changesets with %d changes to %d files%s\n")
2217 |             assert repo is not None  # help pytype
2218 |             repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext))
2219 |
2220 |     if txmatch(_reportobsoletedsource):
2221 |
2222 |         @reportsummary
2223 |         def reportobsoleted(repo, tr):
2224 |             obsoleted = obsutil.getobsoleted(repo, tr)
2225 |             newmarkers = len(tr.changes.get(b'obsmarkers', ()))
2226 |             if newmarkers:
2227 |                 repo.ui.status(_(b'%i new obsolescence markers\n') % newmarkers)
2228 |             if obsoleted:
2229 |                 msg = _(b'obsoleted %i changesets\n')
2230 |                 if as_validator:
2231 |                     msg = _(b'obsoleting %i changesets\n')
2232 |                 repo.ui.status(msg % len(obsoleted))
2233 |
2234 |         if obsolete.isenabled(
2235 |             repo, obsolete.createmarkersopt
2236 |         ) and repo.ui.configbool(
2237 |             b'experimental', b'evolution.report-instabilities'
2238 |         ):
2239 |             instabilitytypes = [
2240 |                 (b'orphan', b'orphan'),
2241 |                 (b'phase-divergent', b'phasedivergent'),
2242 |                 (b'content-divergent', b'contentdivergent'),
2243 |             ]
2244 |
2245 |             def getinstabilitycounts(repo):
2246 |                 filtered = repo.changelog.filteredrevs
2247 |                 counts = {}
2248 |                 for instability, revset in instabilitytypes:
2249 |                     counts[instability] = len(
2250 |                         set(obsolete.getrevs(repo, revset)) - filtered
2251 |                     )
2252 |                 return counts
2253 |
2254 |             oldinstabilitycounts = getinstabilitycounts(repo)
2255 |
2256 |             @reportsummary
2257 |             def reportnewinstabilities(repo, tr):
2258 |                 newinstabilitycounts = getinstabilitycounts(repo)
2259 |                 for instability, revset in instabilitytypes:
2260 |                     delta = (
2261 |                         newinstabilitycounts[instability]
2262 |                         - oldinstabilitycounts[instability]
2263 |                     )
2264 |                     msg = getinstabilitymessage(delta, instability)
2265 |                     if msg:
2266 |                         repo.ui.warn(msg)
2267 |
2268 |     if txmatch(_reportnewcssource):
2269 |
2270 |         @reportsummary
2271 |         def reportnewcs(repo, tr):
2272 |             """Report the range of new revisions pulled/unbundled."""
2273 |             origrepolen = tr.changes.get(b'origrepolen', len(repo))
2274 |             unfi = repo.unfiltered()
2275 |             if origrepolen >= len(unfi):
2276 |                 return
2277 |
2278 |             # Compute the bounds of new visible revisions' range.
2279 |             revs = smartset.spanset(repo, start=origrepolen)
2280 |             if revs:
2281 |                 minrev, maxrev = repo[revs.min()], repo[revs.max()]
2282 |
2283 |                 if minrev == maxrev:
2284 |                     revrange = minrev
2285 |                 else:
2286 |                     revrange = b'%s:%s' % (minrev, maxrev)
2287 |                 draft = len(repo.revs(b'%ld and draft()', revs))
2288 |                 secret = len(repo.revs(b'%ld and secret()', revs))
2289 |                 if not (draft or secret):
2290 |                     msg = _(b'new changesets %s\n') % revrange
2291 |                 elif draft and secret:
2292 |                     msg = _(b'new changesets %s (%d drafts, %d secrets)\n')
2293 |                     msg %= (revrange, draft, secret)
2294 |                 elif draft:
2295 |                     msg = _(b'new changesets %s (%d drafts)\n')
2296 |                     msg %= (revrange, draft)
2297 |                 elif secret:
2298 |                     msg = _(b'new changesets %s (%d secrets)\n')
2299 |                     msg %= (revrange, secret)
2300 |                 else:
2301 |                     errormsg = b'entered unreachable condition'
2302 |                     raise error.ProgrammingError(errormsg)
2303 |                 repo.ui.status(msg)
2304 |
2305 |             # search new changesets directly pulled as obsolete
2306 |             duplicates = tr.changes.get(b'revduplicates', ())
2307 |             obsadded = unfi.revs(
2308 |                 b'(%d: + %ld) and obsolete()', origrepolen, duplicates
2309 |             )
2310 |             cl = repo.changelog
2311 |             extinctadded = [r for r in obsadded if r not in cl]
2312 |             if extinctadded:
2313 |                 # They are not just obsolete, but obsolete and invisible
2314 |                 # we call them "extinct" internally but the terms have not been
2315 |                 # exposed to users.
2316 |                 msg = b'(%d other changesets obsolete on arrival)\n'
2317 |                 repo.ui.status(msg % len(extinctadded))
2318 |
2319 |         @reportsummary
2320 |         def reportphasechanges(repo, tr):
2321 |             """Report statistics of phase changes for changesets pre-existing
2322 |             pull/unbundle.
2323 |             """
2324 |             origrepolen = tr.changes.get(b'origrepolen', len(repo))
2325 |             published = []
2326 |             for revs, (old, new) in tr.changes.get(b'phases', []):
2327 |                 if new != phases.public:
2328 |                     continue
2329 |                 published.extend(rev for rev in revs if rev < origrepolen)
2330 |             if not published:
2331 |                 return
2332 |             msg = _(b'%d local changesets published\n')
2333 |             if as_validator:
2334 |                 msg = _(b'%d local changesets will be published\n')
2335 |             repo.ui.status(msg % len(published))
2336 |
2337 |
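
With these callbacks registered, a pull or unbundle that adds draft changesets, creates obsolescence markers, and publishes pre-existing local changesets ends with a summary along these lines (hashes and counts below are illustrative only):

    added 2 changesets with 3 changes to 2 files
    2 new obsolescence markers
    new changesets 4a5b6c7d8e9f:0f1e2d3c4b5a (2 drafts)
    2 local changesets published
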
2338 | def getinstabilitymessage(delta: int, instability: bytes) -> Optional[bytes]:
2339 |     """Return the message to show when warning about new instabilities.
2340 |
2341 |     Exists as a separate function so that extensions can wrap it to show more
2342 |     information, like how to fix instabilities."""
2343 |     if delta > 0:
2344 |         return _(b'%i new %s changesets\n') % (delta, instability)
2345 |
2346 |
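
As the docstring notes, this helper exists so extensions can wrap it. A minimal sketch of such a wrapper, assuming a small extension module and a purely hypothetical extra hint text:

    from mercurial import extensions, scmutil

    def _instabilitymessage(orig, delta, instability):
        msg = orig(delta, instability)
        if msg:
            # hypothetical hint appended by the wrapping extension
            msg += b"(see 'hg help evolution' for ways to resolve this)\n"
        return msg

    def extsetup(ui):
        extensions.wrapfunction(scmutil, 'getinstabilitymessage', _instabilitymessage)
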
2347 | def nodesummaries(repo, nodes, maxnumnodes: int = 4) -> bytes:
2348 |     if len(nodes) <= maxnumnodes or repo.ui.verbose:
2349 |         return b' '.join(short(h) for h in nodes)
2350 |     first = b' '.join(short(h) for h in nodes[:maxnumnodes])
2351 |     return _(b"%s and %d others") % (first, len(nodes) - maxnumnodes)
2352 |
2353 |
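
Illustrative only: with six nodes and the default maxnumnodes of 4, the summary reads like b'1a2b3c4d5e6f 9f8e7d6c5b4a 0011aabbccdd 123456abcdef and 2 others' (made-up hashes); with ui.verbose set, all six short hashes are listed instead.
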
2354 | def enforcesinglehead(repo, tr, desc: bytes, accountclosed, filtername) -> None:
2355 |     """check that no named branch has multiple heads"""
2356 |     if desc in (b'strip', b'repair'):
2357 |         # skip the logic during strip
2358 |         return
2359 |     visible = repo.filtered(filtername)
2360 |     # possible improvement: we could restrict the check to affected branch
2361 |     bm = visible.branchmap()
2362 |     for name in bm:
2363 |         heads = bm.branchheads(name, closed=accountclosed)
2364 |         if len(heads) > 1:
2365 |             msg = _(b'rejecting multiple heads on branch "%s"')
2366 |             msg %= name
2367 |             hint = _(b'%d heads: %s')
2368 |             hint %= (len(heads), nodesummaries(repo, heads))
2369 |             raise error.Abort(msg, hint=hint)
2370 |
2371 |
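
In core Mercurial this check is driven by the experimental.single-head-per-branch configuration and runs against the incoming transaction. A minimal sketch of a direct call, where the desc, accountclosed and filtername values are assumptions for the example:

    # reject the transaction if any named branch would end up with >1 head
    scmutil.enforcesinglehead(
        repo, tr, b'push', accountclosed=False, filtername=b'visible'
    )
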
2372 | def wrapconvertsink(sink):
2373 |     """Allow extensions to wrap the sink returned by convcmd.convertsink()
2374 |     before it is used, whether or not the convert extension was formally loaded.
2375 |     """
2376 |     return sink
2377 |
2378 |
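
Because this is deliberately a no-op hook point, an extension can decorate or replace the sink by wrapping it. A minimal sketch, assuming the standard extensions.wrapfunction mechanism:

    from mercurial import extensions, scmutil

    def _convertsink(orig, sink):
        sink = orig(sink)  # keep whatever earlier wrappers did
        # ... decorate or replace 'sink' here; details are extension-specific ...
        return sink

    def extsetup(ui):
        extensions.wrapfunction(scmutil, 'wrapconvertsink', _convertsink)
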
2379 | def unhidehashlikerevs(repo, specs, hiddentype: bytes):
2380 |     """parse the user specs and unhide changesets whose hash or revision number
2381 |     is passed.
2382 |
2383 |     hiddentype can be: 1) 'warn': warn while unhiding changesets
2384 |                        2) 'nowarn': don't warn while unhiding changesets
2385 |
2386 |     returns a repo object with the required changesets unhidden
2387 |     """
2388 |     if not specs:
2389 |         return repo
2390 |
2391 |     if not repo.filtername or not repo.ui.configbool(
2392 |         b'experimental', b'directaccess'
2393 |     ):
2394 |         return repo
2395 |
2396 |     if repo.filtername not in (b'visible', b'visible-hidden'):
2397 |         return repo
2398 |
2399 |     symbols = set()
2400 |     for spec in specs:
2401 |         try:
2402 |             tree = revsetlang.parse(spec)
2403 |         except error.ParseError:  # will be reported by scmutil.revrange()
2404 |             continue
2405 |
2406 |         symbols.update(revsetlang.gethashlikesymbols(tree))
2407 |
2408 |     if not symbols:
2409 |         return repo
2410 |
2411 |     revs = _getrevsfromsymbols(repo, symbols)
2412 |
2413 |     if not revs:
2414 |         return repo
2415 |
2416 |     if hiddentype == b'warn':
2417 |         unfi = repo.unfiltered()
2418 |         revstr = b", ".join([pycompat.bytestr(unfi[l]) for l in revs])
2419 |         repo.ui.warn(
2420 |             _(
2421 |                 b"warning: accessing hidden changesets for write "
2422 |                 b"operation: %s\n"
2423 |             )
2424 |             % revstr
2425 |         )
2426 |
2427 |     # we have to use a new filtername to separate branch/tags cache until we
2428 |     # can disable these caches when revisions are dynamically pinned.
2429 |     return repo.filtered(b'visible-hidden', revs)
2430 |
2431 |
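
A minimal usage sketch, assuming experimental.directaccess is enabled and that the caller resolves the specs with scmutil.revrange(); the helper name below is made up for the example:

    def _resolve_write_revs(ui, repo, specs):
        # pin hidden changesets named by hash so revrange() can see them
        repo = scmutil.unhidehashlikerevs(repo, specs, b'warn')
        return repo, scmutil.revrange(repo, specs)
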
2432 | def _getrevsfromsymbols(repo, symbols) -> Set[int]:
2433 |     """parse the list of symbols and return a set of revision numbers of hidden
2434 |     changesets present in symbols"""
2435 |     revs = set()
2436 |     unfi = repo.unfiltered()
2437 |     unficl = unfi.changelog
2438 |     cl = repo.changelog
2439 |     tiprev = len(unficl)
2440 |     allowrevnums = repo.ui.configbool(b'experimental', b'directaccess.revnums')
2441 |     for s in symbols:
2442 |         try:
2443 |             n = int(s)
2444 |             if n <= tiprev:
2445 |                 if not allowrevnums:
2446 |                     continue
2447 |                 else:
2448 |                     if n not in cl:
2449 |                         revs.add(n)
2450 |                         continue
2451 |         except ValueError:
2452 |             pass
2453 |
2454 |         try:
2455 |             s = resolvehexnodeidprefix(unfi, s)
2456 |         except (error.LookupError, error.WdirUnsupported):
2457 |             s = None
2458 |
2459 |         if s is not None:
2460 |             rev = unficl.rev(s)
2461 |             if rev not in cl:
2462 |                 revs.add(rev)
2463 |
2464 |     return revs
2465 |
2466 |
2467 | def bookmarkrevs(repo, mark: bytes):
2468 |     """Select revisions reachable by a given bookmark
2469 |
2470 |     If the bookmarked revision isn't a head, an empty set will be returned.
2471 |     """
2472 |     return repo.revs(format_bookmark_revspec(mark))
2473 |
2474 |
2475 | def format_bookmark_revspec(mark: bytes) -> bytes:
2476 |     """Build a revset expression to select revisions reachable by a given
2477 |     bookmark"""
2478 |     mark = b'literal:' + mark
2479 |     return revsetlang.formatspec(
2480 |         b"ancestors(bookmark(%s)) - "
2481 |         b"ancestors(head() and not bookmark(%s)) - "
2482 |         b"ancestors(bookmark() and not bookmark(%s))",
2483 |         mark,
2484 |         mark,
2485 |         mark,
2486 |     )
2487 |
2488 |
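
Illustrative only: for a hypothetical bookmark b'@', the formatted revset is equivalent to

    ancestors(bookmark("literal:@"))
      - ancestors(head() and not bookmark("literal:@"))
      - ancestors(bookmark() and not bookmark("literal:@"))

(the exact quoting depends on revsetlang.formatspec), so bookmarkrevs(repo, b'@') selects the bookmarked line of history while excluding anything only reachable from other heads or other bookmarks.
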
2489 | def ismember(ui: "uimod.ui", username: bytes, userlist: List[bytes]) -> bool:
2490 |     """Check if username is a member of userlist.
2491 |
2492 |     If userlist has a single '*' member, all users are considered members.
2493 |     Can be overridden by extensions to provide more complex authorization
2494 |     schemes.
2495 |     """
2496 |     return userlist == [b'*'] or username in userlist
2497 |
2498 |
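
A minimal sketch of an allow-list check built on this helper; the config names and error text are assumptions for the example, not something defined in this file:

    allowed = ui.configlist(b'myext', b'allow-users')  # e.g. [b'*'] or [b'alice', b'bob']
    if not scmutil.ismember(ui, username, allowed):
        raise error.Abort(b'user %s is not authorized' % username)
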
2499 | RESOURCE_HIGH: int = 3
2500 | RESOURCE_MEDIUM: int = 2
2501 | RESOURCE_LOW: int = 1
2502 | RESOURCE_DEFAULT: int = 0
2503 |
2504 | RESOURCE_MAPPING: Dict[bytes, int] = {
2505 |     b'default': RESOURCE_DEFAULT,
2506 |     b'low': RESOURCE_LOW,
2507 |     b'medium': RESOURCE_MEDIUM,
2508 |     b'high': RESOURCE_HIGH,
2509 | }
2510 |
2511 | DEFAULT_RESOURCE: int = RESOURCE_MEDIUM
2512 |
2513 |
2514 | def get_resource_profile(
2515 |     ui: "uimod.ui", dimension: Optional[bytes] = None
2516 | ) -> int:
2517 |     """return the resource profile for a dimension
2518 |
2519 |     If no dimension is specified, the generic value is returned"""
2520 |     generic_name = ui.config(b'usage', b'resources')
2521 |     value = RESOURCE_MAPPING.get(generic_name, RESOURCE_DEFAULT)
2522 |     if value == RESOURCE_DEFAULT:
2523 |         value = DEFAULT_RESOURCE
2524 |     if dimension is not None:
2525 |         sub_name = ui.config(b'usage', b'resources.%s' % dimension)
2526 |         sub_value = RESOURCE_MAPPING.get(sub_name, RESOURCE_DEFAULT)
2527 |         if sub_value != RESOURCE_DEFAULT:
2528 |             value = sub_value
2529 |     return value
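
Illustrative only: the per-dimension setting overrides the generic one, and unset or unrecognized values fall back to DEFAULT_RESOURCE (medium). With a configuration such as

    [usage]
    resources = low
    resources.memory = high

get_resource_profile(ui) returns RESOURCE_LOW, while get_resource_profile(ui, b'memory') returns RESOURCE_HIGH.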