@@ -0,0 +1,33 @@
+#!/bin/bash
+
+set -e
+set -u
+
+# Find the python3 setup that would run pytype
+PYTYPE=`which pytype`
+PYTHON3=`head -n1 ${PYTYPE} | sed -s 's/#!//'`
+
+# Existing stubs that pytype processes live here
+TYPESHED=$(${PYTHON3} -c "import pytype; print(pytype.__path__[0])")/typeshed/stubs
+HG_STUBS=${TYPESHED}/mercurial
+
+echo "Patching typeshed at $HG_STUBS"
+
+rm -rf ${HG_STUBS}
+mkdir -p ${HG_STUBS}
+
+cat > ${HG_STUBS}/METADATA.toml <<EOF
+version = "0.1"
+EOF
+
+
+mkdir -p ${HG_STUBS}/mercurial/cext ${HG_STUBS}/mercurial/thirdparty/attr
+
+touch ${HG_STUBS}/mercurial/__init__.pyi
+touch ${HG_STUBS}/mercurial/cext/__init__.pyi
+touch ${HG_STUBS}/mercurial/thirdparty/__init__.pyi
+
+ln -sf $(hg root)/mercurial/cext/*.{pyi,typed} \
+       ${HG_STUBS}/mercurial/cext
+ln -sf $(hg root)/mercurial/thirdparty/attr/*.{pyi,typed} \
+       ${HG_STUBS}/mercurial/thirdparty/attr
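
The script above derives everything from the active pytype install: it resolves the interpreter from pytype's shebang, then locates the bundled typeshed through the package itself. A minimal Python sketch of that lookup (illustrative only, assuming pytype is importable):

    import os.path
    import pytype

    # pytype.__path__[0] is the installed package directory; the stubs the
    # script patches live under its bundled typeshed tree.
    stubs = os.path.join(pytype.__path__[0], "typeshed", "stubs")
    print(stubs)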
@@ -0,0 +1,1 @@
+partial
@@ -0,0 +1,486 @@
+import sys
+
+from typing import (
+    Any,
+    Callable,
+    ClassVar,
+    Dict,
+    Generic,
+    List,
+    Mapping,
+    Optional,
+    Protocol,
+    Sequence,
+    Tuple,
+    Type,
+    TypeVar,
+    Union,
+    overload,
+)
+
+# `import X as X` is required to make these public
+from . import converters as converters
+from . import exceptions as exceptions
+from . import filters as filters
+from . import setters as setters
+from . import validators as validators
+from ._cmp import cmp_using as cmp_using
+from ._version_info import VersionInfo
+
+__version__: str
+__version_info__: VersionInfo
+__title__: str
+__description__: str
+__url__: str
+__uri__: str
+__author__: str
+__email__: str
+__license__: str
+__copyright__: str
+
+_T = TypeVar("_T")
+_C = TypeVar("_C", bound=type)
+
+_EqOrderType = Union[bool, Callable[[Any], Any]]
+_ValidatorType = Callable[[Any, Attribute[_T], _T], Any]
+_ConverterType = Callable[[Any], Any]
+_FilterType = Callable[[Attribute[_T], _T], bool]
+_ReprType = Callable[[Any], str]
+_ReprArgType = Union[bool, _ReprType]
+_OnSetAttrType = Callable[[Any, Attribute[Any], Any], Any]
+_OnSetAttrArgType = Union[
+    _OnSetAttrType, List[_OnSetAttrType], setters._NoOpType
+]
+_FieldTransformer = Callable[
+    [type, List[Attribute[Any]]], List[Attribute[Any]]
+]
+# FIXME: in reality, if multiple validators are passed they must be in a list
+# or tuple, but those are invariant and so would prevent subtypes of
+# _ValidatorType from working when passed in a list or tuple.
+_ValidatorArgType = Union[_ValidatorType[_T], Sequence[_ValidatorType[_T]]]
+
+# A protocol to be able to statically accept an attrs class.
+class AttrsInstance(Protocol):
+    __attrs_attrs__: ClassVar[Any]
+
+# _make --
+
+NOTHING: object
+
+# NOTE: Factory lies about its return type to make this possible:
+# `x: List[int] # = Factory(list)`
+# Work around mypy issue #4554 in the common case by using an overload.
+if sys.version_info >= (3, 8):
+    from typing import Literal
+    @overload
+    def Factory(factory: Callable[[], _T]) -> _T: ...
+    @overload
+    def Factory(
+        factory: Callable[[Any], _T],
+        takes_self: Literal[True],
+    ) -> _T: ...
+    @overload
+    def Factory(
+        factory: Callable[[], _T],
+        takes_self: Literal[False],
+    ) -> _T: ...
+
+else:
+    @overload
+    def Factory(factory: Callable[[], _T]) -> _T: ...
+    @overload
+    def Factory(
+        factory: Union[Callable[[Any], _T], Callable[[], _T]],
+        takes_self: bool = ...,
+    ) -> _T: ...
+
+# Static type inference support via __dataclass_transform__ implemented as per:
+# https://github.com/microsoft/pyright/blob/1.1.135/specs/dataclass_transforms.md
+# This annotation must be applied to all overloads of "define" and "attrs"
+#
+# NOTE: This is a typing construct and does not exist at runtime. Extensions
+# wrapping attrs decorators should declare a separate __dataclass_transform__
+# signature in the extension module using the specification linked above to
+# provide pyright support.
+def __dataclass_transform__(
+    *,
+    eq_default: bool = True,
+    order_default: bool = False,
+    kw_only_default: bool = False,
+    field_descriptors: Tuple[Union[type, Callable[..., Any]], ...] = (()),
+) -> Callable[[_T], _T]: ...
+
+class Attribute(Generic[_T]):
+    name: str
+    default: Optional[_T]
+    validator: Optional[_ValidatorType[_T]]
+    repr: _ReprArgType
+    cmp: _EqOrderType
+    eq: _EqOrderType
+    order: _EqOrderType
+    hash: Optional[bool]
+    init: bool
+    converter: Optional[_ConverterType]
+    metadata: Dict[Any, Any]
+    type: Optional[Type[_T]]
+    kw_only: bool
+    on_setattr: _OnSetAttrType
+    def evolve(self, **changes: Any) -> "Attribute[Any]": ...
+
+# NOTE: We had several choices for the annotation to use for type arg:
+# 1) Type[_T]
+#   - Pros: Handles simple cases correctly
+#   - Cons: Might produce less informative errors in the case of conflicting
+#     TypeVars e.g. `attr.ib(default='bad', type=int)`
+# 2) Callable[..., _T]
+#   - Pros: Better error messages than #1 for conflicting TypeVars
+#   - Cons: Terrible error messages for validator checks.
+#     e.g. attr.ib(type=int, validator=validate_str)
+#     -> error: Cannot infer function type argument
+# 3) type (and do all of the work in the mypy plugin)
+#   - Pros: Simple here, and we could customize the plugin with our own errors.
+#   - Cons: Would need to write mypy plugin code to handle all the cases.
+# We chose option #1.
+
+# `attr` lies about its return type to make the following possible:
+#     attr()    -> Any
+#     attr(8)   -> int
+#     attr(validator=<some callable>)  -> Whatever the callable expects.
+# This makes this type of assignments possible:
+#     x: int = attr(8)
+#
+# This form catches explicit None or no default but with no other arguments
+# returns Any.
+@overload
+def attrib(
+    default: None = ...,
+    validator: None = ...,
+    repr: _ReprArgType = ...,
+    cmp: Optional[_EqOrderType] = ...,
+    hash: Optional[bool] = ...,
+    init: bool = ...,
+    metadata: Optional[Mapping[Any, Any]] = ...,
+    type: None = ...,
+    converter: None = ...,
+    factory: None = ...,
+    kw_only: bool = ...,
+    eq: Optional[_EqOrderType] = ...,
+    order: Optional[_EqOrderType] = ...,
+    on_setattr: Optional[_OnSetAttrArgType] = ...,
+) -> Any: ...
+
+# This form catches an explicit None or no default and infers the type from the
+# other arguments.
+@overload
+def attrib(
+    default: None = ...,
+    validator: Optional[_ValidatorArgType[_T]] = ...,
+    repr: _ReprArgType = ...,
+    cmp: Optional[_EqOrderType] = ...,
+    hash: Optional[bool] = ...,
+    init: bool = ...,
+    metadata: Optional[Mapping[Any, Any]] = ...,
+    type: Optional[Type[_T]] = ...,
+    converter: Optional[_ConverterType] = ...,
+    factory: Optional[Callable[[], _T]] = ...,
+    kw_only: bool = ...,
+    eq: Optional[_EqOrderType] = ...,
+    order: Optional[_EqOrderType] = ...,
+    on_setattr: Optional[_OnSetAttrArgType] = ...,
+) -> _T: ...
+
+# This form catches an explicit default argument.
+@overload
+def attrib(
+    default: _T,
+    validator: Optional[_ValidatorArgType[_T]] = ...,
+    repr: _ReprArgType = ...,
+    cmp: Optional[_EqOrderType] = ...,
+    hash: Optional[bool] = ...,
+    init: bool = ...,
+    metadata: Optional[Mapping[Any, Any]] = ...,
+    type: Optional[Type[_T]] = ...,
+    converter: Optional[_ConverterType] = ...,
+    factory: Optional[Callable[[], _T]] = ...,
+    kw_only: bool = ...,
+    eq: Optional[_EqOrderType] = ...,
+    order: Optional[_EqOrderType] = ...,
+    on_setattr: Optional[_OnSetAttrArgType] = ...,
+) -> _T: ...
+
+# This form covers type=non-Type: e.g. forward references (str), Any
+@overload
+def attrib(
+    default: Optional[_T] = ...,
+    validator: Optional[_ValidatorArgType[_T]] = ...,
+    repr: _ReprArgType = ...,
+    cmp: Optional[_EqOrderType] = ...,
+    hash: Optional[bool] = ...,
+    init: bool = ...,
+    metadata: Optional[Mapping[Any, Any]] = ...,
+    type: object = ...,
+    converter: Optional[_ConverterType] = ...,
+    factory: Optional[Callable[[], _T]] = ...,
+    kw_only: bool = ...,
+    eq: Optional[_EqOrderType] = ...,
+    order: Optional[_EqOrderType] = ...,
+    on_setattr: Optional[_OnSetAttrArgType] = ...,
+) -> Any: ...
+@overload
+def field(
+    *,
+    default: None = ...,
+    validator: None = ...,
+    repr: _ReprArgType = ...,
+    hash: Optional[bool] = ...,
+    init: bool = ...,
+    metadata: Optional[Mapping[Any, Any]] = ...,
+    converter: None = ...,
+    factory: None = ...,
+    kw_only: bool = ...,
+    eq: Optional[bool] = ...,
+    order: Optional[bool] = ...,
+    on_setattr: Optional[_OnSetAttrArgType] = ...,
+) -> Any: ...
+
+# This form catches an explicit None or no default and infers the type from the
+# other arguments.
+@overload
+def field(
+    *,
+    default: None = ...,
+    validator: Optional[_ValidatorArgType[_T]] = ...,
+    repr: _ReprArgType = ...,
+    hash: Optional[bool] = ...,
+    init: bool = ...,
+    metadata: Optional[Mapping[Any, Any]] = ...,
+    converter: Optional[_ConverterType] = ...,
+    factory: Optional[Callable[[], _T]] = ...,
+    kw_only: bool = ...,
+    eq: Optional[_EqOrderType] = ...,
+    order: Optional[_EqOrderType] = ...,
+    on_setattr: Optional[_OnSetAttrArgType] = ...,
+) -> _T: ...
+
+# This form catches an explicit default argument.
+@overload
+def field(
+    *,
+    default: _T,
+    validator: Optional[_ValidatorArgType[_T]] = ...,
+    repr: _ReprArgType = ...,
+    hash: Optional[bool] = ...,
+    init: bool = ...,
+    metadata: Optional[Mapping[Any, Any]] = ...,
+    converter: Optional[_ConverterType] = ...,
+    factory: Optional[Callable[[], _T]] = ...,
+    kw_only: bool = ...,
+    eq: Optional[_EqOrderType] = ...,
+    order: Optional[_EqOrderType] = ...,
+    on_setattr: Optional[_OnSetAttrArgType] = ...,
+) -> _T: ...
+
+# This form covers type=non-Type: e.g. forward references (str), Any
+@overload
+def field(
+    *,
+    default: Optional[_T] = ...,
+    validator: Optional[_ValidatorArgType[_T]] = ...,
+    repr: _ReprArgType = ...,
+    hash: Optional[bool] = ...,
+    init: bool = ...,
+    metadata: Optional[Mapping[Any, Any]] = ...,
+    converter: Optional[_ConverterType] = ...,
+    factory: Optional[Callable[[], _T]] = ...,
+    kw_only: bool = ...,
+    eq: Optional[_EqOrderType] = ...,
+    order: Optional[_EqOrderType] = ...,
+    on_setattr: Optional[_OnSetAttrArgType] = ...,
+) -> Any: ...
+@overload
+@__dataclass_transform__(order_default=True, field_descriptors=(attrib, field))
+def attrs(
+    maybe_cls: _C,
+    these: Optional[Dict[str, Any]] = ...,
+    repr_ns: Optional[str] = ...,
+    repr: bool = ...,
+    cmp: Optional[_EqOrderType] = ...,
+    hash: Optional[bool] = ...,
+    init: bool = ...,
+    slots: bool = ...,
+    frozen: bool = ...,
+    weakref_slot: bool = ...,
+    str: bool = ...,
+    auto_attribs: bool = ...,
+    kw_only: bool = ...,
+    cache_hash: bool = ...,
+    auto_exc: bool = ...,
+    eq: Optional[_EqOrderType] = ...,
+    order: Optional[_EqOrderType] = ...,
+    auto_detect: bool = ...,
+    collect_by_mro: bool = ...,
+    getstate_setstate: Optional[bool] = ...,
+    on_setattr: Optional[_OnSetAttrArgType] = ...,
+    field_transformer: Optional[_FieldTransformer] = ...,
+    match_args: bool = ...,
+) -> _C: ...
+@overload
+@__dataclass_transform__(order_default=True, field_descriptors=(attrib, field))
+def attrs(
+    maybe_cls: None = ...,
+    these: Optional[Dict[str, Any]] = ...,
+    repr_ns: Optional[str] = ...,
+    repr: bool = ...,
+    cmp: Optional[_EqOrderType] = ...,
+    hash: Optional[bool] = ...,
+    init: bool = ...,
+    slots: bool = ...,
+    frozen: bool = ...,
+    weakref_slot: bool = ...,
+    str: bool = ...,
+    auto_attribs: bool = ...,
+    kw_only: bool = ...,
+    cache_hash: bool = ...,
+    auto_exc: bool = ...,
+    eq: Optional[_EqOrderType] = ...,
+    order: Optional[_EqOrderType] = ...,
+    auto_detect: bool = ...,
+    collect_by_mro: bool = ...,
+    getstate_setstate: Optional[bool] = ...,
+    on_setattr: Optional[_OnSetAttrArgType] = ...,
+    field_transformer: Optional[_FieldTransformer] = ...,
+    match_args: bool = ...,
+) -> Callable[[_C], _C]: ...
+@overload
+@__dataclass_transform__(field_descriptors=(attrib, field))
+def define(
+    maybe_cls: _C,
+    *,
+    these: Optional[Dict[str, Any]] = ...,
+    repr: bool = ...,
+    hash: Optional[bool] = ...,
+    init: bool = ...,
+    slots: bool = ...,
+    frozen: bool = ...,
+    weakref_slot: bool = ...,
+    str: bool = ...,
+    auto_attribs: bool = ...,
+    kw_only: bool = ...,
+    cache_hash: bool = ...,
+    auto_exc: bool = ...,
+    eq: Optional[bool] = ...,
+    order: Optional[bool] = ...,
+    auto_detect: bool = ...,
+    getstate_setstate: Optional[bool] = ...,
+    on_setattr: Optional[_OnSetAttrArgType] = ...,
+    field_transformer: Optional[_FieldTransformer] = ...,
+    match_args: bool = ...,
+) -> _C: ...
+@overload
+@__dataclass_transform__(field_descriptors=(attrib, field))
+def define(
+    maybe_cls: None = ...,
+    *,
+    these: Optional[Dict[str, Any]] = ...,
+    repr: bool = ...,
+    hash: Optional[bool] = ...,
+    init: bool = ...,
+    slots: bool = ...,
+    frozen: bool = ...,
+    weakref_slot: bool = ...,
+    str: bool = ...,
+    auto_attribs: bool = ...,
+    kw_only: bool = ...,
+    cache_hash: bool = ...,
+    auto_exc: bool = ...,
+    eq: Optional[bool] = ...,
+    order: Optional[bool] = ...,
+    auto_detect: bool = ...,
+    getstate_setstate: Optional[bool] = ...,
+    on_setattr: Optional[_OnSetAttrArgType] = ...,
+    field_transformer: Optional[_FieldTransformer] = ...,
+    match_args: bool = ...,
+) -> Callable[[_C], _C]: ...
+
+mutable = define
+frozen = define  # they differ only in their defaults
+
+def fields(cls: Type[AttrsInstance]) -> Any: ...
+def fields_dict(cls: Type[AttrsInstance]) -> Dict[str, Attribute[Any]]: ...
+def validate(inst: AttrsInstance) -> None: ...
+def resolve_types(
+    cls: _C,
+    globalns: Optional[Dict[str, Any]] = ...,
+    localns: Optional[Dict[str, Any]] = ...,
+    attribs: Optional[List[Attribute[Any]]] = ...,
+) -> _C: ...
+
+# TODO: add support for returning a proper attrs class from the mypy plugin
+# we use Any instead of _CountingAttr so that e.g. `make_class('Foo',
+# [attr.ib()])` is valid
+def make_class(
+    name: str,
+    attrs: Union[List[str], Tuple[str, ...], Dict[str, Any]],
+    bases: Tuple[type, ...] = ...,
+    repr_ns: Optional[str] = ...,
+    repr: bool = ...,
+    cmp: Optional[_EqOrderType] = ...,
+    hash: Optional[bool] = ...,
+    init: bool = ...,
+    slots: bool = ...,
+    frozen: bool = ...,
+    weakref_slot: bool = ...,
+    str: bool = ...,
+    auto_attribs: bool = ...,
+    kw_only: bool = ...,
+    cache_hash: bool = ...,
+    auto_exc: bool = ...,
+    eq: Optional[_EqOrderType] = ...,
+    order: Optional[_EqOrderType] = ...,
+    collect_by_mro: bool = ...,
+    on_setattr: Optional[_OnSetAttrArgType] = ...,
+    field_transformer: Optional[_FieldTransformer] = ...,
+) -> type: ...
+
+# _funcs --
+
+# TODO: add support for returning TypedDict from the mypy plugin
+# FIXME: asdict/astuple do not honor their factory args. Waiting on one of
+# these:
+# https://github.com/python/mypy/issues/4236
+# https://github.com/python/typing/issues/253
+# XXX: remember to fix attrs.asdict/astuple too!
+def asdict(
+    inst: AttrsInstance,
+    recurse: bool = ...,
+    filter: Optional[_FilterType[Any]] = ...,
+    dict_factory: Type[Mapping[Any, Any]] = ...,
+    retain_collection_types: bool = ...,
+    value_serializer: Optional[
+        Callable[[type, Attribute[Any], Any], Any]
+    ] = ...,
+    tuple_keys: Optional[bool] = ...,
+) -> Dict[str, Any]: ...
+
+# TODO: add support for returning NamedTuple from the mypy plugin
+def astuple(
+    inst: AttrsInstance,
+    recurse: bool = ...,
+    filter: Optional[_FilterType[Any]] = ...,
+    tuple_factory: Type[Sequence[Any]] = ...,
+    retain_collection_types: bool = ...,
+) -> Tuple[Any, ...]: ...
+def has(cls: type) -> bool: ...
+def assoc(inst: _T, **changes: Any) -> _T: ...
+def evolve(inst: _T, **changes: Any) -> _T: ...
+
+# _config --
+
+def set_run_validators(run: bool) -> None: ...
+def get_run_validators() -> bool: ...
+
+# aliases --
+
+s = attributes = attrs
+ib = attr = attrib
+dataclass = attrs  # Technically, partial(attrs, auto_attribs=True) ;)
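
As the comments in the stub explain, the `attrib`/`field` overloads are ordered so that assignments type-check without a plugin: no usable default yields `Any`, while a typed default yields the field's type. A small usage sketch of what this buys (not part of the vendored stub):

    import attr

    @attr.s
    class Config:
        retries = attr.ib(default=3)       # matches the `default: _T` overload -> int
        label = attr.ib(default=None)      # explicit None, no other hints -> Any
        port: int = attr.ib(default=8080)  # annotation and overload agree on int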
@@ -0,0 +1,155 @@
+# SPDX-License-Identifier: MIT
+
+
+import functools
+import types
+
+from ._make import _make_ne
+
+
+_operation_names = {"eq": "==", "lt": "<", "le": "<=", "gt": ">", "ge": ">="}
+
+
+def cmp_using(
+    eq=None,
+    lt=None,
+    le=None,
+    gt=None,
+    ge=None,
+    require_same_type=True,
+    class_name="Comparable",
+):
+    """
+    Create a class that can be passed into `attr.ib`'s ``eq``, ``order``, and
+    ``cmp`` arguments to customize field comparison.
+
+    The resulting class will have a full set of ordering methods if
+    at least one of ``{lt, le, gt, ge}`` and ``eq`` are provided.
+
+    :param Optional[callable] eq: `callable` used to evaluate equality
+        of two objects.
+    :param Optional[callable] lt: `callable` used to evaluate whether
+        one object is less than another object.
+    :param Optional[callable] le: `callable` used to evaluate whether
+        one object is less than or equal to another object.
+    :param Optional[callable] gt: `callable` used to evaluate whether
+        one object is greater than another object.
+    :param Optional[callable] ge: `callable` used to evaluate whether
+        one object is greater than or equal to another object.
+
+    :param bool require_same_type: When `True`, equality and ordering methods
+        will return `NotImplemented` if objects are not of the same type.
+
+    :param Optional[str] class_name: Name of class. Defaults to 'Comparable'.
+
+    See `comparison` for more details.
+
+    .. versionadded:: 21.1.0
+    """
+
+    body = {
+        "__slots__": ["value"],
+        "__init__": _make_init(),
+        "_requirements": [],
+        "_is_comparable_to": _is_comparable_to,
+    }
+
+    # Add operations.
+    num_order_functions = 0
+    has_eq_function = False
+
+    if eq is not None:
+        has_eq_function = True
+        body["__eq__"] = _make_operator("eq", eq)
+        body["__ne__"] = _make_ne()
+
+    if lt is not None:
+        num_order_functions += 1
+        body["__lt__"] = _make_operator("lt", lt)
+
+    if le is not None:
+        num_order_functions += 1
+        body["__le__"] = _make_operator("le", le)
+
+    if gt is not None:
+        num_order_functions += 1
+        body["__gt__"] = _make_operator("gt", gt)
+
+    if ge is not None:
+        num_order_functions += 1
+        body["__ge__"] = _make_operator("ge", ge)
+
+    type_ = types.new_class(
+        class_name, (object,), {}, lambda ns: ns.update(body)
+    )
+
+    # Add same type requirement.
+    if require_same_type:
+        type_._requirements.append(_check_same_type)
+
+    # Add total ordering if at least one operation was defined.
+    if 0 < num_order_functions < 4:
+        if not has_eq_function:
+            # functools.total_ordering requires __eq__ to be defined,
+            # so raise the error early here to keep a nice stack.
+            raise ValueError(
+                "eq must be defined in order to complete ordering from "
+                "lt, le, gt, ge."
+            )
+        type_ = functools.total_ordering(type_)
+
+    return type_
+
+
+def _make_init():
+    """
+    Create __init__ method.
+    """
+
+    def __init__(self, value):
+        """
+        Initialize object with *value*.
+        """
+        self.value = value
+
+    return __init__
+
+
+def _make_operator(name, func):
+    """
+    Create operator method.
+    """
+
+    def method(self, other):
+        if not self._is_comparable_to(other):
+            return NotImplemented
+
+        result = func(self.value, other.value)
+        if result is NotImplemented:
+            return NotImplemented
+
+        return result
+
+    method.__name__ = "__%s__" % (name,)
+    method.__doc__ = "Return a %s b. Computed by attrs." % (
+        _operation_names[name],
+    )
+
+    return method
+
+
+def _is_comparable_to(self, other):
+    """
+    Check whether `other` is comparable to `self`.
+    """
+    for func in self._requirements:
+        if not func(self, other):
+            return False
+    return True
+
+
+def _check_same_type(self, other):
+    """
+    Return True if *self* and *other* are of the same type, False otherwise.
+    """
+    return other.value.__class__ is self.value.__class__
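
The class built by `cmp_using` wraps each field value and delegates the requested operators, so it can be handed to `attr.ib`'s `eq`/`order`/`cmp` arguments as the docstring describes. A brief usage sketch (illustrative, not part of the vendored code):

    import attr
    from attr import cmp_using

    @attr.s
    class Measurement:
        # Compare this field with an approximate equality test instead of ==.
        value = attr.ib(eq=cmp_using(eq=lambda a, b: abs(a - b) < 1e-6))

    assert Measurement(1.0) == Measurement(1.0 + 1e-9)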
@@ -0,0 +1,13 @@
+from typing import Any, Callable, Optional, Type
+
+_CompareWithType = Callable[[Any, Any], bool]
+
+def cmp_using(
+    eq: Optional[_CompareWithType],
+    lt: Optional[_CompareWithType],
+    le: Optional[_CompareWithType],
+    gt: Optional[_CompareWithType],
+    ge: Optional[_CompareWithType],
+    require_same_type: bool,
+    class_name: str,
+) -> Type: ...
(Several more new files in this changeset were empty or too large to render and are omitted from this view.)
@@ -1,5 +1,8 @@ 
 /assign_reviewer @mercurial.review
 
+
+<!--
+
 Welcome to the Mercurial Merge Request creation process:
 
 * Set a simple title for your MR,
@@ -11,3 +14,5 @@ More details here:
 
 * https://www.mercurial-scm.org/wiki/ContributingChanges
 * https://www.mercurial-scm.org/wiki/Heptapod
+
+-->
@@ -138,6 +138,7 @@ tests:
 	# Run Rust tests if cargo is installed
 	if command -v $(CARGO) >/dev/null 2>&1; then \
 		$(MAKE) rust-tests; \
+		$(MAKE) cargo-clippy; \
 	fi
 	cd tests && $(PYTHON) run-tests.py $(TESTFLAGS)
 
@@ -152,9 +153,13 @@ testpy-%:
 	cd tests && $(HGPYTHONS)/$*/bin/python run-tests.py $(TESTFLAGS)
 
 rust-tests:
-	cd $(HGROOT)/rust
+	cd $(HGROOT)/rust \
 		&& $(CARGO) test --quiet --all --features "$(HG_RUST_FEATURES)"
 
+cargo-clippy:
+	cd $(HGROOT)/rust \
+		&& $(CARGO) clippy --all --features "$(HG_RUST_FEATURES)" -- -D warnings
+
 check-code:
 	hg manifest | xargs python contrib/check-code.py
 
@@ -372,10 +372,6 @@ commonpypats = [
     ),
     (r'[^^+=*/!<>&| %-](\s=|=\s)[^= ]', "wrong whitespace around ="),
     (
-        r'\([^()]*( =[^=]|[^<>!=]= )',
-        "no whitespace around = for named parameters",
-    ),
-    (
         r'raise [^,(]+, (\([^\)]+\)|[^,\(\)]+)$',
         "don't use old-style two-argument raise, use Exception(message)",
     ),
@@ -12,6 +12,36 @@ cd `hg root`
 # endeavor to empty this list out over time, as some of these are
 # probably hiding real problems.
 #
+# hgext/absorb.py              # [attribute-error]
+# hgext/bugzilla.py            # [pyi-error], [attribute-error]
+# hgext/convert/bzr.py         # [attribute-error]
+# hgext/convert/cvs.py         # [attribute-error], [wrong-arg-types]
+# hgext/convert/cvsps.py       # [attribute-error]
+# hgext/convert/p4.py          # [wrong-arg-types] (__file: mercurial.utils.procutil._pfile -> IO)
+# hgext/convert/subversion.py  # [attribute-error], [name-error], [pyi-error]
+# hgext/fastannotate/context.py  # no linelog.copyfrom()
+# hgext/fastannotate/formatter.py  # [unsupported-operands]
+# hgext/fsmonitor/__init__.py  # [name-error]
+# hgext/git/__init__.py        # [attribute-error]
+# hgext/githelp.py             # [attribute-error] [wrong-arg-types]
+# hgext/hgk.py                 # [attribute-error]
+# hgext/histedit.py            # [attribute-error], [wrong-arg-types]
+# hgext/infinitepush           # using bytes for str literal; scheduled for removal
+# hgext/keyword.py             # [attribute-error]
+# hgext/largefiles/storefactory.py  # [attribute-error]
+# hgext/lfs/__init__.py        # [attribute-error]
+# hgext/narrow/narrowbundle2.py  # [attribute-error]
+# hgext/narrow/narrowcommands.py  # [attribute-error], [name-error]
+# hgext/rebase.py              # [attribute-error]
+# hgext/remotefilelog/basepack.py  # [attribute-error], [wrong-arg-count]
+# hgext/remotefilelog/basestore.py  # [attribute-error]
+# hgext/remotefilelog/contentstore.py  # [missing-parameter], [wrong-keyword-args], [attribute-error]
+# hgext/remotefilelog/fileserverclient.py  # [attribute-error]
+# hgext/remotefilelog/shallowbundle.py  # [attribute-error]
+# hgext/remotefilelog/remotefilectx.py  # [module-attr] (This is an actual bug)
+# hgext/sqlitestore.py         # [attribute-error]
+# hgext/zeroconf/__init__.py   # bytes vs str; tests fail on macOS
+#
 # mercurial/bundlerepo.py      # no vfs and ui attrs on bundlerepo
 # mercurial/context.py         # many [attribute-error]
 # mercurial/crecord.py         # tons of [attribute-error], [module-attr]
@@ -31,7 +61,6 @@ cd `hg root`
 # mercurial/pure/parsers.py    # [attribute-error]
 # mercurial/repoview.py        # [attribute-error]
 # mercurial/testing/storage.py # tons of [attribute-error]
-# mercurial/ui.py              # [attribute-error], [wrong-arg-types]
 # mercurial/unionrepo.py       # ui, svfs, unfiltered [attribute-error]
 # mercurial/win32.py           # [not-callable]
 # mercurial/wireprotoframing.py # [unsupported-operands], [attribute-error], [import-error]
@@ -43,7 +72,37 @@ cd `hg root`
 
 # TODO: include hgext and hgext3rd
 
-pytype -V 3.7 --keep-going --jobs auto mercurial \
+pytype -V 3.7 --keep-going --jobs auto \
+    doc/check-seclevel.py hgdemandimport hgext mercurial \
+    -x hgext/absorb.py \
+    -x hgext/bugzilla.py \
+    -x hgext/convert/bzr.py \
+    -x hgext/convert/cvs.py \
+    -x hgext/convert/cvsps.py \
+    -x hgext/convert/p4.py \
+    -x hgext/convert/subversion.py \
+    -x hgext/fastannotate/context.py \
+    -x hgext/fastannotate/formatter.py \
+    -x hgext/fsmonitor/__init__.py \
+    -x hgext/git/__init__.py \
+    -x hgext/githelp.py \
+    -x hgext/hgk.py \
+    -x hgext/histedit.py \
+    -x hgext/infinitepush \
+    -x hgext/keyword.py \
+    -x hgext/largefiles/storefactory.py \
+    -x hgext/lfs/__init__.py \
+    -x hgext/narrow/narrowbundle2.py \
+    -x hgext/narrow/narrowcommands.py \
+    -x hgext/rebase.py \
+    -x hgext/remotefilelog/basepack.py \
+    -x hgext/remotefilelog/basestore.py \
+    -x hgext/remotefilelog/contentstore.py \
+    -x hgext/remotefilelog/fileserverclient.py \
+    -x hgext/remotefilelog/remotefilectx.py \
+    -x hgext/remotefilelog/shallowbundle.py \
+    -x hgext/sqlitestore.py \
+    -x hgext/zeroconf/__init__.py \
     -x mercurial/bundlerepo.py \
     -x mercurial/context.py \
     -x mercurial/crecord.py \
@@ -64,9 +123,11 @@ pytype -V 3.7 --keep-going --jobs auto m
     -x mercurial/repoview.py \
     -x mercurial/testing/storage.py \
     -x mercurial/thirdparty \
-    -x mercurial/ui.py \
     -x mercurial/unionrepo.py \
     -x mercurial/win32.py \
     -x mercurial/wireprotoframing.py \
     -x mercurial/wireprotov1peer.py \
     -x mercurial/wireprotov1server.py
+
+echo 'pytype crashed while generating the following type stubs:'
+find .pytype/pyi -name '*.pyi' | xargs grep -l '# Caught error' | sort
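
The `-x` exclusions above silence whole modules; the rest of this series prefers the narrower pytype directive comments, which suppress one diagnostic class on a single line or across a bracketed region. A generic sketch of both spellings (the suppressed errors here are hypothetical):

    import os

    sep = os.sep  # pytype: disable=attribute-error  (single-statement form)

    # pytype: disable=import-error
    # ...imports that pytype cannot resolve would go here...
    # pytype: enable=import-error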
@@ -20,7 +20,7 @@ for inline in (True, False):
     index, cache = parsers.parse_index2(data, inline)
     index.slicechunktodensity(list(range(len(index))), 0.5, 262144)
     index.stats()
-    index.findsnapshots({}, 0)
+    index.findsnapshots({}, 0, len(index) - 1)
     10 in index
     for rev in range(len(index)):
         index.reachableroots(0, [len(index)-1], [rev])
@@ -42,6 +42,7 @@ rust-cargo-test:
     script:
         - echo "python used, $PYTHON"
         - make rust-tests
+        - make cargo-clippy
     variables:
         PYTHON: python3
        CI_CLEVER_CLOUD_FLAVOR: S

@@ -91,7 +92,8 @@ check-pytype:
         - hg -R /tmp/mercurial-ci/ update `hg log --rev '.' --template '{node}'`
         - cd /tmp/mercurial-ci/
         - make local PYTHON=$PYTHON
-        - $PYTHON -m pip install --user -U libcst==0.3.20 pytype==2022.
+        - $PYTHON -m pip install --user -U libcst==0.3.20 pytype==2022.11.18
+        - ./contrib/setup-pytype.sh
     script:
         - echo "Entering script section"
         - sh contrib/check-pytype.sh
@@ -235,6 +235,7 @@ revlogopts = getattr(
 
 cmdtable = {}
 
+
 # for "historical portability":
 # define parsealiases locally, because cmdutil.parsealiases has been
 # available since 1.5 (or 6252852b4332)

@@ -573,7 +574,6 @@ def _timer(
 
 
 def formatone(fm, timings, title=None, result=None, displayall=False):
-
     count = len(timings)
 
     fm.startitem()

@@ -815,7 +815,12 @@ def perfstatus(ui, repo, **opts):
         )
         sum(map(bool, s))
 
-        timer(status_dirstate)
+        if util.safehasattr(dirstate, 'running_status'):
+            with dirstate.running_status(repo):
+                timer(status_dirstate)
+                dirstate.invalidate()
+        else:
+            timer(status_dirstate)
     else:
         timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
     fm.end()

@@ -997,11 +1002,16 @@ def perfdiscovery(ui, repo, path, **opts
     timer, fm = gettimer(ui, opts)
 
     try:
-        from mercurial.utils.urlutil import get_unique_pull_path
+        from mercurial.utils.urlutil import get_unique_pull_path_obj
 
-        path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
+        path = get_unique_pull_path_obj(b'perfdiscovery', ui, path)
     except ImportError:
-        path = ui.expandpath(path)
+        try:
+            from mercurial.utils.urlutil import get_unique_pull_path
+
+            path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
+        except ImportError:
+            path = ui.expandpath(path)
 
     def s():
         repos[1] = hg.peer(ui, opts, path)

@@ -1469,7 +1479,8 @@ def perfdirstatewrite(ui, repo, **opts):
     def d():
         ds.write(repo.currenttransaction())
 
-    timer(d, setup=setup)
+    with repo.wlock():
+        timer(d, setup=setup)
     fm.end()
 
 

@@ -1613,7 +1624,11 @@ def perfphasesremote(ui, repo, dest=None
             b'default repository not configured!',
             hint=b"see 'hg help config.paths'",
         )
-    dest = path.pushloc or path.loc
+    if util.safehasattr(path, 'main_path'):
+        path = path.get_push_variant()
+        dest = path.loc
+    else:
+        dest = path.pushloc or path.loc
     ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
     other = hg.peer(repo, opts, dest)
 
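
Several of these perf.py hunks use the same compatibility idiom: probe for the newer Mercurial API with `util.safehasattr()` (or a guarded import) and fall back to the older spelling, since perf.py must run against many Mercurial versions. A condensed sketch of the idiom with hypothetical method names:

    def call_compat(obj):
        # Prefer the new API when present; otherwise use the old one.
        if hasattr(obj, "new_method"):  # perf.py uses util.safehasattr()
            return obj.new_method()
        return obj.old_method()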
@@ -7,14 +7,12 @@ 
 
 
 import abc
+import builtins
 import re
-import sys
 
 ####################
 # for Python3 compatibility (almost comes from mercurial/pycompat.py)
 
-ispy3 = sys.version_info[0] >= 3
-
 
 def identity(a):
     return a

@@ -38,27 +36,19 @@ def rapply(f, xs):
     return _rapply(f, xs)
 
 
-if ispy3:
-    import builtins
-
-    def bytestr(s):
-        # tiny version of pycompat.bytestr
-        return s.encode('latin1')
-
-    def sysstr(s):
-        if isinstance(s, builtins.str):
-            return s
-        return s.decode('latin-1')
-
-    def opentext(f):
-        return open(f, 'r')
-
-
-else:
-    bytestr = str
-    sysstr = identity
-
-    opentext = open
+def bytestr(s):
+    # tiny version of pycompat.bytestr
+    return s.encode('latin1')
+
+
+def sysstr(s):
+    if isinstance(s, builtins.str):
+        return s
+    return s.decode('latin-1')
+
+
+def opentext(f):
+    return open(f, 'r')
 
 
 def b2s(x):
@@ -46,7 +46,7 @@ def showavailables(ui, initlevel):
 
 
 def checkseclevel(ui, doc, name, initlevel):
-    ui.notenoi18n('checking "%s"\n' % name)
+    ui.notenoi18n(('checking "%s"\n' % name).encode('utf-8'))
     if not isinstance(doc, bytes):
         doc = doc.encode('utf-8')
     blocks, pruned = minirst.parse(doc, 0, ['verbose'])

@@ -70,14 +70,18 @@ def checkseclevel(ui, doc, name, initlev
             nextlevel = mark2level[mark]
             if curlevel < nextlevel and curlevel + 1 != nextlevel:
                 ui.warnnoi18n(
-                    'gap of section level at "%s" of %s\n' % (title, name)
+                    ('gap of section level at "%s" of %s\n' % (title, name)).encode(
+                        'utf-8'
+                    )
                 )
                 showavailables(ui, initlevel)
                 errorcnt += 1
                 continue
             ui.notenoi18n(
-                'appropriate section level for "%s %s"\n'
-                % (mark * (nextlevel * 2), title)
+                (
+                    'appropriate section level for "%s %s"\n'
+                    % (mark * (nextlevel * 2), title)
+                ).encode('utf-8')
             )
             curlevel = nextlevel
 

@@ -90,7 +94,9 @@ def checkcmdtable(ui, cmdtable, namefmt,
         name = k.split(b"|")[0].lstrip(b"^")
         if not entry[0].__doc__:
             ui.notenoi18n(
-                'skip checking %s: no help document\n' % (namefmt % name)
+                (
+                    'skip checking %s: no help document\n' % (namefmt % name)
+                ).encode('utf-8')
             )
             continue
         errorcnt += checkseclevel(

@@ -117,7 +123,9 @@ def checkhghelps(ui):
             mod = extensions.load(ui, name, None)
             if not mod.__doc__:
                 ui.notenoi18n(
-                    'skip checking %s extension: no help document\n' % name
+                    (
+                        'skip checking %s extension: no help document\n' % name
+                    ).encode('utf-8')
                 )
                 continue
             errorcnt += checkseclevel(

@@ -144,7 +152,9 @@ def checkfile(ui, filename, initlevel):
         doc = fp.read()
 
     ui.notenoi18n(
-        'checking input from %s with initlevel %d\n' % (filename, initlevel)
+        (
+            'checking input from %s with initlevel %d\n' % (filename, initlevel)
+        ).encode('utf-8')
    )
    return checkseclevel(ui, doc, 'input from %s' % filename, initlevel)
 
@@ -23,8 +23,6 @@ This also has some limitations compared 
 enabled.
 """
 
-# This line is unnecessary, but it satisfies test-check-py3-compat.t.
-
 import contextlib
 import importlib.util
 import sys

@@ -39,10 +37,16 @@ class _lazyloaderex(importlib.util.LazyL
     the ignore list.
     """
 
+    _HAS_DYNAMIC_ATTRIBUTES = True  # help pytype not flag self.loader
+
     def exec_module(self, module):
         """Make the module load lazily."""
         with tracing.log('demandimport %s', module):
             if _deactivated or module.__name__ in ignores:
+                # Reset the loader on the module as super() does (issue6725)
+                module.__spec__.loader = self.loader
+                module.__loader__ = self.loader
+
                 self.loader.exec_module(module)
             else:
                 super().exec_module(module)
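
`_lazyloaderex` subclasses `importlib.util.LazyLoader`, whose stdlib recipe defers a module's execution until its first attribute access; the hunk above restores the loader attributes for ignored modules the way `LazyLoader` itself does (issue6725). The standard-library pattern it builds on looks like this (a sketch, not hgdemandimport's actual entry point):

    import importlib.util
    import sys

    def lazy_import(name):
        spec = importlib.util.find_spec(name)
        loader = importlib.util.LazyLoader(spec.loader)
        spec.loader = loader
        module = importlib.util.module_from_spec(spec)
        sys.modules[name] = module
        loader.exec_module(module)  # defers real execution
        return module

    json = lazy_import("json")  # nothing imported yet
    json.dumps({})              # first attribute access loads the module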
@@ -881,7 +881,7 @@ class fixupstate:
 
         dirstate._fsmonitorstate.invalidate = noop
         try:
-            with dirstate.parentchange():
+            with dirstate.changing_parents(self.repo):
                 dirstate.rebuild(ctx.node(), ctx.manifest(), self.paths)
         finally:
             restore()
@@ -46,6 +46,7 @@ command = registrar.command(cmdtable)
         _(b'mark a branch as closed, hiding it from the branch list'),
     ),
     (b's', b'secret', None, _(b'use the secret phase for committing')),
+    (b'', b'draft', None, _(b'use the draft phase for committing')),
     (b'n', b'note', b'', _(b'store a note on the amend')),
 ]
 + cmdutil.walkopts

@@ -64,6 +65,7 @@ def amend(ui, repo, *pats, **opts):
 
     See :hg:`help commit` for more details.
     """
+    cmdutil.check_at_most_one_arg(opts, 'draft', 'secret')
     cmdutil.check_note_size(opts)
 
     with repo.wlock(), repo.lock():
@@ -59,21 +59,29 @@ def mvcheck(orig, ui, repo, *pats, **opt
     opts = pycompat.byteskwargs(opts)
     renames = None
     disabled = opts.pop(b'no_automv', False)
-    if not disabled:
-        threshold = ui.configint(b'automv', b'similarity')
-        if not 0 <= threshold <= 100:
-            raise error.Abort(_(b'automv.similarity must be between 0 and 100'))
-        if threshold > 0:
-            match = scmutil.match(repo[None], pats, opts)
-            added, removed = _interestingfiles(repo, match)
-            uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
-            renames = _findrenames(
-                repo, uipathfn, added, removed, threshold / 100.0
-            )
-
-    with repo.wlock():
-        if renames is not None:
-            scmutil._markchanges(repo, (), (), renames)
+    with repo.wlock():
+        if not disabled:
+            threshold = ui.configint(b'automv', b'similarity')
+            if not 0 <= threshold <= 100:
+                raise error.Abort(
+                    _(b'automv.similarity must be between 0 and 100')
+                )
+            if threshold > 0:
+                match = scmutil.match(repo[None], pats, opts)
+                added, removed = _interestingfiles(repo, match)
+                uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
+                renames = _findrenames(
+                    repo, uipathfn, added, removed, threshold / 100.0
+                )
+
+        if renames is not None:
+            with repo.dirstate.changing_files(repo):
+                # XXX this should be wider and integrated with the commit
+                # transaction. At the same time as we do the `addremove` logic
+                # for commit. However we can't really do better with the
+                # current extension structure, and this is not worse than what
+                # happened before.
+                scmutil._markchanges(repo, (), (), renames)
     return orig(ui, repo, *pats, **pycompat.strkwargs(opts))
 
 
@@ -217,6 +217,8 @@ def blackbox(ui, repo, *revs, **opts):
         return
 
     limit = opts.get('limit')
+    assert limit is not None  # help pytype
+
     fp = repo.vfs(b'blackbox.log', b'r')
     lines = fp.read().split(b'\n')
 
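
The added assert is a standard narrowing hint: the value read from `opts` types as an Optional, and asserting it is not None lets pytype (and other checkers) treat it as concrete afterwards. A standalone sketch of the idiom:

    from typing import Optional

    def double_limit(opts: dict) -> int:
        limit: Optional[int] = opts.get('limit')
        # Without this assert a checker flags the arithmetic below, since
        # `limit` could be None; the assert narrows it to int.
        assert limit is not None  # help pytype
        return limit * 2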
@@ -31,11 +31,14 @@ demandimport.IGNORES.update(
 
 try:
     # bazaar imports
+    # pytype: disable=import-error
     import breezy.bzr.bzrdir
     import breezy.errors
     import breezy.revision
     import breezy.revisionspec
 
+    # pytype: enable=import-error
+
     bzrdir = breezy.bzr.bzrdir
     errors = breezy.errors
     revision = breezy.revision
@@ -608,7 +608,10 @@ class mercurial_source(common.converter_
             files = copyfiles = ctx.manifest()
         if parents:
             if self._changescache[0] == rev:
-                ma, r = self._changescache[1]
+                # TODO: add type hints to avoid this warning, instead of
+                # suppressing it:
+                #   No attribute '__iter__' on None [attribute-error]
+                ma, r = self._changescache[1]  # pytype: disable=attribute-error
             else:
                 ma, r = self._changedfiles(parents[0], ctx)
             if not full:
@@ -243,6 +243,7 @@ class monotone_source(common.converter_s
             m = self.cert_re.match(e)
             if m:
                 name, value = m.groups()
+                assert value is not None  # help pytype
                 value = value.replace(br'\"', b'"')
                 value = value.replace(br'\\', b'\\')
                 certs[name] = value
@@ -47,11 +47,14 @@ NoRepo = common.NoRepo
 # these bindings.
 
 try:
+    # pytype: disable=import-error
     import svn
     import svn.client
     import svn.core
     import svn.ra
     import svn.delta
+
+    # pytype: enable=import-error
     from . import transport
     import warnings
 
@@ -722,7 +725,13 @@ class svn_source(converter_source):
     def getchanges(self, rev, full):
         # reuse cache from getchangedfiles
         if self._changescache[0] == rev and not full:
+            # TODO: add type hints to avoid this warning, instead of
+            # suppressing it:
+            #   No attribute '__iter__' on None [attribute-error]
+
+            # pytype: disable=attribute-error
             (files, copies) = self._changescache[1]
+            # pytype: enable=attribute-error
         else:
             (files, copies) = self._getchanges(rev, full)
         # caller caches the result, so free it here to release memory
@@ -17,10 +17,13 @@
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, see <http://www.gnu.org/licenses/>.
 
+# pytype: disable=import-error
 import svn.client
 import svn.core
 import svn.ra
 
+# pytype: enable=import-error
+
 Pool = svn.core.Pool
 SubversionException = svn.core.SubversionException
 
@@ -37,7 +40,7 @@ svn_config = None
 
 def _create_auth_baton(pool):
     """Create a Subversion authentication baton."""
-    import svn.client
+    import svn.client  # pytype: disable=import-error
 
     # Give the client context baton a suite of authentication
     # providers.h
@@ -421,30 +421,31 @@ def reposetup(ui, repo):
             wlock = None
             try:
                 wlock = self.wlock()
-                for f in self.dirstate:
-                    if not self.dirstate.get_entry(f).maybe_clean:
-                        continue
-                    if oldeol is not None:
-                        if not oldeol.match(f) and not neweol.match(f):
-                            continue
-                        oldkey = None
-                        for pattern, key, m in oldeol.patterns:
-                            if m(f):
-                                oldkey = key
-                                break
-                        newkey = None
-                        for pattern, key, m in neweol.patterns:
-                            if m(f):
-                                newkey = key
-                                break
-                        if oldkey == newkey:
-                            continue
-                    # all normal files need to be looked at again since
-                    # the new .hgeol file specify a different filter
-                    self.dirstate.set_possibly_dirty(f)
-                # Write the cache to update mtime and cache .hgeol
-                with self.vfs(b"eol.cache", b"w") as f:
-                    f.write(hgeoldata)
+                with self.dirstate.changing_files(self):
+                    for f in self.dirstate:
+                        if not self.dirstate.get_entry(f).maybe_clean:
+                            continue
+                        if oldeol is not None:
+                            if not oldeol.match(f) and not neweol.match(f):
+                                continue
+                            oldkey = None
+                            for pattern, key, m in oldeol.patterns:
+                                if m(f):
+                                    oldkey = key
+                                    break
+                            newkey = None
+                            for pattern, key, m in neweol.patterns:
+                                if m(f):
+                                    newkey = key
+                                    break
+                            if oldkey == newkey:
+                                continue
+                        # all normal files need to be looked at again since
+                        # the new .hgeol file specify a different filter
+                        self.dirstate.set_possibly_dirty(f)
+                    # Write the cache to update mtime and cache .hgeol
+                    with self.vfs(b"eol.cache", b"w") as f:
+                        f.write(hgeoldata)
             except errormod.LockUnavailable:
                 # If we cannot lock the repository and clear the
                 # dirstate, then a commit might not see all files
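Note (a minimal sketch, not part of the patch): the hunk above groups the dirstate mutations under a single `changing_files` context while the wlock is held, so all file-state edits land in one dirstate change. Schematically:

    def mark_all_possibly_dirty(repo):
        with repo.wlock():  # the lock must be held around the context
            with repo.dirstate.changing_files(repo):
                for f in repo.dirstate:
                    repo.dirstate.set_possibly_dirty(f)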
@@ -151,8 +151,11 @@ def annotatepeer(repo):
     ui = repo.ui
 
     remotedest = ui.config(b'fastannotate', b'remotepath', b'default')
-    r = urlutil.get_unique_pull_path(b'fastannotate', repo, ui, remotedest)
-    remotepath = r[0]
+    remotepath = urlutil.get_unique_pull_path_obj(
+        b'fastannotate',
+        ui,
+        remotedest,
+    )
     peer = hg.peer(ui, {}, remotepath)
 
     try:
@@ -108,9 +108,9 @@ def fetch(ui, repo, source=b'default', *
             )
         )
 
-    path = urlutil.get_unique_pull_path(b'fetch', repo, ui, source)[0]
+    path = urlutil.get_unique_pull_path_obj(b'fetch', ui, source)
     other = hg.peer(repo, opts, path)
-    ui.status(_(b'pulling from %s\n') % urlutil.hidepassword(path))
+    ui.status(_(b'pulling from %s\n') % urlutil.hidepassword(path.loc))
     revs = None
     if opts[b'rev']:
         try:
@@ -779,7 +779,7 @@ def writeworkingdir(repo, ctx, filedata,
     newp1 = replacements.get(oldp1, oldp1)
     if newp1 != oldp1:
         assert repo.dirstate.p2() == nullid
-        with repo.dirstate.parentchange():
+        with repo.dirstate.changing_parents(repo):
             scmutil.movedirstate(repo, repo[newp1])
 
 
@@ -26,8 +26,6 @@
 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-# no unicode literals
-
 import inspect
 import math
 import os
@@ -94,7 +92,9 @@ if os.name == "nt":
 
     LPDWORD = ctypes.POINTER(wintypes.DWORD)
 
-    CreateFile = ctypes.windll.kernel32.CreateFileA
+    _kernel32 = ctypes.windll.kernel32  # pytype: disable=module-attr
+
+    CreateFile = _kernel32.CreateFileA
     CreateFile.argtypes = [
         wintypes.LPSTR,
         wintypes.DWORD,
@@ -106,11 +106,11 @@ if os.name == "nt":
     ]
     CreateFile.restype = wintypes.HANDLE
 
-    CloseHandle = ctypes.windll.kernel32.CloseHandle
+    CloseHandle = _kernel32.CloseHandle
     CloseHandle.argtypes = [wintypes.HANDLE]
     CloseHandle.restype = wintypes.BOOL
 
-    ReadFile = ctypes.windll.kernel32.ReadFile
+    ReadFile = _kernel32.ReadFile
     ReadFile.argtypes = [
         wintypes.HANDLE,
         wintypes.LPVOID,
@@ -120,7 +120,7 @@ if os.name == "nt":
     ]
     ReadFile.restype = wintypes.BOOL
 
-    WriteFile = ctypes.windll.kernel32.WriteFile
+    WriteFile = _kernel32.WriteFile
     WriteFile.argtypes = [
         wintypes.HANDLE,
         wintypes.LPVOID,
@@ -130,15 +130,15 @@ if os.name == "nt":
     ]
     WriteFile.restype = wintypes.BOOL
 
-    GetLastError = ctypes.windll.kernel32.GetLastError
+    GetLastError = _kernel32.GetLastError
     GetLastError.argtypes = []
     GetLastError.restype = wintypes.DWORD
 
-    SetLastError = ctypes.windll.kernel32.SetLastError
+    SetLastError = _kernel32.SetLastError
     SetLastError.argtypes = [wintypes.DWORD]
     SetLastError.restype = None
 
-    FormatMessage = ctypes.windll.kernel32.FormatMessageA
+    FormatMessage = _kernel32.FormatMessageA
     FormatMessage.argtypes = [
         wintypes.DWORD,
         wintypes.LPVOID,
@@ -150,9 +150,9 @@ if os.name == "nt":
     ]
     FormatMessage.restype = wintypes.DWORD
 
-    LocalFree = ctypes.windll.kernel32.LocalFree
+    LocalFree = _kernel32.LocalFree
 
-    GetOverlappedResult = ctypes.windll.kernel32.GetOverlappedResult
+    GetOverlappedResult = _kernel32.GetOverlappedResult
     GetOverlappedResult.argtypes = [
         wintypes.HANDLE,
         ctypes.POINTER(OVERLAPPED),
@@ -161,9 +161,7 @@ if os.name == "nt":
     ]
     GetOverlappedResult.restype = wintypes.BOOL
 
-    GetOverlappedResultEx = getattr(
-        ctypes.windll.kernel32, "GetOverlappedResultEx", None
-    )
+    GetOverlappedResultEx = getattr(_kernel32, "GetOverlappedResultEx", None)
     if GetOverlappedResultEx is not None:
         GetOverlappedResultEx.argtypes = [
             wintypes.HANDLE,
@@ -174,7 +172,7 @@ if os.name == "nt":
         ]
         GetOverlappedResultEx.restype = wintypes.BOOL
 
-    WaitForSingleObjectEx = ctypes.windll.kernel32.WaitForSingleObjectEx
+    WaitForSingleObjectEx = _kernel32.WaitForSingleObjectEx
     WaitForSingleObjectEx.argtypes = [
         wintypes.HANDLE,
         wintypes.DWORD,
@@ -182,7 +180,7 @@ if os.name == "nt":
     ]
     WaitForSingleObjectEx.restype = wintypes.DWORD
 
-    CreateEvent = ctypes.windll.kernel32.CreateEventA
+    CreateEvent = _kernel32.CreateEventA
     CreateEvent.argtypes = [
         LPDWORD,
         wintypes.BOOL,
@@ -192,7 +190,7 @@ if os.name == "nt":
     CreateEvent.restype = wintypes.HANDLE
 
     # Windows Vista is the minimum supported client for CancelIoEx.
-    CancelIoEx = ctypes.windll.kernel32.CancelIoEx
+    CancelIoEx = _kernel32.CancelIoEx
     CancelIoEx.argtypes = [wintypes.HANDLE, ctypes.POINTER(OVERLAPPED)]
    CancelIoEx.restype = wintypes.BOOL
 
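Note (standalone sketch, not part of the patch): the `_kernel32` alias introduced above replaces repeated `ctypes.windll.kernel32` lookups, which pytype cannot resolve on non-Windows hosts. The binding style itself, reduced to a harmless example:

    import ctypes
    import os

    if os.name == "nt":
        from ctypes import wintypes

        _kernel32 = ctypes.windll.kernel32  # pytype: disable=module-attr
        GetTickCount = _kernel32.GetTickCount
        GetTickCount.argtypes = []              # declaring the C signature
        GetTickCount.restype = wintypes.DWORD   # lets ctypes check each call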
@@ -691,9 +689,9 @@ class CLIProcessTransport(Transport):
         if self.closed:
             self.close()
             self.closed = False
-        self._connect()
-        res = self.proc.stdin.write(data)
-        self.proc.stdin.close()
+        proc = self._connect()
+        res = proc.stdin.write(data)
+        proc.stdin.close()
         self.closed = True
         return res
 
@@ -988,8 +986,12 @@ class client:
             # if invoked via an application with graphical user interface,
             # this call will cause a brief command window pop-up.
             # Using the flag STARTF_USESHOWWINDOW to avoid this behavior.
+
+            # pytype: disable=module-attr
             startupinfo = subprocess.STARTUPINFO()
             startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
+            # pytype: enable=module-attr
+
             args["startupinfo"] = startupinfo
 
         p = subprocess.Popen(cmd, **args)
@@ -1026,7 +1028,11 @@ class client:
         if self.transport == CLIProcessTransport:
             kwargs["binpath"] = self.binpath
 
+        # Only CLIProcessTransport has the binpath kwarg
+        # pytype: disable=wrong-keyword-args
         self.tport = self.transport(self.sockpath, self.timeout, **kwargs)
+        # pytype: enable=wrong-keyword-args
+
         self.sendConn = self.sendCodec(self.tport)
         self.recvConn = self.recvCodec(self.tport)
         self.pid = os.getpid()
@@ -26,8 +26,6 @@
 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-# no unicode literals
-
 
 def parse_version(vstr):
     res = 0
@@ -26,45 +26,28 @@
 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-# no unicode literals
-
 import sys
 
 
 """Compatibility module across Python 2 and 3."""
 
 
-PYTHON2 = sys.version_info < (3, 0)
 PYTHON3 = sys.version_info >= (3, 0)
 
 # This is adapted from https://bitbucket.org/gutworth/six, and used under the
 # MIT license. See LICENSE for a full copyright notice.
-if PYTHON3:
-
-    def reraise(tp, value, tb=None):
-        try:
-            if value is None:
-                value = tp()
-            if value.__traceback__ is not tb:
-                raise value.with_traceback(tb)
-            raise value
-        finally:
-            value = None
-            tb = None
-
-else:
-    exec(
-        """
-def reraise(tp, value, tb=None):
-    try:
-        raise tp, value, tb
-    finally:
-        tb = None
-""".strip()
-    )
-
-if PYTHON3:
-    UNICODE = str
-else:
-    UNICODE = unicode  # noqa: F821 We handled versioning above
+
+def reraise(tp, value, tb=None):
+    try:
+        if value is None:
+            value = tp()
+        if value.__traceback__ is not tb:
+            raise value.with_traceback(tb)
+        raise value
+    finally:
+        value = None
+        tb = None
+
+
+UNICODE = str
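Note (usage sketch, not part of the patch): with the Python 2 branch gone, the surviving `reraise` simply re-raises a captured exception with its original traceback:

    import sys

    try:
        1 / 0
    except ZeroDivisionError:
        tp, value, tb = sys.exc_info()
        # ... bookkeeping between capture and re-raise goes here ...
        reraise(tp, value, tb)  # surfaces the ZeroDivisionError unchanged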
@@ -26,8 +26,6 @@
 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-# no unicode literals
-
 import sys
 
 from . import compat
@@ -26,8 +26,6 @@
 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-# no unicode literals
-
 import ctypes
 
 
@@ -26,8 +26,6 @@
 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-# no unicode literals
-
 import binascii
 import collections
 import ctypes
@@ -53,17 +51,15 @@ BSER_TEMPLATE = b"\x0b"
 BSER_SKIP = b"\x0c"
 BSER_UTF8STRING = b"\x0d"
 
-if compat.PYTHON3:
-    STRING_TYPES = (str, bytes)
-    unicode = str
+STRING_TYPES = (str, bytes)
+unicode = str
 
-    def tobytes(i):
-        return str(i).encode("ascii")
 
-    long = int
-else:
-    STRING_TYPES = (unicode, str)
-    tobytes = bytes
+def tobytes(i):
+    return str(i).encode("ascii")
+
+
+long = int
 
 # Leave room for the serialization header, which includes
 # our overall length. To make things simpler, we'll use an
@@ -89,7 +85,7 @@ def _int_size(x):
 def _buf_pos(buf, pos):
     ret = buf[pos]
     # Normalize the return type to bytes
-    if compat.PYTHON3 and not isinstance(ret, bytes):
+    if not isinstance(ret, bytes):
         ret = bytes((ret,))
     return ret
 
@@ -252,10 +248,7 @@ class _bser_buffer:
         else:
             raise RuntimeError("Cannot represent this mapping value")
         self.wpos += needed
-        if compat.PYTHON3:
-            iteritems = val.items()
-        else:
-            iteritems = val.iteritems()  # noqa: B301 Checked version above
+        iteritems = val.items()
         for k, v in iteritems:
             self.append_string(k)
             self.append_recursive(v)
@@ -260,7 +260,12 @@ class gitdirstate:
     # # TODO what the heck is this
     _filecache = set()
 
     def is_changing_parents(self):
+        # TODO: we need to implement the context manager bits and
+        # correctly stage/revert index edits.
+        return False
+
+    def is_changing_any(self):
         # TODO: we need to implement the context manager bits and
         # correctly stage/revert index edits.
         return False
@@ -322,14 +327,6 @@ class gitdirstate:
             r[path] = s
         return r
 
-    def savebackup(self, tr, backupname):
-        # TODO: figure out a strategy for saving index backups.
-        pass
-
-    def restorebackup(self, tr, backupname):
-        # TODO: figure out a strategy for saving index backups.
-        pass
-
     def set_tracked(self, f, reset_copy=False):
         # TODO: support copies and reset_copy=True
         uf = pycompat.fsdecode(f)
@@ -384,7 +381,7 @@ class gitdirstate:
         pass
 
     @contextlib.contextmanager
-    def parentchange(self, repo):
+    def changing_parents(self, repo):
         # TODO: track this maybe?
         yield
 
@@ -392,10 +389,6 @@ class gitdirstate:
         # TODO: should this be added to the dirstate interface?
         self._plchangecallbacks[category] = callback
 
-    def clearbackup(self, tr, backupname):
-        # TODO
-        pass
-
     def setbranch(self, branch):
         raise error.Abort(
             b'git repos do not support branches. try using bookmarks'
@@ -9,7 +9,7 @@ def get_pygit2():
     global pygit2_module
     if pygit2_module is None:
         try:
-            import pygit2 as pygit2_module
+            import pygit2 as pygit2_module  # pytype: disable=import-error
 
             pygit2_module.InvalidSpecError
         except (ImportError, AttributeError):
@@ -352,7 +352,8 @@ def _dosign(ui, repo, *revs, **opts):
     sigsfile.close()
 
     if b'.hgsigs' not in repo.dirstate:
-        repo[None].add([b".hgsigs"])
+        with repo.dirstate.changing_files(repo):
+            repo[None].add([b".hgsigs"])
 
     if opts[b"no_commit"]:
         return
@@ -1051,12 +1051,11 @@ def findoutgoing(ui, repo, remote=None, 
     if opts is None:
         opts = {}
     path = urlutil.get_unique_push_path(b'histedit', repo, ui, remote)
-    dest = path.pushloc or path.loc
 
-    ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
+    ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(path.loc))
 
     revs, checkout = hg.addbranchrevs(repo, repo, (path.branch, []), None)
-    other = hg.peer(repo, opts, dest)
+    other = hg.peer(repo, opts, path)
 
     if revs:
         revs = [repo.lookup(rev) for rev in revs]
@@ -32,7 +32,10 @@ from mercurial import (
     pycompat,
     registrar,
 )
-from mercurial.utils import dateutil
+from mercurial.utils import (
+    dateutil,
+    stringutil,
+)
 from .. import notify
 
 configtable = {}
@@ -98,7 +101,7 @@ def _report_commit(ui, repo, ctx):
     try:
         msg = mail.parsebytes(data)
     except emailerrors.MessageParseError as inst:
-        raise error.Abort(inst)
+        raise error.Abort(stringutil.forcebytestr(inst))
 
     msg['In-reply-to'] = notify.messageid(ctx, domain, messageidseed)
     msg['Message-Id'] = notify.messageid(
@@ -31,7 +31,10 @@ from mercurial import (
     pycompat,
     registrar,
 )
-from mercurial.utils import dateutil
+from mercurial.utils import (
+    dateutil,
+    stringutil,
+)
 from .. import notify
 
 configtable = {}
@@ -97,7 +100,7 @@ def _report_commit(ui, repo, ctx):
     try:
         msg = mail.parsebytes(data)
     except emailerrors.MessageParseError as inst:
-        raise error.Abort(inst)
+        raise error.Abort(stringutil.forcebytestr(inst))
 
     msg['In-reply-to'] = notify.messageid(ctx, domain, messageidseed)
     msg['Message-Id'] = notify.messageid(
@@ -683,12 +683,10 @@ def _lookupwrap(orig):
 def _pull(orig, ui, repo, source=b"default", **opts):
     opts = pycompat.byteskwargs(opts)
     # Copy paste from `pull` command
-    path = urlutil.get_unique_pull_path(
+    path = urlutil.get_unique_pull_path_obj(
         b"infinite-push's pull",
-        repo,
         ui,
         source,
-        default_branches=opts.get(b'branch'),
     )
 
     scratchbookmarks = {}
@@ -709,7 +707,7 @@ def _pull(orig, ui, repo, source=b"defau
             bookmarks.append(bookmark)
 
     if scratchbookmarks:
-        other = hg.peer(repo, opts, path[0])
+        other = hg.peer(repo, opts, path)
         try:
             fetchedbookmarks = other.listkeyspatterns(
                 b'bookmarks', patterns=scratchbookmarks
@@ -734,14 +732,14 @@ def _pull(orig, ui, repo, source=b"defau
     try:
         # Remote scratch bookmarks will be deleted because remotenames doesn't
         # know about them. Let's save it before pull and restore after
-        remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, path[0])
-        result = orig(ui, repo, path[0], **pycompat.strkwargs(opts))
+        remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, path.loc)
+        result = orig(ui, repo, path.loc, **pycompat.strkwargs(opts))
         # TODO(stash): race condition is possible
         # if scratch bookmarks was updated right after orig.
         # But that's unlikely and shouldn't be harmful.
         if common.isremotebooksenabled(ui):
             remotescratchbookmarks.update(scratchbookmarks)
-            _saveremotebookmarks(repo, remotescratchbookmarks, path[0])
+            _saveremotebookmarks(repo, remotescratchbookmarks, path.loc)
         else:
             _savelocalbookmarks(repo, scratchbookmarks)
         return result
@@ -849,14 +847,14 @@ def _push(orig, ui, repo, *dests, **opts
         raise error.Abort(msg)
 
     path = paths[0]
-    destpath = path.pushloc or path.loc
+    destpath = path.loc
     # Remote scratch bookmarks will be deleted because remotenames doesn't
     # know about them. Let's save it before push and restore after
     remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, destpath)
     result = orig(ui, repo, *dests, **pycompat.strkwargs(opts))
     if common.isremotebooksenabled(ui):
         if bookmark and scratchpush:
-            other = hg.peer(repo, opts, destpath)
+            other = hg.peer(repo, opts, path)
             try:
                 fetchedbookmarks = other.listkeyspatterns(
                     b'bookmarks', patterns=[bookmark]
@@ -567,8 +567,12 @@ def journal(ui, repo, *args, **opts):
         )
         fm.write(b'newnodes', b'%s', formatnodes(entry.newhashes))
         fm.condwrite(ui.verbose, b'user', b' %-8s', entry.user)
+
+        # ``name`` is bytes, or None only if 'all' was an option.
         fm.condwrite(
+            # pytype: disable=attribute-error
             opts.get(b'all') or name.startswith(b're:'),
+            # pytype: enable=attribute-error
            b'name',
            b' %-8s',
            entry.name,
@@ -437,7 +437,7 @@ def _kwfwrite(ui, repo, expand, *pats, *
     if len(wctx.parents()) > 1:
         raise error.Abort(_(b'outstanding uncommitted merge'))
     kwt = getattr(repo, '_keywordkwt', None)
-    with repo.wlock():
+    with repo.wlock(), repo.dirstate.changing_files(repo):
         status = _status(ui, repo, wctx, kwt, *pats, **opts)
         if status.modified or status.added or status.removed or status.deleted:
             raise error.Abort(_(b'outstanding uncommitted changes'))
@@ -530,17 +530,18 @@ def demo(ui, repo, *args, **opts):
     demoitems(b'keywordmaps', kwmaps.items())
     keywords = b'$' + b'$\n$'.join(sorted(kwmaps.keys())) + b'$\n'
     repo.wvfs.write(fn, keywords)
-    repo[None].add([fn])
-    ui.note(_(b'\nkeywords written to %s:\n') % fn)
-    ui.note(keywords)
     with repo.wlock():
+        with repo.dirstate.changing_files(repo):
+            repo[None].add([fn])
+        ui.note(_(b'\nkeywords written to %s:\n') % fn)
+        ui.note(keywords)
         repo.dirstate.setbranch(b'demobranch')
         for name, cmd in ui.configitems(b'hooks'):
             if name.split(b'.', 1)[0].find(b'commit') > -1:
                 repo.ui.setconfig(b'hooks', name, b'', b'keyword')
         msg = _(b'hg keyword configuration and expansion example')
         ui.note((b"hg ci -m '%s'\n" % msg))
         repo.commit(text=msg)
         ui.status(_(b'\n\tkeywords expanded\n'))
         ui.write(repo.wread(fn))
         repo.wvfs.rmtree(repo.root)
@@ -696,7 +697,7 @@ def kw_amend(orig, ui, repo, old, extra,
     kwt = getattr(repo, '_keywordkwt', None)
     if kwt is None:
         return orig(ui, repo, old, extra, pats, opts)
-    with repo.wlock(), repo.dirstate.parentchange():
+    with repo.wlock(), repo.dirstate.changing_parents(repo):
         kwt.postcommit = True
         newid = orig(ui, repo, old, extra, pats, opts)
         if newid != old.node():
@@ -762,7 +763,7 @@ def kw_dorecord(orig, ui, repo, commitfu
         if ctx != recctx:
             modified, added = _preselect(wstatus, recctx.files())
             kwt.restrict = False
-            with repo.dirstate.parentchange():
+            with repo.dirstate.changing_parents(repo):
                 kwt.overwrite(recctx, modified, False, True)
                 kwt.overwrite(recctx, added, False, True, True)
             kwt.restrict = True
@@ -107,6 +107,7 @@ command.
 
 from mercurial import (
     cmdutil,
+    configitems,
     extensions,
     exthelper,
     hg,
@@ -135,7 +136,7 @@ eh.merge(proto.eh)
 eh.configitem(
     b'largefiles',
     b'minsize',
-    default=eh.configitem.dynamicdefault,
+    default=configitems.dynamicdefault,
 )
 eh.configitem(
     b'largefiles',
@@ -219,7 +219,9 @@ def lfconvert(ui, src, dest, *pats, **op
         success = True
     finally:
         if tolfile:
-            rdst.dirstate.clear()
+            # XXX is this the right context semantically ?
+            with rdst.dirstate.changing_parents(rdst):
+                rdst.dirstate.clear()
         release(dstlock, dstwlock)
         if not success:
             # we failed, remove the new directory
@@ -517,53 +519,52 @@ def updatelfiles(
         filelist = set(filelist)
         lfiles = [f for f in lfiles if f in filelist]
 
-    with lfdirstate.parentchange():
-        update = {}
-        dropped = set()
-        updated, removed = 0, 0
-        wvfs = repo.wvfs
-        wctx = repo[None]
-        for lfile in lfiles:
-            lfileorig = os.path.relpath(
-                scmutil.backuppath(ui, repo, lfile), start=repo.root
-            )
-            standin = lfutil.standin(lfile)
-            standinorig = os.path.relpath(
-                scmutil.backuppath(ui, repo, standin), start=repo.root
-            )
-            if wvfs.exists(standin):
-                if wvfs.exists(standinorig) and wvfs.exists(lfile):
-                    shutil.copyfile(wvfs.join(lfile), wvfs.join(lfileorig))
-                    wvfs.unlinkpath(standinorig)
-                expecthash = lfutil.readasstandin(wctx[standin])
-                if expecthash != b'':
-                    if lfile not in wctx:  # not switched to normal file
-                        if repo.dirstate.get_entry(standin).any_tracked:
-                            wvfs.unlinkpath(lfile, ignoremissing=True)
-                        else:
-                            dropped.add(lfile)
-
-                    # allocate an entry in largefiles dirstate to prevent
-                    # lfilesrepo.status() from reporting missing files as
-                    # removed.
-                    lfdirstate.update_file(
-                        lfile,
-                        p1_tracked=True,
-                        wc_tracked=True,
-                        possibly_dirty=True,
-                    )
-                    update[lfile] = expecthash
-            else:
-                # Remove lfiles for which the standin is deleted, unless the
-                # lfile is added to the repository again. This happens when a
-                # largefile is converted back to a normal file: the standin
-                # disappears, but a new (normal) file appears as the lfile.
-                if (
-                    wvfs.exists(lfile)
-                    and repo.dirstate.normalize(lfile) not in wctx
-                ):
-                    wvfs.unlinkpath(lfile)
-                    removed += 1
+    update = {}
+    dropped = set()
+    updated, removed = 0, 0
+    wvfs = repo.wvfs
+    wctx = repo[None]
+    for lfile in lfiles:
+        lfileorig = os.path.relpath(
+            scmutil.backuppath(ui, repo, lfile), start=repo.root
+        )
+        standin = lfutil.standin(lfile)
+        standinorig = os.path.relpath(
+            scmutil.backuppath(ui, repo, standin), start=repo.root
+        )
+        if wvfs.exists(standin):
+            if wvfs.exists(standinorig) and wvfs.exists(lfile):
+                shutil.copyfile(wvfs.join(lfile), wvfs.join(lfileorig))
+                wvfs.unlinkpath(standinorig)
+            expecthash = lfutil.readasstandin(wctx[standin])
+            if expecthash != b'':
+                if lfile not in wctx:  # not switched to normal file
+                    if repo.dirstate.get_entry(standin).any_tracked:
+                        wvfs.unlinkpath(lfile, ignoremissing=True)
+                    else:
+                        dropped.add(lfile)
+
+                # allocate an entry in largefiles dirstate to prevent
+                # lfilesrepo.status() from reporting missing files as
+                # removed.
+                lfdirstate.hacky_extension_update_file(
+                    lfile,
+                    p1_tracked=True,
+                    wc_tracked=True,
+                    possibly_dirty=True,
+                )
+                update[lfile] = expecthash
+        else:
+            # Remove lfiles for which the standin is deleted, unless the
+            # lfile is added to the repository again. This happens when a
+            # largefile is converted back to a normal file: the standin
+            # disappears, but a new (normal) file appears as the lfile.
+            if (
+                wvfs.exists(lfile)
+                and repo.dirstate.normalize(lfile) not in wctx
+            ):
+                wvfs.unlinkpath(lfile)
+                removed += 1
 
     # largefile processing might be slow and be interrupted - be prepared
     lfdirstate.write(repo.currenttransaction())
@@ -580,41 +581,42 @@ def updatelfiles(
         statuswriter(_(b'getting changed largefiles\n'))
         cachelfiles(ui, repo, None, lfiles)
 
-    with lfdirstate.parentchange():
-        for lfile in lfiles:
-            update1 = 0
-
-            expecthash = update.get(lfile)
-            if expecthash:
-                if not lfutil.copyfromcache(repo, expecthash, lfile):
-                    # failed ... but already removed and set to normallookup
-                    continue
-                # Synchronize largefile dirstate to the last modified
-                # time of the file
-                lfdirstate.update_file(
-                    lfile, p1_tracked=True, wc_tracked=True
-                )
-                update1 = 1
-
-            # copy the exec mode of largefile standin from the repository's
-            # dirstate to its state in the lfdirstate.
-            standin = lfutil.standin(lfile)
-            if wvfs.exists(standin):
-                # exec is decided by the users permissions using mask 0o100
-                standinexec = wvfs.stat(standin).st_mode & 0o100
-                st = wvfs.stat(lfile)
-                mode = st.st_mode
-                if standinexec != mode & 0o100:
-                    # first remove all X bits, then shift all R bits to X
-                    mode &= ~0o111
-                    if standinexec:
-                        mode |= (mode >> 2) & 0o111 & ~util.umask
-                    wvfs.chmod(lfile, mode)
-                    update1 = 1
-
-            updated += update1
-
-            lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
+    for lfile in lfiles:
+        update1 = 0
+
+        expecthash = update.get(lfile)
+        if expecthash:
+            if not lfutil.copyfromcache(repo, expecthash, lfile):
+                # failed ... but already removed and set to normallookup
+                continue
+            # Synchronize largefile dirstate to the last modified
+            # time of the file
+            lfdirstate.hacky_extension_update_file(
+                lfile,
+                p1_tracked=True,
+                wc_tracked=True,
+            )
+            update1 = 1
+
+        # copy the exec mode of largefile standin from the repository's
+        # dirstate to its state in the lfdirstate.
+        standin = lfutil.standin(lfile)
+        if wvfs.exists(standin):
+            # exec is decided by the users permissions using mask 0o100
+            standinexec = wvfs.stat(standin).st_mode & 0o100
+            st = wvfs.stat(lfile)
+            mode = st.st_mode
+            if standinexec != mode & 0o100:
+                # first remove all X bits, then shift all R bits to X
+                mode &= ~0o111
+                if standinexec:
+                    mode |= (mode >> 2) & 0o111 & ~util.umask
+                wvfs.chmod(lfile, mode)
+                update1 = 1
+
+        updated += update1
+
+        lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
 
     lfdirstate.write(repo.currenttransaction())
     if lfiles:
@@ -159,6 +159,9 @@ def findfile(repo, hash):
 
 
 class largefilesdirstate(dirstate.dirstate):
+    _large_file_dirstate = True
+    _tr_key_suffix = b'-large-files'
+
     def __getitem__(self, key):
         return super(largefilesdirstate, self).__getitem__(unixpath(key))
 
@@ -204,7 +207,13 @@ def openlfdirstate(ui, repo, create=True
     """
     Return a dirstate object that tracks largefiles: i.e. its root is
     the repo root, but it is saved in .hg/largefiles/dirstate.
+
+    If a dirstate object already exists and is being used for a 'changing_*'
+    context, it will be returned.
     """
+    sub_dirstate = getattr(repo.dirstate, '_sub_dirstate', None)
+    if sub_dirstate is not None:
+        return sub_dirstate
     vfs = repo.vfs
     lfstoredir = longname
     opener = vfsmod.vfs(vfs.join(lfstoredir))
@@ -223,20 +232,29 @@ def openlfdirstate(ui, repo, create=True
     # it. This ensures that we create it on the first meaningful
     # largefiles operation in a new clone.
     if create and not vfs.exists(vfs.join(lfstoredir, b'dirstate')):
-        matcher = getstandinmatcher(repo)
-        standins = repo.dirstate.walk(
-            matcher, subrepos=[], unknown=False, ignored=False
-        )
-
-        if len(standins) > 0:
-            vfs.makedirs(lfstoredir)
-
-        with lfdirstate.parentchange():
-            for standin in standins:
-                lfile = splitstandin(standin)
-                lfdirstate.update_file(
-                    lfile, p1_tracked=True, wc_tracked=True, possibly_dirty=True
-                )
+        try:
+            with repo.wlock(wait=False), lfdirstate.changing_files(repo):
+                matcher = getstandinmatcher(repo)
+                standins = repo.dirstate.walk(
+                    matcher, subrepos=[], unknown=False, ignored=False
+                )
+
+                if len(standins) > 0:
+                    vfs.makedirs(lfstoredir)
+
+                for standin in standins:
+                    lfile = splitstandin(standin)
+                    lfdirstate.hacky_extension_update_file(
+                        lfile,
+                        p1_tracked=True,
+                        wc_tracked=True,
+                        possibly_dirty=True,
+                    )
+        except error.LockError:
+            # Assume that whatever was holding the lock was important.
+            # If we were doing something important, we would already have
+            # either the lock or a largefile dirstate.
+            pass
     return lfdirstate
 
 
@@ -565,10 +583,14 @@ def getstandinsstate(repo):
 def synclfdirstate(repo, lfdirstate, lfile, normallookup):
     lfstandin = standin(lfile)
     if lfstandin not in repo.dirstate:
-        lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=False)
+        lfdirstate.hacky_extension_update_file(
+            lfile,
+            p1_tracked=False,
+            wc_tracked=False,
+        )
     else:
         entry = repo.dirstate.get_entry(lfstandin)
-        lfdirstate.update_file(
+        lfdirstate.hacky_extension_update_file(
             lfile,
             wc_tracked=entry.tracked,
             p1_tracked=entry.p1_tracked,
@@ -580,8 +602,7 @@ def synclfdirstate(repo, lfdirstate, lfi
 def markcommitted(orig, ctx, node):
     repo = ctx.repo()
 
-    lfdirstate = openlfdirstate(repo.ui, repo)
-    with lfdirstate.parentchange():
+    with repo.dirstate.changing_parents(repo):
         orig(node)
 
     # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
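Note (a minimal sketch, not part of the patch): `changing_parents` is the context the hunk above switches to; operations that move the working-copy parents are grouped under it so the dirstate is written once:

    def move_to(repo, node):
        with repo.wlock():
            with repo.dirstate.changing_parents(repo):
                scmutil.movedirstate(repo, repo[node])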
@@ -593,11 +614,11 @@ def markcommitted(orig, ctx, node):
     # - have to be marked as "n" after commit, but
     # - aren't listed in "repo[node].files()"
 
+    lfdirstate = openlfdirstate(repo.ui, repo)
     for f in ctx.files():
         lfile = splitstandin(f)
         if lfile is not None:
             synclfdirstate(repo, lfdirstate, lfile, False)
-    lfdirstate.write(repo.currenttransaction())
 
     # As part of committing, copy all of the largefiles into the cache.
     #
@@ -668,11 +689,16 @@ def updatestandinsbymatch(repo, match):
     # It can cost a lot of time (several seconds)
     # otherwise to update all standins if the largefiles are
     # large.
-    lfdirstate = openlfdirstate(ui, repo)
     dirtymatch = matchmod.always()
-    unsure, s, mtime_boundary = lfdirstate.status(
-        dirtymatch, subrepos=[], ignored=False, clean=False, unknown=False
-    )
+    with repo.dirstate.running_status(repo):
+        lfdirstate = openlfdirstate(ui, repo)
+        unsure, s, mtime_boundary = lfdirstate.status(
+            dirtymatch,
+            subrepos=[],
+            ignored=False,
+            clean=False,
+            unknown=False,
+        )
     modifiedfiles = unsure + s.modified + s.added + s.removed
     lfiles = listlfiles(repo)
     # this only loops through largefiles that exist (not
@@ -8,6 +8,7 @@
 
 '''Overridden Mercurial commands and functions for the largefiles extension'''
 
+import contextlib
 import copy
 import os
 
@@ -21,6 +22,7 @@ from mercurial import (
     archival,
     cmdutil,
     copies as copiesmod,
+    dirstate,
     error,
     exchange,
     extensions,
@@ -311,6 +313,48 @@ def cmdutilremove(
     )
 
 
+@eh.wrapfunction(dirstate.dirstate, b'_changing')
+@contextlib.contextmanager
+def _changing(orig, self, repo, change_type):
+    pre = sub_dirstate = getattr(self, '_sub_dirstate', None)
+    try:
+        lfd = getattr(self, '_large_file_dirstate', False)
+        if sub_dirstate is None and not lfd:
+            sub_dirstate = lfutil.openlfdirstate(repo.ui, repo)
+            self._sub_dirstate = sub_dirstate
+        if not lfd:
+            assert self._sub_dirstate is not None
+        with orig(self, repo, change_type):
+            if sub_dirstate is None:
+                yield
+            else:
+                with sub_dirstate._changing(repo, change_type):
+                    yield
+    finally:
+        self._sub_dirstate = pre
+
+
+@eh.wrapfunction(dirstate.dirstate, b'running_status')
+@contextlib.contextmanager
+def running_status(orig, self, repo):
+    pre = sub_dirstate = getattr(self, '_sub_dirstate', None)
+    try:
+        lfd = getattr(self, '_large_file_dirstate', False)
+        if sub_dirstate is None and not lfd:
+            sub_dirstate = lfutil.openlfdirstate(repo.ui, repo)
+            self._sub_dirstate = sub_dirstate
+        if not lfd:
+            assert self._sub_dirstate is not None
+        with orig(self, repo):
+            if sub_dirstate is None:
+                yield
+            else:
+                with sub_dirstate.running_status(repo):
+                    yield
+    finally:
+        self._sub_dirstate = pre
+
+
 @eh.wrapfunction(subrepo.hgsubrepo, b'status')
 def overridestatusfn(orig, repo, rev2, **opts):
     with lfstatus(repo._repo):
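Note (generic sketch, not part of the patch): the two wrappers above apply mercurial's wrapfunction pattern to context managers; the wrapper re-enters the original manager so the caller's with-body still runs inside it. The shape of that pattern, reduced to plain Python:

    import contextlib

    @contextlib.contextmanager
    def original(repo):
        print("enter")      # stands in for the wrapped dirstate context
        yield
        print("exit")

    @contextlib.contextmanager
    def wrapper(orig, repo):
        # extension setup (e.g. opening the largefiles dirstate) goes here
        with orig(repo):
            yield           # the caller's with-body executes here

    with wrapper(original, repo=None):
        print("body")       # prints: enter / body / exit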
@@ -511,10 +555,12 @@ def overridedebugstate(orig, ui, repo, *
 # largefiles. This makes the merge proceed and we can then handle this
 # case further in the overridden calculateupdates function below.
 @eh.wrapfunction(merge, b'_checkunknownfile')
-def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
-    if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
+def overridecheckunknownfile(
+    origfn, dirstate, wvfs, dircache, wctx, mctx, f, f2=None
+):
+    if lfutil.standin(dirstate.normalize(f)) in wctx:
         return False
-    return origfn(repo, wctx, mctx, f, f2)
+    return origfn(dirstate, wvfs, dircache, wctx, mctx, f, f2)
 
 
 # The manifest merge handles conflicts on the manifest level. We want
@@ -658,18 +704,12 @@ def overridecalculateupdates(
 def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata):
     if MERGE_ACTION_LARGEFILE_MARK_REMOVED in actions:
         lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
-        with lfdirstate.parentchange():
-            for lfile, args, msg in actions[
-                MERGE_ACTION_LARGEFILE_MARK_REMOVED
-            ]:
-                # this should be executed before 'orig', to execute 'remove'
-                # before all other actions
-                repo.dirstate.update_file(
-                    lfile, p1_tracked=True, wc_tracked=False
-                )
-                # make sure lfile doesn't get synclfdirstate'd as normal
-                lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=True)
-        lfdirstate.write(repo.currenttransaction())
+        for lfile, args, msg in actions[MERGE_ACTION_LARGEFILE_MARK_REMOVED]:
+            # this should be executed before 'orig', to execute 'remove'
+            # before all other actions
+            repo.dirstate.update_file(lfile, p1_tracked=True, wc_tracked=False)
+            # make sure lfile doesn't get synclfdirstate'd as normal
+            lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=True)
 
     return orig(repo, actions, branchmerge, getfiledata)
 
@@ -901,7 +941,7 b' def overriderevert(orig, ui, repo, ctx, ' | |||||
     # Because we put the standins in a bad state (by updating them)
     # and then return them to a correct state we need to lock to
     # prevent others from changing them in their incorrect state.
-    with repo.wlock():
+    with repo.wlock(), repo.dirstate.running_status(repo):
         lfdirstate = lfutil.openlfdirstate(ui, repo)
         s = lfutil.lfdirstatestatus(lfdirstate, repo)
         lfdirstate.write(repo.currenttransaction())
@@ -1436,7 +1476,7 b' def outgoinghook(ui, repo, other, opts, ' | |||||
 
     def addfunc(fn, lfhash):
         if fn not in toupload:
-            toupload[fn] = []
+            toupload[fn] = []  # pytype: disable=unsupported-operands
         toupload[fn].append(lfhash)
         lfhashes.add(lfhash)
 
@@ -1520,20 +1560,34 b' def overridesummary(orig, ui, repo, *pat' | |||||
 
 
 @eh.wrapfunction(scmutil, b'addremove')
-def scmutiladdremove(orig, repo, matcher, prefix, uipathfn, opts=None):
+def scmutiladdremove(
+    orig,
+    repo,
+    matcher,
+    prefix,
+    uipathfn,
+    opts=None,
+    open_tr=None,
+):
     if opts is None:
         opts = {}
     if not lfutil.islfilesrepo(repo):
-        return orig(repo, matcher, prefix, uipathfn, opts)
+        return orig(repo, matcher, prefix, uipathfn, opts, open_tr=open_tr)
+
+    # open the transaction and changing_files context
+    if open_tr is not None:
+        open_tr()
+
     # Get the list of missing largefiles so we can remove them
-    lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
-    unsure, s, mtime_boundary = lfdirstate.status(
-        matchmod.always(),
-        subrepos=[],
-        ignored=False,
-        clean=False,
-        unknown=False,
-    )
+    with repo.dirstate.running_status(repo):
+        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
+        unsure, s, mtime_boundary = lfdirstate.status(
+            matchmod.always(),
+            subrepos=[],
+            ignored=False,
+            clean=False,
+            unknown=False,
+        )
 
     # Call into the normal remove code, but the removing of the standin, we want
     # to have handled by original addremove. Monkey patching here makes sure
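
The new open_tr parameter above is a callback protocol: the wrapped addremove opens the caller's transaction (and its changing_files context) only once it knows it has work to do, and hands the same callback along when it delegates. A toy model of that protocol; the names are illustrative, not Mercurial's:

    def addremove(files, open_tr=None):
        # open the caller's transaction lazily: nothing is opened when
        # there is no work, mirroring the `if open_tr is not None:
        # open_tr()` call added in the hunk above
        if not files:
            return 0
        if open_tr is not None:
            open_tr()
        return len(files)

    opened = []
    assert addremove(['f'], open_tr=lambda: opened.append('tr')) == 1
    assert opened == ['tr']
    assert addremove([], open_tr=lambda: opened.append('tr')) == 0
    assert opened == ['tr']  # the empty call never opened a transaction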
@@ -1567,7 +1621,8 b' def scmutiladdremove(orig, repo, matcher' | |||||
     # function to take care of the rest. Make sure it doesn't do anything with
     # largefiles by passing a matcher that will ignore them.
     matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
-    return orig(repo, matcher, prefix, uipathfn, opts)
+
+    return orig(repo, matcher, prefix, uipathfn, opts, open_tr=open_tr)
 
 
 # Calling purge with --all will cause the largefiles to be deleted.
@@ -1737,7 +1792,7 b' def mergeupdate(orig, repo, node, branch' | |||||
     matcher = kwargs.get('matcher', None)
     # note if this is a partial update
     partial = matcher and not matcher.always()
-    with repo.wlock():
+    with repo.wlock(), repo.dirstate.changing_parents(repo):
         # branch |       |         |
         #  merge | force | partial | action
         # -------+-------+---------+--------------
@@ -1752,15 +1807,15 b' def mergeupdate(orig, repo, node, branch' | |||||
         #
         # (*) don't care
         # (*1) deprecated, but used internally (e.g: "rebase --collapse")
-
-        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
-        unsure, s, mtime_boundary = lfdirstate.status(
-            matchmod.always(),
-            subrepos=[],
-            ignored=False,
-            clean=True,
-            unknown=False,
-        )
+        with repo.dirstate.running_status(repo):
+            lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
+            unsure, s, mtime_boundary = lfdirstate.status(
+                matchmod.always(),
+                subrepos=[],
+                ignored=False,
+                clean=True,
+                unknown=False,
+            )
         oldclean = set(s.clean)
         pctx = repo[b'.']
         dctx = repo[node]
@@ -1787,7 +1842,14 b' def mergeupdate(orig, repo, node, branch' | |||||
         # mark all clean largefiles as dirty, just in case the update gets
         # interrupted before largefiles and lfdirstate are synchronized
         for lfile in oldclean:
-            lfdirstate.set_possibly_dirty(lfile)
+            entry = lfdirstate.get_entry(lfile)
+            lfdirstate.hacky_extension_update_file(
+                lfile,
+                wc_tracked=entry.tracked,
+                p1_tracked=entry.p1_tracked,
+                p2_info=entry.p2_info,
+                possibly_dirty=True,
+            )
         lfdirstate.write(repo.currenttransaction())
 
         oldstandins = lfutil.getstandinsstate(repo)
@@ -1798,24 +1860,22 b' def mergeupdate(orig, repo, node, branch' | |||||
             raise error.ProgrammingError(
                 b'largefiles is not compatible with in-memory merge'
             )
-        with lfdirstate.parentchange():
-            result = orig(repo, node, branchmerge, force, *args, **kwargs)
+        result = orig(repo, node, branchmerge, force, *args, **kwargs)
 
-            newstandins = lfutil.getstandinsstate(repo)
-            filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
+        newstandins = lfutil.getstandinsstate(repo)
+        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
 
-            # to avoid leaving all largefiles as dirty and thus rehash them, mark
-            # all the ones that didn't change as clean
-            for lfile in oldclean.difference(filelist):
-                lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=True)
-            lfdirstate.write(repo.currenttransaction())
+        # to avoid leaving all largefiles as dirty and thus rehash them, mark
+        # all the ones that didn't change as clean
+        for lfile in oldclean.difference(filelist):
+            lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=True)
 
-            if branchmerge or force or partial:
-                filelist.extend(s.deleted + s.removed)
+        if branchmerge or force or partial:
+            filelist.extend(s.deleted + s.removed)
 
-            lfcommands.updatelfiles(
-                repo.ui, repo, filelist=filelist, normallookup=partial
-            )
+        lfcommands.updatelfiles(
+            repo.ui, repo, filelist=filelist, normallookup=partial
+        )
 
         return result
 
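
Across the mergeupdate hunks, dirstate.parentchange() becomes dirstate.changing_parents(repo) and the intermediate lfdirstate.write(...) after the update loop disappears. One plausible reading, sketched below with a toy dirstate (this is a guess at the rationale, not Mercurial's implementation), is that a transaction-aware context can buffer updates and flush them once when the scope closes:

    import contextlib

    class TinyDirstate:
        def __init__(self):
            self.pending = {}
            self.on_disk = {}

        def update_file(self, name, **flags):
            # buffered while a changing scope is open
            self.pending[name] = flags

        @contextlib.contextmanager
        def changing_parents(self, repo):
            # flush everything once at scope exit instead of per-call writes
            try:
                yield
            finally:
                self.on_disk.update(self.pending)
                self.pending.clear()

    ds = TinyDirstate()
    with ds.changing_parents(repo=None):
        ds.update_file('a.txt', p1_tracked=True, wc_tracked=True)
    assert 'a.txt' in ds.on_disk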
@@ -139,7 +139,7 b' def reposetup(ui, repo):' | |||||
         except error.LockError:
             wlock = util.nullcontextmanager()
             gotlock = False
-        with wlock:
+        with wlock, self.dirstate.running_status(self):
 
             # First check if paths or patterns were specified on the
             # command line.  If there were, and they don't match any
@@ -321,6 +321,8 b' def reposetup(ui, repo):' | |||||
 
             if gotlock:
                 lfdirstate.write(self.currenttransaction())
+            else:
+                lfdirstate.invalidate()
 
             self.lfstatus = True
             return scmutil.status(*result)
@@ -36,22 +36,23 b' def openstore(repo=None, remote=None, pu' | |||||
             b'lfpullsource', repo, ui, lfpullsource
         )
     else:
-        path, _branches = urlutil.get_unique_pull_path(
-            b'lfpullsource', repo, ui, lfpullsource
+        path = urlutil.get_unique_pull_path_obj(
+            b'lfpullsource', ui, lfpullsource
         )
 
     # XXX we should not explicitly pass b'default', as this will result in
     # b'default' being returned if no `paths.default` was defined. We
     # should explicitely handle the lack of value instead.
     if repo is None:
-        path, _branches = urlutil.get_unique_pull_path(
-            b'lfs', ui, b'default'
+        path = urlutil.get_unique_pull_path_obj(
+            b'lfs',
+            ui,
+            b'default',
         )
         remote = hg.peer(repo or ui, {}, path)
-    elif path == b'default-push' or path == b'default':
+    elif path.loc == b'default-push' or path.loc == b'default':
         remote = repo
     else:
-        path, _branches = urlutil.parseurl(path)
         remote = hg.peer(repo or ui, {}, path)
 
     # The path could be a scheme so use Mercurial's normal functionality
@@ -168,12 +168,16 b' class local:' | |||||
             # producing the response (but the server has no way of telling us
             # that), and we really don't need to try to write the response to
             # the localstore, because it's not going to match the expected.
+            # The server also uses this method to store data uploaded by the
+            # client, so if this happens on the server side, it's possible
+            # that the client crashed or an antivirus interfered with the
+            # upload.
             if content_length is not None and int(content_length) != size:
                 msg = (
                     b"Response length (%d) does not match Content-Length "
-                    b"header (%d)"
+                    b"header (%d) for %s"
                 )
-                raise LfsRemoteError(_(msg) % (size, int(content_length)))
+                raise LfsRemoteError(_(msg) % (size, int(content_length), oid))
 
         realoid = hex(sha256.digest())
         if realoid != oid:
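
The message change above threads the blob oid into the size-mismatch error so server-side logs can identify the offending object. A standalone sketch of the same check, using plain strings and ValueError in place of Mercurial's bytes and LfsRemoteError:

    def check_response_size(oid, content_length, size):
        # raise when the body size disagrees with the Content-Length header
        if content_length is not None and int(content_length) != size:
            msg = (
                "Response length (%d) does not match Content-Length "
                "header (%d) for %s"
            )
            raise ValueError(msg % (size, int(content_length), oid))

    check_response_size('deadbeef', content_length='4', size=4)  # no error
    try:
        check_response_size('deadbeef', content_length='10', size=4)
    except ValueError as exc:
        assert 'deadbeef' in str(exc)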
@@ -82,7 +82,6 b' from mercurial.pycompat import (' | |||||
 from mercurial import (
     cmdutil,
     commands,
-    dirstateguard,
     encoding,
     error,
     extensions,
@@ -791,7 +790,10 b' class queue:' | |||||
         if self.added:
             qrepo = self.qrepo()
             if qrepo:
-                qrepo[None].add(f for f in self.added if f not in qrepo[None])
+                with qrepo.wlock(), qrepo.dirstate.changing_files(qrepo):
+                    qrepo[None].add(
+                        f for f in self.added if f not in qrepo[None]
+                    )
             self.added = []
 
     def removeundo(self, repo):
@@ -1082,7 +1084,7 b' class queue:' | |||||
 
         if merge and files:
             # Mark as removed/merged and update dirstate parent info
-            with repo.dirstate.parentchange():
+            with repo.dirstate.changing_parents(repo):
                 for f in files:
                     repo.dirstate.update_file_p1(f, p1_tracked=True)
                 p1 = repo.dirstate.p1()
@@ -1129,7 +1131,8 b' class queue:' | |||||
         if not keep:
             r = self.qrepo()
             if r:
-                r[None].forget(patches)
+                with r.wlock(), r.dirstate.changing_files(r):
+                    r[None].forget(patches)
             for p in patches:
                 try:
                     os.unlink(self.join(p))
@@ -1153,7 +1156,7 b' class queue:' | |||||
             sortedseries.append((idx, p))
 
         sortedseries.sort(reverse=True)
-        for (i, p) in sortedseries:
+        for i, p in sortedseries:
             if i != -1:
                 del self.fullseries[i]
             else:
@@ -1177,7 +1180,6 b' class queue:' | |||||
         firstrev = repo[self.applied[0].node].rev()
         patches = []
         for i, rev in enumerate(revs):
-
             if rev < firstrev:
                 raise error.Abort(_(b'revision %d is not managed') % rev)
 
@@ -1465,7 +1467,8 b' class queue:' | |||||
                 p.close()
             r = self.qrepo()
             if r:
-                r[None].add([patchfn])
+                with r.wlock(), r.dirstate.changing_files(r):
+                    r[None].add([patchfn])
         except:  # re-raises
             repo.rollback()
             raise
@@ -1830,7 +1833,7 b' class queue:' | |||||
             if keepchanges and tobackup:
                 raise error.Abort(_(b"local changes found, qrefresh first"))
             self.backup(repo, tobackup)
-            with repo.dirstate.parentchange():
+            with repo.dirstate.changing_parents(repo):
                 for f in a:
                     repo.wvfs.unlinkpath(f, ignoremissing=True)
                     repo.dirstate.update_file(
@@ -1988,73 +1991,67 b' class queue:' | |||||
 
             bmlist = repo[top].bookmarks()
 
-            with repo.dirstate.parentchange():
-                # XXX do we actually need the dirstateguard
-                dsguard = None
-                try:
-                    dsguard = dirstateguard.dirstateguard(repo, b'mq.refresh')
+            with repo.dirstate.changing_parents(repo):
                 if diffopts.git or diffopts.upgrade:
                     copies = {}
                     for dst in a:
                         src = repo.dirstate.copied(dst)
                         # during qfold, the source file for copies may
                         # be removed. Treat this as a simple add.
                         if src is not None and src in repo.dirstate:
                             copies.setdefault(src, []).append(dst)
                         repo.dirstate.update_file(
                             dst, p1_tracked=False, wc_tracked=True
                         )
                     # remember the copies between patchparent and qtip
                     for dst in aaa:
                         src = ctx[dst].copysource()
                         if src:
                             copies.setdefault(src, []).extend(
                                 copies.get(dst, [])
                             )
                             if dst in a:
                                 copies[src].append(dst)
                         # we can't copy a file created by the patch itself
                         if dst in copies:
                             del copies[dst]
                     for src, dsts in copies.items():
                         for dst in dsts:
                             repo.dirstate.copy(src, dst)
                 else:
                     for dst in a:
                         repo.dirstate.update_file(
                             dst, p1_tracked=False, wc_tracked=True
                         )
                     # Drop useless copy information
                     for f in list(repo.dirstate.copies()):
                         repo.dirstate.copy(None, f)
                     for f in r:
                         repo.dirstate.update_file_p1(f, p1_tracked=True)
                     # if the patch excludes a modified file, mark that
                     # file with mtime=0 so status can see it.
                     mm = []
                     for i in range(len(m) - 1, -1, -1):
                         if not match1(m[i]):
                             mm.append(m[i])
                             del m[i]
                     for f in m:
                         repo.dirstate.update_file_p1(f, p1_tracked=True)
                     for f in mm:
                         repo.dirstate.update_file_p1(f, p1_tracked=True)
                     for f in forget:
                         repo.dirstate.update_file_p1(f, p1_tracked=False)
 
                 user = ph.user or ctx.user()
 
                 oldphase = repo[top].phase()
 
                 # assumes strip can roll itself back if interrupted
                 repo.setparents(*cparents)
+                repo.dirstate.write(repo.currenttransaction())
                 self.applied.pop()
                 self.applieddirty = True
                 strip(self.ui, repo, [top], update=False, backup=False)
-                    dsguard.close()
-                finally:
-                    release(dsguard)
 
             try:
                 # might be nice to attempt to roll back strip after this
@@ -2124,8 +2121,9 b' class queue:' | |||||
         finally:
             lockmod.release(tr, lock)
     except:  # re-raises
-        ctx = repo[cparents[0]]
-        repo.dirstate.rebuild(ctx.node(), ctx.manifest())
+        with repo.dirstate.changing_parents(repo):
+            ctx = repo[cparents[0]]
+            repo.dirstate.rebuild(ctx.node(), ctx.manifest())
         self.savedirty()
         self.ui.warn(
             _(
@@ -2760,18 +2758,19 b' def qinit(ui, repo, create):' | |||||
     r = q.init(repo, create)
     q.savedirty()
     if r:
+        with r.wlock(), r.dirstate.changing_files(r):
             if not os.path.exists(r.wjoin(b'.hgignore')):
                 fp = r.wvfs(b'.hgignore', b'w')
                 fp.write(b'^\\.hg\n')
                 fp.write(b'^\\.mq\n')
                 fp.write(b'syntax: glob\n')
                 fp.write(b'status\n')
                 fp.write(b'guards\n')
                 fp.close()
             if not os.path.exists(r.wjoin(b'series')):
                 r.wvfs(b'series', b'w').close()
             r[None].add([b'.hgignore', b'series'])
             commands.add(ui, r)
     return 0
 
 
@@ -2854,16 +2853,17 b' def clone(ui, source, dest=None, **opts)' | |||||
     # main repo (destination and sources)
     if dest is None:
         dest = hg.defaultdest(source)
-    __, source_path, __ = urlutil.get_clone_path(ui, source)
+    source_path = urlutil.get_clone_path_obj(ui, source)
     sr = hg.peer(ui, opts, source_path)
 
     # patches repo (source only)
     if opts.get(b'patches'):
-        __, patchespath, __ = urlutil.get_clone_path(ui, opts.get(b'patches'))
+        patches_path = urlutil.get_clone_path_obj(ui, opts.get(b'patches'))
     else:
-        patchespath = patchdir(sr)
+        # XXX path: we should turn this into a path object
+        patches_path = patchdir(sr)
     try:
-        hg.peer(ui, opts, patchespath)
+        hg.peer(ui, opts, patches_path)
     except error.RepoError:
         raise error.Abort(
             _(b'versioned patch repository not found (see init --mq)')
@@ -3223,45 +3223,46 b' def fold(ui, repo, *files, **opts):' | |||||
         raise error.Abort(_(b'qfold requires at least one patch name'))
     if not q.checktoppatch(repo)[0]:
         raise error.Abort(_(b'no patches applied'))
 
+    with repo.wlock():
         q.checklocalchanges(repo)
 
         message = cmdutil.logmessage(ui, opts)
 
         parent = q.lookup(b'qtip')
         patches = []
         messages = []
         for f in files:
             p = q.lookup(f)
             if p in patches or p == parent:
                 ui.warn(_(b'skipping already folded patch %s\n') % p)
             if q.isapplied(p):
                 raise error.Abort(
                     _(b'qfold cannot fold already applied patch %s') % p
                 )
             patches.append(p)
 
         for p in patches:
             if not message:
                 ph = patchheader(q.join(p), q.plainmode)
                 if ph.message:
                     messages.append(ph.message)
             pf = q.join(p)
             (patchsuccess, files, fuzz) = q.patch(repo, pf)
             if not patchsuccess:
                 raise error.Abort(_(b'error folding patch %s') % p)
 
         if not message:
             ph = patchheader(q.join(parent), q.plainmode)
             message = ph.message
             for msg in messages:
                 if msg:
                     if message:
                         message.append(b'* * *')
                     message.extend(msg)
             message = b'\n'.join(message)
 
         diffopts = q.patchopts(q.diffopts(), *patches)
-    with repo.wlock():
         q.refresh(
             repo,
             msg=message,
@@ -3627,8 +3628,8 b' def rename(ui, repo, patch, name=None, *' | |||||
     util.rename(q.join(patch), absdest)
     r = q.qrepo()
     if r and patch in r.dirstate:
-        wctx = r[None]
-        with r.wlock():
+        with r.wlock(), r.dirstate.changing_files(r):
+            wctx = r[None]
             if r.dirstate.get_entry(patch).added:
                 r.dirstate.set_untracked(patch)
                 r.dirstate.set_tracked(name)
@@ -320,7 +320,7 b' def _narrow(' | |||||
             repo.store.markremoved(f)
 
         ui.status(_(b'deleting unwanted files from working copy\n'))
-        with repo.dirstate.parentchange():
+        with repo.dirstate.changing_parents(repo):
             narrowspec.updateworkingcopy(repo, assumeclean=True)
             narrowspec.copytoworkingcopy(repo)
 
@@ -380,7 +380,7 b' def _widen(' | |||||
     if ellipsesremote:
         ds = repo.dirstate
         p1, p2 = ds.p1(), ds.p2()
-        with ds.parentchange():
+        with ds.changing_parents(repo):
             ds.setparents(repo.nullid, repo.nullid)
     if isoldellipses:
         with wrappedextraprepare:
@@ -416,13 +416,15 b' def _widen(' | |||||
                 repo, trmanager.transaction, source=b'widen'
             )
             # TODO: we should catch error.Abort here
-            bundle2.processbundle(repo, bundle, op=op)
+            bundle2.processbundle(repo, bundle, op=op, remote=remote)
 
     if ellipsesremote:
-        with ds.parentchange():
+        with ds.changing_parents(repo):
             ds.setparents(p1, p2)
 
-    with repo.transaction(b'widening'), repo.dirstate.parentchange():
+    with repo.transaction(b'widening'), repo.dirstate.changing_parents(
+        repo
+    ):
         repo.setnewnarrowpats()
         narrowspec.updateworkingcopy(repo)
         narrowspec.copytoworkingcopy(repo)
@@ -591,7 +593,7 b' def trackedcmd(ui, repo, remotepath=None' | |||||
     if update_working_copy:
         with repo.wlock(), repo.lock(), repo.transaction(
             b'narrow-wc'
-        ), repo.dirstate.parentchange():
+        ), repo.dirstate.changing_parents(repo):
             narrowspec.updateworkingcopy(repo)
             narrowspec.copytoworkingcopy(repo)
         return 0
@@ -606,10 +608,9 b' def trackedcmd(ui, repo, remotepath=None' | |||||
     # Find the revisions we have in common with the remote. These will
     # be used for finding local-only changes for narrowing. They will
     # also define the set of revisions to update for widening.
-    r = urlutil.get_unique_pull_path(b'tracked', repo, ui, remotepath)
-    url, branches = r
-    ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(url))
-    remote = hg.peer(repo, opts, url)
+    path = urlutil.get_unique_pull_path_obj(b'tracked', ui, remotepath)
+    ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(path.loc))
+    remote = hg.peer(repo, opts, path)
 
     try:
         # check narrow support before doing anything if widening needs to be
@@ -19,8 +19,8 b' def wraprepo(repo):' | |||||
             dirstate = super(narrowrepository, self)._makedirstate()
             return narrowdirstate.wrapdirstate(self, dirstate)
 
-        def peer(self):
-            peer = super(narrowrepository, self).peer()
+        def peer(self, path=None):
+            peer = super(narrowrepository, self).peer(path=path)
             peer._caps.add(wireprototypes.NARROWCAP)
             peer._caps.add(wireprototypes.ELLIPSESCAP)
             return peer
@@ -450,7 +450,7 b' class notifier:' | |||||
         try:
             msg = mail.parsebytes(data)
         except emailerrors.MessageParseError as inst:
-            raise error.Abort(inst)
+            raise error.Abort(stringutil.forcebytestr(inst))
 
         # store sender and subject
         sender = msg['From']
@@ -286,9 +286,12 b' def vcrcommand(name, flags, spec, helpca' | |||||
     import hgdemandimport
 
     with hgdemandimport.deactivated():
+        # pytype: disable=import-error
         import vcr as vcrmod
         import vcr.stubs as stubs
 
+        # pytype: enable=import-error
+
         vcr = vcrmod.VCR(
             serializer='json',
             before_record_request=sanitiserequest,
@@ -350,11 +353,14 b' def urlencodenested(params):' | |||||
     """
     flatparams = util.sortdict()
 
-    def process(prefix, obj):
+    def process(prefix: bytes, obj):
         if isinstance(obj, bool):
             obj = {True: b'true', False: b'false'}[obj]  # Python -> PHP form
         lister = lambda l: [(b'%d' % k, v) for k, v in enumerate(l)]
+        # .items() will only be called for a dict type
+        # pytype: disable=attribute-error
         items = {list: lister, dict: lambda x: x.items()}.get(type(obj))
+        # pytype: enable=attribute-error
         if items is None:
             flatparams[prefix] = obj
         else:
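
urlencodenested flattens nested dicts and lists into PHP-style bracketed keys, which is why .items() is only reached for dict values; the pytype pragmas above just silence the checker around that dispatch. A small sketch of the flattening idea, using str keys where the real code uses bytes and util.sortdict:

    def flatten(prefix, obj, out):
        # lists become index-keyed entries, dicts recurse with key suffixes,
        # and scalars are stored under the accumulated prefix
        if isinstance(obj, list):
            items = list(enumerate(obj))
        elif isinstance(obj, dict):
            items = list(obj.items())
        else:
            out[prefix] = obj
            return
        for key, value in items:
            flatten('%s[%s]' % (prefix, key), value, out)

    params = {}
    flatten('constraints', {'ids': [1, 2]}, params)
    assert params == {'constraints[ids][0]': 1, 'constraints[ids][1]': 2}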
@@ -30,7 +30,6 b' from mercurial import (' | |||||
     commands,
     copies,
     destutil,
-    dirstateguard,
     error,
     extensions,
     logcmdutil,
@@ -1271,15 +1270,9 b' def _origrebase(ui, repo, action, opts, ' | |||||
     # one transaction here. Otherwise, transactions are obtained when
     # committing each node, which is slower but allows partial success.
     with util.acceptintervention(tr):
-        # Same logic for the dirstate guard, except we don't create one when
-        # rebasing in-memory (it's not needed).
-        dsguard = None
-        if singletr and not rbsrt.inmemory:
-            dsguard = dirstateguard.dirstateguard(repo, b'rebase')
-        with util.acceptintervention(dsguard):
-            rbsrt._performrebase(tr)
-            if not rbsrt.dryrun:
-                rbsrt._finishrebase()
+        rbsrt._performrebase(tr)
+        if not rbsrt.dryrun:
+            rbsrt._finishrebase()
 
 
 def _definedestmap(ui, repo, inmemory, destf, srcf, basef, revf, destspace):
@@ -1500,10 +1493,10 b' def commitmemorynode(repo, wctx, editor,' | |||||
 def commitnode(repo, editor, extra, user, date, commitmsg):
     """Commit the wd changes with parents p1 and p2.
     Return node of committed revision."""
-    dsguard = util.nullcontextmanager()
+    tr = util.nullcontextmanager
     if not repo.ui.configbool(b'rebase', b'singletransaction'):
-        dsguard = dirstateguard.dirstateguard(repo, b'rebase')
-    with dsguard:
+        tr = lambda: repo.transaction(b'rebase')
+    with tr():
         # Commit might fail if unresolved files exist
         newnode = repo.commit(
             text=commitmsg, user=user, date=date, extra=extra, editor=editor
@@ -1520,12 +1513,14 b' def rebasenode(repo, rev, p1, p2, base, ' | |||||
     p1ctx = repo[p1]
     if wctx.isinmemory():
         wctx.setbase(p1ctx)
+        scope = util.nullcontextmanager
     else:
         if repo[b'.'].rev() != p1:
             repo.ui.debug(b" update to %d:%s\n" % (p1, p1ctx))
             mergemod.clean_update(p1ctx)
         else:
             repo.ui.debug(b" already in destination\n")
+        scope = lambda: repo.dirstate.changing_parents(repo)
     # This is, alas, necessary to invalidate workingctx's manifest cache,
     # as well as other data we litter on it in other places.
     wctx = repo[None]
@@ -1535,26 +1530,27 b' def rebasenode(repo, rev, p1, p2, base, ' | |||||
     if base is not None:
         repo.ui.debug(b" detach base %d:%s\n" % (base, repo[base]))
 
+    with scope():
         # See explanation in merge.graft()
         mergeancestor = repo.changelog.isancestor(p1ctx.node(), ctx.node())
         stats = mergemod._update(
             repo,
             rev,
             branchmerge=True,
             force=True,
             ancestor=base,
             mergeancestor=mergeancestor,
             labels=[b'dest', b'source', b'parent of source'],
             wc=wctx,
         )
         wctx.setparents(p1ctx.node(), repo[p2].node())
         if collapse:
             copies.graftcopies(wctx, ctx, p1ctx)
         else:
             # If we're not using --collapse, we need to
             # duplicate copies between the revision we're
             # rebasing and its first parent.
             copies.graftcopies(wctx, ctx, ctx.p1())
 
     if stats.unresolvedcount > 0:
         if wctx.isinmemory():
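
Both rebase hunks replace a dirstateguard object with a context factory chosen up front (util.nullcontextmanager, or a lambda that opens the real scope) and invoked at the with statement. A runnable sketch of that select-then-call pattern with a toy transaction:

    import contextlib

    @contextlib.contextmanager
    def open_transaction():
        print('tx: open')
        try:
            yield
        finally:
            print('tx: close')

    def commit(single_transaction):
        # pick the factory first, call it only at use time; this mirrors the
        # `tr = util.nullcontextmanager` / `tr = lambda: ...` swap above
        scope = contextlib.nullcontext if single_transaction else open_transaction
        with scope():
            print('committing')

    commit(True)   # no transaction wrapper
    commit(False)  # wrapped in tx open/close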
@@ -39,7 +39,7 b' command = registrar.command(cmdtable)' | |||||
 try:
     # Silence a warning about python-Levenshtein.
     #
-    # We don't need the
+    # We don't need the performance that much and it gets annoying in tests.
     import warnings
 
     with warnings.catch_warnings():
@@ -50,7 +50,7 b' try:' | |||||
             module="fuzzywuzzy.fuzz",
         )
 
-        import fuzzywuzzy.fuzz as fuzz
+        import fuzzywuzzy.fuzz as fuzz  # pytype: disable=import-error
 
         fuzz.token_set_ratio
 except ImportError:
@@ -67,8 +67,8 b' def relink(ui, repo, origin=None, **opts' | |||||
 
     if origin is None and b'default-relink' in ui.paths:
         origin = b'default-relink'
-    path, __ = urlutil.get_unique_pull_path(b'relink', repo, ui, origin)
-    src = hg.repository(repo.baseui, path)
+    path = urlutil.get_unique_pull_path_obj(b'relink', ui, origin)
+    src = hg.repository(repo.baseui, path.loc)
     ui.status(_(b'relinking %s to %s\n') % (src.store.path, repo.store.path))
     if repo.root == src.root:
         ui.status(_(b'there is nothing to relink\n'))
@@ -299,6 +299,7 b' class remotefilelog:' | |||||
         deltaprevious=False,
         deltamode=None,
         sidedata_helpers=None,
+        debug_info=None,
     ):
         # we don't use any of these parameters here
         del nodesorder, revisiondata, assumehaveparentrevisions, deltaprevious
@@ -247,7 +247,7 b' def parsesizeflags(raw):' | |||||
         index = raw.index(b'\0')
     except ValueError:
         raise BadRemotefilelogHeader(
-            "unexpected remotefilelog header: illegal format"
+            b"unexpected remotefilelog header: illegal format"
         )
     header = raw[:index]
     if header.startswith(b'v'):
@@ -267,7 +267,7 b' def parsesizeflags(raw):' | |||||
         size = int(header)
     if size is None:
         raise BadRemotefilelogHeader(
-            "unexpected remotefilelog header: no size found"
+            b"unexpected remotefilelog header: no size found"
         )
     return index + 1, size, flags
 
@@ -80,9 +80,25 b' class ShortRepository:' | |||||
     def __repr__(self):
         return b'<ShortRepository: %s>' % self.scheme
 
+    def make_peer(self, ui, path, *args, **kwargs):
+        new_url = self.resolve(path.rawloc)
+        path = path.copy(new_raw_location=new_url)
+        cls = hg.peer_schemes.get(path.url.scheme)
+        if cls is not None:
+            return cls.make_peer(ui, path, *args, **kwargs)
+        return None
+
     def instance(self, ui, url, create, intents=None, createopts=None):
         url = self.resolve(url)
-        return hg._peerlookup(url).instance(
+        u = urlutil.url(url)
+        scheme = u.scheme or b'file'
+        if scheme in hg.peer_schemes:
+            cls = hg.peer_schemes[scheme]
+        elif scheme in hg.repo_schemes:
+            cls = hg.repo_schemes[scheme]
+        else:
+            cls = hg.LocalFactory
+        return cls.instance(
             ui, url, create, intents=intents, createopts=createopts
         )
 
@@ -119,24 +135,29 b' schemes = {' | |||||
 }
 
 
+def _check_drive_letter(scheme: bytes) -> None:
+    """check if a scheme conflict with a Windows drive letter"""
+    if (
+        pycompat.iswindows
+        and len(scheme) == 1
+        and scheme.isalpha()
+        and os.path.exists(b'%s:\\' % scheme)
+    ):
+        msg = _(b'custom scheme %s:// conflicts with drive letter %s:\\\n')
+        msg %= (scheme, scheme.upper())
+        raise error.Abort(msg)
+
+
 def extsetup(ui):
     schemes.update(dict(ui.configitems(b'schemes')))
     t = templater.engine(templater.parse)
     for scheme, url in schemes.items():
-        if (
-            pycompat.iswindows
-            and len(scheme) == 1
-            and scheme.isalpha()
-            and os.path.exists(b'%s:\\' % scheme)
-        ):
-            raise error.Abort(
-                _(
-                    b'custom scheme %s:// conflicts with drive '
-                    b'letter %s:\\\n'
-                )
-                % (scheme, scheme.upper())
-            )
-        hg.schemes[scheme] = ShortRepository(url, scheme, t)
+        _check_drive_letter(scheme)
+        url_scheme = urlutil.url(url).scheme
+        if url_scheme in hg.peer_schemes:
+            hg.peer_schemes[scheme] = ShortRepository(url, scheme, t)
+        else:
+            hg.repo_schemes[scheme] = ShortRepository(url, scheme, t)
 
     extensions.wrapfunction(urlutil, b'hasdriveletter', hasdriveletter)
 
@@ -144,7 +165,11 b' def extsetup(ui):' | |||||
 @command(b'debugexpandscheme', norepo=True)
 def expandscheme(ui, url, **opts):
     """given a repo path, provide the scheme-expanded path"""
-    repo = hg._peerlookup(url)
-    if isinstance(repo, ShortRepository):
-        url = repo.resolve(url)
+    scheme = urlutil.url(url).scheme
+    if scheme in hg.peer_schemes:
+        cls = hg.peer_schemes[scheme]
+    else:
+        cls = hg.repo_schemes.get(scheme)
+    if cls is not None and isinstance(cls, ShortRepository):
+        url = cls.resolve(url)
     ui.write(url + b'\n')
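
The schemes changes track a core split of the old hg.schemes registry into hg.peer_schemes and hg.repo_schemes, with peers consulted first and a local factory as fallback. A toy registry showing the lookup order that the new instance() implements (plain strings stand in for factory classes):

    peer_schemes = {'ssh': 'SshPeerFactory'}
    repo_schemes = {'file': 'LocalRepoFactory'}

    def lookup(scheme):
        # peers win over repo types; unknown schemes fall back to local
        if scheme in peer_schemes:
            return peer_schemes[scheme]
        if scheme in repo_schemes:
            return repo_schemes[scheme]
        return 'LocalFactory'

    assert lookup('ssh') == 'SshPeerFactory'
    assert lookup('file') == 'LocalRepoFactory'
    assert lookup('nope') == 'LocalFactory'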
@@ -134,7 +134,7 b' def dosplit(ui, repo, tr, ctx, opts):' | |||||
     # Set working parent to ctx.p1(), and keep working copy as ctx's content
     if ctx.node() != repo.dirstate.p1():
         hg.clean(repo, ctx.node(), show_stats=False)
-    with repo.dirstate.parentchange():
+    with repo.dirstate.changing_parents(repo):
         scmutil.movedirstate(repo, ctx.p1())
 
     # Any modified, added, removed, deleted result means split is incomplete
@@ -80,7 +80,7 b' from mercurial.utils import (' | |||||
 )
 
 try:
-    from mercurial import zstd
+    from mercurial import zstd  # pytype: disable=import-error
 
     zstd.__version__
 except ImportError:
@@ -608,6 +608,7 b' class sqlitefilestore:' | |||||
         assumehaveparentrevisions=False,
         deltamode=repository.CG_DELTAMODE_STD,
         sidedata_helpers=None,
+        debug_info=None,
     ):
         if nodesorder not in (b'nodes', b'storage', b'linear', None):
             raise error.ProgrammingError(
@@ -817,8 +817,8 b' def _dotransplant(ui, repo, *revs, **opt' | |||||
 
     sourcerepo = opts.get(b'source')
     if sourcerepo:
-        u = urlutil.get_unique_pull_path(b'transplant', repo, ui, sourcerepo)[0]
-        peer = hg.peer(repo, opts, u)
+        path = urlutil.get_unique_pull_path_obj(b'transplant', ui, sourcerepo)
+        peer = hg.peer(repo, opts, path)
         heads = pycompat.maplist(peer.lookup, opts.get(b'branch', ()))
         target = set(heads)
         for r in revs:
@@ -236,7 +236,7 b' def uncommit(ui, repo, *pats, **opts):' | |||||
             # Fully removed the old commit
             mapping[old.node()] = ()
 
-            with repo.dirstate.parentchange():
+            with repo.dirstate.changing_parents(repo):
                 scmutil.movedirstate(repo, repo[newid], match)
 
             scmutil.cleanupnodes(repo, mapping, b'uncommit', fixphase=True)
@@ -317,7 +317,7 b' def unamend(ui, repo, **opts):' | |||||
     newpredctx = repo[newprednode]
     dirstate = repo.dirstate
 
-    with dirstate.parentchange():
+    with dirstate.changing_parents(repo):
         scmutil.movedirstate(repo, newpredctx)
 
     mapping = {curctx.node(): (newprednode,)}
@@ -216,17 +216,23 b' def reposetup(ui, repo):' | |||||
 def wrap_revert(orig, repo, ctx, names, uipathfn, actions, *args, **kwargs):
     # reset dirstate cache for file we touch
     ds = repo.dirstate
-    with ds.parentchange():
-        for filename in actions[b'revert'][0]:
-            entry = ds.get_entry(filename)
-            if entry is not None:
-                if entry.p1_tracked:
-                    ds.update_file(
-                        filename,
-                        entry.tracked,
-                        p1_tracked=True,
-                        p2_info=entry.p2_info,
-                    )
+    for filename in actions[b'revert'][0]:
+        entry = ds.get_entry(filename)
+        if entry is not None:
+            if entry.p1_tracked:
+                # If we revert the file, it is possibly dirty. However,
+                # this extension meddle with the file content and therefore
+                # its size. As a result, we cannot simply call
+                # `dirstate.set_possibly_dirty` as it will not affet the
+                # expected size of the file.
+                #
+                # At least, now, the quirk is properly documented.
+                ds.hacky_extension_update_file(
+                    filename,
+                    entry.tracked,
+                    p1_tracked=entry.p1_tracked,
+                    p2_info=entry.p2_info,
+                )
     return orig(repo, ctx, names, uipathfn, actions, *args, **kwargs)
 
 
@@ -154,9 +154,14 b' class tarit:' | |||||
             )
             self.fileobj = gzfileobj
             return (
+                # taropen() wants Literal['a', 'r', 'w', 'x'] for the mode,
+                # but Literal[] is only available in 3.8+ without the
+                # typing_extensions backport.
+                # pytype: disable=wrong-arg-types
                 tarfile.TarFile.taropen(  # pytype: disable=attribute-error
                     name, pycompat.sysstr(mode), gzfileobj
                 )
+                # pytype: enable=wrong-arg-types
             )
         else:
             try:
@@ -315,8 +315,17 b' class bundleoperation:' | |||||
     * a way to construct a bundle response when applicable.
     """
 
-    def __init__(self, repo, transactiongetter, captureoutput=True, source=b''):
+    def __init__(
+        self,
+        repo,
+        transactiongetter,
+        captureoutput=True,
+        source=b'',
+        remote=None,
+    ):
         self.repo = repo
+        # the peer object who produced this bundle if available
+        self.remote = remote
         self.ui = repo.ui
         self.records = unbundlerecords()
         self.reply = None
@@ -363,7 +372,7 b' def _notransaction():' | |||||
     raise TransactionUnavailable()
 
 
-def applybundle(repo, unbundler, tr, source, url=None, **kwargs):
+def applybundle(repo, unbundler, tr, source, url=None, remote=None, **kwargs):
     # transform me into unbundler.apply() as soon as the freeze is lifted
     if isinstance(unbundler, unbundle20):
         tr.hookargs[b'bundle2'] = b'1'
@@ -371,10 +380,12 b' def applybundle(repo, unbundler, tr, sou' | |||||
371 | tr.hookargs[b'source'] = source |
|
380 | tr.hookargs[b'source'] = source | |
372 | if url is not None and b'url' not in tr.hookargs: |
|
381 | if url is not None and b'url' not in tr.hookargs: | |
373 | tr.hookargs[b'url'] = url |
|
382 | tr.hookargs[b'url'] = url | |
374 | return processbundle(repo, unbundler, lambda: tr, source=source) |
|
383 | return processbundle( | |
|
384 | repo, unbundler, lambda: tr, source=source, remote=remote | |||
|
385 | ) | |||
375 | else: |
|
386 | else: | |
376 | # the transactiongetter won't be used, but we might as well set it |
|
387 | # the transactiongetter won't be used, but we might as well set it | |
377 | op = bundleoperation(repo, lambda: tr, source=source) |
|
388 | op = bundleoperation(repo, lambda: tr, source=source, remote=remote) | |
378 | _processchangegroup(op, unbundler, tr, source, url, **kwargs) |
|
389 | _processchangegroup(op, unbundler, tr, source, url, **kwargs) | |
379 | return op |
|
390 | return op | |
380 |
|
391 | |||
@@ -450,7 +461,14 b' class partiterator:' | |||||
450 | ) |
|
461 | ) | |
451 |
|
462 | |||
452 |
|
463 | |||
453 | def processbundle(repo, unbundler, transactiongetter=None, op=None, source=b''): |
|
464 | def processbundle( | |
|
465 | repo, | |||
|
466 | unbundler, | |||
|
467 | transactiongetter=None, | |||
|
468 | op=None, | |||
|
469 | source=b'', | |||
|
470 | remote=None, | |||
|
471 | ): | |||
454 | """This function process a bundle, apply effect to/from a repo |
|
472 | """This function process a bundle, apply effect to/from a repo | |
455 |
|
473 | |||
456 | It iterates over each part then searches for and uses the proper handling |
|
474 | It iterates over each part then searches for and uses the proper handling | |
@@ -466,7 +484,12 b' def processbundle(repo, unbundler, trans' | |||||
466 | if op is None: |
|
484 | if op is None: | |
467 | if transactiongetter is None: |
|
485 | if transactiongetter is None: | |
468 | transactiongetter = _notransaction |
|
486 | transactiongetter = _notransaction | |
469 | op = bundleoperation(repo, transactiongetter, source=source) |
|
487 | op = bundleoperation( | |
|
488 | repo, | |||
|
489 | transactiongetter, | |||
|
490 | source=source, | |||
|
491 | remote=remote, | |||
|
492 | ) | |||
470 | # todo: |
|
493 | # todo: | |
471 | # - replace this is a init function soon. |
|
494 | # - replace this is a init function soon. | |
472 | # - exception catching |
|
495 | # - exception catching | |
@@ -494,6 +517,10 b' def processparts(repo, op, unbundler):' | |||||
494 |
|
517 | |||
495 |
|
518 | |||
496 | def _processchangegroup(op, cg, tr, source, url, **kwargs): |
|
519 | def _processchangegroup(op, cg, tr, source, url, **kwargs): | |
|
520 | if op.remote is not None and op.remote.path is not None: | |||
|
521 | remote_path = op.remote.path | |||
|
522 | kwargs = kwargs.copy() | |||
|
523 | kwargs['delta_base_reuse_policy'] = remote_path.delta_reuse_policy | |||
497 | ret = cg.apply(op.repo, tr, source, url, **kwargs) |
|
524 | ret = cg.apply(op.repo, tr, source, url, **kwargs) | |
498 | op.records.add( |
|
525 | op.records.add( | |
499 | b'changegroup', |
|
526 | b'changegroup', | |
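Taken together, these bundle2 changes let a caller hand the peer object down so unbundling can honor the peer path's delta-reuse policy. A hedged usage sketch; the names come from the new signatures above, while the source and url values are placeholders:

    # Sketch: threading the new `remote` argument through applybundle so
    # _processchangegroup can read remote.path.delta_reuse_policy.
    from mercurial import bundle2

    def apply_from_peer(repo, unbundler, tr, peer):
        return bundle2.applybundle(
            repo,
            unbundler,
            tr,
            source=b'pull',
            url=b'placeholder-url',
            remote=peer,  # stored as op.remote on the bundleoperation
        )
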
@@ -1938,7 +1965,12 b' def writebundle('
         raise error.Abort(
             _(b'old bundle types only supports v1 changegroups')
         )
+
+    # HG20 is the case without 2 values to unpack, but is handled above.
+    # pytype: disable=bad-unpacking
     header, comp = bundletypes[bundletype]
+    # pytype: enable=bad-unpacking
+
     if comp not in util.compengines.supportedbundletypes:
         raise error.Abort(_(b'unknown stream compression type: %s') % comp)
     compengine = util.compengines.forbundletype(comp)
@@ -5,6 +5,10 b''
 
 import collections
 
+from typing import (
+    cast,
+)
+
 from .i18n import _
 
 from .thirdparty import attr
@@ -247,7 +251,7 b' def parsebundlespec(repo, spec, strict=T'
     # required to apply it. If we see this metadata, compare against what the
     # repo supports and error if the bundle isn't compatible.
     if version == b'packed1' and b'requirements' in params:
-        requirements = set(params[b'requirements'].split(b','))
+        requirements = set(cast(bytes, params[b'requirements']).split(b','))
         missingreqs = requirements - requirementsmod.STREAM_FIXED_REQUIREMENTS
         if missingreqs:
             raise error.UnsupportedBundleSpecification(
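The `cast()` is purely a hint for the type checker; nothing changes at runtime. A tiny self-contained illustration with hypothetical values:

    from typing import Dict, Union, cast

    # Bundle spec params mix bytes values with parsed ones, so the checker
    # sees a union type; cast() narrows the b'requirements' entry to bytes.
    params: Dict[bytes, Union[bytes, int]] = {b'requirements': b'revlogv1,store'}
    requirements = set(cast(bytes, params[b'requirements']).split(b','))
    assert requirements == {b'revlogv1', b'store'}
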
@@ -88,7 +88,7 b' class bundlerevlog(revlog.revlog):'
             )
 
         if not self.index.has_node(deltabase):
-            raise LookupError(
+            raise error.LookupError(
                 deltabase, self.display_id, _(b'unknown delta base')
             )
 
@@ -458,8 +458,8 b' class bundlerepository:'
     def cancopy(self):
         return False
 
-    def peer(self):
-        return bundlepeer(self)
+    def peer(self, path=None):
+        return bundlepeer(self, path=path)
 
     def getcwd(self):
         return encoding.getcwd()  # always outside the repo
@@ -5,7 +5,7 b' from typing import ('
 
 version: int
 
-def bdiff(a: bytes, b: bytes): ...
+def bdiff(a: bytes, b: bytes) -> bytes: ...
 def blocks(a: bytes, b: bytes) -> List[Tuple[int, int, int, int]]: ...
 def fixws(s: bytes, allws: bool) -> bytes: ...
 def splitnewlines(text: bytes) -> List[bytes]: ...
@@ -2,6 +2,7 b' from typing import ('
     AnyStr,
     IO,
     List,
+    Optional,
     Sequence,
 )
 
@@ -15,7 +16,7 b' class stat:'
     st_mtime: int
     st_ctime: int
 
-def listdir(path: bytes, st: bool, skip: bool) -> List[stat]: ...
+def listdir(path: bytes, st: bool, skip: Optional[bool]) -> List[stat]: ...
 def posixfile(name: AnyStr, mode: bytes, buffering: int) -> IO: ...
 def statfiles(names: Sequence[bytes]) -> List[stat]: ...
 def setprocname(name: bytes) -> None: ...
@@ -177,7 +177,7 b' static inline bool dirstate_item_c_remov'
         (dirstate_flag_p1_tracked | dirstate_flag_p2_info));
 }
 
-static inline bool dirstate_item_c_merged(dirstateItemObject *self)
+static inline bool dirstate_item_c_modified(dirstateItemObject *self)
 {
 	return ((self->flags & dirstate_flag_wc_tracked) &&
 	        (self->flags & dirstate_flag_p1_tracked) &&
@@ -195,7 +195,7 b' static inline char dirstate_item_c_v1_st'
 {
 	if (dirstate_item_c_removed(self)) {
 		return 'r';
-	} else if (dirstate_item_c_merged(self)) {
+	} else if (dirstate_item_c_modified(self)) {
 		return 'm';
 	} else if (dirstate_item_c_added(self)) {
 		return 'a';
@@ -642,9 +642,9 b' static PyObject *dirstate_item_get_p2_in'
 	}
 };
 
-static PyObject *dirstate_item_get_merged(dirstateItemObject *self)
+static PyObject *dirstate_item_get_modified(dirstateItemObject *self)
 {
-	if (dirstate_item_c_merged(self)) {
+	if (dirstate_item_c_modified(self)) {
 		Py_RETURN_TRUE;
 	} else {
 		Py_RETURN_FALSE;
@@ -709,7 +709,7 b' static PyGetSetDef dirstate_item_getset['
 	 NULL},
 	{"added", (getter)dirstate_item_get_added, NULL, "added", NULL},
 	{"p2_info", (getter)dirstate_item_get_p2_info, NULL, "p2_info", NULL},
-	{"merged", (getter)dirstate_item_get_merged, NULL, "merged", NULL},
+	{"modified", (getter)dirstate_item_get_modified, NULL, "modified", NULL},
 	{"from_p2", (getter)dirstate_item_get_from_p2, NULL, "from_p2", NULL},
 	{"maybe_clean", (getter)dirstate_item_get_maybe_clean, NULL, "maybe_clean",
 	 NULL},
@@ -1187,7 +1187,7 b' void dirs_module_init(PyObject *mod);'
 void manifest_module_init(PyObject *mod);
 void revlog_module_init(PyObject *mod);
 
-static const int version = 20;
+static const int version = 21;
 
 static void module_init(PyObject *mod)
 {
@@ -76,3 +76,7 b' class nodetree:'
 
     def insert(self, rev: int) -> None: ...
     def shortest(self, node: bytes) -> int: ...
+
+# The IndexObject type here is defined in C, and there's no type for a buffer
+# return, as of py3.11. https://github.com/python/typing/issues/593
+def parse_index2(data: object, inline: object, format: int = ...) -> Tuple[object, Optional[Tuple[int, object]]]: ...
@@ -1446,16 +1446,25 b' static PyObject *index_issnapshot(indexO'
 static PyObject *index_findsnapshots(indexObject *self, PyObject *args)
 {
 	Py_ssize_t start_rev;
+	Py_ssize_t end_rev;
 	PyObject *cache;
 	Py_ssize_t base;
 	Py_ssize_t rev;
 	PyObject *key = NULL;
 	PyObject *value = NULL;
 	const Py_ssize_t length = index_length(self);
-	if (!PyArg_ParseTuple(args, "O!n", &PyDict_Type, &cache, &start_rev)) {
+	if (!PyArg_ParseTuple(args, "O!nn", &PyDict_Type, &cache, &start_rev,
+	                      &end_rev)) {
 		return NULL;
 	}
-	for (rev = start_rev; rev < length; rev++) {
+	end_rev += 1;
+	if (end_rev > length) {
+		end_rev = length;
+	}
+	if (start_rev < 0) {
+		start_rev = 0;
+	}
+	for (rev = start_rev; rev < end_rev; rev++) {
 		int issnap;
 		PyObject *allvalues = NULL;
 		issnap = index_issnapshotrev(self, rev);
@@ -1480,7 +1489,7 b' static PyObject *index_findsnapshots(ind'
 		}
 		if (allvalues == NULL) {
 			int r;
-			allvalues = PyList_New(0);
+			allvalues = PySet_New(0);
 			if (!allvalues) {
 				goto bail;
 			}
@@ -1491,7 +1500,7 b' static PyObject *index_findsnapshots(ind'
 			}
 		}
 		value = PyLong_FromSsize_t(rev);
-		if (PyList_Append(allvalues, value)) {
+		if (PySet_Add(allvalues, value)) {
 			goto bail;
 		}
 		Py_CLEAR(key);
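Restating the new C bounds handling in Python, for clarity; this is illustrative only. The `end_rev` argument is inclusive on the caller's side, and the C code converts it to an exclusive bound while clamping both ends to the index:

    # Python rendering of the clamping now done in index_findsnapshots().
    def clamp_scan_range(start_rev, end_rev, length):
        end_rev += 1                    # inclusive end -> exclusive bound
        end_rev = min(end_rev, length)  # never scan past the index
        start_rev = max(start_rev, 0)   # negative starts scan from rev 0
        return range(start_rev, end_rev)

    assert list(clamp_scan_range(-2, 3, 3)) == [0, 1, 2]
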
@@ -8,6 +8,11 b''
 
 import struct
 
+from typing import (
+    List,
+    Tuple,
+)
+
 from ..pure.bdiff import *
 from . import _bdiff  # pytype: disable=import-error
 
@@ -15,7 +20,7 b' ffi = _bdiff.ffi'
 lib = _bdiff.lib
 
 
-def blocks(sa, sb):
+def blocks(sa: bytes, sb: bytes) -> List[Tuple[int, int, int, int]]:
     a = ffi.new(b"struct bdiff_line**")
     b = ffi.new(b"struct bdiff_line**")
     ac = ffi.new(b"char[]", str(sa))
@@ -29,7 +34,7 b' def blocks(sa, sb):'
     count = lib.bdiff_diff(a[0], an, b[0], bn, l)
     if count < 0:
         raise MemoryError
-    rl = [None] * count
+    rl = [(0, 0, 0, 0)] * count
     h = l.next
     i = 0
     while h:
@@ -43,7 +48,7 b' def blocks(sa, sb):'
     return rl
 
 
-def bdiff(sa, sb):
+def bdiff(sa: bytes, sb: bytes) -> bytes:
     a = ffi.new(b"struct bdiff_line**")
     b = ffi.new(b"struct bdiff_line**")
     ac = ffi.new(b"char[]", str(sa))
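These annotations only describe existing behavior; the pure-Python implementation shares the same signatures, and the cffi and cext variants are expected to mirror it. A quick usage sketch:

    # Usage sketch for the annotated helpers (pure-Python variant shown).
    from mercurial.pure import bdiff

    old, new = b'a\nb\nc\n', b'a\nx\nc\n'
    patch = bdiff.bdiff(old, new)    # -> bytes, a binary delta
    spans = bdiff.blocks(old, new)   # -> [(a1, a2, b1, b2), ...] match blocks
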
@@ -6,6 +6,8 b''
 # GNU General Public License version 2 or any later version.
 
 
+from typing import List
+
 from ..pure.mpatch import *
 from ..pure.mpatch import mpatchError  # silence pyflakes
 from . import _mpatch  # pytype: disable=import-error
@@ -26,7 +28,7 b' def cffi_get_next_item(arg, pos):'
         return container[0]
 
 
-def patches(text, bins):
+def patches(text: bytes, bins: List[bytes]) -> bytes:
     lgt = len(bins)
     all = []
     if not lgt:
@@ -105,6 +105,164 b' def writechunks(ui, chunks, filename, vf'
                 os.unlink(cleanup)
 
 
+def _dbg_ubdl_line(
+    ui,
+    indent,
+    key,
+    base_value=None,
+    percentage_base=None,
+    percentage_key=None,
+):
+    """Print one line of debug_unbundle_debug_info"""
+    line = b"DEBUG-UNBUNDLING: "
+    line += b' ' * (2 * indent)
+    key += b":"
+    padding = b''
+    if base_value is not None:
+        assert len(key) + 1 + (2 * indent) <= _KEY_PART_WIDTH
+        line += key.ljust(_KEY_PART_WIDTH - (2 * indent))
+        if isinstance(base_value, float):
+            line += b"%14.3f seconds" % base_value
+        else:
+            line += b"%10d" % base_value
+            padding = b' '
+    else:
+        line += key
+
+    if percentage_base is not None:
+        line += padding
+        padding = b''
+        assert base_value is not None
+        percentage = base_value * 100 // percentage_base
+        if percentage_key is not None:
+            line += b" (%3d%% of %s)" % (
+                percentage,
+                percentage_key,
+            )
+        else:
+            line += b" (%3d%%)" % percentage
+
+    line += b'\n'
+    ui.write_err(line)
+
+
+def _sumf(items):
+    # python < 3.8 does not support a `start=0.0` argument to sum
+    # So we have to cheat a bit until we drop support for those versions
+    if not items:
+        return 0.0
+    return sum(items)
+
+
+def display_unbundle_debug_info(ui, debug_info):
+    """display an unbundling report from debug information"""
+    cl_info = []
+    mn_info = []
+    fl_info = []
+    _dispatch = [
+        (b'CHANGELOG:', cl_info),
+        (b'MANIFESTLOG:', mn_info),
+        (b'FILELOG:', fl_info),
+    ]
+    for e in debug_info:
+        for prefix, info in _dispatch:
+            if e["target-revlog"].startswith(prefix):
+                info.append(e)
+                break
+        else:
+            assert False, 'unreachable'
+    each_info = [
+        (b'changelog', cl_info),
+        (b'manifests', mn_info),
+        (b'files', fl_info),
+    ]
+
+    # General Revision Counts
+    _dbg_ubdl_line(ui, 0, b'revisions', len(debug_info))
+    for key, info in each_info:
+        if not info:
+            continue
+        _dbg_ubdl_line(ui, 1, key, len(info), len(debug_info))
+
+    # General Time spent
+    all_durations = [e['duration'] for e in debug_info]
+    all_durations.sort()
+    total_duration = _sumf(all_durations)
+    _dbg_ubdl_line(ui, 0, b'total-time', total_duration)
+
+    for key, info in each_info:
+        if not info:
+            continue
+        durations = [e['duration'] for e in info]
+        durations.sort()
+        _dbg_ubdl_line(ui, 1, key, _sumf(durations), total_duration)
+
+    # Count and cache reuse per delta types
+    each_types = {}
+    for key, info in each_info:
+        each_types[key] = types = {
+            b'full': 0,
+            b'full-cached': 0,
+            b'snapshot': 0,
+            b'snapshot-cached': 0,
+            b'delta': 0,
+            b'delta-cached': 0,
+            b'unknown': 0,
+            b'unknown-cached': 0,
+        }
+        for e in info:
+            types[e['type']] += 1
+            if e['using-cached-base']:
+                types[e['type'] + b'-cached'] += 1
+
+    EXPECTED_TYPES = (b'full', b'snapshot', b'delta', b'unknown')
+    if debug_info:
+        _dbg_ubdl_line(ui, 0, b'type-count')
+    for key, info in each_info:
+        if info:
+            _dbg_ubdl_line(ui, 1, key)
+            t = each_types[key]
+            for tn in EXPECTED_TYPES:
+                if t[tn]:
+                    tc = tn + b'-cached'
+                    _dbg_ubdl_line(ui, 2, tn, t[tn])
+                    _dbg_ubdl_line(ui, 3, b'cached', t[tc], t[tn])
+
+    # time perf delta types and reuse
+    each_type_time = {}
+    for key, info in each_info:
+        each_type_time[key] = t = {
+            b'full': [],
+            b'full-cached': [],
+            b'snapshot': [],
+            b'snapshot-cached': [],
+            b'delta': [],
+            b'delta-cached': [],
+            b'unknown': [],
+            b'unknown-cached': [],
+        }
+        for e in info:
+            t[e['type']].append(e['duration'])
+            if e['using-cached-base']:
+                t[e['type'] + b'-cached'].append(e['duration'])
+        for t_key, value in list(t.items()):
+            value.sort()
+            t[t_key] = _sumf(value)
+
+    if debug_info:
+        _dbg_ubdl_line(ui, 0, b'type-time')
+    for key, info in each_info:
+        if info:
+            _dbg_ubdl_line(ui, 1, key)
+            t = each_type_time[key]
+            td = total_duration  # to save space on next lines
+            for tn in EXPECTED_TYPES:
+                if t[tn]:
+                    tc = tn + b'-cached'
+                    _dbg_ubdl_line(ui, 2, tn, t[tn], td, b"total")
+                    _dbg_ubdl_line(ui, 3, b'cached', t[tc], td, b"total")
+
+
 class cg1unpacker:
     """Unpacker for cg1 changegroup streams.
 
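The report is printed only when a debug knob is set; `cg1unpacker.apply()` below reads it with `configbool(b'debug', b'unbundling-stats')`. A hedged sketch of turning it on programmatically (the module in this hunk appears to be mercurial's changegroup module):

    # Sketch: enable per-revision unbundling statistics, then pull.
    # Equivalent CLI (assumed spelling):
    #   hg pull --config debug.unbundling-stats=yes
    from mercurial import hg, ui as uimod

    u = uimod.ui.load()
    u.setconfig(b'debug', b'unbundling-stats', b'yes')
    repo = hg.repository(u, b'.')
    # Any subsequent unbundle/pull on `repo` now emits
    # "DEBUG-UNBUNDLING: ..." lines on stderr.
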
@@ -254,7 +412,16 b' class cg1unpacker:'
             pos = next
         yield closechunk()
 
-    def _unpackmanifests(self, repo, revmap, trp, prog, addrevisioncb=None):
+    def _unpackmanifests(
+        self,
+        repo,
+        revmap,
+        trp,
+        prog,
+        addrevisioncb=None,
+        debug_info=None,
+        delta_base_reuse_policy=None,
+    ):
         self.callback = prog.increment
         # no need to check for empty manifest group here:
         # if the result of the merge of 1 and 2 is the same in 3 and 4,
@@ -263,7 +430,14 b' class cg1unpacker:'
         self.manifestheader()
         deltas = self.deltaiter()
         storage = repo.manifestlog.getstorage(b'')
-        storage.addgroup(deltas, revmap, trp, addrevisioncb=addrevisioncb)
+        storage.addgroup(
+            deltas,
+            revmap,
+            trp,
+            addrevisioncb=addrevisioncb,
+            debug_info=debug_info,
+            delta_base_reuse_policy=delta_base_reuse_policy,
+        )
         prog.complete()
         self.callback = None
 
@@ -276,6 +450,7 b' class cg1unpacker:'
         targetphase=phases.draft,
         expectedtotal=None,
         sidedata_categories=None,
+        delta_base_reuse_policy=None,
     ):
         """Add the changegroup returned by source.read() to this repo.
         srctype is a string like 'push', 'pull', or 'unbundle'. url is
@@ -289,9 +464,19 b' class cg1unpacker:'
 
         `sidedata_categories` is an optional set of the remote's sidedata wanted
         categories.
+
+        `delta_base_reuse_policy` is an optional argument; when set to a
+        value, it controls the way the deltas contained in the bundle are
+        reused when applied in the revlog.
+
+        See `DELTA_BASE_REUSE_*` entry in mercurial.revlogutils.constants.
         """
         repo = repo.unfiltered()
 
+        debug_info = None
+        if repo.ui.configbool(b'debug', b'unbundling-stats'):
+            debug_info = []
+
         # Only useful if we're adding sidedata categories. If both peers have
         # the same categories, then we simply don't do anything.
         adding_sidedata = (
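A hedged sketch of passing the new policy explicitly when applying a changegroup; the constant name below follows the `DELTA_BASE_REUSE_*` prefix the docstring cites and is an assumption:

    # Sketch: ask for deltas from the bundle to be re-evaluated instead of
    # reused as-is. DELTA_BASE_REUSE_NO is assumed from the cited prefix.
    from mercurial.revlogutils import constants

    def apply_without_reuse(cg, repo, tr, url):
        return cg.apply(
            repo,
            tr,
            b'unbundle',
            url,
            delta_base_reuse_policy=constants.DELTA_BASE_REUSE_NO,
        )
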
@@ -366,6 +551,8 b' class cg1unpacker:'
             alwayscache=True,
             addrevisioncb=onchangelog,
             duplicaterevisioncb=ondupchangelog,
+            debug_info=debug_info,
+            delta_base_reuse_policy=delta_base_reuse_policy,
         ):
             repo.ui.develwarn(
                 b'applied empty changelog from changegroup',
@@ -413,6 +600,8 b' class cg1unpacker:'
             trp,
             progress,
             addrevisioncb=on_manifest_rev,
+            debug_info=debug_info,
+            delta_base_reuse_policy=delta_base_reuse_policy,
         )
 
         needfiles = {}
@@ -449,6 +638,8 b' class cg1unpacker:'
             efiles,
             needfiles,
             addrevisioncb=on_filelog_rev,
+            debug_info=debug_info,
+            delta_base_reuse_policy=delta_base_reuse_policy,
         )
 
         if sidedata_helpers:
@@ -567,6 +758,8 b' class cg1unpacker:'
                 b'changegroup-runhooks-%020i' % clstart,
                 lambda tr: repo._afterlock(runhooks),
             )
+            if debug_info is not None:
+                display_unbundle_debug_info(repo.ui, debug_info)
         finally:
             repo.ui.flush()
         # never return 0 here:
@@ -626,9 +819,24 b' class cg3unpacker(cg2unpacker):'
             protocol_flags = 0
         return node, p1, p2, deltabase, cs, flags, protocol_flags
 
-    def _unpackmanifests(self, repo, revmap, trp, prog, addrevisioncb=None):
+    def _unpackmanifests(
+        self,
+        repo,
+        revmap,
+        trp,
+        prog,
+        addrevisioncb=None,
+        debug_info=None,
+        delta_base_reuse_policy=None,
+    ):
         super(cg3unpacker, self)._unpackmanifests(
-            repo, revmap, trp, prog, addrevisioncb=addrevisioncb
+            repo,
+            revmap,
+            trp,
+            prog,
+            addrevisioncb=addrevisioncb,
+            debug_info=debug_info,
+            delta_base_reuse_policy=delta_base_reuse_policy,
         )
         for chunkdata in iter(self.filelogheader, {}):
             # If we get here, there are directory manifests in the changegroup
@@ -636,7 +844,12 b' class cg3unpacker(cg2unpacker):'
             repo.ui.debug(b"adding %s revisions\n" % d)
             deltas = self.deltaiter()
             if not repo.manifestlog.getstorage(d).addgroup(
-                deltas, revmap, trp, addrevisioncb=addrevisioncb
+                deltas,
+                revmap,
+                trp,
+                addrevisioncb=addrevisioncb,
+                debug_info=debug_info,
+                delta_base_reuse_policy=delta_base_reuse_policy,
             ):
                 raise error.Abort(_(b"received dir revlog group is empty"))
 
@@ -869,6 +1082,7 b' def deltagroup('
     fullclnodes=None,
     precomputedellipsis=None,
     sidedata_helpers=None,
+    debug_info=None,
 ):
     """Calculate deltas for a set of revisions.
 
@@ -978,6 +1192,7 b' def deltagroup('
         assumehaveparentrevisions=not ellipses,
         deltamode=deltamode,
         sidedata_helpers=sidedata_helpers,
+        debug_info=debug_info,
     )
 
     for i, revision in enumerate(revisions):
@@ -1003,6 +1218,187 b' def deltagroup('
     progress.complete()
 
 
+def make_debug_info():
+    """build a "new" debug_info dictionary
+
+    That dictionary can be used to gather information about the bundle process
+    """
+    return {
+        'revision-total': 0,
+        'revision-changelog': 0,
+        'revision-manifest': 0,
+        'revision-files': 0,
+        'file-count': 0,
+        'merge-total': 0,
+        'available-delta': 0,
+        'available-full': 0,
+        'delta-against-prev': 0,
+        'delta-full': 0,
+        'delta-against-p1': 0,
+        'denied-delta-candeltafn': 0,
+        'denied-base-not-available': 0,
+        'reused-storage-delta': 0,
+        'computed-delta': 0,
+    }
+
+
+def merge_debug_info(base, other):
+    """merge the debug information from <other> into <base>
+
+    This function can be used to gather lower level information into higher
+    level ones.
+    """
+    for key in (
+        'revision-total',
+        'revision-changelog',
+        'revision-manifest',
+        'revision-files',
+        'merge-total',
+        'available-delta',
+        'available-full',
+        'delta-against-prev',
+        'delta-full',
+        'delta-against-p1',
+        'denied-delta-candeltafn',
+        'denied-base-not-available',
+        'reused-storage-delta',
+        'computed-delta',
+    ):
+        base[key] += other[key]
+
+
+_KEY_PART_WIDTH = 17
+
+
+def _dbg_bdl_line(
+    ui,
+    indent,
+    key,
+    base_value=None,
+    percentage_base=None,
+    percentage_key=None,
+    percentage_ref=None,
+    extra=None,
+):
+    """Print one line of debug_bundle_debug_info"""
+    line = b"DEBUG-BUNDLING: "
+    line += b' ' * (2 * indent)
+    key += b":"
+    if base_value is not None:
+        assert len(key) + 1 + (2 * indent) <= _KEY_PART_WIDTH
+        line += key.ljust(_KEY_PART_WIDTH - (2 * indent))
+        line += b"%10d" % base_value
+    else:
+        line += key
+
+    if percentage_base is not None:
+        assert base_value is not None
+        percentage = base_value * 100 // percentage_base
+        if percentage_key is not None:
+            line += b" (%d%% of %s %d)" % (
+                percentage,
+                percentage_key,
+                percentage_ref,
+            )
+        else:
+            line += b" (%d%%)" % percentage
+
+    if extra:
+        line += b" "
+        line += extra
+
+    line += b'\n'
+    ui.write_err(line)
+
+
+def display_bundling_debug_info(
+    ui,
+    debug_info,
+    cl_debug_info,
+    mn_debug_info,
+    fl_debug_info,
+):
+    """display debug information gathered during a bundling through `ui`"""
+    d = debug_info
+    c = cl_debug_info
+    m = mn_debug_info
+    f = fl_debug_info
+    all_info = [
+        (b"changelog", b"cl", c),
+        (b"manifests", b"mn", m),
+        (b"files", b"fl", f),
+    ]
+    _dbg_bdl_line(ui, 0, b'revisions', d['revision-total'])
+    _dbg_bdl_line(ui, 1, b'changelog', d['revision-changelog'])
+    _dbg_bdl_line(ui, 1, b'manifest', d['revision-manifest'])
+    extra = b'(for %d revlogs)' % d['file-count']
+    _dbg_bdl_line(ui, 1, b'files', d['revision-files'], extra=extra)
+    if d['merge-total']:
+        _dbg_bdl_line(ui, 1, b'merge', d['merge-total'], d['revision-total'])
+        for k, __, v in all_info:
+            if v['merge-total']:
+                _dbg_bdl_line(ui, 2, k, v['merge-total'], v['revision-total'])
+
+    _dbg_bdl_line(ui, 0, b'deltas')
+    _dbg_bdl_line(
+        ui,
+        1,
+        b'from-storage',
+        d['reused-storage-delta'],
+        percentage_base=d['available-delta'],
+        percentage_key=b"available",
+        percentage_ref=d['available-delta'],
+    )
+
+    if d['denied-delta-candeltafn']:
+        _dbg_bdl_line(ui, 2, b'denied-fn', d['denied-delta-candeltafn'])
+        for __, k, v in all_info:
+            if v['denied-delta-candeltafn']:
+                _dbg_bdl_line(ui, 3, k, v['denied-delta-candeltafn'])
+
+    if d['denied-base-not-available']:
+        _dbg_bdl_line(ui, 2, b'denied-nb', d['denied-base-not-available'])
+        for k, __, v in all_info:
+            if v['denied-base-not-available']:
+                _dbg_bdl_line(ui, 3, k, v['denied-base-not-available'])
+
+    if d['computed-delta']:
+        _dbg_bdl_line(ui, 1, b'computed', d['computed-delta'])
+
+    if d['available-full']:
+        _dbg_bdl_line(
+            ui,
+            2,
+            b'full',
+            d['delta-full'],
+            percentage_base=d['available-full'],
+            percentage_key=b"native",
+            percentage_ref=d['available-full'],
+        )
+        for k, __, v in all_info:
+            if v['available-full']:
+                _dbg_bdl_line(
+                    ui,
+                    3,
+                    k,
+                    v['delta-full'],
+                    percentage_base=v['available-full'],
+                    percentage_key=b"native",
+                    percentage_ref=v['available-full'],
+                )
+
+    if d['delta-against-prev']:
+        _dbg_bdl_line(ui, 2, b'previous', d['delta-against-prev'])
+        for k, __, v in all_info:
+            if v['delta-against-prev']:
+                _dbg_bdl_line(ui, 3, k, v['delta-against-prev'])
+
+    if d['delta-against-p1']:
+        _dbg_bdl_line(ui, 2, b'parent-1', d['delta-against-p1'])
+        for k, __, v in all_info:
+            if v['delta-against-p1']:
+                _dbg_bdl_line(ui, 3, k, v['delta-against-p1'])
+
+
 class cgpacker:
     def __init__(
         self,
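Mirroring the unbundling side, these helpers feed a second knob read in `cgpacker.generate()` just below via `configbool(b'debug', b'bundling-stats')`. A small sketch of the accumulator pattern; the module path is assumed from context:

    # Sketch: per-stream counters rolled up into a bundle-wide total.
    from mercurial.changegroup import make_debug_info, merge_debug_info

    total = make_debug_info()     # whole-bundle accumulator
    cl = make_debug_info()        # e.g. changelog-only counters
    cl['revision-total'] += 3
    merge_debug_info(total, cl)   # fold the changelog numbers in
    assert total['revision-total'] == 3
    # CLI equivalent (assumed spelling):
    #   hg bundle --config debug.bundling-stats=yes ...
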
@@ -1086,13 +1482,21 b' class cgpacker:'
         self._verbosenote = lambda s: None
 
     def generate(
-        self, commonrevs, clnodes, fastpathlinkrev, source, changelog=True
+        self,
+        commonrevs,
+        clnodes,
+        fastpathlinkrev,
+        source,
+        changelog=True,
     ):
         """Yield a sequence of changegroup byte chunks.
         If changelog is False, changelog data won't be added to changegroup
         """
 
+        debug_info = None
         repo = self._repo
+        if repo.ui.configbool(b'debug', b'bundling-stats'):
+            debug_info = make_debug_info()
         cl = repo.changelog
 
         self._verbosenote(_(b'uncompressed size of bundle content:\n'))
@@ -1107,14 +1511,19 b' class cgpacker:'
         # correctly advertise its sidedata categories directly.
         remote_sidedata = repo._wanted_sidedata
         sidedata_helpers = sidedatamod.get_sidedata_helpers(
-            repo, remote_sidedata
+            repo,
+            remote_sidedata,
         )
 
+        cl_debug_info = None
+        if debug_info is not None:
+            cl_debug_info = make_debug_info()
         clstate, deltas = self._generatechangelog(
             cl,
             clnodes,
             generate=changelog,
             sidedata_helpers=sidedata_helpers,
+            debug_info=cl_debug_info,
         )
         for delta in deltas:
             for chunk in _revisiondeltatochunks(
@@ -1126,6 +1535,9 b' class cgpacker:'
         close = closechunk()
         size += len(close)
         yield closechunk()
+        if debug_info is not None:
+            merge_debug_info(debug_info, cl_debug_info)
+            debug_info['revision-changelog'] = cl_debug_info['revision-total']
 
         self._verbosenote(_(b'%8.i (changelog)\n') % size)
 
@@ -1133,6 +1545,9 b' class cgpacker:'
         manifests = clstate[b'manifests']
         changedfiles = clstate[b'changedfiles']
 
+        if debug_info is not None:
+            debug_info['file-count'] = len(changedfiles)
+
         # We need to make sure that the linkrev in the changegroup refers to
         # the first changeset that introduced the manifest or file revision.
         # The fastpath is usually safer than the slowpath, because the filelogs
@@ -1156,6 +1571,9 b' class cgpacker:'
         fnodes = {}  # needed file nodes
 
         size = 0
+        mn_debug_info = None
+        if debug_info is not None:
+            mn_debug_info = make_debug_info()
         it = self.generatemanifests(
             commonrevs,
             clrevorder,
@@ -1165,6 +1583,7 b' class cgpacker:'
             source,
             clstate[b'clrevtomanifestrev'],
             sidedata_helpers=sidedata_helpers,
+            debug_info=mn_debug_info,
         )
 
         for tree, deltas in it:
@@ -1185,6 +1604,9 b' class cgpacker:'
             close = closechunk()
             size += len(close)
             yield close
+        if debug_info is not None:
+            merge_debug_info(debug_info, mn_debug_info)
+            debug_info['revision-manifest'] = mn_debug_info['revision-total']
 
         self._verbosenote(_(b'%8.i (manifests)\n') % size)
         yield self._manifestsend
@@ -1199,6 +1621,9 b' class cgpacker:'
         manifests.clear()
         clrevs = {cl.rev(x) for x in clnodes}
 
+        fl_debug_info = None
+        if debug_info is not None:
+            fl_debug_info = make_debug_info()
         it = self.generatefiles(
             changedfiles,
             commonrevs,
@@ -1208,6 +1633,7 b' class cgpacker:'
             fnodes,
             clrevs,
             sidedata_helpers=sidedata_helpers,
+            debug_info=fl_debug_info,
         )
 
         for path, deltas in it:
@@ -1230,12 +1656,29 b' class cgpacker:'
             self._verbosenote(_(b'%8.i %s\n') % (size, path))
 
         yield closechunk()
+        if debug_info is not None:
+            merge_debug_info(debug_info, fl_debug_info)
+            debug_info['revision-files'] = fl_debug_info['revision-total']
+
+        if debug_info is not None:
+            display_bundling_debug_info(
+                repo.ui,
+                debug_info,
+                cl_debug_info,
+                mn_debug_info,
+                fl_debug_info,
+            )
 
         if clnodes:
             repo.hook(b'outgoing', node=hex(clnodes[0]), source=source)
 
     def _generatechangelog(
-        self, cl, nodes, generate=True, sidedata_helpers=None
+        self,
+        cl,
+        nodes,
+        generate=True,
+        sidedata_helpers=None,
+        debug_info=None,
     ):
         """Generate data for changelog chunks.
 
@@ -1332,6 +1775,7 b' class cgpacker:'
             fullclnodes=self._fullclnodes,
             precomputedellipsis=self._precomputedellipsis,
             sidedata_helpers=sidedata_helpers,
+            debug_info=debug_info,
         )
 
         return state, gen
@@ -1346,6 +1790,7 b' class cgpacker:'
         source,
         clrevtolocalrev,
         sidedata_helpers=None,
+        debug_info=None,
     ):
         """Returns an iterator of changegroup chunks containing manifests.
 
@@ -1444,6 +1889,7 b' class cgpacker:'
             fullclnodes=self._fullclnodes,
             precomputedellipsis=self._precomputedellipsis,
             sidedata_helpers=sidedata_helpers,
+            debug_info=debug_info,
         )
 
         if not self._oldmatcher.visitdir(store.tree[:-1]):
@@ -1483,6 +1929,7 b' class cgpacker:'
         fnodes,
         clrevs,
         sidedata_helpers=None,
+        debug_info=None,
     ):
         changedfiles = [
             f
@@ -1578,6 +2025,7 b' class cgpacker:'
             fullclnodes=self._fullclnodes,
             precomputedellipsis=self._precomputedellipsis,
             sidedata_helpers=sidedata_helpers,
+            debug_info=debug_info,
         )
 
         yield fname, deltas
@@ -1867,7 +2315,12 b' def _changegroupinfo(repo, nodes, source'
 
 
 def makechangegroup(
-    repo, outgoing, version, source, fastpath=False, bundlecaps=None
+    repo,
+    outgoing,
+    version,
+    source,
+    fastpath=False,
+    bundlecaps=None,
 ):
     cgstream = makestream(
         repo,
@@ -1917,7 +2370,12 b' def makestream('
 
     repo.hook(b'preoutgoing', throw=True, source=source)
     _changegroupinfo(repo, csets, source)
-    return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
+    return bundler.generate(
+        commonrevs,
+        csets,
+        fastpathlinkrev,
+        source,
+    )
 
 
 def _addchangegroupfiles(
@@ -1928,6 +2386,8 b' def _addchangegroupfiles('
     expectedfiles,
     needfiles,
     addrevisioncb=None,
+    debug_info=None,
+    delta_base_reuse_policy=None,
 ):
     revisions = 0
     files = 0
@@ -1948,6 +2408,8 b' def _addchangegroupfiles('
             revmap,
             trp,
             addrevisioncb=addrevisioncb,
+            debug_info=debug_info,
+            delta_base_reuse_policy=delta_base_reuse_policy,
         )
         if not added:
             raise error.Abort(_(b"received file revlog group is empty"))
@@ -11,6 +11,15 b' import errno'
 import os
 import re
 
+from typing import (
+    Any,
+    AnyStr,
+    Dict,
+    Iterable,
+    Optional,
+    cast,
+)
+
 from .i18n import _
 from .node import (
     hex,
@@ -29,7 +38,6 b' from . import ('
     changelog,
     copies,
     crecord as crecordmod,
-    dirstateguard,
     encoding,
     error,
     formatter,
@@ -65,14 +73,10 b' from .revlogutils import ('
 )
 
 if pycompat.TYPE_CHECKING:
-    from typing import (
-        Any,
-        Dict,
-    )
+    from . import (
+        ui as uimod,
+    )
 
-    for t in (Any, Dict):
-        assert t
-
 stringio = util.stringio
 
 # templates of common command options
@@ -269,13 +273,16 b' debugrevlogopts = ['
 _linebelow = b"^HG: ------------------------ >8 ------------------------$"
 
 
-def check_at_most_one_arg(opts, *args):
+def check_at_most_one_arg(
+    opts: Dict[AnyStr, Any],
+    *args: AnyStr,
+) -> Optional[AnyStr]:
     """abort if more than one of the arguments are in opts
 
     Returns the unique argument or None if none of them were specified.
     """
 
-    def to_display(name):
+    def to_display(name: AnyStr) -> bytes:
         return pycompat.sysbytes(name).replace(b'_', b'-')
 
     previous = None
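For reference, how the newly annotated helper behaves; keys may be str or bytes per `AnyStr`, and only truthy values count as "specified":

    # Usage sketch; the error text is indicative, not quoted verbatim.
    from mercurial import cmdutil

    opts = {b'message': b'fix a bug', b'logfile': b''}
    picked = cmdutil.check_at_most_one_arg(opts, b'message', b'logfile')
    assert picked == b'message'  # logfile is falsy, so no conflict
    # With both set, it aborts along the lines of
    # "cannot specify both --message and --logfile".
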
@@ -290,7 +297,11 b' def check_at_most_one_arg(opts, *args):'
     return previous
 
 
-def check_incompatible_arguments(opts, first, others):
+def check_incompatible_arguments(
+    opts: Dict[AnyStr, Any],
+    first: AnyStr,
+    others: Iterable[AnyStr],
+) -> None:
     """abort if the first argument is given along with any of the others
 
     Unlike check_at_most_one_arg(), `others` are not mutually exclusive
@@ -300,7 +311,7 b' def check_incompatible_arguments(opts, f'
         check_at_most_one_arg(opts, first, other)
 
 
-def resolve_commit_options(ui, opts):
+def resolve_commit_options(ui: "uimod.ui", opts: Dict[str, Any]) -> bool:
     """modify commit options dict to handle related options
 
     The return value indicates that ``rewrite.update-timestamp`` is the reason
@@ -327,7 +338,7 b' def resolve_commit_options(ui, opts):'
     return datemaydiffer
 
 
-def check_note_size(opts):
+def check_note_size(opts: Dict[str, Any]) -> None:
     """make sure note is of valid format"""
 
     note = opts.get('note')
@@ -638,7 +649,7 b' def dorecord('
                 # already called within a `pendingchange`, However we
                 # are taking a shortcut here in order to be able to
                 # quickly deprecated the older API.
-                with dirstate.parentchange(repo):
+                with dirstate.changing_parents(repo):
                     dirstate.update_file(
                         realname,
                         p1_tracked=True,
1115 | ctx.sub(s).bailifchanged(hint=hint) |
|
1126 | ctx.sub(s).bailifchanged(hint=hint) | |
1116 |
|
1127 | |||
1117 |
|
1128 | |||
1118 | def logmessage(ui, opts): |
|
1129 | def logmessage(ui: "uimod.ui", opts: Dict[bytes, Any]) -> Optional[bytes]: | |
1119 | """get the log message according to -m and -l option""" |
|
1130 | """get the log message according to -m and -l option""" | |
1120 |
|
1131 | |||
1121 | check_at_most_one_arg(opts, b'message', b'logfile') |
|
1132 | check_at_most_one_arg(opts, b'message', b'logfile') | |
1122 |
|
1133 | |||
1123 | message = opts.get(b'message') |
|
1134 | message = cast(Optional[bytes], opts.get(b'message')) | |
1124 | logfile = opts.get(b'logfile') |
|
1135 | logfile = opts.get(b'logfile') | |
1125 |
|
1136 | |||
1126 | if not message and logfile: |
|
1137 | if not message and logfile: | |
@@ -1465,7 +1476,7 b' def openrevlog(repo, cmd, file_, opts):'
     return openstorage(repo, cmd, file_, opts, returnrevlog=True)


-def copy(ui, repo, pats, opts, rename=False):
+def copy(ui, repo, pats, opts: Dict[bytes, Any], rename=False):
     check_incompatible_arguments(opts, b'forget', [b'dry_run'])

     # called with the repo lock held
@@ -1532,7 +1543,7 b' def copy(ui, repo, pats, opts, rename=Fa'
         new_node = mem_ctx.commit()

         if repo.dirstate.p1() == ctx.node():
-            with repo.dirstate.
+            with repo.dirstate.changing_parents(repo):
                 scmutil.movedirstate(repo, repo[new_node])
             replacements = {ctx.node(): [new_node]}
             scmutil.cleanupnodes(
@@ -1625,7 +1636,7 b' def copy(ui, repo, pats, opts, rename=Fa'
         new_node = mem_ctx.commit()

         if repo.dirstate.p1() == ctx.node():
-            with repo.dirstate.
+            with repo.dirstate.changing_parents(repo):
                 scmutil.movedirstate(repo, repo[new_node])
             replacements = {ctx.node(): [new_node]}
             scmutil.cleanupnodes(repo, replacements, b'copy', fixphase=True)
@@ -2778,7 +2789,7 b' def cat(ui, repo, ctx, matcher, basefm, '
                 basefm,
                 fntemplate,
                 subprefix,
-                **pycompat.strkwargs(opts)
+                **pycompat.strkwargs(opts),
             ):
                 err = 0
         except error.RepoLookupError:
@@ -2789,29 +2800,135 b' def cat(ui, repo, ctx, matcher, basefm, '
     return err


+class _AddRemoveContext:
+    """a small (hacky) context manager for lazily opened contexts
+
+    This is to be used in the `commit` function right below. It deals with
+    lazily opening a `changing_files` context inside a `transaction` that
+    spans the full commit operation.
+
+    We need:
+    - a `changing_files` context to wrap the dirstate change within the
+      "addremove" operation,
+    - a transaction to make sure these changes are not written right after
+      the addremove, but only when the commit operation succeeds.
+
+    However it gets complicated because:
+    - opening a transaction "this early" shuffles the hook order, especially
+      the `precommit` one now happening after the `pretxnopen` one, which I
+      am not too enthusiastic about.
+    - the `mq` extension + the `record` extension stack many layers of calls
+      to implement `qrefresh --interactive`, and this results in `mq` calling
+      `strip` in the middle of this function, which prevents wrapping all of
+      this function's code in a transaction. (However, `qrefresh` never calls
+      the `addremove` bits.)
+    - the largefiles extension (and maybe other extensions?) wraps
+      `addremove`, so slicing `addremove` into smaller bits is a complex
+      endeavour.
+
+    So I eventually took the shortcut of opening the transaction only if we
+    actually need it, without disturbing much of the rest of the code.
+
+    This will result in some hook order changes for `hg commit --addremove`,
+    but that seems enough of a corner case to ignore for now (hopefully).
+
+    Note that none of the above problems seems insurmountable; however, I
+    have been fighting with this specific piece of code for a couple of days
+    already and I need a solution to keep moving forward on the bigger work
+    around the `changing_files` context that is being introduced at the same
+    time as this hack.
+
+    Each problem seems to have a solution:
+    - the hook order issue could be solved by refactoring the many-layer
+      stack that currently composes a commit and calling the layers earlier,
+    - the mq issue could be solved by refactoring `mq` so that the final
+      strip is done after transaction closure (be warned that the mq code is
+      quite antique, however),
+    - largefiles could be reworked in parallel with `addremove` to be
+      friendlier to this.
+
+    However, each of these tasks is too much of a diversion right now. In
+    addition, they will be much easier to undertake once the
+    `changing_files` dust has settled."""
+
+    def __init__(self, repo):
+        self._repo = repo
+        self._transaction = None
+        self._dirstate_context = None
+        self._state = None
+
+    def __enter__(self):
+        assert self._state is None
+        self._state = True
+        return self
+
+    def open_transaction(self):
+        """open a `transaction` and `changing_files` context
+
+        Call this when you know that changes to the dirstate will be needed
+        and we need to open the transaction early.
+
+        This will also open the dirstate `changing_files` context, so you
+        should call `close_dirstate_context` when the dirstate changes are
+        done.
+        """
+        assert self._state is not None
+        if self._transaction is None:
+            self._transaction = self._repo.transaction(b'commit')
+            self._transaction.__enter__()
+        if self._dirstate_context is None:
+            self._dirstate_context = self._repo.dirstate.changing_files(
+                self._repo
+            )
+            self._dirstate_context.__enter__()
+
+    def close_dirstate_context(self):
+        """close the `changing_files` context, if any
+
+        Call this after the (potential) `open_transaction` call to close the
+        (potential) changing_files context.
+        """
+        if self._dirstate_context is not None:
+            self._dirstate_context.__exit__(None, None, None)
+            self._dirstate_context = None
+
+    def __exit__(self, *args):
+        if self._dirstate_context is not None:
+            self._dirstate_context.__exit__(*args)
+        if self._transaction is not None:
+            self._transaction.__exit__(*args)
+
+
 def commit(ui, repo, commitfunc, pats, opts):
     '''commit the specified files or all outstanding changes'''
     date = opts.get(b'date')
     if date:
         opts[b'date'] = dateutil.parsedate(date)
-    message = logmessage(ui, opts)
-    matcher = scmutil.match(repo[None], pats, opts)
-
-    dsguard = None
-    # extract addremove carefully -- this function can be called from a command
-    # that doesn't support addremove
-    if opts.get(b'addremove'):
-        dsguard = dirstateguard.dirstateguard(repo, b'commit')
-    with dsguard or util.nullcontextmanager():
-        if dsguard:
-            relative = scmutil.anypats(pats, opts)
-            uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
-            if scmutil.addremove(repo, matcher, b"", uipathfn, opts) != 0:
-                raise error.Abort(
-                    _(b"failed to mark all new/missing files as added/removed")
-                )
-
-        return commitfunc(ui, repo, message, matcher, opts)
+
+    with repo.wlock(), repo.lock():
+        message = logmessage(ui, opts)
+        matcher = scmutil.match(repo[None], pats, opts)
+
+        with _AddRemoveContext(repo) as c:
+            # extract addremove carefully -- this function can be called
+            # from a command that doesn't support addremove
+            if opts.get(b'addremove'):
+                relative = scmutil.anypats(pats, opts)
+                uipathfn = scmutil.getuipathfn(
+                    repo,
+                    legacyrelativevalue=relative,
+                )
+                r = scmutil.addremove(
+                    repo,
+                    matcher,
+                    b"",
+                    uipathfn,
+                    opts,
+                    open_tr=c.open_transaction,
+                )
+                m = _(b"failed to mark all new/missing files as added/removed")
+                if r != 0:
+                    raise error.Abort(m)
+            c.close_dirstate_context()
+            return commitfunc(ui, repo, message, matcher, opts)


 def samefile(f, ctx1, ctx2):
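To make the calling protocol of `_AddRemoveContext` concrete, here is a minimal runnable sketch. `FakeRepo` and `FakeDirstate` are toy stand-ins invented for the example (not Mercurial APIs), and the `_AddRemoveContext` class from the hunk above is assumed to be in scope:

    import contextlib

    class FakeDirstate:
        @contextlib.contextmanager
        def changing_files(self, repo):
            print('changing_files: enter')
            yield
            print('changing_files: exit')

    class FakeRepo:
        dirstate = FakeDirstate()

        @contextlib.contextmanager
        def transaction(self, name):
            print('transaction %r: enter' % name)
            yield
            print('transaction %r: exit' % name)

    repo = FakeRepo()
    with _AddRemoveContext(repo) as c:
        # addremove calls back into `open_transaction` the first time it
        # actually touches the dirstate; if it never does, neither the
        # transaction nor the changing_files context is opened at all.
        c.open_transaction()
        c.close_dirstate_context()
        # ... commitfunc(...) would run here, inside the still-open
        # transaction, so dirstate writes land only if the commit succeeds.

The design point is that the transaction's lifetime outlives the dirstate context: `close_dirstate_context()` flushes the file-change tracking early, while the transaction itself only closes when the outer `with` exits.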
@@ -2826,7 +2943,7 b' def samefile(f, ctx1, ctx2):'
         return f not in ctx2.manifest()


-def amend(ui, repo, old, extra, pats, opts):
+def amend(ui, repo, old, extra, pats, opts: Dict[str, Any]):
     # avoid cycle context -> subrepo -> cmdutil
     from . import context

@@ -2880,12 +2997,13 b' def amend(ui, repo, old, extra, pats, op'
         matcher = scmutil.match(wctx, pats, opts)
         relative = scmutil.anypats(pats, opts)
         uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
-        if opts.get(b'addremove') and scmutil.addremove(
-            repo, matcher, b"", uipathfn, opts
-        ):
-            raise error.Abort(
-                _(b"failed to mark all new/missing files as added/removed")
-            )
+        if opts.get(b'addremove'):
+            with repo.dirstate.changing_files(repo):
+                if scmutil.addremove(repo, matcher, b"", uipathfn, opts) != 0:
+                    m = _(
+                        b"failed to mark all new/missing files as added/removed"
+                    )
+                    raise error.Abort(m)

         # Check subrepos. This depends on in-place wctx._status update in
         # subrepo.precommit(). To minimize the risk of this hack, we do
@@ -3019,10 +3137,12 b' def amend(ui, repo, old, extra, pats, op'
         commitphase = None
         if opts.get(b'secret'):
             commitphase = phases.secret
+        elif opts.get(b'draft'):
+            commitphase = phases.draft
         newid = repo.commitctx(new)
         ms.reset()

-        with repo.dirstate.
+        with repo.dirstate.changing_parents(repo):
             # Reroute the working copy parent to the new changeset
             repo.setparents(newid, repo.nullid)

@@ -3285,7 +3405,7 b' def revert(ui, repo, ctx, *pats, **opts)'
     names = {}
     uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)

-    with repo.wlock():
+    with repo.wlock(), repo.dirstate.changing_files(repo):
         ## filling of the `names` mapping
         # walk dirstate to fill `names`

@@ -13,6 +13,7 b' import sys'
 from .i18n import _
 from .node import (
     hex,
+    nullid,
     nullrev,
     short,
     wdirrev,
@@ -28,7 +29,6 b' from . import ('
     copies,
     debugcommands as debugcommandsmod,
     destutil,
-    dirstateguard,
     discovery,
     encoding,
     error,
@@ -252,10 +252,11 b' def add(ui, repo, *pats, **opts):'
     Returns 0 if all files are successfully added.
     """

-    m = scmutil.match(repo[None], pats, pycompat.byteskwargs(opts))
-    uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
-    rejected = cmdutil.add(ui, repo, m, b"", uipathfn, False, **opts)
-    return rejected and 1 or 0
+    with repo.wlock(), repo.dirstate.changing_files(repo):
+        m = scmutil.match(repo[None], pats, pycompat.byteskwargs(opts))
+        uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
+        rejected = cmdutil.add(ui, repo, m, b"", uipathfn, False, **opts)
+        return rejected and 1 or 0


 @command(
@@ -330,10 +331,11 b' def addremove(ui, repo, *pats, **opts):'
     opts = pycompat.byteskwargs(opts)
     if not opts.get(b'similarity'):
         opts[b'similarity'] = b'100'
-    matcher = scmutil.match(repo[None], pats, opts)
-    relative = scmutil.anypats(pats, opts)
-    uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
-    return scmutil.addremove(repo, matcher, b"", uipathfn, opts)
+    with repo.wlock(), repo.dirstate.changing_files(repo):
+        matcher = scmutil.match(repo[None], pats, opts)
+        relative = scmutil.anypats(pats, opts)
+        uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
+        return scmutil.addremove(repo, matcher, b"", uipathfn, opts)


 @command(
@@ -822,7 +824,7 b' def _dobackout(ui, repo, node=None, rev='
     bheads = repo.branchheads(branch)
     rctx = scmutil.revsingle(repo, hex(parent))
     if not opts.get(b'merge') and op1 != node:
-        with dirstateguard.dirstateguard(repo, b'backout'):
+        with repo.transaction(b"backout"):
             overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
             with ui.configoverride(overrides, b'backout'):
                 stats = mergemod.back_out(ctx, parent=repo[parent])
@@ -1635,7 +1637,7 b' def bundle(ui, repo, fname, *dests, **op'
     missing = set()
     excluded = set()
     for path in urlutil.get_push_paths(repo, ui, dests):
-        other = hg.peer(repo, opts, path
+        other = hg.peer(repo, opts, path)
         if revs is not None:
             hex_revs = [repo[r].hex() for r in revs]
         else:
@@ -2008,6 +2010,7 b' def clone(ui, source, dest=None, **opts)'
     (b'', b'close-branch', None, _(b'mark a branch head as closed')),
     (b'', b'amend', None, _(b'amend the parent of the working directory')),
     (b's', b'secret', None, _(b'use the secret phase for committing')),
+    (b'', b'draft', None, _(b'use the draft phase for committing')),
     (b'e', b'edit', None, _(b'invoke editor on commit messages')),
     (
         b'',
@@ -2082,6 +2085,8 b' def commit(ui, repo, *pats, **opts):'

         hg commit --amend --date now
     """
+    cmdutil.check_at_most_one_arg(opts, 'draft', 'secret')
+    cmdutil.check_incompatible_arguments(opts, 'subrepos', ['amend'])
     with repo.wlock(), repo.lock():
         return _docommit(ui, repo, *pats, **opts)

@@ -2097,7 +2102,6 b' def _docommit(ui, repo, *pats, **opts):'
         return 1 if ret == 0 else ret

     if opts.get('subrepos'):
-        cmdutil.check_incompatible_arguments(opts, 'subrepos', ['amend'])
         # Let --subrepos on the command line override config setting.
         ui.setconfig(b'ui', b'commitsubrepos', True, b'commit')

@@ -2174,6 +2178,8 b' def _docommit(ui, repo, *pats, **opts):'
     overrides = {}
     if opts.get(b'secret'):
         overrides[(b'phases', b'new-commit')] = b'secret'
+    elif opts.get(b'draft'):
+        overrides[(b'phases', b'new-commit')] = b'draft'

     baseui = repo.baseui
     with baseui.configoverride(overrides, b'commit'):
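For usage context: combined with the `check_at_most_one_arg(opts, 'draft', 'secret')` guard added in the `hg commit` entry point above, this override makes the new `--draft` flag behave, in effect, as shorthand for `hg commit --config phases.new-commit=draft`, mirroring what `--secret` already did for the secret phase.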
@@ -2491,7 +2497,19 b' def copy(ui, repo, *pats, **opts):'
     Returns 0 on success, 1 if errors are encountered.
     """
     opts = pycompat.byteskwargs(opts)
-    with repo.wlock():
+
+    context = repo.dirstate.changing_files
+    rev = opts.get(b'at_rev')
+    ctx = None
+    if rev:
+        ctx = logcmdutil.revsingle(repo, rev)
+        if ctx.rev() is not None:
+
+            def context(repo):
+                return util.nullcontextmanager()
+
+        opts[b'at_rev'] = ctx.rev()
+    with repo.wlock(), context(repo):
         return cmdutil.copy(ui, repo, pats, opts)

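The shadowing trick above — `context` starts out as the bound `changing_files` method and is replaced by a no-op factory when `--at-rev` targets an already-committed revision (which never touches the dirstate) — reduces to this minimal sketch. `ToyDirstate` is invented for the example, and `contextlib.nullcontext` stands in for Mercurial's `util.nullcontextmanager`:

    import contextlib

    class ToyDirstate:
        @contextlib.contextmanager
        def changing_files(self, repo):
            print('dirstate will be written')
            yield

    dirstate = ToyDirstate()
    context = dirstate.changing_files  # default: a real dirstate context

    copying_inside_committed_rev = True
    if copying_inside_committed_rev:
        def context(repo):  # shadow with a no-op factory
            return contextlib.nullcontext()

    with context('repo-placeholder'):
        pass  # cmdutil.copy(...) would run here

Either way the call site stays a uniform `with repo.wlock(), context(repo):`, which is why the rebinding is done on a name rather than branching at the `with` statement.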
@@ -2960,19 +2978,20 b' def forget(ui, repo, *pats, **opts):'
     if not pats:
         raise error.InputError(_(b'no files specified'))

-    m = scmutil.match(repo[None], pats, opts)
-    dryrun, interactive = opts.get(b'dry_run'), opts.get(b'interactive')
-    uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
-    rejected = cmdutil.forget(
-        ui,
-        repo,
-        m,
-        prefix=b"",
-        uipathfn=uipathfn,
-        explicitonly=False,
-        dryrun=dryrun,
-        interactive=interactive,
-    )[0]
+    with repo.wlock(), repo.dirstate.changing_files(repo):
+        m = scmutil.match(repo[None], pats, opts)
+        dryrun, interactive = opts.get(b'dry_run'), opts.get(b'interactive')
+        uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
+        rejected = cmdutil.forget(
+            ui,
+            repo,
+            m,
+            prefix=b"",
+            uipathfn=uipathfn,
+            explicitonly=False,
+            dryrun=dryrun,
+            interactive=interactive,
+        )[0]
     return rejected and 1 or 0

@@ -3911,12 +3930,11 b' def identify('
     peer = None
     try:
         if source:
-            source, branches = urlutil.get_unique_pull_path(
-                b'identify', repo, ui, source
-            )
+            path = urlutil.get_unique_pull_path_obj(b'identify', ui, source)
             # only pass ui when no repo
-            peer = hg.peer(repo or ui, opts, source)
+            peer = hg.peer(repo or ui, opts, path)
             repo = peer.local()
+            branches = (path.branch, [])
             revs, checkout = hg.addbranchrevs(repo, peer, branches, None)

         fm = ui.formatter(b'identify', opts)
4229 | if not opts.get(b'no_commit'): |
|
4247 | if not opts.get(b'no_commit'): | |
4230 | lock = repo.lock |
|
4248 | lock = repo.lock | |
4231 | tr = lambda: repo.transaction(b'import') |
|
4249 | tr = lambda: repo.transaction(b'import') | |
4232 | dsguard = util.nullcontextmanager |
|
|||
4233 | else: |
|
4250 | else: | |
4234 | lock = util.nullcontextmanager |
|
4251 | lock = util.nullcontextmanager | |
4235 | tr = util.nullcontextmanager |
|
4252 | tr = util.nullcontextmanager | |
4236 | dsguard = lambda: dirstateguard.dirstateguard(repo, b'import') |
|
4253 | with lock(), tr(): | |
4237 | with lock(), tr(), dsguard(): |
|
|||
4238 | parents = repo[None].parents() |
|
4254 | parents = repo[None].parents() | |
4239 | for patchurl in patches: |
|
4255 | for patchurl in patches: | |
4240 | if patchurl == b'-': |
|
4256 | if patchurl == b'-': | |
@@ -4383,17 +4399,15 b' def incoming(ui, repo, source=b"default"'
     if opts.get(b'bookmarks'):
         srcs = urlutil.get_pull_paths(repo, ui, [source])
         for path in srcs:
-            source, branches = urlutil.parseurl(
-                path.rawloc, opts.get(b'branch')
-            )
-            other = hg.peer(repo, opts, source)
+            # XXX the "branches" options are not used. Should it be used?
+            other = hg.peer(repo, opts, path)
             try:
                 if b'bookmarks' not in other.listkeys(b'namespaces'):
                     ui.warn(_(b"remote doesn't support bookmarks\n"))
                     return 0
                 ui.pager(b'incoming')
                 ui.status(
-                    _(b'comparing with %s\n') % urlutil.hidepassword(
+                    _(b'comparing with %s\n') % urlutil.hidepassword(path.loc)
                 )
                 return bookmarks.incoming(
                     ui, repo, other, mode=path.bookmarks_mode
@@ -4426,7 +4440,7 b' def init(ui, dest=b".", **opts):'
     Returns 0 on success.
     """
     opts = pycompat.byteskwargs(opts)
-    path = urlutil.get_clone_path(ui, dest)
+    path = urlutil.get_clone_path_obj(ui, dest)
     peer = hg.peer(ui, opts, path, create=True)
     peer.close()

@@ -5038,14 +5052,13 b' def outgoing(ui, repo, *dests, **opts):'
     opts = pycompat.byteskwargs(opts)
     if opts.get(b'bookmarks'):
         for path in urlutil.get_push_paths(repo, ui, dests):
-            dest = path.pushloc or path.loc
-            other = hg.peer(repo, opts, dest)
+            other = hg.peer(repo, opts, path)
             try:
                 if b'bookmarks' not in other.listkeys(b'namespaces'):
                     ui.warn(_(b"remote doesn't support bookmarks\n"))
                     return 0
                 ui.status(
-                    _(b'comparing with %s\n') % urlutil.hidepassword(
+                    _(b'comparing with %s\n') % urlutil.hidepassword(path.loc)
                 )
                 ui.pager(b'outgoing')
                 return bookmarks.outgoing(ui, repo, other)
@@ -5434,12 +5447,12 b' def pull(ui, repo, *sources, **opts):'
         raise error.InputError(msg, hint=hint)

     for path in urlutil.get_pull_paths(repo, ui, sources):
-        source, branches = urlutil.parseurl(path.rawloc, opts.get(b'branch'))
-        ui.status(_(b'pulling from %s\n') % urlutil.hidepassword(source))
+        ui.status(_(b'pulling from %s\n') % urlutil.hidepassword(path.loc))
         ui.flush()
-        other = hg.peer(repo, opts,
+        other = hg.peer(repo, opts, path)
         update_conflict = None
         try:
+            branches = (path.branch, opts.get(b'branch', []))
             revs, checkout = hg.addbranchrevs(
                 repo, other, branches, opts.get(b'rev')
             )
@@ -5515,8 +5528,12 b' def pull(ui, repo, *sources, **opts):'
         elif opts.get(b'branch'):
             brev = opts[b'branch'][0]
         else:
-            brev = branch
-        repo._subtoppath = source
+            brev = path.branch
+
+        # XXX path: we are losing the `path` object here. Keeping it
+        # would be valuable. For example as a "variant" as we do
+        # for pushes.
+        repo._subtoppath = path.loc
         try:
             update_conflict = postincoming(
                 ui, repo, modheads, opts.get(b'update'), checkout, brev
@@ -5766,7 +5783,7 b' def push(ui, repo, *dests, **opts):'
     some_pushed = False
     result = 0
     for path in urlutil.get_push_paths(repo, ui, dests):
-        dest = path.
+        dest = path.loc
         branches = (path.branch, opts.get(b'branch') or [])
         ui.status(_(b'pushing to %s\n') % urlutil.hidepassword(dest))
         revs, checkout = hg.addbranchrevs(
5940 | if not pats and not after: |
|
5957 | if not pats and not after: | |
5941 | raise error.InputError(_(b'no files specified')) |
|
5958 | raise error.InputError(_(b'no files specified')) | |
5942 |
|
5959 | |||
5943 | m = scmutil.match(repo[None], pats, opts) |
|
5960 | with repo.wlock(), repo.dirstate.changing_files(repo): | |
5944 | subrepos = opts.get(b'subrepos') |
|
5961 | m = scmutil.match(repo[None], pats, opts) | |
5945 | uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True) |
|
5962 | subrepos = opts.get(b'subrepos') | |
5946 | return cmdutil.remove( |
|
5963 | uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True) | |
5947 | ui, repo, m, b"", uipathfn, after, force, subrepos, dryrun=dryrun |
|
5964 | return cmdutil.remove( | |
5948 | ) |
|
5965 | ui, repo, m, b"", uipathfn, after, force, subrepos, dryrun=dryrun | |
|
5966 | ) | |||
5949 |
|
5967 | |||
5950 |
|
5968 | |||
5951 | @command( |
|
5969 | @command( | |
@@ -5994,7 +6012,18 b' def rename(ui, repo, *pats, **opts):'
     Returns 0 on success, 1 if errors are encountered.
     """
     opts = pycompat.byteskwargs(opts)
-    with repo.wlock():
+    context = repo.dirstate.changing_files
+    rev = opts.get(b'at_rev')
+    ctx = None
+    if rev:
+        ctx = logcmdutil.revsingle(repo, rev)
+        if ctx.rev() is not None:
+
+            def context(repo):
+                return util.nullcontextmanager()
+
+        opts[b'at_rev'] = ctx.rev()
+    with repo.wlock(), context(repo):
         return cmdutil.copy(ui, repo, pats, opts, rename=True)

@@ -6260,7 +6289,7 b' def resolve(ui, repo, *pats, **opts):'
             #
             # All this should eventually happens, but in the mean time, we use this
             # context manager slightly out of the context it should be.
-            with repo.dirstate.
+            with repo.dirstate.changing_parents(repo):
                 mergestatemod.recordupdates(repo, ms.actions(), branchmerge, None)

     if not didwork and pats:
7252 | # XXX We should actually skip this if no default is specified, instead |
|
7281 | # XXX We should actually skip this if no default is specified, instead | |
7253 | # of passing "default" which will resolve as "./default/" if no default |
|
7282 | # of passing "default" which will resolve as "./default/" if no default | |
7254 | # path is defined. |
|
7283 | # path is defined. | |
7255 |
|
|
7284 | path = urlutil.get_unique_pull_path_obj(b'summary', ui, b'default') | |
7256 | b'summary', repo, ui, b'default' |
|
7285 | sbranch = path.branch | |
7257 | ) |
|
|||
7258 | sbranch = branches[0] |
|
|||
7259 | try: |
|
7286 | try: | |
7260 |
other = hg.peer(repo, {}, |
|
7287 | other = hg.peer(repo, {}, path) | |
7261 | except error.RepoError: |
|
7288 | except error.RepoError: | |
7262 | if opts.get(b'remote'): |
|
7289 | if opts.get(b'remote'): | |
7263 | raise |
|
7290 | raise | |
7264 |
return |
|
7291 | return path.loc, sbranch, None, None, None | |
|
7292 | branches = (path.branch, []) | |||
7265 | revs, checkout = hg.addbranchrevs(repo, other, branches, None) |
|
7293 | revs, checkout = hg.addbranchrevs(repo, other, branches, None) | |
7266 | if revs: |
|
7294 | if revs: | |
7267 | revs = [other.lookup(rev) for rev in revs] |
|
7295 | revs = [other.lookup(rev) for rev in revs] | |
7268 |
ui.debug(b'comparing with %s\n' % urlutil.hidepassword( |
|
7296 | ui.debug(b'comparing with %s\n' % urlutil.hidepassword(path.loc)) | |
7269 | with repo.ui.silent(): |
|
7297 | with repo.ui.silent(): | |
7270 | commoninc = discovery.findcommonincoming(repo, other, heads=revs) |
|
7298 | commoninc = discovery.findcommonincoming(repo, other, heads=revs) | |
7271 |
return |
|
7299 | return path.loc, sbranch, other, commoninc, commoninc[1] | |
7272 |
|
7300 | |||
7273 | if needsincoming: |
|
7301 | if needsincoming: | |
7274 | source, sbranch, sother, commoninc, incoming = getincoming() |
|
7302 | source, sbranch, sother, commoninc, incoming = getincoming() | |
@@ -7284,9 +7312,10 b' def summary(ui, repo, **opts):'
             d = b'default-push'
         elif b'default' in ui.paths:
             d = b'default'
+        path = None
         if d is not None:
             path = urlutil.get_unique_push_path(b'summary', repo, ui, d)
-            dest = path.
+            dest = path.loc
             dbranch = path.branch
         else:
             dest = b'default'
@@ -7294,7 +7323,7 b' def summary(ui, repo, **opts):'
         revs, checkout = hg.addbranchrevs(repo, repo, (dbranch, []), None)
         if source != dest:
             try:
-                dother = hg.peer(repo, {}, dest)
+                dother = hg.peer(repo, {}, path if path is not None else dest)
             except error.RepoError:
                 if opts.get(b'remote'):
                     raise
7472 | ) |
|
7501 | ) | |
7473 | node = logcmdutil.revsingle(repo, rev_).node() |
|
7502 | node = logcmdutil.revsingle(repo, rev_).node() | |
7474 |
|
7503 | |||
|
7504 | # don't allow tagging the null rev or the working directory | |||
7475 | if node is None: |
|
7505 | if node is None: | |
7476 | raise error.InputError(_(b"cannot tag working directory")) |
|
7506 | raise error.InputError(_(b"cannot tag working directory")) | |
|
7507 | elif not opts.get(b'remove') and node == nullid: | |||
|
7508 | raise error.InputError(_(b"cannot tag null revision")) | |||
7477 |
|
7509 | |||
7478 | if not message: |
|
7510 | if not message: | |
7479 | # we don't translate commit messages |
|
7511 | # we don't translate commit messages | |
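Note the ordering win here: the null-revision guard now runs right after `node` is resolved and before the commit message is requested, so tagging the null revision aborts before the user is prompted for a message; it also reuses the already-computed `node` (hence the new `nullid` import) instead of re-resolving the revision. The next hunk removes the old, later check.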
@@ -7494,13 +7526,6 b' def tag(ui, repo, name1, *names, **opts)'
             editform=editform, **pycompat.strkwargs(opts)
         )

-    # don't allow tagging the null rev
-    if (
-        not opts.get(b'remove')
-        and logcmdutil.revsingle(repo, rev_).rev() == nullrev
-    ):
-        raise error.InputError(_(b"cannot tag null revision"))
-
     tagsmod.tag(
         repo,
         names,
@@ -588,6 +588,18 b' coreconfigitem('
     b'revlog.debug-delta',
     default=False,
 )
+# display extra information about the bundling process
+coreconfigitem(
+    b'debug',
+    b'bundling-stats',
+    default=False,
+)
+# display extra information about the unbundling process
+coreconfigitem(
+    b'debug',
+    b'unbundling-stats',
+    default=False,
+)
 coreconfigitem(
     b'defaults',
     b'.*',
@@ -734,6 +746,14 b' coreconfigitem('
     b'discovery.exchange-heads',
     default=True,
 )
+# If devel.debug.abort-update is True, then any merge with the working copy,
+# e.g. [hg update], will be aborted after figuring out what needs to be done,
+# but before spawning the parallel worker
+coreconfigitem(
+    b'devel',
+    b'debug.abort-update',
+    default=False,
+)
 # If discovery.grow-sample is False, the sample size used in set discovery will
 # not be increased through the process
 coreconfigitem(
@@ -911,6 +931,13 b' coreconfigitem('
     b'changegroup4',
     default=False,
 )
+
+# might remove rank configuration once the computation has no impact
+coreconfigitem(
+    b'experimental',
+    b'changelog-v2.compute-rank',
+    default=True,
+)
 coreconfigitem(
     b'experimental',
     b'cleanup-as-archived',
@@ -1774,6 +1801,13 b' coreconfigitem('
 )
 coreconfigitem(
     b'merge-tools',
+    br'.*\.regappend$',
+    default=b"",
+    generic=True,
+    priority=-1,
+)
+coreconfigitem(
+    b'merge-tools',
     br'.*\.symlink$',
     default=False,
     generic=True,
@@ -2023,6 +2057,11 b' coreconfigitem('
 )
 coreconfigitem(
     b'storage',
+    b'revlog.delta-parent-search.candidate-group-chunk-size',
+    default=10,
+)
+coreconfigitem(
+    b'storage',
     b'revlog.issue6528.fix-incoming',
     default=True,
 )
@@ -2044,6 +2083,7 b' coreconfigitem('
     b'revlog.reuse-external-delta',
     default=True,
 )
+# This option is True unless `format.generaldelta` is set.
 coreconfigitem(
     b'storage',
     b'revlog.reuse-external-delta-parent',
@@ -2123,7 +2163,7 b' coreconfigitem('
 coreconfigitem(
     b'server',
     b'pullbundle',
-    default=False,
+    default=True,
 )
 coreconfigitem(
     b'server',
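For usage context: the new knobs above are ordinary config items and can be toggled per invocation, e.g. `--config debug.bundling-stats=yes` or `--config storage.revlog.delta-parent-search.candidate-group-chunk-size=20`; note also that `server.pullbundle` now defaults to enabled.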
@@ -1595,7 +1595,7 b' class workingctx(committablectx):'
         if p2node is None:
             p2node = self._repo.nodeconstants.nullid
         dirstate = self._repo.dirstate
-        with dirstate.
+        with dirstate.changing_parents(self._repo):
             copies = dirstate.setparents(p1node, p2node)
             pctx = self._repo[p1node]
             if copies:
@@ -1854,47 +1854,42 b' class workingctx(committablectx):'

     def _poststatusfixup(self, status, fixup):
         """update dirstate for files that are actually clean"""
+        dirstate = self._repo.dirstate
         poststatus = self._repo.postdsstatus()
-        if fixup or poststatus or self._repo.dirstate._dirty:
+        if fixup:
+            if dirstate.is_changing_parents:
+                normal = lambda f, pfd: dirstate.update_file(
+                    f,
+                    p1_tracked=True,
+                    wc_tracked=True,
+                )
+            else:
+                normal = dirstate.set_clean
+            for f, pdf in fixup:
+                normal(f, pdf)
+        if poststatus or self._repo.dirstate._dirty:
             try:
-                oldid = self._repo.dirstate.identity()
-
                 # updating the dirstate is optional
                 # so we don't wait on the lock
                 # wlock can invalidate the dirstate, so cache normal _after_
                 # taking the lock
+                pre_dirty = dirstate._dirty
                 with self._repo.wlock(False):
-
-
-
-
-
-                        f, p1_tracked=True, wc_tracked=True
-                    )
-
-
-                    for f, pdf in fixup:
-                        normal(f, pdf)
-                    # write changes out explicitly, because nesting
-                    # wlock at runtime may prevent 'wlock.release()'
-                    # after this block from doing so for subsequent
-                    # changing files
-                    tr = self._repo.currenttransaction()
-                    self._repo.dirstate.write(tr)
-
-                    if poststatus:
-                        for ps in poststatus:
-                            ps(self, status)
-                    else:
-                        # in this case, writing changes out breaks
-                        # consistency, because .hg/dirstate was
-                        # already changed simultaneously after last
-                        # caching (see also issue5584 for detail)
-                        self._repo.ui.debug(
-                            b'skip updating dirstate: identity mismatch\n'
-                        )
+                    assert self._repo.dirstate is dirstate
+                    post_dirty = dirstate._dirty
+                    if post_dirty:
+                        tr = self._repo.currenttransaction()
+                        dirstate.write(tr)
+                    elif pre_dirty:
+                        # the wlock grabbing detected that dirstate changes
+                        # needed to be dropped
+                        m = b'skip updating dirstate: identity mismatch\n'
+                        self._repo.ui.debug(m)
+                    if poststatus:
+                        for ps in poststatus:
+                            ps(self, status)
             except error.LockError:
-                pass
+                dirstate.invalidate()
             finally:
                 # Even if the wlock couldn't be grabbed, clear out the list.
                 self._repo.clearpostdsstatus()
@@ -1904,25 +1899,27 b' class workingctx(committablectx):'
         subrepos = []
         if b'.hgsub' in self:
             subrepos = sorted(self.substate)
-        cmp, s, mtime_boundary = self._repo.dirstate.status(
-            match, subrepos, ignored=ignored, clean=clean, unknown=unknown
-        )
-
-        # check for any possibly clean files
-        fixup = []
-        if cmp:
-            modified2, deleted2, clean_set, fixup = self._checklookup(
-                cmp, mtime_boundary
-            )
-            s.modified.extend(modified2)
-            s.deleted.extend(deleted2)
-
-            if clean_set and clean:
-                s.clean.extend(clean_set)
-        if fixup and clean:
-            s.clean.extend((f for f, _ in fixup))
-
-        self._poststatusfixup(s, fixup)
+        dirstate = self._repo.dirstate
+        with dirstate.running_status(self._repo):
+            cmp, s, mtime_boundary = dirstate.status(
+                match, subrepos, ignored=ignored, clean=clean, unknown=unknown
+            )
+
+            # check for any possibly clean files
+            fixup = []
+            if cmp:
+                modified2, deleted2, clean_set, fixup = self._checklookup(
+                    cmp, mtime_boundary
+                )
+                s.modified.extend(modified2)
+                s.deleted.extend(deleted2)
+
+            if clean_set and clean:
+                s.clean.extend(clean_set)
+            if fixup and clean:
+                s.clean.extend((f for f, _ in fixup))
+
+            self._poststatusfixup(s, fixup)

         if match.always():
             # cache for performance
@@ -2050,7 +2047,7 b' class workingctx(committablectx):'
         return sorted(f for f in ds.matches(match) if ds.get_entry(f).tracked)

     def markcommitted(self, node):
-        with self._repo.dirstate.
+        with self._repo.dirstate.changing_parents(self._repo):
             for f in self.modified() + self.added():
                 self._repo.dirstate.update_file(
                     f, p1_tracked=True, wc_tracked=True
@@ -21,7 +21,6 b' import re'
 import socket
 import ssl
 import stat
-import string
 import subprocess
 import sys
 import time
@@ -73,7 +72,6 b' from . import ('
     repoview,
     requirements,
     revlog,
-    revlogutils,
     revset,
     revsetlang,
     scmutil,
@@ -89,6 +87,7 b' from . import ('
     upgrade,
     url as urlmod,
     util,
+    verify,
     vfs as vfsmod,
     wireprotoframing,
     wireprotoserver,
@@ -556,15 +555,9 b' def debugchangedfiles(ui, repo, rev, **o'
 @command(b'debugcheckstate', [], b'')
 def debugcheckstate(ui, repo):
     """validate the correctness of the current dirstate"""
-    parent1, parent2 = repo.dirstate.parents()
-    m1 = repo[parent1].manifest()
-    m2 = repo[parent2].manifest()
-    errors = 0
-    for err in repo.dirstate.verify(m1, m2):
-        ui.warn(err[0] % err[1:])
-        errors += 1
+    errors = verify.verifier(repo)._verify_dirstate()
     if errors:
-        errstr = _(b".hg/dirstate inconsistent with current parent's manifest")
+        errstr = _(b"dirstate inconsistent with current parent's manifest")
         raise error.Abort(errstr)

@@ -990,17 +983,29 b' def debugdeltachain(ui, repo, file_=None'

 @command(
     b'debug-delta-find',
-    cmdutil.debugrevlogopts + cmdutil.formatteropts,
+    cmdutil.debugrevlogopts
+    + cmdutil.formatteropts
+    + [
+        (
+            b'',
+            b'source',
+            b'full',
+            _(b'input data feed to the process (full, storage, p1, p2, prev)'),
+        ),
+    ],
     _(b'-c|-m|FILE REV'),
     optionalrepo=True,
 )
-def debugdeltafind(ui, repo, arg_1, arg_2=None, **opts):
+def debugdeltafind(ui, repo, arg_1, arg_2=None, source=b'full', **opts):
     """display the computation to get to a valid delta for storing REV

     This command will replay the process used to find the "best" delta to store
     a revision and display information about all the steps used to get to that
     result.

+    By default, the process is fed the full text for the revision. This
+    can be controlled with the --source flag.
+
     The revision use the revision number of the target storage (not changelog
     revision number).

@@ -1017,34 +1022,22 b' def debugdeltafind(ui, repo, arg_1, arg_'
     rev = int(rev)

     revlog = cmdutil.openrevlog(repo, b'debugdeltachain', file_, opts)
-
-    deltacomputer = deltautil.deltacomputer(
-        revlog,
-        write_debug=ui.write,
-        debug_search=not ui.quiet,
-    )
-
-    node = revlog.node(rev)
     p1r, p2r = revlog.parentrevs(rev)
-    p1 = revlog.node(p1r)
-    p2 = revlog.node(p2r)
-    btext = [revlog.revision(rev)]
-    textlen = len(btext[0])
-    cachedelta = None
-    flags = revlog.flags(rev)
-
-    revinfo = revlogutils.revisioninfo(
-        node,
-        p1,
-        p2,
-        btext,
-        textlen,
-        cachedelta,
-        flags,
-    )
-
-    fh = revlog._datafp()
-    deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)
+
+    if source == b'full':
+        base_rev = nullrev
+    elif source == b'storage':
+        base_rev = revlog.deltaparent(rev)
+    elif source == b'p1':
+        base_rev = p1r
+    elif source == b'p2':
+        base_rev = p2r
+    elif source == b'prev':
+        base_rev = rev - 1
+    else:
+        raise error.InputError(b"invalid --source value: %s" % source)
+
+    revlog_debug.debug_delta_find(ui, revlog, rev, base_rev=base_rev)


 @command(
@@ -1236,12 +1229,12 b' def debugdiscovery(ui, repo, remoteurl=b'
         random.seed(int(opts[b'seed']))

     if not remote_revs:
-
-        remoteurl, branches = urlutil.get_unique_pull_path(
-            b'debugdiscovery', repo, ui, remoteurl
-        )
-        remote = hg.peer(repo, opts, remoteurl)
-        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(remoteurl))
+        path = urlutil.get_unique_pull_path_obj(
+            b'debugdiscovery', ui, remoteurl
+        )
+        branches = (path.branch, [])
+        remote = hg.peer(repo, opts, path)
+        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(path.loc))
     else:
         branches = (None, [])
         remote_filtered_revs = logcmdutil.revrange(
@@ -3135,6 +3128,9 b' def debugrebuilddirstate(ui, repo, rev, '
     """
     ctx = scmutil.revsingle(repo, rev)
     with repo.wlock():
+        if repo.currenttransaction() is not None:
+            msg = b'rebuild the dirstate outside of a transaction'
+            raise error.ProgrammingError(msg)
         dirstate = repo.dirstate
         changedfiles = None
         # See command doc for what minimal does.
@@ -3146,7 +3142,8 b' def debugrebuilddirstate(ui, repo, rev, '
             dsnotadded = {f for f in dsonly if not dirstate.get_entry(f).added}
             changedfiles = manifestonly | dsnotadded

-        dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
+        with dirstate.changing_parents(repo):
+            dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)


 @command(
@@ -3207,348 +3204,10 b' def debugrevlog(ui, repo, file_=None, **'
     r = cmdutil.openrevlog(repo, b'debugrevlog', file_, opts)

     if opts.get(b"dump"):
-        numrevs = len(r)
+        revlog_debug.dump(ui, r)
-        ui.write(
-            (
-                b"# rev p1rev p2rev start end deltastart base p1 p2"
-                b" rawsize totalsize compression heads chainlen\n"
-            )
-        )
-        ts = 0
-        heads = set()
-
-        for rev in range(numrevs):
-            dbase = r.deltaparent(rev)
-            if dbase == -1:
-                dbase = rev
-            cbase = r.chainbase(rev)
-            clen = r.chainlen(rev)
-            p1, p2 = r.parentrevs(rev)
-            rs = r.rawsize(rev)
-            ts = ts + rs
-            heads -= set(r.parentrevs(rev))
-            heads.add(rev)
-            try:
-                compression = ts / r.end(rev)
-            except ZeroDivisionError:
-                compression = 0
-            ui.write(
-                b"%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
-                b"%11d %5d %8d\n"
-                % (
-                    rev,
-                    p1,
-                    p2,
-                    r.start(rev),
-                    r.end(rev),
-                    r.start(dbase),
-                    r.start(cbase),
-                    r.start(p1),
-                    r.start(p2),
-                    rs,
-                    ts,
-                    compression,
-                    len(heads),
-                    clen,
-                )
-            )
-        return 0
-
-    format = r._format_version
-    v = r._format_flags
-    flags = []
-    gdelta = False
-    if v & revlog.FLAG_INLINE_DATA:
-        flags.append(b'inline')
-    if v & revlog.FLAG_GENERALDELTA:
-        gdelta = True
-        flags.append(b'generaldelta')
-    if not flags:
-        flags = [b'(none)']
-
-    ### tracks merge vs single parent
-    nummerges = 0
-
-    ### tracks ways the "delta" are build
-    # nodelta
-    numempty = 0
-    numemptytext = 0
-    numemptydelta = 0
-    # full file content
-    numfull = 0
-    # intermediate snapshot against a prior snapshot
-    numsemi = 0
-    # snapshot count per depth
-    numsnapdepth = collections.defaultdict(lambda: 0)
-    # delta against previous revision
-    numprev = 0
-    # delta against first or second parent (not prev)
-    nump1 = 0
-    nump2 = 0
-    # delta against neither prev nor parents
-    numother = 0
-    # delta against prev that are also first or second parent
-    # (details of `numprev`)
-    nump1prev = 0
-    nump2prev = 0
-
-    # data about delta chain of each revs
-    chainlengths = []
-    chainbases = []
-    chainspans = []
-
-    # data about each revision
-    datasize = [None, 0, 0]
-    fullsize = [None, 0, 0]
-    semisize = [None, 0, 0]
-    # snapshot count per depth
-    snapsizedepth = collections.defaultdict(lambda: [None, 0, 0])
-    deltasize = [None, 0, 0]
-    chunktypecounts = {}
-    chunktypesizes = {}
-
-    def addsize(size, l):
-        if l[0] is None or size < l[0]:
-            l[0] = size
-        if size > l[1]:
-            l[1] = size
-        l[2] += size
-
-    numrevs = len(r)
-    for rev in range(numrevs):
-        p1, p2 = r.parentrevs(rev)
-        delta = r.deltaparent(rev)
-        if format > 0:
-            addsize(r.rawsize(rev), datasize)
-        if p2 != nullrev:
-            nummerges += 1
-        size = r.length(rev)
-        if delta == nullrev:
-            chainlengths.append(0)
-            chainbases.append(r.start(rev))
-            chainspans.append(size)
-            if size == 0:
-                numempty += 1
-                numemptytext += 1
-            else:
-                numfull += 1
-                numsnapdepth[0] += 1
-                addsize(size, fullsize)
-                addsize(size, snapsizedepth[0])
-        else:
-            chainlengths.append(chainlengths[delta] + 1)
-            baseaddr = chainbases[delta]
-            revaddr = r.start(rev)
-            chainbases.append(baseaddr)
-            chainspans.append((revaddr - baseaddr) + size)
-            if size == 0:
-                numempty += 1
-                numemptydelta += 1
-            elif r.issnapshot(rev):
-                addsize(size, semisize)
-                numsemi += 1
-                depth = r.snapshotdepth(rev)
-                numsnapdepth[depth] += 1
-                addsize(size, snapsizedepth[depth])
-            else:
-                addsize(size, deltasize)
3355 | if delta == rev - 1: |
|
|||
3356 | numprev += 1 |
|
|||
3357 | if delta == p1: |
|
|||
3358 | nump1prev += 1 |
|
|||
3359 | elif delta == p2: |
|
|||
3360 | nump2prev += 1 |
|
|||
3361 | elif delta == p1: |
|
|||
3362 | nump1 += 1 |
|
|||
3363 | elif delta == p2: |
|
|||
3364 | nump2 += 1 |
|
|||
3365 | elif delta != nullrev: |
|
|||
3366 | numother += 1 |
|
|||
3367 |
|
||||
3368 | # Obtain data on the raw chunks in the revlog. |
|
|||
3369 | if util.safehasattr(r, b'_getsegmentforrevs'): |
|
|||
3370 | segment = r._getsegmentforrevs(rev, rev)[1] |
|
|||
3371 | else: |
|
|||
3372 | segment = r._revlog._getsegmentforrevs(rev, rev)[1] |
|
|||
3373 | if segment: |
|
|||
3374 | chunktype = bytes(segment[0:1]) |
|
|||
3375 | else: |
|
|||
3376 | chunktype = b'empty' |
|
|||
3377 |
|
||||
3378 | if chunktype not in chunktypecounts: |
|
|||
3379 | chunktypecounts[chunktype] = 0 |
|
|||
3380 | chunktypesizes[chunktype] = 0 |
|
|||
3381 |
|
||||
3382 | chunktypecounts[chunktype] += 1 |
|
|||
3383 | chunktypesizes[chunktype] += size |
|
|||
3384 |
|
||||
3385 | # Adjust size min value for empty cases |
|
|||
3386 | for size in (datasize, fullsize, semisize, deltasize): |
|
|||
3387 | if size[0] is None: |
|
|||
3388 | size[0] = 0 |
|
|||
3389 |
|
||||
3390 | numdeltas = numrevs - numfull - numempty - numsemi |
|
|||
3391 | numoprev = numprev - nump1prev - nump2prev |
|
|||
3392 | totalrawsize = datasize[2] |
|
|||
3393 | datasize[2] /= numrevs |
|
|||
3394 | fulltotal = fullsize[2] |
|
|||
3395 | if numfull == 0: |
|
|||
3396 | fullsize[2] = 0 |
|
|||
3397 | else: |
|
3208 | else: | |
3398 | fullsize[2] /= numfull |
|
3209 | revlog_debug.debug_revlog(ui, r) | |
3399 | semitotal = semisize[2] |
|
3210 | return 0 | |
3400 | snaptotal = {} |
|
|||
3401 | if numsemi > 0: |
|
|||
3402 | semisize[2] /= numsemi |
|
|||
3403 | for depth in snapsizedepth: |
|
|||
3404 | snaptotal[depth] = snapsizedepth[depth][2] |
|
|||
3405 | snapsizedepth[depth][2] /= numsnapdepth[depth] |
|
|||
3406 |
|
||||
3407 | deltatotal = deltasize[2] |
|
|||
3408 | if numdeltas > 0: |
|
|||
3409 | deltasize[2] /= numdeltas |
|
|||
3410 | totalsize = fulltotal + semitotal + deltatotal |
|
|||
3411 | avgchainlen = sum(chainlengths) / numrevs |
|
|||
3412 | maxchainlen = max(chainlengths) |
|
|||
3413 | maxchainspan = max(chainspans) |
|
|||
3414 | compratio = 1 |
|
|||
3415 | if totalsize: |
|
|||
3416 | compratio = totalrawsize / totalsize |
|
|||
3417 |
|
||||
3418 | basedfmtstr = b'%%%dd\n' |
|
|||
3419 | basepcfmtstr = b'%%%dd %s(%%5.2f%%%%)\n' |
|
|||
3420 |
|
||||
3421 | def dfmtstr(max): |
|
|||
3422 | return basedfmtstr % len(str(max)) |
|
|||
3423 |
|
||||
3424 | def pcfmtstr(max, padding=0): |
|
|||
3425 | return basepcfmtstr % (len(str(max)), b' ' * padding) |
|
|||
3426 |
|
||||
3427 | def pcfmt(value, total): |
|
|||
3428 | if total: |
|
|||
3429 | return (value, 100 * float(value) / total) |
|
|||
3430 | else: |
|
|||
3431 | return value, 100.0 |
|
|||
3432 |
|
||||
3433 | ui.writenoi18n(b'format : %d\n' % format) |
|
|||
3434 | ui.writenoi18n(b'flags : %s\n' % b', '.join(flags)) |
|
|||
3435 |
|
||||
3436 | ui.write(b'\n') |
|
|||
3437 | fmt = pcfmtstr(totalsize) |
|
|||
3438 | fmt2 = dfmtstr(totalsize) |
|
|||
3439 | ui.writenoi18n(b'revisions : ' + fmt2 % numrevs) |
|
|||
3440 | ui.writenoi18n(b' merges : ' + fmt % pcfmt(nummerges, numrevs)) |
|
|||
3441 | ui.writenoi18n( |
|
|||
3442 | b' normal : ' + fmt % pcfmt(numrevs - nummerges, numrevs) |
|
|||
3443 | ) |
|
|||
3444 | ui.writenoi18n(b'revisions : ' + fmt2 % numrevs) |
|
|||
3445 | ui.writenoi18n(b' empty : ' + fmt % pcfmt(numempty, numrevs)) |
|
|||
3446 | ui.writenoi18n( |
|
|||
3447 | b' text : ' |
|
|||
3448 | + fmt % pcfmt(numemptytext, numemptytext + numemptydelta) |
|
|||
3449 | ) |
|
|||
3450 | ui.writenoi18n( |
|
|||
3451 | b' delta : ' |
|
|||
3452 | + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta) |
|
|||
3453 | ) |
|
|||
3454 | ui.writenoi18n( |
|
|||
3455 | b' snapshot : ' + fmt % pcfmt(numfull + numsemi, numrevs) |
|
|||
3456 | ) |
|
|||
3457 | for depth in sorted(numsnapdepth): |
|
|||
3458 | ui.write( |
|
|||
3459 | (b' lvl-%-3d : ' % depth) |
|
|||
3460 | + fmt % pcfmt(numsnapdepth[depth], numrevs) |
|
|||
3461 | ) |
|
|||
3462 | ui.writenoi18n(b' deltas : ' + fmt % pcfmt(numdeltas, numrevs)) |
|
|||
3463 | ui.writenoi18n(b'revision size : ' + fmt2 % totalsize) |
|
|||
3464 | ui.writenoi18n( |
|
|||
3465 | b' snapshot : ' + fmt % pcfmt(fulltotal + semitotal, totalsize) |
|
|||
3466 | ) |
|
|||
3467 | for depth in sorted(numsnapdepth): |
|
|||
3468 | ui.write( |
|
|||
3469 | (b' lvl-%-3d : ' % depth) |
|
|||
3470 | + fmt % pcfmt(snaptotal[depth], totalsize) |
|
|||
3471 | ) |
|
|||
3472 | ui.writenoi18n(b' deltas : ' + fmt % pcfmt(deltatotal, totalsize)) |
|
|||
3473 |
|
||||
3474 | def fmtchunktype(chunktype): |
|
|||
3475 | if chunktype == b'empty': |
|
|||
3476 | return b' %s : ' % chunktype |
|
|||
3477 | elif chunktype in pycompat.bytestr(string.ascii_letters): |
|
|||
3478 | return b' 0x%s (%s) : ' % (hex(chunktype), chunktype) |
|
|||
3479 | else: |
|
|||
3480 | return b' 0x%s : ' % hex(chunktype) |
|
|||
3481 |
|
||||
3482 | ui.write(b'\n') |
|
|||
3483 | ui.writenoi18n(b'chunks : ' + fmt2 % numrevs) |
|
|||
3484 | for chunktype in sorted(chunktypecounts): |
|
|||
3485 | ui.write(fmtchunktype(chunktype)) |
|
|||
3486 | ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs)) |
|
|||
3487 | ui.writenoi18n(b'chunks size : ' + fmt2 % totalsize) |
|
|||
3488 | for chunktype in sorted(chunktypecounts): |
|
|||
3489 | ui.write(fmtchunktype(chunktype)) |
|
|||
3490 | ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize)) |
|
|||
3491 |
|
||||
3492 | ui.write(b'\n') |
|
|||
3493 | fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio)) |
|
|||
3494 | ui.writenoi18n(b'avg chain length : ' + fmt % avgchainlen) |
|
|||
3495 | ui.writenoi18n(b'max chain length : ' + fmt % maxchainlen) |
|
|||
3496 | ui.writenoi18n(b'max chain reach : ' + fmt % maxchainspan) |
|
|||
3497 | ui.writenoi18n(b'compression ratio : ' + fmt % compratio) |
|
|||
3498 |
|
||||
3499 | if format > 0: |
|
|||
3500 | ui.write(b'\n') |
|
|||
3501 | ui.writenoi18n( |
|
|||
3502 | b'uncompressed data size (min/max/avg) : %d / %d / %d\n' |
|
|||
3503 | % tuple(datasize) |
|
|||
3504 | ) |
|
|||
3505 | ui.writenoi18n( |
|
|||
3506 | b'full revision size (min/max/avg) : %d / %d / %d\n' |
|
|||
3507 | % tuple(fullsize) |
|
|||
3508 | ) |
|
|||
3509 | ui.writenoi18n( |
|
|||
3510 | b'inter-snapshot size (min/max/avg) : %d / %d / %d\n' |
|
|||
3511 | % tuple(semisize) |
|
|||
3512 | ) |
|
|||
3513 | for depth in sorted(snapsizedepth): |
|
|||
3514 | if depth == 0: |
|
|||
3515 | continue |
|
|||
3516 | ui.writenoi18n( |
|
|||
3517 | b' level-%-3d (min/max/avg) : %d / %d / %d\n' |
|
|||
3518 | % ((depth,) + tuple(snapsizedepth[depth])) |
|
|||
3519 | ) |
|
|||
3520 | ui.writenoi18n( |
|
|||
3521 | b'delta size (min/max/avg) : %d / %d / %d\n' |
|
|||
3522 | % tuple(deltasize) |
|
|||
3523 | ) |
|
|||
3524 |
|
||||
3525 | if numdeltas > 0: |
|
|||
3526 | ui.write(b'\n') |
|
|||
3527 | fmt = pcfmtstr(numdeltas) |
|
|||
3528 | fmt2 = pcfmtstr(numdeltas, 4) |
|
|||
3529 | ui.writenoi18n( |
|
|||
3530 | b'deltas against prev : ' + fmt % pcfmt(numprev, numdeltas) |
|
|||
3531 | ) |
|
|||
3532 | if numprev > 0: |
|
|||
3533 | ui.writenoi18n( |
|
|||
3534 | b' where prev = p1 : ' + fmt2 % pcfmt(nump1prev, numprev) |
|
|||
3535 | ) |
|
|||
3536 | ui.writenoi18n( |
|
|||
3537 | b' where prev = p2 : ' + fmt2 % pcfmt(nump2prev, numprev) |
|
|||
3538 | ) |
|
|||
3539 | ui.writenoi18n( |
|
|||
3540 | b' other : ' + fmt2 % pcfmt(numoprev, numprev) |
|
|||
3541 | ) |
|
|||
3542 | if gdelta: |
|
|||
3543 | ui.writenoi18n( |
|
|||
3544 | b'deltas against p1 : ' + fmt % pcfmt(nump1, numdeltas) |
|
|||
3545 | ) |
|
|||
3546 | ui.writenoi18n( |
|
|||
3547 | b'deltas against p2 : ' + fmt % pcfmt(nump2, numdeltas) |
|
|||
3548 | ) |
|
|||
3549 | ui.writenoi18n( |
|
|||
3550 | b'deltas against other : ' + fmt % pcfmt(numother, numdeltas) |
|
|||
3551 | ) |
|
|||
3552 |
|
3211 | |||
3553 |
|
3212 | |||
3554 | @command( |
|
3213 | @command( | |
@@ -3935,10 +3594,8 b' def debugssl(ui, repo, source=None, **op' | |||||
3935 | ) |
|
3594 | ) | |
3936 | source = b"default" |
|
3595 | source = b"default" | |
3937 |
|
3596 | |||
3938 |
|
|
3597 | path = urlutil.get_unique_pull_path_obj(b'debugssl', ui, source) | |
3939 | b'debugssl', repo, ui, source |
|
3598 | url = path.url | |
3940 | ) |
|
|||
3941 | url = urlutil.url(source) |
|
|||
3942 |
|
3599 | |||
3943 | defaultport = {b'https': 443, b'ssh': 22} |
|
3600 | defaultport = {b'https': 443, b'ssh': 22} | |
3944 | if url.scheme in defaultport: |
|
3601 | if url.scheme in defaultport: | |
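Several hunks in this series share the same migration: `urlutil.get_unique_pull_path_obj()` now returns a path object rather than a raw URL/branches pair. A hedged sketch of the attributes these hunks consume (names taken from the diff itself, not a complete API description):

    path = urlutil.get_unique_pull_path_obj(b'debugssl', ui, source)
    url = path.url        # parsed URL object; .scheme drives the port check
    location = path.loc   # raw location string, used for display and opening
    branch = path.branch  # branch requested by the path, if any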
@@ -4049,20 +3706,19 b' def debugbackupbundle(ui, repo, *pats, *' | |||||
4049 | for backup in backups: |
|
3706 | for backup in backups: | |
4050 | # Much of this is copied from the hg incoming logic |
|
3707 | # Much of this is copied from the hg incoming logic | |
4051 | source = os.path.relpath(backup, encoding.getcwd()) |
|
3708 | source = os.path.relpath(backup, encoding.getcwd()) | |
4052 |
|
|
3709 | path = urlutil.get_unique_pull_path_obj( | |
4053 | b'debugbackupbundle', |
|
3710 | b'debugbackupbundle', | |
4054 | repo, |
|
|||
4055 | ui, |
|
3711 | ui, | |
4056 | source, |
|
3712 | source, | |
4057 | default_branches=opts.get(b'branch'), |
|
|||
4058 | ) |
|
3713 | ) | |
4059 | try: |
|
3714 | try: | |
4060 |
other = hg.peer(repo, opts, |
|
3715 | other = hg.peer(repo, opts, path) | |
4061 | except error.LookupError as ex: |
|
3716 | except error.LookupError as ex: | |
4062 |
msg = _(b"\nwarning: unable to open bundle %s") % |
|
3717 | msg = _(b"\nwarning: unable to open bundle %s") % path.loc | |
4063 | hint = _(b"\n(missing parent rev %s)\n") % short(ex.name) |
|
3718 | hint = _(b"\n(missing parent rev %s)\n") % short(ex.name) | |
4064 | ui.warn(msg, hint=hint) |
|
3719 | ui.warn(msg, hint=hint) | |
4065 | continue |
|
3720 | continue | |
|
3721 | branches = (path.branch, opts.get(b'branch', [])) | |||
4066 | revs, checkout = hg.addbranchrevs( |
|
3722 | revs, checkout = hg.addbranchrevs( | |
4067 | repo, other, branches, opts.get(b"rev") |
|
3723 | repo, other, branches, opts.get(b"rev") | |
4068 | ) |
|
3724 | ) | |
@@ -4085,29 +3741,29 b' def debugbackupbundle(ui, repo, *pats, *' | |||||
4085 | with repo.lock(), repo.transaction(b"unbundle") as tr: |
|
3741 | with repo.lock(), repo.transaction(b"unbundle") as tr: | |
4086 | if scmutil.isrevsymbol(other, recovernode): |
|
3742 | if scmutil.isrevsymbol(other, recovernode): | |
4087 | ui.status(_(b"Unbundling %s\n") % (recovernode)) |
|
3743 | ui.status(_(b"Unbundling %s\n") % (recovernode)) | |
4088 |
f = hg.openpath(ui, |
|
3744 | f = hg.openpath(ui, path.loc) | |
4089 |
gen = exchange.readbundle(ui, f, |
|
3745 | gen = exchange.readbundle(ui, f, path.loc) | |
4090 | if isinstance(gen, bundle2.unbundle20): |
|
3746 | if isinstance(gen, bundle2.unbundle20): | |
4091 | bundle2.applybundle( |
|
3747 | bundle2.applybundle( | |
4092 | repo, |
|
3748 | repo, | |
4093 | gen, |
|
3749 | gen, | |
4094 | tr, |
|
3750 | tr, | |
4095 | source=b"unbundle", |
|
3751 | source=b"unbundle", | |
4096 |
url=b"bundle:" + |
|
3752 | url=b"bundle:" + path.loc, | |
4097 | ) |
|
3753 | ) | |
4098 | else: |
|
3754 | else: | |
4099 |
gen.apply(repo, b"unbundle", b"bundle:" + |
|
3755 | gen.apply(repo, b"unbundle", b"bundle:" + path.loc) | |
4100 | break |
|
3756 | break | |
4101 | else: |
|
3757 | else: | |
4102 | backupdate = encoding.strtolocal( |
|
3758 | backupdate = encoding.strtolocal( | |
4103 | time.strftime( |
|
3759 | time.strftime( | |
4104 | "%a %H:%M, %Y-%m-%d", |
|
3760 | "%a %H:%M, %Y-%m-%d", | |
4105 |
time.localtime(os.path.getmtime( |
|
3761 | time.localtime(os.path.getmtime(path.loc)), | |
4106 | ) |
|
3762 | ) | |
4107 | ) |
|
3763 | ) | |
4108 | ui.status(b"\n%s\n" % (backupdate.ljust(50))) |
|
3764 | ui.status(b"\n%s\n" % (backupdate.ljust(50))) | |
4109 | if ui.verbose: |
|
3765 | if ui.verbose: | |
4110 |
ui.status(b"%s%s\n" % (b"bundle:".ljust(13), |
|
3766 | ui.status(b"%s%s\n" % (b"bundle:".ljust(13), path.loc)) | |
4111 | else: |
|
3767 | else: | |
4112 | opts[ |
|
3768 | opts[ | |
4113 | b"template" |
|
3769 | b"template" | |
@@ -4134,8 +3790,21 b' def debugsub(ui, repo, rev=None):' | |||||
4134 | ui.writenoi18n(b' revision %s\n' % v[1]) |
|
3790 | ui.writenoi18n(b' revision %s\n' % v[1]) | |
4135 |
|
3791 | |||
4136 |
|
3792 | |||
4137 | @command(b'debugshell', optionalrepo=True) |
|
3793 | @command( | |
4138 | def debugshell(ui, repo): |
|
3794 | b'debugshell', | |
|
3795 | [ | |||
|
3796 | ( | |||
|
3797 | b'c', | |||
|
3798 | b'command', | |||
|
3799 | b'', | |||
|
3800 | _(b'program passed in as a string'), | |||
|
3801 | _(b'COMMAND'), | |||
|
3802 | ) | |||
|
3803 | ], | |||
|
3804 | _(b'[-c COMMAND]'), | |||
|
3805 | optionalrepo=True, | |||
|
3806 | ) | |||
|
3807 | def debugshell(ui, repo, **opts): | |||
4139 | """run an interactive Python interpreter |
|
3808 | """run an interactive Python interpreter | |
4140 |
|
3809 | |||
4141 | The local namespace is provided with a reference to the ui and |
|
3810 | The local namespace is provided with a reference to the ui and | |
@@ -4148,10 +3817,58 b' def debugshell(ui, repo):' | |||||
4148 | 'repo': repo, |
|
3817 | 'repo': repo, | |
4149 | } |
|
3818 | } | |
4150 |
|
3819 | |||
|
3820 | # py2exe disables initialization of the site module, which is responsible | |||
|
3821 | # for arranging for ``quit()`` to exit the interpreter. Manually initialize | |||
|
3822 | # the stuff that site normally does here, so that the interpreter can be | |||
|
3823 | # quit in a consistent manner, whether run with pyoxidizer, exewrapper.c, | |||
|
3824 | # py.exe, or py2exe. | |||
|
3825 | if getattr(sys, "frozen", None) == 'console_exe': | |||
|
3826 | try: | |||
|
3827 | import site | |||
|
3828 | ||||
|
3829 | site.setcopyright() | |||
|
3830 | site.sethelper() | |||
|
3831 | site.setquit() | |||
|
3832 | except ImportError: | |||
|
3833 | site = None # Keep PyCharm happy | |||
|
3834 | ||||
|
3835 | command = opts.get('command') | |||
|
3836 | if command: | |||
|
3837 | compiled = code.compile_command(encoding.strfromlocal(command)) | |||
|
3838 | code.InteractiveInterpreter(locals=imported_objects).runcode(compiled) | |||
|
3839 | return | |||
|
3840 | ||||
4151 | code.interact(local=imported_objects) |
|
3841 | code.interact(local=imported_objects) | |
4152 |
|
3842 | |||
4153 |
|
3843 | |||
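A condensed sketch of the `--command` handling added above, simplified to plain `str` arguments (the real code routes the bytes option through `encoding.strfromlocal`):

    import code

    def run_shell(imported_objects, command=None):
        if command:
            # compile_command() parses one complete interactive statement
            compiled = code.compile_command(command)
            code.InteractiveInterpreter(locals=imported_objects).runcode(compiled)
            return
        # without -c, fall back to a regular interactive prompt
        code.interact(local=imported_objects)

A hypothetical invocation would be `hg debugshell -c "print(repo.root)"`, which runs the statement with `ui` and `repo` pre-populated and then exits.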
4154 | @command( |
|
3844 | @command( | |
|
3845 | b'debug-revlog-stats', | |||
|
3846 | [ | |||
|
3847 | (b'c', b'changelog', None, _(b'Display changelog statistics')), | |||
|
3848 | (b'm', b'manifest', None, _(b'Display manifest statistics')), | |||
|
3849 | (b'f', b'filelogs', None, _(b'Display filelogs statistics')), | |||
|
3850 | ] | |||
|
3851 | + cmdutil.formatteropts, | |||
|
3852 | ) | |||
|
3853 | def debug_revlog_stats(ui, repo, **opts): | |||
|
3854 | """display statistics about revlogs in the store""" | |||
|
3855 | opts = pycompat.byteskwargs(opts) | |||
|
3856 | changelog = opts[b"changelog"] | |||
|
3857 | manifest = opts[b"manifest"] | |||
|
3858 | filelogs = opts[b"filelogs"] | |||
|
3859 | ||||
|
3860 | if changelog is None and manifest is None and filelogs is None: | |||
|
3861 | changelog = True | |||
|
3862 | manifest = True | |||
|
3863 | filelogs = True | |||
|
3864 | ||||
|
3865 | repo = repo.unfiltered() | |||
|
3866 | fm = ui.formatter(b'debug-revlog-stats', opts) | |||
|
3867 | revlog_debug.debug_revlog_stats(repo, fm, changelog, manifest, filelogs) | |||
|
3868 | fm.end() | |||
|
3869 | ||||
|
3870 | ||||
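Because `debug-revlog-stats` goes through `ui.formatter`, its output is templatable (for example, a hypothetical `hg debug-revlog-stats -m -T json`). A hedged sketch of the formatter idiom the command relies on; the field names below are illustrative, not the command's actual output schema:

    fm = ui.formatter(b'debug-revlog-stats', opts)
    for kind, rl in revlogs_to_report:  # hypothetical (kind, revlog) pairs
        fm.startitem()
        fm.write(b'revlog_type', b'%s ', kind)
        fm.write(b'revs', b'revs: %d\n', len(rl))
    fm.end()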
|
3871 | @command( | |||
4155 | b'debugsuccessorssets', |
|
3872 | b'debugsuccessorssets', | |
4156 | [(b'', b'closest', False, _(b'return closest successors sets only'))], |
|
3873 | [(b'', b'closest', False, _(b'return closest successors sets only'))], | |
4157 | _(b'[REV]'), |
|
3874 | _(b'[REV]'), | |
@@ -4843,7 +4560,8 b' def debugwireproto(ui, repo, path=None, ' | |||||
4843 | _(b'--peer %s not supported with HTTP peers') % opts[b'peer'] |
|
4560 | _(b'--peer %s not supported with HTTP peers') % opts[b'peer'] | |
4844 | ) |
|
4561 | ) | |
4845 | else: |
|
4562 | else: | |
4846 | peer = httppeer.makepeer(ui, path, opener=opener) |
|
4563 | peer_path = urlutil.try_path(ui, path) | |
|
4564 | peer = httppeer.makepeer(ui, peer_path, opener=opener) | |||
4847 |
|
4565 | |||
4848 | # We /could/ populate stdin/stdout with sock.makefile()... |
|
4566 | # We /could/ populate stdin/stdout with sock.makefile()... | |
4849 | else: |
|
4567 | else: |
@@ -120,7 +120,7 b' def difffeatureopts(' | |||||
120 | ) |
|
120 | ) | |
121 | buildopts[b'ignorewseol'] = get(b'ignore_space_at_eol', b'ignorewseol') |
|
121 | buildopts[b'ignorewseol'] = get(b'ignore_space_at_eol', b'ignorewseol') | |
122 | if formatchanging: |
|
122 | if formatchanging: | |
123 |
buildopts[b'text'] = opts |
|
123 | buildopts[b'text'] = None if opts is None else opts.get(b'text') | |
124 | binary = None if opts is None else opts.get(b'binary') |
|
124 | binary = None if opts is None else opts.get(b'binary') | |
125 | buildopts[b'nobinary'] = ( |
|
125 | buildopts[b'nobinary'] = ( | |
126 | not binary |
|
126 | not binary |
@@ -31,7 +31,6 b' from . import (' | |||||
31 | ) |
|
31 | ) | |
32 |
|
32 | |||
33 | from .dirstateutils import ( |
|
33 | from .dirstateutils import ( | |
34 | docket as docketmod, |
|
|||
35 | timestamp, |
|
34 | timestamp, | |
36 | ) |
|
35 | ) | |
37 |
|
36 | |||
@@ -66,10 +65,17 b' class rootcache(filecache):' | |||||
66 | return obj._join(fname) |
|
65 | return obj._join(fname) | |
67 |
|
66 | |||
68 |
|
67 | |||
69 | def requires_parents_change(func): |
|
68 | def check_invalidated(func): | |
|
69 | """check that the func is called with a non-invalidated dirstate | |||
|
70 | ||||
|
71 | The dirstate is in an "invalidated state" after an error occurred during its | |||

72 | modification and remains so until we exit the top-level scope that framed | |||

73 | such a change. | |||
|
74 | """ | |||
|
75 | ||||
70 | def wrap(self, *args, **kwargs): |
|
76 | def wrap(self, *args, **kwargs): | |
71 |
if |
|
77 | if self._invalidated_context: | |
72 |
msg = 'calling `%s` |
|
78 | msg = 'calling `%s` after the dirstate was invalidated' | |
73 | msg %= func.__name__ |
|
79 | msg %= func.__name__ | |
74 | raise error.ProgrammingError(msg) |
|
80 | raise error.ProgrammingError(msg) | |
75 | return func(self, *args, **kwargs) |
|
81 | return func(self, *args, **kwargs) | |
@@ -77,19 +83,63 b' def requires_parents_change(func):' | |||||
77 | return wrap |
|
83 | return wrap | |
78 |
|
84 | |||
79 |
|
85 | |||
80 |
def requires_ |
|
86 | def requires_changing_parents(func): | |
81 | def wrap(self, *args, **kwargs): |
|
87 | def wrap(self, *args, **kwargs): | |
82 |
if self. |
|
88 | if not self.is_changing_parents: | |
83 |
msg = 'calling `%s` |
|
89 | msg = 'calling `%s` outside of a changing_parents context' | |
|
90 | msg %= func.__name__ | |||
|
91 | raise error.ProgrammingError(msg) | |||
|
92 | return func(self, *args, **kwargs) | |||
|
93 | ||||
|
94 | return check_invalidated(wrap) | |||
|
95 | ||||
|
96 | ||||
|
97 | def requires_changing_files(func): | |||
|
98 | def wrap(self, *args, **kwargs): | |||
|
99 | if not self.is_changing_files: | |||
|
100 | msg = 'calling `%s` outside of a `changing_files` context' | |||
84 | msg %= func.__name__ |
|
101 | msg %= func.__name__ | |
85 | raise error.ProgrammingError(msg) |
|
102 | raise error.ProgrammingError(msg) | |
86 | return func(self, *args, **kwargs) |
|
103 | return func(self, *args, **kwargs) | |
87 |
|
104 | |||
88 | return wrap |
|
105 | return check_invalidated(wrap) | |
|
106 | ||||
|
107 | ||||
|
108 | def requires_changing_any(func): | |||
|
109 | def wrap(self, *args, **kwargs): | |||
|
110 | if not self.is_changing_any: | |||
|
111 | msg = 'calling `%s` outside of a changing context' | |||
|
112 | msg %= func.__name__ | |||
|
113 | raise error.ProgrammingError(msg) | |||
|
114 | return func(self, *args, **kwargs) | |||
|
115 | ||||
|
116 | return check_invalidated(wrap) | |||
|
117 | ||||
|
118 | ||||
|
119 | def requires_changing_files_or_status(func): | |||
|
120 | def wrap(self, *args, **kwargs): | |||
|
121 | if not (self.is_changing_files or self._running_status > 0): | |||
|
122 | msg = ( | |||
|
123 | 'calling `%s` outside of a changing_files ' | |||
|
124 | 'or running_status context' | |||
|
125 | ) | |||
|
126 | msg %= func.__name__ | |||
|
127 | raise error.ProgrammingError(msg) | |||
|
128 | return func(self, *args, **kwargs) | |||
|
129 | ||||
|
130 | return check_invalidated(wrap) | |||
|
131 | ||||
|
132 | ||||
|
133 | CHANGE_TYPE_PARENTS = "parents" | |||
|
134 | CHANGE_TYPE_FILES = "files" | |||
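The decorators above all share one shape: check an invariant on `self`, raise `ProgrammingError` if it does not hold, and compose through `check_invalidated`. A self-contained toy of that pattern (plain Python, not Mercurial code):

    class ProgrammingError(Exception):
        pass

    def requires_flag(flag_name):
        """reject calls made while the named attribute on self is falsy"""
        def decorator(func):
            def wrap(self, *args, **kwargs):
                if not getattr(self, flag_name):
                    msg = 'calling `%s` without `%s` set'
                    raise ProgrammingError(msg % (func.__name__, flag_name))
                return func(self, *args, **kwargs)
            return wrap
        return decorator

    class Demo:
        ready = False

        @requires_flag('ready')
        def act(self):
            return 'acted'

    d = Demo()
    d.ready = True
    print(d.act())  # 'acted'; with ready still False the guard would raise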
89 |
|
135 | |||
90 |
|
136 | |||
91 | @interfaceutil.implementer(intdirstate.idirstate) |
|
137 | @interfaceutil.implementer(intdirstate.idirstate) | |
92 | class dirstate: |
|
138 | class dirstate: | |
|
139 | ||||
|
140 | # used by largefile to avoid overwriting transaction callbacks | |||
|
141 | _tr_key_suffix = b'' | |||
|
142 | ||||
93 | def __init__( |
|
143 | def __init__( | |
94 | self, |
|
144 | self, | |
95 | opener, |
|
145 | opener, | |
@@ -124,7 +174,16 b' class dirstate:' | |||||
124 | self._dirty_tracked_set = False |
|
174 | self._dirty_tracked_set = False | |
125 | self._ui = ui |
|
175 | self._ui = ui | |
126 | self._filecache = {} |
|
176 | self._filecache = {} | |
127 | self._parentwriters = 0 |
|
177 | # nesting level of `changing_parents` context | |
|
178 | self._changing_level = 0 | |||
|
179 | # the change currently underway | |||
|
180 | self._change_type = None | |||
|
181 | # number of open _running_status contexts | |||
|
182 | self._running_status = 0 | |||
|
183 | # True if the current dirstate changing operations have been | |||
|
184 | # invalidated (used to make sure all nested contexts have been exited) | |||
|
185 | self._invalidated_context = False | |||
|
186 | self._attached_to_a_transaction = False | |||
128 | self._filename = b'dirstate' |
|
187 | self._filename = b'dirstate' | |
129 | self._filename_th = b'dirstate-tracked-hint' |
|
188 | self._filename_th = b'dirstate-tracked-hint' | |
130 | self._pendingfilename = b'%s.pending' % self._filename |
|
189 | self._pendingfilename = b'%s.pending' % self._filename | |
@@ -136,6 +195,12 b' class dirstate:' | |||||
136 | # raises an exception). |
|
195 | # raises an exception). | |
137 | self._cwd |
|
196 | self._cwd | |
138 |
|
197 | |||
|
198 | def refresh(self): | |||
|
199 | if '_branch' in vars(self): | |||
|
200 | del self._branch | |||
|
201 | if '_map' in vars(self) and self._map.may_need_refresh(): | |||
|
202 | self.invalidate() | |||
|
203 | ||||
139 | def prefetch_parents(self): |
|
204 | def prefetch_parents(self): | |
140 | """make sure the parents are loaded |
|
205 | """make sure the parents are loaded | |
141 |
|
206 | |||
@@ -144,39 +209,193 b' class dirstate:' | |||||
144 | self._pl |
|
209 | self._pl | |
145 |
|
210 | |||
146 | @contextlib.contextmanager |
|
211 | @contextlib.contextmanager | |
147 | def parentchange(self): |
|
212 | @check_invalidated | |
148 | """Context manager for handling dirstate parents. |
|
213 | def running_status(self, repo): | |
|
214 | """Wrap a status operation | |||
|
215 | ||||
|
216 | This context is not mutually exclusive with the `changing_*` contexts. It | |||

217 | also does not require the `wlock` to be taken. | |||
|
218 | ||||
|
219 | If the wlock is taken, this context will behave in a simple way, and | |||
|
220 | ensure the data are scheduled for write when leaving the top level | |||
|
221 | context. | |||
149 |
|
222 | |||
150 | If an exception occurs in the scope of the context manager, |
|
223 | If the lock is not taken, it will only warrant that the data are either | |
151 | the incoherent dirstate won't be written when wlock is |
|
224 | committed (written) or rolled back (invalidated) when exiting the top | |
152 | released. |
|
225 | level context. The write/invalidate action must be performed by the | |
|
226 | wrapped code. | |||
|
227 | ||||
|
228 | ||||
|
229 | The expected logic is: | |||
|
230 | ||||
|
231 | A: read the dirstate | |||
|
232 | B: run status | |||
|
233 | This might make the dirstate dirty by updating caches, | |||
|
234 | especially in Rust. | |||
|
235 | C: do more "post status" fixup if relevant | |||
|
236 | D: try to take the w-lock (this will invalidate the changes if they were raced) | |||
|
237 | E0: if dirstate changed on disk → discard change (done by dirstate internal) | |||
|
238 | E1: elif lock was acquired → write the changes | |||
|
239 | E2: else → discard the changes | |||
153 | """ |
|
240 | """ | |
154 | self._parentwriters += 1 |
|
241 | has_lock = repo.currentwlock() is not None | |
155 | yield |
|
242 | is_changing = self.is_changing_any | |
156 | # Typically we want the "undo" step of a context manager in a |
|
243 | tr = repo.currenttransaction() | |
157 | # finally block so it happens even when an exception |
|
244 | has_tr = tr is not None | |
158 | # occurs. In this case, however, we only want to decrement |
|
245 | nested = bool(self._running_status) | |
159 | # parentwriters if the code in the with statement exits |
|
246 | ||
160 | # normally, so we don't have a try/finally here on purpose. |
|
247 | first_and_alone = not (is_changing or has_tr or nested) | |
161 | self._parentwriters -= 1 |
|
248 | ||
|
249 | # enforce no change happened outside of a proper context. | |||
|
250 | if first_and_alone and self._dirty: | |||
|
251 | has_tr = repo.currenttransaction() is not None | |||
|
252 | if not has_tr and self._changing_level == 0 and self._dirty: | |||
|
253 | msg = "entering a status context, but dirstate is already dirty" | |||
|
254 | raise error.ProgrammingError(msg) | |||
|
255 | ||||
|
256 | should_write = has_lock and not (nested or is_changing) | |||
|
257 | ||||
|
258 | self._running_status += 1 | |||
|
259 | try: | |||
|
260 | yield | |||
|
261 | except Exception: | |||
|
262 | self.invalidate() | |||
|
263 | raise | |||
|
264 | finally: | |||
|
265 | self._running_status -= 1 | |||
|
266 | if self._invalidated_context: | |||
|
267 | should_write = False | |||
|
268 | self.invalidate() | |||
|
269 | ||||
|
270 | if should_write: | |||
|
271 | assert repo.currenttransaction() is tr | |||
|
272 | self.write(tr) | |||
|
273 | elif not has_lock: | |||
|
274 | if self._dirty: | |||
|
275 | msg = b'dirstate dirty while exiting an isolated status context' | |||
|
276 | repo.ui.develwarn(msg) | |||
|
277 | self.invalidate() | |||
|
278 | ||||
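A hedged usage sketch of the A-E protocol described in the docstring above, assuming a `localrepo` object `repo`:

    # run a status pass under the dedicated context; cache updates made
    # during status are then written or discarded as described in E0-E2
    with repo.dirstate.running_status(repo):
        st = repo.status()  # A/B: read the dirstate, run status
        # C: any post-status fixup belongs here, before the context exits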
|
279 | @contextlib.contextmanager | |||
|
280 | @check_invalidated | |||
|
281 | def _changing(self, repo, change_type): | |||
|
282 | if repo.currentwlock() is None: | |||
|
283 | msg = b"trying to change the dirstate without holding the wlock" | |||
|
284 | raise error.ProgrammingError(msg) | |||
|
285 | ||||
|
286 | has_tr = repo.currenttransaction() is not None | |||
|
287 | if not has_tr and self._changing_level == 0 and self._dirty: | |||
|
288 | msg = b"entering a changing context, but dirstate is already dirty" | |||
|
289 | repo.ui.develwarn(msg) | |||
|
290 | ||||
|
291 | assert self._changing_level >= 0 | |||
|
292 | # different types of change are mutually exclusive | |||
|
293 | if self._change_type is None: | |||
|
294 | assert self._changing_level == 0 | |||
|
295 | self._change_type = change_type | |||
|
296 | elif self._change_type != change_type: | |||
|
297 | msg = ( | |||
|
298 | 'trying to open "%s" dirstate-changing context while a "%s" is' | |||
|
299 | ' already open' | |||
|
300 | ) | |||
|
301 | msg %= (change_type, self._change_type) | |||
|
302 | raise error.ProgrammingError(msg) | |||
|
303 | should_write = False | |||
|
304 | self._changing_level += 1 | |||
|
305 | try: | |||
|
306 | yield | |||
|
307 | except: # re-raises | |||
|
308 | self.invalidate() # this will set `_invalidated_context` | |||
|
309 | raise | |||
|
310 | finally: | |||
|
311 | assert self._changing_level > 0 | |||
|
312 | self._changing_level -= 1 | |||
|
313 | # If the dirstate is being invalidated, call invalidate again. | |||
|
314 | # This will throw away anything added by an upper context and | |||
|
315 | # reset the `_invalidated_context` flag when relevant | |||
|
316 | if self._changing_level <= 0: | |||
|
317 | self._change_type = None | |||
|
318 | assert self._changing_level == 0 | |||
|
319 | if self._invalidated_context: | |||
|
320 | # make sure we invalidate anything an upper context might | |||
|
321 | # have changed. | |||
|
322 | self.invalidate() | |||
|
323 | else: | |||
|
324 | should_write = self._changing_level <= 0 | |||
|
325 | tr = repo.currenttransaction() | |||
|
326 | if has_tr != (tr is not None): | |||
|
327 | if has_tr: | |||
|
328 | m = "transaction vanished while changing dirstate" | |||
|
329 | else: | |||
|
330 | m = "transaction appeared while changing dirstate" | |||
|
331 | raise error.ProgrammingError(m) | |||
|
332 | if should_write: | |||
|
333 | self.write(tr) | |||
|
334 | ||||
|
335 | @contextlib.contextmanager | |||
|
336 | def changing_parents(self, repo): | |||
|
337 | with self._changing(repo, CHANGE_TYPE_PARENTS) as c: | |||
|
338 | yield c | |||
|
339 | ||||
|
340 | @contextlib.contextmanager | |||
|
341 | def changing_files(self, repo): | |||
|
342 | with self._changing(repo, CHANGE_TYPE_FILES) as c: | |||
|
343 | yield c | |||
|
344 | ||||
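A hedged sketch of how the two public contexts above interact, given the mutual-exclusion check in `_changing` (`p1` stands for a hypothetical parent node):

    with repo.wlock():  # _changing refuses to run without the wlock
        with repo.dirstate.changing_parents(repo):
            repo.dirstate.setparents(p1)  # only legal inside this context
            # opening dirstate.changing_files(repo) here would raise
            # ProgrammingError: change types are mutually exclusive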
|
345 | # here to help migration to the new code | |||
|
346 | def parentchange(self): | |||
|
347 | msg = ( | |||
|
348 | "Mercurial 6.4 and later requires call to " | |||
|
349 | "`dirstate.changing_parents(repo)`" | |||
|
350 | ) | |||
|
351 | raise error.ProgrammingError(msg) | |||
|
352 | ||||
|
353 | @property | |||
|
354 | def is_changing_any(self): | |||
|
355 | """Returns true if the dirstate is in the middle of a set of changes. | |||
|
356 | ||||
|
357 | This returns True for any kind of change. | |||
|
358 | """ | |||
|
359 | return self._changing_level > 0 | |||
162 |
|
360 | |||
163 | def pendingparentchange(self): |
|
361 | def pendingparentchange(self): | |
|
362 | return self.is_changing_parent() | |||
|
363 | ||||
|
364 | def is_changing_parent(self): | |||
164 | """Returns true if the dirstate is in the middle of a set of changes |
|
365 | """Returns true if the dirstate is in the middle of a set of changes | |
165 | that modify the dirstate parent. |
|
366 | that modify the dirstate parent. | |
166 | """ |
|
367 | """ | |
167 | return self._parentwriters > 0 |
|
368 | self._ui.deprecwarn(b"dirstate.is_changing_parents", b"6.5") | |
|
369 | return self.is_changing_parents | |||
|
370 | ||||
|
371 | @property | |||
|
372 | def is_changing_parents(self): | |||
|
373 | """Returns true if the dirstate is in the middle of a set of changes | |||
|
374 | that modify the dirstate parent. | |||
|
375 | """ | |||
|
376 | if self._changing_level <= 0: | |||
|
377 | return False | |||
|
378 | return self._change_type == CHANGE_TYPE_PARENTS | |||
|
379 | ||||
|
380 | @property | |||
|
381 | def is_changing_files(self): | |||
|
382 | """Returns true if the dirstate is in the middle of a set of changes | |||
|
383 | that modify the files tracked or their sources. | |||
|
384 | """ | |||
|
385 | if self._changing_level <= 0: | |||
|
386 | return False | |||
|
387 | return self._change_type == CHANGE_TYPE_FILES | |||
168 |
|
388 | |||
169 | @propertycache |
|
389 | @propertycache | |
170 | def _map(self): |
|
390 | def _map(self): | |
171 | """Return the dirstate contents (see documentation for dirstatemap).""" |
|
391 | """Return the dirstate contents (see documentation for dirstatemap).""" | |
172 |
|
|
392 | return self._mapcls( | |
173 | self._ui, |
|
393 | self._ui, | |
174 | self._opener, |
|
394 | self._opener, | |
175 | self._root, |
|
395 | self._root, | |
176 | self._nodeconstants, |
|
396 | self._nodeconstants, | |
177 | self._use_dirstate_v2, |
|
397 | self._use_dirstate_v2, | |
178 | ) |
|
398 | ) | |
179 | return self._map |
|
|||
180 |
|
399 | |||
181 | @property |
|
400 | @property | |
182 | def _sparsematcher(self): |
|
401 | def _sparsematcher(self): | |
@@ -365,6 +584,7 b' class dirstate:' | |||||
365 | def branch(self): |
|
584 | def branch(self): | |
366 | return encoding.tolocal(self._branch) |
|
585 | return encoding.tolocal(self._branch) | |
367 |
|
586 | |||
|
587 | @requires_changing_parents | |||
368 | def setparents(self, p1, p2=None): |
|
588 | def setparents(self, p1, p2=None): | |
369 | """Set dirstate parents to p1 and p2. |
|
589 | """Set dirstate parents to p1 and p2. | |
370 |
|
590 | |||
@@ -376,10 +596,10 b' class dirstate:' | |||||
376 | """ |
|
596 | """ | |
377 | if p2 is None: |
|
597 | if p2 is None: | |
378 | p2 = self._nodeconstants.nullid |
|
598 | p2 = self._nodeconstants.nullid | |
379 |
if self._ |
|
599 | if self._changing_level == 0: | |
380 | raise ValueError( |
|
600 | raise ValueError( | |
381 | b"cannot set dirstate parent outside of " |
|
601 | b"cannot set dirstate parent outside of " | |
382 |
b"dirstate. |
|
602 | b"dirstate.changing_parents context manager" | |
383 | ) |
|
603 | ) | |
384 |
|
604 | |||
385 | self._dirty = True |
|
605 | self._dirty = True | |
@@ -419,9 +639,14 b' class dirstate:' | |||||
419 | delattr(self, a) |
|
639 | delattr(self, a) | |
420 | self._dirty = False |
|
640 | self._dirty = False | |
421 | self._dirty_tracked_set = False |
|
641 | self._dirty_tracked_set = False | |
422 |
self._ |
|
642 | self._invalidated_context = bool( | |
|
643 | self._changing_level > 0 | |||
|
644 | or self._attached_to_a_transaction | |||
|
645 | or self._running_status | |||
|
646 | ) | |||
423 | self._origpl = None |
|
647 | self._origpl = None | |
424 |
|
648 | |||
|
649 | @requires_changing_any | |||
425 | def copy(self, source, dest): |
|
650 | def copy(self, source, dest): | |
426 | """Mark dest as a copy of source. Unmark dest if source is None.""" |
|
651 | """Mark dest as a copy of source. Unmark dest if source is None.""" | |
427 | if source == dest: |
|
652 | if source == dest: | |
@@ -439,7 +664,7 b' class dirstate:' | |||||
439 | def copies(self): |
|
664 | def copies(self): | |
440 | return self._map.copymap |
|
665 | return self._map.copymap | |
441 |
|
666 | |||
442 |
@requires_ |
|
667 | @requires_changing_files | |
443 | def set_tracked(self, filename, reset_copy=False): |
|
668 | def set_tracked(self, filename, reset_copy=False): | |
444 | """a "public" method for generic code to mark a file as tracked |
|
669 | """a "public" method for generic code to mark a file as tracked | |
445 |
|
670 | |||
@@ -461,7 +686,7 b' class dirstate:' | |||||
461 | self._dirty_tracked_set = True |
|
686 | self._dirty_tracked_set = True | |
462 | return pre_tracked |
|
687 | return pre_tracked | |
463 |
|
688 | |||
464 |
@requires_ |
|
689 | @requires_changing_files | |
465 | def set_untracked(self, filename): |
|
690 | def set_untracked(self, filename): | |
466 | """a "public" method for generic code to mark a file as untracked |
|
691 | """a "public" method for generic code to mark a file as untracked | |
467 |
|
692 | |||
@@ -476,7 +701,7 b' class dirstate:' | |||||
476 | self._dirty_tracked_set = True |
|
701 | self._dirty_tracked_set = True | |
477 | return ret |
|
702 | return ret | |
478 |
|
703 | |||
479 | @requires_no_parents_change |
|
704 | @requires_changing_files_or_status | |
480 | def set_clean(self, filename, parentfiledata): |
|
705 | def set_clean(self, filename, parentfiledata): | |
481 | """record that the current state of the file on disk is known to be clean""" |
|
706 | """record that the current state of the file on disk is known to be clean""" | |
482 | self._dirty = True |
|
707 | self._dirty = True | |
@@ -485,13 +710,13 b' class dirstate:' | |||||
485 | (mode, size, mtime) = parentfiledata |
|
710 | (mode, size, mtime) = parentfiledata | |
486 | self._map.set_clean(filename, mode, size, mtime) |
|
711 | self._map.set_clean(filename, mode, size, mtime) | |
487 |
|
712 | |||
488 | @requires_no_parents_change |
|
713 | @requires_changing_files_or_status | |
489 | def set_possibly_dirty(self, filename): |
|
714 | def set_possibly_dirty(self, filename): | |
490 | """record that the current state of the file on disk is unknown""" |
|
715 | """record that the current state of the file on disk is unknown""" | |
491 | self._dirty = True |
|
716 | self._dirty = True | |
492 | self._map.set_possibly_dirty(filename) |
|
717 | self._map.set_possibly_dirty(filename) | |
493 |
|
718 | |||
494 |
@requires_parents |
|
719 | @requires_changing_parents | |
495 | def update_file_p1( |
|
720 | def update_file_p1( | |
496 | self, |
|
721 | self, | |
497 | filename, |
|
722 | filename, | |
@@ -503,7 +728,7 b' class dirstate:' | |||||
503 | rewriting operation. |
|
728 | rewriting operation. | |
504 |
|
729 | |||
505 | It should not be called during a merge (p2 != nullid) and only within |
|
730 | It should not be called during a merge (p2 != nullid) and only within | |
506 |
a `with dirstate. |
|
731 | a `with dirstate.changing_parents(repo):` context. | |
507 | """ |
|
732 | """ | |
508 | if self.in_merge: |
|
733 | if self.in_merge: | |
509 | msg = b'update_file_reference should not be called when merging' |
|
734 | msg = b'update_file_reference should not be called when merging' | |
@@ -531,7 +756,7 b' class dirstate:' | |||||
531 | has_meaningful_mtime=False, |
|
756 | has_meaningful_mtime=False, | |
532 | ) |
|
757 | ) | |
533 |
|
758 | |||
534 |
@requires_parents |
|
759 | @requires_changing_parents | |
535 | def update_file( |
|
760 | def update_file( | |
536 | self, |
|
761 | self, | |
537 | filename, |
|
762 | filename, | |
@@ -546,12 +771,57 b' class dirstate:' | |||||
546 | This is to be called when the dirstate's parent changes to keep track |

771 | This is to be called when the dirstate's parent changes to keep track | |
547 | of the file's situation with regard to the working copy and its parent. |

772 | of the file's situation with regard to the working copy and its parent. | |
548 |
|
773 | |||
549 |
This function must be called within a `dirstate. |
|
774 | This function must be called within a `dirstate.changing_parents` context. | |
550 |
|
775 | |||
551 | note: the API is at an early stage and we might need to adjust it |
|
776 | note: the API is at an early stage and we might need to adjust it | |
552 | depending on what information ends up being relevant and useful to |

777 | depending on what information ends up being relevant and useful to | |
553 | other processing. |
|
778 | other processing. | |
554 | """ |
|
779 | """ | |
|
780 | self._update_file( | |||
|
781 | filename=filename, | |||
|
782 | wc_tracked=wc_tracked, | |||
|
783 | p1_tracked=p1_tracked, | |||
|
784 | p2_info=p2_info, | |||
|
785 | possibly_dirty=possibly_dirty, | |||
|
786 | parentfiledata=parentfiledata, | |||
|
787 | ) | |||
|
788 | ||||
|
789 | def hacky_extension_update_file(self, *args, **kwargs): | |||
|
790 | """NEVER USE THIS, YOU DO NOT NEED IT | |||
|
791 | ||||
|
792 | This function is a variant of "update_file" to be called by a small set | |||

793 | of extensions; it also adjusts the internal state of a file, but can be | |||

794 | called outside a `changing_parents` context. | |||
|
795 | ||||
|
796 | A very small number of extensions meddle with the working copy content | |||

797 | in a way that requires adjusting the dirstate accordingly. At the time | |||

798 | this code is written they are: | |||
|
799 | - keyword, | |||
|
800 | - largefile, | |||
|
801 | PLEASE DO NOT GROW THIS LIST ANY FURTHER. | |||
|
802 | ||||
|
803 | This function could probably be replaced by a more semantic one (like | |||

804 | "adjust expected size" or "always revalidate file content", etc.); | |||

805 | however, at the time this is written, that is too much of a detour | |||

806 | to be considered. | |||
|
807 | """ | |||
|
808 | if not (self._changing_level > 0 or self._running_status > 0): | |||
|
809 | msg = "requires a changes context" | |||
|
810 | raise error.ProgrammingError(msg) | |||
|
811 | self._update_file( | |||
|
812 | *args, | |||
|
813 | **kwargs, | |||
|
814 | ) | |||
|
815 | ||||
|
816 | def _update_file( | |||
|
817 | self, | |||
|
818 | filename, | |||
|
819 | wc_tracked, | |||
|
820 | p1_tracked, | |||
|
821 | p2_info=False, | |||
|
822 | possibly_dirty=False, | |||
|
823 | parentfiledata=None, | |||
|
824 | ): | |||
555 |
|
825 | |||
556 | # note: I do not think we need to double check name clash here since we |
|
826 | # note: I do not think we need to double check name clash here since we | |
557 | # are in a update/merge case that should already have taken care of |
|
827 | # are in a update/merge case that should already have taken care of | |
@@ -680,12 +950,16 b' class dirstate:' | |||||
680 | return self._normalize(path, isknown, ignoremissing) |
|
950 | return self._normalize(path, isknown, ignoremissing) | |
681 | return path |
|
951 | return path | |
682 |
|
952 | |||
|
953 | # XXX this method is barely used, as a result: | |||
|
954 | # - its semantics are unclear | |||

955 | # - do we really need it? | |||
|
956 | @requires_changing_parents | |||
683 | def clear(self): |
|
957 | def clear(self): | |
684 | self._map.clear() |
|
958 | self._map.clear() | |
685 | self._dirty = True |
|
959 | self._dirty = True | |
686 |
|
960 | |||
|
961 | @requires_changing_parents | |||
687 | def rebuild(self, parent, allfiles, changedfiles=None): |
|
962 | def rebuild(self, parent, allfiles, changedfiles=None): | |
688 |
|
||||
689 | matcher = self._sparsematcher |
|
963 | matcher = self._sparsematcher | |
690 | if matcher is not None and not matcher.always(): |
|
964 | if matcher is not None and not matcher.always(): | |
691 | # should not add non-matching files |
|
965 | # should not add non-matching files | |
@@ -724,7 +998,6 b' class dirstate:' | |||||
724 | self._map.setparents(parent, self._nodeconstants.nullid) |
|
998 | self._map.setparents(parent, self._nodeconstants.nullid) | |
725 |
|
999 | |||
726 | for f in to_lookup: |
|
1000 | for f in to_lookup: | |
727 |
|
||||
728 | if self.in_merge: |
|
1001 | if self.in_merge: | |
729 | self.set_tracked(f) |
|
1002 | self.set_tracked(f) | |
730 | else: |
|
1003 | else: | |
@@ -749,20 +1022,41 b' class dirstate:' | |||||
749 | def write(self, tr): |
|
1022 | def write(self, tr): | |
750 | if not self._dirty: |
|
1023 | if not self._dirty: | |
751 | return |
|
1024 | return | |
|
1025 | # make sure we don't request a write of invalidated content | |||
|
1026 | # XXX move before the dirty check once `unlock` stops calling `write` | |||
|
1027 | assert not self._invalidated_context | |||
752 |
|
1028 | |||
753 | write_key = self._use_tracked_hint and self._dirty_tracked_set |
|
1029 | write_key = self._use_tracked_hint and self._dirty_tracked_set | |
754 | if tr: |
|
1030 | if tr: | |
|
1031 | ||||
|
1032 | def on_abort(tr): | |||
|
1033 | self._attached_to_a_transaction = False | |||
|
1034 | self.invalidate() | |||
|
1035 | ||||
|
1036 | # make sure we invalidate the current change on abort | |||
|
1037 | if tr is not None: | |||
|
1038 | tr.addabort( | |||
|
1039 | b'dirstate-invalidate%s' % self._tr_key_suffix, | |||
|
1040 | on_abort, | |||
|
1041 | ) | |||
|
1042 | ||||
|
1043 | self._attached_to_a_transaction = True | |||
|
1044 | ||||
|
1045 | def on_success(f): | |||
|
1046 | self._attached_to_a_transaction = False | |||
|
1047 | self._writedirstate(tr, f) | |||
|
1048 | ||||
755 | # delay writing in-memory changes out |
|
1049 | # delay writing in-memory changes out | |
756 | tr.addfilegenerator( |
|
1050 | tr.addfilegenerator( | |
757 | b'dirstate-1-main', |
|
1051 | b'dirstate-1-main%s' % self._tr_key_suffix, | |
758 | (self._filename,), |
|
1052 | (self._filename,), | |
759 | lambda f: self._writedirstate(tr, f), |
|
1053 | on_success, | |
760 | location=b'plain', |
|
1054 | location=b'plain', | |
761 | post_finalize=True, |
|
1055 | post_finalize=True, | |
762 | ) |
|
1056 | ) | |
763 | if write_key: |
|
1057 | if write_key: | |
764 | tr.addfilegenerator( |
|
1058 | tr.addfilegenerator( | |
765 | b'dirstate-2-key-post', |
|
1059 | b'dirstate-2-key-post%s' % self._tr_key_suffix, | |
766 | (self._filename_th,), |
|
1060 | (self._filename_th,), | |
767 | lambda f: self._write_tracked_hint(tr, f), |
|
1061 | lambda f: self._write_tracked_hint(tr, f), | |
768 | location=b'plain', |
|
1062 | location=b'plain', | |
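A hedged sketch of the transaction wiring above, reduced to its two moving parts (the `dirstate` and `tr` objects are assumed; the key names follow the diff):

    # on abort: throw away the pending in-memory dirstate changes
    tr.addabort(
        b'dirstate-invalidate%s' % dirstate._tr_key_suffix,
        lambda tr: dirstate.invalidate(),
    )
    # on success: the file generator performs the delayed write
    tr.addfilegenerator(
        b'dirstate-1-main%s' % dirstate._tr_key_suffix,
        (dirstate._filename,),
        lambda f: dirstate._writedirstate(tr, f),
        location=b'plain',
        post_finalize=True,
    )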
@@ -798,6 +1092,8 b' class dirstate:' | |||||
798 | self._plchangecallbacks[category] = callback |
|
1092 | self._plchangecallbacks[category] = callback | |
799 |
|
1093 | |||
800 | def _writedirstate(self, tr, st): |
|
1094 | def _writedirstate(self, tr, st): | |
|
1095 | # make sure we don't write invalidated content | |||
|
1096 | assert not self._invalidated_context | |||
801 | # notify callbacks about parents change |
|
1097 | # notify callbacks about parents change | |
802 | if self._origpl is not None and self._origpl != self._pl: |
|
1098 | if self._origpl is not None and self._origpl != self._pl: | |
803 | for c, callback in sorted(self._plchangecallbacks.items()): |
|
1099 | for c, callback in sorted(self._plchangecallbacks.items()): | |
@@ -936,7 +1232,8 b' class dirstate:' | |||||
936 | badfn(ff, badtype(kind)) |
|
1232 | badfn(ff, badtype(kind)) | |
937 | if nf in dmap: |
|
1233 | if nf in dmap: | |
938 | results[nf] = None |
|
1234 | results[nf] = None | |
939 | except OSError as inst: # nf not found on disk - it is dirstate only |
|
1235 | except (OSError) as inst: | |
|
1236 | # nf not found on disk - it is dirstate only | |||
940 | if nf in dmap: # does it exactly match a missing file? |
|
1237 | if nf in dmap: # does it exactly match a missing file? | |
941 | results[nf] = None |
|
1238 | results[nf] = None | |
942 | else: # does it match a missing directory? |
|
1239 | else: # does it match a missing directory? | |
@@ -1246,7 +1543,7 b' class dirstate:' | |||||
1246 | ) |
|
1543 | ) | |
1247 | ) |
|
1544 | ) | |
1248 |
|
1545 | |||
1249 |
for |
|
1546 | for fn, message in bad: | |
1250 | matcher.bad(fn, encoding.strtolocal(message)) |
|
1547 | matcher.bad(fn, encoding.strtolocal(message)) | |
1251 |
|
1548 | |||
1252 | status = scmutil.status( |
|
1549 | status = scmutil.status( | |
@@ -1276,6 +1573,9 b' class dirstate:' | |||||
1276 | files that have definitely not been modified since the |
|
1573 | files that have definitely not been modified since the | |
1277 | dirstate was written |
|
1574 | dirstate was written | |
1278 | """ |
|
1575 | """ | |
|
1576 | if not self._running_status: | |||
|
1577 | msg = "Calling `status` outside a `running_status` context" | |||
|
1578 | raise error.ProgrammingError(msg) | |||
1279 | listignored, listclean, listunknown = ignored, clean, unknown |
|
1579 | listignored, listclean, listunknown = ignored, clean, unknown | |
1280 | lookup, modified, added, unknown, ignored = [], [], [], [], [] |
|
1580 | lookup, modified, added, unknown, ignored = [], [], [], [], [] | |
1281 | removed, deleted, clean = [], [], [] |
|
1581 | removed, deleted, clean = [], [], [] | |
@@ -1435,142 +1735,47 b' class dirstate:' | |||||
1435 | else: |
|
1735 | else: | |
1436 | return self._filename |
|
1736 | return self._filename | |
1437 |
|
1737 | |||
1438 |
def |
|
1738 | def all_file_names(self): | |
1439 | if not self._use_dirstate_v2: |
|
1739 | """list all filename currently used by this dirstate | |
1440 | return None |
|
|||
1441 | return backupname + b'.v2-data' |
|
|||
1442 |
|
||||
1443 | def _new_backup_data_filename(self, backupname): |
|
|||
1444 | """return a filename to backup a data-file or None""" |
|
|||
1445 | if not self._use_dirstate_v2: |
|
|||
1446 | return None |
|
|||
1447 | if self._map.docket.uuid is None: |
|
|||
1448 | # not created yet, nothing to backup |
|
|||
1449 | return None |
|
|||
1450 | data_filename = self._map.docket.data_filename() |
|
|||
1451 | return data_filename, self.data_backup_filename(backupname) |
|
|||
1452 |
|
||||
1453 | def backup_data_file(self, backupname): |
|
|||
1454 | if not self._use_dirstate_v2: |
|
|||
1455 | return None |
|
|||
1456 | docket = docketmod.DirstateDocket.parse( |
|
|||
1457 | self._opener.read(backupname), |
|
|||
1458 | self._nodeconstants, |
|
|||
1459 | ) |
|
|||
1460 | return self.data_backup_filename(backupname), docket.data_filename() |
|
|||
1461 |
|
||||
1462 | def savebackup(self, tr, backupname): |
|
|||
1463 | '''Save current dirstate into backup file''' |
|
|||
1464 | filename = self._actualfilename(tr) |
|
|||
1465 | assert backupname != filename |
|
|||
1466 |
|
|
1740 | ||
1467 | # use '_writedirstate' instead of 'write' to write changes certainly, |
|
1741 | This is only used to do `hg rollback` related backup in the transaction | |
1468 | # because the latter omits writing out if transaction is running. |
|
1742 | """ | |
1469 | # output file will be used to create backup of dirstate at this point. |
|
1743 | if not self._opener.exists(self._filename): | |
1470 | if self._dirty or not self._opener.exists(filename): |
|
1744 | # no data every written to disk yet | |
1471 | self._writedirstate( |
|
1745 | return () | |
1472 | tr, |
|
1746 | elif self._use_dirstate_v2: | |
1473 | self._opener(filename, b"w", atomictemp=True, checkambig=True), |
|
1747 | return ( | |
|
1748 | self._filename, | |||
|
1749 | self._map.docket.data_filename(), | |||
1474 | ) |
+            )
+        else:
+            return (self._filename,)
-
-        if tr:
-            # ensure that subsequent tr.writepending returns True for
-            # changes written out above, even if dirstate is never
-            # changed after this
-            tr.addfilegenerator(
-                b'dirstate-1-main',
-                (self._filename,),
-                lambda f: self._writedirstate(tr, f),
-                location=b'plain',
-                post_finalize=True,
-            )
-
-        # ensure that pending file written above is unlinked at
-        # failure, even if tr.writepending isn't invoked until the
-        # end of this transaction
-        tr.registertmp(filename, location=b'plain')
-
-        self._opener.tryunlink(backupname)
-        # hardlink backup is okay because _writedirstate is always called
-        # with an "atomictemp=True" file.
-        util.copyfile(
-            self._opener.join(filename),
-            self._opener.join(backupname),
-            hardlink=True,
-        )
-        data_pair = self._new_backup_data_filename(backupname)
-        if data_pair is not None:
-            data_filename, bck_data_filename = data_pair
-            util.copyfile(
-                self._opener.join(data_filename),
-                self._opener.join(bck_data_filename),
-                hardlink=True,
-            )
-            if tr is not None:
-                # ensure that pending file written above is unlinked at
-                # failure, even if tr.writepending isn't invoked until the
-                # end of this transaction
-                tr.registertmp(bck_data_filename, location=b'plain')
-
-    def restorebackup(self, tr, backupname):
-        '''Restore dirstate by backup file'''
-        # this "invalidate()" prevents "wlock.release()" from writing
-        # changes of dirstate out after restoring from backup file
-        self.invalidate()
-        o = self._opener
-        if not o.exists(backupname):
-            # there was no file backup, delete existing files
-            filename = self._actualfilename(tr)
-            data_file = None
-            if self._use_dirstate_v2 and self._map.docket.uuid is not None:
-                data_file = self._map.docket.data_filename()
-            if o.exists(filename):
-                o.unlink(filename)
-            if data_file is not None and o.exists(data_file):
-                o.unlink(data_file)
-            return
-        filename = self._actualfilename(tr)
-        data_pair = self.backup_data_file(backupname)
-        if o.exists(filename) and util.samefile(
-            o.join(backupname), o.join(filename)
-        ):
-            o.unlink(backupname)
-        else:
-            o.rename(backupname, filename, checkambig=True)
-
-        if data_pair is not None:
-            data_backup, target = data_pair
-            if o.exists(target) and util.samefile(
-                o.join(data_backup), o.join(target)
-            ):
-                o.unlink(data_backup)
-            else:
-                o.rename(data_backup, target, checkambig=True)
-
-    def clearbackup(self, tr, backupname):
-        '''Clear backup file'''
-        o = self._opener
-        if o.exists(backupname):
-            data_backup = self.backup_data_file(backupname)
-            o.unlink(backupname)
-            if data_backup is not None:
-                o.unlink(data_backup[0])
-
-    def verify(self, m1, m2):
-        """check the dirstate content again the parent manifest and yield errors"""
-        missing_from_p1 = b"%s in state %s, but not in manifest1\n"
-        unexpected_in_p1 = b"%s in state %s, but also in manifest1\n"
-        missing_from_ps = b"%s in state %s, but not in either manifest\n"
-        missing_from_ds = b"%s in manifest1, but listed as state %s\n"
-        for f, entry in self.items():
-            state = entry.state
-            if state in b"nr" and f not in m1:
-                yield (missing_from_p1, f, state)
-            if state in b"a" and f in m1:
-                yield (unexpected_in_p1, f, state)
-            if state in b"m" and f not in m1 and f not in m2:
-                yield (missing_from_ps, f, state)
-        for f in m1:
-            state = self.get_entry(f).state
-            if state not in b"nrm":
-                yield (missing_from_ds, f, state)
+
+    def verify(self, m1, m2, p1, narrow_matcher=None):
+        """
+        check the dirstate contents against the parent manifest and yield errors
+        """
+        missing_from_p1 = _(
+            b"%s marked as tracked in p1 (%s) but not in manifest1\n"
+        )
+        unexpected_in_p1 = _(b"%s marked as added, but also in manifest1\n")
+        missing_from_ps = _(
+            b"%s marked as modified, but not in either manifest\n"
+        )
+        missing_from_ds = _(
+            b"%s in manifest1, but not marked as tracked in p1 (%s)\n"
+        )
+        for f, entry in self.items():
+            if entry.p1_tracked:
+                if entry.modified and f not in m1 and f not in m2:
+                    yield missing_from_ps % f
+                elif f not in m1:
+                    yield missing_from_p1 % (f, node.short(p1))
+            if entry.added and f in m1:
+                yield unexpected_in_p1 % f
+        for f in m1:
+            if narrow_matcher is not None and not narrow_matcher(f):
+                continue
+            entry = self.get_entry(f)
+            if not entry.p1_tracked:
+                yield missing_from_ds % (f, node.short(p1))
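
The rewritten ``verify`` above is a generator: instead of the old ``(template, filename, state)`` tuples it now yields already-formatted byte strings. A hedged sketch of a caller, assuming ``verify`` stays reachable through ``repo.dirstate`` and that ``m1``, ``m2`` and ``p1`` come from the surrounding command (those names are illustrative, not part of this change)::

    # Consume the generator and report each pre-formatted bytes message;
    # nothing here is prescribed by the diff itself.
    for msg in repo.dirstate.verify(m1, m2, p1, narrow_matcher=None):
        repo.ui.warn(msg)
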
@@ -58,6 +58,34 @@ class _dirstatemapcommon:
         # for consistent view between _pl() and _read() invocations
         self._pendingmode = None

+    def _set_identity(self):
+        self.identity = self._get_current_identity()
+
+    def _get_current_identity(self):
+        try:
+            return util.cachestat(self._opener.join(self._filename))
+        except FileNotFoundError:
+            return None
+
+    def may_need_refresh(self):
+        if 'identity' not in vars(self):
+            # no existing identity, we need a refresh
+            return True
+        if self.identity is None:
+            return True
+        if not self.identity.cacheable():
+            # We cannot trust the entry
+            # XXX this is a problem on windows, NFS, or other inode less system
+            return True
+        current_identity = self._get_current_identity()
+        if current_identity is None:
+            return True
+        if not current_identity.cacheable():
+            # We cannot trust the entry
+            # XXX this is a problem on windows, NFS, or other inode less system
+            return True
+        return current_identity != self.identity
+
     def preload(self):
         """Loads the underlying data, if it's not already loaded"""
         self._map
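
The ``may_need_refresh`` logic above is a generic stat-fingerprint cache check: remember the file's identity when it was read, and reload only when a fresh fingerprint is missing, untrustworthy, or different. A self-contained sketch of the same pattern, with plain ``os.stat`` standing in for ``util.cachestat`` (all names below are illustrative)::

    import os

    class CachedFile:
        """Reload a file only when its stat fingerprint changes (a sketch
        of the identity pattern above, not Mercurial's implementation)."""

        def __init__(self, path):
            self._path = path
            self._identity = None  # (mtime_ns, size, inode) or None

        def _current_identity(self):
            try:
                st = os.stat(self._path)
            except FileNotFoundError:
                return None
            return (st.st_mtime_ns, st.st_size, st.st_ino)

        def may_need_refresh(self):
            if self._identity is None:
                return True  # never loaded, or the file was missing
            current = self._current_identity()
            return current is None or current != self._identity

        def load(self):
            self._identity = self._current_identity()
            with open(self._path, 'rb') as fh:
                return fh.read()
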
@@ -118,6 +146,9 @@ class _dirstatemapcommon:
             raise error.ProgrammingError(b'dirstate docket name collision')
         data_filename = new_docket.data_filename()
         self._opener.write(data_filename, packed)
+        # tell the transaction that we are adding a new file
+        if tr is not None:
+            tr.addbackup(data_filename, location=b'plain')
         # Write the new docket after the new data file has been
         # written. Because `st` was opened with `atomictemp=True`,
         # the actual `.hg/dirstate` file is only affected on close.
@@ -127,6 +158,8 @@ class _dirstatemapcommon:
         # the new data file was written.
         if old_docket.uuid:
             data_filename = old_docket.data_filename()
+            if tr is not None:
+                tr.addbackup(data_filename, location=b'plain')
             unlink = lambda _tr=None: self._opener.unlink(data_filename)
             if tr:
                 category = b"dirstate-v2-clean-" + old_docket.uuid
@@ -258,9 +291,7 @@ class dirstatemap(_dirstatemapcommon):

     def read(self):
         # ignore HG_PENDING because identity is used only for writing
-        self.identity = util.filestat.frompath(
-            self._opener.join(self._filename)
-        )
+        self._set_identity()

         if self._use_dirstate_v2:
             if not self.docket.uuid:
@@ -523,9 +554,7 @@ if rustmod is not None:
         Fills the Dirstatemap when called.
         """
         # ignore HG_PENDING because identity is used only for writing
-        self.identity = util.filestat.frompath(
-            self._opener.join(self._filename)
-        )
+        self._set_identity()

         if self._use_dirstate_v2:
             if self.docket.uuid:
@@ -614,6 +643,14 @@ if rustmod is not None:
         if append:
             docket = self.docket
             data_filename = docket.data_filename()
+            # We mark it for backup to make sure a future `hg rollback` (or
+            # `hg recover`?) call find the data it needs to restore a
+            # working repository.
+            #
+            # The backup can use a hardlink because the format is resistant
+            # to trailing "dead" data.
+            if tr is not None:
+                tr.addbackup(data_filename, location=b'plain')
             with self._opener(data_filename, b'r+b') as fp:
                 fp.seek(docket.data_size)
                 assert fp.tell() == docket.data_size
@@ -980,7 +980,8 @@ def _getlocal(ui, rpath, wd=None):
         lui.readconfig(os.path.join(path, b".hg", b"hgrc-not-shared"), path)

     if rpath:
-        path = urlutil.get_clone_path(lui, rpath)[0]
+        path_obj = urlutil.get_clone_path_obj(lui, rpath)
+        path = path_obj.rawloc
         lui = ui.copy()
         if rcutil.use_repo_hgrc():
            _readsharedsourceconfig(lui, path)
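
This is the first of several hunks in the series that migrate from the tuple-returning ``get_clone_path`` to ``get_clone_path_obj``, which returns an object with named attributes. A toy stand-in that mimics the attribute names used by these hunks (the parser below is invented for illustration, it is not Mercurial's)::

    from typing import NamedTuple, Optional

    class ClonePath(NamedTuple):
        """Stand-in for the urlutil path object assumed by this series."""
        rawloc: bytes            # location exactly as the user spelled it
        loc: bytes               # normalized location
        branch: Optional[bytes]  # branch from a trailing '#branch' fragment

    def get_clone_path_obj(rawloc: bytes) -> ClonePath:
        # Toy parser: split off an optional '#branch' fragment.
        loc, sep, branch = rawloc.partition(b'#')
        return ClonePath(rawloc, loc, branch if sep else None)

    # The old helper returned a bare tuple; the object keeps call sites
    # readable: path_obj.rawloc instead of result[0].
    path_obj = get_clone_path_obj(b'https://example.com/repo#stable')
    assert path_obj.loc == b'https://example.com/repo'
    assert path_obj.branch == b'stable'
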
@@ -1183,7 +1183,12 @@ def _pushbundle2(pushop):
         trgetter = None
         if pushback:
             trgetter = pushop.trmanager.transaction
-        op = bundle2.processbundle(pushop.repo, reply, trgetter)
+        op = bundle2.processbundle(
+            pushop.repo,
+            reply,
+            trgetter,
+            remote=pushop.remote,
+        )
     except error.BundleValueError as exc:
         raise error.RemoteError(_(b'missing support for %s') % exc)
     except bundle2.AbortFromPart as exc:
@@ -1903,10 +1908,18 @@ def _pullbundle2(pullop):

     try:
         op = bundle2.bundleoperation(
-            pullop.repo, pullop.gettransaction, source=b'pull'
+            pullop.repo,
+            pullop.gettransaction,
+            source=b'pull',
+            remote=pullop.remote,
         )
         op.modes[b'bookmarks'] = b'records'
-        bundle2.processbundle(pullop.repo, bundle, op=op)
+        bundle2.processbundle(
+            pullop.repo,
+            bundle,
+            op=op,
+            remote=pullop.remote,
+        )
     except bundle2.AbortFromPart as exc:
         pullop.repo.ui.error(_(b'remote: abort: %s\n') % exc)
         raise error.RemoteError(_(b'pull failed on remote'), hint=exc.hint)
@@ -1995,7 +2008,12 @@ def _pullchangeset(pullop):
     ).result()

     bundleop = bundle2.applybundle(
-        pullop.repo, cg, tr, b'pull', pullop.remote.url()
+        pullop.repo,
+        cg,
+        tr,
+        b'pull',
+        pullop.remote.url(),
+        remote=pullop.remote,
     )
     pullop.cgresult = bundle2.combinechangegroupresults(bundleop)

@@ -111,6 +111,7 @@ class filelog:
         assumehaveparentrevisions=False,
         deltamode=repository.CG_DELTAMODE_STD,
         sidedata_helpers=None,
+        debug_info=None,
     ):
         return self._revlog.emitrevisions(
             nodes,
@@ -119,6 +120,7 @@ class filelog:
             assumehaveparentrevisions=assumehaveparentrevisions,
             deltamode=deltamode,
             sidedata_helpers=sidedata_helpers,
+            debug_info=debug_info,
         )

     def addrevision(
@@ -151,6 +153,8 @@ class filelog:
         addrevisioncb=None,
         duplicaterevisioncb=None,
         maybemissingparents=False,
+        debug_info=None,
+        delta_base_reuse_policy=None,
     ):
         if maybemissingparents:
             raise error.Abort(
@@ -171,6 +175,8 @@ class filelog:
             transaction,
             addrevisioncb=addrevisioncb,
             duplicaterevisioncb=duplicaterevisioncb,
+            debug_info=debug_info,
+            delta_base_reuse_policy=delta_base_reuse_policy,
         )

     def getstrippoint(self, minlink):
@@ -158,7 +158,7 @@ def findexternaltool(ui, tool):
             continue
         p = util.lookupreg(k, _toolstr(ui, tool, b"regname"))
         if p:
-            p = procutil.findexe(p + _toolstr(ui, tool, b"regappend"
+            p = procutil.findexe(p + _toolstr(ui, tool, b"regappend"))
             if p:
                 return p
     exe = _toolstr(ui, tool, b"executable", tool)
@@ -478,8 +478,9 @@ def _merge(repo, local, other, base, mod
     """
     Uses the internal non-interactive simple merge algorithm for merging
     files. It will fail if there are any conflicts and leave markers in
     the partially merged file. Markers will have two sections, one for each
-    side of merge, unless mode equals 'union' which suppresses the markers."""
+    side of merge, unless mode equals 'union' or 'union-other-first' which
+    suppresses the markers."""
     ui = repo.ui

     try:
@@ -510,12 +511,28 @@ def _merge(repo, local, other, base, mod
 def _iunion(repo, mynode, local, other, base, toolconf, backup):
     """
     Uses the internal non-interactive simple merge algorithm for merging
-    files. It will use both left and right sides for conflict regions.
+    files. It will use both local and other sides for conflict regions by
+    adding local on top of other.
     No markers are inserted."""
     return _merge(repo, local, other, base, b'union')


 @internaltool(
+    b'union-other-first',
+    fullmerge,
+    _(
+        b"warning: conflicts while merging %s! "
+        b"(edit, then use 'hg resolve --mark')\n"
+    ),
+    precheck=_mergecheck,
+)
+def _iunion_other_first(repo, mynode, local, other, base, toolconf, backup):
+    """
+    Like :union, but add other on top of local."""
+    return _merge(repo, local, other, base, b'union-other-first')
+
+
+@internaltool(
     b'merge',
     fullmerge,
     _(
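
The only difference between ``:union`` and the new ``:union-other-first`` is which side comes first inside each conflict region. A self-contained toy illustration of that ordering, using ``difflib`` rather than Mercurial's real three-way simplemerge::

    import difflib

    def union_merge(local, other, other_first=False):
        """Toy two-way union merge: keep both sides of each conflicting
        region, without markers. This only demonstrates the ordering
        difference described above; it is not Mercurial's algorithm."""
        merged = []
        matcher = difflib.SequenceMatcher(None, local, other)
        for tag, i1, i2, j1, j2 in matcher.get_opcodes():
            if tag == 'equal':
                merged.extend(local[i1:i2])
            elif other_first:
                merged.extend(other[j1:j2] + local[i1:i2])
            else:
                merged.extend(local[i1:i2] + other[j1:j2])
        return merged

    print(union_merge(['a\n', 'local\n'], ['a\n', 'other\n']))
    # ['a\n', 'local\n', 'other\n']
    print(union_merge(['a\n', 'local\n'], ['a\n', 'other\n'], other_first=True))
    # ['a\n', 'other\n', 'local\n']

Users select it like any other internal tool, e.g. ``hg resolve --tool :union-other-first``.
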
@@ -10,6 +10,18 @@ import itertools
 import re
 import textwrap

+from typing import (
+    Callable,
+    Dict,
+    Iterable,
+    List,
+    Optional,
+    Set,
+    Tuple,
+    Union,
+    cast,
+)
+
 from .i18n import (
     _,
     gettext,
@@ -40,7 +52,16 @@ from .utils import (
     stringutil,
 )

-_exclkeywords = {
+_DocLoader = Callable[[uimod.ui], bytes]
+# Old extensions may not register with a category
+_HelpEntry = Union["_HelpEntryNoCategory", "_HelpEntryWithCategory"]
+_HelpEntryNoCategory = Tuple[List[bytes], bytes, _DocLoader]
+_HelpEntryWithCategory = Tuple[List[bytes], bytes, _DocLoader, bytes]
+_SelectFn = Callable[[object], bool]
+_SynonymTable = Dict[bytes, List[bytes]]
+_TopicHook = Callable[[uimod.ui, bytes, bytes], bytes]
+
+_exclkeywords: Set[bytes] = {
     b"(ADVANCED)",
     b"(DEPRECATED)",
     b"(EXPERIMENTAL)",
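
Read together, the aliases above encode the shape of a help-table row: a 3-tuple for old extensions, a 4-tuple when a category is present. A standalone sketch of values satisfying them (the topic itself is invented, and ``object`` stands in for ``uimod.ui``)::

    from typing import Callable, List, Tuple, Union

    DocLoader = Callable[[object], bytes]
    HelpEntryNoCategory = Tuple[List[bytes], bytes, DocLoader]
    HelpEntryWithCategory = Tuple[List[bytes], bytes, DocLoader, bytes]
    HelpEntry = Union[HelpEntryNoCategory, HelpEntryWithCategory]

    def load_doc(ui: object) -> bytes:
        return b'Illustrative topic body.'

    old_style: HelpEntryNoCategory = ([b'mytopic'], b'My Topic', load_doc)
    new_style: HelpEntryWithCategory = (
        [b'mytopic', b'alias'],
        b'My Topic',
        load_doc,
        b'concepts',  # the trailing category byte-string
    )
    entries: List[HelpEntry] = [old_style, new_style]
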
@@ -56,7 +77,7 @@ from .utils import (
 # Extensions with custom categories should insert them into this list
 # after/before the appropriate item, rather than replacing the list or
 # assuming absolute positions.
-CATEGORY_ORDER = [
+CATEGORY_ORDER: List[bytes] = [
     registrar.command.CATEGORY_REPO_CREATION,
     registrar.command.CATEGORY_REMOTE_REPO_MANAGEMENT,
     registrar.command.CATEGORY_COMMITTING,
@@ -74,7 +95,7 @@ CATEGORY_ORDER = [

 # Human-readable category names. These are translated.
 # Extensions with custom categories should add their names here.
-CATEGORY_NAMES = {
+CATEGORY_NAMES: Dict[bytes, bytes] = {
     registrar.command.CATEGORY_REPO_CREATION: b'Repository creation',
     registrar.command.CATEGORY_REMOTE_REPO_MANAGEMENT: b'Remote repository management',
     registrar.command.CATEGORY_COMMITTING: b'Change creation',
@@ -102,7 +123,7 @@ TOPIC_CATEGORY_NONE = b'none'
 # Extensions with custom categories should insert them into this list
 # after/before the appropriate item, rather than replacing the list or
 # assuming absolute positions.
-TOPIC_CATEGORY_ORDER = [
+TOPIC_CATEGORY_ORDER: List[bytes] = [
     TOPIC_CATEGORY_IDS,
     TOPIC_CATEGORY_OUTPUT,
     TOPIC_CATEGORY_CONFIG,
@@ -112,7 +133,7 @@ TOPIC_CATEGORY_ORDER = [
 ]

 # Human-readable topic category names. These are translated.
-TOPIC_CATEGORY_NAMES = {
+TOPIC_CATEGORY_NAMES: Dict[bytes, bytes] = {
     TOPIC_CATEGORY_IDS: b'Mercurial identifiers',
     TOPIC_CATEGORY_OUTPUT: b'Mercurial output',
     TOPIC_CATEGORY_CONFIG: b'Mercurial configuration',
@@ -122,7 +143,12 @@ TOPIC_CATEGORY_NAMES = {
 }


-def listexts(header, exts, indent=1, showdeprecated=False):
+def listexts(
+    header: bytes,
+    exts: Dict[bytes, bytes],
+    indent: int = 1,
+    showdeprecated: bool = False,
+) -> List[bytes]:
     '''return a text listing of the given extensions'''
     rst = []
     if exts:
@@ -135,7 +161,7 @@ def listexts(header, exts, indent=1, sho
     return rst


-def extshelp(ui):
+def extshelp(ui: uimod.ui) -> bytes:
     rst = loaddoc(b'extensions')(ui).splitlines(True)
     rst.extend(
         listexts(
@@ -153,7 +179,7 @@ def extshelp(ui):
     return doc


-def parsedefaultmarker(text):
+def parsedefaultmarker(text: bytes) -> Optional[Tuple[bytes, List[bytes]]]:
     """given a text 'abc (DEFAULT: def.ghi)',
     returns (b'abc', (b'def', b'ghi')). Otherwise return None"""
     if text[-1:] == b')':
@@ -164,7 +190,7 @@ def parsedefaultmarker(text):
         return text[:pos], item.split(b'.', 2)


-def optrst(header, options, verbose, ui):
+def optrst(header: bytes, options, verbose: bool, ui: uimod.ui) -> bytes:
     data = []
     multioccur = False
     for option in options:
@@ -220,13 +246,15 @@ def optrst(header, options, verbose, ui)
     return b''.join(rst)


-def indicateomitted(rst, omitted, notomitted=None):
+def indicateomitted(
+    rst: List[bytes], omitted: bytes, notomitted: Optional[bytes] = None
+) -> None:
     rst.append(b'\n\n.. container:: omitted\n\n %s\n\n' % omitted)
     if notomitted:
         rst.append(b'\n\n.. container:: notomitted\n\n %s\n\n' % notomitted)


-def filtercmd(ui, cmd, func, kw, doc):
+def filtercmd(ui: uimod.ui, cmd: bytes, func, kw: bytes, doc: bytes) -> bool:
     if not ui.debugflag and cmd.startswith(b"debug") and kw != b"debug":
         # Debug command, and user is not looking for those.
         return True
@@ -249,11 +277,13 @@ def filtercmd(ui, cmd, func, kw, doc):
     return False


-def filtertopic(ui, topic):
+def filtertopic(ui: uimod.ui, topic: bytes) -> bool:
     return ui.configbool(b'help', b'hidden-topic.%s' % topic, False)


-def topicmatch(ui, commands, kw):
+def topicmatch(
+    ui: uimod.ui, commands, kw: bytes
+) -> Dict[bytes, List[Tuple[bytes, bytes]]]:
     """Return help topics matching kw.

     Returns {'section': [(name, summary), ...], ...} where section is
@@ -326,10 +356,10 @@ def topicmatch(ui, commands, kw):
     return results


-def loaddoc(topic, subdir=None):
+def loaddoc(topic: bytes, subdir: Optional[bytes] = None) -> _DocLoader:
     """Return a delayed loader for help/topic.txt."""

-    def loader(ui):
+    def loader(ui: uimod.ui) -> bytes:
         package = b'mercurial.helptext'
         if subdir:
             package += b'.' + subdir
@@ -342,7 +372,7 @@ def loaddoc(topic, subdir=None):
     return loader


-internalstable = sorted(
+internalstable: List[_HelpEntryNoCategory] = sorted(
     [
         (
             [b'bid-merge'],
@@ -407,7 +437,7 @@ internalstable = sorted(
 )


-def internalshelp(ui):
+def internalshelp(ui: uimod.ui) -> bytes:
     """Generate the index for the "internals" topic."""
     lines = [
         b'To access a subtopic, use "hg help internals.{subtopic-name}"\n',
@@ -419,7 +449,7 @@ def internalshelp(ui):
     return b''.join(lines)


-helptable = sorted(
+helptable: List[_HelpEntryWithCategory] = sorted(
     [
         (
             [b'bundlespec'],
@@ -581,20 +611,27 @@ helptable = sorted(
 )

 # Maps topics with sub-topics to a list of their sub-topics.
-subtopics = {
+subtopics: Dict[bytes, List[_HelpEntryNoCategory]] = {
     b'internals': internalstable,
 }

 # Map topics to lists of callable taking the current topic help and
 # returning the updated version
-helphooks = {}
+helphooks: Dict[bytes, List[_TopicHook]] = {}


-def addtopichook(topic, rewriter):
+def addtopichook(topic: bytes, rewriter: _TopicHook) -> None:
     helphooks.setdefault(topic, []).append(rewriter)


-def makeitemsdoc(ui, topic, doc, marker, items, dedent=False):
+def makeitemsdoc(
+    ui: uimod.ui,
+    topic: bytes,
+    doc: bytes,
+    marker: bytes,
+    items: Dict[bytes, bytes],
+    dedent: bool = False,
+) -> bytes:
     """Extract docstring from the items key to function mapping, build a
     single documentation block and use it to overwrite the marker in doc.
     """
@@ -622,8 +659,10 @@ def makeitemsdoc(ui, topic, doc, marker,
     return doc.replace(marker, entries)


-def addtopicsymbols(topic, marker, symbols, dedent=False):
-    def add(ui, topic, doc):
+def addtopicsymbols(
+    topic: bytes, marker: bytes, symbols, dedent: bool = False
+) -> None:
+    def add(ui: uimod.ui, topic: bytes, doc: bytes):
         return makeitemsdoc(ui, topic, doc, marker, symbols, dedent=dedent)

     addtopichook(topic, add)
@@ -647,7 +686,7 @@ addtopicsymbols(
 )


-def inserttweakrc(ui, topic, doc):
+def inserttweakrc(ui: uimod.ui, topic: bytes, doc: bytes) -> bytes:
     marker = b'.. tweakdefaultsmarker'
     repl = uimod.tweakrc

@@ -658,7 +697,9 @@ def inserttweakrc(ui, topic, doc):
     return re.sub(br'( *)%s' % re.escape(marker), sub, doc)


-def _getcategorizedhelpcmds(ui, cmdtable, name, select=None):
+def _getcategorizedhelpcmds(
+    ui: uimod.ui, cmdtable, name: bytes, select: Optional[_SelectFn] = None
+) -> Tuple[Dict[bytes, List[bytes]], Dict[bytes, bytes], _SynonymTable]:
     # Category -> list of commands
     cats = {}
     # Command -> short description
@@ -687,16 +728,18 @@ def _getcategorizedhelpcmds(ui, cmdtable
     return cats, h, syns


-def _getcategorizedhelptopics(ui, topictable):
+def _getcategorizedhelptopics(
+    ui: uimod.ui, topictable: List[_HelpEntry]
+) -> Tuple[Dict[bytes, List[Tuple[bytes, bytes]]], Dict[bytes, List[bytes]]]:
     # Group commands by category.
     topiccats = {}
     syns = {}
     for topic in topictable:
         names, header, doc = topic[0:3]
         if len(topic) > 3 and topic[3]:
-            category = topic[3]
+            category: bytes = cast(bytes, topic[3])  # help pytype
         else:
-            category = TOPIC_CATEGORY_NONE
+            category: bytes = TOPIC_CATEGORY_NONE

         topicname = names[0]
         syns[topicname] = list(names)
@@ -709,15 +752,15 @@ addtopichook(b'config', inserttweakrc)


 def help_(
-    ui,
+    ui: uimod.ui,
     commands,
-    name,
-    unknowncmd=False,
-    full=True,
-    subtopic=None,
-    fullname=None,
+    name: bytes,
+    unknowncmd: bool = False,
+    full: bool = True,
+    subtopic: Optional[bytes] = None,
+    fullname: Optional[bytes] = None,
     **opts
-):
+) -> bytes:
     """
     Generate the help for 'name' as unformatted restructured text. If
     'name' is None, describe the commands available.
725 |
|
768 | |||
726 | opts = pycompat.byteskwargs(opts) |
|
769 | opts = pycompat.byteskwargs(opts) | |
727 |
|
770 | |||
728 |
def helpcmd(name, subtopic |
|
771 | def helpcmd(name: bytes, subtopic: Optional[bytes]) -> List[bytes]: | |
729 | try: |
|
772 | try: | |
730 | aliases, entry = cmdutil.findcmd( |
|
773 | aliases, entry = cmdutil.findcmd( | |
731 | name, commands.table, strict=unknowncmd |
|
774 | name, commands.table, strict=unknowncmd | |
@@ -826,7 +869,7 b' def help_(' | |||||
826 |
|
869 | |||
827 | return rst |
|
870 | return rst | |
828 |
|
871 | |||
829 | def helplist(select=None, **opts): |
|
872 | def helplist(select: Optional[_SelectFn] = None, **opts) -> List[bytes]: | |
830 | cats, h, syns = _getcategorizedhelpcmds( |
|
873 | cats, h, syns = _getcategorizedhelpcmds( | |
831 | ui, commands.table, name, select |
|
874 | ui, commands.table, name, select | |
832 | ) |
|
875 | ) | |
@@ -846,7 +889,7 b' def help_(' | |||||
846 | else: |
|
889 | else: | |
847 | rst.append(_(b'list of commands:\n')) |
|
890 | rst.append(_(b'list of commands:\n')) | |
848 |
|
891 | |||
849 | def appendcmds(cmds): |
|
892 | def appendcmds(cmds: Iterable[bytes]) -> None: | |
850 | cmds = sorted(cmds) |
|
893 | cmds = sorted(cmds) | |
851 | for c in cmds: |
|
894 | for c in cmds: | |
852 | display_cmd = c |
|
895 | display_cmd = c | |
@@ -955,7 +998,7 b' def help_(' | |||||
955 | ) |
|
998 | ) | |
956 | return rst |
|
999 | return rst | |
957 |
|
1000 | |||
958 | def helptopic(name, subtopic=None): |
|
1001 | def helptopic(name: bytes, subtopic: Optional[bytes] = None) -> List[bytes]: | |
959 | # Look for sub-topic entry first. |
|
1002 | # Look for sub-topic entry first. | |
960 | header, doc = None, None |
|
1003 | header, doc = None, None | |
961 | if subtopic and name in subtopics: |
|
1004 | if subtopic and name in subtopics: | |
@@ -998,7 +1041,7 b' def help_(' | |||||
998 | pass |
|
1041 | pass | |
999 | return rst |
|
1042 | return rst | |
1000 |
|
1043 | |||
1001 | def helpext(name, subtopic=None): |
|
1044 | def helpext(name: bytes, subtopic: Optional[bytes] = None) -> List[bytes]: | |
1002 | try: |
|
1045 | try: | |
1003 | mod = extensions.find(name) |
|
1046 | mod = extensions.find(name) | |
1004 | doc = gettext(pycompat.getdoc(mod)) or _(b'no help text available') |
|
1047 | doc = gettext(pycompat.getdoc(mod)) or _(b'no help text available') | |
@@ -1040,7 +1083,9 b' def help_(' | |||||
1040 | ) |
|
1083 | ) | |
1041 | return rst |
|
1084 | return rst | |
1042 |
|
1085 | |||
1043 |
def helpextcmd( |
|
1086 | def helpextcmd( | |
|
1087 | name: bytes, subtopic: Optional[bytes] = None | |||
|
1088 | ) -> List[bytes]: | |||
1044 | cmd, ext, doc = extensions.disabledcmd( |
|
1089 | cmd, ext, doc = extensions.disabledcmd( | |
1045 | ui, name, ui.configbool(b'ui', b'strict') |
|
1090 | ui, name, ui.configbool(b'ui', b'strict') | |
1046 | ) |
|
1091 | ) | |
@@ -1127,8 +1172,14 b' def help_(' | |||||
1127 |
|
1172 | |||
1128 |
|
1173 | |||
1129 | def formattedhelp( |
|
1174 | def formattedhelp( | |
1130 | ui, commands, fullname, keep=None, unknowncmd=False, full=True, **opts |
|
1175 | ui: uimod.ui, | |
1131 | ): |
|
1176 | commands, | |
|
1177 | fullname: Optional[bytes], | |||
|
1178 | keep: Optional[Iterable[bytes]] = None, | |||
|
1179 | unknowncmd: bool = False, | |||
|
1180 | full: bool = True, | |||
|
1181 | **opts | |||
|
1182 | ) -> bytes: | |||
1132 | """get help for a given topic (as a dotted name) as rendered rst |
|
1183 | """get help for a given topic (as a dotted name) as rendered rst | |
1133 |
|
1184 | |||
1134 | Either returns the rendered help text or raises an exception. |
|
1185 | Either returns the rendered help text or raises an exception. |
@@ -1922,6 +1922,42 @@ The following sub-options can be defined
 - ``ignore``: ignore bookmarks during exchange.
   (This currently only affect pulling)

+.. container:: verbose
+
+    ``delta-reuse-policy``
+      Control the policy regarding deltas sent by the remote during pulls.
+
+      This is an advanced option that non-admin users should not need to understand
+      or set. This option can be used to speed up pulls from trusted central
+      servers, or to fix-up deltas from older servers.
+
+      It supports the following values:
+
+      - ``default``: use the policy defined by
+        `storage.revlog.reuse-external-delta-parent`,
+
+      - ``no-reuse``: start a new optimal delta search for each new revision we add
+        to the repository. The deltas from the server will be reused when the base
+        it applies to is tested (this can be frequent if that base is the one and
+        unique parent of that revision). This can significantly slowdown pulls but
+        will result in an optimized storage space if the remote peer is sending poor
+        quality deltas.
+
+      - ``try-base``: try to reuse the deltas from the remote peer as long as they
+        create a valid delta-chain in the local repository. This speeds up the
+        unbundling process, but can result in sub-optimal storage space if the
+        remote peer is sending poor quality deltas.
+
+      - ``forced``: the deltas from the peer will be reused in all cases, even if
+        the resulting delta-chain is "invalid". This setting will ensure the bundle
+        is applied at minimal CPU cost, but it can result in longer delta chains
+        being created on the client, making revisions potentially slower to access
+        in the future. If you think you need this option, you should make sure you
+        are also talking to the Mercurial developer community to get confirmation.
+
+    See `hg help config.storage.revlog.reuse-external-delta-parent` for a similar
+    global option. That option defines the behavior of `default`.
+
 The following special named paths exist:

 ``default``
@@ -2281,6 +2317,21 @@ category impact performance and reposito
 To fix affected revisions that already exist within the repository, one can
 use :hg:`debug-repair-issue-6528`.

+.. container:: verbose
+
+    ``revlog.delta-parent-search.candidate-group-chunk-size``
+        Tune the number of delta bases the storage will consider in the
+        same "round" of search. In some very rare cases, using a smaller value
+        might result in faster processing at the possible expense of storage
+        space, while using larger values might result in slower processing at the
+        possible benefit of storage space. A value of "0" means no limitation.
+
+        default: no limitation
+
+        This is unlikely that you'll have to tune this configuration. If you think
+        you do, consider talking with the mercurial developer community about your
+        repositories.
+
 ``revlog.optimize-delta-parent-choice``
     When storing a merge revision, both parents will be equally considered as
     a possible delta base. This results in better delta selection and improved
@@ -76,8 +76,8 @@ instructions on how to install from sour
 MSRV
 ====

-The minimum supported Rust version is currently 1.
-to follow the version from Debian
+The minimum supported Rust version is currently 1.61.0. The project's policy is
+to follow the version from Debian testing, to make the distributions' job easier.

 rhg
 ===
@@ -65,28 +65,12 @@ release = lock.release
 sharedbookmarks = b'bookmarks'


-def _local(path):
-    path = util.expandpath(urlutil.urllocalpath(path))
-
-    try:
-        # we use os.stat() directly here instead of os.path.isfile()
-        # because the latter started returning `False` on invalid path
-        # exceptions starting in 3.8 and we care about handling
-        # invalid paths specially here.
-        st = os.stat(path)
-        isfile = stat.S_ISREG(st.st_mode)
-    except ValueError as e:
-        raise error.Abort(
-            _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
-        )
-    except OSError:
-        isfile = False
-
-    return isfile and bundlerepo or localrepo
-
-
 def addbranchrevs(lrepo, other, branches, revs):
-    peer = other.peer()  # a courtesy to callers using a localrepo for other
+    if util.safehasattr(other, 'peer'):
+        # a courtesy to callers using a localrepo for other
+        peer = other.peer()
+    else:
+        peer = other
     hashbranch, branches = branches
     if not hashbranch and not branches:
         x = revs or None
129 | return revs, revs[0] |
|
113 | return revs, revs[0] | |
130 |
|
114 | |||
131 |
|
115 | |||
132 | schemes = { |
|
116 | def _isfile(path): | |
|
117 | try: | |||
|
118 | # we use os.stat() directly here instead of os.path.isfile() | |||
|
119 | # because the latter started returning `False` on invalid path | |||
|
120 | # exceptions starting in 3.8 and we care about handling | |||
|
121 | # invalid paths specially here. | |||
|
122 | st = os.stat(path) | |||
|
123 | except ValueError as e: | |||
|
124 | msg = stringutil.forcebytestr(e) | |||
|
125 | raise error.Abort(_(b'invalid path %s: %s') % (path, msg)) | |||
|
126 | except OSError: | |||
|
127 | return False | |||
|
128 | else: | |||
|
129 | return stat.S_ISREG(st.st_mode) | |||
|
130 | ||||
|
131 | ||||
|
132 | class LocalFactory: | |||
|
133 | """thin wrapper to dispatch between localrepo and bundle repo""" | |||
|
134 | ||||
|
135 | @staticmethod | |||
|
136 | def islocal(path: bytes) -> bool: | |||
|
137 | path = util.expandpath(urlutil.urllocalpath(path)) | |||
|
138 | return not _isfile(path) | |||
|
139 | ||||
|
140 | @staticmethod | |||
|
141 | def instance(ui, path, *args, **kwargs): | |||
|
142 | path = util.expandpath(urlutil.urllocalpath(path)) | |||
|
143 | if _isfile(path): | |||
|
144 | cls = bundlerepo | |||
|
145 | else: | |||
|
146 | cls = localrepo | |||
|
147 | return cls.instance(ui, path, *args, **kwargs) | |||
|
148 | ||||
|
149 | ||||
|
150 | repo_schemes = { | |||
133 | b'bundle': bundlerepo, |
|
151 | b'bundle': bundlerepo, | |
134 | b'union': unionrepo, |
|
152 | b'union': unionrepo, | |
135 |
b'file': |
|
153 | b'file': LocalFactory, | |
|
154 | } | |||
|
155 | ||||
|
156 | peer_schemes = { | |||
136 | b'http': httppeer, |
|
157 | b'http': httppeer, | |
137 | b'https': httppeer, |
|
158 | b'https': httppeer, | |
138 | b'ssh': sshpeer, |
|
159 | b'ssh': sshpeer, | |
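
The single duck-typed ``schemes`` table is split here into ``repo_schemes`` and ``peer_schemes``, with ``LocalFactory`` deciding between a bundle file and a repository directory. A self-contained sketch of that dispatch shape, with stub classes standing in for Mercurial's real modules::

    import os

    class BundleRepoStub:
        @staticmethod
        def instance(path):
            return 'bundle repo at %s' % path

    class LocalRepoStub:
        @staticmethod
        def instance(path):
            return 'local repo at %s' % path

    class LocalFactoryStub:
        """Dispatch on the filesystem: a plain file is treated as a bundle."""

        @staticmethod
        def instance(path):
            cls = BundleRepoStub if os.path.isfile(path) else LocalRepoStub
            return cls.instance(path)

    repo_schemes = {'bundle': BundleRepoStub, 'file': LocalFactoryStub}
    peer_schemes = {'http': object, 'https': object, 'ssh': object}

    def open_repo(url):
        scheme, sep, rest = url.partition('://')
        if not sep:
            scheme, rest = 'file', url
        if scheme in peer_schemes:
            raise ValueError('%r is remote, not a local repository' % url)
        return repo_schemes.get(scheme, LocalFactoryStub).instance(rest)

    print(open_repo('/some/directory'))  # 'local repo at /some/directory'

Splitting the table makes the repo-or-peer question explicit at each call site instead of relying on whether the looked-up object happens to have an ``instance`` attribute.
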
@@ -140,27 +161,23 @@ schemes = {
 }


-def _peerlookup(path):
-    u = urlutil.url(path)
-    scheme = u.scheme or b'file'
-    thing = schemes.get(scheme) or schemes[b'file']
-    try:
-        return thing(path)
-    except TypeError:
-        # we can't test callable(thing) because 'thing' can be an unloaded
-        # module that implements __call__
-        if not util.safehasattr(thing, b'instance'):
-            raise
-        return thing
-
-
 def islocal(repo):
     '''return true if repo (or path pointing to repo) is local'''
     if isinstance(repo, bytes):
-        try:
-            return _peerlookup(repo).islocal(repo)
-        except AttributeError:
-            return False
+        u = urlutil.url(repo)
+        scheme = u.scheme or b'file'
+        if scheme in peer_schemes:
+            cls = peer_schemes[scheme]
+            cls.make_peer  # make sure we load the module
+        elif scheme in repo_schemes:
+            cls = repo_schemes[scheme]
+            cls.instance  # make sure we load the module
+        else:
+            cls = LocalFactory
+        if util.safehasattr(cls, 'islocal'):
+            return cls.islocal(repo)  # pytype: disable=module-attr
+        return False
+    repo.ui.deprecwarn(b"use obj.local() instead of islocal(obj)", b"6.4")
     return repo.local()
@@ -177,13 +194,7 @@ def openpath(ui, path, sendaccept=True):
 wirepeersetupfuncs = []


-def _peerorrepo(
-    ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
-):
-    """return a repository object for the specified path"""
-    obj = _peerlookup(path).instance(
-        ui, path, create, intents=intents, createopts=createopts
-    )
+def _setup_repo_or_peer(ui, obj, presetupfuncs=None):
     ui = getattr(obj, "ui", ui)
     for f in presetupfuncs or []:
         f(ui, obj)
@@ -195,14 +206,12 @@ def _peerorrepo(
         if hook:
             with util.timedcm('reposetup %r', name) as stats:
                 hook(ui, obj)
-            ui.log(
-                b'extension', b' > reposetup for %s took %s\n', name, stats
-            )
+            msg = b' > reposetup for %s took %s\n'
+            ui.log(b'extension', msg, name, stats)
     ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
     if not obj.local():
         for f in wirepeersetupfuncs:
             f(ui, obj)
-    return obj


 def repository(
@@ -214,28 +223,59 @@ def repository(
     createopts=None,
 ):
     """return a repository object for the specified path"""
-    peer = _peerorrepo(
+    scheme = urlutil.url(path).scheme
+    if scheme is None:
+        scheme = b'file'
+    cls = repo_schemes.get(scheme)
+    if cls is None:
+        if scheme in peer_schemes:
+            raise error.Abort(_(b"repository '%s' is not local") % path)
+        cls = LocalFactory
+    repo = cls.instance(
         ui,
         path,
         create,
-        presetupfuncs=presetupfuncs,
         intents=intents,
         createopts=createopts,
     )
-    repo = peer.local()
-    if not repo:
-        raise error.Abort(
-            _(b"repository '%s' is not local") % (path or peer.url())
-        )
+    _setup_repo_or_peer(ui, repo, presetupfuncs=presetupfuncs)
     return repo.filtered(b'visible')


 def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
     '''return a repository peer for the specified path'''
+    ui = getattr(uiorrepo, 'ui', uiorrepo)
     rui = remoteui(uiorrepo, opts)
-    return _peerorrepo(
-        rui, path, create, intents=intents, createopts=createopts
-    ).peer()
+    if util.safehasattr(path, 'url'):
+        # this is already a urlutil.path object
+        peer_path = path
+    else:
+        peer_path = urlutil.path(ui, None, rawloc=path, validate_path=False)
+    scheme = peer_path.url.scheme  # pytype: disable=attribute-error
+    if scheme in peer_schemes:
+        cls = peer_schemes[scheme]
+        peer = cls.make_peer(
+            rui,
+            peer_path,
+            create,
+            intents=intents,
+            createopts=createopts,
+        )
+        _setup_repo_or_peer(rui, peer)
+    else:
+        # this is a repository
+        repo_path = peer_path.loc  # pytype: disable=attribute-error
+        if not repo_path:
+            repo_path = peer_path.rawloc  # pytype: disable=attribute-error
+        repo = repository(
+            rui,
+            repo_path,
+            create,
+            intents=intents,
+            createopts=createopts,
+        )
+        peer = repo.peer(path=peer_path)
+    return peer


 def defaultdest(source):
@@ -290,17 +330,23 @@ def share(
 ):
     '''create a shared repository'''

-    if not islocal(source):
-        raise error.Abort(_(b'can only share local repositories'))
+    not_local_msg = _(b'can only share local repositories')
+    if util.safehasattr(source, 'local'):
+        if source.local() is None:
+            raise error.Abort(not_local_msg)
+    elif not islocal(source):
+        # XXX why are we getting bytes here ?
+        raise error.Abort(not_local_msg)

     if not dest:
         dest = defaultdest(source)
     else:
-        dest = urlutil.get_clone_path(ui, dest)[1]
+        dest = urlutil.get_clone_path_obj(ui, dest).loc

     if isinstance(source, bytes):
-        origsource, source, branches = urlutil.get_clone_path(ui, source)
-        srcrepo = repository(ui, source)
+        source_path = urlutil.get_clone_path_obj(ui, source)
+        srcrepo = repository(ui, source_path.loc)
+        branches = (source_path.branch, [])
         rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
     else:
         srcrepo = source.local()
@@ -661,12 +707,23 @@ def clone(
     """

     if isinstance(source, bytes):
-        src = urlutil.get_clone_path(ui, source, branch)
-        origsource, source, branches = src
-        srcpeer = peer(ui, peeropts, source)
+        src_path = urlutil.get_clone_path_obj(ui, source)
+        if src_path is None:
+            srcpeer = peer(ui, peeropts, b'')
+            origsource = source = b''
+            branches = (None, branch or [])
+        else:
+            srcpeer = peer(ui, peeropts, src_path)
+            origsource = src_path.rawloc
+            branches = (src_path.branch, branch or [])
+            source = src_path.loc
     else:
-        srcpeer = source.peer()  # in case we were called with a localrepo
+        if util.safehasattr(source, 'peer'):
+            srcpeer = source.peer()  # in case we were called with a localrepo
+        else:
+            srcpeer = source
         branches = (None, branch or [])
+        # XXX path: simply use the peer `path` object when this become available
         origsource = source = srcpeer.url()
     srclock = destlock = destwlock = cleandir = None
     destpeer = None
@@ -678,7 +735,11 @@ def clone(
     if dest:
         ui.status(_(b"destination directory: %s\n") % dest)
     else:
-        dest = urlutil.get_clone_path(ui, dest)[0]
+        dest_path = urlutil.get_clone_path_obj(ui, dest)
+        if dest_path is not None:
+            dest = dest_path.rawloc
+        else:
+            dest = b''

     dest = urlutil.urllocalpath(dest)
     source = urlutil.urllocalpath(source)
@@ -1271,23 +1332,28 @@ def _incoming(
         msg %= len(srcs)
         raise error.Abort(msg)
     path = srcs[0]
-    source, branches = urlutil.parseurl(path.rawloc, opts.get(b'branch'))
-    if subpath is not None:
+    if subpath is None:
+        peer_path = path
+        url = path.loc
+    else:
+        # XXX path: we are losing the `path` object here. Keeping it would be
+        # valuable. For example as a "variant" as we do for pushes.
         subpath = urlutil.url(subpath)
         if subpath.isabs():
-            source = bytes(subpath)
+            peer_path = url = bytes(subpath)
         else:
-            p = urlutil.url(source)
+            p = urlutil.url(path.loc)
             if p.islocal():
                 normpath = os.path.normpath
             else:
                 normpath = posixpath.normpath
             p.path = normpath(b'%s/%s' % (p.path, subpath))
-            source = bytes(p)
-    other = peer(repo, opts, source)
+            peer_path = url = bytes(p)
+    other = peer(repo, opts, peer_path)
     cleanupfn = other.close
     try:
-        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
+        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(url))
+        branches = (path.branch, opts.get(b'branch', []))
         revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

         if revs:
@@ -1346,7 +1412,7 @@ def _outgoing(ui, repo, dests, opts, sub
     out = set()
     others = []
     for path in urlutil.get_push_paths(repo, ui, dests):
-        dest = path.pushloc or path.loc
+        dest = path.loc
         if subpath is not None:
             subpath = urlutil.url(subpath)
             if subpath.isabs():
@@ -230,8 +230,9 @@ class requestcontext:

     def sendtemplate(self, name, **kwargs):
         """Helper function to send a response generated from a template."""
-        kwargs = pycompat.byteskwargs(kwargs)
-        self.res.setbodygen(self.tmpl.generate(name, kwargs))
+        if self.req.method != b'HEAD':
+            kwargs = pycompat.byteskwargs(kwargs)
+            self.res.setbodygen(self.tmpl.generate(name, kwargs))
         return self.res.sendresponse()

@@ -485,6 +485,7 b' class wsgiresponse:' | |||||
485 | self._bodybytes is None |
|
485 | self._bodybytes is None | |
486 | and self._bodygen is None |
|
486 | and self._bodygen is None | |
487 | and not self._bodywillwrite |
|
487 | and not self._bodywillwrite | |
|
488 | and self._req.method != b'HEAD' | |||
488 | ): |
|
489 | ): | |
489 | raise error.ProgrammingError(b'response body not defined') |
|
490 | raise error.ProgrammingError(b'response body not defined') | |
490 |
|
491 | |||
@@ -594,6 +595,8 b' class wsgiresponse:' | |||||
594 | yield chunk |
|
595 | yield chunk | |
595 | elif self._bodywillwrite: |
|
596 | elif self._bodywillwrite: | |
596 | self._bodywritefn = write |
|
597 | self._bodywritefn = write | |
|
598 | elif self._req.method == b'HEAD': | |||
|
599 | pass | |||
597 | else: |
|
600 | else: | |
598 | error.ProgrammingError(b'do not know how to send body') |
|
601 | error.ProgrammingError(b'do not know how to send body') | |
599 |
|
602 |
@@ -151,6 +151,9 b' class _httprequesthandler(httpservermod.' | |||||
151 | def do_GET(self): |
|
151 | def do_GET(self): | |
152 | self.do_POST() |
|
152 | self.do_POST() | |
153 |
|
153 | |||
|
154 | def do_HEAD(self): | |||
|
155 | self.do_POST() | |||
|
156 | ||||
154 | def do_hgweb(self): |
|
157 | def do_hgweb(self): | |
155 | self.sent_headers = False |
|
158 | self.sent_headers = False | |
156 | path, query = _splitURI(self.path) |
|
159 | path, query = _splitURI(self.path) | |
@@ -246,7 +249,11 b' class _httprequesthandler(httpservermod.' | |||||
246 | self.send_header(*h) |
|
249 | self.send_header(*h) | |
247 | if h[0].lower() == 'content-length': |
|
250 | if h[0].lower() == 'content-length': | |
248 | self.length = int(h[1]) |
|
251 | self.length = int(h[1]) | |
249 | if self.length is None and saved_status[0] != common.HTTP_NOT_MODIFIED: |
|
252 | if ( | |
|
253 | self.length is None | |||
|
254 | and saved_status[0] != common.HTTP_NOT_MODIFIED | |||
|
255 | and self.command != 'HEAD' | |||
|
256 | ): | |||
250 | self._chunked = ( |
|
257 | self._chunked = ( | |
251 | not self.close_connection and self.request_version == 'HTTP/1.1' |
|
258 | not self.close_connection and self.request_version == 'HTTP/1.1' | |
252 | ) |
|
259 | ) |
@@ -1299,6 +1299,9 b' def archive(web):' | |||||
1299 | b'sendresponse() should not emit data if writing later' |
|
1299 | b'sendresponse() should not emit data if writing later' | |
1300 | ) |
|
1300 | ) | |
1301 |
|
1301 | |||
|
1302 | if web.req.method == b'HEAD': | |||
|
1303 | return [] | |||
|
1304 | ||||
1302 | bodyfh = web.res.getbodyfile() |
|
1305 | bodyfh = web.res.getbodyfile() | |
1303 |
|
1306 | |||
1304 | archival.archive( |
|
1307 | archival.archive( |
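
Taken together, the hunks above add HTTP HEAD support to hgweb: the request handler routes HEAD through the same code path as GET/POST, the response object tolerates a missing body when the method is HEAD, and `archive` returns before writing any archive data. A minimal sketch of the same pattern for a generic WSGI callable (names here are illustrative, not hgweb's API):

    def app(environ, start_response):
        # Compute the body either way so Content-Length matches what GET
        # would send; HEAD must return identical headers and no body.
        body = b'hello, world\n'
        start_response('200 OK', [
            ('Content-Type', 'text/plain'),
            ('Content-Length', str(len(body))),
        ])
        if environ.get('REQUEST_METHOD') == 'HEAD':
            return []
        return [body]
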
@@ -382,8 +382,7 b' def parsev1commandresponse(ui, baseurl, ' | |||||
382 |
|
382 | |||
383 | class httppeer(wireprotov1peer.wirepeer): |
|
383 | class httppeer(wireprotov1peer.wirepeer): | |
384 | def __init__(self, ui, path, url, opener, requestbuilder, caps): |
|
384 | def __init__(self, ui, path, url, opener, requestbuilder, caps): | |
385 | self.ui = ui |
|
385 | super().__init__(ui, path=path) | |
386 | self._path = path |
|
|||
387 | self._url = url |
|
386 | self._url = url | |
388 | self._caps = caps |
|
387 | self._caps = caps | |
389 | self.limitedarguments = caps is not None and b'httppostargs' not in caps |
|
388 | self.limitedarguments = caps is not None and b'httppostargs' not in caps | |
@@ -398,14 +397,11 b' class httppeer(wireprotov1peer.wirepeer)' | |||||
398 | # Begin of ipeerconnection interface. |
|
397 | # Begin of ipeerconnection interface. | |
399 |
|
398 | |||
400 | def url(self): |
|
399 | def url(self): | |
401 | return self._url |
|
400 | return self.path.loc | |
402 |
|
401 | |||
403 | def local(self): |
|
402 | def local(self): | |
404 | return None |
|
403 | return None | |
405 |
|
404 | |||
406 | def peer(self): |
|
|||
407 | return self |
|
|||
408 |
|
||||
409 | def canpush(self): |
|
405 | def canpush(self): | |
410 | return True |
|
406 | return True | |
411 |
|
407 | |||
@@ -605,14 +601,13 b' def makepeer(ui, path, opener=None, requ' | |||||
605 | ``requestbuilder`` is the type used for constructing HTTP requests. |
|
601 | ``requestbuilder`` is the type used for constructing HTTP requests. | |
606 | It exists as an argument so extensions can override the default. |
|
602 | It exists as an argument so extensions can override the default. | |
607 | """ |
|
603 | """ | |
608 | u = urlutil.url(path) |
|
604 | if path.url.query or path.url.fragment: | |
609 | if u.query or u.fragment: |
|
605 | msg = _(b'unsupported URL component: "%s"') | |
610 | raise error.Abort( |
|
606 | msg %= path.url.query or path.url.fragment | |
611 | _(b'unsupported URL component: "%s"') % (u.query or u.fragment) |
|
607 | raise error.Abort(msg) | |
612 | ) |
|
|||
613 |
|
608 | |||
614 | # urllib cannot handle URLs with embedded user or passwd. |
|
609 | # urllib cannot handle URLs with embedded user or passwd. | |
615 | url, authinfo = u.authinfo() |
|
610 | url, authinfo = path.url.authinfo() | |
616 | ui.debug(b'using %s\n' % url) |
|
611 | ui.debug(b'using %s\n' % url) | |
617 |
|
612 | |||
618 | opener = opener or urlmod.opener(ui, authinfo) |
|
613 | opener = opener or urlmod.opener(ui, authinfo) | |
@@ -624,11 +619,11 b' def makepeer(ui, path, opener=None, requ' | |||||
624 | ) |
|
619 | ) | |
625 |
|
620 | |||
626 |
|
621 | |||
627 | def instance(ui, path, create, intents=None, createopts=None): |
|
622 | def make_peer(ui, path, create, intents=None, createopts=None): | |
628 | if create: |
|
623 | if create: | |
629 | raise error.Abort(_(b'cannot create new http repository')) |
|
624 | raise error.Abort(_(b'cannot create new http repository')) | |
630 | try: |
|
625 | try: | |
631 | if path.startswith(b'https:') and not urlmod.has_https: |
|
626 | if path.url.scheme == b'https' and not urlmod.has_https: | |
632 | raise error.Abort( |
|
627 | raise error.Abort( | |
633 | _(b'Python support for SSL and HTTPS is not installed') |
|
628 | _(b'Python support for SSL and HTTPS is not installed') | |
634 | ) |
|
629 | ) | |
@@ -638,7 +633,7 b' def instance(ui, path, create, intents=N' | |||||
638 | return inst |
|
633 | return inst | |
639 | except error.RepoError as httpexception: |
|
634 | except error.RepoError as httpexception: | |
640 | try: |
|
635 | try: | |
641 | r = statichttprepo.instance(ui, b"static-" + path, create) |
|
636 | r = statichttprepo.make_peer(ui, b"static-" + path.loc, create) | |
642 | ui.note(_(b'(falling back to static-http)\n')) |
|
637 | ui.note(_(b'(falling back to static-http)\n')) | |
643 | return r |
|
638 | return r | |
644 | except error.RepoError: |
|
639 | except error.RepoError: |
@@ -12,6 +12,7 b' class idirstate(interfaceutil.Interface)' | |||||
12 | sparsematchfn, |
|
12 | sparsematchfn, | |
13 | nodeconstants, |
|
13 | nodeconstants, | |
14 | use_dirstate_v2, |
|
14 | use_dirstate_v2, | |
|
15 | use_tracked_hint=False, | |||
15 | ): |
|
16 | ): | |
16 | """Create a new dirstate object. |
|
17 | """Create a new dirstate object. | |
17 |
|
18 | |||
@@ -23,6 +24,15 b' class idirstate(interfaceutil.Interface)' | |||||
23 | # TODO: all these private methods and attributes should be made |
|
24 | # TODO: all these private methods and attributes should be made | |
24 | # public or removed from the interface. |
|
25 | # public or removed from the interface. | |
25 | _ignore = interfaceutil.Attribute("""Matcher for ignored files.""") |
|
26 | _ignore = interfaceutil.Attribute("""Matcher for ignored files.""") | |
|
27 | is_changing_any = interfaceutil.Attribute( | |||
|
28 | """True if any changes in progress.""" | |||
|
29 | ) | |||
|
30 | is_changing_parents = interfaceutil.Attribute( | |||
|
31 | """True if parents changes in progress.""" | |||
|
32 | ) | |||
|
33 | is_changing_files = interfaceutil.Attribute( | |||
|
34 | """True if file tracking changes in progress.""" | |||
|
35 | ) | |||
26 |
|
36 | |||
27 | def _ignorefiles(): |
|
37 | def _ignorefiles(): | |
28 | """Return a list of files containing patterns to ignore.""" |
|
38 | """Return a list of files containing patterns to ignore.""" | |
@@ -34,7 +44,7 b' class idirstate(interfaceutil.Interface)' | |||||
34 | _checkexec = interfaceutil.Attribute("""Callable for checking exec bits.""") |
|
44 | _checkexec = interfaceutil.Attribute("""Callable for checking exec bits.""") | |
35 |
|
45 | |||
36 | @contextlib.contextmanager |
|
46 | @contextlib.contextmanager | |
37 | def parentchange(): |
|
47 | def changing_parents(repo): | |
38 | """Context manager for handling dirstate parents. |
|
48 | """Context manager for handling dirstate parents. | |
39 |
|
49 | |||
40 | If an exception occurs in the scope of the context manager, |
|
50 | If an exception occurs in the scope of the context manager, | |
@@ -42,16 +52,26 b' class idirstate(interfaceutil.Interface)' | |||||
42 | released. |
|
52 | released. | |
43 | """ |
|
53 | """ | |
44 |
|
54 | |||
45 | def pendingparentchange(): |
|
55 | @contextlib.contextmanager | |
46 | """Returns true if the dirstate is in the middle of a set of changes |
|
56 | def changing_files(repo): | |
47 | that modify the dirstate parent. |
|
57 | """Context manager for handling dirstate files. | |
|
58 | ||||
|
59 | If an exception occurs in the scope of the context manager, | |||
|
60 | the incoherent dirstate won't be written when wlock is | |||
|
61 | released. | |||
48 | """ |
|
62 | """ | |
49 |
|
63 | |||
50 | def hasdir(d): |
|
64 | def hasdir(d): | |
51 | pass |
|
65 | pass | |
52 |
|
66 | |||
53 | def flagfunc(buildfallback): |
|
67 | def flagfunc(buildfallback): | |
54 | pass |
|
68 | """build a callable that returns flags associated with a filename | |
|
69 | ||||
|
70 | The information is extracted from three possible layers: | |||
|
71 | 1. the file system if it supports the information | |||
|
72 | 2. the "fallback" information stored in the dirstate if any | |||
|
73 | 3. a more expensive mechanism inferring the flags from the parents. | |||
|
74 | """ | |||
55 |
|
75 | |||
56 | def getcwd(): |
|
76 | def getcwd(): | |
57 | """Return the path from which a canonical path is calculated. |
|
77 | """Return the path from which a canonical path is calculated. | |
@@ -61,12 +81,12 b' class idirstate(interfaceutil.Interface)' | |||||
61 | used to get real file paths. Use vfs functions instead. |
|
81 | used to get real file paths. Use vfs functions instead. | |
62 | """ |
|
82 | """ | |
63 |
|
83 | |||
|
84 | def pathto(f, cwd=None): | |||
|
85 | pass | |||
|
86 | ||||
64 | def get_entry(path): |
|
87 | def get_entry(path): | |
65 | """return a DirstateItem for the associated path""" |
|
88 | """return a DirstateItem for the associated path""" | |
66 |
|
89 | |||
67 | def pathto(f, cwd=None): |
|
|||
68 | pass |
|
|||
69 |
|
||||
70 | def __contains__(key): |
|
90 | def __contains__(key): | |
71 | """Check if bytestring `key` is known to the dirstate.""" |
|
91 | """Check if bytestring `key` is known to the dirstate.""" | |
72 |
|
92 | |||
@@ -96,7 +116,7 b' class idirstate(interfaceutil.Interface)' | |||||
96 | def setparents(p1, p2=None): |
|
116 | def setparents(p1, p2=None): | |
97 | """Set dirstate parents to p1 and p2. |
|
117 | """Set dirstate parents to p1 and p2. | |
98 |
|
118 | |||
99 | When moving from two parents to one, 'merged' entries a |
|
119 | When moving from two parents to one, "merged" entries a | |
100 | adjusted to normal and previous copy records discarded and |
|
120 | adjusted to normal and previous copy records discarded and | |
101 | returned by the call. |
|
121 | returned by the call. | |
102 |
|
122 | |||
@@ -147,7 +167,7 b' class idirstate(interfaceutil.Interface)' | |||||
147 | pass |
|
167 | pass | |
148 |
|
168 | |||
149 | def identity(): |
|
169 | def identity(): | |
150 | """Return identity of dirstate it to detect changing in storage |
|
170 | """Return identity of dirstate itself to detect changing in storage | |
151 |
|
171 | |||
152 | If identity of previous dirstate is equal to this, writing |
|
172 | If identity of previous dirstate is equal to this, writing | |
153 | changes based on the former dirstate out can keep consistency. |
|
173 | changes based on the former dirstate out can keep consistency. | |
@@ -200,11 +220,7 b' class idirstate(interfaceutil.Interface)' | |||||
200 | return files in the dirstate (in whatever state) filtered by match |
|
220 | return files in the dirstate (in whatever state) filtered by match | |
201 | """ |
|
221 | """ | |
202 |
|
222 | |||
203 | def savebackup(tr, backupname): |
|
223 | def verify(m1, m2, p1, narrow_matcher=None): | |
204 | '''Save current dirstate into backup file''' |
|
224 | """ | |
205 |
|
225 | check the dirstate contents against the parent manifest and yield errors | ||
206 | def restorebackup(tr, backupname): |
|
226 | """ | |
207 | '''Restore dirstate by backup file''' |
|
|||
208 |
|
||||
209 | def clearbackup(tr, backupname): |
|
|||
210 | '''Clear backup file''' |
|
@@ -103,6 +103,7 b' class ipeerconnection(interfaceutil.Inte' | |||||
103 | """ |
|
103 | """ | |
104 |
|
104 | |||
105 | ui = interfaceutil.Attribute("""ui.ui instance""") |
|
105 | ui = interfaceutil.Attribute("""ui.ui instance""") | |
|
106 | path = interfaceutil.Attribute("""a urlutil.path instance or None""") | |||
106 |
|
107 | |||
107 | def url(): |
|
108 | def url(): | |
108 | """Returns a URL string representing this peer. |
|
109 | """Returns a URL string representing this peer. | |
@@ -123,12 +124,6 b' class ipeerconnection(interfaceutil.Inte' | |||||
123 | can be used to interface with it. Otherwise returns ``None``. |
|
124 | can be used to interface with it. Otherwise returns ``None``. | |
124 | """ |
|
125 | """ | |
125 |
|
126 | |||
126 | def peer(): |
|
|||
127 | """Returns an object conforming to this interface. |
|
|||
128 |
|
||||
129 | Most implementations will ``return self``. |
|
|||
130 | """ |
|
|||
131 |
|
||||
132 | def canpush(): |
|
127 | def canpush(): | |
133 | """Returns a boolean indicating if this peer can be pushed to.""" |
|
128 | """Returns a boolean indicating if this peer can be pushed to.""" | |
134 |
|
129 | |||
@@ -393,6 +388,10 b' class peer:' | |||||
393 |
|
388 | |||
394 | limitedarguments = False |
|
389 | limitedarguments = False | |
395 |
|
390 | |||
|
391 | def __init__(self, ui, path=None): | |||
|
392 | self.ui = ui | |||
|
393 | self.path = path | |||
|
394 | ||||
396 | def capable(self, name): |
|
395 | def capable(self, name): | |
397 | caps = self.capabilities() |
|
396 | caps = self.capabilities() | |
398 | if name in caps: |
|
397 | if name in caps: | |
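
With `peer.__init__` now owning `ui` and an optional `path`, and `peer()` removed from `ipeerconnection`, concrete peers simply chain to the base constructor. A toy sketch of the new shape (stand-in classes, not the real ones):

    class BasePeer:
        limitedarguments = False

        def __init__(self, ui, path=None):
            # The base class now stores both attributes; subclasses stop
            # assigning self.ui or a private path by hand.
            self.ui = ui
            self.path = path

    class HttpLikePeer(BasePeer):
        def __init__(self, ui, path, url, caps):
            super().__init__(ui, path=path)
            self._url = url
            self._caps = caps

        def url(self):
            # Mirrors the httppeer hunk: report the path object's location.
            return self.path.loc if self.path is not None else self._url
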
@@ -1613,7 +1612,7 b' class ilocalrepositorymain(interfaceutil' | |||||
1613 | def close(): |
|
1612 | def close(): | |
1614 | """Close the handle on this repository.""" |
|
1613 | """Close the handle on this repository.""" | |
1615 |
|
1614 | |||
1616 | def peer(): |
|
1615 | def peer(path=None): | |
1617 | """Obtain an object conforming to the ``peer`` interface.""" |
|
1616 | """Obtain an object conforming to the ``peer`` interface.""" | |
1618 |
|
1617 | |||
1619 | def unfiltered(): |
|
1618 | def unfiltered(): |
@@ -10,11 +10,16 b'' | |||||
10 | import functools |
|
10 | import functools | |
11 | import os |
|
11 | import os | |
12 | import random |
|
12 | import random | |
|
13 | import re | |||
13 | import sys |
|
14 | import sys | |
14 | import time |
|
15 | import time | |
15 | import weakref |
|
16 | import weakref | |
16 |
|
17 | |||
17 | from concurrent import futures |
|
18 | from concurrent import futures | |
|
19 | from typing import ( | |||
|
20 | Optional, | |||
|
21 | ) | |||
|
22 | ||||
18 | from .i18n import _ |
|
23 | from .i18n import _ | |
19 | from .node import ( |
|
24 | from .node import ( | |
20 | bin, |
|
25 | bin, | |
@@ -37,7 +42,6 b' from . import (' | |||||
37 | commit, |
|
42 | commit, | |
38 | context, |
|
43 | context, | |
39 | dirstate, |
|
44 | dirstate, | |
40 | dirstateguard, |
|
|||
41 | discovery, |
|
45 | discovery, | |
42 | encoding, |
|
46 | encoding, | |
43 | error, |
|
47 | error, | |
@@ -96,6 +100,8 b' release = lockmod.release' | |||||
96 | urlerr = util.urlerr |
|
100 | urlerr = util.urlerr | |
97 | urlreq = util.urlreq |
|
101 | urlreq = util.urlreq | |
98 |
|
102 | |||
|
103 | RE_SKIP_DIRSTATE_ROLLBACK = re.compile(b"^(dirstate|narrowspec.dirstate).*") | |||
|
104 | ||||
99 | # set of (path, vfs-location) tuples. vfs-location is: |
|
105 | # set of (path, vfs-location) tuples. vfs-location is: | |
100 | # - 'plain' for vfs relative paths |
|
106 | # - 'plain' for vfs relative paths | |
101 | # - '' for svfs relative paths |
|
107 | # - '' for svfs relative paths | |
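
A quick, runnable check of what the new `RE_SKIP_DIRSTATE_ROLLBACK` pattern matches (pattern copied verbatim from the hunk above; note the unescaped `.` matches any byte in that position):

    import re

    RE_SKIP_DIRSTATE_ROLLBACK = re.compile(b"^(dirstate|narrowspec.dirstate).*")

    for entry in (b'dirstate', b'narrowspec.dirstate', b'narrowspec', b'phaseroots'):
        print(entry, bool(RE_SKIP_DIRSTATE_ROLLBACK.match(entry)))
    # dirstate and narrowspec.dirstate are skipped on a rollback that keeps
    # the working copy parents; narrowspec and phaseroots are not.
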
@@ -299,13 +305,12 b' class localcommandexecutor:' | |||||
299 | class localpeer(repository.peer): |
|
305 | class localpeer(repository.peer): | |
300 | '''peer for a local repo; reflects only the most recent API''' |
|
306 | '''peer for a local repo; reflects only the most recent API''' | |
301 |
|
307 | |||
302 | def __init__(self, repo, caps=None): |
|
308 | def __init__(self, repo, caps=None, path=None): | |
303 | super(localpeer, self).__init__() |
|
309 | super(localpeer, self).__init__(repo.ui, path=path) | |
304 |
|
310 | |||
305 | if caps is None: |
|
311 | if caps is None: | |
306 | caps = moderncaps.copy() |
|
312 | caps = moderncaps.copy() | |
307 | self._repo = repo.filtered(b'served') |
|
313 | self._repo = repo.filtered(b'served') | |
308 | self.ui = repo.ui |
|
|||
309 |
|
314 | |||
310 | if repo._wanted_sidedata: |
|
315 | if repo._wanted_sidedata: | |
311 | formatted = bundle2.format_remote_wanted_sidedata(repo) |
|
316 | formatted = bundle2.format_remote_wanted_sidedata(repo) | |
@@ -321,9 +326,6 b' class localpeer(repository.peer):' | |||||
321 | def local(self): |
|
326 | def local(self): | |
322 | return self._repo |
|
327 | return self._repo | |
323 |
|
328 | |||
324 | def peer(self): |
|
|||
325 | return self |
|
|||
326 |
|
||||
327 | def canpush(self): |
|
329 | def canpush(self): | |
328 | return True |
|
330 | return True | |
329 |
|
331 | |||
@@ -451,8 +453,8 b' class locallegacypeer(localpeer):' | |||||
451 | """peer extension which implements legacy methods too; used for tests with |
|
453 | """peer extension which implements legacy methods too; used for tests with | |
452 | restricted capabilities""" |
|
454 | restricted capabilities""" | |
453 |
|
455 | |||
454 | def __init__(self, repo): |
|
456 | def __init__(self, repo, path=None): | |
455 | super(locallegacypeer, self).__init__(repo, caps=legacycaps) |
|
457 | super(locallegacypeer, self).__init__(repo, caps=legacycaps, path=path) | |
456 |
|
458 | |||
457 | # Begin of baselegacywirecommands interface. |
|
459 | # Begin of baselegacywirecommands interface. | |
458 |
|
460 | |||
@@ -526,7 +528,7 b' def _readrequires(vfs, allowmissing):' | |||||
526 | return set(read(b'requires').splitlines()) |
|
528 | return set(read(b'requires').splitlines()) | |
527 |
|
529 | |||
528 |
|
530 | |||
529 | def makelocalrepository(baseui, path, intents=None): |
|
531 | def makelocalrepository(baseui, path: bytes, intents=None): | |
530 | """Create a local repository object. |
|
532 | """Create a local repository object. | |
531 |
|
533 | |||
532 | Given arguments needed to construct a local repository, this function |
|
534 | Given arguments needed to construct a local repository, this function | |
@@ -612,7 +614,6 b' def makelocalrepository(baseui, path, in' | |||||
612 | # to be reshared |
|
614 | # to be reshared | |
613 | hint = _(b"see `hg help config.format.use-share-safe` for more information") |
|
615 | hint = _(b"see `hg help config.format.use-share-safe` for more information") | |
614 | if requirementsmod.SHARESAFE_REQUIREMENT in requirements: |
|
616 | if requirementsmod.SHARESAFE_REQUIREMENT in requirements: | |
615 |
|
||||
616 | if ( |
|
617 | if ( | |
617 | shared |
|
618 | shared | |
618 | and requirementsmod.SHARESAFE_REQUIREMENT |
|
619 | and requirementsmod.SHARESAFE_REQUIREMENT | |
@@ -845,7 +846,13 b' def makelocalrepository(baseui, path, in' | |||||
845 | ) |
|
846 | ) | |
846 |
|
847 | |||
847 |
|
848 | |||
848 | def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None): |
|
849 | def loadhgrc( | |
|
850 | ui, | |||
|
851 | wdirvfs: vfsmod.vfs, | |||
|
852 | hgvfs: vfsmod.vfs, | |||
|
853 | requirements, | |||
|
854 | sharedvfs: Optional[vfsmod.vfs] = None, | |||
|
855 | ): | |||
849 | """Load hgrc files/content into a ui instance. |
|
856 | """Load hgrc files/content into a ui instance. | |
850 |
|
857 | |||
851 | This is called during repository opening to load any additional |
|
858 | This is called during repository opening to load any additional | |
@@ -1058,6 +1065,8 b' def resolverevlogstorevfsoptions(ui, req' | |||||
1058 | options[b'revlogv2'] = True |
|
1065 | options[b'revlogv2'] = True | |
1059 | if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements: |
|
1066 | if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements: | |
1060 | options[b'changelogv2'] = True |
|
1067 | options[b'changelogv2'] = True | |
|
1068 | cmp_rank = ui.configbool(b'experimental', b'changelog-v2.compute-rank') | |||
|
1069 | options[b'changelogv2.compute-rank'] = cmp_rank | |||
1061 |
|
1070 | |||
1062 | if requirementsmod.GENERALDELTA_REQUIREMENT in requirements: |
|
1071 | if requirementsmod.GENERALDELTA_REQUIREMENT in requirements: | |
1063 | options[b'generaldelta'] = True |
|
1072 | options[b'generaldelta'] = True | |
@@ -1071,6 +1080,11 b' def resolverevlogstorevfsoptions(ui, req' | |||||
1071 | b'storage', b'revlog.optimize-delta-parent-choice' |
|
1080 | b'storage', b'revlog.optimize-delta-parent-choice' | |
1072 | ) |
|
1081 | ) | |
1073 | options[b'deltabothparents'] = deltabothparents |
|
1082 | options[b'deltabothparents'] = deltabothparents | |
|
1083 | dps_cgds = ui.configint( | |||
|
1084 | b'storage', | |||
|
1085 | b'revlog.delta-parent-search.candidate-group-chunk-size', | |||
|
1086 | ) | |||
|
1087 | options[b'delta-parent-search.candidate-group-chunk-size'] = dps_cgds | |||
1074 | options[b'debug-delta'] = ui.configbool(b'debug', b'revlog.debug-delta') |
|
1088 | options[b'debug-delta'] = ui.configbool(b'debug', b'revlog.debug-delta') | |
1075 |
|
1089 | |||
1076 | issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming') |
|
1090 | issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming') | |
@@ -1311,8 +1325,6 b' class localrepository:' | |||||
1311 | # XXX cache is a complicated business someone |
|
1325 | # XXX cache is a complicated business someone | |
1312 | # should investigate this in depth at some point |
|
1326 | # should investigate this in depth at some point | |
1313 | b'cache/', |
|
1327 | b'cache/', | |
1314 | # XXX shouldn't be dirstate covered by the wlock? |
|
|||
1315 | b'dirstate', |
|
|||
1316 | # XXX bisect was still a bit too messy at the time |
|
1328 | # XXX bisect was still a bit too messy at the time | |
1317 | # this changeset was introduced. Someone should fix |
|
1329 | # this changeset was introduced. Someone should fix | |
1318 | # the remaining bit and drop this line |
|
1330 | # the remaining bit and drop this line | |
@@ -1323,15 +1335,15 b' class localrepository:' | |||||
1323 | self, |
|
1335 | self, | |
1324 | baseui, |
|
1336 | baseui, | |
1325 | ui, |
|
1337 | ui, | |
1326 | origroot, |
|
1338 | origroot: bytes, | |
1327 | wdirvfs, |
|
1339 | wdirvfs: vfsmod.vfs, | |
1328 | hgvfs, |
|
1340 | hgvfs: vfsmod.vfs, | |
1329 | requirements, |
|
1341 | requirements, | |
1330 | supportedrequirements, |
|
1342 | supportedrequirements, | |
1331 | sharedpath, |
|
1343 | sharedpath: bytes, | |
1332 | store, |
|
1344 | store, | |
1333 | cachevfs, |
|
1345 | cachevfs: vfsmod.vfs, | |
1334 | wcachevfs, |
|
1346 | wcachevfs: vfsmod.vfs, | |
1335 | features, |
|
1347 | features, | |
1336 | intents=None, |
|
1348 | intents=None, | |
1337 | ): |
|
1349 | ): | |
@@ -1453,6 +1465,7 b' class localrepository:' | |||||
1453 | # - bookmark changes |
|
1465 | # - bookmark changes | |
1454 | self.filteredrevcache = {} |
|
1466 | self.filteredrevcache = {} | |
1455 |
|
1467 | |||
|
1468 | self._dirstate = None | |||
1456 | # post-dirstate-status hooks |
|
1469 | # post-dirstate-status hooks | |
1457 | self._postdsstatus = [] |
|
1470 | self._postdsstatus = [] | |
1458 |
|
1471 | |||
@@ -1620,8 +1633,8 b' class localrepository:' | |||||
1620 | parts.pop() |
|
1633 | parts.pop() | |
1621 | return False |
|
1634 | return False | |
1622 |
|
1635 | |||
1623 | def peer(self): |
|
1636 | def peer(self, path=None): | |
1624 | return localpeer(self) # not cached to avoid reference cycle |
|
1637 | return localpeer(self, path=path) # not cached to avoid reference cycle | |
1625 |
|
1638 | |||
1626 | def unfiltered(self): |
|
1639 | def unfiltered(self): | |
1627 | """Return unfiltered version of the repository |
|
1640 | """Return unfiltered version of the repository | |
@@ -1738,9 +1751,13 b' class localrepository:' | |||||
1738 | def manifestlog(self): |
|
1751 | def manifestlog(self): | |
1739 | return self.store.manifestlog(self, self._storenarrowmatch) |
|
1752 | return self.store.manifestlog(self, self._storenarrowmatch) | |
1740 |
|
1753 | |||
1741 | @repofilecache(b'dirstate') |
|
1754 | @unfilteredpropertycache | |
1742 | def dirstate(self): |
|
1755 | def dirstate(self): | |
1743 | return self._makedirstate() |
|
1756 | if self._dirstate is None: | |
|
1757 | self._dirstate = self._makedirstate() | |||
|
1758 | else: | |||
|
1759 | self._dirstate.refresh() | |||
|
1760 | return self._dirstate | |||
1744 |
|
1761 | |||
1745 | def _makedirstate(self): |
|
1762 | def _makedirstate(self): | |
1746 | """Extension point for wrapping the dirstate per-repo.""" |
|
1763 | """Extension point for wrapping the dirstate per-repo.""" | |
@@ -1977,7 +1994,7 b' class localrepository:' | |||||
1977 | def __iter__(self): |
|
1994 | def __iter__(self): | |
1978 | return iter(self.changelog) |
|
1995 | return iter(self.changelog) | |
1979 |
|
1996 | |||
1980 | def revs(self, expr, *args): |
|
1997 | def revs(self, expr: bytes, *args): | |
1981 | """Find revisions matching a revset. |
|
1998 | """Find revisions matching a revset. | |
1982 |
|
1999 | |||
1983 | The revset is specified as a string ``expr`` that may contain |
|
2000 | The revset is specified as a string ``expr`` that may contain | |
@@ -1993,7 +2010,7 b' class localrepository:' | |||||
1993 | tree = revsetlang.spectree(expr, *args) |
|
2010 | tree = revsetlang.spectree(expr, *args) | |
1994 | return revset.makematcher(tree)(self) |
|
2011 | return revset.makematcher(tree)(self) | |
1995 |
|
2012 | |||
1996 | def set(self, expr, *args): |
|
2013 | def set(self, expr: bytes, *args): | |
1997 | """Find revisions matching a revset and emit changectx instances. |
|
2014 | """Find revisions matching a revset and emit changectx instances. | |
1998 |
|
2015 | |||
1999 | This is a convenience wrapper around ``revs()`` that iterates the |
|
2016 | This is a convenience wrapper around ``revs()`` that iterates the | |
@@ -2005,7 +2022,7 b' class localrepository:' | |||||
2005 | for r in self.revs(expr, *args): |
|
2022 | for r in self.revs(expr, *args): | |
2006 | yield self[r] |
|
2023 | yield self[r] | |
2007 |
|
2024 | |||
2008 | def anyrevs(self, specs, user=False, localalias=None): |
|
2025 | def anyrevs(self, specs: bytes, user=False, localalias=None): | |
2009 | """Find revisions matching one of the given revsets. |
|
2026 | """Find revisions matching one of the given revsets. | |
2010 |
|
2027 | |||
2011 | Revset aliases from the configuration are not expanded by default. To |
|
2028 | Revset aliases from the configuration are not expanded by default. To | |
@@ -2030,7 +2047,7 b' class localrepository:' | |||||
2030 | m = revset.matchany(None, specs, localalias=localalias) |
|
2047 | m = revset.matchany(None, specs, localalias=localalias) | |
2031 | return m(self) |
|
2048 | return m(self) | |
2032 |
|
2049 | |||
2033 | def url(self): |
|
2050 | def url(self) -> bytes: | |
2034 | return b'file:' + self.root |
|
2051 | return b'file:' + self.root | |
2035 |
|
2052 | |||
2036 | def hook(self, name, throw=False, **args): |
|
2053 | def hook(self, name, throw=False, **args): | |
@@ -2108,7 +2125,7 b' class localrepository:' | |||||
2108 | # writing to the cache), but the rest of Mercurial wants them in |
|
2125 | # writing to the cache), but the rest of Mercurial wants them in | |
2109 | # local encoding. |
|
2126 | # local encoding. | |
2110 | tags = {} |
|
2127 | tags = {} | |
2111 | for name, (node, hist) in pycompat.iteritems(alltags): |
|
2128 | for name, (node, hist) in alltags.items(): | |
2112 | if node != self.nullid: |
|
2129 | if node != self.nullid: | |
2113 | tags[encoding.tolocal(name)] = node |
|
2130 | tags[encoding.tolocal(name)] = node | |
2114 | tags[b'tip'] = self.changelog.tip() |
|
2131 | tags[b'tip'] = self.changelog.tip() | |
@@ -2229,7 +2246,7 b' class localrepository:' | |||||
2229 | return b'store' |
|
2246 | return b'store' | |
2230 | return None |
|
2247 | return None | |
2231 |
|
2248 | |||
2232 | def wjoin(self, f, *insidef): |
|
2249 | def wjoin(self, f: bytes, *insidef: bytes) -> bytes: | |
2233 | return self.vfs.reljoin(self.root, f, *insidef) |
|
2250 | return self.vfs.reljoin(self.root, f, *insidef) | |
2234 |
|
2251 | |||
2235 | def setparents(self, p1, p2=None): |
|
2252 | def setparents(self, p1, p2=None): | |
@@ -2238,17 +2255,17 b' class localrepository:' | |||||
2238 | self[None].setparents(p1, p2) |
|
2255 | self[None].setparents(p1, p2) | |
2239 | self._quick_access_changeid_invalidate() |
|
2256 | self._quick_access_changeid_invalidate() | |
2240 |
|
2257 | |||
2241 | def filectx(self, path, changeid=None, fileid=None, changectx=None): |
|
2258 | def filectx(self, path: bytes, changeid=None, fileid=None, changectx=None): | |
2242 | """changeid must be a changeset revision, if specified. |
|
2259 | """changeid must be a changeset revision, if specified. | |
2243 | fileid can be a file revision or node.""" |
|
2260 | fileid can be a file revision or node.""" | |
2244 | return context.filectx( |
|
2261 | return context.filectx( | |
2245 | self, path, changeid, fileid, changectx=changectx |
|
2262 | self, path, changeid, fileid, changectx=changectx | |
2246 | ) |
|
2263 | ) | |
2247 |
|
2264 | |||
2248 | def getcwd(self): |
|
2265 | def getcwd(self) -> bytes: | |
2249 | return self.dirstate.getcwd() |
|
2266 | return self.dirstate.getcwd() | |
2250 |
|
2267 | |||
2251 | def pathto(self, f, cwd=None): |
|
2268 | def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes: | |
2252 | return self.dirstate.pathto(f, cwd) |
|
2269 | return self.dirstate.pathto(f, cwd) | |
2253 |
|
2270 | |||
2254 | def _loadfilter(self, filter): |
|
2271 | def _loadfilter(self, filter): | |
@@ -2300,14 +2317,21 b' class localrepository:' | |||||
2300 | def adddatafilter(self, name, filter): |
|
2317 | def adddatafilter(self, name, filter): | |
2301 | self._datafilters[name] = filter |
|
2318 | self._datafilters[name] = filter | |
2302 |
|
2319 | |||
2303 | def wread(self, filename): |
|
2320 | def wread(self, filename: bytes) -> bytes: | |
2304 | if self.wvfs.islink(filename): |
|
2321 | if self.wvfs.islink(filename): | |
2305 | data = self.wvfs.readlink(filename) |
|
2322 | data = self.wvfs.readlink(filename) | |
2306 | else: |
|
2323 | else: | |
2307 | data = self.wvfs.read(filename) |
|
2324 | data = self.wvfs.read(filename) | |
2308 | return self._filter(self._encodefilterpats, filename, data) |
|
2325 | return self._filter(self._encodefilterpats, filename, data) | |
2309 |
|
2326 | |||
2310 | def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs): |
|
2327 | def wwrite( | |
|
2328 | self, | |||
|
2329 | filename: bytes, | |||
|
2330 | data: bytes, | |||
|
2331 | flags: bytes, | |||
|
2332 | backgroundclose=False, | |||
|
2333 | **kwargs | |||
|
2334 | ) -> int: | |||
2311 | """write ``data`` into ``filename`` in the working directory |
|
2335 | """write ``data`` into ``filename`` in the working directory | |
2312 |
|
2336 | |||
2313 | This returns length of written (maybe decoded) data. |
|
2337 | This returns length of written (maybe decoded) data. | |
@@ -2325,7 +2349,7 b' class localrepository:' | |||||
2325 | self.wvfs.setflags(filename, False, False) |
|
2349 | self.wvfs.setflags(filename, False, False) | |
2326 | return len(data) |
|
2350 | return len(data) | |
2327 |
|
2351 | |||
2328 | def wwritedata(self, filename, data): |
|
2352 | def wwritedata(self, filename: bytes, data: bytes) -> bytes: | |
2329 | return self._filter(self._decodefilterpats, filename, data) |
|
2353 | return self._filter(self._decodefilterpats, filename, data) | |
2330 |
|
2354 | |||
2331 | def currenttransaction(self): |
|
2355 | def currenttransaction(self): | |
@@ -2356,6 +2380,21 b' class localrepository:' | |||||
2356 | hint=_(b"run 'hg recover' to clean up transaction"), |
|
2380 | hint=_(b"run 'hg recover' to clean up transaction"), | |
2357 | ) |
|
2381 | ) | |
2358 |
|
2382 | |||
|
2383 | # At that point your dirstate should be clean: | |||
|
2384 | # | |||
|
2385 | # - If you don't have the wlock, why would you still have a dirty | |||
|
2386 | # dirstate? | |||
|
2387 | # | |||
|
2388 | # - If you hold the wlock, you should not be opening a transaction in | |||
|
2389 | # the middle of a `dirstate.changing_*` block. The transaction needs to | |||
|
2390 | # be open before that and wrap the change-context. | |||
|
2391 | # | |||
|
2392 | # - If you are not within a `dirstate.changing_*` context, why is our | |||
|
2393 | # dirstate dirty? | |||
|
2394 | if self.dirstate._dirty: | |||
|
2395 | m = "cannot open a transaction with a dirty dirstate" | |||
|
2396 | raise error.ProgrammingError(m) | |||
|
2397 | ||||
2359 | idbase = b"%.40f#%f" % (random.random(), time.time()) |
|
2398 | idbase = b"%.40f#%f" % (random.random(), time.time()) | |
2360 | ha = hex(hashutil.sha1(idbase).digest()) |
|
2399 | ha = hex(hashutil.sha1(idbase).digest()) | |
2361 | txnid = b'TXN:' + ha |
|
2400 | txnid = b'TXN:' + ha | |
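
The comment block and guard above make a dirty dirstate at transaction-open time a programming error. A compact illustration of the invariant (the exception class is stood in by a local one):

    class ProgrammingError(Exception):
        pass

    def open_transaction(repo):
        # Any pending dirstate change must already be written, or the caller
        # must open the transaction *before* entering dirstate.changing_*.
        if repo.dirstate._dirty:
            raise ProgrammingError("cannot open a transaction with a dirty dirstate")
        return object()  # stand-in for the real transaction object
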
@@ -2514,7 +2553,6 b' class localrepository:' | |||||
2514 | # out) in this transaction |
|
2553 | # out) in this transaction | |
2515 | narrowspec.restorebackup(self, b'journal.narrowspec') |
|
2554 | narrowspec.restorebackup(self, b'journal.narrowspec') | |
2516 | narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate') |
|
2555 | narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate') | |
2517 | repo.dirstate.restorebackup(None, b'journal.dirstate') |
|
|||
2518 |
|
2556 | |||
2519 | repo.invalidate(clearfilecache=True) |
|
2557 | repo.invalidate(clearfilecache=True) | |
2520 |
|
2558 | |||
@@ -2612,33 +2650,50 b' class localrepository:' | |||||
2612 | tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats) |
|
2650 | tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats) | |
2613 | self._transref = weakref.ref(tr) |
|
2651 | self._transref = weakref.ref(tr) | |
2614 | scmutil.registersummarycallback(self, tr, desc) |
|
2652 | scmutil.registersummarycallback(self, tr, desc) | |
|
2653 | # This only exists to deal with the need of rollback to have viable | |||
|
2654 | # parents at the end of the operation. So back up viable parents at the | |||
|
2655 | # time of this operation. | |||
|
2656 | # | |||
|
2657 | # We only do it when the `wlock` is taken, otherwise others might be | |||
|
2658 | # altering the dirstate under us. | |||
|
2659 | # | |||
|
2660 | # This is really not a great way to do this (first, because we cannot | |||
|
2661 | # always do it). There are more viable alternatives: | |||
|
2662 | # | |||
|
2663 | # - backing only the working copy parent in a dedicated file and doing | |||
|
2664 | # a clean "keep-update" to them on `hg rollback`. | |||
|
2665 | # | |||
|
2666 | # - slightly changing the behavior and applying logic similar to "hg | |||
|
2667 | # strip" to pick a working copy destination on `hg rollback` | |||
|
2668 | if self.currentwlock() is not None: | |||
|
2669 | ds = self.dirstate | |||
|
2670 | ||||
|
2671 | def backup_dirstate(tr): | |||
|
2672 | for f in ds.all_file_names(): | |||
|
2673 | # hardlink backup is okay because `dirstate` is always | |||
|
2674 | # atomically written and possible data file are append only | |||
|
2675 | # and resistant to trailing data. | |||
|
2676 | tr.addbackup(f, hardlink=True, location=b'plain') | |||
|
2677 | ||||
|
2678 | tr.addvalidator(b'dirstate-backup', backup_dirstate) | |||
2615 | return tr |
|
2679 | return tr | |
2616 |
|
2680 | |||
2617 | def _journalfiles(self): |
|
2681 | def _journalfiles(self): | |
2618 | first = ( |
|
2682 | return ( | |
2619 | (self.svfs, b'journal'), |
|
2683 | (self.svfs, b'journal'), | |
2620 | (self.svfs, b'journal.narrowspec'), |
|
2684 | (self.svfs, b'journal.narrowspec'), | |
2621 | (self.vfs, b'journal.narrowspec.dirstate'), |
|
2685 | (self.vfs, b'journal.narrowspec.dirstate'), | |
2622 | (self.vfs, b'journal.dirstate'), |
|
|||
2623 | ) |
|
|||
2624 | middle = [] |
|
|||
2625 | dirstate_data = self.dirstate.data_backup_filename(b'journal.dirstate') |
|
|||
2626 | if dirstate_data is not None: |
|
|||
2627 | middle.append((self.vfs, dirstate_data)) |
|
|||
2628 | end = ( |
|
|||
2629 | (self.vfs, b'journal.branch'), |
|
2686 | (self.vfs, b'journal.branch'), | |
2630 | (self.vfs, b'journal.desc'), |
|
2687 | (self.vfs, b'journal.desc'), | |
2631 | (bookmarks.bookmarksvfs(self), b'journal.bookmarks'), |
|
2688 | (bookmarks.bookmarksvfs(self), b'journal.bookmarks'), | |
2632 | (self.svfs, b'journal.phaseroots'), |
|
2689 | (self.svfs, b'journal.phaseroots'), | |
2633 | ) |
|
2690 | ) | |
2634 | return first + tuple(middle) + end |
|
|||
2635 |
|
2691 | |||
2636 | def undofiles(self): |
|
2692 | def undofiles(self): | |
2637 | return [(vfs, undoname(x)) for vfs, x in self._journalfiles()] |
|
2693 | return [(vfs, undoname(x)) for vfs, x in self._journalfiles()] | |
2638 |
|
2694 | |||
2639 | @unfilteredmethod |
|
2695 | @unfilteredmethod | |
2640 | def _writejournal(self, desc): |
|
2696 | def _writejournal(self, desc): | |
2641 | self.dirstate.savebackup(None, b'journal.dirstate') |
|
|||
2642 | narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate') |
|
2697 | narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate') | |
2643 | narrowspec.savebackup(self, b'journal.narrowspec') |
|
2698 | narrowspec.savebackup(self, b'journal.narrowspec') | |
2644 | self.vfs.write( |
|
2699 | self.vfs.write( | |
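
Journal handling of the dirstate changes in the same way: instead of `savebackup` writing `journal.dirstate`, a validator callback registers every dirstate file as a hardlink backup on the transaction. A sketch of that registration, assuming a transaction object exposing `addbackup`/`addvalidator` as used above:

    def register_dirstate_backup(tr, dirstate):
        # Hypothetical helper with the same shape as the closure above.
        def backup_dirstate(tr):
            for f in dirstate.all_file_names():
                # Hardlinking is safe: these files are written atomically
                # and any data files are append-only.
                tr.addbackup(f, hardlink=True, location=b'plain')

        tr.addvalidator(b'dirstate-backup', backup_dirstate)
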
@@ -2673,23 +2728,23 b' class localrepository:' | |||||
2673 | return False |
|
2728 | return False | |
2674 |
|
2729 | |||
2675 | def rollback(self, dryrun=False, force=False): |
|
2730 | def rollback(self, dryrun=False, force=False): | |
2676 | wlock = lock = dsguard = None |
|
2731 | wlock = lock = None | |
2677 | try: |
|
2732 | try: | |
2678 | wlock = self.wlock() |
|
2733 | wlock = self.wlock() | |
2679 | lock = self.lock() |
|
2734 | lock = self.lock() | |
2680 | if self.svfs.exists(b"undo"): |
|
2735 | if self.svfs.exists(b"undo"): | |
2681 | dsguard = dirstateguard.dirstateguard(self, b'rollback') |
|
2736 | return self._rollback(dryrun, force) | |
2682 |
|
||||
2683 | return self._rollback(dryrun, force, dsguard) |
|
|||
2684 | else: |
|
2737 | else: | |
2685 | self.ui.warn(_(b"no rollback information available\n")) |
|
2738 | self.ui.warn(_(b"no rollback information available\n")) | |
2686 | return 1 |
|
2739 | return 1 | |
2687 | finally: |
|
2740 | finally: | |
2688 | release(dsguard, lock, wlock) |
|
2741 | release(lock, wlock) | |
2689 |
|
2742 | |||
2690 | @unfilteredmethod # Until we get smarter cache management |
|
2743 | @unfilteredmethod # Until we get smarter cache management | |
2691 | def _rollback(self, dryrun, force, dsguard): |
|
2744 | def _rollback(self, dryrun, force): | |
2692 | ui = self.ui |
|
2745 | ui = self.ui | |
|
2746 | ||||
|
2747 | parents = self.dirstate.parents() | |||
2693 | try: |
|
2748 | try: | |
2694 | args = self.vfs.read(b'undo.desc').splitlines() |
|
2749 | args = self.vfs.read(b'undo.desc').splitlines() | |
2695 | (oldlen, desc, detail) = (int(args[0]), args[1], None) |
|
2750 | (oldlen, desc, detail) = (int(args[0]), args[1], None) | |
@@ -2706,9 +2761,11 b' class localrepository:' | |||||
2706 | msg = _( |
|
2761 | msg = _( | |
2707 | b'repository tip rolled back to revision %d (undo %s)\n' |
|
2762 | b'repository tip rolled back to revision %d (undo %s)\n' | |
2708 | ) % (oldtip, desc) |
|
2763 | ) % (oldtip, desc) | |
|
2764 | parentgone = any(self[p].rev() > oldtip for p in parents) | |||
2709 | except IOError: |
|
2765 | except IOError: | |
2710 | msg = _(b'rolling back unknown transaction\n') |
|
2766 | msg = _(b'rolling back unknown transaction\n') | |
2711 | desc = None |
|
2767 | desc = None | |
|
2768 | parentgone = True | |||
2712 |
|
2769 | |||
2713 | if not force and self[b'.'] != self[b'tip'] and desc == b'commit': |
|
2770 | if not force and self[b'.'] != self[b'tip'] and desc == b'commit': | |
2714 | raise error.Abort( |
|
2771 | raise error.Abort( | |
@@ -2723,11 +2780,18 b' class localrepository:' | |||||
2723 | if dryrun: |
|
2780 | if dryrun: | |
2724 | return 0 |
|
2781 | return 0 | |
2725 |
|
2782 | |||
2726 | parents = self.dirstate.parents() |
|
|||
2727 | self.destroying() |
|
2783 | self.destroying() | |
2728 | vfsmap = {b'plain': self.vfs, b'': self.svfs} |
|
2784 | vfsmap = {b'plain': self.vfs, b'': self.svfs} | |
|
2785 | skip_journal_pattern = None | |||
|
2786 | if not parentgone: | |||
|
2787 | skip_journal_pattern = RE_SKIP_DIRSTATE_ROLLBACK | |||
2729 | transaction.rollback( |
|
2788 | transaction.rollback( | |
2730 | self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles |
|
2789 | self.svfs, | |
|
2790 | vfsmap, | |||
|
2791 | b'undo', | |||
|
2792 | ui.warn, | |||
|
2793 | checkambigfiles=_cachedfiles, | |||
|
2794 | skip_journal_pattern=skip_journal_pattern, | |||
2731 | ) |
|
2795 | ) | |
2732 | bookmarksvfs = bookmarks.bookmarksvfs(self) |
|
2796 | bookmarksvfs = bookmarks.bookmarksvfs(self) | |
2733 | if bookmarksvfs.exists(b'undo.bookmarks'): |
|
2797 | if bookmarksvfs.exists(b'undo.bookmarks'): | |
@@ -2737,16 +2801,20 b' class localrepository:' | |||||
2737 | if self.svfs.exists(b'undo.phaseroots'): |
|
2801 | if self.svfs.exists(b'undo.phaseroots'): | |
2738 | self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True) |
|
2802 | self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True) | |
2739 | self.invalidate() |
|
2803 | self.invalidate() | |
2740 |
|
2804 | self.dirstate.invalidate() | ||
2741 | has_node = self.changelog.index.has_node |
|
2805 | ||
2742 | parentgone = any(not has_node(p) for p in parents) |
|
|||
2743 | if parentgone: |
|
2806 | if parentgone: | |
2744 | # prevent dirstateguard from overwriting already restored one |
|
2807 | # replace this with some explicit parent update in the future. | |
2745 | dsguard.close() |
|
2808 | has_node = self.changelog.index.has_node | |
|
2809 | if not all(has_node(p) for p in self.dirstate._pl): | |||
|
2810 | # There was no dirstate to backup initially, we need to drop | |||
|
2811 | # the existing one. | |||
|
2812 | with self.dirstate.changing_parents(self): | |||
|
2813 | self.dirstate.setparents(self.nullid) | |||
|
2814 | self.dirstate.clear() | |||
2746 |
|
2815 | |||
2747 | narrowspec.restorebackup(self, b'undo.narrowspec') |
|
2816 | narrowspec.restorebackup(self, b'undo.narrowspec') | |
2748 | narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate') |
|
2817 | narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate') | |
2749 | self.dirstate.restorebackup(None, b'undo.dirstate') |
|
|||
2750 | try: |
|
2818 | try: | |
2751 | branch = self.vfs.read(b'undo.branch') |
|
2819 | branch = self.vfs.read(b'undo.branch') | |
2752 | self.dirstate.setbranch(encoding.tolocal(branch)) |
|
2820 | self.dirstate.setbranch(encoding.tolocal(branch)) | |
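
`_rollback` now decides up front whether a working-copy parent will disappear, by comparing the pre-rollback parents against the old tip, and only clears the dirstate in that case. The core test, reduced to plain revision numbers:

    def parent_gone(parent_revs, oldtip):
        # A parent committed after `oldtip` no longer exists once the
        # transaction is rolled back.
        return any(rev > oldtip for rev in parent_revs)

    assert parent_gone([8], oldtip=7) is True      # parent rolled away
    assert parent_gone([5, 6], oldtip=7) is False  # both parents survive
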
@@ -2880,7 +2948,6 b' class localrepository:' | |||||
2880 | filtered.branchmap().write(filtered) |
|
2948 | filtered.branchmap().write(filtered) | |
2881 |
|
2949 | |||
2882 | def invalidatecaches(self): |
|
2950 | def invalidatecaches(self): | |
2883 |
|
||||
2884 | if '_tagscache' in vars(self): |
|
2951 | if '_tagscache' in vars(self): | |
2885 | # can't use delattr on proxy |
|
2952 | # can't use delattr on proxy | |
2886 | del self.__dict__['_tagscache'] |
|
2953 | del self.__dict__['_tagscache'] | |
@@ -2903,13 +2970,9 b' class localrepository:' | |||||
2903 | rereads the dirstate. Use dirstate.invalidate() if you want to |
|
2970 | rereads the dirstate. Use dirstate.invalidate() if you want to | |
2904 | explicitly read the dirstate again (i.e. restoring it to a previous |
|
2971 | explicitly read the dirstate again (i.e. restoring it to a previous | |
2905 | known good state).""" |
|
2972 | known good state).""" | |
2906 | if hasunfilteredcache(self, 'dirstate'): |
|
2973 | unfi = self.unfiltered() | |
2907 | for k in self.dirstate._filecache: |
|
2974 | if 'dirstate' in unfi.__dict__: | |
2908 | try: |
|
2975 | del unfi.__dict__['dirstate'] | |
2909 | delattr(self.dirstate, k) |
|
|||
2910 | except AttributeError: |
|
|||
2911 | pass |
|
|||
2912 | delattr(self.unfiltered(), 'dirstate') |
|
|||
2913 |
|
2976 | |||
2914 | def invalidate(self, clearfilecache=False): |
|
2977 | def invalidate(self, clearfilecache=False): | |
2915 | """Invalidates both store and non-store parts other than dirstate |
|
2978 | """Invalidates both store and non-store parts other than dirstate | |
@@ -2921,9 +2984,6 b' class localrepository:' | |||||
2921 | """ |
|
2984 | """ | |
2922 | unfiltered = self.unfiltered() # all file caches are stored unfiltered |
|
2985 | unfiltered = self.unfiltered() # all file caches are stored unfiltered | |
2923 | for k in list(self._filecache.keys()): |
|
2986 | for k in list(self._filecache.keys()): | |
2924 | # dirstate is invalidated separately in invalidatedirstate() |
|
|||
2925 | if k == b'dirstate': |
|
|||
2926 | continue |
|
|||
2927 | if ( |
|
2987 | if ( | |
2928 | k == b'changelog' |
|
2988 | k == b'changelog' | |
2929 | and self.currenttransaction() |
|
2989 | and self.currenttransaction() | |
@@ -3052,12 +3112,19 b' class localrepository:' | |||||
3052 | self.ui.develwarn(b'"wlock" acquired after "lock"') |
|
3112 | self.ui.develwarn(b'"wlock" acquired after "lock"') | |
3053 |
|
3113 | |||
3054 | def unlock(): |
|
3114 | def unlock(): | |
3055 | if self.dirstate.pendingparentchange(): |
|
3115 | if self.dirstate.is_changing_any: | |
|
3116 | msg = b"wlock release in the middle of a changing parents" | |||
|
3117 | self.ui.develwarn(msg) | |||
3056 | self.dirstate.invalidate() |
|
3118 | self.dirstate.invalidate() | |
3057 | else: |
|
3119 | else: | |
|
3120 | if self.dirstate._dirty: | |||
|
3121 | msg = b"dirty dirstate on wlock release" | |||
|
3122 | self.ui.develwarn(msg) | |||
3058 | self.dirstate.write(None) |
|
3123 | self.dirstate.write(None) | |
3059 |
|
3124 | |||
3060 | self._filecache[b'dirstate'].refresh() |
|
3125 | unfi = self.unfiltered() | |
|
3126 | if 'dirstate' in unfi.__dict__: | |||
|
3127 | del unfi.__dict__['dirstate'] | |||
3061 |
|
3128 | |||
3062 | l = self._lock( |
|
3129 | l = self._lock( | |
3063 | self.vfs, |
|
3130 | self.vfs, | |
@@ -3520,14 +3587,13 b' def aftertrans(files):' | |||||
3520 | return a |
|
3587 | return a | |
3521 |
|
3588 | |||
3522 |
|
3589 | |||
3523 | def undoname(fn): |
|
3590 | def undoname(fn: bytes) -> bytes: | |
3524 | base, name = os.path.split(fn) |
|
3591 | base, name = os.path.split(fn) | |
3525 | assert name.startswith(b'journal') |
|
3592 | assert name.startswith(b'journal') | |
3526 | return os.path.join(base, name.replace(b'journal', b'undo', 1)) |
|
3593 | return os.path.join(base, name.replace(b'journal', b'undo', 1)) | |
3527 |
|
3594 | |||
3528 |
|
3595 | |||
3529 | def instance(ui, path, create, intents=None, createopts=None): |
|
3596 | def instance(ui, path: bytes, create, intents=None, createopts=None): | |
3530 |
|
||||
3531 | # prevent cyclic import localrepo -> upgrade -> localrepo |
|
3597 | # prevent cyclic import localrepo -> upgrade -> localrepo | |
3532 | from . import upgrade |
|
3598 | from . import upgrade | |
3533 |
|
3599 | |||
@@ -3543,7 +3609,7 b' def instance(ui, path, create, intents=N' | |||||
3543 | return repo |
|
3609 | return repo | |
3544 |
|
3610 | |||
3545 |
|
3611 | |||
3546 | def islocal(path): |
|
3612 | def islocal(path: bytes) -> bool: | |
3547 | return True |
|
3613 | return True | |
3548 |
|
3614 | |||
3549 |
|
3615 | |||
@@ -3803,7 +3869,7 b' def filterknowncreateopts(ui, createopts' | |||||
3803 | return {k: v for k, v in createopts.items() if k not in known} |
|
3869 | return {k: v for k, v in createopts.items() if k not in known} | |
3804 |
|
3870 | |||
3805 |
|
3871 | |||
3806 | def createrepository(ui, path, createopts=None, requirements=None): |
|
3872 | def createrepository(ui, path: bytes, createopts=None, requirements=None): | |
3807 | """Create a new repository in a vfs. |
|
3873 | """Create a new repository in a vfs. | |
3808 |
|
3874 | |||
3809 | ``path`` path to the new repo's working directory. |
|
3875 | ``path`` path to the new repo's working directory. |
@@ -113,7 +113,7 b' def activepath(repo, remote):' | |||||
113 | if local: |
|
113 | if local: | |
114 | rpath = util.pconvert(remote._repo.root) |
|
114 | rpath = util.pconvert(remote._repo.root) | |
115 | elif not isinstance(remote, bytes): |
|
115 | elif not isinstance(remote, bytes): | |
116 | rpath = remote._url |
|
116 | rpath = remote.url() | |
117 |
|
117 | |||
118 | # represent the remotepath with user defined path name if exists |
|
118 | # represent the remotepath with user defined path name if exists | |
119 | for path, url in repo.ui.configitems(b'paths'): |
|
119 | for path, url in repo.ui.configitems(b'paths'): |
@@ -1836,6 +1836,7 b' class manifestrevlog:' | |||||
1836 | assumehaveparentrevisions=False, |
|
1836 | assumehaveparentrevisions=False, | |
1837 | deltamode=repository.CG_DELTAMODE_STD, |
|
1837 | deltamode=repository.CG_DELTAMODE_STD, | |
1838 | sidedata_helpers=None, |
|
1838 | sidedata_helpers=None, | |
|
1839 | debug_info=None, | |||
1839 | ): |
|
1840 | ): | |
1840 | return self._revlog.emitrevisions( |
|
1841 | return self._revlog.emitrevisions( | |
1841 | nodes, |
|
1842 | nodes, | |
@@ -1844,6 +1845,7 b' class manifestrevlog:' | |||||
1844 | assumehaveparentrevisions=assumehaveparentrevisions, |
|
1845 | assumehaveparentrevisions=assumehaveparentrevisions, | |
1845 | deltamode=deltamode, |
|
1846 | deltamode=deltamode, | |
1846 | sidedata_helpers=sidedata_helpers, |
|
1847 | sidedata_helpers=sidedata_helpers, | |
|
1848 | debug_info=debug_info, | |||
1847 | ) |
|
1849 | ) | |
1848 |
|
1850 | |||
1849 | def addgroup( |
|
1851 | def addgroup( | |
@@ -1854,6 +1856,8 b' class manifestrevlog:' | |||||
1854 | alwayscache=False, |
|
1856 | alwayscache=False, | |
1855 | addrevisioncb=None, |
|
1857 | addrevisioncb=None, | |
1856 | duplicaterevisioncb=None, |
|
1858 | duplicaterevisioncb=None, | |
|
1859 | debug_info=None, | |||
|
1860 | delta_base_reuse_policy=None, | |||
1857 | ): |
|
1861 | ): | |
1858 | return self._revlog.addgroup( |
|
1862 | return self._revlog.addgroup( | |
1859 | deltas, |
|
1863 | deltas, | |
@@ -1862,6 +1866,8 b' class manifestrevlog:' | |||||
1862 | alwayscache=alwayscache, |
|
1866 | alwayscache=alwayscache, | |
1863 | addrevisioncb=addrevisioncb, |
|
1867 | addrevisioncb=addrevisioncb, | |
1864 | duplicaterevisioncb=duplicaterevisioncb, |
|
1868 | duplicaterevisioncb=duplicaterevisioncb, | |
|
1869 | debug_info=debug_info, | |||
|
1870 | delta_base_reuse_policy=delta_base_reuse_policy, | |||
1865 | ) |
|
1871 | ) | |
1866 |
|
1872 | |||
1867 | def rawsize(self, rev): |
|
1873 | def rawsize(self, rev): |
@@ -368,7 +368,7 b' def _donormalize(patterns, default, root' | |||||
368 | % ( |
|
368 | % ( | |
369 | pat, |
|
369 | pat, | |
370 | inst.message, |
|
370 | inst.message, | |
371 | ) # pytype: disable=unsupported-operands |
|
371 | ) | |
372 | ) |
|
372 | ) | |
373 | except IOError as inst: |
|
373 | except IOError as inst: | |
374 | if warn: |
|
374 | if warn: |
@@ -94,6 +94,13 b' class diffopts:' | |||||
94 | opts.update(kwargs) |
|
94 | opts.update(kwargs) | |
95 | return diffopts(**opts) |
|
95 | return diffopts(**opts) | |
96 |
|
96 | |||
|
97 | def __bytes__(self): | |||
|
98 | return b", ".join( | |||
|
99 | b"%s: %r" % (k, getattr(self, k)) for k in self.defaults | |||
|
100 | ) | |||
|
101 | ||||
|
102 | __str__ = encoding.strmethod(__bytes__) | |||
|
103 | ||||
97 |
|
104 | |||
98 | defaultopts = diffopts() |
|
105 | defaultopts = diffopts() | |
99 |
|
106 |
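
The new `__bytes__` makes diffopts printable for debugging, one `key: value` pair per default. A stand-alone analogue of the same rendering (toy defaults; the `%r` bytes formatting follows the hunk above):

    class ToyOpts:
        defaults = {'context': 3, 'ignorews': False}

        def __init__(self, **opts):
            for k, v in self.defaults.items():
                setattr(self, k, opts.get(k, v))

        def __bytes__(self):
            return b", ".join(
                b"%s: %r" % (k.encode('ascii'), getattr(self, k))
                for k in self.defaults
            )

    print(bytes(ToyOpts(context=5)))  # b"context: 5, ignorews: False"
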
@@ -46,7 +46,7 b' def _getcheckunknownconfig(repo, section' | |||||
46 | return config |
|
46 | return config | |
47 |
|
47 | |||
48 |
|
48 | |||
49 | def _checkunknownfile(repo, wctx, mctx, f, f2=None): |
|
49 | def _checkunknownfile(dirstate, wvfs, dircache, wctx, mctx, f, f2=None): | |
50 | if wctx.isinmemory(): |
|
50 | if wctx.isinmemory(): | |
51 | # Nothing to do in IMM because nothing in the "working copy" can be an |
|
51 | # Nothing to do in IMM because nothing in the "working copy" can be an | |
52 | # unknown file. |
|
52 | # unknown file. | |
@@ -58,9 +58,8 b' def _checkunknownfile(repo, wctx, mctx, ' | |||||
58 | if f2 is None: |
|
58 | if f2 is None: | |
59 | f2 = f |
|
59 | f2 = f | |
60 | return ( |
|
60 | return ( | |
61 | repo.wvfs.audit.check(f) |
|
61 | wvfs.isfileorlink_checkdir(dircache, f) | |
62 | and repo.wvfs.isfileorlink(f) |
|
62 | and dirstate.normalize(f) not in dirstate | |
63 | and repo.dirstate.normalize(f) not in repo.dirstate |
|
|||
64 | and mctx[f2].cmp(wctx[f]) |
|
63 | and mctx[f2].cmp(wctx[f]) | |
65 | ) |
|
64 | ) | |
66 |
|
65 | |||
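
`_checkunknownfile` now receives the dirstate, the working vfs and a shared `dircache` dict, so a whole batch of unknown-file checks stats each directory once instead of re-auditing the path for every file. A rough, self-contained sketch of such a memoised check (plain `os` calls, not Mercurial's vfs method):

    import os

    def isfileorlink_checkdir(dircache, root, f):
        # Remember, per ancestor directory, whether it is a real directory;
        # later files under the same directory skip the filesystem call.
        d = os.path.dirname(f)
        while d:
            if d not in dircache:
                dircache[d] = os.path.isdir(os.path.join(root, d))
            if not dircache[d]:
                return False
            d = os.path.dirname(d)
        p = os.path.join(root, f)
        return os.path.isfile(p) or os.path.islink(p)

Callers share one `dircache` across the whole batch, mirroring how `_checkunknownfiles` creates a single dict up front.
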
@@ -136,6 +135,9 b' def _checkunknownfiles(repo, wctx, mctx,' | |||||
136 | pathconfig = repo.ui.configbool( |
|
135 | pathconfig = repo.ui.configbool( | |
137 | b'experimental', b'merge.checkpathconflicts' |
|
136 | b'experimental', b'merge.checkpathconflicts' | |
138 | ) |
|
137 | ) | |
|
138 | dircache = dict() | |||
|
139 | dirstate = repo.dirstate | |||
|
140 | wvfs = repo.wvfs | |||
139 | if not force: |
|
141 | if not force: | |
140 |
|
142 | |||
141 | def collectconflicts(conflicts, config): |
|
143 | def collectconflicts(conflicts, config): | |
@@ -151,7 +153,7 b' def _checkunknownfiles(repo, wctx, mctx,' | |||||
151 | mergestatemod.ACTION_DELETED_CHANGED, |
|
153 | mergestatemod.ACTION_DELETED_CHANGED, | |
152 | ) |
|
154 | ) | |
153 | ): |
|
155 | ): | |
154 | if _checkunknownfile(repo, wctx, mctx, f): |
|
156 | if _checkunknownfile(dirstate, wvfs, dircache, wctx, mctx, f): | |
155 | fileconflicts.add(f) |
|
157 | fileconflicts.add(f) | |
156 | elif pathconfig and f not in wctx: |
|
158 | elif pathconfig and f not in wctx: | |
157 | path = checkunknowndirs(repo, wctx, f) |
|
159 | path = checkunknowndirs(repo, wctx, f) | |
@@ -160,7 +162,9 b' def _checkunknownfiles(repo, wctx, mctx,' | |||||
160 | for f, args, msg in mresult.getactions( |
|
162 | for f, args, msg in mresult.getactions( | |
161 | [mergestatemod.ACTION_LOCAL_DIR_RENAME_GET] |
|
163 | [mergestatemod.ACTION_LOCAL_DIR_RENAME_GET] | |
162 | ): |
|
164 | ): | |
163 | if _checkunknownfile(repo, wctx, mctx, f, args[0]): |
|
165 | if _checkunknownfile( | |
|
166 | dirstate, wvfs, dircache, wctx, mctx, f, args[0] | |||
|
167 | ): | |||
164 | fileconflicts.add(f) |
|
168 | fileconflicts.add(f) | |
165 |
|
169 | |||
166 | allconflicts = fileconflicts | pathconflicts |
|
170 | allconflicts = fileconflicts | pathconflicts | |
@@ -173,7 +177,9 b' def _checkunknownfiles(repo, wctx, mctx,' | |||||
173 | mresult.getactions([mergestatemod.ACTION_CREATED_MERGE]) |
|
177 | mresult.getactions([mergestatemod.ACTION_CREATED_MERGE]) | |
174 | ): |
|
178 | ): | |
175 | fl2, anc = args |
|
179 | fl2, anc = args | |
176 | different = _checkunknownfile(repo, wctx, mctx, f) |
|
180 | different = _checkunknownfile( | |
|
181 | dirstate, wvfs, dircache, wctx, mctx, f | |||
|
182 | ) | |||
177 | if repo.dirstate._ignore(f): |
|
183 | if repo.dirstate._ignore(f): | |
178 | config = ignoredconfig |
|
184 | config = ignoredconfig | |
179 | else: |
|
185 | else: | |
@@ -240,16 +246,21 b' def _checkunknownfiles(repo, wctx, mctx,' | |||||
240 | else: |
|
246 | else: | |
241 | repo.ui.warn(_(b"%s: replacing untracked files in directory\n") % f) |
|
247 | repo.ui.warn(_(b"%s: replacing untracked files in directory\n") % f) | |
242 |
|
248 | |||
243 | for f, args, msg in list( |
|
249 | def transformargs(f, args): | |
244 | mresult.getactions([mergestatemod.ACTION_CREATED]) |
|
|||
245 | ): |
|
|||
246 | backup = ( |
|
250 | backup = ( | |
247 | f in fileconflicts |
|
251 | f in fileconflicts | |
248 | or f in pathconflicts |
|
252 | or pathconflicts | |
249 | or any(p in pathconflicts for p in pathutil.finddirs(f)) |
|
253 | and ( | |
|
254 | f in pathconflicts | |||
|
255 | or any(p in pathconflicts for p in pathutil.finddirs(f)) | |||
|
256 | ) | |||
250 | ) |
|
257 | ) | |
251 | (flags,) = args |
|
258 | (flags,) = args | |
252 | mresult.addfile(f, mergestatemod.ACTION_GET, (flags, backup), msg) |
|
259 | return (flags, backup) | |
|
260 | ||||
|
261 | mresult.mapaction( | |||
|
262 | mergestatemod.ACTION_CREATED, mergestatemod.ACTION_GET, transformargs | |||
|
263 | ) | |||
253 |
|
264 | |||
254 |
|
265 | |||
255 | def _forgetremoved(wctx, mctx, branchmerge, mresult): |
|
266 | def _forgetremoved(wctx, mctx, branchmerge, mresult): | |
@@ -581,6 +592,18 b' class mergeresult:' | |||||
581 | self._filemapping[filename] = (action, data, message) |
|
592 | self._filemapping[filename] = (action, data, message) | |
582 | self._actionmapping[action][filename] = (data, message) |
|
593 | self._actionmapping[action][filename] = (data, message) | |
583 |
|
594 | |||
|
595 | def mapaction(self, actionfrom, actionto, transform): | |||
|
596 | """changes all occurrences of action `actionfrom` into `actionto`, | |||
|
597 | transforming its args with the function `transform`. | |||
|
598 | """ | |||
|
599 | orig = self._actionmapping[actionfrom] | |||
|
600 | del self._actionmapping[actionfrom] | |||
|
601 | dest = self._actionmapping[actionto] | |||
|
602 | for f, (data, msg) in orig.items(): | |||
|
603 | data = transform(f, data) | |||
|
604 | self._filemapping[f] = (actionto, data, msg) | |||
|
605 | dest[f] = (data, msg) | |||
|
606 | ||||
584 | def getfile(self, filename, default_return=None): |
|
607 | def getfile(self, filename, default_return=None): | |
585 | """returns (action, args, msg) about this file |
|
608 | """returns (action, args, msg) about this file | |
586 |
|
609 | |||
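The `mapaction` helper above is what lets `_checkunknownfiles` rewrite every queued ACTION_CREATED into an ACTION_GET in one pass instead of re-adding files individually. A minimal sketch of the mechanics, using a toy stand-in for the real `mergeresult` class (toy byte-string action names, not the real mergestatemod constants):

    from collections import defaultdict


    class toymergeresult:
        """Simplified stand-in for mercurial.merge.mergeresult."""

        def __init__(self):
            self._filemapping = {}  # file -> (action, data, msg)
            self._actionmapping = defaultdict(dict)  # action -> {file: (data, msg)}

        def addfile(self, f, action, data, msg):
            self._filemapping[f] = (action, data, msg)
            self._actionmapping[action][f] = (data, msg)

        def mapaction(self, actionfrom, actionto, transform):
            orig = self._actionmapping[actionfrom]
            del self._actionmapping[actionfrom]
            dest = self._actionmapping[actionto]
            for f, (data, msg) in orig.items():
                data = transform(f, data)
                self._filemapping[f] = (actionto, data, msg)
                dest[f] = (data, msg)


    mresult = toymergeresult()
    mresult.addfile(b'a.txt', b'created', (b'',), b'new file')
    # mirrors transformargs(): turn (flags,) into (flags, backup)
    mresult.mapaction(b'created', b'get', lambda f, args: (args[0], False))
    assert mresult._filemapping[b'a.txt'] == (b'get', (b'', False), b'new file')

Because the per-action buckets are rewritten wholesale, the pass is linear in the number of created files and keeps `_filemapping` and `_actionmapping` consistent, which the old addfile-in-a-loop approach had to maintain by hand.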
@@ -1142,6 +1165,8 b' def calculateupdates('
             followcopies,
         )
         _checkunknownfiles(repo, wctx, mctx, force, mresult, mergeforce)
+        if repo.ui.configbool(b'devel', b'debug.abort-update'):
+            exit(1)

     else:  # only when merge.preferancestor=* - the default
         repo.ui.note(
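The new `devel.debug.abort-update` knob kills the process right after the unknown-file checks, which makes it possible to test recovery from an interrupted update. Going by the `configbool` call, it should be reachable per invocation with something like:

    hg update --config devel.debug.abort-update=yes

The bare `exit(1)` (rather than a clean `error.Abort`) looks deliberate: the point is to simulate an unclean death in the middle of an update.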
@@ -2130,7 +2155,7 b' def _update('
         assert len(getfiledata) == (
             mresult.len((mergestatemod.ACTION_GET,)) if wantfiledata else 0
         )
-        with repo.dirstate.parentchange(repo):
+        with repo.dirstate.changing_parents(repo):
             ### Filter Filedata
             #
             # We gathered "cache" information for the clean file while
@@ -2352,7 +2377,7 b' def graft('
         # fix up dirstate for copies and renames
         copies.graftcopies(wctx, ctx, base)
     else:
-        with repo.dirstate.parentchange(repo):
+        with repo.dirstate.changing_parents(repo):
             repo.setparents(pctx.node(), pother)
             repo.dirstate.write(repo.currenttransaction())
             # fix up dirstate for copies and renames
@@ -322,10 +322,16 b' def updateworkingcopy(repo, assumeclean='
     addedmatch = matchmod.differencematcher(newmatch, oldmatch)
     removedmatch = matchmod.differencematcher(oldmatch, newmatch)

+    assert repo.currentwlock() is not None
     ds = repo.dirstate
-    lookup, status, _mtime_boundary = ds.status(
-        removedmatch, subrepos=[], ignored=True, clean=True, unknown=True
-    )
+    with ds.running_status(repo):
+        lookup, status, _mtime_boundary = ds.status(
+            removedmatch,
+            subrepos=[],
+            ignored=True,
+            clean=True,
+            unknown=True,
+        )
     trackeddirty = status.modified + status.added
     clean = status.clean
     if assumeclean:
@@ -570,22 +570,23 b' class workingbackend(fsbackend):'
         self.changed.add(fname)

     def close(self):
-        wctx = self.repo[None]
-        changed = set(self.changed)
-        for src, dst in self.copied:
-            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
-        if self.removed:
-            wctx.forget(sorted(self.removed))
-            for f in self.removed:
-                if f not in self.repo.dirstate:
-                    # File was deleted and no longer belongs to the
-                    # dirstate, it was probably marked added then
-                    # deleted, and should not be considered by
-                    # marktouched().
-                    changed.discard(f)
-        if changed:
-            scmutil.marktouched(self.repo, changed, self.similarity)
-        return sorted(self.changed)
+        with self.repo.dirstate.changing_files(self.repo):
+            wctx = self.repo[None]
+            changed = set(self.changed)
+            for src, dst in self.copied:
+                scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
+            if self.removed:
+                wctx.forget(sorted(self.removed))
+                for f in self.removed:
+                    if f not in self.repo.dirstate:
+                        # File was deleted and no longer belongs to the
+                        # dirstate, it was probably marked added then
+                        # deleted, and should not be considered by
+                        # marktouched().
+                        changed.discard(f)
+            if changed:
+                scmutil.marktouched(self.repo, changed, self.similarity)
+        return sorted(self.changed)


 class filestore:
@@ -4,6 +4,13 b' import os'
 import posixpath
 import stat

+from typing import (
+    Any,
+    Callable,
+    Iterator,
+    Optional,
+)
+
 from .i18n import _
 from . import (
     encoding,
@@ -13,15 +20,6 b' from . import ('
     util,
 )

-if pycompat.TYPE_CHECKING:
-    from typing import (
-        Any,
-        Callable,
-        Iterator,
-        Optional,
-    )
-
-
 rustdirs = policy.importrust('dirstate', 'Dirs')
 parsers = policy.importmod('parsers')

@@ -56,7 +54,7 b' class pathauditor:'

     def __init__(self, root, callback=None, realfs=True, cached=False):
         self.audited = set()
-        self.auditeddir = set()
+        self.auditeddir = dict()
         self.root = root
         self._realfs = realfs
         self._cached = cached
@@ -72,8 +70,7 b' class pathauditor:'
         path may contain a pattern (e.g. foodir/**.txt)"""

         path = util.localpath(path)
-        normpath = self.normcase(path)
-        if normpath in self.audited:
+        if path in self.audited:
             return
         # AIX ignores "/" at end of path, others raise EISDIR.
         if util.endswithsep(path):
@@ -90,13 +87,14 b' class pathauditor:'
                 _(b"path contains illegal component: %s") % path
             )
         # Windows shortname aliases
-        for p in parts:
-            if b"~" in p:
-                first, last = p.split(b"~", 1)
-                if last.isdigit() and first.upper() in [b"HG", b"HG8B6C"]:
-                    raise error.InputError(
-                        _(b"path contains illegal component: %s") % path
-                    )
+        if b"~" in path:
+            for p in parts:
+                if b"~" in p:
+                    first, last = p.split(b"~", 1)
+                    if last.isdigit() and first.upper() in [b"HG", b"HG8B6C"]:
+                        raise error.InputError(
+                            _(b"path contains illegal component: %s") % path
+                        )
         if b'.hg' in _lowerclean(path):
             lparts = [_lowerclean(p) for p in parts]
             for p in b'.hg', b'.hg.':
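The reshuffled shortname check adds a single whole-path scan as a fast path; the per-component split now only runs when a b"~" occurs anywhere in the path. The effect, distilled into a hypothetical standalone helper (has_shortname_alias is illustrative, not part of the patch):

    def has_shortname_alias(path: bytes) -> bool:
        if b"~" not in path:  # one cheap scan; almost always False
            return False
        for p in path.split(b"/"):
            if b"~" in p:
                first, last = p.split(b"~", 1)
                if last.isdigit() and first.upper() in [b"HG", b"HG8B6C"]:
                    return True
        return False


    assert not has_shortname_alias(b'dir/subdir/file.txt')
    assert has_shortname_alias(b'HG~1/foo')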
@@ -108,36 +106,43 b' class pathauditor:'
                         % (path, pycompat.bytestr(base))
                     )

-        normparts = util.splitpath(normpath)
-        assert len(parts) == len(normparts)
-
-        parts.pop()
-        normparts.pop()
-        # It's important that we check the path parts starting from the root.
-        # We don't want to add "foo/bar/baz" to auditeddir before checking if
-        # there's a "foo/.hg" directory. This also means we won't accidentally
-        # traverse a symlink into some other filesystem (which is potentially
-        # expensive to access).
-        for i in range(len(parts)):
-            prefix = pycompat.ossep.join(parts[: i + 1])
-            normprefix = pycompat.ossep.join(normparts[: i + 1])
-            if normprefix in self.auditeddir:
-                continue
-            if self._realfs:
-                self._checkfs(prefix, path)
-            if self._cached:
-                self.auditeddir.add(normprefix)
+        if self._realfs:
+            # It's important that we check the path parts starting from the root.
+            # We don't want to add "foo/bar/baz" to auditeddir before checking if
+            # there's a "foo/.hg" directory. This also means we won't accidentally
+            # traverse a symlink into some other filesystem (which is potentially
+            # expensive to access).
+            for prefix in finddirs_rev_noroot(path):
+                if prefix in self.auditeddir:
+                    res = self.auditeddir[prefix]
+                else:
+                    res = pathauditor._checkfs_exists(
+                        self.root, prefix, path, self.callback
+                    )
+                    if self._cached:
+                        self.auditeddir[prefix] = res
+                if not res:
+                    break

         if self._cached:
-            self.audited.add(normpath)
+            self.audited.add(path)

-    def _checkfs(self, prefix, path):
-        # type: (bytes, bytes) -> None
-        """raise exception if a file system backed check fails"""
-        curpath = os.path.join(self.root, prefix)
+    @staticmethod
+    def _checkfs_exists(
+        root,
+        prefix: bytes,
+        path: bytes,
+        callback: Optional[Callable[[bytes], bool]] = None,
+    ):
+        """raise exception if a file system backed check fails.
+
+        Return a bool that indicates that the directory (or file) exists."""
+        curpath = os.path.join(root, prefix)
         try:
             st = os.lstat(curpath)
         except OSError as err:
+            if err.errno == errno.ENOENT:
+                return False
             # EINVAL can be raised as invalid path syntax under win32.
             # They must be ignored for patterns can be checked too.
             if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
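Turning `auditeddir` from a set into a dict is the heart of this hunk: the cache now stores the result of the filesystem probe per directory prefix, so a prefix already known not to exist lets the loop break without touching the disk again. A sketch of the memoization pattern, with expensive_fs_check standing in for `_checkfs_exists` (the real method also rejects symlinks and nested repositories):

    import os

    auditeddir = {}  # prefix -> bool, mirroring the new dict-based cache


    def expensive_fs_check(prefix: bytes) -> bool:
        # stand-in for pathauditor._checkfs_exists()
        return os.path.lexists(prefix)


    def check_prefix(prefix: bytes) -> bool:
        if prefix in auditeddir:
            return auditeddir[prefix]  # cached: no syscall at all
        res = expensive_fs_check(prefix)
        auditeddir[prefix] = res  # both True and False are remembered
        return res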
@@ -152,9 +157,10 b' class pathauditor:'
         elif stat.S_ISDIR(st.st_mode) and os.path.isdir(
             os.path.join(curpath, b'.hg')
         ):
-            if not self.callback or not self.callback(curpath):
+            if not callback or not callback(curpath):
                 msg = _(b"path '%s' is inside nested repo %r")
                 raise error.Abort(msg % (path, pycompat.bytestr(prefix)))
+        return True

     def check(self, path):
         # type: (bytes) -> bool
@@ -314,6 +320,13 b' def finddirs(path):'
         yield b''


+def finddirs_rev_noroot(path: bytes) -> Iterator[bytes]:
+    pos = path.find(pycompat.ossep)
+    while pos != -1:
+        yield path[:pos]
+        pos = path.find(pycompat.ossep, pos + 1)
+
+
 class dirs:
     '''a multiset of directory names from a set of file paths'''

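`finddirs_rev_noroot` yields the directory prefixes of a path from the root downward, excluding the path itself and the empty root, which is exactly the order the audit loop above needs so it can stop at the first missing directory. A self-contained illustration, hard-coding b'/' where the real function uses pycompat.ossep:

    def finddirs_rev_noroot(path: bytes):
        pos = path.find(b'/')
        while pos != -1:
            yield path[:pos]
            pos = path.find(b'/', pos + 1)


    assert list(finddirs_rev_noroot(b'a/b/c.txt')) == [b'a', b'a/b']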
@@ -76,7 +76,7 b' def _importfrom(pkgname, modname):'
     ('cext', 'bdiff'): 3,
     ('cext', 'mpatch'): 1,
     ('cext', 'osutil'): 4,
-    ('cext', 'parsers'): 20,
+    ('cext', 'parsers'): 21,
 }

 # map import request to other package or module
@@ -17,8 +17,23 b' import select'
 import stat
 import sys
 import tempfile
+import typing
 import unicodedata

+from typing import (
+    Any,
+    AnyStr,
+    Iterable,
+    Iterator,
+    List,
+    Match,
+    NoReturn,
+    Optional,
+    Sequence,
+    Tuple,
+    Union,
+)
+
 from .i18n import _
 from .pycompat import (
     getattr,
@@ -44,7 +59,7 b' except AttributeError:'
     # vaguely unix-like but don't have hardlink support. For those
     # poor souls, just say we tried and that it failed so we fall back
     # to copies.
-    def oslink(src, dst):
+    def oslink(src: bytes, dst: bytes) -> NoReturn:
         raise OSError(
             errno.EINVAL, b'hardlinks not supported: %s to %s' % (src, dst)
         )
@@ -54,15 +69,47 b' readlink = os.readlink'
 unlink = os.unlink
 rename = os.rename
 removedirs = os.removedirs
-expandglobs = False

-umask = os.umask(0)
+if typing.TYPE_CHECKING:
+    # Replace the various overloads that come along with aliasing stdlib methods
+    # with the narrow definition that we care about in the type checking phase
+    # only. This ensures that both Windows and POSIX see only the definition
+    # that is actually available.
+    #
+    # Note that if we check pycompat.TYPE_CHECKING here, it is always False, and
+    # the methods aren't replaced.
+
+    def normpath(path: bytes) -> bytes:
+        raise NotImplementedError
+
+    def abspath(path: AnyStr) -> AnyStr:
+        raise NotImplementedError
+
+    def oslink(src: bytes, dst: bytes) -> None:
+        raise NotImplementedError
+
+    def readlink(path: bytes) -> bytes:
+        raise NotImplementedError
+
+    def unlink(path: bytes) -> None:
+        raise NotImplementedError
+
+    def rename(src: bytes, dst: bytes) -> None:
+        raise NotImplementedError
+
+    def removedirs(name: bytes) -> None:
+        raise NotImplementedError
+
+
+expandglobs: bool = False
+
+umask: int = os.umask(0)
 os.umask(umask)

 posixfile = open


-def split(p):
+def split(p: bytes) -> Tuple[bytes, bytes]:
     """Same as posixpath.split, but faster

     >>> import posixpath
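The pattern introduced here, in isolation: at runtime the module keeps the plain stdlib aliases with all their overloads, and only the type checker sees the narrowed bytes-only signatures, because `typing.TYPE_CHECKING` is false when the module actually executes. A minimal reproduction:

    import os
    import typing

    readlink = os.readlink  # runtime alias, full stdlib overload set

    if typing.TYPE_CHECKING:
        # never executed; checkers treat this as the only definition
        def readlink(path: bytes) -> bytes:
            raise NotImplementedError


    assert readlink is os.readlink  # the runtime alias is untouched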
@@ -85,17 +132,17 b' def split(p):'
     return ht[0] + b'/', ht[1]


-def openhardlinks():
+def openhardlinks() -> bool:
     '''return true if it is safe to hold open file handles to hardlinks'''
     return True


-def nlinks(name):
+def nlinks(name: bytes) -> int:
     '''return number of hardlinks for the given file'''
     return os.lstat(name).st_nlink


-def parsepatchoutput(output_line):
+def parsepatchoutput(output_line: bytes) -> bytes:
     """parses the output produced by patch and returns the filename"""
     pf = output_line[14:]
     if pycompat.sysplatform == b'OpenVMS':
@@ -107,7 +154,9 b' def parsepatchoutput(output_line):'
     return pf


-def sshargs(sshcmd, host, user, port):
+def sshargs(
+    sshcmd: bytes, host: bytes, user: Optional[bytes], port: Optional[bytes]
+) -> bytes:
     '''Build argument list for ssh'''
     args = user and (b"%s@%s" % (user, host)) or host
     if b'-' in args[:1]:
@@ -120,12 +169,12 b' def sshargs(sshcmd, host, user, port):'
     return args


-def isexec(f):
+def isexec(f: bytes) -> bool:
     """check whether a file is executable"""
     return os.lstat(f).st_mode & 0o100 != 0


-def setflags(f, l, x):
+def setflags(f: bytes, l: bool, x: bool) -> None:
     st = os.lstat(f)
     s = st.st_mode
     if l:
@@ -169,7 +218,12 b' def setflags(f, l, x):'
         os.chmod(f, s & 0o666)


-def copymode(src, dst, mode=None, enforcewritable=False):
+def copymode(
+    src: bytes,
+    dst: bytes,
+    mode: Optional[bytes] = None,
+    enforcewritable: bool = False,
+) -> None:
     """Copy the file mode from the file at path src to dst.
     If src doesn't exist, we're using mode instead. If mode is None, we're
     using umask."""
@@ -189,7 +243,7 b' def copymode(src, dst, mode=None, enforc'
     os.chmod(dst, new_mode)


-def checkexec(path):
+def checkexec(path: bytes) -> bool:
     """
     Check whether the given path is on a filesystem with UNIX-like exec flags

@@ -230,7 +284,7 b' def checkexec(path):'
     else:
         # checkisexec exists, check if it actually is exec
         if m & EXECFLAGS != 0:
-            # ensure checkisexec exists, check it isn't exec
+            # ensure checknoexec exists, check it isn't exec
             try:
                 m = os.stat(checknoexec).st_mode
             except FileNotFoundError:
@@ -269,7 +323,7 b' def checkexec(path):'
     return False


-def checklink(path):
+def checklink(path: bytes) -> bool:
     """check whether the given path is on a symlink-capable filesystem"""
     # mktemp is not racy because symlink creation will fail if the
     # file already exists
@@ -334,13 +388,13 b' def checklink(path):'
     return False


-def checkosfilename(path):
+def checkosfilename(path: bytes) -> Optional[bytes]:
     """Check that the base-relative path is a valid filename on this platform.
     Returns None if the path is ok, or a UI string describing the problem."""
     return None  # on posix platforms, every path is ok


-def getfsmountpoint(dirpath):
+def getfsmountpoint(dirpath: bytes) -> Optional[bytes]:
     """Get the filesystem mount point from a directory (best-effort)

     Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc.
@@ -348,7 +402,7 b' def getfsmountpoint(dirpath):'
     return getattr(osutil, 'getfsmountpoint', lambda x: None)(dirpath)


-def getfstype(dirpath):
+def getfstype(dirpath: bytes) -> Optional[bytes]:
     """Get the filesystem type name from a directory (best-effort)

     Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc.
@@ -356,29 +410,29 b' def getfstype(dirpath):'
     return getattr(osutil, 'getfstype', lambda x: None)(dirpath)


-def get_password():
+def get_password() -> bytes:
     return encoding.strtolocal(getpass.getpass(''))


-def setbinary(fd):
+def setbinary(fd) -> None:
     pass


-def pconvert(path):
+def pconvert(path: bytes) -> bytes:
     return path


-def localpath(path):
+def localpath(path: bytes) -> bytes:
     return path


-def samefile(fpath1, fpath2):
+def samefile(fpath1: bytes, fpath2: bytes) -> bool:
     """Returns whether path1 and path2 refer to the same file. This is only
     guaranteed to work for files, not directories."""
     return os.path.samefile(fpath1, fpath2)


-def samedevice(fpath1, fpath2):
+def samedevice(fpath1: bytes, fpath2: bytes) -> bool:
     """Returns whether fpath1 and fpath2 are on the same device. This is only
     guaranteed to work for files, not directories."""
     st1 = os.lstat(fpath1)
@@ -387,18 +441,18 b' def samedevice(fpath1, fpath2):'


 # os.path.normcase is a no-op, which doesn't help us on non-native filesystems
-def normcase(path):
+def normcase(path: bytes) -> bytes:
     return path.lower()


 # what normcase does to ASCII strings
-normcasespec = encoding.normcasespecs.lower
+normcasespec: int = encoding.normcasespecs.lower
 # fallback normcase function for non-ASCII strings
 normcasefallback = normcase

 if pycompat.isdarwin:

-    def normcase(path):
+    def normcase(path: bytes) -> bytes:
         """
         Normalize a filename for OS X-compatible comparison:
         - escape-encode invalid characters
@@ -423,7 +477,7 b' if pycompat.isdarwin:'

     normcasespec = encoding.normcasespecs.lower

-    def normcasefallback(path):
+    def normcasefallback(path: bytes) -> bytes:
         try:
             u = path.decode('utf-8')
         except UnicodeDecodeError:
@@ -464,7 +518,7 b" if pycompat.sysplatform == b'cygwin':"
     )

     # use upper-ing as normcase as same as NTFS workaround
-    def normcase(path):
+    def normcase(path: bytes) -> bytes:
         pathlen = len(path)
         if (pathlen == 0) or (path[0] != pycompat.ossep):
             # treat as relative
@@ -490,20 +544,20 b" if pycompat.sysplatform == b'cygwin':"
     # but these translations are not supported by native
     # tools, so the exec bit tends to be set erroneously.
     # Therefore, disable executable bit access on Cygwin.
-    def checkexec(path):
+    def checkexec(path: bytes) -> bool:
         return False

     # Similarly, Cygwin's symlink emulation is likely to create
     # problems when Mercurial is used from both Cygwin and native
     # Windows, with other native tools, or on shared volumes
-    def checklink(path):
+    def checklink(path: bytes) -> bool:
         return False


-_needsshellquote = None
+_needsshellquote: Optional[Match[bytes]] = None


-def shellquote(s):
+def shellquote(s: bytes) -> bytes:
     if pycompat.sysplatform == b'OpenVMS':
         return b'"%s"' % s
     global _needsshellquote
@@ -516,12 +570,12 b' def shellquote(s):'
     return b"'%s'" % s.replace(b"'", b"'\\''")


-def shellsplit(s):
+def shellsplit(s: bytes) -> List[bytes]:
     """Parse a command string in POSIX shell way (best-effort)"""
     return pycompat.shlexsplit(s, posix=True)


-def testpid(pid):
+def testpid(pid: int) -> bool:
     '''return False if pid dead, True if running or not sure'''
     if pycompat.sysplatform == b'OpenVMS':
         return True
@@ -532,12 +586,12 b' def testpid(pid):'
         return inst.errno != errno.ESRCH


-def isowner(st):
+def isowner(st: os.stat_result) -> bool:
     """Return True if the stat object st is from the current user."""
     return st.st_uid == os.getuid()


-def findexe(command):
+def findexe(command: bytes) -> Optional[bytes]:
     """Find executable for command searching like which does.
     If command is a basename then PATH is searched for command.
     PATH isn't searched if command is an absolute or relative path.
@@ -545,7 +599,7 b' def findexe(command):'
     if pycompat.sysplatform == b'OpenVMS':
         return command

-    def findexisting(executable):
+    def findexisting(executable: bytes) -> Optional[bytes]:
         b'Will return executable if existing file'
         if os.path.isfile(executable) and os.access(executable, os.X_OK):
             return executable
@@ -564,14 +618,14 b' def findexe(command):'
     return None


-def setsignalhandler():
+def setsignalhandler() -> None:
     pass


 _wantedkinds = {stat.S_IFREG, stat.S_IFLNK}


-def statfiles(files):
+def statfiles(files: Sequence[bytes]) -> Iterator[Optional[os.stat_result]]:
     """Stat each file in files. Yield each stat, or None if a file does not
     exist or has a type we don't care about."""
     lstat = os.lstat
@@ -586,12 +640,12 b' def statfiles(files):'
     yield st


-def getuser():
+def getuser() -> bytes:
     '''return name of current user'''
     return pycompat.fsencode(getpass.getuser())


-def username(uid=None):
+def username(uid: Optional[int] = None) -> Optional[bytes]:
     """Return the name of the user with the given uid.

     If uid is None, return the name of the current user."""
@@ -604,7 +658,7 b' def username(uid=None):'
     return b'%d' % uid


-def groupname(gid=None):
+def groupname(gid: Optional[int] = None) -> Optional[bytes]:
     """Return the name of the group with the given gid.

     If gid is None, return the name of the current group."""
@@ -617,7 +671,7 b' def groupname(gid=None):'
     return pycompat.bytestr(gid)


-def groupmembers(name):
+def groupmembers(name: bytes) -> List[bytes]:
     """Return the list of members of the group with the given
     name, KeyError if the group does not exist.
     """
@@ -625,23 +679,27 b' def groupmembers(name):'
     return pycompat.rapply(pycompat.fsencode, list(grp.getgrnam(name).gr_mem))


-def spawndetached(args):
+def spawndetached(args: List[bytes]) -> int:
     return os.spawnvp(os.P_NOWAIT | getattr(os, 'P_DETACH', 0), args[0], args)


-def gethgcmd():
+def gethgcmd():  # TODO: convert to bytes, like on Windows?
     return sys.argv[:1]


-def makedir(path, notindexed):
+def makedir(path: bytes, notindexed: bool) -> None:
     os.mkdir(path)


-def lookupreg(key, name=None, scope=None):
+def lookupreg(
+    key: bytes,
+    name: Optional[bytes] = None,
+    scope: Optional[Union[int, Iterable[int]]] = None,
+) -> Optional[bytes]:
     return None


-def hidewindow():
+def hidewindow() -> None:
     """Hide current shell window.

     Used to hide the window opened when starting asynchronous
@@ -651,15 +709,15 b' def hidewindow():'


 class cachestat:
-    def __init__(self, path):
+    def __init__(self, path: bytes) -> None:
         self.stat = os.stat(path)

-    def cacheable(self):
+    def cacheable(self) -> bool:
         return bool(self.stat.st_ino)

     __hash__ = object.__hash__

-    def __eq__(self, other):
+    def __eq__(self, other: Any) -> bool:
         try:
             # Only dev, ino, size, mtime and atime are likely to change. Out
             # of these, we shouldn't compare atime but should compare the
@@ -680,18 +738,18 b' class cachestat:'
         except AttributeError:
             return False

-    def __ne__(self, other):
+    def __ne__(self, other: Any) -> bool:
         return not self == other


-def statislink(st):
+def statislink(st: Optional[os.stat_result]) -> bool:
     '''check whether a stat result is a symlink'''
-    return st and stat.S_ISLNK(st.st_mode)
+    return stat.S_ISLNK(st.st_mode) if st else False


-def statisexec(st):
+def statisexec(st: Optional[os.stat_result]) -> bool:
     '''check whether a stat result is an executable file'''
-    return st and (st.st_mode & 0o100 != 0)
+    return (st.st_mode & 0o100 != 0) if st else False


 def poll(fds):
@@ -708,7 +766,7 b' def poll(fds):'
     return sorted(list(set(sum(res, []))))


-def readpipe(pipe):
+def readpipe(pipe) -> bytes:
     """Read all available data from a pipe."""
     # We can't fstat() a pipe because Linux will always report 0.
     # So, we set the pipe to non-blocking mode and read everything
@@ -733,7 +791,7 b' def readpipe(pipe):'
     fcntl.fcntl(pipe, fcntl.F_SETFL, oldflags)


-def bindunixsocket(sock, path):
+def bindunixsocket(sock, path: bytes) -> None:
     """Bind the UNIX domain socket to the specified path"""
     # use relative path instead of full path at bind() if possible, since
     # AF_UNIX path has very small length limit (107 chars) on common
@@ -10,8 +10,13 b' import difflib'
 import re
 import struct

+from typing import (
+    List,
+    Tuple,
+)

-def splitnewlines(text):
+
+def splitnewlines(text: bytes) -> List[bytes]:
     '''like str.splitlines, but only split on newlines.'''
     lines = [l + b'\n' for l in text.split(b'\n')]
     if lines:
@@ -22,7 +27,9 b' def splitnewlines(text):'
     return lines


-def _normalizeblocks(a, b, blocks):
+def _normalizeblocks(
+    a: List[bytes], b: List[bytes], blocks
+) -> List[Tuple[int, int, int]]:
     prev = None
     r = []
     for curr in blocks:
@@ -57,7 +64,7 b' def _normalizeblocks(a, b, blocks):'
     return r


-def bdiff(a, b):
+def bdiff(a: bytes, b: bytes) -> bytes:
     a = bytes(a).splitlines(True)
     b = bytes(b).splitlines(True)

@@ -84,7 +91,7 b' def bdiff(a, b):'
     return b"".join(bin)


-def blocks(a, b):
+def blocks(a: bytes, b: bytes) -> List[Tuple[int, int, int, int]]:
     an = splitnewlines(a)
     bn = splitnewlines(b)
     d = difflib.SequenceMatcher(None, an, bn).get_matching_blocks()
@@ -92,7 +99,7 b' def blocks(a, b):'
     return [(i, i + n, j, j + n) for (i, j, n) in d]


-def fixws(text, allws):
+def fixws(text: bytes, allws: bool) -> bytes:
     if allws:
         text = re.sub(b'[ \t\r]+', b'', text)
     else:
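For a feel of what the newly annotated `blocks()` returns: matching regions as (astart, aend, bstart, bend) line ranges, including difflib's zero-length sentinel match at the end. Assuming the pure module is importable as mercurial.pure.bdiff:

    from mercurial.pure import bdiff

    matches = bdiff.blocks(b'a\nb\nc\n', b'a\nx\nc\n')
    assert matches == [
        (0, 1, 0, 1),  # line 0 matches line 0
        (2, 3, 2, 3),  # line 2 matches line 2
        (3, 3, 3, 3),  # zero-length sentinel
    ]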
@@ -9,6 +9,11 b''
 import io
 import struct

+from typing import (
+    List,
+    Tuple,
+)
+

 stringio = io.BytesIO

@@ -28,7 +33,9 b' class mpatchError(Exception):'
 # temporary string buffers.


-def _pull(dst, src, l):  # pull l bytes from src
+def _pull(
+    dst: List[Tuple[int, int]], src: List[Tuple[int, int]], l: int
+) -> None:  # pull l bytes from src
     while l:
         f = src.pop()
         if f[0] > l:  # do we need to split?
@@ -39,7 +46,7 b' def _pull(dst, src, l): # pull l bytes '
         l -= f[0]


-def _move(m, dest, src, count):
+def _move(m: stringio, dest: int, src: int, count: int) -> None:
     """move count bytes from src to dest

     The file pointer is left at the end of dest.
@@ -50,7 +57,9 b' def _move(m, dest, src, count):'
     m.write(buf)


-def _collect(m, buf, list):
+def _collect(
+    m: stringio, buf: int, list: List[Tuple[int, int]]
+) -> Tuple[int, int]:
     start = buf
     for l, p in reversed(list):
         _move(m, buf, p, l)
@@ -58,7 +67,7 b' def _collect(m, buf, list):'
     return (buf - start, start)


-def patches(a, bins):
+def patches(a: bytes, bins: List[bytes]) -> bytes:
     if not bins:
         return a

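A reference point for the `List[bytes]` annotation on `bins`: each delta consumed by `patches()` is a sequence of big-endian (start, end, newlength) 32-bit triples, each followed by newlength bytes of replacement data. A tiny round-trip through the pure implementation:

    import struct

    from mercurial.pure import mpatch

    a = b'hello world'
    # one fragment: replace a[0:5] with 7 bytes of new data
    delta = struct.pack('>lll', 0, 5, 7) + b'goodbye'
    assert mpatch.patches(a, [delta]) == b'goodbye world'
    assert mpatch.patchedsize(len(a), delta) == len(b'goodbye world')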
@@ -111,7 +120,7 b' def patches(a, bins):'
     return m.read(t[0])


-def patchedsize(orig, delta):
+def patchedsize(orig: int, delta: bytes) -> int:
     outlen, last, bin = 0, 0, 0
     binend = len(delta)
     data = 12
@@ -435,6 +435,11 b' class DirstateItem:'
         return self._wc_tracked and not (self._p1_tracked or self._p2_info)

     @property
+    def modified(self):
+        """True if the file has been modified"""
+        return self._wc_tracked and self._p1_tracked and self._p2_info
+
+    @property
     def maybe_clean(self):
         """True if the file has a chance to be in the "clean" state"""
         if not self._wc_tracked:
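The new property completes the added/modified family derived from the same three booleans. A summary of the combinations as a standalone classifier; the 'removed' rule is inferred from the sibling properties rather than shown in this hunk:

    def classify(wc_tracked: bool, p1_tracked: bool, p2_info: bool) -> str:
        if wc_tracked and not (p1_tracked or p2_info):
            return 'added'  # mirrors DirstateItem.added above
        if wc_tracked and p1_tracked and p2_info:
            return 'modified'  # the new DirstateItem.modified
        if not wc_tracked and (p1_tracked or p2_info):
            return 'removed'  # assumed from DirstateItem.removed
        return 'other'


    assert classify(True, False, False) == 'added'
    assert classify(True, True, True) == 'modified'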
@@ -28,6 +28,24 b' import sys'
 import tempfile
 import xmlrpc.client as xmlrpclib

+from typing import (
+    Any,
+    AnyStr,
+    BinaryIO,
+    Dict,
+    Iterable,
+    Iterator,
+    List,
+    Mapping,
+    NoReturn,
+    Optional,
+    Sequence,
+    Tuple,
+    Type,
+    TypeVar,
+    cast,
+    overload,
+)

 ispy3 = sys.version_info[0] >= 3
 ispypy = '__pypy__' in sys.builtin_module_names
@@ -38,6 +56,10 b' if not globals(): # hide this from non-'

 TYPE_CHECKING = typing.TYPE_CHECKING

+_GetOptResult = Tuple[List[Tuple[bytes, bytes]], List[bytes]]
+_T0 = TypeVar('_T0')
+_Tbytestr = TypeVar('_Tbytestr', bound='bytestr')
+

 def future_set_exception_info(f, exc_info):
     f.set_exception(exc_info[0])
@@ -46,7 +68,7 b' def future_set_exception_info(f, exc_inf'
 FileNotFoundError = builtins.FileNotFoundError


-def identity(a):
+def identity(a: _T0) -> _T0:
     return a


@@ -94,21 +116,17 b" if os.name == r'nt':"

 fsencode = os.fsencode
 fsdecode = os.fsdecode
-oscurdir = os.curdir.encode('ascii')
-oslinesep = os.linesep.encode('ascii')
-osname = os.name.encode('ascii')
-ospathsep = os.pathsep.encode('ascii')
-ospardir = os.pardir.encode('ascii')
-ossep = os.sep.encode('ascii')
-osaltsep = os.altsep
-if osaltsep:
-    osaltsep = osaltsep.encode('ascii')
-osdevnull = os.devnull.encode('ascii')
+oscurdir: bytes = os.curdir.encode('ascii')
+oslinesep: bytes = os.linesep.encode('ascii')
+osname: bytes = os.name.encode('ascii')
+ospathsep: bytes = os.pathsep.encode('ascii')
+ospardir: bytes = os.pardir.encode('ascii')
+ossep: bytes = os.sep.encode('ascii')
+osaltsep: Optional[bytes] = os.altsep.encode('ascii') if os.altsep else None
+osdevnull: bytes = os.devnull.encode('ascii')

-sysplatform = sys.platform.encode('ascii')
-sysexecutable = sys.executable
-if sysexecutable:
-    sysexecutable = os.fsencode(sysexecutable)
+sysplatform: bytes = sys.platform.encode('ascii')
+sysexecutable: bytes = os.fsencode(sys.executable) if sys.executable else b''


 def maplist(*args):
@@ -128,7 +146,7 b' getargspec = inspect.getfullargspec'

 long = int

-if getattr(sys, 'argv', None) is not None:
+if builtins.getattr(sys, 'argv', None) is not None:
     # On POSIX, the char** argv array is converted to Python str using
     # Py_DecodeLocale(). The inverse of this is Py_EncodeLocale(), which
     # isn't directly callable from Python code. In practice, os.fsencode()
@@ -143,6 +161,7 b" if getattr(sys, 'argv', None) is not Non"
     # (this is how Python 2 worked). To get that, we encode with the mbcs
     # encoding, which will pass CP_ACP to the underlying Windows API to
     # produce bytes.
+    sysargv: List[bytes] = []
     if os.name == r'nt':
         sysargv = [a.encode("mbcs", "ignore") for a in sys.argv]
     else:
@@ -211,38 +230,53 b' class bytestr(bytes):'
     # https://github.com/google/pytype/issues/500
     if TYPE_CHECKING:

-        def __init__(self, s=b''):
+        def __init__(self, s: object = b'') -> None:
             pass

-    def __new__(cls, s=b''):
+    def __new__(cls: Type[_Tbytestr], s: object = b'') -> _Tbytestr:
         if isinstance(s, bytestr):
             return s
         if not isinstance(
             s, (bytes, bytearray)
-        ) and not hasattr(  # hasattr-py3-only
+        ) and not builtins.hasattr(  # hasattr-py3-only
             s, u'__bytes__'
         ):
             s = str(s).encode('ascii')
         return bytes.__new__(cls, s)

-    def __getitem__(self, key):
+    # The base class uses `int` return in py3, but the point of this class is to
+    # behave like py2.
+    def __getitem__(self, key) -> bytes:  # pytype: disable=signature-mismatch
         s = bytes.__getitem__(self, key)
         if not isinstance(s, bytes):
             s = bytechr(s)
         return s

-    def __iter__(self):
+    # The base class expects `Iterator[int]` return in py3, but the point of
+    # this class is to behave like py2.
+    def __iter__(self) -> Iterator[bytes]:  # pytype: disable=signature-mismatch
         return iterbytestr(bytes.__iter__(self))

-    def __repr__(self):
+    def __repr__(self) -> str:
         return bytes.__repr__(self)[1:]  # drop b''


-def iterbytestr(s):
+def iterbytestr(s: Iterable[int]) -> Iterator[bytes]:
     """Iterate bytes as if it were a str object of Python 2"""
     return map(bytechr, s)


+if TYPE_CHECKING:
+
+    @overload
+    def maybebytestr(s: bytes) -> bytestr:
+        ...
+
+    @overload
+    def maybebytestr(s: _T0) -> _T0:
+        ...
+
+
 def maybebytestr(s):
     """Promote bytes to bytestr"""
     if isinstance(s, bytes):
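The pytype pragmas exist because `bytestr` deliberately violates the Python 3 `bytes` protocol: indexing and iteration yield one-byte bytes objects, the way Python 2 `str` behaved. Concretely (assuming the module imports as mercurial.pycompat):

    from mercurial import pycompat

    s = pycompat.bytestr(b'abc')
    assert s[0] == b'a'  # plain bytes indexing would give the int 97
    assert list(s) == [b'a', b'b', b'c']  # not [97, 98, 99]
    assert repr(s) == "'abc'"  # the b'' prefix is dropped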
@@ -250,7 +284,7 b' def maybebytestr(s):' | |||||
250 | return s |
|
284 | return s | |
251 |
|
285 | |||
252 |
|
286 | |||
253 | def sysbytes(s): |
|
287 | def sysbytes(s: AnyStr) -> bytes: | |
254 | """Convert an internal str (e.g. keyword, __doc__) back to bytes |
|
288 | """Convert an internal str (e.g. keyword, __doc__) back to bytes | |
255 |
|
289 | |||
256 | This never raises UnicodeEncodeError, but only ASCII characters |
|
290 | This never raises UnicodeEncodeError, but only ASCII characters | |
@@ -261,7 +295,7 b' def sysbytes(s):' | |||||
261 | return s.encode('utf-8') |
|
295 | return s.encode('utf-8') | |
262 |
|
296 | |||
263 |
|
297 | |||
264 | def sysstr(s): |
|
298 | def sysstr(s: AnyStr) -> str: | |
265 | """Return a keyword str to be passed to Python functions such as |
|
299 | """Return a keyword str to be passed to Python functions such as | |
266 | getattr() and str.encode() |
|
300 | getattr() and str.encode() | |
267 |
|
301 | |||
@@ -274,29 +308,29 b' def sysstr(s):' | |||||
274 | return s.decode('latin-1') |
|
308 | return s.decode('latin-1') | |
275 |
|
309 | |||
276 |
|
310 | |||
277 | def strurl(url): |
|
311 | def strurl(url: AnyStr) -> str: | |
278 | """Converts a bytes url back to str""" |
|
312 | """Converts a bytes url back to str""" | |
279 | if isinstance(url, bytes): |
|
313 | if isinstance(url, bytes): | |
280 | return url.decode('ascii') |
|
314 | return url.decode('ascii') | |
281 | return url |
|
315 | return url | |
282 |
|
316 | |||
283 |
|
317 | |||
284 | def bytesurl(url): |
|
318 | def bytesurl(url: AnyStr) -> bytes: | |
285 | """Converts a str url to bytes by encoding in ascii""" |
|
319 | """Converts a str url to bytes by encoding in ascii""" | |
286 | if isinstance(url, str): |
|
320 | if isinstance(url, str): | |
287 | return url.encode('ascii') |
|
321 | return url.encode('ascii') | |
288 | return url |
|
322 | return url | |
289 |
|
323 | |||
290 |
|
324 | |||
291 | def raisewithtb(exc, tb): |
|
325 | def raisewithtb(exc: BaseException, tb) -> NoReturn: | |
292 | """Raise exception with the given traceback""" |
|
326 | """Raise exception with the given traceback""" | |
293 | raise exc.with_traceback(tb) |
|
327 | raise exc.with_traceback(tb) | |
294 |
|
328 | |||
295 |
|
329 | |||
296 | def getdoc(obj): |
|
330 | def getdoc(obj: object) -> Optional[bytes]: | |
297 | """Get docstring as bytes; may be None so gettext() won't confuse it |
|
331 | """Get docstring as bytes; may be None so gettext() won't confuse it | |
298 | with _('')""" |
|
332 | with _('')""" | |
299 | doc = getattr(obj, '__doc__', None) |
|
333 | doc = builtins.getattr(obj, '__doc__', None) | |
300 | if doc is None: |
|
334 | if doc is None: | |
301 | return doc |
|
335 | return doc | |
302 | return sysbytes(doc) |
|
336 | return sysbytes(doc) | |
@@ -319,14 +353,22 b' xrange = builtins.range' | |||||
319 | unicode = str |
|
353 | unicode = str | |
320 |
|
354 | |||
321 |
|
355 | |||
322 | def open(name, mode=b'r', buffering=-1, encoding=None): |
|
356 | def open( | |
|
357 | name, | |||
|
358 | mode: AnyStr = b'r', | |||
|
359 | buffering: int = -1, | |||
|
360 | encoding: Optional[str] = None, | |||
|
361 | ) -> Any: | |||
|
362 | # TODO: assert binary mode, and cast result to BinaryIO? | |||
323 | return builtins.open(name, sysstr(mode), buffering, encoding) |
|
363 | return builtins.open(name, sysstr(mode), buffering, encoding) | |
324 |
|
364 | |||
325 |
|
365 | |||
326 | safehasattr = _wrapattrfunc(builtins.hasattr) |
|
366 | safehasattr = _wrapattrfunc(builtins.hasattr) | |
327 |
|
367 | |||
328 |
|
368 | |||
329 | def _getoptbwrapper(orig, args, shortlist, namelist): |
|
369 | def _getoptbwrapper( | |
|
370 | orig, args: Sequence[bytes], shortlist: bytes, namelist: Sequence[bytes] | |||
|
371 | ) -> _GetOptResult: | |||
330 | """ |
|
372 | """ | |
331 | Takes bytes arguments, converts them to unicode, passes them to |
|
373 | Takes bytes arguments, converts them to unicode, passes them to | 
332 | getopt.getopt(), converts the returned values back to bytes and then |
|
374 | getopt.getopt(), converts the returned values back to bytes and then | 
@@ -342,7 +384,7 b' def _getoptbwrapper(orig, args, shortlis' | |||||
342 | return opts, args |
|
384 | return opts, args | |
343 |
|
385 | |||
344 |
|
386 | |||
345 | def strkwargs(dic): |
|
387 | def strkwargs(dic: Mapping[bytes, _T0]) -> Dict[str, _T0]: | |
346 | """ |
|
388 | """ | |
347 | Converts the keys of a python dictionary to str i.e. unicodes so that |
|
389 | Converts the keys of a python dictionary to str i.e. unicodes so that | 
348 | they can be passed as keyword arguments as dictionaries with bytes keys |
|
390 | they can be passed as keyword arguments as dictionaries with bytes keys | |
@@ -352,7 +394,7 b' def strkwargs(dic):' | |||||
352 | return dic |
|
394 | return dic | |
353 |
|
395 | |||
354 |
|
396 | |||
355 | def byteskwargs(dic): |
|
397 | def byteskwargs(dic: Mapping[str, _T0]) -> Dict[bytes, _T0]: | |
356 | """ |
|
398 | """ | |
357 | Converts keys of python dictionaries to bytes as they were converted to |
|
399 | Converts keys of python dictionaries to bytes as they were converted to | |
358 | str to pass that dictionary as a keyword argument on Python 3. |
|
400 | str to pass that dictionary as a keyword argument on Python 3. | 
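Annotating both converters with a shared TypeVar (_T0) is what lets the value type survive the key conversion. A rough standalone equivalent (the real functions build on the pycompat helpers; this sketch inlines a latin-1 codec as an assumption):

    from typing import Dict, Mapping, TypeVar

    _T0 = TypeVar('_T0')

    def strkwargs(dic: Mapping[bytes, _T0]) -> Dict[str, _T0]:
        # bytes keys -> str keys so the dict can be splatted as **kwargs
        return {k.decode('latin-1'): v for k, v in dic.items()}

    def byteskwargs(dic: Mapping[str, _T0]) -> Dict[bytes, _T0]:
        # undo the conversion on the way back in
        return {k.encode('latin-1'): v for k, v in dic.items()}

    opts = {b'rev': b'tip', b'force': b'yes'}
    assert byteskwargs(strkwargs(opts)) == opts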
@@ -362,7 +404,9 b' def byteskwargs(dic):' | |||||
362 |
|
404 | |||
363 |
|
405 | |||
364 | # TODO: handle shlex.shlex(). |
|
406 | # TODO: handle shlex.shlex(). | |
365 | def shlexsplit(s, comments=False, posix=True): |
|
407 | def shlexsplit( | |
|
408 | s: bytes, comments: bool = False, posix: bool = True | |||
|
409 | ) -> List[bytes]: | |||
366 | """ |
|
410 | """ | |
367 | Takes a bytes argument, converts it to str i.e. unicode, passes that into |
|
411 | Takes a bytes argument, converts it to str i.e. unicode, passes that into | 
368 | shlex.split(), converts the returned value to bytes and returns that for |
|
412 | shlex.split(), converts the returned value to bytes and returns that for | 
@@ -377,46 +421,59 b' itervalues = lambda x: x.values()' | |||||
377 |
|
421 | |||
378 | json_loads = json.loads |
|
422 | json_loads = json.loads | |
379 |
|
423 | |||
380 | isjython = sysplatform.startswith(b'java') |
|
424 | isjython: bool = sysplatform.startswith(b'java') | |
381 |
|
425 | |||
382 | isdarwin = sysplatform.startswith(b'darwin') |
|
426 | isdarwin: bool = sysplatform.startswith(b'darwin') | |
383 | islinux = sysplatform.startswith(b'linux') |
|
427 | islinux: bool = sysplatform.startswith(b'linux') | |
384 | isposix = osname == b'posix' |
|
428 | isposix: bool = osname == b'posix' | |
385 | iswindows = osname == b'nt' |
|
429 | iswindows: bool = osname == b'nt' | |
386 |
|
430 | |||
387 |
|
431 | |||
388 | def getoptb(args, shortlist, namelist): |
|
432 | def getoptb( | |
|
433 | args: Sequence[bytes], shortlist: bytes, namelist: Sequence[bytes] | |||
|
434 | ) -> _GetOptResult: | |||
389 | return _getoptbwrapper(getopt.getopt, args, shortlist, namelist) |
|
435 | return _getoptbwrapper(getopt.getopt, args, shortlist, namelist) | |
390 |
|
436 | |||
391 |
|
437 | |||
392 | def gnugetoptb(args, shortlist, namelist): |
|
438 | def gnugetoptb( | |
|
439 | args: Sequence[bytes], shortlist: bytes, namelist: Sequence[bytes] | |||
|
440 | ) -> _GetOptResult: | |||
393 | return _getoptbwrapper(getopt.gnu_getopt, args, shortlist, namelist) |
|
441 | return _getoptbwrapper(getopt.gnu_getopt, args, shortlist, namelist) | |
394 |
|
442 | |||
395 |
|
443 | |||
396 | def mkdtemp(suffix=b'', prefix=b'tmp', dir=None): |
|
444 | def mkdtemp( | |
|
445 | suffix: bytes = b'', prefix: bytes = b'tmp', dir: Optional[bytes] = None | |||
|
446 | ) -> bytes: | |||
397 | return tempfile.mkdtemp(suffix, prefix, dir) |
|
447 | return tempfile.mkdtemp(suffix, prefix, dir) | |
398 |
|
448 | |||
399 |
|
449 | |||
400 | # text=True is not supported; use util.from/tonativeeol() instead |
|
450 | # text=True is not supported; use util.from/tonativeeol() instead | |
401 | def mkstemp(suffix=b'', prefix=b'tmp', dir=None): |
|
451 | def mkstemp( | |
|
452 | suffix: bytes = b'', prefix: bytes = b'tmp', dir: Optional[bytes] = None | |||
|
453 | ) -> Tuple[int, bytes]: | |||
402 | return tempfile.mkstemp(suffix, prefix, dir) |
|
454 | return tempfile.mkstemp(suffix, prefix, dir) | |
403 |
|
455 | |||
404 |
|
456 | |||
405 | # TemporaryFile does not support an "encoding=" argument on python2. |
|
457 | # TemporaryFile does not support an "encoding=" argument on python2. | |
406 | # Files from this wrapper are always opened in byte mode. |
|
458 | # Files from this wrapper are always opened in byte mode. | 
407 | def unnamedtempfile(mode=None, *args, **kwargs): |
|
459 | def unnamedtempfile(mode: Optional[bytes] = None, *args, **kwargs) -> BinaryIO: | |
408 | if mode is None: |
|
460 | if mode is None: | |
409 | mode = 'w+b' |
|
461 | mode = 'w+b' | |
410 | else: |
|
462 | else: | |
411 | mode = sysstr(mode) |
|
463 | mode = sysstr(mode) | |
412 | assert 'b' in mode |
|
464 | assert 'b' in mode | |
413 | return tempfile.TemporaryFile(mode, *args, **kwargs) |
|
465 | return cast(BinaryIO, tempfile.TemporaryFile(mode, *args, **kwargs)) | |
414 |
|
466 | |||
415 |
|
467 | |||
416 | # NamedTemporaryFile does not support an "encoding=" argument on python2. |
|
468 | # NamedTemporaryFile does not support an "encoding=" argument on python2. | |
417 | # Files from this wrapper are always opened in byte mode. |
|
469 | # Files from this wrapper are always opened in byte mode. | 
418 | def namedtempfile( |
|
470 | def namedtempfile( | |
419 | mode=b'w+b', bufsize=-1, suffix=b'', prefix=b'tmp', dir=None, delete=True |
|
471 | mode: bytes = b'w+b', | |
|
472 | bufsize: int = -1, | |||
|
473 | suffix: bytes = b'', | |||
|
474 | prefix: bytes = b'tmp', | |||
|
475 | dir: Optional[bytes] = None, | |||
|
476 | delete: bool = True, | |||
420 | ): |
|
477 | ): | |
421 | mode = sysstr(mode) |
|
478 | mode = sysstr(mode) | |
422 | assert 'b' in mode |
|
479 | assert 'b' in mode |
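The getopt wrappers typed above all follow the recipe documented for _getoptbwrapper: decode the bytes arguments, call the stdlib, re-encode the result. A self-contained sketch of that round trip (latin-1 is assumed here, matching the module's usual codec choice):

    import getopt
    from typing import List, Sequence, Tuple

    _GetOptResult = Tuple[List[Tuple[bytes, bytes]], List[bytes]]

    def getoptb(args: Sequence[bytes], shortlist: bytes,
                namelist: Sequence[bytes]) -> _GetOptResult:
        sargs = [a.decode('latin-1') for a in args]
        opts, rest = getopt.getopt(
            sargs, shortlist.decode('latin-1'),
            [n.decode('latin-1') for n in namelist])
        bopts = [(k.encode('latin-1'), v.encode('latin-1')) for k, v in opts]
        return bopts, [a.encode('latin-1') for a in rest]

    assert getoptb([b'-q', b'file'], b'q', []) == ([(b'-q', b'')], [b'file'])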
@@ -38,12 +38,15 b' from .revlogutils.constants import (' | |||||
38 | COMP_MODE_DEFAULT, |
|
38 | COMP_MODE_DEFAULT, | |
39 | COMP_MODE_INLINE, |
|
39 | COMP_MODE_INLINE, | |
40 | COMP_MODE_PLAIN, |
|
40 | COMP_MODE_PLAIN, | |
|
41 | DELTA_BASE_REUSE_NO, | |||
|
42 | DELTA_BASE_REUSE_TRY, | |||
41 | ENTRY_RANK, |
|
43 | ENTRY_RANK, | |
42 | FEATURES_BY_VERSION, |
|
44 | FEATURES_BY_VERSION, | |
43 | FLAG_GENERALDELTA, |
|
45 | FLAG_GENERALDELTA, | |
44 | FLAG_INLINE_DATA, |
|
46 | FLAG_INLINE_DATA, | |
45 | INDEX_HEADER, |
|
47 | INDEX_HEADER, | |
46 | KIND_CHANGELOG, |
|
48 | KIND_CHANGELOG, | |
|
49 | KIND_FILELOG, | |||
47 | RANK_UNKNOWN, |
|
50 | RANK_UNKNOWN, | |
48 | REVLOGV0, |
|
51 | REVLOGV0, | |
49 | REVLOGV1, |
|
52 | REVLOGV1, | |
@@ -125,7 +128,7 b" rustrevlog = policy.importrust('revlog')" | |||||
125 | # Aliased for performance. |
|
128 | # Aliased for performance. | |
126 | _zlibdecompress = zlib.decompress |
|
129 | _zlibdecompress = zlib.decompress | |
127 |
|
130 | |||
128 | # max size of revlog with inline data |
|
131 | # max size of inline data embedded into a revlog | |
129 | _maxinline = 131072 |
|
132 | _maxinline = 131072 | |
130 |
|
133 | |||
131 | # Flag processors for REVIDX_ELLIPSIS. |
|
134 | # Flag processors for REVIDX_ELLIPSIS. | |
@@ -347,6 +350,7 b' class revlog:' | |||||
347 | self._chunkcachesize = 65536 |
|
350 | self._chunkcachesize = 65536 | |
348 | self._maxchainlen = None |
|
351 | self._maxchainlen = None | |
349 | self._deltabothparents = True |
|
352 | self._deltabothparents = True | |
|
353 | self._candidate_group_chunk_size = 0 | |||
350 | self._debug_delta = False |
|
354 | self._debug_delta = False | |
351 | self.index = None |
|
355 | self.index = None | |
352 | self._docket = None |
|
356 | self._docket = None | |
@@ -363,6 +367,11 b' class revlog:' | |||||
363 | self._srdensitythreshold = 0.50 |
|
367 | self._srdensitythreshold = 0.50 | |
364 | self._srmingapsize = 262144 |
|
368 | self._srmingapsize = 262144 | |
365 |
|
369 | |||
|
370 | # other optional features | |||
|
371 | ||||
|
372 | # might remove rank configuration once the computation has no impact | |||
|
373 | self._compute_rank = False | |||
|
374 | ||||
366 | # Make copy of flag processors so each revlog instance can support |
|
375 | # Make copy of flag processors so each revlog instance can support | |
367 | # custom flags. |
|
376 | # custom flags. | |
368 | self._flagprocessors = dict(flagutil.flagprocessors) |
|
377 | self._flagprocessors = dict(flagutil.flagprocessors) | |
@@ -404,6 +413,7 b' class revlog:' | |||||
404 |
|
413 | |||
405 | if b'changelogv2' in opts and self.revlog_kind == KIND_CHANGELOG: |
|
414 | if b'changelogv2' in opts and self.revlog_kind == KIND_CHANGELOG: | |
406 | new_header = CHANGELOGV2 |
|
415 | new_header = CHANGELOGV2 | |
|
416 | self._compute_rank = opts.get(b'changelogv2.compute-rank', True) | |||
407 | elif b'revlogv2' in opts: |
|
417 | elif b'revlogv2' in opts: | |
408 | new_header = REVLOGV2 |
|
418 | new_header = REVLOGV2 | |
409 | elif b'revlogv1' in opts: |
|
419 | elif b'revlogv1' in opts: | |
@@ -421,6 +431,9 b' class revlog:' | |||||
421 | self._maxchainlen = opts[b'maxchainlen'] |
|
431 | self._maxchainlen = opts[b'maxchainlen'] | |
422 | if b'deltabothparents' in opts: |
|
432 | if b'deltabothparents' in opts: | |
423 | self._deltabothparents = opts[b'deltabothparents'] |
|
433 | self._deltabothparents = opts[b'deltabothparents'] | |
|
434 | dps_cgds = opts.get(b'delta-parent-search.candidate-group-chunk-size') | |||
|
435 | if dps_cgds: | |||
|
436 | self._candidate_group_chunk_size = dps_cgds | |||
424 | self._lazydelta = bool(opts.get(b'lazydelta', True)) |
|
437 | self._lazydelta = bool(opts.get(b'lazydelta', True)) | |
425 | self._lazydeltabase = False |
|
438 | self._lazydeltabase = False | |
426 | if self._lazydelta: |
|
439 | if self._lazydelta: | |
@@ -505,7 +518,6 b' class revlog:' | |||||
505 | self._docket = docket |
|
518 | self._docket = docket | |
506 | self._docket_file = entry_point |
|
519 | self._docket_file = entry_point | |
507 | else: |
|
520 | else: | |
508 | entry_data = b'' |
|
|||
509 | self._initempty = True |
|
521 | self._initempty = True | |
510 | entry_data = self._get_data(entry_point, mmapindexthreshold) |
|
522 | entry_data = self._get_data(entry_point, mmapindexthreshold) | |
511 | if len(entry_data) > 0: |
|
523 | if len(entry_data) > 0: | |
@@ -653,9 +665,12 b' class revlog:' | |||||
653 | @util.propertycache |
|
665 | @util.propertycache | |
654 | def display_id(self): |
|
666 | def display_id(self): | |
655 | """The public facing "ID" of the revlog that we use in message""" |
|
667 | """The public facing "ID" of the revlog that we use in message""" | |
656 | # Maybe we should build a user facing representation of |
|
668 | if self.revlog_kind == KIND_FILELOG: | |
657 | # revlog.target instead of using `self.radix` |
|
669 | # Reference the file without the "data/" prefix, so it is familiar | |
658 | return self.radix |
|
670 | # to the user. | |
|
671 | return self.target[1] | |||
|
672 | else: | |||
|
673 | return self.radix | |||
659 |
|
674 | |||
660 | def _get_decompressor(self, t): |
|
675 | def _get_decompressor(self, t): | |
661 | try: |
|
676 | try: | |
@@ -2445,6 +2460,16 b' class revlog:' | |||||
2445 | self, write_debug=write_debug |
|
2460 | self, write_debug=write_debug | |
2446 | ) |
|
2461 | ) | |
2447 |
|
2462 | |||
|
2463 | if cachedelta is not None and len(cachedelta) == 2: | |||
|
2464 | # If the cached delta has no information about how it should be | |||
|
2465 | # reused, add the default reuse instruction according to the | |||
|
2466 | # revlog's configuration. | |||
|
2467 | if self._generaldelta and self._lazydeltabase: | |||
|
2468 | delta_base_reuse = DELTA_BASE_REUSE_TRY | |||
|
2469 | else: | |||
|
2470 | delta_base_reuse = DELTA_BASE_REUSE_NO | |||
|
2471 | cachedelta = (cachedelta[0], cachedelta[1], delta_base_reuse) | |||
|
2472 | ||||
2448 | revinfo = revlogutils.revisioninfo( |
|
2473 | revinfo = revlogutils.revisioninfo( | |
2449 | node, |
|
2474 | node, | |
2450 | p1, |
|
2475 | p1, | |
@@ -2492,7 +2517,7 b' class revlog:' | |||||
2492 | sidedata_offset = 0 |
|
2517 | sidedata_offset = 0 | |
2493 |
|
2518 | |||
2494 | rank = RANK_UNKNOWN |
|
2519 | rank = RANK_UNKNOWN | |
2495 | if self._format_version == CHANGELOGV2: |
|
2520 | if self._compute_rank: | |
2496 | if (p1r, p2r) == (nullrev, nullrev): |
|
2521 | if (p1r, p2r) == (nullrev, nullrev): | |
2497 | rank = 1 |
|
2522 | rank = 1 | |
2498 | elif p1r != nullrev and p2r == nullrev: |
|
2523 | elif p1r != nullrev and p2r == nullrev: | |
@@ -2637,6 +2662,8 b' class revlog:' | |||||
2637 | alwayscache=False, |
|
2662 | alwayscache=False, | |
2638 | addrevisioncb=None, |
|
2663 | addrevisioncb=None, | |
2639 | duplicaterevisioncb=None, |
|
2664 | duplicaterevisioncb=None, | |
|
2665 | debug_info=None, | |||
|
2666 | delta_base_reuse_policy=None, | |||
2640 | ): |
|
2667 | ): | |
2641 | """ |
|
2668 | """ | |
2642 | add a delta group |
|
2669 | add a delta group | |
@@ -2652,6 +2679,14 b' class revlog:' | |||||
2652 | if self._adding_group: |
|
2679 | if self._adding_group: | |
2653 | raise error.ProgrammingError(b'cannot nest addgroup() calls') |
|
2680 | raise error.ProgrammingError(b'cannot nest addgroup() calls') | |
2654 |
|
2681 | |||
|
2682 | # read the default delta-base reuse policy from revlog config if the | |||
|
2683 | # group did not specify one. | |||
|
2684 | if delta_base_reuse_policy is None: | |||
|
2685 | if self._generaldelta and self._lazydeltabase: | |||
|
2686 | delta_base_reuse_policy = DELTA_BASE_REUSE_TRY | |||
|
2687 | else: | |||
|
2688 | delta_base_reuse_policy = DELTA_BASE_REUSE_NO | |||
|
2689 | ||||
2655 | self._adding_group = True |
|
2690 | self._adding_group = True | |
2656 | empty = True |
|
2691 | empty = True | |
2657 | try: |
|
2692 | try: | |
@@ -2662,6 +2697,7 b' class revlog:' | |||||
2662 | deltacomputer = deltautil.deltacomputer( |
|
2697 | deltacomputer = deltautil.deltacomputer( | |
2663 | self, |
|
2698 | self, | |
2664 | write_debug=write_debug, |
|
2699 | write_debug=write_debug, | |
|
2700 | debug_info=debug_info, | |||
2665 | ) |
|
2701 | ) | |
2666 | # loop through our set of deltas |
|
2702 | # loop through our set of deltas | |
2667 | for data in deltas: |
|
2703 | for data in deltas: | |
@@ -2731,7 +2767,7 b' class revlog:' | |||||
2731 | p1, |
|
2767 | p1, | |
2732 | p2, |
|
2768 | p2, | |
2733 | flags, |
|
2769 | flags, | |
2734 | (baserev, delta), |
|
2770 | (baserev, delta, delta_base_reuse_policy), | |
2735 | alwayscache=alwayscache, |
|
2771 | alwayscache=alwayscache, | |
2736 | deltacomputer=deltacomputer, |
|
2772 | deltacomputer=deltacomputer, | |
2737 | sidedata=sidedata, |
|
2773 | sidedata=sidedata, | |
@@ -2886,6 +2922,7 b' class revlog:' | |||||
2886 | assumehaveparentrevisions=False, |
|
2922 | assumehaveparentrevisions=False, | |
2887 | deltamode=repository.CG_DELTAMODE_STD, |
|
2923 | deltamode=repository.CG_DELTAMODE_STD, | |
2888 | sidedata_helpers=None, |
|
2924 | sidedata_helpers=None, | |
|
2925 | debug_info=None, | |||
2889 | ): |
|
2926 | ): | |
2890 | if nodesorder not in (b'nodes', b'storage', b'linear', None): |
|
2927 | if nodesorder not in (b'nodes', b'storage', b'linear', None): | |
2891 | raise error.ProgrammingError( |
|
2928 | raise error.ProgrammingError( | |
@@ -2915,6 +2952,7 b' class revlog:' | |||||
2915 | revisiondata=revisiondata, |
|
2952 | revisiondata=revisiondata, | |
2916 | assumehaveparentrevisions=assumehaveparentrevisions, |
|
2953 | assumehaveparentrevisions=assumehaveparentrevisions, | |
2917 | sidedata_helpers=sidedata_helpers, |
|
2954 | sidedata_helpers=sidedata_helpers, | |
|
2955 | debug_info=debug_info, | |||
2918 | ) |
|
2956 | ) | |
2919 |
|
2957 | |||
2920 | DELTAREUSEALWAYS = b'always' |
|
2958 | DELTAREUSEALWAYS = b'always' |
@@ -67,7 +67,7 b' class revisioninfo:' | |||||
67 | node: expected hash of the revision |
|
67 | node: expected hash of the revision | |
68 | p1, p2: parent revs of the revision |
|
68 | p1, p2: parent revs of the revision | |
69 | btext: built text cache consisting of a one-element list |
|
69 | btext: built text cache consisting of a one-element list | |
70 | cachedelta: (baserev, uncompressed_delta) or None |
|
70 | cachedelta: (baserev, uncompressed_delta, usage_mode) or None | |
71 | flags: flags associated to the revision storage |
|
71 | flags: flags associated to the revision storage | |
72 |
|
72 | |||
73 | One of btext[0] or cachedelta must be set. |
|
73 | One of btext[0] or cachedelta must be set. |
@@ -301,3 +301,18 b' FEATURES_BY_VERSION = {' | |||||
301 |
|
301 | |||
302 |
|
302 | |||
303 | SPARSE_REVLOG_MAX_CHAIN_LENGTH = 1000 |
|
303 | SPARSE_REVLOG_MAX_CHAIN_LENGTH = 1000 | |
|
304 | ||||
|
305 | ### What should be done with a cached delta and its base? | |||
|
306 | ||||
|
307 | # Ignore the cache when considering candidates. | |||
|
308 | # | |||
|
309 | # The cached delta might be used, but the delta base will not be scheduled for | |||
|
310 | # usage earlier than in "normal" order. | |||
|
311 | DELTA_BASE_REUSE_NO = 0 | |||
|
312 | ||||
|
313 | # Prioritize trying the cached delta base | |||
|
314 | # | |||
|
315 | # The delta base will be tested for validity first, so that the cached deltas get | |||
|
316 | # used when possible. | |||
|
317 | DELTA_BASE_REUSE_TRY = 1 | |||
|
318 | DELTA_BASE_REUSE_FORCE = 2 |
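These three policies make the third slot of a cachedelta tuple self-describing. Older callers still pass two-element (baserev, delta) tuples, so call sites such as _addrevision() and addgroup() in the revlog diff above fall back to a configuration-derived default. A sketch of that normalization, with the hypothetical helper name normalize_cachedelta and the assumption that generaldelta/lazydeltabase mirror the revlog attributes of the same names:

    def normalize_cachedelta(cachedelta, generaldelta, lazydeltabase):
        # Upgrade a legacy 2-tuple to the 3-tuple form, picking the
        # default reuse policy from the revlog configuration.
        if cachedelta is not None and len(cachedelta) == 2:
            if generaldelta and lazydeltabase:
                policy = DELTA_BASE_REUSE_TRY
            else:
                policy = DELTA_BASE_REUSE_NO
            cachedelta = (cachedelta[0], cachedelta[1], policy)
        return cachedelta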
@@ -6,12 +6,19 b'' | |||||
6 | # This software may be used and distributed according to the terms of the |
|
6 | # This software may be used and distributed according to the terms of the | |
7 | # GNU General Public License version 2 or any later version. |
|
7 | # GNU General Public License version 2 or any later version. | |
8 |
|
8 | |||
|
9 | import collections | |||
|
10 | import string | |||
|
11 | ||||
9 | from .. import ( |
|
12 | from .. import ( | |
|
13 | mdiff, | |||
10 | node as nodemod, |
|
14 | node as nodemod, | |
|
15 | revlogutils, | |||
|
16 | util, | |||
11 | ) |
|
17 | ) | |
12 |
|
18 | |||
13 | from . import ( |
|
19 | from . import ( | |
14 | constants, |
|
20 | constants, | |
|
21 | deltas as deltautil, | |||
15 | ) |
|
22 | ) | |
16 |
|
23 | |||
17 | INDEX_ENTRY_DEBUG_COLUMN = [] |
|
24 | INDEX_ENTRY_DEBUG_COLUMN = [] | |
@@ -216,3 +223,499 b' def debug_index(' | |||||
216 | fm.plain(b'\n') |
|
223 | fm.plain(b'\n') | |
217 |
|
224 | |||
218 | fm.end() |
|
225 | fm.end() | |
|
226 | ||||
|
227 | ||||
|
228 | def dump(ui, revlog): | |||
|
229 | """perform the work for `hg debugrevlog --dump""" | |||
|
230 | # XXX seems redundant with debug index? | |||
|
231 | r = revlog | |||
|
232 | numrevs = len(r) | |||
|
233 | ui.write( | |||
|
234 | ( | |||
|
235 | b"# rev p1rev p2rev start end deltastart base p1 p2" | |||
|
236 | b" rawsize totalsize compression heads chainlen\n" | |||
|
237 | ) | |||
|
238 | ) | |||
|
239 | ts = 0 | |||
|
240 | heads = set() | |||
|
241 | ||||
|
242 | for rev in range(numrevs): | |||
|
243 | dbase = r.deltaparent(rev) | |||
|
244 | if dbase == -1: | |||
|
245 | dbase = rev | |||
|
246 | cbase = r.chainbase(rev) | |||
|
247 | clen = r.chainlen(rev) | |||
|
248 | p1, p2 = r.parentrevs(rev) | |||
|
249 | rs = r.rawsize(rev) | |||
|
250 | ts = ts + rs | |||
|
251 | heads -= set(r.parentrevs(rev)) | |||
|
252 | heads.add(rev) | |||
|
253 | try: | |||
|
254 | compression = ts / r.end(rev) | |||
|
255 | except ZeroDivisionError: | |||
|
256 | compression = 0 | |||
|
257 | ui.write( | |||
|
258 | b"%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d " | |||
|
259 | b"%11d %5d %8d\n" | |||
|
260 | % ( | |||
|
261 | rev, | |||
|
262 | p1, | |||
|
263 | p2, | |||
|
264 | r.start(rev), | |||
|
265 | r.end(rev), | |||
|
266 | r.start(dbase), | |||
|
267 | r.start(cbase), | |||
|
268 | r.start(p1), | |||
|
269 | r.start(p2), | |||
|
270 | rs, | |||
|
271 | ts, | |||
|
272 | compression, | |||
|
273 | len(heads), | |||
|
274 | clen, | |||
|
275 | ) | |||
|
276 | ) | |||
|
277 | ||||
|
278 | ||||
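dump() keeps its head count incrementally: scanning revisions in topological order, a revision stops being a head as soon as a later revision names it as a parent. The trick in isolation (a toy model; parentrevs is a stand-in callable, -1 plays nullrev):

    def count_heads(parentrevs, numrevs):
        heads = set()
        for rev in range(numrevs):
            heads -= set(parentrevs(rev))  # parents are no longer heads
            heads.add(rev)                 # the new rev is, for now
        return len(heads)

    # linear history 0 <- 1 <- 2 plus a branch 1 <- 3: two heads (2 and 3)
    parents = {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (1, -1)}
    assert count_heads(lambda r: parents[r], 4) == 2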
|
279 | def debug_revlog(ui, revlog): | |||
|
280 | """code for `hg debugrevlog`""" | |||
|
281 | r = revlog | |||
|
282 | format = r._format_version | |||
|
283 | v = r._format_flags | |||
|
284 | flags = [] | |||
|
285 | gdelta = False | |||
|
286 | if v & constants.FLAG_INLINE_DATA: | |||
|
287 | flags.append(b'inline') | |||
|
288 | if v & constants.FLAG_GENERALDELTA: | |||
|
289 | gdelta = True | |||
|
290 | flags.append(b'generaldelta') | |||
|
291 | if not flags: | |||
|
292 | flags = [b'(none)'] | |||
|
293 | ||||
|
294 | ### the total size of stored content if uncompressed. | |||
|
295 | full_text_total_size = 0 | |||
|
296 | ### tracks merge vs single parent | |||
|
297 | nummerges = 0 | |||
|
298 | ||||
|
299 | ### tracks the ways the "delta" are built | |||
|
300 | # nodelta | |||
|
301 | numempty = 0 | |||
|
302 | numemptytext = 0 | |||
|
303 | numemptydelta = 0 | |||
|
304 | # full file content | |||
|
305 | numfull = 0 | |||
|
306 | # intermediate snapshot against a prior snapshot | |||
|
307 | numsemi = 0 | |||
|
308 | # snapshot count per depth | |||
|
309 | numsnapdepth = collections.defaultdict(lambda: 0) | |||
|
310 | # number of snapshots with a non-ancestor delta | |||
|
311 | numsnapdepth_nad = collections.defaultdict(lambda: 0) | |||
|
312 | # delta against previous revision | |||
|
313 | numprev = 0 | |||
|
314 | # delta against prev, where prev is a non-ancestor | |||
|
315 | numprev_nad = 0 | |||
|
316 | # delta against first or second parent (not prev) | |||
|
317 | nump1 = 0 | |||
|
318 | nump2 = 0 | |||
|
319 | # delta against neither prev nor parents | |||
|
320 | numother = 0 | |||
|
321 | # delta against other that is a non-ancestor | |||
|
322 | numother_nad = 0 | |||
|
323 | # delta against prev that are also first or second parent | |||
|
324 | # (details of `numprev`) | |||
|
325 | nump1prev = 0 | |||
|
326 | nump2prev = 0 | |||
|
327 | ||||
|
328 | # data about delta chain of each revs | |||
|
329 | chainlengths = [] | |||
|
330 | chainbases = [] | |||
|
331 | chainspans = [] | |||
|
332 | ||||
|
333 | # data about each revision | |||
|
334 | datasize = [None, 0, 0] | |||
|
335 | fullsize = [None, 0, 0] | |||
|
336 | semisize = [None, 0, 0] | |||
|
337 | # snapshot count per depth | |||
|
338 | snapsizedepth = collections.defaultdict(lambda: [None, 0, 0]) | |||
|
339 | deltasize = [None, 0, 0] | |||
|
340 | chunktypecounts = {} | |||
|
341 | chunktypesizes = {} | |||
|
342 | ||||
|
343 | def addsize(size, l): | |||
|
344 | if l[0] is None or size < l[0]: | |||
|
345 | l[0] = size | |||
|
346 | if size > l[1]: | |||
|
347 | l[1] = size | |||
|
348 | l[2] += size | |||
|
349 | ||||
|
350 | numrevs = len(r) | |||
|
351 | for rev in range(numrevs): | |||
|
352 | p1, p2 = r.parentrevs(rev) | |||
|
353 | delta = r.deltaparent(rev) | |||
|
354 | if format > 0: | |||
|
355 | s = r.rawsize(rev) | |||
|
356 | full_text_total_size += s | |||
|
357 | addsize(s, datasize) | |||
|
358 | if p2 != nodemod.nullrev: | |||
|
359 | nummerges += 1 | |||
|
360 | size = r.length(rev) | |||
|
361 | if delta == nodemod.nullrev: | |||
|
362 | chainlengths.append(0) | |||
|
363 | chainbases.append(r.start(rev)) | |||
|
364 | chainspans.append(size) | |||
|
365 | if size == 0: | |||
|
366 | numempty += 1 | |||
|
367 | numemptytext += 1 | |||
|
368 | else: | |||
|
369 | numfull += 1 | |||
|
370 | numsnapdepth[0] += 1 | |||
|
371 | addsize(size, fullsize) | |||
|
372 | addsize(size, snapsizedepth[0]) | |||
|
373 | else: | |||
|
374 | nad = ( | |||
|
375 | delta != p1 and delta != p2 and not r.isancestorrev(delta, rev) | |||
|
376 | ) | |||
|
377 | chainlengths.append(chainlengths[delta] + 1) | |||
|
378 | baseaddr = chainbases[delta] | |||
|
379 | revaddr = r.start(rev) | |||
|
380 | chainbases.append(baseaddr) | |||
|
381 | chainspans.append((revaddr - baseaddr) + size) | |||
|
382 | if size == 0: | |||
|
383 | numempty += 1 | |||
|
384 | numemptydelta += 1 | |||
|
385 | elif r.issnapshot(rev): | |||
|
386 | addsize(size, semisize) | |||
|
387 | numsemi += 1 | |||
|
388 | depth = r.snapshotdepth(rev) | |||
|
389 | numsnapdepth[depth] += 1 | |||
|
390 | if nad: | |||
|
391 | numsnapdepth_nad[depth] += 1 | |||
|
392 | addsize(size, snapsizedepth[depth]) | |||
|
393 | else: | |||
|
394 | addsize(size, deltasize) | |||
|
395 | if delta == rev - 1: | |||
|
396 | numprev += 1 | |||
|
397 | if delta == p1: | |||
|
398 | nump1prev += 1 | |||
|
399 | elif delta == p2: | |||
|
400 | nump2prev += 1 | |||
|
401 | elif nad: | |||
|
402 | numprev_nad += 1 | |||
|
403 | elif delta == p1: | |||
|
404 | nump1 += 1 | |||
|
405 | elif delta == p2: | |||
|
406 | nump2 += 1 | |||
|
407 | elif delta != nodemod.nullrev: | |||
|
408 | numother += 1 | |||
|
409 | numother_nad += 1 | |||
|
410 | ||||
|
411 | # Obtain data on the raw chunks in the revlog. | |||
|
412 | if util.safehasattr(r, '_getsegmentforrevs'): | |||
|
413 | segment = r._getsegmentforrevs(rev, rev)[1] | |||
|
414 | else: | |||
|
415 | segment = r._revlog._getsegmentforrevs(rev, rev)[1] | |||
|
416 | if segment: | |||
|
417 | chunktype = bytes(segment[0:1]) | |||
|
418 | else: | |||
|
419 | chunktype = b'empty' | |||
|
420 | ||||
|
421 | if chunktype not in chunktypecounts: | |||
|
422 | chunktypecounts[chunktype] = 0 | |||
|
423 | chunktypesizes[chunktype] = 0 | |||
|
424 | ||||
|
425 | chunktypecounts[chunktype] += 1 | |||
|
426 | chunktypesizes[chunktype] += size | |||
|
427 | ||||
|
428 | # Adjust size min value for empty cases | |||
|
429 | for size in (datasize, fullsize, semisize, deltasize): | |||
|
430 | if size[0] is None: | |||
|
431 | size[0] = 0 | |||
|
432 | ||||
|
433 | numdeltas = numrevs - numfull - numempty - numsemi | |||
|
434 | numoprev = numprev - nump1prev - nump2prev - numprev_nad | |||
|
435 | num_other_ancestors = numother - numother_nad | |||
|
436 | totalrawsize = datasize[2] | |||
|
437 | datasize[2] /= numrevs | |||
|
438 | fulltotal = fullsize[2] | |||
|
439 | if numfull == 0: | |||
|
440 | fullsize[2] = 0 | |||
|
441 | else: | |||
|
442 | fullsize[2] /= numfull | |||
|
443 | semitotal = semisize[2] | |||
|
444 | snaptotal = {} | |||
|
445 | if numsemi > 0: | |||
|
446 | semisize[2] /= numsemi | |||
|
447 | for depth in snapsizedepth: | |||
|
448 | snaptotal[depth] = snapsizedepth[depth][2] | |||
|
449 | snapsizedepth[depth][2] /= numsnapdepth[depth] | |||
|
450 | ||||
|
451 | deltatotal = deltasize[2] | |||
|
452 | if numdeltas > 0: | |||
|
453 | deltasize[2] /= numdeltas | |||
|
454 | totalsize = fulltotal + semitotal + deltatotal | |||
|
455 | avgchainlen = sum(chainlengths) / numrevs | |||
|
456 | maxchainlen = max(chainlengths) | |||
|
457 | maxchainspan = max(chainspans) | |||
|
458 | compratio = 1 | |||
|
459 | if totalsize: | |||
|
460 | compratio = totalrawsize / totalsize | |||
|
461 | ||||
|
462 | basedfmtstr = b'%%%dd\n' | |||
|
463 | basepcfmtstr = b'%%%dd %s(%%5.2f%%%%)\n' | |||
|
464 | ||||
|
465 | def dfmtstr(max): | |||
|
466 | return basedfmtstr % len(str(max)) | |||
|
467 | ||||
|
468 | def pcfmtstr(max, padding=0): | |||
|
469 | return basepcfmtstr % (len(str(max)), b' ' * padding) | |||
|
470 | ||||
|
471 | def pcfmt(value, total): | |||
|
472 | if total: | |||
|
473 | return (value, 100 * float(value) / total) | |||
|
474 | else: | |||
|
475 | return value, 100.0 | |||
|
476 | ||||
|
477 | ui.writenoi18n(b'format : %d\n' % format) | |||
|
478 | ui.writenoi18n(b'flags : %s\n' % b', '.join(flags)) | |||
|
479 | ||||
|
480 | ui.write(b'\n') | |||
|
481 | fmt = pcfmtstr(totalsize) | |||
|
482 | fmt2 = dfmtstr(totalsize) | |||
|
483 | ui.writenoi18n(b'revisions : ' + fmt2 % numrevs) | |||
|
484 | ui.writenoi18n(b' merges : ' + fmt % pcfmt(nummerges, numrevs)) | |||
|
485 | ui.writenoi18n( | |||
|
486 | b' normal : ' + fmt % pcfmt(numrevs - nummerges, numrevs) | |||
|
487 | ) | |||
|
488 | ui.writenoi18n(b'revisions : ' + fmt2 % numrevs) | |||
|
489 | ui.writenoi18n(b' empty : ' + fmt % pcfmt(numempty, numrevs)) | |||
|
490 | ui.writenoi18n( | |||
|
491 | b' text : ' | |||
|
492 | + fmt % pcfmt(numemptytext, numemptytext + numemptydelta) | |||
|
493 | ) | |||
|
494 | ui.writenoi18n( | |||
|
495 | b' delta : ' | |||
|
496 | + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta) | |||
|
497 | ) | |||
|
498 | ui.writenoi18n( | |||
|
499 | b' snapshot : ' + fmt % pcfmt(numfull + numsemi, numrevs) | |||
|
500 | ) | |||
|
501 | for depth in sorted(numsnapdepth): | |||
|
502 | base = b' lvl-%-3d : ' % depth | |||
|
503 | count = fmt % pcfmt(numsnapdepth[depth], numrevs) | |||
|
504 | pieces = [base, count] | |||
|
505 | if numsnapdepth_nad[depth]: | |||
|
506 | pieces[-1] = count = count[:-1] # drop the final '\n' | |||
|
507 | more = b' non-ancestor-bases: ' | |||
|
508 | anc_count = fmt | |||
|
509 | anc_count %= pcfmt(numsnapdepth_nad[depth], numsnapdepth[depth]) | |||
|
510 | pieces.append(more) | |||
|
511 | pieces.append(anc_count) | |||
|
512 | ui.write(b''.join(pieces)) | |||
|
513 | ui.writenoi18n(b' deltas : ' + fmt % pcfmt(numdeltas, numrevs)) | |||
|
514 | ui.writenoi18n(b'revision size : ' + fmt2 % totalsize) | |||
|
515 | ui.writenoi18n( | |||
|
516 | b' snapshot : ' + fmt % pcfmt(fulltotal + semitotal, totalsize) | |||
|
517 | ) | |||
|
518 | for depth in sorted(numsnapdepth): | |||
|
519 | ui.write( | |||
|
520 | (b' lvl-%-3d : ' % depth) | |||
|
521 | + fmt % pcfmt(snaptotal[depth], totalsize) | |||
|
522 | ) | |||
|
523 | ui.writenoi18n(b' deltas : ' + fmt % pcfmt(deltatotal, totalsize)) | |||
|
524 | ||||
|
525 | letters = string.ascii_letters.encode('ascii') | |||
|
526 | ||||
|
527 | def fmtchunktype(chunktype): | |||
|
528 | if chunktype == b'empty': | |||
|
529 | return b' %s : ' % chunktype | |||
|
530 | elif chunktype in letters: | |||
|
531 | return b' 0x%s (%s) : ' % (nodemod.hex(chunktype), chunktype) | |||
|
532 | else: | |||
|
533 | return b' 0x%s : ' % nodemod.hex(chunktype) | |||
|
534 | ||||
|
535 | ui.write(b'\n') | |||
|
536 | ui.writenoi18n(b'chunks : ' + fmt2 % numrevs) | |||
|
537 | for chunktype in sorted(chunktypecounts): | |||
|
538 | ui.write(fmtchunktype(chunktype)) | |||
|
539 | ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs)) | |||
|
540 | ui.writenoi18n(b'chunks size : ' + fmt2 % totalsize) | |||
|
541 | for chunktype in sorted(chunktypecounts): | |||
|
542 | ui.write(fmtchunktype(chunktype)) | |||
|
543 | ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize)) | |||
|
544 | ||||
|
545 | ui.write(b'\n') | |||
|
546 | b_total = b"%d" % full_text_total_size | |||
|
547 | p_total = [] | |||
|
548 | while len(b_total) > 3: | |||
|
549 | p_total.append(b_total[-3:]) | |||
|
550 | b_total = b_total[:-3] | |||
|
551 | p_total.append(b_total) | |||
|
552 | p_total.reverse() | |||
|
553 | b_total = b' '.join(p_total) | |||
|
554 | ||||
|
555 | ui.write(b'\n') | |||
|
556 | ui.writenoi18n(b'total-stored-content: %s bytes\n' % b_total) | |||
|
557 | ui.write(b'\n') | |||
|
558 | fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio)) | |||
|
559 | ui.writenoi18n(b'avg chain length : ' + fmt % avgchainlen) | |||
|
560 | ui.writenoi18n(b'max chain length : ' + fmt % maxchainlen) | |||
|
561 | ui.writenoi18n(b'max chain reach : ' + fmt % maxchainspan) | |||
|
562 | ui.writenoi18n(b'compression ratio : ' + fmt % compratio) | |||
|
563 | ||||
|
564 | if format > 0: | |||
|
565 | ui.write(b'\n') | |||
|
566 | ui.writenoi18n( | |||
|
567 | b'uncompressed data size (min/max/avg) : %d / %d / %d\n' | |||
|
568 | % tuple(datasize) | |||
|
569 | ) | |||
|
570 | ui.writenoi18n( | |||
|
571 | b'full revision size (min/max/avg) : %d / %d / %d\n' | |||
|
572 | % tuple(fullsize) | |||
|
573 | ) | |||
|
574 | ui.writenoi18n( | |||
|
575 | b'inter-snapshot size (min/max/avg) : %d / %d / %d\n' | |||
|
576 | % tuple(semisize) | |||
|
577 | ) | |||
|
578 | for depth in sorted(snapsizedepth): | |||
|
579 | if depth == 0: | |||
|
580 | continue | |||
|
581 | ui.writenoi18n( | |||
|
582 | b' level-%-3d (min/max/avg) : %d / %d / %d\n' | |||
|
583 | % ((depth,) + tuple(snapsizedepth[depth])) | |||
|
584 | ) | |||
|
585 | ui.writenoi18n( | |||
|
586 | b'delta size (min/max/avg) : %d / %d / %d\n' | |||
|
587 | % tuple(deltasize) | |||
|
588 | ) | |||
|
589 | ||||
|
590 | if numdeltas > 0: | |||
|
591 | ui.write(b'\n') | |||
|
592 | fmt = pcfmtstr(numdeltas) | |||
|
593 | fmt2 = pcfmtstr(numdeltas, 4) | |||
|
594 | ui.writenoi18n( | |||
|
595 | b'deltas against prev : ' + fmt % pcfmt(numprev, numdeltas) | |||
|
596 | ) | |||
|
597 | if numprev > 0: | |||
|
598 | ui.writenoi18n( | |||
|
599 | b' where prev = p1 : ' + fmt2 % pcfmt(nump1prev, numprev) | |||
|
600 | ) | |||
|
601 | ui.writenoi18n( | |||
|
602 | b' where prev = p2 : ' + fmt2 % pcfmt(nump2prev, numprev) | |||
|
603 | ) | |||
|
604 | ui.writenoi18n( | |||
|
605 | b' other-ancestor : ' + fmt2 % pcfmt(numoprev, numprev) | |||
|
606 | ) | |||
|
607 | ui.writenoi18n( | |||
|
608 | b' unrelated : ' + fmt2 % pcfmt(numprev_nad, numprev) | |||
|
609 | ) | |||
|
610 | if gdelta: | |||
|
611 | ui.writenoi18n( | |||
|
612 | b'deltas against p1 : ' + fmt % pcfmt(nump1, numdeltas) | |||
|
613 | ) | |||
|
614 | ui.writenoi18n( | |||
|
615 | b'deltas against p2 : ' + fmt % pcfmt(nump2, numdeltas) | |||
|
616 | ) | |||
|
617 | ui.writenoi18n( | |||
|
618 | b'deltas against ancs : ' | |||
|
619 | + fmt % pcfmt(num_other_ancestors, numdeltas) | |||
|
620 | ) | |||
|
621 | ui.writenoi18n( | |||
|
622 | b'deltas against other : ' | |||
|
623 | + fmt % pcfmt(numother_nad, numdeltas) | |||
|
624 | ) | |||
|
625 | ||||
|
626 | ||||
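debug_revlog() above formats the total-stored-content figure with a space as thousands separator by peeling three digits at a time off a bytes string. The same loop in isolation, as a sketch:

    def group_thousands(n: int) -> bytes:
        s = b"%d" % n
        parts = []
        while len(s) > 3:
            parts.append(s[-3:])
            s = s[:-3]
        parts.append(s)
        return b' '.join(reversed(parts))

    assert group_thousands(131072) == b'131 072'

Bytes have no format()-style ',' grouping, hence the manual loop.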
|
627 | def debug_delta_find(ui, revlog, rev, base_rev=nodemod.nullrev): | |||
|
628 | """display the search process for a delta""" | |||
|
629 | deltacomputer = deltautil.deltacomputer( | |||
|
630 | revlog, | |||
|
631 | write_debug=ui.write, | |||
|
632 | debug_search=not ui.quiet, | |||
|
633 | ) | |||
|
634 | ||||
|
635 | node = revlog.node(rev) | |||
|
636 | p1r, p2r = revlog.parentrevs(rev) | |||
|
637 | p1 = revlog.node(p1r) | |||
|
638 | p2 = revlog.node(p2r) | |||
|
639 | full_text = revlog.revision(rev) | |||
|
640 | btext = [full_text] | |||
|
641 | textlen = len(btext[0]) | |||
|
642 | cachedelta = None | |||
|
643 | flags = revlog.flags(rev) | |||
|
644 | ||||
|
645 | if base_rev != nodemod.nullrev: | |||
|
646 | base_text = revlog.revision(base_rev) | |||
|
647 | delta = mdiff.textdiff(base_text, full_text) | |||
|
648 | ||||
|
649 | cachedelta = (base_rev, delta, constants.DELTA_BASE_REUSE_TRY) | |||
|
650 | btext = [None] | |||
|
651 | ||||
|
652 | revinfo = revlogutils.revisioninfo( | |||
|
653 | node, | |||
|
654 | p1, | |||
|
655 | p2, | |||
|
656 | btext, | |||
|
657 | textlen, | |||
|
658 | cachedelta, | |||
|
659 | flags, | |||
|
660 | ) | |||
|
661 | ||||
|
662 | fh = revlog._datafp() | |||
|
663 | deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev) | |||
|
664 | ||||
|
665 | ||||
|
666 | def _get_revlogs(repo, changelog: bool, manifest: bool, filelogs: bool): | |||
|
667 | """yield revlogs from this repository""" | |||
|
668 | if changelog: | |||
|
669 | yield repo.changelog | |||
|
670 | ||||
|
671 | if manifest: | |||
|
672 | # XXX: Handle tree manifest | |||
|
673 | root_mf = repo.manifestlog.getstorage(b'') | |||
|
674 | assert not root_mf._treeondisk | |||
|
675 | yield root_mf._revlog | |||
|
676 | ||||
|
677 | if filelogs: | |||
|
678 | files = set() | |||
|
679 | for rev in repo: | |||
|
680 | ctx = repo[rev] | |||
|
681 | files |= set(ctx.files()) | |||
|
682 | ||||
|
683 | for f in sorted(files): | |||
|
684 | yield repo.file(f)._revlog | |||
|
685 | ||||
|
686 | ||||
|
687 | def debug_revlog_stats( | |||
|
688 | repo, fm, changelog: bool, manifest: bool, filelogs: bool | |||
|
689 | ): | |||
|
690 | """Format revlog statistics for debugging purposes | |||
|
691 | ||||
|
692 | fm: the output formatter. | |||
|
693 | """ | |||
|
694 | fm.plain(b'rev-count data-size inl type target \n') | |||
|
695 | ||||
|
696 | for rlog in _get_revlogs(repo, changelog, manifest, filelogs): | |||
|
697 | fm.startitem() | |||
|
698 | nb_rev = len(rlog) | |||
|
699 | inline = rlog._inline | |||
|
700 | data_size = rlog._get_data_offset(nb_rev - 1) | |||
|
701 | ||||
|
702 | target = rlog.target | |||
|
703 | revlog_type = b'unknown' | |||
|
704 | revlog_target = b'' | |||
|
705 | if target[0] == constants.KIND_CHANGELOG: | |||
|
706 | revlog_type = b'changelog' | |||
|
707 | elif target[0] == constants.KIND_MANIFESTLOG: | |||
|
708 | revlog_type = b'manifest' | |||
|
709 | revlog_target = target[1] | |||
|
710 | elif target[0] == constants.KIND_FILELOG: | |||
|
711 | revlog_type = b'file' | |||
|
712 | revlog_target = target[1] | |||
|
713 | ||||
|
714 | fm.write(b'revlog.rev-count', b'%9d', nb_rev) | |||
|
715 | fm.write(b'revlog.data-size', b'%12d', data_size) | |||
|
716 | ||||
|
717 | fm.write(b'revlog.inline', b' %-3s', b'yes' if inline else b'no') | |||
|
718 | fm.write(b'revlog.type', b' %-9s', revlog_type) | |||
|
719 | fm.write(b'revlog.target', b' %s', revlog_target) | |||
|
720 | ||||
|
721 | fm.plain(b'\n') |
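debug_revlog() classifies stored chunks by their first byte (the compression marker), and debug_revlog_stats() rolls up one summary row per revlog. The classification step reduced to a sketch (segments is any iterable of raw chunk bytes; the concrete marker values depend on the configured compression engines):

    import collections

    def chunk_type_histogram(segments):
        counts = collections.Counter()
        sizes = collections.Counter()
        for seg in segments:
            ctype = bytes(seg[0:1]) if seg else b'empty'
            counts[ctype] += 1
            sizes[ctype] += len(seg)
        return counts, sizes

    counts, sizes = chunk_type_histogram([b'x\x9cdata', b'uraw text', b''])
    assert counts[b'empty'] == 1 and counts[b'x'] == 1 and counts[b'u'] == 1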
@@ -20,6 +20,8 b' from .constants import (' | |||||
20 | COMP_MODE_DEFAULT, |
|
20 | COMP_MODE_DEFAULT, | |
21 | COMP_MODE_INLINE, |
|
21 | COMP_MODE_INLINE, | |
22 | COMP_MODE_PLAIN, |
|
22 | COMP_MODE_PLAIN, | |
|
23 | DELTA_BASE_REUSE_FORCE, | |||
|
24 | DELTA_BASE_REUSE_NO, | |||
23 | KIND_CHANGELOG, |
|
25 | KIND_CHANGELOG, | |
24 | KIND_FILELOG, |
|
26 | KIND_FILELOG, | |
25 | KIND_MANIFESTLOG, |
|
27 | KIND_MANIFESTLOG, | |
@@ -576,13 +578,20 b' def drop_u_compression(delta):' | |||||
576 | ) |
|
578 | ) | |
577 |
|
579 | |||
578 |
|
580 | |||
579 | def isgooddeltainfo(revlog, deltainfo, revinfo): |
|
581 | def is_good_delta_info(revlog, deltainfo, revinfo): | |
580 | """Returns True if the given delta is good. Good means that it is within |
|
582 | """Returns True if the given delta is good. Good means that it is within | |
581 | the disk span, disk size, and chain length bounds that we know to be |
|
583 | the disk span, disk size, and chain length bounds that we know to be | |
582 | performant.""" |
|
584 | performant.""" | |
583 | if deltainfo is None: |
|
585 | if deltainfo is None: | |
584 | return False |
|
586 | return False | |
585 |
|
587 | |||
|
588 | if ( | |||
|
589 | revinfo.cachedelta is not None | |||
|
590 | and deltainfo.base == revinfo.cachedelta[0] | |||
|
591 | and revinfo.cachedelta[2] == DELTA_BASE_REUSE_FORCE | |||
|
592 | ): | |||
|
593 | return True | |||
|
594 | ||||
586 | # - 'deltainfo.distance' is the distance from the base revision -- |
|
595 | # - 'deltainfo.distance' is the distance from the base revision -- | |
587 | # bounding it limits the amount of I/O we need to do. |
|
596 | # bounding it limits the amount of I/O we need to do. | |
588 | # - 'deltainfo.compresseddeltalen' is the sum of the total size of |
|
597 | # - 'deltainfo.compresseddeltalen' is the sum of the total size of | |
@@ -655,7 +664,16 b' def isgooddeltainfo(revlog, deltainfo, r' | |||||
655 | LIMIT_BASE2TEXT = 500 |
|
664 | LIMIT_BASE2TEXT = 500 | |
656 |
|
665 | |||
657 |
|
666 | |||
658 | def _candidategroups(revlog, textlen, p1, p2, cachedelta): |
|
667 | def _candidategroups( | |
|
668 | revlog, | |||
|
669 | textlen, | |||
|
670 | p1, | |||
|
671 | p2, | |||
|
672 | cachedelta, | |||
|
673 | excluded_bases=None, | |||
|
674 | target_rev=None, | |||
|
675 | snapshot_cache=None, | |||
|
676 | ): | |||
659 | """Provides group of revision to be tested as delta base |
|
677 | """Provides group of revision to be tested as delta base | |
660 |
|
678 | |||
661 | This top level function focus on emitting groups with unique and worthwhile |
|
679 | This top level function focus on emitting groups with unique and worthwhile | |
@@ -666,15 +684,31 b' def _candidategroups(revlog, textlen, p1' | |||||
666 | yield None |
|
684 | yield None | |
667 | return |
|
685 | return | |
668 |
|
686 | |||
|
687 | if ( | |||
|
688 | cachedelta is not None | |||
|
689 | and nullrev == cachedelta[0] | |||
|
690 | and cachedelta[2] == DELTA_BASE_REUSE_FORCE | |||
|
691 | ): | |||
|
692 | # instructions are to forcibly do a full snapshot | |||
|
693 | yield None | |||
|
694 | return | |||
|
695 | ||||
669 | deltalength = revlog.length |
|
696 | deltalength = revlog.length | |
670 | deltaparent = revlog.deltaparent |
|
697 | deltaparent = revlog.deltaparent | |
671 | sparse = revlog._sparserevlog |
|
698 | sparse = revlog._sparserevlog | |
672 | good = None |
|
699 | good = None | |
673 |
|
700 | |||
674 | deltas_limit = textlen * LIMIT_DELTA2TEXT |
|
701 | deltas_limit = textlen * LIMIT_DELTA2TEXT | |
|
702 | group_chunk_size = revlog._candidate_group_chunk_size | |||
675 |
|
703 | |||
676 | tested = {nullrev} |
|
704 | tested = {nullrev} | |
677 | candidates = _refinedgroups(revlog, p1, p2, cachedelta) |
|
705 | candidates = _refinedgroups( | |
|
706 | revlog, | |||
|
707 | p1, | |||
|
708 | p2, | |||
|
709 | cachedelta, | |||
|
710 | snapshot_cache=snapshot_cache, | |||
|
711 | ) | |||
678 | while True: |
|
712 | while True: | |
679 | temptative = candidates.send(good) |
|
713 | temptative = candidates.send(good) | |
680 | if temptative is None: |
|
714 | if temptative is None: | |
@@ -694,15 +728,37 b' def _candidategroups(revlog, textlen, p1' | |||||
694 | # filter out revision we tested already |
|
728 | # filter out revision we tested already | |
695 | if rev in tested: |
|
729 | if rev in tested: | |
696 | continue |
|
730 | continue | |
697 | tested.add(rev) |
|
731 | ||
|
732 | if ( | |||
|
733 | cachedelta is not None | |||
|
734 | and rev == cachedelta[0] | |||
|
735 | and cachedelta[2] == DELTA_BASE_REUSE_FORCE | |||
|
736 | ): | |||
|
737 | # instructions are to forcibly consider/use this delta base | |||
|
738 | group.append(rev) | |||
|
739 | continue | |||
|
740 | ||||
|
741 | # a higher authority deemed the base unworthy (e.g. censored) | |||
|
742 | if excluded_bases is not None and rev in excluded_bases: | |||
|
743 | tested.add(rev) | |||
|
744 | continue | |||
|
745 | # We are in some recomputation cases and that rev is too high in | |||
|
746 | # the revlog | |||
|
747 | if target_rev is not None and rev >= target_rev: | |||
|
748 | tested.add(rev) | |||
|
749 | continue | |||
698 | # filter out delta base that will never produce good delta |
|
750 | # filter out delta base that will never produce good delta | |
699 | if deltas_limit < revlog.length(rev): |
|
751 | if deltas_limit < revlog.length(rev): | |
|
752 | tested.add(rev) | |||
700 | continue |
|
753 | continue | |
701 | if sparse and revlog.rawsize(rev) < (textlen // LIMIT_BASE2TEXT): |
|
754 | if sparse and revlog.rawsize(rev) < (textlen // LIMIT_BASE2TEXT): | |
|
755 | tested.add(rev) | |||
702 | continue |
|
756 | continue | |
703 | # no delta for rawtext-changing revs (see "candelta" for why) |
|
757 | # no delta for rawtext-changing revs (see "candelta" for why) | |
704 | if revlog.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS: |
|
758 | if revlog.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS: | |
|
759 | tested.add(rev) | |||
705 | continue |
|
760 | continue | |
|
761 | ||||
706 | # If we reach here, we are about to build and test a delta. |
|
762 | # If we reach here, we are about to build and test a delta. | |
707 | # The delta building process will compute the chaininfo in all |
|
763 | # The delta building process will compute the chaininfo in all | |
708 | # case, since that computation is cached, it is fine to access it |
|
764 | # case, since that computation is cached, it is fine to access it | |
@@ -710,9 +766,11 b' def _candidategroups(revlog, textlen, p1' | |||||
710 | chainlen, chainsize = revlog._chaininfo(rev) |
|
766 | chainlen, chainsize = revlog._chaininfo(rev) | |
711 | # if chain will be too long, skip base |
|
767 | # if chain will be too long, skip base | |
712 | if revlog._maxchainlen and chainlen >= revlog._maxchainlen: |
|
768 | if revlog._maxchainlen and chainlen >= revlog._maxchainlen: | |
|
769 | tested.add(rev) | |||
713 | continue |
|
770 | continue | |
714 | # if chain already have too much data, skip base |
|
771 | # if chain already have too much data, skip base | |
715 | if deltas_limit < chainsize: |
|
772 | if deltas_limit < chainsize: | |
|
773 | tested.add(rev) | |||
716 | continue |
|
774 | continue | |
717 | if sparse and revlog.upperboundcomp is not None: |
|
775 | if sparse and revlog.upperboundcomp is not None: | |
718 | maxcomp = revlog.upperboundcomp |
|
776 | maxcomp = revlog.upperboundcomp | |
@@ -731,36 +789,46 b' def _candidategroups(revlog, textlen, p1' | |||||
731 | snapshotlimit = textlen >> snapshotdepth |
|
789 | snapshotlimit = textlen >> snapshotdepth | |
732 | if snapshotlimit < lowestrealisticdeltalen: |
|
790 | if snapshotlimit < lowestrealisticdeltalen: | |
733 | # delta lower bound is larger than accepted upper bound |
|
791 | # delta lower bound is larger than accepted upper bound | |
|
792 | tested.add(rev) | |||
734 | continue |
|
793 | continue | |
735 |
|
794 | |||
736 | # check the relative constraint on the delta size |
|
795 | # check the relative constraint on the delta size | |
737 | revlength = revlog.length(rev) |
|
796 | revlength = revlog.length(rev) | |
738 | if revlength < lowestrealisticdeltalen: |
|
797 | if revlength < lowestrealisticdeltalen: | |
739 | # delta probable lower bound is larger than target base |
|
798 | # delta probable lower bound is larger than target base | |
|
799 | tested.add(rev) | |||
740 | continue |
|
800 | continue | |
741 |
|
801 | |||
742 | group.append(rev) |
|
802 | group.append(rev) | |
743 | if group: |
|
803 | if group: | |
744 | # XXX: in the sparse revlog case, group can become large, |
|
804 | # When the size of the candidate group is big, it can result in a | |
745 | # impacting performances. Some bounding or slicing mecanism |
|
805 | # quite significant performance impact. To reduce this, we can send | |
746 | # would help to reduce this impact. |
|
806 | # them in smaller batches until the new batch does not provide any | |
747 | good = yield tuple(group) |
|
807 | # improvements. | |
|
808 | # | |||
|
809 | # This might reduce the overall efficiency of the compression in | |||
|
810 | # some corner cases, but that should also prevent very pathological | |||
|
811 | # cases from being an issue (e.g. 20 000 candidates). | |||
|
812 | # | |||
|
813 | # XXX note that the ordering of the group becomes important as it | |||
|
814 | # now impacts the final result. The current order is unprocessed | |||
|
815 | # and can be improved. | |||
|
816 | if group_chunk_size == 0: | |||
|
817 | tested.update(group) | |||
|
818 | good = yield tuple(group) | |||
|
819 | else: | |||
|
820 | prev_good = good | |||
|
821 | for start in range(0, len(group), group_chunk_size): | |||
|
822 | sub_group = group[start : start + group_chunk_size] | |||
|
823 | tested.update(sub_group) | |||
|
824 | good = yield tuple(sub_group) | |||
|
825 | if prev_good == good: | |||
|
826 | break | |||
|
827 | ||||
748 | yield None |
|
828 | yield None | |
749 |
|
829 | |||
750 |
|
830 | |||
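_candidategroups() drives its candidate producer through the generator send() protocol: each yielded tuple is a batch of candidate bases, and the caller sends back the best revision found so far (or None), letting the producer refine or stop. A stripped-down model of that handshake:

    def candidates():
        good = None
        for group in ((1, 2), (3, 4), (5,)):
            good = yield group      # consumer reports its current best
            if good is not None:
                break               # a batch succeeded: stop producing
        yield None                  # sentinel: nothing left to offer

    gen = candidates()
    choice = None
    while True:
        group = gen.send(choice)    # the first send() must be None to prime
        if group is None:
            break
        choice = max(group)         # stand-in for building and testing deltas

The chunked variant added above layers one twist on top: when a sub-chunk brings no improvement over the previous one (prev_good == good), the remaining candidates are skipped.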
751 | def _findsnapshots(revlog, cache, start_rev): |
|
831 | def _refinedgroups(revlog, p1, p2, cachedelta, snapshot_cache=None): | |
752 | """find snapshot from start_rev to tip""" |
|
|||
753 | if util.safehasattr(revlog.index, b'findsnapshots'): |
|
|||
754 | revlog.index.findsnapshots(cache, start_rev) |
|
|||
755 | else: |
|
|||
756 | deltaparent = revlog.deltaparent |
|
|||
757 | issnapshot = revlog.issnapshot |
|
|||
758 | for rev in revlog.revs(start_rev): |
|
|||
759 | if issnapshot(rev): |
|
|||
760 | cache[deltaparent(rev)].append(rev) |
|
|||
761 |
|
||||
762 |
|
||||
763 | def _refinedgroups(revlog, p1, p2, cachedelta): |
|
|||
764 | good = None |
|
832 | good = None | |
765 | # First we try to reuse the delta contained in the bundle. |
|
833 | # First we try to reuse the delta contained in the bundle. | 
766 | # (or from the source revlog) |
|
834 | # (or from the source revlog) | |
@@ -768,15 +836,28 b' def _refinedgroups(revlog, p1, p2, cache' | |||||
768 | # This logic only applies to general delta repositories and can be disabled |
|
836 | # This logic only applies to general delta repositories and can be disabled | |
769 | # through configuration. Disabling reuse source delta is useful when |
|
837 | # through configuration. Disabling reuse source delta is useful when | |
770 | # we want to make sure we recomputed "optimal" deltas. |
|
838 | # we want to make sure we recomputed "optimal" deltas. | |
771 | if cachedelta and revlog._generaldelta and revlog._lazydeltabase: |
|
839 | debug_info = None | |
|
840 | if cachedelta is not None and cachedelta[2] > DELTA_BASE_REUSE_NO: | |||
772 | # Assume what we received from the server is a good choice |
|
841 | # Assume what we received from the server is a good choice | |
773 | # build delta will reuse the cache |
|
842 | # build delta will reuse the cache | |
|
843 | if debug_info is not None: | |||
|
844 | debug_info['cached-delta.tested'] += 1 | |||
774 | good = yield (cachedelta[0],) |
|
845 | good = yield (cachedelta[0],) | |
775 | if good is not None: |
|
846 | if good is not None: | |
|
847 | if debug_info is not None: | |||
|
848 | debug_info['cached-delta.accepted'] += 1 | |||
776 | yield None |
|
849 | yield None | |
777 | return |
|
850 | return | |
778 | snapshots = collections.defaultdict(list) |
|
851 | if snapshot_cache is None: | |
779 | for candidates in _rawgroups(revlog, p1, p2, cachedelta, snapshots): |
|
852 | snapshot_cache = SnapshotCache() | |
|
853 | groups = _rawgroups( | |||
|
854 | revlog, | |||
|
855 | p1, | |||
|
856 | p2, | |||
|
857 | cachedelta, | |||
|
858 | snapshot_cache, | |||
|
859 | ) | |||
|
860 | for candidates in groups: | |||
780 | good = yield candidates |
|
861 | good = yield candidates | |
781 | if good is not None: |
|
862 | if good is not None: | |
782 | break |
|
863 | break | |
@@ -797,19 +878,22 b' def _refinedgroups(revlog, p1, p2, cache' | |||||
797 | break |
|
878 | break | |
798 | good = yield (base,) |
|
879 | good = yield (base,) | |
799 | # refine snapshot up |
|
880 | # refine snapshot up | |
800 | if not snapshots: |
|
881 | if not snapshot_cache.snapshots: | |
801 | _findsnapshots(revlog, snapshots, good + 1) |
|
|
882 | snapshot_cache.update(revlog, good + 1) | |
802 | previous = None |
|
883 | previous = None | |
803 | while good != previous: |
|
884 | while good != previous: | |
804 | previous = good |
|
885 | previous = good | |
805 | children = tuple(sorted(c for c in snapshots[good])) |
|
886 | children = tuple(sorted(c for c in snapshot_cache.snapshots[good])) | |
806 | good = yield children |
|
887 | good = yield children | |
807 |
|
888 | |||
808 | # we have found nothing |
|
889 | if debug_info is not None: | |
|
890 | if good is None: | |||
|
891 | debug_info['no-solution'] += 1 | |||
|
892 | ||||
809 | yield None |
|
893 | yield None | |
810 |
|
894 | |||
811 |
|
895 | |||
812 | def _rawgroups(revlog, p1, p2, cachedelta, snapshots=None): |
|
896 | def _rawgroups(revlog, p1, p2, cachedelta, snapshot_cache=None): | |
813 | """Provides group of revision to be tested as delta base |
|
897 | """Provides group of revision to be tested as delta base | |
814 |
|
898 | |||
815 | This lower level function focuses on emitting deltas theoretically interesting |
|
899 | This lower level function focuses on emitting deltas theoretically interesting | 
@@ -840,9 +924,9 b' def _rawgroups(revlog, p1, p2, cachedelt' | |||||
840 | yield parents |
|
924 | yield parents | |
841 |
|
925 | |||
842 | if sparse and parents: |
|
926 | if sparse and parents: | |
843 | if snapshots is None: |
|
927 | if snapshot_cache is None: | |
844 | # map: base-rev: snapshot-rev |
|
928 | # map: base-rev: [snapshot-revs] | |
845 | snapshots = collections.defaultdict(list) |
|
929 | snapshot_cache = SnapshotCache() | |
846 | # See if we can use an existing snapshot in the parent chains to use as |
|
930 | # See if we can use an existing snapshot in the parent chains to use as | |
847 | # a base for a new intermediate-snapshot |
|
931 | # a base for a new intermediate-snapshot | |
848 | # |
|
932 | # | |
@@ -856,7 +940,7 b' def _rawgroups(revlog, p1, p2, cachedelt' | |||||
856 | break |
|
940 | break | |
857 | parents_snaps[idx].add(s) |
|
941 | parents_snaps[idx].add(s) | |
858 | snapfloor = min(parents_snaps[0]) + 1 |
|
942 | snapfloor = min(parents_snaps[0]) + 1 | |
859 | _findsnapshots(revlog, snapshots, snapfloor) |
|
|
943 | snapshot_cache.update(revlog, snapfloor) | |
860 | # search for the highest "unrelated" revision |
|
944 | # search for the highest "unrelated" revision | |
861 | # |
|
945 | # | |
862 | # Adding snapshots used by "unrelated" revision increases the odds we |
|
946 | # Adding snapshots used by "unrelated" revision increases the odds we | 
@@ -879,14 +963,14 b' def _rawgroups(revlog, p1, p2, cachedelt' | |||||
879 | # chain. |
|
963 | # chain. | |
880 | max_depth = max(parents_snaps.keys()) |
|
964 | max_depth = max(parents_snaps.keys()) | |
881 | chain = deltachain(other) |
|
965 | chain = deltachain(other) | |
882 | for idx, s in enumerate(chain): |
|
966 | for depth, s in enumerate(chain): | |
883 | if s < snapfloor: |
|
967 | if s < snapfloor: | |
884 | continue |
|
968 | continue | |
885 | if max_depth < idx: |
|
969 | if max_depth < depth: | |
886 | break |
|
970 | break | |
887 | if not revlog.issnapshot(s): |
|
971 | if not revlog.issnapshot(s): | |
888 | break |
|
972 | break | |
889 | parents_snaps[idx].add(s) |
|
973 | parents_snaps[depth].add(s) | |
890 | # Test them as possible intermediate snapshot base |
|
974 | # Test them as possible intermediate snapshot base | |
891 | # We test them from highest to lowest level. High level one are more |
|
975 | # We test them from highest to lowest level. High level one are more | |
892 | # likely to result in small delta |
|
976 | # likely to result in small delta | |
@@ -894,7 +978,7 b' def _rawgroups(revlog, p1, p2, cachedelt' | |||||
894 | for idx, snaps in sorted(parents_snaps.items(), reverse=True): |
|
978 | for idx, snaps in sorted(parents_snaps.items(), reverse=True): | |
895 | siblings = set() |
|
979 | siblings = set() | |
896 | for s in snaps: |
|
980 | for s in snaps: | |
897 | siblings.update(snapshots[s]) |
|
981 | siblings.update(snapshot_cache.snapshots[s]) | |
898 | # Before considering making a new intermediate snapshot, we check |
|
982 | # Before considering making a new intermediate snapshot, we check | |
899 | # if an existing snapshot, children of base we consider, would be |
|
983 | # if an existing snapshot, children of base we consider, would be | |
900 | # suitable. |
|
984 | # suitable. | |
@@ -922,7 +1006,8 b' def _rawgroups(revlog, p1, p2, cachedelt' | |||||
922 | # revisions instead of starting our own. Without such re-use, |
|
1006 | # revisions instead of starting our own. Without such re-use, | |
923 | # topological branches would keep reopening new full chains. Creating |
|
1007 | # topological branches would keep reopening new full chains. Creating | |
924 | # more and more snapshot as the repository grow. |
|
1008 | # more and more snapshot as the repository grow. | |
925 | yield tuple(snapshots[nullrev]) |
|
1009 | full = [r for r in snapshot_cache.snapshots[nullrev] if snapfloor <= r] | |
|
1010 | yield tuple(sorted(full)) | |||
926 |
|
1011 | |||
927 | if not sparse: |
|
1012 | if not sparse: | |
928 | # other approach failed try against prev to hopefully save us a |
|
1013 | # other approach failed try against prev to hopefully save us a | |
@@ -930,11 +1015,74 b' def _rawgroups(revlog, p1, p2, cachedelt' | |||||
930 | yield (prev,) |
|
1015 | yield (prev,) | |
931 |
|
1016 | |||
932 |
|
1017 | |||
|
1018 | class SnapshotCache: | |||
|
1019 | __slots__ = ('snapshots', '_start_rev', '_end_rev') | |||
|
1020 | ||||
|
1021 | def __init__(self): | |||
|
1022 | self.snapshots = collections.defaultdict(set) | |||
|
1023 | self._start_rev = None | |||
|
1024 | self._end_rev = None | |||
|
1025 | ||||
|
1026 | def update(self, revlog, start_rev=0): | |||
|
1027 | """find snapshots from start_rev to tip""" | |||
|
1028 | nb_revs = len(revlog) | |||
|
1029 | end_rev = nb_revs - 1 | |||
|
1030 | if start_rev > end_rev: | |||
|
1031 | return # range is empty | |||
|
1032 | ||||
|
1033 | if self._start_rev is None: | |||
|
1034 | assert self._end_rev is None | |||
|
1035 | self._update(revlog, start_rev, end_rev) | |||
|
1036 | elif not (self._start_rev <= start_rev and end_rev <= self._end_rev): | |||
|
1037 | if start_rev < self._start_rev: | |||
|
1038 | self._update(revlog, start_rev, self._start_rev - 1) | |||
|
1039 | if self._end_rev < end_rev: | |||
|
1040 | self._update(revlog, self._end_rev + 1, end_rev) | |||
|
1041 | ||||
|
1042 | if self._start_rev is None: | |||
|
1043 | assert self._end_rev is None | |||
|
1044 | self._end_rev = end_rev | |||
|
1045 | self._start_rev = start_rev | |||
|
1046 | else: | |||
|
1047 | self._start_rev = min(self._start_rev, start_rev) | |||
|
1048 | self._end_rev = max(self._end_rev, end_rev) | |||
|
1049 | assert self._start_rev <= self._end_rev, ( | |||
|
1050 | self._start_rev, | |||
|
1051 | self._end_rev, | |||
|
1052 | ) | |||
|
1053 | ||||
|
1054 | def _update(self, revlog, start_rev, end_rev): | |||
|
1055 | """internal method that actually do update content""" | |||
|
1056 | assert self._start_rev is None or ( | |||
|
1057 | start_rev < self._start_rev or start_rev > self._end_rev | |||
|
1058 | ), (self._start_rev, self._end_rev, start_rev, end_rev) | |||
|
1059 | assert self._start_rev is None or ( | |||
|
1060 | end_rev < self._start_rev or end_rev > self._end_rev | |||
|
1061 | ), (self._start_rev, self._end_rev, start_rev, end_rev) | |||
|
1062 | cache = self.snapshots | |||
|
1063 | if util.safehasattr(revlog.index, b'findsnapshots'): | |||
|
1064 | revlog.index.findsnapshots(cache, start_rev, end_rev) | |||
|
1065 | else: | |||
|
1066 | deltaparent = revlog.deltaparent | |||
|
1067 | issnapshot = revlog.issnapshot | |||
|
1068 | for rev in revlog.revs(start_rev, end_rev): | |||
|
1069 | if issnapshot(rev): | |||
|
1070 | cache[deltaparent(rev)].add(rev) | |||
|
1071 | ||||
|
1072 | ||||
933 | class deltacomputer: |
|
1073 | class deltacomputer: | |
934 | def __init__(self, revlog, write_debug=None, debug_search=False): |
|
1074 | def __init__( | |
|
1075 | self, | |||
|
1076 | revlog, | |||
|
1077 | write_debug=None, | |||
|
1078 | debug_search=False, | |||
|
1079 | debug_info=None, | |||
|
1080 | ): | |||
935 | self.revlog = revlog |
|
1081 | self.revlog = revlog | |
936 | self._write_debug = write_debug |
|
1082 | self._write_debug = write_debug | |
937 | self._debug_search = debug_search |
|
1083 | self._debug_search = debug_search | |
|
1084 | self._debug_info = debug_info | |||
|
1085 | self._snapshot_cache = SnapshotCache() | |||
938 |
|
1086 | |||
939 | def buildtext(self, revinfo, fh): |
|
1087 | def buildtext(self, revinfo, fh): | |
940 | """Builds a fulltext version of a revision |
|
1088 | """Builds a fulltext version of a revision | |
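
The SnapshotCache added above is a lazy index: it remembers the contiguous [start_rev, end_rev] interval it has already walked and, on a later update() call, only scans the pieces sticking out on either side before widening the recorded interval. A standalone sketch of that bookkeeping, with illustrative names rather than Mercurial's API:

    import collections

    class RangeCache:
        """Toy model of SnapshotCache: visit each index of `data` at most once."""

        def __init__(self):
            self.items = collections.defaultdict(set)  # value -> set of positions
            self._start = None
            self._end = None

        def update(self, data, start=0):
            end = len(data) - 1
            if start > end:
                return  # empty range
            if self._start is None:
                self._scan(data, start, end)
            elif not (self._start <= start and end <= self._end):
                # only scan what sticks out of the already-covered interval
                if start < self._start:
                    self._scan(data, start, self._start - 1)
                if self._end < end:
                    self._scan(data, self._end + 1, end)
            if self._start is None:
                self._start, self._end = start, end
            else:
                self._start = min(self._start, start)
                self._end = max(self._end, end)

        def _scan(self, data, start, end):
            for i in range(start, end + 1):
                self.items[data[i]].add(i)

    rc = RangeCache()
    rc.update('abcab')    # scans positions 0..4
    rc.update('abcabxy')  # scans only the new positions 5..6

This is what lets one deltacomputer reuse a single snapshot map across many revisions instead of re-walking the revlog per revision.
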
@@ -998,7 +1146,7 b' class deltacomputer:' | |||||
998 | snapshotdepth = len(revlog._deltachain(deltabase)[0]) |
|
1146 | snapshotdepth = len(revlog._deltachain(deltabase)[0]) | |
999 | delta = None |
|
1147 | delta = None | |
1000 | if revinfo.cachedelta: |
|
1148 | if revinfo.cachedelta: | |
1001 | cachebase, cachediff = revinfo.cachedelta
|
1149 | cachebase = revinfo.cachedelta[0] | |
1002 | # check if the diff still applies

1150 | # check if the diff still applies | 
1003 | currentbase = cachebase |
|
1151 | currentbase = cachebase | |
1004 | while ( |
|
1152 | while ( | |
@@ -1103,11 +1251,14 b' class deltacomputer:' | |||||
1103 | if revinfo.flags & REVIDX_RAWTEXT_CHANGING_FLAGS: |
|
1251 | if revinfo.flags & REVIDX_RAWTEXT_CHANGING_FLAGS: | |
1104 | return self._fullsnapshotinfo(fh, revinfo, target_rev) |
|
1252 | return self._fullsnapshotinfo(fh, revinfo, target_rev) | |
1105 |
|
1253 | |||
1106 | if self._write_debug is not None: |
|
1254 | gather_debug = ( | |
|
1255 | self._write_debug is not None or self._debug_info is not None | |||
|
1256 | ) | |||
|
1257 | debug_search = self._write_debug is not None and self._debug_search | |||
|
1258 | ||||
|
1259 | if gather_debug: | |||
1107 | start = util.timer() |
|
1260 | start = util.timer() | |
1108 |
|
1261 | |||
1109 | debug_search = self._write_debug is not None and self._debug_search |
|
|||
1110 |
|
||||
1111 | # count the number of different deltas we tried (for debug purposes)

1262 | # count the number of different deltas we tried (for debug purposes) | 
1112 | dbg_try_count = 0 |
|
1263 | dbg_try_count = 0 | |
1113 | # count the number of "search rounds" we did (for debug purposes)

1264 | # count the number of "search rounds" we did (for debug purposes) | 
@@ -1122,7 +1273,7 b' class deltacomputer:' | |||||
1122 | deltainfo = None |
|
1273 | deltainfo = None | |
1123 | p1r, p2r = revlog.rev(p1), revlog.rev(p2) |
|
1274 | p1r, p2r = revlog.rev(p1), revlog.rev(p2) | |
1124 |
|
1275 | |||
1125 | if self._write_debug is not None:
|
1276 | if gather_debug: | |
1126 | if p1r != nullrev: |
|
1277 | if p1r != nullrev: | |
1127 | p1_chain_len = revlog._chaininfo(p1r)[0] |
|
1278 | p1_chain_len = revlog._chaininfo(p1r)[0] | |
1128 | else: |
|
1279 | else: | |
@@ -1137,7 +1288,14 b' class deltacomputer:' | |||||
1137 | self._write_debug(msg) |
|
1288 | self._write_debug(msg) | |
1138 |
|
1289 | |||
1139 | groups = _candidategroups( |
|
1290 | groups = _candidategroups( | |
1140 | self.revlog, revinfo.textlen, p1r, p2r, cachedelta |
|
1291 | self.revlog, | |
|
1292 | revinfo.textlen, | |||
|
1293 | p1r, | |||
|
1294 | p2r, | |||
|
1295 | cachedelta, | |||
|
1296 | excluded_bases, | |||
|
1297 | target_rev, | |||
|
1298 | snapshot_cache=self._snapshot_cache, | |||
1141 | ) |
|
1299 | ) | |
1142 | candidaterevs = next(groups) |
|
1300 | candidaterevs = next(groups) | |
1143 | while candidaterevs is not None: |
|
1301 | while candidaterevs is not None: | |
@@ -1147,7 +1305,13 b' class deltacomputer:' | |||||
1147 | if deltainfo is not None: |
|
1305 | if deltainfo is not None: | |
1148 | prev = deltainfo.base |
|
1306 | prev = deltainfo.base | |
1149 |
|
1307 | |||
1150 | if p1 in candidaterevs or p2 in candidaterevs: |
|
1308 | if ( | |
|
1309 | cachedelta is not None | |||
|
1310 | and len(candidaterevs) == 1 | |||
|
1311 | and cachedelta[0] in candidaterevs | |||
|
1312 | ): | |||
|
1313 | round_type = b"cached-delta" | |||
|
1314 | elif p1 in candidaterevs or p2 in candidaterevs: | |||
1151 | round_type = b"parents" |
|
1315 | round_type = b"parents" | |
1152 | elif prev is not None and all(c < prev for c in candidaterevs): |
|
1316 | elif prev is not None and all(c < prev for c in candidaterevs): | |
1153 | round_type = b"refine-down" |
|
1317 | round_type = b"refine-down" | |
@@ -1195,16 +1359,7 b' class deltacomputer:' | |||||
1195 | msg = b"DBG-DELTAS-SEARCH: base=%d\n" |
|
1359 | msg = b"DBG-DELTAS-SEARCH: base=%d\n" | |
1196 | msg %= self.revlog.deltaparent(candidaterev) |
|
1360 | msg %= self.revlog.deltaparent(candidaterev) | |
1197 | self._write_debug(msg) |
|
1361 | self._write_debug(msg) | |
1198 | if candidaterev in excluded_bases: |
|
1362 | ||
1199 | if debug_search: |
|
|||
1200 | msg = b"DBG-DELTAS-SEARCH: EXCLUDED\n" |
|
|||
1201 | self._write_debug(msg) |
|
|||
1202 | continue |
|
|||
1203 | if candidaterev >= target_rev: |
|
|||
1204 | if debug_search: |
|
|||
1205 | msg = b"DBG-DELTAS-SEARCH: TOO-HIGH\n" |
|
|||
1206 | self._write_debug(msg) |
|
|||
1207 | continue |
|
|||
1208 | dbg_try_count += 1 |
|
1363 | dbg_try_count += 1 | |
1209 |
|
1364 | |||
1210 | if debug_search: |
|
1365 | if debug_search: | |
@@ -1216,7 +1371,7 b' class deltacomputer:' | |||||
1216 | msg %= delta_end - delta_start |
|
1371 | msg %= delta_end - delta_start | |
1217 | self._write_debug(msg) |
|
1372 | self._write_debug(msg) | |
1218 | if candidatedelta is not None: |
|
1373 | if candidatedelta is not None: | |
1219 | if isgooddeltainfo(self.revlog, candidatedelta, revinfo): |
|
1374 | if is_good_delta_info(self.revlog, candidatedelta, revinfo): | |
1220 | if debug_search: |
|
1375 | if debug_search: | |
1221 | msg = b"DBG-DELTAS-SEARCH: DELTA: length=%d (GOOD)\n" |
|
1376 | msg = b"DBG-DELTAS-SEARCH: DELTA: length=%d (GOOD)\n" | |
1222 | msg %= candidatedelta.deltalen |
|
1377 | msg %= candidatedelta.deltalen | |
@@ -1244,12 +1399,28 b' class deltacomputer:' | |||||
1244 | else: |
|
1399 | else: | |
1245 | dbg_type = b"delta" |
|
1400 | dbg_type = b"delta" | |
1246 |
|
1401 | |||
1247 | if self._write_debug is not None:
|
1402 | if gather_debug: | |
1248 | end = util.timer() |
|
1403 | end = util.timer() | |
|
1404 | if dbg_type == b'full': | |||
|
1405 | used_cached = ( | |||
|
1406 | cachedelta is not None | |||
|
1407 | and dbg_try_rounds == 0 | |||
|
1408 | and dbg_try_count == 0 | |||
|
1409 | and cachedelta[0] == nullrev | |||
|
1410 | ) | |||
|
1411 | else: | |||
|
1412 | used_cached = ( | |||
|
1413 | cachedelta is not None | |||
|
1414 | and dbg_try_rounds == 1 | |||
|
1415 | and dbg_try_count == 1 | |||
|
1416 | and deltainfo.base == cachedelta[0] | |||
|
1417 | ) | |||
1249 | dbg = { |
|
1418 | dbg = { | |
1250 | 'duration': end - start, |
|
1419 | 'duration': end - start, | |
1251 | 'revision': target_rev, |
|
1420 | 'revision': target_rev, | |
|
1421 | 'delta-base': deltainfo.base, # pytype: disable=attribute-error | |||
1252 | 'search_round_count': dbg_try_rounds, |
|
1422 | 'search_round_count': dbg_try_rounds, | |
|
1423 | 'using-cached-base': used_cached, | |||
1253 | 'delta_try_count': dbg_try_count, |
|
1424 | 'delta_try_count': dbg_try_count, | |
1254 | 'type': dbg_type, |
|
1425 | 'type': dbg_type, | |
1255 | 'p1-chain-len': p1_chain_len, |
|
1426 | 'p1-chain-len': p1_chain_len, | |
@@ -1279,31 +1450,39 b' class deltacomputer:' | |||||
1279 | target_revlog += b'%s:' % target_key |
|
1450 | target_revlog += b'%s:' % target_key | |
1280 | dbg['target-revlog'] = target_revlog |
|
1451 | dbg['target-revlog'] = target_revlog | |
1281 |
|
1452 | |||
1282 | msg = ( |
|
1453 | if self._debug_info is not None: | |
1283 | b"DBG-DELTAS:" |
|
1454 | self._debug_info.append(dbg) | |
1284 | b" %-12s" |
|
1455 | ||
1285 | b" rev=%d:" |
|
1456 | if self._write_debug is not None: | |
1286 | b" search-rounds=%d"
|
1457 | msg = ( | |
1287 | b" try-count=%d"
|
1458 | b"DBG-DELTAS:" | |
1288 | b" - delta-type=%-6s"
|
1459 | b" %-12s" | |
1289 | b" snap-depth=%d"
|
1460 | b" rev=%d:" | |
1290 | b" - p1-chain-length=%d"
|
1461 | b" delta-base=%d" | |
1291 | b" p2-chain-length=%d"
|
1462 | b" is-cached=%d" | |
1292 | b" - duration=%f"
|
1463 | b" - search-rounds=%d" | |
1293 | b"\n"
|
1464 | b" try-count=%d" | |
1294 | ) |
|
1465 | b" - delta-type=%-6s" | |
1295 | msg %= ( |
|
1466 | b" snap-depth=%d" | |
1296 | dbg["target-revlog"], |
|
1467 | b" - p1-chain-length=%d" | |
1297 | dbg["revision"], |
|
1468 | b" p2-chain-length=%d" | |
1298 | dbg["search_round_count"], |
|
1469 | b" - duration=%f" | |
1299 | dbg["delta_try_count"],
|
1470 | b"\n" | |
1300 | dbg["type"],
|
1471 | ) | |
1301 | dbg["snapshot-depth"], |
|
1472 | msg %= ( | |
1302 |
dbg[" |
|
1473 | dbg["target-revlog"], | |
1303 |
dbg[" |
|
1474 | dbg["revision"], | |
1304 |
dbg["d |
|
1475 | dbg["delta-base"], | |
1305 | ) |
|
1476 | dbg["using-cached-base"], | |
1306 | self._write_debug(msg) |
|
1477 | dbg["search_round_count"], | |
|
1478 | dbg["delta_try_count"], | |||
|
1479 | dbg["type"], | |||
|
1480 | dbg["snapshot-depth"], | |||
|
1481 | dbg["p1-chain-len"], | |||
|
1482 | dbg["p2-chain-len"], | |||
|
1483 | dbg["duration"], | |||
|
1484 | ) | |||
|
1485 | self._write_debug(msg) | |||
1307 | return deltainfo |
|
1486 | return deltainfo | |
1308 |
|
1487 | |||
1309 |
|
1488 |
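
The debug refactoring above separates two questions that used to be one: gather_debug is true when either sink exists (a formatted write callback or a list collecting raw dicts), while debug_search still controls the verbose per-candidate trace. A hedged sketch of that shape, with invented names standing in for the revlog parts:

    import time

    def search_with_debug(candidates, try_one, write_debug=None, debug_info=None):
        gather_debug = write_debug is not None or debug_info is not None
        start = time.monotonic() if gather_debug else None
        tried = 0
        best = None
        for rev in candidates:
            tried += 1
            best = try_one(rev) or best
        if gather_debug:
            dbg = {'duration': time.monotonic() - start, 'try-count': tried}
            if debug_info is not None:
                debug_info.append(dbg)  # machine-readable sink
            if write_debug is not None:
                write_debug('tried %(try-count)d in %(duration)fs\n' % dbg)
        return best

Collecting once and fanning out to both sinks is what makes the new debug_info list possible without duplicating the timing code.
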
@@ -90,7 +90,7 b' if stable_docket_file:' | |||||
90 | # * 8 bytes: pending size of data |
|
90 | # * 8 bytes: pending size of data | |
91 | # * 8 bytes: pending size of sidedata |
|
91 | # * 8 bytes: pending size of sidedata | |
92 | # * 1 bytes: default compression header |
|
92 | # * 1 bytes: default compression header | |
93 | S_HEADER = struct.Struct(constants.INDEX_HEADER_FMT + b'BBBBBBQQQQQQ')
|
93 | S_HEADER = struct.Struct(constants.INDEX_HEADER_FMT + b'BBBBBBQQQQQQc') | |
94 | # * 1 bytes: size of index uuid |
|
94 | # * 1 bytes: size of index uuid | |
95 | # * 8 bytes: size of file |
|
95 | # * 8 bytes: size of file | |
96 | S_OLD_UID = struct.Struct('>BL') |
|
96 | S_OLD_UID = struct.Struct('>BL') |
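
The docket change above appends a 'c' to the struct format: one extra byte carrying the default compression header. In the struct module, 'c' packs a length-1 bytes object. A reduced, hypothetical header (not the real INDEX_HEADER_FMT layout) showing the mechanics:

    import struct

    # two 1-byte fields, one 8-byte size, one raw byte for the compression
    # header; b'u' is the revlog marker for an uncompressed chunk
    S_MINI = struct.Struct('>BBQc')

    packed = S_MINI.pack(1, 0, 4096, b'u')
    assert S_MINI.unpack(packed) == (1, 0, 4096, b'u')
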
@@ -1868,13 +1868,12 b' def outgoing(repo, subset, x):' | |||||
1868 | dests = [] |
|
1868 | dests = [] | |
1869 | missing = set() |
|
1869 | missing = set() | |
1870 | for path in urlutil.get_push_paths(repo, repo.ui, dests): |
|
1870 | for path in urlutil.get_push_paths(repo, repo.ui, dests): | |
1871 | dest = path.pushloc or path.loc |
|
|||
1872 | branches = path.branch, [] |
|
1871 | branches = path.branch, [] | |
1873 |
|
1872 | |||
1874 | revs, checkout = hg.addbranchrevs(repo, repo, branches, []) |
|
1873 | revs, checkout = hg.addbranchrevs(repo, repo, branches, []) | |
1875 | if revs: |
|
1874 | if revs: | |
1876 | revs = [repo.lookup(rev) for rev in revs] |
|
1875 | revs = [repo.lookup(rev) for rev in revs] | |
1877 | other = hg.peer(repo, {}, dest)
|
1876 | other = hg.peer(repo, {}, path) | |
1878 | try: |
|
1877 | try: | |
1879 | with repo.ui.silent(): |
|
1878 | with repo.ui.silent(): | |
1880 | outgoing = discovery.findcommonoutgoing( |
|
1879 | outgoing = discovery.findcommonoutgoing( | |
@@ -2130,11 +2129,9 b' def remote(repo, subset, x):' | |||||
2130 | dest = getstring(l[1], _(b"remote requires a repository path")) |
|
2129 | dest = getstring(l[1], _(b"remote requires a repository path")) | |
2131 | if not dest: |
|
2130 | if not dest: | |
2132 | dest = b'default' |
|
2131 | dest = b'default' | |
2133 | dest, branches = urlutil.get_unique_pull_path(
|
2132 | path = urlutil.get_unique_pull_path_obj(b'remote', repo.ui, dest) | |
2134 | b'remote', repo, repo.ui, dest |
|
2133 | ||
2135 | ) |
|
2134 | other = hg.peer(repo, {}, path) | |
2136 |
|
||||
2137 | other = hg.peer(repo, {}, dest) |
|
|||
2138 | n = other.lookup(q) |
|
2135 | n = other.lookup(q) | |
2139 | if n in repo: |
|
2136 | if n in repo: | |
2140 | r = repo[n].rev() |
|
2137 | r = repo[n].rev() |
@@ -4,6 +4,11 b' import fcntl' | |||||
4 | import os |
|
4 | import os | |
5 | import sys |
|
5 | import sys | |
6 |
|
6 | |||
|
7 | from typing import ( | |||
|
8 | List, | |||
|
9 | Tuple, | |||
|
10 | ) | |||
|
11 | ||||
7 | from .pycompat import getattr |
|
12 | from .pycompat import getattr | |
8 | from . import ( |
|
13 | from . import ( | |
9 | encoding, |
|
14 | encoding, | |
@@ -11,6 +16,9 b' from . import (' | |||||
11 | util, |
|
16 | util, | |
12 | ) |
|
17 | ) | |
13 |
|
18 | |||
|
19 | if pycompat.TYPE_CHECKING: | |||
|
20 | from . import ui as uimod | |||
|
21 | ||||
14 | # BSD 'more' escapes ANSI color sequences by default. This can be disabled by |
|
22 | # BSD 'more' escapes ANSI color sequences by default. This can be disabled by | |
15 | # $MORE variable, but there's no compatible option with Linux 'more'. Given |
|
23 | # $MORE variable, but there's no compatible option with Linux 'more'. Given | |
16 | # OS X is widely used and most modern Unix systems would have 'less', setting |
|
24 | # OS X is widely used and most modern Unix systems would have 'less', setting | |
@@ -18,7 +26,7 b' from . import (' | |||||
18 | fallbackpager = b'less' |
|
26 | fallbackpager = b'less' | |
19 |
|
27 | |||
20 |
|
28 | |||
21 | def _rcfiles(path): |
|
29 | def _rcfiles(path: bytes) -> List[bytes]: | |
22 | rcs = [os.path.join(path, b'hgrc')] |
|
30 | rcs = [os.path.join(path, b'hgrc')] | |
23 | rcdir = os.path.join(path, b'hgrc.d') |
|
31 | rcdir = os.path.join(path, b'hgrc.d') | |
24 | try: |
|
32 | try: | |
@@ -34,7 +42,7 b' def _rcfiles(path):' | |||||
34 | return rcs |
|
42 | return rcs | |
35 |
|
43 | |||
36 |
|
44 | |||
37 | def systemrcpath(): |
|
45 | def systemrcpath() -> List[bytes]: | |
38 | path = [] |
|
46 | path = [] | |
39 | if pycompat.sysplatform == b'plan9': |
|
47 | if pycompat.sysplatform == b'plan9': | |
40 | root = b'lib/mercurial' |
|
48 | root = b'lib/mercurial' | |
@@ -49,7 +57,7 b' def systemrcpath():' | |||||
49 | return path |
|
57 | return path | |
50 |
|
58 | |||
51 |
|
59 | |||
52 | def userrcpath(): |
|
60 | def userrcpath() -> List[bytes]: | |
53 | if pycompat.sysplatform == b'plan9': |
|
61 | if pycompat.sysplatform == b'plan9': | |
54 | return [encoding.environ[b'home'] + b'/lib/hgrc'] |
|
62 | return [encoding.environ[b'home'] + b'/lib/hgrc'] | |
55 | elif pycompat.isdarwin: |
|
63 | elif pycompat.isdarwin: | |
@@ -65,7 +73,7 b' def userrcpath():' | |||||
65 | ] |
|
73 | ] | |
66 |
|
74 | |||
67 |
|
75 | |||
68 | def termsize(ui): |
|
76 | def termsize(ui: "uimod.ui") -> Tuple[int, int]: | |
69 | try: |
|
77 | try: | |
70 | import termios |
|
78 | import termios | |
71 |
|
79 | |||
@@ -88,7 +96,7 b' def termsize(ui):' | |||||
88 | except ValueError: |
|
96 | except ValueError: | |
89 | pass |
|
97 | pass | |
90 | except IOError as e: |
|
98 | except IOError as e: | |
91 | if e[0] == errno.EINVAL: # pytype: disable=unsupported-operands |
|
99 | if e.errno == errno.EINVAL: | |
92 | pass |
|
100 | pass | |
93 | else: |
|
101 | else: | |
94 | raise |
|
102 | raise |
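
The last hunk above replaces the Python 2 idiom e[0] with e.errno: on Python 3, IOError is an alias of OSError, exceptions are not indexable, and the errno lives on an attribute, so the pytype suppression can go away. The pattern in isolation:

    import errno

    def call_ignoring_einval(func, *args):
        try:
            return func(*args)
        except OSError as e:  # IOError is the same class on Python 3
            if e.errno == errno.EINVAL:
                return None   # swallow only EINVAL
            raise
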
@@ -1219,7 +1219,7 b' def cleanupnodes(' | |||||
1219 | ) |
|
1219 | ) | |
1220 |
|
1220 | |||
1221 |
|
1221 | |||
1222 | def addremove(repo, matcher, prefix, uipathfn, opts=None): |
|
1222 | def addremove(repo, matcher, prefix, uipathfn, opts=None, open_tr=None): | |
1223 | if opts is None: |
|
1223 | if opts is None: | |
1224 | opts = {} |
|
1224 | opts = {} | |
1225 | m = matcher |
|
1225 | m = matcher | |
@@ -1279,7 +1279,9 b' def addremove(repo, matcher, prefix, uip' | |||||
1279 | repo, m, added + unknown, removed + deleted, similarity, uipathfn |
|
1279 | repo, m, added + unknown, removed + deleted, similarity, uipathfn | |
1280 | ) |
|
1280 | ) | |
1281 |
|
1281 | |||
1282 | if not dry_run: |
|
1282 | if not dry_run and (unknown or forgotten or deleted or renames): | |
|
1283 | if open_tr is not None: | |||
|
1284 | open_tr() | |||
1283 | _markchanges(repo, unknown + forgotten, deleted, renames) |
|
1285 | _markchanges(repo, unknown + forgotten, deleted, renames) | |
1284 |
|
1286 | |||
1285 | for f in rejected: |
|
1287 | for f in rejected: | |
@@ -1863,7 +1865,12 b' def gdinitconfig(ui):' | |||||
1863 |
|
1865 | |||
1864 |
|
1866 | |||
1865 | def gddeltaconfig(ui): |
|
1867 | def gddeltaconfig(ui): | |
1866 | """helper function to know if incoming delta should be optimised"""
|
1868 | """helper function to know if incoming deltas should be optimized | |
|
1869 | ||||
|
1870 | The `format.generaldelta` config is an old form of the config that also | |||
|
1871 | implies that incoming delta-bases should never be trusted. This function | |||
|
1872 | exists for this purpose. | |||
|
1873 | """ | |||
1867 | # experimental config: format.generaldelta |
|
1874 | # experimental config: format.generaldelta | |
1868 | return ui.configbool(b'format', b'generaldelta') |
|
1875 | return ui.configbool(b'format', b'generaldelta') | |
1869 |
|
1876 |
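
addremove() now accepts an optional open_tr callback and invokes it only when there are changes to record, letting callers defer opening a transaction until it is actually needed. The shape of that contract, with made-up names:

    def record_changes(changes, open_tr=None):
        if changes and open_tr is not None:
            open_tr()  # caller-supplied: open the transaction lazily
        for change in changes:
            print('recording', change)

    record_changes(['a.txt'], open_tr=lambda: print('transaction opened'))
    record_changes([], open_tr=lambda: print('never called'))
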
@@ -1,4 +1,10 b'' | |||||
1 | import os |
|
1 | import os | |
|
2 | import winreg # pytype: disable=import-error | |||
|
3 | ||||
|
4 | from typing import ( | |||
|
5 | List, | |||
|
6 | Tuple, | |||
|
7 | ) | |||
2 |
|
8 | |||
3 | from . import ( |
|
9 | from . import ( | |
4 | encoding, |
|
10 | encoding, | |
@@ -7,19 +13,14 b' from . import (' | |||||
7 | win32, |
|
13 | win32, | |
8 | ) |
|
14 | ) | |
9 |
|
15 | |||
10 | try: |
|
16 | if pycompat.TYPE_CHECKING: | |
11 | import _winreg as winreg # pytype: disable=import-error |
|
17 | from . import ui as uimod | |
12 |
|
||||
13 | winreg.CloseKey |
|
|||
14 | except ImportError: |
|
|||
15 | # py2 only |
|
|||
16 | import winreg # pytype: disable=import-error |
|
|||
17 |
|
18 | |||
18 | # MS-DOS 'more' is the only pager available by default on Windows. |
|
19 | # MS-DOS 'more' is the only pager available by default on Windows. | |
19 | fallbackpager = b'more' |
|
20 | fallbackpager = b'more' | |
20 |
|
21 | |||
21 |
|
22 | |||
22 | def systemrcpath(): |
|
23 | def systemrcpath() -> List[bytes]: | |
23 | '''return default os-specific hgrc search path''' |
|
24 | '''return default os-specific hgrc search path''' | |
24 | rcpath = [] |
|
25 | rcpath = [] | |
25 | filename = win32.executablepath() |
|
26 | filename = win32.executablepath() | |
@@ -27,7 +28,7 b' def systemrcpath():' | |||||
27 | progrc = os.path.join(os.path.dirname(filename), b'mercurial.ini') |
|
28 | progrc = os.path.join(os.path.dirname(filename), b'mercurial.ini') | |
28 | rcpath.append(progrc) |
|
29 | rcpath.append(progrc) | |
29 |
|
30 | |||
30 | def _processdir(progrcd): |
|
31 | def _processdir(progrcd: bytes) -> None: | |
31 | if os.path.isdir(progrcd): |
|
32 | if os.path.isdir(progrcd): | |
32 | for f, kind in sorted(util.listdir(progrcd)): |
|
33 | for f, kind in sorted(util.listdir(progrcd)): | |
33 | if f.endswith(b'.rc'): |
|
34 | if f.endswith(b'.rc'): | |
@@ -68,7 +69,7 b' def systemrcpath():' | |||||
68 | return rcpath |
|
69 | return rcpath | |
69 |
|
70 | |||
70 |
|
71 | |||
71 | def userrcpath(): |
|
72 | def userrcpath() -> List[bytes]: | |
72 | '''return os-specific hgrc search path to the user dir''' |
|
73 | '''return os-specific hgrc search path to the user dir''' | |
73 | home = _legacy_expanduser(b'~') |
|
74 | home = _legacy_expanduser(b'~') | |
74 | path = [os.path.join(home, b'mercurial.ini'), os.path.join(home, b'.hgrc')] |
|
75 | path = [os.path.join(home, b'mercurial.ini'), os.path.join(home, b'.hgrc')] | |
@@ -79,7 +80,7 b' def userrcpath():' | |||||
79 | return path |
|
80 | return path | |
80 |
|
81 | |||
81 |
|
82 | |||
82 | def _legacy_expanduser(path): |
|
83 | def _legacy_expanduser(path: bytes) -> bytes: | |
83 | """Expand ~ and ~user constructs in the pre 3.8 style""" |
|
84 | """Expand ~ and ~user constructs in the pre 3.8 style""" | |
84 |
|
85 | |||
85 | # Python 3.8+ changed the expansion of '~' from HOME to USERPROFILE. See |
|
86 | # Python 3.8+ changed the expansion of '~' from HOME to USERPROFILE. See | |
@@ -111,5 +112,5 b' def _legacy_expanduser(path):' | |||||
111 | return userhome + path[i:] |
|
112 | return userhome + path[i:] | |
112 |
|
113 | |||
113 |
|
114 | |||
114 | def termsize(ui): |
|
115 | def termsize(ui: "uimod.ui") -> Tuple[int, int]: | |
115 | return win32.termsize() |
|
116 | return win32.termsize() |
@@ -247,6 +247,14 b' class Shelf:' | |||||
247 | for ext in shelvefileextensions: |
|
247 | for ext in shelvefileextensions: | |
248 | self.vfs.tryunlink(self.name + b'.' + ext) |
|
248 | self.vfs.tryunlink(self.name + b'.' + ext) | |
249 |
|
249 | |||
|
250 | def changed_files(self, ui, repo): | |||
|
251 | try: | |||
|
252 | ctx = repo.unfiltered()[self.readinfo()[b'node']] | |||
|
253 | return ctx.files() | |||
|
254 | except (FileNotFoundError, error.RepoLookupError): | |||
|
255 | filename = self.vfs.join(self.name + b'.patch') | |||
|
256 | return patch.changedfiles(ui, repo, filename) | |||
|
257 | ||||
250 |
|
258 | |||
251 | def _optimized_match(repo, node): |
|
259 | def _optimized_match(repo, node): | |
252 | """ |
|
260 | """ | |
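
Shelf.changed_files above prefers the cheap path (look up the recorded node and ask the changectx for its files) and only falls back to parsing the patch file when the metadata is missing or the node is unknown. The same try/fallback shape in isolation, with invented file names:

    import json

    def changed_files(meta_path, recompute):
        try:
            with open(meta_path) as f:
                return json.load(f)['files']
        except (FileNotFoundError, KeyError):
            # e.g. a shelve created before the metadata existed
            return recompute()
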
@@ -424,10 +432,26 b' def _restoreactivebookmark(repo, mark):' | |||||
424 |
|
432 | |||
425 | def _aborttransaction(repo, tr): |
|
433 | def _aborttransaction(repo, tr): | |
426 | """Abort current transaction for shelve/unshelve, but keep dirstate""" |
|
434 | """Abort current transaction for shelve/unshelve, but keep dirstate""" | |
427 | dirstatebackupname = b'dirstate.shelve' |
|
435 | # disable the transaction invalidation of the dirstate, to preserve the | |
428 | repo.dirstate.savebackup(None, dirstatebackupname) |
|
436 | # current change in memory. | |
429 | tr.abort() |
|
437 | ds = repo.dirstate | |
430 | repo.dirstate.restorebackup(None, dirstatebackupname) |
|
438 | # The assert below checks that nobody else did such wrapping. | 
|
439 | # | |||
|
440 | # There is no such other wrapping currently, but if someone tries to | |||
|
441 | # implement one in the future, this will explicitly break here instead of | |||
|
442 | # misbehaving in subtle ways. | |||
|
443 | assert 'invalidate' not in vars(ds) | |||
|
444 | try: | |||
|
445 | # note : we could simply disable the transaction abort callback, but | |||
|
446 | # other code also tries to rollback and invalidate this. | |||
|
447 | ds.invalidate = lambda: None | |||
|
448 | tr.abort() | |||
|
449 | finally: | |||
|
450 | del ds.invalidate | |||
|
451 | # manually write the change in memory since we can no longer rely on the | |||
|
452 | # transaction to do so. | |||
|
453 | assert repo.currenttransaction() is None | |||
|
454 | repo.dirstate.write(None) | |||
431 |
|
455 | |||
432 |
|
456 | |||
433 | def getshelvename(repo, parent, opts): |
|
457 | def getshelvename(repo, parent, opts): | |
@@ -599,7 +623,8 b' def _docreatecmd(ui, repo, pats, opts):' | |||||
599 | activebookmark = _backupactivebookmark(repo) |
|
623 | activebookmark = _backupactivebookmark(repo) | |
600 | extra = {b'internal': b'shelve'} |
|
624 | extra = {b'internal': b'shelve'} | |
601 | if includeunknown: |
|
625 | if includeunknown: | |
602 | _includeunknownfiles(repo, pats, opts, extra) |
|
626 | with repo.dirstate.changing_files(repo): | |
|
627 | _includeunknownfiles(repo, pats, opts, extra) | |||
603 |
|
628 | |||
604 | if _iswctxonnewbranch(repo) and not _isbareshelve(pats, opts): |
|
629 | if _iswctxonnewbranch(repo) and not _isbareshelve(pats, opts): | |
605 | # In non-bare shelve we don't store newly created branch |
|
630 | # In non-bare shelve we don't store newly created branch | |
@@ -629,7 +654,7 b' def _docreatecmd(ui, repo, pats, opts):' | |||||
629 |
|
654 | |||
630 | ui.status(_(b'shelved as %s\n') % name) |
|
655 | ui.status(_(b'shelved as %s\n') % name) | |
631 | if opts[b'keep']: |
|
656 | if opts[b'keep']: | |
632 | with repo.dirstate.parentchange():
|
657 | with repo.dirstate.changing_parents(repo): | |
633 | scmutil.movedirstate(repo, parent, match) |
|
658 | scmutil.movedirstate(repo, parent, match) | |
634 | else: |
|
659 | else: | |
635 | hg.update(repo, parent.node()) |
|
660 | hg.update(repo, parent.node()) | |
@@ -854,18 +879,18 b' def unshelvecontinue(ui, repo, state, op' | |||||
854 | shelvectx = repo[state.parents[1]] |
|
879 | shelvectx = repo[state.parents[1]] | |
855 | pendingctx = state.pendingctx |
|
880 | pendingctx = state.pendingctx | |
856 |
|
881 | |||
857 | with repo.dirstate.parentchange():
|
882 | with repo.dirstate.changing_parents(repo): | |
858 | repo.setparents(state.pendingctx.node(), repo.nullid) |
|
883 | repo.setparents(state.pendingctx.node(), repo.nullid) | |
859 | repo.dirstate.write(repo.currenttransaction()) |
|
884 | repo.dirstate.write(repo.currenttransaction()) | |
860 |
|
885 | |||
861 | targetphase = _target_phase(repo) |
|
886 | targetphase = _target_phase(repo) | |
862 | overrides = {(b'phases', b'new-commit'): targetphase} |
|
887 | overrides = {(b'phases', b'new-commit'): targetphase} | |
863 | with repo.ui.configoverride(overrides, b'unshelve'): |
|
888 | with repo.ui.configoverride(overrides, b'unshelve'): | |
864 | with repo.dirstate.parentchange():
|
889 | with repo.dirstate.changing_parents(repo): | |
865 | repo.setparents(state.parents[0], repo.nullid) |
|
890 | repo.setparents(state.parents[0], repo.nullid) | |
866 | newnode, ispartialunshelve = _createunshelvectx(
|
891 | newnode, ispartialunshelve = _createunshelvectx( | |
867 | ui, repo, shelvectx, basename, interactive, opts
|
892 | ui, repo, shelvectx, basename, interactive, opts | |
868 | )
|
893 | ) | |
869 |
|
894 | |||
870 | if newnode is None: |
|
895 | if newnode is None: | |
871 | shelvectx = state.pendingctx |
|
896 | shelvectx = state.pendingctx | |
@@ -1060,11 +1085,11 b' def _rebaserestoredcommit(' | |||||
1060 | ) |
|
1085 | ) | |
1061 | raise error.ConflictResolutionRequired(b'unshelve') |
|
1086 | raise error.ConflictResolutionRequired(b'unshelve') | |
1062 |
|
1087 | |||
1063 | with repo.dirstate.parentchange():
|
1088 | with repo.dirstate.changing_parents(repo): | |
1064 | repo.setparents(tmpwctx.node(), repo.nullid) |
|
1089 | repo.setparents(tmpwctx.node(), repo.nullid) | |
1065 | newnode, ispartialunshelve = _createunshelvectx(
|
1090 | newnode, ispartialunshelve = _createunshelvectx( | |
1066 | ui, repo, shelvectx, basename, interactive, opts
|
1091 | ui, repo, shelvectx, basename, interactive, opts | |
1067 | )
|
1092 | ) | |
1068 |
|
1093 | |||
1069 | if newnode is None: |
|
1094 | if newnode is None: | |
1070 | shelvectx = tmpwctx |
|
1095 | shelvectx = tmpwctx | |
@@ -1210,7 +1235,8 b' def _dounshelve(ui, repo, basename, opts' | |||||
1210 | restorebranch(ui, repo, branchtorestore) |
|
1235 | restorebranch(ui, repo, branchtorestore) | |
1211 | shelvedstate.clear(repo) |
|
1236 | shelvedstate.clear(repo) | |
1212 | _finishunshelve(repo, oldtiprev, tr, activebookmark) |
|
1237 | _finishunshelve(repo, oldtiprev, tr, activebookmark) | |
1213 | _forgetunknownfiles(repo, shelvectx, addedbefore) |
|
1238 | with repo.dirstate.changing_files(repo): | |
|
1239 | _forgetunknownfiles(repo, shelvectx, addedbefore) | |||
1214 | if not ispartialunshelve: |
|
1240 | if not ispartialunshelve: | |
1215 | unshelvecleanup(ui, repo, basename, opts) |
|
1241 | unshelvecleanup(ui, repo, basename, opts) | |
1216 | finally: |
|
1242 | finally: |
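
The new _aborttransaction survives a transaction abort with the in-memory dirstate intact by shadowing invalidate() on the single instance, then removing the shadow. Instance attributes win over class attributes on lookup, and del removes only the instance-level shadow, which is also why 'invalidate' not in vars(ds) can detect a competing wrapper. Self-contained sketch:

    class Dirstate:
        def invalidate(self):
            print('in-memory changes dropped')

    def abort_keeping_dirstate(ds, abort):
        assert 'invalidate' not in vars(ds)  # nobody else wrapped it
        try:
            ds.invalidate = lambda: None  # instance attr shadows the method
            abort()  # anything calling ds.invalidate() now hits the no-op
        finally:
            del ds.invalidate  # the class method becomes visible again

    ds = Dirstate()
    abort_keeping_dirstate(ds, abort=lambda: ds.invalidate())  # prints nothing
    ds.invalidate()  # prints: in-memory changes dropped
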
@@ -512,6 +512,8 b' def simplemerge(' | |||||
512 | conflicts = False |
|
512 | conflicts = False | |
513 | if mode == b'union': |
|
513 | if mode == b'union': | |
514 | lines = _resolve(m3, (1, 2)) |
|
514 | lines = _resolve(m3, (1, 2)) | |
|
515 | elif mode == b'union-other-first': | |||
|
516 | lines = _resolve(m3, (2, 1)) | |||
515 | elif mode == b'local': |
|
517 | elif mode == b'local': | |
516 | lines = _resolve(m3, (1,)) |
|
518 | lines = _resolve(m3, (1,)) | |
517 | elif mode == b'other': |
|
519 | elif mode == b'other': |
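
b'union-other-first' is the existing union merge with the group order swapped: for every conflicting region the other side's lines are emitted before the local ones, i.e. _resolve gets (2, 1) instead of (1, 2). Sketched without the m3 machinery:

    def resolve(groups, order):
        # groups[1] = local side of a conflict, groups[2] = other side
        return [line for side in order for line in groups[side]]

    groups = {1: [b'local\n'], 2: [b'other\n']}
    assert resolve(groups, (1, 2)) == [b'local\n', b'other\n']  # union
    assert resolve(groups, (2, 1)) == [b'other\n', b'local\n']  # union-other-first
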
@@ -451,7 +451,7 b' def filterupdatesactions(repo, wctx, mct' | |||||
451 | message, |
|
451 | message, | |
452 | ) |
|
452 | ) | |
453 |
|
453 | |||
454 | with repo.dirstate.parentchange():
|
454 | with repo.dirstate.changing_parents(repo): | |
455 | mergemod.applyupdates( |
|
455 | mergemod.applyupdates( | |
456 | repo, |
|
456 | repo, | |
457 | tmresult, |
|
457 | tmresult, | |
@@ -655,7 +655,7 b' def clearrules(repo, force=False):' | |||||
655 | The remaining sparse config only has profiles, if defined. The working |
|
655 | The remaining sparse config only has profiles, if defined. The working | |
656 | directory is refreshed, as needed. |
|
656 | directory is refreshed, as needed. | |
657 | """ |
|
657 | """ | |
658 | with repo.wlock(), repo.dirstate.parentchange():
|
658 | with repo.wlock(), repo.dirstate.changing_parents(repo): | |
659 | raw = repo.vfs.tryread(b'sparse') |
|
659 | raw = repo.vfs.tryread(b'sparse') | |
660 | includes, excludes, profiles = parseconfig(repo.ui, raw, b'sparse') |
|
660 | includes, excludes, profiles = parseconfig(repo.ui, raw, b'sparse') | |
661 |
|
661 | |||
@@ -671,7 +671,7 b' def importfromfiles(repo, opts, paths, f' | |||||
671 | The updated sparse config is written out and the working directory |
|
671 | The updated sparse config is written out and the working directory | |
672 | is refreshed, as needed. |
|
672 | is refreshed, as needed. | |
673 | """ |
|
673 | """ | |
674 | with repo.wlock(), repo.dirstate.parentchange():
|
674 | with repo.wlock(), repo.dirstate.changing_parents(repo): | |
675 | # read current configuration |
|
675 | # read current configuration | |
676 | raw = repo.vfs.tryread(b'sparse') |
|
676 | raw = repo.vfs.tryread(b'sparse') | |
677 | includes, excludes, profiles = parseconfig(repo.ui, raw, b'sparse') |
|
677 | includes, excludes, profiles = parseconfig(repo.ui, raw, b'sparse') | |
@@ -730,7 +730,7 b' def updateconfig(' | |||||
730 |
|
730 | |||
731 | The new config is written out and a working directory refresh is performed. |
|
731 | The new config is written out and a working directory refresh is performed. | |
732 | """ |
|
732 | """ | |
733 | with repo.wlock(), repo.lock(), repo.dirstate.parentchange():
|
733 | with repo.wlock(), repo.lock(), repo.dirstate.changing_parents(repo): | |
734 | raw = repo.vfs.tryread(b'sparse') |
|
734 | raw = repo.vfs.tryread(b'sparse') | |
735 | oldinclude, oldexclude, oldprofiles = parseconfig( |
|
735 | oldinclude, oldexclude, oldprofiles = parseconfig( | |
736 | repo.ui, raw, b'sparse' |
|
736 | repo.ui, raw, b'sparse' |
@@ -372,7 +372,7 b' def _performhandshake(ui, stdin, stdout,' | |||||
372 |
|
372 | |||
373 | class sshv1peer(wireprotov1peer.wirepeer): |
|
373 | class sshv1peer(wireprotov1peer.wirepeer): | |
374 | def __init__( |
|
374 | def __init__( | |
375 | self, ui, url, proc, stdin, stdout, stderr, caps, autoreadstderr=True
|
375 | self, ui, path, proc, stdin, stdout, stderr, caps, autoreadstderr=True | |
376 | ): |
|
376 | ): | |
377 | """Create a peer from an existing SSH connection. |
|
377 | """Create a peer from an existing SSH connection. | |
378 |
|
378 | |||
@@ -383,8 +383,7 b' class sshv1peer(wireprotov1peer.wirepeer' | |||||
383 | ``autoreadstderr`` denotes whether to automatically read from |
|
383 | ``autoreadstderr`` denotes whether to automatically read from | |
384 | stderr and to forward its output. |
|
384 | stderr and to forward its output. | |
385 | """ |
|
385 | """ | |
386 | self._url = url |
|
386 | super().__init__(ui, path=path) | |
387 | self.ui = ui |
|
|||
388 | # self._subprocess is unused. Keeping a handle on the process |
|
387 | # self._subprocess is unused. Keeping a handle on the process | |
389 | # holds a reference and prevents it from being garbage collected. |
|
388 | # holds a reference and prevents it from being garbage collected. | |
390 | self._subprocess = proc |
|
389 | self._subprocess = proc | |
@@ -411,14 +410,11 b' class sshv1peer(wireprotov1peer.wirepeer' | |||||
411 | # Begin of ipeerconnection interface. |
|
410 | # Begin of ipeerconnection interface. | |
412 |
|
411 | |||
413 | def url(self): |
|
412 | def url(self): | |
414 | return self._url
|
413 | return self.path.loc | |
415 |
|
414 | |||
416 | def local(self): |
|
415 | def local(self): | |
417 | return None |
|
416 | return None | |
418 |
|
417 | |||
419 | def peer(self): |
|
|||
420 | return self |
|
|||
421 |
|
||||
422 | def canpush(self): |
|
418 | def canpush(self): | |
423 | return True |
|
419 | return True | |
424 |
|
420 | |||
@@ -610,16 +606,16 b' def makepeer(ui, path, proc, stdin, stdo' | |||||
610 | ) |
|
606 | ) | |
611 |
|
607 | |||
612 |
|
608 | |||
613 | def instance(ui, path, create, intents=None, createopts=None):
|
609 | def make_peer(ui, path, create, intents=None, createopts=None): | |
614 | """Create an SSH peer. |
|
610 | """Create an SSH peer. | |
615 |
|
611 | |||
616 | The returned object conforms to the ``wireprotov1peer.wirepeer`` interface. |
|
612 | The returned object conforms to the ``wireprotov1peer.wirepeer`` interface. | |
617 | """ |
|
613 | """ | |
618 | u = urlutil.url(path, parsequery=False, parsefragment=False) |
|
614 | u = urlutil.url(path.loc, parsequery=False, parsefragment=False) | |
619 | if u.scheme != b'ssh' or not u.host or u.path is None: |
|
615 | if u.scheme != b'ssh' or not u.host or u.path is None: | |
620 | raise error.RepoError(_(b"couldn't parse location %s") % path) |
|
616 | raise error.RepoError(_(b"couldn't parse location %s") % path) | |
621 |
|
617 | |||
622 | urlutil.checksafessh(path) |
|
618 | urlutil.checksafessh(path.loc) | |
623 |
|
619 | |||
624 | if u.passwd is not None: |
|
620 | if u.passwd is not None: | |
625 | raise error.RepoError(_(b'password in URL not supported')) |
|
621 | raise error.RepoError(_(b'password in URL not supported')) |
@@ -225,6 +225,7 b' class statichttprepository(' | |||||
225 | self.encodepats = None |
|
225 | self.encodepats = None | |
226 | self.decodepats = None |
|
226 | self.decodepats = None | |
227 | self._transref = None |
|
227 | self._transref = None | |
|
228 | self._dirstate = None | |||
228 |
|
229 | |||
229 | def _restrictcapabilities(self, caps): |
|
230 | def _restrictcapabilities(self, caps): | |
230 | caps = super(statichttprepository, self)._restrictcapabilities(caps) |
|
231 | caps = super(statichttprepository, self)._restrictcapabilities(caps) | |
@@ -236,8 +237,8 b' class statichttprepository(' | |||||
236 | def local(self): |
|
237 | def local(self): | |
237 | return False |
|
238 | return False | |
238 |
|
239 | |||
239 | def peer(self): |
|
240 | def peer(self, path=None): | |
240 | return statichttppeer(self) |
|
241 | return statichttppeer(self, path=path) | |
241 |
|
242 | |||
242 | def wlock(self, wait=True): |
|
243 | def wlock(self, wait=True): | |
243 | raise error.LockUnavailable( |
|
244 | raise error.LockUnavailable( | |
@@ -259,7 +260,8 b' class statichttprepository(' | |||||
259 | pass # statichttprepository are read only |
|
260 | pass # statichttprepository are read only | |
260 |
|
261 | |||
261 |
|
262 | |||
262 | def instance(ui, path, create, intents=None, createopts=None):
|
263 | def make_peer(ui, path, create, intents=None, createopts=None): | |
263 | if create: |
|
264 | if create: | |
264 | raise error.Abort(_(b'cannot create new static-http repository')) |
|
265 | raise error.Abort(_(b'cannot create new static-http repository')) | |
265 | return statichttprepository(ui, path[7:]) |
|
266 | url = path.loc[7:] | |
|
267 | return statichttprepository(ui, url).peer(path=path) |
@@ -1049,7 +1049,7 b' def main(argv=None):' | |||||
1049 | # process options |
|
1049 | # process options | |
1050 | try: |
|
1050 | try: | |
1051 | opts, args = pycompat.getoptb( |
|
1051 | opts, args = pycompat.getoptb( | |
1052 | sys.argv[optstart:],
|
1052 | pycompat.sysargv[optstart:], | |
1053 | b"hl:f:o:p:", |
|
1053 | b"hl:f:o:p:", | |
1054 | [b"help", b"limit=", b"file=", b"output-file=", b"script-path="], |
|
1054 | [b"help", b"limit=", b"file=", b"output-file=", b"script-path="], | |
1055 | ) |
|
1055 | ) |
@@ -241,31 +241,32 b' def debugstrip(ui, repo, *revs, **opts):' | |||||
241 |
|
241 | |||
242 | revs = sorted(rootnodes) |
|
242 | revs = sorted(rootnodes) | |
243 | if update and opts.get(b'keep'): |
|
243 | if update and opts.get(b'keep'): | |
244 | urev = _findupdatetarget(repo, revs) |
|
244 | with repo.dirstate.changing_parents(repo): | |
245 | uctx = repo[urev] |
|
245 | urev = _findupdatetarget(repo, revs) | |
|
246 | uctx = repo[urev] | |||
246 |
|
247 | |||
247 | # only reset the dirstate for files that would actually change |
|
248 | # only reset the dirstate for files that would actually change | |
248 | # between the working context and uctx |
|
249 | # between the working context and uctx | |
249 | descendantrevs = repo.revs(b"only(., %d)", uctx.rev()) |
|
250 | descendantrevs = repo.revs(b"only(., %d)", uctx.rev()) | |
250 | changedfiles = [] |
|
251 | changedfiles = [] | |
251 | for rev in descendantrevs: |
|
252 | for rev in descendantrevs: | |
252 | # blindly reset the files, regardless of what actually changed |
|
253 | # blindly reset the files, regardless of what actually changed | |
253 | changedfiles.extend(repo[rev].files()) |
|
254 | changedfiles.extend(repo[rev].files()) | |
254 |
|
255 | |||
255 | # reset files that only changed in the dirstate too |
|
256 | # reset files that only changed in the dirstate too | |
256 | dirstate = repo.dirstate |
|
257 | dirstate = repo.dirstate | |
257 | dirchanges = [ |
|
258 | dirchanges = [ | |
258 | f for f in dirstate if not dirstate.get_entry(f).maybe_clean |
|
259 | f for f in dirstate if not dirstate.get_entry(f).maybe_clean | |
259 | ] |
|
260 | ] | |
260 | changedfiles.extend(dirchanges) |
|
261 | changedfiles.extend(dirchanges) | |
261 |
|
262 | |||
262 | repo.dirstate.rebuild(urev, uctx.manifest(), changedfiles) |
|
263 | repo.dirstate.rebuild(urev, uctx.manifest(), changedfiles) | |
263 | repo.dirstate.write(repo.currenttransaction()) |
|
264 | repo.dirstate.write(repo.currenttransaction()) | |
264 |
|
265 | |||
265 | # clear resolve state |
|
266 | # clear resolve state | |
266 | mergestatemod.mergestate.clean(repo) |
|
267 | mergestatemod.mergestate.clean(repo) | |
267 |
|
268 | |||
268 | update = False |
|
269 | update = False | |
269 |
|
270 | |||
270 | strip( |
|
271 | strip( | |
271 | ui, |
|
272 | ui, |
@@ -569,9 +569,20 b' class hgsubrepo(abstractsubrepo):' | |||||
569 |
|
569 | |||
570 | @annotatesubrepoerror |
|
570 | @annotatesubrepoerror | |
571 | def add(self, ui, match, prefix, uipathfn, explicitonly, **opts): |
|
571 | def add(self, ui, match, prefix, uipathfn, explicitonly, **opts): | |
572 | return cmdutil.add( |
|
572 | # XXX Ideally, we could let the caller take the `changing_files` | |
573 | ui, self._repo, match, prefix, uipathfn, explicitonly, **opts |
|
573 | # context. However this is not an abstraction that makes sense for | 
574 | ) |
|
574 | # other repository types, and leaking details purely related to | 
|
575 | # dirstate seems unfortunate. So for now the context will be used here. | |||
|
576 | with self._repo.wlock(), self._repo.dirstate.changing_files(self._repo): | |||
|
577 | return cmdutil.add( | |||
|
578 | ui, | |||
|
579 | self._repo, | |||
|
580 | match, | |||
|
581 | prefix, | |||
|
582 | uipathfn, | |||
|
583 | explicitonly, | |||
|
584 | **opts, | |||
|
585 | ) | |||
575 |
|
586 | |||
576 | @annotatesubrepoerror |
|
587 | @annotatesubrepoerror | |
577 | def addremove(self, m, prefix, uipathfn, opts): |
|
588 | def addremove(self, m, prefix, uipathfn, opts): | |
@@ -580,7 +591,18 b' class hgsubrepo(abstractsubrepo):' | |||||
580 | # be used to process sibling subrepos however. |
|
591 | # be used to process sibling subrepos however. | |
581 | opts = copy.copy(opts) |
|
592 | opts = copy.copy(opts) | |
582 | opts[b'subrepos'] = True |
|
593 | opts[b'subrepos'] = True | |
583 | return scmutil.addremove(self._repo, m, prefix, uipathfn, opts) |
|
594 | # XXX Ideally, we could let the caller take the `changing_files` | |
|
595 | # context. However this is not an abstraction that makes sense for | |||
|
596 | # other repository types, and leaking details purely related to | |||
|
597 | # dirstate seems unfortunate. So for now the context will be used here. | |||
|
598 | with self._repo.wlock(), self._repo.dirstate.changing_files(self._repo): | |||
|
599 | return scmutil.addremove( | |||
|
600 | self._repo, | |||
|
601 | m, | |||
|
602 | prefix, | |||
|
603 | uipathfn, | |||
|
604 | opts, | |||
|
605 | ) | |||
584 |
|
606 | |||
585 | @annotatesubrepoerror |
|
607 | @annotatesubrepoerror | |
586 | def cat(self, match, fm, fntemplate, prefix, **opts): |
|
608 | def cat(self, match, fm, fntemplate, prefix, **opts): | |
@@ -621,7 +643,7 b' class hgsubrepo(abstractsubrepo):' | |||||
621 | match, |
|
643 | match, | |
622 | prefix=prefix, |
|
644 | prefix=prefix, | |
623 | listsubrepos=True, |
|
645 | listsubrepos=True, | |
624 | **opts |
|
646 | **opts, | |
625 | ) |
|
647 | ) | |
626 | except error.RepoLookupError as inst: |
|
648 | except error.RepoLookupError as inst: | |
627 | self.ui.warn( |
|
649 | self.ui.warn( | |
@@ -946,16 +968,21 b' class hgsubrepo(abstractsubrepo):' | |||||
946 |
|
968 | |||
947 | @annotatesubrepoerror |
|
969 | @annotatesubrepoerror | |
948 | def forget(self, match, prefix, uipathfn, dryrun, interactive): |
|
970 | def forget(self, match, prefix, uipathfn, dryrun, interactive): | |
949 | return cmdutil.forget( |
|
971 | # XXX Ideally, we could let the caller take the `changing_files` | |
950 | self.ui, |
|
972 | # context. However this is not an abstraction that makes sense for | 
951 | self._repo, |
|
973 | # other repository types, and leaking details purely related to | 
952 | match, |
|
974 | # dirstate seems unfortunate. So for now the context will be used here. | |
953 | prefix, |
|
975 | with self._repo.wlock(), self._repo.dirstate.changing_files(self._repo): | |
954 | uipathfn, |
|
976 | return cmdutil.forget( | |
955 | True,
|
977 | self.ui, | |
956 | dryrun=dryrun, |
|
978 | self._repo, | |
957 | interactive=interactive, |
|
979 | match, | |
958 | ) |
|
980 | prefix, | |
|
981 | uipathfn, | |||
|
982 | True, | |||
|
983 | dryrun=dryrun, | |||
|
984 | interactive=interactive, | |||
|
985 | ) | |||
959 |
|
986 | |||
960 | @annotatesubrepoerror |
|
987 | @annotatesubrepoerror | |
961 | def removefiles( |
|
988 | def removefiles( | |
@@ -969,17 +996,22 b' class hgsubrepo(abstractsubrepo):' | |||||
969 | dryrun, |
|
996 | dryrun, | |
970 | warnings, |
|
997 | warnings, | |
971 | ): |
|
998 | ): | |
972 | return cmdutil.remove( |
|
999 | # XXX Ideally, we could let the caller take the `changing_files` | |
973 | self.ui, |
|
1000 | # context. However this is not an abstraction that makes sense for | 
974 | self._repo, |
|
1001 | # other repository types, and leaking details purely related to | 
975 | matcher, |
|
1002 | # dirstate seems unfortunate. So for now the context will be used here. | |
976 | prefix, |
|
1003 | with self._repo.wlock(), self._repo.dirstate.changing_files(self._repo): | |
977 | uipathfn, |
|
1004 | return cmdutil.remove( | |
978 | after,
|
1005 | self.ui, | |
979 | force,
|
1006 | self._repo, | |
980 | subrepos,
|
1007 | matcher, | |
981 | dryrun,
|
1008 | prefix, | |
982 | ) |
|
1009 | uipathfn, | |
|
1010 | after, | |||
|
1011 | force, | |||
|
1012 | subrepos, | |||
|
1013 | dryrun, | |||
|
1014 | ) | |||
983 |
|
1015 | |||
984 | @annotatesubrepoerror |
|
1016 | @annotatesubrepoerror | |
985 | def revert(self, substate, *pats, **opts): |
|
1017 | def revert(self, substate, *pats, **opts): | |
@@ -1009,7 +1041,12 b' class hgsubrepo(abstractsubrepo):' | |||||
1009 | pats = [b'set:modified()'] |
|
1041 | pats = [b'set:modified()'] | |
1010 | else: |
|
1042 | else: | |
1011 | pats = [] |
|
1043 | pats = [] | |
1012 | cmdutil.revert(self.ui, self._repo, ctx, *pats, **opts) |
|
1044 | # XXX Ideally, we could let the caller take the `changing_files` | |
|
1045 | # context. However this is not an abstraction that makes sense for | |||
|
1046 | # other repository types, and leaking details purely related to | |||
|
1047 | # dirstate seems unfortunate. So for now the context will be used here. | |||
|
1048 | with self._repo.wlock(), self._repo.dirstate.changing_files(self._repo): | |||
|
1049 | cmdutil.revert(self.ui, self._repo, ctx, *pats, **opts) | |||
1013 |
|
1050 | |||
1014 | def shortid(self, revid): |
|
1051 | def shortid(self, revid): | |
1015 | return revid[:12] |
|
1052 | return revid[:12] | |
@@ -1123,7 +1160,7 b' class svnsubrepo(abstractsubrepo):' | |||||
1123 | stdout=subprocess.PIPE, |
|
1160 | stdout=subprocess.PIPE, | |
1124 | stderr=subprocess.PIPE, |
|
1161 | stderr=subprocess.PIPE, | |
1125 | env=procutil.tonativeenv(env), |
|
1162 | env=procutil.tonativeenv(env), | |
1126 | **extrakw |
|
1163 | **extrakw, | |
1127 | ) |
|
1164 | ) | |
1128 | stdout, stderr = map(util.fromnativeeol, p.communicate()) |
|
1165 | stdout, stderr = map(util.fromnativeeol, p.communicate()) | |
1129 | stderr = stderr.strip() |
|
1166 | stderr = stderr.strip() | |
@@ -1488,7 +1525,7 b' class gitsubrepo(abstractsubrepo):' | |||||
1488 | close_fds=procutil.closefds, |
|
1525 | close_fds=procutil.closefds, | |
1489 | stdout=subprocess.PIPE, |
|
1526 | stdout=subprocess.PIPE, | |
1490 | stderr=errpipe, |
|
1527 | stderr=errpipe, | |
1491 | **extrakw |
|
1528 | **extrakw, | |
1492 | ) |
|
1529 | ) | |
1493 | if stream: |
|
1530 | if stream: | |
1494 | return p.stdout, None |
|
1531 | return p.stdout, None |
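
Each hgsubrepo method above opens repo.dirstate.changing_files itself instead of asking the caller to, since the scope in which file-set changes are batched is a dirstate detail with no analogue for git or svn subrepos. Reduced to its bookkeeping, such a context manager looks like:

    import contextlib

    @contextlib.contextmanager
    def changing_files(repo):
        print('start batching file-set changes in', repo)
        try:
            yield
        finally:
            # one flush per batch, however many files were touched
            print('flush file-set changes in', repo)

    with changing_files('subrepo'):
        pass  # add/remove/forget/revert work happens here
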
@@ -664,8 +664,9 b' def _tag(' | |||||
664 |
|
664 | |||
665 | repo.invalidatecaches() |
|
665 | repo.invalidatecaches() | |
666 |
|
666 | |||
667 | if b'.hgtags' not in repo.dirstate: |
|
667 | with repo.dirstate.changing_files(repo): | |
668 | repo[None].add([b'.hgtags']) |
|
668 | if b'.hgtags' not in repo.dirstate: | |
|
669 | repo[None].add([b'.hgtags']) | |||
669 |
|
670 | |||
670 | m = matchmod.exact([b'.hgtags']) |
|
671 | m = matchmod.exact([b'.hgtags']) | |
671 | tagnode = repo.commit( |
|
672 | tagnode = repo.commit( |
@@ -177,10 +177,17 b' def tokenize(program, start, end, term=N' | |||||
177 | quote = program[pos : pos + 2] |
|
177 | quote = program[pos : pos + 2] | |
178 | s = pos = pos + 2 |
|
178 | s = pos = pos + 2 | |
179 | while pos < end: # find closing escaped quote |
|
179 | while pos < end: # find closing escaped quote | |
|
180 | # pycompat.bytestr (and bytes) both have .startswith() that | |||
|
181 | # takes an optional start and an optional end, but pytype thinks | |||
|
182 | # it only takes 2 args. | |||
|
183 | ||||
|
184 | # pytype: disable=wrong-arg-count | |||
180 | if program.startswith(b'\\\\\\', pos, end): |
|
185 | if program.startswith(b'\\\\\\', pos, end): | |
181 | pos += 4 # skip over double escaped characters |
|
186 | pos += 4 # skip over double escaped characters | |
182 | continue |
|
187 | continue | |
183 | if program.startswith(quote, pos, end): |
|
188 | if program.startswith(quote, pos, end): | |
|
189 | # pytype: enable=wrong-arg-count | |||
|
190 | ||||
184 | # interpret as if it were a part of an outer string |
|
191 | # interpret as if it were a part of an outer string | |
185 | data = parser.unescapestr(program[s:pos]) |
|
192 | data = parser.unescapestr(program[s:pos]) | |
186 | if token == b'template': |
|
193 | if token == b'template': | |
@@ -300,7 +307,14 b' def _scantemplate(tmpl, start, stop, quo' | |||||
300 | return |
|
307 | return | |
301 |
|
308 | |||
302 | parseres, pos = p.parse(tokenize(tmpl, n + 1, stop, b'}')) |
|
309 | parseres, pos = p.parse(tokenize(tmpl, n + 1, stop, b'}')) | |
|
310 | ||||
|
311 | # pycompat.bytestr (and bytes) both have .startswith() that | |||
|
312 | # takes an optional start and an optional end, but pytype thinks | |||
|
313 | # it only takes 2 args. | |||
|
314 | ||||
|
315 | # pytype: disable=wrong-arg-count | |||
303 | if not tmpl.startswith(b'}', pos): |
|
316 | if not tmpl.startswith(b'}', pos): | |
|
317 | # pytype: enable=wrong-arg-count | |||
304 | raise error.ParseError(_(b"invalid token"), pos) |
|
318 | raise error.ParseError(_(b"invalid token"), pos) | |
305 | yield (b'template', parseres, n) |
|
319 | yield (b'template', parseres, n) | |
306 | pos += 1 |
|
320 | pos += 1 |
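
Both pragmas above guard calls that are ordinary Python: bytes.startswith() accepts optional start and end offsets, exactly like slicing, but some pytype releases model it as taking only the prefix. Demonstrating the offset forms:

    program = b'say "x" done'

    assert program.startswith(b'"', 4)         # match attempted at index 4
    assert not program.startswith(b'"', 0, 3)  # only considers program[0:3]
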
@@ -1,6 +1,6 b'' | |||||
1 | The MIT License (MIT) |
|
1 | The MIT License (MIT) | |
2 |
|
2 | |||
3 | Copyright (c) 2015 Hynek Schlawack |
|
3 | Copyright (c) 2015 Hynek Schlawack and the attrs contributors | |
4 |
|
4 | |||
5 | Permission is hereby granted, free of charge, to any person obtaining a copy |
|
5 | Permission is hereby granted, free of charge, to any person obtaining a copy | |
6 | of this software and associated documentation files (the "Software"), to deal |
|
6 | of this software and associated documentation files (the "Software"), to deal |
@@ -1,37 +1,35 b'' | |||||
1 | from __future__ import absolute_import, division, print_function |
|
1 | # SPDX-License-Identifier: MIT | |
|
2 | ||||
|
3 | ||||
|
4 | import sys | |||
|
5 | ||||
|
6 | from functools import partial | |||
2 |
|
7 | |||
3 | from ._funcs import ( |
|
8 | from . import converters, exceptions, filters, setters, validators | |
4 | asdict, |
|
9 | from ._cmp import cmp_using | |
5 | assoc, |
|
10 | from ._config import get_run_validators, set_run_validators | |
6 | astuple, |
|
11 | from ._funcs import asdict, assoc, astuple, evolve, has, resolve_types | |
7 | evolve, |
|
|||
8 | has, |
|
|||
9 | ) |
|
|||
10 | from ._make import ( |
|
12 | from ._make import ( | |
|
13 | NOTHING, | |||
11 | Attribute, |
|
14 | Attribute, | |
12 | Factory, |
|
15 | Factory, | |
13 | NOTHING, |
|
16 | attrib, | |
14 | attr, |
|
17 | attrs, | |
15 | attributes, |
|
|||
16 | fields, |
|
18 | fields, | |
|
19 | fields_dict, | |||
17 | make_class, |
|
20 | make_class, | |
18 | validate, |
|
21 | validate, | |
19 | ) |
|
22 | ) | |
20 | from ._config import (
|
23 | from ._version_info import VersionInfo | |
21 | get_run_validators, |
|
|||
22 | set_run_validators, |
|
|||
23 | ) |
|
|||
24 | from . import exceptions |
|
|||
25 | from . import filters |
|
|||
26 | from . import converters |
|
|||
27 | from . import validators |
|
|||
28 |
|
24 | |||
29 |
|
25 | |||
30 | __version__ = "
|
26 | __version__ = "22.1.0" | |
|
27 | __version_info__ = VersionInfo._from_version_string(__version__) | |||
31 |
|
28 | |||
32 | __title__ = "attrs" |
|
29 | __title__ = "attrs" | |
33 | __description__ = "Classes Without Boilerplate" |
|
30 | __description__ = "Classes Without Boilerplate" | |
34 | __ur
|
31 | __url__ = "https://www.attrs.org/" | |
|
32 | __uri__ = __url__ | |||
35 | __doc__ = __description__ + " <" + __uri__ + ">" |
|
33 | __doc__ = __description__ + " <" + __uri__ + ">" | |
36 |
|
34 | |||
37 | __author__ = "Hynek Schlawack" |
|
35 | __author__ = "Hynek Schlawack" | |
@@ -41,8 +39,9 b' from . import validators' | |||||
41 | __copyright__ = "Copyright (c) 2015 Hynek Schlawack" |
|
39 | __copyright__ = "Copyright (c) 2015 Hynek Schlawack" | |
42 |
|
40 | |||
43 |
|
41 | |||
44 | s = attrs = attributes
|
42 | s = attributes = attrs | |
45 | ib = attrib = attr
|
43 | ib = attr = attrib | |
|
44 | dataclass = partial(attrs, auto_attribs=True) # happy Easter ;) | |||
46 |
|
45 | |||
47 | __all__ = [ |
|
46 | __all__ = [ | |
48 | "Attribute", |
|
47 | "Attribute", | |
@@ -55,17 +54,26 b' ib = attrib = attr' | |||||
55 | "attrib", |
|
54 | "attrib", | |
56 | "attributes", |
|
55 | "attributes", | |
57 | "attrs", |
|
56 | "attrs", | |
|
57 | "cmp_using", | |||
58 | "converters", |
|
58 | "converters", | |
59 | "evolve", |
|
59 | "evolve", | |
60 | "exceptions", |
|
60 | "exceptions", | |
61 | "fields", |
|
61 | "fields", | |
|
62 | "fields_dict", | |||
62 | "filters", |
|
63 | "filters", | |
63 | "get_run_validators", |
|
64 | "get_run_validators", | |
64 | "has", |
|
65 | "has", | |
65 | "ib", |
|
66 | "ib", | |
66 | "make_class", |
|
67 | "make_class", | |
|
68 | "resolve_types", | |||
67 | "s", |
|
69 | "s", | |
68 | "set_run_validators", |
|
70 | "set_run_validators", | |
|
71 | "setters", | |||
69 | "validate", |
|
72 | "validate", | |
70 | "validators", |
|
73 | "validators", | |
71 | ] |
|
74 | ] | |
|
75 | ||||
|
76 | if sys.version_info[:2] >= (3, 6): | |||
|
77 | from ._next_gen import define, field, frozen, mutable # noqa: F401 | |||
|
78 | ||||
|
79 | __all__.extend(("define", "field", "frozen", "mutable")) |
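
With the vendored copy bumped to attrs 22.1.0, classes can still be declared through the classic aliases kept above (attr.s, attr.ib, attributes), and attr.dataclass is literally partial(attrs, auto_attribs=True). A quick illustration, assuming the upstream attrs package for the import name (Mercurial's own copy lives under mercurial.thirdparty.attr):

    import attr

    @attr.s(auto_attribs=True)
    class Point:
        x: int
        y: int = 0

    assert Point(1) == Point(1, 0)  # __init__/__eq__/__repr__ generated
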
@@ -1,90 +1,185 @@
-from __future__ import absolute_import, division, print_function
-
-import sys
-import types
-
-
-PY2 = sys.version_info[0] == 2
-
-
-if PY2:
-    from UserDict import IterableUserDict
-
-    # We 'bundle' isclass instead of using inspect as importing inspect is
-    # fairly expensive (order of 10-15 ms for a modern machine in 2016)
-    def isclass(klass):
-        return isinstance(klass, (type, types.ClassType))
-
-    # TYPE is used in exceptions, repr(int) is different on Python 2 and 3.
-    TYPE = "type"
-
-    def iteritems(d):
-        return d.iteritems()
-
-    def iterkeys(d):
-        return d.iterkeys()
-
-    # Python 2 is bereft of a read-only dict proxy, so we make one!
-    class ReadOnlyDict(IterableUserDict):
-        """
-        Best-effort read-only dict wrapper.
-        """
-
-        def __setitem__(self, key, val):
-            # We gently pretend we're a Python 3 mappingproxy.
-            raise TypeError("'mappingproxy' object does not support item "
-                            "assignment")
-
-        def update(self, _):
-            # We gently pretend we're a Python 3 mappingproxy.
-            raise AttributeError("'mappingproxy' object has no attribute "
-                                 "'update'")
-
-        def __delitem__(self, _):
-            # We gently pretend we're a Python 3 mappingproxy.
-            raise TypeError("'mappingproxy' object does not support item "
-                            "deletion")
-
-        def clear(self):
-            # We gently pretend we're a Python 3 mappingproxy.
-            raise AttributeError("'mappingproxy' object has no attribute "
-                                 "'clear'")
-
-        def pop(self, key, default=None):
-            # We gently pretend we're a Python 3 mappingproxy.
-            raise AttributeError("'mappingproxy' object has no attribute "
-                                 "'pop'")
-
-        def popitem(self):
-            # We gently pretend we're a Python 3 mappingproxy.
-            raise AttributeError("'mappingproxy' object has no attribute "
-                                 "'popitem'")
-
-        def setdefault(self, key, default=None):
-            # We gently pretend we're a Python 3 mappingproxy.
-            raise AttributeError("'mappingproxy' object has no attribute "
-                                 "'setdefault'")
-
-        def __repr__(self):
-            # Override to be identical to the Python 3 version.
-            return "mappingproxy(" + repr(self.data) + ")"
-
-    def metadata_proxy(d):
-        res = ReadOnlyDict()
-        res.data.update(d)  # We blocked update, so we have to do it like this.
-        return res
-
-else:
-    def isclass(klass):
-        return isinstance(klass, type)
-
-    TYPE = "class"
-
-    def iteritems(d):
-        return d.items()
-
-    def iterkeys(d):
-        return d.keys()
-
-    def metadata_proxy(d):
-        return types.MappingProxyType(dict(d))
+# SPDX-License-Identifier: MIT
+
+
+import inspect
+import platform
+import sys
+import threading
+import types
+import warnings
+
+from collections.abc import Mapping, Sequence  # noqa
+
+
+PYPY = platform.python_implementation() == "PyPy"
+PY36 = sys.version_info[:2] >= (3, 6)
+HAS_F_STRINGS = PY36
+PY310 = sys.version_info[:2] >= (3, 10)
+
+
+if PYPY or PY36:
+    ordered_dict = dict
+else:
+    from collections import OrderedDict
+
+    ordered_dict = OrderedDict
+
+
+def just_warn(*args, **kw):
+    warnings.warn(
+        "Running interpreter doesn't sufficiently support code object "
+        "introspection. Some features like bare super() or accessing "
+        "__class__ will not work with slotted classes.",
+        RuntimeWarning,
+        stacklevel=2,
+    )
+
+
+class _AnnotationExtractor:
+    """
+    Extract type annotations from a callable, returning None whenever there
+    is none.
+    """
+
+    __slots__ = ["sig"]
+
+    def __init__(self, callable):
+        try:
+            self.sig = inspect.signature(callable)
+        except (ValueError, TypeError):  # inspect failed
+            self.sig = None
+
+    def get_first_param_type(self):
+        """
+        Return the type annotation of the first argument if it's not empty.
+        """
+        if not self.sig:
+            return None
+
+        params = list(self.sig.parameters.values())
+        if params and params[0].annotation is not inspect.Parameter.empty:
+            return params[0].annotation
+
+        return None
+
+    def get_return_type(self):
+        """
+        Return the return type if it's not empty.
+        """
+        if (
+            self.sig
+            and self.sig.return_annotation is not inspect.Signature.empty
+        ):
+            return self.sig.return_annotation
+
+        return None
+
+
+def make_set_closure_cell():
+    """Return a function of two arguments (cell, value) which sets
+    the value stored in the closure cell `cell` to `value`.
+    """
+    # pypy makes this easy. (It also supports the logic below, but
+    # why not do the easy/fast thing?)
+    if PYPY:
+
+        def set_closure_cell(cell, value):
+            cell.__setstate__((value,))
+
+        return set_closure_cell
+
+    # Otherwise gotta do it the hard way.
+
+    # Create a function that will set its first cellvar to `value`.
+    def set_first_cellvar_to(value):
+        x = value
+        return
+
+        # This function will be eliminated as dead code, but
+        # not before its reference to `x` forces `x` to be
+        # represented as a closure cell rather than a local.
+        def force_x_to_be_a_cell():  # pragma: no cover
+            return x
+
+    try:
+        # Extract the code object and make sure our assumptions about
+        # the closure behavior are correct.
+        co = set_first_cellvar_to.__code__
+        if co.co_cellvars != ("x",) or co.co_freevars != ():
+            raise AssertionError  # pragma: no cover
+
+        # Convert this code object to a code object that sets the
+        # function's first _freevar_ (not cellvar) to the argument.
+        if sys.version_info >= (3, 8):
+
+            def set_closure_cell(cell, value):
+                cell.cell_contents = value
+
+        else:
+            args = [co.co_argcount]
+            args.append(co.co_kwonlyargcount)
+            args.extend(
+                [
+                    co.co_nlocals,
+                    co.co_stacksize,
+                    co.co_flags,
+                    co.co_code,
+                    co.co_consts,
+                    co.co_names,
+                    co.co_varnames,
+                    co.co_filename,
+                    co.co_name,
+                    co.co_firstlineno,
+                    co.co_lnotab,
+                    # These two arguments are reversed:
+                    co.co_cellvars,
+                    co.co_freevars,
+                ]
+            )
+            set_first_freevar_code = types.CodeType(*args)
+
+            def set_closure_cell(cell, value):
+                # Create a function using the set_first_freevar_code,
+                # whose first closure cell is `cell`. Calling it will
+                # change the value of that cell.
+                setter = types.FunctionType(
+                    set_first_freevar_code, {}, "setter", (), (cell,)
+                )
+                # And call it to set the cell.
+                setter(value)
+
+        # Make sure it works on this interpreter:
+        def make_func_with_cell():
+            x = None
+
+            def func():
+                return x  # pragma: no cover
+
+            return func
+
+        cell = make_func_with_cell().__closure__[0]
+        set_closure_cell(cell, 100)
+        if cell.cell_contents != 100:
+            raise AssertionError  # pragma: no cover
+
+    except Exception:
+        return just_warn
+    else:
+        return set_closure_cell
+
+
+set_closure_cell = make_set_closure_cell()
+
+# Thread-local global to track attrs instances which are already being repr'd.
+# This is needed because there is no other (thread-safe) way to pass info
+# about the instances that are already being repr'd through the call stack
+# in order to ensure we don't perform infinite recursion.
+#
+# For instance, if an instance contains a dict which contains that instance,
+# we need to know that we're already repr'ing the outside instance from within
+# the dict's repr() call.
+#
+# This lives here rather than in _make.py so that the functions in _make.py
+# don't have a direct reference to the thread-local in their globals dict.
+# If they have such a reference, it breaks cloudpickle.
+repr_context = threading.local()
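The closure-cell machinery added above is easier to see on a toy function. A minimal standalone sketch (CPython >= 3.8, where `cell_contents` is directly writable, which is exactly the fast path the code above takes; `make_adder` is invented for illustration):

def make_adder():
    base = 10

    def add(n):
        return base + n  # `base` is captured in a closure cell

    return add


add = make_adder()
cell = add.__closure__[0]   # the cell object holding `base`
print(add(1))               # 11
cell.cell_contents = 100    # what set_closure_cell() does on CPython >= 3.8
print(add(1))               # 101

On older CPythons the assignment to `cell_contents` is not allowed, which is why the code above falls back to synthesizing a setter function from a hand-built code object.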
@@ -1,4 +1,4 @@
-from __future__ import absolute_import, division, print_function
+# SPDX-License-Identifier: MIT
 
 
 __all__ = ["set_run_validators", "get_run_validators"]
@@ -9,6 +9,10 @@ from __future__ import absolute_import,
 def set_run_validators(run):
     """
     Set whether or not validators are run. By default, they are run.
+
+    .. deprecated:: 21.3.0 It will not be removed, but it also will not be
+        moved to new ``attrs`` namespace. Use `attrs.validators.set_disabled()`
+        instead.
     """
     if not isinstance(run, bool):
         raise TypeError("'run' must be bool.")
@@ -19,5 +23,9 @@ def set_run_validators(run):
 def get_run_validators():
     """
     Return whether or not validators are run.
+
+    .. deprecated:: 21.3.0 It will not be removed, but it also will not be
+        moved to new ``attrs`` namespace. Use `attrs.validators.get_disabled()`
+        instead.
     """
     return _run_validators
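The deprecated pair above keeps working; the docstrings just steer callers to the newer validators API. A quick sketch of both spellings (assuming an attrs release that ships `attrs.validators.set_disabled`, i.e. 21.3.0 or later):

import attr

attr.set_run_validators(False)            # legacy spelling, still supported
assert attr.get_run_validators() is False

import attrs

attrs.validators.set_disabled(True)       # preferred modern spelling
assert attrs.validators.get_disabled() is True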
@@ -1,14 +1,20 @@
-from __future__ import absolute_import, division, print_function
+# SPDX-License-Identifier: MIT
+
 
 import copy
 
-from ._compat import iteritems
-from ._make import NOTHING, fields, _obj_setattr
+from ._make import NOTHING, _obj_setattr, fields
 from .exceptions import AttrsAttributeNotFoundError
 
 
-def asdict(inst, recurse=True, filter=None, dict_factory=dict,
-           retain_collection_types=False):
+def asdict(
+    inst,
+    recurse=True,
+    filter=None,
+    dict_factory=dict,
+    retain_collection_types=False,
+    value_serializer=None,
+):
     """
     Return the ``attrs`` attribute values of *inst* as a dict.
 
@@ -17,9 +23,9 @@ def asdict(inst, recurse=True, filter=No
     :param inst: Instance of an ``attrs``-decorated class.
     :param bool recurse: Recurse into classes that are also
         ``attrs``-decorated.
     :param callable filter: A callable whose return code determines whether an
         attribute or element is included (``True``) or dropped (``False``). Is
-        called with the `attr.Attribute` as the first argument and the
+        called with the `attrs.Attribute` as the first argument and the
         value as the second argument.
     :param callable dict_factory: A callable to produce dictionaries from. For
         example, to produce ordered dictionaries instead of normal Python
@@ -27,6 +33,10 @@ def asdict(inst, recurse=True, filter=No
     :param bool retain_collection_types: Do not convert to ``list`` when
         encountering an attribute whose type is ``tuple`` or ``set``. Only
         meaningful if ``recurse`` is ``True``.
+    :param Optional[callable] value_serializer: A hook that is called for every
+        attribute or dict key/value. It receives the current instance, field
+        and value and must return the (updated) value. The hook is run *after*
+        the optional *filter* has been applied.
 
     :rtype: return type of *dict_factory*
 
@@ -35,6 +45,9 @@ def asdict(inst, recurse=True, filter=No
 
     .. versionadded:: 16.0.0 *dict_factory*
     .. versionadded:: 16.1.0 *retain_collection_types*
+    .. versionadded:: 20.3.0 *value_serializer*
+    .. versionadded:: 21.3.0 If a dict has a collection for a key, it is
+        serialized as a tuple.
     """
     attrs = fields(inst.__class__)
     rv = dict_factory()
@@ -42,24 +55,58 @@ def asdict(inst, recurse=True, filter=No
         v = getattr(inst, a.name)
         if filter is not None and not filter(a, v):
             continue
+
+        if value_serializer is not None:
+            v = value_serializer(inst, a, v)
+
         if recurse is True:
             if has(v.__class__):
-                rv[a.name] = asdict(v, recurse=True, filter=filter,
-                                    dict_factory=dict_factory)
-            elif isinstance(v, (tuple, list, set)):
+                rv[a.name] = asdict(
+                    v,
+                    recurse=True,
+                    filter=filter,
+                    dict_factory=dict_factory,
+                    retain_collection_types=retain_collection_types,
+                    value_serializer=value_serializer,
+                )
+            elif isinstance(v, (tuple, list, set, frozenset)):
                 cf = v.__class__ if retain_collection_types is True else list
-                rv[a.name] = cf([
-                    asdict(i, recurse=True, filter=filter,
-                           dict_factory=dict_factory)
-                    if has(i.__class__) else i
-                    for i in v
-                ])
+                rv[a.name] = cf(
+                    [
+                        _asdict_anything(
+                            i,
+                            is_key=False,
+                            filter=filter,
+                            dict_factory=dict_factory,
+                            retain_collection_types=retain_collection_types,
+                            value_serializer=value_serializer,
+                        )
+                        for i in v
+                    ]
+                )
             elif isinstance(v, dict):
                 df = dict_factory
-                rv[a.name] = df((
-                    asdict(kk, dict_factory=df) if has(kk.__class__) else kk,
-                    asdict(vv, dict_factory=df) if has(vv.__class__) else vv)
-                    for kk, vv in iteritems(v))
+                rv[a.name] = df(
+                    (
+                        _asdict_anything(
+                            kk,
+                            is_key=True,
+                            filter=filter,
+                            dict_factory=df,
+                            retain_collection_types=retain_collection_types,
+                            value_serializer=value_serializer,
+                        ),
+                        _asdict_anything(
+                            vv,
+                            is_key=False,
+                            filter=filter,
+                            dict_factory=df,
+                            retain_collection_types=retain_collection_types,
+                            value_serializer=value_serializer,
+                        ),
+                    )
+                    for kk, vv in v.items()
+                )
             else:
                 rv[a.name] = v
         else:
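The new *value_serializer* hook documented in the hunks above is the notable behavioral addition to `asdict`. A small usage sketch (assuming upstream attrs semantics; the `Event` class and `serialize` hook are invented for illustration):

import datetime

import attr


@attr.s(auto_attribs=True)
class Event:
    when: datetime.datetime


def serialize(instance, field, value):
    # Called for every attribute, after the optional filter has run.
    if isinstance(value, datetime.datetime):
        return value.isoformat()
    return value


e = Event(when=datetime.datetime(2020, 1, 1))
print(attr.asdict(e, value_serializer=serialize))
# -> {'when': '2020-01-01T00:00:00'}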
@@ -67,8 +114,86 @@ def asdict(inst, recurse=True, filter=No
     return rv
 
 
-def astuple(inst, recurse=True, filter=None, tuple_factory=tuple,
-            retain_collection_types=False):
+def _asdict_anything(
+    val,
+    is_key,
+    filter,
+    dict_factory,
+    retain_collection_types,
+    value_serializer,
+):
+    """
+    ``asdict`` only works on attrs instances, this works on anything.
+    """
+    if getattr(val.__class__, "__attrs_attrs__", None) is not None:
+        # Attrs class.
+        rv = asdict(
+            val,
+            recurse=True,
+            filter=filter,
+            dict_factory=dict_factory,
+            retain_collection_types=retain_collection_types,
+            value_serializer=value_serializer,
+        )
+    elif isinstance(val, (tuple, list, set, frozenset)):
+        if retain_collection_types is True:
+            cf = val.__class__
+        elif is_key:
+            cf = tuple
+        else:
+            cf = list
+
+        rv = cf(
+            [
+                _asdict_anything(
+                    i,
+                    is_key=False,
+                    filter=filter,
+                    dict_factory=dict_factory,
+                    retain_collection_types=retain_collection_types,
+                    value_serializer=value_serializer,
+                )
+                for i in val
+            ]
+        )
+    elif isinstance(val, dict):
+        df = dict_factory
+        rv = df(
+            (
+                _asdict_anything(
+                    kk,
+                    is_key=True,
+                    filter=filter,
+                    dict_factory=df,
+                    retain_collection_types=retain_collection_types,
+                    value_serializer=value_serializer,
+                ),
+                _asdict_anything(
+                    vv,
+                    is_key=False,
+                    filter=filter,
+                    dict_factory=df,
+                    retain_collection_types=retain_collection_types,
+                    value_serializer=value_serializer,
+                ),
+            )
+            for kk, vv in val.items()
+        )
+    else:
+        rv = val
+        if value_serializer is not None:
+            rv = value_serializer(None, None, rv)
+
+    return rv
+
+
+def astuple(
+    inst,
+    recurse=True,
+    filter=None,
+    tuple_factory=tuple,
+    retain_collection_types=False,
+):
     """
     Return the ``attrs`` attribute values of *inst* as a tuple.
 
@@ -79,7 +204,7 @@ def astuple(inst, recurse=True, filter=N
         ``attrs``-decorated.
     :param callable filter: A callable whose return code determines whether an
         attribute or element is included (``True``) or dropped (``False``). Is
-        called with the `attr.Attribute` as the first argument and the
+        called with the `attrs.Attribute` as the first argument and the
         value as the second argument.
     :param callable tuple_factory: A callable to produce tuples from. For
         example, to produce lists instead of tuples.
@@ -104,38 +229,61 @@ def astuple(inst, recurse=True, filter=N
             continue
         if recurse is True:
             if has(v.__class__):
-                rv.append(astuple(v, recurse=True, filter=filter,
-                                  tuple_factory=tuple_factory,
-                                  retain_collection_types=retain))
-            elif isinstance(v, (tuple, list, set)):
+                rv.append(
+                    astuple(
+                        v,
+                        recurse=True,
+                        filter=filter,
+                        tuple_factory=tuple_factory,
+                        retain_collection_types=retain,
+                    )
+                )
+            elif isinstance(v, (tuple, list, set, frozenset)):
                 cf = v.__class__ if retain is True else list
-                rv.append(cf([
-                    astuple(j, recurse=True, filter=filter,
-                            tuple_factory=tuple_factory,
-                            retain_collection_types=retain)
-                    if has(j.__class__) else j
-                    for j in v
-                ]))
+                rv.append(
+                    cf(
+                        [
+                            astuple(
+                                j,
+                                recurse=True,
+                                filter=filter,
+                                tuple_factory=tuple_factory,
+                                retain_collection_types=retain,
+                            )
+                            if has(j.__class__)
+                            else j
+                            for j in v
+                        ]
+                    )
+                )
             elif isinstance(v, dict):
                 df = v.__class__ if retain is True else dict
-                rv.append(df(
+                rv.append(
+                    df(
                         (
                             astuple(
                                 kk,
                                 tuple_factory=tuple_factory,
-                                retain_collection_types=retain
+                                retain_collection_types=retain,
-                            ) if has(kk.__class__) else kk,
+                            )
+                            if has(kk.__class__)
+                            else kk,
                             astuple(
                                 vv,
                                 tuple_factory=tuple_factory,
-                                retain_collection_types=retain
+                                retain_collection_types=retain,
-                            ) if has(vv.__class__) else vv
+                            )
+                            if has(vv.__class__)
+                            else vv,
                         )
-                    for kk, vv in iteritems(v)))
+                        for kk, vv in v.items()
+                    )
+                )
             else:
                 rv.append(v)
         else:
             rv.append(v)
+
     return rv if tuple_factory is list else tuple_factory(rv)
 
 
@@ -146,7 +294,7 @@ def has(cls):
     :param type cls: Class to introspect.
     :raise TypeError: If *cls* is not a class.
 
-    :rtype: :class:`bool`
+    :rtype: bool
     """
     return getattr(cls, "__attrs_attrs__", None) is not None
 
@@ -166,19 +314,26 @@ def assoc(inst, **changes):
         class.
 
     .. deprecated:: 17.1.0
-        Use :func:`evolve` instead.
+        Use `attrs.evolve` instead if you can.
+        This function will not be removed du to the slightly different approach
+        compared to `attrs.evolve`.
     """
     import warnings
-    warnings.warn("assoc is deprecated and will be removed after 2018/01.",
-                  DeprecationWarning)
+
+    warnings.warn(
+        "assoc is deprecated and will be removed after 2018/01.",
+        DeprecationWarning,
+        stacklevel=2,
+    )
     new = copy.copy(inst)
     attrs = fields(inst.__class__)
-    for k, v in iteritems(changes):
+    for k, v in changes.items():
         a = getattr(attrs, k, NOTHING)
         if a is NOTHING:
             raise AttrsAttributeNotFoundError(
-                "{k} is not an attrs attribute on {cl}."
-                .format(k=k, cl=new.__class__)
+                "{k} is not an attrs attribute on {cl}.".format(
+                    k=k, cl=new.__class__
+                )
             )
         _obj_setattr(new, k, v)
     return new
@@ -209,4 +364,57 @@ def evolve(inst, **changes):
         init_name = attr_name if attr_name[0] != "_" else attr_name[1:]
         if init_name not in changes:
            changes[init_name] = getattr(inst, attr_name)
+
     return cls(**changes)
+
+
+def resolve_types(cls, globalns=None, localns=None, attribs=None):
+    """
+    Resolve any strings and forward annotations in type annotations.
+
+    This is only required if you need concrete types in `Attribute`'s *type*
+    field. In other words, you don't need to resolve your types if you only
+    use them for static type checking.
+
+    With no arguments, names will be looked up in the module in which the class
+    was created. If this is not what you want, e.g. if the name only exists
+    inside a method, you may pass *globalns* or *localns* to specify other
+    dictionaries in which to look up these names. See the docs of
+    `typing.get_type_hints` for more details.
+
+    :param type cls: Class to resolve.
+    :param Optional[dict] globalns: Dictionary containing global variables.
+    :param Optional[dict] localns: Dictionary containing local variables.
+    :param Optional[list] attribs: List of attribs for the given class.
+        This is necessary when calling from inside a ``field_transformer``
+        since *cls* is not an ``attrs`` class yet.
+
+    :raise TypeError: If *cls* is not a class.
+    :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
+        class and you didn't pass any attribs.
+    :raise NameError: If types cannot be resolved because of missing variables.
+
+    :returns: *cls* so you can use this function also as a class decorator.
+        Please note that you have to apply it **after** `attrs.define`. That
+        means the decorator has to come in the line **before** `attrs.define`.
+
+    .. versionadded:: 20.1.0
+    .. versionadded:: 21.1.0 *attribs*
+
+    """
+    # Since calling get_type_hints is expensive we cache whether we've
+    # done it already.
+    if getattr(cls, "__attrs_types_resolved__", None) != cls:
+        import typing
+
+        hints = typing.get_type_hints(cls, globalns=globalns, localns=localns)
+        for field in fields(cls) if attribs is None else attribs:
+            if field.name in hints:
+                # Since fields have been frozen we must work around it.
+                _obj_setattr(field, "type", hints[field.name])
+        # We store the class we resolved so that subclasses know they haven't
+        # been resolved.
+        cls.__attrs_types_resolved__ = cls
+
+    # Return the class so you can use it as a decorator too.
+    return cls
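`resolve_types`, added above, is mostly useful for string or self-referential annotations. A minimal sketch (assuming upstream attrs semantics; the `Node` class is invented for illustration):

import typing

import attr


@attr.s(auto_attribs=True)
class Node:
    value: int
    parent: "typing.Optional[Node]" = None  # stays a string until resolved


attr.resolve_types(Node)  # looks names up in this module's globals by default
print(attr.fields(Node).parent.type)  # resolved to Optional[Node], a real type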
[The remaining files in this changeset were truncated by the viewer ("NO CONTENT"). The only recoverable details: rust/hg-core/src/config.rs was renamed to rust/hg-core/src/config/mod.rs, rust/hg-core/src/revlog.rs was renamed to rust/hg-core/src/revlog/mod.rs, and two files were removed.]