branching: merge stable into default...
marmoute
r51069:596a6b9b merge default
@@ -0,0 +1,33
1 #!/bin/bash
2
3 set -e
4 set -u
5
6 # Find the python3 setup that would run pytype
7 PYTYPE=`which pytype`
8 PYTHON3=`head -n1 ${PYTYPE} | sed -e 's/#!//'`
9
10 # Existing stubs that pytype processes live here
11 TYPESHED=$(${PYTHON3} -c "import pytype; print(pytype.__path__[0])")/typeshed/stubs
12 HG_STUBS=${TYPESHED}/mercurial
13
14 echo "Patching typeshed at $HG_STUBS"
15
16 rm -rf ${HG_STUBS}
17 mkdir -p ${HG_STUBS}
18
19 cat > ${HG_STUBS}/METADATA.toml <<EOF
20 version = "0.1"
21 EOF
22
23
24 mkdir -p ${HG_STUBS}/mercurial/cext ${HG_STUBS}/mercurial/thirdparty/attr
25
26 touch ${HG_STUBS}/mercurial/__init__.pyi
27 touch ${HG_STUBS}/mercurial/cext/__init__.pyi
28 touch ${HG_STUBS}/mercurial/thirdparty/__init__.pyi
29
30 ln -sf $(hg root)/mercurial/cext/*.{pyi,typed} \
31 ${HG_STUBS}/mercurial/cext
32 ln -sf $(hg root)/mercurial/thirdparty/attr/*.{pyi,typed} \
33 ${HG_STUBS}/mercurial/thirdparty/attr
@@ -0,0 +1,1
1 partial
@@ -0,0 +1,486
1 import sys
2
3 from typing import (
4 Any,
5 Callable,
6 ClassVar,
7 Dict,
8 Generic,
9 List,
10 Mapping,
11 Optional,
12 Protocol,
13 Sequence,
14 Tuple,
15 Type,
16 TypeVar,
17 Union,
18 overload,
19 )
20
21 # `import X as X` is required to make these public
22 from . import converters as converters
23 from . import exceptions as exceptions
24 from . import filters as filters
25 from . import setters as setters
26 from . import validators as validators
27 from ._cmp import cmp_using as cmp_using
28 from ._version_info import VersionInfo
29
30 __version__: str
31 __version_info__: VersionInfo
32 __title__: str
33 __description__: str
34 __url__: str
35 __uri__: str
36 __author__: str
37 __email__: str
38 __license__: str
39 __copyright__: str
40
41 _T = TypeVar("_T")
42 _C = TypeVar("_C", bound=type)
43
44 _EqOrderType = Union[bool, Callable[[Any], Any]]
45 _ValidatorType = Callable[[Any, Attribute[_T], _T], Any]
46 _ConverterType = Callable[[Any], Any]
47 _FilterType = Callable[[Attribute[_T], _T], bool]
48 _ReprType = Callable[[Any], str]
49 _ReprArgType = Union[bool, _ReprType]
50 _OnSetAttrType = Callable[[Any, Attribute[Any], Any], Any]
51 _OnSetAttrArgType = Union[
52 _OnSetAttrType, List[_OnSetAttrType], setters._NoOpType
53 ]
54 _FieldTransformer = Callable[
55 [type, List[Attribute[Any]]], List[Attribute[Any]]
56 ]
57 # FIXME: in reality, if multiple validators are passed they must be in a list
58 # or tuple, but those are invariant and so would prevent subtypes of
59 # _ValidatorType from working when passed in a list or tuple.
60 _ValidatorArgType = Union[_ValidatorType[_T], Sequence[_ValidatorType[_T]]]
61
62 # A protocol to be able to statically accept an attrs class.
63 class AttrsInstance(Protocol):
64 __attrs_attrs__: ClassVar[Any]
65
66 # _make --
67
68 NOTHING: object
69
70 # NOTE: Factory lies about its return type to make this possible:
71 # `x: List[int] # = Factory(list)`
72 # Work around mypy issue #4554 in the common case by using an overload.
73 if sys.version_info >= (3, 8):
74 from typing import Literal
75 @overload
76 def Factory(factory: Callable[[], _T]) -> _T: ...
77 @overload
78 def Factory(
79 factory: Callable[[Any], _T],
80 takes_self: Literal[True],
81 ) -> _T: ...
82 @overload
83 def Factory(
84 factory: Callable[[], _T],
85 takes_self: Literal[False],
86 ) -> _T: ...
87
88 else:
89 @overload
90 def Factory(factory: Callable[[], _T]) -> _T: ...
91 @overload
92 def Factory(
93 factory: Union[Callable[[Any], _T], Callable[[], _T]],
94 takes_self: bool = ...,
95 ) -> _T: ...
96
97 # Static type inference support via __dataclass_transform__ implemented as per:
98 # https://github.com/microsoft/pyright/blob/1.1.135/specs/dataclass_transforms.md
99 # This annotation must be applied to all overloads of "define" and "attrs"
100 #
101 # NOTE: This is a typing construct and does not exist at runtime. Extensions
102 # wrapping attrs decorators should declare a separate __dataclass_transform__
103 # signature in the extension module using the specification linked above to
104 # provide pyright support.
105 def __dataclass_transform__(
106 *,
107 eq_default: bool = True,
108 order_default: bool = False,
109 kw_only_default: bool = False,
110 field_descriptors: Tuple[Union[type, Callable[..., Any]], ...] = (()),
111 ) -> Callable[[_T], _T]: ...
112
113 class Attribute(Generic[_T]):
114 name: str
115 default: Optional[_T]
116 validator: Optional[_ValidatorType[_T]]
117 repr: _ReprArgType
118 cmp: _EqOrderType
119 eq: _EqOrderType
120 order: _EqOrderType
121 hash: Optional[bool]
122 init: bool
123 converter: Optional[_ConverterType]
124 metadata: Dict[Any, Any]
125 type: Optional[Type[_T]]
126 kw_only: bool
127 on_setattr: _OnSetAttrType
128 def evolve(self, **changes: Any) -> "Attribute[Any]": ...
129
130 # NOTE: We had several choices for the annotation to use for type arg:
131 # 1) Type[_T]
132 # - Pros: Handles simple cases correctly
133 # - Cons: Might produce less informative errors in the case of conflicting
134 # TypeVars e.g. `attr.ib(default='bad', type=int)`
135 # 2) Callable[..., _T]
136 # - Pros: Better error messages than #1 for conflicting TypeVars
137 # - Cons: Terrible error messages for validator checks.
138 # e.g. attr.ib(type=int, validator=validate_str)
139 # -> error: Cannot infer function type argument
140 # 3) type (and do all of the work in the mypy plugin)
141 # - Pros: Simple here, and we could customize the plugin with our own errors.
142 # - Cons: Would need to write mypy plugin code to handle all the cases.
143 # We chose option #1.
144
145 # `attr` lies about its return type to make the following possible:
146 # attr() -> Any
147 # attr(8) -> int
148 # attr(validator=<some callable>) -> Whatever the callable expects.
149 # This makes this type of assignments possible:
150 # x: int = attr(8)
151 #
152 # This form catches explicit None or no default but with no other arguments
153 # returns Any.
154 @overload
155 def attrib(
156 default: None = ...,
157 validator: None = ...,
158 repr: _ReprArgType = ...,
159 cmp: Optional[_EqOrderType] = ...,
160 hash: Optional[bool] = ...,
161 init: bool = ...,
162 metadata: Optional[Mapping[Any, Any]] = ...,
163 type: None = ...,
164 converter: None = ...,
165 factory: None = ...,
166 kw_only: bool = ...,
167 eq: Optional[_EqOrderType] = ...,
168 order: Optional[_EqOrderType] = ...,
169 on_setattr: Optional[_OnSetAttrArgType] = ...,
170 ) -> Any: ...
171
172 # This form catches an explicit None or no default and infers the type from the
173 # other arguments.
174 @overload
175 def attrib(
176 default: None = ...,
177 validator: Optional[_ValidatorArgType[_T]] = ...,
178 repr: _ReprArgType = ...,
179 cmp: Optional[_EqOrderType] = ...,
180 hash: Optional[bool] = ...,
181 init: bool = ...,
182 metadata: Optional[Mapping[Any, Any]] = ...,
183 type: Optional[Type[_T]] = ...,
184 converter: Optional[_ConverterType] = ...,
185 factory: Optional[Callable[[], _T]] = ...,
186 kw_only: bool = ...,
187 eq: Optional[_EqOrderType] = ...,
188 order: Optional[_EqOrderType] = ...,
189 on_setattr: Optional[_OnSetAttrArgType] = ...,
190 ) -> _T: ...
191
192 # This form catches an explicit default argument.
193 @overload
194 def attrib(
195 default: _T,
196 validator: Optional[_ValidatorArgType[_T]] = ...,
197 repr: _ReprArgType = ...,
198 cmp: Optional[_EqOrderType] = ...,
199 hash: Optional[bool] = ...,
200 init: bool = ...,
201 metadata: Optional[Mapping[Any, Any]] = ...,
202 type: Optional[Type[_T]] = ...,
203 converter: Optional[_ConverterType] = ...,
204 factory: Optional[Callable[[], _T]] = ...,
205 kw_only: bool = ...,
206 eq: Optional[_EqOrderType] = ...,
207 order: Optional[_EqOrderType] = ...,
208 on_setattr: Optional[_OnSetAttrArgType] = ...,
209 ) -> _T: ...
210
211 # This form covers type=non-Type: e.g. forward references (str), Any
212 @overload
213 def attrib(
214 default: Optional[_T] = ...,
215 validator: Optional[_ValidatorArgType[_T]] = ...,
216 repr: _ReprArgType = ...,
217 cmp: Optional[_EqOrderType] = ...,
218 hash: Optional[bool] = ...,
219 init: bool = ...,
220 metadata: Optional[Mapping[Any, Any]] = ...,
221 type: object = ...,
222 converter: Optional[_ConverterType] = ...,
223 factory: Optional[Callable[[], _T]] = ...,
224 kw_only: bool = ...,
225 eq: Optional[_EqOrderType] = ...,
226 order: Optional[_EqOrderType] = ...,
227 on_setattr: Optional[_OnSetAttrArgType] = ...,
228 ) -> Any: ...
229 @overload
230 def field(
231 *,
232 default: None = ...,
233 validator: None = ...,
234 repr: _ReprArgType = ...,
235 hash: Optional[bool] = ...,
236 init: bool = ...,
237 metadata: Optional[Mapping[Any, Any]] = ...,
238 converter: None = ...,
239 factory: None = ...,
240 kw_only: bool = ...,
241 eq: Optional[bool] = ...,
242 order: Optional[bool] = ...,
243 on_setattr: Optional[_OnSetAttrArgType] = ...,
244 ) -> Any: ...
245
246 # This form catches an explicit None or no default and infers the type from the
247 # other arguments.
248 @overload
249 def field(
250 *,
251 default: None = ...,
252 validator: Optional[_ValidatorArgType[_T]] = ...,
253 repr: _ReprArgType = ...,
254 hash: Optional[bool] = ...,
255 init: bool = ...,
256 metadata: Optional[Mapping[Any, Any]] = ...,
257 converter: Optional[_ConverterType] = ...,
258 factory: Optional[Callable[[], _T]] = ...,
259 kw_only: bool = ...,
260 eq: Optional[_EqOrderType] = ...,
261 order: Optional[_EqOrderType] = ...,
262 on_setattr: Optional[_OnSetAttrArgType] = ...,
263 ) -> _T: ...
264
265 # This form catches an explicit default argument.
266 @overload
267 def field(
268 *,
269 default: _T,
270 validator: Optional[_ValidatorArgType[_T]] = ...,
271 repr: _ReprArgType = ...,
272 hash: Optional[bool] = ...,
273 init: bool = ...,
274 metadata: Optional[Mapping[Any, Any]] = ...,
275 converter: Optional[_ConverterType] = ...,
276 factory: Optional[Callable[[], _T]] = ...,
277 kw_only: bool = ...,
278 eq: Optional[_EqOrderType] = ...,
279 order: Optional[_EqOrderType] = ...,
280 on_setattr: Optional[_OnSetAttrArgType] = ...,
281 ) -> _T: ...
282
283 # This form covers type=non-Type: e.g. forward references (str), Any
284 @overload
285 def field(
286 *,
287 default: Optional[_T] = ...,
288 validator: Optional[_ValidatorArgType[_T]] = ...,
289 repr: _ReprArgType = ...,
290 hash: Optional[bool] = ...,
291 init: bool = ...,
292 metadata: Optional[Mapping[Any, Any]] = ...,
293 converter: Optional[_ConverterType] = ...,
294 factory: Optional[Callable[[], _T]] = ...,
295 kw_only: bool = ...,
296 eq: Optional[_EqOrderType] = ...,
297 order: Optional[_EqOrderType] = ...,
298 on_setattr: Optional[_OnSetAttrArgType] = ...,
299 ) -> Any: ...
300 @overload
301 @__dataclass_transform__(order_default=True, field_descriptors=(attrib, field))
302 def attrs(
303 maybe_cls: _C,
304 these: Optional[Dict[str, Any]] = ...,
305 repr_ns: Optional[str] = ...,
306 repr: bool = ...,
307 cmp: Optional[_EqOrderType] = ...,
308 hash: Optional[bool] = ...,
309 init: bool = ...,
310 slots: bool = ...,
311 frozen: bool = ...,
312 weakref_slot: bool = ...,
313 str: bool = ...,
314 auto_attribs: bool = ...,
315 kw_only: bool = ...,
316 cache_hash: bool = ...,
317 auto_exc: bool = ...,
318 eq: Optional[_EqOrderType] = ...,
319 order: Optional[_EqOrderType] = ...,
320 auto_detect: bool = ...,
321 collect_by_mro: bool = ...,
322 getstate_setstate: Optional[bool] = ...,
323 on_setattr: Optional[_OnSetAttrArgType] = ...,
324 field_transformer: Optional[_FieldTransformer] = ...,
325 match_args: bool = ...,
326 ) -> _C: ...
327 @overload
328 @__dataclass_transform__(order_default=True, field_descriptors=(attrib, field))
329 def attrs(
330 maybe_cls: None = ...,
331 these: Optional[Dict[str, Any]] = ...,
332 repr_ns: Optional[str] = ...,
333 repr: bool = ...,
334 cmp: Optional[_EqOrderType] = ...,
335 hash: Optional[bool] = ...,
336 init: bool = ...,
337 slots: bool = ...,
338 frozen: bool = ...,
339 weakref_slot: bool = ...,
340 str: bool = ...,
341 auto_attribs: bool = ...,
342 kw_only: bool = ...,
343 cache_hash: bool = ...,
344 auto_exc: bool = ...,
345 eq: Optional[_EqOrderType] = ...,
346 order: Optional[_EqOrderType] = ...,
347 auto_detect: bool = ...,
348 collect_by_mro: bool = ...,
349 getstate_setstate: Optional[bool] = ...,
350 on_setattr: Optional[_OnSetAttrArgType] = ...,
351 field_transformer: Optional[_FieldTransformer] = ...,
352 match_args: bool = ...,
353 ) -> Callable[[_C], _C]: ...
354 @overload
355 @__dataclass_transform__(field_descriptors=(attrib, field))
356 def define(
357 maybe_cls: _C,
358 *,
359 these: Optional[Dict[str, Any]] = ...,
360 repr: bool = ...,
361 hash: Optional[bool] = ...,
362 init: bool = ...,
363 slots: bool = ...,
364 frozen: bool = ...,
365 weakref_slot: bool = ...,
366 str: bool = ...,
367 auto_attribs: bool = ...,
368 kw_only: bool = ...,
369 cache_hash: bool = ...,
370 auto_exc: bool = ...,
371 eq: Optional[bool] = ...,
372 order: Optional[bool] = ...,
373 auto_detect: bool = ...,
374 getstate_setstate: Optional[bool] = ...,
375 on_setattr: Optional[_OnSetAttrArgType] = ...,
376 field_transformer: Optional[_FieldTransformer] = ...,
377 match_args: bool = ...,
378 ) -> _C: ...
379 @overload
380 @__dataclass_transform__(field_descriptors=(attrib, field))
381 def define(
382 maybe_cls: None = ...,
383 *,
384 these: Optional[Dict[str, Any]] = ...,
385 repr: bool = ...,
386 hash: Optional[bool] = ...,
387 init: bool = ...,
388 slots: bool = ...,
389 frozen: bool = ...,
390 weakref_slot: bool = ...,
391 str: bool = ...,
392 auto_attribs: bool = ...,
393 kw_only: bool = ...,
394 cache_hash: bool = ...,
395 auto_exc: bool = ...,
396 eq: Optional[bool] = ...,
397 order: Optional[bool] = ...,
398 auto_detect: bool = ...,
399 getstate_setstate: Optional[bool] = ...,
400 on_setattr: Optional[_OnSetAttrArgType] = ...,
401 field_transformer: Optional[_FieldTransformer] = ...,
402 match_args: bool = ...,
403 ) -> Callable[[_C], _C]: ...
404
405 mutable = define
406 frozen = define # they differ only in their defaults
407
408 def fields(cls: Type[AttrsInstance]) -> Any: ...
409 def fields_dict(cls: Type[AttrsInstance]) -> Dict[str, Attribute[Any]]: ...
410 def validate(inst: AttrsInstance) -> None: ...
411 def resolve_types(
412 cls: _C,
413 globalns: Optional[Dict[str, Any]] = ...,
414 localns: Optional[Dict[str, Any]] = ...,
415 attribs: Optional[List[Attribute[Any]]] = ...,
416 ) -> _C: ...
417
418 # TODO: add support for returning a proper attrs class from the mypy plugin
419 # we use Any instead of _CountingAttr so that e.g. `make_class('Foo',
420 # [attr.ib()])` is valid
421 def make_class(
422 name: str,
423 attrs: Union[List[str], Tuple[str, ...], Dict[str, Any]],
424 bases: Tuple[type, ...] = ...,
425 repr_ns: Optional[str] = ...,
426 repr: bool = ...,
427 cmp: Optional[_EqOrderType] = ...,
428 hash: Optional[bool] = ...,
429 init: bool = ...,
430 slots: bool = ...,
431 frozen: bool = ...,
432 weakref_slot: bool = ...,
433 str: bool = ...,
434 auto_attribs: bool = ...,
435 kw_only: bool = ...,
436 cache_hash: bool = ...,
437 auto_exc: bool = ...,
438 eq: Optional[_EqOrderType] = ...,
439 order: Optional[_EqOrderType] = ...,
440 collect_by_mro: bool = ...,
441 on_setattr: Optional[_OnSetAttrArgType] = ...,
442 field_transformer: Optional[_FieldTransformer] = ...,
443 ) -> type: ...
444
445 # _funcs --
446
447 # TODO: add support for returning TypedDict from the mypy plugin
448 # FIXME: asdict/astuple do not honor their factory args. Waiting on one of
449 # these:
450 # https://github.com/python/mypy/issues/4236
451 # https://github.com/python/typing/issues/253
452 # XXX: remember to fix attrs.asdict/astuple too!
453 def asdict(
454 inst: AttrsInstance,
455 recurse: bool = ...,
456 filter: Optional[_FilterType[Any]] = ...,
457 dict_factory: Type[Mapping[Any, Any]] = ...,
458 retain_collection_types: bool = ...,
459 value_serializer: Optional[
460 Callable[[type, Attribute[Any], Any], Any]
461 ] = ...,
462 tuple_keys: Optional[bool] = ...,
463 ) -> Dict[str, Any]: ...
464
465 # TODO: add support for returning NamedTuple from the mypy plugin
466 def astuple(
467 inst: AttrsInstance,
468 recurse: bool = ...,
469 filter: Optional[_FilterType[Any]] = ...,
470 tuple_factory: Type[Sequence[Any]] = ...,
471 retain_collection_types: bool = ...,
472 ) -> Tuple[Any, ...]: ...
473 def has(cls: type) -> bool: ...
474 def assoc(inst: _T, **changes: Any) -> _T: ...
475 def evolve(inst: _T, **changes: Any) -> _T: ...
476
477 # _config --
478
479 def set_run_validators(run: bool) -> None: ...
480 def get_run_validators() -> bool: ...
481
482 # aliases --
483
484 s = attributes = attrs
485 ib = attr = attrib
486 dataclass = attrs # Technically, partial(attrs, auto_attribs=True) ;)
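
The overloads above are what make the "return-type lies" of `Factory` and `attrib` useful in practice. A minimal sketch of the effect, assuming the attrs package is importable as `attr` (the class and field names are illustrative only):

    import attr
    from attr import Factory

    @attr.s(auto_attribs=True)
    class Config:
        # Factory "lies" about its return type, so the annotation stays clean:
        tags: list = Factory(list)
        # takes_self=True hands the partially-initialized instance to the factory:
        summary: str = Factory(lambda self: ",".join(self.tags), takes_self=True)

    c = Config()
    assert c.tags == [] and c.summary == ""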
@@ -0,0 +1,155
1 # SPDX-License-Identifier: MIT
2
3
4 import functools
5 import types
6
7 from ._make import _make_ne
8
9
10 _operation_names = {"eq": "==", "lt": "<", "le": "<=", "gt": ">", "ge": ">="}
11
12
13 def cmp_using(
14 eq=None,
15 lt=None,
16 le=None,
17 gt=None,
18 ge=None,
19 require_same_type=True,
20 class_name="Comparable",
21 ):
22 """
23 Create a class that can be passed into `attr.ib`'s ``eq``, ``order``, and
24 ``cmp`` arguments to customize field comparison.
25
26 The resulting class will have a full set of ordering methods if
27 at least one of ``{lt, le, gt, ge}`` and ``eq`` are provided.
28
29 :param Optional[callable] eq: `callable` used to evaluate equality
30 of two objects.
31 :param Optional[callable] lt: `callable` used to evaluate whether
32 one object is less than another object.
33 :param Optional[callable] le: `callable` used to evaluate whether
34 one object is less than or equal to another object.
35 :param Optional[callable] gt: `callable` used to evaluate whether
36 one object is greater than another object.
37 :param Optional[callable] ge: `callable` used to evaluate whether
38 one object is greater than or equal to another object.
39
40 :param bool require_same_type: When `True`, equality and ordering methods
41 will return `NotImplemented` if objects are not of the same type.
42
43 :param Optional[str] class_name: Name of class. Defaults to 'Comparable'.
44
45 See `comparison` for more details.
46
47 .. versionadded:: 21.1.0
48 """
49
50 body = {
51 "__slots__": ["value"],
52 "__init__": _make_init(),
53 "_requirements": [],
54 "_is_comparable_to": _is_comparable_to,
55 }
56
57 # Add operations.
58 num_order_functions = 0
59 has_eq_function = False
60
61 if eq is not None:
62 has_eq_function = True
63 body["__eq__"] = _make_operator("eq", eq)
64 body["__ne__"] = _make_ne()
65
66 if lt is not None:
67 num_order_functions += 1
68 body["__lt__"] = _make_operator("lt", lt)
69
70 if le is not None:
71 num_order_functions += 1
72 body["__le__"] = _make_operator("le", le)
73
74 if gt is not None:
75 num_order_functions += 1
76 body["__gt__"] = _make_operator("gt", gt)
77
78 if ge is not None:
79 num_order_functions += 1
80 body["__ge__"] = _make_operator("ge", ge)
81
82 type_ = types.new_class(
83 class_name, (object,), {}, lambda ns: ns.update(body)
84 )
85
86 # Add same type requirement.
87 if require_same_type:
88 type_._requirements.append(_check_same_type)
89
90 # Add total ordering if at least one operation was defined.
91 if 0 < num_order_functions < 4:
92 if not has_eq_function:
93 # functools.total_ordering requires __eq__ to be defined,
94 # so raise early error here to keep a nice stack.
95 raise ValueError(
96 "eq must be define is order to complete ordering from "
97 "lt, le, gt, ge."
98 )
99 type_ = functools.total_ordering(type_)
100
101 return type_
102
103
104 def _make_init():
105 """
106 Create __init__ method.
107 """
108
109 def __init__(self, value):
110 """
111 Initialize object with *value*.
112 """
113 self.value = value
114
115 return __init__
116
117
118 def _make_operator(name, func):
119 """
120 Create operator method.
121 """
122
123 def method(self, other):
124 if not self._is_comparable_to(other):
125 return NotImplemented
126
127 result = func(self.value, other.value)
128 if result is NotImplemented:
129 return NotImplemented
130
131 return result
132
133 method.__name__ = "__%s__" % (name,)
134 method.__doc__ = "Return a %s b. Computed by attrs." % (
135 _operation_names[name],
136 )
137
138 return method
139
140
141 def _is_comparable_to(self, other):
142 """
143 Check whether `other` is comparable to `self`.
144 """
145 for func in self._requirements:
146 if not func(self, other):
147 return False
148 return True
149
150
151 def _check_same_type(self, other):
152 """
153 Return True if *self* and *other* are of the same type, False otherwise.
154 """
155 return other.value.__class__ is self.value.__class__
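
To make the control flow above concrete, here is a small usage sketch (assuming attrs >= 21.1; `CIStr` and `Tag` are illustrative names). Providing ``eq`` plus a single order method is enough, since ``functools.total_ordering`` derives the rest:

    import attr
    from attr import cmp_using

    # Case-insensitive comparison class for a single field.
    CIStr = cmp_using(
        eq=lambda a, b: a.lower() == b.lower(),
        lt=lambda a, b: a.lower() < b.lower(),
        class_name="CIStr",
    )

    @attr.s
    class Tag:
        name = attr.ib(eq=CIStr, order=CIStr)

    assert Tag("Foo") == Tag("foo")
    assert Tag("bar") < Tag("FOO")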
@@ -0,0 +1,13
1 from typing import Any, Callable, Optional, Type
2
3 _CompareWithType = Callable[[Any, Any], bool]
4
5 def cmp_using(
6 eq: Optional[_CompareWithType],
7 lt: Optional[_CompareWithType],
8 le: Optional[_CompareWithType],
9 gt: Optional[_CompareWithType],
10 ge: Optional[_CompareWithType],
11 require_same_type: bool,
12 class_name: str,
13 ) -> Type: ...
@@ -0,0 +1,220
1 # SPDX-License-Identifier: MIT
2
3 """
4 These are Python 3.6+-only and keyword-only APIs that call `attr.s` and
5 `attr.ib` with different default values.
6 """
7
8
9 from functools import partial
10
11 from . import setters
12 from ._funcs import asdict as _asdict
13 from ._funcs import astuple as _astuple
14 from ._make import (
15 NOTHING,
16 _frozen_setattrs,
17 _ng_default_on_setattr,
18 attrib,
19 attrs,
20 )
21 from .exceptions import UnannotatedAttributeError
22
23
24 def define(
25 maybe_cls=None,
26 *,
27 these=None,
28 repr=None,
29 hash=None,
30 init=None,
31 slots=True,
32 frozen=False,
33 weakref_slot=True,
34 str=False,
35 auto_attribs=None,
36 kw_only=False,
37 cache_hash=False,
38 auto_exc=True,
39 eq=None,
40 order=False,
41 auto_detect=True,
42 getstate_setstate=None,
43 on_setattr=None,
44 field_transformer=None,
45 match_args=True,
46 ):
47 r"""
48 Define an ``attrs`` class.
49
50 Differences from the classic `attr.s` that it uses underneath:
51
52 - Automatically detect whether or not *auto_attribs* should be `True` (c.f.
53 *auto_attribs* parameter).
54 - If *frozen* is `False`, run converters and validators when setting an
55 attribute by default.
56 - *slots=True*
57
58 .. caution::
59
60 Usually this has only upsides and few visible effects in everyday
61 programming. But it *can* lead to some surprising behaviors, so please
62 make sure to read :term:`slotted classes`.
63 - *auto_exc=True*
64 - *auto_detect=True*
65 - *order=False*
66 - Some options that were only relevant on Python 2 or were kept around for
67 backwards-compatibility have been removed.
68
69 Please note that these are all defaults and you can change them as you
70 wish.
71
72 :param Optional[bool] auto_attribs: If set to `True` or `False`, it behaves
73 exactly like `attr.s`. If left `None`, `attr.s` will try to guess:
74
75 1. If any attributes are annotated and no unannotated `attrs.fields`\ s
76 are found, it assumes *auto_attribs=True*.
77 2. Otherwise it assumes *auto_attribs=False* and tries to collect
78 `attrs.fields`\ s.
79
80 For now, please refer to `attr.s` for the rest of the parameters.
81
82 .. versionadded:: 20.1.0
83 .. versionchanged:: 21.3.0 Converters are also run ``on_setattr``.
84 """
85
86 def do_it(cls, auto_attribs):
87 return attrs(
88 maybe_cls=cls,
89 these=these,
90 repr=repr,
91 hash=hash,
92 init=init,
93 slots=slots,
94 frozen=frozen,
95 weakref_slot=weakref_slot,
96 str=str,
97 auto_attribs=auto_attribs,
98 kw_only=kw_only,
99 cache_hash=cache_hash,
100 auto_exc=auto_exc,
101 eq=eq,
102 order=order,
103 auto_detect=auto_detect,
104 collect_by_mro=True,
105 getstate_setstate=getstate_setstate,
106 on_setattr=on_setattr,
107 field_transformer=field_transformer,
108 match_args=match_args,
109 )
110
111 def wrap(cls):
112 """
113 Making this a wrapper ensures this code runs during class creation.
114
115 We also ensure that frozen-ness of classes is inherited.
116 """
117 nonlocal frozen, on_setattr
118
119 had_on_setattr = on_setattr not in (None, setters.NO_OP)
120
121 # By default, mutable classes convert & validate on setattr.
122 if frozen is False and on_setattr is None:
123 on_setattr = _ng_default_on_setattr
124
125 # However, if we subclass a frozen class, we inherit the immutability
126 # and disable on_setattr.
127 for base_cls in cls.__bases__:
128 if base_cls.__setattr__ is _frozen_setattrs:
129 if had_on_setattr:
130 raise ValueError(
131 "Frozen classes can't use on_setattr "
132 "(frozen-ness was inherited)."
133 )
134
135 on_setattr = setters.NO_OP
136 break
137
138 if auto_attribs is not None:
139 return do_it(cls, auto_attribs)
140
141 try:
142 return do_it(cls, True)
143 except UnannotatedAttributeError:
144 return do_it(cls, False)
145
146 # maybe_cls's type depends on the usage of the decorator. It's a class
147 # if it's used as `@attrs` but ``None`` if used as `@attrs()`.
148 if maybe_cls is None:
149 return wrap
150 else:
151 return wrap(maybe_cls)
152
153
154 mutable = define
155 frozen = partial(define, frozen=True, on_setattr=None)
156
157
158 def field(
159 *,
160 default=NOTHING,
161 validator=None,
162 repr=True,
163 hash=None,
164 init=True,
165 metadata=None,
166 converter=None,
167 factory=None,
168 kw_only=False,
169 eq=None,
170 order=None,
171 on_setattr=None,
172 ):
173 """
174 Identical to `attr.ib`, except keyword-only and with some arguments
175 removed.
176
177 .. versionadded:: 20.1.0
178 """
179 return attrib(
180 default=default,
181 validator=validator,
182 repr=repr,
183 hash=hash,
184 init=init,
185 metadata=metadata,
186 converter=converter,
187 factory=factory,
188 kw_only=kw_only,
189 eq=eq,
190 order=order,
191 on_setattr=on_setattr,
192 )
193
194
195 def asdict(inst, *, recurse=True, filter=None, value_serializer=None):
196 """
197 Same as `attr.asdict`, except that collection types are always retained
198 and dict is always used as *dict_factory*.
199
200 .. versionadded:: 21.3.0
201 """
202 return _asdict(
203 inst=inst,
204 recurse=recurse,
205 filter=filter,
206 value_serializer=value_serializer,
207 retain_collection_types=True,
208 )
209
210
211 def astuple(inst, *, recurse=True, filter=None):
212 """
213 Same as `attr.astuple`, except that collection types are always retained
214 and `tuple` is always used as the *tuple_factory*.
215
216 .. versionadded:: 21.3.0
217 """
218 return _astuple(
219 inst=inst, recurse=recurse, filter=filter, retain_collection_types=True
220 )
@@ -0,0 +1,86
1 # SPDX-License-Identifier: MIT
2
3
4 from functools import total_ordering
5
6 from ._funcs import astuple
7 from ._make import attrib, attrs
8
9
10 @total_ordering
11 @attrs(eq=False, order=False, slots=True, frozen=True)
12 class VersionInfo:
13 """
14 A version object that can be compared to tuples of length 1--4:
15
16 >>> attr.VersionInfo(19, 1, 0, "final") <= (19, 2)
17 True
18 >>> attr.VersionInfo(19, 1, 0, "final") < (19, 1, 1)
19 True
20 >>> vi = attr.VersionInfo(19, 2, 0, "final")
21 >>> vi < (19, 1, 1)
22 False
23 >>> vi < (19,)
24 False
25 >>> vi == (19, 2,)
26 True
27 >>> vi == (19, 2, 1)
28 False
29
30 .. versionadded:: 19.2
31 """
32
33 year = attrib(type=int)
34 minor = attrib(type=int)
35 micro = attrib(type=int)
36 releaselevel = attrib(type=str)
37
38 @classmethod
39 def _from_version_string(cls, s):
40 """
41 Parse *s* and return a VersionInfo.
42 """
43 v = s.split(".")
44 if len(v) == 3:
45 v.append("final")
46
47 return cls(
48 year=int(v[0]), minor=int(v[1]), micro=int(v[2]), releaselevel=v[3]
49 )
50
51 def _ensure_tuple(self, other):
52 """
53 Ensure *other* is a tuple of a valid length.
54
55 Returns a possibly transformed *other* and ourselves as a tuple of
56 the same length as *other*.
57 """
58
59 if self.__class__ is other.__class__:
60 other = astuple(other)
61
62 if not isinstance(other, tuple):
63 raise NotImplementedError
64
65 if not (1 <= len(other) <= 4):
66 raise NotImplementedError
67
68 return astuple(self)[: len(other)], other
69
70 def __eq__(self, other):
71 try:
72 us, them = self._ensure_tuple(other)
73 except NotImplementedError:
74 return NotImplemented
75
76 return us == them
77
78 def __lt__(self, other):
79 try:
80 us, them = self._ensure_tuple(other)
81 except NotImplementedError:
82 return NotImplemented
83
84 # Since alphabetically "dev0" < "final" < "post1" < "post2", we don't
85 # have to do anything special with releaselevel for now.
86 return us < them
@@ -0,0 +1,9
1 class VersionInfo:
2 @property
3 def year(self) -> int: ...
4 @property
5 def minor(self) -> int: ...
6 @property
7 def micro(self) -> int: ...
8 @property
9 def releaselevel(self) -> str: ...
@@ -0,0 +1,13
1 from typing import Callable, Optional, TypeVar, overload
2
3 from . import _ConverterType
4
5 _T = TypeVar("_T")
6
7 def pipe(*validators: _ConverterType) -> _ConverterType: ...
8 def optional(converter: _ConverterType) -> _ConverterType: ...
9 @overload
10 def default_if_none(default: _T) -> _ConverterType: ...
11 @overload
12 def default_if_none(*, factory: Callable[[], _T]) -> _ConverterType: ...
13 def to_bool(val: str) -> bool: ...
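
For context, a hedged example of how these converters compose with fields (assuming attrs >= 20.1 for `attr.define`/`attr.field`; `Job` is an illustrative name):

    import attr
    from attr import converters

    @attr.define
    class Job:
        # None passes through unchanged; everything else is coerced to int
        retries: int = attr.field(converter=converters.optional(int), default=None)
        # None is replaced by a fresh list from the factory
        tags: list = attr.field(
            converter=converters.default_if_none(factory=list), default=None
        )

    j = Job(retries="3")
    assert j.retries == 3 and j.tags == []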
@@ -0,0 +1,17
1 from typing import Any
2
3 class FrozenError(AttributeError):
4 msg: str = ...
5
6 class FrozenInstanceError(FrozenError): ...
7 class FrozenAttributeError(FrozenError): ...
8 class AttrsAttributeNotFoundError(ValueError): ...
9 class NotAnAttrsClassError(ValueError): ...
10 class DefaultAlreadySetError(RuntimeError): ...
11 class UnannotatedAttributeError(RuntimeError): ...
12 class PythonTooOldError(RuntimeError): ...
13
14 class NotCallableError(TypeError):
15 msg: str = ...
16 value: Any = ...
17 def __init__(self, msg: str, value: Any) -> None: ...
@@ -0,0 +1,6
1 from typing import Any, Union
2
3 from . import Attribute, _FilterType
4
5 def include(*what: Union[type, Attribute[Any]]) -> _FilterType[Any]: ...
6 def exclude(*what: Union[type, Attribute[Any]]) -> _FilterType[Any]: ...
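
A small usage sketch for these filters with `asdict` (assuming the attrs package; `Account` is an illustrative name). Both `include` and `exclude` accept classes or `Attribute` instances:

    import attr
    from attr import asdict, filters

    @attr.define
    class Account:
        user: str
        password: str

    public = asdict(
        Account("alice", "hunter2"),
        filter=filters.exclude(attr.fields(Account).password),
    )
    assert public == {"user": "alice"}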
1 NO CONTENT: new file 100644
@@ -0,0 +1,73
1 # SPDX-License-Identifier: MIT
2
3 """
4 Commonly used hooks for on_setattr.
5 """
6
7
8 from . import _config
9 from .exceptions import FrozenAttributeError
10
11
12 def pipe(*setters):
13 """
14 Run all *setters* and return the return value of the last one.
15
16 .. versionadded:: 20.1.0
17 """
18
19 def wrapped_pipe(instance, attrib, new_value):
20 rv = new_value
21
22 for setter in setters:
23 rv = setter(instance, attrib, rv)
24
25 return rv
26
27 return wrapped_pipe
28
29
30 def frozen(_, __, ___):
31 """
32 Prevent an attribute from being modified.
33
34 .. versionadded:: 20.1.0
35 """
36 raise FrozenAttributeError()
37
38
39 def validate(instance, attrib, new_value):
40 """
41 Run *attrib*'s validator on *new_value* if it has one.
42
43 .. versionadded:: 20.1.0
44 """
45 if _config._run_validators is False:
46 return new_value
47
48 v = attrib.validator
49 if not v:
50 return new_value
51
52 v(instance, attrib, new_value)
53
54 return new_value
55
56
57 def convert(instance, attrib, new_value):
58 """
59 Run *attrib*'s converter -- if it has one -- on *new_value* and return the
60 result.
61
62 .. versionadded:: 20.1.0
63 """
64 c = attrib.converter
65 if c:
66 return c(new_value)
67
68 return new_value
69
70
71 # Sentinel for disabling class-wide *on_setattr* hooks for certain attributes.
72 # autodata stopped working, so the docstring is inlined in the API docs.
73 NO_OP = object()
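
A brief sketch of wiring these hooks up (assuming attrs >= 20.1; `Task` is an illustrative name). ``pipe`` chains ``convert`` and ``validate``, and ``NO_OP`` exempts a single attribute from any class-wide hooks:

    import attr
    from attr import setters, validators

    @attr.s
    class Task:
        name = attr.ib(
            converter=str.strip,
            validator=validators.instance_of(str),
            on_setattr=setters.pipe(setters.convert, setters.validate),
        )
        cache = attr.ib(default=None, on_setattr=setters.NO_OP)

    t = Task("  build  ")
    t.name = "  test  "          # converted (stripped), then validated
    assert t.name == "test"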
@@ -0,0 +1,19
1 from typing import Any, NewType, NoReturn, TypeVar, cast
2
3 from . import Attribute, _OnSetAttrType
4
5 _T = TypeVar("_T")
6
7 def frozen(
8 instance: Any, attribute: Attribute[Any], new_value: Any
9 ) -> NoReturn: ...
10 def pipe(*setters: _OnSetAttrType) -> _OnSetAttrType: ...
11 def validate(instance: Any, attribute: Attribute[_T], new_value: _T) -> _T: ...
12
13 # convert is allowed to return Any, because converters can be chained using pipe.
14 def convert(
15 instance: Any, attribute: Attribute[Any], new_value: Any
16 ) -> Any: ...
17
18 _NoOpType = NewType("_NoOpType", object)
19 NO_OP: _NoOpType
@@ -0,0 +1,80
1 from typing import (
2 Any,
3 AnyStr,
4 Callable,
5 Container,
6 ContextManager,
7 Iterable,
8 List,
9 Mapping,
10 Match,
11 Optional,
12 Pattern,
13 Tuple,
14 Type,
15 TypeVar,
16 Union,
17 overload,
18 )
19
20 from . import _ValidatorType
21 from . import _ValidatorArgType
22
23 _T = TypeVar("_T")
24 _T1 = TypeVar("_T1")
25 _T2 = TypeVar("_T2")
26 _T3 = TypeVar("_T3")
27 _I = TypeVar("_I", bound=Iterable)
28 _K = TypeVar("_K")
29 _V = TypeVar("_V")
30 _M = TypeVar("_M", bound=Mapping)
31
32 def set_disabled(run: bool) -> None: ...
33 def get_disabled() -> bool: ...
34 def disabled() -> ContextManager[None]: ...
35
36 # To be more precise on instance_of, we use some overloads.
37 # If there are more than 3 items in the tuple then we fall back to Any
38 @overload
39 def instance_of(type: Type[_T]) -> _ValidatorType[_T]: ...
40 @overload
41 def instance_of(type: Tuple[Type[_T]]) -> _ValidatorType[_T]: ...
42 @overload
43 def instance_of(
44 type: Tuple[Type[_T1], Type[_T2]]
45 ) -> _ValidatorType[Union[_T1, _T2]]: ...
46 @overload
47 def instance_of(
48 type: Tuple[Type[_T1], Type[_T2], Type[_T3]]
49 ) -> _ValidatorType[Union[_T1, _T2, _T3]]: ...
50 @overload
51 def instance_of(type: Tuple[type, ...]) -> _ValidatorType[Any]: ...
52 def provides(interface: Any) -> _ValidatorType[Any]: ...
53 def optional(
54 validator: Union[_ValidatorType[_T], List[_ValidatorType[_T]]]
55 ) -> _ValidatorType[Optional[_T]]: ...
56 def in_(options: Container[_T]) -> _ValidatorType[_T]: ...
57 def and_(*validators: _ValidatorType[_T]) -> _ValidatorType[_T]: ...
58 def matches_re(
59 regex: Union[Pattern[AnyStr], AnyStr],
60 flags: int = ...,
61 func: Optional[
62 Callable[[AnyStr, AnyStr, int], Optional[Match[AnyStr]]]
63 ] = ...,
64 ) -> _ValidatorType[AnyStr]: ...
65 def deep_iterable(
66 member_validator: _ValidatorArgType[_T],
67 iterable_validator: Optional[_ValidatorType[_I]] = ...,
68 ) -> _ValidatorType[_I]: ...
69 def deep_mapping(
70 key_validator: _ValidatorType[_K],
71 value_validator: _ValidatorType[_V],
72 mapping_validator: Optional[_ValidatorType[_M]] = ...,
73 ) -> _ValidatorType[_M]: ...
74 def is_callable() -> _ValidatorType[_T]: ...
75 def lt(val: _T) -> _ValidatorType[_T]: ...
76 def le(val: _T) -> _ValidatorType[_T]: ...
77 def ge(val: _T) -> _ValidatorType[_T]: ...
78 def gt(val: _T) -> _ValidatorType[_T]: ...
79 def max_len(length: int) -> _ValidatorType[_T]: ...
80 def min_len(length: int) -> _ValidatorType[_T]: ...
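
To illustrate how these validator signatures combine (assuming attrs >= 22.1 for `min_len`; `Service` is an illustrative name):

    import attr
    from attr import validators as v

    @attr.define
    class Service:
        name: str = attr.field(validator=[v.instance_of(str), v.min_len(1)])
        port: int = attr.field(validator=[v.instance_of(int), v.gt(0), v.lt(65536)])
        tags: list = attr.field(
            factory=list,
            validator=v.deep_iterable(
                member_validator=v.instance_of(str),
                iterable_validator=v.instance_of(list),
            ),
        )

    Service(name="web", port=8080, tags=["http"])   # passes all validators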
@@ -0,0 +1,28
1 # typelib.py - type hint aliases and support
2 #
3 # Copyright 2022 Matt Harbison <matt_harbison@yahoo.com>
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
7
8 import typing
9
10 # Note: this is slightly different from pycompat.TYPE_CHECKING, as using
11 # pycompat causes the BinaryIO_Proxy type to be resolved to ``object`` when
12 # used as the base class during a pytype run.
13 TYPE_CHECKING = typing.TYPE_CHECKING
14
15
16 # The BinaryIO class provides empty methods, which at runtime means that
17 # ``__getattr__`` on the proxy classes won't get called for the methods that
18 # should delegate to the internal object. So to avoid runtime changes because
19 # of the required typing inheritance, just use BinaryIO when typechecking, and
20 # ``object`` otherwise.
21 if TYPE_CHECKING:
22 from typing import (
23 BinaryIO,
24 )
25
26 BinaryIO_Proxy = BinaryIO
27 else:
28 BinaryIO_Proxy = object
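
A sketch of the intended pattern (``LoggingFile`` is hypothetical, and the import assumes this lands as ``mercurial/typelib.py``): type checkers see a ``BinaryIO`` base class, while at runtime the base is plain ``object`` so ``__getattr__`` still delegates every method to the wrapped file:

    from mercurial.typelib import BinaryIO_Proxy

    class LoggingFile(BinaryIO_Proxy):
        def __init__(self, fp):
            self._fp = fp

        def read(self, n=-1):
            data = self._fp.read(n)
            print("read %d bytes" % len(data))
            return data

        def __getattr__(self, name):
            # every other method/attribute falls through to the wrapped file
            return getattr(self._fp, name)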
@@ -0,0 +1,119
1 use std::fs;
2 use std::io;
3 use std::os::unix::fs::{MetadataExt, PermissionsExt};
4 use std::path::Path;
5
6 const EXECFLAGS: u32 = 0o111;
7
8 fn is_executable(path: impl AsRef<Path>) -> Result<bool, io::Error> {
9 let metadata = fs::metadata(path)?;
10 let mode = metadata.mode();
11 Ok(mode & EXECFLAGS != 0)
12 }
13
14 fn make_executable(path: impl AsRef<Path>) -> Result<(), io::Error> {
15 let mode = fs::metadata(path.as_ref())?.mode();
16 fs::set_permissions(
17 path,
18 fs::Permissions::from_mode((mode & 0o777) | EXECFLAGS),
19 )?;
20 Ok(())
21 }
22
23 fn copy_mode(
24 src: impl AsRef<Path>,
25 dst: impl AsRef<Path>,
26 ) -> Result<(), io::Error> {
27 let mode = match fs::symlink_metadata(src) {
28 Ok(metadata) => metadata.mode(),
29 Err(e) if e.kind() == io::ErrorKind::NotFound =>
30 // copymode in python has a more complicated handling of FileNotFound
31 // error, which we don't need because all it does is apply the
32 // umask, which the OS already does when we mkdir.
33 {
34 return Ok(())
35 }
36 Err(e) => return Err(e),
37 };
38 fs::set_permissions(dst, fs::Permissions::from_mode(mode))?;
39 Ok(())
40 }
41
42 fn check_exec_impl(path: impl AsRef<Path>) -> Result<bool, io::Error> {
43 let basedir = path.as_ref().join(".hg");
44 let cachedir = basedir.join("wcache");
45 let storedir = basedir.join("store");
46
47 if !cachedir.exists() {
48 // we want to create the 'cache' directory, not the '.hg' one.
49 // Automatically creating the '.hg' directory could silently spawn
50 // invalid Mercurial repositories. That seems like a bad idea.
51 fs::create_dir(&cachedir)
52 .and_then(|()| {
53 if storedir.exists() {
54 copy_mode(&storedir, &cachedir)
55 } else {
56 copy_mode(&basedir, &cachedir)
57 }
58 })
59 .ok();
60 }
61
62 let leave_file: bool;
63 let checkdir: &Path;
64 let checkisexec = cachedir.join("checkisexec");
65 let checknoexec = cachedir.join("checknoexec");
66 if cachedir.is_dir() {
67 // Check if both files already exist in cache and have correct
68 // permissions. If so, we assume that permissions work.
69 // If not, we delete the files and try again.
70 match is_executable(&checkisexec) {
71 Err(e) if e.kind() == io::ErrorKind::NotFound => (),
72 Err(e) => return Err(e),
73 Ok(is_exec) => {
74 if is_exec {
75 let noexec_is_exec = match is_executable(&checknoexec) {
76 Err(e) if e.kind() == io::ErrorKind::NotFound => {
77 fs::write(&checknoexec, "")?;
78 is_executable(&checknoexec)?
79 }
80 Err(e) => return Err(e),
81 Ok(exec) => exec,
82 };
83 if !noexec_is_exec {
84 // check-exec is exec and check-no-exec is not exec
85 return Ok(true);
86 }
87 fs::remove_file(&checknoexec)?;
88 }
89 fs::remove_file(&checkisexec)?;
90 }
91 }
92 checkdir = &cachedir;
93 leave_file = true;
94 } else {
95 // no cache directory (probably because .hg doesn't exist):
96 // check directly in `path` and don't leave the temp file behind
97 checkdir = path.as_ref();
98 leave_file = false;
99 };
100
101 let tmp_file = tempfile::NamedTempFile::new_in(checkdir)?;
102 if !is_executable(tmp_file.path())? {
103 make_executable(tmp_file.path())?;
104 if is_executable(tmp_file.path())? {
105 if leave_file {
106 tmp_file.persist(checkisexec).ok();
107 }
108 return Ok(true);
109 }
110 }
111
112 Ok(false)
113 }
114
115 /// This function is a Rust rewrite of the [checkexec] function from [posix.py].
116 /// Returns true if the filesystem supports execute permissions.
117 pub fn check_exec(path: impl AsRef<Path>) -> bool {
118 check_exec_impl(path).unwrap_or(false)
119 }
@@ -0,0 +1,77
1 Force revlog max inline value to be smaller than default
2
3 $ mkdir $TESTTMP/ext
4 $ cat << EOF > $TESTTMP/ext/small_inline.py
5 > from mercurial import revlog
6 > revlog._maxinline = 8
7 > EOF
8
9 $ cat << EOF >> $HGRCPATH
10 > [extensions]
11 > small_inline=$TESTTMP/ext/small_inline.py
12 > EOF
13
14 $ hg init repo
15 $ cd repo
16
17 Try on an empty repository
18
19 $ hg debug-revlog-stats
20 rev-count data-size inl type target
21 0 0 yes changelog
22 0 0 yes manifest
23
24 $ mkdir folder
25 $ touch a b folder/c folder/d
26 $ hg commit -Aqm 0
27 $ echo "text" > a
28 $ hg rm b
29 $ echo "longer string" > folder/d
30 $ hg commit -Aqm 1
31
32 Differences in data size observed with pure are due to different compression
33 algorithms
34
35 $ hg debug-revlog-stats
36 rev-count data-size inl type target
37 2 138 no changelog (no-pure !)
38 2 137 no changelog (pure !)
39 2 177 no manifest (no-pure !)
40 2 168 no manifest (pure !)
41 2 6 yes file a
42 1 0 yes file b
43 1 0 yes file folder/c
44 2 15 no file folder/d
45
46 Test 'changelog' command argument
47
48 $ hg debug-revlog-stats -c
49 rev-count data-size inl type target
50 2 138 no changelog (no-pure !)
51 2 137 no changelog (pure !)
52
53 Test 'manifest' command argument
54
55 $ hg debug-revlog-stats -m
56 rev-count data-size inl type target
57 2 177 no manifest (no-pure !)
58 2 168 no manifest (pure !)
59
60 Test 'file' command argument
61
62 $ hg debug-revlog-stats -f
63 rev-count data-size inl type target
64 2 6 yes file a
65 1 0 yes file b
66 1 0 yes file folder/c
67 2 15 no file folder/d
68
69 Test multiple command arguments
70
71 $ hg debug-revlog-stats -cm
72 rev-count data-size inl type target
73 2 138 no changelog (no-pure !)
74 2 137 no changelog (pure !)
75 2 177 no manifest (no-pure !)
76 2 168 no manifest (pure !)
77
@@ -0,0 +1,102
1 #require serve
2
3 Some tests for hgweb responding to HEAD requests
4
5 $ hg init test
6 $ cd test
7 $ mkdir da
8 $ echo foo > da/foo
9 $ echo foo > foo
10 $ hg ci -Ambase
11 adding da/foo
12 adding foo
13 $ hg bookmark -r0 '@'
14 $ hg bookmark -r0 'a b c'
15 $ hg bookmark -r0 'd/e/f'
16 $ hg serve -n test -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
17 $ cat hg.pid >> $DAEMON_PIDS
18
19 manifest
20
21 $ get-with-headers.py localhost:$HGPORT --method=HEAD 'file/tip/?style=raw' - date etag server
22 200 Script output follows
23 content-type: text/plain; charset=ascii
24
25 $ get-with-headers.py localhost:$HGPORT --method=HEAD 'file/tip/da?style=raw' - date etag server
26 200 Script output follows
27 content-type: text/plain; charset=ascii
28
29
30 plain file
31
32 $ get-with-headers.py localhost:$HGPORT --method=HEAD 'file/tip/foo?style=raw' - date etag server
33 200 Script output follows
34 content-disposition: inline; filename="foo"
35 content-length: 4
36 content-type: application/binary
37
38
39 should give a 404 - static file that does not exist
40
41 $ get-with-headers.py localhost:$HGPORT --method=HEAD 'static/bogus' - date etag server
42 404 Not Found
43 content-type: text/html; charset=ascii
44
45 [1]
46
47 should give a 404 - bad revision
48
49 $ get-with-headers.py localhost:$HGPORT --method=HEAD 'file/spam/foo?style=raw' - date etag server
50 404 Not Found
51 content-type: text/plain; charset=ascii
52
53 [1]
54
55 should give a 400 - bad command
56
57 $ get-with-headers.py localhost:$HGPORT --method=HEAD 'file/tip/foo?cmd=spam&style=raw' - date etag server
58 400* (glob)
59 content-type: text/plain; charset=ascii
60
61 [1]
62
63 should give a 404 - file does not exist
64
65 $ get-with-headers.py localhost:$HGPORT --method=HEAD 'file/tip/bork?style=raw' - date etag server
66 404 Not Found
67 content-type: text/plain; charset=ascii
68
69 [1]
70
71 try bad style
72
73 $ get-with-headers.py localhost:$HGPORT --method=HEAD 'file/tip/?style=foobar' - date etag server
74 200 Script output follows
75 content-type: text/html; charset=ascii
76
77
78 log
79
80 $ get-with-headers.py localhost:$HGPORT --method=HEAD 'log?style=raw' - date etag server
81 200 Script output follows
82 content-type: text/plain; charset=ascii
83
84
85 access bookmarks
86
87 $ get-with-headers.py localhost:$HGPORT --method=HEAD 'rev/@?style=paper' - date etag server
88 200 Script output follows
89 content-type: text/html; charset=ascii
90
91
92 static file
93
94 $ get-with-headers.py localhost:$HGPORT --method=HEAD 'static/style-gitweb.css' - date etag server
95 200 Script output follows
96 content-length: 9074
97 content-type: text/css
98
99
100 $ killdaemons.py
101
102 $ cd ..
@@ -0,0 +1,333
1 ==========================================================
2 Test various things around delta computation within revlog
3 ==========================================================
4
5
6 basic setup
7 -----------
8
9 $ cat << EOF >> $HGRCPATH
10 > [debug]
11 > revlog.debug-delta=yes
12 > EOF
13 $ cat << EOF >> sha256line.py
> # a way to quickly produce a file of significant size and poorly compressible content.
15 > import hashlib
16 > import sys
17 > for line in sys.stdin:
18 > print(hashlib.sha256(line.encode('utf8')).hexdigest())
19 > EOF
20
21 $ hg init base-repo
22 $ cd base-repo
23
24 create a "large" file
25
26 $ $TESTDIR/seq.py 1000 | $PYTHON $TESTTMP/sha256line.py > my-file.txt
27 $ hg add my-file.txt
28 $ hg commit -m initial-commit
29 DBG-DELTAS: FILELOG:my-file.txt: rev=0: delta-base=0 * (glob)
30 DBG-DELTAS: MANIFESTLOG: * (glob)
31 DBG-DELTAS: CHANGELOG: * (glob)
32
33 Add more change at the end of the file
34
35 $ $TESTDIR/seq.py 1001 1200 | $PYTHON $TESTTMP/sha256line.py >> my-file.txt
36 $ hg commit -m "large-change"
37 DBG-DELTAS: FILELOG:my-file.txt: rev=1: delta-base=0 * (glob)
38 DBG-DELTAS: MANIFESTLOG: * (glob)
39 DBG-DELTAS: CHANGELOG: * (glob)
40
41 Add small change at the start
42
43 $ hg up 'desc("initial-commit")' --quiet
44 $ mv my-file.txt foo
45 $ echo "small change at the start" > my-file.txt
46 $ cat foo >> my-file.txt
47 $ rm foo
48 $ hg commit -m "small-change"
49 DBG-DELTAS: FILELOG:my-file.txt: rev=2: delta-base=0 * (glob)
50 DBG-DELTAS: MANIFESTLOG: * (glob)
51 DBG-DELTAS: CHANGELOG: * (glob)
52 created new head
53
54
55 $ hg log -r 'head()' -T '{node}\n' >> ../base-heads.nodes
56 $ hg log -r 'desc("initial-commit")' -T '{node}\n' >> ../initial.node
57 $ hg log -r 'desc("small-change")' -T '{node}\n' >> ../small.node
58 $ hg log -r 'desc("large-change")' -T '{node}\n' >> ../large.node
59 $ cd ..
60
61 Check delta find policy and result for merge on commit
62 ======================================================
63
64 Check that the delta of a merge picks the best of the two parents
65 ------------------------------------------------------
66
67 As we check against both parents, the one with the largest change should
68 produce the smallest delta and be picked.
69
70 $ hg clone base-repo test-parents --quiet
71 $ hg -R test-parents update 'nodefromfile("small.node")' --quiet
72 $ hg -R test-parents merge 'nodefromfile("large.node")' --quiet
73
74 The delta base is the "large" revision, as it produces a smaller delta.
75
76 $ hg -R test-parents commit -m "merge from small change"
77 DBG-DELTAS: FILELOG:my-file.txt: rev=3: delta-base=1 * (glob)
78 DBG-DELTAS: MANIFESTLOG: * (glob)
79 DBG-DELTAS: CHANGELOG: * (glob)
80
81 Check that the behavior tested above can be disabled
82 ----------------------------------------------------
83
84 We disable the checking of both parents at the same time. The `small` change,
85 which produces a less optimal delta, should be picked first as it is "closer" to
86 the new commit.
87
88 $ hg clone base-repo test-no-parents --quiet
89 $ hg -R test-no-parents update 'nodefromfile("small.node")' --quiet
90 $ hg -R test-no-parents merge 'nodefromfile("large.node")' --quiet
91
92 The delta base is now the "small" revision, the closest parent, even though it produces a larger delta.
93
94 $ hg -R test-no-parents commit -m "merge from small change" \
95 > --config storage.revlog.optimize-delta-parent-choice=no
96 DBG-DELTAS: FILELOG:my-file.txt: rev=3: delta-base=2 * (glob)
97 DBG-DELTAS: MANIFESTLOG: * (glob)
98 DBG-DELTAS: CHANGELOG: * (glob)
99
100
101 Check delta-find policy and result when unbundling
102 ==================================================
103
104 Build a bundle with all delta built against p1
105
106 $ hg bundle -R test-parents --all --config devel.bundle.delta=p1 all-p1.hg
107 4 changesets found
108
109 Default policy of trusting delta from the bundle
110 ------------------------------------------------
111
112 Keeping the `p1` delta used in the bundle is sub-optimal for storage, but
113 trusting the in-bundle delta is faster to apply.
114
115 $ hg init bundle-default
116 $ hg -R bundle-default unbundle all-p1.hg --quiet
117 DBG-DELTAS: CHANGELOG: * (glob)
118 DBG-DELTAS: CHANGELOG: * (glob)
119 DBG-DELTAS: CHANGELOG: * (glob)
120 DBG-DELTAS: CHANGELOG: * (glob)
121 DBG-DELTAS: MANIFESTLOG: * (glob)
122 DBG-DELTAS: MANIFESTLOG: * (glob)
123 DBG-DELTAS: MANIFESTLOG: * (glob)
124 DBG-DELTAS: MANIFESTLOG: * (glob)
125 DBG-DELTAS: FILELOG:my-file.txt: rev=0: delta-base=0 * (glob)
126 DBG-DELTAS: FILELOG:my-file.txt: rev=1: delta-base=0 * (glob)
127 DBG-DELTAS: FILELOG:my-file.txt: rev=2: delta-base=0 * (glob)
128 DBG-DELTAS: FILELOG:my-file.txt: rev=3: delta-base=2 * (glob)
129
130 (confirm the file revisions are in the same order, 2 should be smaller than 1)
131
132 $ hg -R bundle-default debugdata my-file.txt 2 | wc -l
133 \s*1001 (re)
134 $ hg -R bundle-default debugdata my-file.txt 1 | wc -l
135 \s*1200 (re)
136
137 explicitly enabled
138 ------------------
139
140 Keeping the `p1` delta used in the bundle is sub-optimal for storage, but
141 trusting the in-bundle delta is faster to apply.
142
143 $ hg init bundle-reuse-enabled
144 $ hg -R bundle-reuse-enabled unbundle all-p1.hg --quiet \
145 > --config storage.revlog.reuse-external-delta-parent=yes
146 DBG-DELTAS: CHANGELOG: * (glob)
147 DBG-DELTAS: CHANGELOG: * (glob)
148 DBG-DELTAS: CHANGELOG: * (glob)
149 DBG-DELTAS: CHANGELOG: * (glob)
150 DBG-DELTAS: MANIFESTLOG: * (glob)
151 DBG-DELTAS: MANIFESTLOG: * (glob)
152 DBG-DELTAS: MANIFESTLOG: * (glob)
153 DBG-DELTAS: MANIFESTLOG: * (glob)
154 DBG-DELTAS: FILELOG:my-file.txt: rev=0: delta-base=0 * (glob)
155 DBG-DELTAS: FILELOG:my-file.txt: rev=1: delta-base=0 * (glob)
156 DBG-DELTAS: FILELOG:my-file.txt: rev=2: delta-base=0 * (glob)
157 DBG-DELTAS: FILELOG:my-file.txt: rev=3: delta-base=2 * (glob)
158
159 (confirm the file revisions are in the same order, 2 should be smaller than 1)
160
161 $ hg -R bundle-reuse-enabled debugdata my-file.txt 2 | wc -l
162 \s*1001 (re)
163 $ hg -R bundle-reuse-enabled debugdata my-file.txt 1 | wc -l
164 \s*1200 (re)
165
166 explicitly disabled
167 -------------------
168
169 Not reusing the delta-base from the bundle means the delta will be made
170 against the "best" parent (so not the same as in the previous two cases).
171
172 $ hg init bundle-reuse-disabled
173 $ hg -R bundle-reuse-disabled unbundle all-p1.hg --quiet \
174 > --config storage.revlog.reuse-external-delta-parent=no
175 DBG-DELTAS: CHANGELOG: * (glob)
176 DBG-DELTAS: CHANGELOG: * (glob)
177 DBG-DELTAS: CHANGELOG: * (glob)
178 DBG-DELTAS: CHANGELOG: * (glob)
179 DBG-DELTAS: MANIFESTLOG: * (glob)
180 DBG-DELTAS: MANIFESTLOG: * (glob)
181 DBG-DELTAS: MANIFESTLOG: * (glob)
182 DBG-DELTAS: MANIFESTLOG: * (glob)
183 DBG-DELTAS: FILELOG:my-file.txt: rev=0: delta-base=0 * (glob)
184 DBG-DELTAS: FILELOG:my-file.txt: rev=1: delta-base=0 * (glob)
185 DBG-DELTAS: FILELOG:my-file.txt: rev=2: delta-base=0 * (glob)
186 DBG-DELTAS: FILELOG:my-file.txt: rev=3: delta-base=1 * (glob)
187
188 (confirm the file revisions are in the same order, 2 should be smaller than 1)
189
190 $ hg -R bundle-reuse-disabled debugdata my-file.txt 2 | wc -l
191 \s*1001 (re)
192 $ hg -R bundle-reuse-disabled debugdata my-file.txt 1 | wc -l
193 \s*1200 (re)
194
195
196 Check the path.*:delta-reuse-policy option
197 ==========================================
198
199 Get a repository with the bad parent picked and a clone ready to pull the merge
200
201 $ cp -ar bundle-reuse-enabled peer-bad-delta
202 $ hg clone peer-bad-delta local-pre-pull --rev `cat large.node` --rev `cat small.node` --quiet
203 DBG-DELTAS: CHANGELOG: * (glob)
204 DBG-DELTAS: CHANGELOG: * (glob)
205 DBG-DELTAS: CHANGELOG: * (glob)
206 DBG-DELTAS: MANIFESTLOG: * (glob)
207 DBG-DELTAS: MANIFESTLOG: * (glob)
208 DBG-DELTAS: MANIFESTLOG: * (glob)
209 DBG-DELTAS: FILELOG:my-file.txt: rev=0: delta-base=0 * (glob)
210 DBG-DELTAS: FILELOG:my-file.txt: rev=1: delta-base=0 * (glob)
211 DBG-DELTAS: FILELOG:my-file.txt: rev=2: delta-base=0 * (glob)
212
213 Check the parent order for the file
214
215 $ hg -R local-pre-pull debugdata my-file.txt 2 | wc -l
216 \s*1001 (re)
217 $ hg -R local-pre-pull debugdata my-file.txt 1 | wc -l
218 \s*1200 (re)
219
220 Pull with no value (so the default)
221 -----------------------------------
222
223 default is to reuse the (bad) delta
224
225 $ cp -ar local-pre-pull local-no-value
226 $ hg -R local-no-value pull --quiet
227 DBG-DELTAS: CHANGELOG: * (glob)
228 DBG-DELTAS: MANIFESTLOG: * (glob)
229 DBG-DELTAS: FILELOG:my-file.txt: rev=3: delta-base=2 * (glob)
230
231 Pull with explicitly the default
232 --------------------------------
233
234 default is to reuse the (bad) delta
235
236 $ cp -ar local-pre-pull local-default
237 $ hg -R local-default pull --quiet --config 'paths.default:delta-reuse-policy=default'
238 DBG-DELTAS: CHANGELOG: * (glob)
239 DBG-DELTAS: MANIFESTLOG: * (glob)
240 DBG-DELTAS: FILELOG:my-file.txt: rev=3: delta-base=2 * (glob)
241
242 Pull with no-reuse
243 ------------------
244
245 We don't reuse the base, so we get a better delta
246
247 $ cp -ar local-pre-pull local-no-reuse
248 $ hg -R local-no-reuse pull --quiet --config 'paths.default:delta-reuse-policy=no-reuse'
249 DBG-DELTAS: CHANGELOG: * (glob)
250 DBG-DELTAS: MANIFESTLOG: * (glob)
251 DBG-DELTAS: FILELOG:my-file.txt: rev=3: delta-base=1 * (glob)
252
253 Pull with try-base
254 ------------------
255
256 We requested to use the (bad) delta
257
258 $ cp -ar local-pre-pull local-try-base
259 $ hg -R local-try-base pull --quiet --config 'paths.default:delta-reuse-policy=try-base'
260 DBG-DELTAS: CHANGELOG: * (glob)
261 DBG-DELTAS: MANIFESTLOG: * (glob)
262 DBG-DELTAS: FILELOG:my-file.txt: rev=3: delta-base=2 * (glob)
263
264 Case where we force a "bad" delta to be applied
265 ===============================================
266
267 We build a very different file content to force a full snapshot
268
269 $ cp -ar peer-bad-delta peer-bad-delta-with-full
270 $ cp -ar local-pre-pull local-pre-pull-full
271 $ echo '[paths]' >> local-pre-pull-full/.hg/hgrc
272 $ echo 'default=../peer-bad-delta-with-full' >> local-pre-pull-full/.hg/hgrc
273
274 $ hg -R peer-bad-delta-with-full update 'desc("merge")' --quiet
275 $ ($TESTDIR/seq.py 2000 2100; $TESTDIR/seq.py 500 510; $TESTDIR/seq.py 3000 3050) \
276 > | $PYTHON $TESTTMP/sha256line.py > peer-bad-delta-with-full/my-file.txt
277 $ hg -R peer-bad-delta-with-full commit -m 'trigger-full'
278 DBG-DELTAS: FILELOG:my-file.txt: rev=4: delta-base=4 * (glob)
279 DBG-DELTAS: MANIFESTLOG: * (glob)
280 DBG-DELTAS: CHANGELOG: * (glob)
281
282 Check that "try-base" behavior challenges the delta
283 --------------------------------------------------
284
285 The bundling process creates a delta against the previous revision; however, this
286 is an invalid chain for the client, so it is not considered and a full
287 snapshot is done again.
288
289 $ cp -ar local-pre-pull-full local-try-base-full
290 $ hg -R local-try-base-full pull --quiet \
291 > --config 'paths.default:delta-reuse-policy=try-base'
292 DBG-DELTAS: CHANGELOG: * (glob)
293 DBG-DELTAS: CHANGELOG: * (glob)
294 DBG-DELTAS: MANIFESTLOG: * (glob)
295 DBG-DELTAS: MANIFESTLOG: * (glob)
296 DBG-DELTAS: FILELOG:my-file.txt: rev=3: delta-base=2 * (glob)
297 DBG-DELTAS: FILELOG:my-file.txt: rev=4: delta-base=4 * (glob)
298
299 Check that "forced" behavior does not challenge the delta, even if it is full.
300 ---------------------------------------------------------------------------
301
302 A full snapshot in the bundle should be accepted as a full snapshot, without recomputation
303
304 $ cp -ar local-pre-pull-full local-forced-full
305 $ hg -R local-forced-full pull --quiet \
306 > --config 'paths.default:delta-reuse-policy=forced'
307 DBG-DELTAS: CHANGELOG: * (glob)
308 DBG-DELTAS: CHANGELOG: * (glob)
309 DBG-DELTAS: MANIFESTLOG: * (glob)
310 DBG-DELTAS: MANIFESTLOG: * (glob)
311 DBG-DELTAS: FILELOG:my-file.txt: rev=3: delta-base=2 * (glob)
312 DBG-DELTAS: FILELOG:my-file.txt: rev=4: delta-base=4 is-cached=1 - search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - * (glob)
313
314 Check that "forced" behavior does not challenge the delta, even if it is bad.
315 ---------------------------------------------------------------------------
316
317 The client does not challenge anything and applies the bizarre delta directly.
318
319 Note: If the bundling process becomes smarter, this test might no longer work
320 (as the server won't be sending "bad" deltas anymore) and might need something
321 more subtle to test this behavior.
322
323 $ hg bundle -R peer-bad-delta-with-full --all --config devel.bundle.delta=p1 all-p1.hg
324 5 changesets found
325 $ cp -ar local-pre-pull-full local-forced-full-p1
326 $ hg -R local-forced-full-p1 pull --quiet \
327 > --config 'paths.*:delta-reuse-policy=forced' all-p1.hg
328 DBG-DELTAS: CHANGELOG: * (glob)
329 DBG-DELTAS: CHANGELOG: * (glob)
330 DBG-DELTAS: MANIFESTLOG: * (glob)
331 DBG-DELTAS: MANIFESTLOG: * (glob)
332 DBG-DELTAS: FILELOG:my-file.txt: rev=3: delta-base=2 * (glob)
333 DBG-DELTAS: FILELOG:my-file.txt: rev=4: delta-base=3 * (glob)
@@ -1,5 +1,8
1 1 /assign_reviewer @mercurial.review
2 2
3
4 <!--
5
3 6 Welcome to the Mercurial Merge Request creation process:
4 7
5 8 * Set a simple title for your MR,
@@ -11,3 +14,5 More details here:
11 14
12 15 * https://www.mercurial-scm.org/wiki/ContributingChanges
13 16 * https://www.mercurial-scm.org/wiki/Heptapod
17
18 -->
@@ -138,6 +138,7 tests:
138 138 # Run Rust tests if cargo is installed
139 139 if command -v $(CARGO) >/dev/null 2>&1; then \
140 140 $(MAKE) rust-tests; \
141 $(MAKE) cargo-clippy; \
141 142 fi
142 143 cd tests && $(PYTHON) run-tests.py $(TESTFLAGS)
143 144
@@ -152,9 +153,13 testpy-%:
152 153 cd tests && $(HGPYTHONS)/$*/bin/python run-tests.py $(TESTFLAGS)
153 154
154 155 rust-tests:
155 cd $(HGROOT)/rust/hg-cpython \
156 cd $(HGROOT)/rust \
156 157 && $(CARGO) test --quiet --all --features "$(HG_RUST_FEATURES)"
157 158
159 cargo-clippy:
160 cd $(HGROOT)/rust \
161 && $(CARGO) clippy --all --features "$(HG_RUST_FEATURES)" -- -D warnings
162
158 163 check-code:
159 164 hg manifest | xargs python contrib/check-code.py
160 165
@@ -372,10 +372,6 commonpypats = [
372 372 ),
373 373 (r'[^^+=*/!<>&| %-](\s=|=\s)[^= ]', "wrong whitespace around ="),
374 374 (
375 r'\([^()]*( =[^=]|[^<>!=]= )',
376 "no whitespace around = for named parameters",
377 ),
378 (
379 375 r'raise [^,(]+, (\([^\)]+\)|[^,\(\)]+)$',
380 376 "don't use old-style two-argument raise, use Exception(message)",
381 377 ),
@@ -12,6 +12,36 cd `hg root`
12 12 # endeavor to empty this list out over time, as some of these are
13 13 # probably hiding real problems.
14 14 #
15 # hgext/absorb.py # [attribute-error]
16 # hgext/bugzilla.py # [pyi-error], [attribute-error]
17 # hgext/convert/bzr.py # [attribute-error]
18 # hgext/convert/cvs.py # [attribute-error], [wrong-arg-types]
19 # hgext/convert/cvsps.py # [attribute-error]
20 # hgext/convert/p4.py # [wrong-arg-types] (__file: mercurial.utils.procutil._pfile -> IO)
21 # hgext/convert/subversion.py # [attribute-error], [name-error], [pyi-error]
22 # hgext/fastannotate/context.py # no linelog.copyfrom()
23 # hgext/fastannotate/formatter.py # [unsupported-operands]
24 # hgext/fsmonitor/__init__.py # [name-error]
25 # hgext/git/__init__.py # [attribute-error]
26 # hgext/githelp.py # [attribute-error] [wrong-arg-types]
27 # hgext/hgk.py # [attribute-error]
28 # hgext/histedit.py # [attribute-error], [wrong-arg-types]
29 # hgext/infinitepush # using bytes for str literal; scheduled for removal
30 # hgext/keyword.py # [attribute-error]
31 # hgext/largefiles/storefactory.py # [attribute-error]
32 # hgext/lfs/__init__.py # [attribute-error]
33 # hgext/narrow/narrowbundle2.py # [attribute-error]
34 # hgext/narrow/narrowcommands.py # [attribute-error], [name-error]
35 # hgext/rebase.py # [attribute-error]
36 # hgext/remotefilelog/basepack.py # [attribute-error], [wrong-arg-count]
37 # hgext/remotefilelog/basestore.py # [attribute-error]
38 # hgext/remotefilelog/contentstore.py # [missing-parameter], [wrong-keyword-args], [attribute-error]
39 # hgext/remotefilelog/fileserverclient.py # [attribute-error]
40 # hgext/remotefilelog/shallowbundle.py # [attribute-error]
41 # hgext/remotefilelog/remotefilectx.py # [module-attr] (This is an actual bug)
42 # hgext/sqlitestore.py # [attribute-error]
43 # hgext/zeroconf/__init__.py # bytes vs str; tests fail on macOS
44 #
15 45 # mercurial/bundlerepo.py # no vfs and ui attrs on bundlerepo
16 46 # mercurial/context.py # many [attribute-error]
17 47 # mercurial/crecord.py # tons of [attribute-error], [module-attr]
@@ -31,7 +61,6 cd `hg root`
31 61 # mercurial/pure/parsers.py # [attribute-error]
32 62 # mercurial/repoview.py # [attribute-error]
33 63 # mercurial/testing/storage.py # tons of [attribute-error]
34 # mercurial/ui.py # [attribute-error], [wrong-arg-types]
35 64 # mercurial/unionrepo.py # ui, svfs, unfiltered [attribute-error]
36 65 # mercurial/win32.py # [not-callable]
37 66 # mercurial/wireprotoframing.py # [unsupported-operands], [attribute-error], [import-error]
@@ -43,7 +72,37 cd `hg root`
43 72
44 73 # TODO: include hgext and hgext3rd
45 74
46 pytype -V 3.7 --keep-going --jobs auto mercurial \
75 pytype -V 3.7 --keep-going --jobs auto \
76 doc/check-seclevel.py hgdemandimport hgext mercurial \
77 -x hgext/absorb.py \
78 -x hgext/bugzilla.py \
79 -x hgext/convert/bzr.py \
80 -x hgext/convert/cvs.py \
81 -x hgext/convert/cvsps.py \
82 -x hgext/convert/p4.py \
83 -x hgext/convert/subversion.py \
84 -x hgext/fastannotate/context.py \
85 -x hgext/fastannotate/formatter.py \
86 -x hgext/fsmonitor/__init__.py \
87 -x hgext/git/__init__.py \
88 -x hgext/githelp.py \
89 -x hgext/hgk.py \
90 -x hgext/histedit.py \
91 -x hgext/infinitepush \
92 -x hgext/keyword.py \
93 -x hgext/largefiles/storefactory.py \
94 -x hgext/lfs/__init__.py \
95 -x hgext/narrow/narrowbundle2.py \
96 -x hgext/narrow/narrowcommands.py \
97 -x hgext/rebase.py \
98 -x hgext/remotefilelog/basepack.py \
99 -x hgext/remotefilelog/basestore.py \
100 -x hgext/remotefilelog/contentstore.py \
101 -x hgext/remotefilelog/fileserverclient.py \
102 -x hgext/remotefilelog/remotefilectx.py \
103 -x hgext/remotefilelog/shallowbundle.py \
104 -x hgext/sqlitestore.py \
105 -x hgext/zeroconf/__init__.py \
47 106 -x mercurial/bundlerepo.py \
48 107 -x mercurial/context.py \
49 108 -x mercurial/crecord.py \
@@ -64,9 +123,11 pytype -V 3.7 --keep-going --jobs auto m
64 123 -x mercurial/repoview.py \
65 124 -x mercurial/testing/storage.py \
66 125 -x mercurial/thirdparty \
67 -x mercurial/ui.py \
68 126 -x mercurial/unionrepo.py \
69 127 -x mercurial/win32.py \
70 128 -x mercurial/wireprotoframing.py \
71 129 -x mercurial/wireprotov1peer.py \
72 130 -x mercurial/wireprotov1server.py
131
132 echo 'pytype crashed while generating the following type stubs:'
133 find .pytype/pyi -name '*.pyi' | xargs grep -l '# Caught error' | sort
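For readers without the shell at hand, an equivalent scan of the generated
stubs could be written in Python (a sketch, assuming the `.pytype/pyi` output
directory shown above):

    import pathlib

    # List generated stubs where pytype recorded a crash, mirroring the
    # `find ... | xargs grep -l '# Caught error'` pipeline above.
    for pyi in sorted(pathlib.Path('.pytype/pyi').rglob('*.pyi')):
        if '# Caught error' in pyi.read_text(errors='replace'):
            print(pyi)
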
@@ -20,7 +20,7 for inline in (True, False):
20 20 index, cache = parsers.parse_index2(data, inline)
21 21 index.slicechunktodensity(list(range(len(index))), 0.5, 262144)
22 22 index.stats()
23 index.findsnapshots({}, 0)
23 index.findsnapshots({}, 0, len(index) - 1)
24 24 10 in index
25 25 for rev in range(len(index)):
26 26 index.reachableroots(0, [len(index)-1], [rev])
@@ -42,6 +42,7 rust-cargo-test:
42 42 script:
43 43 - echo "python used, $PYTHON"
44 44 - make rust-tests
45 - make cargo-clippy
45 46 variables:
46 47 PYTHON: python3
47 48 CI_CLEVER_CLOUD_FLAVOR: S
@@ -91,7 +92,8 check-pytype:
91 92 - hg -R /tmp/mercurial-ci/ update `hg log --rev '.' --template '{node}'`
92 93 - cd /tmp/mercurial-ci/
93 94 - make local PYTHON=$PYTHON
94 - $PYTHON -m pip install --user -U libcst==0.3.20 pytype==2022.03.29
95 - $PYTHON -m pip install --user -U libcst==0.3.20 pytype==2022.11.18
96 - ./contrib/setup-pytype.sh
95 97 script:
96 98 - echo "Entering script section"
97 99 - sh contrib/check-pytype.sh
@@ -235,6 +235,7 revlogopts = getattr(
235 235
236 236 cmdtable = {}
237 237
238
238 239 # for "historical portability":
239 240 # define parsealiases locally, because cmdutil.parsealiases has been
240 241 # available since 1.5 (or 6252852b4332)
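A condensed sketch of that "historical portability" pattern (the fallback body
is illustrative; `cmdutil.parsealiases` is the real helper being probed):

    def parsealiases_compat(cmd):
        # Probe the running Mercurial for the helper and fall back to a
        # local definition when running against a release that lacks it.
        from mercurial import cmdutil
        if hasattr(cmdutil, 'parsealiases'):  # available since 1.5
            return cmdutil.parsealiases(cmd)
        return cmd.split(b"|")  # minimal local stand-in
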
@@ -573,7 +574,6 def _timer(
573 574
574 575
575 576 def formatone(fm, timings, title=None, result=None, displayall=False):
576
577 577 count = len(timings)
578 578
579 579 fm.startitem()
@@ -815,7 +815,12 def perfstatus(ui, repo, **opts):
815 815 )
816 816 sum(map(bool, s))
817 817
818 timer(status_dirstate)
818 if util.safehasattr(dirstate, 'running_status'):
819 with dirstate.running_status(repo):
820 timer(status_dirstate)
821 dirstate.invalidate()
822 else:
823 timer(status_dirstate)
819 824 else:
820 825 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
821 826 fm.end()
@@ -997,11 +1002,16 def perfdiscovery(ui, repo, path, **opts
997 1002 timer, fm = gettimer(ui, opts)
998 1003
999 1004 try:
1000 from mercurial.utils.urlutil import get_unique_pull_path
1001
1002 path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
1005 from mercurial.utils.urlutil import get_unique_pull_path_obj
1006
1007 path = get_unique_pull_path_obj(b'perfdiscovery', ui, path)
1003 1008 except ImportError:
1004 path = ui.expandpath(path)
1009 try:
1010 from mercurial.utils.urlutil import get_unique_pull_path
1011
1012 path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
1013 except ImportError:
1014 path = ui.expandpath(path)
1005 1015
1006 1016 def s():
1007 1017 repos[1] = hg.peer(ui, opts, path)
@@ -1469,7 +1479,8 def perfdirstatewrite(ui, repo, **opts):
1469 1479 def d():
1470 1480 ds.write(repo.currenttransaction())
1471 1481
1472 timer(d, setup=setup)
1482 with repo.wlock():
1483 timer(d, setup=setup)
1473 1484 fm.end()
1474 1485
1475 1486
@@ -1613,7 +1624,11 def perfphasesremote(ui, repo, dest=None
1613 1624 b'default repository not configured!',
1614 1625 hint=b"see 'hg help config.paths'",
1615 1626 )
1616 dest = path.pushloc or path.loc
1627 if util.safehasattr(path, 'main_path'):
1628 path = path.get_push_variant()
1629 dest = path.loc
1630 else:
1631 dest = path.pushloc or path.loc
1617 1632 ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
1618 1633 other = hg.peer(repo, opts, dest)
1619 1634
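The `util.safehasattr()` probe used here is essentially a sentinel-based
`getattr()`; a sketch of its shape:

    _notset = object()

    def safehasattr(thing, attr):
        # Unlike Python 2's hasattr(), which swallowed any exception raised
        # by a property, this only treats a missing attribute as absent.
        return getattr(thing, attr, _notset) is not _notset
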
@@ -7,14 +7,12
7 7
8 8
9 9 import abc
10 import builtins
10 11 import re
11 import sys
12 12
13 13 ####################
14 14 # for Python3 compatibility (almost comes from mercurial/pycompat.py)
15 15
16 ispy3 = sys.version_info[0] >= 3
17
18 16
19 17 def identity(a):
20 18 return a
@@ -38,27 +36,19 def rapply(f, xs):
38 36 return _rapply(f, xs)
39 37
40 38
41 if ispy3:
42 import builtins
43
44 def bytestr(s):
45 # tiny version of pycompat.bytestr
46 return s.encode('latin1')
47
48 def sysstr(s):
49 if isinstance(s, builtins.str):
50 return s
51 return s.decode('latin-1')
52
53 def opentext(f):
54 return open(f, 'r')
39 def bytestr(s):
40 # tiny version of pycompat.bytestr
41 return s.encode('latin1')
55 42
56 43
57 else:
58 bytestr = str
59 sysstr = identity
44 def sysstr(s):
45 if isinstance(s, builtins.str):
46 return s
47 return s.decode('latin-1')
60 48
61 opentext = open
49
50 def opentext(f):
51 return open(f, 'r')
62 52
63 53
64 54 def b2s(x):
@@ -46,7 +46,7 def showavailables(ui, initlevel):
46 46
47 47
48 48 def checkseclevel(ui, doc, name, initlevel):
49 ui.notenoi18n('checking "%s"\n' % name)
49 ui.notenoi18n(('checking "%s"\n' % name).encode('utf-8'))
50 50 if not isinstance(doc, bytes):
51 51 doc = doc.encode('utf-8')
52 52 blocks, pruned = minirst.parse(doc, 0, ['verbose'])
@@ -70,14 +70,18 def checkseclevel(ui, doc, name, initlev
70 70 nextlevel = mark2level[mark]
71 71 if curlevel < nextlevel and curlevel + 1 != nextlevel:
72 72 ui.warnnoi18n(
73 'gap of section level at "%s" of %s\n' % (title, name)
73 ('gap of section level at "%s" of %s\n' % (title, name)).encode(
74 'utf-8'
75 )
74 76 )
75 77 showavailables(ui, initlevel)
76 78 errorcnt += 1
77 79 continue
78 80 ui.notenoi18n(
79 'appropriate section level for "%s %s"\n'
80 % (mark * (nextlevel * 2), title)
81 (
82 'appropriate section level for "%s %s"\n'
83 % (mark * (nextlevel * 2), title)
84 ).encode('utf-8')
81 85 )
82 86 curlevel = nextlevel
83 87
@@ -90,7 +94,9 def checkcmdtable(ui, cmdtable, namefmt,
90 94 name = k.split(b"|")[0].lstrip(b"^")
91 95 if not entry[0].__doc__:
92 96 ui.notenoi18n(
93 'skip checking %s: no help document\n' % (namefmt % name)
97 (
98 'skip checking %s: no help document\n' % (namefmt % name)
99 ).encode('utf-8')
94 100 )
95 101 continue
96 102 errorcnt += checkseclevel(
@@ -117,7 +123,9 def checkhghelps(ui):
117 123 mod = extensions.load(ui, name, None)
118 124 if not mod.__doc__:
119 125 ui.notenoi18n(
120 'skip checking %s extension: no help document\n' % name
126 (
127 'skip checking %s extension: no help document\n' % name
128 ).encode('utf-8')
121 129 )
122 130 continue
123 131 errorcnt += checkseclevel(
@@ -144,7 +152,9 def checkfile(ui, filename, initlevel):
144 152 doc = fp.read()
145 153
146 154 ui.notenoi18n(
147 'checking input from %s with initlevel %d\n' % (filename, initlevel)
155 (
156 'checking input from %s with initlevel %d\n' % (filename, initlevel)
157 ).encode('utf-8')
148 158 )
149 159 return checkseclevel(ui, doc, 'input from %s' % filename, initlevel)
150 160
@@ -23,8 +23,6 This also has some limitations compared
23 23 enabled.
24 24 """
25 25
26 # This line is unnecessary, but it satisfies test-check-py3-compat.t.
27
28 26 import contextlib
29 27 import importlib.util
30 28 import sys
@@ -39,10 +37,16 class _lazyloaderex(importlib.util.LazyL
39 37 the ignore list.
40 38 """
41 39
40 _HAS_DYNAMIC_ATTRIBUTES = True # help pytype not flag self.loader
41
42 42 def exec_module(self, module):
43 43 """Make the module load lazily."""
44 44 with tracing.log('demandimport %s', module):
45 45 if _deactivated or module.__name__ in ignores:
46 # Reset the loader on the module as super() does (issue6725)
47 module.__spec__.loader = self.loader
48 module.__loader__ = self.loader
49
46 50 self.loader.exec_module(module)
47 51 else:
48 52 super().exec_module(module)
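The class above builds on `importlib.util.LazyLoader`; the stdlib recipe it
extends looks roughly like this (standard library API, module name supplied by
the caller):

    import importlib.util
    import sys

    def lazy_import(name):
        # Defer executing a module until its first attribute access.
        spec = importlib.util.find_spec(name)
        loader = importlib.util.LazyLoader(spec.loader)
        spec.loader = loader
        module = importlib.util.module_from_spec(spec)
        sys.modules[name] = module
        loader.exec_module(module)
        return module
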
@@ -881,7 +881,7 class fixupstate:
881 881
882 882 dirstate._fsmonitorstate.invalidate = noop
883 883 try:
884 with dirstate.parentchange():
884 with dirstate.changing_parents(self.repo):
885 885 dirstate.rebuild(ctx.node(), ctx.manifest(), self.paths)
886 886 finally:
887 887 restore()
@@ -46,6 +46,7 command = registrar.command(cmdtable)
46 46 _(b'mark a branch as closed, hiding it from the branch list'),
47 47 ),
48 48 (b's', b'secret', None, _(b'use the secret phase for committing')),
49 (b'', b'draft', None, _(b'use the draft phase for committing')),
49 50 (b'n', b'note', b'', _(b'store a note on the amend')),
50 51 ]
51 52 + cmdutil.walkopts
@@ -64,6 +65,7 def amend(ui, repo, *pats, **opts):
64 65
65 66 See :hg:`help commit` for more details.
66 67 """
68 cmdutil.check_at_most_one_arg(opts, 'draft', 'secret')
67 69 cmdutil.check_note_size(opts)
68 70
69 71 with repo.wlock(), repo.lock():
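`cmdutil.check_at_most_one_arg()` enforces mutually exclusive flags; a
simplified sketch of the behavior relied on here (error type illustrative):

    def check_at_most_one_arg(opts, *names):
        # Abort when more than one of the named options was supplied,
        # e.g. --draft together with --secret.
        given = [n for n in names if opts.get(n)]
        if len(given) > 1:
            raise ValueError('cannot specify both --%s and --%s'
                             % (given[0], given[1]))
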
@@ -59,21 +59,29 def mvcheck(orig, ui, repo, *pats, **opt
59 59 opts = pycompat.byteskwargs(opts)
60 60 renames = None
61 61 disabled = opts.pop(b'no_automv', False)
62 if not disabled:
63 threshold = ui.configint(b'automv', b'similarity')
64 if not 0 <= threshold <= 100:
65 raise error.Abort(_(b'automv.similarity must be between 0 and 100'))
66 if threshold > 0:
67 match = scmutil.match(repo[None], pats, opts)
68 added, removed = _interestingfiles(repo, match)
69 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
70 renames = _findrenames(
71 repo, uipathfn, added, removed, threshold / 100.0
72 )
62 with repo.wlock():
63 if not disabled:
64 threshold = ui.configint(b'automv', b'similarity')
65 if not 0 <= threshold <= 100:
66 raise error.Abort(
67 _(b'automv.similarity must be between 0 and 100')
68 )
69 if threshold > 0:
70 match = scmutil.match(repo[None], pats, opts)
71 added, removed = _interestingfiles(repo, match)
72 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
73 renames = _findrenames(
74 repo, uipathfn, added, removed, threshold / 100.0
75 )
73 76
74 with repo.wlock():
75 77 if renames is not None:
76 scmutil._markchanges(repo, (), (), renames)
78 with repo.dirstate.changing_files(repo):
79 # XXX this should be wider and integrated with the commit
80 # transaction, at the same time as we do the `addremove`
81 # logic for commit. However, we can't really do better with
82 # the current extension structure, and this is not worse
83 # than what happened before.
84 scmutil._markchanges(repo, (), (), renames)
77 85 return orig(ui, repo, *pats, **pycompat.strkwargs(opts))
78 86
79 87
@@ -217,6 +217,8 def blackbox(ui, repo, *revs, **opts):
217 217 return
218 218
219 219 limit = opts.get('limit')
220 assert limit is not None # help pytype
221
220 222 fp = repo.vfs(b'blackbox.log', b'r')
221 223 lines = fp.read().split(b'\n')
222 224
@@ -31,11 +31,14 demandimport.IGNORES.update(
31 31
32 32 try:
33 33 # bazaar imports
34 # pytype: disable=import-error
34 35 import breezy.bzr.bzrdir
35 36 import breezy.errors
36 37 import breezy.revision
37 38 import breezy.revisionspec
38 39
40 # pytype: enable=import-error
41
39 42 bzrdir = breezy.bzr.bzrdir
40 43 errors = breezy.errors
41 44 revision = breezy.revision
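The same guard, reduced to its skeleton (module name hypothetical): the
directives tell pytype to tolerate an import it cannot resolve, while the
`try`/`except` handles its absence at runtime.

    try:
        # pytype: disable=import-error
        import optional_binding  # hypothetical optional dependency
        # pytype: enable=import-error
    except ImportError:
        optional_binding = None
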
@@ -608,7 +608,10 class mercurial_source(common.converter_
608 608 files = copyfiles = ctx.manifest()
609 609 if parents:
610 610 if self._changescache[0] == rev:
611 ma, r = self._changescache[1]
611 # TODO: add type hints to avoid this warning, instead of
612 # suppressing it:
613 # No attribute '__iter__' on None [attribute-error]
614 ma, r = self._changescache[1] # pytype: disable=attribute-error
612 615 else:
613 616 ma, r = self._changedfiles(parents[0], ctx)
614 617 if not full:
@@ -243,6 +243,7 class monotone_source(common.converter_s
243 243 m = self.cert_re.match(e)
244 244 if m:
245 245 name, value = m.groups()
246 assert value is not None # help pytype
246 247 value = value.replace(br'\"', b'"')
247 248 value = value.replace(br'\\', b'\\')
248 249 certs[name] = value
@@ -47,11 +47,14 NoRepo = common.NoRepo
47 47 # these bindings.
48 48
49 49 try:
50 # pytype: disable=import-error
50 51 import svn
51 52 import svn.client
52 53 import svn.core
53 54 import svn.ra
54 55 import svn.delta
56
57 # pytype: enable=import-error
55 58 from . import transport
56 59 import warnings
57 60
@@ -722,7 +725,13 class svn_source(converter_source):
722 725 def getchanges(self, rev, full):
723 726 # reuse cache from getchangedfiles
724 727 if self._changescache[0] == rev and not full:
728 # TODO: add type hints to avoid this warning, instead of
729 # suppressing it:
730 # No attribute '__iter__' on None [attribute-error]
731
732 # pytype: disable=attribute-error
725 733 (files, copies) = self._changescache[1]
734 # pytype: enable=attribute-error
726 735 else:
727 736 (files, copies) = self._getchanges(rev, full)
728 737 # caller caches the result, so free it here to release memory
@@ -17,10 +17,13
17 17 # You should have received a copy of the GNU General Public License
18 18 # along with this program; if not, see <http://www.gnu.org/licenses/>.
19 19
20 # pytype: disable=import-error
20 21 import svn.client
21 22 import svn.core
22 23 import svn.ra
23 24
25 # pytype: enable=import-error
26
24 27 Pool = svn.core.Pool
25 28 SubversionException = svn.core.SubversionException
26 29
@@ -37,7 +40,7 svn_config = None
37 40
38 41 def _create_auth_baton(pool):
39 42 """Create a Subversion authentication baton."""
40 import svn.client
43 import svn.client # pytype: disable=import-error
41 44
42 45 # Give the client context baton a suite of authentication
43 46 # providers.
@@ -421,30 +421,31 def reposetup(ui, repo):
421 421 wlock = None
422 422 try:
423 423 wlock = self.wlock()
424 for f in self.dirstate:
425 if not self.dirstate.get_entry(f).maybe_clean:
426 continue
427 if oldeol is not None:
428 if not oldeol.match(f) and not neweol.match(f):
424 with self.dirstate.changing_files(self):
425 for f in self.dirstate:
426 if not self.dirstate.get_entry(f).maybe_clean:
429 427 continue
430 oldkey = None
431 for pattern, key, m in oldeol.patterns:
432 if m(f):
433 oldkey = key
434 break
435 newkey = None
436 for pattern, key, m in neweol.patterns:
437 if m(f):
438 newkey = key
439 break
440 if oldkey == newkey:
441 continue
442 # all normal files need to be looked at again since
443 # the new .hgeol file specifies a different filter
444 self.dirstate.set_possibly_dirty(f)
445 # Write the cache to update mtime and cache .hgeol
446 with self.vfs(b"eol.cache", b"w") as f:
447 f.write(hgeoldata)
428 if oldeol is not None:
429 if not oldeol.match(f) and not neweol.match(f):
430 continue
431 oldkey = None
432 for pattern, key, m in oldeol.patterns:
433 if m(f):
434 oldkey = key
435 break
436 newkey = None
437 for pattern, key, m in neweol.patterns:
438 if m(f):
439 newkey = key
440 break
441 if oldkey == newkey:
442 continue
443 # all normal files need to be looked at again since
444 # the new .hgeol file specifies a different filter
445 self.dirstate.set_possibly_dirty(f)
446 # Write the cache to update mtime and cache .hgeol
447 with self.vfs(b"eol.cache", b"w") as f:
448 f.write(hgeoldata)
448 449 except errormod.LockUnavailable:
449 450 # If we cannot lock the repository and clear the
450 451 # dirstate, then a commit might not see all files
@@ -151,8 +151,11 def annotatepeer(repo):
151 151 ui = repo.ui
152 152
153 153 remotedest = ui.config(b'fastannotate', b'remotepath', b'default')
154 r = urlutil.get_unique_pull_path(b'fastannotate', repo, ui, remotedest)
155 remotepath = r[0]
154 remotepath = urlutil.get_unique_pull_path_obj(
155 b'fastannotate',
156 ui,
157 remotedest,
158 )
156 159 peer = hg.peer(ui, {}, remotepath)
157 160
158 161 try:
@@ -108,9 +108,9 def fetch(ui, repo, source=b'default', *
108 108 )
109 109 )
110 110
111 path = urlutil.get_unique_pull_path(b'fetch', repo, ui, source)[0]
111 path = urlutil.get_unique_pull_path_obj(b'fetch', ui, source)
112 112 other = hg.peer(repo, opts, path)
113 ui.status(_(b'pulling from %s\n') % urlutil.hidepassword(path))
113 ui.status(_(b'pulling from %s\n') % urlutil.hidepassword(path.loc))
114 114 revs = None
115 115 if opts[b'rev']:
116 116 try:
@@ -779,7 +779,7 def writeworkingdir(repo, ctx, filedata,
779 779 newp1 = replacements.get(oldp1, oldp1)
780 780 if newp1 != oldp1:
781 781 assert repo.dirstate.p2() == nullid
782 with repo.dirstate.parentchange():
782 with repo.dirstate.changing_parents(repo):
783 783 scmutil.movedirstate(repo, repo[newp1])
784 784
785 785
@@ -26,8 +26,6
26 26 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 27 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 28
29 # no unicode literals
30
31 29 import inspect
32 30 import math
33 31 import os
@@ -94,7 +92,9 if os.name == "nt":
94 92
95 93 LPDWORD = ctypes.POINTER(wintypes.DWORD)
96 94
97 CreateFile = ctypes.windll.kernel32.CreateFileA
95 _kernel32 = ctypes.windll.kernel32 # pytype: disable=module-attr
96
97 CreateFile = _kernel32.CreateFileA
98 98 CreateFile.argtypes = [
99 99 wintypes.LPSTR,
100 100 wintypes.DWORD,
@@ -106,11 +106,11 if os.name == "nt":
106 106 ]
107 107 CreateFile.restype = wintypes.HANDLE
108 108
109 CloseHandle = ctypes.windll.kernel32.CloseHandle
109 CloseHandle = _kernel32.CloseHandle
110 110 CloseHandle.argtypes = [wintypes.HANDLE]
111 111 CloseHandle.restype = wintypes.BOOL
112 112
113 ReadFile = ctypes.windll.kernel32.ReadFile
113 ReadFile = _kernel32.ReadFile
114 114 ReadFile.argtypes = [
115 115 wintypes.HANDLE,
116 116 wintypes.LPVOID,
@@ -120,7 +120,7 if os.name == "nt":
120 120 ]
121 121 ReadFile.restype = wintypes.BOOL
122 122
123 WriteFile = ctypes.windll.kernel32.WriteFile
123 WriteFile = _kernel32.WriteFile
124 124 WriteFile.argtypes = [
125 125 wintypes.HANDLE,
126 126 wintypes.LPVOID,
@@ -130,15 +130,15 if os.name == "nt":
130 130 ]
131 131 WriteFile.restype = wintypes.BOOL
132 132
133 GetLastError = ctypes.windll.kernel32.GetLastError
133 GetLastError = _kernel32.GetLastError
134 134 GetLastError.argtypes = []
135 135 GetLastError.restype = wintypes.DWORD
136 136
137 SetLastError = ctypes.windll.kernel32.SetLastError
137 SetLastError = _kernel32.SetLastError
138 138 SetLastError.argtypes = [wintypes.DWORD]
139 139 SetLastError.restype = None
140 140
141 FormatMessage = ctypes.windll.kernel32.FormatMessageA
141 FormatMessage = _kernel32.FormatMessageA
142 142 FormatMessage.argtypes = [
143 143 wintypes.DWORD,
144 144 wintypes.LPVOID,
@@ -150,9 +150,9 if os.name == "nt":
150 150 ]
151 151 FormatMessage.restype = wintypes.DWORD
152 152
153 LocalFree = ctypes.windll.kernel32.LocalFree
153 LocalFree = _kernel32.LocalFree
154 154
155 GetOverlappedResult = ctypes.windll.kernel32.GetOverlappedResult
155 GetOverlappedResult = _kernel32.GetOverlappedResult
156 156 GetOverlappedResult.argtypes = [
157 157 wintypes.HANDLE,
158 158 ctypes.POINTER(OVERLAPPED),
@@ -161,9 +161,7 if os.name == "nt":
161 161 ]
162 162 GetOverlappedResult.restype = wintypes.BOOL
163 163
164 GetOverlappedResultEx = getattr(
165 ctypes.windll.kernel32, "GetOverlappedResultEx", None
166 )
164 GetOverlappedResultEx = getattr(_kernel32, "GetOverlappedResultEx", None)
167 165 if GetOverlappedResultEx is not None:
168 166 GetOverlappedResultEx.argtypes = [
169 167 wintypes.HANDLE,
@@ -174,7 +172,7 if os.name == "nt":
174 172 ]
175 173 GetOverlappedResultEx.restype = wintypes.BOOL
176 174
177 WaitForSingleObjectEx = ctypes.windll.kernel32.WaitForSingleObjectEx
175 WaitForSingleObjectEx = _kernel32.WaitForSingleObjectEx
178 176 WaitForSingleObjectEx.argtypes = [
179 177 wintypes.HANDLE,
180 178 wintypes.DWORD,
@@ -182,7 +180,7 if os.name == "nt":
182 180 ]
183 181 WaitForSingleObjectEx.restype = wintypes.DWORD
184 182
185 CreateEvent = ctypes.windll.kernel32.CreateEventA
183 CreateEvent = _kernel32.CreateEventA
186 184 CreateEvent.argtypes = [
187 185 LPDWORD,
188 186 wintypes.BOOL,
@@ -192,7 +190,7 if os.name == "nt":
192 190 CreateEvent.restype = wintypes.HANDLE
193 191
194 192 # Windows Vista is the minimum supported client for CancelIoEx.
195 CancelIoEx = ctypes.windll.kernel32.CancelIoEx
193 CancelIoEx = _kernel32.CancelIoEx
196 194 CancelIoEx.argtypes = [wintypes.HANDLE, ctypes.POINTER(OVERLAPPED)]
197 195 CancelIoEx.restype = wintypes.BOOL
198 196
@@ -691,9 +689,9 class CLIProcessTransport(Transport):
691 689 if self.closed:
692 690 self.close()
693 691 self.closed = False
694 self._connect()
695 res = self.proc.stdin.write(data)
696 self.proc.stdin.close()
692 proc = self._connect()
693 res = proc.stdin.write(data)
694 proc.stdin.close()
697 695 self.closed = True
698 696 return res
699 697
@@ -988,8 +986,12 class client:
988 986 # if invoked via an application with a graphical user interface,
989 987 # this call will cause a brief command window pop-up.
990 988 # We use the flag STARTF_USESHOWWINDOW to avoid this behavior.
989
990 # pytype: disable=module-attr
991 991 startupinfo = subprocess.STARTUPINFO()
992 992 startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
993 # pytype: enable=module-attr
994
993 995 args["startupinfo"] = startupinfo
994 996
995 997 p = subprocess.Popen(cmd, **args)
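Stripped of the surrounding client code, the window-suppression idiom is
(Windows-only APIs, guarded so the sketch also imports cleanly elsewhere):

    import subprocess
    import sys

    def popen_no_console(cmd):
        # Avoid the transient console window when a GUI process spawns a
        # child on Windows; a no-op on other platforms.
        kwargs = {}
        if sys.platform == 'win32':
            si = subprocess.STARTUPINFO()  # pytype: disable=module-attr
            si.dwFlags |= subprocess.STARTF_USESHOWWINDOW
            kwargs['startupinfo'] = si
        return subprocess.Popen(cmd, **kwargs)
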
@@ -1026,7 +1028,11 class client:
1026 1028 if self.transport == CLIProcessTransport:
1027 1029 kwargs["binpath"] = self.binpath
1028 1030
1031 # Only CLIProcessTransport has the binpath kwarg
1032 # pytype: disable=wrong-keyword-args
1029 1033 self.tport = self.transport(self.sockpath, self.timeout, **kwargs)
1034 # pytype: enable=wrong-keyword-args
1035
1030 1036 self.sendConn = self.sendCodec(self.tport)
1031 1037 self.recvConn = self.recvCodec(self.tport)
1032 1038 self.pid = os.getpid()
@@ -26,8 +26,6
26 26 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 27 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 28
29 # no unicode literals
30
31 29
32 30 def parse_version(vstr):
33 31 res = 0
@@ -26,45 +26,28
26 26 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 27 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 28
29 # no unicode literals
30
31 29 import sys
32 30
33 31
34 32 """Compatibility module across Python 2 and 3."""
35 33
36 34
37 PYTHON2 = sys.version_info < (3, 0)
38 35 PYTHON3 = sys.version_info >= (3, 0)
39 36
40 37 # This is adapted from https://bitbucket.org/gutworth/six, and used under the
41 38 # MIT license. See LICENSE for a full copyright notice.
42 if PYTHON3:
43
44 def reraise(tp, value, tb=None):
45 try:
46 if value is None:
47 value = tp()
48 if value.__traceback__ is not tb:
49 raise value.with_traceback(tb)
50 raise value
51 finally:
52 value = None
53 tb = None
54 39
55 40
56 else:
57 exec(
58 """
59 41 def reraise(tp, value, tb=None):
60 42 try:
61 raise tp, value, tb
43 if value is None:
44 value = tp()
45 if value.__traceback__ is not tb:
46 raise value.with_traceback(tb)
47 raise value
62 48 finally:
49 value = None
63 50 tb = None
64 """.strip()
65 )
51
66 52
67 if PYTHON3:
68 UNICODE = str
69 else:
70 UNICODE = unicode # noqa: F821 We handled versioning above
53 UNICODE = str
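With Python 2 support gone, the surviving `reraise()` is the standard
Python 3 idiom; a self-contained usage sketch:

    import sys

    def capture_and_reraise():
        # Demonstrates the Python-3 idiom the code above now relies on.
        try:
            1 // 0
        except ZeroDivisionError:
            tp, value, tb = sys.exc_info()
            raise value.with_traceback(tb)  # same exception, original traceback
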
@@ -26,8 +26,6
26 26 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 27 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 28
29 # no unicode literals
30
31 29 import sys
32 30
33 31 from . import compat
@@ -26,8 +26,6
26 26 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 27 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 28
29 # no unicode literals
30
31 29 import ctypes
32 30
33 31
@@ -26,8 +26,6
26 26 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 27 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 28
29 # no unicode literals
30
31 29 import binascii
32 30 import collections
33 31 import ctypes
@@ -53,17 +51,15 BSER_TEMPLATE = b"\x0b"
53 51 BSER_SKIP = b"\x0c"
54 52 BSER_UTF8STRING = b"\x0d"
55 53
56 if compat.PYTHON3:
57 STRING_TYPES = (str, bytes)
58 unicode = str
54 STRING_TYPES = (str, bytes)
55 unicode = str
56
59 57
60 def tobytes(i):
61 return str(i).encode("ascii")
58 def tobytes(i):
59 return str(i).encode("ascii")
62 60
63 long = int
64 else:
65 STRING_TYPES = (unicode, str)
66 tobytes = bytes
61
62 long = int
67 63
68 64 # Leave room for the serialization header, which includes
69 65 # our overall length. To make things simpler, we'll use an
@@ -89,7 +85,7 def _int_size(x):
89 85 def _buf_pos(buf, pos):
90 86 ret = buf[pos]
91 87 # Normalize the return type to bytes
92 if compat.PYTHON3 and not isinstance(ret, bytes):
88 if not isinstance(ret, bytes):
93 89 ret = bytes((ret,))
94 90 return ret
95 91
@@ -252,10 +248,7 class _bser_buffer:
252 248 else:
253 249 raise RuntimeError("Cannot represent this mapping value")
254 250 self.wpos += needed
255 if compat.PYTHON3:
256 iteritems = val.items()
257 else:
258 iteritems = val.iteritems() # noqa: B301 Checked version above
251 iteritems = val.items()
259 252 for k, v in iteritems:
260 253 self.append_string(k)
261 254 self.append_recursive(v)
@@ -260,7 +260,12 class gitdirstate:
260 260 # # TODO what the heck is this
261 261 _filecache = set()
262 262
263 def pendingparentchange(self):
263 def is_changing_parents(self):
264 # TODO: we need to implement the context manager bits and
265 # correctly stage/revert index edits.
266 return False
267
268 def is_changing_any(self):
264 269 # TODO: we need to implement the context manager bits and
265 270 # correctly stage/revert index edits.
266 271 return False
@@ -322,14 +327,6 class gitdirstate:
322 327 r[path] = s
323 328 return r
324 329
325 def savebackup(self, tr, backupname):
326 # TODO: figure out a strategy for saving index backups.
327 pass
328
329 def restorebackup(self, tr, backupname):
330 # TODO: figure out a strategy for saving index backups.
331 pass
332
333 330 def set_tracked(self, f, reset_copy=False):
334 331 # TODO: support copies and reset_copy=True
335 332 uf = pycompat.fsdecode(f)
@@ -384,7 +381,7 class gitdirstate:
384 381 pass
385 382
386 383 @contextlib.contextmanager
387 def parentchange(self):
384 def changing_parents(self, repo):
388 385 # TODO: track this maybe?
389 386 yield
390 387
@@ -392,10 +389,6 class gitdirstate:
392 389 # TODO: should this be added to the dirstate interface?
393 390 self._plchangecallbacks[category] = callback
394 391
395 def clearbackup(self, tr, backupname):
396 # TODO
397 pass
398
399 392 def setbranch(self, branch):
400 393 raise error.Abort(
401 394 b'git repos do not support branches. try using bookmarks'
@@ -9,7 +9,7 def get_pygit2():
9 9 global pygit2_module
10 10 if pygit2_module is None:
11 11 try:
12 import pygit2 as pygit2_module
12 import pygit2 as pygit2_module # pytype: disable=import-error
13 13
14 14 pygit2_module.InvalidSpecError
15 15 except (ImportError, AttributeError):
@@ -352,7 +352,8 def _dosign(ui, repo, *revs, **opts):
352 352 sigsfile.close()
353 353
354 354 if b'.hgsigs' not in repo.dirstate:
355 repo[None].add([b".hgsigs"])
355 with repo.dirstate.changing_files(repo):
356 repo[None].add([b".hgsigs"])
356 357
357 358 if opts[b"no_commit"]:
358 359 return
@@ -1051,12 +1051,11 def findoutgoing(ui, repo, remote=None,
1051 1051 if opts is None:
1052 1052 opts = {}
1053 1053 path = urlutil.get_unique_push_path(b'histedit', repo, ui, remote)
1054 dest = path.pushloc or path.loc
1055
1056 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
1054
1055 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(path.loc))
1057 1056
1058 1057 revs, checkout = hg.addbranchrevs(repo, repo, (path.branch, []), None)
1059 other = hg.peer(repo, opts, dest)
1058 other = hg.peer(repo, opts, path)
1060 1059
1061 1060 if revs:
1062 1061 revs = [repo.lookup(rev) for rev in revs]
@@ -32,7 +32,10 from mercurial import (
32 32 pycompat,
33 33 registrar,
34 34 )
35 from mercurial.utils import dateutil
35 from mercurial.utils import (
36 dateutil,
37 stringutil,
38 )
36 39 from .. import notify
37 40
38 41 configtable = {}
@@ -98,7 +101,7 def _report_commit(ui, repo, ctx):
98 101 try:
99 102 msg = mail.parsebytes(data)
100 103 except emailerrors.MessageParseError as inst:
101 raise error.Abort(inst)
104 raise error.Abort(stringutil.forcebytestr(inst))
102 105
103 106 msg['In-reply-to'] = notify.messageid(ctx, domain, messageidseed)
104 107 msg['Message-Id'] = notify.messageid(
@@ -31,7 +31,10 from mercurial import (
31 31 pycompat,
32 32 registrar,
33 33 )
34 from mercurial.utils import dateutil
34 from mercurial.utils import (
35 dateutil,
36 stringutil,
37 )
35 38 from .. import notify
36 39
37 40 configtable = {}
@@ -97,7 +100,7 def _report_commit(ui, repo, ctx):
97 100 try:
98 101 msg = mail.parsebytes(data)
99 102 except emailerrors.MessageParseError as inst:
100 raise error.Abort(inst)
103 raise error.Abort(stringutil.forcebytestr(inst))
101 104
102 105 msg['In-reply-to'] = notify.messageid(ctx, domain, messageidseed)
103 106 msg['Message-Id'] = notify.messageid(
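`error.Abort` expects bytes while `MessageParseError` carries a `str` message;
`stringutil.forcebytestr` bridges that, roughly along these lines (a sketch,
not the actual helper):

    def forcebytestr(obj):
        # Coerce any object (here, an exception) to bytes, tolerating
        # non-UTF-8 data rather than raising a second error.
        return str(obj).encode('utf-8', 'replace')
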
@@ -683,12 +683,10 def _lookupwrap(orig):
683 683 def _pull(orig, ui, repo, source=b"default", **opts):
684 684 opts = pycompat.byteskwargs(opts)
685 685 # Copy paste from `pull` command
686 source, branches = urlutil.get_unique_pull_path(
686 path = urlutil.get_unique_pull_path_obj(
687 687 b"infinite-push's pull",
688 repo,
689 688 ui,
690 689 source,
691 default_branches=opts.get(b'branch'),
692 690 )
693 691
694 692 scratchbookmarks = {}
@@ -709,7 +707,7 def _pull(orig, ui, repo, source=b"defau
709 707 bookmarks.append(bookmark)
710 708
711 709 if scratchbookmarks:
712 other = hg.peer(repo, opts, source)
710 other = hg.peer(repo, opts, path)
713 711 try:
714 712 fetchedbookmarks = other.listkeyspatterns(
715 713 b'bookmarks', patterns=scratchbookmarks
@@ -734,14 +732,14 def _pull(orig, ui, repo, source=b"defau
734 732 try:
735 733 # Remote scratch bookmarks will be deleted because remotenames doesn't
736 734 # know about them. Let's save it before pull and restore after
737 remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, source)
738 result = orig(ui, repo, source, **pycompat.strkwargs(opts))
735 remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, path.loc)
736 result = orig(ui, repo, path.loc, **pycompat.strkwargs(opts))
739 737 # TODO(stash): race condition is possible
740 738 # if scratch bookmarks was updated right after orig.
741 739 # But that's unlikely and shouldn't be harmful.
742 740 if common.isremotebooksenabled(ui):
743 741 remotescratchbookmarks.update(scratchbookmarks)
744 _saveremotebookmarks(repo, remotescratchbookmarks, source)
742 _saveremotebookmarks(repo, remotescratchbookmarks, path.loc)
745 743 else:
746 744 _savelocalbookmarks(repo, scratchbookmarks)
747 745 return result
@@ -849,14 +847,14 def _push(orig, ui, repo, *dests, **opts
849 847 raise error.Abort(msg)
850 848
851 849 path = paths[0]
852 destpath = path.pushloc or path.loc
850 destpath = path.loc
853 851 # Remote scratch bookmarks will be deleted because remotenames doesn't
854 852 # know about them. Let's save it before push and restore after
855 853 remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, destpath)
856 854 result = orig(ui, repo, *dests, **pycompat.strkwargs(opts))
857 855 if common.isremotebooksenabled(ui):
858 856 if bookmark and scratchpush:
859 other = hg.peer(repo, opts, destpath)
857 other = hg.peer(repo, opts, path)
860 858 try:
861 859 fetchedbookmarks = other.listkeyspatterns(
862 860 b'bookmarks', patterns=[bookmark]
@@ -567,8 +567,12 def journal(ui, repo, *args, **opts):
567 567 )
568 568 fm.write(b'newnodes', b'%s', formatnodes(entry.newhashes))
569 569 fm.condwrite(ui.verbose, b'user', b' %-8s', entry.user)
570
571 # ``name`` is bytes, or None only if 'all' was an option.
570 572 fm.condwrite(
573 # pytype: disable=attribute-error
571 574 opts.get(b'all') or name.startswith(b're:'),
575 # pytype: enable=attribute-error
572 576 b'name',
573 577 b' %-8s',
574 578 entry.name,
@@ -437,7 +437,7 def _kwfwrite(ui, repo, expand, *pats, *
437 437 if len(wctx.parents()) > 1:
438 438 raise error.Abort(_(b'outstanding uncommitted merge'))
439 439 kwt = getattr(repo, '_keywordkwt', None)
440 with repo.wlock():
440 with repo.wlock(), repo.dirstate.changing_files(repo):
441 441 status = _status(ui, repo, wctx, kwt, *pats, **opts)
442 442 if status.modified or status.added or status.removed or status.deleted:
443 443 raise error.Abort(_(b'outstanding uncommitted changes'))
@@ -530,17 +530,18 def demo(ui, repo, *args, **opts):
530 530 demoitems(b'keywordmaps', kwmaps.items())
531 531 keywords = b'$' + b'$\n$'.join(sorted(kwmaps.keys())) + b'$\n'
532 532 repo.wvfs.write(fn, keywords)
533 repo[None].add([fn])
534 ui.note(_(b'\nkeywords written to %s:\n') % fn)
535 ui.note(keywords)
536 533 with repo.wlock():
534 with repo.dirstate.changing_files(repo):
535 repo[None].add([fn])
536 ui.note(_(b'\nkeywords written to %s:\n') % fn)
537 ui.note(keywords)
537 538 repo.dirstate.setbranch(b'demobranch')
538 for name, cmd in ui.configitems(b'hooks'):
539 if name.split(b'.', 1)[0].find(b'commit') > -1:
540 repo.ui.setconfig(b'hooks', name, b'', b'keyword')
541 msg = _(b'hg keyword configuration and expansion example')
542 ui.note((b"hg ci -m '%s'\n" % msg))
543 repo.commit(text=msg)
539 for name, cmd in ui.configitems(b'hooks'):
540 if name.split(b'.', 1)[0].find(b'commit') > -1:
541 repo.ui.setconfig(b'hooks', name, b'', b'keyword')
542 msg = _(b'hg keyword configuration and expansion example')
543 ui.note((b"hg ci -m '%s'\n" % msg))
544 repo.commit(text=msg)
544 545 ui.status(_(b'\n\tkeywords expanded\n'))
545 546 ui.write(repo.wread(fn))
546 547 repo.wvfs.rmtree(repo.root)
@@ -696,7 +697,7 def kw_amend(orig, ui, repo, old, extra,
696 697 kwt = getattr(repo, '_keywordkwt', None)
697 698 if kwt is None:
698 699 return orig(ui, repo, old, extra, pats, opts)
699 with repo.wlock(), repo.dirstate.parentchange():
700 with repo.wlock(), repo.dirstate.changing_parents(repo):
700 701 kwt.postcommit = True
701 702 newid = orig(ui, repo, old, extra, pats, opts)
702 703 if newid != old.node():
@@ -762,7 +763,7 def kw_dorecord(orig, ui, repo, commitfu
762 763 if ctx != recctx:
763 764 modified, added = _preselect(wstatus, recctx.files())
764 765 kwt.restrict = False
765 with repo.dirstate.parentchange():
766 with repo.dirstate.changing_parents(repo):
766 767 kwt.overwrite(recctx, modified, False, True)
767 768 kwt.overwrite(recctx, added, False, True, True)
768 769 kwt.restrict = True
@@ -107,6 +107,7 command.
107 107
108 108 from mercurial import (
109 109 cmdutil,
110 configitems,
110 111 extensions,
111 112 exthelper,
112 113 hg,
@@ -135,7 +136,7 eh.merge(proto.eh)
135 136 eh.configitem(
136 137 b'largefiles',
137 138 b'minsize',
138 default=eh.configitem.dynamicdefault,
139 default=configitems.dynamicdefault,
139 140 )
140 141 eh.configitem(
141 142 b'largefiles',
@@ -219,7 +219,9 def lfconvert(ui, src, dest, *pats, **op
219 219 success = True
220 220 finally:
221 221 if tolfile:
222 rdst.dirstate.clear()
222 # XXX is this the right context semantically?
223 with rdst.dirstate.changing_parents(rdst):
224 rdst.dirstate.clear()
223 225 release(dstlock, dstwlock)
224 226 if not success:
225 227 # we failed, remove the new directory
@@ -517,53 +519,52 def updatelfiles(
517 519 filelist = set(filelist)
518 520 lfiles = [f for f in lfiles if f in filelist]
519 521
520 with lfdirstate.parentchange():
521 update = {}
522 dropped = set()
523 updated, removed = 0, 0
524 wvfs = repo.wvfs
525 wctx = repo[None]
526 for lfile in lfiles:
527 lfileorig = os.path.relpath(
528 scmutil.backuppath(ui, repo, lfile), start=repo.root
529 )
530 standin = lfutil.standin(lfile)
531 standinorig = os.path.relpath(
532 scmutil.backuppath(ui, repo, standin), start=repo.root
533 )
534 if wvfs.exists(standin):
535 if wvfs.exists(standinorig) and wvfs.exists(lfile):
536 shutil.copyfile(wvfs.join(lfile), wvfs.join(lfileorig))
537 wvfs.unlinkpath(standinorig)
538 expecthash = lfutil.readasstandin(wctx[standin])
539 if expecthash != b'':
540 if lfile not in wctx: # not switched to normal file
541 if repo.dirstate.get_entry(standin).any_tracked:
542 wvfs.unlinkpath(lfile, ignoremissing=True)
543 else:
544 dropped.add(lfile)
522 update = {}
523 dropped = set()
524 updated, removed = 0, 0
525 wvfs = repo.wvfs
526 wctx = repo[None]
527 for lfile in lfiles:
528 lfileorig = os.path.relpath(
529 scmutil.backuppath(ui, repo, lfile), start=repo.root
530 )
531 standin = lfutil.standin(lfile)
532 standinorig = os.path.relpath(
533 scmutil.backuppath(ui, repo, standin), start=repo.root
534 )
535 if wvfs.exists(standin):
536 if wvfs.exists(standinorig) and wvfs.exists(lfile):
537 shutil.copyfile(wvfs.join(lfile), wvfs.join(lfileorig))
538 wvfs.unlinkpath(standinorig)
539 expecthash = lfutil.readasstandin(wctx[standin])
540 if expecthash != b'':
541 if lfile not in wctx: # not switched to normal file
542 if repo.dirstate.get_entry(standin).any_tracked:
543 wvfs.unlinkpath(lfile, ignoremissing=True)
544 else:
545 dropped.add(lfile)
545 546
546 # use normallookup() to allocate an entry in largefiles
547 # dirstate to prevent lfilesrepo.status() from reporting
548 # missing files as removed.
549 lfdirstate.update_file(
550 lfile,
551 p1_tracked=True,
552 wc_tracked=True,
553 possibly_dirty=True,
554 )
555 update[lfile] = expecthash
556 else:
557 # Remove lfiles for which the standin is deleted, unless the
558 # lfile is added to the repository again. This happens when a
559 # largefile is converted back to a normal file: the standin
560 # disappears, but a new (normal) file appears as the lfile.
561 if (
562 wvfs.exists(lfile)
563 and repo.dirstate.normalize(lfile) not in wctx
564 ):
565 wvfs.unlinkpath(lfile)
566 removed += 1
547 # allocate an entry in largefiles dirstate to prevent
548 # lfilesrepo.status() from reporting missing files as
549 # removed.
550 lfdirstate.hacky_extension_update_file(
551 lfile,
552 p1_tracked=True,
553 wc_tracked=True,
554 possibly_dirty=True,
555 )
556 update[lfile] = expecthash
557 else:
558 # Remove lfiles for which the standin is deleted, unless the
559 # lfile is added to the repository again. This happens when a
560 # largefile is converted back to a normal file: the standin
561 # disappears, but a new (normal) file appears as the lfile.
562 if (
563 wvfs.exists(lfile)
564 and repo.dirstate.normalize(lfile) not in wctx
565 ):
566 wvfs.unlinkpath(lfile)
567 removed += 1
567 568
568 569 # largefile processing might be slow and be interrupted - be prepared
569 570 lfdirstate.write(repo.currenttransaction())
@@ -580,41 +581,42 def updatelfiles(
580 581 statuswriter(_(b'getting changed largefiles\n'))
581 582 cachelfiles(ui, repo, None, lfiles)
582 583
583 with lfdirstate.parentchange():
584 for lfile in lfiles:
585 update1 = 0
584 for lfile in lfiles:
585 update1 = 0
586 586
587 expecthash = update.get(lfile)
588 if expecthash:
589 if not lfutil.copyfromcache(repo, expecthash, lfile):
590 # failed ... but already removed and set to normallookup
591 continue
592 # Synchronize largefile dirstate to the last modified
593 # time of the file
594 lfdirstate.update_file(
595 lfile, p1_tracked=True, wc_tracked=True
596 )
587 expecthash = update.get(lfile)
588 if expecthash:
589 if not lfutil.copyfromcache(repo, expecthash, lfile):
590 # failed ... but already removed and set to normallookup
591 continue
592 # Synchronize largefile dirstate to the last modified
593 # time of the file
594 lfdirstate.hacky_extension_update_file(
595 lfile,
596 p1_tracked=True,
597 wc_tracked=True,
598 )
599 update1 = 1
600
601 # copy the exec mode of largefile standin from the repository's
602 # dirstate to its state in the lfdirstate.
603 standin = lfutil.standin(lfile)
604 if wvfs.exists(standin):
605 # exec is decided by the users permissions using mask 0o100
606 standinexec = wvfs.stat(standin).st_mode & 0o100
607 st = wvfs.stat(lfile)
608 mode = st.st_mode
609 if standinexec != mode & 0o100:
610 # first remove all X bits, then shift all R bits to X
611 mode &= ~0o111
612 if standinexec:
613 mode |= (mode >> 2) & 0o111 & ~util.umask
614 wvfs.chmod(lfile, mode)
597 615 update1 = 1
598 616
599 # copy the exec mode of largefile standin from the repository's
600 # dirstate to its state in the lfdirstate.
601 standin = lfutil.standin(lfile)
602 if wvfs.exists(standin):
603 # exec is decided by the users permissions using mask 0o100
604 standinexec = wvfs.stat(standin).st_mode & 0o100
605 st = wvfs.stat(lfile)
606 mode = st.st_mode
607 if standinexec != mode & 0o100:
608 # first remove all X bits, then shift all R bits to X
609 mode &= ~0o111
610 if standinexec:
611 mode |= (mode >> 2) & 0o111 & ~util.umask
612 wvfs.chmod(lfile, mode)
613 update1 = 1
617 updated += update1
614 618
615 updated += update1
616
617 lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
619 lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
618 620
619 621 lfdirstate.write(repo.currenttransaction())
620 622 if lfiles:
@@ -159,6 +159,9 def findfile(repo, hash):
159 159
160 160
161 161 class largefilesdirstate(dirstate.dirstate):
162 _large_file_dirstate = True
163 _tr_key_suffix = b'-large-files'
164
162 165 def __getitem__(self, key):
163 166 return super(largefilesdirstate, self).__getitem__(unixpath(key))
164 167
@@ -204,7 +207,13 def openlfdirstate(ui, repo, create=True
204 207 """
205 208 Return a dirstate object that tracks largefiles: i.e. its root is
206 209 the repo root, but it is saved in .hg/largefiles/dirstate.
210
211 If a dirstate object already exists and is being used for a 'changing_*'
212 context, it will be returned.
207 213 """
214 sub_dirstate = getattr(repo.dirstate, '_sub_dirstate', None)
215 if sub_dirstate is not None:
216 return sub_dirstate
208 217 vfs = repo.vfs
209 218 lfstoredir = longname
210 219 opener = vfsmod.vfs(vfs.join(lfstoredir))
@@ -223,20 +232,29 def openlfdirstate(ui, repo, create=True
223 232 # it. This ensures that we create it on the first meaningful
224 233 # largefiles operation in a new clone.
225 234 if create and not vfs.exists(vfs.join(lfstoredir, b'dirstate')):
226 matcher = getstandinmatcher(repo)
227 standins = repo.dirstate.walk(
228 matcher, subrepos=[], unknown=False, ignored=False
229 )
235 try:
236 with repo.wlock(wait=False), lfdirstate.changing_files(repo):
237 matcher = getstandinmatcher(repo)
238 standins = repo.dirstate.walk(
239 matcher, subrepos=[], unknown=False, ignored=False
240 )
241
242 if len(standins) > 0:
243 vfs.makedirs(lfstoredir)
230 244
231 if len(standins) > 0:
232 vfs.makedirs(lfstoredir)
233
234 with lfdirstate.parentchange():
235 for standin in standins:
236 lfile = splitstandin(standin)
237 lfdirstate.update_file(
238 lfile, p1_tracked=True, wc_tracked=True, possibly_dirty=True
239 )
245 for standin in standins:
246 lfile = splitstandin(standin)
247 lfdirstate.hacky_extension_update_file(
248 lfile,
249 p1_tracked=True,
250 wc_tracked=True,
251 possibly_dirty=True,
252 )
253 except error.LockError:
254 # Assume that whatever was holding the lock was important.
255 # If we were doing something important, we would already have
256 # either the lock or a largefile dirstate.
257 pass
240 258 return lfdirstate
241 259
242 260
@@ -565,10 +583,14 def getstandinsstate(repo):
565 583 def synclfdirstate(repo, lfdirstate, lfile, normallookup):
566 584 lfstandin = standin(lfile)
567 585 if lfstandin not in repo.dirstate:
568 lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=False)
586 lfdirstate.hacky_extension_update_file(
587 lfile,
588 p1_tracked=False,
589 wc_tracked=False,
590 )
569 591 else:
570 592 entry = repo.dirstate.get_entry(lfstandin)
571 lfdirstate.update_file(
593 lfdirstate.hacky_extension_update_file(
572 594 lfile,
573 595 wc_tracked=entry.tracked,
574 596 p1_tracked=entry.p1_tracked,
@@ -580,8 +602,7 def synclfdirstate(repo, lfdirstate, lfi
580 602 def markcommitted(orig, ctx, node):
581 603 repo = ctx.repo()
582 604
583 lfdirstate = openlfdirstate(repo.ui, repo)
584 with lfdirstate.parentchange():
605 with repo.dirstate.changing_parents(repo):
585 606 orig(node)
586 607
587 608 # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
@@ -593,11 +614,11 def markcommitted(orig, ctx, node):
593 614 # - have to be marked as "n" after commit, but
594 615 # - aren't listed in "repo[node].files()"
595 616
617 lfdirstate = openlfdirstate(repo.ui, repo)
596 618 for f in ctx.files():
597 619 lfile = splitstandin(f)
598 620 if lfile is not None:
599 621 synclfdirstate(repo, lfdirstate, lfile, False)
600 lfdirstate.write(repo.currenttransaction())
601 622
602 623 # As part of committing, copy all of the largefiles into the cache.
603 624 #
@@ -668,11 +689,16 def updatestandinsbymatch(repo, match):
668 689 # It can cost a lot of time (several seconds)
669 690 # otherwise to update all standins if the largefiles are
670 691 # large.
671 lfdirstate = openlfdirstate(ui, repo)
672 692 dirtymatch = matchmod.always()
673 unsure, s, mtime_boundary = lfdirstate.status(
674 dirtymatch, subrepos=[], ignored=False, clean=False, unknown=False
675 )
693 with repo.dirstate.running_status(repo):
694 lfdirstate = openlfdirstate(ui, repo)
695 unsure, s, mtime_boundary = lfdirstate.status(
696 dirtymatch,
697 subrepos=[],
698 ignored=False,
699 clean=False,
700 unknown=False,
701 )
676 702 modifiedfiles = unsure + s.modified + s.added + s.removed
677 703 lfiles = listlfiles(repo)
678 704 # this only loops through largefiles that exist (not
@@ -8,6 +8,7
8 8
9 9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 10
11 import contextlib
11 12 import copy
12 13 import os
13 14
@@ -21,6 +22,7 from mercurial import (
21 22 archival,
22 23 cmdutil,
23 24 copies as copiesmod,
25 dirstate,
24 26 error,
25 27 exchange,
26 28 extensions,
@@ -311,6 +313,48 def cmdutilremove(
311 313 )
312 314
313 315
316 @eh.wrapfunction(dirstate.dirstate, b'_changing')
317 @contextlib.contextmanager
318 def _changing(orig, self, repo, change_type):
319 pre = sub_dirstate = getattr(self, '_sub_dirstate', None)
320 try:
321 lfd = getattr(self, '_large_file_dirstate', False)
322 if sub_dirstate is None and not lfd:
323 sub_dirstate = lfutil.openlfdirstate(repo.ui, repo)
324 self._sub_dirstate = sub_dirstate
325 if not lfd:
326 assert self._sub_dirstate is not None
327 with orig(self, repo, change_type):
328 if sub_dirstate is None:
329 yield
330 else:
331 with sub_dirstate._changing(repo, change_type):
332 yield
333 finally:
334 self._sub_dirstate = pre
335
336
337 @eh.wrapfunction(dirstate.dirstate, b'running_status')
338 @contextlib.contextmanager
339 def running_status(orig, self, repo):
340 pre = sub_dirstate = getattr(self, '_sub_dirstate', None)
341 try:
342 lfd = getattr(self, '_large_file_dirstate', False)
343 if sub_dirstate is None and not lfd:
344 sub_dirstate = lfutil.openlfdirstate(repo.ui, repo)
345 self._sub_dirstate = sub_dirstate
346 if not lfd:
347 assert self._sub_dirstate is not None
348 with orig(self, repo):
349 if sub_dirstate is None:
350 yield
351 else:
352 with sub_dirstate.running_status(repo):
353 yield
354 finally:
355 self._sub_dirstate = pre
356
357
314 358 @eh.wrapfunction(subrepo.hgsubrepo, b'status')
315 359 def overridestatusfn(orig, repo, rev2, **opts):
316 360 with lfstatus(repo._repo):
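The wrapper's core is just nesting an optional companion context inside the
main one; reduced to a sketch:

    import contextlib

    @contextlib.contextmanager
    def nested_change(main_ctx, sub_ctx=None):
        # Enter the main dirstate context, then the largefiles
        # sub-dirstate when one exists, unwinding both on exit.
        with main_ctx:
            if sub_ctx is None:
                yield
            else:
                with sub_ctx:
                    yield
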
@@ -511,10 +555,12 def overridedebugstate(orig, ui, repo, *
511 555 # largefiles. This makes the merge proceed and we can then handle this
512 556 # case further in the overridden calculateupdates function below.
513 557 @eh.wrapfunction(merge, b'_checkunknownfile')
514 def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
515 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
558 def overridecheckunknownfile(
559 origfn, dirstate, wvfs, dircache, wctx, mctx, f, f2=None
560 ):
561 if lfutil.standin(dirstate.normalize(f)) in wctx:
516 562 return False
517 return origfn(repo, wctx, mctx, f, f2)
563 return origfn(dirstate, wvfs, dircache, wctx, mctx, f, f2)
518 564
519 565
520 566 # The manifest merge handles conflicts on the manifest level. We want
@@ -658,18 +704,12 def overridecalculateupdates(
658 704 def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata):
659 705 if MERGE_ACTION_LARGEFILE_MARK_REMOVED in actions:
660 706 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
661 with lfdirstate.parentchange():
662 for lfile, args, msg in actions[
663 MERGE_ACTION_LARGEFILE_MARK_REMOVED
664 ]:
665 # this should be executed before 'orig', to execute 'remove'
666 # before all other actions
667 repo.dirstate.update_file(
668 lfile, p1_tracked=True, wc_tracked=False
669 )
670 # make sure lfile doesn't get synclfdirstate'd as normal
671 lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=True)
672 lfdirstate.write(repo.currenttransaction())
707 for lfile, args, msg in actions[MERGE_ACTION_LARGEFILE_MARK_REMOVED]:
708 # this should be executed before 'orig', to execute 'remove'
709 # before all other actions
710 repo.dirstate.update_file(lfile, p1_tracked=True, wc_tracked=False)
711 # make sure lfile doesn't get synclfdirstate'd as normal
712 lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=True)
673 713
674 714 return orig(repo, actions, branchmerge, getfiledata)
675 715
@@ -901,7 +941,7 def overriderevert(orig, ui, repo, ctx,
901 941 # Because we put the standins in a bad state (by updating them)
902 942 # and then return them to a correct state we need to lock to
903 943 # prevent others from changing them in their incorrect state.
904 with repo.wlock():
944 with repo.wlock(), repo.dirstate.running_status(repo):
905 945 lfdirstate = lfutil.openlfdirstate(ui, repo)
906 946 s = lfutil.lfdirstatestatus(lfdirstate, repo)
907 947 lfdirstate.write(repo.currenttransaction())
@@ -1436,7 +1476,7 def outgoinghook(ui, repo, other, opts,
1436 1476
1437 1477 def addfunc(fn, lfhash):
1438 1478 if fn not in toupload:
1439 toupload[fn] = []
1479 toupload[fn] = [] # pytype: disable=unsupported-operands
1440 1480 toupload[fn].append(lfhash)
1441 1481 lfhashes.add(lfhash)
1442 1482
@@ -1520,20 +1560,34 def overridesummary(orig, ui, repo, *pat
1520 1560
1521 1561
1522 1562 @eh.wrapfunction(scmutil, b'addremove')
1523 def scmutiladdremove(orig, repo, matcher, prefix, uipathfn, opts=None):
1563 def scmutiladdremove(
1564 orig,
1565 repo,
1566 matcher,
1567 prefix,
1568 uipathfn,
1569 opts=None,
1570 open_tr=None,
1571 ):
1524 1572 if opts is None:
1525 1573 opts = {}
1526 1574 if not lfutil.islfilesrepo(repo):
1527 return orig(repo, matcher, prefix, uipathfn, opts)
1575 return orig(repo, matcher, prefix, uipathfn, opts, open_tr=open_tr)
1576
1577 # open the transaction and changing_files context
1578 if open_tr is not None:
1579 open_tr()
1580
1528 1581 # Get the list of missing largefiles so we can remove them
1529 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1530 unsure, s, mtime_boundary = lfdirstate.status(
1531 matchmod.always(),
1532 subrepos=[],
1533 ignored=False,
1534 clean=False,
1535 unknown=False,
1536 )
1582 with repo.dirstate.running_status(repo):
1583 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1584 unsure, s, mtime_boundary = lfdirstate.status(
1585 matchmod.always(),
1586 subrepos=[],
1587 ignored=False,
1588 clean=False,
1589 unknown=False,
1590 )
1537 1591
1538 1592 # Call into the normal remove code, but the removing of the standin, we want
1539 1593 # to have handled by original addremove. Monkey patching here makes sure
@@ -1567,7 +1621,8 def scmutiladdremove(orig, repo, matcher
1567 1621 # function to take care of the rest. Make sure it doesn't do anything with
1568 1622 # largefiles by passing a matcher that will ignore them.
1569 1623 matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
1570 return orig(repo, matcher, prefix, uipathfn, opts)
1624
1625 return orig(repo, matcher, prefix, uipathfn, opts, open_tr=open_tr)
1571 1626
1572 1627
1573 1628 # Calling purge with --all will cause the largefiles to be deleted.
@@ -1737,7 +1792,7 def mergeupdate(orig, repo, node, branch
1737 1792 matcher = kwargs.get('matcher', None)
1738 1793 # note if this is a partial update
1739 1794 partial = matcher and not matcher.always()
1740 with repo.wlock():
1795 with repo.wlock(), repo.dirstate.changing_parents(repo):
1741 1796 # branch | | |
1742 1797 # merge | force | partial | action
1743 1798 # -------+-------+---------+--------------
@@ -1752,15 +1807,15 def mergeupdate(orig, repo, node, branch
1752 1807 #
1753 1808 # (*) don't care
1754 1809 # (*1) deprecated, but used internally (e.g: "rebase --collapse")
1755
1756 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1757 unsure, s, mtime_boundary = lfdirstate.status(
1758 matchmod.always(),
1759 subrepos=[],
1760 ignored=False,
1761 clean=True,
1762 unknown=False,
1763 )
1810 with repo.dirstate.running_status(repo):
1811 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1812 unsure, s, mtime_boundary = lfdirstate.status(
1813 matchmod.always(),
1814 subrepos=[],
1815 ignored=False,
1816 clean=True,
1817 unknown=False,
1818 )
1764 1819 oldclean = set(s.clean)
1765 1820 pctx = repo[b'.']
1766 1821 dctx = repo[node]
@@ -1787,7 +1842,14 def mergeupdate(orig, repo, node, branch
1787 1842 # mark all clean largefiles as dirty, just in case the update gets
1788 1843 # interrupted before largefiles and lfdirstate are synchronized
1789 1844 for lfile in oldclean:
1790 lfdirstate.set_possibly_dirty(lfile)
1845 entry = lfdirstate.get_entry(lfile)
1846 lfdirstate.hacky_extension_update_file(
1847 lfile,
1848 wc_tracked=entry.tracked,
1849 p1_tracked=entry.p1_tracked,
1850 p2_info=entry.p2_info,
1851 possibly_dirty=True,
1852 )
1791 1853 lfdirstate.write(repo.currenttransaction())
1792 1854
1793 1855 oldstandins = lfutil.getstandinsstate(repo)
@@ -1798,24 +1860,22 def mergeupdate(orig, repo, node, branch
1798 1860 raise error.ProgrammingError(
1799 1861 b'largefiles is not compatible with in-memory merge'
1800 1862 )
1801 with lfdirstate.parentchange():
1802 result = orig(repo, node, branchmerge, force, *args, **kwargs)
1863 result = orig(repo, node, branchmerge, force, *args, **kwargs)
1803 1864
1804 newstandins = lfutil.getstandinsstate(repo)
1805 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1865 newstandins = lfutil.getstandinsstate(repo)
1866 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1806 1867
1807 # to avoid leaving all largefiles as dirty and thus rehash them, mark
1808 # all the ones that didn't change as clean
1809 for lfile in oldclean.difference(filelist):
1810 lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=True)
1811 lfdirstate.write(repo.currenttransaction())
1868 # to avoid leaving all largefiles as dirty and thus rehash them, mark
1869 # all the ones that didn't change as clean
1870 for lfile in oldclean.difference(filelist):
1871 lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=True)
1812 1872
1813 if branchmerge or force or partial:
1814 filelist.extend(s.deleted + s.removed)
1873 if branchmerge or force or partial:
1874 filelist.extend(s.deleted + s.removed)
1815 1875
1816 lfcommands.updatelfiles(
1817 repo.ui, repo, filelist=filelist, normallookup=partial
1818 )
1876 lfcommands.updatelfiles(
1877 repo.ui, repo, filelist=filelist, normallookup=partial
1878 )
1819 1879
1820 1880 return result
1821 1881
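A recurring theme in these hunks: `dirstate.parentchange()` gives way to the explicit `changing_parents(repo)` and `running_status(repo)` context managers. A minimal sketch of the new calling convention, assuming a `repo` object that exposes the updated dirstate API used above:

    # hedged sketch; `repo` is assumed to be a localrepository carrying the
    # context-manager based dirstate API shown in the hunks above
    with repo.wlock(), repo.dirstate.changing_parents(repo):
        # parent-affecting mutations happen while the dirstate is "changing"
        p1, p2 = repo.dirstate.p1(), repo.dirstate.p2()
        repo.dirstate.setparents(p1, p2)

    with repo.dirstate.running_status(repo):
        # status-time refreshes (e.g. lfdirstate.status) are scoped here
        pass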
@@ -139,7 +139,7 def reposetup(ui, repo):
139 139 except error.LockError:
140 140 wlock = util.nullcontextmanager()
141 141 gotlock = False
142 with wlock:
142 with wlock, self.dirstate.running_status(self):
143 143
144 144 # First check if paths or patterns were specified on the
145 145 # command line. If there were, and they don't match any
@@ -321,6 +321,8 def reposetup(ui, repo):
321 321
322 322 if gotlock:
323 323 lfdirstate.write(self.currenttransaction())
324 else:
325 lfdirstate.invalidate()
324 326
325 327 self.lfstatus = True
326 328 return scmutil.status(*result)
@@ -36,22 +36,23 def openstore(repo=None, remote=None, pu
36 36 b'lfpullsource', repo, ui, lfpullsource
37 37 )
38 38 else:
39 path, _branches = urlutil.get_unique_pull_path(
40 b'lfpullsource', repo, ui, lfpullsource
39 path = urlutil.get_unique_pull_path_obj(
40 b'lfpullsource', ui, lfpullsource
41 41 )
42 42
43 43 # XXX we should not explicitly pass b'default', as this will result in
44 44 # b'default' being returned if no `paths.default` was defined. We
45 45 # should explicitly handle the lack of value instead.
46 46 if repo is None:
47 path, _branches = urlutil.get_unique_pull_path(
48 b'lfs', repo, ui, b'default'
47 path = urlutil.get_unique_pull_path_obj(
48 b'lfs',
49 ui,
50 b'default',
49 51 )
50 52 remote = hg.peer(repo or ui, {}, path)
51 elif path == b'default-push' or path == b'default':
53 elif path.loc == b'default-push' or path.loc == b'default':
52 54 remote = repo
53 55 else:
54 path, _branches = urlutil.parseurl(path)
55 56 remote = hg.peer(repo or ui, {}, path)
56 57
57 58 # The path could be a scheme so use Mercurial's normal functionality
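These call sites migrate from `get_unique_pull_path()`, which returned a `(path, branches)` tuple, to `get_unique_pull_path_obj()`, which returns a single path object. A hedged sketch of the difference, reusing the names from the hunk:

    # old: the location came back as the first element of a tuple
    #   loc, _branches = urlutil.get_unique_pull_path(b'lfpullsource', repo, ui, src)
    # new: one path object; the location string lives on `.loc`
    src = b'default'  # example source name; any configured path alias works
    path = urlutil.get_unique_pull_path_obj(b'lfpullsource', ui, src)
    if path.loc in (b'default-push', b'default'):
        remote = repo
    else:
        remote = hg.peer(repo or ui, {}, path)  # peers accept the object directly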
@@ -168,12 +168,16 class local:
168 168 # producing the response (but the server has no way of telling us
169 169 # that), and we really don't need to try to write the response to
170 170 # the localstore, because it's not going to match the expected.
171 # The server also uses this method to store data uploaded by the
172 # client, so if this happens on the server side, it's possible
173 # that the client crashed or an antivirus interfered with the
174 # upload.
171 175 if content_length is not None and int(content_length) != size:
172 176 msg = (
173 177 b"Response length (%d) does not match Content-Length "
174 b"header (%d): likely server-side crash"
178 b"header (%d) for %s"
175 179 )
176 raise LfsRemoteError(_(msg) % (size, int(content_length)))
180 raise LfsRemoteError(_(msg) % (size, int(content_length), oid))
177 181
178 182 realoid = hex(sha256.digest())
179 183 if realoid != oid:
@@ -82,7 +82,6 from mercurial.pycompat import (
82 82 from mercurial import (
83 83 cmdutil,
84 84 commands,
85 dirstateguard,
86 85 encoding,
87 86 error,
88 87 extensions,
@@ -791,7 +790,10 class queue:
791 790 if self.added:
792 791 qrepo = self.qrepo()
793 792 if qrepo:
794 qrepo[None].add(f for f in self.added if f not in qrepo[None])
793 with qrepo.wlock(), qrepo.dirstate.changing_files(qrepo):
794 qrepo[None].add(
795 f for f in self.added if f not in qrepo[None]
796 )
795 797 self.added = []
796 798
797 799 def removeundo(self, repo):
@@ -1082,7 +1084,7 class queue:
1082 1084
1083 1085 if merge and files:
1084 1086 # Mark as removed/merged and update dirstate parent info
1085 with repo.dirstate.parentchange():
1087 with repo.dirstate.changing_parents(repo):
1086 1088 for f in files:
1087 1089 repo.dirstate.update_file_p1(f, p1_tracked=True)
1088 1090 p1 = repo.dirstate.p1()
@@ -1129,7 +1131,8 class queue:
1129 1131 if not keep:
1130 1132 r = self.qrepo()
1131 1133 if r:
1132 r[None].forget(patches)
1134 with r.wlock(), r.dirstate.changing_files(r):
1135 r[None].forget(patches)
1133 1136 for p in patches:
1134 1137 try:
1135 1138 os.unlink(self.join(p))
@@ -1153,7 +1156,7 class queue:
1153 1156 sortedseries.append((idx, p))
1154 1157
1155 1158 sortedseries.sort(reverse=True)
1156 for (i, p) in sortedseries:
1159 for i, p in sortedseries:
1157 1160 if i != -1:
1158 1161 del self.fullseries[i]
1159 1162 else:
@@ -1177,7 +1180,6 class queue:
1177 1180 firstrev = repo[self.applied[0].node].rev()
1178 1181 patches = []
1179 1182 for i, rev in enumerate(revs):
1180
1181 1183 if rev < firstrev:
1182 1184 raise error.Abort(_(b'revision %d is not managed') % rev)
1183 1185
@@ -1465,7 +1467,8 class queue:
1465 1467 p.close()
1466 1468 r = self.qrepo()
1467 1469 if r:
1468 r[None].add([patchfn])
1470 with r.wlock(), r.dirstate.changing_files(r):
1471 r[None].add([patchfn])
1469 1472 except: # re-raises
1470 1473 repo.rollback()
1471 1474 raise
@@ -1830,7 +1833,7 class queue:
1830 1833 if keepchanges and tobackup:
1831 1834 raise error.Abort(_(b"local changes found, qrefresh first"))
1832 1835 self.backup(repo, tobackup)
1833 with repo.dirstate.parentchange():
1836 with repo.dirstate.changing_parents(repo):
1834 1837 for f in a:
1835 1838 repo.wvfs.unlinkpath(f, ignoremissing=True)
1836 1839 repo.dirstate.update_file(
@@ -1988,73 +1991,67 class queue:
1988 1991
1989 1992 bmlist = repo[top].bookmarks()
1990 1993
1991 with repo.dirstate.parentchange():
1992 # XXX do we actually need the dirstateguard
1993 dsguard = None
1994 try:
1995 dsguard = dirstateguard.dirstateguard(repo, b'mq.refresh')
1996 if diffopts.git or diffopts.upgrade:
1997 copies = {}
1998 for dst in a:
1999 src = repo.dirstate.copied(dst)
2000 # during qfold, the source file for copies may
2001 # be removed. Treat this as a simple add.
2002 if src is not None and src in repo.dirstate:
2003 copies.setdefault(src, []).append(dst)
2004 repo.dirstate.update_file(
2005 dst, p1_tracked=False, wc_tracked=True
1994 with repo.dirstate.changing_parents(repo):
1995 if diffopts.git or diffopts.upgrade:
1996 copies = {}
1997 for dst in a:
1998 src = repo.dirstate.copied(dst)
1999 # during qfold, the source file for copies may
2000 # be removed. Treat this as a simple add.
2001 if src is not None and src in repo.dirstate:
2002 copies.setdefault(src, []).append(dst)
2003 repo.dirstate.update_file(
2004 dst, p1_tracked=False, wc_tracked=True
2005 )
2006 # remember the copies between patchparent and qtip
2007 for dst in aaa:
2008 src = ctx[dst].copysource()
2009 if src:
2010 copies.setdefault(src, []).extend(
2011 copies.get(dst, [])
2006 2012 )
2007 # remember the copies between patchparent and qtip
2008 for dst in aaa:
2009 src = ctx[dst].copysource()
2010 if src:
2011 copies.setdefault(src, []).extend(
2012 copies.get(dst, [])
2013 )
2014 if dst in a:
2015 copies[src].append(dst)
2016 # we can't copy a file created by the patch itself
2017 if dst in copies:
2018 del copies[dst]
2019 for src, dsts in copies.items():
2020 for dst in dsts:
2021 repo.dirstate.copy(src, dst)
2022 else:
2023 for dst in a:
2024 repo.dirstate.update_file(
2025 dst, p1_tracked=False, wc_tracked=True
2026 )
2027 # Drop useless copy information
2028 for f in list(repo.dirstate.copies()):
2029 repo.dirstate.copy(None, f)
2030 for f in r:
2031 repo.dirstate.update_file_p1(f, p1_tracked=True)
2032 # if the patch excludes a modified file, mark that
2033 # file with mtime=0 so status can see it.
2034 mm = []
2035 for i in range(len(m) - 1, -1, -1):
2036 if not match1(m[i]):
2037 mm.append(m[i])
2038 del m[i]
2039 for f in m:
2040 repo.dirstate.update_file_p1(f, p1_tracked=True)
2041 for f in mm:
2042 repo.dirstate.update_file_p1(f, p1_tracked=True)
2043 for f in forget:
2044 repo.dirstate.update_file_p1(f, p1_tracked=False)
2045
2046 user = ph.user or ctx.user()
2047
2048 oldphase = repo[top].phase()
2049
2050 # assumes strip can roll itself back if interrupted
2051 repo.setparents(*cparents)
2052 self.applied.pop()
2053 self.applieddirty = True
2054 strip(self.ui, repo, [top], update=False, backup=False)
2055 dsguard.close()
2056 finally:
2057 release(dsguard)
2013 if dst in a:
2014 copies[src].append(dst)
2015 # we can't copy a file created by the patch itself
2016 if dst in copies:
2017 del copies[dst]
2018 for src, dsts in copies.items():
2019 for dst in dsts:
2020 repo.dirstate.copy(src, dst)
2021 else:
2022 for dst in a:
2023 repo.dirstate.update_file(
2024 dst, p1_tracked=False, wc_tracked=True
2025 )
2026 # Drop useless copy information
2027 for f in list(repo.dirstate.copies()):
2028 repo.dirstate.copy(None, f)
2029 for f in r:
2030 repo.dirstate.update_file_p1(f, p1_tracked=True)
2031 # if the patch excludes a modified file, mark that
2032 # file with mtime=0 so status can see it.
2033 mm = []
2034 for i in range(len(m) - 1, -1, -1):
2035 if not match1(m[i]):
2036 mm.append(m[i])
2037 del m[i]
2038 for f in m:
2039 repo.dirstate.update_file_p1(f, p1_tracked=True)
2040 for f in mm:
2041 repo.dirstate.update_file_p1(f, p1_tracked=True)
2042 for f in forget:
2043 repo.dirstate.update_file_p1(f, p1_tracked=False)
2044
2045 user = ph.user or ctx.user()
2046
2047 oldphase = repo[top].phase()
2048
2049 # assumes strip can roll itself back if interrupted
2050 repo.setparents(*cparents)
2051 repo.dirstate.write(repo.currenttransaction())
2052 self.applied.pop()
2053 self.applieddirty = True
2054 strip(self.ui, repo, [top], update=False, backup=False)
2058 2055
2059 2056 try:
2060 2057 # might be nice to attempt to roll back strip after this
@@ -2124,8 +2121,9 class queue:
2124 2121 finally:
2125 2122 lockmod.release(tr, lock)
2126 2123 except: # re-raises
2127 ctx = repo[cparents[0]]
2128 repo.dirstate.rebuild(ctx.node(), ctx.manifest())
2124 with repo.dirstate.changing_parents(repo):
2125 ctx = repo[cparents[0]]
2126 repo.dirstate.rebuild(ctx.node(), ctx.manifest())
2129 2127 self.savedirty()
2130 2128 self.ui.warn(
2131 2129 _(
@@ -2760,18 +2758,19 def qinit(ui, repo, create):
2760 2758 r = q.init(repo, create)
2761 2759 q.savedirty()
2762 2760 if r:
2763 if not os.path.exists(r.wjoin(b'.hgignore')):
2764 fp = r.wvfs(b'.hgignore', b'w')
2765 fp.write(b'^\\.hg\n')
2766 fp.write(b'^\\.mq\n')
2767 fp.write(b'syntax: glob\n')
2768 fp.write(b'status\n')
2769 fp.write(b'guards\n')
2770 fp.close()
2771 if not os.path.exists(r.wjoin(b'series')):
2772 r.wvfs(b'series', b'w').close()
2773 r[None].add([b'.hgignore', b'series'])
2774 commands.add(ui, r)
2761 with r.wlock(), r.dirstate.changing_files(r):
2762 if not os.path.exists(r.wjoin(b'.hgignore')):
2763 fp = r.wvfs(b'.hgignore', b'w')
2764 fp.write(b'^\\.hg\n')
2765 fp.write(b'^\\.mq\n')
2766 fp.write(b'syntax: glob\n')
2767 fp.write(b'status\n')
2768 fp.write(b'guards\n')
2769 fp.close()
2770 if not os.path.exists(r.wjoin(b'series')):
2771 r.wvfs(b'series', b'w').close()
2772 r[None].add([b'.hgignore', b'series'])
2773 commands.add(ui, r)
2775 2774 return 0
2776 2775
2777 2776
@@ -2854,16 +2853,17 def clone(ui, source, dest=None, **opts)
2854 2853 # main repo (destination and sources)
2855 2854 if dest is None:
2856 2855 dest = hg.defaultdest(source)
2857 __, source_path, __ = urlutil.get_clone_path(ui, source)
2856 source_path = urlutil.get_clone_path_obj(ui, source)
2858 2857 sr = hg.peer(ui, opts, source_path)
2859 2858
2860 2859 # patches repo (source only)
2861 2860 if opts.get(b'patches'):
2862 __, patchespath, __ = urlutil.get_clone_path(ui, opts.get(b'patches'))
2861 patches_path = urlutil.get_clone_path_obj(ui, opts.get(b'patches'))
2863 2862 else:
2864 patchespath = patchdir(sr)
2863 # XXX path: we should turn this into a path object
2864 patches_path = patchdir(sr)
2865 2865 try:
2866 hg.peer(ui, opts, patchespath)
2866 hg.peer(ui, opts, patches_path)
2867 2867 except error.RepoError:
2868 2868 raise error.Abort(
2869 2869 _(b'versioned patch repository not found (see init --mq)')
@@ -3223,45 +3223,46 def fold(ui, repo, *files, **opts):
3223 3223 raise error.Abort(_(b'qfold requires at least one patch name'))
3224 3224 if not q.checktoppatch(repo)[0]:
3225 3225 raise error.Abort(_(b'no patches applied'))
3226 q.checklocalchanges(repo)
3227
3228 message = cmdutil.logmessage(ui, opts)
3229
3230 parent = q.lookup(b'qtip')
3231 patches = []
3232 messages = []
3233 for f in files:
3234 p = q.lookup(f)
3235 if p in patches or p == parent:
3236 ui.warn(_(b'skipping already folded patch %s\n') % p)
3237 if q.isapplied(p):
3238 raise error.Abort(
3239 _(b'qfold cannot fold already applied patch %s') % p
3240 )
3241 patches.append(p)
3242
3243 for p in patches:
3226
3227 with repo.wlock():
3228 q.checklocalchanges(repo)
3229
3230 message = cmdutil.logmessage(ui, opts)
3231
3232 parent = q.lookup(b'qtip')
3233 patches = []
3234 messages = []
3235 for f in files:
3236 p = q.lookup(f)
3237 if p in patches or p == parent:
3238 ui.warn(_(b'skipping already folded patch %s\n') % p)
3239 if q.isapplied(p):
3240 raise error.Abort(
3241 _(b'qfold cannot fold already applied patch %s') % p
3242 )
3243 patches.append(p)
3244
3245 for p in patches:
3246 if not message:
3247 ph = patchheader(q.join(p), q.plainmode)
3248 if ph.message:
3249 messages.append(ph.message)
3250 pf = q.join(p)
3251 (patchsuccess, files, fuzz) = q.patch(repo, pf)
3252 if not patchsuccess:
3253 raise error.Abort(_(b'error folding patch %s') % p)
3254
3244 3255 if not message:
3245 ph = patchheader(q.join(p), q.plainmode)
3246 if ph.message:
3247 messages.append(ph.message)
3248 pf = q.join(p)
3249 (patchsuccess, files, fuzz) = q.patch(repo, pf)
3250 if not patchsuccess:
3251 raise error.Abort(_(b'error folding patch %s') % p)
3252
3253 if not message:
3254 ph = patchheader(q.join(parent), q.plainmode)
3255 message = ph.message
3256 for msg in messages:
3257 if msg:
3258 if message:
3259 message.append(b'* * *')
3260 message.extend(msg)
3261 message = b'\n'.join(message)
3262
3263 diffopts = q.patchopts(q.diffopts(), *patches)
3264 with repo.wlock():
3256 ph = patchheader(q.join(parent), q.plainmode)
3257 message = ph.message
3258 for msg in messages:
3259 if msg:
3260 if message:
3261 message.append(b'* * *')
3262 message.extend(msg)
3263 message = b'\n'.join(message)
3264
3265 diffopts = q.patchopts(q.diffopts(), *patches)
3265 3266 q.refresh(
3266 3267 repo,
3267 3268 msg=message,
@@ -3627,8 +3628,8 def rename(ui, repo, patch, name=None, *
3627 3628 util.rename(q.join(patch), absdest)
3628 3629 r = q.qrepo()
3629 3630 if r and patch in r.dirstate:
3630 wctx = r[None]
3631 with r.wlock():
3631 with r.wlock(), r.dirstate.changing_files(r):
3632 wctx = r[None]
3632 3633 if r.dirstate.get_entry(patch).added:
3633 3634 r.dirstate.set_untracked(patch)
3634 3635 r.dirstate.set_tracked(name)
@@ -320,7 +320,7 def _narrow(
320 320 repo.store.markremoved(f)
321 321
322 322 ui.status(_(b'deleting unwanted files from working copy\n'))
323 with repo.dirstate.parentchange():
323 with repo.dirstate.changing_parents(repo):
324 324 narrowspec.updateworkingcopy(repo, assumeclean=True)
325 325 narrowspec.copytoworkingcopy(repo)
326 326
@@ -380,7 +380,7 def _widen(
380 380 if ellipsesremote:
381 381 ds = repo.dirstate
382 382 p1, p2 = ds.p1(), ds.p2()
383 with ds.parentchange():
383 with ds.changing_parents(repo):
384 384 ds.setparents(repo.nullid, repo.nullid)
385 385 if isoldellipses:
386 386 with wrappedextraprepare:
@@ -416,13 +416,15 def _widen(
416 416 repo, trmanager.transaction, source=b'widen'
417 417 )
418 418 # TODO: we should catch error.Abort here
419 bundle2.processbundle(repo, bundle, op=op)
419 bundle2.processbundle(repo, bundle, op=op, remote=remote)
420 420
421 421 if ellipsesremote:
422 with ds.parentchange():
422 with ds.changing_parents(repo):
423 423 ds.setparents(p1, p2)
424 424
425 with repo.transaction(b'widening'), repo.dirstate.parentchange():
425 with repo.transaction(b'widening'), repo.dirstate.changing_parents(
426 repo
427 ):
426 428 repo.setnewnarrowpats()
427 429 narrowspec.updateworkingcopy(repo)
428 430 narrowspec.copytoworkingcopy(repo)
@@ -591,7 +593,7 def trackedcmd(ui, repo, remotepath=None
591 593 if update_working_copy:
592 594 with repo.wlock(), repo.lock(), repo.transaction(
593 595 b'narrow-wc'
594 ), repo.dirstate.parentchange():
596 ), repo.dirstate.changing_parents(repo):
595 597 narrowspec.updateworkingcopy(repo)
596 598 narrowspec.copytoworkingcopy(repo)
597 599 return 0
@@ -606,10 +608,9 def trackedcmd(ui, repo, remotepath=None
606 608 # Find the revisions we have in common with the remote. These will
607 609 # be used for finding local-only changes for narrowing. They will
608 610 # also define the set of revisions to update for widening.
609 r = urlutil.get_unique_pull_path(b'tracked', repo, ui, remotepath)
610 url, branches = r
611 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(url))
612 remote = hg.peer(repo, opts, url)
611 path = urlutil.get_unique_pull_path_obj(b'tracked', ui, remotepath)
612 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(path.loc))
613 remote = hg.peer(repo, opts, path)
613 614
614 615 try:
615 616 # check narrow support before doing anything if widening needs to be
@@ -19,8 +19,8 def wraprepo(repo):
19 19 dirstate = super(narrowrepository, self)._makedirstate()
20 20 return narrowdirstate.wrapdirstate(self, dirstate)
21 21
22 def peer(self):
23 peer = super(narrowrepository, self).peer()
22 def peer(self, path=None):
23 peer = super(narrowrepository, self).peer(path=path)
24 24 peer._caps.add(wireprototypes.NARROWCAP)
25 25 peer._caps.add(wireprototypes.ELLIPSESCAP)
26 26 return peer
@@ -450,7 +450,7 class notifier:
450 450 try:
451 451 msg = mail.parsebytes(data)
452 452 except emailerrors.MessageParseError as inst:
453 raise error.Abort(inst)
453 raise error.Abort(stringutil.forcebytestr(inst))
454 454
455 455 # store sender and subject
456 456 sender = msg['From']
@@ -286,9 +286,12 def vcrcommand(name, flags, spec, helpca
286 286 import hgdemandimport
287 287
288 288 with hgdemandimport.deactivated():
289 # pytype: disable=import-error
289 290 import vcr as vcrmod
290 291 import vcr.stubs as stubs
291 292
293 # pytype: enable=import-error
294
292 295 vcr = vcrmod.VCR(
293 296 serializer='json',
294 297 before_record_request=sanitiserequest,
@@ -350,11 +353,14 def urlencodenested(params):
350 353 """
351 354 flatparams = util.sortdict()
352 355
353 def process(prefix, obj):
356 def process(prefix: bytes, obj):
354 357 if isinstance(obj, bool):
355 358 obj = {True: b'true', False: b'false'}[obj] # Python -> PHP form
356 359 lister = lambda l: [(b'%d' % k, v) for k, v in enumerate(l)]
360 # .items() will only be called for a dict type
361 # pytype: disable=attribute-error
357 362 items = {list: lister, dict: lambda x: x.items()}.get(type(obj))
363 # pytype: enable=attribute-error
358 364 if items is None:
359 365 flatparams[prefix] = obj
360 366 else:
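For reference, `urlencodenested` flattens nested lists and dicts into PHP-style `a[b][c]` form keys. A rough standalone re-implementation of the flattening core (plain dict instead of `util.sortdict`; the sample key names are invented):

    def flatten(prefix, obj, out):
        # booleans become PHP-style literals, mirroring the hunk above
        if isinstance(obj, bool):
            obj = b'true' if obj else b'false'
        if isinstance(obj, list):
            items = [(b'%d' % i, v) for i, v in enumerate(obj)]
        elif isinstance(obj, dict):
            items = list(obj.items())
        else:
            out[prefix] = obj
            return
        for k, v in items:
            flatten(b'%s[%s]' % (prefix, k) if prefix else k, v, out)

    out = {}
    flatten(b'', {b'constraints': {b'ids': [3]}}, out)
    # out == {b'constraints[ids][0]': 3}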
@@ -30,7 +30,6 from mercurial import (
30 30 commands,
31 31 copies,
32 32 destutil,
33 dirstateguard,
34 33 error,
35 34 extensions,
36 35 logcmdutil,
@@ -1271,15 +1270,9 def _origrebase(ui, repo, action, opts,
1271 1270 # one transaction here. Otherwise, transactions are obtained when
1272 1271 # committing each node, which is slower but allows partial success.
1273 1272 with util.acceptintervention(tr):
1274 # Same logic for the dirstate guard, except we don't create one when
1275 # rebasing in-memory (it's not needed).
1276 dsguard = None
1277 if singletr and not rbsrt.inmemory:
1278 dsguard = dirstateguard.dirstateguard(repo, b'rebase')
1279 with util.acceptintervention(dsguard):
1280 rbsrt._performrebase(tr)
1281 if not rbsrt.dryrun:
1282 rbsrt._finishrebase()
1273 rbsrt._performrebase(tr)
1274 if not rbsrt.dryrun:
1275 rbsrt._finishrebase()
1283 1276
1284 1277
1285 1278 def _definedestmap(ui, repo, inmemory, destf, srcf, basef, revf, destspace):
@@ -1500,10 +1493,10 def commitmemorynode(repo, wctx, editor,
1500 1493 def commitnode(repo, editor, extra, user, date, commitmsg):
1501 1494 """Commit the wd changes with parents p1 and p2.
1502 1495 Return node of committed revision."""
1503 dsguard = util.nullcontextmanager()
1496 tr = util.nullcontextmanager
1504 1497 if not repo.ui.configbool(b'rebase', b'singletransaction'):
1505 dsguard = dirstateguard.dirstateguard(repo, b'rebase')
1506 with dsguard:
1498 tr = lambda: repo.transaction(b'rebase')
1499 with tr():
1507 1500 # Commit might fail if unresolved files exist
1508 1501 newnode = repo.commit(
1509 1502 text=commitmsg, user=user, date=date, extra=extra, editor=editor
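Note the pattern replacing the dirstate guard: `tr` holds a zero-argument callable returning a context manager, so `with tr():` works identically for the no-op and the real-transaction branch. A self-contained sketch with `contextlib.nullcontext` standing in for `util.nullcontextmanager`:

    import contextlib

    def commit_with_optional_tr(single_transaction, open_transaction):
        # pick lazily: no transaction object is created unless it will be used
        tr = contextlib.nullcontext
        if not single_transaction:
            tr = open_transaction  # e.g. lambda: repo.transaction(b'rebase')
        with tr():
            pass  # the commit that may fail happens inside the scope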
@@ -1520,12 +1513,14 def rebasenode(repo, rev, p1, p2, base,
1520 1513 p1ctx = repo[p1]
1521 1514 if wctx.isinmemory():
1522 1515 wctx.setbase(p1ctx)
1516 scope = util.nullcontextmanager
1523 1517 else:
1524 1518 if repo[b'.'].rev() != p1:
1525 1519 repo.ui.debug(b" update to %d:%s\n" % (p1, p1ctx))
1526 1520 mergemod.clean_update(p1ctx)
1527 1521 else:
1528 1522 repo.ui.debug(b" already in destination\n")
1523 scope = lambda: repo.dirstate.changing_parents(repo)
1529 1524 # This is, alas, necessary to invalidate workingctx's manifest cache,
1530 1525 # as well as other data we litter on it in other places.
1531 1526 wctx = repo[None]
@@ -1535,26 +1530,27 def rebasenode(repo, rev, p1, p2, base,
1535 1530 if base is not None:
1536 1531 repo.ui.debug(b" detach base %d:%s\n" % (base, repo[base]))
1537 1532
1538 # See explanation in merge.graft()
1539 mergeancestor = repo.changelog.isancestor(p1ctx.node(), ctx.node())
1540 stats = mergemod._update(
1541 repo,
1542 rev,
1543 branchmerge=True,
1544 force=True,
1545 ancestor=base,
1546 mergeancestor=mergeancestor,
1547 labels=[b'dest', b'source', b'parent of source'],
1548 wc=wctx,
1549 )
1550 wctx.setparents(p1ctx.node(), repo[p2].node())
1551 if collapse:
1552 copies.graftcopies(wctx, ctx, p1ctx)
1553 else:
1554 # If we're not using --collapse, we need to
1555 # duplicate copies between the revision we're
1556 # rebasing and its first parent.
1557 copies.graftcopies(wctx, ctx, ctx.p1())
1533 with scope():
1534 # See explanation in merge.graft()
1535 mergeancestor = repo.changelog.isancestor(p1ctx.node(), ctx.node())
1536 stats = mergemod._update(
1537 repo,
1538 rev,
1539 branchmerge=True,
1540 force=True,
1541 ancestor=base,
1542 mergeancestor=mergeancestor,
1543 labels=[b'dest', b'source', b'parent of source'],
1544 wc=wctx,
1545 )
1546 wctx.setparents(p1ctx.node(), repo[p2].node())
1547 if collapse:
1548 copies.graftcopies(wctx, ctx, p1ctx)
1549 else:
1550 # If we're not using --collapse, we need to
1551 # duplicate copies between the revision we're
1552 # rebasing and its first parent.
1553 copies.graftcopies(wctx, ctx, ctx.p1())
1558 1554
1559 1555 if stats.unresolvedcount > 0:
1560 1556 if wctx.isinmemory():
@@ -39,7 +39,7 command = registrar.command(cmdtable)
39 39 try:
40 40 # Silence a warning about python-Levenshtein.
41 41 #
42 # We don't need the the performance that much and it get anoying in tests.
42 # We don't need the performance that much and it gets annoying in tests.
43 43 import warnings
44 44
45 45 with warnings.catch_warnings():
@@ -50,7 +50,7 try:
50 50 module="fuzzywuzzy.fuzz",
51 51 )
52 52
53 import fuzzywuzzy.fuzz as fuzz
53 import fuzzywuzzy.fuzz as fuzz # pytype: disable=import-error
54 54
55 55 fuzz.token_set_ratio
56 56 except ImportError:
@@ -67,8 +67,8 def relink(ui, repo, origin=None, **opts
67 67
68 68 if origin is None and b'default-relink' in ui.paths:
69 69 origin = b'default-relink'
70 path, __ = urlutil.get_unique_pull_path(b'relink', repo, ui, origin)
71 src = hg.repository(repo.baseui, path)
70 path = urlutil.get_unique_pull_path_obj(b'relink', ui, origin)
71 src = hg.repository(repo.baseui, path.loc)
72 72 ui.status(_(b'relinking %s to %s\n') % (src.store.path, repo.store.path))
73 73 if repo.root == src.root:
74 74 ui.status(_(b'there is nothing to relink\n'))
@@ -299,6 +299,7 class remotefilelog:
299 299 deltaprevious=False,
300 300 deltamode=None,
301 301 sidedata_helpers=None,
302 debug_info=None,
302 303 ):
303 304 # we don't use any of these parameters here
304 305 del nodesorder, revisiondata, assumehaveparentrevisions, deltaprevious
@@ -247,7 +247,7 def parsesizeflags(raw):
247 247 index = raw.index(b'\0')
248 248 except ValueError:
249 249 raise BadRemotefilelogHeader(
250 "unexpected remotefilelog header: illegal format"
250 b"unexpected remotefilelog header: illegal format"
251 251 )
252 252 header = raw[:index]
253 253 if header.startswith(b'v'):
@@ -267,7 +267,7 def parsesizeflags(raw):
267 267 size = int(header)
268 268 if size is None:
269 269 raise BadRemotefilelogHeader(
270 "unexpected remotefilelog header: no size found"
270 b"unexpected remotefilelog header: no size found"
271 271 )
272 272 return index + 1, size, flags
273 273
@@ -80,9 +80,25 class ShortRepository:
80 80 def __repr__(self):
81 81 return b'<ShortRepository: %s>' % self.scheme
82 82
83 def make_peer(self, ui, path, *args, **kwargs):
84 new_url = self.resolve(path.rawloc)
85 path = path.copy(new_raw_location=new_url)
86 cls = hg.peer_schemes.get(path.url.scheme)
87 if cls is not None:
88 return cls.make_peer(ui, path, *args, **kwargs)
89 return None
90
83 91 def instance(self, ui, url, create, intents=None, createopts=None):
84 92 url = self.resolve(url)
85 return hg._peerlookup(url).instance(
93 u = urlutil.url(url)
94 scheme = u.scheme or b'file'
95 if scheme in hg.peer_schemes:
96 cls = hg.peer_schemes[scheme]
97 elif scheme in hg.repo_schemes:
98 cls = hg.repo_schemes[scheme]
99 else:
100 cls = hg.LocalFactory
101 return cls.instance(
86 102 ui, url, create, intents=intents, createopts=createopts
87 103 )
88 104
@@ -119,24 +135,29 schemes = {
119 135 }
120 136
121 137
138 def _check_drive_letter(scheme: bytes) -> None:
139 """check if a scheme conflict with a Windows drive letter"""
140 if (
141 pycompat.iswindows
142 and len(scheme) == 1
143 and scheme.isalpha()
144 and os.path.exists(b'%s:\\' % scheme)
145 ):
146 msg = _(b'custom scheme %s:// conflicts with drive letter %s:\\\n')
147 msg %= (scheme, scheme.upper())
148 raise error.Abort(msg)
149
150
122 151 def extsetup(ui):
123 152 schemes.update(dict(ui.configitems(b'schemes')))
124 153 t = templater.engine(templater.parse)
125 154 for scheme, url in schemes.items():
126 if (
127 pycompat.iswindows
128 and len(scheme) == 1
129 and scheme.isalpha()
130 and os.path.exists(b'%s:\\' % scheme)
131 ):
132 raise error.Abort(
133 _(
134 b'custom scheme %s:// conflicts with drive '
135 b'letter %s:\\\n'
136 )
137 % (scheme, scheme.upper())
138 )
139 hg.schemes[scheme] = ShortRepository(url, scheme, t)
155 _check_drive_letter(scheme)
156 url_scheme = urlutil.url(url).scheme
157 if url_scheme in hg.peer_schemes:
158 hg.peer_schemes[scheme] = ShortRepository(url, scheme, t)
159 else:
160 hg.repo_schemes[scheme] = ShortRepository(url, scheme, t)
140 161
141 162 extensions.wrapfunction(urlutil, b'hasdriveletter', hasdriveletter)
142 163
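The factored-out guard only fires on Windows, for one-letter schemes that shadow an existing drive. A tiny standalone restatement of the predicate (a sketch, not the extension's code path):

    import os

    def conflicts_with_drive_letter(scheme: bytes, iswindows: bool) -> bool:
        # a one-letter alphabetic scheme such as b'c' can collide with C:\
        return (
            iswindows
            and len(scheme) == 1
            and scheme.isalpha()
            and os.path.exists(b'%s:\\' % scheme)
        )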
@@ -144,7 +165,11 def extsetup(ui):
144 165 @command(b'debugexpandscheme', norepo=True)
145 166 def expandscheme(ui, url, **opts):
146 167 """given a repo path, provide the scheme-expanded path"""
147 repo = hg._peerlookup(url)
148 if isinstance(repo, ShortRepository):
149 url = repo.resolve(url)
168 scheme = urlutil.url(url).scheme
169 if scheme in hg.peer_schemes:
170 cls = hg.peer_schemes[scheme]
171 else:
172 cls = hg.repo_schemes.get(scheme)
173 if cls is not None and isinstance(cls, ShortRepository):
174 url = cls.resolve(url)
150 175 ui.write(url + b'\n')
@@ -134,7 +134,7 def dosplit(ui, repo, tr, ctx, opts):
134 134 # Set working parent to ctx.p1(), and keep working copy as ctx's content
135 135 if ctx.node() != repo.dirstate.p1():
136 136 hg.clean(repo, ctx.node(), show_stats=False)
137 with repo.dirstate.parentchange():
137 with repo.dirstate.changing_parents(repo):
138 138 scmutil.movedirstate(repo, ctx.p1())
139 139
140 140 # Any modified, added, removed, deleted result means split is incomplete
@@ -80,7 +80,7 from mercurial.utils import (
80 80 )
81 81
82 82 try:
83 from mercurial import zstd
83 from mercurial import zstd # pytype: disable=import-error
84 84
85 85 zstd.__version__
86 86 except ImportError:
@@ -608,6 +608,7 class sqlitefilestore:
608 608 assumehaveparentrevisions=False,
609 609 deltamode=repository.CG_DELTAMODE_STD,
610 610 sidedata_helpers=None,
611 debug_info=None,
611 612 ):
612 613 if nodesorder not in (b'nodes', b'storage', b'linear', None):
613 614 raise error.ProgrammingError(
@@ -817,8 +817,8 def _dotransplant(ui, repo, *revs, **opt
817 817
818 818 sourcerepo = opts.get(b'source')
819 819 if sourcerepo:
820 u = urlutil.get_unique_pull_path(b'transplant', repo, ui, sourcerepo)[0]
821 peer = hg.peer(repo, opts, u)
820 path = urlutil.get_unique_pull_path_obj(b'transplant', ui, sourcerepo)
821 peer = hg.peer(repo, opts, path)
822 822 heads = pycompat.maplist(peer.lookup, opts.get(b'branch', ()))
823 823 target = set(heads)
824 824 for r in revs:
@@ -236,7 +236,7 def uncommit(ui, repo, *pats, **opts):
236 236 # Fully removed the old commit
237 237 mapping[old.node()] = ()
238 238
239 with repo.dirstate.parentchange():
239 with repo.dirstate.changing_parents(repo):
240 240 scmutil.movedirstate(repo, repo[newid], match)
241 241
242 242 scmutil.cleanupnodes(repo, mapping, b'uncommit', fixphase=True)
@@ -317,7 +317,7 def unamend(ui, repo, **opts):
317 317 newpredctx = repo[newprednode]
318 318 dirstate = repo.dirstate
319 319
320 with dirstate.parentchange():
320 with dirstate.changing_parents(repo):
321 321 scmutil.movedirstate(repo, newpredctx)
322 322
323 323 mapping = {curctx.node(): (newprednode,)}
@@ -216,17 +216,23 def reposetup(ui, repo):
216 216 def wrap_revert(orig, repo, ctx, names, uipathfn, actions, *args, **kwargs):
217 217 # reset dirstate cache for file we touch
218 218 ds = repo.dirstate
219 with ds.parentchange():
220 for filename in actions[b'revert'][0]:
221 entry = ds.get_entry(filename)
222 if entry is not None:
223 if entry.p1_tracked:
224 ds.update_file(
225 filename,
226 entry.tracked,
227 p1_tracked=True,
228 p2_info=entry.p2_info,
229 )
219 for filename in actions[b'revert'][0]:
220 entry = ds.get_entry(filename)
221 if entry is not None:
222 if entry.p1_tracked:
223 # If we revert the file, it is possibly dirty. However,
223 # this extension meddles with the file content and therefore
225 # its size. As a result, we cannot simply call
225 # `dirstate.set_possibly_dirty` as it will not affect the
227 # expected size of the file.
228 #
229 # At least, now, the quirk is properly documented.
230 ds.hacky_extension_update_file(
231 filename,
232 entry.tracked,
233 p1_tracked=entry.p1_tracked,
234 p2_info=entry.p2_info,
235 )
230 236 return orig(repo, ctx, names, uipathfn, actions, *args, **kwargs)
231 237
232 238
@@ -154,9 +154,14 class tarit:
154 154 )
155 155 self.fileobj = gzfileobj
156 156 return (
157 # taropen() wants Literal['a', 'r', 'w', 'x'] for the mode,
158 # but Literal[] is only available in 3.8+ without the
159 # typing_extensions backport.
160 # pytype: disable=wrong-arg-types
157 161 tarfile.TarFile.taropen( # pytype: disable=attribute-error
158 162 name, pycompat.sysstr(mode), gzfileobj
159 163 )
164 # pytype: enable=wrong-arg-types
160 165 )
161 166 else:
162 167 try:
@@ -315,8 +315,17 class bundleoperation:
315 315 * a way to construct a bundle response when applicable.
316 316 """
317 317
318 def __init__(self, repo, transactiongetter, captureoutput=True, source=b''):
318 def __init__(
319 self,
320 repo,
321 transactiongetter,
322 captureoutput=True,
323 source=b'',
324 remote=None,
325 ):
319 326 self.repo = repo
327 # the peer object who produced this bundle if available
328 self.remote = remote
320 329 self.ui = repo.ui
321 330 self.records = unbundlerecords()
322 331 self.reply = None
@@ -363,7 +372,7 def _notransaction():
363 372 raise TransactionUnavailable()
364 373
365 374
366 def applybundle(repo, unbundler, tr, source, url=None, **kwargs):
375 def applybundle(repo, unbundler, tr, source, url=None, remote=None, **kwargs):
367 376 # transform me into unbundler.apply() as soon as the freeze is lifted
368 377 if isinstance(unbundler, unbundle20):
369 378 tr.hookargs[b'bundle2'] = b'1'
@@ -371,10 +380,12 def applybundle(repo, unbundler, tr, sou
371 380 tr.hookargs[b'source'] = source
372 381 if url is not None and b'url' not in tr.hookargs:
373 382 tr.hookargs[b'url'] = url
374 return processbundle(repo, unbundler, lambda: tr, source=source)
383 return processbundle(
384 repo, unbundler, lambda: tr, source=source, remote=remote
385 )
375 386 else:
376 387 # the transactiongetter won't be used, but we might as well set it
377 op = bundleoperation(repo, lambda: tr, source=source)
388 op = bundleoperation(repo, lambda: tr, source=source, remote=remote)
378 389 _processchangegroup(op, unbundler, tr, source, url, **kwargs)
379 390 return op
380 391
@@ -450,7 +461,14 class partiterator:
450 461 )
451 462
452 463
453 def processbundle(repo, unbundler, transactiongetter=None, op=None, source=b''):
464 def processbundle(
465 repo,
466 unbundler,
467 transactiongetter=None,
468 op=None,
469 source=b'',
470 remote=None,
471 ):
454 472 """This function process a bundle, apply effect to/from a repo
455 473
456 474 It iterates over each part then searches for and uses the proper handling
@@ -466,7 +484,12 def processbundle(repo, unbundler, trans
466 484 if op is None:
467 485 if transactiongetter is None:
468 486 transactiongetter = _notransaction
469 op = bundleoperation(repo, transactiongetter, source=source)
487 op = bundleoperation(
488 repo,
489 transactiongetter,
490 source=source,
491 remote=remote,
492 )
470 493 # todo:
471 494 # - replace this is a init function soon.
472 495 # - exception catching
@@ -494,6 +517,10 def processparts(repo, op, unbundler):
494 517
495 518
496 519 def _processchangegroup(op, cg, tr, source, url, **kwargs):
520 if op.remote is not None and op.remote.path is not None:
521 remote_path = op.remote.path
522 kwargs = kwargs.copy()
523 kwargs['delta_base_reuse_policy'] = remote_path.delta_reuse_policy
497 524 ret = cg.apply(op.repo, tr, source, url, **kwargs)
498 525 op.records.add(
499 526 b'changegroup',
@@ -1938,7 +1965,12 def writebundle(
1938 1965 raise error.Abort(
1939 1966 _(b'old bundle types only supports v1 changegroups')
1940 1967 )
1968
1969 # HG20 is the case without 2 values to unpack, but is handled above.
1970 # pytype: disable=bad-unpacking
1941 1971 header, comp = bundletypes[bundletype]
1972 # pytype: enable=bad-unpacking
1973
1942 1974 if comp not in util.compengines.supportedbundletypes:
1943 1975 raise error.Abort(_(b'unknown stream compression type: %s') % comp)
1944 1976 compengine = util.compengines.forbundletype(comp)
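The pytype pragma exists because `bundletypes` maps most names to a `(header, compression)` pair while the `HG20` entry has a different shape; by this point `HG20` was handled above, so the two-value unpack is safe. A toy illustration of the table shape (contents sketched from the two-tuple pattern; the real table lives in the bundle code):

    bundletypes = {
        b'HG10UN': (b'HG10UN', b'UN'),
        b'HG10BZ': (b'HG10', b'BZ'),
        b'HG10GZ': (b'HG10GZ', b'GZ'),
    }
    header, comp = bundletypes[b'HG10BZ']  # safe once HG20 is ruled out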
@@ -5,6 +5,10
5 5
6 6 import collections
7 7
8 from typing import (
9 cast,
10 )
11
8 12 from .i18n import _
9 13
10 14 from .thirdparty import attr
@@ -247,7 +251,7 def parsebundlespec(repo, spec, strict=T
247 251 # required to apply it. If we see this metadata, compare against what the
248 252 # repo supports and error if the bundle isn't compatible.
249 253 if version == b'packed1' and b'requirements' in params:
250 requirements = set(params[b'requirements'].split(b','))
254 requirements = set(cast(bytes, params[b'requirements']).split(b','))
251 255 missingreqs = requirements - requirementsmod.STREAM_FIXED_REQUIREMENTS
252 256 if missingreqs:
253 257 raise error.UnsupportedBundleSpecification(
@@ -88,7 +88,7 class bundlerevlog(revlog.revlog):
88 88 )
89 89
90 90 if not self.index.has_node(deltabase):
91 raise LookupError(
91 raise error.LookupError(
92 92 deltabase, self.display_id, _(b'unknown delta base')
93 93 )
94 94
@@ -458,8 +458,8 class bundlerepository:
458 458 def cancopy(self):
459 459 return False
460 460
461 def peer(self):
462 return bundlepeer(self)
461 def peer(self, path=None):
462 return bundlepeer(self, path=path)
463 463
464 464 def getcwd(self):
465 465 return encoding.getcwd() # always outside the repo
@@ -5,7 +5,7 from typing import (
5 5
6 6 version: int
7 7
8 def bdiff(a: bytes, b: bytes): bytes
8 def bdiff(a: bytes, b: bytes) -> bytes: ...
9 9 def blocks(a: bytes, b: bytes) -> List[Tuple[int, int, int, int]]: ...
10 10 def fixws(s: bytes, allws: bool) -> bytes: ...
11 11 def splitnewlines(text: bytes) -> List[bytes]: ...
@@ -2,6 +2,7 from typing import (
2 2 AnyStr,
3 3 IO,
4 4 List,
5 Optional,
5 6 Sequence,
6 7 )
7 8
@@ -15,7 +16,7 class stat:
15 16 st_mtime: int
16 17 st_ctime: int
17 18
18 def listdir(path: bytes, st: bool, skip: bool) -> List[stat]: ...
19 def listdir(path: bytes, st: bool, skip: Optional[bool]) -> List[stat]: ...
19 20 def posixfile(name: AnyStr, mode: bytes, buffering: int) -> IO: ...
20 21 def statfiles(names: Sequence[bytes]) -> List[stat]: ...
21 22 def setprocname(name: bytes) -> None: ...
@@ -177,7 +177,7 static inline bool dirstate_item_c_remov
177 177 (dirstate_flag_p1_tracked | dirstate_flag_p2_info));
178 178 }
179 179
180 static inline bool dirstate_item_c_merged(dirstateItemObject *self)
180 static inline bool dirstate_item_c_modified(dirstateItemObject *self)
181 181 {
182 182 return ((self->flags & dirstate_flag_wc_tracked) &&
183 183 (self->flags & dirstate_flag_p1_tracked) &&
@@ -195,7 +195,7 static inline char dirstate_item_c_v1_st
195 195 {
196 196 if (dirstate_item_c_removed(self)) {
197 197 return 'r';
198 } else if (dirstate_item_c_merged(self)) {
198 } else if (dirstate_item_c_modified(self)) {
199 199 return 'm';
200 200 } else if (dirstate_item_c_added(self)) {
201 201 return 'a';
@@ -642,9 +642,9 static PyObject *dirstate_item_get_p2_in
642 642 }
643 643 };
644 644
645 static PyObject *dirstate_item_get_merged(dirstateItemObject *self)
645 static PyObject *dirstate_item_get_modified(dirstateItemObject *self)
646 646 {
647 if (dirstate_item_c_merged(self)) {
647 if (dirstate_item_c_modified(self)) {
648 648 Py_RETURN_TRUE;
649 649 } else {
650 650 Py_RETURN_FALSE;
@@ -709,7 +709,7 static PyGetSetDef dirstate_item_getset[
709 709 NULL},
710 710 {"added", (getter)dirstate_item_get_added, NULL, "added", NULL},
711 711 {"p2_info", (getter)dirstate_item_get_p2_info, NULL, "p2_info", NULL},
712 {"merged", (getter)dirstate_item_get_merged, NULL, "merged", NULL},
712 {"modified", (getter)dirstate_item_get_modified, NULL, "modified", NULL},
713 713 {"from_p2", (getter)dirstate_item_get_from_p2, NULL, "from_p2", NULL},
714 714 {"maybe_clean", (getter)dirstate_item_get_maybe_clean, NULL, "maybe_clean",
715 715 NULL},
@@ -1187,7 +1187,7 void dirs_module_init(PyObject *mod);
1187 1187 void manifest_module_init(PyObject *mod);
1188 1188 void revlog_module_init(PyObject *mod);
1189 1189
1190 static const int version = 20;
1190 static const int version = 21;
1191 1191
1192 1192 static void module_init(PyObject *mod)
1193 1193 {
@@ -76,3 +76,7 class nodetree:
76 76
77 77 def insert(self, rev: int) -> None: ...
78 78 def shortest(self, node: bytes) -> int: ...
79
80 # The IndexObject type here is defined in C, and there's no type for a buffer
81 # return, as of py3.11. https://github.com/python/typing/issues/593
82 def parse_index2(data: object, inline: object, format: int = ...) -> Tuple[object, Optional[Tuple[int, object]]]: ...
@@ -1446,16 +1446,25 static PyObject *index_issnapshot(indexO
1446 1446 static PyObject *index_findsnapshots(indexObject *self, PyObject *args)
1447 1447 {
1448 1448 Py_ssize_t start_rev;
1449 Py_ssize_t end_rev;
1449 1450 PyObject *cache;
1450 1451 Py_ssize_t base;
1451 1452 Py_ssize_t rev;
1452 1453 PyObject *key = NULL;
1453 1454 PyObject *value = NULL;
1454 1455 const Py_ssize_t length = index_length(self);
1455 if (!PyArg_ParseTuple(args, "O!n", &PyDict_Type, &cache, &start_rev)) {
1456 if (!PyArg_ParseTuple(args, "O!nn", &PyDict_Type, &cache, &start_rev,
1457 &end_rev)) {
1456 1458 return NULL;
1457 1459 }
1458 for (rev = start_rev; rev < length; rev++) {
1460 end_rev += 1;
1461 if (end_rev > length) {
1462 end_rev = length;
1463 }
1464 if (start_rev < 0) {
1465 start_rev = 0;
1466 }
1467 for (rev = start_rev; rev < end_rev; rev++) {
1459 1468 int issnap;
1460 1469 PyObject *allvalues = NULL;
1461 1470 issnap = index_issnapshotrev(self, rev);
@@ -1480,7 +1489,7 static PyObject *index_findsnapshots(ind
1480 1489 }
1481 1490 if (allvalues == NULL) {
1482 1491 int r;
1483 allvalues = PyList_New(0);
1492 allvalues = PySet_New(0);
1484 1493 if (!allvalues) {
1485 1494 goto bail;
1486 1495 }
@@ -1491,7 +1500,7 static PyObject *index_findsnapshots(ind
1491 1500 }
1492 1501 }
1493 1502 value = PyLong_FromSsize_t(rev);
1494 if (PyList_Append(allvalues, value)) {
1503 if (PySet_Add(allvalues, value)) {
1495 1504 goto bail;
1496 1505 }
1497 1506 Py_CLEAR(key);
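After this change `index_findsnapshots` scans only `[start_rev, end_rev]` (clamped to the index bounds) and collects snapshot revisions in a set per delta base instead of a list. A pure-Python sketch of the equivalent logic, assuming `issnapshot(rev)` and `deltabase(rev)` helpers:

    def findsnapshots(cache, start_rev, end_rev, length, issnapshot, deltabase):
        # clamp the requested range exactly as the C code now does
        end_rev = min(end_rev + 1, length)
        start_rev = max(start_rev, 0)
        for rev in range(start_rev, end_rev):
            if issnapshot(rev):
                cache.setdefault(deltabase(rev), set()).add(rev)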
@@ -8,6 +8,11
8 8
9 9 import struct
10 10
11 from typing import (
12 List,
13 Tuple,
14 )
15
11 16 from ..pure.bdiff import *
12 17 from . import _bdiff # pytype: disable=import-error
13 18
@@ -15,7 +20,7 ffi = _bdiff.ffi
15 20 lib = _bdiff.lib
16 21
17 22
18 def blocks(sa, sb):
23 def blocks(sa: bytes, sb: bytes) -> List[Tuple[int, int, int, int]]:
19 24 a = ffi.new(b"struct bdiff_line**")
20 25 b = ffi.new(b"struct bdiff_line**")
21 26 ac = ffi.new(b"char[]", str(sa))
@@ -29,7 +34,7 def blocks(sa, sb):
29 34 count = lib.bdiff_diff(a[0], an, b[0], bn, l)
30 35 if count < 0:
31 36 raise MemoryError
32 rl = [None] * count
37 rl = [(0, 0, 0, 0)] * count
33 38 h = l.next
34 39 i = 0
35 40 while h:
@@ -43,7 +48,7 def blocks(sa, sb):
43 48 return rl
44 49
45 50
46 def bdiff(sa, sb):
51 def bdiff(sa: bytes, sb: bytes) -> bytes:
47 52 a = ffi.new(b"struct bdiff_line**")
48 53 b = ffi.new(b"struct bdiff_line**")
49 54 ac = ffi.new(b"char[]", str(sa))
@@ -6,6 +6,8
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8
9 from typing import List
10
9 11 from ..pure.mpatch import *
10 12 from ..pure.mpatch import mpatchError # silence pyflakes
11 13 from . import _mpatch # pytype: disable=import-error
@@ -26,7 +28,7 def cffi_get_next_item(arg, pos):
26 28 return container[0]
27 29
28 30
29 def patches(text, bins):
31 def patches(text: bytes, bins: List[bytes]) -> bytes:
30 32 lgt = len(bins)
31 33 all = []
32 34 if not lgt:
@@ -105,6 +105,164 def writechunks(ui, chunks, filename, vf
105 105 os.unlink(cleanup)
106 106
107 107
108 def _dbg_ubdl_line(
109 ui,
110 indent,
111 key,
112 base_value=None,
113 percentage_base=None,
114 percentage_key=None,
115 ):
116 """Print one line of debug_unbundle_debug_info"""
117 line = b"DEBUG-UNBUNDLING: "
118 line += b' ' * (2 * indent)
119 key += b":"
120 padding = b''
121 if base_value is not None:
122 assert len(key) + 1 + (2 * indent) <= _KEY_PART_WIDTH
123 line += key.ljust(_KEY_PART_WIDTH - (2 * indent))
124 if isinstance(base_value, float):
125 line += b"%14.3f seconds" % base_value
126 else:
127 line += b"%10d" % base_value
128 padding = b' '
129 else:
130 line += key
131
132 if percentage_base is not None:
133 line += padding
134 padding = b''
135 assert base_value is not None
136 percentage = base_value * 100 // percentage_base
137 if percentage_key is not None:
138 line += b" (%3d%% of %s)" % (
139 percentage,
140 percentage_key,
141 )
142 else:
143 line += b" (%3d%%)" % percentage
144
145 line += b'\n'
146 ui.write_err(line)
147
148
149 def _sumf(items):
150 # python < 3.8 does not support the `start=0.0` keyword argument to sum
151 # So we have to cheat a bit until we drop support for those versions
152 if not items:
153 return 0.0
154 return sum(items)
155
156
157 def display_unbundle_debug_info(ui, debug_info):
158 """display an unbundling report from debug information"""
159 cl_info = []
160 mn_info = []
161 fl_info = []
162 _dispatch = [
163 (b'CHANGELOG:', cl_info),
164 (b'MANIFESTLOG:', mn_info),
165 (b'FILELOG:', fl_info),
166 ]
167 for e in debug_info:
168 for prefix, info in _dispatch:
169 if e["target-revlog"].startswith(prefix):
170 info.append(e)
171 break
172 else:
173 assert False, 'unreachable'
174 each_info = [
175 (b'changelog', cl_info),
176 (b'manifests', mn_info),
177 (b'files', fl_info),
178 ]
179
180 # General Revision Counts
181 _dbg_ubdl_line(ui, 0, b'revisions', len(debug_info))
182 for key, info in each_info:
183 if not info:
184 continue
185 _dbg_ubdl_line(ui, 1, key, len(info), len(debug_info))
186
187 # General Time spent
188 all_durations = [e['duration'] for e in debug_info]
189 all_durations.sort()
190 total_duration = _sumf(all_durations)
191 _dbg_ubdl_line(ui, 0, b'total-time', total_duration)
192
193 for key, info in each_info:
194 if not info:
195 continue
196 durations = [e['duration'] for e in info]
197 durations.sort()
198 _dbg_ubdl_line(ui, 1, key, _sumf(durations), total_duration)
199
200 # Count and cache reuse per delta type
201 each_types = {}
202 for key, info in each_info:
203 each_types[key] = types = {
204 b'full': 0,
205 b'full-cached': 0,
206 b'snapshot': 0,
207 b'snapshot-cached': 0,
208 b'delta': 0,
209 b'delta-cached': 0,
210 b'unknown': 0,
211 b'unknown-cached': 0,
212 }
213 for e in info:
214 types[e['type']] += 1
215 if e['using-cached-base']:
216 types[e['type'] + b'-cached'] += 1
217
218 EXPECTED_TYPES = (b'full', b'snapshot', b'delta', b'unknown')
219 if debug_info:
220 _dbg_ubdl_line(ui, 0, b'type-count')
221 for key, info in each_info:
222 if info:
223 _dbg_ubdl_line(ui, 1, key)
224 t = each_types[key]
225 for tn in EXPECTED_TYPES:
226 if t[tn]:
227 tc = tn + b'-cached'
228 _dbg_ubdl_line(ui, 2, tn, t[tn])
229 _dbg_ubdl_line(ui, 3, b'cached', t[tc], t[tn])
230
231 # time spent per delta type and cache reuse
232 each_type_time = {}
233 for key, info in each_info:
234 each_type_time[key] = t = {
235 b'full': [],
236 b'full-cached': [],
237 b'snapshot': [],
238 b'snapshot-cached': [],
239 b'delta': [],
240 b'delta-cached': [],
241 b'unknown': [],
242 b'unknown-cached': [],
243 }
244 for e in info:
245 t[e['type']].append(e['duration'])
246 if e['using-cached-base']:
247 t[e['type'] + b'-cached'].append(e['duration'])
248 for t_key, value in list(t.items()):
249 value.sort()
250 t[t_key] = _sumf(value)
251
252 if debug_info:
253 _dbg_ubdl_line(ui, 0, b'type-time')
254 for key, info in each_info:
255 if info:
256 _dbg_ubdl_line(ui, 1, key)
257 t = each_type_time[key]
258 td = total_duration # to save space on next lines
259 for tn in EXPECTED_TYPES:
260 if t[tn]:
261 tc = tn + b'-cached'
262 _dbg_ubdl_line(ui, 2, tn, t[tn], td, b"total")
263 _dbg_ubdl_line(ui, 3, b'cached', t[tc], td, b"total")
264
265
108 266 class cg1unpacker:
109 267 """Unpacker for cg1 changegroup streams.
110 268
@@ -254,7 +412,16 class cg1unpacker:
254 412 pos = next
255 413 yield closechunk()
256 414
257 def _unpackmanifests(self, repo, revmap, trp, prog, addrevisioncb=None):
415 def _unpackmanifests(
416 self,
417 repo,
418 revmap,
419 trp,
420 prog,
421 addrevisioncb=None,
422 debug_info=None,
423 delta_base_reuse_policy=None,
424 ):
258 425 self.callback = prog.increment
259 426 # no need to check for empty manifest group here:
260 427 # if the result of the merge of 1 and 2 is the same in 3 and 4,
@@ -263,7 +430,14 class cg1unpacker:
263 430 self.manifestheader()
264 431 deltas = self.deltaiter()
265 432 storage = repo.manifestlog.getstorage(b'')
266 storage.addgroup(deltas, revmap, trp, addrevisioncb=addrevisioncb)
433 storage.addgroup(
434 deltas,
435 revmap,
436 trp,
437 addrevisioncb=addrevisioncb,
438 debug_info=debug_info,
439 delta_base_reuse_policy=delta_base_reuse_policy,
440 )
267 441 prog.complete()
268 442 self.callback = None
269 443
@@ -276,6 +450,7 class cg1unpacker:
276 450 targetphase=phases.draft,
277 451 expectedtotal=None,
278 452 sidedata_categories=None,
453 delta_base_reuse_policy=None,
279 454 ):
280 455 """Add the changegroup returned by source.read() to this repo.
281 456 srctype is a string like 'push', 'pull', or 'unbundle'. url is
@@ -289,9 +464,19 class cg1unpacker:
289 464
290 465 `sidedata_categories` is an optional set of the remote's sidedata wanted
291 466 categories.
467
468 `delta_base_reuse_policy` is an optional argument; when set, it
469 controls how the deltas contained in the bundle are reused when
470 they are applied to the revlog.
471
472 See `DELTA_BASE_REUSE_*` entry in mercurial.revlogutils.constants.
292 473 """
293 474 repo = repo.unfiltered()
294 475
476 debug_info = None
477 if repo.ui.configbool(b'debug', b'unbundling-stats'):
478 debug_info = []
479
295 480 # Only useful if we're adding sidedata categories. If both peers have
296 481 # the same categories, then we simply don't do anything.
297 482 adding_sidedata = (
@@ -366,6 +551,8 class cg1unpacker:
366 551 alwayscache=True,
367 552 addrevisioncb=onchangelog,
368 553 duplicaterevisioncb=ondupchangelog,
554 debug_info=debug_info,
555 delta_base_reuse_policy=delta_base_reuse_policy,
369 556 ):
370 557 repo.ui.develwarn(
371 558 b'applied empty changelog from changegroup',
@@ -413,6 +600,8 class cg1unpacker:
413 600 trp,
414 601 progress,
415 602 addrevisioncb=on_manifest_rev,
603 debug_info=debug_info,
604 delta_base_reuse_policy=delta_base_reuse_policy,
416 605 )
417 606
418 607 needfiles = {}
@@ -449,6 +638,8 class cg1unpacker:
449 638 efiles,
450 639 needfiles,
451 640 addrevisioncb=on_filelog_rev,
641 debug_info=debug_info,
642 delta_base_reuse_policy=delta_base_reuse_policy,
452 643 )
453 644
454 645 if sidedata_helpers:
@@ -567,6 +758,8 class cg1unpacker:
567 758 b'changegroup-runhooks-%020i' % clstart,
568 759 lambda tr: repo._afterlock(runhooks),
569 760 )
761 if debug_info is not None:
762 display_unbundle_debug_info(repo.ui, debug_info)
570 763 finally:
571 764 repo.ui.flush()
572 765 # never return 0 here:
@@ -626,9 +819,24 class cg3unpacker(cg2unpacker):
626 819 protocol_flags = 0
627 820 return node, p1, p2, deltabase, cs, flags, protocol_flags
628 821
629 def _unpackmanifests(self, repo, revmap, trp, prog, addrevisioncb=None):
822 def _unpackmanifests(
823 self,
824 repo,
825 revmap,
826 trp,
827 prog,
828 addrevisioncb=None,
829 debug_info=None,
830 delta_base_reuse_policy=None,
831 ):
630 832 super(cg3unpacker, self)._unpackmanifests(
631 repo, revmap, trp, prog, addrevisioncb=addrevisioncb
833 repo,
834 revmap,
835 trp,
836 prog,
837 addrevisioncb=addrevisioncb,
838 debug_info=debug_info,
839 delta_base_reuse_policy=delta_base_reuse_policy,
632 840 )
633 841 for chunkdata in iter(self.filelogheader, {}):
634 842 # If we get here, there are directory manifests in the changegroup
@@ -636,7 +844,12 class cg3unpacker(cg2unpacker):
636 844 repo.ui.debug(b"adding %s revisions\n" % d)
637 845 deltas = self.deltaiter()
638 846 if not repo.manifestlog.getstorage(d).addgroup(
639 deltas, revmap, trp, addrevisioncb=addrevisioncb
847 deltas,
848 revmap,
849 trp,
850 addrevisioncb=addrevisioncb,
851 debug_info=debug_info,
852 delta_base_reuse_policy=delta_base_reuse_policy,
640 853 ):
641 854 raise error.Abort(_(b"received dir revlog group is empty"))
642 855
@@ -869,6 +1082,7 def deltagroup(
869 1082 fullclnodes=None,
870 1083 precomputedellipsis=None,
871 1084 sidedata_helpers=None,
1085 debug_info=None,
872 1086 ):
873 1087 """Calculate deltas for a set of revisions.
874 1088
@@ -978,6 +1192,7 def deltagroup(
978 1192 assumehaveparentrevisions=not ellipses,
979 1193 deltamode=deltamode,
980 1194 sidedata_helpers=sidedata_helpers,
1195 debug_info=debug_info,
981 1196 )
982 1197
983 1198 for i, revision in enumerate(revisions):
@@ -1003,6 +1218,187 def deltagroup(
1003 1218 progress.complete()
1004 1219
1005 1220
1221 def make_debug_info():
1222 """ "build a "new" debug_info dictionnary
1223
1224 That dictionnary can be used to gather information about the bundle process
1225 """
1226 return {
1227 'revision-total': 0,
1228 'revision-changelog': 0,
1229 'revision-manifest': 0,
1230 'revision-files': 0,
1231 'file-count': 0,
1232 'merge-total': 0,
1233 'available-delta': 0,
1234 'available-full': 0,
1235 'delta-against-prev': 0,
1236 'delta-full': 0,
1237 'delta-against-p1': 0,
1238 'denied-delta-candeltafn': 0,
1239 'denied-base-not-available': 0,
1240 'reused-storage-delta': 0,
1241 'computed-delta': 0,
1242 }
1243
1244
1245 def merge_debug_info(base, other):
1246 """merge the debug information from <other> into <base>
1247
1248 This function can be used to gather lower-level information into higher-level totals.
1249 """
1250 for key in (
1251 'revision-total',
1252 'revision-changelog',
1253 'revision-manifest',
1254 'revision-files',
1255 'merge-total',
1256 'available-delta',
1257 'available-full',
1258 'delta-against-prev',
1259 'delta-full',
1260 'delta-against-p1',
1261 'denied-delta-candeltafn',
1262 'denied-base-not-available',
1263 'reused-storage-delta',
1264 'computed-delta',
1265 ):
1266 base[key] += other[key]
1267
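A minimal sketch of how these counters are meant to compose (flow inferred from the functions above, not a documented API): per-revlog accumulators are created with `make_debug_info()` and rolled up into a top-level one with `merge_debug_info()`.

    total = make_debug_info()      # top-level accumulator
    cl = make_debug_info()         # e.g. changelog-only accumulator
    cl['revision-total'] += 1
    cl['revision-changelog'] += 1
    merge_debug_info(total, cl)    # fold per-revlog numbers into the total
    assert total['revision-changelog'] == 1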
1268
1269 _KEY_PART_WIDTH = 17
1270
1271
1272 def _dbg_bdl_line(
1273 ui,
1274 indent,
1275 key,
1276 base_value=None,
1277 percentage_base=None,
1278 percentage_key=None,
1279 percentage_ref=None,
1280 extra=None,
1281 ):
1282 """Print one line of debug_bundle_debug_info"""
1283 line = b"DEBUG-BUNDLING: "
1284 line += b' ' * (2 * indent)
1285 key += b":"
1286 if base_value is not None:
1287 assert len(key) + 1 + (2 * indent) <= _KEY_PART_WIDTH
1288 line += key.ljust(_KEY_PART_WIDTH - (2 * indent))
1289 line += b"%10d" % base_value
1290 else:
1291 line += key
1292
1293 if percentage_base is not None:
1294 assert base_value is not None
1295 percentage = base_value * 100 // percentage_base
1296 if percentage_key is not None:
1297 line += b" (%d%% of %s %d)" % (
1298 percentage,
1299 percentage_key,
1300 percentage_ref,
1301 )
1302 else:
1303 line += b" (%d%%)" % percentage
1304
1305 if extra:
1306 line += b" "
1307 line += extra
1308
1309 line += b'\n'
1310 ui.write_err(line)
1311
1312
1313 def display_bundling_debug_info(
1314 ui,
1315 debug_info,
1316 cl_debug_info,
1317 mn_debug_info,
1318 fl_debug_info,
1319 ):
1320 """display debug information gathered during a bundling through `ui`"""
1321 d = debug_info
1322 c = cl_debug_info
1323 m = mn_debug_info
1324 f = fl_debug_info
1325 all_info = [
1326 (b"changelog", b"cl", c),
1327 (b"manifests", b"mn", m),
1328 (b"files", b"fl", f),
1329 ]
1330 _dbg_bdl_line(ui, 0, b'revisions', d['revision-total'])
1331 _dbg_bdl_line(ui, 1, b'changelog', d['revision-changelog'])
1332 _dbg_bdl_line(ui, 1, b'manifest', d['revision-manifest'])
1333 extra = b'(for %d revlogs)' % d['file-count']
1334 _dbg_bdl_line(ui, 1, b'files', d['revision-files'], extra=extra)
1335 if d['merge-total']:
1336 _dbg_bdl_line(ui, 1, b'merge', d['merge-total'], d['revision-total'])
1337 for k, __, v in all_info:
1338 if v['merge-total']:
1339 _dbg_bdl_line(ui, 2, k, v['merge-total'], v['revision-total'])
1340
1341 _dbg_bdl_line(ui, 0, b'deltas')
1342 _dbg_bdl_line(
1343 ui,
1344 1,
1345 b'from-storage',
1346 d['reused-storage-delta'],
1347 percentage_base=d['available-delta'],
1348 percentage_key=b"available",
1349 percentage_ref=d['available-delta'],
1350 )
1351
1352 if d['denied-delta-candeltafn']:
1353 _dbg_bdl_line(ui, 2, b'denied-fn', d['denied-delta-candeltafn'])
1354 for __, k, v in all_info:
1355 if v['denied-delta-candeltafn']:
1356 _dbg_bdl_line(ui, 3, k, v['denied-delta-candeltafn'])
1357
1358 if d['denied-base-not-available']:
1359 _dbg_bdl_line(ui, 2, b'denied-nb', d['denied-base-not-available'])
1360 for k, __, v in all_info:
1361 if v['denied-base-not-available']:
1362 _dbg_bdl_line(ui, 3, k, v['denied-base-not-available'])
1363
1364 if d['computed-delta']:
1365 _dbg_bdl_line(ui, 1, b'computed', d['computed-delta'])
1366
1367 if d['available-full']:
1368 _dbg_bdl_line(
1369 ui,
1370 2,
1371 b'full',
1372 d['delta-full'],
1373 percentage_base=d['available-full'],
1374 percentage_key=b"native",
1375 percentage_ref=d['available-full'],
1376 )
1377 for k, __, v in all_info:
1378 if v['available-full']:
1379 _dbg_bdl_line(
1380 ui,
1381 3,
1382 k,
1383 v['delta-full'],
1384 percentage_base=v['available-full'],
1385 percentage_key=b"native",
1386 percentage_ref=v['available-full'],
1387 )
1388
1389 if d['delta-against-prev']:
1390 _dbg_bdl_line(ui, 2, b'previous', d['delta-against-prev'])
1391 for k, __, v in all_info:
1392 if v['delta-against-prev']:
1393 _dbg_bdl_line(ui, 3, k, v['delta-against-prev'])
1394
1395 if d['delta-against-p1']:
1396 _dbg_bdl_line(ui, 2, b'parent-1', d['delta-against-p1'])
1397 for k, __, v in all_info:
1398 if v['delta-against-p1']:
1399 _dbg_bdl_line(ui, 3, k, v['delta-against-p1'])
1400
1401
1006 1402 class cgpacker:
1007 1403 def __init__(
1008 1404 self,
@@ -1086,13 +1482,21 class cgpacker:
1086 1482 self._verbosenote = lambda s: None
1087 1483
1088 1484 def generate(
1089 self, commonrevs, clnodes, fastpathlinkrev, source, changelog=True
1485 self,
1486 commonrevs,
1487 clnodes,
1488 fastpathlinkrev,
1489 source,
1490 changelog=True,
1090 1491 ):
1091 1492 """Yield a sequence of changegroup byte chunks.
1092 1493 If changelog is False, changelog data won't be added to changegroup
1093 1494 """
1094 1495
1496 debug_info = None
1095 1497 repo = self._repo
1498 if repo.ui.configbool(b'debug', b'bundling-stats'):
1499 debug_info = make_debug_info()
1096 1500 cl = repo.changelog
1097 1501
1098 1502 self._verbosenote(_(b'uncompressed size of bundle content:\n'))
@@ -1107,14 +1511,19 class cgpacker:
1107 1511 # correctly advertise its sidedata categories directly.
1108 1512 remote_sidedata = repo._wanted_sidedata
1109 1513 sidedata_helpers = sidedatamod.get_sidedata_helpers(
1110 repo, remote_sidedata
1514 repo,
1515 remote_sidedata,
1111 1516 )
1112 1517
1518 cl_debug_info = None
1519 if debug_info is not None:
1520 cl_debug_info = make_debug_info()
1113 1521 clstate, deltas = self._generatechangelog(
1114 1522 cl,
1115 1523 clnodes,
1116 1524 generate=changelog,
1117 1525 sidedata_helpers=sidedata_helpers,
1526 debug_info=cl_debug_info,
1118 1527 )
1119 1528 for delta in deltas:
1120 1529 for chunk in _revisiondeltatochunks(
@@ -1126,6 +1535,9 class cgpacker:
1126 1535 close = closechunk()
1127 1536 size += len(close)
1128 1537 yield closechunk()
1538 if debug_info is not None:
1539 merge_debug_info(debug_info, cl_debug_info)
1540 debug_info['revision-changelog'] = cl_debug_info['revision-total']
1129 1541
1130 1542 self._verbosenote(_(b'%8.i (changelog)\n') % size)
1131 1543
@@ -1133,6 +1545,9 class cgpacker:
1133 1545 manifests = clstate[b'manifests']
1134 1546 changedfiles = clstate[b'changedfiles']
1135 1547
1548 if debug_info is not None:
1549 debug_info['file-count'] = len(changedfiles)
1550
1136 1551 # We need to make sure that the linkrev in the changegroup refers to
1137 1552 # the first changeset that introduced the manifest or file revision.
1138 1553 # The fastpath is usually safer than the slowpath, because the filelogs
@@ -1156,6 +1571,9 class cgpacker:
1156 1571 fnodes = {} # needed file nodes
1157 1572
1158 1573 size = 0
1574 mn_debug_info = None
1575 if debug_info is not None:
1576 mn_debug_info = make_debug_info()
1159 1577 it = self.generatemanifests(
1160 1578 commonrevs,
1161 1579 clrevorder,
@@ -1165,6 +1583,7 class cgpacker:
1165 1583 source,
1166 1584 clstate[b'clrevtomanifestrev'],
1167 1585 sidedata_helpers=sidedata_helpers,
1586 debug_info=mn_debug_info,
1168 1587 )
1169 1588
1170 1589 for tree, deltas in it:
@@ -1185,6 +1604,9 class cgpacker:
1185 1604 close = closechunk()
1186 1605 size += len(close)
1187 1606 yield close
1607 if debug_info is not None:
1608 merge_debug_info(debug_info, mn_debug_info)
1609 debug_info['revision-manifest'] = mn_debug_info['revision-total']
1188 1610
1189 1611 self._verbosenote(_(b'%8.i (manifests)\n') % size)
1190 1612 yield self._manifestsend
@@ -1199,6 +1621,9 class cgpacker:
1199 1621 manifests.clear()
1200 1622 clrevs = {cl.rev(x) for x in clnodes}
1201 1623
1624 fl_debug_info = None
1625 if debug_info is not None:
1626 fl_debug_info = make_debug_info()
1202 1627 it = self.generatefiles(
1203 1628 changedfiles,
1204 1629 commonrevs,
@@ -1208,6 +1633,7 class cgpacker:
1208 1633 fnodes,
1209 1634 clrevs,
1210 1635 sidedata_helpers=sidedata_helpers,
1636 debug_info=fl_debug_info,
1211 1637 )
1212 1638
1213 1639 for path, deltas in it:
@@ -1230,12 +1656,29 class cgpacker:
1230 1656 self._verbosenote(_(b'%8.i %s\n') % (size, path))
1231 1657
1232 1658 yield closechunk()
1659 if debug_info is not None:
1660 merge_debug_info(debug_info, fl_debug_info)
1661 debug_info['revision-files'] = fl_debug_info['revision-total']
1662
1663 if debug_info is not None:
1664 display_bundling_debug_info(
1665 repo.ui,
1666 debug_info,
1667 cl_debug_info,
1668 mn_debug_info,
1669 fl_debug_info,
1670 )
1233 1671
1234 1672 if clnodes:
1235 1673 repo.hook(b'outgoing', node=hex(clnodes[0]), source=source)
1236 1674
1237 1675 def _generatechangelog(
1238 self, cl, nodes, generate=True, sidedata_helpers=None
1676 self,
1677 cl,
1678 nodes,
1679 generate=True,
1680 sidedata_helpers=None,
1681 debug_info=None,
1239 1682 ):
1240 1683 """Generate data for changelog chunks.
1241 1684
@@ -1332,6 +1775,7 class cgpacker:
1332 1775 fullclnodes=self._fullclnodes,
1333 1776 precomputedellipsis=self._precomputedellipsis,
1334 1777 sidedata_helpers=sidedata_helpers,
1778 debug_info=debug_info,
1335 1779 )
1336 1780
1337 1781 return state, gen
@@ -1346,6 +1790,7 class cgpacker:
1346 1790 source,
1347 1791 clrevtolocalrev,
1348 1792 sidedata_helpers=None,
1793 debug_info=None,
1349 1794 ):
1350 1795 """Returns an iterator of changegroup chunks containing manifests.
1351 1796
@@ -1444,6 +1889,7 class cgpacker:
1444 1889 fullclnodes=self._fullclnodes,
1445 1890 precomputedellipsis=self._precomputedellipsis,
1446 1891 sidedata_helpers=sidedata_helpers,
1892 debug_info=debug_info,
1447 1893 )
1448 1894
1449 1895 if not self._oldmatcher.visitdir(store.tree[:-1]):
@@ -1483,6 +1929,7 class cgpacker:
1483 1929 fnodes,
1484 1930 clrevs,
1485 1931 sidedata_helpers=None,
1932 debug_info=None,
1486 1933 ):
1487 1934 changedfiles = [
1488 1935 f
@@ -1578,6 +2025,7 class cgpacker:
1578 2025 fullclnodes=self._fullclnodes,
1579 2026 precomputedellipsis=self._precomputedellipsis,
1580 2027 sidedata_helpers=sidedata_helpers,
2028 debug_info=debug_info,
1581 2029 )
1582 2030
1583 2031 yield fname, deltas
@@ -1867,7 +2315,12 def _changegroupinfo(repo, nodes, source
1867 2315
1868 2316
1869 2317 def makechangegroup(
1870 repo, outgoing, version, source, fastpath=False, bundlecaps=None
2318 repo,
2319 outgoing,
2320 version,
2321 source,
2322 fastpath=False,
2323 bundlecaps=None,
1871 2324 ):
1872 2325 cgstream = makestream(
1873 2326 repo,
@@ -1917,7 +2370,12 def makestream(
1917 2370
1918 2371 repo.hook(b'preoutgoing', throw=True, source=source)
1919 2372 _changegroupinfo(repo, csets, source)
1920 return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
2373 return bundler.generate(
2374 commonrevs,
2375 csets,
2376 fastpathlinkrev,
2377 source,
2378 )
1921 2379
1922 2380
1923 2381 def _addchangegroupfiles(
@@ -1928,6 +2386,8 def _addchangegroupfiles(
1928 2386 expectedfiles,
1929 2387 needfiles,
1930 2388 addrevisioncb=None,
2389 debug_info=None,
2390 delta_base_reuse_policy=None,
1931 2391 ):
1932 2392 revisions = 0
1933 2393 files = 0
@@ -1948,6 +2408,8 def _addchangegroupfiles(
1948 2408 revmap,
1949 2409 trp,
1950 2410 addrevisioncb=addrevisioncb,
2411 debug_info=debug_info,
2412 delta_base_reuse_policy=delta_base_reuse_policy,
1951 2413 )
1952 2414 if not added:
1953 2415 raise error.Abort(_(b"received file revlog group is empty"))
@@ -11,6 +11,15 import errno
11 11 import os
12 12 import re
13 13
14 from typing import (
15 Any,
16 AnyStr,
17 Dict,
18 Iterable,
19 Optional,
20 cast,
21 )
22
14 23 from .i18n import _
15 24 from .node import (
16 25 hex,
@@ -29,7 +38,6 from . import (
29 38 changelog,
30 39 copies,
31 40 crecord as crecordmod,
32 dirstateguard,
33 41 encoding,
34 42 error,
35 43 formatter,
@@ -65,14 +73,10 from .revlogutils import (
65 73 )
66 74
67 75 if pycompat.TYPE_CHECKING:
68 from typing import (
69 Any,
70 Dict,
76 from . import (
77 ui as uimod,
71 78 )
72 79
73 for t in (Any, Dict):
74 assert t
75
76 80 stringio = util.stringio
77 81
78 82 # templates of common command options
@@ -269,13 +273,16 debugrevlogopts = [
269 273 _linebelow = b"^HG: ------------------------ >8 ------------------------$"
270 274
271 275
272 def check_at_most_one_arg(opts, *args):
276 def check_at_most_one_arg(
277 opts: Dict[AnyStr, Any],
278 *args: AnyStr,
279 ) -> Optional[AnyStr]:
273 280 """abort if more than one of the arguments are in opts
274 281
275 282 Returns the unique argument or None if none of them were specified.
276 283 """
277 284
278 def to_display(name):
285 def to_display(name: AnyStr) -> bytes:
279 286 return pycompat.sysbytes(name).replace(b'_', b'-')
280 287
281 288 previous = None
@@ -290,7 +297,11 def check_at_most_one_arg(opts, *args):
290 297 return previous
291 298
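Since `AnyStr` binds per call site, both byte-string and native-string keys type-check under the new signature; both flavours occur later in this changeset, e.g.:

    check_at_most_one_arg(opts, b'message', b'logfile')  # bytes keys
    check_at_most_one_arg(opts, 'draft', 'secret')       # str keys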
292 299
293 def check_incompatible_arguments(opts, first, others):
300 def check_incompatible_arguments(
301 opts: Dict[AnyStr, Any],
302 first: AnyStr,
303 others: Iterable[AnyStr],
304 ) -> None:
294 305 """abort if the first argument is given along with any of the others
295 306
296 307 Unlike check_at_most_one_arg(), `others` are not mutually exclusive
@@ -300,7 +311,7 def check_incompatible_arguments(opts, f
300 311 check_at_most_one_arg(opts, first, other)
301 312
302 313
303 def resolve_commit_options(ui, opts):
314 def resolve_commit_options(ui: "uimod.ui", opts: Dict[str, Any]) -> bool:
304 315 """modify commit options dict to handle related options
305 316
306 317 The return value indicates that ``rewrite.update-timestamp`` is the reason
@@ -327,7 +338,7 def resolve_commit_options(ui, opts):
327 338 return datemaydiffer
328 339
329 340
330 def check_note_size(opts):
341 def check_note_size(opts: Dict[str, Any]) -> None:
331 342 """make sure note is of valid format"""
332 343
333 344 note = opts.get('note')
@@ -638,7 +649,7 def dorecord(
638 649 # already called within a `pendingchange`, However we
639 650 # are taking a shortcut here in order to be able to
640 651 # quickly deprecated the older API.
641 with dirstate.parentchange():
652 with dirstate.changing_parents(repo):
642 653 dirstate.update_file(
643 654 realname,
644 655 p1_tracked=True,
@@ -1115,12 +1126,12 def bailifchanged(repo, merge=True, hint
1115 1126 ctx.sub(s).bailifchanged(hint=hint)
1116 1127
1117 1128
1118 def logmessage(ui, opts):
1129 def logmessage(ui: "uimod.ui", opts: Dict[bytes, Any]) -> Optional[bytes]:
1119 1130 """get the log message according to -m and -l option"""
1120 1131
1121 1132 check_at_most_one_arg(opts, b'message', b'logfile')
1122 1133
1123 message = opts.get(b'message')
1134 message = cast(Optional[bytes], opts.get(b'message'))
1124 1135 logfile = opts.get(b'logfile')
1125 1136
1126 1137 if not message and logfile:
@@ -1465,7 +1476,7 def openrevlog(repo, cmd, file_, opts):
1465 1476 return openstorage(repo, cmd, file_, opts, returnrevlog=True)
1466 1477
1467 1478
1468 def copy(ui, repo, pats, opts, rename=False):
1479 def copy(ui, repo, pats, opts: Dict[bytes, Any], rename=False):
1469 1480 check_incompatible_arguments(opts, b'forget', [b'dry_run'])
1470 1481
1471 1482 # called with the repo lock held
@@ -1532,7 +1543,7 def copy(ui, repo, pats, opts, rename=Fa
1532 1543 new_node = mem_ctx.commit()
1533 1544
1534 1545 if repo.dirstate.p1() == ctx.node():
1535 with repo.dirstate.parentchange():
1546 with repo.dirstate.changing_parents(repo):
1536 1547 scmutil.movedirstate(repo, repo[new_node])
1537 1548 replacements = {ctx.node(): [new_node]}
1538 1549 scmutil.cleanupnodes(
@@ -1625,7 +1636,7 def copy(ui, repo, pats, opts, rename=Fa
1625 1636 new_node = mem_ctx.commit()
1626 1637
1627 1638 if repo.dirstate.p1() == ctx.node():
1628 with repo.dirstate.parentchange():
1639 with repo.dirstate.changing_parents(repo):
1629 1640 scmutil.movedirstate(repo, repo[new_node])
1630 1641 replacements = {ctx.node(): [new_node]}
1631 1642 scmutil.cleanupnodes(repo, replacements, b'copy', fixphase=True)
@@ -2778,7 +2789,7 def cat(ui, repo, ctx, matcher, basefm,
2778 2789 basefm,
2779 2790 fntemplate,
2780 2791 subprefix,
2781 **pycompat.strkwargs(opts)
2792 **pycompat.strkwargs(opts),
2782 2793 ):
2783 2794 err = 0
2784 2795 except error.RepoLookupError:
@@ -2789,29 +2800,135 def cat(ui, repo, ctx, matcher, basefm,
2789 2800 return err
2790 2801
2791 2802
2803 class _AddRemoveContext:
2804 """a small (hacky) context to deal with lazy opening of context
2805
2806 This is to be used in the `commit` function right below. This deals with
2807 lazily open a `changing_files` context inside a `transaction` that span the
2808 full commit operation.
2809
2810 We need :
2811 - a `changing_files` context to wrap the dirstate change within the
2812 "addremove" operation,
2813 - a transaction to make sure these change are not written right after the
2814 addremove, but when the commit operation succeed.
2815
2816 However it get complicated because:
2817 - opening a transaction "this early" shuffle hooks order, especially the
2818 `precommit` one happening after the `pretxtopen` one which I am not too
2819 enthusiastic about.
2820 - the `mq` extensions + the `record` extension stacks many layers of call
2821 to implement `qrefresh --interactive` and this result with `mq` calling a
2822 `strip` in the middle of this function. Which prevent the existence of
2823 transaction wrapping all of its function code. (however, `qrefresh` never
2824 call the `addremove` bits.
2825 - the largefile extensions (and maybe other extensions?) wraps `addremove`
2826 so slicing `addremove` in smaller bits is a complex endeavour.
2827
2828 So I eventually took a this shortcut that open the transaction if we
2829 actually needs it, not disturbing much of the rest of the code.
2830
2831 It will result in some hooks order change for `hg commit --addremove`,
2832 however it seems a corner case enough to ignore that for now (hopefully).
2833
2834 Notes that None of the above problems seems insurmountable, however I have
2835 been fighting with this specific piece of code for a couple of day already
2836 and I need a solution to keep moving forward on the bigger work around
2837 `changing_files` context that is being introduced at the same time as this
2838 hack.
2839
2840 Each problem seems to have a solution:
2841 - the hook order issue could be solved by refactoring the many-layer stack
2842 that currently composes a commit and calling them earlier,
2843 - the mq issue could be solved by refactoring `mq` so that the final strip
2844 is done after transaction closure. Be warned that the mq code is quite
2845 antic however.
2846 - large-file could be reworked in parallel of the `addremove` to be
2847 friendlier to this.
2848
2849 However each of these tasks are too much a diversion right now. In addition
2850 they will be much easier to undertake when the `changing_files` dust has
2851 settled."""
2852
2853 def __init__(self, repo):
2854 self._repo = repo
2855 self._transaction = None
2856 self._dirstate_context = None
2857 self._state = None
2858
2859 def __enter__(self):
2860 assert self._state is None
2861 self._state = True
2862 return self
2863
2864 def open_transaction(self):
2865 """open a `transaction` and `changing_files` context
2866
2867 Call this when you know that change to the dirstate will be needed and
2868 we need to open the transaction early
2869
2870 This will also open the dirstate `changing_files` context, so you should
2871 call `close_dirstate_context` when the distate changes are done.
2872 """
2873 assert self._state is not None
2874 if self._transaction is None:
2875 self._transaction = self._repo.transaction(b'commit')
2876 self._transaction.__enter__()
2877 if self._dirstate_context is None:
2878 self._dirstate_context = self._repo.dirstate.changing_files(
2879 self._repo
2880 )
2881 self._dirstate_context.__enter__()
2882
2883 def close_dirstate_context(self):
2884 """close the change_files if any
2885
2886 Call this after the (potential) `open_transaction` call to close the
2887 (potential) changing_files context.
2888 """
2889 if self._dirstate_context is not None:
2890 self._dirstate_context.__exit__(None, None, None)
2891 self._dirstate_context = None
2892
2893 def __exit__(self, *args):
2894 if self._dirstate_context is not None:
2895 self._dirstate_context.__exit__(*args)
2896 if self._transaction is not None:
2897 self._transaction.__exit__(*args)
2898
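A minimal usage sketch of the protocol (hypothetical names; the real caller is the `commit` function right below):

    with _AddRemoveContext(repo) as c:
        if need_addremove:               # hypothetical condition
            c.open_transaction()         # lazily opens tr + changing_files
            mutate_dirstate(repo)        # hypothetical dirstate changes
            c.close_dirstate_context()   # dirstate done; tr stays open
        run_commit(repo)                 # hypothetical; inside the tr if opened
    # both contexts unwind in __exit__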
2899
2792 2900 def commit(ui, repo, commitfunc, pats, opts):
2793 2901 '''commit the specified files or all outstanding changes'''
2794 2902 date = opts.get(b'date')
2795 2903 if date:
2796 2904 opts[b'date'] = dateutil.parsedate(date)
2797 message = logmessage(ui, opts)
2798 matcher = scmutil.match(repo[None], pats, opts)
2799
2800 dsguard = None
2801 # extract addremove carefully -- this function can be called from a command
2802 # that doesn't support addremove
2803 if opts.get(b'addremove'):
2804 dsguard = dirstateguard.dirstateguard(repo, b'commit')
2805 with dsguard or util.nullcontextmanager():
2806 if dsguard:
2807 relative = scmutil.anypats(pats, opts)
2808 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
2809 if scmutil.addremove(repo, matcher, b"", uipathfn, opts) != 0:
2810 raise error.Abort(
2811 _(b"failed to mark all new/missing files as added/removed")
2905
2906 with repo.wlock(), repo.lock():
2907 message = logmessage(ui, opts)
2908 matcher = scmutil.match(repo[None], pats, opts)
2909
2910 with _AddRemoveContext(repo) as c:
2911 # extract addremove carefully -- this function can be called from a
2912 # command that doesn't support addremove
2913 if opts.get(b'addremove'):
2914 relative = scmutil.anypats(pats, opts)
2915 uipathfn = scmutil.getuipathfn(
2916 repo,
2917 legacyrelativevalue=relative,
2812 2918 )
2813
2814 return commitfunc(ui, repo, message, matcher, opts)
2919 r = scmutil.addremove(
2920 repo,
2921 matcher,
2922 b"",
2923 uipathfn,
2924 opts,
2925 open_tr=c.open_transaction,
2926 )
2927 m = _(b"failed to mark all new/missing files as added/removed")
2928 if r != 0:
2929 raise error.Abort(m)
2930 c.close_dirstate_context()
2931 return commitfunc(ui, repo, message, matcher, opts)
2815 2932
2816 2933
2817 2934 def samefile(f, ctx1, ctx2):
@@ -2826,7 +2943,7 def samefile(f, ctx1, ctx2):
2826 2943 return f not in ctx2.manifest()
2827 2944
2828 2945
2829 def amend(ui, repo, old, extra, pats, opts):
2946 def amend(ui, repo, old, extra, pats, opts: Dict[str, Any]):
2830 2947 # avoid cycle context -> subrepo -> cmdutil
2831 2948 from . import context
2832 2949
@@ -2880,12 +2997,13 def amend(ui, repo, old, extra, pats, op
2880 2997 matcher = scmutil.match(wctx, pats, opts)
2881 2998 relative = scmutil.anypats(pats, opts)
2882 2999 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
2883 if opts.get(b'addremove') and scmutil.addremove(
2884 repo, matcher, b"", uipathfn, opts
2885 ):
2886 raise error.Abort(
2887 _(b"failed to mark all new/missing files as added/removed")
2888 )
3000 if opts.get(b'addremove'):
3001 with repo.dirstate.changing_files(repo):
3002 if scmutil.addremove(repo, matcher, b"", uipathfn, opts) != 0:
3003 m = _(
3004 b"failed to mark all new/missing files as added/removed"
3005 )
3006 raise error.Abort(m)
2889 3007
2890 3008 # Check subrepos. This depends on in-place wctx._status update in
2891 3009 # subrepo.precommit(). To minimize the risk of this hack, we do
@@ -3019,10 +3137,12 def amend(ui, repo, old, extra, pats, op
3019 3137 commitphase = None
3020 3138 if opts.get(b'secret'):
3021 3139 commitphase = phases.secret
3140 elif opts.get(b'draft'):
3141 commitphase = phases.draft
3022 3142 newid = repo.commitctx(new)
3023 3143 ms.reset()
3024 3144
3025 with repo.dirstate.parentchange():
3145 with repo.dirstate.changing_parents(repo):
3026 3146 # Reroute the working copy parent to the new changeset
3027 3147 repo.setparents(newid, repo.nullid)
3028 3148
@@ -3285,7 +3405,7 def revert(ui, repo, ctx, *pats, **opts)
3285 3405 names = {}
3286 3406 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
3287 3407
3288 with repo.wlock():
3408 with repo.wlock(), repo.dirstate.changing_files(repo):
3289 3409 ## filling of the `names` mapping
3290 3410 # walk dirstate to fill `names`
3291 3411
@@ -13,6 +13,7 import sys
13 13 from .i18n import _
14 14 from .node import (
15 15 hex,
16 nullid,
16 17 nullrev,
17 18 short,
18 19 wdirrev,
@@ -28,7 +29,6 from . import (
28 29 copies,
29 30 debugcommands as debugcommandsmod,
30 31 destutil,
31 dirstateguard,
32 32 discovery,
33 33 encoding,
34 34 error,
@@ -252,10 +252,11 def add(ui, repo, *pats, **opts):
252 252 Returns 0 if all files are successfully added.
253 253 """
254 254
255 m = scmutil.match(repo[None], pats, pycompat.byteskwargs(opts))
256 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
257 rejected = cmdutil.add(ui, repo, m, b"", uipathfn, False, **opts)
258 return rejected and 1 or 0
255 with repo.wlock(), repo.dirstate.changing_files(repo):
256 m = scmutil.match(repo[None], pats, pycompat.byteskwargs(opts))
257 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
258 rejected = cmdutil.add(ui, repo, m, b"", uipathfn, False, **opts)
259 return rejected and 1 or 0
259 260
260 261
261 262 @command(
@@ -330,10 +331,11 def addremove(ui, repo, *pats, **opts):
330 331 opts = pycompat.byteskwargs(opts)
331 332 if not opts.get(b'similarity'):
332 333 opts[b'similarity'] = b'100'
333 matcher = scmutil.match(repo[None], pats, opts)
334 relative = scmutil.anypats(pats, opts)
335 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
336 return scmutil.addremove(repo, matcher, b"", uipathfn, opts)
334 with repo.wlock(), repo.dirstate.changing_files(repo):
335 matcher = scmutil.match(repo[None], pats, opts)
336 relative = scmutil.anypats(pats, opts)
337 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
338 return scmutil.addremove(repo, matcher, b"", uipathfn, opts)
337 339
338 340
339 341 @command(
@@ -822,7 +824,7 def _dobackout(ui, repo, node=None, rev=
822 824 bheads = repo.branchheads(branch)
823 825 rctx = scmutil.revsingle(repo, hex(parent))
824 826 if not opts.get(b'merge') and op1 != node:
825 with dirstateguard.dirstateguard(repo, b'backout'):
827 with repo.transaction(b"backout"):
826 828 overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
827 829 with ui.configoverride(overrides, b'backout'):
828 830 stats = mergemod.back_out(ctx, parent=repo[parent])
@@ -1635,7 +1637,7 def bundle(ui, repo, fname, *dests, **op
1635 1637 missing = set()
1636 1638 excluded = set()
1637 1639 for path in urlutil.get_push_paths(repo, ui, dests):
1638 other = hg.peer(repo, opts, path.rawloc)
1640 other = hg.peer(repo, opts, path)
1639 1641 if revs is not None:
1640 1642 hex_revs = [repo[r].hex() for r in revs]
1641 1643 else:
@@ -2008,6 +2010,7 def clone(ui, source, dest=None, **opts)
2008 2010 (b'', b'close-branch', None, _(b'mark a branch head as closed')),
2009 2011 (b'', b'amend', None, _(b'amend the parent of the working directory')),
2010 2012 (b's', b'secret', None, _(b'use the secret phase for committing')),
2013 (b'', b'draft', None, _(b'use the draft phase for committing')),
2011 2014 (b'e', b'edit', None, _(b'invoke editor on commit messages')),
2012 2015 (
2013 2016 b'',
@@ -2082,6 +2085,8 def commit(ui, repo, *pats, **opts):
2082 2085
2083 2086 hg commit --amend --date now
2084 2087 """
2088 cmdutil.check_at_most_one_arg(opts, 'draft', 'secret')
2089 cmdutil.check_incompatible_arguments(opts, 'subrepos', ['amend'])
2085 2090 with repo.wlock(), repo.lock():
2086 2091 return _docommit(ui, repo, *pats, **opts)
2087 2092
@@ -2097,7 +2102,6 def _docommit(ui, repo, *pats, **opts):
2097 2102 return 1 if ret == 0 else ret
2098 2103
2099 2104 if opts.get('subrepos'):
2100 cmdutil.check_incompatible_arguments(opts, 'subrepos', ['amend'])
2101 2105 # Let --subrepos on the command line override config setting.
2102 2106 ui.setconfig(b'ui', b'commitsubrepos', True, b'commit')
2103 2107
@@ -2174,6 +2178,8 def _docommit(ui, repo, *pats, **opts):
2174 2178 overrides = {}
2175 2179 if opts.get(b'secret'):
2176 2180 overrides[(b'phases', b'new-commit')] = b'secret'
2181 elif opts.get(b'draft'):
2182 overrides[(b'phases', b'new-commit')] = b'draft'
2177 2183
2178 2184 baseui = repo.baseui
2179 2185 with baseui.configoverride(overrides, b'commit'):
@@ -2491,7 +2497,19 def copy(ui, repo, *pats, **opts):
2491 2497 Returns 0 on success, 1 if errors are encountered.
2492 2498 """
2493 2499 opts = pycompat.byteskwargs(opts)
2494 with repo.wlock():
2500
2501 context = repo.dirstate.changing_files
2502 rev = opts.get(b'at_rev')
2503 ctx = None
2504 if rev:
2505 ctx = logcmdutil.revsingle(repo, rev)
2506 if ctx.rev() is not None:
2507
2508 def context(repo):
2509 return util.nullcontextmanager()
2510
2511 opts[b'at_rev'] = ctx.rev()
2512 with repo.wlock(), context(repo):
2495 2513 return cmdutil.copy(ui, repo, pats, opts)
2496 2514
2497 2515
@@ -2960,19 +2978,20 def forget(ui, repo, *pats, **opts):
2960 2978 if not pats:
2961 2979 raise error.InputError(_(b'no files specified'))
2962 2980
2963 m = scmutil.match(repo[None], pats, opts)
2964 dryrun, interactive = opts.get(b'dry_run'), opts.get(b'interactive')
2965 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
2966 rejected = cmdutil.forget(
2967 ui,
2968 repo,
2969 m,
2970 prefix=b"",
2971 uipathfn=uipathfn,
2972 explicitonly=False,
2973 dryrun=dryrun,
2974 interactive=interactive,
2975 )[0]
2981 with repo.wlock(), repo.dirstate.changing_files(repo):
2982 m = scmutil.match(repo[None], pats, opts)
2983 dryrun, interactive = opts.get(b'dry_run'), opts.get(b'interactive')
2984 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
2985 rejected = cmdutil.forget(
2986 ui,
2987 repo,
2988 m,
2989 prefix=b"",
2990 uipathfn=uipathfn,
2991 explicitonly=False,
2992 dryrun=dryrun,
2993 interactive=interactive,
2994 )[0]
2976 2995 return rejected and 1 or 0
2977 2996
2978 2997
@@ -3911,12 +3930,11 def identify(
3911 3930 peer = None
3912 3931 try:
3913 3932 if source:
3914 source, branches = urlutil.get_unique_pull_path(
3915 b'identify', repo, ui, source
3916 )
3933 path = urlutil.get_unique_pull_path_obj(b'identify', ui, source)
3917 3934 # only pass ui when no repo
3918 peer = hg.peer(repo or ui, opts, source)
3935 peer = hg.peer(repo or ui, opts, path)
3919 3936 repo = peer.local()
3937 branches = (path.branch, [])
3920 3938 revs, checkout = hg.addbranchrevs(repo, peer, branches, None)
3921 3939
3922 3940 fm = ui.formatter(b'identify', opts)
@@ -4229,12 +4247,10 def import_(ui, repo, patch1=None, *patc
4229 4247 if not opts.get(b'no_commit'):
4230 4248 lock = repo.lock
4231 4249 tr = lambda: repo.transaction(b'import')
4232 dsguard = util.nullcontextmanager
4233 4250 else:
4234 4251 lock = util.nullcontextmanager
4235 4252 tr = util.nullcontextmanager
4236 dsguard = lambda: dirstateguard.dirstateguard(repo, b'import')
4237 with lock(), tr(), dsguard():
4253 with lock(), tr():
4238 4254 parents = repo[None].parents()
4239 4255 for patchurl in patches:
4240 4256 if patchurl == b'-':
@@ -4383,17 +4399,15 def incoming(ui, repo, source=b"default"
4383 4399 if opts.get(b'bookmarks'):
4384 4400 srcs = urlutil.get_pull_paths(repo, ui, [source])
4385 4401 for path in srcs:
4386 source, branches = urlutil.parseurl(
4387 path.rawloc, opts.get(b'branch')
4388 )
4389 other = hg.peer(repo, opts, source)
4402 # XXX the "branches" options are not used. Should it be used?
4403 other = hg.peer(repo, opts, path)
4390 4404 try:
4391 4405 if b'bookmarks' not in other.listkeys(b'namespaces'):
4392 4406 ui.warn(_(b"remote doesn't support bookmarks\n"))
4393 4407 return 0
4394 4408 ui.pager(b'incoming')
4395 4409 ui.status(
4396 _(b'comparing with %s\n') % urlutil.hidepassword(source)
4410 _(b'comparing with %s\n') % urlutil.hidepassword(path.loc)
4397 4411 )
4398 4412 return bookmarks.incoming(
4399 4413 ui, repo, other, mode=path.bookmarks_mode
@@ -4426,7 +4440,7 def init(ui, dest=b".", **opts):
4426 4440 Returns 0 on success.
4427 4441 """
4428 4442 opts = pycompat.byteskwargs(opts)
4429 path = urlutil.get_clone_path(ui, dest)[1]
4443 path = urlutil.get_clone_path_obj(ui, dest)
4430 4444 peer = hg.peer(ui, opts, path, create=True)
4431 4445 peer.close()
4432 4446
@@ -5038,14 +5052,13 def outgoing(ui, repo, *dests, **opts):
5038 5052 opts = pycompat.byteskwargs(opts)
5039 5053 if opts.get(b'bookmarks'):
5040 5054 for path in urlutil.get_push_paths(repo, ui, dests):
5041 dest = path.pushloc or path.loc
5042 other = hg.peer(repo, opts, dest)
5055 other = hg.peer(repo, opts, path)
5043 5056 try:
5044 5057 if b'bookmarks' not in other.listkeys(b'namespaces'):
5045 5058 ui.warn(_(b"remote doesn't support bookmarks\n"))
5046 5059 return 0
5047 5060 ui.status(
5048 _(b'comparing with %s\n') % urlutil.hidepassword(dest)
5061 _(b'comparing with %s\n') % urlutil.hidepassword(path.loc)
5049 5062 )
5050 5063 ui.pager(b'outgoing')
5051 5064 return bookmarks.outgoing(ui, repo, other)
@@ -5434,12 +5447,12 def pull(ui, repo, *sources, **opts):
5434 5447 raise error.InputError(msg, hint=hint)
5435 5448
5436 5449 for path in urlutil.get_pull_paths(repo, ui, sources):
5437 source, branches = urlutil.parseurl(path.rawloc, opts.get(b'branch'))
5438 ui.status(_(b'pulling from %s\n') % urlutil.hidepassword(source))
5450 ui.status(_(b'pulling from %s\n') % urlutil.hidepassword(path.loc))
5439 5451 ui.flush()
5440 other = hg.peer(repo, opts, source)
5452 other = hg.peer(repo, opts, path)
5441 5453 update_conflict = None
5442 5454 try:
5455 branches = (path.branch, opts.get(b'branch', []))
5443 5456 revs, checkout = hg.addbranchrevs(
5444 5457 repo, other, branches, opts.get(b'rev')
5445 5458 )
@@ -5515,8 +5528,12 def pull(ui, repo, *sources, **opts):
5515 5528 elif opts.get(b'branch'):
5516 5529 brev = opts[b'branch'][0]
5517 5530 else:
5518 brev = branches[0]
5519 repo._subtoppath = source
5531 brev = path.branch
5532
5533 # XXX path: we are losing the `path` object here. Keeping it
5534 # would be valuable, for example as a "variant", as we do
5535 # for pushes.
5536 repo._subtoppath = path.loc
5520 5537 try:
5521 5538 update_conflict = postincoming(
5522 5539 ui, repo, modheads, opts.get(b'update'), checkout, brev
@@ -5766,7 +5783,7 def push(ui, repo, *dests, **opts):
5766 5783 some_pushed = False
5767 5784 result = 0
5768 5785 for path in urlutil.get_push_paths(repo, ui, dests):
5769 dest = path.pushloc or path.loc
5786 dest = path.loc
5770 5787 branches = (path.branch, opts.get(b'branch') or [])
5771 5788 ui.status(_(b'pushing to %s\n') % urlutil.hidepassword(dest))
5772 5789 revs, checkout = hg.addbranchrevs(
@@ -5940,12 +5957,13 def remove(ui, repo, *pats, **opts):
5940 5957 if not pats and not after:
5941 5958 raise error.InputError(_(b'no files specified'))
5942 5959
5943 m = scmutil.match(repo[None], pats, opts)
5944 subrepos = opts.get(b'subrepos')
5945 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
5946 return cmdutil.remove(
5947 ui, repo, m, b"", uipathfn, after, force, subrepos, dryrun=dryrun
5948 )
5960 with repo.wlock(), repo.dirstate.changing_files(repo):
5961 m = scmutil.match(repo[None], pats, opts)
5962 subrepos = opts.get(b'subrepos')
5963 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
5964 return cmdutil.remove(
5965 ui, repo, m, b"", uipathfn, after, force, subrepos, dryrun=dryrun
5966 )
5949 5967
5950 5968
5951 5969 @command(
@@ -5994,7 +6012,18 def rename(ui, repo, *pats, **opts):
5994 6012 Returns 0 on success, 1 if errors are encountered.
5995 6013 """
5996 6014 opts = pycompat.byteskwargs(opts)
5997 with repo.wlock():
6015 context = repo.dirstate.changing_files
6016 rev = opts.get(b'at_rev')
6017 ctx = None
6018 if rev:
6019 ctx = logcmdutil.revsingle(repo, rev)
6020 if ctx.rev() is not None:
6021
6022 def context(repo):
6023 return util.nullcontextmanager()
6024
6025 opts[b'at_rev'] = ctx.rev()
6026 with repo.wlock(), context(repo):
5998 6027 return cmdutil.copy(ui, repo, pats, opts, rename=True)
5999 6028
6000 6029
@@ -6260,7 +6289,7 def resolve(ui, repo, *pats, **opts):
6260 6289 #
6261 6290 # All this should eventually happens, but in the mean time, we use this
6262 6291 # context manager slightly out of the context it should be.
6263 with repo.dirstate.parentchange():
6292 with repo.dirstate.changing_parents(repo):
6264 6293 mergestatemod.recordupdates(repo, ms.actions(), branchmerge, None)
6265 6294
6266 6295 if not didwork and pats:
@@ -7252,23 +7281,22 def summary(ui, repo, **opts):
7252 7281 # XXX We should actually skip this if no default is specified, instead
7253 7282 # of passing "default" which will resolve as "./default/" if no default
7254 7283 # path is defined.
7255 source, branches = urlutil.get_unique_pull_path(
7256 b'summary', repo, ui, b'default'
7257 )
7258 sbranch = branches[0]
7284 path = urlutil.get_unique_pull_path_obj(b'summary', ui, b'default')
7285 sbranch = path.branch
7259 7286 try:
7260 other = hg.peer(repo, {}, source)
7287 other = hg.peer(repo, {}, path)
7261 7288 except error.RepoError:
7262 7289 if opts.get(b'remote'):
7263 7290 raise
7264 return source, sbranch, None, None, None
7291 return path.loc, sbranch, None, None, None
7292 branches = (path.branch, [])
7265 7293 revs, checkout = hg.addbranchrevs(repo, other, branches, None)
7266 7294 if revs:
7267 7295 revs = [other.lookup(rev) for rev in revs]
7268 ui.debug(b'comparing with %s\n' % urlutil.hidepassword(source))
7296 ui.debug(b'comparing with %s\n' % urlutil.hidepassword(path.loc))
7269 7297 with repo.ui.silent():
7270 7298 commoninc = discovery.findcommonincoming(repo, other, heads=revs)
7271 return source, sbranch, other, commoninc, commoninc[1]
7299 return path.loc, sbranch, other, commoninc, commoninc[1]
7272 7300
7273 7301 if needsincoming:
7274 7302 source, sbranch, sother, commoninc, incoming = getincoming()
@@ -7284,9 +7312,10 def summary(ui, repo, **opts):
7284 7312 d = b'default-push'
7285 7313 elif b'default' in ui.paths:
7286 7314 d = b'default'
7315 path = None
7287 7316 if d is not None:
7288 7317 path = urlutil.get_unique_push_path(b'summary', repo, ui, d)
7289 dest = path.pushloc or path.loc
7318 dest = path.loc
7290 7319 dbranch = path.branch
7291 7320 else:
7292 7321 dest = b'default'
@@ -7294,7 +7323,7 def summary(ui, repo, **opts):
7294 7323 revs, checkout = hg.addbranchrevs(repo, repo, (dbranch, []), None)
7295 7324 if source != dest:
7296 7325 try:
7297 dother = hg.peer(repo, {}, dest)
7326 dother = hg.peer(repo, {}, path if path is not None else dest)
7298 7327 except error.RepoError:
7299 7328 if opts.get(b'remote'):
7300 7329 raise
@@ -7472,8 +7501,11 def tag(ui, repo, name1, *names, **opts)
7472 7501 )
7473 7502 node = logcmdutil.revsingle(repo, rev_).node()
7474 7503
7504 # don't allow tagging the null rev or the working directory
7475 7505 if node is None:
7476 7506 raise error.InputError(_(b"cannot tag working directory"))
7507 elif not opts.get(b'remove') and node == nullid:
7508 raise error.InputError(_(b"cannot tag null revision"))
7477 7509
7478 7510 if not message:
7479 7511 # we don't translate commit messages
@@ -7494,13 +7526,6 def tag(ui, repo, name1, *names, **opts)
7494 7526 editform=editform, **pycompat.strkwargs(opts)
7495 7527 )
7496 7528
7497 # don't allow tagging the null rev
7498 if (
7499 not opts.get(b'remove')
7500 and logcmdutil.revsingle(repo, rev_).rev() == nullrev
7501 ):
7502 raise error.InputError(_(b"cannot tag null revision"))
7503
7504 7529 tagsmod.tag(
7505 7530 repo,
7506 7531 names,
@@ -588,6 +588,18 coreconfigitem(
588 588 b'revlog.debug-delta',
589 589 default=False,
590 590 )
591 # display extra information about the bundling process
592 coreconfigitem(
593 b'debug',
594 b'bundling-stats',
595 default=False,
596 )
597 # display extra information about the unbundling process
598 coreconfigitem(
599 b'debug',
600 b'unbundling-stats',
601 default=False,
602 )
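For what it's worth, the two new knobs are plain booleans in the `debug` section, so they can presumably be enabled from an hgrc file, or per invocation with `--config debug.bundling-stats=yes`:

    [debug]
    bundling-stats = yes
    unbundling-stats = yes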
591 603 coreconfigitem(
592 604 b'defaults',
593 605 b'.*',
@@ -734,6 +746,14 coreconfigitem(
734 746 b'discovery.exchange-heads',
735 747 default=True,
736 748 )
749 # If devel.debug.abort-update is True, then any merge with the working copy,
750 # e.g. [hg update], will be aborted after figuring out what needs to be done,
751 # but before spawning the parallel worker
752 coreconfigitem(
753 b'devel',
754 b'debug.abort-update',
755 default=False,
756 )
737 757 # If discovery.grow-sample is False, the sample size used in set discovery will
738 758 # not be increased through the process
739 759 coreconfigitem(
@@ -911,6 +931,13 coreconfigitem(
911 931 b'changegroup4',
912 932 default=False,
913 933 )
934
935 # we might remove the rank configuration once the computation has no impact
936 coreconfigitem(
937 b'experimental',
938 b'changelog-v2.compute-rank',
939 default=True,
940 )
914 941 coreconfigitem(
915 942 b'experimental',
916 943 b'cleanup-as-archived',
@@ -1774,6 +1801,13 coreconfigitem(
1774 1801 )
1775 1802 coreconfigitem(
1776 1803 b'merge-tools',
1804 br'.*\.regappend$',
1805 default=b"",
1806 generic=True,
1807 priority=-1,
1808 )
1809 coreconfigitem(
1810 b'merge-tools',
1777 1811 br'.*\.symlink$',
1778 1812 default=False,
1779 1813 generic=True,
@@ -2023,6 +2057,11 coreconfigitem(
2023 2057 )
2024 2058 coreconfigitem(
2025 2059 b'storage',
2060 b'revlog.delta-parent-search.candidate-group-chunk-size',
2061 default=10,
2062 )
2063 coreconfigitem(
2064 b'storage',
2026 2065 b'revlog.issue6528.fix-incoming',
2027 2066 default=True,
2028 2067 )
@@ -2044,6 +2083,7 coreconfigitem(
2044 2083 b'revlog.reuse-external-delta',
2045 2084 default=True,
2046 2085 )
2086 # This option is True unless `format.generaldelta` is set.
2047 2087 coreconfigitem(
2048 2088 b'storage',
2049 2089 b'revlog.reuse-external-delta-parent',
@@ -2123,7 +2163,7 coreconfigitem(
2123 2163 coreconfigitem(
2124 2164 b'server',
2125 2165 b'pullbundle',
2126 default=False,
2166 default=True,
2127 2167 )
2128 2168 coreconfigitem(
2129 2169 b'server',
@@ -1595,7 +1595,7 class workingctx(committablectx):
1595 1595 if p2node is None:
1596 1596 p2node = self._repo.nodeconstants.nullid
1597 1597 dirstate = self._repo.dirstate
1598 with dirstate.parentchange():
1598 with dirstate.changing_parents(self._repo):
1599 1599 copies = dirstate.setparents(p1node, p2node)
1600 1600 pctx = self._repo[p1node]
1601 1601 if copies:
@@ -1854,47 +1854,42 class workingctx(committablectx):
1854 1854
1855 1855 def _poststatusfixup(self, status, fixup):
1856 1856 """update dirstate for files that are actually clean"""
1857 dirstate = self._repo.dirstate
1857 1858 poststatus = self._repo.postdsstatus()
1858 if fixup or poststatus or self._repo.dirstate._dirty:
1859 if fixup:
1860 if dirstate.is_changing_parents:
1861 normal = lambda f, pfd: dirstate.update_file(
1862 f,
1863 p1_tracked=True,
1864 wc_tracked=True,
1865 )
1866 else:
1867 normal = dirstate.set_clean
1868 for f, pdf in fixup:
1869 normal(f, pdf)
1870 if poststatus or self._repo.dirstate._dirty:
1859 1871 try:
1860 oldid = self._repo.dirstate.identity()
1861
1862 1872 # updating the dirstate is optional
1863 1873 # so we don't wait on the lock
1864 1874 # wlock can invalidate the dirstate, so cache normal _after_
1865 1875 # taking the lock
1876 pre_dirty = dirstate._dirty
1866 1877 with self._repo.wlock(False):
1867 dirstate = self._repo.dirstate
1868 if dirstate.identity() == oldid:
1869 if fixup:
1870 if dirstate.pendingparentchange():
1871 normal = lambda f, pfd: dirstate.update_file(
1872 f, p1_tracked=True, wc_tracked=True
1873 )
1874 else:
1875 normal = dirstate.set_clean
1876 for f, pdf in fixup:
1877 normal(f, pdf)
1878 # write changes out explicitly, because nesting
1879 # wlock at runtime may prevent 'wlock.release()'
1880 # after this block from doing so for subsequent
1881 # changing files
1882 tr = self._repo.currenttransaction()
1883 self._repo.dirstate.write(tr)
1884
1885 if poststatus:
1886 for ps in poststatus:
1887 ps(self, status)
1888 else:
1889 # in this case, writing changes out breaks
1890 # consistency, because .hg/dirstate was
1891 # already changed simultaneously after last
1892 # caching (see also issue5584 for detail)
1893 self._repo.ui.debug(
1894 b'skip updating dirstate: identity mismatch\n'
1895 )
1878 assert self._repo.dirstate is dirstate
1879 post_dirty = dirstate._dirty
1880 if post_dirty:
1881 tr = self._repo.currenttransaction()
1882 dirstate.write(tr)
1883 elif pre_dirty:
1884 # grabbing the wlock detected that dirstate changes
1885 # needed to be dropped
1886 m = b'skip updating dirstate: identity mismatch\n'
1887 self._repo.ui.debug(m)
1888 if poststatus:
1889 for ps in poststatus:
1890 ps(self, status)
1896 1891 except error.LockError:
1897 pass
1892 dirstate.invalidate()
1898 1893 finally:
1899 1894 # Even if the wlock couldn't be grabbed, clear out the list.
1900 1895 self._repo.clearpostdsstatus()
@@ -1904,25 +1899,27 class workingctx(committablectx):
1904 1899 subrepos = []
1905 1900 if b'.hgsub' in self:
1906 1901 subrepos = sorted(self.substate)
1907 cmp, s, mtime_boundary = self._repo.dirstate.status(
1908 match, subrepos, ignored=ignored, clean=clean, unknown=unknown
1909 )
1910
1911 # check for any possibly clean files
1912 fixup = []
1913 if cmp:
1914 modified2, deleted2, clean_set, fixup = self._checklookup(
1915 cmp, mtime_boundary
1902 dirstate = self._repo.dirstate
1903 with dirstate.running_status(self._repo):
1904 cmp, s, mtime_boundary = dirstate.status(
1905 match, subrepos, ignored=ignored, clean=clean, unknown=unknown
1916 1906 )
1917 s.modified.extend(modified2)
1918 s.deleted.extend(deleted2)
1919
1920 if clean_set and clean:
1921 s.clean.extend(clean_set)
1922 if fixup and clean:
1923 s.clean.extend((f for f, _ in fixup))
1924
1925 self._poststatusfixup(s, fixup)
1907
1908 # check for any possibly clean files
1909 fixup = []
1910 if cmp:
1911 modified2, deleted2, clean_set, fixup = self._checklookup(
1912 cmp, mtime_boundary
1913 )
1914 s.modified.extend(modified2)
1915 s.deleted.extend(deleted2)
1916
1917 if clean_set and clean:
1918 s.clean.extend(clean_set)
1919 if fixup and clean:
1920 s.clean.extend((f for f, _ in fixup))
1921
1922 self._poststatusfixup(s, fixup)
1926 1923
1927 1924 if match.always():
1928 1925 # cache for performance
@@ -2050,7 +2047,7 class workingctx(committablectx):
2050 2047 return sorted(f for f in ds.matches(match) if ds.get_entry(f).tracked)
2051 2048
2052 2049 def markcommitted(self, node):
2053 with self._repo.dirstate.parentchange():
2050 with self._repo.dirstate.changing_parents(self._repo):
2054 2051 for f in self.modified() + self.added():
2055 2052 self._repo.dirstate.update_file(
2056 2053 f, p1_tracked=True, wc_tracked=True
This diff has been collapsed as it changes many lines, (526 lines changed) Show them Hide them
@@ -21,7 +21,6 import re
21 21 import socket
22 22 import ssl
23 23 import stat
24 import string
25 24 import subprocess
26 25 import sys
27 26 import time
@@ -73,7 +72,6 from . import (
73 72 repoview,
74 73 requirements,
75 74 revlog,
76 revlogutils,
77 75 revset,
78 76 revsetlang,
79 77 scmutil,
@@ -89,6 +87,7 from . import (
89 87 upgrade,
90 88 url as urlmod,
91 89 util,
90 verify,
92 91 vfs as vfsmod,
93 92 wireprotoframing,
94 93 wireprotoserver,
@@ -556,15 +555,9 def debugchangedfiles(ui, repo, rev, **o
556 555 @command(b'debugcheckstate', [], b'')
557 556 def debugcheckstate(ui, repo):
558 557 """validate the correctness of the current dirstate"""
559 parent1, parent2 = repo.dirstate.parents()
560 m1 = repo[parent1].manifest()
561 m2 = repo[parent2].manifest()
562 errors = 0
563 for err in repo.dirstate.verify(m1, m2):
564 ui.warn(err[0] % err[1:])
565 errors += 1
558 errors = verify.verifier(repo)._verify_dirstate()
566 559 if errors:
567 errstr = _(b".hg/dirstate inconsistent with current parent's manifest")
560 errstr = _(b"dirstate inconsistent with current parent's manifest")
568 561 raise error.Abort(errstr)
569 562
570 563
@@ -990,17 +983,29 def debugdeltachain(ui, repo, file_=None
990 983
991 984 @command(
992 985 b'debug-delta-find',
993 cmdutil.debugrevlogopts + cmdutil.formatteropts,
986 cmdutil.debugrevlogopts
987 + cmdutil.formatteropts
988 + [
989 (
990 b'',
991 b'source',
992 b'full',
992 _(b'input data fed to the process (full, storage, p1, p2, prev)'),
994 ),
995 ],
994 996 _(b'-c|-m|FILE REV'),
995 997 optionalrepo=True,
996 998 )
997 def debugdeltafind(ui, repo, arg_1, arg_2=None, **opts):
999 def debugdeltafind(ui, repo, arg_1, arg_2=None, source=b'full', **opts):
998 1000 """display the computation to get to a valid delta for storing REV
999 1001
1000 1002 This command will replay the process used to find the "best" delta to store
1001 1003 a revision and display information about all the steps used to get to that
1002 1004 result.
1003 1005
1006 By default, the process is fed the full text of the revision. This
1007 can be controlled with the --source flag.
1008
1004 1009 The revision uses the revision number of the target storage (not the
1005 1010 changelog revision number).
1006 1011
@@ -1017,34 +1022,22 def debugdeltafind(ui, repo, arg_1, arg_
1017 1022 rev = int(rev)
1018 1023
1019 1024 revlog = cmdutil.openrevlog(repo, b'debugdeltachain', file_, opts)
1020
1021 deltacomputer = deltautil.deltacomputer(
1022 revlog,
1023 write_debug=ui.write,
1024 debug_search=not ui.quiet,
1025 )
1026
1027 node = revlog.node(rev)
1028 1025 p1r, p2r = revlog.parentrevs(rev)
1029 p1 = revlog.node(p1r)
1030 p2 = revlog.node(p2r)
1031 btext = [revlog.revision(rev)]
1032 textlen = len(btext[0])
1033 cachedelta = None
1034 flags = revlog.flags(rev)
1035
1036 revinfo = revlogutils.revisioninfo(
1037 node,
1038 p1,
1039 p2,
1040 btext,
1041 textlen,
1042 cachedelta,
1043 flags,
1044 )
1045
1046 fh = revlog._datafp()
1047 deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)
1026
1027 if source == b'full':
1028 base_rev = nullrev
1029 elif source == b'storage':
1030 base_rev = revlog.deltaparent(rev)
1031 elif source == b'p1':
1032 base_rev = p1r
1033 elif source == b'p2':
1034 base_rev = p2r
1035 elif source == b'prev':
1036 base_rev = rev - 1
1037 else:
1038 raise error.InputError(b"invalid --source value: %s" % source)
1039
1040 revlog_debug.debug_delta_find(ui, revlog, rev, base_rev=base_rev)
1048 1041
1049 1042
1050 1043 @command(
@@ -1236,12 +1229,12 def debugdiscovery(ui, repo, remoteurl=b
1236 1229 random.seed(int(opts[b'seed']))
1237 1230
1238 1231 if not remote_revs:
1239
1240 remoteurl, branches = urlutil.get_unique_pull_path(
1241 b'debugdiscovery', repo, ui, remoteurl
1232 path = urlutil.get_unique_pull_path_obj(
1233 b'debugdiscovery', ui, remoteurl
1242 1234 )
1243 remote = hg.peer(repo, opts, remoteurl)
1244 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(remoteurl))
1235 branches = (path.branch, [])
1236 remote = hg.peer(repo, opts, path)
1237 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(path.loc))
1245 1238 else:
1246 1239 branches = (None, [])
1247 1240 remote_filtered_revs = logcmdutil.revrange(
@@ -3135,6 +3128,9 def debugrebuilddirstate(ui, repo, rev,
3135 3128 """
3136 3129 ctx = scmutil.revsingle(repo, rev)
3137 3130 with repo.wlock():
3131 if repo.currenttransaction() is not None:
3132 msg = b'rebuild the dirstate outside of a transaction'
3133 raise error.ProgrammingError(msg)
3138 3134 dirstate = repo.dirstate
3139 3135 changedfiles = None
3140 3136 # See command doc for what minimal does.
@@ -3146,7 +3142,8 def debugrebuilddirstate(ui, repo, rev,
3146 3142 dsnotadded = {f for f in dsonly if not dirstate.get_entry(f).added}
3147 3143 changedfiles = manifestonly | dsnotadded
3148 3144
3149 dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
3145 with dirstate.changing_parents(repo):
3146 dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
3150 3147
3151 3148
3152 3149 @command(
@@ -3207,348 +3204,10 def debugrevlog(ui, repo, file_=None, **
3207 3204 r = cmdutil.openrevlog(repo, b'debugrevlog', file_, opts)
3208 3205
3209 3206 if opts.get(b"dump"):
3210 numrevs = len(r)
3211 ui.write(
3212 (
3213 b"# rev p1rev p2rev start end deltastart base p1 p2"
3214 b" rawsize totalsize compression heads chainlen\n"
3215 )
3216 )
3217 ts = 0
3218 heads = set()
3219
3220 for rev in range(numrevs):
3221 dbase = r.deltaparent(rev)
3222 if dbase == -1:
3223 dbase = rev
3224 cbase = r.chainbase(rev)
3225 clen = r.chainlen(rev)
3226 p1, p2 = r.parentrevs(rev)
3227 rs = r.rawsize(rev)
3228 ts = ts + rs
3229 heads -= set(r.parentrevs(rev))
3230 heads.add(rev)
3231 try:
3232 compression = ts / r.end(rev)
3233 except ZeroDivisionError:
3234 compression = 0
3235 ui.write(
3236 b"%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
3237 b"%11d %5d %8d\n"
3238 % (
3239 rev,
3240 p1,
3241 p2,
3242 r.start(rev),
3243 r.end(rev),
3244 r.start(dbase),
3245 r.start(cbase),
3246 r.start(p1),
3247 r.start(p2),
3248 rs,
3249 ts,
3250 compression,
3251 len(heads),
3252 clen,
3253 )
3254 )
3255 return 0
3256
3257 format = r._format_version
3258 v = r._format_flags
3259 flags = []
3260 gdelta = False
3261 if v & revlog.FLAG_INLINE_DATA:
3262 flags.append(b'inline')
3263 if v & revlog.FLAG_GENERALDELTA:
3264 gdelta = True
3265 flags.append(b'generaldelta')
3266 if not flags:
3267 flags = [b'(none)']
3268
3269 ### tracks merge vs single parent
3270 nummerges = 0
3271
3272 ### tracks ways the "delta" are build
3273 # nodelta
3274 numempty = 0
3275 numemptytext = 0
3276 numemptydelta = 0
3277 # full file content
3278 numfull = 0
3279 # intermediate snapshot against a prior snapshot
3280 numsemi = 0
3281 # snapshot count per depth
3282 numsnapdepth = collections.defaultdict(lambda: 0)
3283 # delta against previous revision
3284 numprev = 0
3285 # delta against first or second parent (not prev)
3286 nump1 = 0
3287 nump2 = 0
3288 # delta against neither prev nor parents
3289 numother = 0
3290 # delta against prev that are also first or second parent
3291 # (details of `numprev`)
3292 nump1prev = 0
3293 nump2prev = 0
3294
3295 # data about delta chain of each revs
3296 chainlengths = []
3297 chainbases = []
3298 chainspans = []
3299
3300 # data about each revision
3301 datasize = [None, 0, 0]
3302 fullsize = [None, 0, 0]
3303 semisize = [None, 0, 0]
3304 # snapshot count per depth
3305 snapsizedepth = collections.defaultdict(lambda: [None, 0, 0])
3306 deltasize = [None, 0, 0]
3307 chunktypecounts = {}
3308 chunktypesizes = {}
3309
3310 def addsize(size, l):
3311 if l[0] is None or size < l[0]:
3312 l[0] = size
3313 if size > l[1]:
3314 l[1] = size
3315 l[2] += size
3316
3317 numrevs = len(r)
3318 for rev in range(numrevs):
3319 p1, p2 = r.parentrevs(rev)
3320 delta = r.deltaparent(rev)
3321 if format > 0:
3322 addsize(r.rawsize(rev), datasize)
3323 if p2 != nullrev:
3324 nummerges += 1
3325 size = r.length(rev)
3326 if delta == nullrev:
3327 chainlengths.append(0)
3328 chainbases.append(r.start(rev))
3329 chainspans.append(size)
3330 if size == 0:
3331 numempty += 1
3332 numemptytext += 1
3333 else:
3334 numfull += 1
3335 numsnapdepth[0] += 1
3336 addsize(size, fullsize)
3337 addsize(size, snapsizedepth[0])
3338 else:
3339 chainlengths.append(chainlengths[delta] + 1)
3340 baseaddr = chainbases[delta]
3341 revaddr = r.start(rev)
3342 chainbases.append(baseaddr)
3343 chainspans.append((revaddr - baseaddr) + size)
3344 if size == 0:
3345 numempty += 1
3346 numemptydelta += 1
3347 elif r.issnapshot(rev):
3348 addsize(size, semisize)
3349 numsemi += 1
3350 depth = r.snapshotdepth(rev)
3351 numsnapdepth[depth] += 1
3352 addsize(size, snapsizedepth[depth])
3353 else:
3354 addsize(size, deltasize)
3355 if delta == rev - 1:
3356 numprev += 1
3357 if delta == p1:
3358 nump1prev += 1
3359 elif delta == p2:
3360 nump2prev += 1
3361 elif delta == p1:
3362 nump1 += 1
3363 elif delta == p2:
3364 nump2 += 1
3365 elif delta != nullrev:
3366 numother += 1
3367
3368 # Obtain data on the raw chunks in the revlog.
3369 if util.safehasattr(r, b'_getsegmentforrevs'):
3370 segment = r._getsegmentforrevs(rev, rev)[1]
3371 else:
3372 segment = r._revlog._getsegmentforrevs(rev, rev)[1]
3373 if segment:
3374 chunktype = bytes(segment[0:1])
3375 else:
3376 chunktype = b'empty'
3377
3378 if chunktype not in chunktypecounts:
3379 chunktypecounts[chunktype] = 0
3380 chunktypesizes[chunktype] = 0
3381
3382 chunktypecounts[chunktype] += 1
3383 chunktypesizes[chunktype] += size
3384
3385 # Adjust size min value for empty cases
3386 for size in (datasize, fullsize, semisize, deltasize):
3387 if size[0] is None:
3388 size[0] = 0
3389
3390 numdeltas = numrevs - numfull - numempty - numsemi
3391 numoprev = numprev - nump1prev - nump2prev
3392 totalrawsize = datasize[2]
3393 datasize[2] /= numrevs
3394 fulltotal = fullsize[2]
3395 if numfull == 0:
3396 fullsize[2] = 0
3207 revlog_debug.dump(ui, r)
3397 3208 else:
3398 fullsize[2] /= numfull
3399 semitotal = semisize[2]
3400 snaptotal = {}
3401 if numsemi > 0:
3402 semisize[2] /= numsemi
3403 for depth in snapsizedepth:
3404 snaptotal[depth] = snapsizedepth[depth][2]
3405 snapsizedepth[depth][2] /= numsnapdepth[depth]
3406
3407 deltatotal = deltasize[2]
3408 if numdeltas > 0:
3409 deltasize[2] /= numdeltas
3410 totalsize = fulltotal + semitotal + deltatotal
3411 avgchainlen = sum(chainlengths) / numrevs
3412 maxchainlen = max(chainlengths)
3413 maxchainspan = max(chainspans)
3414 compratio = 1
3415 if totalsize:
3416 compratio = totalrawsize / totalsize
3417
3418 basedfmtstr = b'%%%dd\n'
3419 basepcfmtstr = b'%%%dd %s(%%5.2f%%%%)\n'
3420
3421 def dfmtstr(max):
3422 return basedfmtstr % len(str(max))
3423
3424 def pcfmtstr(max, padding=0):
3425 return basepcfmtstr % (len(str(max)), b' ' * padding)
3426
3427 def pcfmt(value, total):
3428 if total:
3429 return (value, 100 * float(value) / total)
3430 else:
3431 return value, 100.0
3432
3433 ui.writenoi18n(b'format : %d\n' % format)
3434 ui.writenoi18n(b'flags : %s\n' % b', '.join(flags))
3435
3436 ui.write(b'\n')
3437 fmt = pcfmtstr(totalsize)
3438 fmt2 = dfmtstr(totalsize)
3439 ui.writenoi18n(b'revisions : ' + fmt2 % numrevs)
3440 ui.writenoi18n(b' merges : ' + fmt % pcfmt(nummerges, numrevs))
3441 ui.writenoi18n(
3442 b' normal : ' + fmt % pcfmt(numrevs - nummerges, numrevs)
3443 )
3444 ui.writenoi18n(b'revisions : ' + fmt2 % numrevs)
3445 ui.writenoi18n(b' empty : ' + fmt % pcfmt(numempty, numrevs))
3446 ui.writenoi18n(
3447 b' text : '
3448 + fmt % pcfmt(numemptytext, numemptytext + numemptydelta)
3449 )
3450 ui.writenoi18n(
3451 b' delta : '
3452 + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta)
3453 )
3454 ui.writenoi18n(
3455 b' snapshot : ' + fmt % pcfmt(numfull + numsemi, numrevs)
3456 )
3457 for depth in sorted(numsnapdepth):
3458 ui.write(
3459 (b' lvl-%-3d : ' % depth)
3460 + fmt % pcfmt(numsnapdepth[depth], numrevs)
3461 )
3462 ui.writenoi18n(b' deltas : ' + fmt % pcfmt(numdeltas, numrevs))
3463 ui.writenoi18n(b'revision size : ' + fmt2 % totalsize)
3464 ui.writenoi18n(
3465 b' snapshot : ' + fmt % pcfmt(fulltotal + semitotal, totalsize)
3466 )
3467 for depth in sorted(numsnapdepth):
3468 ui.write(
3469 (b' lvl-%-3d : ' % depth)
3470 + fmt % pcfmt(snaptotal[depth], totalsize)
3471 )
3472 ui.writenoi18n(b' deltas : ' + fmt % pcfmt(deltatotal, totalsize))
3473
3474 def fmtchunktype(chunktype):
3475 if chunktype == b'empty':
3476 return b' %s : ' % chunktype
3477 elif chunktype in pycompat.bytestr(string.ascii_letters):
3478 return b' 0x%s (%s) : ' % (hex(chunktype), chunktype)
3479 else:
3480 return b' 0x%s : ' % hex(chunktype)
3481
3482 ui.write(b'\n')
3483 ui.writenoi18n(b'chunks : ' + fmt2 % numrevs)
3484 for chunktype in sorted(chunktypecounts):
3485 ui.write(fmtchunktype(chunktype))
3486 ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
3487 ui.writenoi18n(b'chunks size : ' + fmt2 % totalsize)
3488 for chunktype in sorted(chunktypecounts):
3489 ui.write(fmtchunktype(chunktype))
3490 ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))
3491
3492 ui.write(b'\n')
3493 fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio))
3494 ui.writenoi18n(b'avg chain length : ' + fmt % avgchainlen)
3495 ui.writenoi18n(b'max chain length : ' + fmt % maxchainlen)
3496 ui.writenoi18n(b'max chain reach : ' + fmt % maxchainspan)
3497 ui.writenoi18n(b'compression ratio : ' + fmt % compratio)
3498
3499 if format > 0:
3500 ui.write(b'\n')
3501 ui.writenoi18n(
3502 b'uncompressed data size (min/max/avg) : %d / %d / %d\n'
3503 % tuple(datasize)
3504 )
3505 ui.writenoi18n(
3506 b'full revision size (min/max/avg) : %d / %d / %d\n'
3507 % tuple(fullsize)
3508 )
3509 ui.writenoi18n(
3510 b'inter-snapshot size (min/max/avg) : %d / %d / %d\n'
3511 % tuple(semisize)
3512 )
3513 for depth in sorted(snapsizedepth):
3514 if depth == 0:
3515 continue
3516 ui.writenoi18n(
3517 b' level-%-3d (min/max/avg) : %d / %d / %d\n'
3518 % ((depth,) + tuple(snapsizedepth[depth]))
3519 )
3520 ui.writenoi18n(
3521 b'delta size (min/max/avg) : %d / %d / %d\n'
3522 % tuple(deltasize)
3523 )
3524
3525 if numdeltas > 0:
3526 ui.write(b'\n')
3527 fmt = pcfmtstr(numdeltas)
3528 fmt2 = pcfmtstr(numdeltas, 4)
3529 ui.writenoi18n(
3530 b'deltas against prev : ' + fmt % pcfmt(numprev, numdeltas)
3531 )
3532 if numprev > 0:
3533 ui.writenoi18n(
3534 b' where prev = p1 : ' + fmt2 % pcfmt(nump1prev, numprev)
3535 )
3536 ui.writenoi18n(
3537 b' where prev = p2 : ' + fmt2 % pcfmt(nump2prev, numprev)
3538 )
3539 ui.writenoi18n(
3540 b' other : ' + fmt2 % pcfmt(numoprev, numprev)
3541 )
3542 if gdelta:
3543 ui.writenoi18n(
3544 b'deltas against p1 : ' + fmt % pcfmt(nump1, numdeltas)
3545 )
3546 ui.writenoi18n(
3547 b'deltas against p2 : ' + fmt % pcfmt(nump2, numdeltas)
3548 )
3549 ui.writenoi18n(
3550 b'deltas against other : ' + fmt % pcfmt(numother, numdeltas)
3551 )
3209 revlog_debug.debug_revlog(ui, r)
3210 return 0
3552 3211
3553 3212
3554 3213 @command(
@@ -3935,10 +3594,8 def debugssl(ui, repo, source=None, **op
3935 3594 )
3936 3595 source = b"default"
3937 3596
3938 source, branches = urlutil.get_unique_pull_path(
3939 b'debugssl', repo, ui, source
3940 )
3941 url = urlutil.url(source)
3597 path = urlutil.get_unique_pull_path_obj(b'debugssl', ui, source)
3598 url = path.url
3942 3599
3943 3600 defaultport = {b'https': 443, b'ssh': 22}
3944 3601 if url.scheme in defaultport:
@@ -4049,20 +3706,19 def debugbackupbundle(ui, repo, *pats, *
4049 3706 for backup in backups:
4050 3707 # Much of this is copied from the hg incoming logic
4051 3708 source = os.path.relpath(backup, encoding.getcwd())
4052 source, branches = urlutil.get_unique_pull_path(
3709 path = urlutil.get_unique_pull_path_obj(
4053 3710 b'debugbackupbundle',
4054 repo,
4055 3711 ui,
4056 3712 source,
4057 default_branches=opts.get(b'branch'),
4058 3713 )
4059 3714 try:
4060 other = hg.peer(repo, opts, source)
3715 other = hg.peer(repo, opts, path)
4061 3716 except error.LookupError as ex:
4062 msg = _(b"\nwarning: unable to open bundle %s") % source
3717 msg = _(b"\nwarning: unable to open bundle %s") % path.loc
4063 3718 hint = _(b"\n(missing parent rev %s)\n") % short(ex.name)
4064 3719 ui.warn(msg, hint=hint)
4065 3720 continue
3721 branches = (path.branch, opts.get(b'branch', []))
4066 3722 revs, checkout = hg.addbranchrevs(
4067 3723 repo, other, branches, opts.get(b"rev")
4068 3724 )
@@ -4085,29 +3741,29 def debugbackupbundle(ui, repo, *pats, *
4085 3741 with repo.lock(), repo.transaction(b"unbundle") as tr:
4086 3742 if scmutil.isrevsymbol(other, recovernode):
4087 3743 ui.status(_(b"Unbundling %s\n") % (recovernode))
4088 f = hg.openpath(ui, source)
4089 gen = exchange.readbundle(ui, f, source)
3744 f = hg.openpath(ui, path.loc)
3745 gen = exchange.readbundle(ui, f, path.loc)
4090 3746 if isinstance(gen, bundle2.unbundle20):
4091 3747 bundle2.applybundle(
4092 3748 repo,
4093 3749 gen,
4094 3750 tr,
4095 3751 source=b"unbundle",
4096 url=b"bundle:" + source,
3752 url=b"bundle:" + path.loc,
4097 3753 )
4098 3754 else:
4099 gen.apply(repo, b"unbundle", b"bundle:" + source)
3755 gen.apply(repo, b"unbundle", b"bundle:" + path.loc)
4100 3756 break
4101 3757 else:
4102 3758 backupdate = encoding.strtolocal(
4103 3759 time.strftime(
4104 3760 "%a %H:%M, %Y-%m-%d",
4105 time.localtime(os.path.getmtime(source)),
3761 time.localtime(os.path.getmtime(path.loc)),
4106 3762 )
4107 3763 )
4108 3764 ui.status(b"\n%s\n" % (backupdate.ljust(50)))
4109 3765 if ui.verbose:
4110 ui.status(b"%s%s\n" % (b"bundle:".ljust(13), source))
3766 ui.status(b"%s%s\n" % (b"bundle:".ljust(13), path.loc))
4111 3767 else:
4112 3768 opts[
4113 3769 b"template"
@@ -4134,8 +3790,21 def debugsub(ui, repo, rev=None):
4134 3790 ui.writenoi18n(b' revision %s\n' % v[1])
4135 3791
4136 3792
4137 @command(b'debugshell', optionalrepo=True)
4138 def debugshell(ui, repo):
3793 @command(
3794 b'debugshell',
3795 [
3796 (
3797 b'c',
3798 b'command',
3799 b'',
3800 _(b'program passed in as a string'),
3801 _(b'COMMAND'),
3802 )
3803 ],
3804 _(b'[-c COMMAND]'),
3805 optionalrepo=True,
3806 )
3807 def debugshell(ui, repo, **opts):
4139 3808 """run an interactive Python interpreter
4140 3809
4141 3810 The local namespace is provided with a reference to the ui and
@@ -4148,10 +3817,58 def debugshell(ui, repo):
4148 3817 'repo': repo,
4149 3818 }
4150 3819
3820 # py2exe disables initialization of the site module, which is responsible
3821 # for arranging for ``quit()`` to exit the interpreter. Manually initialize
3822 # the stuff that site normally does here, so that the interpreter can be
3823 # quit in a consistent manner, whether run with pyoxidizer, exewrapper.c,
3824 # py.exe, or py2exe.
3825 if getattr(sys, "frozen", None) == 'console_exe':
3826 try:
3827 import site
3828
3829 site.setcopyright()
3830 site.sethelper()
3831 site.setquit()
3832 except ImportError:
3833 site = None # Keep PyCharm happy
3834
3835 command = opts.get('command')
3836 if command:
3837 compiled = code.compile_command(encoding.strfromlocal(command))
3838 code.InteractiveInterpreter(locals=imported_objects).runcode(compiled)
3839 return
3840
4151 3841 code.interact(local=imported_objects)
4152 3842
4153 3843
4154 3844 @command(
3845 b'debug-revlog-stats',
3846 [
3847 (b'c', b'changelog', None, _(b'Display changelog statistics')),
3848 (b'm', b'manifest', None, _(b'Display manifest statistics')),
3849 (b'f', b'filelogs', None, _(b'Display filelogs statistics')),
3850 ]
3851 + cmdutil.formatteropts,
3852 )
3853 def debug_revlog_stats(ui, repo, **opts):
3854 """display statistics about revlogs in the store"""
3855 opts = pycompat.byteskwargs(opts)
3856 changelog = opts[b"changelog"]
3857 manifest = opts[b"manifest"]
3858 filelogs = opts[b"filelogs"]
3859
3860 if changelog is None and manifest is None and filelogs is None:
3861 changelog = True
3862 manifest = True
3863 filelogs = True
3864
3865 repo = repo.unfiltered()
3866 fm = ui.formatter(b'debug-revlog-stats', opts)
3867 revlog_debug.debug_revlog_stats(repo, fm, changelog, manifest, filelogs)
3868 fm.end()
3869
3870
3871 @command(
4155 3872 b'debugsuccessorssets',
4156 3873 [(b'', b'closest', False, _(b'return closest successors sets only'))],
4157 3874 _(b'[REV]'),
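For reference, the `--command` flag and the `debug-revlog-stats` command introduced above can be exercised like this from inside a repository (illustrative invocations; output depends on the repository, and `ui`/`repo` are the names injected into the debugshell namespace):

    $ hg debugshell -c 'ui.write(b"%s\n" % repo.root)'
    $ hg debug-revlog-stats --manifest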
@@ -4843,7 +4560,8 def debugwireproto(ui, repo, path=None,
4843 4560 _(b'--peer %s not supported with HTTP peers') % opts[b'peer']
4844 4561 )
4845 4562 else:
4846 peer = httppeer.makepeer(ui, path, opener=opener)
4563 peer_path = urlutil.try_path(ui, path)
4564 peer = httppeer.makepeer(ui, peer_path, opener=opener)
4847 4565
4848 4566 # We /could/ populate stdin/stdout with sock.makefile()...
4849 4567 else:
@@ -120,7 +120,7 def difffeatureopts(
120 120 )
121 121 buildopts[b'ignorewseol'] = get(b'ignore_space_at_eol', b'ignorewseol')
122 122 if formatchanging:
123 buildopts[b'text'] = opts and opts.get(b'text')
123 buildopts[b'text'] = None if opts is None else opts.get(b'text')
124 124 binary = None if opts is None else opts.get(b'binary')
125 125 buildopts[b'nobinary'] = (
126 126 not binary
@@ -31,7 +31,6 from . import (
31 31 )
32 32
33 33 from .dirstateutils import (
34 docket as docketmod,
35 34 timestamp,
36 35 )
37 36
@@ -66,10 +65,17 class rootcache(filecache):
66 65 return obj._join(fname)
67 66
68 67
69 def requires_parents_change(func):
68 def check_invalidated(func):
69 """check that the func is called with a non-invalidated dirstate
70
71 The dirstate is in an "invalidated state" after an error occurred during
72 its modification and remains so until we exit the top-level scope that
73 framed the change.
74 """
75
70 76 def wrap(self, *args, **kwargs):
71 if not self.pendingparentchange():
72 msg = 'calling `%s` outside of a parentchange context'
77 if self._invalidated_context:
78 msg = 'calling `%s` after the dirstate was invalidated'
73 79 msg %= func.__name__
74 80 raise error.ProgrammingError(msg)
75 81 return func(self, *args, **kwargs)
@@ -77,19 +83,63 def requires_parents_change(func):
77 83 return wrap
78 84
79 85
80 def requires_no_parents_change(func):
86 def requires_changing_parents(func):
81 87 def wrap(self, *args, **kwargs):
82 if self.pendingparentchange():
83 msg = 'calling `%s` inside of a parentchange context'
88 if not self.is_changing_parents:
89 msg = 'calling `%s` outside of a changing_parents context'
90 msg %= func.__name__
91 raise error.ProgrammingError(msg)
92 return func(self, *args, **kwargs)
93
94 return check_invalidated(wrap)
95
96
97 def requires_changing_files(func):
98 def wrap(self, *args, **kwargs):
99 if not self.is_changing_files:
100 msg = 'calling `%s` outside of a `changing_files` context'
84 101 msg %= func.__name__
85 102 raise error.ProgrammingError(msg)
86 103 return func(self, *args, **kwargs)
87 104
88 return wrap
105 return check_invalidated(wrap)
106
107
108 def requires_changing_any(func):
109 def wrap(self, *args, **kwargs):
110 if not self.is_changing_any:
111 msg = 'calling `%s` outside of a changing context'
112 msg %= func.__name__
113 raise error.ProgrammingError(msg)
114 return func(self, *args, **kwargs)
115
116 return check_invalidated(wrap)
117
118
119 def requires_changing_files_or_status(func):
120 def wrap(self, *args, **kwargs):
121 if not (self.is_changing_files or self._running_status > 0):
122 msg = (
123 'calling `%s` outside of a changing_files '
124 'or running_status context'
125 )
126 msg %= func.__name__
127 raise error.ProgrammingError(msg)
128 return func(self, *args, **kwargs)
129
130 return check_invalidated(wrap)
131
132
133 CHANGE_TYPE_PARENTS = "parents"
134 CHANGE_TYPE_FILES = "files"
89 135
90 136
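The guard decorators above all follow one pattern: check a flag on `self` and raise `ProgrammingError` when the call happens outside the required context. A minimal, self-contained sketch of that pattern (generic names, not Mercurial APIs):

    class ProgrammingError(Exception):
        pass

    def requires_flag(flag_name):
        """Reject calls made while the guarding flag is false."""

        def decorator(func):
            def wrap(self, *args, **kwargs):
                if not getattr(self, flag_name):
                    msg = 'calling `%s` outside of the required context'
                    raise ProgrammingError(msg % func.__name__)
                return func(self, *args, **kwargs)

            return wrap

        return decorator

    class Demo:
        is_changing_files = False

        @requires_flag('is_changing_files')
        def set_tracked(self, filename):
            return filename

    d = Demo()
    d.is_changing_files = True
    assert d.set_tracked('a.txt') == 'a.txt'  # raises if the flag is False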
91 137 @interfaceutil.implementer(intdirstate.idirstate)
92 138 class dirstate:
139
140 # used by largefiles to avoid overwriting the transaction callback
141 _tr_key_suffix = b''
142
93 143 def __init__(
94 144 self,
95 145 opener,
@@ -124,7 +174,16 class dirstate:
124 174 self._dirty_tracked_set = False
125 175 self._ui = ui
126 176 self._filecache = {}
127 self._parentwriters = 0
177 # nesting level of `changing_parents` context
178 self._changing_level = 0
179 # the change currently underway
180 self._change_type = None
181 # number of open _running_status contexts
182 self._running_status = 0
183 # True if the current dirstate changing operations have been
184 # invalidated (used to make sure all nested contexts have been exited)
185 self._invalidated_context = False
186 self._attached_to_a_transaction = False
128 187 self._filename = b'dirstate'
129 188 self._filename_th = b'dirstate-tracked-hint'
130 189 self._pendingfilename = b'%s.pending' % self._filename
@@ -136,6 +195,12 class dirstate:
136 195 # raises an exception).
137 196 self._cwd
138 197
198 def refresh(self):
199 if '_branch' in vars(self):
200 del self._branch
201 if '_map' in vars(self) and self._map.may_need_refresh():
202 self.invalidate()
203
139 204 def prefetch_parents(self):
140 205 """make sure the parents are loaded
141 206
@@ -144,39 +209,193 class dirstate:
144 209 self._pl
145 210
146 211 @contextlib.contextmanager
147 def parentchange(self):
148 """Context manager for handling dirstate parents.
212 @check_invalidated
213 def running_status(self, repo):
214 """Wrap a status operation
215
216 This context is not mutually exclusive with the `changing_*` contexts. It
217 also does not require the `wlock` to be taken.
218
219 If the wlock is taken, this context will behave in a simple way, and
220 ensure the data are scheduled for writing when leaving the top-level
221 context.
149 222
150 If an exception occurs in the scope of the context manager,
151 the incoherent dirstate won't be written when wlock is
152 released.
223 If the lock is not taken, it only guarantees that the data are either
224 committed (written) or rolled back (invalidated) when exiting the top
225 level context. The write/invalidate action must be performed by the
226 wrapped code.
227
228
229 The expected logic is:
230
231 A: read the dirstate
232 B: run status
233 This might make the dirstate dirty by updating caches,
234 especially in Rust.
235 C: do more "post status" fixup if relevant
236 D: try to take the w-lock (this will invalidate the changes if they were raced)
237 E0: if dirstate changed on disk → discard change (done by dirstate internal)
238 E1: elif lock was acquired → write the changes
239 E2: else → discard the changes
153 240 """
154 self._parentwriters += 1
155 yield
156 # Typically we want the "undo" step of a context manager in a
157 # finally block so it happens even when an exception
158 # occurs. In this case, however, we only want to decrement
159 # parentwriters if the code in the with statement exits
160 # normally, so we don't have a try/finally here on purpose.
161 self._parentwriters -= 1
241 has_lock = repo.currentwlock() is not None
242 is_changing = self.is_changing_any
243 tr = repo.currenttransaction()
244 has_tr = tr is not None
245 nested = bool(self._running_status)
246
247 first_and_alone = not (is_changing or has_tr or nested)
248
249 # enforce no change happened outside of a proper context.
250 if first_and_alone and self._dirty:
251 has_tr = repo.currenttransaction() is not None
252 if not has_tr and self._changing_level == 0 and self._dirty:
253 msg = "entering a status context, but dirstate is already dirty"
254 raise error.ProgrammingError(msg)
255
256 should_write = has_lock and not (nested or is_changing)
257
258 self._running_status += 1
259 try:
260 yield
261 except Exception:
262 self.invalidate()
263 raise
264 finally:
265 self._running_status -= 1
266 if self._invalidated_context:
267 should_write = False
268 self.invalidate()
269
270 if should_write:
271 assert repo.currenttransaction() is tr
272 self.write(tr)
273 elif not has_lock:
274 if self._dirty:
275 msg = b'dirstate dirty while exiting an isolated status context'
276 repo.ui.develwarn(msg)
277 self.invalidate()
278
279 @contextlib.contextmanager
280 @check_invalidated
281 def _changing(self, repo, change_type):
282 if repo.currentwlock() is None:
283 msg = b"trying to change the dirstate without holding the wlock"
284 raise error.ProgrammingError(msg)
285
286 has_tr = repo.currenttransaction() is not None
287 if not has_tr and self._changing_level == 0 and self._dirty:
288 msg = b"entering a changing context, but dirstate is already dirty"
289 repo.ui.develwarn(msg)
290
291 assert self._changing_level >= 0
292 # different types of change are mutually exclusive
293 if self._change_type is None:
294 assert self._changing_level == 0
295 self._change_type = change_type
296 elif self._change_type != change_type:
297 msg = (
298 'trying to open "%s" dirstate-changing context while a "%s" is'
299 ' already open'
300 )
301 msg %= (change_type, self._change_type)
302 raise error.ProgrammingError(msg)
303 should_write = False
304 self._changing_level += 1
305 try:
306 yield
307 except: # re-raises
308 self.invalidate() # this will set `_invalidated_context`
309 raise
310 finally:
311 assert self._changing_level > 0
312 self._changing_level -= 1
313 # If the dirstate is being invalidated, call invalidate again.
314 # This will throw away anything added by an upper context and
315 # reset the `_invalidated_context` flag when relevant
316 if self._changing_level <= 0:
317 self._change_type = None
318 assert self._changing_level == 0
319 if self._invalidated_context:
320 # make sure we invalidate anything an upper context might
321 # have changed.
322 self.invalidate()
323 else:
324 should_write = self._changing_level <= 0
325 tr = repo.currenttransaction()
326 if has_tr != (tr is not None):
327 if has_tr:
328 m = "transaction vanished while changing dirstate"
329 else:
330 m = "transaction appeared while changing dirstate"
331 raise error.ProgrammingError(m)
332 if should_write:
333 self.write(tr)
334
335 @contextlib.contextmanager
336 def changing_parents(self, repo):
337 with self._changing(repo, CHANGE_TYPE_PARENTS) as c:
338 yield c
339
340 @contextlib.contextmanager
341 def changing_files(self, repo):
342 with self._changing(repo, CHANGE_TYPE_FILES) as c:
343 yield c
344
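To make the nesting rules of `_changing` concrete, here is a self-contained sketch of the same level/type bookkeeping (an illustrative stand-in, not the real dirstate class):

    import contextlib

    class MiniDirstate:
        def __init__(self):
            self._changing_level = 0
            self._change_type = None

        @contextlib.contextmanager
        def changing(self, change_type):
            # different types of change are mutually exclusive
            if self._change_type not in (None, change_type):
                raise RuntimeError('mixed dirstate-changing contexts')
            self._change_type = change_type
            self._changing_level += 1
            try:
                yield
            finally:
                self._changing_level -= 1
                if self._changing_level == 0:
                    self._change_type = None

    ds = MiniDirstate()
    with ds.changing('parents'):
        with ds.changing('parents'):  # same type nests fine
            assert ds._changing_level == 2
    assert ds._change_type is None  # fully unwound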
345 # here to help migration to the new code
346 def parentchange(self):
347 msg = (
348 "Mercurial 6.4 and later requires call to "
349 "`dirstate.changing_parents(repo)`"
350 )
351 raise error.ProgrammingError(msg)
352
353 @property
354 def is_changing_any(self):
355 """Returns true if the dirstate is in the middle of a set of changes.
356
357 This returns True for any kind of change.
358 """
359 return self._changing_level > 0
162 360
163 361 def pendingparentchange(self):
362 return self.is_changing_parent()
363
364 def is_changing_parent(self):
164 365 """Returns true if the dirstate is in the middle of a set of changes
165 366 that modify the dirstate parent.
166 367 """
167 return self._parentwriters > 0
368 self._ui.deprecwarn(b"dirstate.is_changing_parents", b"6.5")
369 return self.is_changing_parents
370
371 @property
372 def is_changing_parents(self):
373 """Returns true if the dirstate is in the middle of a set of changes
374 that modify the dirstate parent.
375 """
376 if self._changing_level <= 0:
377 return False
378 return self._change_type == CHANGE_TYPE_PARENTS
379
380 @property
381 def is_changing_files(self):
382 """Returns true if the dirstate is in the middle of a set of changes
383 that modify the files tracked or their sources.
384 """
385 if self._changing_level <= 0:
386 return False
387 return self._change_type == CHANGE_TYPE_FILES
168 388
169 389 @propertycache
170 390 def _map(self):
171 391 """Return the dirstate contents (see documentation for dirstatemap)."""
172 self._map = self._mapcls(
392 return self._mapcls(
173 393 self._ui,
174 394 self._opener,
175 395 self._root,
176 396 self._nodeconstants,
177 397 self._use_dirstate_v2,
178 398 )
179 return self._map
180 399
181 400 @property
182 401 def _sparsematcher(self):
@@ -365,6 +584,7 class dirstate:
365 584 def branch(self):
366 585 return encoding.tolocal(self._branch)
367 586
587 @requires_changing_parents
368 588 def setparents(self, p1, p2=None):
369 589 """Set dirstate parents to p1 and p2.
370 590
@@ -376,10 +596,10 class dirstate:
376 596 """
377 597 if p2 is None:
378 598 p2 = self._nodeconstants.nullid
379 if self._parentwriters == 0:
599 if self._changing_level == 0:
380 600 raise ValueError(
381 601 b"cannot set dirstate parent outside of "
382 b"dirstate.parentchange context manager"
602 b"dirstate.changing_parents context manager"
383 603 )
384 604
385 605 self._dirty = True
@@ -419,9 +639,14 class dirstate:
419 639 delattr(self, a)
420 640 self._dirty = False
421 641 self._dirty_tracked_set = False
422 self._parentwriters = 0
642 self._invalidated_context = bool(
643 self._changing_level > 0
644 or self._attached_to_a_transaction
645 or self._running_status
646 )
423 647 self._origpl = None
424 648
649 @requires_changing_any
425 650 def copy(self, source, dest):
426 651 """Mark dest as a copy of source. Unmark dest if source is None."""
427 652 if source == dest:
@@ -439,7 +664,7 class dirstate:
439 664 def copies(self):
440 665 return self._map.copymap
441 666
442 @requires_no_parents_change
667 @requires_changing_files
443 668 def set_tracked(self, filename, reset_copy=False):
444 669 """a "public" method for generic code to mark a file as tracked
445 670
@@ -461,7 +686,7 class dirstate:
461 686 self._dirty_tracked_set = True
462 687 return pre_tracked
463 688
464 @requires_no_parents_change
689 @requires_changing_files
465 690 def set_untracked(self, filename):
466 691 """a "public" method for generic code to mark a file as untracked
467 692
@@ -476,7 +701,7 class dirstate:
476 701 self._dirty_tracked_set = True
477 702 return ret
478 703
479 @requires_no_parents_change
704 @requires_changing_files_or_status
480 705 def set_clean(self, filename, parentfiledata):
481 706 """record that the current state of the file on disk is known to be clean"""
482 707 self._dirty = True
@@ -485,13 +710,13 class dirstate:
485 710 (mode, size, mtime) = parentfiledata
486 711 self._map.set_clean(filename, mode, size, mtime)
487 712
488 @requires_no_parents_change
713 @requires_changing_files_or_status
489 714 def set_possibly_dirty(self, filename):
490 715 """record that the current state of the file on disk is unknown"""
491 716 self._dirty = True
492 717 self._map.set_possibly_dirty(filename)
493 718
494 @requires_parents_change
719 @requires_changing_parents
495 720 def update_file_p1(
496 721 self,
497 722 filename,
@@ -503,7 +728,7 class dirstate:
503 728 rewriting operation.
504 729
505 730 It should not be called during a merge (p2 != nullid) and only within
506 a `with dirstate.parentchange():` context.
731 a `with dirstate.changing_parents(repo):` context.
507 732 """
508 733 if self.in_merge:
509 734 msg = b'update_file_reference should not be called when merging'
@@ -531,7 +756,7 class dirstate:
531 756 has_meaningful_mtime=False,
532 757 )
533 758
534 @requires_parents_change
759 @requires_changing_parents
535 760 def update_file(
536 761 self,
537 762 filename,
@@ -546,12 +771,57 class dirstate:
546 771 This is to be called when the dirstate's parent changes to keep track
547 772 of the file situation in regard to the working copy and its parent.
548 773
549 This function must be called within a `dirstate.parentchange` context.
774 This function must be called within a `dirstate.changing_parents` context.
550 775
551 776 note: the API is at an early stage and we might need to adjust it
552 777 depending on what information ends up being relevant and useful to
553 778 other processing.
554 779 """
780 self._update_file(
781 filename=filename,
782 wc_tracked=wc_tracked,
783 p1_tracked=p1_tracked,
784 p2_info=p2_info,
785 possibly_dirty=possibly_dirty,
786 parentfiledata=parentfiledata,
787 )
788
789 def hacky_extension_update_file(self, *args, **kwargs):
790 """NEVER USE THIS, YOU DO NOT NEED IT
791
792 This function is a variant of "update_file" to be called by a small set
793 of extensions; it also adjusts the internal state of the file, but can be
794 called outside a `changing_parents` context.
795
796 A very small number of extensions meddle with the working copy content
797 in a way that requires adjusting the dirstate accordingly. At the time
798 this function was written they are:
799 - keyword,
800 - largefile,
801 PLEASE DO NOT GROW THIS LIST ANY FURTHER.
802
803 This function could probably be replaced by a more semantic one (like
804 "adjust expected size" or "always revalidate file content", etc.);
805 however, at the time this was written, that was too much of a detour
806 to be considered.
807 """
808 if not (self._changing_level > 0 or self._running_status > 0):
809 msg = "requires a changes context"
810 raise error.ProgrammingError(msg)
811 self._update_file(
812 *args,
813 **kwargs,
814 )
815
816 def _update_file(
817 self,
818 filename,
819 wc_tracked,
820 p1_tracked,
821 p2_info=False,
822 possibly_dirty=False,
823 parentfiledata=None,
824 ):
555 825
556 826 # note: I do not think we need to double check name clash here since we
557 827 # are in a update/merge case that should already have taken care of
@@ -680,12 +950,16 class dirstate:
680 950 return self._normalize(path, isknown, ignoremissing)
681 951 return path
682 952
953 # XXX this method is barely used, as a result:
954 # - its semantics are unclear
955 # - do we really need it?
956 @requires_changing_parents
683 957 def clear(self):
684 958 self._map.clear()
685 959 self._dirty = True
686 960
961 @requires_changing_parents
687 962 def rebuild(self, parent, allfiles, changedfiles=None):
688
689 963 matcher = self._sparsematcher
690 964 if matcher is not None and not matcher.always():
691 965 # should not add non-matching files
@@ -724,7 +998,6 class dirstate:
724 998 self._map.setparents(parent, self._nodeconstants.nullid)
725 999
726 1000 for f in to_lookup:
727
728 1001 if self.in_merge:
729 1002 self.set_tracked(f)
730 1003 else:
@@ -749,20 +1022,41 class dirstate:
749 1022 def write(self, tr):
750 1023 if not self._dirty:
751 1024 return
1025 # make sure we don't request a write of invalidated content
1026 # XXX move before the dirty check once `unlock` stops calling `write`
1027 assert not self._invalidated_context
752 1028
753 1029 write_key = self._use_tracked_hint and self._dirty_tracked_set
754 1030 if tr:
1031
1032 def on_abort(tr):
1033 self._attached_to_a_transaction = False
1034 self.invalidate()
1035
1036 # make sure we invalidate the current change on abort
1037 if tr is not None:
1038 tr.addabort(
1039 b'dirstate-invalidate%s' % self._tr_key_suffix,
1040 on_abort,
1041 )
1042
1043 self._attached_to_a_transaction = True
1044
1045 def on_success(f):
1046 self._attached_to_a_transaction = False
1047 self._writedirstate(tr, f)
1048
755 1049 # delay writing in-memory changes out
756 1050 tr.addfilegenerator(
757 b'dirstate-1-main',
1051 b'dirstate-1-main%s' % self._tr_key_suffix,
758 1052 (self._filename,),
759 lambda f: self._writedirstate(tr, f),
1053 on_success,
760 1054 location=b'plain',
761 1055 post_finalize=True,
762 1056 )
763 1057 if write_key:
764 1058 tr.addfilegenerator(
765 b'dirstate-2-key-post',
1059 b'dirstate-2-key-post%s' % self._tr_key_suffix,
766 1060 (self._filename_th,),
767 1061 lambda f: self._write_tracked_hint(tr, f),
768 1062 location=b'plain',
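The `write()` method above never performs I/O directly while a transaction is active; it registers callbacks and lets the transaction fire them on success or abort. A self-contained sketch of that contract (a hypothetical mini-API, not Mercurial's transaction class):

    class MiniTransaction:
        """Collect callbacks; fire them on close() or abort()."""

        def __init__(self):
            self._generators = []
            self._abort_callbacks = []

        def addfilegenerator(self, genid, write_cb):
            self._generators.append((genid, write_cb))

        def addabort(self, cbid, abort_cb):
            self._abort_callbacks.append((cbid, abort_cb))

        def close(self):
            for _genid, write_cb in self._generators:
                write_cb()

        def abort(self):
            for _cbid, abort_cb in self._abort_callbacks:
                abort_cb()

    tr = MiniTransaction()
    written = []
    tr.addfilegenerator(b'dirstate-1-main', lambda: written.append(b'dirstate'))
    tr.addabort(b'dirstate-invalidate', written.clear)
    tr.close()
    assert written == [b'dirstate']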
@@ -798,6 +1092,8 class dirstate:
798 1092 self._plchangecallbacks[category] = callback
799 1093
800 1094 def _writedirstate(self, tr, st):
1095 # make sure we don't write invalidated content
1096 assert not self._invalidated_context
801 1097 # notify callbacks about parents change
802 1098 if self._origpl is not None and self._origpl != self._pl:
803 1099 for c, callback in sorted(self._plchangecallbacks.items()):
@@ -936,7 +1232,8 class dirstate:
936 1232 badfn(ff, badtype(kind))
937 1233 if nf in dmap:
938 1234 results[nf] = None
939 except OSError as inst: # nf not found on disk - it is dirstate only
1235 except OSError as inst:
1236 # nf not found on disk - it is dirstate only
940 1237 if nf in dmap: # does it exactly match a missing file?
941 1238 results[nf] = None
942 1239 else: # does it match a missing directory?
@@ -1246,7 +1543,7 class dirstate:
1246 1543 )
1247 1544 )
1248 1545
1249 for (fn, message) in bad:
1546 for fn, message in bad:
1250 1547 matcher.bad(fn, encoding.strtolocal(message))
1251 1548
1252 1549 status = scmutil.status(
@@ -1276,6 +1573,9 class dirstate:
1276 1573 files that have definitely not been modified since the
1277 1574 dirstate was written
1278 1575 """
1576 if not self._running_status:
1577 msg = "Calling `status` outside a `running_status` context"
1578 raise error.ProgrammingError(msg)
1279 1579 listignored, listclean, listunknown = ignored, clean, unknown
1280 1580 lookup, modified, added, unknown, ignored = [], [], [], [], []
1281 1581 removed, deleted, clean = [], [], []
@@ -1435,142 +1735,47 class dirstate:
1435 1735 else:
1436 1736 return self._filename
1437 1737
1438 def data_backup_filename(self, backupname):
1439 if not self._use_dirstate_v2:
1440 return None
1441 return backupname + b'.v2-data'
1442
1443 def _new_backup_data_filename(self, backupname):
1444 """return a filename to backup a data-file or None"""
1445 if not self._use_dirstate_v2:
1446 return None
1447 if self._map.docket.uuid is None:
1448 # not created yet, nothing to backup
1449 return None
1450 data_filename = self._map.docket.data_filename()
1451 return data_filename, self.data_backup_filename(backupname)
1452
1453 def backup_data_file(self, backupname):
1454 if not self._use_dirstate_v2:
1455 return None
1456 docket = docketmod.DirstateDocket.parse(
1457 self._opener.read(backupname),
1458 self._nodeconstants,
1459 )
1460 return self.data_backup_filename(backupname), docket.data_filename()
1461
1462 def savebackup(self, tr, backupname):
1463 '''Save current dirstate into backup file'''
1464 filename = self._actualfilename(tr)
1465 assert backupname != filename
1738 def all_file_names(self):
1739 """list all filename currently used by this dirstate
1466 1740
1467 # use '_writedirstate' instead of 'write' to write changes certainly,
1468 # because the latter omits writing out if transaction is running.
1469 # output file will be used to create backup of dirstate at this point.
1470 if self._dirty or not self._opener.exists(filename):
1471 self._writedirstate(
1472 tr,
1473 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1741 This is only used to do `hg rollback`-related backups in the transaction
1742 """
1743 if not self._opener.exists(self._filename):
1744 # no data ever written to disk yet
1745 return ()
1746 elif self._use_dirstate_v2:
1747 return (
1748 self._filename,
1749 self._map.docket.data_filename(),
1474 1750 )
1751 else:
1752 return (self._filename,)
1475 1753
1476 if tr:
1477 # ensure that subsequent tr.writepending returns True for
1478 # changes written out above, even if dirstate is never
1479 # changed after this
1480 tr.addfilegenerator(
1481 b'dirstate-1-main',
1482 (self._filename,),
1483 lambda f: self._writedirstate(tr, f),
1484 location=b'plain',
1485 post_finalize=True,
1486 )
1487
1488 # ensure that pending file written above is unlinked at
1489 # failure, even if tr.writepending isn't invoked until the
1490 # end of this transaction
1491 tr.registertmp(filename, location=b'plain')
1492
1493 self._opener.tryunlink(backupname)
1494 # hardlink backup is okay because _writedirstate is always called
1495 # with an "atomictemp=True" file.
1496 util.copyfile(
1497 self._opener.join(filename),
1498 self._opener.join(backupname),
1499 hardlink=True,
1754 def verify(self, m1, m2, p1, narrow_matcher=None):
1755 """
1756 check the dirstate contents against the parent manifest and yield errors
1757 """
1758 missing_from_p1 = _(
1759 b"%s marked as tracked in p1 (%s) but not in manifest1\n"
1500 1760 )
1501 data_pair = self._new_backup_data_filename(backupname)
1502 if data_pair is not None:
1503 data_filename, bck_data_filename = data_pair
1504 util.copyfile(
1505 self._opener.join(data_filename),
1506 self._opener.join(bck_data_filename),
1507 hardlink=True,
1508 )
1509 if tr is not None:
1510 # ensure that pending file written above is unlinked at
1511 # failure, even if tr.writepending isn't invoked until the
1512 # end of this transaction
1513 tr.registertmp(bck_data_filename, location=b'plain')
1514
1515 def restorebackup(self, tr, backupname):
1516 '''Restore dirstate by backup file'''
1517 # this "invalidate()" prevents "wlock.release()" from writing
1518 # changes of dirstate out after restoring from backup file
1519 self.invalidate()
1520 o = self._opener
1521 if not o.exists(backupname):
1522 # there was no file backup, delete existing files
1523 filename = self._actualfilename(tr)
1524 data_file = None
1525 if self._use_dirstate_v2 and self._map.docket.uuid is not None:
1526 data_file = self._map.docket.data_filename()
1527 if o.exists(filename):
1528 o.unlink(filename)
1529 if data_file is not None and o.exists(data_file):
1530 o.unlink(data_file)
1531 return
1532 filename = self._actualfilename(tr)
1533 data_pair = self.backup_data_file(backupname)
1534 if o.exists(filename) and util.samefile(
1535 o.join(backupname), o.join(filename)
1536 ):
1537 o.unlink(backupname)
1538 else:
1539 o.rename(backupname, filename, checkambig=True)
1540
1541 if data_pair is not None:
1542 data_backup, target = data_pair
1543 if o.exists(target) and util.samefile(
1544 o.join(data_backup), o.join(target)
1545 ):
1546 o.unlink(data_backup)
1547 else:
1548 o.rename(data_backup, target, checkambig=True)
1549
1550 def clearbackup(self, tr, backupname):
1551 '''Clear backup file'''
1552 o = self._opener
1553 if o.exists(backupname):
1554 data_backup = self.backup_data_file(backupname)
1555 o.unlink(backupname)
1556 if data_backup is not None:
1557 o.unlink(data_backup[0])
1558
1559 def verify(self, m1, m2):
1560 """check the dirstate content again the parent manifest and yield errors"""
1561 missing_from_p1 = b"%s in state %s, but not in manifest1\n"
1562 unexpected_in_p1 = b"%s in state %s, but also in manifest1\n"
1563 missing_from_ps = b"%s in state %s, but not in either manifest\n"
1564 missing_from_ds = b"%s in manifest1, but listed as state %s\n"
1761 unexpected_in_p1 = _(b"%s marked as added, but also in manifest1\n")
1762 missing_from_ps = _(
1763 b"%s marked as modified, but not in either manifest\n"
1764 )
1765 missing_from_ds = _(
1766 b"%s in manifest1, but not marked as tracked in p1 (%s)\n"
1767 )
1565 1768 for f, entry in self.items():
1566 state = entry.state
1567 if state in b"nr" and f not in m1:
1568 yield (missing_from_p1, f, state)
1569 if state in b"a" and f in m1:
1570 yield (unexpected_in_p1, f, state)
1571 if state in b"m" and f not in m1 and f not in m2:
1572 yield (missing_from_ps, f, state)
1769 if entry.p1_tracked:
1770 if entry.modified and f not in m1 and f not in m2:
1771 yield missing_from_ps % f
1772 elif f not in m1:
1773 yield missing_from_p1 % (f, node.short(p1))
1774 if entry.added and f in m1:
1775 yield unexpected_in_p1 % f
1573 1776 for f in m1:
1574 state = self.get_entry(f).state
1575 if state not in b"nrm":
1576 yield (missing_from_ds, f, state)
1777 if narrow_matcher is not None and not narrow_matcher(f):
1778 continue
1779 entry = self.get_entry(f)
1780 if not entry.p1_tracked:
1781 yield missing_from_ds % (f, node.short(p1))
@@ -58,6 +58,34 class _dirstatemapcommon:
58 58 # for consistent view between _pl() and _read() invocations
59 59 self._pendingmode = None
60 60
61 def _set_identity(self):
62 self.identity = self._get_current_identity()
63
64 def _get_current_identity(self):
65 try:
66 return util.cachestat(self._opener.join(self._filename))
67 except FileNotFoundError:
68 return None
69
70 def may_need_refresh(self):
71 if 'identity' not in vars(self):
72 # no existing identity, we need a refresh
73 return True
74 if self.identity is None:
75 return True
76 if not self.identity.cacheable():
77 # We cannot trust the entry
78 # XXX this is a problem on Windows, NFS, or other inode-less systems
79 return True
80 current_identity = self._get_current_identity()
81 if current_identity is None:
82 return True
83 if not current_identity.cacheable():
84 # We cannot trust the entry
85 # XXX this is a problem on Windows, NFS, or other inode-less systems
86 return True
87 return current_identity != self.identity
88
61 89 def preload(self):
62 90 """Loads the underlying data, if it's not already loaded"""
63 91 self._map
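The identity bookkeeping above boils down to caching a stat() snapshot and distrusting it whenever it cannot be compared. A stand-alone approximation (using a plain stat tuple instead of `util.cachestat`; the path is a placeholder):

    import os

    class FileIdentity:
        def __init__(self, path):
            self.path = path
            self.identity = self._current()

        def _current(self):
            try:
                st = os.stat(self.path)
                return (st.st_ino, st.st_size, st.st_mtime_ns)
            except FileNotFoundError:
                return None

        def may_need_refresh(self):
            # no identity recorded, or the file vanished/changed on disk
            if self.identity is None:
                return True
            return self._current() != self.identity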
@@ -118,6 +146,9 class _dirstatemapcommon:
118 146 raise error.ProgrammingError(b'dirstate docket name collision')
119 147 data_filename = new_docket.data_filename()
120 148 self._opener.write(data_filename, packed)
149 # tell the transaction that we are adding a new file
150 if tr is not None:
151 tr.addbackup(data_filename, location=b'plain')
121 152 # Write the new docket after the new data file has been
122 153 # written. Because `st` was opened with `atomictemp=True`,
123 154 # the actual `.hg/dirstate` file is only affected on close.
@@ -127,6 +158,8 class _dirstatemapcommon:
127 158 # the new data file was written.
128 159 if old_docket.uuid:
129 160 data_filename = old_docket.data_filename()
161 if tr is not None:
162 tr.addbackup(data_filename, location=b'plain')
130 163 unlink = lambda _tr=None: self._opener.unlink(data_filename)
131 164 if tr:
132 165 category = b"dirstate-v2-clean-" + old_docket.uuid
@@ -258,9 +291,7 class dirstatemap(_dirstatemapcommon):
258 291
259 292 def read(self):
260 293 # ignore HG_PENDING because identity is used only for writing
261 self.identity = util.filestat.frompath(
262 self._opener.join(self._filename)
263 )
294 self._set_identity()
264 295
265 296 if self._use_dirstate_v2:
266 297 if not self.docket.uuid:
@@ -523,9 +554,7 if rustmod is not None:
523 554 Fills the Dirstatemap when called.
524 555 """
525 556 # ignore HG_PENDING because identity is used only for writing
526 self.identity = util.filestat.frompath(
527 self._opener.join(self._filename)
528 )
557 self._set_identity()
529 558
530 559 if self._use_dirstate_v2:
531 560 if self.docket.uuid:
@@ -614,6 +643,14 if rustmod is not None:
614 643 if append:
615 644 docket = self.docket
616 645 data_filename = docket.data_filename()
646 # We mark it for backup to make sure a future `hg rollback` (or
647 # `hg recover`?) call finds the data it needs to restore a
648 # working repository.
649 #
650 # The backup can use a hardlink because the format is resistant
651 # to trailing "dead" data.
652 if tr is not None:
653 tr.addbackup(data_filename, location=b'plain')
617 654 with self._opener(data_filename, b'r+b') as fp:
618 655 fp.seek(docket.data_size)
619 656 assert fp.tell() == docket.data_size
@@ -980,7 +980,8 def _getlocal(ui, rpath, wd=None):
980 980 lui.readconfig(os.path.join(path, b".hg", b"hgrc-not-shared"), path)
981 981
982 982 if rpath:
983 path = urlutil.get_clone_path(lui, rpath)[0]
983 path_obj = urlutil.get_clone_path_obj(lui, rpath)
984 path = path_obj.rawloc
984 985 lui = ui.copy()
985 986 if rcutil.use_repo_hgrc():
986 987 _readsharedsourceconfig(lui, path)
@@ -1183,7 +1183,12 def _pushbundle2(pushop):
1183 1183 trgetter = None
1184 1184 if pushback:
1185 1185 trgetter = pushop.trmanager.transaction
1186 op = bundle2.processbundle(pushop.repo, reply, trgetter)
1186 op = bundle2.processbundle(
1187 pushop.repo,
1188 reply,
1189 trgetter,
1190 remote=pushop.remote,
1191 )
1187 1192 except error.BundleValueError as exc:
1188 1193 raise error.RemoteError(_(b'missing support for %s') % exc)
1189 1194 except bundle2.AbortFromPart as exc:
@@ -1903,10 +1908,18 def _pullbundle2(pullop):
1903 1908
1904 1909 try:
1905 1910 op = bundle2.bundleoperation(
1906 pullop.repo, pullop.gettransaction, source=b'pull'
1911 pullop.repo,
1912 pullop.gettransaction,
1913 source=b'pull',
1914 remote=pullop.remote,
1907 1915 )
1908 1916 op.modes[b'bookmarks'] = b'records'
1909 bundle2.processbundle(pullop.repo, bundle, op=op)
1917 bundle2.processbundle(
1918 pullop.repo,
1919 bundle,
1920 op=op,
1921 remote=pullop.remote,
1922 )
1910 1923 except bundle2.AbortFromPart as exc:
1911 1924 pullop.repo.ui.error(_(b'remote: abort: %s\n') % exc)
1912 1925 raise error.RemoteError(_(b'pull failed on remote'), hint=exc.hint)
@@ -1995,7 +2008,12 def _pullchangeset(pullop):
1995 2008 ).result()
1996 2009
1997 2010 bundleop = bundle2.applybundle(
1998 pullop.repo, cg, tr, b'pull', pullop.remote.url()
2011 pullop.repo,
2012 cg,
2013 tr,
2014 b'pull',
2015 pullop.remote.url(),
2016 remote=pullop.remote,
1999 2017 )
2000 2018 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
2001 2019
@@ -111,6 +111,7 class filelog:
111 111 assumehaveparentrevisions=False,
112 112 deltamode=repository.CG_DELTAMODE_STD,
113 113 sidedata_helpers=None,
114 debug_info=None,
114 115 ):
115 116 return self._revlog.emitrevisions(
116 117 nodes,
@@ -119,6 +120,7 class filelog:
119 120 assumehaveparentrevisions=assumehaveparentrevisions,
120 121 deltamode=deltamode,
121 122 sidedata_helpers=sidedata_helpers,
123 debug_info=debug_info,
122 124 )
123 125
124 126 def addrevision(
@@ -151,6 +153,8 class filelog:
151 153 addrevisioncb=None,
152 154 duplicaterevisioncb=None,
153 155 maybemissingparents=False,
156 debug_info=None,
157 delta_base_reuse_policy=None,
154 158 ):
155 159 if maybemissingparents:
156 160 raise error.Abort(
@@ -171,6 +175,8 class filelog:
171 175 transaction,
172 176 addrevisioncb=addrevisioncb,
173 177 duplicaterevisioncb=duplicaterevisioncb,
178 debug_info=debug_info,
179 delta_base_reuse_policy=delta_base_reuse_policy,
174 180 )
175 181
176 182 def getstrippoint(self, minlink):
@@ -158,7 +158,7 def findexternaltool(ui, tool):
158 158 continue
159 159 p = util.lookupreg(k, _toolstr(ui, tool, b"regname"))
160 160 if p:
161 p = procutil.findexe(p + _toolstr(ui, tool, b"regappend", b""))
161 p = procutil.findexe(p + _toolstr(ui, tool, b"regappend"))
162 162 if p:
163 163 return p
164 164 exe = _toolstr(ui, tool, b"executable", tool)
@@ -478,8 +478,9 def _merge(repo, local, other, base, mod
478 478 """
479 479 Uses the internal non-interactive simple merge algorithm for merging
480 480 files. It will fail if there are any conflicts and leave markers in
481 the partially merged file. Markers will have two sections, one for each side
482 of merge, unless mode equals 'union' which suppresses the markers."""
481 the partially merged file. Markers will have two sections, one for each
482 side of the merge, unless mode equals 'union' or 'union-other-first', which
483 suppresses the markers."""
483 484 ui = repo.ui
484 485
485 486 try:
@@ -510,12 +511,28 def _merge(repo, local, other, base, mod
510 511 def _iunion(repo, mynode, local, other, base, toolconf, backup):
511 512 """
512 513 Uses the internal non-interactive simple merge algorithm for merging
513 files. It will use both left and right sides for conflict regions.
514 files. It will use both local and other sides for conflict regions by
515 adding local on top of other.
514 516 No markers are inserted."""
515 517 return _merge(repo, local, other, base, b'union')
516 518
517 519
518 520 @internaltool(
521 b'union-other-first',
522 fullmerge,
523 _(
524 b"warning: conflicts while merging %s! "
525 b"(edit, then use 'hg resolve --mark')\n"
526 ),
527 precheck=_mergecheck,
528 )
529 def _iunion_other_first(repo, mynode, local, other, base, toolconf, backup):
530 """
531 Like :union, but adds other on top of local."""
532 return _merge(repo, local, other, base, b'union-other-first')
533
534
535 @internaltool(
519 536 b'merge',
520 537 fullmerge,
521 538 _(
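Assuming a merge with conflicting changes, the new tool is selected like any other internal merge tool (illustrative commands):

    $ hg merge --tool :union-other-first
    $ hg resolve --all --tool :union-other-first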
@@ -10,6 +10,18 import itertools
10 10 import re
11 11 import textwrap
12 12
13 from typing import (
14 Callable,
15 Dict,
16 Iterable,
17 List,
18 Optional,
19 Set,
20 Tuple,
21 Union,
22 cast,
23 )
24
13 25 from .i18n import (
14 26 _,
15 27 gettext,
@@ -40,7 +52,16 from .utils import (
40 52 stringutil,
41 53 )
42 54
43 _exclkeywords = {
55 _DocLoader = Callable[[uimod.ui], bytes]
56 # Old extensions may not register with a category
57 _HelpEntry = Union["_HelpEntryNoCategory", "_HelpEntryWithCategory"]
58 _HelpEntryNoCategory = Tuple[List[bytes], bytes, _DocLoader]
59 _HelpEntryWithCategory = Tuple[List[bytes], bytes, _DocLoader, bytes]
60 _SelectFn = Callable[[object], bool]
61 _SynonymTable = Dict[bytes, List[bytes]]
62 _TopicHook = Callable[[uimod.ui, bytes, bytes], bytes]
63
64 _exclkeywords: Set[bytes] = {
44 65 b"(ADVANCED)",
45 66 b"(DEPRECATED)",
46 67 b"(EXPERIMENTAL)",
@@ -56,7 +77,7 from .utils import (
56 77 # Extensions with custom categories should insert them into this list
57 78 # after/before the appropriate item, rather than replacing the list or
58 79 # assuming absolute positions.
59 CATEGORY_ORDER = [
80 CATEGORY_ORDER: List[bytes] = [
60 81 registrar.command.CATEGORY_REPO_CREATION,
61 82 registrar.command.CATEGORY_REMOTE_REPO_MANAGEMENT,
62 83 registrar.command.CATEGORY_COMMITTING,
@@ -74,7 +95,7 CATEGORY_ORDER = [
74 95
75 96 # Human-readable category names. These are translated.
76 97 # Extensions with custom categories should add their names here.
77 CATEGORY_NAMES = {
98 CATEGORY_NAMES: Dict[bytes, bytes] = {
78 99 registrar.command.CATEGORY_REPO_CREATION: b'Repository creation',
79 100 registrar.command.CATEGORY_REMOTE_REPO_MANAGEMENT: b'Remote repository management',
80 101 registrar.command.CATEGORY_COMMITTING: b'Change creation',
@@ -102,7 +123,7 TOPIC_CATEGORY_NONE = b'none'
102 123 # Extensions with custom categories should insert them into this list
103 124 # after/before the appropriate item, rather than replacing the list or
104 125 # assuming absolute positions.
105 TOPIC_CATEGORY_ORDER = [
126 TOPIC_CATEGORY_ORDER: List[bytes] = [
106 127 TOPIC_CATEGORY_IDS,
107 128 TOPIC_CATEGORY_OUTPUT,
108 129 TOPIC_CATEGORY_CONFIG,
@@ -112,7 +133,7 TOPIC_CATEGORY_ORDER = [
112 133 ]
113 134
114 135 # Human-readable topic category names. These are translated.
115 TOPIC_CATEGORY_NAMES = {
136 TOPIC_CATEGORY_NAMES: Dict[bytes, bytes] = {
116 137 TOPIC_CATEGORY_IDS: b'Mercurial identifiers',
117 138 TOPIC_CATEGORY_OUTPUT: b'Mercurial output',
118 139 TOPIC_CATEGORY_CONFIG: b'Mercurial configuration',
@@ -122,7 +143,12 TOPIC_CATEGORY_NAMES = {
122 143 }
123 144
124 145
125 def listexts(header, exts, indent=1, showdeprecated=False):
146 def listexts(
147 header: bytes,
148 exts: Dict[bytes, bytes],
149 indent: int = 1,
150 showdeprecated: bool = False,
151 ) -> List[bytes]:
126 152 '''return a text listing of the given extensions'''
127 153 rst = []
128 154 if exts:
@@ -135,7 +161,7 def listexts(header, exts, indent=1, sho
135 161 return rst
136 162
137 163
138 def extshelp(ui):
164 def extshelp(ui: uimod.ui) -> bytes:
139 165 rst = loaddoc(b'extensions')(ui).splitlines(True)
140 166 rst.extend(
141 167 listexts(
@@ -153,7 +179,7 def extshelp(ui):
153 179 return doc
154 180
155 181
156 def parsedefaultmarker(text):
182 def parsedefaultmarker(text: bytes) -> Optional[Tuple[bytes, List[bytes]]]:
157 183 """given a text 'abc (DEFAULT: def.ghi)',
158 184 returns (b'abc', (b'def', b'ghi')). Otherwise return None"""
159 185 if text[-1:] == b')':
@@ -164,7 +190,7 def parsedefaultmarker(text):
164 190 return text[:pos], item.split(b'.', 2)
165 191
166 192
167 def optrst(header, options, verbose, ui):
193 def optrst(header: bytes, options, verbose: bool, ui: uimod.ui) -> bytes:
168 194 data = []
169 195 multioccur = False
170 196 for option in options:
@@ -220,13 +246,15 def optrst(header, options, verbose, ui)
220 246 return b''.join(rst)
221 247
222 248
223 def indicateomitted(rst, omitted, notomitted=None):
249 def indicateomitted(
250 rst: List[bytes], omitted: bytes, notomitted: Optional[bytes] = None
251 ) -> None:
224 252 rst.append(b'\n\n.. container:: omitted\n\n %s\n\n' % omitted)
225 253 if notomitted:
226 254 rst.append(b'\n\n.. container:: notomitted\n\n %s\n\n' % notomitted)
227 255
228 256
229 def filtercmd(ui, cmd, func, kw, doc):
257 def filtercmd(ui: uimod.ui, cmd: bytes, func, kw: bytes, doc: bytes) -> bool:
230 258 if not ui.debugflag and cmd.startswith(b"debug") and kw != b"debug":
231 259 # Debug command, and user is not looking for those.
232 260 return True
@@ -249,11 +277,13 def filtercmd(ui, cmd, func, kw, doc):
249 277 return False
250 278
251 279
252 def filtertopic(ui, topic):
280 def filtertopic(ui: uimod.ui, topic: bytes) -> bool:
253 281 return ui.configbool(b'help', b'hidden-topic.%s' % topic, False)
254 282
255 283
256 def topicmatch(ui, commands, kw):
284 def topicmatch(
285 ui: uimod.ui, commands, kw: bytes
286 ) -> Dict[bytes, List[Tuple[bytes, bytes]]]:
257 287 """Return help topics matching kw.
258 288
259 289 Returns {'section': [(name, summary), ...], ...} where section is
@@ -326,10 +356,10 def topicmatch(ui, commands, kw):
326 356 return results
327 357
328 358
329 def loaddoc(topic, subdir=None):
359 def loaddoc(topic: bytes, subdir: Optional[bytes] = None) -> _DocLoader:
330 360 """Return a delayed loader for help/topic.txt."""
331 361
332 def loader(ui):
362 def loader(ui: uimod.ui) -> bytes:
333 363 package = b'mercurial.helptext'
334 364 if subdir:
335 365 package += b'.' + subdir
@@ -342,7 +372,7 def loaddoc(topic, subdir=None):
342 372 return loader
343 373
344 374
345 internalstable = sorted(
375 internalstable: List[_HelpEntryNoCategory] = sorted(
346 376 [
347 377 (
348 378 [b'bid-merge'],
@@ -407,7 +437,7 internalstable = sorted(
407 437 )
408 438
409 439
410 def internalshelp(ui):
440 def internalshelp(ui: uimod.ui) -> bytes:
411 441 """Generate the index for the "internals" topic."""
412 442 lines = [
413 443 b'To access a subtopic, use "hg help internals.{subtopic-name}"\n',
@@ -419,7 +449,7 def internalshelp(ui):
419 449 return b''.join(lines)
420 450
421 451
422 helptable = sorted(
452 helptable: List[_HelpEntryWithCategory] = sorted(
423 453 [
424 454 (
425 455 [b'bundlespec'],
@@ -581,20 +611,27 helptable = sorted(
581 611 )
582 612
583 613 # Maps topics with sub-topics to a list of their sub-topics.
584 subtopics = {
614 subtopics: Dict[bytes, List[_HelpEntryNoCategory]] = {
585 615 b'internals': internalstable,
586 616 }
587 617
588 618 # Map topics to lists of callable taking the current topic help and
589 619 # returning the updated version
590 helphooks = {}
620 helphooks: Dict[bytes, List[_TopicHook]] = {}
591 621
592 622
593 def addtopichook(topic, rewriter):
623 def addtopichook(topic: bytes, rewriter: _TopicHook) -> None:
594 624 helphooks.setdefault(topic, []).append(rewriter)
595 625
596 626
597 def makeitemsdoc(ui, topic, doc, marker, items, dedent=False):
627 def makeitemsdoc(
628 ui: uimod.ui,
629 topic: bytes,
630 doc: bytes,
631 marker: bytes,
632 items: Dict[bytes, bytes],
633 dedent: bool = False,
634 ) -> bytes:
598 635 """Extract docstring from the items key to function mapping, build a
599 636 single documentation block and use it to overwrite the marker in doc.
600 637 """
@@ -622,8 +659,10 def makeitemsdoc(ui, topic, doc, marker,
622 659 return doc.replace(marker, entries)
623 660
624 661
625 def addtopicsymbols(topic, marker, symbols, dedent=False):
626 def add(ui, topic, doc):
662 def addtopicsymbols(
663 topic: bytes, marker: bytes, symbols, dedent: bool = False
664 ) -> None:
665 def add(ui: uimod.ui, topic: bytes, doc: bytes):
627 666 return makeitemsdoc(ui, topic, doc, marker, symbols, dedent=dedent)
628 667
629 668 addtopichook(topic, add)
@@ -647,7 +686,7 addtopicsymbols(
647 686 )
648 687
649 688
650 def inserttweakrc(ui, topic, doc):
689 def inserttweakrc(ui: uimod.ui, topic: bytes, doc: bytes) -> bytes:
651 690 marker = b'.. tweakdefaultsmarker'
652 691 repl = uimod.tweakrc
653 692
@@ -658,7 +697,9 def inserttweakrc(ui, topic, doc):
658 697 return re.sub(br'( *)%s' % re.escape(marker), sub, doc)
659 698
660 699
661 def _getcategorizedhelpcmds(ui, cmdtable, name, select=None):
700 def _getcategorizedhelpcmds(
701 ui: uimod.ui, cmdtable, name: bytes, select: Optional[_SelectFn] = None
702 ) -> Tuple[Dict[bytes, List[bytes]], Dict[bytes, bytes], _SynonymTable]:
662 703 # Category -> list of commands
663 704 cats = {}
664 705 # Command -> short description
@@ -687,16 +728,18 def _getcategorizedhelpcmds(ui, cmdtable
687 728 return cats, h, syns
688 729
689 730
690 def _getcategorizedhelptopics(ui, topictable):
731 def _getcategorizedhelptopics(
732 ui: uimod.ui, topictable: List[_HelpEntry]
733 ) -> Tuple[Dict[bytes, List[Tuple[bytes, bytes]]], Dict[bytes, List[bytes]]]:
691 734 # Group commands by category.
692 735 topiccats = {}
693 736 syns = {}
694 737 for topic in topictable:
695 738 names, header, doc = topic[0:3]
696 739 if len(topic) > 3 and topic[3]:
697 category = topic[3]
740 category: bytes = cast(bytes, topic[3]) # help pytype
698 741 else:
699 category = TOPIC_CATEGORY_NONE
742 category: bytes = TOPIC_CATEGORY_NONE
700 743
701 744 topicname = names[0]
702 745 syns[topicname] = list(names)
@@ -709,15 +752,15 addtopichook(b'config', inserttweakrc)
709 752
710 753
711 754 def help_(
712 ui,
755 ui: uimod.ui,
713 756 commands,
714 name,
715 unknowncmd=False,
716 full=True,
717 subtopic=None,
718 fullname=None,
757 name: bytes,
758 unknowncmd: bool = False,
759 full: bool = True,
760 subtopic: Optional[bytes] = None,
761 fullname: Optional[bytes] = None,
719 762 **opts
720 ):
763 ) -> bytes:
721 764 """
722 765 Generate the help for 'name' as unformatted restructured text. If
723 766 'name' is None, describe the commands available.
@@ -725,7 +768,7 def help_(
725 768
726 769 opts = pycompat.byteskwargs(opts)
727 770
728 def helpcmd(name, subtopic=None):
771 def helpcmd(name: bytes, subtopic: Optional[bytes]) -> List[bytes]:
729 772 try:
730 773 aliases, entry = cmdutil.findcmd(
731 774 name, commands.table, strict=unknowncmd
@@ -826,7 +869,7 def help_(
826 869
827 870 return rst
828 871
829 def helplist(select=None, **opts):
872 def helplist(select: Optional[_SelectFn] = None, **opts) -> List[bytes]:
830 873 cats, h, syns = _getcategorizedhelpcmds(
831 874 ui, commands.table, name, select
832 875 )
@@ -846,7 +889,7 def help_(
846 889 else:
847 890 rst.append(_(b'list of commands:\n'))
848 891
849 def appendcmds(cmds):
892 def appendcmds(cmds: Iterable[bytes]) -> None:
850 893 cmds = sorted(cmds)
851 894 for c in cmds:
852 895 display_cmd = c
@@ -955,7 +998,7 def help_(
955 998 )
956 999 return rst
957 1000
958 def helptopic(name, subtopic=None):
1001 def helptopic(name: bytes, subtopic: Optional[bytes] = None) -> List[bytes]:
959 1002 # Look for sub-topic entry first.
960 1003 header, doc = None, None
961 1004 if subtopic and name in subtopics:
@@ -998,7 +1041,7 def help_(
998 1041 pass
999 1042 return rst
1000 1043
1001 def helpext(name, subtopic=None):
1044 def helpext(name: bytes, subtopic: Optional[bytes] = None) -> List[bytes]:
1002 1045 try:
1003 1046 mod = extensions.find(name)
1004 1047 doc = gettext(pycompat.getdoc(mod)) or _(b'no help text available')
@@ -1040,7 +1083,9 def help_(
1040 1083 )
1041 1084 return rst
1042 1085
1043 def helpextcmd(name, subtopic=None):
1086 def helpextcmd(
1087 name: bytes, subtopic: Optional[bytes] = None
1088 ) -> List[bytes]:
1044 1089 cmd, ext, doc = extensions.disabledcmd(
1045 1090 ui, name, ui.configbool(b'ui', b'strict')
1046 1091 )
@@ -1127,8 +1172,14 def help_(
1127 1172
1128 1173
1129 1174 def formattedhelp(
1130 ui, commands, fullname, keep=None, unknowncmd=False, full=True, **opts
1131 ):
1175 ui: uimod.ui,
1176 commands,
1177 fullname: Optional[bytes],
1178 keep: Optional[Iterable[bytes]] = None,
1179 unknowncmd: bool = False,
1180 full: bool = True,
1181 **opts
1182 ) -> bytes:
1132 1183 """get help for a given topic (as a dotted name) as rendered rst
1133 1184
1134 1185 Either returns the rendered help text or raises an exception.
@@ -1922,6 +1922,42 The following sub-options can be defined
1922 1922 - ``ignore``: ignore bookmarks during exchange.
1923 1923 (This currently only affects pulling)
1924 1924
1925 .. container:: verbose
1926
1927 ``delta-reuse-policy``
1928 Control the policy regarding deltas sent by the remote during pulls.
1929
1930 This is an advanced option that non-admin users should not need to understand
1931 or set. This option can be used to speed up pulls from trusted central
1932 servers, or to fix up deltas from older servers.
1933
1934 It supports the following values:
1935
1936 - ``default``: use the policy defined by
1937 `storage.revlog.reuse-external-delta-parent`,
1938
1939 - ``no-reuse``: start a new optimal delta search for each new revision we add
1940 to the repository. The deltas from the server will be reused when the base
1941 it applies to is tested (this can be frequent if that base is the one and
1942 unique parent of that revision). This can significantly slow down pulls, but
1943 will result in optimized storage space if the remote peer is sending
1944 poor-quality deltas.
1945
1946 - ``try-base``: try to reuse the deltas from the remote peer as long as they
1947 create a valid delta-chain in the local repository. This speeds up the
1948 unbundling process, but can result in sub-optimal storage space if the
1949 remote peer is sending poor quality deltas.
1950
1951 - ``forced``: the deltas from the peer will be reused in all cases, even if
1952 the resulting delta-chain is "invalid". This setting will ensure the bundle
1953 is applied at minimal CPU cost, but it can result in longer delta chains
1954 being created on the client, making revisions potentially slower to access
1955 in the future. If you think you need this option, you should make sure you
1956 are also talking to the Mercurial developer community to get confirmation.
1957
1958 See `hg help config.storage.revlog.reuse-external-delta-parent` for a similar
1959 global option. That option defines the behavior of `default`.
1960
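For illustration, a minimal hgrc sketch attaching this sub-option to a path
(the URL is hypothetical; ``try-base`` is one of the values listed above)::

    [paths]
    default = https://hg.example.org/repo
    default:delta-reuse-policy = try-base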
1925 1961 The following special named paths exist:
1926 1962
1927 1963 ``default``
@@ -2281,6 +2317,21 category impact performance and reposito
2281 2317 To fix affected revisions that already exist within the repository, one can
2282 2318 use :hg:`debug-repair-issue-6528`.
2283 2319
2320 .. container:: verbose
2321
2322 ``revlog.delta-parent-search.candidate-group-chunk-size``
2323 Tune the number of delta bases the storage will consider in the
2324 same "round" of search. In some very rare cases, using a smaller value
2325 might result in faster processing at the possible expense of storage
2326     space, while using larger values might result in slower processing with a
2327     possible benefit in storage space. A value of "0" means no limitation.
2328
2329 default: no limitation
2330
2331     It is unlikely that you'll have to tune this configuration. If you think
2332     you do, consider talking with the Mercurial developer community about your
2333 repositories.
2334
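As a sketch, the same knob set from an hgrc (the value ``16`` is purely
illustrative; the default places no limit)::

    [storage]
    revlog.delta-parent-search.candidate-group-chunk-size = 16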
2284 2335 ``revlog.optimize-delta-parent-choice``
2285 2336 When storing a merge revision, both parents will be equally considered as
2286 2337 a possible delta base. This results in better delta selection and improved
@@ -76,8 +76,8 instructions on how to install from sour
76 76 MSRV
77 77 ====
78 78
79 The minimum supported Rust version is currently 1.48.0. The project's policy is
80 to follow the version from Debian stable, to make the distributions' job easier.
79 The minimum supported Rust version is currently 1.61.0. The project's policy is
80 to follow the version from Debian testing, to make the distributions' job easier.
81 81
82 82 rhg
83 83 ===
@@ -65,28 +65,12 release = lock.release
65 65 sharedbookmarks = b'bookmarks'
66 66
67 67
68 def _local(path):
69 path = util.expandpath(urlutil.urllocalpath(path))
70
71 try:
72 # we use os.stat() directly here instead of os.path.isfile()
73 # because the latter started returning `False` on invalid path
74 # exceptions starting in 3.8 and we care about handling
75 # invalid paths specially here.
76 st = os.stat(path)
77 isfile = stat.S_ISREG(st.st_mode)
78 except ValueError as e:
79 raise error.Abort(
80 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
81 )
82 except OSError:
83 isfile = False
84
85 return isfile and bundlerepo or localrepo
86
87
88 68 def addbranchrevs(lrepo, other, branches, revs):
89 peer = other.peer() # a courtesy to callers using a localrepo for other
69 if util.safehasattr(other, 'peer'):
70 # a courtesy to callers using a localrepo for other
71 peer = other.peer()
72 else:
73 peer = other
90 74 hashbranch, branches = branches
91 75 if not hashbranch and not branches:
92 76 x = revs or None
@@ -129,10 +113,47 def addbranchrevs(lrepo, other, branches
129 113 return revs, revs[0]
130 114
131 115
132 schemes = {
116 def _isfile(path):
117 try:
118 # we use os.stat() directly here instead of os.path.isfile()
119 # because the latter started returning `False` on invalid path
120 # exceptions starting in 3.8 and we care about handling
121 # invalid paths specially here.
122 st = os.stat(path)
123 except ValueError as e:
124 msg = stringutil.forcebytestr(e)
125 raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
126 except OSError:
127 return False
128 else:
129 return stat.S_ISREG(st.st_mode)
130
131
132 class LocalFactory:
133 """thin wrapper to dispatch between localrepo and bundle repo"""
134
135 @staticmethod
136 def islocal(path: bytes) -> bool:
137 path = util.expandpath(urlutil.urllocalpath(path))
138 return not _isfile(path)
139
140 @staticmethod
141 def instance(ui, path, *args, **kwargs):
142 path = util.expandpath(urlutil.urllocalpath(path))
143 if _isfile(path):
144 cls = bundlerepo
145 else:
146 cls = localrepo
147 return cls.instance(ui, path, *args, **kwargs)
148
149
150 repo_schemes = {
133 151 b'bundle': bundlerepo,
134 152 b'union': unionrepo,
135 b'file': _local,
153 b'file': LocalFactory,
154 }
155
156 peer_schemes = {
136 157 b'http': httppeer,
137 158 b'https': httppeer,
138 159 b'ssh': sshpeer,
@@ -140,27 +161,23 schemes = {
140 161 }
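For orientation, a minimal sketch (the helper name ``_lookup_class`` is
hypothetical) of how the two tables above drive scheme dispatch, mirroring the
lookup logic in the functions below::

    def _lookup_class(path):
        # scheme-less paths are treated as b'file', i.e. LocalFactory
        scheme = urlutil.url(path).scheme or b'file'
        if scheme in peer_schemes:
            return peer_schemes[scheme]  # e.g. httppeer for b'https'
        return repo_schemes.get(scheme, LocalFactory)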
141 162
142 163
143 def _peerlookup(path):
144 u = urlutil.url(path)
145 scheme = u.scheme or b'file'
146 thing = schemes.get(scheme) or schemes[b'file']
147 try:
148 return thing(path)
149 except TypeError:
150 # we can't test callable(thing) because 'thing' can be an unloaded
151 # module that implements __call__
152 if not util.safehasattr(thing, b'instance'):
153 raise
154 return thing
155
156
157 164 def islocal(repo):
158 165 '''return true if repo (or path pointing to repo) is local'''
159 166 if isinstance(repo, bytes):
160 try:
161 return _peerlookup(repo).islocal(repo)
162 except AttributeError:
163 return False
167 u = urlutil.url(repo)
168 scheme = u.scheme or b'file'
169 if scheme in peer_schemes:
170 cls = peer_schemes[scheme]
171 cls.make_peer # make sure we load the module
172 elif scheme in repo_schemes:
173 cls = repo_schemes[scheme]
174 cls.instance # make sure we load the module
175 else:
176 cls = LocalFactory
177 if util.safehasattr(cls, 'islocal'):
178 return cls.islocal(repo) # pytype: disable=module-attr
179 return False
180 repo.ui.deprecwarn(b"use obj.local() instead of islocal(obj)", b"6.4")
164 181 return repo.local()
165 182
166 183
@@ -177,13 +194,7 def openpath(ui, path, sendaccept=True):
177 194 wirepeersetupfuncs = []
178 195
179 196
180 def _peerorrepo(
181 ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
182 ):
183 """return a repository object for the specified path"""
184 obj = _peerlookup(path).instance(
185 ui, path, create, intents=intents, createopts=createopts
186 )
197 def _setup_repo_or_peer(ui, obj, presetupfuncs=None):
187 198 ui = getattr(obj, "ui", ui)
188 199 for f in presetupfuncs or []:
189 200 f(ui, obj)
@@ -195,14 +206,12 def _peerorrepo(
195 206 if hook:
196 207 with util.timedcm('reposetup %r', name) as stats:
197 208 hook(ui, obj)
198 ui.log(
199 b'extension', b' > reposetup for %s took %s\n', name, stats
200 )
209 msg = b' > reposetup for %s took %s\n'
210 ui.log(b'extension', msg, name, stats)
201 211 ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
202 212 if not obj.local():
203 213 for f in wirepeersetupfuncs:
204 214 f(ui, obj)
205 return obj
206 215
207 216
208 217 def repository(
@@ -214,28 +223,59 def repository(
214 223 createopts=None,
215 224 ):
216 225 """return a repository object for the specified path"""
217 peer = _peerorrepo(
226 scheme = urlutil.url(path).scheme
227 if scheme is None:
228 scheme = b'file'
229 cls = repo_schemes.get(scheme)
230 if cls is None:
231 if scheme in peer_schemes:
232 raise error.Abort(_(b"repository '%s' is not local") % path)
233 cls = LocalFactory
234 repo = cls.instance(
218 235 ui,
219 236 path,
220 237 create,
221 presetupfuncs=presetupfuncs,
222 238 intents=intents,
223 239 createopts=createopts,
224 240 )
225 repo = peer.local()
226 if not repo:
227 raise error.Abort(
228 _(b"repository '%s' is not local") % (path or peer.url())
229 )
241 _setup_repo_or_peer(ui, repo, presetupfuncs=presetupfuncs)
230 242 return repo.filtered(b'visible')
231 243
232 244
233 245 def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
234 246 '''return a repository peer for the specified path'''
247 ui = getattr(uiorrepo, 'ui', uiorrepo)
235 248 rui = remoteui(uiorrepo, opts)
236 return _peerorrepo(
237 rui, path, create, intents=intents, createopts=createopts
238 ).peer()
249 if util.safehasattr(path, 'url'):
250 # this is already a urlutil.path object
251 peer_path = path
252 else:
253 peer_path = urlutil.path(ui, None, rawloc=path, validate_path=False)
254 scheme = peer_path.url.scheme # pytype: disable=attribute-error
255 if scheme in peer_schemes:
256 cls = peer_schemes[scheme]
257 peer = cls.make_peer(
258 rui,
259 peer_path,
260 create,
261 intents=intents,
262 createopts=createopts,
263 )
264 _setup_repo_or_peer(rui, peer)
265 else:
266 # this is a repository
267 repo_path = peer_path.loc # pytype: disable=attribute-error
268 if not repo_path:
269 repo_path = peer_path.rawloc # pytype: disable=attribute-error
270 repo = repository(
271 rui,
272 repo_path,
273 create,
274 intents=intents,
275 createopts=createopts,
276 )
277 peer = repo.peer(path=peer_path)
278 return peer
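A hedged usage sketch of the two branches above (the URL and path are
hypothetical): a recognized peer scheme goes through ``make_peer``, anything
else is opened as a local repository and wrapped by ``repo.peer()``::

    remote = peer(ui, {}, b'https://hg.example.org/repo')  # httppeer.make_peer(...)
    local = peer(ui, {}, b'/srv/hg/repo')  # repository(...).peer(path=peer_path)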
239 279
240 280
241 281 def defaultdest(source):
@@ -290,17 +330,23 def share(
290 330 ):
291 331 '''create a shared repository'''
292 332
293 if not islocal(source):
294 raise error.Abort(_(b'can only share local repositories'))
333 not_local_msg = _(b'can only share local repositories')
334 if util.safehasattr(source, 'local'):
335 if source.local() is None:
336 raise error.Abort(not_local_msg)
337 elif not islocal(source):
338         # XXX why are we getting bytes here?
339 raise error.Abort(not_local_msg)
295 340
296 341 if not dest:
297 342 dest = defaultdest(source)
298 343 else:
299 dest = urlutil.get_clone_path(ui, dest)[1]
344 dest = urlutil.get_clone_path_obj(ui, dest).loc
300 345
301 346 if isinstance(source, bytes):
302 origsource, source, branches = urlutil.get_clone_path(ui, source)
303 srcrepo = repository(ui, source)
347 source_path = urlutil.get_clone_path_obj(ui, source)
348 srcrepo = repository(ui, source_path.loc)
349 branches = (source_path.branch, [])
304 350 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
305 351 else:
306 352 srcrepo = source.local()
@@ -661,12 +707,23 def clone(
661 707 """
662 708
663 709 if isinstance(source, bytes):
664 src = urlutil.get_clone_path(ui, source, branch)
665 origsource, source, branches = src
666 srcpeer = peer(ui, peeropts, source)
710 src_path = urlutil.get_clone_path_obj(ui, source)
711 if src_path is None:
712 srcpeer = peer(ui, peeropts, b'')
713 origsource = source = b''
714 branches = (None, branch or [])
715 else:
716 srcpeer = peer(ui, peeropts, src_path)
717 origsource = src_path.rawloc
718 branches = (src_path.branch, branch or [])
719 source = src_path.loc
667 720 else:
668 srcpeer = source.peer() # in case we were called with a localrepo
721 if util.safehasattr(source, 'peer'):
722 srcpeer = source.peer() # in case we were called with a localrepo
723 else:
724 srcpeer = source
669 725 branches = (None, branch or [])
726         # XXX path: simply use the peer `path` object when this becomes available
670 727 origsource = source = srcpeer.url()
671 728 srclock = destlock = destwlock = cleandir = None
672 729 destpeer = None
@@ -678,7 +735,11 def clone(
678 735 if dest:
679 736 ui.status(_(b"destination directory: %s\n") % dest)
680 737 else:
681 dest = urlutil.get_clone_path(ui, dest)[0]
738 dest_path = urlutil.get_clone_path_obj(ui, dest)
739 if dest_path is not None:
740 dest = dest_path.rawloc
741 else:
742 dest = b''
682 743
683 744 dest = urlutil.urllocalpath(dest)
684 745 source = urlutil.urllocalpath(source)
@@ -1271,23 +1332,28 def _incoming(
1271 1332 msg %= len(srcs)
1272 1333 raise error.Abort(msg)
1273 1334 path = srcs[0]
1274 source, branches = urlutil.parseurl(path.rawloc, opts.get(b'branch'))
1275 if subpath is not None:
1335 if subpath is None:
1336 peer_path = path
1337 url = path.loc
1338 else:
1339 # XXX path: we are losing the `path` object here. Keeping it would be
1340         # valuable; for example, as a "variant", as we do for pushes.
1276 1341 subpath = urlutil.url(subpath)
1277 1342 if subpath.isabs():
1278 source = bytes(subpath)
1343 peer_path = url = bytes(subpath)
1279 1344 else:
1280 p = urlutil.url(source)
1345 p = urlutil.url(path.loc)
1281 1346 if p.islocal():
1282 1347 normpath = os.path.normpath
1283 1348 else:
1284 1349 normpath = posixpath.normpath
1285 1350 p.path = normpath(b'%s/%s' % (p.path, subpath))
1286 source = bytes(p)
1287 other = peer(repo, opts, source)
1351 peer_path = url = bytes(p)
1352 other = peer(repo, opts, peer_path)
1288 1353 cleanupfn = other.close
1289 1354 try:
1290 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
1355 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(url))
1356 branches = (path.branch, opts.get(b'branch', []))
1291 1357 revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))
1292 1358
1293 1359 if revs:
@@ -1346,7 +1412,7 def _outgoing(ui, repo, dests, opts, sub
1346 1412 out = set()
1347 1413 others = []
1348 1414 for path in urlutil.get_push_paths(repo, ui, dests):
1349 dest = path.pushloc or path.loc
1415 dest = path.loc
1350 1416 if subpath is not None:
1351 1417 subpath = urlutil.url(subpath)
1352 1418 if subpath.isabs():
@@ -230,8 +230,9 class requestcontext:
230 230
231 231 def sendtemplate(self, name, **kwargs):
232 232 """Helper function to send a response generated from a template."""
233 kwargs = pycompat.byteskwargs(kwargs)
234 self.res.setbodygen(self.tmpl.generate(name, kwargs))
233 if self.req.method != b'HEAD':
234 kwargs = pycompat.byteskwargs(kwargs)
235 self.res.setbodygen(self.tmpl.generate(name, kwargs))
235 236 return self.res.sendresponse()
236 237
237 238
@@ -485,6 +485,7 class wsgiresponse:
485 485 self._bodybytes is None
486 486 and self._bodygen is None
487 487 and not self._bodywillwrite
488 and self._req.method != b'HEAD'
488 489 ):
489 490 raise error.ProgrammingError(b'response body not defined')
490 491
@@ -594,6 +595,8 class wsgiresponse:
594 595 yield chunk
595 596 elif self._bodywillwrite:
596 597 self._bodywritefn = write
598 elif self._req.method == b'HEAD':
599 pass
597 600 else:
598 601             raise error.ProgrammingError(b'do not know how to send body')
599 602
@@ -151,6 +151,9 class _httprequesthandler(httpservermod.
151 151 def do_GET(self):
152 152 self.do_POST()
153 153
154 def do_HEAD(self):
155 self.do_POST()
156
154 157 def do_hgweb(self):
155 158 self.sent_headers = False
156 159 path, query = _splitURI(self.path)
@@ -246,7 +249,11 class _httprequesthandler(httpservermod.
246 249 self.send_header(*h)
247 250 if h[0].lower() == 'content-length':
248 251 self.length = int(h[1])
249 if self.length is None and saved_status[0] != common.HTTP_NOT_MODIFIED:
252 if (
253 self.length is None
254 and saved_status[0] != common.HTTP_NOT_MODIFIED
255 and self.command != 'HEAD'
256 ):
250 257 self._chunked = (
251 258 not self.close_connection and self.request_version == 'HTTP/1.1'
252 259 )
@@ -1299,6 +1299,9 def archive(web):
1299 1299 b'sendresponse() should not emit data if writing later'
1300 1300 )
1301 1301
1302 if web.req.method == b'HEAD':
1303 return []
1304
1302 1305 bodyfh = web.res.getbodyfile()
1303 1306
1304 1307 archival.archive(
@@ -382,8 +382,7 def parsev1commandresponse(ui, baseurl,
382 382
383 383 class httppeer(wireprotov1peer.wirepeer):
384 384 def __init__(self, ui, path, url, opener, requestbuilder, caps):
385 self.ui = ui
386 self._path = path
385 super().__init__(ui, path=path)
387 386 self._url = url
388 387 self._caps = caps
389 388 self.limitedarguments = caps is not None and b'httppostargs' not in caps
@@ -398,14 +397,11 class httppeer(wireprotov1peer.wirepeer)
398 397 # Begin of ipeerconnection interface.
399 398
400 399 def url(self):
401 return self._path
400 return self.path.loc
402 401
403 402 def local(self):
404 403 return None
405 404
406 def peer(self):
407 return self
408
409 405 def canpush(self):
410 406 return True
411 407
@@ -605,14 +601,13 def makepeer(ui, path, opener=None, requ
605 601 ``requestbuilder`` is the type used for constructing HTTP requests.
606 602 It exists as an argument so extensions can override the default.
607 603 """
608 u = urlutil.url(path)
609 if u.query or u.fragment:
610 raise error.Abort(
611 _(b'unsupported URL component: "%s"') % (u.query or u.fragment)
612 )
604 if path.url.query or path.url.fragment:
605 msg = _(b'unsupported URL component: "%s"')
606 msg %= path.url.query or path.url.fragment
607 raise error.Abort(msg)
613 608
614 609 # urllib cannot handle URLs with embedded user or passwd.
615 url, authinfo = u.authinfo()
610 url, authinfo = path.url.authinfo()
616 611 ui.debug(b'using %s\n' % url)
617 612
618 613 opener = opener or urlmod.opener(ui, authinfo)
@@ -624,11 +619,11 def makepeer(ui, path, opener=None, requ
624 619 )
625 620
626 621
627 def instance(ui, path, create, intents=None, createopts=None):
622 def make_peer(ui, path, create, intents=None, createopts=None):
628 623 if create:
629 624 raise error.Abort(_(b'cannot create new http repository'))
630 625 try:
631 if path.startswith(b'https:') and not urlmod.has_https:
626 if path.url.scheme == b'https' and not urlmod.has_https:
632 627 raise error.Abort(
633 628 _(b'Python support for SSL and HTTPS is not installed')
634 629 )
@@ -638,7 +633,7 def instance(ui, path, create, intents=N
638 633 return inst
639 634 except error.RepoError as httpexception:
640 635 try:
641 r = statichttprepo.instance(ui, b"static-" + path, create)
636 r = statichttprepo.make_peer(ui, b"static-" + path.loc, create)
642 637 ui.note(_(b'(falling back to static-http)\n'))
643 638 return r
644 639 except error.RepoError:
@@ -12,6 +12,7 class idirstate(interfaceutil.Interface)
12 12 sparsematchfn,
13 13 nodeconstants,
14 14 use_dirstate_v2,
15 use_tracked_hint=False,
15 16 ):
16 17 """Create a new dirstate object.
17 18
@@ -23,6 +24,15 class idirstate(interfaceutil.Interface)
23 24 # TODO: all these private methods and attributes should be made
24 25 # public or removed from the interface.
25 26 _ignore = interfaceutil.Attribute("""Matcher for ignored files.""")
27     is_changing_any = interfaceutil.Attribute(
28         """True if any change is in progress."""
29     )
30     is_changing_parents = interfaceutil.Attribute(
31         """True if a parent change is in progress."""
32     )
33     is_changing_files = interfaceutil.Attribute(
34         """True if a change to the tracked file set is in progress."""
35     )
26 36
27 37 def _ignorefiles():
28 38 """Return a list of files containing patterns to ignore."""
@@ -34,7 +44,7 class idirstate(interfaceutil.Interface)
34 44 _checkexec = interfaceutil.Attribute("""Callable for checking exec bits.""")
35 45
36 46 @contextlib.contextmanager
37 def parentchange():
47 def changing_parents(repo):
38 48 """Context manager for handling dirstate parents.
39 49
40 50 If an exception occurs in the scope of the context manager,
@@ -42,16 +52,26 class idirstate(interfaceutil.Interface)
42 52 released.
43 53 """
44 54
45 def pendingparentchange():
46 """Returns true if the dirstate is in the middle of a set of changes
47 that modify the dirstate parent.
55 @contextlib.contextmanager
56 def changing_files(repo):
57 """Context manager for handling dirstate files.
58
59 If an exception occurs in the scope of the context manager,
60 the incoherent dirstate won't be written when wlock is
61 released.
48 62 """
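A hedged usage sketch of this context-manager API (the file name is
hypothetical, and we assume the dirstate's ``set_tracked`` mutator)::

    with repo.dirstate.changing_files(repo):
        repo.dirstate.set_tracked(b'newly-added-file')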
49 63
50 64 def hasdir(d):
51 65 pass
52 66
53 67 def flagfunc(buildfallback):
54 pass
68 """build a callable that returns flags associated with a filename
69
70 The information is extracted from three possible layers:
71 1. the file system if it supports the information
72 2. the "fallback" information stored in the dirstate if any
73 3. a more expensive mechanism inferring the flags from the parents.
74 """
55 75
56 76 def getcwd():
57 77 """Return the path from which a canonical path is calculated.
@@ -61,12 +81,12 class idirstate(interfaceutil.Interface)
61 81 used to get real file paths. Use vfs functions instead.
62 82 """
63 83
84 def pathto(f, cwd=None):
85 pass
86
64 87 def get_entry(path):
65 88 """return a DirstateItem for the associated path"""
66 89
67 def pathto(f, cwd=None):
68 pass
69
70 90 def __contains__(key):
71 91 """Check if bytestring `key` is known to the dirstate."""
72 92
@@ -96,7 +116,7 class idirstate(interfaceutil.Interface)
96 116 def setparents(p1, p2=None):
97 117 """Set dirstate parents to p1 and p2.
98 118
99 When moving from two parents to one, 'm' merged entries a
119         When moving from two parents to one, "merged" entries are
100 120 adjusted to normal and previous copy records discarded and
101 121 returned by the call.
102 122
@@ -147,7 +167,7 class idirstate(interfaceutil.Interface)
147 167 pass
148 168
149 169 def identity():
150 """Return identity of dirstate it to detect changing in storage
170         """Return identity of dirstate itself to detect changes in storage
151 171
152 172 If identity of previous dirstate is equal to this, writing
153 173 changes based on the former dirstate out can keep consistency.
@@ -200,11 +220,7 class idirstate(interfaceutil.Interface)
200 220 return files in the dirstate (in whatever state) filtered by match
201 221 """
202 222
203 def savebackup(tr, backupname):
204 '''Save current dirstate into backup file'''
205
206 def restorebackup(tr, backupname):
207 '''Restore dirstate by backup file'''
208
209 def clearbackup(tr, backupname):
210 '''Clear backup file'''
223 def verify(m1, m2, p1, narrow_matcher=None):
224 """
225 check the dirstate contents against the parent manifest and yield errors
226 """
@@ -103,6 +103,7 class ipeerconnection(interfaceutil.Inte
103 103 """
104 104
105 105 ui = interfaceutil.Attribute("""ui.ui instance""")
106 path = interfaceutil.Attribute("""a urlutil.path instance or None""")
106 107
107 108 def url():
108 109 """Returns a URL string representing this peer.
@@ -123,12 +124,6 class ipeerconnection(interfaceutil.Inte
123 124 can be used to interface with it. Otherwise returns ``None``.
124 125 """
125 126
126 def peer():
127 """Returns an object conforming to this interface.
128
129 Most implementations will ``return self``.
130 """
131
132 127 def canpush():
133 128 """Returns a boolean indicating if this peer can be pushed to."""
134 129
@@ -393,6 +388,10 class peer:
393 388
394 389 limitedarguments = False
395 390
391 def __init__(self, ui, path=None):
392 self.ui = ui
393 self.path = path
394
396 395 def capable(self, name):
397 396 caps = self.capabilities()
398 397 if name in caps:
@@ -1613,7 +1612,7 class ilocalrepositorymain(interfaceutil
1613 1612 def close():
1614 1613 """Close the handle on this repository."""
1615 1614
1616 def peer():
1615 def peer(path=None):
1617 1616 """Obtain an object conforming to the ``peer`` interface."""
1618 1617
1619 1618 def unfiltered():
@@ -10,11 +10,16
10 10 import functools
11 11 import os
12 12 import random
13 import re
13 14 import sys
14 15 import time
15 16 import weakref
16 17
17 18 from concurrent import futures
19 from typing import (
20 Optional,
21 )
22
18 23 from .i18n import _
19 24 from .node import (
20 25 bin,
@@ -37,7 +42,6 from . import (
37 42 commit,
38 43 context,
39 44 dirstate,
40 dirstateguard,
41 45 discovery,
42 46 encoding,
43 47 error,
@@ -96,6 +100,8 release = lockmod.release
96 100 urlerr = util.urlerr
97 101 urlreq = util.urlreq
98 102
103 RE_SKIP_DIRSTATE_ROLLBACK = re.compile(b"^(dirstate|narrowspec.dirstate).*")
104
99 105 # set of (path, vfs-location) tuples. vfs-location is:
100 106 # - 'plain' for vfs relative paths
101 107 # - '' for svfs relative paths
@@ -299,13 +305,12 class localcommandexecutor:
299 305 class localpeer(repository.peer):
300 306 '''peer for a local repo; reflects only the most recent API'''
301 307
302 def __init__(self, repo, caps=None):
303 super(localpeer, self).__init__()
308 def __init__(self, repo, caps=None, path=None):
309 super(localpeer, self).__init__(repo.ui, path=path)
304 310
305 311 if caps is None:
306 312 caps = moderncaps.copy()
307 313 self._repo = repo.filtered(b'served')
308 self.ui = repo.ui
309 314
310 315 if repo._wanted_sidedata:
311 316 formatted = bundle2.format_remote_wanted_sidedata(repo)
@@ -321,9 +326,6 class localpeer(repository.peer):
321 326 def local(self):
322 327 return self._repo
323 328
324 def peer(self):
325 return self
326
327 329 def canpush(self):
328 330 return True
329 331
@@ -451,8 +453,8 class locallegacypeer(localpeer):
451 453 """peer extension which implements legacy methods too; used for tests with
452 454 restricted capabilities"""
453 455
454 def __init__(self, repo):
455 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
456 def __init__(self, repo, path=None):
457 super(locallegacypeer, self).__init__(repo, caps=legacycaps, path=path)
456 458
457 459 # Begin of baselegacywirecommands interface.
458 460
@@ -526,7 +528,7 def _readrequires(vfs, allowmissing):
526 528 return set(read(b'requires').splitlines())
527 529
528 530
529 def makelocalrepository(baseui, path, intents=None):
531 def makelocalrepository(baseui, path: bytes, intents=None):
530 532 """Create a local repository object.
531 533
532 534 Given arguments needed to construct a local repository, this function
@@ -612,7 +614,6 def makelocalrepository(baseui, path, in
612 614 # to be reshared
613 615 hint = _(b"see `hg help config.format.use-share-safe` for more information")
614 616 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
615
616 617 if (
617 618 shared
618 619 and requirementsmod.SHARESAFE_REQUIREMENT
@@ -845,7 +846,13 def makelocalrepository(baseui, path, in
845 846 )
846 847
847 848
848 def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
849 def loadhgrc(
850 ui,
851 wdirvfs: vfsmod.vfs,
852 hgvfs: vfsmod.vfs,
853 requirements,
854 sharedvfs: Optional[vfsmod.vfs] = None,
855 ):
849 856 """Load hgrc files/content into a ui instance.
850 857
851 858 This is called during repository opening to load any additional
@@ -1058,6 +1065,8 def resolverevlogstorevfsoptions(ui, req
1058 1065 options[b'revlogv2'] = True
1059 1066 if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
1060 1067 options[b'changelogv2'] = True
1068 cmp_rank = ui.configbool(b'experimental', b'changelog-v2.compute-rank')
1069 options[b'changelogv2.compute-rank'] = cmp_rank
1061 1070
1062 1071 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1063 1072 options[b'generaldelta'] = True
@@ -1071,6 +1080,11 def resolverevlogstorevfsoptions(ui, req
1071 1080 b'storage', b'revlog.optimize-delta-parent-choice'
1072 1081 )
1073 1082 options[b'deltabothparents'] = deltabothparents
1083 dps_cgds = ui.configint(
1084 b'storage',
1085 b'revlog.delta-parent-search.candidate-group-chunk-size',
1086 )
1087 options[b'delta-parent-search.candidate-group-chunk-size'] = dps_cgds
1074 1088 options[b'debug-delta'] = ui.configbool(b'debug', b'revlog.debug-delta')
1075 1089
1076 1090 issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
@@ -1311,8 +1325,6 class localrepository:
1311 1325         # XXX cache is a complicated business; someone
1312 1326 # should investigate this in depth at some point
1313 1327 b'cache/',
1314 # XXX shouldn't be dirstate covered by the wlock?
1315 b'dirstate',
1316 1328 # XXX bisect was still a bit too messy at the time
1317 1329 # this changeset was introduced. Someone should fix
1318 1330 # the remainig bit and drop this line
@@ -1323,15 +1335,15 class localrepository:
1323 1335 self,
1324 1336 baseui,
1325 1337 ui,
1326 origroot,
1327 wdirvfs,
1328 hgvfs,
1338 origroot: bytes,
1339 wdirvfs: vfsmod.vfs,
1340 hgvfs: vfsmod.vfs,
1329 1341 requirements,
1330 1342 supportedrequirements,
1331 sharedpath,
1343 sharedpath: bytes,
1332 1344 store,
1333 cachevfs,
1334 wcachevfs,
1345 cachevfs: vfsmod.vfs,
1346 wcachevfs: vfsmod.vfs,
1335 1347 features,
1336 1348 intents=None,
1337 1349 ):
@@ -1453,6 +1465,7 class localrepository:
1453 1465 # - bookmark changes
1454 1466 self.filteredrevcache = {}
1455 1467
1468 self._dirstate = None
1456 1469 # post-dirstate-status hooks
1457 1470 self._postdsstatus = []
1458 1471
@@ -1620,8 +1633,8 class localrepository:
1620 1633 parts.pop()
1621 1634 return False
1622 1635
1623 def peer(self):
1624 return localpeer(self) # not cached to avoid reference cycle
1636 def peer(self, path=None):
1637 return localpeer(self, path=path) # not cached to avoid reference cycle
1625 1638
1626 1639 def unfiltered(self):
1627 1640 """Return unfiltered version of the repository
@@ -1738,9 +1751,13 class localrepository:
1738 1751 def manifestlog(self):
1739 1752 return self.store.manifestlog(self, self._storenarrowmatch)
1740 1753
1741 @repofilecache(b'dirstate')
1754 @unfilteredpropertycache
1742 1755 def dirstate(self):
1743 return self._makedirstate()
1756 if self._dirstate is None:
1757 self._dirstate = self._makedirstate()
1758 else:
1759 self._dirstate.refresh()
1760 return self._dirstate
1744 1761
1745 1762 def _makedirstate(self):
1746 1763 """Extension point for wrapping the dirstate per-repo."""
@@ -1977,7 +1994,7 class localrepository:
1977 1994 def __iter__(self):
1978 1995 return iter(self.changelog)
1979 1996
1980 def revs(self, expr, *args):
1997 def revs(self, expr: bytes, *args):
1981 1998 """Find revisions matching a revset.
1982 1999
1983 2000 The revset is specified as a string ``expr`` that may contain
@@ -1993,7 +2010,7 class localrepository:
1993 2010 tree = revsetlang.spectree(expr, *args)
1994 2011 return revset.makematcher(tree)(self)
1995 2012
1996 def set(self, expr, *args):
2013 def set(self, expr: bytes, *args):
1997 2014 """Find revisions matching a revset and emit changectx instances.
1998 2015
1999 2016 This is a convenience wrapper around ``revs()`` that iterates the
@@ -2005,7 +2022,7 class localrepository:
2005 2022 for r in self.revs(expr, *args):
2006 2023 yield self[r]
2007 2024
2008 def anyrevs(self, specs, user=False, localalias=None):
2025 def anyrevs(self, specs: bytes, user=False, localalias=None):
2009 2026 """Find revisions matching one of the given revsets.
2010 2027
2011 2028 Revset aliases from the configuration are not expanded by default. To
@@ -2030,7 +2047,7 class localrepository:
2030 2047 m = revset.matchany(None, specs, localalias=localalias)
2031 2048 return m(self)
2032 2049
2033 def url(self):
2050 def url(self) -> bytes:
2034 2051 return b'file:' + self.root
2035 2052
2036 2053 def hook(self, name, throw=False, **args):
@@ -2108,7 +2125,7 class localrepository:
2108 2125 # writing to the cache), but the rest of Mercurial wants them in
2109 2126 # local encoding.
2110 2127 tags = {}
2111 for (name, (node, hist)) in alltags.items():
2128 for name, (node, hist) in alltags.items():
2112 2129 if node != self.nullid:
2113 2130 tags[encoding.tolocal(name)] = node
2114 2131 tags[b'tip'] = self.changelog.tip()
@@ -2229,7 +2246,7 class localrepository:
2229 2246 return b'store'
2230 2247 return None
2231 2248
2232 def wjoin(self, f, *insidef):
2249 def wjoin(self, f: bytes, *insidef: bytes) -> bytes:
2233 2250 return self.vfs.reljoin(self.root, f, *insidef)
2234 2251
2235 2252 def setparents(self, p1, p2=None):
@@ -2238,17 +2255,17 class localrepository:
2238 2255 self[None].setparents(p1, p2)
2239 2256 self._quick_access_changeid_invalidate()
2240 2257
2241 def filectx(self, path, changeid=None, fileid=None, changectx=None):
2258 def filectx(self, path: bytes, changeid=None, fileid=None, changectx=None):
2242 2259 """changeid must be a changeset revision, if specified.
2243 2260 fileid can be a file revision or node."""
2244 2261 return context.filectx(
2245 2262 self, path, changeid, fileid, changectx=changectx
2246 2263 )
2247 2264
2248 def getcwd(self):
2265 def getcwd(self) -> bytes:
2249 2266 return self.dirstate.getcwd()
2250 2267
2251 def pathto(self, f, cwd=None):
2268 def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
2252 2269 return self.dirstate.pathto(f, cwd)
2253 2270
2254 2271 def _loadfilter(self, filter):
@@ -2300,14 +2317,21 class localrepository:
2300 2317 def adddatafilter(self, name, filter):
2301 2318 self._datafilters[name] = filter
2302 2319
2303 def wread(self, filename):
2320 def wread(self, filename: bytes) -> bytes:
2304 2321 if self.wvfs.islink(filename):
2305 2322 data = self.wvfs.readlink(filename)
2306 2323 else:
2307 2324 data = self.wvfs.read(filename)
2308 2325 return self._filter(self._encodefilterpats, filename, data)
2309 2326
2310 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2327 def wwrite(
2328 self,
2329 filename: bytes,
2330 data: bytes,
2331 flags: bytes,
2332 backgroundclose=False,
2333 **kwargs
2334 ) -> int:
2311 2335 """write ``data`` into ``filename`` in the working directory
2312 2336
2313 2337 This returns length of written (maybe decoded) data.
@@ -2325,7 +2349,7 class localrepository:
2325 2349 self.wvfs.setflags(filename, False, False)
2326 2350 return len(data)
2327 2351
2328 def wwritedata(self, filename, data):
2352 def wwritedata(self, filename: bytes, data: bytes) -> bytes:
2329 2353 return self._filter(self._decodefilterpats, filename, data)
2330 2354
2331 2355 def currenttransaction(self):
@@ -2356,6 +2380,21 class localrepository:
2356 2380 hint=_(b"run 'hg recover' to clean up transaction"),
2357 2381 )
2358 2382
2383 # At that point your dirstate should be clean:
2384 #
2385 # - If you don't have the wlock, why would you still have a dirty
2386         #   dirstate?
2387         #
2388         # - If you hold the wlock, you should not be opening a transaction in
2389         #   the middle of a `dirstate.changing_*` block. The transaction needs to
2390 # be open before that and wrap the change-context.
2391 #
2392 # - If you are not within a `dirstate.changing_*` context, why is our
2393 # dirstate dirty?
2394 if self.dirstate._dirty:
2395 m = "cannot open a transaction with a dirty dirstate"
2396 raise error.ProgrammingError(m)
2397
2359 2398 idbase = b"%.40f#%f" % (random.random(), time.time())
2360 2399 ha = hex(hashutil.sha1(idbase).digest())
2361 2400 txnid = b'TXN:' + ha
@@ -2514,7 +2553,6 class localrepository:
2514 2553 # out) in this transaction
2515 2554 narrowspec.restorebackup(self, b'journal.narrowspec')
2516 2555 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2517 repo.dirstate.restorebackup(None, b'journal.dirstate')
2518 2556
2519 2557 repo.invalidate(clearfilecache=True)
2520 2558
@@ -2612,33 +2650,50 class localrepository:
2612 2650 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2613 2651 self._transref = weakref.ref(tr)
2614 2652 scmutil.registersummarycallback(self, tr, desc)
2653         # This only exists to deal with rollback's need to have viable
2654         # parents at the end of the operation. So back up viable parents at the
2655         # time of this operation.
2656         #
2657         # We only do it when the `wlock` is taken, otherwise others might be
2658         # altering the dirstate under us.
2659         #
2660         # This is really not a great way to do this (first, because we cannot
2661         # always do it). More viable alternatives exist:
2662         #
2663         # - backing up only the working copy parent in a dedicated file and
2664         #   doing a clean "keep-update" to it on `hg rollback`.
2665         #
2666         # - slightly changing the behavior and applying logic similar to "hg
2667         #   strip" to pick a working copy destination on `hg rollback`
2668 if self.currentwlock() is not None:
2669 ds = self.dirstate
2670
2671 def backup_dirstate(tr):
2672 for f in ds.all_file_names():
2673 # hardlink backup is okay because `dirstate` is always
2674 # atomically written and possible data file are append only
2675 # and resistant to trailing data.
2676 tr.addbackup(f, hardlink=True, location=b'plain')
2677
2678 tr.addvalidator(b'dirstate-backup', backup_dirstate)
2615 2679 return tr
2616 2680
2617 2681 def _journalfiles(self):
2618 first = (
2682 return (
2619 2683 (self.svfs, b'journal'),
2620 2684 (self.svfs, b'journal.narrowspec'),
2621 2685 (self.vfs, b'journal.narrowspec.dirstate'),
2622 (self.vfs, b'journal.dirstate'),
2623 )
2624 middle = []
2625 dirstate_data = self.dirstate.data_backup_filename(b'journal.dirstate')
2626 if dirstate_data is not None:
2627 middle.append((self.vfs, dirstate_data))
2628 end = (
2629 2686 (self.vfs, b'journal.branch'),
2630 2687 (self.vfs, b'journal.desc'),
2631 2688 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2632 2689 (self.svfs, b'journal.phaseroots'),
2633 2690 )
2634 return first + tuple(middle) + end
2635 2691
2636 2692 def undofiles(self):
2637 2693 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2638 2694
2639 2695 @unfilteredmethod
2640 2696 def _writejournal(self, desc):
2641 self.dirstate.savebackup(None, b'journal.dirstate')
2642 2697 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2643 2698 narrowspec.savebackup(self, b'journal.narrowspec')
2644 2699 self.vfs.write(
@@ -2673,23 +2728,23 class localrepository:
2673 2728 return False
2674 2729
2675 2730 def rollback(self, dryrun=False, force=False):
2676 wlock = lock = dsguard = None
2731 wlock = lock = None
2677 2732 try:
2678 2733 wlock = self.wlock()
2679 2734 lock = self.lock()
2680 2735 if self.svfs.exists(b"undo"):
2681 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2682
2683 return self._rollback(dryrun, force, dsguard)
2736 return self._rollback(dryrun, force)
2684 2737 else:
2685 2738 self.ui.warn(_(b"no rollback information available\n"))
2686 2739 return 1
2687 2740 finally:
2688 release(dsguard, lock, wlock)
2741 release(lock, wlock)
2689 2742
2690 2743 @unfilteredmethod # Until we get smarter cache management
2691 def _rollback(self, dryrun, force, dsguard):
2744 def _rollback(self, dryrun, force):
2692 2745 ui = self.ui
2746
2747 parents = self.dirstate.parents()
2693 2748 try:
2694 2749 args = self.vfs.read(b'undo.desc').splitlines()
2695 2750 (oldlen, desc, detail) = (int(args[0]), args[1], None)
@@ -2706,9 +2761,11 class localrepository:
2706 2761 msg = _(
2707 2762 b'repository tip rolled back to revision %d (undo %s)\n'
2708 2763 ) % (oldtip, desc)
2764 parentgone = any(self[p].rev() > oldtip for p in parents)
2709 2765 except IOError:
2710 2766 msg = _(b'rolling back unknown transaction\n')
2711 2767 desc = None
2768 parentgone = True
2712 2769
2713 2770 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2714 2771 raise error.Abort(
@@ -2723,11 +2780,18 class localrepository:
2723 2780 if dryrun:
2724 2781 return 0
2725 2782
2726 parents = self.dirstate.parents()
2727 2783 self.destroying()
2728 2784 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2785 skip_journal_pattern = None
2786 if not parentgone:
2787 skip_journal_pattern = RE_SKIP_DIRSTATE_ROLLBACK
2729 2788 transaction.rollback(
2730 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2789 self.svfs,
2790 vfsmap,
2791 b'undo',
2792 ui.warn,
2793 checkambigfiles=_cachedfiles,
2794 skip_journal_pattern=skip_journal_pattern,
2731 2795 )
2732 2796 bookmarksvfs = bookmarks.bookmarksvfs(self)
2733 2797 if bookmarksvfs.exists(b'undo.bookmarks'):
@@ -2737,16 +2801,20 class localrepository:
2737 2801 if self.svfs.exists(b'undo.phaseroots'):
2738 2802 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2739 2803 self.invalidate()
2740
2741 has_node = self.changelog.index.has_node
2742 parentgone = any(not has_node(p) for p in parents)
2804 self.dirstate.invalidate()
2805
2743 2806 if parentgone:
2744 # prevent dirstateguard from overwriting already restored one
2745 dsguard.close()
2807 # replace this with some explicit parent update in the future.
2808 has_node = self.changelog.index.has_node
2809 if not all(has_node(p) for p in self.dirstate._pl):
2810                 # There was no dirstate to back up initially; we need to drop
2811 # the existing one.
2812 with self.dirstate.changing_parents(self):
2813 self.dirstate.setparents(self.nullid)
2814 self.dirstate.clear()
2746 2815
2747 2816 narrowspec.restorebackup(self, b'undo.narrowspec')
2748 2817 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2749 self.dirstate.restorebackup(None, b'undo.dirstate')
2750 2818 try:
2751 2819 branch = self.vfs.read(b'undo.branch')
2752 2820 self.dirstate.setbranch(encoding.tolocal(branch))
@@ -2880,7 +2948,6 class localrepository:
2880 2948 filtered.branchmap().write(filtered)
2881 2949
2882 2950 def invalidatecaches(self):
2883
2884 2951 if '_tagscache' in vars(self):
2885 2952 # can't use delattr on proxy
2886 2953 del self.__dict__['_tagscache']
@@ -2903,13 +2970,9 class localrepository:
2903 2970 rereads the dirstate. Use dirstate.invalidate() if you want to
2904 2971 explicitly read the dirstate again (i.e. restoring it to a previous
2905 2972 known good state)."""
2906 if hasunfilteredcache(self, 'dirstate'):
2907 for k in self.dirstate._filecache:
2908 try:
2909 delattr(self.dirstate, k)
2910 except AttributeError:
2911 pass
2912 delattr(self.unfiltered(), 'dirstate')
2973 unfi = self.unfiltered()
2974 if 'dirstate' in unfi.__dict__:
2975 del unfi.__dict__['dirstate']
2913 2976
2914 2977 def invalidate(self, clearfilecache=False):
2915 2978 """Invalidates both store and non-store parts other than dirstate
@@ -2921,9 +2984,6 class localrepository:
2921 2984 """
2922 2985 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2923 2986 for k in list(self._filecache.keys()):
2924 # dirstate is invalidated separately in invalidatedirstate()
2925 if k == b'dirstate':
2926 continue
2927 2987 if (
2928 2988 k == b'changelog'
2929 2989 and self.currenttransaction()
@@ -3052,12 +3112,19 class localrepository:
3052 3112 self.ui.develwarn(b'"wlock" acquired after "lock"')
3053 3113
3054 3114 def unlock():
3055 if self.dirstate.pendingparentchange():
3115 if self.dirstate.is_changing_any:
3116 msg = b"wlock release in the middle of a changing parents"
3117 self.ui.develwarn(msg)
3056 3118 self.dirstate.invalidate()
3057 3119 else:
3120 if self.dirstate._dirty:
3121 msg = b"dirty dirstate on wlock release"
3122 self.ui.develwarn(msg)
3058 3123 self.dirstate.write(None)
3059 3124
3060 self._filecache[b'dirstate'].refresh()
3125 unfi = self.unfiltered()
3126 if 'dirstate' in unfi.__dict__:
3127 del unfi.__dict__['dirstate']
3061 3128
3062 3129 l = self._lock(
3063 3130 self.vfs,
@@ -3520,14 +3587,13 def aftertrans(files):
3520 3587 return a
3521 3588
3522 3589
3523 def undoname(fn):
3590 def undoname(fn: bytes) -> bytes:
3524 3591 base, name = os.path.split(fn)
3525 3592 assert name.startswith(b'journal')
3526 3593 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3527 3594
3528 3595
3529 def instance(ui, path, create, intents=None, createopts=None):
3530
3596 def instance(ui, path: bytes, create, intents=None, createopts=None):
3531 3597 # prevent cyclic import localrepo -> upgrade -> localrepo
3532 3598 from . import upgrade
3533 3599
@@ -3543,7 +3609,7 def instance(ui, path, create, intents=N
3543 3609 return repo
3544 3610
3545 3611
3546 def islocal(path):
3612 def islocal(path: bytes) -> bool:
3547 3613 return True
3548 3614
3549 3615
@@ -3803,7 +3869,7 def filterknowncreateopts(ui, createopts
3803 3869 return {k: v for k, v in createopts.items() if k not in known}
3804 3870
3805 3871
3806 def createrepository(ui, path, createopts=None, requirements=None):
3872 def createrepository(ui, path: bytes, createopts=None, requirements=None):
3807 3873 """Create a new repository in a vfs.
3808 3874
3809 3875 ``path`` path to the new repo's working directory.
@@ -113,7 +113,7 def activepath(repo, remote):
113 113 if local:
114 114 rpath = util.pconvert(remote._repo.root)
115 115 elif not isinstance(remote, bytes):
116 rpath = remote._url
116 rpath = remote.url()
117 117
118 118 # represent the remotepath with user defined path name if exists
119 119 for path, url in repo.ui.configitems(b'paths'):
@@ -1836,6 +1836,7 class manifestrevlog:
1836 1836 assumehaveparentrevisions=False,
1837 1837 deltamode=repository.CG_DELTAMODE_STD,
1838 1838 sidedata_helpers=None,
1839 debug_info=None,
1839 1840 ):
1840 1841 return self._revlog.emitrevisions(
1841 1842 nodes,
@@ -1844,6 +1845,7 class manifestrevlog:
1844 1845 assumehaveparentrevisions=assumehaveparentrevisions,
1845 1846 deltamode=deltamode,
1846 1847 sidedata_helpers=sidedata_helpers,
1848 debug_info=debug_info,
1847 1849 )
1848 1850
1849 1851 def addgroup(
@@ -1854,6 +1856,8 class manifestrevlog:
1854 1856 alwayscache=False,
1855 1857 addrevisioncb=None,
1856 1858 duplicaterevisioncb=None,
1859 debug_info=None,
1860 delta_base_reuse_policy=None,
1857 1861 ):
1858 1862 return self._revlog.addgroup(
1859 1863 deltas,
@@ -1862,6 +1866,8 class manifestrevlog:
1862 1866 alwayscache=alwayscache,
1863 1867 addrevisioncb=addrevisioncb,
1864 1868 duplicaterevisioncb=duplicaterevisioncb,
1869 debug_info=debug_info,
1870 delta_base_reuse_policy=delta_base_reuse_policy,
1865 1871 )
1866 1872
1867 1873 def rawsize(self, rev):
@@ -368,7 +368,7 def _donormalize(patterns, default, root
368 368 % (
369 369 pat,
370 370 inst.message,
371 ) # pytype: disable=unsupported-operands
371 )
372 372 )
373 373 except IOError as inst:
374 374 if warn:
@@ -94,6 +94,13 class diffopts:
94 94 opts.update(kwargs)
95 95 return diffopts(**opts)
96 96
97 def __bytes__(self):
98 return b", ".join(
99 b"%s: %r" % (k, getattr(self, k)) for k in self.defaults
100 )
101
102 __str__ = encoding.strmethod(__bytes__)
103
97 104
98 105 defaultopts = diffopts()
99 106
@@ -46,7 +46,7 def _getcheckunknownconfig(repo, section
46 46 return config
47 47
48 48
49 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
49 def _checkunknownfile(dirstate, wvfs, dircache, wctx, mctx, f, f2=None):
50 50 if wctx.isinmemory():
51 51 # Nothing to do in IMM because nothing in the "working copy" can be an
52 52 # unknown file.
@@ -58,9 +58,8 def _checkunknownfile(repo, wctx, mctx,
58 58 if f2 is None:
59 59 f2 = f
60 60 return (
61 repo.wvfs.audit.check(f)
62 and repo.wvfs.isfileorlink(f)
63 and repo.dirstate.normalize(f) not in repo.dirstate
61 wvfs.isfileorlink_checkdir(dircache, f)
62 and dirstate.normalize(f) not in dirstate
64 63 and mctx[f2].cmp(wctx[f])
65 64 )
66 65
@@ -136,6 +135,9 def _checkunknownfiles(repo, wctx, mctx,
136 135 pathconfig = repo.ui.configbool(
137 136 b'experimental', b'merge.checkpathconflicts'
138 137 )
138 dircache = dict()
139 dirstate = repo.dirstate
140 wvfs = repo.wvfs
139 141 if not force:
140 142
141 143 def collectconflicts(conflicts, config):
@@ -151,7 +153,7 def _checkunknownfiles(repo, wctx, mctx,
151 153 mergestatemod.ACTION_DELETED_CHANGED,
152 154 )
153 155 ):
154 if _checkunknownfile(repo, wctx, mctx, f):
156 if _checkunknownfile(dirstate, wvfs, dircache, wctx, mctx, f):
155 157 fileconflicts.add(f)
156 158 elif pathconfig and f not in wctx:
157 159 path = checkunknowndirs(repo, wctx, f)
@@ -160,7 +162,9 def _checkunknownfiles(repo, wctx, mctx,
160 162 for f, args, msg in mresult.getactions(
161 163 [mergestatemod.ACTION_LOCAL_DIR_RENAME_GET]
162 164 ):
163 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
165 if _checkunknownfile(
166 dirstate, wvfs, dircache, wctx, mctx, f, args[0]
167 ):
164 168 fileconflicts.add(f)
165 169
166 170 allconflicts = fileconflicts | pathconflicts
@@ -173,7 +177,9 def _checkunknownfiles(repo, wctx, mctx,
173 177 mresult.getactions([mergestatemod.ACTION_CREATED_MERGE])
174 178 ):
175 179 fl2, anc = args
176 different = _checkunknownfile(repo, wctx, mctx, f)
180 different = _checkunknownfile(
181 dirstate, wvfs, dircache, wctx, mctx, f
182 )
177 183 if repo.dirstate._ignore(f):
178 184 config = ignoredconfig
179 185 else:
@@ -240,16 +246,21 def _checkunknownfiles(repo, wctx, mctx,
240 246 else:
241 247 repo.ui.warn(_(b"%s: replacing untracked files in directory\n") % f)
242 248
243 for f, args, msg in list(
244 mresult.getactions([mergestatemod.ACTION_CREATED])
245 ):
249 def transformargs(f, args):
246 250 backup = (
247 251 f in fileconflicts
248 or f in pathconflicts
249 or any(p in pathconflicts for p in pathutil.finddirs(f))
252 or pathconflicts
253 and (
254 f in pathconflicts
255 or any(p in pathconflicts for p in pathutil.finddirs(f))
256 )
250 257 )
251 258 (flags,) = args
252 mresult.addfile(f, mergestatemod.ACTION_GET, (flags, backup), msg)
259 return (flags, backup)
260
261 mresult.mapaction(
262 mergestatemod.ACTION_CREATED, mergestatemod.ACTION_GET, transformargs
263 )
253 264
254 265
255 266 def _forgetremoved(wctx, mctx, branchmerge, mresult):
@@ -581,6 +592,18 class mergeresult:
581 592 self._filemapping[filename] = (action, data, message)
582 593 self._actionmapping[action][filename] = (data, message)
583 594
595 def mapaction(self, actionfrom, actionto, transform):
596 """changes all occurrences of action `actionfrom` into `actionto`,
597 transforming its args with the function `transform`.
598 """
599 orig = self._actionmapping[actionfrom]
600 del self._actionmapping[actionfrom]
601 dest = self._actionmapping[actionto]
602 for f, (data, msg) in orig.items():
603 data = transform(f, data)
604 self._filemapping[f] = (actionto, data, msg)
605 dest[f] = (data, msg)
606
584 607 def getfile(self, filename, default_return=None):
585 608 """returns (action, args, msg) about this file
586 609
@@ -1142,6 +1165,8 def calculateupdates(
1142 1165 followcopies,
1143 1166 )
1144 1167 _checkunknownfiles(repo, wctx, mctx, force, mresult, mergeforce)
1168 if repo.ui.configbool(b'devel', b'debug.abort-update'):
1169 exit(1)
1145 1170
1146 1171 else: # only when merge.preferancestor=* - the default
1147 1172 repo.ui.note(
@@ -2130,7 +2155,7 def _update(
2130 2155 assert len(getfiledata) == (
2131 2156 mresult.len((mergestatemod.ACTION_GET,)) if wantfiledata else 0
2132 2157 )
2133 with repo.dirstate.parentchange():
2158 with repo.dirstate.changing_parents(repo):
2134 2159 ### Filter Filedata
2135 2160 #
2136 2161 # We gathered "cache" information for the clean file while
@@ -2352,7 +2377,7 def graft(
2352 2377 # fix up dirstate for copies and renames
2353 2378 copies.graftcopies(wctx, ctx, base)
2354 2379 else:
2355 with repo.dirstate.parentchange():
2380 with repo.dirstate.changing_parents(repo):
2356 2381 repo.setparents(pctx.node(), pother)
2357 2382 repo.dirstate.write(repo.currenttransaction())
2358 2383 # fix up dirstate for copies and renames
@@ -322,10 +322,16 def updateworkingcopy(repo, assumeclean=
322 322 addedmatch = matchmod.differencematcher(newmatch, oldmatch)
323 323 removedmatch = matchmod.differencematcher(oldmatch, newmatch)
324 324
325 assert repo.currentwlock() is not None
325 326 ds = repo.dirstate
326 lookup, status, _mtime_boundary = ds.status(
327 removedmatch, subrepos=[], ignored=True, clean=True, unknown=True
328 )
327 with ds.running_status(repo):
328 lookup, status, _mtime_boundary = ds.status(
329 removedmatch,
330 subrepos=[],
331 ignored=True,
332 clean=True,
333 unknown=True,
334 )
329 335 trackeddirty = status.modified + status.added
330 336 clean = status.clean
331 337 if assumeclean:
@@ -570,22 +570,23 class workingbackend(fsbackend):
570 570 self.changed.add(fname)
571 571
572 572 def close(self):
573 wctx = self.repo[None]
574 changed = set(self.changed)
575 for src, dst in self.copied:
576 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
577 if self.removed:
578 wctx.forget(sorted(self.removed))
579 for f in self.removed:
580 if f not in self.repo.dirstate:
581 # File was deleted and no longer belongs to the
582 # dirstate, it was probably marked added then
583 # deleted, and should not be considered by
584 # marktouched().
585 changed.discard(f)
586 if changed:
587 scmutil.marktouched(self.repo, changed, self.similarity)
588 return sorted(self.changed)
573 with self.repo.dirstate.changing_files(self.repo):
574 wctx = self.repo[None]
575 changed = set(self.changed)
576 for src, dst in self.copied:
577 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
578 if self.removed:
579 wctx.forget(sorted(self.removed))
580 for f in self.removed:
581 if f not in self.repo.dirstate:
582 # File was deleted and no longer belongs to the
583 # dirstate, it was probably marked added then
584 # deleted, and should not be considered by
585 # marktouched().
586 changed.discard(f)
587 if changed:
588 scmutil.marktouched(self.repo, changed, self.similarity)
589 return sorted(self.changed)
589 590
590 591
591 592 class filestore:
@@ -4,6 +4,13 import os
4 4 import posixpath
5 5 import stat
6 6
7 from typing import (
8 Any,
9 Callable,
10 Iterator,
11 Optional,
12 )
13
7 14 from .i18n import _
8 15 from . import (
9 16 encoding,
@@ -13,15 +20,6 from . import (
13 20 util,
14 21 )
15 22
16 if pycompat.TYPE_CHECKING:
17 from typing import (
18 Any,
19 Callable,
20 Iterator,
21 Optional,
22 )
23
24
25 23 rustdirs = policy.importrust('dirstate', 'Dirs')
26 24 parsers = policy.importmod('parsers')
27 25
@@ -56,7 +54,7 class pathauditor:
56 54
57 55 def __init__(self, root, callback=None, realfs=True, cached=False):
58 56 self.audited = set()
59 self.auditeddir = set()
57 self.auditeddir = dict()
60 58 self.root = root
61 59 self._realfs = realfs
62 60 self._cached = cached
@@ -72,8 +70,7 class pathauditor:
72 70 path may contain a pattern (e.g. foodir/**.txt)"""
73 71
74 72 path = util.localpath(path)
75 normpath = self.normcase(path)
76 if normpath in self.audited:
73 if path in self.audited:
77 74 return
78 75 # AIX ignores "/" at end of path, others raise EISDIR.
79 76 if util.endswithsep(path):
@@ -90,13 +87,14 class pathauditor:
90 87 _(b"path contains illegal component: %s") % path
91 88 )
92 89 # Windows shortname aliases
93 for p in parts:
94 if b"~" in p:
95 first, last = p.split(b"~", 1)
96 if last.isdigit() and first.upper() in [b"HG", b"HG8B6C"]:
97 raise error.InputError(
98 _(b"path contains illegal component: %s") % path
99 )
90 if b"~" in path:
91 for p in parts:
92 if b"~" in p:
93 first, last = p.split(b"~", 1)
94 if last.isdigit() and first.upper() in [b"HG", b"HG8B6C"]:
95 raise error.InputError(
96 _(b"path contains illegal component: %s") % path
97 )
100 98 if b'.hg' in _lowerclean(path):
101 99 lparts = [_lowerclean(p) for p in parts]
102 100 for p in b'.hg', b'.hg.':
@@ -108,36 +106,43 class pathauditor:
108 106 % (path, pycompat.bytestr(base))
109 107 )
110 108
111 normparts = util.splitpath(normpath)
112 assert len(parts) == len(normparts)
113
114 parts.pop()
115 normparts.pop()
116 # It's important that we check the path parts starting from the root.
117 # We don't want to add "foo/bar/baz" to auditeddir before checking if
118 # there's a "foo/.hg" directory. This also means we won't accidentally
119 # traverse a symlink into some other filesystem (which is potentially
120 # expensive to access).
121 for i in range(len(parts)):
122 prefix = pycompat.ossep.join(parts[: i + 1])
123 normprefix = pycompat.ossep.join(normparts[: i + 1])
124 if normprefix in self.auditeddir:
125 continue
126 if self._realfs:
127 self._checkfs(prefix, path)
128 if self._cached:
129 self.auditeddir.add(normprefix)
109 if self._realfs:
110 # It's important that we check the path parts starting from the root.
111 # We don't want to add "foo/bar/baz" to auditeddir before checking if
112 # there's a "foo/.hg" directory. This also means we won't accidentally
113 # traverse a symlink into some other filesystem (which is potentially
114 # expensive to access).
115 for prefix in finddirs_rev_noroot(path):
116 if prefix in self.auditeddir:
117 res = self.auditeddir[prefix]
118 else:
119 res = pathauditor._checkfs_exists(
120 self.root, prefix, path, self.callback
121 )
122 if self._cached:
123 self.auditeddir[prefix] = res
124 if not res:
125 break
130 126
131 127 if self._cached:
132 self.audited.add(normpath)
128 self.audited.add(path)
133 129
134 def _checkfs(self, prefix, path):
135 # type: (bytes, bytes) -> None
136 """raise exception if a file system backed check fails"""
137 curpath = os.path.join(self.root, prefix)
130 @staticmethod
131 def _checkfs_exists(
132 root,
133 prefix: bytes,
134 path: bytes,
135 callback: Optional[Callable[[bytes], bool]] = None,
136 ):
137         """Raise an exception if a file system backed check fails.
138
139         Return a bool indicating whether the directory (or file) exists."""
140 curpath = os.path.join(root, prefix)
138 141 try:
139 142 st = os.lstat(curpath)
140 143 except OSError as err:
144 if err.errno == errno.ENOENT:
145 return False
141 146 # EINVAL can be raised as invalid path syntax under win32.
142 147 # They must be ignored for patterns can be checked too.
143 148 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
@@ -152,9 +157,10 class pathauditor:
152 157 elif stat.S_ISDIR(st.st_mode) and os.path.isdir(
153 158 os.path.join(curpath, b'.hg')
154 159 ):
155 if not self.callback or not self.callback(curpath):
160 if not callback or not callback(curpath):
156 161 msg = _(b"path '%s' is inside nested repo %r")
157 162 raise error.Abort(msg % (path, pycompat.bytestr(prefix)))
163 return True
158 164
159 165 def check(self, path):
160 166 # type: (bytes) -> bool
@@ -314,6 +320,13 def finddirs(path):
314 320 yield b''
315 321
316 322
323 def finddirs_rev_noroot(path: bytes) -> Iterator[bytes]:
324 pos = path.find(pycompat.ossep)
325 while pos != -1:
326 yield path[:pos]
327 pos = path.find(pycompat.ossep, pos + 1)
328
329
317 330 class dirs:
318 331 '''a multiset of directory names from a set of file paths'''
319 332
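A hedged sketch of what the new finddirs_rev_noroot() helper yields, restated standalone with pycompat.ossep replaced by an explicit separator: prefixes are produced root-first, and the path itself is never yielded.

from typing import Iterator

def finddirs_rev_noroot_demo(path: bytes, sep: bytes = b'/') -> Iterator[bytes]:
    # standalone restatement of finddirs_rev_noroot() for illustration
    pos = path.find(sep)
    while pos != -1:
        yield path[:pos]
        pos = path.find(sep, pos + 1)

assert list(finddirs_rev_noroot_demo(b'a/b/c')) == [b'a', b'a/b']

This root-first order is what lets the pathauditor stop at the first missing or nested-repo prefix before ever touching deeper directories.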
@@ -76,7 +76,7 def _importfrom(pkgname, modname):
76 76 ('cext', 'bdiff'): 3,
77 77 ('cext', 'mpatch'): 1,
78 78 ('cext', 'osutil'): 4,
79 ('cext', 'parsers'): 20,
79 ('cext', 'parsers'): 21,
80 80 }
81 81
82 82 # map import request to other package or module
@@ -17,8 +17,23 import select
17 17 import stat
18 18 import sys
19 19 import tempfile
20 import typing
20 21 import unicodedata
21 22
23 from typing import (
24 Any,
25 AnyStr,
26 Iterable,
27 Iterator,
28 List,
29 Match,
30 NoReturn,
31 Optional,
32 Sequence,
33 Tuple,
34 Union,
35 )
36
22 37 from .i18n import _
23 38 from .pycompat import (
24 39 getattr,
@@ -44,7 +59,7 except AttributeError:
44 59 # vaguely unix-like but don't have hardlink support. For those
45 60 # poor souls, just say we tried and that it failed so we fall back
46 61 # to copies.
47 def oslink(src, dst):
62 def oslink(src: bytes, dst: bytes) -> NoReturn:
48 63 raise OSError(
49 64 errno.EINVAL, b'hardlinks not supported: %s to %s' % (src, dst)
50 65 )
@@ -54,15 +69,47 readlink = os.readlink
54 69 unlink = os.unlink
55 70 rename = os.rename
56 71 removedirs = os.removedirs
57 expandglobs = False
72
73 if typing.TYPE_CHECKING:
74 # Replace the various overloads that come along with aliasing stdlib methods
75 # with the narrow definition that we care about in the type checking phase
76 # only. This ensures that both Windows and POSIX see only the definition
77 # that is actually available.
78 #
79 # Note that if we check pycompat.TYPE_CHECKING here, it is always False, and
80 # the methods aren't replaced.
81
82 def normpath(path: bytes) -> bytes:
83 raise NotImplementedError
84
85 def abspath(path: AnyStr) -> AnyStr:
86 raise NotImplementedError
58 87
59 umask = os.umask(0)
88 def oslink(src: bytes, dst: bytes) -> None:
89 raise NotImplementedError
90
91 def readlink(path: bytes) -> bytes:
92 raise NotImplementedError
93
94 def unlink(path: bytes) -> None:
95 raise NotImplementedError
96
97 def rename(src: bytes, dst: bytes) -> None:
98 raise NotImplementedError
99
100 def removedirs(name: bytes) -> None:
101 raise NotImplementedError
102
103
104 expandglobs: bool = False
105
106 umask: int = os.umask(0)
60 107 os.umask(umask)
61 108
62 109 posixfile = open
63 110
64 111
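The typing.TYPE_CHECKING block above follows a narrowing pattern worth seeing in isolation; a self-contained sketch (not Mercurial code):

import os
import typing

readlink = os.readlink  # the runtime binding keeps the real implementation

if typing.TYPE_CHECKING:
    # Seen only by the type checker: shadows the stdlib overloads with the
    # single bytes -> bytes signature this module actually relies on.
    def readlink(path: bytes) -> bytes:
        raise NotImplementedError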
65 def split(p):
112 def split(p: bytes) -> Tuple[bytes, bytes]:
66 113 """Same as posixpath.split, but faster
67 114
68 115 >>> import posixpath
@@ -85,17 +132,17 def split(p):
85 132 return ht[0] + b'/', ht[1]
86 133
87 134
88 def openhardlinks():
135 def openhardlinks() -> bool:
89 136 '''return true if it is safe to hold open file handles to hardlinks'''
90 137 return True
91 138
92 139
93 def nlinks(name):
140 def nlinks(name: bytes) -> int:
94 141 '''return number of hardlinks for the given file'''
95 142 return os.lstat(name).st_nlink
96 143
97 144
98 def parsepatchoutput(output_line):
145 def parsepatchoutput(output_line: bytes) -> bytes:
99 146 """parses the output produced by patch and returns the filename"""
100 147 pf = output_line[14:]
101 148 if pycompat.sysplatform == b'OpenVMS':
@@ -107,7 +154,9 def parsepatchoutput(output_line):
107 154 return pf
108 155
109 156
110 def sshargs(sshcmd, host, user, port):
157 def sshargs(
158 sshcmd: bytes, host: bytes, user: Optional[bytes], port: Optional[bytes]
159 ) -> bytes:
111 160 '''Build argument list for ssh'''
112 161 args = user and (b"%s@%s" % (user, host)) or host
113 162 if b'-' in args[:1]:
@@ -120,12 +169,12 def sshargs(sshcmd, host, user, port):
120 169 return args
121 170
122 171
123 def isexec(f):
172 def isexec(f: bytes) -> bool:
124 173 """check whether a file is executable"""
125 174 return os.lstat(f).st_mode & 0o100 != 0
126 175
127 176
128 def setflags(f, l, x):
177 def setflags(f: bytes, l: bool, x: bool) -> None:
129 178 st = os.lstat(f)
130 179 s = st.st_mode
131 180 if l:
@@ -169,7 +218,12 def setflags(f, l, x):
169 218 os.chmod(f, s & 0o666)
170 219
171 220
172 def copymode(src, dst, mode=None, enforcewritable=False):
221 def copymode(
222 src: bytes,
223 dst: bytes,
224 mode: Optional[bytes] = None,
225 enforcewritable: bool = False,
226 ) -> None:
173 227 """Copy the file mode from the file at path src to dst.
174 228 If src doesn't exist, we're using mode instead. If mode is None, we're
175 229 using umask."""
@@ -189,7 +243,7 def copymode(src, dst, mode=None, enforc
189 243 os.chmod(dst, new_mode)
190 244
191 245
192 def checkexec(path):
246 def checkexec(path: bytes) -> bool:
193 247 """
194 248 Check whether the given path is on a filesystem with UNIX-like exec flags
195 249
@@ -230,7 +284,7 def checkexec(path):
230 284 else:
231 285 # checkisexec exists, check if it actually is exec
232 286 if m & EXECFLAGS != 0:
233 # ensure checkisexec exists, check it isn't exec
287 # ensure checknoexec exists, check it isn't exec
234 288 try:
235 289 m = os.stat(checknoexec).st_mode
236 290 except FileNotFoundError:
@@ -269,7 +323,7 def checkexec(path):
269 323 return False
270 324
271 325
272 def checklink(path):
326 def checklink(path: bytes) -> bool:
273 327 """check whether the given path is on a symlink-capable filesystem"""
274 328 # mktemp is not racy because symlink creation will fail if the
275 329 # file already exists
@@ -334,13 +388,13 def checklink(path):
334 388 return False
335 389
336 390
337 def checkosfilename(path):
391 def checkosfilename(path: bytes) -> Optional[bytes]:
338 392 """Check that the base-relative path is a valid filename on this platform.
339 393 Returns None if the path is ok, or a UI string describing the problem."""
340 394 return None # on posix platforms, every path is ok
341 395
342 396
343 def getfsmountpoint(dirpath):
397 def getfsmountpoint(dirpath: bytes) -> Optional[bytes]:
344 398 """Get the filesystem mount point from a directory (best-effort)
345 399
346 400 Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc.
@@ -348,7 +402,7 def getfsmountpoint(dirpath):
348 402 return getattr(osutil, 'getfsmountpoint', lambda x: None)(dirpath)
349 403
350 404
351 def getfstype(dirpath):
405 def getfstype(dirpath: bytes) -> Optional[bytes]:
352 406 """Get the filesystem type name from a directory (best-effort)
353 407
354 408 Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc.
@@ -356,29 +410,29 def getfstype(dirpath):
356 410 return getattr(osutil, 'getfstype', lambda x: None)(dirpath)
357 411
358 412
359 def get_password():
413 def get_password() -> bytes:
360 414 return encoding.strtolocal(getpass.getpass(''))
361 415
362 416
363 def setbinary(fd):
417 def setbinary(fd) -> None:
364 418 pass
365 419
366 420
367 def pconvert(path):
421 def pconvert(path: bytes) -> bytes:
368 422 return path
369 423
370 424
371 def localpath(path):
425 def localpath(path: bytes) -> bytes:
372 426 return path
373 427
374 428
375 def samefile(fpath1, fpath2):
429 def samefile(fpath1: bytes, fpath2: bytes) -> bool:
376 430 """Returns whether path1 and path2 refer to the same file. This is only
377 431 guaranteed to work for files, not directories."""
378 432 return os.path.samefile(fpath1, fpath2)
379 433
380 434
381 def samedevice(fpath1, fpath2):
435 def samedevice(fpath1: bytes, fpath2: bytes) -> bool:
382 436 """Returns whether fpath1 and fpath2 are on the same device. This is only
383 437 guaranteed to work for files, not directories."""
384 438 st1 = os.lstat(fpath1)
@@ -387,18 +441,18 def samedevice(fpath1, fpath2):
387 441
388 442
389 443 # os.path.normcase is a no-op, which doesn't help us on non-native filesystems
390 def normcase(path):
444 def normcase(path: bytes) -> bytes:
391 445 return path.lower()
392 446
393 447
394 448 # what normcase does to ASCII strings
395 normcasespec = encoding.normcasespecs.lower
449 normcasespec: int = encoding.normcasespecs.lower
396 450 # fallback normcase function for non-ASCII strings
397 451 normcasefallback = normcase
398 452
399 453 if pycompat.isdarwin:
400 454
401 def normcase(path):
455 def normcase(path: bytes) -> bytes:
402 456 """
403 457 Normalize a filename for OS X-compatible comparison:
404 458 - escape-encode invalid characters
@@ -423,7 +477,7 if pycompat.isdarwin:
423 477
424 478 normcasespec = encoding.normcasespecs.lower
425 479
426 def normcasefallback(path):
480 def normcasefallback(path: bytes) -> bytes:
427 481 try:
428 482 u = path.decode('utf-8')
429 483 except UnicodeDecodeError:
@@ -464,7 +518,7 if pycompat.sysplatform == b'cygwin':
464 518 )
465 519
466 520 # use upper-ing as normcase as same as NTFS workaround
467 def normcase(path):
521 def normcase(path: bytes) -> bytes:
468 522 pathlen = len(path)
469 523 if (pathlen == 0) or (path[0] != pycompat.ossep):
470 524 # treat as relative
@@ -490,20 +544,20 if pycompat.sysplatform == b'cygwin':
490 544 # but these translations are not supported by native
491 545 # tools, so the exec bit tends to be set erroneously.
492 546 # Therefore, disable executable bit access on Cygwin.
493 def checkexec(path):
547 def checkexec(path: bytes) -> bool:
494 548 return False
495 549
496 550 # Similarly, Cygwin's symlink emulation is likely to create
497 551 # problems when Mercurial is used from both Cygwin and native
498 552 # Windows, with other native tools, or on shared volumes
499 def checklink(path):
553 def checklink(path: bytes) -> bool:
500 554 return False
501 555
502 556
503 _needsshellquote = None
557 _needsshellquote: Optional[Match[bytes]] = None
504 558
505 559
506 def shellquote(s):
560 def shellquote(s: bytes) -> bytes:
507 561 if pycompat.sysplatform == b'OpenVMS':
508 562 return b'"%s"' % s
509 563 global _needsshellquote
@@ -516,12 +570,12 def shellquote(s):
516 570 return b"'%s'" % s.replace(b"'", b"'\\''")
517 571
518 572
519 def shellsplit(s):
573 def shellsplit(s: bytes) -> List[bytes]:
520 574 """Parse a command string in POSIX shell way (best-effort)"""
521 575 return pycompat.shlexsplit(s, posix=True)
522 576
523 577
524 def testpid(pid):
578 def testpid(pid: int) -> bool:
525 579 '''return False if pid dead, True if running or not sure'''
526 580 if pycompat.sysplatform == b'OpenVMS':
527 581 return True
@@ -532,12 +586,12 def testpid(pid):
532 586 return inst.errno != errno.ESRCH
533 587
534 588
535 def isowner(st):
589 def isowner(st: os.stat_result) -> bool:
536 590 """Return True if the stat object st is from the current user."""
537 591 return st.st_uid == os.getuid()
538 592
539 593
540 def findexe(command):
594 def findexe(command: bytes) -> Optional[bytes]:
541 595 """Find executable for command searching like which does.
542 596 If command is a basename then PATH is searched for command.
543 597 PATH isn't searched if command is an absolute or relative path.
@@ -545,7 +599,7 def findexe(command):
545 599 if pycompat.sysplatform == b'OpenVMS':
546 600 return command
547 601
548 def findexisting(executable):
602 def findexisting(executable: bytes) -> Optional[bytes]:
549 603 b'Will return executable if existing file'
550 604 if os.path.isfile(executable) and os.access(executable, os.X_OK):
551 605 return executable
@@ -564,14 +618,14 def findexe(command):
564 618 return None
565 619
566 620
567 def setsignalhandler():
621 def setsignalhandler() -> None:
568 622 pass
569 623
570 624
571 625 _wantedkinds = {stat.S_IFREG, stat.S_IFLNK}
572 626
573 627
574 def statfiles(files):
628 def statfiles(files: Sequence[bytes]) -> Iterator[Optional[os.stat_result]]:
575 629 """Stat each file in files. Yield each stat, or None if a file does not
576 630 exist or has a type we don't care about."""
577 631 lstat = os.lstat
@@ -586,12 +640,12 def statfiles(files):
586 640 yield st
587 641
588 642
589 def getuser():
643 def getuser() -> bytes:
590 644 '''return name of current user'''
591 645 return pycompat.fsencode(getpass.getuser())
592 646
593 647
594 def username(uid=None):
648 def username(uid: Optional[int] = None) -> Optional[bytes]:
595 649 """Return the name of the user with the given uid.
596 650
597 651 If uid is None, return the name of the current user."""
@@ -604,7 +658,7 def username(uid=None):
604 658 return b'%d' % uid
605 659
606 660
607 def groupname(gid=None):
661 def groupname(gid: Optional[int] = None) -> Optional[bytes]:
608 662 """Return the name of the group with the given gid.
609 663
610 664 If gid is None, return the name of the current group."""
@@ -617,7 +671,7 def groupname(gid=None):
617 671 return pycompat.bytestr(gid)
618 672
619 673
620 def groupmembers(name):
674 def groupmembers(name: bytes) -> List[bytes]:
621 675 """Return the list of members of the group with the given
622 676 name, KeyError if the group does not exist.
623 677 """
@@ -625,23 +679,27 def groupmembers(name):
625 679 return pycompat.rapply(pycompat.fsencode, list(grp.getgrnam(name).gr_mem))
626 680
627 681
628 def spawndetached(args):
682 def spawndetached(args: List[bytes]) -> int:
629 683 return os.spawnvp(os.P_NOWAIT | getattr(os, 'P_DETACH', 0), args[0], args)
630 684
631 685
632 def gethgcmd():
686 def gethgcmd(): # TODO: convert to bytes, like on Windows?
633 687 return sys.argv[:1]
634 688
635 689
636 def makedir(path, notindexed):
690 def makedir(path: bytes, notindexed: bool) -> None:
637 691 os.mkdir(path)
638 692
639 693
640 def lookupreg(key, name=None, scope=None):
694 def lookupreg(
695 key: bytes,
696 name: Optional[bytes] = None,
697 scope: Optional[Union[int, Iterable[int]]] = None,
698 ) -> Optional[bytes]:
641 699 return None
642 700
643 701
644 def hidewindow():
702 def hidewindow() -> None:
645 703 """Hide current shell window.
646 704
647 705 Used to hide the window opened when starting asynchronous
@@ -651,15 +709,15 def hidewindow():
651 709
652 710
653 711 class cachestat:
654 def __init__(self, path):
712 def __init__(self, path: bytes) -> None:
655 713 self.stat = os.stat(path)
656 714
657 def cacheable(self):
715 def cacheable(self) -> bool:
658 716 return bool(self.stat.st_ino)
659 717
660 718 __hash__ = object.__hash__
661 719
662 def __eq__(self, other):
720 def __eq__(self, other: Any) -> bool:
663 721 try:
664 722 # Only dev, ino, size, mtime and atime are likely to change. Out
665 723 # of these, we shouldn't compare atime but should compare the
@@ -680,18 +738,18 class cachestat:
680 738 except AttributeError:
681 739 return False
682 740
683 def __ne__(self, other):
741 def __ne__(self, other: Any) -> bool:
684 742 return not self == other
685 743
686 744
687 def statislink(st):
745 def statislink(st: Optional[os.stat_result]) -> bool:
688 746 '''check whether a stat result is a symlink'''
689 return st and stat.S_ISLNK(st.st_mode)
747 return stat.S_ISLNK(st.st_mode) if st else False
690 748
691 749
692 def statisexec(st):
750 def statisexec(st: Optional[os.stat_result]) -> bool:
693 751 '''check whether a stat result is an executable file'''
694 return st and (st.st_mode & 0o100 != 0)
752 return (st.st_mode & 0o100 != 0) if st else False
695 753
696 754
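A note on the two rewrites above: the old bodies short-circuited to their argument, so a None stat produced None rather than False; the conditional expressions make the annotated bool return types truthful.

# before: statislink(None) -> None   (st and ... short-circuits to st)
# after:  statislink(None) -> False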
697 755 def poll(fds):
@@ -708,7 +766,7 def poll(fds):
708 766 return sorted(list(set(sum(res, []))))
709 767
710 768
711 def readpipe(pipe):
769 def readpipe(pipe) -> bytes:
712 770 """Read all available data from a pipe."""
713 771 # We can't fstat() a pipe because Linux will always report 0.
714 772 # So, we set the pipe to non-blocking mode and read everything
@@ -733,7 +791,7 def readpipe(pipe):
733 791 fcntl.fcntl(pipe, fcntl.F_SETFL, oldflags)
734 792
735 793
736 def bindunixsocket(sock, path):
794 def bindunixsocket(sock, path: bytes) -> None:
737 795 """Bind the UNIX domain socket to the specified path"""
738 796 # use relative path instead of full path at bind() if possible, since
739 797 # AF_UNIX path has very small length limit (107 chars) on common
@@ -10,8 +10,13 import difflib
10 10 import re
11 11 import struct
12 12
13 from typing import (
14 List,
15 Tuple,
16 )
13 17
14 def splitnewlines(text):
18
19 def splitnewlines(text: bytes) -> List[bytes]:
15 20 '''like str.splitlines, but only split on newlines.'''
16 21 lines = [l + b'\n' for l in text.split(b'\n')]
17 22 if lines:
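A self-contained restatement of splitnewlines() for illustration; the tail handling falls between the hunks shown here, so the branch below is an assumption matching the doctest in the source:

def splitnewlines_demo(text: bytes):
    lines = [l + b'\n' for l in text.split(b'\n')]
    if lines:
        if lines[-1] == b'\n':
            lines.pop()  # the text ended with a newline
        else:
            lines[-1] = lines[-1][:-1]  # drop the artificial trailing \n
    return lines

assert splitnewlines_demo(b'a\nb\n') == [b'a\n', b'b\n']
assert splitnewlines_demo(b'a\nb') == [b'a\n', b'b']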
@@ -22,7 +27,9 def splitnewlines(text):
22 27 return lines
23 28
24 29
25 def _normalizeblocks(a, b, blocks):
30 def _normalizeblocks(
31 a: List[bytes], b: List[bytes], blocks
32 ) -> List[Tuple[int, int, int]]:
26 33 prev = None
27 34 r = []
28 35 for curr in blocks:
@@ -57,7 +64,7 def _normalizeblocks(a, b, blocks):
57 64 return r
58 65
59 66
60 def bdiff(a, b):
67 def bdiff(a: bytes, b: bytes) -> bytes:
61 68 a = bytes(a).splitlines(True)
62 69 b = bytes(b).splitlines(True)
63 70
@@ -84,7 +91,7 def bdiff(a, b):
84 91 return b"".join(bin)
85 92
86 93
87 def blocks(a, b):
94 def blocks(a: bytes, b: bytes) -> List[Tuple[int, int, int, int]]:
88 95 an = splitnewlines(a)
89 96 bn = splitnewlines(b)
90 97 d = difflib.SequenceMatcher(None, an, bn).get_matching_blocks()
@@ -92,7 +99,7 def blocks(a, b):
92 99 return [(i, i + n, j, j + n) for (i, j, n) in d]
93 100
94 101
95 def fixws(text, allws):
102 def fixws(text: bytes, allws: bool) -> bytes:
96 103 if allws:
97 104 text = re.sub(b'[ \t\r]+', b'', text)
98 105 else:
@@ -9,6 +9,11
9 9 import io
10 10 import struct
11 11
12 from typing import (
13 List,
14 Tuple,
15 )
16
12 17
13 18 stringio = io.BytesIO
14 19
@@ -28,7 +33,9 class mpatchError(Exception):
28 33 # temporary string buffers.
29 34
30 35
31 def _pull(dst, src, l): # pull l bytes from src
36 def _pull(
37 dst: List[Tuple[int, int]], src: List[Tuple[int, int]], l: int
38 ) -> None: # pull l bytes from src
32 39 while l:
33 40 f = src.pop()
34 41 if f[0] > l: # do we need to split?
@@ -39,7 +46,7 def _pull(dst, src, l): # pull l bytes
39 46 l -= f[0]
40 47
41 48
42 def _move(m, dest, src, count):
49 def _move(m: stringio, dest: int, src: int, count: int) -> None:
43 50 """move count bytes from src to dest
44 51
45 52 The file pointer is left at the end of dest.
@@ -50,7 +57,9 def _move(m, dest, src, count):
50 57 m.write(buf)
51 58
52 59
53 def _collect(m, buf, list):
60 def _collect(
61 m: stringio, buf: int, list: List[Tuple[int, int]]
62 ) -> Tuple[int, int]:
54 63 start = buf
55 64 for l, p in reversed(list):
56 65 _move(m, buf, p, l)
@@ -58,7 +67,7 def _collect(m, buf, list):
58 67 return (buf - start, start)
59 68
60 69
61 def patches(a, bins):
70 def patches(a: bytes, bins: List[bytes]) -> bytes:
62 71 if not bins:
63 72 return a
64 73
@@ -111,7 +120,7 def patches(a, bins):
111 120 return m.read(t[0])
112 121
113 122
114 def patchedsize(orig, delta):
123 def patchedsize(orig: int, delta: bytes) -> int:
115 124 outlen, last, bin = 0, 0, 0
116 125 binend = len(delta)
117 126 data = 12
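For context (an assumption about the bdiff delta encoding, not shown in this hunk): a binary delta is a run of fragments, each a 12-byte '>lll' header giving the replaced range's start, end, and payload length, followed by the payload. patchedsize() walks those headers without materializing the patched text, which is why the first payload offset is 12.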
@@ -435,6 +435,11 class DirstateItem:
435 435 return self._wc_tracked and not (self._p1_tracked or self._p2_info)
436 436
437 437 @property
438 def modified(self):
439 """True if the file has been modified"""
440 return self._wc_tracked and self._p1_tracked and self._p2_info
441
442 @property
438 443 def maybe_clean(self):
439 444 """True if the file has a chance to be in the "clean" state"""
440 445 if not self._wc_tracked:
@@ -28,6 +28,24 import sys
28 28 import tempfile
29 29 import xmlrpc.client as xmlrpclib
30 30
31 from typing import (
32 Any,
33 AnyStr,
34 BinaryIO,
35 Dict,
36 Iterable,
37 Iterator,
38 List,
39 Mapping,
40 NoReturn,
41 Optional,
42 Sequence,
43 Tuple,
44 Type,
45 TypeVar,
46 cast,
47 overload,
48 )
31 49
32 50 ispy3 = sys.version_info[0] >= 3
33 51 ispypy = '__pypy__' in sys.builtin_module_names
@@ -38,6 +56,10 if not globals(): # hide this from non-
38 56
39 57 TYPE_CHECKING = typing.TYPE_CHECKING
40 58
59 _GetOptResult = Tuple[List[Tuple[bytes, bytes]], List[bytes]]
60 _T0 = TypeVar('_T0')
61 _Tbytestr = TypeVar('_Tbytestr', bound='bytestr')
62
41 63
42 64 def future_set_exception_info(f, exc_info):
43 65 f.set_exception(exc_info[0])
@@ -46,7 +68,7 def future_set_exception_info(f, exc_inf
46 68 FileNotFoundError = builtins.FileNotFoundError
47 69
48 70
49 def identity(a):
71 def identity(a: _T0) -> _T0:
50 72 return a
51 73
52 74
@@ -94,21 +116,17 if os.name == r'nt':
94 116
95 117 fsencode = os.fsencode
96 118 fsdecode = os.fsdecode
97 oscurdir = os.curdir.encode('ascii')
98 oslinesep = os.linesep.encode('ascii')
99 osname = os.name.encode('ascii')
100 ospathsep = os.pathsep.encode('ascii')
101 ospardir = os.pardir.encode('ascii')
102 ossep = os.sep.encode('ascii')
103 osaltsep = os.altsep
104 if osaltsep:
105 osaltsep = osaltsep.encode('ascii')
106 osdevnull = os.devnull.encode('ascii')
119 oscurdir: bytes = os.curdir.encode('ascii')
120 oslinesep: bytes = os.linesep.encode('ascii')
121 osname: bytes = os.name.encode('ascii')
122 ospathsep: bytes = os.pathsep.encode('ascii')
123 ospardir: bytes = os.pardir.encode('ascii')
124 ossep: bytes = os.sep.encode('ascii')
125 osaltsep: Optional[bytes] = os.altsep.encode('ascii') if os.altsep else None
126 osdevnull: bytes = os.devnull.encode('ascii')
107 127
108 sysplatform = sys.platform.encode('ascii')
109 sysexecutable = sys.executable
110 if sysexecutable:
111 sysexecutable = os.fsencode(sysexecutable)
128 sysplatform: bytes = sys.platform.encode('ascii')
129 sysexecutable: bytes = os.fsencode(sys.executable) if sys.executable else b''
112 130
113 131
114 132 def maplist(*args):
@@ -128,7 +146,7 getargspec = inspect.getfullargspec
128 146
129 147 long = int
130 148
131 if getattr(sys, 'argv', None) is not None:
149 if builtins.getattr(sys, 'argv', None) is not None:
132 150 # On POSIX, the char** argv array is converted to Python str using
133 151 # Py_DecodeLocale(). The inverse of this is Py_EncodeLocale(), which
134 152 # isn't directly callable from Python code. In practice, os.fsencode()
@@ -143,6 +161,7 if getattr(sys, 'argv', None) is not Non
143 161 # (this is how Python 2 worked). To get that, we encode with the mbcs
144 162 # encoding, which will pass CP_ACP to the underlying Windows API to
145 163 # produce bytes.
164 sysargv: List[bytes] = []
146 165 if os.name == r'nt':
147 166 sysargv = [a.encode("mbcs", "ignore") for a in sys.argv]
148 167 else:
@@ -211,38 +230,53 class bytestr(bytes):
211 230 # https://github.com/google/pytype/issues/500
212 231 if TYPE_CHECKING:
213 232
214 def __init__(self, s=b''):
233 def __init__(self, s: object = b'') -> None:
215 234 pass
216 235
217 def __new__(cls, s=b''):
236 def __new__(cls: Type[_Tbytestr], s: object = b'') -> _Tbytestr:
218 237 if isinstance(s, bytestr):
219 238 return s
220 239 if not isinstance(
221 240 s, (bytes, bytearray)
222 ) and not hasattr( # hasattr-py3-only
241 ) and not builtins.hasattr( # hasattr-py3-only
223 242 s, u'__bytes__'
224 243 ):
225 244 s = str(s).encode('ascii')
226 245 return bytes.__new__(cls, s)
227 246
228 def __getitem__(self, key):
247 # The base class uses `int` return in py3, but the point of this class is to
248 # behave like py2.
249 def __getitem__(self, key) -> bytes: # pytype: disable=signature-mismatch
229 250 s = bytes.__getitem__(self, key)
230 251 if not isinstance(s, bytes):
231 252 s = bytechr(s)
232 253 return s
233 254
234 def __iter__(self):
255 # The base class expects `Iterator[int]` return in py3, but the point of
256 # this class is to behave like py2.
257 def __iter__(self) -> Iterator[bytes]: # pytype: disable=signature-mismatch
235 258 return iterbytestr(bytes.__iter__(self))
236 259
237 def __repr__(self):
260 def __repr__(self) -> str:
238 261 return bytes.__repr__(self)[1:] # drop b''
239 262
240 263
241 def iterbytestr(s):
264 def iterbytestr(s: Iterable[int]) -> Iterator[bytes]:
242 265 """Iterate bytes as if it were a str object of Python 2"""
243 266 return map(bytechr, s)
244 267
245 268
269 if TYPE_CHECKING:
270
271 @overload
272 def maybebytestr(s: bytes) -> bytestr:
273 ...
274
275 @overload
276 def maybebytestr(s: _T0) -> _T0:
277 ...
278
279
246 280 def maybebytestr(s):
247 281 """Promote bytes to bytestr"""
248 282 if isinstance(s, bytes):
@@ -250,7 +284,7 def maybebytestr(s):
250 284 return s
251 285
252 286
253 def sysbytes(s):
287 def sysbytes(s: AnyStr) -> bytes:
254 288 """Convert an internal str (e.g. keyword, __doc__) back to bytes
255 289
256 290 This never raises UnicodeEncodeError, but only ASCII characters
@@ -261,7 +295,7 def sysbytes(s):
261 295 return s.encode('utf-8')
262 296
263 297
264 def sysstr(s):
298 def sysstr(s: AnyStr) -> str:
265 299 """Return a keyword str to be passed to Python functions such as
266 300 getattr() and str.encode()
267 301
@@ -274,29 +308,29 def sysstr(s):
274 308 return s.decode('latin-1')
275 309
276 310
277 def strurl(url):
311 def strurl(url: AnyStr) -> str:
278 312 """Converts a bytes url back to str"""
279 313 if isinstance(url, bytes):
280 314 return url.decode('ascii')
281 315 return url
282 316
283 317
284 def bytesurl(url):
318 def bytesurl(url: AnyStr) -> bytes:
285 319 """Converts a str url to bytes by encoding in ascii"""
286 320 if isinstance(url, str):
287 321 return url.encode('ascii')
288 322 return url
289 323
290 324
291 def raisewithtb(exc, tb):
325 def raisewithtb(exc: BaseException, tb) -> NoReturn:
292 326 """Raise exception with the given traceback"""
293 327 raise exc.with_traceback(tb)
294 328
295 329
296 def getdoc(obj):
330 def getdoc(obj: object) -> Optional[bytes]:
297 331 """Get docstring as bytes; may be None so gettext() won't confuse it
298 332 with _('')"""
299 doc = getattr(obj, '__doc__', None)
333 doc = builtins.getattr(obj, '__doc__', None)
300 334 if doc is None:
301 335 return doc
302 336 return sysbytes(doc)
@@ -319,14 +353,22 xrange = builtins.range
319 353 unicode = str
320 354
321 355
322 def open(name, mode=b'r', buffering=-1, encoding=None):
356 def open(
357 name,
358 mode: AnyStr = b'r',
359 buffering: int = -1,
360 encoding: Optional[str] = None,
361 ) -> Any:
362 # TODO: assert binary mode, and cast result to BinaryIO?
323 363 return builtins.open(name, sysstr(mode), buffering, encoding)
324 364
325 365
326 366 safehasattr = _wrapattrfunc(builtins.hasattr)
327 367
328 368
329 def _getoptbwrapper(orig, args, shortlist, namelist):
369 def _getoptbwrapper(
370 orig, args: Sequence[bytes], shortlist: bytes, namelist: Sequence[bytes]
371 ) -> _GetOptResult:
330 372 """
331 373 Takes bytes arguments, converts them to unicode, pass them to
332 374 getopt.getopt(), convert the returned values back to bytes and then
@@ -342,7 +384,7 def _getoptbwrapper(orig, args, shortlis
342 384 return opts, args
343 385
344 386
345 def strkwargs(dic):
387 def strkwargs(dic: Mapping[bytes, _T0]) -> Dict[str, _T0]:
346 388 """
347 389 Converts the keys of a python dictionary to str i.e. unicodes so that
348 390 they can be passed as keyword arguments as dictionaries with bytes keys
@@ -352,7 +394,7 def strkwargs(dic):
352 394 return dic
353 395
354 396
355 def byteskwargs(dic):
397 def byteskwargs(dic: Mapping[str, _T0]) -> Dict[bytes, _T0]:
356 398 """
357 399 Converts keys of python dictionaries to bytes as they were converted to
358 400 str to pass that dictionary as a keyword argument on Python 3.
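A hedged sketch of the round-trip these two helpers provide (a bytes-keyed dict cannot be splatted as **kwargs on Python 3); the demos use a plain latin-1 decode/encode, which is what sysstr()/sysbytes() amount to for ASCII keys:

def strkwargs_demo(dic):
    return {k.decode('latin-1'): v for k, v in dic.items()}

def byteskwargs_demo(dic):
    return {k.encode('latin-1'): v for k, v in dic.items()}

assert strkwargs_demo({b'rev': b'tip'}) == {'rev': b'tip'}
assert byteskwargs_demo({'rev': b'tip'}) == {b'rev': b'tip'}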
@@ -362,7 +404,9 def byteskwargs(dic):
362 404
363 405
364 406 # TODO: handle shlex.shlex().
365 def shlexsplit(s, comments=False, posix=True):
407 def shlexsplit(
408 s: bytes, comments: bool = False, posix: bool = True
409 ) -> List[bytes]:
366 410 """
367 411 Takes bytes argument, convert it to str i.e. unicodes, pass that into
368 412 shlex.split(), convert the returned value to bytes and return that for
@@ -377,46 +421,59 itervalues = lambda x: x.values()
377 421
378 422 json_loads = json.loads
379 423
380 isjython = sysplatform.startswith(b'java')
424 isjython: bool = sysplatform.startswith(b'java')
381 425
382 isdarwin = sysplatform.startswith(b'darwin')
383 islinux = sysplatform.startswith(b'linux')
384 isposix = osname == b'posix'
385 iswindows = osname == b'nt'
426 isdarwin: bool = sysplatform.startswith(b'darwin')
427 islinux: bool = sysplatform.startswith(b'linux')
428 isposix: bool = osname == b'posix'
429 iswindows: bool = osname == b'nt'
386 430
387 431
388 def getoptb(args, shortlist, namelist):
432 def getoptb(
433 args: Sequence[bytes], shortlist: bytes, namelist: Sequence[bytes]
434 ) -> _GetOptResult:
389 435 return _getoptbwrapper(getopt.getopt, args, shortlist, namelist)
390 436
391 437
392 def gnugetoptb(args, shortlist, namelist):
438 def gnugetoptb(
439 args: Sequence[bytes], shortlist: bytes, namelist: Sequence[bytes]
440 ) -> _GetOptResult:
393 441 return _getoptbwrapper(getopt.gnu_getopt, args, shortlist, namelist)
394 442
395 443
396 def mkdtemp(suffix=b'', prefix=b'tmp', dir=None):
444 def mkdtemp(
445 suffix: bytes = b'', prefix: bytes = b'tmp', dir: Optional[bytes] = None
446 ) -> bytes:
397 447 return tempfile.mkdtemp(suffix, prefix, dir)
398 448
399 449
400 450 # text=True is not supported; use util.from/tonativeeol() instead
401 def mkstemp(suffix=b'', prefix=b'tmp', dir=None):
451 def mkstemp(
452 suffix: bytes = b'', prefix: bytes = b'tmp', dir: Optional[bytes] = None
453 ) -> Tuple[int, bytes]:
402 454 return tempfile.mkstemp(suffix, prefix, dir)
403 455
404 456
405 457 # TemporaryFile does not support an "encoding=" argument on python2.
406 458 # Files from this wrapper are always opened in byte mode.
407 def unnamedtempfile(mode=None, *args, **kwargs):
459 def unnamedtempfile(mode: Optional[bytes] = None, *args, **kwargs) -> BinaryIO:
408 460 if mode is None:
409 461 mode = 'w+b'
410 462 else:
411 463 mode = sysstr(mode)
412 464 assert 'b' in mode
413 return tempfile.TemporaryFile(mode, *args, **kwargs)
465 return cast(BinaryIO, tempfile.TemporaryFile(mode, *args, **kwargs))
414 466
415 467
416 468 # NamedTemporaryFile does not support an "encoding=" argument on python2.
417 469 # Files from this wrapper are always opened in byte mode.
418 470 def namedtempfile(
419 mode=b'w+b', bufsize=-1, suffix=b'', prefix=b'tmp', dir=None, delete=True
471 mode: bytes = b'w+b',
472 bufsize: int = -1,
473 suffix: bytes = b'',
474 prefix: bytes = b'tmp',
475 dir: Optional[bytes] = None,
476 delete: bool = True,
420 477 ):
421 478 mode = sysstr(mode)
422 479 assert 'b' in mode
@@ -38,12 +38,15 from .revlogutils.constants import (
38 38 COMP_MODE_DEFAULT,
39 39 COMP_MODE_INLINE,
40 40 COMP_MODE_PLAIN,
41 DELTA_BASE_REUSE_NO,
42 DELTA_BASE_REUSE_TRY,
41 43 ENTRY_RANK,
42 44 FEATURES_BY_VERSION,
43 45 FLAG_GENERALDELTA,
44 46 FLAG_INLINE_DATA,
45 47 INDEX_HEADER,
46 48 KIND_CHANGELOG,
49 KIND_FILELOG,
47 50 RANK_UNKNOWN,
48 51 REVLOGV0,
49 52 REVLOGV1,
@@ -125,7 +128,7 rustrevlog = policy.importrust('revlog')
125 128 # Aliased for performance.
126 129 _zlibdecompress = zlib.decompress
127 130
128 # max size of revlog with inline data
131 # max size of inline data embedded into a revlog
129 132 _maxinline = 131072
130 133
131 134 # Flag processors for REVIDX_ELLIPSIS.
@@ -347,6 +350,7 class revlog:
347 350 self._chunkcachesize = 65536
348 351 self._maxchainlen = None
349 352 self._deltabothparents = True
353 self._candidate_group_chunk_size = 0
350 354 self._debug_delta = False
351 355 self.index = None
352 356 self._docket = None
@@ -363,6 +367,11 class revlog:
363 367 self._srdensitythreshold = 0.50
364 368 self._srmingapsize = 262144
365 369
370 # other optional features
371
372 # might remove rank configuration once the computation has no impact
373 self._compute_rank = False
374
366 375 # Make copy of flag processors so each revlog instance can support
367 376 # custom flags.
368 377 self._flagprocessors = dict(flagutil.flagprocessors)
@@ -404,6 +413,7 class revlog:
404 413
405 414 if b'changelogv2' in opts and self.revlog_kind == KIND_CHANGELOG:
406 415 new_header = CHANGELOGV2
416 self._compute_rank = opts.get(b'changelogv2.compute-rank', True)
407 417 elif b'revlogv2' in opts:
408 418 new_header = REVLOGV2
409 419 elif b'revlogv1' in opts:
@@ -421,6 +431,9 class revlog:
421 431 self._maxchainlen = opts[b'maxchainlen']
422 432 if b'deltabothparents' in opts:
423 433 self._deltabothparents = opts[b'deltabothparents']
434 dps_cgds = opts.get(b'delta-parent-search.candidate-group-chunk-size')
435 if dps_cgds:
436 self._candidate_group_chunk_size = dps_cgds
424 437 self._lazydelta = bool(opts.get(b'lazydelta', True))
425 438 self._lazydeltabase = False
426 439 if self._lazydelta:
@@ -505,7 +518,6 class revlog:
505 518 self._docket = docket
506 519 self._docket_file = entry_point
507 520 else:
508 entry_data = b''
509 521 self._initempty = True
510 522 entry_data = self._get_data(entry_point, mmapindexthreshold)
511 523 if len(entry_data) > 0:
@@ -653,9 +665,12 class revlog:
653 665 @util.propertycache
654 666 def display_id(self):
655 667 """The public facing "ID" of the revlog that we use in message"""
656 # Maybe we should build a user facing representation of
657 # revlog.target instead of using `self.radix`
658 return self.radix
668 if self.revlog_kind == KIND_FILELOG:
669 # Reference the file without the "data/" prefix, so it is familiar
670 # to the user.
671 return self.target[1]
672 else:
673 return self.radix
659 674
660 675 def _get_decompressor(self, t):
661 676 try:
@@ -2445,6 +2460,16 class revlog:
2445 2460 self, write_debug=write_debug
2446 2461 )
2447 2462
2463 if cachedelta is not None and len(cachedelta) == 2:
2464 # If the cached delta has no information about how it should be
2465 # reused, add the default reuse instruction according to the
2466 # revlog's configuration.
2467 if self._generaldelta and self._lazydeltabase:
2468 delta_base_reuse = DELTA_BASE_REUSE_TRY
2469 else:
2470 delta_base_reuse = DELTA_BASE_REUSE_NO
2471 cachedelta = (cachedelta[0], cachedelta[1], delta_base_reuse)
2472
2448 2473 revinfo = revlogutils.revisioninfo(
2449 2474 node,
2450 2475 p1,
@@ -2492,7 +2517,7 class revlog:
2492 2517 sidedata_offset = 0
2493 2518
2494 2519 rank = RANK_UNKNOWN
2495 if self._format_version == CHANGELOGV2:
2520 if self._compute_rank:
2496 2521 if (p1r, p2r) == (nullrev, nullrev):
2497 2522 rank = 1
2498 2523 elif p1r != nullrev and p2r == nullrev:
@@ -2637,6 +2662,8 class revlog:
2637 2662 alwayscache=False,
2638 2663 addrevisioncb=None,
2639 2664 duplicaterevisioncb=None,
2665 debug_info=None,
2666 delta_base_reuse_policy=None,
2640 2667 ):
2641 2668 """
2642 2669 add a delta group
@@ -2652,6 +2679,14 class revlog:
2652 2679 if self._adding_group:
2653 2680 raise error.ProgrammingError(b'cannot nest addgroup() calls')
2654 2681
2682 # read the default delta-base reuse policy from revlog config if the
2683 # group did not specify one.
2684 if delta_base_reuse_policy is None:
2685 if self._generaldelta and self._lazydeltabase:
2686 delta_base_reuse_policy = DELTA_BASE_REUSE_TRY
2687 else:
2688 delta_base_reuse_policy = DELTA_BASE_REUSE_NO
2689
2655 2690 self._adding_group = True
2656 2691 empty = True
2657 2692 try:
@@ -2662,6 +2697,7 class revlog:
2662 2697 deltacomputer = deltautil.deltacomputer(
2663 2698 self,
2664 2699 write_debug=write_debug,
2700 debug_info=debug_info,
2665 2701 )
2666 2702 # loop through our set of deltas
2667 2703 for data in deltas:
@@ -2731,7 +2767,7 class revlog:
2731 2767 p1,
2732 2768 p2,
2733 2769 flags,
2734 (baserev, delta),
2770 (baserev, delta, delta_base_reuse_policy),
2735 2771 alwayscache=alwayscache,
2736 2772 deltacomputer=deltacomputer,
2737 2773 sidedata=sidedata,
@@ -2886,6 +2922,7 class revlog:
2886 2922 assumehaveparentrevisions=False,
2887 2923 deltamode=repository.CG_DELTAMODE_STD,
2888 2924 sidedata_helpers=None,
2925 debug_info=None,
2889 2926 ):
2890 2927 if nodesorder not in (b'nodes', b'storage', b'linear', None):
2891 2928 raise error.ProgrammingError(
@@ -2915,6 +2952,7 class revlog:
2915 2952 revisiondata=revisiondata,
2916 2953 assumehaveparentrevisions=assumehaveparentrevisions,
2917 2954 sidedata_helpers=sidedata_helpers,
2955 debug_info=debug_info,
2918 2956 )
2919 2957
2920 2958 DELTAREUSEALWAYS = b'always'
@@ -67,7 +67,7 class revisioninfo:
67 67 node: expected hash of the revision
68 68 p1, p2: parent revs of the revision
69 69 btext: built text cache consisting of a one-element list
70 cachedelta: (baserev, uncompressed_delta) or None
70 cachedelta: (baserev, uncompressed_delta, usage_mode) or None
71 71 flags: flags associated to the revision storage
72 72
73 73 One of btext[0] or cachedelta must be set.
@@ -301,3 +301,18 FEATURES_BY_VERSION = {
301 301
302 302
303 303 SPARSE_REVLOG_MAX_CHAIN_LENGTH = 1000
304
305 ### What should be done with a cached delta and its base?
306
307 # Ignore the cache when considering candidates.
308 #
309 # The cached delta might be used, but the delta base will not be scheduled for
310 # usage earlier than in "normal" order.
311 DELTA_BASE_REUSE_NO = 0
312
313 # Prioritize trying the cached delta base
314 #
315 # The delta base will be tested for validity first, so that the cached deltas get
316 # used when possible.
317 DELTA_BASE_REUSE_TRY = 1
318 DELTA_BASE_REUSE_FORCE = 2
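As a usage sketch grounded in the revlog hunks earlier in this changeset: a cached delta now travels as a 3-tuple whose last slot carries one of these policies, for example

# (baserev, delta_bytes, policy); the names stand in for the real values
cachedelta = (baserev, delta, DELTA_BASE_REUSE_TRY)

DELTA_BASE_REUSE_FORCE carries no comment above; judging from is_good_delta_info() and _candidategroups() later in this changeset, it makes the cached base win unconditionally.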
@@ -6,12 +6,19
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 import collections
10 import string
11
9 12 from .. import (
13 mdiff,
10 14 node as nodemod,
15 revlogutils,
16 util,
11 17 )
12 18
13 19 from . import (
14 20 constants,
21 deltas as deltautil,
15 22 )
16 23
17 24 INDEX_ENTRY_DEBUG_COLUMN = []
@@ -216,3 +223,499 def debug_index(
216 223 fm.plain(b'\n')
217 224
218 225 fm.end()
226
227
228 def dump(ui, revlog):
229 """perform the work for `hg debugrevlog --dump"""
230 # XXX seems redundant with debug index ?
231 r = revlog
232 numrevs = len(r)
233 ui.write(
234 (
235 b"# rev p1rev p2rev start end deltastart base p1 p2"
236 b" rawsize totalsize compression heads chainlen\n"
237 )
238 )
239 ts = 0
240 heads = set()
241
242 for rev in range(numrevs):
243 dbase = r.deltaparent(rev)
244 if dbase == -1:
245 dbase = rev
246 cbase = r.chainbase(rev)
247 clen = r.chainlen(rev)
248 p1, p2 = r.parentrevs(rev)
249 rs = r.rawsize(rev)
250 ts = ts + rs
251 heads -= set(r.parentrevs(rev))
252 heads.add(rev)
253 try:
254 compression = ts / r.end(rev)
255 except ZeroDivisionError:
256 compression = 0
257 ui.write(
258 b"%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
259 b"%11d %5d %8d\n"
260 % (
261 rev,
262 p1,
263 p2,
264 r.start(rev),
265 r.end(rev),
266 r.start(dbase),
267 r.start(cbase),
268 r.start(p1),
269 r.start(p2),
270 rs,
271 ts,
272 compression,
273 len(heads),
274 clen,
275 )
276 )
277
278
279 def debug_revlog(ui, revlog):
280 """code for `hg debugrevlog`"""
281 r = revlog
282 format = r._format_version
283 v = r._format_flags
284 flags = []
285 gdelta = False
286 if v & constants.FLAG_INLINE_DATA:
287 flags.append(b'inline')
288 if v & constants.FLAG_GENERALDELTA:
289 gdelta = True
290 flags.append(b'generaldelta')
291 if not flags:
292 flags = [b'(none)']
293
294 ### the total size of stored content if uncompressed.
295 full_text_total_size = 0
296 ### tracks merge vs single parent
297 nummerges = 0
298
299 ### tracks the ways the "delta" is built
300 # nodelta
301 numempty = 0
302 numemptytext = 0
303 numemptydelta = 0
304 # full file content
305 numfull = 0
306 # intermediate snapshot against a prior snapshot
307 numsemi = 0
308 # snapshot count per depth
309 numsnapdepth = collections.defaultdict(lambda: 0)
310 # number of snapshots with a non-ancestor delta
311 numsnapdepth_nad = collections.defaultdict(lambda: 0)
312 # delta against previous revision
313 numprev = 0
314 # delta against prev, where prev is a non-ancestor
315 numprev_nad = 0
316 # delta against first or second parent (not prev)
317 nump1 = 0
318 nump2 = 0
319 # delta against neither prev nor parents
320 numother = 0
321 # delta against other that is a non-ancestor
322 numother_nad = 0
323 # delta against prev that are also first or second parent
324 # (details of `numprev`)
325 nump1prev = 0
326 nump2prev = 0
327
328 # data about delta chain of each revs
329 chainlengths = []
330 chainbases = []
331 chainspans = []
332
333 # data about each revision
334 datasize = [None, 0, 0]
335 fullsize = [None, 0, 0]
336 semisize = [None, 0, 0]
337 # snapshot count per depth
338 snapsizedepth = collections.defaultdict(lambda: [None, 0, 0])
339 deltasize = [None, 0, 0]
340 chunktypecounts = {}
341 chunktypesizes = {}
342
343 def addsize(size, l):
344 if l[0] is None or size < l[0]:
345 l[0] = size
346 if size > l[1]:
347 l[1] = size
348 l[2] += size
349
350 numrevs = len(r)
351 for rev in range(numrevs):
352 p1, p2 = r.parentrevs(rev)
353 delta = r.deltaparent(rev)
354 if format > 0:
355 s = r.rawsize(rev)
356 full_text_total_size += s
357 addsize(s, datasize)
358 if p2 != nodemod.nullrev:
359 nummerges += 1
360 size = r.length(rev)
361 if delta == nodemod.nullrev:
362 chainlengths.append(0)
363 chainbases.append(r.start(rev))
364 chainspans.append(size)
365 if size == 0:
366 numempty += 1
367 numemptytext += 1
368 else:
369 numfull += 1
370 numsnapdepth[0] += 1
371 addsize(size, fullsize)
372 addsize(size, snapsizedepth[0])
373 else:
374 nad = (
375 delta != p1 and delta != p2 and not r.isancestorrev(delta, rev)
376 )
377 chainlengths.append(chainlengths[delta] + 1)
378 baseaddr = chainbases[delta]
379 revaddr = r.start(rev)
380 chainbases.append(baseaddr)
381 chainspans.append((revaddr - baseaddr) + size)
382 if size == 0:
383 numempty += 1
384 numemptydelta += 1
385 elif r.issnapshot(rev):
386 addsize(size, semisize)
387 numsemi += 1
388 depth = r.snapshotdepth(rev)
389 numsnapdepth[depth] += 1
390 if nad:
391 numsnapdepth_nad[depth] += 1
392 addsize(size, snapsizedepth[depth])
393 else:
394 addsize(size, deltasize)
395 if delta == rev - 1:
396 numprev += 1
397 if delta == p1:
398 nump1prev += 1
399 elif delta == p2:
400 nump2prev += 1
401 elif nad:
402 numprev_nad += 1
403 elif delta == p1:
404 nump1 += 1
405 elif delta == p2:
406 nump2 += 1
407 elif delta != nodemod.nullrev:
408 numother += 1
409 numother_nad += 1
410
411 # Obtain data on the raw chunks in the revlog.
412 if util.safehasattr(r, '_getsegmentforrevs'):
413 segment = r._getsegmentforrevs(rev, rev)[1]
414 else:
415 segment = r._revlog._getsegmentforrevs(rev, rev)[1]
416 if segment:
417 chunktype = bytes(segment[0:1])
418 else:
419 chunktype = b'empty'
420
421 if chunktype not in chunktypecounts:
422 chunktypecounts[chunktype] = 0
423 chunktypesizes[chunktype] = 0
424
425 chunktypecounts[chunktype] += 1
426 chunktypesizes[chunktype] += size
427
428 # Adjust size min value for empty cases
429 for size in (datasize, fullsize, semisize, deltasize):
430 if size[0] is None:
431 size[0] = 0
432
433 numdeltas = numrevs - numfull - numempty - numsemi
434 numoprev = numprev - nump1prev - nump2prev - numprev_nad
435 num_other_ancestors = numother - numother_nad
436 totalrawsize = datasize[2]
437 datasize[2] /= numrevs
438 fulltotal = fullsize[2]
439 if numfull == 0:
440 fullsize[2] = 0
441 else:
442 fullsize[2] /= numfull
443 semitotal = semisize[2]
444 snaptotal = {}
445 if numsemi > 0:
446 semisize[2] /= numsemi
447 for depth in snapsizedepth:
448 snaptotal[depth] = snapsizedepth[depth][2]
449 snapsizedepth[depth][2] /= numsnapdepth[depth]
450
451 deltatotal = deltasize[2]
452 if numdeltas > 0:
453 deltasize[2] /= numdeltas
454 totalsize = fulltotal + semitotal + deltatotal
455 avgchainlen = sum(chainlengths) / numrevs
456 maxchainlen = max(chainlengths)
457 maxchainspan = max(chainspans)
458 compratio = 1
459 if totalsize:
460 compratio = totalrawsize / totalsize
461
462 basedfmtstr = b'%%%dd\n'
463 basepcfmtstr = b'%%%dd %s(%%5.2f%%%%)\n'
464
465 def dfmtstr(max):
466 return basedfmtstr % len(str(max))
467
468 def pcfmtstr(max, padding=0):
469 return basepcfmtstr % (len(str(max)), b' ' * padding)
470
471 def pcfmt(value, total):
472 if total:
473 return (value, 100 * float(value) / total)
474 else:
475 return value, 100.0
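# e.g. pcfmt(5, 20) -> (5, 25.0); a zero total degrades to
# (value, 100.0) so the percentage columns never divide by zero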
476
477 ui.writenoi18n(b'format : %d\n' % format)
478 ui.writenoi18n(b'flags : %s\n' % b', '.join(flags))
479
480 ui.write(b'\n')
481 fmt = pcfmtstr(totalsize)
482 fmt2 = dfmtstr(totalsize)
483 ui.writenoi18n(b'revisions : ' + fmt2 % numrevs)
484 ui.writenoi18n(b' merges : ' + fmt % pcfmt(nummerges, numrevs))
485 ui.writenoi18n(
486 b' normal : ' + fmt % pcfmt(numrevs - nummerges, numrevs)
487 )
488 ui.writenoi18n(b'revisions : ' + fmt2 % numrevs)
489 ui.writenoi18n(b' empty : ' + fmt % pcfmt(numempty, numrevs))
490 ui.writenoi18n(
491 b' text : '
492 + fmt % pcfmt(numemptytext, numemptytext + numemptydelta)
493 )
494 ui.writenoi18n(
495 b' delta : '
496 + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta)
497 )
498 ui.writenoi18n(
499 b' snapshot : ' + fmt % pcfmt(numfull + numsemi, numrevs)
500 )
501 for depth in sorted(numsnapdepth):
502 base = b' lvl-%-3d : ' % depth
503 count = fmt % pcfmt(numsnapdepth[depth], numrevs)
504 pieces = [base, count]
505 if numsnapdepth_nad[depth]:
506 pieces[-1] = count = count[:-1] # drop the final '\n'
507 more = b' non-ancestor-bases: '
508 anc_count = fmt
509 anc_count %= pcfmt(numsnapdepth_nad[depth], numsnapdepth[depth])
510 pieces.append(more)
511 pieces.append(anc_count)
512 ui.write(b''.join(pieces))
513 ui.writenoi18n(b' deltas : ' + fmt % pcfmt(numdeltas, numrevs))
514 ui.writenoi18n(b'revision size : ' + fmt2 % totalsize)
515 ui.writenoi18n(
516 b' snapshot : ' + fmt % pcfmt(fulltotal + semitotal, totalsize)
517 )
518 for depth in sorted(numsnapdepth):
519 ui.write(
520 (b' lvl-%-3d : ' % depth)
521 + fmt % pcfmt(snaptotal[depth], totalsize)
522 )
523 ui.writenoi18n(b' deltas : ' + fmt % pcfmt(deltatotal, totalsize))
524
525 letters = string.ascii_letters.encode('ascii')
526
527 def fmtchunktype(chunktype):
528 if chunktype == b'empty':
529 return b' %s : ' % chunktype
530 elif chunktype in letters:
531 return b' 0x%s (%s) : ' % (nodemod.hex(chunktype), chunktype)
532 else:
533 return b' 0x%s : ' % nodemod.hex(chunktype)
534
535 ui.write(b'\n')
536 ui.writenoi18n(b'chunks : ' + fmt2 % numrevs)
537 for chunktype in sorted(chunktypecounts):
538 ui.write(fmtchunktype(chunktype))
539 ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
540 ui.writenoi18n(b'chunks size : ' + fmt2 % totalsize)
541 for chunktype in sorted(chunktypecounts):
542 ui.write(fmtchunktype(chunktype))
543 ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))
544
545 ui.write(b'\n')
546 b_total = b"%d" % full_text_total_size
547 p_total = []
548 while len(b_total) > 3:
549 p_total.append(b_total[-3:])
550 b_total = b_total[:-3]
551 p_total.append(b_total)
552 p_total.reverse()
553 b_total = b' '.join(p_total)
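# e.g. full_text_total_size = 1234567 renders as b'1 234 567'
# (digits grouped in threes from the right, space-separated)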
554
555 ui.write(b'\n')
556 ui.writenoi18n(b'total-stored-content: %s bytes\n' % b_total)
557 ui.write(b'\n')
558 fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio))
559 ui.writenoi18n(b'avg chain length : ' + fmt % avgchainlen)
560 ui.writenoi18n(b'max chain length : ' + fmt % maxchainlen)
561 ui.writenoi18n(b'max chain reach : ' + fmt % maxchainspan)
562 ui.writenoi18n(b'compression ratio : ' + fmt % compratio)
563
564 if format > 0:
565 ui.write(b'\n')
566 ui.writenoi18n(
567 b'uncompressed data size (min/max/avg) : %d / %d / %d\n'
568 % tuple(datasize)
569 )
570 ui.writenoi18n(
571 b'full revision size (min/max/avg) : %d / %d / %d\n'
572 % tuple(fullsize)
573 )
574 ui.writenoi18n(
575 b'inter-snapshot size (min/max/avg) : %d / %d / %d\n'
576 % tuple(semisize)
577 )
578 for depth in sorted(snapsizedepth):
579 if depth == 0:
580 continue
581 ui.writenoi18n(
582 b' level-%-3d (min/max/avg) : %d / %d / %d\n'
583 % ((depth,) + tuple(snapsizedepth[depth]))
584 )
585 ui.writenoi18n(
586 b'delta size (min/max/avg) : %d / %d / %d\n'
587 % tuple(deltasize)
588 )
589
590 if numdeltas > 0:
591 ui.write(b'\n')
592 fmt = pcfmtstr(numdeltas)
593 fmt2 = pcfmtstr(numdeltas, 4)
594 ui.writenoi18n(
595 b'deltas against prev : ' + fmt % pcfmt(numprev, numdeltas)
596 )
597 if numprev > 0:
598 ui.writenoi18n(
599 b' where prev = p1 : ' + fmt2 % pcfmt(nump1prev, numprev)
600 )
601 ui.writenoi18n(
602 b' where prev = p2 : ' + fmt2 % pcfmt(nump2prev, numprev)
603 )
604 ui.writenoi18n(
605 b' other-ancestor : ' + fmt2 % pcfmt(numoprev, numprev)
606 )
607 ui.writenoi18n(
608 b' unrelated : ' + fmt2 % pcfmt(numprev_nad, numprev)
609 )
610 if gdelta:
611 ui.writenoi18n(
612 b'deltas against p1 : ' + fmt % pcfmt(nump1, numdeltas)
613 )
614 ui.writenoi18n(
615 b'deltas against p2 : ' + fmt % pcfmt(nump2, numdeltas)
616 )
617 ui.writenoi18n(
618 b'deltas against ancs : '
619 + fmt % pcfmt(num_other_ancestors, numdeltas)
620 )
621 ui.writenoi18n(
622 b'deltas against other : '
623 + fmt % pcfmt(numother_nad, numdeltas)
624 )
625
626
627 def debug_delta_find(ui, revlog, rev, base_rev=nodemod.nullrev):
628 """display the search process for a delta"""
629 deltacomputer = deltautil.deltacomputer(
630 revlog,
631 write_debug=ui.write,
632 debug_search=not ui.quiet,
633 )
634
635 node = revlog.node(rev)
636 p1r, p2r = revlog.parentrevs(rev)
637 p1 = revlog.node(p1r)
638 p2 = revlog.node(p2r)
639 full_text = revlog.revision(rev)
640 btext = [full_text]
641 textlen = len(btext[0])
642 cachedelta = None
643 flags = revlog.flags(rev)
644
645 if base_rev != nodemod.nullrev:
646 base_text = revlog.revision(base_rev)
647 delta = mdiff.textdiff(base_text, full_text)
648
649 cachedelta = (base_rev, delta, constants.DELTA_BASE_REUSE_TRY)
650 btext = [None]
651
652 revinfo = revlogutils.revisioninfo(
653 node,
654 p1,
655 p2,
656 btext,
657 textlen,
658 cachedelta,
659 flags,
660 )
661
662 fh = revlog._datafp()
663 deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)
664
665
666 def _get_revlogs(repo, changelog: bool, manifest: bool, filelogs: bool):
667 """yield revlogs from this repository"""
668 if changelog:
669 yield repo.changelog
670
671 if manifest:
672 # XXX: Handle tree manifest
673 root_mf = repo.manifestlog.getstorage(b'')
674 assert not root_mf._treeondisk
675 yield root_mf._revlog
676
677 if filelogs:
678 files = set()
679 for rev in repo:
680 ctx = repo[rev]
681 files |= set(ctx.files())
682
683 for f in sorted(files):
684 yield repo.file(f)._revlog
685
686
687 def debug_revlog_stats(
688 repo, fm, changelog: bool, manifest: bool, filelogs: bool
689 ):
690 """Format revlog statistics for debugging purposes
691
692 fm: the output formatter.
693 """
694 fm.plain(b'rev-count data-size inl type target \n')
695
696 for rlog in _get_revlogs(repo, changelog, manifest, filelogs):
697 fm.startitem()
698 nb_rev = len(rlog)
699 inline = rlog._inline
700 data_size = rlog._get_data_offset(nb_rev - 1)
701
702 target = rlog.target
703 revlog_type = b'unknown'
704 revlog_target = b''
705 if target[0] == constants.KIND_CHANGELOG:
706 revlog_type = b'changelog'
707 elif target[0] == constants.KIND_MANIFESTLOG:
708 revlog_type = b'manifest'
709 revlog_target = target[1]
710 elif target[0] == constants.KIND_FILELOG:
711 revlog_type = b'file'
712 revlog_target = target[1]
713
714 fm.write(b'revlog.rev-count', b'%9d', nb_rev)
715 fm.write(b'revlog.data-size', b'%12d', data_size)
716
717 fm.write(b'revlog.inline', b' %-3s', b'yes' if inline else b'no')
718 fm.write(b'revlog.type', b' %-9s', revlog_type)
719 fm.write(b'revlog.target', b' %s', revlog_target)
720
721 fm.plain(b'\n')
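A hedged example of the table debug_revlog_stats() emits; padding approximate and values hypothetical:

# rev-count data-size inl type      target
#      1024     52817 no  changelog
#       512      9211 yes file      foo/bar.txt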
@@ -20,6 +20,8 from .constants import (
20 20 COMP_MODE_DEFAULT,
21 21 COMP_MODE_INLINE,
22 22 COMP_MODE_PLAIN,
23 DELTA_BASE_REUSE_FORCE,
24 DELTA_BASE_REUSE_NO,
23 25 KIND_CHANGELOG,
24 26 KIND_FILELOG,
25 27 KIND_MANIFESTLOG,
@@ -576,13 +578,20 def drop_u_compression(delta):
576 578 )
577 579
578 580
579 def isgooddeltainfo(revlog, deltainfo, revinfo):
581 def is_good_delta_info(revlog, deltainfo, revinfo):
580 582 """Returns True if the given delta is good. Good means that it is within
581 583 the disk span, disk size, and chain length bounds that we know to be
582 584 performant."""
583 585 if deltainfo is None:
584 586 return False
585 587
588 if (
589 revinfo.cachedelta is not None
590 and deltainfo.base == revinfo.cachedelta[0]
591 and revinfo.cachedelta[2] == DELTA_BASE_REUSE_FORCE
592 ):
593 return True
594
586 595 # - 'deltainfo.distance' is the distance from the base revision --
587 596 # bounding it limits the amount of I/O we need to do.
588 597 # - 'deltainfo.compresseddeltalen' is the sum of the total size of
@@ -655,7 +664,16 def isgooddeltainfo(revlog, deltainfo, r
655 664 LIMIT_BASE2TEXT = 500
656 665
657 666
658 def _candidategroups(revlog, textlen, p1, p2, cachedelta):
667 def _candidategroups(
668 revlog,
669 textlen,
670 p1,
671 p2,
672 cachedelta,
673 excluded_bases=None,
674 target_rev=None,
675 snapshot_cache=None,
676 ):
659 677 """Provides group of revision to be tested as delta base
660 678
661 679 This top level function focuses on emitting groups with unique and worthwhile
@@ -666,15 +684,31 def _candidategroups(revlog, textlen, p1
666 684 yield None
667 685 return
668 686
687 if (
688 cachedelta is not None
689 and nullrev == cachedelta[0]
690 and cachedelta[2] == DELTA_BASE_REUSE_FORCE
691 ):
692 # instructions are to forcibly do a full snapshot
693 yield None
694 return
695
669 696 deltalength = revlog.length
670 697 deltaparent = revlog.deltaparent
671 698 sparse = revlog._sparserevlog
672 699 good = None
673 700
674 701 deltas_limit = textlen * LIMIT_DELTA2TEXT
702 group_chunk_size = revlog._candidate_group_chunk_size
675 703
676 704 tested = {nullrev}
677 candidates = _refinedgroups(revlog, p1, p2, cachedelta)
705 candidates = _refinedgroups(
706 revlog,
707 p1,
708 p2,
709 cachedelta,
710 snapshot_cache=snapshot_cache,
711 )
678 712 while True:
679 713 temptative = candidates.send(good)
680 714 if temptative is None:
@@ -694,15 +728,37 def _candidategroups(revlog, textlen, p1
694 728 # filter out revision we tested already
695 729 if rev in tested:
696 730 continue
697 tested.add(rev)
731
732 if (
733 cachedelta is not None
734 and rev == cachedelta[0]
735 and cachedelta[2] == DELTA_BASE_REUSE_FORCE
736 ):
737 # instructions are to forcibly consider/use this delta base
738 group.append(rev)
739 continue
740
741 # a higher authority deemed the base unworthy (e.g. censored)
742 if excluded_bases is not None and rev in excluded_bases:
743 tested.add(rev)
744 continue
745 # We are in some recomputation cases and that rev is too high in
746 # the revlog
747 if target_rev is not None and rev >= target_rev:
748 tested.add(rev)
749 continue
698 750 # filter out delta base that will never produce good delta
699 751 if deltas_limit < revlog.length(rev):
752 tested.add(rev)
700 753 continue
701 754 if sparse and revlog.rawsize(rev) < (textlen // LIMIT_BASE2TEXT):
755 tested.add(rev)
702 756 continue
703 757 # no delta for rawtext-changing revs (see "candelta" for why)
704 758 if revlog.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS:
759 tested.add(rev)
705 760 continue
761
706 762 # If we reach here, we are about to build and test a delta.
707 763 # The delta building process will compute the chaininfo in all
708 764 # cases; since that computation is cached, it is fine to access it
@@ -710,9 +766,11 def _candidategroups(revlog, textlen, p1
710 766 chainlen, chainsize = revlog._chaininfo(rev)
711 767 # if chain will be too long, skip base
712 768 if revlog._maxchainlen and chainlen >= revlog._maxchainlen:
769 tested.add(rev)
713 770 continue
714 771 # if chain already have too much data, skip base
715 772 if deltas_limit < chainsize:
773 tested.add(rev)
716 774 continue
717 775 if sparse and revlog.upperboundcomp is not None:
718 776 maxcomp = revlog.upperboundcomp
@@ -731,36 +789,46 def _candidategroups(revlog, textlen, p1
731 789 snapshotlimit = textlen >> snapshotdepth
732 790 if snapshotlimit < lowestrealisticdeltalen:
733 791 # delta lower bound is larger than accepted upper bound
792 tested.add(rev)
734 793 continue
735 794
736 795 # check the relative constraint on the delta size
737 796 revlength = revlog.length(rev)
738 797 if revlength < lowestrealisticdeltalen:
739 798 # delta probable lower bound is larger than target base
799 tested.add(rev)
740 800 continue
741 801
742 802 group.append(rev)
743 803 if group:
744 # XXX: in the sparse revlog case, group can become large,
745 # impacting performances. Some bounding or slicing mecanism
746 # would help to reduce this impact.
747 good = yield tuple(group)
804 # When the size of the candidate group is big, it can result in a
805 # quite significant performance impact. To reduce this, we can send
806 # them in smaller batches until the new batch does not provide any
807 # improvements.
808 #
809 # This might reduce the overall efficiency of the compression in
810 # some corner cases, but that should also prevent very pathological
811 # cases from being an issue. (eg. 20 000 candidates).
812 #
813 # XXX note that the ordering of the group becomes important as it
814 # now impacts the final result. The current order is unprocessed
815 # and can be improved.
816 if group_chunk_size == 0:
817 tested.update(group)
818 good = yield tuple(group)
819 else:
820 prev_good = good
821 for start in range(0, len(group), group_chunk_size):
822 sub_group = group[start : start + group_chunk_size]
823 tested.update(sub_group)
824 good = yield tuple(sub_group)
825 if prev_good == good:
826 break
827
748 828 yield None
749 829
750 830
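The chunked groups above are consumed through the generator send() protocol: the caller primes the generator with next(), then feeds the best base found so far back into each batch. A consumer-side sketch, where build_and_test is a hypothetical acceptance check:

    def search(groups, build_and_test):
        good = None
        candidates = next(groups)  # prime the generator
        while candidates is not None:
            for rev in candidates:
                if build_and_test(rev):
                    good = rev
            candidates = groups.send(good)  # becomes None when exhausted
        return good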
751 def _findsnapshots(revlog, cache, start_rev):
752 """find snapshot from start_rev to tip"""
753 if util.safehasattr(revlog.index, b'findsnapshots'):
754 revlog.index.findsnapshots(cache, start_rev)
755 else:
756 deltaparent = revlog.deltaparent
757 issnapshot = revlog.issnapshot
758 for rev in revlog.revs(start_rev):
759 if issnapshot(rev):
760 cache[deltaparent(rev)].append(rev)
761
762
763 def _refinedgroups(revlog, p1, p2, cachedelta):
831 def _refinedgroups(revlog, p1, p2, cachedelta, snapshot_cache=None):
764 832 good = None
765 833 # First we try to reuse the delta contained in the bundle.
766 834 # (or from the source revlog)
@@ -768,15 +836,28 def _refinedgroups(revlog, p1, p2, cache
768 836 # This logic only applies to general delta repositories and can be disabled
769 837 # through configuration. Disabling reuse source delta is useful when
770 838 # we want to make sure we recomputed "optimal" deltas.
771 if cachedelta and revlog._generaldelta and revlog._lazydeltabase:
839 debug_info = None
840 if cachedelta is not None and cachedelta[2] > DELTA_BASE_REUSE_NO:
772 841 # Assume what we received from the server is a good choice
773 842 # build delta will reuse the cache
843 if debug_info is not None:
844 debug_info['cached-delta.tested'] += 1
774 845 good = yield (cachedelta[0],)
775 846 if good is not None:
847 if debug_info is not None:
848 debug_info['cached-delta.accepted'] += 1
776 849 yield None
777 850 return
778 snapshots = collections.defaultdict(list)
779 for candidates in _rawgroups(revlog, p1, p2, cachedelta, snapshots):
851 if snapshot_cache is None:
852 snapshot_cache = SnapshotCache()
853 groups = _rawgroups(
854 revlog,
855 p1,
856 p2,
857 cachedelta,
858 snapshot_cache,
859 )
860 for candidates in groups:
780 861 good = yield candidates
781 862 if good is not None:
782 863 break
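Judging from the indexing above, the cached delta now travels as a 3-tuple whose third element is a reuse policy. The constant values in this sketch are assumptions for illustration only:

    DELTA_BASE_REUSE_NO = 0     # never trust the incoming base
    DELTA_BASE_REUSE_TRY = 1    # test it like any other candidate
    DELTA_BASE_REUSE_FORCE = 2  # use it unconditionally

    def cached_base_candidate(cachedelta):
        """Return the base rev worth trying first, or None (a sketch)."""
        if cachedelta is None:
            return None
        base, delta, reuse_policy = cachedelta
        if reuse_policy > DELTA_BASE_REUSE_NO:
            return base
        return None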
@@ -797,19 +878,22 def _refinedgroups(revlog, p1, p2, cache
797 878 break
798 879 good = yield (base,)
799 880 # refine snapshot up
800 if not snapshots:
801 _findsnapshots(revlog, snapshots, good + 1)
881 if not snapshot_cache.snapshots:
882 snapshot_cache.update(revlog, good + 1)
802 883 previous = None
803 884 while good != previous:
804 885 previous = good
805 children = tuple(sorted(c for c in snapshots[good]))
886 children = tuple(sorted(c for c in snapshot_cache.snapshots[good]))
806 887 good = yield children
807 888
808 # we have found nothing
889 if debug_info is not None:
890 if good is None:
891 debug_info['no-solution'] += 1
892
809 893 yield None
810 894
811 895
812 def _rawgroups(revlog, p1, p2, cachedelta, snapshots=None):
896 def _rawgroups(revlog, p1, p2, cachedelta, snapshot_cache=None):
813 897 """Provides group of revision to be tested as delta base
814 898
815 899 This lower level function focus on emitting delta theorically interresting
@@ -840,9 +924,9 def _rawgroups(revlog, p1, p2, cachedelt
840 924 yield parents
841 925
842 926 if sparse and parents:
843 if snapshots is None:
844 # map: base-rev: snapshot-rev
845 snapshots = collections.defaultdict(list)
927 if snapshot_cache is None:
928 # map: base-rev: [snapshot-revs]
929 snapshot_cache = SnapshotCache()
846 930 # See if we can use an existing snapshot in the parent chains as a
847 931 # base for a new intermediate snapshot
848 932 #
@@ -856,7 +940,7 def _rawgroups(revlog, p1, p2, cachedelt
856 940 break
857 941 parents_snaps[idx].add(s)
858 942 snapfloor = min(parents_snaps[0]) + 1
859 _findsnapshots(revlog, snapshots, snapfloor)
943 snapshot_cache.update(revlog, snapfloor)
860 944 # search for the highest "unrelated" revision
861 945 #
862 946 # Adding snapshots used by "unrelated" revisions increases the odds we
@@ -879,14 +963,14 def _rawgroups(revlog, p1, p2, cachedelt
879 963 # chain.
880 964 max_depth = max(parents_snaps.keys())
881 965 chain = deltachain(other)
882 for idx, s in enumerate(chain):
966 for depth, s in enumerate(chain):
883 967 if s < snapfloor:
884 968 continue
885 if max_depth < idx:
969 if max_depth < depth:
886 970 break
887 971 if not revlog.issnapshot(s):
888 972 break
889 parents_snaps[idx].add(s)
973 parents_snaps[depth].add(s)
890 974 # Test them as possible intermediate snapshot base
891 975 # We test them from highest to lowest level. High level one are more
892 976 # likely to result in small delta
@@ -894,7 +978,7 def _rawgroups(revlog, p1, p2, cachedelt
894 978 for idx, snaps in sorted(parents_snaps.items(), reverse=True):
895 979 siblings = set()
896 980 for s in snaps:
897 siblings.update(snapshots[s])
981 siblings.update(snapshot_cache.snapshots[s])
898 982 # Before considering making a new intermediate snapshot, we check
899 983 # if an existing snapshot, a child of the base we consider, would be
900 984 # suitable.
@@ -922,7 +1006,8 def _rawgroups(revlog, p1, p2, cachedelt
922 1006 # revisions instead of starting our own. Without such re-use,
923 1007 # topological branches would keep reopening new full chains, creating
924 1008 # more and more snapshots as the repository grows.
925 yield tuple(snapshots[nullrev])
1009 full = [r for r in snapshot_cache.snapshots[nullrev] if snapfloor <= r]
1010 yield tuple(sorted(full))
926 1011
927 1012 if not sparse:
928 1013 # other approach failed try against prev to hopefully save us a
@@ -930,11 +1015,74 def _rawgroups(revlog, p1, p2, cachedelt
930 1015 yield (prev,)
931 1016
932 1017
1018 class SnapshotCache:
1019 __slots__ = ('snapshots', '_start_rev', '_end_rev')
1020
1021 def __init__(self):
1022 self.snapshots = collections.defaultdict(set)
1023 self._start_rev = None
1024 self._end_rev = None
1025
1026 def update(self, revlog, start_rev=0):
1027 """find snapshots from start_rev to tip"""
1028 nb_revs = len(revlog)
1029 end_rev = nb_revs - 1
1030 if start_rev > end_rev:
1031 return # range is empty
1032
1033 if self._start_rev is None:
1034 assert self._end_rev is None
1035 self._update(revlog, start_rev, end_rev)
1036 elif not (self._start_rev <= start_rev and end_rev <= self._end_rev):
1037 if start_rev < self._start_rev:
1038 self._update(revlog, start_rev, self._start_rev - 1)
1039 if self._end_rev < end_rev:
1040 self._update(revlog, self._end_rev + 1, end_rev)
1041
1042 if self._start_rev is None:
1043 assert self._end_rev is None
1044 self._end_rev = end_rev
1045 self._start_rev = start_rev
1046 else:
1047 self._start_rev = min(self._start_rev, start_rev)
1048 self._end_rev = max(self._end_rev, end_rev)
1049 assert self._start_rev <= self._end_rev, (
1050 self._start_rev,
1051 self._end_rev,
1052 )
1053
1054 def _update(self, revlog, start_rev, end_rev):
1055 """internal method that actually do update content"""
1056 assert self._start_rev is None or (
1057 start_rev < self._start_rev or start_rev > self._end_rev
1058 ), (self._start_rev, self._end_rev, start_rev, end_rev)
1059 assert self._start_rev is None or (
1060 end_rev < self._start_rev or end_rev > self._end_rev
1061 ), (self._start_rev, self._end_rev, start_rev, end_rev)
1062 cache = self.snapshots
1063 if util.safehasattr(revlog.index, b'findsnapshots'):
1064 revlog.index.findsnapshots(cache, start_rev, end_rev)
1065 else:
1066 deltaparent = revlog.deltaparent
1067 issnapshot = revlog.issnapshot
1068 for rev in revlog.revs(start_rev, end_rev):
1069 if issnapshot(rev):
1070 cache[deltaparent(rev)].add(rev)
1071
1072
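A short usage sketch of the class above; revlog stands for any object answering the queries made in _update, and the point is that repeated update() calls only scan ranges that are not already covered:

    cache = SnapshotCache()
    cache.update(revlog)       # first call scans rev 0..tip
    cache.update(revlog, 100)  # no-op if 100..tip is already covered
    # snapshots maps a delta-parent rev to the snapshot revs built on it;
    # nullrev (-1) keys the full snapshots.
    full_snapshots = cache.snapshots[-1]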
933 1073 class deltacomputer:
934 def __init__(self, revlog, write_debug=None, debug_search=False):
1074 def __init__(
1075 self,
1076 revlog,
1077 write_debug=None,
1078 debug_search=False,
1079 debug_info=None,
1080 ):
935 1081 self.revlog = revlog
936 1082 self._write_debug = write_debug
937 1083 self._debug_search = debug_search
1084 self._debug_info = debug_info
1085 self._snapshot_cache = SnapshotCache()
938 1086
939 1087 def buildtext(self, revinfo, fh):
940 1088 """Builds a fulltext version of a revision
@@ -998,7 +1146,7 class deltacomputer:
998 1146 snapshotdepth = len(revlog._deltachain(deltabase)[0])
999 1147 delta = None
1000 1148 if revinfo.cachedelta:
1001 cachebase, cachediff = revinfo.cachedelta
1149 cachebase = revinfo.cachedelta[0]
1002 1150 # check if the diff still apply
1003 1151 currentbase = cachebase
1004 1152 while (
@@ -1103,11 +1251,14 class deltacomputer:
1103 1251 if revinfo.flags & REVIDX_RAWTEXT_CHANGING_FLAGS:
1104 1252 return self._fullsnapshotinfo(fh, revinfo, target_rev)
1105 1253
1106 if self._write_debug is not None:
1254 gather_debug = (
1255 self._write_debug is not None or self._debug_info is not None
1256 )
1257 debug_search = self._write_debug is not None and self._debug_search
1258
1259 if gather_debug:
1107 1260 start = util.timer()
1108 1261
1109 debug_search = self._write_debug is not None and self._debug_search
1110
1111 1262 # count the number of different delta we tried (for debug purpose)
1112 1263 dbg_try_count = 0
1113 1264 # count the number of "search round" we did. (for debug purpose)
@@ -1122,7 +1273,7 class deltacomputer:
1122 1273 deltainfo = None
1123 1274 p1r, p2r = revlog.rev(p1), revlog.rev(p2)
1124 1275
1125 if self._write_debug is not None:
1276 if gather_debug:
1126 1277 if p1r != nullrev:
1127 1278 p1_chain_len = revlog._chaininfo(p1r)[0]
1128 1279 else:
@@ -1137,7 +1288,14 class deltacomputer:
1137 1288 self._write_debug(msg)
1138 1289
1139 1290 groups = _candidategroups(
1140 self.revlog, revinfo.textlen, p1r, p2r, cachedelta
1291 self.revlog,
1292 revinfo.textlen,
1293 p1r,
1294 p2r,
1295 cachedelta,
1296 excluded_bases,
1297 target_rev,
1298 snapshot_cache=self._snapshot_cache,
1141 1299 )
1142 1300 candidaterevs = next(groups)
1143 1301 while candidaterevs is not None:
@@ -1147,7 +1305,13 class deltacomputer:
1147 1305 if deltainfo is not None:
1148 1306 prev = deltainfo.base
1149 1307
1150 if p1 in candidaterevs or p2 in candidaterevs:
1308 if (
1309 cachedelta is not None
1310 and len(candidaterevs) == 1
1311 and cachedelta[0] in candidaterevs
1312 ):
1313 round_type = b"cached-delta"
1314 elif p1 in candidaterevs or p2 in candidaterevs:
1151 1315 round_type = b"parents"
1152 1316 elif prev is not None and all(c < prev for c in candidaterevs):
1153 1317 round_type = b"refine-down"
@@ -1195,16 +1359,7 class deltacomputer:
1195 1359 msg = b"DBG-DELTAS-SEARCH: base=%d\n"
1196 1360 msg %= self.revlog.deltaparent(candidaterev)
1197 1361 self._write_debug(msg)
1198 if candidaterev in excluded_bases:
1199 if debug_search:
1200 msg = b"DBG-DELTAS-SEARCH: EXCLUDED\n"
1201 self._write_debug(msg)
1202 continue
1203 if candidaterev >= target_rev:
1204 if debug_search:
1205 msg = b"DBG-DELTAS-SEARCH: TOO-HIGH\n"
1206 self._write_debug(msg)
1207 continue
1362
1208 1363 dbg_try_count += 1
1209 1364
1210 1365 if debug_search:
@@ -1216,7 +1371,7 class deltacomputer:
1216 1371 msg %= delta_end - delta_start
1217 1372 self._write_debug(msg)
1218 1373 if candidatedelta is not None:
1219 if isgooddeltainfo(self.revlog, candidatedelta, revinfo):
1374 if is_good_delta_info(self.revlog, candidatedelta, revinfo):
1220 1375 if debug_search:
1221 1376 msg = b"DBG-DELTAS-SEARCH: DELTA: length=%d (GOOD)\n"
1222 1377 msg %= candidatedelta.deltalen
@@ -1244,12 +1399,28 class deltacomputer:
1244 1399 else:
1245 1400 dbg_type = b"delta"
1246 1401
1247 if self._write_debug is not None:
1402 if gather_debug:
1248 1403 end = util.timer()
1404 if dbg_type == b'full':
1405 used_cached = (
1406 cachedelta is not None
1407 and dbg_try_rounds == 0
1408 and dbg_try_count == 0
1409 and cachedelta[0] == nullrev
1410 )
1411 else:
1412 used_cached = (
1413 cachedelta is not None
1414 and dbg_try_rounds == 1
1415 and dbg_try_count == 1
1416 and deltainfo.base == cachedelta[0]
1417 )
1249 1418 dbg = {
1250 1419 'duration': end - start,
1251 1420 'revision': target_rev,
1421 'delta-base': deltainfo.base, # pytype: disable=attribute-error
1252 1422 'search_round_count': dbg_try_rounds,
1423 'using-cached-base': used_cached,
1253 1424 'delta_try_count': dbg_try_count,
1254 1425 'type': dbg_type,
1255 1426 'p1-chain-len': p1_chain_len,
@@ -1279,31 +1450,39 class deltacomputer:
1279 1450 target_revlog += b'%s:' % target_key
1280 1451 dbg['target-revlog'] = target_revlog
1281 1452
1282 msg = (
1283 b"DBG-DELTAS:"
1284 b" %-12s"
1285 b" rev=%d:"
1286 b" search-rounds=%d"
1287 b" try-count=%d"
1288 b" - delta-type=%-6s"
1289 b" snap-depth=%d"
1290 b" - p1-chain-length=%d"
1291 b" p2-chain-length=%d"
1292 b" - duration=%f"
1293 b"\n"
1294 )
1295 msg %= (
1296 dbg["target-revlog"],
1297 dbg["revision"],
1298 dbg["search_round_count"],
1299 dbg["delta_try_count"],
1300 dbg["type"],
1301 dbg["snapshot-depth"],
1302 dbg["p1-chain-len"],
1303 dbg["p2-chain-len"],
1304 dbg["duration"],
1305 )
1306 self._write_debug(msg)
1453 if self._debug_info is not None:
1454 self._debug_info.append(dbg)
1455
1456 if self._write_debug is not None:
1457 msg = (
1458 b"DBG-DELTAS:"
1459 b" %-12s"
1460 b" rev=%d:"
1461 b" delta-base=%d"
1462 b" is-cached=%d"
1463 b" - search-rounds=%d"
1464 b" try-count=%d"
1465 b" - delta-type=%-6s"
1466 b" snap-depth=%d"
1467 b" - p1-chain-length=%d"
1468 b" p2-chain-length=%d"
1469 b" - duration=%f"
1470 b"\n"
1471 )
1472 msg %= (
1473 dbg["target-revlog"],
1474 dbg["revision"],
1475 dbg["delta-base"],
1476 dbg["using-cached-base"],
1477 dbg["search_round_count"],
1478 dbg["delta_try_count"],
1479 dbg["type"],
1480 dbg["snapshot-depth"],
1481 dbg["p1-chain-len"],
1482 dbg["p2-chain-len"],
1483 dbg["duration"],
1484 )
1485 self._write_debug(msg)
1307 1486 return deltainfo
1308 1487
1309 1488
@@ -90,7 +90,7 if stable_docket_file:
90 90 # * 8 bytes: pending size of data
91 91 # * 8 bytes: pending size of sidedata
92 92 # * 1 bytes: default compression header
93 S_HEADER = struct.Struct(constants.INDEX_HEADER_FMT + b'BBBBBBLLLLLLc')
93 S_HEADER = struct.Struct(constants.INDEX_HEADER_FMT + b'BBBBBBQQQQQQc')
94 94 # * 1 bytes: size of index uuid
95 95 # * 8 bytes: size of file
96 96 S_OLD_UID = struct.Struct('>BL')
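The field comments above describe the pending sizes as 8-byte values, which is what the L to Q change restores: in struct notation, L is a 4-byte unsigned int and Q an 8-byte one. Ignoring the INDEX_HEADER_FMT prefix, the difference is easy to check:

    import struct

    struct.calcsize('>BBBBBBLLLLLLc')  # 31: six 4-byte size fields (the bug)
    struct.calcsize('>BBBBBBQQQQQQc')  # 55: six 8-byte fields, as documented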
@@ -1868,13 +1868,12 def outgoing(repo, subset, x):
1868 1868 dests = []
1869 1869 missing = set()
1870 1870 for path in urlutil.get_push_paths(repo, repo.ui, dests):
1871 dest = path.pushloc or path.loc
1872 1871 branches = path.branch, []
1873 1872
1874 1873 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1875 1874 if revs:
1876 1875 revs = [repo.lookup(rev) for rev in revs]
1877 other = hg.peer(repo, {}, dest)
1876 other = hg.peer(repo, {}, path)
1878 1877 try:
1879 1878 with repo.ui.silent():
1880 1879 outgoing = discovery.findcommonoutgoing(
@@ -2130,11 +2129,9 def remote(repo, subset, x):
2130 2129 dest = getstring(l[1], _(b"remote requires a repository path"))
2131 2130 if not dest:
2132 2131 dest = b'default'
2133 dest, branches = urlutil.get_unique_pull_path(
2134 b'remote', repo, repo.ui, dest
2135 )
2136
2137 other = hg.peer(repo, {}, dest)
2132 path = urlutil.get_unique_pull_path_obj(b'remote', repo.ui, dest)
2133
2134 other = hg.peer(repo, {}, path)
2138 2135 n = other.lookup(q)
2139 2136 if n in repo:
2140 2137 r = repo[n].rev()
@@ -4,6 +4,11 import fcntl
4 4 import os
5 5 import sys
6 6
7 from typing import (
8 List,
9 Tuple,
10 )
11
7 12 from .pycompat import getattr
8 13 from . import (
9 14 encoding,
@@ -11,6 +16,9 from . import (
11 16 util,
12 17 )
13 18
19 if pycompat.TYPE_CHECKING:
20 from . import ui as uimod
21
14 22 # BSD 'more' escapes ANSI color sequences by default. This can be disabled by
15 23 # $MORE variable, but there's no compatible option with Linux 'more'. Given
16 24 # OS X is widely used and most modern Unix systems would have 'less', setting
@@ -18,7 +26,7 from . import (
18 26 fallbackpager = b'less'
19 27
20 28
21 def _rcfiles(path):
29 def _rcfiles(path: bytes) -> List[bytes]:
22 30 rcs = [os.path.join(path, b'hgrc')]
23 31 rcdir = os.path.join(path, b'hgrc.d')
24 32 try:
@@ -34,7 +42,7 def _rcfiles(path):
34 42 return rcs
35 43
36 44
37 def systemrcpath():
45 def systemrcpath() -> List[bytes]:
38 46 path = []
39 47 if pycompat.sysplatform == b'plan9':
40 48 root = b'lib/mercurial'
@@ -49,7 +57,7 def systemrcpath():
49 57 return path
50 58
51 59
52 def userrcpath():
60 def userrcpath() -> List[bytes]:
53 61 if pycompat.sysplatform == b'plan9':
54 62 return [encoding.environ[b'home'] + b'/lib/hgrc']
55 63 elif pycompat.isdarwin:
@@ -65,7 +73,7 def userrcpath():
65 73 ]
66 74
67 75
68 def termsize(ui):
76 def termsize(ui: "uimod.ui") -> Tuple[int, int]:
69 77 try:
70 78 import termios
71 79
@@ -88,7 +96,7 def termsize(ui):
88 96 except ValueError:
89 97 pass
90 98 except IOError as e:
91 if e[0] == errno.EINVAL: # pytype: disable=unsupported-operands
99 if e.errno == errno.EINVAL:
92 100 pass
93 101 else:
94 102 raise
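The fix above replaces a Python 2 idiom: IOError could once be indexed for its errno, but on Python 3 IOError is an alias of OSError, which only exposes the errno attribute:

    import errno

    try:
        raise IOError(errno.EINVAL, 'invalid argument')
    except IOError as e:
        assert e.errno == errno.EINVAL  # e[0] raises TypeError on Python 3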
@@ -1219,7 +1219,7 def cleanupnodes(
1219 1219 )
1220 1220
1221 1221
1222 def addremove(repo, matcher, prefix, uipathfn, opts=None):
1222 def addremove(repo, matcher, prefix, uipathfn, opts=None, open_tr=None):
1223 1223 if opts is None:
1224 1224 opts = {}
1225 1225 m = matcher
@@ -1279,7 +1279,9 def addremove(repo, matcher, prefix, uip
1279 1279 repo, m, added + unknown, removed + deleted, similarity, uipathfn
1280 1280 )
1281 1281
1282 if not dry_run:
1282 if not dry_run and (unknown or forgotten or deleted or renames):
1283 if open_tr is not None:
1284 open_tr()
1283 1285 _markchanges(repo, unknown + forgotten, deleted, renames)
1284 1286
1285 1287 for f in rejected:
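The new optional open_tr callback lets a caller defer opening a transaction until addremove() actually finds changes to record. A hypothetical caller-side sketch:

    def run_addremove(repo, matcher, uipathfn, opts):
        tr = None

        def open_tr():
            nonlocal tr
            if tr is None:  # only pay for a transaction when needed
                tr = repo.transaction(b'addremove')

        scmutil.addremove(repo, matcher, b'', uipathfn, opts, open_tr=open_tr)
        return tr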
@@ -1863,7 +1865,12 def gdinitconfig(ui):
1863 1865
1864 1866
1865 1867 def gddeltaconfig(ui):
1866 """helper function to know if incoming delta should be optimised"""
1868 """helper function to know if incoming deltas should be optimized
1869
1870 The `format.generaldelta` config is an old form of the config that also
1871 implies that incoming delta-bases should never be trusted. This function
1872 exists for this purpose.
1873 """
1867 1874 # experimental config: format.generaldelta
1868 1875 return ui.configbool(b'format', b'generaldelta')
1869 1876
@@ -1,4 +1,10
1 1 import os
2 import winreg # pytype: disable=import-error
3
4 from typing import (
5 List,
6 Tuple,
7 )
2 8
3 9 from . import (
4 10 encoding,
@@ -7,19 +13,14 from . import (
7 13 win32,
8 14 )
9 15
10 try:
11 import _winreg as winreg # pytype: disable=import-error
12
13 winreg.CloseKey
14 except ImportError:
15 # py2 only
16 import winreg # pytype: disable=import-error
16 if pycompat.TYPE_CHECKING:
17 from . import ui as uimod
17 18
18 19 # MS-DOS 'more' is the only pager available by default on Windows.
19 20 fallbackpager = b'more'
20 21
21 22
22 def systemrcpath():
23 def systemrcpath() -> List[bytes]:
23 24 '''return default os-specific hgrc search path'''
24 25 rcpath = []
25 26 filename = win32.executablepath()
@@ -27,7 +28,7 def systemrcpath():
27 28 progrc = os.path.join(os.path.dirname(filename), b'mercurial.ini')
28 29 rcpath.append(progrc)
29 30
30 def _processdir(progrcd):
31 def _processdir(progrcd: bytes) -> None:
31 32 if os.path.isdir(progrcd):
32 33 for f, kind in sorted(util.listdir(progrcd)):
33 34 if f.endswith(b'.rc'):
@@ -68,7 +69,7 def systemrcpath():
68 69 return rcpath
69 70
70 71
71 def userrcpath():
72 def userrcpath() -> List[bytes]:
72 73 '''return os-specific hgrc search path to the user dir'''
73 74 home = _legacy_expanduser(b'~')
74 75 path = [os.path.join(home, b'mercurial.ini'), os.path.join(home, b'.hgrc')]
@@ -79,7 +80,7 def userrcpath():
79 80 return path
80 81
81 82
82 def _legacy_expanduser(path):
83 def _legacy_expanduser(path: bytes) -> bytes:
83 84 """Expand ~ and ~user constructs in the pre 3.8 style"""
84 85
85 86 # Python 3.8+ changed the expansion of '~' from HOME to USERPROFILE. See
@@ -111,5 +112,5 def _legacy_expanduser(path):
111 112 return userhome + path[i:]
112 113
113 114
114 def termsize(ui):
115 def termsize(ui: "uimod.ui") -> Tuple[int, int]:
115 116 return win32.termsize()
@@ -247,6 +247,14 class Shelf:
247 247 for ext in shelvefileextensions:
248 248 self.vfs.tryunlink(self.name + b'.' + ext)
249 249
250 def changed_files(self, ui, repo):
251 try:
252 ctx = repo.unfiltered()[self.readinfo()[b'node']]
253 return ctx.files()
254 except (FileNotFoundError, error.RepoLookupError):
255 filename = self.vfs.join(self.name + b'.patch')
256 return patch.changedfiles(ui, repo, filename)
257
250 258
251 259 def _optimized_match(repo, node):
252 260 """
@@ -424,10 +432,26 def _restoreactivebookmark(repo, mark):
424 432
425 433 def _aborttransaction(repo, tr):
426 434 """Abort current transaction for shelve/unshelve, but keep dirstate"""
427 dirstatebackupname = b'dirstate.shelve'
428 repo.dirstate.savebackup(None, dirstatebackupname)
429 tr.abort()
430 repo.dirstate.restorebackup(None, dirstatebackupname)
435 # disable the transaction invalidation of the dirstate, to preserve the
436 # current changes in memory.
437 ds = repo.dirstate
438 # The assert below checks that nobody else did such wrapping.
439 #
440 # There is no such other wrapping currently, but if someone tries to
441 # implement one in the future, this will explicitly break here instead of
442 # misbehaving in subtle ways.
443 assert 'invalidate' not in vars(ds)
444 try:
445 # note: we could simply disable the transaction abort callback, but
446 # other code also tries to roll back and invalidate this.
447 ds.invalidate = lambda: None
448 tr.abort()
449 finally:
450 del ds.invalidate
451 # manually write the changes in memory since we can no longer rely on the
452 # transaction to do so.
453 assert repo.currenttransaction() is None
454 repo.dirstate.write(None)
431 455
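The override above relies on ordinary attribute shadowing: assigning to ds.invalidate stores a no-op in the instance dict, masking the class method for that one object, and del restores the class method. In generic terms:

    class Dirstate:
        def invalidate(self):
            print('in-memory state dropped')

    ds = Dirstate()
    assert 'invalidate' not in vars(ds)  # nothing shadows the method yet
    ds.invalidate = lambda: None         # per-instance no-op override
    ds.invalidate()                      # does nothing
    del ds.invalidate                    # the class method is visible again
    ds.invalidate()                      # prints 'in-memory state dropped'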
432 456
433 457 def getshelvename(repo, parent, opts):
@@ -599,7 +623,8 def _docreatecmd(ui, repo, pats, opts):
599 623 activebookmark = _backupactivebookmark(repo)
600 624 extra = {b'internal': b'shelve'}
601 625 if includeunknown:
602 _includeunknownfiles(repo, pats, opts, extra)
626 with repo.dirstate.changing_files(repo):
627 _includeunknownfiles(repo, pats, opts, extra)
603 628
604 629 if _iswctxonnewbranch(repo) and not _isbareshelve(pats, opts):
605 630 # In non-bare shelve we don't store newly created branch
@@ -629,7 +654,7 def _docreatecmd(ui, repo, pats, opts):
629 654
630 655 ui.status(_(b'shelved as %s\n') % name)
631 656 if opts[b'keep']:
632 with repo.dirstate.parentchange():
657 with repo.dirstate.changing_parents(repo):
633 658 scmutil.movedirstate(repo, parent, match)
634 659 else:
635 660 hg.update(repo, parent.node())
@@ -854,18 +879,18 def unshelvecontinue(ui, repo, state, op
854 879 shelvectx = repo[state.parents[1]]
855 880 pendingctx = state.pendingctx
856 881
857 with repo.dirstate.parentchange():
882 with repo.dirstate.changing_parents(repo):
858 883 repo.setparents(state.pendingctx.node(), repo.nullid)
859 884 repo.dirstate.write(repo.currenttransaction())
860 885
861 886 targetphase = _target_phase(repo)
862 887 overrides = {(b'phases', b'new-commit'): targetphase}
863 888 with repo.ui.configoverride(overrides, b'unshelve'):
864 with repo.dirstate.parentchange():
889 with repo.dirstate.changing_parents(repo):
865 890 repo.setparents(state.parents[0], repo.nullid)
866 newnode, ispartialunshelve = _createunshelvectx(
867 ui, repo, shelvectx, basename, interactive, opts
868 )
891 newnode, ispartialunshelve = _createunshelvectx(
892 ui, repo, shelvectx, basename, interactive, opts
893 )
869 894
870 895 if newnode is None:
871 896 shelvectx = state.pendingctx
@@ -1060,11 +1085,11 def _rebaserestoredcommit(
1060 1085 )
1061 1086 raise error.ConflictResolutionRequired(b'unshelve')
1062 1087
1063 with repo.dirstate.parentchange():
1088 with repo.dirstate.changing_parents(repo):
1064 1089 repo.setparents(tmpwctx.node(), repo.nullid)
1065 newnode, ispartialunshelve = _createunshelvectx(
1066 ui, repo, shelvectx, basename, interactive, opts
1067 )
1090 newnode, ispartialunshelve = _createunshelvectx(
1091 ui, repo, shelvectx, basename, interactive, opts
1092 )
1068 1093
1069 1094 if newnode is None:
1070 1095 shelvectx = tmpwctx
@@ -1210,7 +1235,8 def _dounshelve(ui, repo, basename, opts
1210 1235 restorebranch(ui, repo, branchtorestore)
1211 1236 shelvedstate.clear(repo)
1212 1237 _finishunshelve(repo, oldtiprev, tr, activebookmark)
1213 _forgetunknownfiles(repo, shelvectx, addedbefore)
1238 with repo.dirstate.changing_files(repo):
1239 _forgetunknownfiles(repo, shelvectx, addedbefore)
1214 1240 if not ispartialunshelve:
1215 1241 unshelvecleanup(ui, repo, basename, opts)
1216 1242 finally:
@@ -512,6 +512,8 def simplemerge(
512 512 conflicts = False
513 513 if mode == b'union':
514 514 lines = _resolve(m3, (1, 2))
515 elif mode == b'union-other-first':
516 lines = _resolve(m3, (2, 1))
515 517 elif mode == b'local':
516 518 lines = _resolve(m3, (1,))
517 519 elif mode == b'other':
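The tuple handed to _resolve selects which sides contribute lines for a conflict region and in what order (1 = local, 2 = other, going by the existing modes), so the new mode simply flips the union order. An illustrative sketch under that assumed semantics:

    def resolve_conflict(local_lines, other_lines, sides):
        chunks = {1: local_lines, 2: other_lines}
        out = []
        for side in sides:
            out.extend(chunks[side])
        return out

    resolve_conflict([b'a\n'], [b'b\n'], (1, 2))  # union: local side first
    resolve_conflict([b'a\n'], [b'b\n'], (2, 1))  # union-other-first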
@@ -451,7 +451,7 def filterupdatesactions(repo, wctx, mct
451 451 message,
452 452 )
453 453
454 with repo.dirstate.parentchange():
454 with repo.dirstate.changing_parents(repo):
455 455 mergemod.applyupdates(
456 456 repo,
457 457 tmresult,
@@ -655,7 +655,7 def clearrules(repo, force=False):
655 655 The remaining sparse config only has profiles, if defined. The working
656 656 directory is refreshed, as needed.
657 657 """
658 with repo.wlock(), repo.dirstate.parentchange():
658 with repo.wlock(), repo.dirstate.changing_parents(repo):
659 659 raw = repo.vfs.tryread(b'sparse')
660 660 includes, excludes, profiles = parseconfig(repo.ui, raw, b'sparse')
661 661
@@ -671,7 +671,7 def importfromfiles(repo, opts, paths, f
671 671 The updated sparse config is written out and the working directory
672 672 is refreshed, as needed.
673 673 """
674 with repo.wlock(), repo.dirstate.parentchange():
674 with repo.wlock(), repo.dirstate.changing_parents(repo):
675 675 # read current configuration
676 676 raw = repo.vfs.tryread(b'sparse')
677 677 includes, excludes, profiles = parseconfig(repo.ui, raw, b'sparse')
@@ -730,7 +730,7 def updateconfig(
730 730
731 731 The new config is written out and a working directory refresh is performed.
732 732 """
733 with repo.wlock(), repo.lock(), repo.dirstate.parentchange():
733 with repo.wlock(), repo.lock(), repo.dirstate.changing_parents(repo):
734 734 raw = repo.vfs.tryread(b'sparse')
735 735 oldinclude, oldexclude, oldprofiles = parseconfig(
736 736 repo.ui, raw, b'sparse'
@@ -372,7 +372,7 def _performhandshake(ui, stdin, stdout,
372 372
373 373 class sshv1peer(wireprotov1peer.wirepeer):
374 374 def __init__(
375 self, ui, url, proc, stdin, stdout, stderr, caps, autoreadstderr=True
375 self, ui, path, proc, stdin, stdout, stderr, caps, autoreadstderr=True
376 376 ):
377 377 """Create a peer from an existing SSH connection.
378 378
@@ -383,8 +383,7 class sshv1peer(wireprotov1peer.wirepeer
383 383 ``autoreadstderr`` denotes whether to automatically read from
384 384 stderr and to forward its output.
385 385 """
386 self._url = url
387 self.ui = ui
386 super().__init__(ui, path=path)
388 387 # self._subprocess is unused. Keeping a handle on the process
389 388 # holds a reference and prevents it from being garbage collected.
390 389 self._subprocess = proc
@@ -411,14 +410,11 class sshv1peer(wireprotov1peer.wirepeer
411 410 # Begin of ipeerconnection interface.
412 411
413 412 def url(self):
414 return self._url
413 return self.path.loc
415 414
416 415 def local(self):
417 416 return None
418 417
419 def peer(self):
420 return self
421
422 418 def canpush(self):
423 419 return True
424 420
@@ -610,16 +606,16 def makepeer(ui, path, proc, stdin, stdo
610 606 )
611 607
612 608
613 def instance(ui, path, create, intents=None, createopts=None):
609 def make_peer(ui, path, create, intents=None, createopts=None):
614 610 """Create an SSH peer.
615 611
616 612 The returned object conforms to the ``wireprotov1peer.wirepeer`` interface.
617 613 """
618 u = urlutil.url(path, parsequery=False, parsefragment=False)
614 u = urlutil.url(path.loc, parsequery=False, parsefragment=False)
619 615 if u.scheme != b'ssh' or not u.host or u.path is None:
620 616 raise error.RepoError(_(b"couldn't parse location %s") % path)
621 617
622 urlutil.checksafessh(path)
618 urlutil.checksafessh(path.loc)
623 619
624 620 if u.passwd is not None:
625 621 raise error.RepoError(_(b'password in URL not supported'))
@@ -225,6 +225,7 class statichttprepository(
225 225 self.encodepats = None
226 226 self.decodepats = None
227 227 self._transref = None
228 self._dirstate = None
228 229
229 230 def _restrictcapabilities(self, caps):
230 231 caps = super(statichttprepository, self)._restrictcapabilities(caps)
@@ -236,8 +237,8 class statichttprepository(
236 237 def local(self):
237 238 return False
238 239
239 def peer(self):
240 return statichttppeer(self)
240 def peer(self, path=None):
241 return statichttppeer(self, path=path)
241 242
242 243 def wlock(self, wait=True):
243 244 raise error.LockUnavailable(
@@ -259,7 +260,8 class statichttprepository(
259 260 pass # statichttprepository are read only
260 261
261 262
262 def instance(ui, path, create, intents=None, createopts=None):
263 def make_peer(ui, path, create, intents=None, createopts=None):
263 264 if create:
264 265 raise error.Abort(_(b'cannot create new static-http repository'))
265 return statichttprepository(ui, path[7:])
266 url = path.loc[7:]
267 return statichttprepository(ui, url).peer(path=path)
@@ -1049,7 +1049,7 def main(argv=None):
1049 1049 # process options
1050 1050 try:
1051 1051 opts, args = pycompat.getoptb(
1052 sys.argv[optstart:],
1052 pycompat.sysargv[optstart:],
1053 1053 b"hl:f:o:p:",
1054 1054 [b"help", b"limit=", b"file=", b"output-file=", b"script-path="],
1055 1055 )
@@ -241,31 +241,32 def debugstrip(ui, repo, *revs, **opts):
241 241
242 242 revs = sorted(rootnodes)
243 243 if update and opts.get(b'keep'):
244 urev = _findupdatetarget(repo, revs)
245 uctx = repo[urev]
244 with repo.dirstate.changing_parents(repo):
245 urev = _findupdatetarget(repo, revs)
246 uctx = repo[urev]
246 247
247 # only reset the dirstate for files that would actually change
248 # between the working context and uctx
249 descendantrevs = repo.revs(b"only(., %d)", uctx.rev())
250 changedfiles = []
251 for rev in descendantrevs:
252 # blindly reset the files, regardless of what actually changed
253 changedfiles.extend(repo[rev].files())
248 # only reset the dirstate for files that would actually change
249 # between the working context and uctx
250 descendantrevs = repo.revs(b"only(., %d)", uctx.rev())
251 changedfiles = []
252 for rev in descendantrevs:
253 # blindly reset the files, regardless of what actually changed
254 changedfiles.extend(repo[rev].files())
254 255
255 # reset files that only changed in the dirstate too
256 dirstate = repo.dirstate
257 dirchanges = [
258 f for f in dirstate if not dirstate.get_entry(f).maybe_clean
259 ]
260 changedfiles.extend(dirchanges)
256 # reset files that only changed in the dirstate too
257 dirstate = repo.dirstate
258 dirchanges = [
259 f for f in dirstate if not dirstate.get_entry(f).maybe_clean
260 ]
261 changedfiles.extend(dirchanges)
261 262
262 repo.dirstate.rebuild(urev, uctx.manifest(), changedfiles)
263 repo.dirstate.write(repo.currenttransaction())
263 repo.dirstate.rebuild(urev, uctx.manifest(), changedfiles)
264 repo.dirstate.write(repo.currenttransaction())
264 265
265 # clear resolve state
266 mergestatemod.mergestate.clean(repo)
266 # clear resolve state
267 mergestatemod.mergestate.clean(repo)
267 268
268 update = False
269 update = False
269 270
270 271 strip(
271 272 ui,
@@ -569,9 +569,20 class hgsubrepo(abstractsubrepo):
569 569
570 570 @annotatesubrepoerror
571 571 def add(self, ui, match, prefix, uipathfn, explicitonly, **opts):
572 return cmdutil.add(
573 ui, self._repo, match, prefix, uipathfn, explicitonly, **opts
574 )
572 # XXX Ideally, we could let the caller take the `changing_files`
573 # context. However this is not an abstraction that makes sense for
574 # other repository types, and leaking a detail purely related to the
575 # dirstate seems unfortunate. So for now the context will be used here.
576 with self._repo.wlock(), self._repo.dirstate.changing_files(self._repo):
577 return cmdutil.add(
578 ui,
579 self._repo,
580 match,
581 prefix,
582 uipathfn,
583 explicitonly,
584 **opts,
585 )
575 586
576 587 @annotatesubrepoerror
577 588 def addremove(self, m, prefix, uipathfn, opts):
@@ -580,7 +591,18 class hgsubrepo(abstractsubrepo):
580 591 # be used to process sibling subrepos however.
581 592 opts = copy.copy(opts)
582 593 opts[b'subrepos'] = True
583 return scmutil.addremove(self._repo, m, prefix, uipathfn, opts)
594 # XXX Ideally, we could let the caller take the `changing_files`
595 # context. However this is not an abstraction that makes sense for
596 # other repository types, and leaking a detail purely related to the
597 # dirstate seems unfortunate. So for now the context will be used here.
598 with self._repo.wlock(), self._repo.dirstate.changing_files(self._repo):
599 return scmutil.addremove(
600 self._repo,
601 m,
602 prefix,
603 uipathfn,
604 opts,
605 )
584 606
585 607 @annotatesubrepoerror
586 608 def cat(self, match, fm, fntemplate, prefix, **opts):
@@ -621,7 +643,7 class hgsubrepo(abstractsubrepo):
621 643 match,
622 644 prefix=prefix,
623 645 listsubrepos=True,
624 **opts
646 **opts,
625 647 )
626 648 except error.RepoLookupError as inst:
627 649 self.ui.warn(
@@ -946,16 +968,21 class hgsubrepo(abstractsubrepo):
946 968
947 969 @annotatesubrepoerror
948 970 def forget(self, match, prefix, uipathfn, dryrun, interactive):
949 return cmdutil.forget(
950 self.ui,
951 self._repo,
952 match,
953 prefix,
954 uipathfn,
955 True,
956 dryrun=dryrun,
957 interactive=interactive,
958 )
971 # XXX Ideally, we could let the caller take the `changing_files`
972 # context. However this is not an abstraction that makes sense for
973 # other repository types, and leaking a detail purely related to the
974 # dirstate seems unfortunate. So for now the context will be used here.
975 with self._repo.wlock(), self._repo.dirstate.changing_files(self._repo):
976 return cmdutil.forget(
977 self.ui,
978 self._repo,
979 match,
980 prefix,
981 uipathfn,
982 True,
983 dryrun=dryrun,
984 interactive=interactive,
985 )
959 986
960 987 @annotatesubrepoerror
961 988 def removefiles(
@@ -969,17 +996,22 class hgsubrepo(abstractsubrepo):
969 996 dryrun,
970 997 warnings,
971 998 ):
972 return cmdutil.remove(
973 self.ui,
974 self._repo,
975 matcher,
976 prefix,
977 uipathfn,
978 after,
979 force,
980 subrepos,
981 dryrun,
982 )
999 # XXX Ideally, we could let the caller take the `changing_files`
1000 # context. However this is not an abstraction that makes sense for
1001 # other repository types, and leaking a detail purely related to the
1002 # dirstate seems unfortunate. So for now the context will be used here.
1003 with self._repo.wlock(), self._repo.dirstate.changing_files(self._repo):
1004 return cmdutil.remove(
1005 self.ui,
1006 self._repo,
1007 matcher,
1008 prefix,
1009 uipathfn,
1010 after,
1011 force,
1012 subrepos,
1013 dryrun,
1014 )
983 1015
984 1016 @annotatesubrepoerror
985 1017 def revert(self, substate, *pats, **opts):
@@ -1009,7 +1041,12 class hgsubrepo(abstractsubrepo):
1009 1041 pats = [b'set:modified()']
1010 1042 else:
1011 1043 pats = []
1012 cmdutil.revert(self.ui, self._repo, ctx, *pats, **opts)
1044 # XXX Ideally, we could let the caller take the `changing_files`
1045 # context. However this is not an abstraction that makes sense for
1046 # other repository types, and leaking a detail purely related to the
1047 # dirstate seems unfortunate. So for now the context will be used here.
1048 with self._repo.wlock(), self._repo.dirstate.changing_files(self._repo):
1049 cmdutil.revert(self.ui, self._repo, ctx, *pats, **opts)
1013 1050
1014 1051 def shortid(self, revid):
1015 1052 return revid[:12]
@@ -1123,7 +1160,7 class svnsubrepo(abstractsubrepo):
1123 1160 stdout=subprocess.PIPE,
1124 1161 stderr=subprocess.PIPE,
1125 1162 env=procutil.tonativeenv(env),
1126 **extrakw
1163 **extrakw,
1127 1164 )
1128 1165 stdout, stderr = map(util.fromnativeeol, p.communicate())
1129 1166 stderr = stderr.strip()
@@ -1488,7 +1525,7 class gitsubrepo(abstractsubrepo):
1488 1525 close_fds=procutil.closefds,
1489 1526 stdout=subprocess.PIPE,
1490 1527 stderr=errpipe,
1491 **extrakw
1528 **extrakw,
1492 1529 )
1493 1530 if stream:
1494 1531 return p.stdout, None
@@ -664,8 +664,9 def _tag(
664 664
665 665 repo.invalidatecaches()
666 666
667 if b'.hgtags' not in repo.dirstate:
668 repo[None].add([b'.hgtags'])
667 with repo.dirstate.changing_files(repo):
668 if b'.hgtags' not in repo.dirstate:
669 repo[None].add([b'.hgtags'])
669 670
670 671 m = matchmod.exact([b'.hgtags'])
671 672 tagnode = repo.commit(
@@ -177,10 +177,17 def tokenize(program, start, end, term=N
177 177 quote = program[pos : pos + 2]
178 178 s = pos = pos + 2
179 179 while pos < end: # find closing escaped quote
180 # pycompat.bytestr (and bytes) both have .startswith() that
181 # takes an optional start and an optional end, but pytype thinks
182 # it only takes 2 args.
183
184 # pytype: disable=wrong-arg-count
180 185 if program.startswith(b'\\\\\\', pos, end):
181 186 pos += 4 # skip over double escaped characters
182 187 continue
183 188 if program.startswith(quote, pos, end):
189 # pytype: enable=wrong-arg-count
190
184 191 # interpret as if it were a part of an outer string
185 192 data = parser.unescapestr(program[s:pos])
186 193 if token == b'template':
@@ -300,7 +307,14 def _scantemplate(tmpl, start, stop, quo
300 307 return
301 308
302 309 parseres, pos = p.parse(tokenize(tmpl, n + 1, stop, b'}'))
310
311 # pycompat.bytestr (and bytes) both have .startswith() that
312 # takes an optional start and an optional end, but pytype thinks
313 # it only takes 2 args.
314
315 # pytype: disable=wrong-arg-count
303 316 if not tmpl.startswith(b'}', pos):
317 # pytype: enable=wrong-arg-count
304 318 raise error.ParseError(_(b"invalid token"), pos)
305 319 yield (b'template', parseres, n)
306 320 pos += 1
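For reference, the three-argument form that pytype flags is standard Python: bytes.startswith(prefix, start[, end]) tests a match beginning at start, with end bounding how far the prefix may reach:

    data = b'xx{yy}'
    data.startswith(b'{', 2)       # True: the match is tested at offset 2
    data.startswith(b'yy}', 3, 6)  # True: the prefix fits before end
    data.startswith(b'yy}', 3, 5)  # False: end cuts the match short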
@@ -1,6 +1,6
1 1 The MIT License (MIT)
2 2
3 Copyright (c) 2015 Hynek Schlawack
3 Copyright (c) 2015 Hynek Schlawack and the attrs contributors
4 4
5 5 Permission is hereby granted, free of charge, to any person obtaining a copy
6 6 of this software and associated documentation files (the "Software"), to deal
@@ -1,37 +1,35
1 from __future__ import absolute_import, division, print_function
1 # SPDX-License-Identifier: MIT
2
3
4 import sys
5
6 from functools import partial
2 7
3 from ._funcs import (
4 asdict,
5 assoc,
6 astuple,
7 evolve,
8 has,
9 )
8 from . import converters, exceptions, filters, setters, validators
9 from ._cmp import cmp_using
10 from ._config import get_run_validators, set_run_validators
11 from ._funcs import asdict, assoc, astuple, evolve, has, resolve_types
10 12 from ._make import (
13 NOTHING,
11 14 Attribute,
12 15 Factory,
13 NOTHING,
14 attr,
15 attributes,
16 attrib,
17 attrs,
16 18 fields,
19 fields_dict,
17 20 make_class,
18 21 validate,
19 22 )
20 from ._config import (
21 get_run_validators,
22 set_run_validators,
23 )
24 from . import exceptions
25 from . import filters
26 from . import converters
27 from . import validators
23 from ._version_info import VersionInfo
28 24
29 25
30 __version__ = "17.2.0"
26 __version__ = "22.1.0"
27 __version_info__ = VersionInfo._from_version_string(__version__)
31 28
32 29 __title__ = "attrs"
33 30 __description__ = "Classes Without Boilerplate"
34 __uri__ = "http://www.attrs.org/"
31 __url__ = "https://www.attrs.org/"
32 __uri__ = __url__
35 33 __doc__ = __description__ + " <" + __uri__ + ">"
36 34
37 35 __author__ = "Hynek Schlawack"
@@ -41,8 +39,9 from . import validators
41 39 __copyright__ = "Copyright (c) 2015 Hynek Schlawack"
42 40
43 41
44 s = attrs = attributes
45 ib = attrib = attr
42 s = attributes = attrs
43 ib = attr = attrib
44 dataclass = partial(attrs, auto_attribs=True) # happy Easter ;)
46 45
47 46 __all__ = [
48 47 "Attribute",
@@ -55,17 +54,26 ib = attrib = attr
55 54 "attrib",
56 55 "attributes",
57 56 "attrs",
57 "cmp_using",
58 58 "converters",
59 59 "evolve",
60 60 "exceptions",
61 61 "fields",
62 "fields_dict",
62 63 "filters",
63 64 "get_run_validators",
64 65 "has",
65 66 "ib",
66 67 "make_class",
68 "resolve_types",
67 69 "s",
68 70 "set_run_validators",
71 "setters",
69 72 "validate",
70 73 "validators",
71 74 ]
75
76 if sys.version_info[:2] >= (3, 6):
77 from ._next_gen import define, field, frozen, mutable # noqa: F401
78
79 __all__.extend(("define", "field", "frozen", "mutable"))
@@ -1,90 +1,185
1 from __future__ import absolute_import, division, print_function
1 # SPDX-License-Identifier: MIT
2
3
4 import inspect
5 import platform
6 import sys
7 import threading
8 import types
9 import warnings
10
11 from collections.abc import Mapping, Sequence # noqa
12
13
14 PYPY = platform.python_implementation() == "PyPy"
15 PY36 = sys.version_info[:2] >= (3, 6)
16 HAS_F_STRINGS = PY36
17 PY310 = sys.version_info[:2] >= (3, 10)
2 18
3 import sys
4 import types
19
20 if PYPY or PY36:
21 ordered_dict = dict
22 else:
23 from collections import OrderedDict
24
25 ordered_dict = OrderedDict
26
27
28 def just_warn(*args, **kw):
29 warnings.warn(
30 "Running interpreter doesn't sufficiently support code object "
31 "introspection. Some features like bare super() or accessing "
32 "__class__ will not work with slotted classes.",
33 RuntimeWarning,
34 stacklevel=2,
35 )
5 36
6 37
7 PY2 = sys.version_info[0] == 2
38 class _AnnotationExtractor:
39 """
40 Extract type annotations from a callable, returning None whenever there
41 is none.
42 """
43
44 __slots__ = ["sig"]
45
46 def __init__(self, callable):
47 try:
48 self.sig = inspect.signature(callable)
49 except (ValueError, TypeError): # inspect failed
50 self.sig = None
51
52 def get_first_param_type(self):
53 """
54 Return the type annotation of the first argument if it's not empty.
55 """
56 if not self.sig:
57 return None
58
59 params = list(self.sig.parameters.values())
60 if params and params[0].annotation is not inspect.Parameter.empty:
61 return params[0].annotation
62
63 return None
64
65 def get_return_type(self):
66 """
67 Return the return type if it's not empty.
68 """
69 if (
70 self.sig
71 and self.sig.return_annotation is not inspect.Signature.empty
72 ):
73 return self.sig.return_annotation
74
75 return None
8 76
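A small usage sketch of _AnnotationExtractor with an annotated converter; unannotated or uninspectable callables simply yield None:

    def to_int(value: str) -> int:
        return int(value)

    xtr = _AnnotationExtractor(to_int)
    xtr.get_first_param_type()  # <class 'str'>
    xtr.get_return_type()       # <class 'int'>

    xtr = _AnnotationExtractor(lambda v: v)
    xtr.get_return_type()       # None: no annotation present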
9 77
10 if PY2:
11 from UserDict import IterableUserDict
12
13 # We 'bundle' isclass instead of using inspect as importing inspect is
14 # fairly expensive (order of 10-15 ms for a modern machine in 2016)
15 def isclass(klass):
16 return isinstance(klass, (type, types.ClassType))
78 def make_set_closure_cell():
79 """Return a function of two arguments (cell, value) which sets
80 the value stored in the closure cell `cell` to `value`.
81 """
82 # pypy makes this easy. (It also supports the logic below, but
83 # why not do the easy/fast thing?)
84 if PYPY:
17 85
18 # TYPE is used in exceptions, repr(int) is different on Python 2 and 3.
19 TYPE = "type"
86 def set_closure_cell(cell, value):
87 cell.__setstate__((value,))
88
89 return set_closure_cell
20 90
21 def iteritems(d):
22 return d.iteritems()
91 # Otherwise gotta do it the hard way.
23 92
24 def iterkeys(d):
25 return d.iterkeys()
93 # Create a function that will set its first cellvar to `value`.
94 def set_first_cellvar_to(value):
95 x = value
96 return
26 97
27 # Python 2 is bereft of a read-only dict proxy, so we make one!
28 class ReadOnlyDict(IterableUserDict):
29 """
30 Best-effort read-only dict wrapper.
31 """
98 # This function will be eliminated as dead code, but
99 # not before its reference to `x` forces `x` to be
100 # represented as a closure cell rather than a local.
101 def force_x_to_be_a_cell(): # pragma: no cover
102 return x
32 103
33 def __setitem__(self, key, val):
34 # We gently pretend we're a Python 3 mappingproxy.
35 raise TypeError("'mappingproxy' object does not support item "
36 "assignment")
104 try:
105 # Extract the code object and make sure our assumptions about
106 # the closure behavior are correct.
107 co = set_first_cellvar_to.__code__
108 if co.co_cellvars != ("x",) or co.co_freevars != ():
109 raise AssertionError # pragma: no cover
37 110
38 def update(self, _):
39 # We gently pretend we're a Python 3 mappingproxy.
40 raise AttributeError("'mappingproxy' object has no attribute "
41 "'update'")
111 # Convert this code object to a code object that sets the
112 # function's first _freevar_ (not cellvar) to the argument.
113 if sys.version_info >= (3, 8):
42 114
43 def __delitem__(self, _):
44 # We gently pretend we're a Python 3 mappingproxy.
45 raise TypeError("'mappingproxy' object does not support item "
46 "deletion")
115 def set_closure_cell(cell, value):
116 cell.cell_contents = value
47 117
48 def clear(self):
49 # We gently pretend we're a Python 3 mappingproxy.
50 raise AttributeError("'mappingproxy' object has no attribute "
51 "'clear'")
52
53 def pop(self, key, default=None):
54 # We gently pretend we're a Python 3 mappingproxy.
55 raise AttributeError("'mappingproxy' object has no attribute "
56 "'pop'")
118 else:
119 args = [co.co_argcount]
120 args.append(co.co_kwonlyargcount)
121 args.extend(
122 [
123 co.co_nlocals,
124 co.co_stacksize,
125 co.co_flags,
126 co.co_code,
127 co.co_consts,
128 co.co_names,
129 co.co_varnames,
130 co.co_filename,
131 co.co_name,
132 co.co_firstlineno,
133 co.co_lnotab,
134 # These two arguments are reversed:
135 co.co_cellvars,
136 co.co_freevars,
137 ]
138 )
139 set_first_freevar_code = types.CodeType(*args)
57 140
58 def popitem(self):
59 # We gently pretend we're a Python 3 mappingproxy.
60 raise AttributeError("'mappingproxy' object has no attribute "
61 "'popitem'")
62
63 def setdefault(self, key, default=None):
64 # We gently pretend we're a Python 3 mappingproxy.
65 raise AttributeError("'mappingproxy' object has no attribute "
66 "'setdefault'")
141 def set_closure_cell(cell, value):
142 # Create a function using the set_first_freevar_code,
143 # whose first closure cell is `cell`. Calling it will
144 # change the value of that cell.
145 setter = types.FunctionType(
146 set_first_freevar_code, {}, "setter", (), (cell,)
147 )
148 # And call it to set the cell.
149 setter(value)
67 150
68 def __repr__(self):
69 # Override to be identical to the Python 3 version.
70 return "mappingproxy(" + repr(self.data) + ")"
151 # Make sure it works on this interpreter:
152 def make_func_with_cell():
153 x = None
154
155 def func():
156 return x # pragma: no cover
71 157
72 def metadata_proxy(d):
73 res = ReadOnlyDict()
74 res.data.update(d) # We blocked update, so we have to do it like this.
75 return res
158 return func
159
160 cell = make_func_with_cell().__closure__[0]
161 set_closure_cell(cell, 100)
162 if cell.cell_contents != 100:
163 raise AssertionError # pragma: no cover
76 164
77 else:
78 def isclass(klass):
79 return isinstance(klass, type)
165 except Exception:
166 return just_warn
167 else:
168 return set_closure_cell
80 169
81 TYPE = "class"
170
171 set_closure_cell = make_set_closure_cell()
82 172
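What set_closure_cell buys, in short: it rewrites the variable captured by a closure in place, which plain assignment cannot do from outside the defining scope:

    def make_getter():
        x = 'original'

        def get():
            return x

        return get

    get = make_getter()
    cell = get.__closure__[0]          # the cell holding x
    set_closure_cell(cell, 'patched')
    get()                              # now returns 'patched'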
83 def iteritems(d):
84 return d.items()
85
86 def iterkeys(d):
87 return d.keys()
88
89 def metadata_proxy(d):
90 return types.MappingProxyType(dict(d))
173 # Thread-local global to track attrs instances which are already being repr'd.
174 # This is needed because there is no other (thread-safe) way to pass info
175 # about the instances that are already being repr'd through the call stack
176 # in order to ensure we don't perform infinite recursion.
177 #
178 # For instance, if an instance contains a dict which contains that instance,
179 # we need to know that we're already repr'ing the outside instance from within
180 # the dict's repr() call.
181 #
182 # This lives here rather than in _make.py so that the functions in _make.py
183 # don't have a direct reference to the thread-local in their globals dict.
184 # If they have such a reference, it breaks cloudpickle.
185 repr_context = threading.local()
@@ -1,4 +1,4
1 from __future__ import absolute_import, division, print_function
1 # SPDX-License-Identifier: MIT
2 2
3 3
4 4 __all__ = ["set_run_validators", "get_run_validators"]
@@ -9,6 +9,10 from __future__ import absolute_import,
9 9 def set_run_validators(run):
10 10 """
11 11 Set whether or not validators are run. By default, they are run.
12
13 .. deprecated:: 21.3.0 It will not be removed, but it also will not be
14 moved to the new ``attrs`` namespace. Use `attrs.validators.set_disabled()`
15 instead.
12 16 """
13 17 if not isinstance(run, bool):
14 18 raise TypeError("'run' must be bool.")
@@ -19,5 +23,9 def set_run_validators(run):
19 23 def get_run_validators():
20 24 """
21 25 Return whether or not validators are run.
26
27 .. deprecated:: 21.3.0 It will not be removed, but it also will not be
28 moved to the new ``attrs`` namespace. Use `attrs.validators.get_disabled()`
29 instead.
22 30 """
23 31 return _run_validators
@@ -1,14 +1,20
1 from __future__ import absolute_import, division, print_function
1 # SPDX-License-Identifier: MIT
2
2 3
3 4 import copy
4 5
5 from ._compat import iteritems
6 from ._make import NOTHING, fields, _obj_setattr
6 from ._make import NOTHING, _obj_setattr, fields
7 7 from .exceptions import AttrsAttributeNotFoundError
8 8
9 9
10 def asdict(inst, recurse=True, filter=None, dict_factory=dict,
11 retain_collection_types=False):
10 def asdict(
11 inst,
12 recurse=True,
13 filter=None,
14 dict_factory=dict,
15 retain_collection_types=False,
16 value_serializer=None,
17 ):
12 18 """
13 19 Return the ``attrs`` attribute values of *inst* as a dict.
14 20
@@ -17,9 +23,9 def asdict(inst, recurse=True, filter=No
17 23 :param inst: Instance of an ``attrs``-decorated class.
18 24 :param bool recurse: Recurse into classes that are also
19 25 ``attrs``-decorated.
20 :param callable filter: A callable whose return code deteremines whether an
26 :param callable filter: A callable whose return code determines whether an
21 27 attribute or element is included (``True``) or dropped (``False``). Is
22 called with the :class:`attr.Attribute` as the first argument and the
28 called with the `attrs.Attribute` as the first argument and the
23 29 value as the second argument.
24 30 :param callable dict_factory: A callable to produce dictionaries from. For
25 31 example, to produce ordered dictionaries instead of normal Python
@@ -27,6 +33,10 def asdict(inst, recurse=True, filter=No
27 33 :param bool retain_collection_types: Do not convert to ``list`` when
28 34 encountering an attribute whose type is ``tuple`` or ``set``. Only
29 35 meaningful if ``recurse`` is ``True``.
36 :param Optional[callable] value_serializer: A hook that is called for every
37 attribute or dict key/value. It receives the current instance, field
38 and value and must return the (updated) value. The hook is run *after*
39 the optional *filter* has been applied.
30 40
31 41 :rtype: return type of *dict_factory*
32 42
@@ -35,6 +45,9 def asdict(inst, recurse=True, filter=No
35 45
36 46 .. versionadded:: 16.0.0 *dict_factory*
37 47 .. versionadded:: 16.1.0 *retain_collection_types*
48 .. versionadded:: 20.3.0 *value_serializer*
49 .. versionadded:: 21.3.0 If a dict has a collection for a key, it is
50 serialized as a tuple.
38 51 """
39 52 attrs = fields(inst.__class__)
40 53 rv = dict_factory()
@@ -42,24 +55,58 def asdict(inst, recurse=True, filter=No
42 55 v = getattr(inst, a.name)
43 56 if filter is not None and not filter(a, v):
44 57 continue
58
59 if value_serializer is not None:
60 v = value_serializer(inst, a, v)
61
45 62 if recurse is True:
46 63 if has(v.__class__):
47 rv[a.name] = asdict(v, recurse=True, filter=filter,
48 dict_factory=dict_factory)
49 elif isinstance(v, (tuple, list, set)):
64 rv[a.name] = asdict(
65 v,
66 recurse=True,
67 filter=filter,
68 dict_factory=dict_factory,
69 retain_collection_types=retain_collection_types,
70 value_serializer=value_serializer,
71 )
72 elif isinstance(v, (tuple, list, set, frozenset)):
50 73 cf = v.__class__ if retain_collection_types is True else list
51 rv[a.name] = cf([
52 asdict(i, recurse=True, filter=filter,
53 dict_factory=dict_factory)
54 if has(i.__class__) else i
55 for i in v
56 ])
74 rv[a.name] = cf(
75 [
76 _asdict_anything(
77 i,
78 is_key=False,
79 filter=filter,
80 dict_factory=dict_factory,
81 retain_collection_types=retain_collection_types,
82 value_serializer=value_serializer,
83 )
84 for i in v
85 ]
86 )
57 87 elif isinstance(v, dict):
58 88 df = dict_factory
59 rv[a.name] = df((
60 asdict(kk, dict_factory=df) if has(kk.__class__) else kk,
61 asdict(vv, dict_factory=df) if has(vv.__class__) else vv)
62 for kk, vv in iteritems(v))
89 rv[a.name] = df(
90 (
91 _asdict_anything(
92 kk,
93 is_key=True,
94 filter=filter,
95 dict_factory=df,
96 retain_collection_types=retain_collection_types,
97 value_serializer=value_serializer,
98 ),
99 _asdict_anything(
100 vv,
101 is_key=False,
102 filter=filter,
103 dict_factory=df,
104 retain_collection_types=retain_collection_types,
105 value_serializer=value_serializer,
106 ),
107 )
108 for kk, vv in v.items()
109 )
63 110 else:
64 111 rv[a.name] = v
65 112 else:
@@ -67,8 +114,86 def asdict(inst, recurse=True, filter=No
67 114 return rv
68 115
69 116
70 def astuple(inst, recurse=True, filter=None, tuple_factory=tuple,
71 retain_collection_types=False):
117 def _asdict_anything(
118 val,
119 is_key,
120 filter,
121 dict_factory,
122 retain_collection_types,
123 value_serializer,
124 ):
125 """
126 ``asdict`` only works on attrs instances, this works on anything.
127 """
128 if getattr(val.__class__, "__attrs_attrs__", None) is not None:
129 # Attrs class.
130 rv = asdict(
131 val,
132 recurse=True,
133 filter=filter,
134 dict_factory=dict_factory,
135 retain_collection_types=retain_collection_types,
136 value_serializer=value_serializer,
137 )
138 elif isinstance(val, (tuple, list, set, frozenset)):
139 if retain_collection_types is True:
140 cf = val.__class__
141 elif is_key:
142 cf = tuple
143 else:
144 cf = list
145
146 rv = cf(
147 [
148 _asdict_anything(
149 i,
150 is_key=False,
151 filter=filter,
152 dict_factory=dict_factory,
153 retain_collection_types=retain_collection_types,
154 value_serializer=value_serializer,
155 )
156 for i in val
157 ]
158 )
159 elif isinstance(val, dict):
160 df = dict_factory
161 rv = df(
162 (
163 _asdict_anything(
164 kk,
165 is_key=True,
166 filter=filter,
167 dict_factory=df,
168 retain_collection_types=retain_collection_types,
169 value_serializer=value_serializer,
170 ),
171 _asdict_anything(
172 vv,
173 is_key=False,
174 filter=filter,
175 dict_factory=df,
176 retain_collection_types=retain_collection_types,
177 value_serializer=value_serializer,
178 ),
179 )
180 for kk, vv in val.items()
181 )
182 else:
183 rv = val
184 if value_serializer is not None:
185 rv = value_serializer(None, None, rv)
186
187 return rv
188
189
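A short usage example of the value_serializer hook documented above (added in attrs 20.3.0); the hook sees every attribute and dict key/value after the optional filter has run:

    import datetime

    import attr

    @attr.s
    class Event:
        when = attr.ib()

    def serialize(inst, field, value):
        if isinstance(value, datetime.datetime):
            return value.isoformat()
        return value

    attr.asdict(Event(datetime.datetime(2020, 1, 1)), value_serializer=serialize)
    # -> {'when': '2020-01-01T00:00:00'}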
190 def astuple(
191 inst,
192 recurse=True,
193 filter=None,
194 tuple_factory=tuple,
195 retain_collection_types=False,
196 ):
72 197 """
73 198 Return the ``attrs`` attribute values of *inst* as a tuple.
74 199
@@ -79,7 +204,7 def astuple(inst, recurse=True, filter=N
79 204 ``attrs``-decorated.
80 205 :param callable filter: A callable whose return code determines whether an
81 206 attribute or element is included (``True``) or dropped (``False``). Is
82 called with the :class:`attr.Attribute` as the first argument and the
207 called with the `attrs.Attribute` as the first argument and the
83 208 value as the second argument.
84 209 :param callable tuple_factory: A callable to produce tuples from. For
85 210 example, to produce lists instead of tuples.
@@ -104,38 +229,61 def astuple(inst, recurse=True, filter=N
104 229 continue
105 230 if recurse is True:
106 231 if has(v.__class__):
107 rv.append(astuple(v, recurse=True, filter=filter,
108 tuple_factory=tuple_factory,
109 retain_collection_types=retain))
110 elif isinstance(v, (tuple, list, set)):
232 rv.append(
233 astuple(
234 v,
235 recurse=True,
236 filter=filter,
237 tuple_factory=tuple_factory,
238 retain_collection_types=retain,
239 )
240 )
241 elif isinstance(v, (tuple, list, set, frozenset)):
111 242 cf = v.__class__ if retain is True else list
112 rv.append(cf([
113 astuple(j, recurse=True, filter=filter,
114 tuple_factory=tuple_factory,
115 retain_collection_types=retain)
116 if has(j.__class__) else j
117 for j in v
118 ]))
243 rv.append(
244 cf(
245 [
246 astuple(
247 j,
248 recurse=True,
249 filter=filter,
250 tuple_factory=tuple_factory,
251 retain_collection_types=retain,
252 )
253 if has(j.__class__)
254 else j
255 for j in v
256 ]
257 )
258 )
119 259 elif isinstance(v, dict):
120 260 df = v.__class__ if retain is True else dict
121 rv.append(df(
261 rv.append(
262 df(
122 263 (
123 264 astuple(
124 265 kk,
125 266 tuple_factory=tuple_factory,
126 retain_collection_types=retain
127 ) if has(kk.__class__) else kk,
267 retain_collection_types=retain,
268 )
269 if has(kk.__class__)
270 else kk,
128 271 astuple(
129 272 vv,
130 273 tuple_factory=tuple_factory,
131 retain_collection_types=retain
132 ) if has(vv.__class__) else vv
274 retain_collection_types=retain,
275 )
276 if has(vv.__class__)
277 else vv,
133 278 )
134 for kk, vv in iteritems(v)))
279 for kk, vv in v.items()
280 )
281 )
135 282 else:
136 283 rv.append(v)
137 284 else:
138 285 rv.append(v)
286
139 287 return rv if tuple_factory is list else tuple_factory(rv)
140 288
141 289
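For orientation, a small usage sketch of ``astuple`` as documented above (again assuming the package imports as ``attr``; names are illustrative):

    import attr

    @attr.s
    class Point:
        x = attr.ib()
        y = attr.ib()

    attr.astuple(Point(1, 2))                                      # (1, 2)
    attr.astuple(Point(1, 2), tuple_factory=list)                  # [1, 2]
    attr.astuple(Point(1, 2), filter=lambda a, v: a.name != "y")   # (1,)
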
@@ -146,7 +294,7 def has(cls):
146 294 :param type cls: Class to introspect.
147 295 :raise TypeError: If *cls* is not a class.
148 296
149 :rtype: :class:`bool`
297 :rtype: bool
150 298 """
151 299 return getattr(cls, "__attrs_attrs__", None) is not None
152 300
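``has`` is the cheap introspection check the recursive helpers above rely on; a quick sketch (class name illustrative):

    import attr

    @attr.s
    class C:
        pass

    attr.has(C)       # True: C carries __attrs_attrs__
    attr.has(object)  # False: plain classes don't
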
@@ -166,19 +314,26 def assoc(inst, **changes):
166 314 class.
167 315
168 316 .. deprecated:: 17.1.0
169 Use :func:`evolve` instead.
317 Use `attrs.evolve` instead if you can.
318 This function will not be removed due to the slightly different approach
319 compared to `attrs.evolve`.
170 320 """
171 321 import warnings
172 warnings.warn("assoc is deprecated and will be removed after 2018/01.",
173 DeprecationWarning)
322
323 warnings.warn(
324 "assoc is deprecated and will be removed after 2018/01.",
325 DeprecationWarning,
326 stacklevel=2,
327 )
174 328 new = copy.copy(inst)
175 329 attrs = fields(inst.__class__)
176 for k, v in iteritems(changes):
330 for k, v in changes.items():
177 331 a = getattr(attrs, k, NOTHING)
178 332 if a is NOTHING:
179 333 raise AttrsAttributeNotFoundError(
180 "{k} is not an attrs attribute on {cl}."
181 .format(k=k, cl=new.__class__)
334 "{k} is not an attrs attribute on {cl}.".format(
335 k=k, cl=new.__class__
336 )
182 337 )
183 338 _obj_setattr(new, k, v)
184 339 return new
@@ -209,4 +364,57 def evolve(inst, **changes):
209 364 init_name = attr_name if attr_name[0] != "_" else attr_name[1:]
210 365 if init_name not in changes:
211 366 changes[init_name] = getattr(inst, attr_name)
367
212 368 return cls(**changes)
369
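A sketch of the private-name handling in ``evolve`` above (illustrative class, usual ``attr`` import assumed): the leading underscore is stripped when the value is fed back to ``__init__``:

    import attr

    @attr.s
    class C:
        x = attr.ib()
        _y = attr.ib()

    c = C(1, 2)
    attr.evolve(c, x=3)
    # C(x=3, _y=2); the untouched _y was passed back as init argument "y"
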
370
371 def resolve_types(cls, globalns=None, localns=None, attribs=None):
372 """
373 Resolve any strings and forward annotations in type annotations.
374
375 This is only required if you need concrete types in `Attribute`'s *type*
376 field. In other words, you don't need to resolve your types if you only
377 use them for static type checking.
378
379 With no arguments, names will be looked up in the module in which the class
380 was created. If this is not what you want, e.g. if the name only exists
381 inside a method, you may pass *globalns* or *localns* to specify other
382 dictionaries in which to look up these names. See the docs of
383 `typing.get_type_hints` for more details.
384
385 :param type cls: Class to resolve.
386 :param Optional[dict] globalns: Dictionary containing global variables.
387 :param Optional[dict] localns: Dictionary containing local variables.
388 :param Optional[list] attribs: List of attribs for the given class.
389 This is necessary when calling from inside a ``field_transformer``
390 since *cls* is not an ``attrs`` class yet.
391
392 :raise TypeError: If *cls* is not a class.
393 :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
394 class and you didn't pass any attribs.
395 :raise NameError: If types cannot be resolved because of missing variables.
396
397 :returns: *cls* so you can also use this function as a class decorator.
398 Please note that you have to apply it **after** `attrs.define`. That
399 means the decorator has to come in the line **before** `attrs.define`.
400
401 .. versionadded:: 20.1.0
402 .. versionadded:: 21.1.0 *attribs*
403
404 """
405 # Since calling get_type_hints is expensive we cache whether we've
406 # done it already.
407 if getattr(cls, "__attrs_types_resolved__", None) != cls:
408 import typing
409
410 hints = typing.get_type_hints(cls, globalns=globalns, localns=localns)
411 for field in fields(cls) if attribs is None else attribs:
412 if field.name in hints:
413 # Since fields have been frozen we must work around it.
414 _obj_setattr(field, "type", hints[field.name])
415 # We store the class we resolved so that subclasses know they haven't
416 # been resolved.
417 cls.__attrs_types_resolved__ = cls
418
419 # Return the class so you can use it as a decorator too.
420 return cls
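A sketch of the decorator usage the docstring describes: ``resolve_types`` must sit *above* (and therefore run after) the attrs decorator. ``Node`` is an illustrative name:

    import typing
    import attr

    @attr.resolve_types
    @attr.s(auto_attribs=True)
    class Node:
        value: int
        next: typing.Optional["Node"] = None

    attr.fields(Node).next.type
    # typing.Optional[Node]; the forward reference is now a concrete type
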
@@ -1,50 +1,79
1 from __future__ import absolute_import, division, print_function
2
3 import hashlib
1 # SPDX-License-Identifier: MIT
2
3 import copy
4 4 import linecache
5 import sys
6 import types
7 import typing
5 8
6 9 from operator import itemgetter
7 10
8 from . import _config
9 from ._compat import PY2, iteritems, isclass, iterkeys, metadata_proxy
11 # We need to import _compat itself in addition to the _compat members to avoid
12 # having the thread-local in the globals here.
13 from . import _compat, _config, setters
14 from ._compat import (
15 HAS_F_STRINGS,
16 PY310,
17 PYPY,
18 _AnnotationExtractor,
19 ordered_dict,
20 set_closure_cell,
21 )
10 22 from .exceptions import (
11 23 DefaultAlreadySetError,
12 24 FrozenInstanceError,
13 25 NotAnAttrsClassError,
26 UnannotatedAttributeError,
14 27 )
15 28
16 29
17 30 # This is used at least twice, so cache it here.
18 31 _obj_setattr = object.__setattr__
19 _init_convert_pat = "__attr_convert_{}"
32 _init_converter_pat = "__attr_converter_%s"
20 33 _init_factory_pat = "__attr_factory_{}"
21 _tuple_property_pat = " {attr_name} = property(itemgetter({index}))"
22 _empty_metadata_singleton = metadata_proxy({})
23
24
25 class _Nothing(object):
34 _tuple_property_pat = (
35 " {attr_name} = _attrs_property(_attrs_itemgetter({index}))"
36 )
37 _classvar_prefixes = (
38 "typing.ClassVar",
39 "t.ClassVar",
40 "ClassVar",
41 "typing_extensions.ClassVar",
42 )
43 # we don't use a double-underscore prefix because that triggers
44 # name mangling when trying to create a slot for the field
45 # (when slots=True)
46 _hash_cache_field = "_attrs_cached_hash"
47
48 _empty_metadata_singleton = types.MappingProxyType({})
49
50 # Unique object for unequivocal getattr() defaults.
51 _sentinel = object()
52
53 _ng_default_on_setattr = setters.pipe(setters.convert, setters.validate)
54
55
56 class _Nothing:
26 57 """
27 58 Sentinel class to indicate the lack of a value when ``None`` is ambiguous.
28 59
29 All instances of `_Nothing` are equal.
60 ``_Nothing`` is a singleton. There is only ever one of it.
61
62 .. versionchanged:: 21.1.0 ``bool(NOTHING)`` is now False.
30 63 """
31 def __copy__(self):
32 return self
33
34 def __deepcopy__(self, _):
35 return self
36
37 def __eq__(self, other):
38 return other.__class__ == _Nothing
39
40 def __ne__(self, other):
41 return not self == other
64
65 _singleton = None
66
67 def __new__(cls):
68 if _Nothing._singleton is None:
69 _Nothing._singleton = super().__new__(cls)
70 return _Nothing._singleton
42 71
43 72 def __repr__(self):
44 73 return "NOTHING"
45 74
46 def __hash__(self):
47 return 0xdeadbeef
75 def __bool__(self):
76 return False
48 77
49 78
50 79 NOTHING = _Nothing()
@@ -53,92 +82,255 Sentinel to indicate the lack of a value
53 82 """
54 83
55 84
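Since ``__new__`` always hands back the one instance, the sentinel can be compared by identity; a quick sketch (``_Nothing`` is private, imported here for illustration only):

    from attr import NOTHING
    from attr._make import _Nothing  # private, illustration only

    _Nothing() is NOTHING  # True: constructing again returns the singleton
    bool(NOTHING)          # False, as of 21.1.0
    repr(NOTHING)          # 'NOTHING'
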
56 def attr(default=NOTHING, validator=None,
57 repr=True, cmp=True, hash=None, init=True,
58 convert=None, metadata={}):
59 r"""
85 class _CacheHashWrapper(int):
86 """
87 An integer subclass that pickles / copies as None
88
89 This is used for non-slots classes with ``cache_hash=True``, to avoid
90 serializing a potentially (even likely) invalid hash value. Since ``None``
91 is the default value for uncalculated hashes, whenever this is copied,
92 the copy's value for the hash should automatically reset.
93
94 See GH #613 for more details.
95 """
96
97 def __reduce__(self, _none_constructor=type(None), _args=()):
98 return _none_constructor, _args
99
100
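A sketch of the reduce trick (``_CacheHashWrapper`` is private; the import is for illustration only): copies and pickles come back as ``None``, i.e. "hash not computed yet":

    import copy
    import pickle
    from attr._make import _CacheHashWrapper  # private, illustration only

    w = _CacheHashWrapper(0x1234)
    int(w)                         # 4660, an ordinary int at runtime
    copy.copy(w)                   # None, via __reduce__
    pickle.loads(pickle.dumps(w))  # None, so the clone re-hashes lazily
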
101 def attrib(
102 default=NOTHING,
103 validator=None,
104 repr=True,
105 cmp=None,
106 hash=None,
107 init=True,
108 metadata=None,
109 type=None,
110 converter=None,
111 factory=None,
112 kw_only=False,
113 eq=None,
114 order=None,
115 on_setattr=None,
116 ):
117 """
60 118 Create a new attribute on a class.
61 119
62 120 .. warning::
63 121
64 122 Does *not* do anything unless the class is also decorated with
65 :func:`attr.s`!
123 `attr.s`!
66 124
67 125 :param default: A value that is used if an ``attrs``-generated ``__init__``
68 126 is used and no value is passed while instantiating or the attribute is
69 127 excluded using ``init=False``.
70 128
71 If the value is an instance of :class:`Factory`, its callable will be
72 used to construct a new value (useful for mutable datatypes like lists
129 If the value is an instance of `attrs.Factory`, its callable will be
130 used to construct a new value (useful for mutable data types like lists
73 131 or dicts).
74 132
75 If a default is not set (or set manually to ``attr.NOTHING``), a value
76 *must* be supplied when instantiating; otherwise a :exc:`TypeError`
133 If a default is not set (or set manually to `attrs.NOTHING`), a value
134 *must* be supplied when instantiating; otherwise a `TypeError`
77 135 will be raised.
78 136
79 137 The default can also be set using decorator notation as shown below.
80 138
81 :type default: Any value.
82
83 :param validator: :func:`callable` that is called by ``attrs``-generated
139 :type default: Any value
140
141 :param callable factory: Syntactic sugar for
142 ``default=attr.Factory(factory)``.
143
144 :param validator: `callable` that is called by ``attrs``-generated
84 145 ``__init__`` methods after the instance has been initialized. They
85 receive the initialized instance, the :class:`Attribute`, and the
146 receive the initialized instance, the :func:`~attrs.Attribute`, and the
86 147 passed value.
87 148
88 149 The return value is *not* inspected so the validator has to throw an
89 150 exception itself.
90 151
91 If a ``list`` is passed, its items are treated as validators and must
152 If a `list` is passed, its items are treated as validators and must
92 153 all pass.
93 154
94 155 Validators can be globally disabled and re-enabled using
95 :func:`get_run_validators`.
156 `get_run_validators`.
96 157
97 158 The validator can also be set using decorator notation as shown below.
98 159
99 :type validator: ``callable`` or a ``list`` of ``callable``\ s.
100
101 :param bool repr: Include this attribute in the generated ``__repr__``
102 method.
103 :param bool cmp: Include this attribute in the generated comparison methods
104 (``__eq__`` et al).
105 :param hash: Include this attribute in the generated ``__hash__``
106 method. If ``None`` (default), mirror *cmp*'s value. This is the
107 correct behavior according the Python spec. Setting this value to
108 anything else than ``None`` is *discouraged*.
109 :type hash: ``bool`` or ``None``
160 :type validator: `callable` or a `list` of `callable`\\ s.
161
162 :param repr: Include this attribute in the generated ``__repr__``
163 method. If ``True``, include the attribute; if ``False``, omit it. By
164 default, the built-in ``repr()`` function is used. To override how the
165 attribute value is formatted, pass a ``callable`` that takes a single
166 value and returns a string. Note that the resulting string is used
167 as-is, i.e. it will be used directly *instead* of calling ``repr()``
168 (the default).
169 :type repr: a `bool` or a `callable` to use a custom function.
170
171 :param eq: If ``True`` (default), include this attribute in the
172 generated ``__eq__`` and ``__ne__`` methods that check two instances
173 for equality. To override how the attribute value is compared,
174 pass a ``callable`` that takes a single value and returns the value
175 to be compared.
176 :type eq: a `bool` or a `callable`.
177
178 :param order: If ``True`` (default), include this attribute in the
179 generated ``__lt__``, ``__le__``, ``__gt__`` and ``__ge__`` methods.
180 To override how the attribute value is ordered,
181 pass a ``callable`` that takes a single value and returns the value
182 to be ordered.
183 :type order: a `bool` or a `callable`.
184
185 :param cmp: Setting *cmp* is equivalent to setting *eq* and *order* to the
186 same value. Must not be mixed with *eq* or *order*.
187 :type cmp: a `bool` or a `callable`.
188
189 :param Optional[bool] hash: Include this attribute in the generated
190 ``__hash__`` method. If ``None`` (default), mirror *eq*'s value. This
191 is the correct behavior according the Python spec. Setting this value
192 to anything else than ``None`` is *discouraged*.
110 193 :param bool init: Include this attribute in the generated ``__init__``
111 194 method. It is possible to set this to ``False`` and set a default
112 195 value. In that case this attribute is unconditionally initialized
113 196 with the specified default value or factory.
114 :param callable convert: :func:`callable` that is called by
197 :param callable converter: `callable` that is called by
115 198 ``attrs``-generated ``__init__`` methods to convert attribute's value
116 199 to the desired format. It is given the passed-in value, and the
117 200 returned value will be used as the new value of the attribute. The
118 201 value is converted before being passed to the validator, if any.
119 202 :param metadata: An arbitrary mapping, to be used by third-party
120 components. See :ref:`extending_metadata`.
121
122 .. versionchanged:: 17.1.0 *validator* can be a ``list`` now.
123 .. versionchanged:: 17.1.0
124 *hash* is ``None`` and therefore mirrors *cmp* by default .
203 components. See `extending_metadata`.
204 :param type: The type of the attribute. In Python 3.6 or greater, the
205 preferred method to specify the type is using a variable annotation
206 (see :pep:`526`).
207 This argument is provided for backward compatibility.
208 Regardless of the approach used, the type will be stored on
209 ``Attribute.type``.
210
211 Please note that ``attrs`` doesn't do anything with this metadata by
212 itself. You can use it as part of your own code or for
213 `static type checking <types>`.
214 :param kw_only: Make this attribute keyword-only (Python 3+)
215 in the generated ``__init__`` (if ``init`` is ``False``, this
216 parameter is ignored).
217 :param on_setattr: Allows overwriting the *on_setattr* setting from
218 `attr.s`. If left `None`, the *on_setattr* value from `attr.s` is used.
219 Set to `attrs.setters.NO_OP` to run **no** `setattr` hooks for this
220 attribute -- regardless of the setting in `attr.s`.
221 :type on_setattr: `callable`, or a list of callables, or `None`, or
222 `attrs.setters.NO_OP`
223
224 .. versionadded:: 15.2.0 *convert*
225 .. versionadded:: 16.3.0 *metadata*
226 .. versionchanged:: 17.1.0 *validator* can be a ``list`` now.
227 .. versionchanged:: 17.1.0
228 *hash* is ``None`` and therefore mirrors *eq* by default.
229 .. versionadded:: 17.3.0 *type*
230 .. deprecated:: 17.4.0 *convert*
231 .. versionadded:: 17.4.0 *converter* as a replacement for the deprecated
232 *convert* to achieve consistency with other noun-based arguments.
233 .. versionadded:: 18.1.0
234 ``factory=f`` is syntactic sugar for ``default=attr.Factory(f)``.
235 .. versionadded:: 18.2.0 *kw_only*
236 .. versionchanged:: 19.2.0 *convert* keyword argument removed.
237 .. versionchanged:: 19.2.0 *repr* also accepts a custom callable.
238 .. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01.
239 .. versionadded:: 19.2.0 *eq* and *order*
240 .. versionadded:: 20.1.0 *on_setattr*
241 .. versionchanged:: 20.3.0 *kw_only* backported to Python 2
242 .. versionchanged:: 21.1.0
243 *eq*, *order*, and *cmp* also accept a custom callable
244 .. versionchanged:: 21.1.0 *cmp* undeprecated
125 245 """
246 eq, eq_key, order, order_key = _determine_attrib_eq_order(
247 cmp, eq, order, True
248 )
249
126 250 if hash is not None and hash is not True and hash is not False:
127 251 raise TypeError(
128 252 "Invalid value for hash. Must be True, False, or None."
129 253 )
254
255 if factory is not None:
256 if default is not NOTHING:
257 raise ValueError(
258 "The `default` and `factory` arguments are mutually "
259 "exclusive."
260 )
261 if not callable(factory):
262 raise ValueError("The `factory` argument must be a callable.")
263 default = Factory(factory)
264
265 if metadata is None:
266 metadata = {}
267
268 # Apply syntactic sugar by auto-wrapping.
269 if isinstance(on_setattr, (list, tuple)):
270 on_setattr = setters.pipe(*on_setattr)
271
272 if validator and isinstance(validator, (list, tuple)):
273 validator = and_(*validator)
274
275 if converter and isinstance(converter, (list, tuple)):
276 converter = pipe(*converter)
277
130 278 return _CountingAttr(
131 279 default=default,
132 280 validator=validator,
133 281 repr=repr,
134 cmp=cmp,
282 cmp=None,
135 283 hash=hash,
136 284 init=init,
137 convert=convert,
285 converter=converter,
138 286 metadata=metadata,
287 type=type,
288 kw_only=kw_only,
289 eq=eq,
290 eq_key=eq_key,
291 order=order,
292 order_key=order_key,
293 on_setattr=on_setattr,
139 294 )
140 295
141 296
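Pulling the pieces together, a hedged sketch of the call surface documented above (field names and values are illustrative):

    import attr

    @attr.s
    class User:
        name = attr.ib(converter=str.strip)
        email = attr.ib(validator=attr.validators.instance_of(str))
        tags = attr.ib(factory=list)  # sugar for default=attr.Factory(list)

    User("  jane  ", "jane@example.com")
    # User(name='jane', email='jane@example.com', tags=[])
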
297 def _compile_and_eval(script, globs, locs=None, filename=""):
298 """
299 "Exec" the script with the given global (globs) and local (locs) variables.
300 """
301 bytecode = compile(script, filename, "exec")
302 eval(bytecode, globs, locs)
303
304
305 def _make_method(name, script, filename, globs):
306 """
307 Create the method with the script given and return the method object.
308 """
309 locs = {}
310
311 # In order for debuggers like PDB to be able to step through the code,
312 # we add a fake linecache entry.
313 count = 1
314 base_filename = filename
315 while True:
316 linecache_tuple = (
317 len(script),
318 None,
319 script.splitlines(True),
320 filename,
321 )
322 old_val = linecache.cache.setdefault(filename, linecache_tuple)
323 if old_val == linecache_tuple:
324 break
325 else:
326 filename = "{}-{}>".format(base_filename[:-1], count)
327 count += 1
328
329 _compile_and_eval(script, globs, locs, filename)
330
331 return locs[name]
332
333
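A sketch of what ``_make_method`` buys over a bare ``exec`` (private helper, illustrative arguments): the fake linecache entry lets debuggers show the generated source:

    import linecache
    from attr._make import _make_method  # private, illustration only

    src = "def twice(self):\n    return 2 * self.x"
    twice = _make_method("twice", src, "<attrs sketch>", globs={})

    linecache.cache["<attrs sketch>"][2][0]
    # 'def twice(self):\n'; pdb can now step into the generated method
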
142 334 def _make_attr_tuple_class(cls_name, attr_names):
143 335 """
144 336 Create a tuple subclass to hold `Attribute`s for an `attrs` class.
@@ -156,75 +348,273 def _make_attr_tuple_class(cls_name, att
156 348 ]
157 349 if attr_names:
158 350 for i, attr_name in enumerate(attr_names):
159 attr_class_template.append(_tuple_property_pat.format(
160 index=i,
161 attr_name=attr_name,
162 ))
351 attr_class_template.append(
352 _tuple_property_pat.format(index=i, attr_name=attr_name)
353 )
163 354 else:
164 355 attr_class_template.append(" pass")
165 globs = {"itemgetter": itemgetter}
166 eval(compile("\n".join(attr_class_template), "", "exec"), globs)
356 globs = {"_attrs_itemgetter": itemgetter, "_attrs_property": property}
357 _compile_and_eval("\n".join(attr_class_template), globs)
167 358 return globs[attr_class_name]
168 359
169 360
170 def _transform_attrs(cls, these):
361 # Tuple class for extracted attributes from a class definition.
362 # `base_attrs` is a subset of `attrs`.
363 _Attributes = _make_attr_tuple_class(
364 "_Attributes",
365 [
366 # all attributes to build dunder methods for
367 "attrs",
368 # attributes that have been inherited
369 "base_attrs",
370 # map inherited attributes to their originating classes
371 "base_attrs_map",
372 ],
373 )
374
375
376 def _is_class_var(annot):
377 """
378 Check whether *annot* is a typing.ClassVar.
379
380 The string comparison hack is used to avoid evaluating all string
381 annotations, which would put attrs-based classes at a performance
382 disadvantage compared to plain old classes.
383 """
384 annot = str(annot)
385
386 # Annotation can be quoted.
387 if annot.startswith(("'", '"')) and annot.endswith(("'", '"')):
388 annot = annot[1:-1]
389
390 return annot.startswith(_classvar_prefixes)
391
392
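A quick sketch of the string-prefix check (private helper, illustration only):

    import typing
    from attr._make import _is_class_var  # private, illustration only

    _is_class_var(typing.ClassVar[int])  # True
    _is_class_var("'ClassVar[int]'")     # True; surrounding quotes are stripped
    _is_class_var(typing.List[int])      # False
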
393 def _has_own_attribute(cls, attrib_name):
394 """
395 Check whether *cls* defines *attrib_name* (and doesn't just inherit it).
396
397 Requires Python 3.
398 """
399 attr = getattr(cls, attrib_name, _sentinel)
400 if attr is _sentinel:
401 return False
402
403 for base_cls in cls.__mro__[1:]:
404 a = getattr(base_cls, attrib_name, None)
405 if attr is a:
406 return False
407
408 return True
409
410
411 def _get_annotations(cls):
412 """
413 Get annotations for *cls*.
414 """
415 if _has_own_attribute(cls, "__annotations__"):
416 return cls.__annotations__
417
418 return {}
419
420
421 def _counter_getter(e):
422 """
423 Key function for sorting to avoid re-creating a lambda for every class.
171 424 """
172 Transforms all `_CountingAttr`s on a class into `Attribute`s and saves the
173 list in `__attrs_attrs__`.
425 return e[1].counter
426
427
428 def _collect_base_attrs(cls, taken_attr_names):
429 """
430 Collect attr.ibs from base classes of *cls*, except *taken_attr_names*.
431 """
432 base_attrs = []
433 base_attr_map = {} # A dictionary of base attrs to their classes.
434
435 # Traverse the MRO and collect attributes.
436 for base_cls in reversed(cls.__mro__[1:-1]):
437 for a in getattr(base_cls, "__attrs_attrs__", []):
438 if a.inherited or a.name in taken_attr_names:
439 continue
440
441 a = a.evolve(inherited=True)
442 base_attrs.append(a)
443 base_attr_map[a.name] = base_cls
444
445 # For each name, only keep the freshest definition i.e. the furthest at the
446 # back. base_attr_map is fine because it gets overwritten with every new
447 # instance.
448 filtered = []
449 seen = set()
450 for a in reversed(base_attrs):
451 if a.name in seen:
452 continue
453 filtered.insert(0, a)
454 seen.add(a.name)
455
456 return filtered, base_attr_map
457
458
459 def _collect_base_attrs_broken(cls, taken_attr_names):
460 """
461 Collect attr.ibs from base classes of *cls*, except *taken_attr_names*.
462
463 N.B. *taken_attr_names* will be mutated.
464
465 Adhere to the old incorrect behavior.
466
467 Notably it collects from the front and considers inherited attributes, which
468 leads to the buggy behavior reported in #428.
469 """
470 base_attrs = []
471 base_attr_map = {} # A dictionary of base attrs to their classes.
472
473 # Traverse the MRO and collect attributes.
474 for base_cls in cls.__mro__[1:-1]:
475 for a in getattr(base_cls, "__attrs_attrs__", []):
476 if a.name in taken_attr_names:
477 continue
478
479 a = a.evolve(inherited=True)
480 taken_attr_names.add(a.name)
481 base_attrs.append(a)
482 base_attr_map[a.name] = base_cls
483
484 return base_attrs, base_attr_map
485
486
487 def _transform_attrs(
488 cls, these, auto_attribs, kw_only, collect_by_mro, field_transformer
489 ):
490 """
491 Transform all `_CountingAttr`s on a class into `Attribute`s.
174 492
175 493 If *these* is passed, use that and don't look for them on the class.
494
495 *collect_by_mro* is True, collect them in the correct MRO order, otherwise
496 use the old -- incorrect -- order. See #428.
497
498 Return an `_Attributes`.
176 499 """
177 super_cls = []
178 for c in reversed(cls.__mro__[1:-1]):
179 sub_attrs = getattr(c, "__attrs_attrs__", None)
180 if sub_attrs is not None:
181 super_cls.extend(a for a in sub_attrs if a not in super_cls)
182 if these is None:
183 ca_list = [(name, attr)
184 for name, attr
185 in cls.__dict__.items()
186 if isinstance(attr, _CountingAttr)]
500 cd = cls.__dict__
501 anns = _get_annotations(cls)
502
503 if these is not None:
504 ca_list = [(name, ca) for name, ca in these.items()]
505
506 if not isinstance(these, ordered_dict):
507 ca_list.sort(key=_counter_getter)
508 elif auto_attribs is True:
509 ca_names = {
510 name
511 for name, attr in cd.items()
512 if isinstance(attr, _CountingAttr)
513 }
514 ca_list = []
515 annot_names = set()
516 for attr_name, type in anns.items():
517 if _is_class_var(type):
518 continue
519 annot_names.add(attr_name)
520 a = cd.get(attr_name, NOTHING)
521
522 if not isinstance(a, _CountingAttr):
523 if a is NOTHING:
524 a = attrib()
525 else:
526 a = attrib(default=a)
527 ca_list.append((attr_name, a))
528
529 unannotated = ca_names - annot_names
530 if len(unannotated) > 0:
531 raise UnannotatedAttributeError(
532 "The following `attr.ib`s lack a type annotation: "
533 + ", ".join(
534 sorted(unannotated, key=lambda n: cd.get(n).counter)
535 )
536 + "."
537 )
187 538 else:
188 ca_list = [(name, ca)
189 for name, ca
190 in iteritems(these)]
191
192 non_super_attrs = [
193 Attribute.from_counting_attr(name=attr_name, ca=ca)
194 for attr_name, ca
195 in sorted(ca_list, key=lambda e: e[1].counter)
539 ca_list = sorted(
540 (
541 (name, attr)
542 for name, attr in cd.items()
543 if isinstance(attr, _CountingAttr)
544 ),
545 key=lambda e: e[1].counter,
546 )
547
548 own_attrs = [
549 Attribute.from_counting_attr(
550 name=attr_name, ca=ca, type=anns.get(attr_name)
551 )
552 for attr_name, ca in ca_list
196 553 ]
197 attr_names = [a.name for a in super_cls + non_super_attrs]
198
199 AttrsClass = _make_attr_tuple_class(cls.__name__, attr_names)
200
201 cls.__attrs_attrs__ = AttrsClass(super_cls + [
202 Attribute.from_counting_attr(name=attr_name, ca=ca)
203 for attr_name, ca
204 in sorted(ca_list, key=lambda e: e[1].counter)
205 ])
206
554
555 if collect_by_mro:
556 base_attrs, base_attr_map = _collect_base_attrs(
557 cls, {a.name for a in own_attrs}
558 )
559 else:
560 base_attrs, base_attr_map = _collect_base_attrs_broken(
561 cls, {a.name for a in own_attrs}
562 )
563
564 if kw_only:
565 own_attrs = [a.evolve(kw_only=True) for a in own_attrs]
566 base_attrs = [a.evolve(kw_only=True) for a in base_attrs]
567
568 attrs = base_attrs + own_attrs
569
570 # Mandatory vs non-mandatory attr order only matters when they are part of
571 # the __init__ signature and when they aren't kw_only (which are moved to
572 # the end and can be mandatory or non-mandatory in any order, as they will
573 # be specified as keyword args anyway). Check the order of those attrs:
207 574 had_default = False
208 for a in cls.__attrs_attrs__:
209 if these is None and a not in super_cls:
210 setattr(cls, a.name, a)
211 if had_default is True and a.default is NOTHING and a.init is True:
575 for a in (a for a in attrs if a.init is not False and a.kw_only is False):
576 if had_default is True and a.default is NOTHING:
212 577 raise ValueError(
213 578 "No mandatory attributes allowed after an attribute with a "
214 "default value or factory. Attribute in question: {a!r}"
215 .format(a=a)
579 "default value or factory. Attribute in question: %r" % (a,)
216 580 )
217 elif had_default is False and \
218 a.default is not NOTHING and \
219 a.init is not False:
581
582 if had_default is False and a.default is not NOTHING:
220 583 had_default = True
221 584
222
223 def _frozen_setattrs(self, name, value):
224 """
225 Attached to frozen classes as __setattr__.
226 """
227 raise FrozenInstanceError()
585 if field_transformer is not None:
586 attrs = field_transformer(cls, attrs)
587
588 # Create AttrsClass *after* applying the field_transformer since it may
589 # add or remove attributes!
590 attr_names = [a.name for a in attrs]
591 AttrsClass = _make_attr_tuple_class(cls.__name__, attr_names)
592
593 return _Attributes((AttrsClass(attrs), base_attrs, base_attr_map))
594
595
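The *auto_attribs* branch above is the source of ``UnannotatedAttributeError``; a sketch of how it surfaces (illustrative class):

    import attr

    try:
        @attr.s(auto_attribs=True)
        class Broken:
            x: int
            y = attr.ib()  # no annotation, so it is rejected
    except attr.exceptions.UnannotatedAttributeError as e:
        print(e)
    # The following `attr.ib`s lack a type annotation: y.
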
596 if PYPY:
597
598 def _frozen_setattrs(self, name, value):
599 """
600 Attached to frozen classes as __setattr__.
601 """
602 if isinstance(self, BaseException) and name in (
603 "__cause__",
604 "__context__",
605 ):
606 BaseException.__setattr__(self, name, value)
607 return
608
609 raise FrozenInstanceError()
610
611 else:
612
613 def _frozen_setattrs(self, name, value):
614 """
615 Attached to frozen classes as __setattr__.
616 """
617 raise FrozenInstanceError()
228 618
229 619
230 620 def _frozen_delattrs(self, name):
@@ -234,44 +624,661 def _frozen_delattrs(self, name):
234 624 raise FrozenInstanceError()
235 625
236 626
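Both hooks surface to users as ``FrozenInstanceError``; a minimal sketch (illustrative class):

    import attr

    @attr.s(frozen=True)
    class Frozen:
        x = attr.ib()

    f = Frozen(1)
    try:
        f.x = 2  # routed through _frozen_setattrs
    except attr.exceptions.FrozenInstanceError:
        print("immutable")
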
237 def attributes(maybe_cls=None, these=None, repr_ns=None,
238 repr=True, cmp=True, hash=None, init=True,
239 slots=False, frozen=False, str=False):
627 class _ClassBuilder:
628 """
629 Iteratively build *one* class.
630 """
631
632 __slots__ = (
633 "_attr_names",
634 "_attrs",
635 "_base_attr_map",
636 "_base_names",
637 "_cache_hash",
638 "_cls",
639 "_cls_dict",
640 "_delete_attribs",
641 "_frozen",
642 "_has_pre_init",
643 "_has_post_init",
644 "_is_exc",
645 "_on_setattr",
646 "_slots",
647 "_weakref_slot",
648 "_wrote_own_setattr",
649 "_has_custom_setattr",
650 )
651
652 def __init__(
653 self,
654 cls,
655 these,
656 slots,
657 frozen,
658 weakref_slot,
659 getstate_setstate,
660 auto_attribs,
661 kw_only,
662 cache_hash,
663 is_exc,
664 collect_by_mro,
665 on_setattr,
666 has_custom_setattr,
667 field_transformer,
668 ):
669 attrs, base_attrs, base_map = _transform_attrs(
670 cls,
671 these,
672 auto_attribs,
673 kw_only,
674 collect_by_mro,
675 field_transformer,
676 )
677
678 self._cls = cls
679 self._cls_dict = dict(cls.__dict__) if slots else {}
680 self._attrs = attrs
681 self._base_names = {a.name for a in base_attrs}
682 self._base_attr_map = base_map
683 self._attr_names = tuple(a.name for a in attrs)
684 self._slots = slots
685 self._frozen = frozen
686 self._weakref_slot = weakref_slot
687 self._cache_hash = cache_hash
688 self._has_pre_init = bool(getattr(cls, "__attrs_pre_init__", False))
689 self._has_post_init = bool(getattr(cls, "__attrs_post_init__", False))
690 self._delete_attribs = not bool(these)
691 self._is_exc = is_exc
692 self._on_setattr = on_setattr
693
694 self._has_custom_setattr = has_custom_setattr
695 self._wrote_own_setattr = False
696
697 self._cls_dict["__attrs_attrs__"] = self._attrs
698
699 if frozen:
700 self._cls_dict["__setattr__"] = _frozen_setattrs
701 self._cls_dict["__delattr__"] = _frozen_delattrs
702
703 self._wrote_own_setattr = True
704 elif on_setattr in (
705 _ng_default_on_setattr,
706 setters.validate,
707 setters.convert,
708 ):
709 has_validator = has_converter = False
710 for a in attrs:
711 if a.validator is not None:
712 has_validator = True
713 if a.converter is not None:
714 has_converter = True
715
716 if has_validator and has_converter:
717 break
718 if (
719 (
720 on_setattr == _ng_default_on_setattr
721 and not (has_validator or has_converter)
722 )
723 or (on_setattr == setters.validate and not has_validator)
724 or (on_setattr == setters.convert and not has_converter)
725 ):
726 # If class-level on_setattr is set to convert + validate, but
727 # there's no field to convert or validate, pretend like there's
728 # no on_setattr.
729 self._on_setattr = None
730
731 if getstate_setstate:
732 (
733 self._cls_dict["__getstate__"],
734 self._cls_dict["__setstate__"],
735 ) = self._make_getstate_setstate()
736
737 def __repr__(self):
738 return "<_ClassBuilder(cls={cls})>".format(cls=self._cls.__name__)
739
740 def build_class(self):
741 """
742 Finalize class based on the accumulated configuration.
743
744 Builder cannot be used after calling this method.
745 """
746 if self._slots is True:
747 return self._create_slots_class()
748 else:
749 return self._patch_original_class()
750
751 def _patch_original_class(self):
752 """
753 Apply accumulated methods and return the class.
754 """
755 cls = self._cls
756 base_names = self._base_names
757
758 # Clean class of attribute definitions (`attr.ib()`s).
759 if self._delete_attribs:
760 for name in self._attr_names:
761 if (
762 name not in base_names
763 and getattr(cls, name, _sentinel) is not _sentinel
764 ):
765 try:
766 delattr(cls, name)
767 except AttributeError:
768 # This can happen if a base class defines a class
769 # variable and we want to set an attribute with the
770 # same name by using only a type annotation.
771 pass
772
773 # Attach our dunder methods.
774 for name, value in self._cls_dict.items():
775 setattr(cls, name, value)
776
777 # If we've inherited an attrs __setattr__ and don't write our own,
778 # reset it to object's.
779 if not self._wrote_own_setattr and getattr(
780 cls, "__attrs_own_setattr__", False
781 ):
782 cls.__attrs_own_setattr__ = False
783
784 if not self._has_custom_setattr:
785 cls.__setattr__ = _obj_setattr
786
787 return cls
788
789 def _create_slots_class(self):
790 """
791 Build and return a new class with a `__slots__` attribute.
792 """
793 cd = {
794 k: v
795 for k, v in self._cls_dict.items()
796 if k not in tuple(self._attr_names) + ("__dict__", "__weakref__")
797 }
798
799 # If our class doesn't have its own implementation of __setattr__
800 # (either from the user or by us), check the bases, if one of them has
801 # an attrs-made __setattr__, that needs to be reset. We don't walk the
802 # MRO because we only care about our immediate base classes.
803 # XXX: This can be confused by subclassing a slotted attrs class with
805 # XXX: a non-attrs class and subclassing the resulting class with an attrs
805 # XXX: class. See `test_slotted_confused` for details. For now that's
806 # XXX: OK with us.
807 if not self._wrote_own_setattr:
808 cd["__attrs_own_setattr__"] = False
809
810 if not self._has_custom_setattr:
811 for base_cls in self._cls.__bases__:
812 if base_cls.__dict__.get("__attrs_own_setattr__", False):
813 cd["__setattr__"] = _obj_setattr
814 break
815
816 # Traverse the MRO to collect existing slots
817 # and check for an existing __weakref__.
818 existing_slots = dict()
819 weakref_inherited = False
820 for base_cls in self._cls.__mro__[1:-1]:
821 if base_cls.__dict__.get("__weakref__", None) is not None:
822 weakref_inherited = True
823 existing_slots.update(
824 {
825 name: getattr(base_cls, name)
826 for name in getattr(base_cls, "__slots__", [])
827 }
828 )
829
830 base_names = set(self._base_names)
831
832 names = self._attr_names
833 if (
834 self._weakref_slot
835 and "__weakref__" not in getattr(self._cls, "__slots__", ())
836 and "__weakref__" not in names
837 and not weakref_inherited
838 ):
839 names += ("__weakref__",)
840
841 # We only add the names of attributes that aren't inherited.
842 # Setting __slots__ to inherited attributes wastes memory.
843 slot_names = [name for name in names if name not in base_names]
844 # There are slots for attributes from the current class
845 # that are defined in parent classes.
846 # As their descriptors may be overridden by a child class,
847 # we collect them here and update the class dict
848 reused_slots = {
849 slot: slot_descriptor
850 for slot, slot_descriptor in existing_slots.items()
851 if slot in slot_names
852 }
853 slot_names = [name for name in slot_names if name not in reused_slots]
854 cd.update(reused_slots)
855 if self._cache_hash:
856 slot_names.append(_hash_cache_field)
857 cd["__slots__"] = tuple(slot_names)
858
859 cd["__qualname__"] = self._cls.__qualname__
860
861 # Create new class based on old class and our methods.
862 cls = type(self._cls)(self._cls.__name__, self._cls.__bases__, cd)
863
864 # The following is a fix for
865 # <https://github.com/python-attrs/attrs/issues/102>. On Python 3,
866 # if a method mentions `__class__` or uses the no-arg super(), the
867 # compiler will bake a reference to the class in the method itself
868 # as `method.__closure__`. Since we replace the class with a
869 # clone, we rewrite these references so it keeps working.
870 for item in cls.__dict__.values():
871 if isinstance(item, (classmethod, staticmethod)):
872 # Class- and staticmethods hide their functions inside.
873 # These might need to be rewritten as well.
874 closure_cells = getattr(item.__func__, "__closure__", None)
875 elif isinstance(item, property):
876 # Workaround for property `super()` shortcut (PY3-only).
877 # There is no universal way for other descriptors.
878 closure_cells = getattr(item.fget, "__closure__", None)
879 else:
880 closure_cells = getattr(item, "__closure__", None)
881
882 if not closure_cells: # Catch None or the empty list.
883 continue
884 for cell in closure_cells:
885 try:
886 match = cell.cell_contents is self._cls
887 except ValueError: # ValueError: Cell is empty
888 pass
889 else:
890 if match:
891 set_closure_cell(cell, cls)
892
893 return cls
894
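A sketch of the behavior the closure-cell rewrite preserves (illustrative class): zero-argument ``super()`` keeps working even though the slotted class is a clone:

    import attr

    @attr.s(slots=True)
    class Greeter:
        name = attr.ib()

        def describe(self):
            # The compiler bakes a __class__ cell into this method; without
            # the rewrite above, super() would point at the discarded class.
            return "{} named {}".format(super().__repr__(), self.name)

    Greeter("hi").describe()  # works; the cell references the final class
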
895 def add_repr(self, ns):
896 self._cls_dict["__repr__"] = self._add_method_dunders(
897 _make_repr(self._attrs, ns, self._cls)
898 )
899 return self
900
901 def add_str(self):
902 repr = self._cls_dict.get("__repr__")
903 if repr is None:
904 raise ValueError(
905 "__str__ can only be generated if a __repr__ exists."
906 )
907
908 def __str__(self):
909 return self.__repr__()
910
911 self._cls_dict["__str__"] = self._add_method_dunders(__str__)
912 return self
913
914 def _make_getstate_setstate(self):
915 """
916 Create custom __setstate__ and __getstate__ methods.
917 """
918 # __weakref__ is not writable.
919 state_attr_names = tuple(
920 an for an in self._attr_names if an != "__weakref__"
921 )
922
923 def slots_getstate(self):
924 """
925 Automatically created by attrs.
926 """
927 return tuple(getattr(self, name) for name in state_attr_names)
928
929 hash_caching_enabled = self._cache_hash
930
931 def slots_setstate(self, state):
932 """
933 Automatically created by attrs.
934 """
935 __bound_setattr = _obj_setattr.__get__(self, Attribute)
936 for name, value in zip(state_attr_names, state):
937 __bound_setattr(name, value)
938
939 # The hash code cache is not included when the object is
940 # serialized, but it still needs to be initialized to None to
941 # indicate that the first call to __hash__ should be a cache
942 # miss.
943 if hash_caching_enabled:
944 __bound_setattr(_hash_cache_field, None)
945
946 return slots_getstate, slots_setstate
947
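The net effect, sketched with an illustrative class: slotted instances round-trip through pickle because these two methods are attached:

    import pickle
    import attr

    @attr.s(slots=True)
    class P:
        x = attr.ib()

    pickle.loads(pickle.dumps(P(1)))
    # P(x=1); state travels as the plain tuple (1,)
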
948 def make_unhashable(self):
949 self._cls_dict["__hash__"] = None
950 return self
951
952 def add_hash(self):
953 self._cls_dict["__hash__"] = self._add_method_dunders(
954 _make_hash(
955 self._cls,
956 self._attrs,
957 frozen=self._frozen,
958 cache_hash=self._cache_hash,
959 )
960 )
961
962 return self
963
964 def add_init(self):
965 self._cls_dict["__init__"] = self._add_method_dunders(
966 _make_init(
967 self._cls,
968 self._attrs,
969 self._has_pre_init,
970 self._has_post_init,
971 self._frozen,
972 self._slots,
973 self._cache_hash,
974 self._base_attr_map,
975 self._is_exc,
976 self._on_setattr,
977 attrs_init=False,
978 )
979 )
980
981 return self
982
983 def add_match_args(self):
984 self._cls_dict["__match_args__"] = tuple(
985 field.name
986 for field in self._attrs
987 if field.init and not field.kw_only
988 )
989
990 def add_attrs_init(self):
991 self._cls_dict["__attrs_init__"] = self._add_method_dunders(
992 _make_init(
993 self._cls,
994 self._attrs,
995 self._has_pre_init,
996 self._has_post_init,
997 self._frozen,
998 self._slots,
999 self._cache_hash,
1000 self._base_attr_map,
1001 self._is_exc,
1002 self._on_setattr,
1003 attrs_init=True,
1004 )
1005 )
1006
1007 return self
1008
1009 def add_eq(self):
1010 cd = self._cls_dict
1011
1012 cd["__eq__"] = self._add_method_dunders(
1013 _make_eq(self._cls, self._attrs)
1014 )
1015 cd["__ne__"] = self._add_method_dunders(_make_ne())
1016
1017 return self
1018
1019 def add_order(self):
1020 cd = self._cls_dict
1021
1022 cd["__lt__"], cd["__le__"], cd["__gt__"], cd["__ge__"] = (
1023 self._add_method_dunders(meth)
1024 for meth in _make_order(self._cls, self._attrs)
1025 )
1026
1027 return self
1028
1029 def add_setattr(self):
1030 if self._frozen:
1031 return self
1032
1033 sa_attrs = {}
1034 for a in self._attrs:
1035 on_setattr = a.on_setattr or self._on_setattr
1036 if on_setattr and on_setattr is not setters.NO_OP:
1037 sa_attrs[a.name] = a, on_setattr
1038
1039 if not sa_attrs:
1040 return self
1041
1042 if self._has_custom_setattr:
1043 # We need to write a __setattr__ but there already is one!
1044 raise ValueError(
1045 "Can't combine custom __setattr__ with on_setattr hooks."
1046 )
1047
1048 # docstring comes from _add_method_dunders
1049 def __setattr__(self, name, val):
1050 try:
1051 a, hook = sa_attrs[name]
1052 except KeyError:
1053 nval = val
1054 else:
1055 nval = hook(self, a, val)
1056
1057 _obj_setattr(self, name, nval)
1058
1059 self._cls_dict["__attrs_own_setattr__"] = True
1060 self._cls_dict["__setattr__"] = self._add_method_dunders(__setattr__)
1061 self._wrote_own_setattr = True
1062
1063 return self
1064
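A sketch of the hook this method wires up (illustrative class): assignment goes through the generated ``__setattr__``, which consults the per-attribute hooks:

    import attr

    @attr.s(on_setattr=attr.setters.validate)
    class Bounded:
        x = attr.ib(validator=attr.validators.instance_of(int))

    b = Bounded(1)
    b.x = 2       # validator runs inside the generated __setattr__
    # b.x = "no"  # would raise TypeError from instance_of
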
1065 def _add_method_dunders(self, method):
1066 """
1067 Add __module__ and __qualname__ to a *method* if possible.
1068 """
1069 try:
1070 method.__module__ = self._cls.__module__
1071 except AttributeError:
1072 pass
1073
1074 try:
1075 method.__qualname__ = ".".join(
1076 (self._cls.__qualname__, method.__name__)
1077 )
1078 except AttributeError:
1079 pass
1080
1081 try:
1082 method.__doc__ = "Method generated by attrs for class %s." % (
1083 self._cls.__qualname__,
1084 )
1085 except AttributeError:
1086 pass
1087
1088 return method
1089
1090
1091 def _determine_attrs_eq_order(cmp, eq, order, default_eq):
1092 """
1093 Validate the combination of *cmp*, *eq*, and *order*. Derive the effective
1094 values of eq and order. If *eq* is None, set it to *default_eq*.
1095 """
1096 if cmp is not None and any((eq is not None, order is not None)):
1097 raise ValueError("Don't mix `cmp` with `eq' and `order`.")
1098
1099 # cmp takes precedence due to bw-compatibility.
1100 if cmp is not None:
1101 return cmp, cmp
1102
1103 # If left None, equality is set to the specified default and ordering
1104 # mirrors equality.
1105 if eq is None:
1106 eq = default_eq
1107
1108 if order is None:
1109 order = eq
1110
1111 if eq is False and order is True:
1112 raise ValueError("`order` can only be True if `eq` is True too.")
1113
1114 return eq, order
1115
1116
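A few representative inputs for the rules above (private helper, illustration only):

    from attr._make import _determine_attrs_eq_order  # private

    _determine_attrs_eq_order(None, None, None, True)   # (True, True)
    _determine_attrs_eq_order(None, True, None, True)   # (True, True); order mirrors eq
    _determine_attrs_eq_order(False, None, None, True)  # (False, False); cmp wins
    # mixing raises: _determine_attrs_eq_order(True, True, None, True) -> ValueError
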
1117 def _determine_attrib_eq_order(cmp, eq, order, default_eq):
1118 """
1119 Validate the combination of *cmp*, *eq*, and *order*. Derive the effective
1120 values of eq and order. If *eq* is None, set it to *default_eq*.
1121 """
1122 if cmp is not None and any((eq is not None, order is not None)):
1123 raise ValueError("Don't mix `cmp` with `eq' and `order`.")
1124
1125 def decide_callable_or_boolean(value):
1126 """
1127 Decide whether a key function is used.
1128 """
1129 if callable(value):
1130 value, key = True, value
1131 else:
1132 key = None
1133 return value, key
1134
1135 # cmp takes precedence due to bw-compatibility.
1136 if cmp is not None:
1137 cmp, cmp_key = decide_callable_or_boolean(cmp)
1138 return cmp, cmp_key, cmp, cmp_key
1139
1140 # If left None, equality is set to the specified default and ordering
1141 # mirrors equality.
1142 if eq is None:
1143 eq, eq_key = default_eq, None
1144 else:
1145 eq, eq_key = decide_callable_or_boolean(eq)
1146
1147 if order is None:
1148 order, order_key = eq, eq_key
1149 else:
1150 order, order_key = decide_callable_or_boolean(order)
1151
1152 if eq is False and order is True:
1153 raise ValueError("`order` can only be True if `eq` is True too.")
1154
1155 return eq, eq_key, order, order_key
1156
1157
1158 def _determine_whether_to_implement(
1159 cls, flag, auto_detect, dunders, default=True
1160 ):
1161 """
1162 Check whether we should implement a set of methods for *cls*.
1163
1164 *flag* is the argument passed into @attr.s like 'init', *auto_detect* the
1165 same as passed into @attr.s and *dunders* is a tuple of attribute names
1166 whose presence signal that the user has implemented it themselves.
1167
1168 Return *default* if no reason for either for or against is found.
1169 """
1170 if flag is True or flag is False:
1171 return flag
1172
1173 if flag is None and auto_detect is False:
1174 return default
1175
1176 # Logically, flag is None and auto_detect is True here.
1177 for dunder in dunders:
1178 if _has_own_attribute(cls, dunder):
1179 return False
1180
1181 return default
1182
1183
1184 def attrs(
1185 maybe_cls=None,
1186 these=None,
1187 repr_ns=None,
1188 repr=None,
1189 cmp=None,
1190 hash=None,
1191 init=None,
1192 slots=False,
1193 frozen=False,
1194 weakref_slot=True,
1195 str=False,
1196 auto_attribs=False,
1197 kw_only=False,
1198 cache_hash=False,
1199 auto_exc=False,
1200 eq=None,
1201 order=None,
1202 auto_detect=False,
1203 collect_by_mro=False,
1204 getstate_setstate=None,
1205 on_setattr=None,
1206 field_transformer=None,
1207 match_args=True,
1208 ):
240 1209 r"""
241 1210 A class decorator that adds `dunder
242 1211 <https://wiki.python.org/moin/DunderAlias>`_\ -methods according to the
243 specified attributes using :func:`attr.ib` or the *these* argument.
244
245 :param these: A dictionary of name to :func:`attr.ib` mappings. This is
1212 specified attributes using `attr.ib` or the *these* argument.
1213
1214 :param these: A dictionary of name to `attr.ib` mappings. This is
246 1215 useful to avoid the definition of your attributes within the class body
247 1216 because you can't (e.g. if you want to add ``__repr__`` methods to
248 1217 Django models) or don't want to.
249 1218
250 1219 If *these* is not ``None``, ``attrs`` will *not* search the class body
251 for attributes.
252
253 :type these: :class:`dict` of :class:`str` to :func:`attr.ib`
1220 for attributes and will *not* remove any attributes from it.
1221
1222 If *these* is an ordered dict (`dict` on Python 3.6+,
1223 `collections.OrderedDict` otherwise), the order is deduced from
1224 the order of the attributes inside *these*. Otherwise the order
1225 of the definition of the attributes is used.
1226
1227 :type these: `dict` of `str` to `attr.ib`
254 1228
255 1229 :param str repr_ns: When using nested classes, there's no way in Python 2
256 1230 to automatically detect that. Therefore it's possible to set the
257 1231 namespace explicitly for a more meaningful ``repr`` output.
1232 :param bool auto_detect: Instead of setting the *init*, *repr*, *eq*,
1233 *order*, and *hash* arguments explicitly, assume they are set to
1234 ``True`` **unless any** of the involved methods for one of the
1235 arguments is implemented in the *current* class (i.e. it is *not*
1236 inherited from some base class).
1237
1238 So for example by implementing ``__eq__`` on a class yourself,
1239 ``attrs`` will deduce ``eq=False`` and will create *neither*
1240 ``__eq__`` *nor* ``__ne__`` (but Python classes come with a sensible
1241 ``__ne__`` by default, so it *should* be enough to only implement
1242 ``__eq__`` in most cases).
1243
1244 .. warning::
1245
1246 If you prevent ``attrs`` from creating the ordering methods for you
1247 (``order=False``, e.g. by implementing ``__le__``), it becomes
1248 *your* responsibility to make sure its ordering is sound. The best
1249 way is to use the `functools.total_ordering` decorator.
1250
1251
1252 Passing ``True`` or ``False`` to *init*, *repr*, *eq*, *order*,
1253 *cmp*, or *hash* overrides whatever *auto_detect* would determine.
1254
1255 *auto_detect* requires Python 3. Setting it ``True`` on Python 2 raises
1256 an `attrs.exceptions.PythonTooOldError`.
1257
258 1258 :param bool repr: Create a ``__repr__`` method with a human readable
259 represantation of ``attrs`` attributes..
1259 representation of ``attrs`` attributes.
260 1260 :param bool str: Create a ``__str__`` method that is identical to
261 1261 ``__repr__``. This is usually not necessary except for
262 :class:`Exception`\ s.
263 :param bool cmp: Create ``__eq__``, ``__ne__``, ``__lt__``, ``__le__``,
264 ``__gt__``, and ``__ge__`` methods that compare the class as if it were
265 a tuple of its ``attrs`` attributes. But the attributes are *only*
266 compared, if the type of both classes is *identical*!
267 :param hash: If ``None`` (default), the ``__hash__`` method is generated
268 according how *cmp* and *frozen* are set.
1262 `Exception`\ s.
1263 :param Optional[bool] eq: If ``True`` or ``None`` (default), add ``__eq__``
1264 and ``__ne__`` methods that check two instances for equality.
1265
1266 They compare the instances as if they were tuples of their ``attrs``
1267 attributes if and only if the types of both classes are *identical*!
1268 :param Optional[bool] order: If ``True``, add ``__lt__``, ``__le__``,
1269 ``__gt__``, and ``__ge__`` methods that behave like *eq* above and
1270 allow instances to be ordered. If ``None`` (default) mirror value of
1271 *eq*.
1272 :param Optional[bool] cmp: Setting *cmp* is equivalent to setting *eq*
1273 and *order* to the same value. Must not be mixed with *eq* or *order*.
1274 :param Optional[bool] hash: If ``None`` (default), the ``__hash__`` method
1275 is generated according to how *eq* and *frozen* are set.
269 1276
270 1277 1. If *both* are True, ``attrs`` will generate a ``__hash__`` for you.
271 2. If *cmp* is True and *frozen* is False, ``__hash__`` will be set to
1278 2. If *eq* is True and *frozen* is False, ``__hash__`` will be set to
272 1279 None, marking it unhashable (which it is).
273 3. If *cmp* is False, ``__hash__`` will be left untouched meaning the
274 ``__hash__`` method of the superclass will be used (if superclass is
1280 3. If *eq* is False, ``__hash__`` will be left untouched meaning the
1281 ``__hash__`` method of the base class will be used (if base class is
275 1282 ``object``, this means it will fall back to id-based hashing.).
276 1283
277 1284 Although not recommended, you can decide for yourself and force
@@ -279,29 +1286,37 def attributes(maybe_cls=None, these=Non
279 1286 didn't freeze it programmatically) by passing ``True`` or not. Both of
280 1287 these cases are rather special and should be used carefully.
281 1288
282 See the `Python documentation \
283 <https://docs.python.org/3/reference/datamodel.html#object.__hash__>`_
284 and the `GitHub issue that led to the default behavior \
285 <https://github.com/python-attrs/attrs/issues/136>`_ for more details.
286 :type hash: ``bool`` or ``None``
287 :param bool init: Create a ``__init__`` method that initialiazes the
288 ``attrs`` attributes. Leading underscores are stripped for the
289 argument name. If a ``__attrs_post_init__`` method exists on the
290 class, it will be called after the class is fully initialized.
291 :param bool slots: Create a slots_-style class that's more
292 memory-efficient. See :ref:`slots` for further ramifications.
1289 See our documentation on `hashing`, Python's documentation on
1290 `object.__hash__`, and the `GitHub issue that led to the default \
1291 behavior <https://github.com/python-attrs/attrs/issues/136>`_ for more
1292 details.
1293 :param bool init: Create an ``__init__`` method that initializes the
1294 ``attrs`` attributes. Leading underscores are stripped for the argument
1295 name. If a ``__attrs_pre_init__`` method exists on the class, it will
1296 be called before the class is initialized. If a ``__attrs_post_init__``
1297 method exists on the class, it will be called after the class is fully
1298 initialized.
1299
1300 If ``init`` is ``False``, an ``__attrs_init__`` method will be
1301 injected instead. This allows you to define a custom ``__init__``
1302 method that can do pre-init work such as ``super().__init__()``,
1303 and then call ``__attrs_init__()`` and ``__attrs_post_init__()``.
1304 :param bool slots: Create a `slotted class <slotted classes>` that's more
1305 memory-efficient. Slotted classes are generally superior to the default
1306 dict classes, but have some gotchas you should know about, so we
1307 encourage you to read the `glossary entry <slotted classes>`.
293 1308 :param bool frozen: Make instances immutable after initialization. If
294 1309 someone attempts to modify a frozen instance,
295 :exc:`attr.exceptions.FrozenInstanceError` is raised.
296
297 Please note:
1310 `attr.exceptions.FrozenInstanceError` is raised.
1311
1312 .. note::
298 1313
299 1314 1. This is achieved by installing a custom ``__setattr__`` method
300 on your class so you can't implement an own one.
1315 on your class, so you can't implement your own.
301 1316
302 1317 2. True immutability is impossible in Python.
303 1318
304 3. This *does* have a minor a runtime performance :ref:`impact
1319 3. This *does* have a minor runtime performance `impact
305 1320 <how-frozen>` when initializing new instances. In other words:
306 1321 ``__init__`` is slightly slower with ``frozen=True``.
307 1322
@@ -310,316 +1325,651 def attributes(maybe_cls=None, these=Non
310 1325 circumvent that limitation by using
311 1326 ``object.__setattr__(self, "attribute_name", value)``.
312 1327
313 .. _slots: https://docs.python.org/3.5/reference/datamodel.html#slots
314
315 .. versionadded:: 16.0.0 *slots*
316 .. versionadded:: 16.1.0 *frozen*
317 .. versionadded:: 16.3.0 *str*, and support for ``__attrs_post_init__``.
318 .. versionchanged::
319 17.1.0 *hash* supports ``None`` as value which is also the default
320 now.
1328 5. Subclasses of a frozen class are frozen too.
1329
1330 :param bool weakref_slot: Make instances weak-referenceable. This has no
1331 effect unless ``slots`` is also enabled.
1332 :param bool auto_attribs: If ``True``, collect :pep:`526`-annotated
1333 attributes (Python 3.6 and later only) from the class body.
1334
1335 In this case, you **must** annotate every field. If ``attrs``
1336 encounters a field that is set to an `attr.ib` but lacks a type
1337 annotation, an `attr.exceptions.UnannotatedAttributeError` is
1338 raised. Use ``field_name: typing.Any = attr.ib(...)`` if you don't
1339 want to set a type.
1340
1341 If you assign a value to those attributes (e.g. ``x: int = 42``), that
1342 value becomes the default value like if it were passed using
1343 ``attr.ib(default=42)``. Passing an instance of `attrs.Factory` also
1344 works as expected in most cases (see warning below).
1345
1346 Attributes annotated as `typing.ClassVar`, and attributes that are
1347 neither annotated nor set to an `attr.ib` are **ignored**.
1348
1349 .. warning::
1350 For features that use the attribute name to create decorators (e.g.
1351 `validators <validators>`), you still *must* assign `attr.ib` to
1352 them. Otherwise Python will either not find the name or try to use
1353 the default value to call e.g. ``validator`` on it.
1354
1355 These errors can be quite confusing and probably the most common bug
1356 report on our bug tracker.
1357
1358 :param bool kw_only: Make all attributes keyword-only (Python 3+)
1359 in the generated ``__init__`` (if ``init`` is ``False``, this
1360 parameter is ignored).
1361 :param bool cache_hash: Ensure that the object's hash code is computed
1362 only once and stored on the object. If this is set to ``True``,
1363 hashing must be either explicitly or implicitly enabled for this
1364 class. If the hash code is cached, avoid any reassignments of
1365 fields involved in hash code computation or mutations of the objects
1366 those fields point to after object creation. If such changes occur,
1367 the behavior of the object's hash code is undefined.
1368 :param bool auto_exc: If the class subclasses `BaseException`
1369 (which implicitly includes any subclass of any exception), the
1370 following happens so it behaves like a well-behaved Python exception
1371 class:
1372
1373 - the values for *eq*, *order*, and *hash* are ignored and the
1374 instances compare and hash by the instance's ids (N.B. ``attrs`` will
1375 *not* remove existing implementations of ``__hash__`` or the equality
1376 methods. It just won't add own ones.),
1377 - all attributes that are either passed into ``__init__`` or have a
1378 default value are additionally available as a tuple in the ``args``
1379 attribute,
1380 - the value of *str* is ignored leaving ``__str__`` to base classes.
1381 :param bool collect_by_mro: Setting this to `True` fixes the way ``attrs``
1382 collects attributes from base classes. The default behavior is
1383 incorrect in certain cases of multiple inheritance. It should be on by
1384 default but is kept off for backward-compatibility.
1385
1386 See issue `#428 <https://github.com/python-attrs/attrs/issues/428>`_ for
1387 more details.
1388
1389 :param Optional[bool] getstate_setstate:
1390 .. note::
1391 This is usually only interesting for slotted classes and you should
1392 probably just set *auto_detect* to `True`.
1393
1394 If `True`, ``__getstate__`` and
1395 ``__setstate__`` are generated and attached to the class. This is
1396 necessary for slotted classes to be pickleable. If left `None`, it's
1397 `True` by default for slotted classes and ``False`` for dict classes.
1398
1399 If *auto_detect* is `True`, and *getstate_setstate* is left `None`,
1400 and **either** ``__getstate__`` or ``__setstate__`` is detected directly
1401 on the class (i.e. not inherited), it is set to `False` (this is usually
1402 what you want).
1403
1404 :param on_setattr: A callable that is run whenever the user attempts to set
1405 an attribute (either by assignment like ``i.x = 42`` or by using
1406 `setattr` like ``setattr(i, "x", 42)``). It receives the same arguments
1407 as validators: the instance, the attribute that is being modified, and
1408 the new value.
1409
1410 If no exception is raised, the attribute is set to the return value of
1411 the callable.
1412
1413 If a list of callables is passed, they're automatically wrapped in an
1414 `attrs.setters.pipe`.
1415 :type on_setattr: `callable`, or a list of callables, or `None`, or
1416 `attrs.setters.NO_OP`
1417
1418 :param Optional[callable] field_transformer:
1419 A function that is called with the original class object and all
1420 fields right before ``attrs`` finalizes the class. You can use
1421 this, e.g., to automatically add converters or validators to
1422 fields based on their types. See `transform-fields` for more details.
1423
1424 :param bool match_args:
1425 If `True` (default), set ``__match_args__`` on the class to support
1426 :pep:`634` (Structural Pattern Matching). It is a tuple of all
1427 non-keyword-only ``__init__`` parameter names on Python 3.10 and later.
1428 Ignored on older Python versions.
1429
1430 .. versionadded:: 16.0.0 *slots*
1431 .. versionadded:: 16.1.0 *frozen*
1432 .. versionadded:: 16.3.0 *str*
1433 .. versionadded:: 16.3.0 Support for ``__attrs_post_init__``.
1434 .. versionchanged:: 17.1.0
1435 *hash* supports ``None`` as value which is also the default now.
1436 .. versionadded:: 17.3.0 *auto_attribs*
1437 .. versionchanged:: 18.1.0
1438 If *these* is passed, no attributes are deleted from the class body.
1439 .. versionchanged:: 18.1.0 If *these* is ordered, the order is retained.
1440 .. versionadded:: 18.2.0 *weakref_slot*
1441 .. deprecated:: 18.2.0
1442 ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now raise a
1443 `DeprecationWarning` if the classes compared are subclasses of
1444 each other. ``__eq__`` and ``__ne__`` never tried to compare subclasses
1445 to each other.
1446 .. versionchanged:: 19.2.0
1447 ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now do not consider
1448 subclasses comparable anymore.
1449 .. versionadded:: 18.2.0 *kw_only*
1450 .. versionadded:: 18.2.0 *cache_hash*
1451 .. versionadded:: 19.1.0 *auto_exc*
1452 .. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01.
1453 .. versionadded:: 19.2.0 *eq* and *order*
1454 .. versionadded:: 20.1.0 *auto_detect*
1455 .. versionadded:: 20.1.0 *collect_by_mro*
1456 .. versionadded:: 20.1.0 *getstate_setstate*
1457 .. versionadded:: 20.1.0 *on_setattr*
1458 .. versionadded:: 20.3.0 *field_transformer*
1459 .. versionchanged:: 21.1.0
1460 ``init=False`` injects ``__attrs_init__``
1461 .. versionchanged:: 21.1.0 Support for ``__attrs_pre_init__``
1462 .. versionchanged:: 21.1.0 *cmp* undeprecated
1463 .. versionadded:: 21.3.0 *match_args*
321 1464 """
1465 eq_, order_ = _determine_attrs_eq_order(cmp, eq, order, None)
1466 hash_ = hash # work around the lack of nonlocal
1467
1468 if isinstance(on_setattr, (list, tuple)):
1469 on_setattr = setters.pipe(*on_setattr)
1470
322 1471 def wrap(cls):
323 if getattr(cls, "__class__", None) is None:
324 raise TypeError("attrs only works with new-style classes.")
325
326 if repr is False and str is True:
327 raise ValueError(
328 "__str__ can only be generated if a __repr__ exists."
329 )
330
331 if slots:
332 # Only need this later if we're using slots.
333 if these is None:
334 ca_list = [name
335 for name, attr
336 in cls.__dict__.items()
337 if isinstance(attr, _CountingAttr)]
338 else:
339 ca_list = list(iterkeys(these))
340 _transform_attrs(cls, these)
341
342 # Can't just re-use the frozen name because of Python's scoping. :(
343 # Can't compare function objects because Python 2 is terrible. :(
344 effectively_frozen = _has_frozen_superclass(cls) or frozen
345 if repr is True:
346 cls = _add_repr(cls, ns=repr_ns)
1472 is_frozen = frozen or _has_frozen_base_class(cls)
1473 is_exc = auto_exc is True and issubclass(cls, BaseException)
1474 has_own_setattr = auto_detect and _has_own_attribute(
1475 cls, "__setattr__"
1476 )
1477
1478 if has_own_setattr and is_frozen:
1479 raise ValueError("Can't freeze a class with a custom __setattr__.")
1480
1481 builder = _ClassBuilder(
1482 cls,
1483 these,
1484 slots,
1485 is_frozen,
1486 weakref_slot,
1487 _determine_whether_to_implement(
1488 cls,
1489 getstate_setstate,
1490 auto_detect,
1491 ("__getstate__", "__setstate__"),
1492 default=slots,
1493 ),
1494 auto_attribs,
1495 kw_only,
1496 cache_hash,
1497 is_exc,
1498 collect_by_mro,
1499 on_setattr,
1500 has_own_setattr,
1501 field_transformer,
1502 )
1503 if _determine_whether_to_implement(
1504 cls, repr, auto_detect, ("__repr__",)
1505 ):
1506 builder.add_repr(repr_ns)
347 1507 if str is True:
348 cls.__str__ = cls.__repr__
349 if cmp is True:
350 cls = _add_cmp(cls)
351
1508 builder.add_str()
1509
1510 eq = _determine_whether_to_implement(
1511 cls, eq_, auto_detect, ("__eq__", "__ne__")
1512 )
1513 if not is_exc and eq is True:
1514 builder.add_eq()
1515 if not is_exc and _determine_whether_to_implement(
1516 cls, order_, auto_detect, ("__lt__", "__le__", "__gt__", "__ge__")
1517 ):
1518 builder.add_order()
1519
1520 builder.add_setattr()
1521
1522 if (
1523 hash_ is None
1524 and auto_detect is True
1525 and _has_own_attribute(cls, "__hash__")
1526 ):
1527 hash = False
1528 else:
1529 hash = hash_
352 1530 if hash is not True and hash is not False and hash is not None:
1531 # Can't use `hash in` because 1 == True for example.
353 1532 raise TypeError(
354 1533 "Invalid value for hash. Must be True, False, or None."
355 1534 )
356 elif hash is False or (hash is None and cmp is False):
357 pass
358 elif hash is True or (hash is None and cmp is True and frozen is True):
359 cls = _add_hash(cls)
1535 elif hash is False or (hash is None and eq is False) or is_exc:
1536 # Don't do anything. Should fall back to __object__'s __hash__
1537 # which is by id.
1538 if cache_hash:
1539 raise TypeError(
1540 "Invalid value for cache_hash. To use hash caching,"
1541 " hashing must be either explicitly or implicitly "
1542 "enabled."
1543 )
1544 elif hash is True or (
1545 hash is None and eq is True and is_frozen is True
1546 ):
1547 # Build a __hash__ if told so, or if it's safe.
1548 builder.add_hash()
360 1549 else:
361 cls.__hash__ = None
362
363 if init is True:
364 cls = _add_init(cls, effectively_frozen)
365 if effectively_frozen is True:
366 cls.__setattr__ = _frozen_setattrs
367 cls.__delattr__ = _frozen_delattrs
368 if slots is True:
369 # slots and frozen require __getstate__/__setstate__ to work
370 cls = _add_pickle(cls)
371 if slots is True:
372 cls_dict = dict(cls.__dict__)
373 cls_dict["__slots__"] = tuple(ca_list)
374 for ca_name in ca_list:
375 # It might not actually be in there, e.g. if using 'these'.
376 cls_dict.pop(ca_name, None)
377 cls_dict.pop("__dict__", None)
378
379 qualname = getattr(cls, "__qualname__", None)
380 cls = type(cls)(cls.__name__, cls.__bases__, cls_dict)
381 if qualname is not None:
382 cls.__qualname__ = qualname
383
384 return cls
385
386 # maybe_cls's type depends on the usage of the decorator. It's a class
387 # if it's used as `@attributes` but ``None`` if used as `@attributes()`.
1550 # Raise TypeError on attempts to hash.
1551 if cache_hash:
1552 raise TypeError(
1553 "Invalid value for cache_hash. To use hash caching,"
1554 " hashing must be either explicitly or implicitly "
1555 "enabled."
1556 )
1557 builder.make_unhashable()
1558
1559 if _determine_whether_to_implement(
1560 cls, init, auto_detect, ("__init__",)
1561 ):
1562 builder.add_init()
1563 else:
1564 builder.add_attrs_init()
1565 if cache_hash:
1566 raise TypeError(
1567 "Invalid value for cache_hash. To use hash caching,"
1568 " init must be True."
1569 )
1570
1571 if (
1572 PY310
1573 and match_args
1574 and not _has_own_attribute(cls, "__match_args__")
1575 ):
1576 builder.add_match_args()
1577
1578 return builder.build_class()
1579
1580 # maybe_cls's type depends on the usage of the decorator. It's a class
1581 # if it's used as `@attrs` but ``None`` if used as `@attrs()`.
388 1582 if maybe_cls is None:
389 1583 return wrap
390 1584 else:
391 1585 return wrap(maybe_cls)
392 1586
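A quick illustration of the *auto_exc* behavior documented above: eq/order/hash fall back to identity, and fields passed to ``__init__`` are mirrored into ``args``. A minimal sketch against upstream attrs (imported as ``attr``; Mercurial's vendored copy lives under ``mercurial.thirdparty.attr``, so the import path differs there):

    import attr

    @attr.s(auto_exc=True)
    class MyError(Exception):
        msg = attr.ib()
        code = attr.ib(default=0)

    e = MyError("boom", 2)
    assert e.args == ("boom", 2)    # __init__'ed fields land in args
    assert e != MyError("boom", 2)  # eq/hash stay identity-based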
393 1587
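The *on_setattr* plumbing above (a list is wrapped in ``setters.pipe``) can be exercised like this; again a hedged sketch against the upstream ``attr`` package:

    import attr
    from attr import setters, validators

    @attr.s(on_setattr=[setters.convert, setters.validate])  # -> setters.pipe(...)
    class Point:
        x = attr.ib(converter=int, validator=validators.instance_of(int))

    p = Point("1")  # the converter already runs inside __init__
    p.x = "2"       # on assignment: convert first, then validate
    assert p.x == 2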
394 if PY2:
395 def _has_frozen_superclass(cls):
396 """
397 Check whether *cls* has a frozen ancestor by looking at its
398 __setattr__.
399 """
400 return (
401 getattr(
402 cls.__setattr__, "__module__", None
403 ) == _frozen_setattrs.__module__ and
404 cls.__setattr__.__name__ == _frozen_setattrs.__name__
1588 _attrs = attrs
1589 """
1590 Internal alias so we can use it in functions that take an argument called
1591 *attrs*.
1592 """
1593
1594
1595 def _has_frozen_base_class(cls):
1596 """
1597 Check whether *cls* has a frozen ancestor by looking at its
1598 __setattr__.
1599 """
1600 return cls.__setattr__ is _frozen_setattrs
1601
1602
1603 def _generate_unique_filename(cls, func_name):
1604 """
1605 Create a "filename" suitable for a function being generated.
1606 """
1607 unique_filename = "<attrs generated {} {}.{}>".format(
1608 func_name,
1609 cls.__module__,
1610 getattr(cls, "__qualname__", cls.__name__),
1611 )
1612 return unique_filename
1613
1614
1615 def _make_hash(cls, attrs, frozen, cache_hash):
1616 attrs = tuple(
1617 a for a in attrs if a.hash is True or (a.hash is None and a.eq is True)
1618 )
1619
1620 tab = " "
1621
1622 unique_filename = _generate_unique_filename(cls, "hash")
1623 type_hash = hash(unique_filename)
1624 # If eq is custom generated, we need to include the functions in globs
1625 globs = {}
1626
1627 hash_def = "def __hash__(self"
1628 hash_func = "hash(("
1629 closing_braces = "))"
1630 if not cache_hash:
1631 hash_def += "):"
1632 else:
1633 hash_def += ", *"
1634
1635 hash_def += (
1636 ", _cache_wrapper="
1637 + "__import__('attr._make')._make._CacheHashWrapper):"
405 1638 )
406 else:
407 def _has_frozen_superclass(cls):
1639 hash_func = "_cache_wrapper(" + hash_func
1640 closing_braces += ")"
1641
1642 method_lines = [hash_def]
1643
1644 def append_hash_computation_lines(prefix, indent):
408 1645 """
409 Check whether *cls* has a frozen ancestor by looking at its
410 __setattr__.
1646 Generate the code for actually computing the hash code.
1647 Below, this will either be returned directly or used to compute
1648 a value which is then cached, depending on the value of cache_hash.
411 1649 """
412 return cls.__setattr__ == _frozen_setattrs
413
414
415 def _attrs_to_tuple(obj, attrs):
416 """
417 Create a tuple of all values of *obj*'s *attrs*.
418 """
419 return tuple(getattr(obj, a.name) for a in attrs)
420
421
422 def _add_hash(cls, attrs=None):
1650
1651 method_lines.extend(
1652 [
1653 indent + prefix + hash_func,
1654 indent + " %d," % (type_hash,),
1655 ]
1656 )
1657
1658 for a in attrs:
1659 if a.eq_key:
1660 cmp_name = "_%s_key" % (a.name,)
1661 globs[cmp_name] = a.eq_key
1662 method_lines.append(
1663 indent + " %s(self.%s)," % (cmp_name, a.name)
1664 )
1665 else:
1666 method_lines.append(indent + " self.%s," % a.name)
1667
1668 method_lines.append(indent + " " + closing_braces)
1669
1670 if cache_hash:
1671 method_lines.append(tab + "if self.%s is None:" % _hash_cache_field)
1672 if frozen:
1673 append_hash_computation_lines(
1674 "object.__setattr__(self, '%s', " % _hash_cache_field, tab * 2
1675 )
1676 method_lines.append(tab * 2 + ")") # close __setattr__
1677 else:
1678 append_hash_computation_lines(
1679 "self.%s = " % _hash_cache_field, tab * 2
1680 )
1681 method_lines.append(tab + "return self.%s" % _hash_cache_field)
1682 else:
1683 append_hash_computation_lines("return ", tab)
1684
1685 script = "\n".join(method_lines)
1686 return _make_method("__hash__", script, unique_filename, globs)
1687
1688
1689 def _add_hash(cls, attrs):
423 1690 """
424 1691 Add a hash method to *cls*.
425 1692 """
426 if attrs is None:
427 attrs = [a
428 for a in cls.__attrs_attrs__
429 if a.hash is True or (a.hash is None and a.cmp is True)]
430
431 def hash_(self):
1693 cls.__hash__ = _make_hash(cls, attrs, frozen=False, cache_hash=False)
1694 return cls
1695
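``_make_hash`` above is also where *cache_hash* materializes: the hash is computed once and stored in a hidden field. A sketch of the observable behavior, including the guard that rejects *cache_hash* on unhashable classes (upstream ``attr`` API):

    import attr

    @attr.s(frozen=True, cache_hash=True)  # frozen + eq implies a __hash__
    class Key:
        name = attr.ib()

    assert hash(Key("a")) == hash(Key("a"))

    try:
        @attr.s(cache_hash=True)  # eq without frozen -> no __hash__
        class Bad:
            x = attr.ib()
    except TypeError:
        pass  # "hashing must be either explicitly or implicitly enabled"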
1696
1697 def _make_ne():
1698 """
1699 Create __ne__ method.
1700 """
1701
1702 def __ne__(self, other):
432 1703 """
433 Automatically created by attrs.
1704 Check equality and either forward a NotImplemented or
1705 return the result negated.
434 1706 """
435 return hash(_attrs_to_tuple(self, attrs))
436
437 cls.__hash__ = hash_
438 return cls
439
440
441 def _add_cmp(cls, attrs=None):
1707 result = self.__eq__(other)
1708 if result is NotImplemented:
1709 return NotImplemented
1710
1711 return not result
1712
1713 return __ne__
1714
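Forwarding ``NotImplemented`` in ``__ne__`` matters: it lets Python fall back to the reflected operation (and ultimately identity) for foreign types instead of raising. A small sketch:

    import attr

    @attr.s
    class A:
        x = attr.ib()

    assert A(1) == A(1)
    assert A(1) != object()  # __eq__ -> NotImplemented -> identity fallback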
1715
1716 def _make_eq(cls, attrs):
1717 """
1718 Create __eq__ method for *cls* with *attrs*.
442 1719 """
443 Add comparison methods to *cls*.
1720 attrs = [a for a in attrs if a.eq]
1721
1722 unique_filename = _generate_unique_filename(cls, "eq")
1723 lines = [
1724 "def __eq__(self, other):",
1725 " if other.__class__ is not self.__class__:",
1726 " return NotImplemented",
1727 ]
1728
1729 # We can't just do a big self.x = other.x and... clause due to
1730 # irregularities like nan == nan is false but (nan,) == (nan,) is true.
1731 globs = {}
1732 if attrs:
1733 lines.append(" return (")
1734 others = [" ) == ("]
1735 for a in attrs:
1736 if a.eq_key:
1737 cmp_name = "_%s_key" % (a.name,)
1738 # Add the key function to the global namespace
1739 # of the evaluated function.
1740 globs[cmp_name] = a.eq_key
1741 lines.append(
1742 " %s(self.%s),"
1743 % (
1744 cmp_name,
1745 a.name,
1746 )
1747 )
1748 others.append(
1749 " %s(other.%s),"
1750 % (
1751 cmp_name,
1752 a.name,
1753 )
1754 )
1755 else:
1756 lines.append(" self.%s," % (a.name,))
1757 others.append(" other.%s," % (a.name,))
1758
1759 lines += others + [" )"]
1760 else:
1761 lines.append(" return True")
1762
1763 script = "\n".join(lines)
1764
1765 return _make_method("__eq__", script, unique_filename, globs)
1766
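The nan irregularity mentioned in the comment above is why the generated ``__eq__`` compares one tuple against another instead of chaining ``and`` clauses; tuple comparison short-circuits on identity:

    import attr

    @attr.s
    class C:
        x = attr.ib()

    n = float("nan")
    assert not (n == n)   # direct nan comparison is always False
    assert (n,) == (n,)   # tuples short-circuit on identity
    assert C(n) == C(n)   # so the generated __eq__ stays reflexive here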
1767
1768 def _make_order(cls, attrs):
444 1769 """
445 if attrs is None:
446 attrs = [a for a in cls.__attrs_attrs__ if a.cmp]
1770 Create ordering methods for *cls* with *attrs*.
1771 """
1772 attrs = [a for a in attrs if a.order]
447 1773
448 1774 def attrs_to_tuple(obj):
449 1775 """
450 1776 Save us some typing.
451 1777 """
452 return _attrs_to_tuple(obj, attrs)
453
454 def eq(self, other):
1778 return tuple(
1779 key(value) if key else value
1780 for value, key in (
1781 (getattr(obj, a.name), a.order_key) for a in attrs
1782 )
1783 )
1784
1785 def __lt__(self, other):
1786 """
1787 Automatically created by attrs.
1788 """
1789 if other.__class__ is self.__class__:
1790 return attrs_to_tuple(self) < attrs_to_tuple(other)
1791
1792 return NotImplemented
1793
1794 def __le__(self, other):
1795 """
1796 Automatically created by attrs.
1797 """
1798 if other.__class__ is self.__class__:
1799 return attrs_to_tuple(self) <= attrs_to_tuple(other)
1800
1801 return NotImplemented
1802
1803 def __gt__(self, other):
1804 """
1805 Automatically created by attrs.
1806 """
1807 if other.__class__ is self.__class__:
1808 return attrs_to_tuple(self) > attrs_to_tuple(other)
1809
1810 return NotImplemented
1811
1812 def __ge__(self, other):
455 1813 """
456 1814 Automatically created by attrs.
457 1815 """
458 1816 if other.__class__ is self.__class__:
459 return attrs_to_tuple(self) == attrs_to_tuple(other)
460 else:
461 return NotImplemented
462
463 def ne(self, other):
464 """
465 Automatically created by attrs.
466 """
467 result = eq(self, other)
468 if result is NotImplemented:
469 return NotImplemented
470 else:
471 return not result
472
473 def lt(self, other):
474 """
475 Automatically created by attrs.
476 """
477 if isinstance(other, self.__class__):
478 return attrs_to_tuple(self) < attrs_to_tuple(other)
479 else:
480 return NotImplemented
481
482 def le(self, other):
483 """
484 Automatically created by attrs.
485 """
486 if isinstance(other, self.__class__):
487 return attrs_to_tuple(self) <= attrs_to_tuple(other)
488 else:
489 return NotImplemented
490
491 def gt(self, other):
492 """
493 Automatically created by attrs.
494 """
495 if isinstance(other, self.__class__):
496 return attrs_to_tuple(self) > attrs_to_tuple(other)
497 else:
498 return NotImplemented
499
500 def ge(self, other):
501 """
502 Automatically created by attrs.
503 """
504 if isinstance(other, self.__class__):
505 1817 return attrs_to_tuple(self) >= attrs_to_tuple(other)
506 else:
507 return NotImplemented
508
509 cls.__eq__ = eq
510 cls.__ne__ = ne
511 cls.__lt__ = lt
512 cls.__le__ = le
513 cls.__gt__ = gt
514 cls.__ge__ = ge
1818
1819 return NotImplemented
1820
1821 return __lt__, __le__, __gt__, __ge__
1822
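The ``order_key`` support in ``_make_order`` corresponds to passing a callable as a field's *order* argument; a hedged sketch:

    import attr

    @attr.s
    class Word:
        text = attr.ib(order=str.lower)  # a callable becomes order_key

    # compared as "apple" < "banana"; plain ASCII ordering would say False
    assert Word("apple") < Word("BANANA")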
1823
1824 def _add_eq(cls, attrs=None):
1825 """
1826 Add equality methods to *cls* with *attrs*.
1827 """
1828 if attrs is None:
1829 attrs = cls.__attrs_attrs__
1830
1831 cls.__eq__ = _make_eq(cls, attrs)
1832 cls.__ne__ = _make_ne()
515 1833
516 1834 return cls
517 1835
518 1836
1837 if HAS_F_STRINGS:
1838
1839 def _make_repr(attrs, ns, cls):
1840 unique_filename = _generate_unique_filename(cls, "repr")
1841 # Figure out which attributes to include, and which function to use to
1842 # format them. The a.repr value can be either bool or a custom
1843 # callable.
1844 attr_names_with_reprs = tuple(
1845 (a.name, (repr if a.repr is True else a.repr), a.init)
1846 for a in attrs
1847 if a.repr is not False
1848 )
1849 globs = {
1850 name + "_repr": r
1851 for name, r, _ in attr_names_with_reprs
1852 if r != repr
1853 }
1854 globs["_compat"] = _compat
1855 globs["AttributeError"] = AttributeError
1856 globs["NOTHING"] = NOTHING
1857 attribute_fragments = []
1858 for name, r, i in attr_names_with_reprs:
1859 accessor = (
1860 "self." + name
1861 if i
1862 else 'getattr(self, "' + name + '", NOTHING)'
1863 )
1864 fragment = (
1865 "%s={%s!r}" % (name, accessor)
1866 if r == repr
1867 else "%s={%s_repr(%s)}" % (name, name, accessor)
1868 )
1869 attribute_fragments.append(fragment)
1870 repr_fragment = ", ".join(attribute_fragments)
1871
1872 if ns is None:
1873 cls_name_fragment = (
1874 '{self.__class__.__qualname__.rsplit(">.", 1)[-1]}'
1875 )
1876 else:
1877 cls_name_fragment = ns + ".{self.__class__.__name__}"
1878
1879 lines = [
1880 "def __repr__(self):",
1881 " try:",
1882 " already_repring = _compat.repr_context.already_repring",
1883 " except AttributeError:",
1884 " already_repring = {id(self),}",
1885 " _compat.repr_context.already_repring = already_repring",
1886 " else:",
1887 " if id(self) in already_repring:",
1888 " return '...'",
1889 " else:",
1890 " already_repring.add(id(self))",
1891 " try:",
1892 " return f'%s(%s)'" % (cls_name_fragment, repr_fragment),
1893 " finally:",
1894 " already_repring.remove(id(self))",
1895 ]
1896
1897 return _make_method(
1898 "__repr__", "\n".join(lines), unique_filename, globs=globs
1899 )
1900
1901 else:
1902
1903 def _make_repr(attrs, ns, _):
1904 """
1905 Make a repr method that includes relevant *attrs*, adding *ns* to the
1906 full name.
1907 """
1908
1909 # Figure out which attributes to include, and which function to use to
1910 # format them. The a.repr value can be either bool or a custom
1911 # callable.
1912 attr_names_with_reprs = tuple(
1913 (a.name, repr if a.repr is True else a.repr)
1914 for a in attrs
1915 if a.repr is not False
1916 )
1917
1918 def __repr__(self):
1919 """
1920 Automatically created by attrs.
1921 """
1922 try:
1923 already_repring = _compat.repr_context.already_repring
1924 except AttributeError:
1925 already_repring = set()
1926 _compat.repr_context.already_repring = already_repring
1927
1928 if id(self) in already_repring:
1929 return "..."
1930 real_cls = self.__class__
1931 if ns is None:
1932 class_name = real_cls.__qualname__.rsplit(">.", 1)[-1]
1933 else:
1934 class_name = ns + "." + real_cls.__name__
1935
1936 # Since 'self' remains on the stack (i.e.: strongly referenced)
1937 # for the duration of this call, it's safe to depend on id(...)
1938 # stability, and not need to track the instance and therefore
1939 # worry about properties like weakref- or hash-ability.
1940 already_repring.add(id(self))
1941 try:
1942 result = [class_name, "("]
1943 first = True
1944 for name, attr_repr in attr_names_with_reprs:
1945 if first:
1946 first = False
1947 else:
1948 result.append(", ")
1949 result.extend(
1950 (name, "=", attr_repr(getattr(self, name, NOTHING)))
1951 )
1952 return "".join(result) + ")"
1953 finally:
1954 already_repring.remove(id(self))
1955
1956 return __repr__
1957
1958
519 1959 def _add_repr(cls, ns=None, attrs=None):
520 1960 """
521 1961 Add a repr method to *cls*.
522 1962 """
523 1963 if attrs is None:
524 attrs = [a for a in cls.__attrs_attrs__ if a.repr]
525
526 def repr_(self):
527 """
528 Automatically created by attrs.
529 """
530 real_cls = self.__class__
531 if ns is None:
532 qualname = getattr(real_cls, "__qualname__", None)
533 if qualname is not None:
534 class_name = qualname.rsplit(">.", 1)[-1]
535 else:
536 class_name = real_cls.__name__
537 else:
538 class_name = ns + "." + real_cls.__name__
539
540 return "{0}({1})".format(
541 class_name,
542 ", ".join(a.name + "=" + repr(getattr(self, a.name))
543 for a in attrs)
544 )
545 cls.__repr__ = repr_
546 return cls
547
548
549 def _add_init(cls, frozen):
550 """
551 Add a __init__ method to *cls*. If *frozen* is True, make it immutable.
552 """
553 attrs = [a for a in cls.__attrs_attrs__
554 if a.init or a.default is not NOTHING]
555
556 # We cache the generated init methods for the same kinds of attributes.
557 sha1 = hashlib.sha1()
558 r = repr(attrs)
559 if not isinstance(r, bytes):
560 r = r.encode('utf-8')
561 sha1.update(r)
562 unique_filename = "<attrs generated init {0}>".format(
563 sha1.hexdigest()
564 )
565
566 script, globs = _attrs_to_script(
567 attrs,
568 frozen,
569 getattr(cls, "__attrs_post_init__", False),
570 )
571 locs = {}
572 bytecode = compile(script, unique_filename, "exec")
573 attr_dict = dict((a.name, a) for a in attrs)
574 globs.update({
575 "NOTHING": NOTHING,
576 "attr_dict": attr_dict,
577 })
578 if frozen is True:
579 # Save the lookup overhead in __init__ if we need to circumvent
580 # immutability.
581 globs["_cached_setattr"] = _obj_setattr
582 eval(bytecode, globs, locs)
583 init = locs["__init__"]
584
585 # In order for debuggers like PDB to be able to step through the code,
586 # we add a fake linecache entry.
587 linecache.cache[unique_filename] = (
588 len(script),
589 None,
590 script.splitlines(True),
591 unique_filename
592 )
593 cls.__init__ = init
594 return cls
595
596
597 def _add_pickle(cls):
598 """
599 Add pickle helpers, needed for frozen and slotted classes
600 """
601 def _slots_getstate__(obj):
602 """
603 Play nice with pickle.
604 """
605 return tuple(getattr(obj, a.name) for a in fields(obj.__class__))
606
607 def _slots_setstate__(obj, state):
608 """
609 Play nice with pickle.
610 """
611 __bound_setattr = _obj_setattr.__get__(obj, Attribute)
612 for a, value in zip(fields(obj.__class__), state):
613 __bound_setattr(a.name, value)
614
615 cls.__getstate__ = _slots_getstate__
616 cls.__setstate__ = _slots_setstate__
1964 attrs = cls.__attrs_attrs__
1965
1966 cls.__repr__ = _make_repr(attrs, ns, cls)
617 1967 return cls
618 1968
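The ``already_repring`` bookkeeping in both ``_make_repr`` variants exists to cut reference cycles short; the placeholder it emits is ``...``:

    import attr

    @attr.s
    class Node:
        value = attr.ib()
        parent = attr.ib(default=None)

    n = Node(1)
    n.parent = n             # create a cycle
    assert "..." in repr(n)  # recursion is stopped, not overflowed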
619 1969
620 1970 def fields(cls):
621 1971 """
622 Returns the tuple of ``attrs`` attributes for a class.
1972 Return the tuple of ``attrs`` attributes for a class.
623 1973
624 1974 The tuple also allows accessing the fields by their names (see below for
625 1975 examples).
@@ -630,12 +1980,12 def fields(cls):
630 1980 :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
631 1981 class.
632 1982
633 :rtype: tuple (with name accesors) of :class:`attr.Attribute`
1983 :rtype: tuple (with name accessors) of `attrs.Attribute`
634 1984
635 1985 .. versionchanged:: 16.2.0 Returned tuple allows accessing the fields
636 1986 by name.
637 1987 """
638 if not isclass(cls):
1988 if not isinstance(cls, type):
639 1989 raise TypeError("Passed object must be a class.")
640 1990 attrs = getattr(cls, "__attrs_attrs__", None)
641 1991 if attrs is None:
@@ -645,6 +1995,34 def fields(cls):
645 1995 return attrs
646 1996
647 1997
1998 def fields_dict(cls):
1999 """
2000 Return an ordered dictionary of ``attrs`` attributes for a class, whose
2001 keys are the attribute names.
2002
2003 :param type cls: Class to introspect.
2004
2005 :raise TypeError: If *cls* is not a class.
2006 :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
2007 class.
2008
2009 :rtype: an ordered dict where keys are attribute names and values are
2010 `attrs.Attribute`\\ s. This will be a `dict` if it's
2011 naturally ordered like on Python 3.6+ or an
2012 :class:`~collections.OrderedDict` otherwise.
2013
2014 .. versionadded:: 18.1.0
2015 """
2016 if not isinstance(cls, type):
2017 raise TypeError("Passed object must be a class.")
2018 attrs = getattr(cls, "__attrs_attrs__", None)
2019 if attrs is None:
2020 raise NotAnAttrsClassError(
2021 "{cls!r} is not an attrs-decorated class.".format(cls=cls)
2022 )
2023 return ordered_dict((a.name, a) for a in attrs)
2024
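``fields`` and ``fields_dict`` are the introspection entry points for the ``__attrs_attrs__`` tuple described above; for example:

    import attr

    @attr.s
    class C:
        x = attr.ib()
        y = attr.ib(default=0)

    assert attr.fields(C).y.default == 0                 # name accessors
    assert attr.fields_dict(C)["x"] is attr.fields(C).x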
2025
648 2026 def validate(inst):
649 2027 """
650 2028 Validate all attributes on *inst* that have a validator.
@@ -662,240 +2040,623 def validate(inst):
662 2040 v(inst, a, getattr(inst, a.name))
663 2041
664 2042
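``validate`` re-runs every field validator on an already-built instance, which is useful because a plain ``@attr.s`` class does not validate on assignment:

    import attr

    @attr.s
    class C:
        x = attr.ib(validator=attr.validators.instance_of(int))

    c = C(1)
    c.x = "oops"          # silently accepted without on_setattr
    try:
        attr.validate(c)  # instance_of raises TypeError here
    except TypeError:
        pass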
665 def _attrs_to_script(attrs, frozen, post_init):
2043 def _is_slot_cls(cls):
2044 return "__slots__" in cls.__dict__
2045
2046
2047 def _is_slot_attr(a_name, base_attr_map):
2048 """
2049 Check if the attribute name comes from a slot class.
2050 """
2051 return a_name in base_attr_map and _is_slot_cls(base_attr_map[a_name])
2052
2053
2054 def _make_init(
2055 cls,
2056 attrs,
2057 pre_init,
2058 post_init,
2059 frozen,
2060 slots,
2061 cache_hash,
2062 base_attr_map,
2063 is_exc,
2064 cls_on_setattr,
2065 attrs_init,
2066 ):
2067 has_cls_on_setattr = (
2068 cls_on_setattr is not None and cls_on_setattr is not setters.NO_OP
2069 )
2070
2071 if frozen and has_cls_on_setattr:
2072 raise ValueError("Frozen classes can't use on_setattr.")
2073
2074 needs_cached_setattr = cache_hash or frozen
2075 filtered_attrs = []
2076 attr_dict = {}
2077 for a in attrs:
2078 if not a.init and a.default is NOTHING:
2079 continue
2080
2081 filtered_attrs.append(a)
2082 attr_dict[a.name] = a
2083
2084 if a.on_setattr is not None:
2085 if frozen is True:
2086 raise ValueError("Frozen classes can't use on_setattr.")
2087
2088 needs_cached_setattr = True
2089 elif has_cls_on_setattr and a.on_setattr is not setters.NO_OP:
2090 needs_cached_setattr = True
2091
2092 unique_filename = _generate_unique_filename(cls, "init")
2093
2094 script, globs, annotations = _attrs_to_init_script(
2095 filtered_attrs,
2096 frozen,
2097 slots,
2098 pre_init,
2099 post_init,
2100 cache_hash,
2101 base_attr_map,
2102 is_exc,
2103 has_cls_on_setattr,
2104 attrs_init,
2105 )
2106 if cls.__module__ in sys.modules:
2107 # This makes typing.get_type_hints(CLS.__init__) resolve string types.
2108 globs.update(sys.modules[cls.__module__].__dict__)
2109
2110 globs.update({"NOTHING": NOTHING, "attr_dict": attr_dict})
2111
2112 if needs_cached_setattr:
2113 # Save the lookup overhead in __init__ if we need to circumvent
2114 # setattr hooks.
2115 globs["_setattr"] = _obj_setattr
2116
2117 init = _make_method(
2118 "__attrs_init__" if attrs_init else "__init__",
2119 script,
2120 unique_filename,
2121 globs,
2122 )
2123 init.__annotations__ = annotations
2124
2125 return init
2126
2127
2128 def _setattr(attr_name, value_var, has_on_setattr):
2129 """
2130 Use the cached object.setattr to set *attr_name* to *value_var*.
2131 """
2132 return "_setattr(self, '%s', %s)" % (attr_name, value_var)
2133
2134
2135 def _setattr_with_converter(attr_name, value_var, has_on_setattr):
2136 """
2137 Use the cached object.setattr to set *attr_name* to *value_var*, but run
2138 its converter first.
2139 """
2140 return "_setattr(self, '%s', %s(%s))" % (
2141 attr_name,
2142 _init_converter_pat % (attr_name,),
2143 value_var,
2144 )
2145
2146
2147 def _assign(attr_name, value, has_on_setattr):
2148 """
2149 Unless *attr_name* has an on_setattr hook, use normal assignment. Otherwise
2150 delegate to _setattr.
2151 """
2152 if has_on_setattr:
2153 return _setattr(attr_name, value, True)
2154
2155 return "self.%s = %s" % (attr_name, value)
2156
2157
2158 def _assign_with_converter(attr_name, value_var, has_on_setattr):
2159 """
2160 Unless *attr_name* has an on_setattr hook, use normal assignment after
2161 conversion. Otherwise delegate to _setattr_with_converter.
2162 """
2163 if has_on_setattr:
2164 return _setattr_with_converter(attr_name, value_var, True)
2165
2166 return "self.%s = %s(%s)" % (
2167 attr_name,
2168 _init_converter_pat % (attr_name,),
2169 value_var,
2170 )
2171
2172
2173 def _attrs_to_init_script(
2174 attrs,
2175 frozen,
2176 slots,
2177 pre_init,
2178 post_init,
2179 cache_hash,
2180 base_attr_map,
2181 is_exc,
2182 has_cls_on_setattr,
2183 attrs_init,
2184 ):
666 2185 """
667 2186 Return a script of an initializer for *attrs* and a dict of globals.
668 2187
669 2188 The globals are expected by the generated script.
670 2189
671 If *frozen* is True, we cannot set the attributes directly so we use
2190 If *frozen* is True, we cannot set the attributes directly so we use
672 2191 a cached ``object.__setattr__``.
673 2192 """
674 2193 lines = []
2194 if pre_init:
2195 lines.append("self.__attrs_pre_init__()")
2196
675 2197 if frozen is True:
676 lines.append(
677 # Circumvent the __setattr__ descriptor to save one lookup per
678 # assignment.
679 "_setattr = _cached_setattr.__get__(self, self.__class__)"
680 )
681
682 def fmt_setter(attr_name, value_var):
683 return "_setattr('%(attr_name)s', %(value_var)s)" % {
684 "attr_name": attr_name,
685 "value_var": value_var,
686 }
687
688 def fmt_setter_with_converter(attr_name, value_var):
689 conv_name = _init_convert_pat.format(attr_name)
690 return "_setattr('%(attr_name)s', %(conv)s(%(value_var)s))" % {
691 "attr_name": attr_name,
692 "value_var": value_var,
693 "conv": conv_name,
694 }
2198 if slots is True:
2199 fmt_setter = _setattr
2200 fmt_setter_with_converter = _setattr_with_converter
2201 else:
2202 # Dict frozen classes assign directly to __dict__.
2203 # But only if the attribute doesn't come from an ancestor slot
2204 # class.
2205 # Note _inst_dict will be used again below if cache_hash is True
2206 lines.append("_inst_dict = self.__dict__")
2207
2208 def fmt_setter(attr_name, value_var, has_on_setattr):
2209 if _is_slot_attr(attr_name, base_attr_map):
2210 return _setattr(attr_name, value_var, has_on_setattr)
2211
2212 return "_inst_dict['%s'] = %s" % (attr_name, value_var)
2213
2214 def fmt_setter_with_converter(
2215 attr_name, value_var, has_on_setattr
2216 ):
2217 if has_on_setattr or _is_slot_attr(attr_name, base_attr_map):
2218 return _setattr_with_converter(
2219 attr_name, value_var, has_on_setattr
2220 )
2221
2222 return "_inst_dict['%s'] = %s(%s)" % (
2223 attr_name,
2224 _init_converter_pat % (attr_name,),
2225 value_var,
2226 )
2227
695 2228 else:
696 def fmt_setter(attr_name, value):
697 return "self.%(attr_name)s = %(value)s" % {
698 "attr_name": attr_name,
699 "value": value,
700 }
701
702 def fmt_setter_with_converter(attr_name, value_var):
703 conv_name = _init_convert_pat.format(attr_name)
704 return "self.%(attr_name)s = %(conv)s(%(value_var)s)" % {
705 "attr_name": attr_name,
706 "value_var": value_var,
707 "conv": conv_name,
708 }
2229 # Not frozen.
2230 fmt_setter = _assign
2231 fmt_setter_with_converter = _assign_with_converter
709 2232
710 2233 args = []
2234 kw_only_args = []
711 2235 attrs_to_validate = []
712 2236
713 2237 # This is a dictionary of names to validator and converter callables.
714 2238 # Injecting this into __init__ globals lets us avoid lookups.
715 2239 names_for_globals = {}
2240 annotations = {"return": None}
716 2241
717 2242 for a in attrs:
718 2243 if a.validator:
719 2244 attrs_to_validate.append(a)
2245
720 2246 attr_name = a.name
2247 has_on_setattr = a.on_setattr is not None or (
2248 a.on_setattr is not setters.NO_OP and has_cls_on_setattr
2249 )
721 2250 arg_name = a.name.lstrip("_")
2251
722 2252 has_factory = isinstance(a.default, Factory)
723 2253 if has_factory and a.default.takes_self:
724 2254 maybe_self = "self"
725 2255 else:
726 2256 maybe_self = ""
2257
727 2258 if a.init is False:
728 2259 if has_factory:
729 2260 init_factory_name = _init_factory_pat.format(a.name)
730 if a.convert is not None:
731 lines.append(fmt_setter_with_converter(
732 attr_name,
733 init_factory_name + "({0})".format(maybe_self)))
734 conv_name = _init_convert_pat.format(a.name)
735 names_for_globals[conv_name] = a.convert
2261 if a.converter is not None:
2262 lines.append(
2263 fmt_setter_with_converter(
2264 attr_name,
2265 init_factory_name + "(%s)" % (maybe_self,),
2266 has_on_setattr,
2267 )
2268 )
2269 conv_name = _init_converter_pat % (a.name,)
2270 names_for_globals[conv_name] = a.converter
736 2271 else:
737 lines.append(fmt_setter(
738 attr_name,
739 init_factory_name + "({0})".format(maybe_self)
740 ))
2272 lines.append(
2273 fmt_setter(
2274 attr_name,
2275 init_factory_name + "(%s)" % (maybe_self,),
2276 has_on_setattr,
2277 )
2278 )
741 2279 names_for_globals[init_factory_name] = a.default.factory
742 2280 else:
743 if a.convert is not None:
744 lines.append(fmt_setter_with_converter(
745 attr_name,
746 "attr_dict['{attr_name}'].default"
747 .format(attr_name=attr_name)
748 ))
749 conv_name = _init_convert_pat.format(a.name)
750 names_for_globals[conv_name] = a.convert
2281 if a.converter is not None:
2282 lines.append(
2283 fmt_setter_with_converter(
2284 attr_name,
2285 "attr_dict['%s'].default" % (attr_name,),
2286 has_on_setattr,
2287 )
2288 )
2289 conv_name = _init_converter_pat % (a.name,)
2290 names_for_globals[conv_name] = a.converter
751 2291 else:
752 lines.append(fmt_setter(
753 attr_name,
754 "attr_dict['{attr_name}'].default"
755 .format(attr_name=attr_name)
756 ))
2292 lines.append(
2293 fmt_setter(
2294 attr_name,
2295 "attr_dict['%s'].default" % (attr_name,),
2296 has_on_setattr,
2297 )
2298 )
757 2299 elif a.default is not NOTHING and not has_factory:
758 args.append(
759 "{arg_name}=attr_dict['{attr_name}'].default".format(
760 arg_name=arg_name,
761 attr_name=attr_name,
2300 arg = "%s=attr_dict['%s'].default" % (arg_name, attr_name)
2301 if a.kw_only:
2302 kw_only_args.append(arg)
2303 else:
2304 args.append(arg)
2305
2306 if a.converter is not None:
2307 lines.append(
2308 fmt_setter_with_converter(
2309 attr_name, arg_name, has_on_setattr
2310 )
762 2311 )
763 )
764 if a.convert is not None:
765 lines.append(fmt_setter_with_converter(attr_name, arg_name))
766 names_for_globals[_init_convert_pat.format(a.name)] = a.convert
2312 names_for_globals[
2313 _init_converter_pat % (a.name,)
2314 ] = a.converter
767 2315 else:
768 lines.append(fmt_setter(attr_name, arg_name))
2316 lines.append(fmt_setter(attr_name, arg_name, has_on_setattr))
2317
769 2318 elif has_factory:
770 args.append("{arg_name}=NOTHING".format(arg_name=arg_name))
771 lines.append("if {arg_name} is not NOTHING:"
772 .format(arg_name=arg_name))
2319 arg = "%s=NOTHING" % (arg_name,)
2320 if a.kw_only:
2321 kw_only_args.append(arg)
2322 else:
2323 args.append(arg)
2324 lines.append("if %s is not NOTHING:" % (arg_name,))
2325
773 2326 init_factory_name = _init_factory_pat.format(a.name)
774 if a.convert is not None:
775 lines.append(" " + fmt_setter_with_converter(attr_name,
776 arg_name))
2327 if a.converter is not None:
2328 lines.append(
2329 " "
2330 + fmt_setter_with_converter(
2331 attr_name, arg_name, has_on_setattr
2332 )
2333 )
777 2334 lines.append("else:")
778 lines.append(" " + fmt_setter_with_converter(
779 attr_name,
780 init_factory_name + "({0})".format(maybe_self)
781 ))
782 names_for_globals[_init_convert_pat.format(a.name)] = a.convert
2335 lines.append(
2336 " "
2337 + fmt_setter_with_converter(
2338 attr_name,
2339 init_factory_name + "(" + maybe_self + ")",
2340 has_on_setattr,
2341 )
2342 )
2343 names_for_globals[
2344 _init_converter_pat % (a.name,)
2345 ] = a.converter
783 2346 else:
784 lines.append(" " + fmt_setter(attr_name, arg_name))
2347 lines.append(
2348 " " + fmt_setter(attr_name, arg_name, has_on_setattr)
2349 )
785 2350 lines.append("else:")
786 lines.append(" " + fmt_setter(
787 attr_name,
788 init_factory_name + "({0})".format(maybe_self)
789 ))
2351 lines.append(
2352 " "
2353 + fmt_setter(
2354 attr_name,
2355 init_factory_name + "(" + maybe_self + ")",
2356 has_on_setattr,
2357 )
2358 )
790 2359 names_for_globals[init_factory_name] = a.default.factory
791 2360 else:
792 args.append(arg_name)
793 if a.convert is not None:
794 lines.append(fmt_setter_with_converter(attr_name, arg_name))
795 names_for_globals[_init_convert_pat.format(a.name)] = a.convert
2361 if a.kw_only:
2362 kw_only_args.append(arg_name)
796 2363 else:
797 lines.append(fmt_setter(attr_name, arg_name))
2364 args.append(arg_name)
2365
2366 if a.converter is not None:
2367 lines.append(
2368 fmt_setter_with_converter(
2369 attr_name, arg_name, has_on_setattr
2370 )
2371 )
2372 names_for_globals[
2373 _init_converter_pat % (a.name,)
2374 ] = a.converter
2375 else:
2376 lines.append(fmt_setter(attr_name, arg_name, has_on_setattr))
2377
2378 if a.init is True:
2379 if a.type is not None and a.converter is None:
2380 annotations[arg_name] = a.type
2381 elif a.converter is not None:
2382 # Try to get the type from the converter.
2383 t = _AnnotationExtractor(a.converter).get_first_param_type()
2384 if t:
2385 annotations[arg_name] = t
798 2386
799 2387 if attrs_to_validate: # we can skip this if there are no validators.
800 2388 names_for_globals["_config"] = _config
801 2389 lines.append("if _config._run_validators is True:")
802 2390 for a in attrs_to_validate:
803 val_name = "__attr_validator_{}".format(a.name)
804 attr_name = "__attr_{}".format(a.name)
805 lines.append(" {}(self, {}, self.{})".format(
806 val_name, attr_name, a.name))
2391 val_name = "__attr_validator_" + a.name
2392 attr_name = "__attr_" + a.name
2393 lines.append(
2394 " %s(self, %s, self.%s)" % (val_name, attr_name, a.name)
2395 )
807 2396 names_for_globals[val_name] = a.validator
808 2397 names_for_globals[attr_name] = a
2398
809 2399 if post_init:
810 2400 lines.append("self.__attrs_post_init__()")
811 2401
812 return """\
813 def __init__(self, {args}):
2402 # because this is set only after __attrs_post_init__ is called, a crash
2403 # will result if post-init tries to access the hash code. This seemed
2404 # preferable to setting this beforehand, in which case alteration to
2405 # field values during post-init combined with post-init accessing the
2406 # hash code would result in silent bugs.
2407 if cache_hash:
2408 if frozen:
2409 if slots:
2410 # if frozen and slots, then _setattr defined above
2411 init_hash_cache = "_setattr(self, '%s', %s)"
2412 else:
2413 # if frozen and not slots, then _inst_dict defined above
2414 init_hash_cache = "_inst_dict['%s'] = %s"
2415 else:
2416 init_hash_cache = "self.%s = %s"
2417 lines.append(init_hash_cache % (_hash_cache_field, "None"))
2418
2419 # For exceptions we rely on BaseException.__init__ for proper
2420 # initialization.
2421 if is_exc:
2422 vals = ",".join("self." + a.name for a in attrs if a.init)
2423
2424 lines.append("BaseException.__init__(self, %s)" % (vals,))
2425
2426 args = ", ".join(args)
2427 if kw_only_args:
2428 args += "%s*, %s" % (
2429 ", " if args else "", # leading comma
2430 ", ".join(kw_only_args), # kw_only args
2431 )
2432 return (
2433 """\
2434 def {init_name}(self, {args}):
814 2435 {lines}
815 2436 """.format(
816 args=", ".join(args),
817 lines="\n ".join(lines) if lines else "pass",
818 ), names_for_globals
819
820
821 class Attribute(object):
2437 init_name=("__attrs_init__" if attrs_init else "__init__"),
2438 args=args,
2439 lines="\n ".join(lines) if lines else "pass",
2440 ),
2441 names_for_globals,
2442 annotations,
2443 )
2444
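The script returned by ``_attrs_to_init_script`` is compiled under a synthetic filename that gets registered in ``linecache``, so debuggers can step through it. Assuming that registration, ``inspect.getsource`` should be able to print the synthesized method:

    import inspect
    import attr

    @attr.s
    class C:
        x = attr.ib(default=0)

    print(inspect.getsource(C.__init__))  # shows the generated script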
2445
2446 class Attribute:
822 2447 """
823 2448 *Read-only* representation of an attribute.
824 2449
825 :attribute name: The name of the attribute.
826
827 Plus *all* arguments of :func:`attr.ib`.
2450 The class has *all* arguments of `attr.ib` (except for ``factory``
2451 which is only syntactic sugar for ``default=Factory(...)``) plus the
2452 following:
2453
2454 - ``name`` (`str`): The name of the attribute.
2455 - ``inherited`` (`bool`): Whether or not that attribute has been inherited
2456 from a base class.
2457 - ``eq_key`` and ``order_key`` (`typing.Callable` or `None`): The callables
2458 that are used for comparing and ordering objects by this attribute,
2459 respectively. These are set by passing a callable to `attr.ib`'s ``eq``,
2460 ``order``, or ``cmp`` arguments. See also :ref:`comparison customization
2461 <custom-comparison>`.
2462
2463 Instances of this class are frequently used for introspection purposes
2464 like:
2465
2466 - `fields` returns a tuple of them.
2467 - Validators get them passed as the first argument.
2468 - The :ref:`field transformer <transform-fields>` hook receives a list of
2469 them.
2470
2471 .. versionadded:: 20.1.0 *inherited*
2472 .. versionadded:: 20.1.0 *on_setattr*
2473 .. versionchanged:: 20.2.0 *inherited* is not taken into account for
2474 equality checks and hashing anymore.
2475 .. versionadded:: 21.1.0 *eq_key* and *order_key*
2476
2477 For the full version history of the fields, see `attr.ib`.
828 2478 """
2479
829 2480 __slots__ = (
830 "name", "default", "validator", "repr", "cmp", "hash", "init",
831 "convert", "metadata",
2481 "name",
2482 "default",
2483 "validator",
2484 "repr",
2485 "eq",
2486 "eq_key",
2487 "order",
2488 "order_key",
2489 "hash",
2490 "init",
2491 "metadata",
2492 "type",
2493 "converter",
2494 "kw_only",
2495 "inherited",
2496 "on_setattr",
832 2497 )
833 2498
834 def __init__(self, name, default, validator, repr, cmp, hash, init,
835 convert=None, metadata=None):
2499 def __init__(
2500 self,
2501 name,
2502 default,
2503 validator,
2504 repr,
2505 cmp, # XXX: unused, remove along with other cmp code.
2506 hash,
2507 init,
2508 inherited,
2509 metadata=None,
2510 type=None,
2511 converter=None,
2512 kw_only=False,
2513 eq=None,
2514 eq_key=None,
2515 order=None,
2516 order_key=None,
2517 on_setattr=None,
2518 ):
2519 eq, eq_key, order, order_key = _determine_attrib_eq_order(
2520 cmp, eq_key or eq, order_key or order, True
2521 )
2522
836 2523 # Cache this descriptor here to speed things up later.
837 2524 bound_setattr = _obj_setattr.__get__(self, Attribute)
838 2525
2526 # Despite the big red warning, people *do* instantiate `Attribute`
2527 # themselves.
839 2528 bound_setattr("name", name)
840 2529 bound_setattr("default", default)
841 2530 bound_setattr("validator", validator)
842 2531 bound_setattr("repr", repr)
843 bound_setattr("cmp", cmp)
2532 bound_setattr("eq", eq)
2533 bound_setattr("eq_key", eq_key)
2534 bound_setattr("order", order)
2535 bound_setattr("order_key", order_key)
844 2536 bound_setattr("hash", hash)
845 2537 bound_setattr("init", init)
846 bound_setattr("convert", convert)
847 bound_setattr("metadata", (metadata_proxy(metadata) if metadata
848 else _empty_metadata_singleton))
2538 bound_setattr("converter", converter)
2539 bound_setattr(
2540 "metadata",
2541 (
2542 types.MappingProxyType(dict(metadata)) # Shallow copy
2543 if metadata
2544 else _empty_metadata_singleton
2545 ),
2546 )
2547 bound_setattr("type", type)
2548 bound_setattr("kw_only", kw_only)
2549 bound_setattr("inherited", inherited)
2550 bound_setattr("on_setattr", on_setattr)
849 2551
850 2552 def __setattr__(self, name, value):
851 2553 raise FrozenInstanceError()
852 2554
853 2555 @classmethod
854 def from_counting_attr(cls, name, ca):
2556 def from_counting_attr(cls, name, ca, type=None):
2557 # type holds the annotated value. Deal with conflicts:
2558 if type is None:
2559 type = ca.type
2560 elif ca.type is not None:
2561 raise ValueError(
2562 "Type annotation and type argument cannot both be present"
2563 )
855 2564 inst_dict = {
856 2565 k: getattr(ca, k)
857 for k
858 in Attribute.__slots__
859 if k not in (
860 "name", "validator", "default",
861 ) # exclude methods
2566 for k in Attribute.__slots__
2567 if k
2568 not in (
2569 "name",
2570 "validator",
2571 "default",
2572 "type",
2573 "inherited",
2574 ) # exclude methods and deprecated alias
862 2575 }
863 return cls(name=name, validator=ca._validator, default=ca._default,
864 **inst_dict)
2576 return cls(
2577 name=name,
2578 validator=ca._validator,
2579 default=ca._default,
2580 type=type,
2581 cmp=None,
2582 inherited=False,
2583 **inst_dict
2584 )
2585
2586 # Don't use attr.evolve since fields(Attribute) doesn't work
2587 def evolve(self, **changes):
2588 """
2589 Copy *self* and apply *changes*.
2590
2591 This works similarly to `attr.evolve` but that function does not work
2592 with ``Attribute``.
2593
2594 It is mainly meant to be used for `transform-fields`.
2595
2596 .. versionadded:: 20.3.0
2597 """
2598 new = copy.copy(self)
2599
2600 new._setattrs(changes.items())
2601
2602 return new
865 2603
866 2604 # Don't use _add_pickle since fields(Attribute) doesn't work
867 2605 def __getstate__(self):
868 2606 """
869 2607 Play nice with pickle.
870 2608 """
871 return tuple(getattr(self, name) if name != "metadata"
872 else dict(self.metadata)
873 for name in self.__slots__)
2609 return tuple(
2610 getattr(self, name) if name != "metadata" else dict(self.metadata)
2611 for name in self.__slots__
2612 )
874 2613
875 2614 def __setstate__(self, state):
876 2615 """
877 2616 Play nice with pickle.
878 2617 """
2618 self._setattrs(zip(self.__slots__, state))
2619
2620 def _setattrs(self, name_values_pairs):
879 2621 bound_setattr = _obj_setattr.__get__(self, Attribute)
880 for name, value in zip(self.__slots__, state):
2622 for name, value in name_values_pairs:
881 2623 if name != "metadata":
882 2624 bound_setattr(name, value)
883 2625 else:
884 bound_setattr(name, metadata_proxy(value) if value else
885 _empty_metadata_singleton)
886
887
888 _a = [Attribute(name=name, default=NOTHING, validator=None,
889 repr=True, cmp=True, hash=(name != "metadata"), init=True)
890 for name in Attribute.__slots__]
2626 bound_setattr(
2627 name,
2628 types.MappingProxyType(dict(value))
2629 if value
2630 else _empty_metadata_singleton,
2631 )
2632
2633
2634 _a = [
2635 Attribute(
2636 name=name,
2637 default=NOTHING,
2638 validator=None,
2639 repr=True,
2640 cmp=None,
2641 eq=True,
2642 order=False,
2643 hash=(name != "metadata"),
2644 init=True,
2645 inherited=False,
2646 )
2647 for name in Attribute.__slots__
2648 ]
891 2649
892 2650 Attribute = _add_hash(
893 _add_cmp(_add_repr(Attribute, attrs=_a), attrs=_a),
894 attrs=[a for a in _a if a.hash]
2651 _add_eq(
2652 _add_repr(Attribute, attrs=_a),
2653 attrs=[a for a in _a if a.name != "inherited"],
2654 ),
2655 attrs=[a for a in _a if a.hash and a.name != "inherited"],
895 2656 )
896 2657
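``Attribute.evolve`` is what makes field transformers practical: the hook receives immutable ``Attribute`` instances and returns evolved copies. A hedged sketch (the transformer name is illustrative):

    import attr

    def auto_convert_ints(cls, fields):
        # evolve() copies the frozen Attribute with changes applied
        return [
            f.evolve(converter=int) if f.type is int else f
            for f in fields
        ]

    @attr.s(auto_attribs=True, field_transformer=auto_convert_ints)
    class C:
        x: int

    assert C("42").x == 42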
897 2658
898 class _CountingAttr(object):
2659 class _CountingAttr:
899 2660 """
900 2661 Intermediate representation of attributes that uses a counter to preserve
901 2662 the order in which the attributes have been defined.
@@ -903,35 +2664,105 class _CountingAttr(object):
903 2664 *Internal* data structure of the attrs library. Running into it is most
904 2665 likely the result of a bug like a forgotten `@attr.s` decorator.
905 2666 """
906 __slots__ = ("counter", "_default", "repr", "cmp", "hash", "init",
907 "metadata", "_validator", "convert")
2667
2668 __slots__ = (
2669 "counter",
2670 "_default",
2671 "repr",
2672 "eq",
2673 "eq_key",
2674 "order",
2675 "order_key",
2676 "hash",
2677 "init",
2678 "metadata",
2679 "_validator",
2680 "converter",
2681 "type",
2682 "kw_only",
2683 "on_setattr",
2684 )
908 2685 __attrs_attrs__ = tuple(
909 Attribute(name=name, default=NOTHING, validator=None,
910 repr=True, cmp=True, hash=True, init=True)
911 for name
912 in ("counter", "_default", "repr", "cmp", "hash", "init",)
2686 Attribute(
2687 name=name,
2688 default=NOTHING,
2689 validator=None,
2690 repr=True,
2691 cmp=None,
2692 hash=True,
2693 init=True,
2694 kw_only=False,
2695 eq=True,
2696 eq_key=None,
2697 order=False,
2698 order_key=None,
2699 inherited=False,
2700 on_setattr=None,
2701 )
2702 for name in (
2703 "counter",
2704 "_default",
2705 "repr",
2706 "eq",
2707 "order",
2708 "hash",
2709 "init",
2710 "on_setattr",
2711 )
913 2712 ) + (
914 Attribute(name="metadata", default=None, validator=None,
915 repr=True, cmp=True, hash=False, init=True),
2713 Attribute(
2714 name="metadata",
2715 default=None,
2716 validator=None,
2717 repr=True,
2718 cmp=None,
2719 hash=False,
2720 init=True,
2721 kw_only=False,
2722 eq=True,
2723 eq_key=None,
2724 order=False,
2725 order_key=None,
2726 inherited=False,
2727 on_setattr=None,
2728 ),
916 2729 )
917 2730 cls_counter = 0
918 2731
919 def __init__(self, default, validator, repr, cmp, hash, init, convert,
920 metadata):
2732 def __init__(
2733 self,
2734 default,
2735 validator,
2736 repr,
2737 cmp,
2738 hash,
2739 init,
2740 converter,
2741 metadata,
2742 type,
2743 kw_only,
2744 eq,
2745 eq_key,
2746 order,
2747 order_key,
2748 on_setattr,
2749 ):
921 2750 _CountingAttr.cls_counter += 1
922 2751 self.counter = _CountingAttr.cls_counter
923 2752 self._default = default
924 # If validator is a list/tuple, wrap it using helper validator.
925 if validator and isinstance(validator, (list, tuple)):
926 self._validator = and_(*validator)
927 else:
928 self._validator = validator
2753 self._validator = validator
2754 self.converter = converter
929 2755 self.repr = repr
930 self.cmp = cmp
2756 self.eq = eq
2757 self.eq_key = eq_key
2758 self.order = order
2759 self.order_key = order_key
931 2760 self.hash = hash
932 2761 self.init = init
933 self.convert = convert
934 2762 self.metadata = metadata
2763 self.type = type
2764 self.kw_only = kw_only
2765 self.on_setattr = on_setattr
935 2766
936 2767 def validator(self, meth):
937 2768 """
@@ -965,15 +2796,14 class _CountingAttr(object):
965 2796 return meth
966 2797
967 2798
968 _CountingAttr = _add_cmp(_add_repr(_CountingAttr))
969
970
971 @attributes(slots=True, init=False)
972 class Factory(object):
2799 _CountingAttr = _add_eq(_add_repr(_CountingAttr))
2800
2801
2802 class Factory:
973 2803 """
974 2804 Stores a factory callable.
975 2805
976 If passed as the default value to :func:`attr.ib`, the factory is used to
2806 If passed as the default value to `attrs.field`, the factory is used to
977 2807 generate a new value.
978 2808
979 2809 :param callable factory: A callable that takes either none or exactly one
@@ -983,8 +2813,8 class Factory(object):
983 2813
984 2814 .. versionadded:: 17.1.0 *takes_self*
985 2815 """
986 factory = attr()
987 takes_self = attr()
2816
2817 __slots__ = ("factory", "takes_self")
988 2818
989 2819 def __init__(self, factory, takes_self=False):
990 2820 """
@@ -994,47 +2824,122 class Factory(object):
994 2824 self.factory = factory
995 2825 self.takes_self = takes_self
996 2826
2827 def __getstate__(self):
2828 """
2829 Play nice with pickle.
2830 """
2831 return tuple(getattr(self, name) for name in self.__slots__)
2832
2833 def __setstate__(self, state):
2834 """
2835 Play nice with pickle.
2836 """
2837 for name, value in zip(self.__slots__, state):
2838 setattr(self, name, value)
2839
2840
2841 _f = [
2842 Attribute(
2843 name=name,
2844 default=NOTHING,
2845 validator=None,
2846 repr=True,
2847 cmp=None,
2848 eq=True,
2849 order=False,
2850 hash=True,
2851 init=True,
2852 inherited=False,
2853 )
2854 for name in Factory.__slots__
2855 ]
2856
2857 Factory = _add_hash(_add_eq(_add_repr(Factory, attrs=_f), attrs=_f), attrs=_f)
2858
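``Factory`` with ``takes_self=True`` receives the partially initialized instance, so a default can depend on fields defined earlier:

    import attr

    @attr.s
    class Chain:
        value = attr.ib(default=1)
        double = attr.ib(
            default=attr.Factory(lambda self: self.value * 2, takes_self=True)
        )

    assert Chain().double == 2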
997 2859
998 2860 def make_class(name, attrs, bases=(object,), **attributes_arguments):
999 2861 """
1000 2862 A quick way to create a new class called *name* with *attrs*.
1001 2863
1002 :param name: The name for the new class.
1003 :type name: str
2864 :param str name: The name for the new class.
1004 2865
1005 2866 :param attrs: A list of names or a dictionary of mappings of names to
1006 2867 attributes.
1007 :type attrs: :class:`list` or :class:`dict`
2868
2869 If *attrs* is a list or an ordered dict (`dict` on Python 3.6+,
2870 `collections.OrderedDict` otherwise), the order is deduced from
2871 the order of the names or attributes inside *attrs*. Otherwise the
2872 order of the definition of the attributes is used.
2873 :type attrs: `list` or `dict`
1008 2874
1009 2875 :param tuple bases: Classes that the new class will subclass.
1010 2876
1011 :param attributes_arguments: Passed unmodified to :func:`attr.s`.
2877 :param attributes_arguments: Passed unmodified to `attr.s`.
1012 2878
1013 2879 :return: A new class with *attrs*.
1014 2880 :rtype: type
1015 2881
1016 .. versionadded:: 17.1.0 *bases*
2882 .. versionadded:: 17.1.0 *bases*
2883 .. versionchanged:: 18.1.0 If *attrs* is ordered, the order is retained.
1017 2884 """
1018 2885 if isinstance(attrs, dict):
1019 2886 cls_dict = attrs
1020 2887 elif isinstance(attrs, (list, tuple)):
1021 cls_dict = dict((a, attr()) for a in attrs)
2888 cls_dict = {a: attrib() for a in attrs}
1022 2889 else:
1023 2890 raise TypeError("attrs argument must be a dict or a list.")
1024 2891
1025 return attributes(**attributes_arguments)(type(name, bases, cls_dict))
1026
1027
1028 # These are required within this module so we define them here and merely
1029 # import into .validators.
1030
1031
1032 @attributes(slots=True, hash=True)
1033 class _AndValidator(object):
2892 pre_init = cls_dict.pop("__attrs_pre_init__", None)
2893 post_init = cls_dict.pop("__attrs_post_init__", None)
2894 user_init = cls_dict.pop("__init__", None)
2895
2896 body = {}
2897 if pre_init is not None:
2898 body["__attrs_pre_init__"] = pre_init
2899 if post_init is not None:
2900 body["__attrs_post_init__"] = post_init
2901 if user_init is not None:
2902 body["__init__"] = user_init
2903
2904 type_ = types.new_class(name, bases, {}, lambda ns: ns.update(body))
2905
2906 # For pickling to work, the __module__ variable needs to be set to the
2907 # frame where the class is created. Bypass this step in environments where
2908 # sys._getframe is not defined (Jython for example) or sys._getframe is not
2909 # defined for arguments greater than 0 (IronPython).
2910 try:
2911 type_.__module__ = sys._getframe(1).f_globals.get(
2912 "__name__", "__main__"
2913 )
2914 except (AttributeError, ValueError):
2915 pass
2916
2917 # We do it here for proper warnings with meaningful stacklevel.
2918 cmp = attributes_arguments.pop("cmp", None)
2919 (
2920 attributes_arguments["eq"],
2921 attributes_arguments["order"],
2922 ) = _determine_attrs_eq_order(
2923 cmp,
2924 attributes_arguments.get("eq"),
2925 attributes_arguments.get("order"),
2926 True,
2927 )
2928
2929 return _attrs(these=cls_dict, **attributes_arguments)(type_)
2930
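``make_class`` in one line: a list of names turns into plain ``attrib()`` fields, and the remaining keyword arguments are handed to ``attr.s``:

    import attr

    P = attr.make_class("P", ["x", "y"], frozen=True)
    p = P(1, 2)
    assert (p.x, p.y) == (1, 2)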
2931
2932 # These are required within this module so we define them here and merely
2933 # import into .validators / .converters.
2934
2935
2936 @attrs(slots=True, hash=True)
2937 class _AndValidator:
1034 2938 """
1035 2939 Compose many validators to a single one.
1036 2940 """
1037 _validators = attr()
2941
2942 _validators = attrib()
1038 2943
1039 2944 def __call__(self, inst, attr, value):
1040 2945 for v in self._validators:
@@ -1047,16 +2952,55 def and_(*validators):
1047 2952
1048 2953 When called on a value, it runs all wrapped validators.
1049 2954
1050 :param validators: Arbitrary number of validators.
1051 :type validators: callables
2955 :param callables validators: Arbitrary number of validators.
1052 2956
1053 2957 .. versionadded:: 17.1.0
1054 2958 """
1055 2959 vals = []
1056 2960 for validator in validators:
1057 2961 vals.extend(
1058 validator._validators if isinstance(validator, _AndValidator)
2962 validator._validators
2963 if isinstance(validator, _AndValidator)
1059 2964 else [validator]
1060 2965 )
1061 2966
1062 2967 return _AndValidator(tuple(vals))
2968
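``and_`` is also what a list passed to ``attr.ib(validator=...)`` collapses into; nested ``_AndValidator``\ s are flattened:

    import attr

    def positive(inst, attribute, value):
        if value <= 0:
            raise ValueError("%s must be positive" % attribute.name)

    @attr.s
    class C:
        # the list is wrapped into a single and_ validator
        x = attr.ib(validator=[attr.validators.instance_of(int), positive])

    C(3)
    try:
        C(-1)
    except ValueError:
        pass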
2969
2970 def pipe(*converters):
2971 """
2972 A converter that composes multiple converters into one.
2973
2974 When called on a value, it runs all wrapped converters, returning the
2975 *last* value.
2976
2977 Type annotations will be inferred from the wrapped converters', if
2978 they have any.
2979
2980 :param callables converters: Arbitrary number of converters.
2981
2982 .. versionadded:: 20.1.0
2983 """
2984
2985 def pipe_converter(val):
2986 for converter in converters:
2987 val = converter(val)
2988
2989 return val
2990
2991 if not converters:
2992 # If the converter list is empty, pipe_converter is the identity.
2993 A = typing.TypeVar("A")
2994 pipe_converter.__annotations__ = {"val": A, "return": A}
2995 else:
2996 # Get parameter type from first converter.
2997 t = _AnnotationExtractor(converters[0]).get_first_param_type()
2998 if t:
2999 pipe_converter.__annotations__["val"] = t
3000
3001 # Get return type from last converter.
3002 rt = _AnnotationExtractor(converters[-1]).get_return_type()
3003 if rt:
3004 pipe_converter.__annotations__["return"] = rt
3005
3006 return pipe_converter
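``pipe`` composes converters left to right and stores the last return value; it is re-exported from ``attr.converters``:

    import attr
    from attr.converters import pipe

    @attr.s
    class C:
        x = attr.ib(converter=pipe(str.strip, int))

    assert C(" 42 ").x == 42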
@@ -1,8 +1,22
1 # SPDX-License-Identifier: MIT
2
1 3 """
2 4 Commonly useful converters.
3 5 """
4 6
5 from __future__ import absolute_import, division, print_function
7
8 import typing
9
10 from ._compat import _AnnotationExtractor
11 from ._make import NOTHING, Factory, pipe
12
13
14 __all__ = [
15 "default_if_none",
16 "optional",
17 "pipe",
18 "to_bool",
19 ]
6 20
7 21
8 22 def optional(converter):
@@ -10,10 +24,13 def optional(converter):
10 24 A converter that allows an attribute to be optional. An optional attribute
11 25 is one which can be set to ``None``.
12 26
27 Type annotations will be inferred from the wrapped converter's
28 annotations, if it has any.
29
13 30 :param callable converter: the converter that is used for non-``None``
14 31 values.
15 32
16 .. versionadded:: 17.1.0
33 .. versionadded:: 17.1.0
17 34 """
18 35
19 36 def optional_converter(val):
@@ -21,4 +38,107 def optional(converter):
21 38 return None
22 39 return converter(val)
23 40
41 xtr = _AnnotationExtractor(converter)
42
43 t = xtr.get_first_param_type()
44 if t:
45 optional_converter.__annotations__["val"] = typing.Optional[t]
46
47 rt = xtr.get_return_type()
48 if rt:
49 optional_converter.__annotations__["return"] = typing.Optional[rt]
50
24 51 return optional_converter
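
A minimal sketch of the behavior above (same ``attr`` import assumption):

    import attr

    conv = attr.converters.optional(int)
    assert conv("42") == 42
    assert conv(None) is None  # None is passed through, not converted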
52
53
54 def default_if_none(default=NOTHING, factory=None):
55 """
56 A converter that replaces ``None`` values with *default* or the
57 result of *factory*.
58
59 :param default: Value to be used if ``None`` is passed. Passing an instance
60 of `attrs.Factory` is supported, however the ``takes_self`` option
61 is *not*.
62 :param callable factory: A callable that takes no parameters whose result
63 is used if ``None`` is passed.
64
65 :raises TypeError: If **neither** *default* **nor** *factory* is passed.
66 :raises TypeError: If **both** *default* and *factory* are passed.
67 :raises ValueError: If an instance of `attrs.Factory` is passed with
68 ``takes_self=True``.
69
70 .. versionadded:: 18.2.0
71 """
72 if default is NOTHING and factory is None:
73 raise TypeError("Must pass either `default` or `factory`.")
74
75 if default is not NOTHING and factory is not None:
76 raise TypeError(
77 "Must pass either `default` or `factory` but not both."
78 )
79
80 if factory is not None:
81 default = Factory(factory)
82
83 if isinstance(default, Factory):
84 if default.takes_self:
85 raise ValueError(
86 "`takes_self` is not supported by default_if_none."
87 )
88
89 def default_if_none_converter(val):
90 if val is not None:
91 return val
92
93 return default.factory()
94
95 else:
96
97 def default_if_none_converter(val):
98 if val is not None:
99 return val
100
101 return default
102
103 return default_if_none_converter
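
For illustration (still assuming the package imports as ``attr``), both forms replace only ``None``:

    import attr

    conv = attr.converters.default_if_none(default=b"")
    assert conv(None) == b"" and conv(b"x") == b"x"

    conv = attr.converters.default_if_none(factory=list)
    assert conv(None) == []  # a fresh list on every None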
104
105
106 def to_bool(val):
107 """
108 Convert "boolean" strings (e.g., from env. vars.) to real booleans.
109
110 Values mapping to :code:`True`:
111
112 - :code:`True`
113 - :code:`"true"` / :code:`"t"`
114 - :code:`"yes"` / :code:`"y"`
115 - :code:`"on"`
116 - :code:`"1"`
117 - :code:`1`
118
119 Values mapping to :code:`False`:
120
121 - :code:`False`
122 - :code:`"false"` / :code:`"f"`
123 - :code:`"no"` / :code:`"n"`
124 - :code:`"off"`
125 - :code:`"0"`
126 - :code:`0`
127
128 :raises ValueError: for any other value.
129
130 .. versionadded:: 21.3.0
131 """
132 if isinstance(val, str):
133 val = val.lower()
134 truthy = {True, "true", "t", "yes", "y", "on", "1", 1}
135 falsy = {False, "false", "f", "no", "n", "off", "0", 0}
136 try:
137 if val in truthy:
138 return True
139 if val in falsy:
140 return False
141 except TypeError:
142 # Raised when "val" is not hashable (e.g., lists)
143 pass
144 raise ValueError("Cannot convert value to bool: {}".format(val))
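
A quick sketch of the mapping above; string matching is case-insensitive because the value is lowered first:

    import attr

    assert attr.converters.to_bool("YES") is True   # lowered to "yes" first
    assert attr.converters.to_bool(0) is False
    # attr.converters.to_bool("maybe") would raise ValueError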
@@ -1,17 +1,35
1 from __future__ import absolute_import, division, print_function
1 # SPDX-License-Identifier: MIT
2 2
3 3
4 class FrozenInstanceError(AttributeError):
4 class FrozenError(AttributeError):
5 5 """
6 A frozen/immutable instance has been attempted to be modified.
6 A frozen/immutable instance or attribute have been attempted to be
7 modified.
7 8
8 9 It mirrors the behavior of ``namedtuples`` by using the same error message
9 and subclassing :exc:`AttributeError`.
10 and subclassing `AttributeError`.
11
12 .. versionadded:: 20.1.0
13 """
14
15 msg = "can't set attribute"
16 args = [msg]
17
18
19 class FrozenInstanceError(FrozenError):
20 """
21 A frozen instance has been attempted to be modified.
10 22
11 23 .. versionadded:: 16.1.0
12 24 """
13 msg = "can't set attribute"
14 args = [msg]
25
26
27 class FrozenAttributeError(FrozenError):
28 """
29 A frozen attribute has been attempted to be modified.
30
31 .. versionadded:: 20.1.0
32 """
15 33
16 34
17 35 class AttrsAttributeNotFoundError(ValueError):
@@ -37,3 +55,38 class DefaultAlreadySetError(RuntimeErro
37 55
38 56 .. versionadded:: 17.1.0
39 57 """
58
59
60 class UnannotatedAttributeError(RuntimeError):
61 """
62 A class with ``auto_attribs=True`` has an ``attr.ib()`` without a type
63 annotation.
64
65 .. versionadded:: 17.3.0
66 """
67
68
69 class PythonTooOldError(RuntimeError):
70 """
71 It was attempted to use an ``attrs`` feature that requires a newer Python
72 version.
73
74 .. versionadded:: 18.2.0
75 """
76
77
78 class NotCallableError(TypeError):
79 """
80 An ``attr.ib()`` requiring a callable has been set with a value
81 that is not callable.
82
83 .. versionadded:: 19.2.0
84 """
85
86 def __init__(self, msg, value):
87 super(TypeError, self).__init__(msg, value)
88 self.msg = msg
89 self.value = value
90
91 def __str__(self):
92 return str(self.msg)
@@ -1,10 +1,9
1 # SPDX-License-Identifier: MIT
2
1 3 """
2 Commonly useful filters for :func:`attr.asdict`.
4 Commonly useful filters for `attr.asdict`.
3 5 """
4 6
5 from __future__ import absolute_import, division, print_function
6
7 from ._compat import isclass
8 7 from ._make import Attribute
9 8
10 9
@@ -13,19 +12,19 def _split_what(what):
13 12 Returns a tuple of `frozenset`s of classes and attributes.
14 13 """
15 14 return (
16 frozenset(cls for cls in what if isclass(cls)),
15 frozenset(cls for cls in what if isinstance(cls, type)),
17 16 frozenset(cls for cls in what if isinstance(cls, Attribute)),
18 17 )
19 18
20 19
21 20 def include(*what):
22 r"""
23 Whitelist *what*.
21 """
22 Include *what*.
24 23
25 :param what: What to whitelist.
26 :type what: :class:`list` of :class:`type` or :class:`attr.Attribute`\ s
24 :param what: What to include.
25 :type what: `list` of `type` or `attrs.Attribute`\\ s
27 26
28 :rtype: :class:`callable`
27 :rtype: `callable`
29 28 """
30 29 cls, attrs = _split_what(what)
31 30
@@ -36,13 +35,13 def include(*what):
36 35
37 36
38 37 def exclude(*what):
39 r"""
40 Blacklist *what*.
38 """
39 Exclude *what*.
41 40
42 :param what: What to blacklist.
43 :type what: :class:`list` of classes or :class:`attr.Attribute`\ s.
41 :param what: What to exclude.
42 :type what: `list` of classes or `attrs.Attribute`\\ s.
44 43
45 :rtype: :class:`callable`
44 :rtype: `callable`
46 45 """
47 46 cls, attrs = _split_what(what)
48 47
@@ -1,24 +1,99
1 # SPDX-License-Identifier: MIT
2
1 3 """
2 4 Commonly useful validators.
3 5 """
4 6
5 from __future__ import absolute_import, division, print_function
7
8 import operator
9 import re
10
11 from contextlib import contextmanager
6 12
7 from ._make import attr, attributes, and_, _AndValidator
13 from ._config import get_run_validators, set_run_validators
14 from ._make import _AndValidator, and_, attrib, attrs
15 from .exceptions import NotCallableError
16
17
18 try:
19 Pattern = re.Pattern
20 except AttributeError: # Python <3.8 lacks re.Pattern.
21 Pattern = type(re.compile(""))
8 22
9 23
10 24 __all__ = [
11 25 "and_",
26 "deep_iterable",
27 "deep_mapping",
28 "disabled",
29 "ge",
30 "get_disabled",
31 "gt",
12 32 "in_",
13 33 "instance_of",
34 "is_callable",
35 "le",
36 "lt",
37 "matches_re",
38 "max_len",
39 "min_len",
14 40 "optional",
15 41 "provides",
42 "set_disabled",
16 43 ]
17 44
18 45
19 @attributes(repr=False, slots=True, hash=True)
20 class _InstanceOfValidator(object):
21 type = attr()
46 def set_disabled(disabled):
47 """
48 Globally disable or enable running validators.
49
50 By default, they are run.
51
52 :param disabled: If ``True``, disable running all validators.
53 :type disabled: bool
54
55 .. warning::
56
57 This function is not thread-safe!
58
59 .. versionadded:: 21.3.0
60 """
61 set_run_validators(not disabled)
62
63
64 def get_disabled():
65 """
66 Return a bool indicating whether validators are currently disabled.
67
68 :return: ``True`` if validators are currently disabled.
69 :rtype: bool
70
71 .. versionadded:: 21.3.0
72 """
73 return not get_run_validators()
74
75
76 @contextmanager
77 def disabled():
78 """
79 Context manager that disables running validators within its context.
80
81 .. warning::
82
83 This context manager is not thread-safe!
84
85 .. versionadded:: 21.3.0
86 """
87 set_run_validators(False)
88 try:
89 yield
90 finally:
91 set_run_validators(True)
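
A usage sketch for the three entry points above (not thread-safe, as the warnings note):

    import attr

    attr.validators.set_disabled(True)
    assert attr.validators.get_disabled() is True
    attr.validators.set_disabled(False)

    with attr.validators.disabled():
        pass  # validators are skipped inside this block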
92
93
94 @attrs(repr=False, slots=True, hash=True)
95 class _InstanceOfValidator:
96 type = attrib()
22 97
23 98 def __call__(self, inst, attr, value):
24 99 """
@@ -27,38 +102,116 class _InstanceOfValidator(object):
27 102 if not isinstance(value, self.type):
28 103 raise TypeError(
29 104 "'{name}' must be {type!r} (got {value!r} that is a "
30 "{actual!r})."
31 .format(name=attr.name, type=self.type,
32 actual=value.__class__, value=value),
33 attr, self.type, value,
105 "{actual!r}).".format(
106 name=attr.name,
107 type=self.type,
108 actual=value.__class__,
109 value=value,
110 ),
111 attr,
112 self.type,
113 value,
34 114 )
35 115
36 116 def __repr__(self):
37 return (
38 "<instance_of validator for type {type!r}>"
39 .format(type=self.type)
117 return "<instance_of validator for type {type!r}>".format(
118 type=self.type
40 119 )
41 120
42 121
43 122 def instance_of(type):
44 123 """
45 A validator that raises a :exc:`TypeError` if the initializer is called
46 with a wrong type for this particular attribute (checks are perfomed using
47 :func:`isinstance` therefore it's also valid to pass a tuple of types).
124 A validator that raises a `TypeError` if the initializer is called
125 with a wrong type for this particular attribute (checks are performed using
126 `isinstance`, therefore it's also valid to pass a tuple of types).
48 127
49 128 :param type: The type to check for.
50 129 :type type: type or tuple of types
51 130
52 131 :raises TypeError: With a human readable error message, the attribute
53 (of type :class:`attr.Attribute`), the expected type, and the value it
132 (of type `attrs.Attribute`), the expected type, and the value it
54 133 got.
55 134 """
56 135 return _InstanceOfValidator(type)
57 136
58 137
59 @attributes(repr=False, slots=True, hash=True)
60 class _ProvidesValidator(object):
61 interface = attr()
138 @attrs(repr=False, frozen=True, slots=True)
139 class _MatchesReValidator:
140 pattern = attrib()
141 match_func = attrib()
142
143 def __call__(self, inst, attr, value):
144 """
145 We use a callable class to be able to change the ``__repr__``.
146 """
147 if not self.match_func(value):
148 raise ValueError(
149 "'{name}' must match regex {pattern!r}"
150 " ({value!r} doesn't)".format(
151 name=attr.name, pattern=self.pattern.pattern, value=value
152 ),
153 attr,
154 self.pattern,
155 value,
156 )
157
158 def __repr__(self):
159 return "<matches_re validator for pattern {pattern!r}>".format(
160 pattern=self.pattern
161 )
162
163
164 def matches_re(regex, flags=0, func=None):
165 r"""
166 A validator that raises `ValueError` if the initializer is called
167 with a string that doesn't match *regex*.
168
169 :param regex: a regex string or precompiled pattern to match against
170 :param int flags: flags that will be passed to the underlying re function
171 (default 0)
172 :param callable func: which underlying `re` function to call. Valid options
173 are `re.fullmatch`, `re.search`, and `re.match`; the default ``None``
174 means `re.fullmatch`. For performance reasons, the pattern is always
175 precompiled using `re.compile`.
176
177 .. versionadded:: 19.2.0
178 .. versionchanged:: 21.3.0 *regex* can be a pre-compiled pattern.
179 """
180 valid_funcs = (re.fullmatch, None, re.search, re.match)
181 if func not in valid_funcs:
182 raise ValueError(
183 "'func' must be one of {}.".format(
184 ", ".join(
185 sorted(
186 e and e.__name__ or "None" for e in set(valid_funcs)
187 )
188 )
189 )
190 )
191
192 if isinstance(regex, Pattern):
193 if flags:
194 raise TypeError(
195 "'flags' can only be used with a string pattern; "
196 "pass flags to re.compile() instead"
197 )
198 pattern = regex
199 else:
200 pattern = re.compile(regex, flags)
201
202 if func is re.match:
203 match_func = pattern.match
204 elif func is re.search:
205 match_func = pattern.search
206 else:
207 match_func = pattern.fullmatch
208
209 return _MatchesReValidator(pattern, match_func)
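
A hedged sketch of attaching the validator; the ``Changeset`` class and pattern here are illustrative only:

    import attr

    @attr.s
    class Changeset:
        node = attr.ib(validator=attr.validators.matches_re(br"[0-9a-f]{40}"))

    Changeset(b"a" * 40)       # passes (full match required by default)
    # Changeset(b"xyz") would raise ValueError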
210
211
212 @attrs(repr=False, slots=True, hash=True)
213 class _ProvidesValidator:
214 interface = attrib()
62 215
63 216 def __call__(self, inst, attr, value):
64 217 """
@@ -67,37 +220,40 class _ProvidesValidator(object):
67 220 if not self.interface.providedBy(value):
68 221 raise TypeError(
69 222 "'{name}' must provide {interface!r} which {value!r} "
70 "doesn't."
71 .format(name=attr.name, interface=self.interface, value=value),
72 attr, self.interface, value,
223 "doesn't.".format(
224 name=attr.name, interface=self.interface, value=value
225 ),
226 attr,
227 self.interface,
228 value,
73 229 )
74 230
75 231 def __repr__(self):
76 return (
77 "<provides validator for interface {interface!r}>"
78 .format(interface=self.interface)
232 return "<provides validator for interface {interface!r}>".format(
233 interface=self.interface
79 234 )
80 235
81 236
82 237 def provides(interface):
83 238 """
84 A validator that raises a :exc:`TypeError` if the initializer is called
239 A validator that raises a `TypeError` if the initializer is called
85 240 with an object that does not provide the requested *interface* (checks are
86 241 performed using ``interface.providedBy(value)``; see `zope.interface
87 242 <https://zopeinterface.readthedocs.io/en/latest/>`_).
88 243
89 :param zope.interface.Interface interface: The interface to check for.
244 :param interface: The interface to check for.
245 :type interface: ``zope.interface.Interface``
90 246
91 247 :raises TypeError: With a human readable error message, the attribute
92 (of type :class:`attr.Attribute`), the expected interface, and the
248 (of type `attrs.Attribute`), the expected interface, and the
93 249 value it got.
94 250 """
95 251 return _ProvidesValidator(interface)
96 252
97 253
98 @attributes(repr=False, slots=True, hash=True)
99 class _OptionalValidator(object):
100 validator = attr()
254 @attrs(repr=False, slots=True, hash=True)
255 class _OptionalValidator:
256 validator = attrib()
101 257
102 258 def __call__(self, inst, attr, value):
103 259 if value is None:
@@ -106,9 +262,8 class _OptionalValidator(object):
106 262 self.validator(inst, attr, value)
107 263
108 264 def __repr__(self):
109 return (
110 "<optional validator for {what} or None>"
111 .format(what=repr(self.validator))
265 return "<optional validator for {what} or None>".format(
266 what=repr(self.validator)
112 267 )
113 268
114 269
@@ -120,7 +275,7 def optional(validator):
120 275
121 276 :param validator: A validator (or a list of validators) that is used for
122 277 non-``None`` values.
123 :type validator: callable or :class:`list` of callables.
278 :type validator: callable or `list` of callables.
124 279
125 280 .. versionadded:: 15.1.0
126 281 .. versionchanged:: 17.1.0 *validator* can be a list of validators.
@@ -130,37 +285,310 def optional(validator):
130 285 return _OptionalValidator(validator)
131 286
132 287
133 @attributes(repr=False, slots=True, hash=True)
134 class _InValidator(object):
135 options = attr()
288 @attrs(repr=False, slots=True, hash=True)
289 class _InValidator:
290 options = attrib()
136 291
137 292 def __call__(self, inst, attr, value):
138 if value not in self.options:
293 try:
294 in_options = value in self.options
295 except TypeError: # e.g. `1 in "abc"`
296 in_options = False
297
298 if not in_options:
139 299 raise ValueError(
140 "'{name}' must be in {options!r} (got {value!r})"
141 .format(name=attr.name, options=self.options, value=value)
300 "'{name}' must be in {options!r} (got {value!r})".format(
301 name=attr.name, options=self.options, value=value
302 ),
303 attr,
304 self.options,
305 value,
142 306 )
143 307
144 308 def __repr__(self):
145 return (
146 "<in_ validator with options {options!r}>"
147 .format(options=self.options)
309 return "<in_ validator with options {options!r}>".format(
310 options=self.options
148 311 )
149 312
150 313
151 314 def in_(options):
152 315 """
153 A validator that raises a :exc:`ValueError` if the initializer is called
316 A validator that raises a `ValueError` if the initializer is called
154 317 with a value that does not belong in the options provided. The check is
155 318 performed using ``value in options``.
156 319
157 320 :param options: Allowed options.
158 :type options: list, tuple, :class:`enum.Enum`, ...
321 :type options: list, tuple, `enum.Enum`, ...
159 322
160 323 :raises ValueError: With a human readable error message, the attribute (of
161 type :class:`attr.Attribute`), the expected options, and the value it
324 type `attrs.Attribute`), the expected options, and the value it
162 325 got.
163 326
164 327 .. versionadded:: 17.1.0
328 .. versionchanged:: 22.1.0
329 The ValueError was incomplete until now and only contained the human
330 readable error message. Now it contains all the information that has
331 been promised since 17.1.0.
165 332 """
166 333 return _InValidator(options)
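
For example (the ``Report`` class is hypothetical):

    import attr

    @attr.s
    class Report:
        fmt = attr.ib(validator=attr.validators.in_(["json", "yaml"]))

    Report("json")             # ok
    # Report("xml") would raise ValueError carrying attr, options, and value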
334
335
336 @attrs(repr=False, slots=False, hash=True)
337 class _IsCallableValidator:
338 def __call__(self, inst, attr, value):
339 """
340 We use a callable class to be able to change the ``__repr__``.
341 """
342 if not callable(value):
343 message = (
344 "'{name}' must be callable "
345 "(got {value!r} that is a {actual!r})."
346 )
347 raise NotCallableError(
348 msg=message.format(
349 name=attr.name, value=value, actual=value.__class__
350 ),
351 value=value,
352 )
353
354 def __repr__(self):
355 return "<is_callable validator>"
356
357
358 def is_callable():
359 """
360 A validator that raises a `attr.exceptions.NotCallableError` if the
361 initializer is called with a value for this particular attribute
362 that is not callable.
363
364 .. versionadded:: 19.1.0
365
366 :raises `attr.exceptions.NotCallableError`: With a human readable error
367 message containing the attribute (`attrs.Attribute`) name,
368 and the value it got.
369 """
370 return _IsCallableValidator()
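
A minimal sketch (hypothetical ``Hook`` class):

    import attr

    @attr.s
    class Hook:
        fn = attr.ib(validator=attr.validators.is_callable())

    Hook(print)                # ok
    # Hook("not callable") would raise NotCallableError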
371
372
373 @attrs(repr=False, slots=True, hash=True)
374 class _DeepIterable:
375 member_validator = attrib(validator=is_callable())
376 iterable_validator = attrib(
377 default=None, validator=optional(is_callable())
378 )
379
380 def __call__(self, inst, attr, value):
381 """
382 We use a callable class to be able to change the ``__repr__``.
383 """
384 if self.iterable_validator is not None:
385 self.iterable_validator(inst, attr, value)
386
387 for member in value:
388 self.member_validator(inst, attr, member)
389
390 def __repr__(self):
391 iterable_identifier = (
392 ""
393 if self.iterable_validator is None
394 else " {iterable!r}".format(iterable=self.iterable_validator)
395 )
396 return (
397 "<deep_iterable validator for{iterable_identifier}"
398 " iterables of {member!r}>"
399 ).format(
400 iterable_identifier=iterable_identifier,
401 member=self.member_validator,
402 )
403
404
405 def deep_iterable(member_validator, iterable_validator=None):
406 """
407 A validator that performs deep validation of an iterable.
408
409 :param member_validator: Validator(s) to apply to iterable members
410 :param iterable_validator: Validator to apply to iterable itself
411 (optional)
412
413 .. versionadded:: 19.1.0
414
415 :raises TypeError: if any sub-validators fail
416 """
417 if isinstance(member_validator, (list, tuple)):
418 member_validator = and_(*member_validator)
419 return _DeepIterable(member_validator, iterable_validator)
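
For instance, validating a list of ints at both the container and member level:

    import attr

    ints_in_a_list = attr.validators.deep_iterable(
        member_validator=attr.validators.instance_of(int),
        iterable_validator=attr.validators.instance_of(list),
    )

    @attr.s
    class Build:
        revs = attr.ib(validator=ints_in_a_list)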
420
421
422 @attrs(repr=False, slots=True, hash=True)
423 class _DeepMapping:
424 key_validator = attrib(validator=is_callable())
425 value_validator = attrib(validator=is_callable())
426 mapping_validator = attrib(default=None, validator=optional(is_callable()))
427
428 def __call__(self, inst, attr, value):
429 """
430 We use a callable class to be able to change the ``__repr__``.
431 """
432 if self.mapping_validator is not None:
433 self.mapping_validator(inst, attr, value)
434
435 for key in value:
436 self.key_validator(inst, attr, key)
437 self.value_validator(inst, attr, value[key])
438
439 def __repr__(self):
440 return (
441 "<deep_mapping validator for objects mapping {key!r} to {value!r}>"
442 ).format(key=self.key_validator, value=self.value_validator)
443
444
445 def deep_mapping(key_validator, value_validator, mapping_validator=None):
446 """
447 A validator that performs deep validation of a dictionary.
448
449 :param key_validator: Validator to apply to dictionary keys
450 :param value_validator: Validator to apply to dictionary values
451 :param mapping_validator: Validator to apply to top-level mapping
452 attribute (optional)
453
454 .. versionadded:: 19.1.0
455
456 :raises TypeError: if any sub-validators fail
457 """
458 return _DeepMapping(key_validator, value_validator, mapping_validator)
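
And the mapping counterpart, sketched with str keys and int values:

    import attr

    str_to_int = attr.validators.deep_mapping(
        key_validator=attr.validators.instance_of(str),
        value_validator=attr.validators.instance_of(int),
    )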
459
460
461 @attrs(repr=False, frozen=True, slots=True)
462 class _NumberValidator:
463 bound = attrib()
464 compare_op = attrib()
465 compare_func = attrib()
466
467 def __call__(self, inst, attr, value):
468 """
469 We use a callable class to be able to change the ``__repr__``.
470 """
471 if not self.compare_func(value, self.bound):
472 raise ValueError(
473 "'{name}' must be {op} {bound}: {value}".format(
474 name=attr.name,
475 op=self.compare_op,
476 bound=self.bound,
477 value=value,
478 )
479 )
480
481 def __repr__(self):
482 return "<Validator for x {op} {bound}>".format(
483 op=self.compare_op, bound=self.bound
484 )
485
486
487 def lt(val):
488 """
489 A validator that raises `ValueError` if the initializer is called
490 with a number larger than or equal to *val*.
491
492 :param val: Exclusive upper bound for values
493
494 .. versionadded:: 21.3.0
495 """
496 return _NumberValidator(val, "<", operator.lt)
497
498
499 def le(val):
500 """
501 A validator that raises `ValueError` if the initializer is called
502 with a number greater than *val*.
503
504 :param val: Inclusive upper bound for values
505
506 .. versionadded:: 21.3.0
507 """
508 return _NumberValidator(val, "<=", operator.le)
509
510
511 def ge(val):
512 """
513 A validator that raises `ValueError` if the initializer is called
514 with a number smaller than *val*.
515
516 :param val: Inclusive lower bound for values
517
518 .. versionadded:: 21.3.0
519 """
520 return _NumberValidator(val, ">=", operator.ge)
521
522
523 def gt(val):
524 """
525 A validator that raises `ValueError` if the initializer is called
526 with a number smaller than or equal to *val*.
527
528 :param val: Exclusive lower bound for values
529
530 .. versionadded:: 21.3.0
531 """
532 return _NumberValidator(val, ">", operator.gt)
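
These compose naturally with the list form of *validator*, e.g. a bounded percentage (hypothetical class):

    import attr

    @attr.s
    class Percent:
        value = attr.ib(validator=[attr.validators.ge(0), attr.validators.le(100)])

    Percent(50)                # ok
    # Percent(101) would raise ValueError: 'value' must be <= 100: 101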
533
534
535 @attrs(repr=False, frozen=True, slots=True)
536 class _MaxLengthValidator:
537 max_length = attrib()
538
539 def __call__(self, inst, attr, value):
540 """
541 We use a callable class to be able to change the ``__repr__``.
542 """
543 if len(value) > self.max_length:
544 raise ValueError(
545 "Length of '{name}' must be <= {max}: {len}".format(
546 name=attr.name, max=self.max_length, len=len(value)
547 )
548 )
549
550 def __repr__(self):
551 return "<max_len validator for {max}>".format(max=self.max_length)
552
553
554 def max_len(length):
555 """
556 A validator that raises `ValueError` if the initializer is called
557 with a string or iterable that is longer than *length*.
558
559 :param int length: Maximum length of the string or iterable
560
561 .. versionadded:: 21.3.0
562 """
563 return _MaxLengthValidator(length)
564
565
566 @attrs(repr=False, frozen=True, slots=True)
567 class _MinLengthValidator:
568 min_length = attrib()
569
570 def __call__(self, inst, attr, value):
571 """
572 We use a callable class to be able to change the ``__repr__``.
573 """
574 if len(value) < self.min_length:
575 raise ValueError(
576 "Length of '{name}' must be => {min}: {len}".format(
577 name=attr.name, min=self.min_length, len=len(value)
578 )
579 )
580
581 def __repr__(self):
582 return "<min_len validator for {min}>".format(min=self.min_length)
583
584
585 def min_len(length):
586 """
587 A validator that raises `ValueError` if the initializer is called
588 with a string or iterable that is shorter than *length*.
589
590 :param int length: Minimum length of the string or iterable
591
592 .. versionadded:: 22.1.0
593 """
594 return _MinLengthValidator(length)
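
Both length validators work on anything that supports ``len()``, e.g.:

    import attr

    @attr.s
    class Tag:
        name = attr.ib(
            validator=[attr.validators.min_len(1), attr.validators.max_len(64)]
        )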
@@ -668,49 +668,84 class transaction(util.transactional):
668 668 self._file.close()
669 669 self._backupsfile.close()
670 670
671 quick = self._can_quick_abort(entries)
671 672 try:
672 if not entries and not self._backupentries:
673 if self._backupjournal:
674 self._opener.unlink(self._backupjournal)
675 if self._journal:
676 self._opener.unlink(self._journal)
677 return
678
679 self._report(_(b"transaction abort!\n"))
680
681 try:
682 for cat in sorted(self._abortcallback):
683 self._abortcallback[cat](self)
684 # Prevent double usage and help clear cycles.
685 self._abortcallback = None
686 _playback(
687 self._journal,
688 self._report,
689 self._opener,
690 self._vfsmap,
691 entries,
692 self._backupentries,
693 False,
694 checkambigfiles=self._checkambigfiles,
695 )
696 self._report(_(b"rollback completed\n"))
697 except BaseException as exc:
698 self._report(_(b"rollback failed - please run hg recover\n"))
699 self._report(
700 _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
701 )
673 if not quick:
674 self._report(_(b"transaction abort!\n"))
675 for cat in sorted(self._abortcallback):
676 self._abortcallback[cat](self)
677 # Prevent double usage and help clear cycles.
678 self._abortcallback = None
679 if quick:
680 self._do_quick_abort(entries)
681 else:
682 self._do_full_abort(entries)
702 683 finally:
703 684 self._journal = None
704 685 self._releasefn(self, False) # notify failure of transaction
705 686 self._releasefn = None # Help prevent cycles.
706 687
688 def _can_quick_abort(self, entries):
689 """False if any semantic content have been written on disk
690
691 True if nothing, except temporary files has been writen on disk."""
692 if entries:
693 return False
694 for e in self._backupentries:
695 if e[1]:
696 return False
697 return True
698
699 def _do_quick_abort(self, entries):
700 """(Silently) do a quick cleanup (see _can_quick_abort)"""
701 assert self._can_quick_abort(entries)
702 tmp_files = [e for e in self._backupentries if not e[1]]
703 for vfs_id, old_path, tmp_path, xxx in tmp_files:
704 assert not old_path
705 vfs = self._vfsmap[vfs_id]
706 try:
707 vfs.unlink(tmp_path)
708 except FileNotFoundError:
709 pass
710 if self._backupjournal:
711 self._opener.unlink(self._backupjournal)
712 if self._journal:
713 self._opener.unlink(self._journal)
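
To illustrate the quick-abort test with hypothetical data: each backup entry is a ``(vfs_id, original_path, tmp_path, cache_flag)`` tuple, and only entries whose ``original_path`` is empty (pure temporary files) allow the quick path:

    # Hypothetical entries; the shape follows the unpacking in _do_quick_abort.
    backupentries = [(b'plain', b'', b'journal.backup.abc', False)]
    assert all(not e[1] for e in backupentries)  # mirrors _can_quick_abort's loop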
714
715 def _do_full_abort(self, entries):
716 """(Noisily) rollback all the change introduced by the transaction"""
717 try:
718 _playback(
719 self._journal,
720 self._report,
721 self._opener,
722 self._vfsmap,
723 entries,
724 self._backupentries,
725 False,
726 checkambigfiles=self._checkambigfiles,
727 )
728 self._report(_(b"rollback completed\n"))
729 except BaseException as exc:
730 self._report(_(b"rollback failed - please run hg recover\n"))
731 self._report(
732 _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
733 )
734
707 735
708 736 BAD_VERSION_MSG = _(
709 737 b"journal was created by a different version of Mercurial\n"
710 738 )
711 739
712 740
713 def rollback(opener, vfsmap, file, report, checkambigfiles=None):
741 def rollback(
742 opener,
743 vfsmap,
744 file,
745 report,
746 checkambigfiles=None,
747 skip_journal_pattern=None,
748 ):
714 749 """Rolls back the transaction contained in the given file
715 750
716 751 Reads the entries in the specified file, and the corresponding
@@ -755,6 +790,9 def rollback(opener, vfsmap, file, repor
755 790 line = line[:-1]
756 791 l, f, b, c = line.split(b'\0')
757 792 backupentries.append((l, f, b, bool(c)))
793 if skip_journal_pattern is not None:
794 keep = lambda x: not skip_journal_pattern.match(x[1])
795 backupentries = [x for x in backupentries if keep(x)]
758 796
759 797 _playback(
760 798 file,
@@ -19,6 +19,21 import subprocess
19 19 import sys
20 20 import traceback
21 21
22 from typing import (
23 Any,
24 Callable,
25 Dict,
26 List,
27 NoReturn,
28 Optional,
29 Tuple,
30 Type,
31 TypeVar,
32 Union,
33 cast,
34 overload,
35 )
36
22 37 from .i18n import _
23 38 from .node import hex
24 39 from .pycompat import (
@@ -48,15 +63,23 from .utils import (
48 63 urlutil,
49 64 )
50 65
66 _ConfigItems = Dict[Tuple[bytes, bytes], object] # {(section, name) : value}
67 # The **opts args of the various write() methods can be basically anything, but
68 # there's no way to express it as "anything but str". So type it to be the
69 # handful of known types that are used.
70 _MsgOpts = Union[bytes, bool, List["_PromptChoice"]]
71 _PromptChoice = Tuple[bytes, bytes]
72 _Tui = TypeVar('_Tui', bound="ui")
73
51 74 urlreq = util.urlreq
52 75
53 76 # for use with str.translate(None, _keepalnum), to keep just alphanumerics
54 _keepalnum = b''.join(
77 _keepalnum: bytes = b''.join(
55 78 c for c in map(pycompat.bytechr, range(256)) if not c.isalnum()
56 79 )
57 80
58 81 # The config knobs that will be altered (if unset) by ui.tweakdefaults.
59 tweakrc = b"""
82 tweakrc: bytes = b"""
60 83 [ui]
61 84 # The rollback command is dangerous. As a rule, don't use it.
62 85 rollback = False
@@ -83,7 +106,7 showfunc = 1
83 106 word-diff = 1
84 107 """
85 108
86 samplehgrcs = {
109 samplehgrcs: Dict[bytes, bytes] = {
87 110 b'user': b"""# example user config (see 'hg help config' for more info)
88 111 [ui]
89 112 # name and email, e.g.
@@ -172,7 +195,7 def _maybebytesurl(maybestr):
172 195 class httppasswordmgrdbproxy:
173 196 """Delays loading urllib2 until it's needed."""
174 197
175 def __init__(self):
198 def __init__(self) -> None:
176 199 self._mgr = None
177 200
178 201 def _get_mgr(self):
@@ -195,7 +218,7 class httppasswordmgrdbproxy:
195 218 )
196 219
197 220
198 def _catchterm(*args):
221 def _catchterm(*args) -> NoReturn:
199 222 raise error.SignalInterrupt
200 223
201 224
@@ -204,11 +227,11 def _catchterm(*args):
204 227 _unset = object()
205 228
206 229 # _reqexithandlers: callbacks run at the end of a request
207 _reqexithandlers = []
230 _reqexithandlers: List = []
208 231
209 232
210 233 class ui:
211 def __init__(self, src=None):
234 def __init__(self, src: Optional["ui"] = None) -> None:
212 235 """Create a fresh new ui object if no src given
213 236
214 237 Use uimod.ui.load() to create a ui which knows global and user configs.
@@ -303,13 +326,13 class ui:
303 326 if k in self.environ:
304 327 self._exportableenviron[k] = self.environ[k]
305 328
306 def _new_source(self):
329 def _new_source(self) -> None:
307 330 self._ocfg.new_source()
308 331 self._tcfg.new_source()
309 332 self._ucfg.new_source()
310 333
311 334 @classmethod
312 def load(cls):
335 def load(cls: Type[_Tui]) -> _Tui:
313 336 """Create a ui and load global and user configs"""
314 337 u = cls()
315 338 # we always trust global config files and environment variables
@@ -335,7 +358,7 class ui:
335 358 u._new_source() # anything after that is a different level
336 359 return u
337 360
338 def _maybetweakdefaults(self):
361 def _maybetweakdefaults(self) -> None:
339 362 if not self.configbool(b'ui', b'tweakdefaults'):
340 363 return
341 364 if self._tweaked or self.plain(b'tweakdefaults'):
@@ -355,17 +378,17 class ui:
355 378 if not self.hasconfig(section, name):
356 379 self.setconfig(section, name, value, b"<tweakdefaults>")
357 380
358 def copy(self):
381 def copy(self: _Tui) -> _Tui:
359 382 return self.__class__(self)
360 383
361 def resetstate(self):
384 def resetstate(self) -> None:
362 385 """Clear internal state that shouldn't persist across commands"""
363 386 if self._progbar:
364 387 self._progbar.resetstate() # reset last-print time of progress bar
365 388 self.httppasswordmgrdb = httppasswordmgrdbproxy()
366 389
367 390 @contextlib.contextmanager
368 def timeblockedsection(self, key):
391 def timeblockedsection(self, key: bytes):
369 392 # this is open-coded below - search for timeblockedsection to find them
370 393 starttime = util.timer()
371 394 try:
@@ -410,10 +433,10 class ui:
410 433 finally:
411 434 self._uninterruptible = False
412 435
413 def formatter(self, topic, opts):
436 def formatter(self, topic: bytes, opts):
414 437 return formatter.formatter(self, self, topic, opts)
415 438
416 def _trusted(self, fp, f):
439 def _trusted(self, fp, f: bytes) -> bool:
417 440 st = util.fstat(fp)
418 441 if util.isowner(st):
419 442 return True
@@ -439,7 +462,7 class ui:
439 462
440 463 def read_resource_config(
441 464 self, name, root=None, trust=False, sections=None, remap=None
442 ):
465 ) -> None:
443 466 try:
444 467 fp = resourceutil.open_resource(name[0], name[1])
445 468 except IOError:
@@ -453,7 +476,7 class ui:
453 476
454 477 def readconfig(
455 478 self, filename, root=None, trust=False, sections=None, remap=None
456 ):
479 ) -> None:
457 480 try:
458 481 fp = open(filename, 'rb')
459 482 except IOError:
@@ -465,7 +488,7 class ui:
465 488
466 489 def _readconfig(
467 490 self, filename, fp, root=None, trust=False, sections=None, remap=None
468 ):
491 ) -> None:
469 492 with fp:
470 493 cfg = config.config()
471 494 trusted = sections or trust or self._trusted(fp, filename)
@@ -481,7 +504,9 class ui:
481 504
482 505 self._applyconfig(cfg, trusted, root)
483 506
484 def applyconfig(self, configitems, source=b"", root=None):
507 def applyconfig(
508 self, configitems: _ConfigItems, source=b"", root=None
509 ) -> None:
485 510 """Add configitems from a non-file source. Unlike with ``setconfig()``,
486 511 they can be overridden by subsequent config file reads. The items are
487 512 in the same format as ``configoverride()``, namely a dict of the
@@ -497,7 +522,7 class ui:
497 522
498 523 self._applyconfig(cfg, True, root)
499 524
500 def _applyconfig(self, cfg, trusted, root):
525 def _applyconfig(self, cfg, trusted, root) -> None:
501 526 if self.plain():
502 527 for k in (
503 528 b'debug',
@@ -540,7 +565,7 class ui:
540 565 root = os.path.expanduser(b'~')
541 566 self.fixconfig(root=root)
542 567
543 def fixconfig(self, root=None, section=None):
568 def fixconfig(self, root=None, section=None) -> None:
544 569 if section in (None, b'paths'):
545 570 # expand vars and ~
546 571 # translate paths relative to root (or home) into absolute paths
@@ -603,12 +628,12 class ui:
603 628 self._ucfg.backup(section, item),
604 629 )
605 630
606 def restoreconfig(self, data):
631 def restoreconfig(self, data) -> None:
607 632 self._ocfg.restore(data[0])
608 633 self._tcfg.restore(data[1])
609 634 self._ucfg.restore(data[2])
610 635
611 def setconfig(self, section, name, value, source=b''):
636 def setconfig(self, section, name, value, source=b'') -> None:
612 637 for cfg in (self._ocfg, self._tcfg, self._ucfg):
613 638 cfg.set(section, name, value, source)
614 639 self.fixconfig(section=section)
@@ -994,7 +1019,7 class ui:
994 1019 for name, value in self.configitems(section, untrusted):
995 1020 yield section, name, value
996 1021
997 def plain(self, feature=None):
1022 def plain(self, feature: Optional[bytes] = None) -> bool:
998 1023 """is plain mode active?
999 1024
1000 1025 Plain mode means that all configuration variables which affect
@@ -1068,46 +1093,16 class ui:
1068 1093 )
1069 1094 return user
1070 1095
1071 def shortuser(self, user):
1096 def shortuser(self, user: bytes) -> bytes:
1072 1097 """Return a short representation of a user name or email address."""
1073 1098 if not self.verbose:
1074 1099 user = stringutil.shortuser(user)
1075 1100 return user
1076 1101
1077 def expandpath(self, loc, default=None):
1078 """Return repository location relative to cwd or from [paths]"""
1079 msg = b'ui.expandpath is deprecated, use `get_*` functions from urlutil'
1080 self.deprecwarn(msg, b'6.0')
1081 try:
1082 p = self.getpath(loc)
1083 if p:
1084 return p.rawloc
1085 except error.RepoError:
1086 pass
1087
1088 if default:
1089 try:
1090 p = self.getpath(default)
1091 if p:
1092 return p.rawloc
1093 except error.RepoError:
1094 pass
1095
1096 return loc
1097
1098 1102 @util.propertycache
1099 1103 def paths(self):
1100 1104 return urlutil.paths(self)
1101 1105
1102 def getpath(self, *args, **kwargs):
1103 """see paths.getpath for details
1104
1105 This method exist as `getpath` need a ui for potential warning message.
1106 """
1107 msg = b'ui.getpath is deprecated, use `get_*` functions from urlutil'
1108 self.deprecwarn(msg, b'6.0')
1109 return self.paths.getpath(self, *args, **kwargs)
1110
1111 1106 @property
1112 1107 def fout(self):
1113 1108 return self._fout
@@ -1146,14 +1141,18 class ui:
1146 1141 self._fmsgout, self._fmsgerr = _selectmsgdests(self)
1147 1142
1148 1143 @contextlib.contextmanager
1149 def silent(self, error=False, subproc=False, labeled=False):
1144 def silent(
1145 self, error: bool = False, subproc: bool = False, labeled: bool = False
1146 ):
1150 1147 self.pushbuffer(error=error, subproc=subproc, labeled=labeled)
1151 1148 try:
1152 1149 yield
1153 1150 finally:
1154 1151 self.popbuffer()
1155 1152
1156 def pushbuffer(self, error=False, subproc=False, labeled=False):
1153 def pushbuffer(
1154 self, error: bool = False, subproc: bool = False, labeled: bool = False
1155 ) -> None:
1157 1156 """install a buffer to capture standard output of the ui object
1158 1157
1159 1158 If error is True, the error output will be captured too.
@@ -1172,7 +1171,7 class ui:
1172 1171 self._bufferstates.append((error, subproc, labeled))
1173 1172 self._bufferapplylabels = labeled
1174 1173
1175 def popbuffer(self):
1174 def popbuffer(self) -> bytes:
1176 1175 '''pop the last buffer and return the buffered output'''
1177 1176 self._bufferstates.pop()
1178 1177 if self._bufferstates:
@@ -1182,25 +1181,25 class ui:
1182 1181
1183 1182 return b"".join(self._buffers.pop())
1184 1183
1185 def _isbuffered(self, dest):
1184 def _isbuffered(self, dest) -> bool:
1186 1185 if dest is self._fout:
1187 1186 return bool(self._buffers)
1188 1187 if dest is self._ferr:
1189 1188 return bool(self._bufferstates and self._bufferstates[-1][0])
1190 1189 return False
1191 1190
1192 def canwritewithoutlabels(self):
1191 def canwritewithoutlabels(self) -> bool:
1193 1192 '''check if write skips the label'''
1194 1193 if self._buffers and not self._bufferapplylabels:
1195 1194 return True
1196 1195 return self._colormode is None
1197 1196
1198 def canbatchlabeledwrites(self):
1197 def canbatchlabeledwrites(self) -> bool:
1199 1198 '''check if write calls with labels are batchable'''
1200 1199 # Windows color printing is special, see ``write``.
1201 1200 return self._colormode != b'win32'
1202 1201
1203 def write(self, *args, **opts):
1202 def write(self, *args: bytes, **opts: _MsgOpts) -> None:
1204 1203 """write args to output
1205 1204
1206 1205 By default, this method simply writes to the buffer or stdout.
@@ -1258,10 +1257,10 class ui:
1258 1257 util.timer() - starttime
1259 1258 ) * 1000
1260 1259
1261 def write_err(self, *args, **opts):
1260 def write_err(self, *args: bytes, **opts: _MsgOpts) -> None:
1262 1261 self._write(self._ferr, *args, **opts)
1263 1262
1264 def _write(self, dest, *args, **opts):
1263 def _write(self, dest, *args: bytes, **opts: _MsgOpts) -> None:
1265 1264 # update write() as well if you touch this code
1266 1265 if self._isbuffered(dest):
1267 1266 label = opts.get('label', b'')
@@ -1272,7 +1271,7 class ui:
1272 1271 else:
1273 1272 self._writenobuf(dest, *args, **opts)
1274 1273
1275 def _writenobuf(self, dest, *args, **opts):
1274 def _writenobuf(self, dest, *args: bytes, **opts: _MsgOpts) -> None:
1276 1275 # update write() as well if you touch this code
1277 1276 if not opts.get('keepprogressbar', False):
1278 1277 self._progclear()
@@ -1314,7 +1313,7 class ui:
1314 1313 util.timer() - starttime
1315 1314 ) * 1000
1316 1315
1317 def _writemsg(self, dest, *args, **opts):
1316 def _writemsg(self, dest, *args: bytes, **opts: _MsgOpts) -> None:
1318 1317 timestamp = self.showtimestamp and opts.get('type') in {
1319 1318 b'debug',
1320 1319 b'error',
@@ -1331,10 +1330,10 class ui:
1331 1330 if timestamp:
1332 1331 dest.flush()
1333 1332
1334 def _writemsgnobuf(self, dest, *args, **opts):
1333 def _writemsgnobuf(self, dest, *args: bytes, **opts: _MsgOpts) -> None:
1335 1334 _writemsgwith(self._writenobuf, dest, *args, **opts)
1336 1335
1337 def flush(self):
1336 def flush(self) -> None:
1338 1337 # opencode timeblockedsection because this is a critical path
1339 1338 starttime = util.timer()
1340 1339 try:
@@ -1354,7 +1353,7 class ui:
1354 1353 util.timer() - starttime
1355 1354 ) * 1000
1356 1355
1357 def _isatty(self, fh):
1356 def _isatty(self, fh) -> bool:
1358 1357 if self.configbool(b'ui', b'nontty'):
1359 1358 return False
1360 1359 return procutil.isatty(fh)
@@ -1392,10 +1391,10 class ui:
1392 1391 finally:
1393 1392 self.restorefinout(fin, fout)
1394 1393
1395 def disablepager(self):
1394 def disablepager(self) -> None:
1396 1395 self._disablepager = True
1397 1396
1398 def pager(self, command):
1397 def pager(self, command: bytes) -> None:
1399 1398 """Start a pager for subsequent command output.
1400 1399
1401 1400 Commands which produce a long stream of output should call
@@ -1476,7 +1475,7 class ui:
1476 1475 # warning about a missing pager command.
1477 1476 self.disablepager()
1478 1477
1479 def _runpager(self, command, env=None):
1478 def _runpager(self, command: bytes, env=None) -> bool:
1480 1479 """Actually start the pager and set up file descriptors.
1481 1480
1482 1481 This is separate in part so that extensions (like chg) can
@@ -1556,7 +1555,7 class ui:
1556 1555 self._exithandlers.append((func, args, kwargs))
1557 1556 return func
1558 1557
1559 def interface(self, feature):
1558 def interface(self, feature: bytes) -> bytes:
1560 1559 """what interface to use for interactive console features?
1561 1560
1562 1561 The interface is controlled by the value of `ui.interface` but also by
@@ -1611,12 +1610,12 class ui:
1611 1610 defaultinterface = b"text"
1612 1611 i = self.config(b"ui", b"interface")
1613 1612 if i in alldefaults:
1614 defaultinterface = i
1613 defaultinterface = cast(bytes, i) # cast to help pytype
1615 1614
1616 choseninterface = defaultinterface
1615 choseninterface: bytes = defaultinterface
1617 1616 f = self.config(b"ui", b"interface.%s" % feature)
1618 1617 if f in availableinterfaces:
1619 choseninterface = f
1618 choseninterface = cast(bytes, f) # cast to help pytype
1620 1619
1621 1620 if i is not None and defaultinterface != i:
1622 1621 if f is not None:
@@ -1656,7 +1655,7 class ui:
1656 1655
1657 1656 return i
1658 1657
1659 def termwidth(self):
1658 def termwidth(self) -> int:
1660 1659 """how wide is the terminal in columns?"""
1661 1660 if b'COLUMNS' in encoding.environ:
1662 1661 try:
@@ -1693,7 +1692,11 class ui:
1693 1692
1694 1693 return i
1695 1694
1696 def _readline(self, prompt=b' ', promptopts=None):
1695 def _readline(
1696 self,
1697 prompt: bytes = b' ',
1698 promptopts: Optional[Dict[str, _MsgOpts]] = None,
1699 ) -> bytes:
1697 1700 # Replacing stdin/stdout temporarily is a hard problem on Python 3
1698 1701 # because they have to be text streams with *no buffering*. Instead,
1699 1702 # we use rawinput() only if call_readline() will be invoked by
@@ -1748,14 +1751,38 class ui:
1748 1751
1749 1752 return line
1750 1753
1754 if pycompat.TYPE_CHECKING:
1755
1756 @overload
1757 def prompt(self, msg: bytes, default: bytes) -> bytes:
1758 pass
1759
1760 @overload
1761 def prompt(self, msg: bytes, default: None) -> Optional[bytes]:
1762 pass
1763
1751 1764 def prompt(self, msg, default=b"y"):
1752 1765 """Prompt user with msg, read response.
1753 1766 If ui is not interactive, the default is returned.
1754 1767 """
1755 1768 return self._prompt(msg, default=default)
1756 1769
1757 def _prompt(self, msg, **opts):
1758 default = opts['default']
1770 if pycompat.TYPE_CHECKING:
1771
1772 @overload
1773 def _prompt(
1774 self, msg: bytes, default: bytes, **opts: _MsgOpts
1775 ) -> bytes:
1776 pass
1777
1778 @overload
1779 def _prompt(
1780 self, msg: bytes, default: None, **opts: _MsgOpts
1781 ) -> Optional[bytes]:
1782 pass
1783
1784 def _prompt(self, msg, default=b'y', **opts):
1785 opts = {**opts, 'default': default}
1759 1786 if not self.interactive():
1760 1787 self._writemsg(self._fmsgout, msg, b' ', type=b'prompt', **opts)
1761 1788 self._writemsg(
@@ -1775,7 +1802,7 class ui:
1775 1802 raise error.ResponseExpected()
1776 1803
1777 1804 @staticmethod
1778 def extractchoices(prompt):
1805 def extractchoices(prompt: bytes) -> Tuple[bytes, List[_PromptChoice]]:
1779 1806 """Extract prompt message and list of choices from specified prompt.
1780 1807
1781 1808 This returns tuple "(message, choices)", and "choices" is the
@@ -1795,6 +1822,9 class ui:
1795 1822 # choices containing spaces, ASCII, or basically anything
1796 1823 # except an ampersand followed by a character.
1797 1824 m = re.match(br'(?s)(.+?)\$\$([^$]*&[^ $].*)', prompt)
1825
1826 assert m is not None # help pytype
1827
1798 1828 msg = m.group(1)
1799 1829 choices = [p.strip(b' ') for p in m.group(2).split(b'$$')]
1800 1830
@@ -1804,7 +1834,7 class ui:
1804 1834
1805 1835 return (msg, [choicetuple(s) for s in choices])
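
For example, with the ``$$``-separated prompt format parsed above (taken from the method's doctest; ``ui`` is the class itself, as this is a static method):

    ui.extractchoices(b"awake? $$ &Yes $$ &No")
    # -> (b'awake? ', [(b'y', b'Yes'), (b'n', b'No')])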
1806 1836
1807 def promptchoice(self, prompt, default=0):
1837 def promptchoice(self, prompt: bytes, default: int = 0) -> int:
1808 1838 """Prompt user with a message, read response, and ensure it matches
1809 1839 one of the provided choices. The prompt is formatted as follows:
1810 1840
@@ -1824,7 +1854,9 class ui:
1824 1854 # TODO: shouldn't it be a warning?
1825 1855 self._writemsg(self._fmsgout, _(b"unrecognized response\n"))
1826 1856
1827 def getpass(self, prompt=None, default=None):
1857 def getpass(
1858 self, prompt: Optional[bytes] = None, default: Optional[bytes] = None
1859 ) -> Optional[bytes]:
1828 1860 if not self.interactive():
1829 1861 return default
1830 1862 try:
@@ -1847,7 +1879,7 class ui:
1847 1879 except EOFError:
1848 1880 raise error.ResponseExpected()
1849 1881
1850 def status(self, *msg, **opts):
1882 def status(self, *msg: bytes, **opts: _MsgOpts) -> None:
1851 1883 """write status message to output (if ui.quiet is False)
1852 1884
1853 1885 This adds an output label of "ui.status".
@@ -1855,21 +1887,21 class ui:
1855 1887 if not self.quiet:
1856 1888 self._writemsg(self._fmsgout, type=b'status', *msg, **opts)
1857 1889
1858 def warn(self, *msg, **opts):
1890 def warn(self, *msg: bytes, **opts: _MsgOpts) -> None:
1859 1891 """write warning message to output (stderr)
1860 1892
1861 1893 This adds an output label of "ui.warning".
1862 1894 """
1863 1895 self._writemsg(self._fmsgerr, type=b'warning', *msg, **opts)
1864 1896
1865 def error(self, *msg, **opts):
1897 def error(self, *msg: bytes, **opts: _MsgOpts) -> None:
1866 1898 """write error message to output (stderr)
1867 1899
1868 1900 This adds an output label of "ui.error".
1869 1901 """
1870 1902 self._writemsg(self._fmsgerr, type=b'error', *msg, **opts)
1871 1903
1872 def note(self, *msg, **opts):
1904 def note(self, *msg: bytes, **opts: _MsgOpts) -> None:
1873 1905 """write note to output (if ui.verbose is True)
1874 1906
1875 1907 This adds an output label of "ui.note".
@@ -1877,7 +1909,7 class ui:
1877 1909 if self.verbose:
1878 1910 self._writemsg(self._fmsgout, type=b'note', *msg, **opts)
1879 1911
1880 def debug(self, *msg, **opts):
1912 def debug(self, *msg: bytes, **opts: _MsgOpts) -> None:
1881 1913 """write debug message to output (if ui.debugflag is True)
1882 1914
1883 1915 This adds an output label of "ui.debug".
@@ -1894,14 +1926,14 class ui:
1894 1926
1895 1927 def edit(
1896 1928 self,
1897 text,
1898 user,
1899 extra=None,
1929 text: bytes,
1930 user: bytes,
1931 extra: Optional[Dict[bytes, Any]] = None, # TODO: value type of bytes?
1900 1932 editform=None,
1901 1933 pending=None,
1902 repopath=None,
1903 action=None,
1904 ):
1934 repopath: Optional[bytes] = None,
1935 action: Optional[bytes] = None,
1936 ) -> bytes:
1905 1937 if action is None:
1906 1938 self.develwarn(
1907 1939 b'action is None but will soon be a required '
@@ -1970,13 +2002,13 class ui:
1970 2002
1971 2003 def system(
1972 2004 self,
1973 cmd,
2005 cmd: bytes,
1974 2006 environ=None,
1975 cwd=None,
1976 onerr=None,
1977 errprefix=None,
1978 blockedtag=None,
1979 ):
2007 cwd: Optional[bytes] = None,
2008 onerr: Optional[Callable[[bytes], Exception]] = None,
2009 errprefix: Optional[bytes] = None,
2010 blockedtag: Optional[bytes] = None,
2011 ) -> int:
1980 2012 """execute shell command with appropriate output stream. command
1981 2013 output will be redirected if fout is not stdout.
1982 2014
@@ -2003,12 +2035,12 class ui:
2003 2035 raise onerr(errmsg)
2004 2036 return rc
2005 2037
2006 def _runsystem(self, cmd, environ, cwd, out):
2038 def _runsystem(self, cmd: bytes, environ, cwd: Optional[bytes], out) -> int:
2007 2039 """actually execute the given shell command (can be overridden by
2008 2040 extensions like chg)"""
2009 2041 return procutil.system(cmd, environ=environ, cwd=cwd, out=out)
2010 2042
2011 def traceback(self, exc=None, force=False):
2043 def traceback(self, exc=None, force: bool = False):
2012 2044 """print exception traceback if traceback printing enabled or forced.
2013 2045 only to call in exception handler. returns true if traceback
2014 2046 printed."""
@@ -2054,7 +2086,7 class ui:
2054 2086 )
2055 2087
2056 2088 @util.propertycache
2057 def _progbar(self):
2089 def _progbar(self) -> Optional[progress.progbar]:
2058 2090 """setup the progbar singleton to the ui object"""
2059 2091 if (
2060 2092 self.quiet
@@ -2065,14 +2097,16 class ui:
2065 2097 return None
2066 2098 return getprogbar(self)
2067 2099
2068 def _progclear(self):
2100 def _progclear(self) -> None:
2069 2101 """clear progress bar output if any. use it before any output"""
2070 2102 if not haveprogbar(): # nothing loaded yet
2071 2103 return
2072 2104 if self._progbar is not None and self._progbar.printed:
2073 2105 self._progbar.clear()
2074 2106
2075 def makeprogress(self, topic, unit=b"", total=None):
2107 def makeprogress(
2108 self, topic: bytes, unit: bytes = b"", total: Optional[int] = None
2109 ) -> scmutil.progress:
2076 2110 """Create a progress helper for the specified topic"""
2077 2111 if getattr(self._fmsgerr, 'structured', False):
2078 2112 # channel for machine-readable output with metadata, just send
@@ -2104,7 +2138,7 class ui:
2104 2138 """Returns a logger of the given name; or None if not registered"""
2105 2139 return self._loggers.get(name)
2106 2140
2107 def setlogger(self, name, logger):
2141 def setlogger(self, name, logger) -> None:
2108 2142 """Install logger which can be identified later by the given name
2109 2143
2110 2144 More than one loggers can be registered. Use extension or module
@@ -2112,7 +2146,7 class ui:
2112 2146 """
2113 2147 self._loggers[name] = logger
2114 2148
2115 def log(self, event, msgfmt, *msgargs, **opts):
2149 def log(self, event, msgfmt, *msgargs, **opts) -> None:
2116 2150 """hook for logging facility extensions
2117 2151
2118 2152 event should be a readily-identifiable subsystem, which will
@@ -2139,7 +2173,7 class ui:
2139 2173 finally:
2140 2174 self._loggers = registeredloggers
2141 2175
2142 def label(self, msg, label):
2176 def label(self, msg: bytes, label: bytes) -> bytes:
2143 2177 """style msg based on supplied label
2144 2178
2145 2179 If some color mode is enabled, this will add the necessary control
@@ -2153,7 +2187,9 class ui:
2153 2187 return color.colorlabel(self, msg, label)
2154 2188 return msg
2155 2189
2156 def develwarn(self, msg, stacklevel=1, config=None):
2190 def develwarn(
2191 self, msg: bytes, stacklevel: int = 1, config: Optional[bytes] = None
2192 ) -> None:
2157 2193 """issue a developer warning message
2158 2194
2159 2195 Use 'stacklevel' to report the offender some layers further up in the
@@ -2185,7 +2221,9 class ui:
2185 2221 del curframe
2186 2222 del calframe
2187 2223
2188 def deprecwarn(self, msg, version, stacklevel=2):
2224 def deprecwarn(
2225 self, msg: bytes, version: bytes, stacklevel: int = 2
2226 ) -> None:
2189 2227 """issue a deprecation warning
2190 2228
2191 2229 - msg: message explaining what is deprecated and how to upgrade,
@@ -2209,7 +2247,7 class ui:
2209 2247 return self._exportableenviron
2210 2248
2211 2249 @contextlib.contextmanager
2212 def configoverride(self, overrides, source=b""):
2250 def configoverride(self, overrides: _ConfigItems, source: bytes = b""):
2213 2251 """Context manager for temporary config overrides
2214 2252 `overrides` must be a dict of the following structure:
2215 2253 {(section, name) : value}"""
@@ -2227,7 +2265,7 class ui:
2227 2265 if (b'ui', b'quiet') in overrides:
2228 2266 self.fixconfig(section=b'ui')
2229 2267
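
A sketch of the override shape documented above (``ui`` being any ui instance):

    with ui.configoverride({(b'ui', b'quiet'): b'true'}, source=b'example'):
        pass  # previous values are restored on exit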
2230 def estimatememory(self):
2268 def estimatememory(self) -> Optional[int]:
2231 2269 """Provide an estimate for the available system memory in Bytes.
2232 2270
2233 2271 This can be overriden via ui.available-memory. It returns None, if
@@ -2246,10 +2284,10 class ui:
2246 2284
2247 2285 # we instantiate one globally shared progress bar to avoid
2248 2286 # competing progress bars when multiple UI objects get created
2249 _progresssingleton = None
2287 _progresssingleton: Optional[progress.progbar] = None
2250 2288
2251 2289
2252 def getprogbar(ui):
2290 def getprogbar(ui: ui) -> progress.progbar:
2253 2291 global _progresssingleton
2254 2292 if _progresssingleton is None:
2255 2293 # passing 'ui' object to the singleton is fishy,
@@ -2258,11 +2296,11 def getprogbar(ui):
2258 2296 return _progresssingleton
2259 2297
2260 2298
2261 def haveprogbar():
2299 def haveprogbar() -> bool:
2262 2300 return _progresssingleton is not None
2263 2301
2264 2302
2265 def _selectmsgdests(ui):
2303 def _selectmsgdests(ui: ui):
2266 2304 name = ui.config(b'ui', b'message-output')
2267 2305 if name == b'channel':
2268 2306 if ui.fmsg:
@@ -2278,7 +2316,7 def _selectmsgdests(ui):
2278 2316 raise error.Abort(b'invalid ui.message-output destination: %s' % name)
2279 2317
2280 2318
2281 def _writemsgwith(write, dest, *args, **opts):
2319 def _writemsgwith(write, dest, *args: bytes, **opts: _MsgOpts) -> None:
2282 2320 """Write ui message with the given ui._write*() function
2283 2321
2284 2322 The specified message type is translated to 'ui.<type>' label if the dest
@@ -113,7 +113,7 class unionrevlog(revlog.revlog):
113 113 self.bundlerevs.add(n)
114 114 n += 1
115 115
116 def _chunk(self, rev):
116 def _chunk(self, rev, df=None):
117 117 if rev <= self.repotiprev:
118 118 return revlog.revlog._chunk(self, rev)
119 119 return self.revlog2._chunk(self.node(rev))
@@ -146,7 +146,19 class unionrevlog(revlog.revlog):
146 146 func = super(unionrevlog, self)._revisiondata
147 147 return func(node, _df=_df, raw=raw)
148 148
149 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
149 def addrevision(
150 self,
151 text,
152 transaction,
153 link,
154 p1,
155 p2,
156 cachedelta=None,
157 node=None,
158 flags=revlog.REVIDX_DEFAULT_FLAGS,
159 deltacomputer=None,
160 sidedata=None,
161 ):
150 162 raise NotImplementedError
151 163
152 164 def addgroup(
@@ -157,7 +169,8 class unionrevlog(revlog.revlog):
157 169 alwayscache=False,
158 170 addrevisioncb=None,
159 171 duplicaterevisioncb=None,
160 maybemissingparents=False,
172 debug_info=None,
173 delta_base_reuse_policy=None,
161 174 ):
162 175 raise NotImplementedError
163 176
@@ -257,8 +270,8 class unionrepository:
257 270 def cancopy(self):
258 271 return False
259 272
260 def peer(self):
261 return unionpeer(self)
273 def peer(self, path=None):
274 return unionpeer(self, path=path)
262 275
263 276 def getcwd(self):
264 277 return encoding.getcwd() # always outside the repo
@@ -60,6 +60,7 from .utils import (
60 60
61 61 if pycompat.TYPE_CHECKING:
62 62 from typing import (
63 Iterable,
63 64 Iterator,
64 65 List,
65 66 Optional,
@@ -642,12 +643,12 class observedbufferedinputpipe(buffered
642 643 ``read()`` and ``readline()``.
643 644 """
644 645
645 def _fillbuffer(self):
646 res = super(observedbufferedinputpipe, self)._fillbuffer()
646 def _fillbuffer(self, size=_chunksize):
647 res = super(observedbufferedinputpipe, self)._fillbuffer(size=size)
647 648
648 649 fn = getattr(self._input._observer, 'osread', None)
649 650 if fn:
650 fn(res, _chunksize)
651 fn(res, size)
651 652
652 653 return res
653 654
@@ -2542,6 +2543,7 class atomictempfile:
2542 2543 # delegated methods
2543 2544 self.read = self._fp.read
2544 2545 self.write = self._fp.write
2546 self.writelines = self._fp.writelines
2545 2547 self.seek = self._fp.seek
2546 2548 self.tell = self._fp.tell
2547 2549 self.fileno = self._fp.fileno
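With `writelines` now delegated as well, an atomictempfile can stand in for an ordinary line-writing file; a rough sketch of the existing util.atomictempfile API:

    f = atomictempfile(b'somefile', b'wb')
    f.writelines([b'line 1\n', b'line 2\n'])
    f.close()  # only close() renames the temp file into place; discard() aborts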
@@ -2909,7 +2911,7 def iterfile(fp):
2909 2911
2910 2912
2911 2913 def iterlines(iterator):
2912 # type: (Iterator[bytes]) -> Iterator[bytes]
2914 # type: (Iterable[bytes]) -> Iterator[bytes]
2913 2915 for chunk in iterator:
2914 2916 for line in chunk.splitlines():
2915 2917 yield line
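The widened `Iterable` annotation matches how the helper actually behaves, e.g.:

    list(iterlines([b'a\nb', b'c\n']))  # -> [b'a', b'b', b'c']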
@@ -3212,10 +3214,7 def uvarintdecodestream(fh):
3212 3214
3213 3215 The passed argument is anything that has a ``.read(N)`` method.
3214 3216
3215 >>> try:
3216 ... from StringIO import StringIO as BytesIO
3217 ... except ImportError:
3218 ... from io import BytesIO
3217 >>> from io import BytesIO
3219 3218 >>> uvarintdecodestream(BytesIO(b'\\x00'))
3220 3219 0
3221 3220 >>> uvarintdecodestream(BytesIO(b'\\x01'))
@@ -18,6 +18,10 import sys
18 18 import threading
19 19 import time
20 20
21 from typing import (
22 BinaryIO,
23 )
24
21 25 from ..i18n import _
22 26 from ..pycompat import (
23 27 getattr,
@@ -29,6 +33,7 from .. import (
29 33 error,
30 34 policy,
31 35 pycompat,
36 typelib,
32 37 )
33 38
34 39 # Import like this to keep import-checker happy
@@ -118,8 +123,8 def unwrap_line_buffered(stream):
118 123 return stream
119 124
120 125
121 class WriteAllWrapper:
122 def __init__(self, orig):
126 class WriteAllWrapper(typelib.BinaryIO_Proxy):
127 def __init__(self, orig: BinaryIO):
123 128 self.orig = orig
124 129
125 130 def __getattr__(self, attr):
@@ -580,7 +585,7 def hgcmd():
580 585 return _gethgcmd()
581 586
582 587
583 def rundetached(args, condfn):
588 def rundetached(args, condfn) -> int:
584 589 """Execute the argument list in a detached process.
585 590
586 591 condfn is a callable which is called repeatedly and should return
@@ -616,6 +621,12 def rundetached(args, condfn):
616 621 if prevhandler is not None:
617 622 signal.signal(signal.SIGCHLD, prevhandler)
618 623
624 # pytype seems to get confused by not having a return in the finally
625 # block, and thinks the return value should be Optional[int] here. It
626 # appears to be https://github.com/google/pytype/issues/938, without
627 # the `with` clause.
628 pass # pytype: disable=bad-return-type
629
619 630
620 631 @contextlib.contextmanager
621 632 def uninterruptible(warn):
@@ -190,9 +190,9 def fileidlookup(store, fileid, identifi
190 190
191 191 ``fileid`` can be:
192 192
193 * A 20 or 32 byte binary node.
193 * A binary node of appropriate size (e.g. 20/32 bytes).
194 194 * An integer revision number
195 * A 40 or 64 byte hex node.
195 * A hex node of appropriate size (e.g. 40/64 bytes).
196 196 * A bytes that can be parsed as an integer representing a revision number.
197 197
198 198 ``identifier`` is used to populate ``error.LookupError`` with an identifier
@@ -208,14 +208,14 def fileidlookup(store, fileid, identifi
208 208 b'%d' % fileid, identifier, _(b'no match found')
209 209 )
210 210
211 if len(fileid) in (20, 32):
211 if len(fileid) == len(store.nullid):
212 212 try:
213 213 store.rev(fileid)
214 214 return fileid
215 215 except error.LookupError:
216 216 pass
217 217
218 if len(fileid) in (40, 64):
218 if len(fileid) == 2 * len(store.nullid):
219 219 try:
220 220 rawnode = bin(fileid)
221 221 store.rev(rawnode)
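Keying both length checks off ``store.nullid`` instead of the hardcoded 20/32 and 40/64 makes the lookup hash-agnostic; schematically:

    # sha1 store: len(store.nullid) == 20; a 32-byte hash store: 32
    if len(fileid) == len(store.nullid):        # binary node
        ...
    elif len(fileid) == 2 * len(store.nullid):  # hex-encoded node
        ...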
@@ -305,6 +305,7 def emitrevisions(
305 305 revisiondata=False,
306 306 assumehaveparentrevisions=False,
307 307 sidedata_helpers=None,
308 debug_info=None,
308 309 ):
309 310 """Generic implementation of ifiledata.emitrevisions().
310 311
@@ -370,6 +371,10 def emitrevisions(
370 371 ``sidedata_helpers`` (optional)
371 372 If not None, means that sidedata should be included.
372 373 See `revlogutil.sidedata.get_sidedata_helpers`.
374
375 ``debug_info`` (optional)
376 An optional dictionary to gather information about the bundling
377 process (if present; see config: debug.bundling.stats).
373 378 """
374 379
375 380 fnode = store.node
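The counters incremented throughout this function suggest the shape of the dictionary a caller would pass; a plausible initialization (key names taken from this patch, the authoritative set lives in the bundling code):

    debug_info = {
        'revision-total': 0,
        'merge-total': 0,
        'available-full': 0,
        'available-delta': 0,
        'denied-base-not-available': 0,
        'denied-delta-candeltafn': 0,
        'denied-delta-not-available': 0,
        'reused-storage-delta': 0,
        'computed-delta': 0,
        'delta-full': 0,
        'delta-against-prev': 0,
        'delta-against-p1': 0,
    }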
@@ -407,31 +412,59 def emitrevisions(
407 412 if rev == nullrev:
408 413 continue
409 414
415 debug_delta_source = None
416 if debug_info is not None:
417 debug_info['revision-total'] += 1
418
410 419 node = fnode(rev)
411 420 p1rev, p2rev = parents(rev)
412 421
422 if debug_info is not None:
423 if p1rev != p2rev and p1rev != nullrev and p2rev != nullrev:
424 debug_info['merge-total'] += 1
425
413 426 if deltaparentfn:
414 427 deltaparentrev = deltaparentfn(rev)
428 if debug_info is not None:
429 if deltaparentrev == nullrev:
430 debug_info['available-full'] += 1
431 else:
432 debug_info['available-delta'] += 1
433
415 434 else:
416 435 deltaparentrev = nullrev
417 436
418 437 # Forced delta against previous mode.
419 438 if deltamode == repository.CG_DELTAMODE_PREV:
439 if debug_info is not None:
440 debug_delta_source = "prev"
420 441 baserev = prevrev
421 442
422 443 # We're instructed to send fulltext. Honor that.
423 444 elif deltamode == repository.CG_DELTAMODE_FULL:
445 if debug_info is not None:
446 debug_delta_source = "full"
424 447 baserev = nullrev
425 448 # We're instructed to use p1. Honor that
426 449 elif deltamode == repository.CG_DELTAMODE_P1:
450 if debug_info is not None:
451 debug_delta_source = "p1"
427 452 baserev = p1rev
428 453
429 454 # There is a delta in storage. We try to use that because it
430 455 # amounts to effectively copying data from storage and is
431 456 # therefore the fastest.
432 457 elif is_usable_base(deltaparentrev):
458 if debug_info is not None:
459 debug_delta_source = "storage"
460 baserev = deltaparentrev
461 elif deltaparentrev == nullrev:
462 if debug_info is not None:
463 debug_delta_source = "storage"
433 464 baserev = deltaparentrev
434 465 else:
466 if deltaparentrev != nullrev and debug_info is not None:
467 debug_info['denied-base-not-available'] += 1
435 468 # No guarantee the receiver has the delta parent, or Storage has a
436 469 # fulltext revision.
437 470 #
@@ -441,22 +474,37 def emitrevisions(
441 474 # be close to this revision content.
442 475 #
443 476 # note: we could optimize between p1 and p2 in merges cases.
444 if is_usable_base(p1rev):
477 elif is_usable_base(p1rev):
478 if debug_info is not None:
479 debug_delta_source = "p1"
445 480 baserev = p1rev
446 481 # if p1 was not an option, try p2
447 482 elif is_usable_base(p2rev):
483 if debug_info is not None:
484 debug_delta_source = "p2"
448 485 baserev = p2rev
449 486 # Send delta against prev in despair
450 487 #
451 488 # using the closest available ancestors first might be better?
452 489 elif prevrev is not None:
490 if debug_info is not None:
491 debug_delta_source = "prev"
453 492 baserev = prevrev
454 493 else:
494 if debug_info is not None:
495 debug_delta_source = "full"
455 496 baserev = nullrev
456 497
457 498 # But we can't actually use our chosen delta base for whatever
458 499 # reason. Reset to fulltext.
459 if baserev != nullrev and (candeltafn and not candeltafn(baserev, rev)):
500 if (
501 baserev != nullrev
502 and candeltafn is not None
503 and not candeltafn(baserev, rev)
504 ):
505 if debug_info is not None:
506 debug_delta_source = "full"
507 debug_info['denied-delta-candeltafn'] += 1
460 508 baserev = nullrev
461 509
462 510 revision = None
@@ -468,6 +516,9 def emitrevisions(
468 516 try:
469 517 revision = store.rawdata(node)
470 518 except error.CensoredNodeError as e:
519 if debug_info is not None:
520 debug_delta_source = "full"
521 debug_info['denied-delta-not-available'] += 1
471 522 revision = e.tombstone
472 523
473 524 if baserev != nullrev:
@@ -479,12 +530,46 def emitrevisions(
479 530 elif (
480 531 baserev == nullrev and deltamode != repository.CG_DELTAMODE_PREV
481 532 ):
533 if debug_info is not None:
534 debug_info['computed-delta'] += 1 # close enough
535 debug_info['delta-full'] += 1
482 536 revision = store.rawdata(node)
483 537 emitted.add(rev)
484 538 else:
485 539 if revdifffn:
540 if debug_info is not None:
541 if debug_delta_source == "full":
542 debug_info['computed-delta'] += 1
543 debug_info['delta-full'] += 1
544 elif debug_delta_source == "prev":
545 debug_info['computed-delta'] += 1
546 debug_info['delta-against-prev'] += 1
547 elif debug_delta_source == "p1":
548 debug_info['computed-delta'] += 1
549 debug_info['delta-against-p1'] += 1
550 elif debug_delta_source == "storage":
551 debug_info['reused-storage-delta'] += 1
552 else:
553 assert False, 'unreachable'
554
486 555 delta = revdifffn(baserev, rev)
487 556 else:
557 if debug_info is not None:
558 if debug_delta_source == "full":
559 debug_info['computed-delta'] += 1
560 debug_info['delta-full'] += 1
561 elif debug_delta_source == "prev":
562 debug_info['computed-delta'] += 1
563 debug_info['delta-against-prev'] += 1
564 elif debug_delta_source == "p1":
565 debug_info['computed-delta'] += 1
566 debug_info['delta-against-p1'] += 1
567 elif debug_delta_source == "storage":
568 # seems quite unlikely to happen
569 debug_info['computed-delta'] += 1
570 debug_info['reused-storage-delta'] += 1
571 else:
572 assert False, 'unreachable'
488 573 delta = mdiff.textdiff(
489 574 store.rawdata(baserev), store.rawdata(rev)
490 575 )
@@ -14,6 +14,11 import re as remod
14 14 import textwrap
15 15 import types
16 16
17 from typing import (
18 Optional,
19 overload,
20 )
21
17 22 from ..i18n import _
18 23 from ..thirdparty import attr
19 24
@@ -30,6 +35,16 from .. import (
30 35 regexbytesescapemap = {i: (b'\\' + i) for i in _respecial}
31 36
32 37
38 @overload
39 def reescape(pat: bytes) -> bytes:
40 ...
41
42
43 @overload
44 def reescape(pat: str) -> str:
45 ...
46
47
33 48 def reescape(pat):
34 49 """Drop-in replacement for re.escape."""
35 50 # NOTE: it is intentional that this works on unicodes and not
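The paired `@overload` stubs let a type checker see that `reescape` preserves its argument's type while a single runtime implementation serves both:

    reescape(b'a+b')  # checker infers bytes
    reescape('a+b')   # checker infers str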
@@ -45,12 +60,12 def reescape(pat):
45 60 return pat.encode('latin1')
46 61
47 62
48 def pprint(o, bprefix=False, indent=0, level=0):
63 def pprint(o, bprefix: bool = False, indent: int = 0, level: int = 0) -> bytes:
49 64 """Pretty print an object."""
50 65 return b''.join(pprintgen(o, bprefix=bprefix, indent=indent, level=level))
51 66
52 67
53 def pprintgen(o, bprefix=False, indent=0, level=0):
68 def pprintgen(o, bprefix: bool = False, indent: int = 0, level: int = 0):
54 69 """Pretty print an object to a generator of atoms.
55 70
56 71 ``bprefix`` is a flag influencing whether bytestrings are preferred with
@@ -250,7 +265,7 def pprintgen(o, bprefix=False, indent=0
250 265 yield pycompat.byterepr(o)
251 266
252 267
253 def prettyrepr(o):
268 def prettyrepr(o) -> bytes:
254 269 """Pretty print a representation of a possibly-nested object"""
255 270 lines = []
256 271 rs = pycompat.byterepr(o)
@@ -281,7 +296,7 def prettyrepr(o):
281 296 return b'\n'.join(b' ' * l + s for l, s in lines)
282 297
283 298
284 def buildrepr(r):
299 def buildrepr(r) -> bytes:
285 300 """Format an optional printable representation from unexpanded bits
286 301
287 302 ======== =================================
@@ -305,12 +320,12 def buildrepr(r):
305 320 return pprint(r)
306 321
307 322
308 def binary(s):
323 def binary(s: bytes) -> bool:
309 324 """return true if a string is binary data"""
310 325 return bool(s and b'\0' in s)
311 326
312 327
313 def _splitpattern(pattern):
328 def _splitpattern(pattern: bytes):
314 329 if pattern.startswith(b're:'):
315 330 return b're', pattern[3:]
316 331 elif pattern.startswith(b'literal:'):
@@ -318,7 +333,7 def _splitpattern(pattern):
318 333 return b'literal', pattern
319 334
320 335
321 def stringmatcher(pattern, casesensitive=True):
336 def stringmatcher(pattern: bytes, casesensitive: bool = True):
322 337 """
323 338 accepts a string, possibly starting with 're:' or 'literal:' prefix.
324 339 returns the matcher name, pattern, and matcher function.
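Roughly, per the docstring (the doctests in the full source show the same shape):

    kind, pattern, matcher = stringmatcher(b're:foo.*')
    # kind == b're'; matcher(b'xfoobar') -> True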
@@ -379,7 +394,7 def stringmatcher(pattern, casesensitive
379 394 raise error.ProgrammingError(b'unhandled pattern kind: %s' % kind)
380 395
381 396
382 def substringregexp(pattern, flags=0):
397 def substringregexp(pattern: bytes, flags: int = 0):
383 398 """Build a regexp object from a string pattern possibly starting with
384 399 're:' or 'literal:' prefix.
385 400
@@ -431,7 +446,7 def substringregexp(pattern, flags=0):
431 446 raise error.ProgrammingError(b'unhandled pattern kind: %s' % kind)
432 447
433 448
434 def shortuser(user):
449 def shortuser(user: bytes) -> bytes:
435 450 """Return a short representation of a user name or email address."""
436 451 f = user.find(b'@')
437 452 if f >= 0:
@@ -448,7 +463,7 def shortuser(user):
448 463 return user
449 464
450 465
451 def emailuser(user):
466 def emailuser(user: bytes) -> bytes:
452 467 """Return the user portion of an email address."""
453 468 f = user.find(b'@')
454 469 if f >= 0:
@@ -459,7 +474,7 def emailuser(user):
459 474 return user
460 475
461 476
462 def email(author):
477 def email(author: bytes) -> bytes:
463 478 '''get email of author.'''
464 479 r = author.find(b'>')
465 480 if r == -1:
@@ -467,7 +482,7 def email(author):
467 482 return author[author.find(b'<') + 1 : r]
468 483
469 484
470 def person(author):
485 def person(author: bytes) -> bytes:
471 486 """Returns the name before an email address,
472 487 interpreting it as per RFC 5322
473 488
@@ -612,7 +627,7 def parsemailmap(mailmapcontent):
612 627 return mailmap
613 628
614 629
615 def mapname(mailmap, author):
630 def mapname(mailmap, author: bytes) -> bytes:
616 631 """Returns the author field according to the mailmap cache, or
617 632 the original author field.
618 633
@@ -663,7 +678,7 def mapname(mailmap, author):
663 678 _correctauthorformat = remod.compile(br'^[^<]+\s<[^<>]+@[^<>]+>$')
664 679
665 680
666 def isauthorwellformed(author):
681 def isauthorwellformed(author: bytes) -> bool:
667 682 """Return True if the author field is well formed
668 683 (ie "Contributor Name <contrib@email.dom>")
669 684
@@ -685,7 +700,7 def isauthorwellformed(author):
685 700 return _correctauthorformat.match(author) is not None
686 701
687 702
688 def firstline(text):
703 def firstline(text: bytes) -> bytes:
689 704 """Return the first line of the input"""
690 705 # Try to avoid running splitlines() on the whole string
691 706 i = text.find(b'\n')
@@ -697,21 +712,26 def firstline(text):
697 712 return b''
698 713
699 714
700 def ellipsis(text, maxlength=400):
715 def ellipsis(text: bytes, maxlength: int = 400) -> bytes:
701 716 """Trim string to at most maxlength (default: 400) columns in display."""
702 717 return encoding.trim(text, maxlength, ellipsis=b'...')
703 718
704 719
705 def escapestr(s):
720 def escapestr(s: bytes) -> bytes:
721 # "bytes" is also a typing shortcut for bytes, bytearray, and memoryview
706 722 if isinstance(s, memoryview):
707 723 s = bytes(s)
708 724 # call underlying function of s.encode('string_escape') directly for
709 725 # Python 3 compatibility
726 # pytype: disable=bad-return-type
710 727 return codecs.escape_encode(s)[0] # pytype: disable=module-attr
728 # pytype: enable=bad-return-type
711 729
712 730
713 def unescapestr(s):
731 def unescapestr(s: bytes) -> bytes:
732 # pytype: disable=bad-return-type
714 733 return codecs.escape_decode(s)[0] # pytype: disable=module-attr
734 # pytype: enable=bad-return-type
715 735
716 736
717 737 def forcebytestr(obj):
@@ -724,7 +744,7 def forcebytestr(obj):
724 744 return pycompat.bytestr(encoding.strtolocal(str(obj)))
725 745
726 746
727 def uirepr(s):
747 def uirepr(s: bytes) -> bytes:
728 748 # Avoid double backslash in Windows path repr()
729 749 return pycompat.byterepr(pycompat.bytestr(s)).replace(b'\\\\', b'\\')
730 750
@@ -838,7 +858,9 def _MBTextWrapper(**kwargs):
838 858 return tw(**kwargs)
839 859
840 860
841 def wrap(line, width, initindent=b'', hangindent=b''):
861 def wrap(
862 line: bytes, width: int, initindent: bytes = b'', hangindent: bytes = b''
863 ) -> bytes:
842 864 maxindent = max(len(hangindent), len(initindent))
843 865 if width <= maxindent:
844 866 # adjust for weird terminal size
@@ -875,7 +897,7 def wrap(line, width, initindent=b'', ha
875 897 }
876 898
877 899
878 def parsebool(s):
900 def parsebool(s: bytes) -> Optional[bool]:
879 901 """Parse s into a boolean.
880 902
881 903 If s is not a valid boolean, returns None.
@@ -883,7 +905,8 def parsebool(s):
883 905 return _booleans.get(s.lower(), None)
884 906
885 907
886 def parselist(value):
908 # TODO: make arg mandatory (and fix code below?)
909 def parselist(value: Optional[bytes]):
887 910 """parse a configuration value as a list of comma/space separated strings
888 911
889 912 [b'this', b'is', b'a small', b'test']
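Behaviour sketch for the two typed helpers (using the usual hgrc boolean spellings):

    parsebool(b'on')     # -> True
    parsebool(b'maybe')  # -> None, not a recognized boolean
    parselist(b'a, b c') # -> [b'a', b'b', b'c']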
@@ -973,7 +996,7 def parselist(value):
973 996 return result or []
974 997
975 998
976 def evalpythonliteral(s):
999 def evalpythonliteral(s: bytes):
977 1000 """Evaluate a string containing a Python literal expression"""
978 1001 # We could backport our tokenizer hack to rewrite '' to u'' if we want
979 1002 return ast.literal_eval(s.decode('latin1'))
@@ -24,6 +24,10 from . import (
24 24 stringutil,
25 25 )
26 26
27 from ..revlogutils import (
28 constants as revlog_constants,
29 )
30
27 31
28 32 if pycompat.TYPE_CHECKING:
29 33 from typing import (
@@ -241,7 +245,7 class url:
241 245 u.user = self.user
242 246 u.passwd = self.passwd
243 247 u.host = self.host
244 u.path = self.path
248 u.port = self.port
245 249 u.query = self.query
246 250 u.fragment = self.fragment
247 251 u._localpath = self._localpath
@@ -480,10 +484,10 def get_push_paths(repo, ui, dests):
480 484 if not dests:
481 485 if b'default-push' in ui.paths:
482 486 for p in ui.paths[b'default-push']:
483 yield p
487 yield p.get_push_variant()
484 488 elif b'default' in ui.paths:
485 489 for p in ui.paths[b'default']:
486 yield p
490 yield p.get_push_variant()
487 491 else:
488 492 raise error.ConfigError(
489 493 _(b'default repository not configured!'),
@@ -493,14 +497,14 def get_push_paths(repo, ui, dests):
493 497 for dest in dests:
494 498 if dest in ui.paths:
495 499 for p in ui.paths[dest]:
496 yield p
500 yield p.get_push_variant()
497 501 else:
498 502 path = try_path(ui, dest)
499 503 if path is None:
500 504 msg = _(b'repository %s does not exist')
501 505 msg %= dest
502 506 raise error.RepoError(msg)
503 yield path
507 yield path.get_push_variant()
504 508
505 509
506 510 def get_pull_paths(repo, ui, sources):
@@ -522,8 +526,6 def get_unique_push_path(action, repo, u
522 526 This is useful for commands and actions that do not support multiple
523 527 destinations (yet).
524 528
525 Note that for now, we cannot get multiple destination so this function is "trivial".
526
527 529 The `action` parameter will be used for the error message.
528 530 """
529 531 if dest is None:
@@ -544,80 +546,61 def get_unique_push_path(action, repo, u
544 546 return dests[0]
545 547
546 548
547 def get_unique_pull_path(action, repo, ui, source=None, default_branches=()):
549 def get_unique_pull_path_obj(action, ui, source=None):
548 550 """return a unique `(path, branch)` or abort if multiple are found
549 551
550 552 This is useful for commands and actions that do not support multiple
551 553 destinations (yet).
552 554
553 Note that for now, we cannot get multiple destination so this function is "trivial".
555 The `action` parameter will be used for the error message.
554 556
555 The `action` parameter will be used for the error message.
557 note: Ideally, this function would be called `get_unique_pull_path` to
558 mirror the `get_unique_push_path`, but the name was already taken.
556 559 """
557 urls = []
558 if source is None:
559 if b'default' in ui.paths:
560 urls.extend(p.rawloc for p in ui.paths[b'default'])
561 else:
562 # XXX this is the historical default behavior, but that is not
563 # great, consider breaking BC on this.
564 urls.append(b'default')
565 else:
566 if source in ui.paths:
567 urls.extend(p.rawloc for p in ui.paths[source])
568 else:
569 # Try to resolve as a local path or URI.
570 path = try_path(ui, source)
571 if path is not None:
572 urls.append(path.rawloc)
573 else:
574 urls.append(source)
575 if len(urls) != 1:
560 sources = []
561 if source is not None:
562 sources.append(source)
563
564 pull_paths = list(get_pull_paths(None, ui, sources=sources))
565 path_count = len(pull_paths)
566 if path_count != 1:
576 567 if source is None:
577 568 msg = _(
578 569 b"default path points to %d urls while %s only supports one"
579 570 )
580 msg %= (len(urls), action)
571 msg %= (path_count, action)
581 572 else:
582 573 msg = _(b"path points to %d urls while %s only supports one: %s")
583 msg %= (len(urls), action, source)
574 msg %= (path_count, action, source)
584 575 raise error.Abort(msg)
585 return parseurl(urls[0], default_branches)
576 return pull_paths[0]
577
578
579 def get_unique_pull_path(action, repo, ui, source=None, default_branches=()):
580 """return a unique `(url, branch)` or abort if multiple are found
581
582 See `get_unique_pull_path_obj` for details.
583 """
584 path = get_unique_pull_path_obj(action, ui, source=source)
585 return parseurl(path.rawloc, default_branches)
586 586
587 587
588 def get_clone_path(ui, source, default_branches=()):
589 """return the `(origsource, path, branch)` selected as clone source"""
590 urls = []
591 if source is None:
592 if b'default' in ui.paths:
593 urls.extend(p.rawloc for p in ui.paths[b'default'])
594 else:
595 # XXX this is the historical default behavior, but that is not
596 # great, consider breaking BC on this.
597 urls.append(b'default')
598 else:
599 if source in ui.paths:
600 urls.extend(p.rawloc for p in ui.paths[source])
601 else:
602 # Try to resolve as a local path or URI.
603 path = try_path(ui, source)
604 if path is not None:
605 urls.append(path.rawloc)
606 else:
607 urls.append(source)
608 if len(urls) != 1:
609 if source is None:
610 msg = _(
611 b"default path points to %d urls while only one is supported"
612 )
613 msg %= len(urls)
614 else:
615 msg = _(b"path points to %d urls while only one is supported: %s")
616 msg %= (len(urls), source)
617 raise error.Abort(msg)
618 url = urls[0]
619 clone_path, branch = parseurl(url, default_branches)
620 return url, clone_path, branch
588 def get_clone_path_obj(ui, source):
589 """return the `(origsource, url, branch)` selected as clone source"""
590 if source == b'':
591 return None
592 return get_unique_pull_path_obj(b'clone', ui, source=source)
593
594
595 def get_clone_path(ui, source, default_branches=None):
596 """return the `(origsource, url, branch)` selected as clone source"""
597 path = get_clone_path_obj(ui, source)
598 if path is None:
599 return (b'', b'', (None, default_branches))
600 if default_branches is None:
601 default_branches = []
602 branches = (path.branch, default_branches)
603 return path.rawloc, path.loc, branches
621 604
622 605
623 606 def parseurl(path, branches=None):
@@ -673,43 +656,6 class paths(dict):
673 656 new_paths.extend(_chain_path(p, ui, self))
674 657 self[name] = new_paths
675 658
676 def getpath(self, ui, name, default=None):
677 """Return a ``path`` from a string, falling back to default.
678
679 ``name`` can be a named path or locations. Locations are filesystem
680 paths or URIs.
681
682 Returns None if ``name`` is not a registered path, a URI, or a local
683 path to a repo.
684 """
685 msg = b'getpath is deprecated, use `get_*` functions from urlutil'
686 ui.deprecwarn(msg, b'6.0')
687 # Only fall back to default if no path was requested.
688 if name is None:
689 if not default:
690 default = ()
691 elif not isinstance(default, (tuple, list)):
692 default = (default,)
693 for k in default:
694 try:
695 return self[k][0]
696 except KeyError:
697 continue
698 return None
699
700 # Most likely empty string.
701 # This may need to raise in the future.
702 if not name:
703 return None
704 if name in self:
705 return self[name][0]
706 else:
707 # Try to resolve as a local path or URI.
708 path = try_path(ui, name)
709 if path is None:
710 raise error.RepoError(_(b'repository %s does not exist') % name)
711 return path.rawloc
712
713 659
714 660 _pathsuboptions = {}
715 661
@@ -736,7 +682,7 def pathsuboption(option, attr):
736 682 return register
737 683
738 684
739 @pathsuboption(b'pushurl', b'pushloc')
685 @pathsuboption(b'pushurl', b'_pushloc')
740 686 def pushurlpathoption(ui, path, value):
741 687 u = url(value)
742 688 # Actually require a URL.
@@ -788,6 +734,27 def bookmarks_mode_option(ui, path, valu
788 734 return value
789 735
790 736
737 DELTA_REUSE_POLICIES = {
738 b'default': None,
739 b'try-base': revlog_constants.DELTA_BASE_REUSE_TRY,
740 b'no-reuse': revlog_constants.DELTA_BASE_REUSE_NO,
741 b'forced': revlog_constants.DELTA_BASE_REUSE_FORCE,
742 }
743
744
745 @pathsuboption(b'delta-reuse-policy', b'delta_reuse_policy')
746 def delta_reuse_policy(ui, path, value):
747 if value not in DELTA_REUSE_POLICIES:
748 path_name = path.name
749 if path_name is None:
750 # this is an "anonymous" path, config comes from the global one
751 path_name = b'*'
752 msg = _(b'(paths.%s:delta-reuse-policy has unknown value: "%s")\n')
753 msg %= (path_name, value)
754 ui.warn(msg)
755 return DELTA_REUSE_POLICIES.get(value)
756
757
791 758 @pathsuboption(b'multi-urls', b'multi_urls')
792 759 def multiurls_pathoption(ui, path, value):
793 760 res = stringutil.parsebool(value)
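An hgrc sketch for the delta-reuse-policy suboption above (any value outside the table triggers the warning and falls back to the default policy):

    [paths]
    default = https://example.com/repo
    default:delta-reuse-policy = no-reuse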
@@ -848,7 +815,8 class path:
848 815 ``ui`` is the ``ui`` instance the path is coming from.
849 816 ``name`` is the symbolic name of the path.
850 817 ``rawloc`` is the raw location, as defined in the config.
851 ``pushloc`` is the raw locations pushes should be made to.
818 ``_pushloc`` is the raw locations pushes should be made to.
819 (see the `get_push_variant` method)
852 820
853 821 If ``name`` is not defined, we require that the location be a) a local
854 822 filesystem path with a .hg directory or b) a URL. If not,
@@ -864,21 +832,11 class path:
864 832 if not rawloc:
865 833 raise ValueError(b'rawloc must be defined')
866 834
867 # Locations may define branches via syntax <base>#<branch>.
868 u = url(rawloc)
869 branch = None
870 if u.fragment:
871 branch = u.fragment
872 u.fragment = None
835 self.name = name
873 836
874 self.url = u
875 # the url from the config/command line before dealing with `path://`
876 self.raw_url = u.copy()
877 self.branch = branch
878
879 self.name = name
880 self.rawloc = rawloc
881 self.loc = b'%s' % u
837 # set by path variant to point to their "non-push" version
838 self.main_path = None
839 self._setup_url(rawloc)
882 840
883 841 if validate_path:
884 842 self._validate_path()
@@ -892,16 +850,66 class path:
892 850
893 851 self._apply_suboptions(ui, sub_opts)
894 852
895 def copy(self):
896 """make a copy of this path object"""
853 def _setup_url(self, rawloc):
854 # Locations may define branches via syntax <base>#<branch>.
855 u = url(rawloc)
856 branch = None
857 if u.fragment:
858 branch = u.fragment
859 u.fragment = None
860
861 self.url = u
862 # the url from the config/command line before dealing with `path://`
863 self.raw_url = u.copy()
864 self.branch = branch
865
866 self.rawloc = rawloc
867 self.loc = b'%s' % u
868
869 def copy(self, new_raw_location=None):
870 """make a copy of this path object
871
872 When `new_raw_location` is set, the new path will point to it.
873 This is used by the scheme extension to expand the scheme.
874 """
897 875 new = self.__class__()
898 876 for k, v in self.__dict__.items():
899 877 new_copy = getattr(v, 'copy', None)
900 878 if new_copy is not None:
901 879 v = new_copy()
902 880 new.__dict__[k] = v
881 if new_raw_location is not None:
882 new._setup_url(new_raw_location)
903 883 return new
904 884
885 @property
886 def is_push_variant(self):
887 """is this a path variant to be used for pushing"""
888 return self.main_path is not None
889
890 def get_push_variant(self):
891 """get a "copy" of the path, but suitable for pushing
892
893 This means using the value of the `pushurl` option (if any) as the url.
894
895 The original path is available in the `main_path` attribute.
896 """
897 if self.main_path:
898 return self
899 new = self.copy()
900 new.main_path = self
901 if self._pushloc:
902 new._setup_url(self._pushloc)
903 return new
904
905 def pushloc(self):
906 """compatibility layer for the deprecated attributes"""
907 from .. import util # avoid a cycle
908
909 msg = "don't use path.pushloc, use path.get_push_variant()"
910 util.nouideprecwarn(msg, b"6.5")
911 return self._pushloc
912
905 913 def _validate_path(self):
906 914 # When given a raw location but not a symbolic name, validate the
907 915 # location is valid.
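Putting this hunk together, a rough sketch of the push-variant behaviour (assuming a `pushurl` suboption is configured for the path):

    p = ui.paths[b'default'][0]
    push = p.get_push_variant()
    assert push.is_push_variant and push.main_path is p
    # push.loc is the pushurl when set, otherwise p's own location;
    # branches still come from the fragment syntax, e.g. b'http://host/repo#stable'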
@@ -15,6 +15,7 from .utils import stringutil
15 15 from . import (
16 16 error,
17 17 pycompat,
18 requirements,
18 19 revlog,
19 20 util,
20 21 )
@@ -210,6 +211,12 class verifier:
210 211 self._crosscheckfiles(filelinkrevs, filenodes)
211 212 totalfiles, filerevisions = self._verifyfiles(filenodes, filelinkrevs)
212 213
214 if self.errors:
215 ui.warn(_(b"not checking dirstate because of previous errors\n"))
216 dirstate_errors = 0
217 else:
218 dirstate_errors = self._verify_dirstate()
219
213 220 # final report
214 221 ui.status(
215 222 _(b"checked %d changesets with %d changes to %d files\n")
@@ -225,6 +232,11 class verifier:
225 232 msg = _(b"(first damaged changeset appears to be %d)\n")
226 233 msg %= min(self.badrevs)
227 234 ui.warn(msg)
235 if dirstate_errors:
236 ui.warn(
237 _(b"dirstate inconsistent with current parent's manifest\n")
238 )
239 ui.warn(_(b"%d dirstate errors\n") % dirstate_errors)
228 240 return 1
229 241 return 0
230 242
@@ -585,3 +597,25 class verifier:
585 597 self._warn(_(b"warning: orphan data file '%s'") % f)
586 598
587 599 return len(files), revisions
600
601 def _verify_dirstate(self):
602 """Check that the dirstate is consistent with the parent's manifest"""
603 repo = self.repo
604 ui = self.ui
605 ui.status(_(b"checking dirstate\n"))
606
607 parent1, parent2 = repo.dirstate.parents()
608 m1 = repo[parent1].manifest()
609 m2 = repo[parent2].manifest()
610 dirstate_errors = 0
611
612 is_narrow = requirements.NARROW_REQUIREMENT in repo.requirements
613 narrow_matcher = repo.narrowmatch() if is_narrow else None
614
615 for err in repo.dirstate.verify(m1, m2, parent1, narrow_matcher):
616 ui.error(err)
617 dirstate_errors += 1
618
619 if dirstate_errors:
620 self.errors += dirstate_errors
621 return dirstate_errors
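With this change, a clean run would roughly print (output sketch; ordering per the calls above, counts illustrative):

    $ hg verify
    checking changesets
    checking manifests
    crosschecking files in changesets and manifests
    checking files
    checking dirstate
    checked 2 changesets with 2 changes to 2 files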
@@ -11,6 +11,10 import shutil
11 11 import stat
12 12 import threading
13 13
14 from typing import (
15 Optional,
16 )
17
14 18 from .i18n import _
15 19 from .pycompat import (
16 20 delattr,
@@ -26,7 +30,7 from . import (
26 30 )
27 31
28 32
29 def _avoidambig(path, oldstat):
33 def _avoidambig(path: bytes, oldstat):
30 34 """Avoid file stat ambiguity forcibly
31 35
32 36 This function causes copying ``path`` file, if it is owned by
@@ -60,16 +64,17 class abstractvfs:
60 64 '''Prevent instantiation; don't call this from subclasses.'''
61 65 raise NotImplementedError('attempted instantiating ' + str(type(self)))
62 66
63 def __call__(self, path, mode=b'rb', **kwargs):
67 # TODO: type return, which is util.posixfile wrapped by a proxy
68 def __call__(self, path: bytes, mode: bytes = b'rb', **kwargs):
64 69 raise NotImplementedError
65 70
66 def _auditpath(self, path, mode):
71 def _auditpath(self, path: bytes, mode: bytes):
67 72 raise NotImplementedError
68 73
69 def join(self, path, *insidef):
74 def join(self, path: Optional[bytes], *insidef: bytes) -> bytes:
70 75 raise NotImplementedError
71 76
72 def tryread(self, path):
77 def tryread(self, path: bytes) -> bytes:
73 78 '''gracefully return an empty string for missing files'''
74 79 try:
75 80 return self.read(path)
@@ -77,7 +82,7 class abstractvfs:
77 82 pass
78 83 return b""
79 84
80 def tryreadlines(self, path, mode=b'rb'):
85 def tryreadlines(self, path: bytes, mode: bytes = b'rb'):
81 86 '''gracefully return an empty array for missing files'''
82 87 try:
83 88 return self.readlines(path, mode=mode)
@@ -95,57 +100,61 class abstractvfs:
95 100 """
96 101 return self.__call__
97 102
98 def read(self, path):
103 def read(self, path: bytes) -> bytes:
99 104 with self(path, b'rb') as fp:
100 105 return fp.read()
101 106
102 def readlines(self, path, mode=b'rb'):
107 def readlines(self, path: bytes, mode: bytes = b'rb'):
103 108 with self(path, mode=mode) as fp:
104 109 return fp.readlines()
105 110
106 def write(self, path, data, backgroundclose=False, **kwargs):
111 def write(
112 self, path: bytes, data: bytes, backgroundclose=False, **kwargs
113 ) -> int:
107 114 with self(path, b'wb', backgroundclose=backgroundclose, **kwargs) as fp:
108 115 return fp.write(data)
109 116
110 def writelines(self, path, data, mode=b'wb', notindexed=False):
117 def writelines(
118 self, path: bytes, data: bytes, mode: bytes = b'wb', notindexed=False
119 ) -> None:
111 120 with self(path, mode=mode, notindexed=notindexed) as fp:
112 121 return fp.writelines(data)
113 122
114 def append(self, path, data):
123 def append(self, path: bytes, data: bytes) -> int:
115 124 with self(path, b'ab') as fp:
116 125 return fp.write(data)
117 126
118 def basename(self, path):
127 def basename(self, path: bytes) -> bytes:
119 128 """return base element of a path (as os.path.basename would do)
120 129
121 130 This exists to allow handling of strange encoding if needed."""
122 131 return os.path.basename(path)
123 132
124 def chmod(self, path, mode):
133 def chmod(self, path: bytes, mode: int) -> None:
125 134 return os.chmod(self.join(path), mode)
126 135
127 def dirname(self, path):
136 def dirname(self, path: bytes) -> bytes:
128 137 """return dirname element of a path (as os.path.dirname would do)
129 138
130 139 This exists to allow handling of strange encoding if needed."""
131 140 return os.path.dirname(path)
132 141
133 def exists(self, path=None):
142 def exists(self, path: Optional[bytes] = None) -> bool:
134 143 return os.path.exists(self.join(path))
135 144
136 145 def fstat(self, fp):
137 146 return util.fstat(fp)
138 147
139 def isdir(self, path=None):
148 def isdir(self, path: Optional[bytes] = None) -> bool:
140 149 return os.path.isdir(self.join(path))
141 150
142 def isfile(self, path=None):
151 def isfile(self, path: Optional[bytes] = None) -> bool:
143 152 return os.path.isfile(self.join(path))
144 153
145 def islink(self, path=None):
154 def islink(self, path: Optional[bytes] = None) -> bool:
146 155 return os.path.islink(self.join(path))
147 156
148 def isfileorlink(self, path=None):
157 def isfileorlink(self, path: Optional[bytes] = None) -> bool:
149 158 """return whether path is a regular file or a symlink
150 159
151 160 Unlike isfile, this doesn't follow symlinks."""
@@ -156,7 +165,7 class abstractvfs:
156 165 mode = st.st_mode
157 166 return stat.S_ISREG(mode) or stat.S_ISLNK(mode)
158 167
159 def _join(self, *paths):
168 def _join(self, *paths: bytes) -> bytes:
160 169 root_idx = 0
161 170 for idx, p in enumerate(paths):
162 171 if os.path.isabs(p) or p.startswith(self._dir_sep):
@@ -166,41 +175,48 class abstractvfs:
166 175 paths = [p for p in paths if p]
167 176 return self._dir_sep.join(paths)
168 177
169 def reljoin(self, *paths):
178 def reljoin(self, *paths: bytes) -> bytes:
170 179 """join various elements of a path together (as os.path.join would do)
171 180
172 181 The vfs base is not injected so that path stay relative. This exists
173 182 to allow handling of strange encoding if needed."""
174 183 return self._join(*paths)
175 184
176 def split(self, path):
185 def split(self, path: bytes):
177 186 """split top-most element of a path (as os.path.split would do)
178 187
179 188 This exists to allow handling of strange encoding if needed."""
180 189 return os.path.split(path)
181 190
182 def lexists(self, path=None):
191 def lexists(self, path: Optional[bytes] = None) -> bool:
183 192 return os.path.lexists(self.join(path))
184 193
185 def lstat(self, path=None):
194 def lstat(self, path: Optional[bytes] = None):
186 195 return os.lstat(self.join(path))
187 196
188 def listdir(self, path=None):
197 def listdir(self, path: Optional[bytes] = None):
189 198 return os.listdir(self.join(path))
190 199
191 def makedir(self, path=None, notindexed=True):
200 def makedir(self, path: Optional[bytes] = None, notindexed=True):
192 201 return util.makedir(self.join(path), notindexed)
193 202
194 def makedirs(self, path=None, mode=None):
203 def makedirs(
204 self, path: Optional[bytes] = None, mode: Optional[int] = None
205 ):
195 206 return util.makedirs(self.join(path), mode)
196 207
197 def makelock(self, info, path):
208 def makelock(self, info, path: bytes):
198 209 return util.makelock(info, self.join(path))
199 210
200 def mkdir(self, path=None):
211 def mkdir(self, path: Optional[bytes] = None):
201 212 return os.mkdir(self.join(path))
202 213
203 def mkstemp(self, suffix=b'', prefix=b'tmp', dir=None):
214 def mkstemp(
215 self,
216 suffix: bytes = b'',
217 prefix: bytes = b'tmp',
218 dir: Optional[bytes] = None,
219 ):
204 220 fd, name = pycompat.mkstemp(
205 221 suffix=suffix, prefix=prefix, dir=self.join(dir)
206 222 )
@@ -210,13 +226,13 class abstractvfs:
210 226 else:
211 227 return fd, fname
212 228
213 def readdir(self, path=None, stat=None, skip=None):
229 def readdir(self, path: Optional[bytes] = None, stat=None, skip=None):
214 230 return util.listdir(self.join(path), stat, skip)
215 231
216 def readlock(self, path):
232 def readlock(self, path: bytes) -> bytes:
217 233 return util.readlock(self.join(path))
218 234
219 def rename(self, src, dst, checkambig=False):
235 def rename(self, src: bytes, dst: bytes, checkambig=False):
220 236 """Rename from src to dst
221 237
222 238 checkambig argument is used with util.filestat, and is useful
@@ -238,18 +254,20 class abstractvfs:
238 254 return ret
239 255 return util.rename(srcpath, dstpath)
240 256
241 def readlink(self, path):
257 def readlink(self, path: bytes) -> bytes:
242 258 return util.readlink(self.join(path))
243 259
244 def removedirs(self, path=None):
260 def removedirs(self, path: Optional[bytes] = None):
245 261 """Remove a leaf directory and all empty intermediate ones"""
246 262 return util.removedirs(self.join(path))
247 263
248 def rmdir(self, path=None):
264 def rmdir(self, path: Optional[bytes] = None):
249 265 """Remove an empty directory."""
250 266 return os.rmdir(self.join(path))
251 267
252 def rmtree(self, path=None, ignore_errors=False, forcibly=False):
268 def rmtree(
269 self, path: Optional[bytes] = None, ignore_errors=False, forcibly=False
270 ):
253 271 """Remove a directory tree recursively
254 272
255 273 If ``forcibly``, this tries to remove READ-ONLY files, too.
@@ -272,28 +290,30 class abstractvfs:
272 290 self.join(path), ignore_errors=ignore_errors, onerror=onerror
273 291 )
274 292
275 def setflags(self, path, l, x):
293 def setflags(self, path: bytes, l: bool, x: bool):
276 294 return util.setflags(self.join(path), l, x)
277 295
278 def stat(self, path=None):
296 def stat(self, path: Optional[bytes] = None):
279 297 return os.stat(self.join(path))
280 298
281 def unlink(self, path=None):
299 def unlink(self, path: Optional[bytes] = None):
282 300 return util.unlink(self.join(path))
283 301
284 def tryunlink(self, path=None):
302 def tryunlink(self, path: Optional[bytes] = None):
285 303 """Attempt to remove a file, ignoring missing file errors."""
286 304 util.tryunlink(self.join(path))
287 305
288 def unlinkpath(self, path=None, ignoremissing=False, rmdir=True):
306 def unlinkpath(
307 self, path: Optional[bytes] = None, ignoremissing=False, rmdir=True
308 ):
289 309 return util.unlinkpath(
290 310 self.join(path), ignoremissing=ignoremissing, rmdir=rmdir
291 311 )
292 312
293 def utime(self, path=None, t=None):
313 def utime(self, path: Optional[bytes] = None, t=None):
294 314 return os.utime(self.join(path), t)
295 315
296 def walk(self, path=None, onerror=None):
316 def walk(self, path: Optional[bytes] = None, onerror=None):
297 317 """Yield (dirpath, dirs, files) tuple for each directories under path
298 318
299 319 ``dirpath`` is relative one from the root of this vfs. This
@@ -360,7 +380,7 class vfs(abstractvfs):
360 380
361 381 def __init__(
362 382 self,
363 base,
383 base: bytes,
364 384 audit=True,
365 385 cacheaudited=False,
366 386 expandpath=False,
@@ -381,7 +401,7 class vfs(abstractvfs):
381 401 self.options = {}
382 402
383 403 @util.propertycache
384 def _cansymlink(self):
404 def _cansymlink(self) -> bool:
385 405 return util.checklink(self.base)
386 406
387 407 @util.propertycache
@@ -393,7 +413,7 class vfs(abstractvfs):
393 413 return
394 414 os.chmod(name, self.createmode & 0o666)
395 415
396 def _auditpath(self, path, mode):
416 def _auditpath(self, path, mode) -> None:
397 417 if self._audit:
398 418 if os.path.isabs(path) and path.startswith(self.base):
399 419 path = os.path.relpath(path, self.base)
@@ -402,10 +422,35 class vfs(abstractvfs):
402 422 raise error.Abort(b"%s: %r" % (r, path))
403 423 self.audit(path, mode=mode)
404 424
425 def isfileorlink_checkdir(
426 self, dircache, path: Optional[bytes] = None
427 ) -> bool:
428 """return True if the path is a regular file or a symlink and
429 the directories along the path are "normal", that is
430 not symlinks or nested hg repositories.
431
432 Ignores the `_audit` setting, and checks the directories regardless.
433 `dircache` is used to cache the directory checks.
434 """
435 try:
436 for prefix in pathutil.finddirs_rev_noroot(util.localpath(path)):
437 if prefix in dircache:
438 res = dircache[prefix]
439 else:
440 res = pathutil.pathauditor._checkfs_exists(
441 self.base, prefix, path
442 )
443 dircache[prefix] = res
444 if not res:
445 return False
446 except (OSError, error.Abort):
447 return False
448 return self.isfileorlink(path)
449
405 450 def __call__(
406 451 self,
407 path,
408 mode=b"r",
452 path: bytes,
453 mode: bytes = b"rb",
409 454 atomictemp=False,
410 455 notindexed=False,
411 456 backgroundclose=False,
@@ -518,7 +563,7 class vfs(abstractvfs):
518 563
519 564 return fp
520 565
521 def symlink(self, src, dst):
566 def symlink(self, src: bytes, dst: bytes) -> None:
522 567 self.audit(dst)
523 568 linkname = self.join(dst)
524 569 util.tryunlink(linkname)
@@ -538,7 +583,7 class vfs(abstractvfs):
538 583 else:
539 584 self.write(dst, src)
540 585
541 def join(self, path, *insidef):
586 def join(self, path: Optional[bytes], *insidef: bytes) -> bytes:
542 587 if path:
543 588 parts = [self.base, path]
544 589 parts.extend(insidef)
@@ -551,7 +596,7 opener = vfs
551 596
552 597
553 598 class proxyvfs(abstractvfs):
554 def __init__(self, vfs):
599 def __init__(self, vfs: "vfs"):
555 600 self.vfs = vfs
556 601
557 602 def _auditpath(self, path, mode):
@@ -569,14 +614,14 class proxyvfs(abstractvfs):
569 614 class filtervfs(proxyvfs, abstractvfs):
570 615 '''Wrapper vfs for filtering filenames with a function.'''
571 616
572 def __init__(self, vfs, filter):
617 def __init__(self, vfs: "vfs", filter):
573 618 proxyvfs.__init__(self, vfs)
574 619 self._filter = filter
575 620
576 def __call__(self, path, *args, **kwargs):
621 def __call__(self, path: bytes, *args, **kwargs):
577 622 return self.vfs(self._filter(path), *args, **kwargs)
578 623
579 def join(self, path, *insidef):
624 def join(self, path: Optional[bytes], *insidef: bytes) -> bytes:
580 625 if path:
581 626 return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
582 627 else:
@@ -589,15 +634,15 filteropener = filtervfs
589 634 class readonlyvfs(proxyvfs):
590 635 '''Wrapper vfs preventing any writing.'''
591 636
592 def __init__(self, vfs):
637 def __init__(self, vfs: "vfs"):
593 638 proxyvfs.__init__(self, vfs)
594 639
595 def __call__(self, path, mode=b'r', *args, **kw):
640 def __call__(self, path: bytes, mode: bytes = b'rb', *args, **kw):
596 641 if mode not in (b'r', b'rb'):
597 642 raise error.Abort(_(b'this vfs is read only'))
598 643 return self.vfs(path, mode, *args, **kw)
599 644
600 def join(self, path, *insidef):
645 def join(self, path: Optional[bytes], *insidef: bytes) -> bytes:
601 646 return self.vfs.join(path, *insidef)
602 647
603 648
@@ -14,6 +14,13 import os
14 14 import random
15 15 import subprocess
16 16
17 from typing import (
18 List,
19 NoReturn,
20 Optional,
21 Tuple,
22 )
23
17 24 from . import (
18 25 encoding,
19 26 pycompat,
@@ -356,7 +363,7 except AttributeError:
356 363 _kernel32.PeekNamedPipe.restype = _BOOL
357 364
358 365
359 def _raiseoserror(name):
366 def _raiseoserror(name: bytes) -> NoReturn:
360 367 # Force the code to a signed int to avoid an 'int too large' error.
361 368 # See https://bugs.python.org/issue28474
362 369 code = _kernel32.GetLastError()
@@ -368,7 +375,7 def _raiseoserror(name):
368 375 )
369 376
370 377
371 def _getfileinfo(name):
378 def _getfileinfo(name: bytes) -> _BY_HANDLE_FILE_INFORMATION:
372 379 fh = _kernel32.CreateFileA(
373 380 name,
374 381 0,
@@ -389,7 +396,7 def _getfileinfo(name):
389 396 _kernel32.CloseHandle(fh)
390 397
391 398
392 def checkcertificatechain(cert, build=True):
399 def checkcertificatechain(cert: bytes, build: bool = True) -> bool:
393 400 """Tests the given certificate to see if there is a complete chain to a
394 401 trusted root certificate. As a side effect, missing certificates are
395 402 downloaded and installed unless ``build=False``. True is returned if a
@@ -439,7 +446,7 def checkcertificatechain(cert, build=Tr
439 446 _crypt32.CertFreeCertificateContext(certctx)
440 447
441 448
442 def oslink(src, dst):
449 def oslink(src: bytes, dst: bytes) -> None:
443 450 try:
444 451 if not _kernel32.CreateHardLinkA(dst, src, None):
445 452 _raiseoserror(src)
@@ -447,12 +454,12 def oslink(src, dst):
447 454 _raiseoserror(src)
448 455
449 456
450 def nlinks(name):
457 def nlinks(name: bytes) -> int:
451 458 '''return number of hardlinks for the given file'''
452 459 return _getfileinfo(name).nNumberOfLinks
453 460
454 461
455 def samefile(path1, path2):
462 def samefile(path1: bytes, path2: bytes) -> bool:
456 463 '''Returns whether path1 and path2 refer to the same file or directory.'''
457 464 res1 = _getfileinfo(path1)
458 465 res2 = _getfileinfo(path2)
@@ -463,14 +470,14 def samefile(path1, path2):
463 470 )
464 471
465 472
466 def samedevice(path1, path2):
473 def samedevice(path1: bytes, path2: bytes) -> bool:
467 474 '''Returns whether path1 and path2 are on the same device.'''
468 475 res1 = _getfileinfo(path1)
469 476 res2 = _getfileinfo(path2)
470 477 return res1.dwVolumeSerialNumber == res2.dwVolumeSerialNumber
471 478
472 479
473 def peekpipe(pipe):
480 def peekpipe(pipe) -> int:
474 481 handle = msvcrt.get_osfhandle(pipe.fileno()) # pytype: disable=module-attr
475 482 avail = _DWORD()
476 483
@@ -485,14 +492,14 def peekpipe(pipe):
485 492 return avail.value
486 493
487 494
488 def lasterrorwaspipeerror(err):
495 def lasterrorwaspipeerror(err) -> bool:
489 496 if err.errno != errno.EINVAL:
490 497 return False
491 498 err = _kernel32.GetLastError()
492 499 return err == _ERROR_BROKEN_PIPE or err == _ERROR_NO_DATA
493 500
494 501
495 def testpid(pid):
502 def testpid(pid: int) -> bool:
496 503 """return True if pid is still running or unable to
497 504 determine, False otherwise"""
498 505 h = _kernel32.OpenProcess(_PROCESS_QUERY_INFORMATION, False, pid)
@@ -506,7 +513,7 def testpid(pid):
506 513 return _kernel32.GetLastError() != _ERROR_INVALID_PARAMETER
507 514
508 515
509 def executablepath():
516 def executablepath() -> bytes:
510 517 '''return full path of hg.exe'''
511 518 size = 600
512 519 buf = ctypes.create_string_buffer(size + 1)
@@ -520,7 +527,7 def executablepath():
520 527 return buf.value
521 528
522 529
523 def getvolumename(path):
530 def getvolumename(path: bytes) -> Optional[bytes]:
524 531 """Get the mount point of the filesystem from a directory or file
525 532 (best-effort)
526 533
@@ -541,7 +548,7 def getvolumename(path):
541 548 return buf.value
542 549
543 550
544 def getfstype(path):
551 def getfstype(path: bytes) -> Optional[bytes]:
545 552 """Get the filesystem type name from a directory or file (best-effort)
546 553
547 554 Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc.
@@ -572,7 +579,7 def getfstype(path):
572 579 return name.value
573 580
574 581
575 def getuser():
582 def getuser() -> bytes:
576 583 '''return name of current user'''
577 584 size = _DWORD(300)
578 585 buf = ctypes.create_string_buffer(size.value + 1)
@@ -581,10 +588,10 def getuser():
581 588 return buf.value
582 589
583 590
584 _signalhandler = []
591 _signalhandler: List[_SIGNAL_HANDLER] = []
585 592
586 593
587 def setsignalhandler():
594 def setsignalhandler() -> None:
588 595 """Register a termination handler for console events including
589 596 CTRL+C. python signal handlers do not work well with socket
590 597 operations.
@@ -601,7 +608,7 def setsignalhandler():
601 608 raise ctypes.WinError() # pytype: disable=module-attr
602 609
603 610
604 def hidewindow():
611 def hidewindow() -> None:
605 612 def callback(hwnd, pid):
606 613 wpid = _DWORD()
607 614 _user32.GetWindowThreadProcessId(hwnd, ctypes.byref(wpid))
@@ -614,7 +621,7 def hidewindow():
614 621 _user32.EnumWindows(_WNDENUMPROC(callback), pid)
615 622
616 623
617 def termsize():
624 def termsize() -> Tuple[int, int]:
618 625 # cmd.exe does not handle CR like a unix console; the CR is
619 626 # counted in the line length. On 80-column consoles, if 80
620 627 # characters are written, the following CR won't apply on the
@@ -635,7 +642,7 def termsize():
635 642 return width, height
636 643
637 644
638 def enablevtmode():
645 def enablevtmode() -> bool:
639 646 """Enable virtual terminal mode for the associated console. Return True if
640 647 enabled, else False."""
641 648
@@ -661,7 +668,7 def enablevtmode():
661 668 return True
662 669
663 670
664 def spawndetached(args):
671 def spawndetached(args: List[bytes]) -> int:
665 672 # No standard library function really spawns a fully detached
666 673 # process under win32 because they allocate pipes or other objects
667 674 # to handle standard streams communications. Passing these objects
@@ -703,7 +710,7 def spawndetached(args):
703 710 return pi.dwProcessId
704 711
705 712
706 def unlink(f):
713 def unlink(f: bytes) -> None:
707 714 '''try to implement POSIX' unlink semantics on Windows'''
708 715
709 716 if os.path.isdir(f):
@@ -758,7 +765,7 def unlink(f):
758 765 pass
759 766
760 767
761 def makedir(path, notindexed):
768 def makedir(path: bytes, notindexed: bool) -> None:
762 769 os.mkdir(path)
763 770 if notindexed:
764 771 _kernel32.SetFileAttributesA(path, _FILE_ATTRIBUTE_NOT_CONTENT_INDEXED)
@@ -14,8 +14,24 import re
14 14 import stat
15 15 import string
16 16 import sys
17 import typing
17 18 import winreg # pytype: disable=import-error
18 19
20 from typing import (
21 AnyStr,
22 BinaryIO,
23 Iterable,
24 Iterator,
25 List,
26 Mapping,
27 NoReturn,
28 Optional,
29 Pattern,
30 Sequence,
31 Tuple,
32 Union,
33 )
34
19 35 from .i18n import _
20 36 from .pycompat import getattr
21 37 from . import (
@@ -23,6 +39,7 from . import (
23 39 error,
24 40 policy,
25 41 pycompat,
42 typelib,
26 43 win32,
27 44 )
28 45
@@ -44,7 +61,19 split = os.path.split
44 61 testpid = win32.testpid
45 62 unlink = win32.unlink
46 63
47 umask = 0o022
64 if typing.TYPE_CHECKING:
65 # Replace the various overloads that come along with aliasing stdlib methods
66 # with the narrow definition that we care about in the type checking phase
67 # only. This ensures that both Windows and POSIX see only the definition
68 # that is actually available.
69 #
70 # Note that if we check pycompat.TYPE_CHECKING here, it is always False, and
71 # the methods aren't replaced.
72 def split(p: bytes) -> Tuple[bytes, bytes]:
73 raise NotImplementedError
74
75
76 umask: int = 0o022
48 77
49 78
50 79 class mixedfilemodewrapper:
@@ -178,15 +207,7 def posixfile(name, mode=b'r', buffering
178 207 listdir = osutil.listdir
179 208
180 209
181 # copied from .utils.procutil, remove after Python 2 support was dropped
182 def _isatty(fp):
183 try:
184 return fp.isatty()
185 except AttributeError:
186 return False
187
188
189 def get_password():
210 def get_password() -> bytes:
190 211 """Prompt for password with echo off, using Windows getch().
191 212
192 213 This shouldn't be called directly- use ``ui.getpass()`` instead, which
@@ -208,7 +229,7 def get_password():
208 229 return encoding.unitolocal(pw)
209 230
210 231
211 class winstdout:
232 class winstdout(typelib.BinaryIO_Proxy):
212 233 """Some files on Windows misbehave.
213 234
214 235 When writing to a broken pipe, EINVAL instead of EPIPE may be raised.
@@ -217,7 +238,7 class winstdout:
217 238 error may happen. Python 3 already works around that.
218 239 """
219 240
220 def __init__(self, fp):
241 def __init__(self, fp: BinaryIO):
221 242 self.fp = fp
222 243
223 244 def __getattr__(self, key):
@@ -247,11 +268,11 class winstdout:
247 268 raise IOError(errno.EPIPE, 'Broken pipe')
248 269
249 270
250 def openhardlinks():
271 def openhardlinks() -> bool:
251 272 return True
252 273
253 274
254 def parsepatchoutput(output_line):
275 def parsepatchoutput(output_line: bytes) -> bytes:
255 276 """parses the output produced by patch and returns the filename"""
256 277 pf = output_line[14:]
257 278 if pf[0] == b'`':
@@ -259,7 +280,9 def parsepatchoutput(output_line):
259 280 return pf
260 281
261 282
262 def sshargs(sshcmd, host, user, port):
283 def sshargs(
284 sshcmd: bytes, host: bytes, user: Optional[bytes], port: Optional[bytes]
285 ) -> bytes:
263 286 '''Build argument list for ssh or Plink'''
264 287 pflag = b'plink' in sshcmd.lower() and b'-P' or b'-p'
265 288 args = user and (b"%s@%s" % (user, host)) or host
@@ -274,23 +297,28 def sshargs(sshcmd, host, user, port):
274 297 return args
275 298
276 299
277 def setflags(f, l, x):
278 pass
279
280
281 def copymode(src, dst, mode=None, enforcewritable=False):
300 def setflags(f: bytes, l: bool, x: bool) -> None:
282 301 pass
283 302
284 303
285 def checkexec(path):
304 def copymode(
305 src: bytes,
306 dst: bytes,
307 mode: Optional[bytes] = None,
308 enforcewritable: bool = False,
309 ) -> None:
310 pass
311
312
313 def checkexec(path: bytes) -> bool:
286 314 return False
287 315
288 316
289 def checklink(path):
317 def checklink(path: bytes) -> bool:
290 318 return False
291 319
292 320
293 def setbinary(fd):
321 def setbinary(fd) -> None:
294 322 # When run without console, pipes may expose invalid
295 323 # fileno(), usually set to -1.
296 324 fno = getattr(fd, 'fileno', None)
@@ -298,27 +326,28 def setbinary(fd):
298 326 msvcrt.setmode(fno(), os.O_BINARY) # pytype: disable=module-attr
299 327
300 328
301 def pconvert(path):
329 def pconvert(path: bytes) -> bytes:
302 330 return path.replace(pycompat.ossep, b'/')
303 331
304 332
305 def localpath(path):
333 def localpath(path: bytes) -> bytes:
306 334 return path.replace(b'/', b'\\')
307 335
308 336
309 def normpath(path):
337 def normpath(path: bytes) -> bytes:
310 338 return pconvert(os.path.normpath(path))
311 339
312 340
313 def normcase(path):
341 def normcase(path: bytes) -> bytes:
314 342 return encoding.upper(path) # NTFS compares via upper()
315 343
316 344
317 DRIVE_RE_B = re.compile(b'^[a-z]:')
318 DRIVE_RE_S = re.compile('^[a-z]:')
345 DRIVE_RE_B: Pattern[bytes] = re.compile(b'^[a-z]:')
346 DRIVE_RE_S: Pattern[str] = re.compile('^[a-z]:')
319 347
320 348
321 def abspath(path):
349 # TODO: why is this accepting str?
350 def abspath(path: AnyStr) -> AnyStr:
322 351 abs_path = os.path.abspath(path) # re-exports
323 352 # Python on Windows is inconsistent regarding the capitalization of drive
324 353 # letter and this cause issue with various path comparison along the way.
@@ -334,15 +363,15 def abspath(path):
334 363
335 364
336 365 # see posix.py for definitions
337 normcasespec = encoding.normcasespecs.upper
366 normcasespec: int = encoding.normcasespecs.upper
338 367 normcasefallback = encoding.upperfallback
339 368
340 369
341 def samestat(s1, s2):
370 def samestat(s1: os.stat_result, s2: os.stat_result) -> bool:
342 371 return False
343 372
344 373
345 def shelltocmdexe(path, env):
374 def shelltocmdexe(path: bytes, env: Mapping[bytes, bytes]) -> bytes:
346 375 r"""Convert shell variables in the form $var and ${var} inside ``path``
347 376 to %var% form. Existing Windows style variables are left unchanged.
348 377
@@ -467,11 +496,11 def shelltocmdexe(path, env):
467 496 # the number of backslashes that precede double quotes and add another
468 497 # backslash before every double quote (being careful with the double
469 498 # quote we've appended to the end)
470 _quotere = None
499 _quotere: Optional[Pattern[bytes]] = None
471 500 _needsshellquote = None
472 501
473 502
474 def shellquote(s):
503 def shellquote(s: bytes) -> bytes:
475 504 r"""
476 505 >>> shellquote(br'C:\Users\xyz')
477 506 '"C:\\Users\\xyz"'
@@ -501,24 +530,24 def shellquote(s):
501 530 return b'"%s"' % _quotere.sub(br'\1\1\\\2', s)
502 531
503 532
504 def _unquote(s):
533 def _unquote(s: bytes) -> bytes:
505 534 if s.startswith(b'"') and s.endswith(b'"'):
506 535 return s[1:-1]
507 536 return s
508 537
509 538
510 def shellsplit(s):
539 def shellsplit(s: bytes) -> List[bytes]:
511 540 """Parse a command string in cmd.exe way (best-effort)"""
512 541 return pycompat.maplist(_unquote, pycompat.shlexsplit(s, posix=False))
513 542
514 543
515 544 # if you change this stub into a real check, please try to implement the
516 545 # username and groupname functions above, too.
517 def isowner(st):
546 def isowner(st: os.stat_result) -> bool:
518 547 return True
519 548
520 549
521 def findexe(command):
550 def findexe(command: bytes) -> Optional[bytes]:
522 551 """Find executable for command searching like cmd.exe does.
523 552 If command is a basename then PATH is searched for command.
524 553 PATH isn't searched if command is an absolute or relative path.
@@ -529,7 +558,7 def findexe(command):
529 558 if os.path.splitext(command)[1].lower() in pathexts:
530 559 pathexts = [b'']
531 560
532 def findexisting(pathcommand):
561 def findexisting(pathcommand: bytes) -> Optional[bytes]:
533 562 """Will append extension (if needed) and return existing file"""
534 563 for ext in pathexts:
535 564 executable = pathcommand + ext
@@ -550,7 +579,7 def findexe(command):
550 579 _wantedkinds = {stat.S_IFREG, stat.S_IFLNK}
551 580
552 581
553 def statfiles(files):
582 def statfiles(files: Sequence[bytes]) -> Iterator[Optional[os.stat_result]]:
554 583 """Stat each file in files. Yield each stat, or None if a file
555 584 does not exist or has a type we don't care about.
556 585
@@ -576,7 +605,7 def statfiles(files):
576 605 yield cache.get(base, None)
577 606
578 607
579 def username(uid=None):
608 def username(uid: Optional[int] = None) -> Optional[bytes]:
580 609 """Return the name of the user with the given uid.
581 610
582 611 If uid is None, return the name of the current user."""
@@ -591,14 +620,14 def username(uid=None):
591 620 return None
592 621
593 622
594 def groupname(gid=None):
623 def groupname(gid: Optional[int] = None) -> Optional[bytes]:
595 624 """Return the name of the group with the given gid.
596 625
597 626 If gid is None, return the name of the current group."""
598 627 return None
599 628
600 629
601 def readlink(pathname):
630 def readlink(pathname: bytes) -> bytes:
602 631 path = pycompat.fsdecode(pathname)
603 632 try:
604 633 link = os.readlink(path)
@@ -611,7 +640,7 def readlink(pathname):
611 640 return pycompat.fsencode(link)
612 641
613 642
614 def removedirs(name):
643 def removedirs(name: bytes) -> None:
615 644 """special version of os.removedirs that does not remove symlinked
616 645 directories or junction points if they actually contain files"""
617 646 if listdir(name):
@@ -630,7 +659,7 def removedirs(name):
630 659 head, tail = os.path.split(head)
631 660
632 661
633 def rename(src, dst):
662 def rename(src: bytes, dst: bytes) -> None:
634 663 '''atomically rename file src to dst, replacing dst if it exists'''
635 664 try:
636 665 os.rename(src, dst)
@@ -639,28 +668,32 def rename(src, dst):
639 668 os.rename(src, dst)
640 669
641 670
642 def gethgcmd():
671 def gethgcmd() -> List[bytes]:
643 672 return [encoding.strtolocal(arg) for arg in [sys.executable] + sys.argv[:1]]
644 673
645 674
646 def groupmembers(name):
675 def groupmembers(name: bytes) -> List[bytes]:
647 676 # Don't support groups on Windows for now
648 677 raise KeyError
649 678
650 679
651 def isexec(f):
680 def isexec(f: bytes) -> bool:
652 681 return False
653 682
654 683
655 684 class cachestat:
656 def __init__(self, path):
685 def __init__(self, path: bytes) -> None:
657 686 pass
658 687
659 def cacheable(self):
688 def cacheable(self) -> bool:
660 689 return False
661 690
662 691
663 def lookupreg(key, valname=None, scope=None):
692 def lookupreg(
693 key: bytes,
694 valname: Optional[bytes] = None,
695 scope: Optional[Union[int, Iterable[int]]] = None,
696 ) -> Optional[bytes]:
664 697 """Look up a key/value name in the Windows registry.
665 698
666 699 valname: value name. If unspecified, the default value for the key
@@ -693,25 +726,25 def lookupreg(key, valname=None, scope=N
693 726 pass
694 727
695 728
696 expandglobs = True
729 expandglobs: bool = True
697 730
698 731
699 def statislink(st):
732 def statislink(st: Optional[os.stat_result]) -> bool:
700 733 '''check whether a stat result is a symlink'''
701 734 return False
702 735
703 736
704 def statisexec(st):
737 def statisexec(st: Optional[os.stat_result]) -> bool:
705 738 '''check whether a stat result is an executable file'''
706 739 return False
707 740
708 741
709 def poll(fds):
742 def poll(fds) -> List:
710 743 # see posix.py for description
711 744 raise NotImplementedError()
712 745
713 746
714 def readpipe(pipe):
747 def readpipe(pipe) -> bytes:
715 748 """Read all available data from a pipe."""
716 749 chunks = []
717 750 while True:
@@ -727,5 +760,5 def readpipe(pipe):
727 760 return b''.join(chunks)
728 761
729 762
730 def bindunixsocket(sock, path):
763 def bindunixsocket(sock, path: bytes) -> NoReturn:
731 764 raise NotImplementedError('unsupported platform')
@@ -61,45 +61,6 def ismainthread():
61 61 return threading.current_thread() == threading.main_thread()
62 62
63 63
64 class _blockingreader:
65 """Wrap unbuffered stream such that pickle.load() works with it.
66
67 pickle.load() expects that calls to read() and readinto() read as many
68 bytes as requested. On EOF, it is fine to read fewer bytes. In this case,
69 pickle.load() raises an EOFError.
70 """
71
72 def __init__(self, wrapped):
73 self._wrapped = wrapped
74
75 def readline(self):
76 return self._wrapped.readline()
77
78 def readinto(self, buf):
79 pos = 0
80 size = len(buf)
81
82 with memoryview(buf) as view:
83 while pos < size:
84 with view[pos:] as subview:
85 ret = self._wrapped.readinto(subview)
86 if not ret:
87 break
88 pos += ret
89
90 return pos
91
92 # issue multiple reads until size is fulfilled (or EOF is encountered)
93 def read(self, size=-1):
94 if size < 0:
95 return self._wrapped.readall()
96
97 buf = bytearray(size)
98 n_read = self.readinto(buf)
99 del buf[n_read:]
100 return bytes(buf)
101
102
103 64 if pycompat.isposix or pycompat.iswindows:
104 65 _STARTUP_COST = 0.01
105 66 # The Windows worker is thread based. If tasks are CPU bound, threads
@@ -276,11 +237,26 def _posixworker(ui, func, staticargs, a
276 237 selector = selectors.DefaultSelector()
277 238 for rfd, wfd in pipes:
278 239 os.close(wfd)
279 # The stream has to be unbuffered. Otherwise, if all data is read from
280 # the raw file into the buffer, the selector thinks that the FD is not
281 # ready to read while pickle.load() could read from the buffer. This
282 # would delay the processing of readable items.
283 selector.register(os.fdopen(rfd, 'rb', 0), selectors.EVENT_READ)
240 # Buffering is needed for performance, but it also presents a problem:
241 # the selector doesn't take buffered data into account,
242 # so we have to ensure the buffers are empty when select is called
243 # (see [peek_nonblock])
244 selector.register(os.fdopen(rfd, 'rb', 4096), selectors.EVENT_READ)
245
246 def peek_nonblock(f):
247 os.set_blocking(f.fileno(), False)
248 res = f.peek()
249 os.set_blocking(f.fileno(), True)
250 return res
251
252 def load_all(f):
253 # The pytype error likely goes away on a modern version of
254 # pytype having a modern typeshed snapshot.
255 # pytype: disable=wrong-arg-types
256 yield pickle.load(f)
257 while len(peek_nonblock(f)) > 0:
258 yield pickle.load(f)
259 # pytype: enable=wrong-arg-types
284 260
285 261 def cleanup():
286 262 signal.signal(signal.SIGINT, oldhandler)
@@ -294,15 +270,11 def _posixworker(ui, func, staticargs, a
294 270 while openpipes > 0:
295 271 for key, events in selector.select():
296 272 try:
297 # The pytype error likely goes away on a modern version of
298 # pytype having a modern typeshed snapshot.
299 # pytype: disable=wrong-arg-types
300 res = pickle.load(_blockingreader(key.fileobj))
301 # pytype: enable=wrong-arg-types
302 if hasretval and res[0]:
303 retval.update(res[1])
304 else:
305 yield res
273 for res in load_all(key.fileobj):
274 if hasretval and res[0]:
275 retval.update(res[1])
276 else:
277 yield res
306 278 except EOFError:
307 279 selector.unregister(key.fileobj)
308 280 # pytype: disable=attribute-error
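The replacement above trades the unbuffered `_blockingreader` wrapper for ordinary buffered pipes plus an explicit drain step. Restated outside the diff as a minimal sketch (same helper names as the patch; comments and the try/finally are added here for illustration, this is not a drop-in copy of the worker code):

    import os
    import pickle

    def peek_nonblock(f):
        # Flip the fd to non-blocking so peek() returns b'' instead of
        # stalling when the buffer is empty and no data is pending.
        os.set_blocking(f.fileno(), False)
        try:
            return f.peek()
        finally:
            os.set_blocking(f.fileno(), True)

    def load_all(f):
        # The selector reported the fd readable, so at least one object
        # can be loaded; EOF still surfaces as EOFError for the caller.
        yield pickle.load(f)
        # select() only sees the raw fd, never bytes already sitting in
        # the BufferedReader, so drain every fully buffered pickle before
        # returning to the event loop.
        while len(peek_nonblock(f)) > 0:
            yield pickle.load(f)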
@@ -2,6 +2,9
2 2
3 3 == New Features ==
4 4
5 * There is a new internal merge tool called `internal:union-other-first`.
6 It works like `internal:union` but adds the other side on top of the
7 local one (see the example below).
7
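  For example, to take both sides of each conflicting region with the
  other side placed first (the branch name below is a placeholder):

    $ hg merge --tool internal:union-other-first other-branch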
5 8 == Default Format Change ==
6 9
7 10 These changes affect newly created repositories (or new clones) done with
@@ -16,3 +19,7 Mercurial XXX.
16 19 == Internal API Changes ==
17 20
18 21 == Miscellaneous ==
22
23 * pullbundle support no longer requires setting a server-side option;
24 providing a .hg/pullbundles.manifest file following the syntax specified
25 in 'hg help -e clonebundles' is enough (see the example below).
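  A hypothetical one-line .hg/pullbundles.manifest entry in that syntax
  (the bundle file name and attribute values are placeholders):

    partial-pull.hg BUNDLESPEC=gzip-v2 heads=<hex-node> bases=<hex-node>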
@@ -10,21 +10,26 checksum = "fe438c63458706e03479442743ba
10 10
11 11 [[package]]
12 12 name = "adler"
13 version = "0.2.3"
13 version = "1.0.2"
14 14 source = "registry+https://github.com/rust-lang/crates.io-index"
15 checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e"
15 checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
16 16
17 17 [[package]]
18 18 name = "ahash"
19 version = "0.4.7"
19 version = "0.8.2"
20 20 source = "registry+https://github.com/rust-lang/crates.io-index"
21 checksum = "739f4a8db6605981345c5654f3a85b056ce52f37a39d34da03f25bf2151ea16e"
21 checksum = "bf6ccdb167abbf410dcb915cabd428929d7f6a04980b54a11f26a39f1c7f7107"
22 dependencies = [
23 "cfg-if",
24 "once_cell",
25 "version_check",
26 ]
22 27
23 28 [[package]]
24 29 name = "aho-corasick"
25 version = "0.7.18"
30 version = "0.7.19"
26 31 source = "registry+https://github.com/rust-lang/crates.io-index"
27 checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f"
32 checksum = "b4f55bd91a0978cbfd91c457a164bab8b4001c833b7f323132c0a4e1922dd44e"
28 33 dependencies = [
29 34 "memchr",
30 35 ]
@@ -36,12 +41,12 source = "registry+https://github.com/ru
36 41 checksum = "250f629c0161ad8107cf89319e990051fae62832fd343083bea452d93e2205fd"
37 42
38 43 [[package]]
39 name = "ansi_term"
40 version = "0.12.1"
44 name = "android_system_properties"
45 version = "0.1.5"
41 46 source = "registry+https://github.com/rust-lang/crates.io-index"
42 checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2"
47 checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311"
43 48 dependencies = [
44 "winapi",
49 "libc",
45 50 ]
46 51
47 52 [[package]]
@@ -57,9 +62,9 dependencies = [
57 62
58 63 [[package]]
59 64 name = "autocfg"
60 version = "1.0.1"
65 version = "1.1.0"
61 66 source = "registry+https://github.com/rust-lang/crates.io-index"
62 checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
67 checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
63 68
64 69 [[package]]
65 70 name = "bitflags"
@@ -87,14 +92,20 dependencies = [
87 92
88 93 [[package]]
89 94 name = "block-buffer"
90 version = "0.10.2"
95 version = "0.10.3"
91 96 source = "registry+https://github.com/rust-lang/crates.io-index"
92 checksum = "0bf7fe51849ea569fd452f37822f606a5cabb684dc918707a0193fd4664ff324"
97 checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e"
93 98 dependencies = [
94 99 "generic-array",
95 100 ]
96 101
97 102 [[package]]
103 name = "bumpalo"
104 version = "3.11.1"
105 source = "registry+https://github.com/rust-lang/crates.io-index"
106 checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba"
107
108 [[package]]
98 109 name = "byteorder"
99 110 version = "1.4.3"
100 111 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -102,18 +113,18 checksum = "14c189c53d098945499cdfa7ecc6
102 113
103 114 [[package]]
104 115 name = "bytes-cast"
105 version = "0.2.0"
116 version = "0.3.0"
106 117 source = "registry+https://github.com/rust-lang/crates.io-index"
107 checksum = "0d434f9a4ecbe987e7ccfda7274b6f82ea52c9b63742565a65cb5e8ba0f2c452"
118 checksum = "a20de93b91d7703ca0e39e12930e310acec5ff4d715f4166e0ab026babb352e8"
108 119 dependencies = [
109 120 "bytes-cast-derive",
110 121 ]
111 122
112 123 [[package]]
113 124 name = "bytes-cast-derive"
114 version = "0.1.0"
125 version = "0.2.0"
115 126 source = "registry+https://github.com/rust-lang/crates.io-index"
116 checksum = "cb936af9de38476664d6b58e529aff30d482e4ce1c5e150293d00730b0d81fdb"
127 checksum = "7470a6fcce58cde3d62cce758bf71007978b75247e6becd9255c9b884bcb4f71"
117 128 dependencies = [
118 129 "proc-macro2",
119 130 "quote",
@@ -122,58 +133,80 dependencies = [
122 133
123 134 [[package]]
124 135 name = "cc"
125 version = "1.0.66"
136 version = "1.0.76"
126 137 source = "registry+https://github.com/rust-lang/crates.io-index"
127 checksum = "4c0496836a84f8d0495758516b8621a622beb77c0fed418570e50764093ced48"
138 checksum = "76a284da2e6fe2092f2353e51713435363112dfd60030e22add80be333fb928f"
128 139 dependencies = [
129 140 "jobserver",
130 141 ]
131 142
132 143 [[package]]
133 144 name = "cfg-if"
134 version = "0.1.10"
135 source = "registry+https://github.com/rust-lang/crates.io-index"
136 checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
137
138 [[package]]
139 name = "cfg-if"
140 145 version = "1.0.0"
141 146 source = "registry+https://github.com/rust-lang/crates.io-index"
142 147 checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
143 148
144 149 [[package]]
145 150 name = "chrono"
146 version = "0.4.19"
151 version = "0.4.23"
147 152 source = "registry+https://github.com/rust-lang/crates.io-index"
148 checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73"
153 checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f"
149 154 dependencies = [
150 "libc",
155 "iana-time-zone",
156 "js-sys",
151 157 "num-integer",
152 158 "num-traits",
153 159 "time",
160 "wasm-bindgen",
154 161 "winapi",
155 162 ]
156 163
157 164 [[package]]
158 165 name = "clap"
159 version = "2.34.0"
166 version = "4.0.24"
160 167 source = "registry+https://github.com/rust-lang/crates.io-index"
161 checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c"
168 checksum = "60494cedb60cb47462c0ff7be53de32c0e42a6fc2c772184554fa12bd9489c03"
162 169 dependencies = [
163 "ansi_term",
164 170 "atty",
165 171 "bitflags",
172 "clap_derive",
173 "clap_lex",
174 "once_cell",
166 175 "strsim",
167 "textwrap",
168 "unicode-width",
169 "vec_map",
176 "termcolor",
170 177 ]
171 178
172 179 [[package]]
173 name = "const_fn"
174 version = "0.4.4"
180 name = "clap_derive"
181 version = "4.0.21"
175 182 source = "registry+https://github.com/rust-lang/crates.io-index"
176 checksum = "cd51eab21ab4fd6a3bf889e2d0958c0a6e3a61ad04260325e919e652a2a62826"
183 checksum = "0177313f9f02afc995627906bbd8967e2be069f5261954222dac78290c2b9014"
184 dependencies = [
185 "heck",
186 "proc-macro-error",
187 "proc-macro2",
188 "quote",
189 "syn",
190 ]
191
192 [[package]]
193 name = "clap_lex"
194 version = "0.3.0"
195 source = "registry+https://github.com/rust-lang/crates.io-index"
196 checksum = "0d4198f73e42b4936b35b5bb248d81d2b595ecb170da0bac7655c54eedfa8da8"
197 dependencies = [
198 "os_str_bytes",
199 ]
200
201 [[package]]
202 name = "codespan-reporting"
203 version = "0.11.1"
204 source = "registry+https://github.com/rust-lang/crates.io-index"
205 checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e"
206 dependencies = [
207 "termcolor",
208 "unicode-width",
209 ]
177 210
178 211 [[package]]
179 212 name = "convert_case"
@@ -182,28 +215,25 source = "registry+https://github.com/ru
182 215 checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e"
183 216
184 217 [[package]]
185 name = "cpufeatures"
186 version = "0.1.4"
218 name = "core-foundation-sys"
219 version = "0.8.3"
187 220 source = "registry+https://github.com/rust-lang/crates.io-index"
188 checksum = "ed00c67cb5d0a7d64a44f6ad2668db7e7530311dd53ea79bcd4fb022c64911c8"
189 dependencies = [
190 "libc",
191 ]
221 checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc"
192 222
193 223 [[package]]
194 224 name = "cpufeatures"
195 version = "0.2.1"
225 version = "0.2.5"
196 226 source = "registry+https://github.com/rust-lang/crates.io-index"
197 checksum = "95059428f66df56b63431fdb4e1947ed2190586af5c5a8a8b71122bdf5a7f469"
227 checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320"
198 228 dependencies = [
199 229 "libc",
200 230 ]
201 231
202 232 [[package]]
203 233 name = "cpython"
204 version = "0.7.0"
234 version = "0.7.1"
205 235 source = "registry+https://github.com/rust-lang/crates.io-index"
206 checksum = "b7d46ba8ace7f3a1d204ac5060a706d0a68de6b42eafb6a586cc08bebcffe664"
236 checksum = "3052106c29da7390237bc2310c1928335733b286287754ea85e6093d2495280e"
207 237 dependencies = [
208 238 "libc",
209 239 "num-traits",
@@ -213,20 +243,20 dependencies = [
213 243
214 244 [[package]]
215 245 name = "crc32fast"
216 version = "1.2.1"
246 version = "1.3.2"
217 247 source = "registry+https://github.com/rust-lang/crates.io-index"
218 checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a"
248 checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d"
219 249 dependencies = [
220 "cfg-if 1.0.0",
250 "cfg-if",
221 251 ]
222 252
223 253 [[package]]
224 254 name = "crossbeam-channel"
225 version = "0.5.2"
255 version = "0.5.6"
226 256 source = "registry+https://github.com/rust-lang/crates.io-index"
227 checksum = "e54ea8bc3fb1ee042f5aace6e3c6e025d3874866da222930f70ce62aceba0bfa"
257 checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521"
228 258 dependencies = [
229 "cfg-if 1.0.0",
259 "cfg-if",
230 260 "crossbeam-utils",
231 261 ]
232 262
@@ -236,51 +266,93 version = "0.8.2"
236 266 source = "registry+https://github.com/rust-lang/crates.io-index"
237 267 checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc"
238 268 dependencies = [
239 "cfg-if 1.0.0",
269 "cfg-if",
240 270 "crossbeam-epoch",
241 271 "crossbeam-utils",
242 272 ]
243 273
244 274 [[package]]
245 275 name = "crossbeam-epoch"
246 version = "0.9.1"
276 version = "0.9.11"
247 277 source = "registry+https://github.com/rust-lang/crates.io-index"
248 checksum = "a1aaa739f95311c2c7887a76863f500026092fb1dce0161dab577e559ef3569d"
278 checksum = "f916dfc5d356b0ed9dae65f1db9fc9770aa2851d2662b988ccf4fe3516e86348"
249 279 dependencies = [
250 "cfg-if 1.0.0",
251 "const_fn",
280 "autocfg",
281 "cfg-if",
252 282 "crossbeam-utils",
253 "lazy_static",
254 283 "memoffset",
255 284 "scopeguard",
256 285 ]
257 286
258 287 [[package]]
259 288 name = "crossbeam-utils"
260 version = "0.8.1"
289 version = "0.8.12"
261 290 source = "registry+https://github.com/rust-lang/crates.io-index"
262 checksum = "02d96d1e189ef58269ebe5b97953da3274d83a93af647c2ddd6f9dab28cedb8d"
291 checksum = "edbafec5fa1f196ca66527c1b12c2ec4745ca14b50f1ad8f9f6f720b55d11fac"
263 292 dependencies = [
264 "autocfg",
265 "cfg-if 1.0.0",
266 "lazy_static",
293 "cfg-if",
267 294 ]
268 295
269 296 [[package]]
270 297 name = "crypto-common"
271 version = "0.1.2"
298 version = "0.1.6"
272 299 source = "registry+https://github.com/rust-lang/crates.io-index"
273 checksum = "a4600d695eb3f6ce1cd44e6e291adceb2cc3ab12f20a33777ecd0bf6eba34e06"
300 checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3"
274 301 dependencies = [
275 302 "generic-array",
303 "typenum",
276 304 ]
277 305
278 306 [[package]]
279 307 name = "ctor"
280 version = "0.1.16"
308 version = "0.1.26"
309 source = "registry+https://github.com/rust-lang/crates.io-index"
310 checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096"
311 dependencies = [
312 "quote",
313 "syn",
314 ]
315
316 [[package]]
317 name = "cxx"
318 version = "1.0.81"
319 source = "registry+https://github.com/rust-lang/crates.io-index"
320 checksum = "97abf9f0eca9e52b7f81b945524e76710e6cb2366aead23b7d4fbf72e281f888"
321 dependencies = [
322 "cc",
323 "cxxbridge-flags",
324 "cxxbridge-macro",
325 "link-cplusplus",
326 ]
327
328 [[package]]
329 name = "cxx-build"
330 version = "1.0.81"
281 331 source = "registry+https://github.com/rust-lang/crates.io-index"
282 checksum = "7fbaabec2c953050352311293be5c6aba8e141ba19d6811862b232d6fd020484"
332 checksum = "7cc32cc5fea1d894b77d269ddb9f192110069a8a9c1f1d441195fba90553dea3"
283 333 dependencies = [
334 "cc",
335 "codespan-reporting",
336 "once_cell",
337 "proc-macro2",
338 "quote",
339 "scratch",
340 "syn",
341 ]
342
343 [[package]]
344 name = "cxxbridge-flags"
345 version = "1.0.81"
346 source = "registry+https://github.com/rust-lang/crates.io-index"
347 checksum = "8ca220e4794c934dc6b1207c3b42856ad4c302f2df1712e9f8d2eec5afaacf1f"
348
349 [[package]]
350 name = "cxxbridge-macro"
351 version = "1.0.81"
352 source = "registry+https://github.com/rust-lang/crates.io-index"
353 checksum = "b846f081361125bfc8dc9d3940c84e1fd83ba54bbca7b17cd29483c828be0704"
354 dependencies = [
355 "proc-macro2",
284 356 "quote",
285 357 "syn",
286 358 ]
@@ -300,9 +372,9 dependencies = [
300 372
301 373 [[package]]
302 374 name = "diff"
303 version = "0.1.12"
375 version = "0.1.13"
304 376 source = "registry+https://github.com/rust-lang/crates.io-index"
305 checksum = "0e25ea47919b1560c4e3b7fe0aaab9becf5b84a10325ddf7db0f0ba5e1026499"
377 checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8"
306 378
307 379 [[package]]
308 380 name = "digest"
@@ -315,25 +387,25 dependencies = [
315 387
316 388 [[package]]
317 389 name = "digest"
318 version = "0.10.2"
390 version = "0.10.5"
319 391 source = "registry+https://github.com/rust-lang/crates.io-index"
320 checksum = "8cb780dce4f9a8f5c087362b3a4595936b2019e7c8b30f2c3e9a7e94e6ae9837"
392 checksum = "adfbc57365a37acbd2ebf2b64d7e69bb766e2fea813521ed536f5d0520dcf86c"
321 393 dependencies = [
322 "block-buffer 0.10.2",
394 "block-buffer 0.10.3",
323 395 "crypto-common",
324 396 ]
325 397
326 398 [[package]]
327 399 name = "either"
328 version = "1.6.1"
400 version = "1.8.0"
329 401 source = "registry+https://github.com/rust-lang/crates.io-index"
330 checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457"
402 checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797"
331 403
332 404 [[package]]
333 405 name = "env_logger"
334 version = "0.9.0"
406 version = "0.9.3"
335 407 source = "registry+https://github.com/rust-lang/crates.io-index"
336 checksum = "0b2cf0344971ee6c64c31be0d530793fba457d322dfec2810c453d0ef228f9c3"
408 checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7"
337 409 dependencies = [
338 410 "atty",
339 411 "humantime",
@@ -344,22 +416,20 dependencies = [
344 416
345 417 [[package]]
346 418 name = "fastrand"
347 version = "1.7.0"
419 version = "1.8.0"
348 420 source = "registry+https://github.com/rust-lang/crates.io-index"
349 checksum = "c3fcf0cee53519c866c09b5de1f6c56ff9d647101f81c1964fa632e148896cdf"
421 checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499"
350 422 dependencies = [
351 423 "instant",
352 424 ]
353 425
354 426 [[package]]
355 427 name = "flate2"
356 version = "1.0.22"
428 version = "1.0.24"
357 429 source = "registry+https://github.com/rust-lang/crates.io-index"
358 checksum = "1e6988e897c1c9c485f43b47a529cef42fde0547f9d8d41a7062518f1d8fc53f"
430 checksum = "f82b0f4c27ad9f8bfd1f3208d882da2b09c301bc1c828fd3a00d0216d2fbbff6"
359 431 dependencies = [
360 "cfg-if 1.0.0",
361 432 "crc32fast",
362 "libc",
363 433 "libz-sys",
364 434 "miniz_oxide",
365 435 ]
@@ -386,9 +456,9 dependencies = [
386 456
387 457 [[package]]
388 458 name = "generic-array"
389 version = "0.14.4"
459 version = "0.14.6"
390 460 source = "registry+https://github.com/rust-lang/crates.io-index"
391 checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817"
461 checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9"
392 462 dependencies = [
393 463 "typenum",
394 464 "version_check",
@@ -396,47 +466,47 dependencies = [
396 466
397 467 [[package]]
398 468 name = "getrandom"
399 version = "0.1.15"
469 version = "0.1.16"
400 470 source = "registry+https://github.com/rust-lang/crates.io-index"
401 checksum = "fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6"
471 checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce"
402 472 dependencies = [
403 "cfg-if 0.1.10",
473 "cfg-if",
404 474 "libc",
405 475 "wasi 0.9.0+wasi-snapshot-preview1",
406 476 ]
407 477
408 478 [[package]]
409 479 name = "getrandom"
410 version = "0.2.4"
480 version = "0.2.8"
411 481 source = "registry+https://github.com/rust-lang/crates.io-index"
412 checksum = "418d37c8b1d42553c93648be529cb70f920d3baf8ef469b74b9638df426e0b4c"
482 checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31"
413 483 dependencies = [
414 "cfg-if 1.0.0",
484 "cfg-if",
415 485 "libc",
416 "wasi 0.10.0+wasi-snapshot-preview1",
486 "wasi 0.11.0+wasi-snapshot-preview1",
417 487 ]
418 488
419 489 [[package]]
420 name = "glob"
421 version = "0.3.0"
490 name = "hashbrown"
491 version = "0.13.1"
422 492 source = "registry+https://github.com/rust-lang/crates.io-index"
423 checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574"
424
425 [[package]]
426 name = "hashbrown"
427 version = "0.9.1"
428 source = "registry+https://github.com/rust-lang/crates.io-index"
429 checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04"
493 checksum = "33ff8ae62cd3a9102e5637afc8452c55acf3844001bd5374e0b0bd7b6616c038"
430 494 dependencies = [
431 495 "ahash",
432 496 "rayon",
433 497 ]
434 498
435 499 [[package]]
500 name = "heck"
501 version = "0.4.0"
502 source = "registry+https://github.com/rust-lang/crates.io-index"
503 checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9"
504
505 [[package]]
436 506 name = "hermit-abi"
437 version = "0.1.17"
507 version = "0.1.19"
438 508 source = "registry+https://github.com/rust-lang/crates.io-index"
439 checksum = "5aca5565f760fb5b220e499d72710ed156fdb74e631659e99377d9ebfbd13ae8"
509 checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
440 510 dependencies = [
441 511 "libc",
442 512 ]
@@ -462,12 +532,12 dependencies = [
462 532 "hashbrown",
463 533 "home",
464 534 "im-rc",
465 "itertools 0.10.3",
535 "itertools",
466 536 "lazy_static",
467 537 "libc",
468 538 "log",
539 "logging_timer",
469 540 "memmap2",
470 "micro-timer",
471 541 "once_cell",
472 542 "ouroboros",
473 543 "pretty_assertions",
@@ -500,9 +570,9 dependencies = [
500 570
501 571 [[package]]
502 572 name = "home"
503 version = "0.5.3"
573 version = "0.5.4"
504 574 source = "registry+https://github.com/rust-lang/crates.io-index"
505 checksum = "2456aef2e6b6a9784192ae780c0f15bc57df0e918585282325e8c8ac27737654"
575 checksum = "747309b4b440c06d57b0b25f2aee03ee9b5e5397d288c60e21fc709bb98a7408"
506 576 dependencies = [
507 577 "winapi",
508 578 ]
@@ -514,13 +584,37 source = "registry+https://github.com/ru
514 584 checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"
515 585
516 586 [[package]]
587 name = "iana-time-zone"
588 version = "0.1.53"
589 source = "registry+https://github.com/rust-lang/crates.io-index"
590 checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765"
591 dependencies = [
592 "android_system_properties",
593 "core-foundation-sys",
594 "iana-time-zone-haiku",
595 "js-sys",
596 "wasm-bindgen",
597 "winapi",
598 ]
599
600 [[package]]
601 name = "iana-time-zone-haiku"
602 version = "0.1.1"
603 source = "registry+https://github.com/rust-lang/crates.io-index"
604 checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca"
605 dependencies = [
606 "cxx",
607 "cxx-build",
608 ]
609
610 [[package]]
517 611 name = "im-rc"
518 version = "15.0.0"
612 version = "15.1.0"
519 613 source = "registry+https://github.com/rust-lang/crates.io-index"
520 checksum = "3ca8957e71f04a205cb162508f9326aea04676c8dfd0711220190d6b83664f3f"
614 checksum = "af1955a75fa080c677d3972822ec4bad316169ab1cfc6c257a942c2265dbe5fe"
521 615 dependencies = [
522 616 "bitmaps",
523 "rand_core 0.5.1",
617 "rand_core 0.6.4",
524 618 "rand_xoshiro",
525 619 "sized-chunks",
526 620 "typenum",
@@ -533,37 +627,37 version = "0.1.12"
533 627 source = "registry+https://github.com/rust-lang/crates.io-index"
534 628 checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c"
535 629 dependencies = [
536 "cfg-if 1.0.0",
630 "cfg-if",
537 631 ]
538 632
539 633 [[package]]
540 634 name = "itertools"
541 version = "0.9.0"
635 version = "0.10.5"
542 636 source = "registry+https://github.com/rust-lang/crates.io-index"
543 checksum = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b"
544 dependencies = [
545 "either",
546 ]
547
548 [[package]]
549 name = "itertools"
550 version = "0.10.3"
551 source = "registry+https://github.com/rust-lang/crates.io-index"
552 checksum = "a9a9d19fa1e79b6215ff29b9d6880b706147f16e9b1dbb1e4e5947b5b02bc5e3"
637 checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
553 638 dependencies = [
554 639 "either",
555 640 ]
556 641
557 642 [[package]]
558 643 name = "jobserver"
559 version = "0.1.21"
644 version = "0.1.25"
560 645 source = "registry+https://github.com/rust-lang/crates.io-index"
561 checksum = "5c71313ebb9439f74b00d9d2dcec36440beaf57a6aa0623068441dd7cd81a7f2"
646 checksum = "068b1ee6743e4d11fb9c6a1e6064b3693a1b600e7f5f5988047d98b3dc9fb90b"
562 647 dependencies = [
563 648 "libc",
564 649 ]
565 650
566 651 [[package]]
652 name = "js-sys"
653 version = "0.3.60"
654 source = "registry+https://github.com/rust-lang/crates.io-index"
655 checksum = "49409df3e3bf0856b916e2ceaca09ee28e6871cf7d9ce97a692cacfdb2a25a47"
656 dependencies = [
657 "wasm-bindgen",
658 ]
659
660 [[package]]
567 661 name = "lazy_static"
568 662 version = "1.4.0"
569 663 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -571,21 +665,21 checksum = "e2abad23fbc42b3700f2f279844d
571 665
572 666 [[package]]
573 667 name = "libc"
574 version = "0.2.124"
668 version = "0.2.137"
575 669 source = "registry+https://github.com/rust-lang/crates.io-index"
576 checksum = "21a41fed9d98f27ab1c6d161da622a4fa35e8a54a8adc24bbf3ddd0ef70b0e50"
670 checksum = "fc7fcc620a3bff7cdd7a365be3376c97191aeaccc2a603e600951e452615bf89"
577 671
578 672 [[package]]
579 673 name = "libm"
580 version = "0.2.1"
674 version = "0.2.6"
581 675 source = "registry+https://github.com/rust-lang/crates.io-index"
582 checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a"
676 checksum = "348108ab3fba42ec82ff6e9564fc4ca0247bdccdc68dd8af9764bbc79c3c8ffb"
583 677
584 678 [[package]]
585 679 name = "libz-sys"
586 version = "1.1.2"
680 version = "1.1.8"
587 681 source = "registry+https://github.com/rust-lang/crates.io-index"
588 checksum = "602113192b08db8f38796c4e85c39e960c145965140e918018bcde1952429655"
682 checksum = "9702761c3935f8cc2f101793272e202c72b99da8f4224a19ddcf1279a6450bbf"
589 683 dependencies = [
590 684 "cc",
591 685 "pkg-config",
@@ -593,25 +687,56 dependencies = [
593 687 ]
594 688
595 689 [[package]]
690 name = "link-cplusplus"
691 version = "1.0.7"
692 source = "registry+https://github.com/rust-lang/crates.io-index"
693 checksum = "9272ab7b96c9046fbc5bc56c06c117cb639fe2d509df0c421cad82d2915cf369"
694 dependencies = [
695 "cc",
696 ]
697
698 [[package]]
596 699 name = "log"
597 version = "0.4.14"
700 version = "0.4.17"
598 701 source = "registry+https://github.com/rust-lang/crates.io-index"
599 checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710"
702 checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
703 dependencies = [
704 "cfg-if",
705 ]
706
707 [[package]]
708 name = "logging_timer"
709 version = "1.1.0"
710 source = "registry+https://github.com/rust-lang/crates.io-index"
711 checksum = "64e96f261d684b7089aa576bb74e823241dccd994b27d30fabf1dcb3af284fe9"
600 712 dependencies = [
601 "cfg-if 1.0.0",
713 "log",
714 "logging_timer_proc_macros",
715 ]
716
717 [[package]]
718 name = "logging_timer_proc_macros"
719 version = "1.1.0"
720 source = "registry+https://github.com/rust-lang/crates.io-index"
721 checksum = "10a9062912d7952c5588cc474795e0b9ee008e7e6781127945b85413d4b99d81"
722 dependencies = [
723 "log",
724 "proc-macro2",
725 "quote",
726 "syn",
602 727 ]
603 728
604 729 [[package]]
605 730 name = "memchr"
606 version = "2.4.1"
731 version = "2.5.0"
607 732 source = "registry+https://github.com/rust-lang/crates.io-index"
608 checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a"
733 checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
609 734
610 735 [[package]]
611 736 name = "memmap2"
612 version = "0.5.7"
737 version = "0.5.8"
613 738 source = "registry+https://github.com/rust-lang/crates.io-index"
614 checksum = "95af15f345b17af2efc8ead6080fb8bc376f8cec1b35277b935637595fe77498"
739 checksum = "4b182332558b18d807c4ce1ca8ca983b34c3ee32765e47b3f0f69b90355cc1dc"
615 740 dependencies = [
616 741 "libc",
617 742 "stable_deref_trait",
@@ -619,50 +744,27 dependencies = [
619 744
620 745 [[package]]
621 746 name = "memoffset"
622 version = "0.6.1"
747 version = "0.6.5"
623 748 source = "registry+https://github.com/rust-lang/crates.io-index"
624 checksum = "157b4208e3059a8f9e78d559edc658e13df41410cb3ae03979c83130067fdd87"
749 checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce"
625 750 dependencies = [
626 751 "autocfg",
627 752 ]
628 753
629 754 [[package]]
630 name = "micro-timer"
631 version = "0.4.0"
632 source = "registry+https://github.com/rust-lang/crates.io-index"
633 checksum = "5de32cb59a062672560d6f0842c4aa7714727457b9fe2daf8987d995a176a405"
634 dependencies = [
635 "micro-timer-macros",
636 "scopeguard",
637 ]
638
639 [[package]]
640 name = "micro-timer-macros"
641 version = "0.4.0"
755 name = "miniz_oxide"
756 version = "0.5.4"
642 757 source = "registry+https://github.com/rust-lang/crates.io-index"
643 checksum = "cee948b94700125b52dfb68dd17c19f6326696c1df57f92c05ee857463c93ba1"
644 dependencies = [
645 "proc-macro2",
646 "quote",
647 "scopeguard",
648 "syn",
649 ]
650
651 [[package]]
652 name = "miniz_oxide"
653 version = "0.4.3"
654 source = "registry+https://github.com/rust-lang/crates.io-index"
655 checksum = "0f2d26ec3309788e423cfbf68ad1800f061638098d76a83681af979dc4eda19d"
758 checksum = "96590ba8f175222643a85693f33d26e9c8a015f599c216509b1a6894af675d34"
656 759 dependencies = [
657 760 "adler",
658 "autocfg",
659 761 ]
660 762
661 763 [[package]]
662 764 name = "num-integer"
663 version = "0.1.44"
765 version = "0.1.45"
664 766 source = "registry+https://github.com/rust-lang/crates.io-index"
665 checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db"
767 checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9"
666 768 dependencies = [
667 769 "autocfg",
668 770 "num-traits",
@@ -670,9 +772,9 dependencies = [
670 772
671 773 [[package]]
672 774 name = "num-traits"
673 version = "0.2.14"
775 version = "0.2.15"
674 776 source = "registry+https://github.com/rust-lang/crates.io-index"
675 checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290"
777 checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd"
676 778 dependencies = [
677 779 "autocfg",
678 780 "libm",
@@ -680,9 +782,9 dependencies = [
680 782
681 783 [[package]]
682 784 name = "num_cpus"
683 version = "1.13.0"
785 version = "1.14.0"
684 786 source = "registry+https://github.com/rust-lang/crates.io-index"
685 checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3"
787 checksum = "f6058e64324c71e02bc2b150e4f3bc8286db6c83092132ffa3f6b1eab0f9def5"
686 788 dependencies = [
687 789 "hermit-abi",
688 790 "libc",
@@ -690,9 +792,9 dependencies = [
690 792
691 793 [[package]]
692 794 name = "once_cell"
693 version = "1.14.0"
795 version = "1.16.0"
694 796 source = "registry+https://github.com/rust-lang/crates.io-index"
695 checksum = "2f7254b99e31cad77da24b08ebf628882739a608578bb1bcdfc1f9c21260d7c0"
797 checksum = "86f0b0d4bf799edbc74508c1e8bf170ff5f41238e5f8225603ca7caaae2b7860"
696 798
697 799 [[package]]
698 800 name = "opaque-debug"
@@ -701,21 +803,26 source = "registry+https://github.com/ru
701 803 checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5"
702 804
703 805 [[package]]
806 name = "os_str_bytes"
807 version = "6.4.0"
808 source = "registry+https://github.com/rust-lang/crates.io-index"
809 checksum = "7b5bf27447411e9ee3ff51186bf7a08e16c341efdde93f4d823e8844429bed7e"
810
811 [[package]]
704 812 name = "ouroboros"
705 version = "0.15.0"
813 version = "0.15.5"
706 814 source = "registry+https://github.com/rust-lang/crates.io-index"
707 checksum = "9f31a3b678685b150cba82b702dcdc5e155893f63610cf388d30cd988d4ca2bf"
815 checksum = "dfbb50b356159620db6ac971c6d5c9ab788c9cc38a6f49619fca2a27acb062ca"
708 816 dependencies = [
709 817 "aliasable",
710 818 "ouroboros_macro",
711 "stable_deref_trait",
712 819 ]
713 820
714 821 [[package]]
715 822 name = "ouroboros_macro"
716 version = "0.15.0"
823 version = "0.15.5"
717 824 source = "registry+https://github.com/rust-lang/crates.io-index"
718 checksum = "084fd65d5dd8b3772edccb5ffd1e4b7eba43897ecd0f9401e330e8c542959408"
825 checksum = "4a0d9d1a6191c4f391f87219d1ea42b23f09ee84d64763cd05ee6ea88d9f384d"
719 826 dependencies = [
720 827 "Inflector",
721 828 "proc-macro-error",
@@ -726,41 +833,41 dependencies = [
726 833
727 834 [[package]]
728 835 name = "output_vt100"
729 version = "0.1.2"
836 version = "0.1.3"
730 837 source = "registry+https://github.com/rust-lang/crates.io-index"
731 checksum = "53cdc5b785b7a58c5aad8216b3dfa114df64b0b06ae6e1501cef91df2fbdf8f9"
838 checksum = "628223faebab4e3e40667ee0b2336d34a5b960ff60ea743ddfdbcf7770bcfb66"
732 839 dependencies = [
733 840 "winapi",
734 841 ]
735 842
736 843 [[package]]
737 844 name = "paste"
738 version = "1.0.5"
845 version = "1.0.9"
739 846 source = "registry+https://github.com/rust-lang/crates.io-index"
740 checksum = "acbf547ad0c65e31259204bd90935776d1c693cec2f4ff7abb7a1bbbd40dfe58"
847 checksum = "b1de2e551fb905ac83f73f7aedf2f0cb4a0da7e35efa24a202a936269f1f18e1"
741 848
742 849 [[package]]
743 850 name = "pkg-config"
744 version = "0.3.19"
851 version = "0.3.26"
745 852 source = "registry+https://github.com/rust-lang/crates.io-index"
746 checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c"
853 checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160"
747 854
748 855 [[package]]
749 856 name = "ppv-lite86"
750 version = "0.2.10"
857 version = "0.2.17"
751 858 source = "registry+https://github.com/rust-lang/crates.io-index"
752 checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857"
859 checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de"
753 860
754 861 [[package]]
755 862 name = "pretty_assertions"
756 version = "1.1.0"
863 version = "1.3.0"
757 864 source = "registry+https://github.com/rust-lang/crates.io-index"
758 checksum = "76d5b548b725018ab5496482b45cb8bef21e9fed1858a6d674e3a8a0f0bb5d50"
865 checksum = "a25e9bcb20aa780fd0bb16b72403a9064d6b3f22f026946029acb941a50af755"
759 866 dependencies = [
760 "ansi_term",
761 867 "ctor",
762 868 "diff",
763 869 "output_vt100",
870 "yansi",
764 871 ]
765 872
766 873 [[package]]
@@ -789,18 +896,18 dependencies = [
789 896
790 897 [[package]]
791 898 name = "proc-macro2"
792 version = "1.0.24"
899 version = "1.0.47"
793 900 source = "registry+https://github.com/rust-lang/crates.io-index"
794 checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71"
901 checksum = "5ea3d908b0e36316caf9e9e2c4625cdde190a7e6f440d794667ed17a1855e725"
795 902 dependencies = [
796 "unicode-xid",
903 "unicode-ident",
797 904 ]
798 905
799 906 [[package]]
800 907 name = "python3-sys"
801 version = "0.7.0"
908 version = "0.7.1"
802 909 source = "registry+https://github.com/rust-lang/crates.io-index"
803 checksum = "b18b32e64c103d5045f44644d7ddddd65336f7a0521f6fde673240a9ecceb77e"
910 checksum = "49f8b50d72fb3015735aa403eebf19bbd72c093bfeeae24ee798be5f2f1aab52"
804 911 dependencies = [
805 912 "libc",
806 913 "regex",
@@ -808,9 +915,9 dependencies = [
808 915
809 916 [[package]]
810 917 name = "quote"
811 version = "1.0.7"
918 version = "1.0.21"
812 919 source = "registry+https://github.com/rust-lang/crates.io-index"
813 checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37"
920 checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179"
814 921 dependencies = [
815 922 "proc-macro2",
816 923 ]
@@ -821,7 +928,7 version = "0.7.3"
821 928 source = "registry+https://github.com/rust-lang/crates.io-index"
822 929 checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03"
823 930 dependencies = [
824 "getrandom 0.1.15",
931 "getrandom 0.1.16",
825 932 "libc",
826 933 "rand_chacha 0.2.2",
827 934 "rand_core 0.5.1",
@@ -836,7 +943,7 checksum = "34af8d1a0e25924bc5b7c43c079c
836 943 dependencies = [
837 944 "libc",
838 945 "rand_chacha 0.3.1",
839 "rand_core 0.6.3",
946 "rand_core 0.6.4",
840 947 ]
841 948
842 949 [[package]]
@@ -856,7 +963,7 source = "registry+https://github.com/ru
856 963 checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
857 964 dependencies = [
858 965 "ppv-lite86",
859 "rand_core 0.6.3",
966 "rand_core 0.6.4",
860 967 ]
861 968
862 969 [[package]]
@@ -865,16 +972,16 version = "0.5.1"
865 972 source = "registry+https://github.com/rust-lang/crates.io-index"
866 973 checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19"
867 974 dependencies = [
868 "getrandom 0.1.15",
975 "getrandom 0.1.16",
869 976 ]
870 977
871 978 [[package]]
872 979 name = "rand_core"
873 version = "0.6.3"
980 version = "0.6.4"
874 981 source = "registry+https://github.com/rust-lang/crates.io-index"
875 checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7"
982 checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
876 983 dependencies = [
877 "getrandom 0.2.4",
984 "getrandom 0.2.8",
878 985 ]
879 986
880 987 [[package]]
@@ -902,16 +1009,16 version = "0.3.1"
902 1009 source = "registry+https://github.com/rust-lang/crates.io-index"
903 1010 checksum = "59cad018caf63deb318e5a4586d99a24424a364f40f1e5778c29aca23f4fc73e"
904 1011 dependencies = [
905 "rand_core 0.6.3",
1012 "rand_core 0.6.4",
906 1013 ]
907 1014
908 1015 [[package]]
909 1016 name = "rand_xoshiro"
910 version = "0.4.0"
1017 version = "0.6.0"
911 1018 source = "registry+https://github.com/rust-lang/crates.io-index"
912 checksum = "a9fcdd2e881d02f1d9390ae47ad8e5696a9e4be7b547a1da2afbc61973217004"
1019 checksum = "6f97cdb2a36ed4183de61b2f824cc45c9f1037f28afe0a322e9fff4c108b5aaa"
913 1020 dependencies = [
914 "rand_core 0.5.1",
1021 "rand_core 0.6.4",
915 1022 ]
916 1023
917 1024 [[package]]
@@ -938,18 +1045,18 dependencies = [
938 1045
939 1046 [[package]]
940 1047 name = "redox_syscall"
941 version = "0.2.11"
1048 version = "0.2.16"
942 1049 source = "registry+https://github.com/rust-lang/crates.io-index"
943 checksum = "8380fe0152551244f0747b1bf41737e0f8a74f97a14ccefd1148187271634f3c"
1050 checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a"
944 1051 dependencies = [
945 1052 "bitflags",
946 1053 ]
947 1054
948 1055 [[package]]
949 1056 name = "regex"
950 version = "1.5.5"
1057 version = "1.7.0"
951 1058 source = "registry+https://github.com/rust-lang/crates.io-index"
952 checksum = "1a11647b6b25ff05a515cb92c365cec08801e83423a235b51e231e1808747286"
1059 checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a"
953 1060 dependencies = [
954 1061 "aho-corasick",
955 1062 "memchr",
@@ -958,9 +1065,9 dependencies = [
958 1065
959 1066 [[package]]
960 1067 name = "regex-syntax"
961 version = "0.6.25"
1068 version = "0.6.28"
962 1069 source = "registry+https://github.com/rust-lang/crates.io-index"
963 checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b"
1070 checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848"
964 1071
965 1072 [[package]]
966 1073 name = "remove_dir_all"
@@ -985,7 +1092,7 dependencies = [
985 1092 "home",
986 1093 "lazy_static",
987 1094 "log",
988 "micro-timer",
1095 "logging_timer",
989 1096 "rayon",
990 1097 "regex",
991 1098 "users",
@@ -1017,20 +1124,26 source = "registry+https://github.com/ru
1017 1124 checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
1018 1125
1019 1126 [[package]]
1127 name = "scratch"
1128 version = "1.0.2"
1129 source = "registry+https://github.com/rust-lang/crates.io-index"
1130 checksum = "9c8132065adcfd6e02db789d9285a0deb2f3fcb04002865ab67d5fb103533898"
1131
1132 [[package]]
1020 1133 name = "semver"
1021 version = "1.0.6"
1134 version = "1.0.14"
1022 1135 source = "registry+https://github.com/rust-lang/crates.io-index"
1023 checksum = "a4a3381e03edd24287172047536f20cabde766e2cd3e65e6b00fb3af51c4f38d"
1136 checksum = "e25dfac463d778e353db5be2449d1cce89bd6fd23c9f1ea21310ce6e5a1b29c4"
1024 1137
1025 1138 [[package]]
1026 1139 name = "sha-1"
1027 version = "0.9.6"
1140 version = "0.9.8"
1028 1141 source = "registry+https://github.com/rust-lang/crates.io-index"
1029 checksum = "8c4cfa741c5832d0ef7fab46cabed29c2aae926db0b11bb2069edd8db5e64e16"
1142 checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6"
1030 1143 dependencies = [
1031 1144 "block-buffer 0.9.0",
1032 "cfg-if 1.0.0",
1033 "cpufeatures 0.1.4",
1145 "cfg-if",
1146 "cpufeatures",
1034 1147 "digest 0.9.0",
1035 1148 "opaque-debug",
1036 1149 ]
@@ -1041,16 +1154,16 version = "0.10.0"
1041 1154 source = "registry+https://github.com/rust-lang/crates.io-index"
1042 1155 checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f"
1043 1156 dependencies = [
1044 "cfg-if 1.0.0",
1045 "cpufeatures 0.2.1",
1046 "digest 0.10.2",
1157 "cfg-if",
1158 "cpufeatures",
1159 "digest 0.10.5",
1047 1160 ]
1048 1161
1049 1162 [[package]]
1050 1163 name = "sized-chunks"
1051 version = "0.6.2"
1164 version = "0.6.5"
1052 1165 source = "registry+https://github.com/rust-lang/crates.io-index"
1053 checksum = "1ec31ceca5644fa6d444cc77548b88b67f46db6f7c71683b0f9336e671830d2f"
1166 checksum = "16d69225bde7a69b235da73377861095455d298f2b970996eec25ddbb42b3d1e"
1054 1167 dependencies = [
1055 1168 "bitmaps",
1056 1169 "typenum",
@@ -1070,19 +1183,19 checksum = "a2eb9349b6444b326872e140eb1c
1070 1183
1071 1184 [[package]]
1072 1185 name = "strsim"
1073 version = "0.8.0"
1186 version = "0.10.0"
1074 1187 source = "registry+https://github.com/rust-lang/crates.io-index"
1075 checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
1188 checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
1076 1189
1077 1190 [[package]]
1078 1191 name = "syn"
1079 version = "1.0.54"
1192 version = "1.0.103"
1080 1193 source = "registry+https://github.com/rust-lang/crates.io-index"
1081 checksum = "9a2af957a63d6bd42255c359c93d9bfdb97076bd3b820897ce55ffbfbf107f44"
1194 checksum = "a864042229133ada95abf3b54fdc62ef5ccabe9515b64717bcb9a1919e59445d"
1082 1195 dependencies = [
1083 1196 "proc-macro2",
1084 1197 "quote",
1085 "unicode-xid",
1198 "unicode-ident",
1086 1199 ]
1087 1200
1088 1201 [[package]]
@@ -1091,7 +1204,7 version = "3.3.0"
1091 1204 source = "registry+https://github.com/rust-lang/crates.io-index"
1092 1205 checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4"
1093 1206 dependencies = [
1094 "cfg-if 1.0.0",
1207 "cfg-if",
1095 1208 "fastrand",
1096 1209 "libc",
1097 1210 "redox_syscall",
@@ -1101,23 +1214,14 dependencies = [
1101 1214
1102 1215 [[package]]
1103 1216 name = "termcolor"
1104 version = "1.1.2"
1217 version = "1.1.3"
1105 1218 source = "registry+https://github.com/rust-lang/crates.io-index"
1106 checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4"
1219 checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755"
1107 1220 dependencies = [
1108 1221 "winapi-util",
1109 1222 ]
1110 1223
1111 1224 [[package]]
1112 name = "textwrap"
1113 version = "0.11.0"
1114 source = "registry+https://github.com/rust-lang/crates.io-index"
1115 checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
1116 dependencies = [
1117 "unicode-width",
1118 ]
1119
1120 [[package]]
1121 1225 name = "thread_local"
1122 1226 version = "1.1.4"
1123 1227 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1139,32 +1243,32 dependencies = [
1139 1243
1140 1244 [[package]]
1141 1245 name = "twox-hash"
1142 version = "1.6.2"
1246 version = "1.6.3"
1143 1247 source = "registry+https://github.com/rust-lang/crates.io-index"
1144 checksum = "4ee73e6e4924fe940354b8d4d98cad5231175d615cd855b758adc658c0aac6a0"
1248 checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675"
1145 1249 dependencies = [
1146 "cfg-if 1.0.0",
1250 "cfg-if",
1147 1251 "rand 0.8.5",
1148 1252 "static_assertions",
1149 1253 ]
1150 1254
1151 1255 [[package]]
1152 1256 name = "typenum"
1153 version = "1.12.0"
1257 version = "1.15.0"
1154 1258 source = "registry+https://github.com/rust-lang/crates.io-index"
1155 checksum = "373c8a200f9e67a0c95e62a4f52fbf80c23b4381c05a17845531982fa99e6b33"
1259 checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987"
1260
1261 [[package]]
1262 name = "unicode-ident"
1263 version = "1.0.5"
1264 source = "registry+https://github.com/rust-lang/crates.io-index"
1265 checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3"
1156 1266
1157 1267 [[package]]
1158 1268 name = "unicode-width"
1159 version = "0.1.9"
1269 version = "0.1.10"
1160 1270 source = "registry+https://github.com/rust-lang/crates.io-index"
1161 checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973"
1162
1163 [[package]]
1164 name = "unicode-xid"
1165 version = "0.2.1"
1166 source = "registry+https://github.com/rust-lang/crates.io-index"
1167 checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564"
1271 checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b"
1168 1272
1169 1273 [[package]]
1170 1274 name = "users"
@@ -1178,9 +1282,9 dependencies = [
1178 1282
1179 1283 [[package]]
1180 1284 name = "vcpkg"
1181 version = "0.2.11"
1285 version = "0.2.15"
1182 1286 source = "registry+https://github.com/rust-lang/crates.io-index"
1183 checksum = "b00bca6106a5e23f3eee943593759b7fcddb00554332e856d990c893966879fb"
1287 checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426"
1184 1288
1185 1289 [[package]]
1186 1290 name = "vcsgraph"
@@ -1190,20 +1294,14 checksum = "4cb68c231e2575f7503a7c192138
1190 1294 dependencies = [
1191 1295 "hex",
1192 1296 "rand 0.7.3",
1193 "sha-1 0.9.6",
1297 "sha-1 0.9.8",
1194 1298 ]
1195 1299
1196 1300 [[package]]
1197 name = "vec_map"
1198 version = "0.8.2"
1301 name = "version_check"
1302 version = "0.9.4"
1199 1303 source = "registry+https://github.com/rust-lang/crates.io-index"
1200 checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"
1201
1202 [[package]]
1203 name = "version_check"
1204 version = "0.9.2"
1205 source = "registry+https://github.com/rust-lang/crates.io-index"
1206 checksum = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed"
1304 checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
1207 1305
1208 1306 [[package]]
1209 1307 name = "wasi"
@@ -1218,14 +1316,74 source = "registry+https://github.com/ru
1218 1316 checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f"
1219 1317
1220 1318 [[package]]
1319 name = "wasi"
1320 version = "0.11.0+wasi-snapshot-preview1"
1321 source = "registry+https://github.com/rust-lang/crates.io-index"
1322 checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
1323
1324 [[package]]
1325 name = "wasm-bindgen"
1326 version = "0.2.83"
1327 source = "registry+https://github.com/rust-lang/crates.io-index"
1328 checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268"
1329 dependencies = [
1330 "cfg-if",
1331 "wasm-bindgen-macro",
1332 ]
1333
1334 [[package]]
1335 name = "wasm-bindgen-backend"
1336 version = "0.2.83"
1337 source = "registry+https://github.com/rust-lang/crates.io-index"
1338 checksum = "4c8ffb332579b0557b52d268b91feab8df3615f265d5270fec2a8c95b17c1142"
1339 dependencies = [
1340 "bumpalo",
1341 "log",
1342 "once_cell",
1343 "proc-macro2",
1344 "quote",
1345 "syn",
1346 "wasm-bindgen-shared",
1347 ]
1348
1349 [[package]]
1350 name = "wasm-bindgen-macro"
1351 version = "0.2.83"
1352 source = "registry+https://github.com/rust-lang/crates.io-index"
1353 checksum = "052be0f94026e6cbc75cdefc9bae13fd6052cdcaf532fa6c45e7ae33a1e6c810"
1354 dependencies = [
1355 "quote",
1356 "wasm-bindgen-macro-support",
1357 ]
1358
1359 [[package]]
1360 name = "wasm-bindgen-macro-support"
1361 version = "0.2.83"
1362 source = "registry+https://github.com/rust-lang/crates.io-index"
1363 checksum = "07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c"
1364 dependencies = [
1365 "proc-macro2",
1366 "quote",
1367 "syn",
1368 "wasm-bindgen-backend",
1369 "wasm-bindgen-shared",
1370 ]
1371
1372 [[package]]
1373 name = "wasm-bindgen-shared"
1374 version = "0.2.83"
1375 source = "registry+https://github.com/rust-lang/crates.io-index"
1376 checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f"
1377
1378 [[package]]
1221 1379 name = "which"
1222 version = "4.2.5"
1380 version = "4.3.0"
1223 1381 source = "registry+https://github.com/rust-lang/crates.io-index"
1224 checksum = "5c4fb54e6113b6a8772ee41c3404fb0301ac79604489467e0a9ce1f3e97c24ae"
1382 checksum = "1c831fbbee9e129a8cf93e7747a82da9d95ba8e16621cae60ec2cdc849bacb7b"
1225 1383 dependencies = [
1226 1384 "either",
1227 "lazy_static",
1228 1385 "libc",
1386 "once_cell",
1229 1387 ]
1230 1388
1231 1389 [[package]]
@@ -1260,19 +1418,25 source = "registry+https://github.com/ru
1260 1418 checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
1261 1419
1262 1420 [[package]]
1421 name = "yansi"
1422 version = "0.5.1"
1423 source = "registry+https://github.com/rust-lang/crates.io-index"
1424 checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec"
1425
1426 [[package]]
1263 1427 name = "zstd"
1264 version = "0.5.4+zstd.1.4.7"
1428 version = "0.11.2+zstd.1.5.2"
1265 1429 source = "registry+https://github.com/rust-lang/crates.io-index"
1266 checksum = "69996ebdb1ba8b1517f61387a883857818a66c8a295f487b1ffd8fd9d2c82910"
1430 checksum = "20cc960326ece64f010d2d2107537f26dc589a6573a316bd5b1dba685fa5fde4"
1267 1431 dependencies = [
1268 1432 "zstd-safe",
1269 1433 ]
1270 1434
1271 1435 [[package]]
1272 1436 name = "zstd-safe"
1273 version = "2.0.6+zstd.1.4.7"
1437 version = "5.0.2+zstd.1.5.2"
1274 1438 source = "registry+https://github.com/rust-lang/crates.io-index"
1275 checksum = "98aa931fb69ecee256d44589d19754e61851ae4769bf963b385119b1cc37a49e"
1439 checksum = "1d2a5585e04f9eea4b2a3d1eca508c4dee9592a89ef6f450c11719da0726f4db"
1276 1440 dependencies = [
1277 1441 "libc",
1278 1442 "zstd-sys",
@@ -1280,12 +1444,10 dependencies = [
1280 1444
1281 1445 [[package]]
1282 1446 name = "zstd-sys"
1283 version = "1.4.18+zstd.1.4.7"
1447 version = "2.0.1+zstd.1.5.2"
1284 1448 source = "registry+https://github.com/rust-lang/crates.io-index"
1285 checksum = "a1e6e8778706838f43f771d80d37787cb2fe06dafe89dd3aebaf6721b9eaec81"
1449 checksum = "9fd07cbbc53846d9145dbffdf6dd09a7a0aa52be46741825f5c97bdd4f73f12b"
1286 1450 dependencies = [
1287 1451 "cc",
1288 "glob",
1289 "itertools 0.9.0",
1290 1452 "libc",
1291 1453 ]
@@ -77,8 +77,8 Example usage:
77 77 Developing Rust
78 78 ===============
79 79
80 The current version of Rust in use is ``1.48.0``, because it's what Debian
81 stable has. You can use ``rustup override set 1.48.0`` at the root of the repo
80 The current version of Rust in use is ``1.61.0``, because it's what Debian
81 testing has. You can use ``rustup override set 1.61.0`` at the root of the repo
82 82 to make it easier on you.
83 83
84 84 Go to the ``hg-cpython`` folder::
@@ -3,50 +3,50 name = "hg-core"
3 3 version = "0.1.0"
4 4 authors = ["Georges Racinet <gracinet@anybox.fr>"]
5 5 description = "Mercurial pure Rust core library, with no assumption on Python bindings (FFI)"
6 edition = "2018"
6 edition = "2021"
7 7
8 8 [lib]
9 9 name = "hg"
10 10
11 11 [dependencies]
12 12 bitflags = "1.3.2"
13 bytes-cast = "0.2.0"
13 bytes-cast = "0.3.0"
14 14 byteorder = "1.4.3"
15 15 derive_more = "0.99.17"
16 hashbrown = { version = "0.9.1", features = ["rayon"] }
17 home = "0.5.3"
18 im-rc = "15.0"
19 itertools = "0.10.3"
16 hashbrown = { version = "0.13.1", features = ["rayon"] }
17 home = "0.5.4"
18 im-rc = "15.1.0"
19 itertools = "0.10.5"
20 20 lazy_static = "1.4.0"
21 libc = "0.2"
22 ouroboros = "0.15.0"
23 rand = "0.8.4"
21 libc = "0.2.137"
22 logging_timer = "1.1.0"
23 ouroboros = "0.15.5"
24 rand = "0.8.5"
24 25 rand_pcg = "0.3.1"
25 26 rand_distr = "0.4.3"
26 27 rayon = "1.6.1"
27 regex = "1.5.5"
28 regex = "1.7.0"
28 29 sha-1 = "0.10.0"
29 twox-hash = "1.6.2"
30 twox-hash = "1.6.3"
30 31 same-file = "1.0.6"
31 tempfile = "3.1.0"
32 tempfile = "3.3.0"
32 33 thread_local = "1.1.4"
33 crossbeam-channel = "0.5.0"
34 micro-timer = "0.4.0"
35 log = "0.4.8"
36 memmap2 = { version = "0.5.3", features = ["stable_deref_trait"] }
37 zstd = "0.5.3"
34 crossbeam-channel = "0.5.6"
35 log = "0.4.17"
36 memmap2 = { version = "0.5.8", features = ["stable_deref_trait"] }
37 zstd = "0.11.2"
38 38 format-bytes = "0.3.0"
39 39 # once_cell 1.15 uses edition 2021, while the heptapod CI
40 40 # uses an old version of Cargo that doesn't support it.
41 once_cell = "1.14.0"
41 once_cell = "1.16.0"
42 42
43 43 # We don't use the `miniz-oxide` backend to not change rhg benchmarks and until
44 44 # we have a clearer view of which backend is the fastest.
45 45 [dependencies.flate2]
46 version = "1.0.22"
46 version = "1.0.24"
47 47 features = ["zlib"]
48 48 default-features = false
49 49
50 50 [dev-dependencies]
51 clap = "2.34.0"
51 clap = { version = "4.0.24", features = ["derive"] }
52 52 pretty_assertions = "1.1.0"
@@ -3,7 +3,6
3 3 // This software may be used and distributed according to the terms of the
4 4 // GNU General Public License version 2 or any later version.
5 5
6 use clap::*;
7 6 use hg::revlog::node::*;
8 7 use hg::revlog::nodemap::*;
9 8 use hg::revlog::*;
@@ -13,7 +12,6 use std::fs::File;
13 12 use std::io;
14 13 use std::io::Write;
15 14 use std::path::{Path, PathBuf};
16 use std::str::FromStr;
17 15 use std::time::Instant;
18 16
19 17 mod index;
@@ -42,7 +40,7 fn create(index: &Index, path: &Path) ->
42 40 nm.insert(index, index.node(rev).unwrap(), rev).unwrap();
43 41 }
44 42 eprintln!("Nodemap constructed in RAM in {:?}", start.elapsed());
45 file.write(&nm.into_readonly_and_added_bytes().1)?;
43 file.write_all(&nm.into_readonly_and_added_bytes().1)?;
46 44 eprintln!("Nodemap written to disk");
47 45 Ok(())
48 46 }
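A note on the `write` → `write_all` change above: `std::io::Write::write` is allowed to perform a short write and returns how many bytes it actually consumed, a count the old call silently discarded. A minimal sketch of the difference, with hypothetical `file` and `bytes` names::

    use std::fs::File;
    use std::io::Write;

    fn save(file: &mut File, bytes: &[u8]) -> std::io::Result<()> {
        // May write only a prefix of `bytes`; the returned count would
        // have to be checked and the call retried in a loop.
        let _written: usize = file.write(bytes)?;
        // Loops internally until every byte is written (or an error
        // occurs), which is what the nodemap serialization needs.
        file.write_all(bytes)
    }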
@@ -57,12 +55,7 fn bench(index: &Index, nm: &NodeTree, q
57 55 let len = index.len() as u32;
58 56 let mut rng = rand::thread_rng();
59 57 let nodes: Vec<Node> = (0..queries)
60 .map(|_| {
61 index
62 .node((rng.gen::<u32>() % len) as Revision)
63 .unwrap()
64 .clone()
65 })
58 .map(|_| *index.node((rng.gen::<u32>() % len) as Revision).unwrap())
66 59 .collect();
67 60 if queries < 10 {
68 61 let nodes_hex: Vec<String> =
@@ -86,61 +79,66 fn bench(index: &Index, nm: &NodeTree, q
86 79 }
87 80
88 81 fn main() {
89 let matches = App::new("Nodemap pure Rust example")
90 .arg(
91 Arg::with_name("REPOSITORY")
92 .help("Path to the repository, always necessary for its index")
93 .required(true),
94 )
95 .arg(
96 Arg::with_name("NODEMAP_FILE")
97 .help("Path to the nodemap file, independent of REPOSITORY")
98 .required(true),
99 )
100 .subcommand(
101 SubCommand::with_name("create")
102 .about("Create NODEMAP_FILE by scanning repository index"),
103 )
104 .subcommand(
105 SubCommand::with_name("query")
106 .about("Query NODEMAP_FILE for PREFIX")
107 .arg(Arg::with_name("PREFIX").required(true)),
108 )
109 .subcommand(
110 SubCommand::with_name("bench")
111 .about(
112 "Perform #QUERIES random successful queries on NODEMAP_FILE")
113 .arg(Arg::with_name("QUERIES").required(true)),
114 )
115 .get_matches();
82 use clap::{Parser, Subcommand};
116 83
117 let repo = matches.value_of("REPOSITORY").unwrap();
118 let nm_path = matches.value_of("NODEMAP_FILE").unwrap();
119
120 let index = mmap_index(&Path::new(repo));
84 #[derive(Parser)]
85 #[command()]
86 /// Nodemap pure Rust example
87 struct App {
88 // Path to the repository, always necessary for its index
89 #[arg(short, long)]
90 repository: PathBuf,
91 // Path to the nodemap file, independent of REPOSITORY
92 #[arg(short, long)]
93 nodemap_file: PathBuf,
94 #[command(subcommand)]
95 command: Command,
96 }
121 97
122 if let Some(_) = matches.subcommand_matches("create") {
123 println!("Creating nodemap file {} for repository {}", nm_path, repo);
124 create(&index, &Path::new(nm_path)).unwrap();
125 return;
98 #[derive(Subcommand)]
99 enum Command {
100 /// Create `NODEMAP_FILE` by scanning repository index
101 Create,
102 /// Query `NODEMAP_FILE` for `prefix`
103 Query { prefix: String },
104 /// Perform #`QUERIES` random successful queries on `NODEMAP_FILE`
105 Bench { queries: usize },
126 106 }
127 107
128 let nm = mmap_nodemap(&Path::new(nm_path));
129 if let Some(matches) = matches.subcommand_matches("query") {
130 let prefix = matches.value_of("PREFIX").unwrap();
131 println!(
132 "Querying {} in nodemap file {} of repository {}",
133 prefix, nm_path, repo
134 );
135 query(&index, &nm, prefix);
136 }
137 if let Some(matches) = matches.subcommand_matches("bench") {
138 let queries =
139 usize::from_str(matches.value_of("QUERIES").unwrap()).unwrap();
140 println!(
141 "Doing {} random queries in nodemap file {} of repository {}",
142 queries, nm_path, repo
143 );
144 bench(&index, &nm, queries);
108 let app = App::parse();
109
110 let repo = &app.repository;
111 let nm_path = &app.nodemap_file;
112
113 let index = mmap_index(repo);
114 let nm = mmap_nodemap(nm_path);
115
116 match &app.command {
117 Command::Create => {
118 println!(
119 "Creating nodemap file {} for repository {}",
120 nm_path.display(),
121 repo.display()
122 );
123 create(&index, Path::new(nm_path)).unwrap();
124 }
125 Command::Bench { queries } => {
126 println!(
127 "Doing {} random queries in nodemap file {} of repository {}",
128 queries,
129 nm_path.display(),
130 repo.display()
131 );
132 bench(&index, &nm, *queries);
133 }
134 Command::Query { prefix } => {
135 println!(
136 "Querying {} in nodemap file {} of repository {}",
137 prefix,
138 nm_path.display(),
139 repo.display()
140 );
141 query(&index, &nm, prefix);
142 }
145 143 }
146 144 }
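For context, the rewrite above migrates from clap 2's builder API (`App::new`, `Arg::with_name`, `SubCommand`) to clap 4's derive API, enabled by the `derive` feature added in `Cargo.toml`. A stripped-down sketch of the pattern, using hypothetical names rather than the real example binary::

    use clap::{Parser, Subcommand};
    use std::path::PathBuf;

    /// Doc comments on the struct and its fields become `--help` text.
    #[derive(Parser)]
    struct Cli {
        /// Accepted as `-r PATH` or `--repository PATH`
        #[arg(short, long)]
        repository: PathBuf,
        #[command(subcommand)]
        command: Cmd,
    }

    #[derive(Subcommand)]
    enum Cmd {
        /// Each variant is a subcommand; its fields are positional args.
        Query { prefix: String },
    }

    fn main() {
        let cli = Cli::parse();
        match cli.command {
            Cmd::Query { prefix } => println!("querying {prefix}"),
        }
    }

One detail worth noting: the committed code documents its two path fields with plain `//` comments, and only `///` doc comments surface in clap's generated help output.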
@@ -175,7 +175,7 impl<G: Graph> MissingAncestors<G> {
175 175 ///
176 176 /// This is useful in unit tests, but setdiscovery.py also
177 177 /// reads the bases attribute of an ancestor.missingancestors instance.
178 pub fn get_bases<'a>(&'a self) -> &'a HashSet<Revision> {
178 pub fn get_bases(&self) -> &HashSet<Revision> {
179 179 &self.bases
180 180 }
181 181
@@ -288,7 +288,7 impl<G: Graph> MissingAncestors<G> {
288 288 .collect();
289 289 let revs_visit = &mut revs;
290 290 let mut both_visit: HashSet<Revision> =
291 revs_visit.intersection(&bases_visit).cloned().collect();
291 revs_visit.intersection(bases_visit).cloned().collect();
292 292 if revs_visit.is_empty() {
293 293 return Ok(Vec::new());
294 294 }
@@ -357,7 +357,6 mod tests {
357 357
358 358 use super::*;
359 359 use crate::testing::{SampleGraph, VecGraph};
360 use std::iter::FromIterator;
361 360
362 361 fn list_ancestors<G: Graph>(
363 362 graph: G,
@@ -504,18 +503,18 mod tests {
504 503 MissingAncestors::new(SampleGraph, [5, 3, 1, 3].iter().cloned());
505 504 let mut as_vec: Vec<Revision> =
506 505 missing_ancestors.get_bases().iter().cloned().collect();
507 as_vec.sort();
506 as_vec.sort_unstable();
508 507 assert_eq!(as_vec, [1, 3, 5]);
509 508 assert_eq!(missing_ancestors.max_base, 5);
510 509
511 510 missing_ancestors.add_bases([3, 7, 8].iter().cloned());
512 511 as_vec = missing_ancestors.get_bases().iter().cloned().collect();
513 as_vec.sort();
512 as_vec.sort_unstable();
514 513 assert_eq!(as_vec, [1, 3, 5, 7, 8]);
515 514 assert_eq!(missing_ancestors.max_base, 8);
516 515
517 516 as_vec = missing_ancestors.bases_heads()?.iter().cloned().collect();
518 as_vec.sort();
517 as_vec.sort_unstable();
519 518 assert_eq!(as_vec, [3, 5, 7, 8]);
520 519 Ok(())
521 520 }
@@ -532,7 +531,7 mod tests {
532 531 .remove_ancestors_from(&mut revset)
533 532 .unwrap();
534 533 let mut as_vec: Vec<Revision> = revset.into_iter().collect();
535 as_vec.sort();
534 as_vec.sort_unstable();
536 535 assert_eq!(as_vec.as_slice(), expected);
537 536 }
538 537
@@ -573,6 +572,7 mod tests {
573 572 /// the one in test-ancestor.py. An early version of Rust MissingAncestors
574 573 /// failed this, yet none of the integration tests of the whole suite
575 574 /// caught it.
575 #[allow(clippy::unnecessary_cast)]
576 576 #[test]
577 577 fn test_remove_ancestors_from_case1() {
578 578 let graph: VecGraph = vec![
@@ -1,654 +1,1
1 // config.rs
2 //
3 // Copyright 2020
4 // Valentin Gatien-Baron,
5 // Raphaël Gomès <rgomes@octobus.net>
6 //
7 // This software may be used and distributed according to the terms of the
8 // GNU General Public License version 2 or any later version.
9 1
10 use super::layer;
11 use super::values;
12 use crate::config::layer::{
13 ConfigError, ConfigLayer, ConfigOrigin, ConfigValue,
14 };
15 use crate::config::plain_info::PlainInfo;
16 use crate::utils::files::get_bytes_from_os_str;
17 use format_bytes::{write_bytes, DisplayBytes};
18 use std::collections::HashSet;
19 use std::env;
20 use std::fmt;
21 use std::path::{Path, PathBuf};
22 use std::str;
23
24 use crate::errors::{HgResultExt, IoResultExt};
25
26 /// Holds the config values for the current repository
27 /// TODO update this docstring once we support more sources
28 #[derive(Clone)]
29 pub struct Config {
30 layers: Vec<layer::ConfigLayer>,
31 plain: PlainInfo,
32 }
33
34 impl DisplayBytes for Config {
35 fn display_bytes(
36 &self,
37 out: &mut dyn std::io::Write,
38 ) -> std::io::Result<()> {
39 for (index, layer) in self.layers.iter().rev().enumerate() {
40 write_bytes!(
41 out,
42 b"==== Layer {} (trusted: {}) ====\n{}",
43 index,
44 if layer.trusted {
45 &b"yes"[..]
46 } else {
47 &b"no"[..]
48 },
49 layer
50 )?;
51 }
52 Ok(())
53 }
54 }
55
56 pub enum ConfigSource {
57 /// Absolute path to a config file
58 AbsPath(PathBuf),
59 /// Already parsed (from the CLI, env, Python resources, etc.)
60 Parsed(layer::ConfigLayer),
61 }
62
63 #[derive(Debug)]
64 pub struct ConfigValueParseError {
65 pub origin: ConfigOrigin,
66 pub line: Option<usize>,
67 pub section: Vec<u8>,
68 pub item: Vec<u8>,
69 pub value: Vec<u8>,
70 pub expected_type: &'static str,
71 }
72
73 impl fmt::Display for ConfigValueParseError {
74 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
75 // TODO: add origin and line number information, here and in
76 // corresponding python code
77 write!(
78 f,
79 "config error: {}.{} is not a {} ('{}')",
80 String::from_utf8_lossy(&self.section),
81 String::from_utf8_lossy(&self.item),
82 self.expected_type,
83 String::from_utf8_lossy(&self.value)
84 )
85 }
86 }
87
88 /// Returns true if the config item is disabled by PLAIN or PLAINEXCEPT
89 fn should_ignore(plain: &PlainInfo, section: &[u8], item: &[u8]) -> bool {
90 // duplication with [_applyconfig] in [ui.py].
91 if !plain.is_plain() {
92 return false;
93 }
94 if section == b"alias" {
95 return plain.plainalias();
96 }
97 if section == b"revsetalias" {
98 return plain.plainrevsetalias();
99 }
100 if section == b"templatealias" {
101 return plain.plaintemplatealias();
102 }
103 if section == b"ui" {
104 let to_delete: &[&[u8]] = &[
105 b"debug",
106 b"fallbackencoding",
107 b"quiet",
108 b"slash",
109 b"logtemplate",
110 b"message-output",
111 b"statuscopies",
112 b"style",
113 b"traceback",
114 b"verbose",
115 ];
116 return to_delete.contains(&item);
117 }
118 let sections_to_delete: &[&[u8]] =
119 &[b"defaults", b"commands", b"command-templates"];
120 return sections_to_delete.contains(&section);
121 }
122
123 impl Config {
124 /// The configuration to use when printing configuration-loading errors
125 pub fn empty() -> Self {
126 Self {
127 layers: Vec::new(),
128 plain: PlainInfo::empty(),
129 }
130 }
131
132 /// Load system and user configuration from various files.
133 ///
134 /// This is also affected by some environment variables.
135 pub fn load_non_repo() -> Result<Self, ConfigError> {
136 let mut config = Self::empty();
137 let opt_rc_path = env::var_os("HGRCPATH");
138 // HGRCPATH replaces system config
139 if opt_rc_path.is_none() {
140 config.add_system_config()?
141 }
142
143 config.add_for_environment_variable("EDITOR", b"ui", b"editor");
144 config.add_for_environment_variable("VISUAL", b"ui", b"editor");
145 config.add_for_environment_variable("PAGER", b"pager", b"pager");
146
147 // These are set by `run-tests.py --rhg` to enable fallback for the
148 // entire test suite. Alternatives would be setting configuration
149 // through `$HGRCPATH` but some tests override that, or changing the
150 // `hg` shell alias to include `--config` but that disrupts tests that
151 // print command lines and check expected output.
152 config.add_for_environment_variable(
153 "RHG_ON_UNSUPPORTED",
154 b"rhg",
155 b"on-unsupported",
156 );
157 config.add_for_environment_variable(
158 "RHG_FALLBACK_EXECUTABLE",
159 b"rhg",
160 b"fallback-executable",
161 );
162
163 // HGRCPATH replaces user config
164 if opt_rc_path.is_none() {
165 config.add_user_config()?
166 }
167 if let Some(rc_path) = &opt_rc_path {
168 for path in env::split_paths(rc_path) {
169 if !path.as_os_str().is_empty() {
170 if path.is_dir() {
171 config.add_trusted_dir(&path)?
172 } else {
173 config.add_trusted_file(&path)?
174 }
175 }
176 }
177 }
178 Ok(config)
179 }
180
181 pub fn load_cli_args(
182 &mut self,
183 cli_config_args: impl IntoIterator<Item = impl AsRef<[u8]>>,
184 color_arg: Option<Vec<u8>>,
185 ) -> Result<(), ConfigError> {
186 if let Some(layer) = ConfigLayer::parse_cli_args(cli_config_args)? {
187 self.layers.push(layer)
188 }
189 if let Some(arg) = color_arg {
190 let mut layer = ConfigLayer::new(ConfigOrigin::CommandLineColor);
191 layer.add(b"ui"[..].into(), b"color"[..].into(), arg, None);
192 self.layers.push(layer)
193 }
194 Ok(())
195 }
196
197 fn add_trusted_dir(&mut self, path: &Path) -> Result<(), ConfigError> {
198 if let Some(entries) = std::fs::read_dir(path)
199 .when_reading_file(path)
200 .io_not_found_as_none()?
201 {
202 let mut file_paths = entries
203 .map(|result| {
204 result.when_reading_file(path).map(|entry| entry.path())
205 })
206 .collect::<Result<Vec<_>, _>>()?;
207 file_paths.sort();
208 for file_path in &file_paths {
209 if file_path.extension() == Some(std::ffi::OsStr::new("rc")) {
210 self.add_trusted_file(&file_path)?
211 }
212 }
213 }
214 Ok(())
215 }
216
217 fn add_trusted_file(&mut self, path: &Path) -> Result<(), ConfigError> {
218 if let Some(data) = std::fs::read(path)
219 .when_reading_file(path)
220 .io_not_found_as_none()?
221 {
222 self.layers.extend(ConfigLayer::parse(path, &data)?)
223 }
224 Ok(())
225 }
226
227 fn add_for_environment_variable(
228 &mut self,
229 var: &str,
230 section: &[u8],
231 key: &[u8],
232 ) {
233 if let Some(value) = env::var_os(var) {
234 let origin = layer::ConfigOrigin::Environment(var.into());
235 let mut layer = ConfigLayer::new(origin);
236 layer.add(
237 section.to_owned(),
238 key.to_owned(),
239 get_bytes_from_os_str(value),
240 None,
241 );
242 self.layers.push(layer)
243 }
244 }
245
246 #[cfg(unix)] // TODO: other platforms
247 fn add_system_config(&mut self) -> Result<(), ConfigError> {
248 let mut add_for_prefix = |prefix: &Path| -> Result<(), ConfigError> {
249 let etc = prefix.join("etc").join("mercurial");
250 self.add_trusted_file(&etc.join("hgrc"))?;
251 self.add_trusted_dir(&etc.join("hgrc.d"))
252 };
253 let root = Path::new("/");
254 // TODO: use `std::env::args_os().next().unwrap()` a.k.a. argv[0]
255 // instead? TODO: can this be a relative path?
256 let hg = crate::utils::current_exe()?;
257 // TODO: this order (per-installation then per-system) matches
258 // `systemrcpath()` in `mercurial/scmposix.py`, but
259 // `mercurial/helptext/config.txt` suggests it should be reversed
260 if let Some(installation_prefix) = hg.parent().and_then(Path::parent) {
261 if installation_prefix != root {
262 add_for_prefix(&installation_prefix)?
263 }
264 }
265 add_for_prefix(root)?;
266 Ok(())
267 }
268
269 #[cfg(unix)] // TODO: other platforms
270 fn add_user_config(&mut self) -> Result<(), ConfigError> {
271 let opt_home = home::home_dir();
272 if let Some(home) = &opt_home {
273 self.add_trusted_file(&home.join(".hgrc"))?
274 }
275 let darwin = cfg!(any(target_os = "macos", target_os = "ios"));
276 if !darwin {
277 if let Some(config_home) = env::var_os("XDG_CONFIG_HOME")
278 .map(PathBuf::from)
279 .or_else(|| opt_home.map(|home| home.join(".config")))
280 {
281 self.add_trusted_file(&config_home.join("hg").join("hgrc"))?
282 }
283 }
284 Ok(())
285 }
286
287 /// Loads in order, which means that the precedence is the same
288 /// as the order of `sources`.
289 pub fn load_from_explicit_sources(
290 sources: Vec<ConfigSource>,
291 ) -> Result<Self, ConfigError> {
292 let mut layers = vec![];
293
294 for source in sources.into_iter() {
295 match source {
296 ConfigSource::Parsed(c) => layers.push(c),
297 ConfigSource::AbsPath(c) => {
298 // TODO check if it should be trusted
299 // mercurial/ui.py:427
300 let data = match std::fs::read(&c) {
301 Err(_) => continue, // same as the python code
302 Ok(data) => data,
303 };
304 layers.extend(ConfigLayer::parse(&c, &data)?)
305 }
306 }
307 }
308
309 Ok(Config {
310 layers,
311 plain: PlainInfo::empty(),
312 })
313 }
314
315 /// Loads the per-repository config into a new `Config` which is combined
316 /// with `self`.
317 pub(crate) fn combine_with_repo(
318 &self,
319 repo_config_files: &[PathBuf],
320 ) -> Result<Self, ConfigError> {
321 let (cli_layers, other_layers) = self
322 .layers
323 .iter()
324 .cloned()
325 .partition(ConfigLayer::is_from_command_line);
326
327 let mut repo_config = Self {
328 layers: other_layers,
329 plain: PlainInfo::empty(),
330 };
331 for path in repo_config_files {
332 // TODO: check if this file should be trusted:
333 // `mercurial/ui.py:427`
334 repo_config.add_trusted_file(path)?;
335 }
336 repo_config.layers.extend(cli_layers);
337 Ok(repo_config)
338 }
339
340 pub fn apply_plain(&mut self, plain: PlainInfo) {
341 self.plain = plain;
342 }
343
344 fn get_parse<'config, T: 'config>(
345 &'config self,
346 section: &[u8],
347 item: &[u8],
348 expected_type: &'static str,
349 parse: impl Fn(&'config [u8]) -> Option<T>,
350 ) -> Result<Option<T>, ConfigValueParseError> {
351 match self.get_inner(&section, &item) {
352 Some((layer, v)) => match parse(&v.bytes) {
353 Some(b) => Ok(Some(b)),
354 None => Err(ConfigValueParseError {
355 origin: layer.origin.to_owned(),
356 line: v.line,
357 value: v.bytes.to_owned(),
358 section: section.to_owned(),
359 item: item.to_owned(),
360 expected_type,
361 }),
362 },
363 None => Ok(None),
364 }
365 }
366
367 /// Returns an `Err` if the first value found is not a valid UTF-8 string.
368 /// Otherwise, returns an `Ok(value)` if found, or `None`.
369 pub fn get_str(
370 &self,
371 section: &[u8],
372 item: &[u8],
373 ) -> Result<Option<&str>, ConfigValueParseError> {
374 self.get_parse(section, item, "ASCII or UTF-8 string", |value| {
375 str::from_utf8(value).ok()
376 })
377 }
378
379 /// Returns an `Err` if the first value found is not a valid unsigned
380 /// integer. Otherwise, returns an `Ok(value)` if found, or `None`.
381 pub fn get_u32(
382 &self,
383 section: &[u8],
384 item: &[u8],
385 ) -> Result<Option<u32>, ConfigValueParseError> {
386 self.get_parse(section, item, "valid integer", |value| {
387 str::from_utf8(value).ok()?.parse().ok()
388 })
389 }
390
391 /// Returns an `Err` if the first value found is not a valid file size
392 /// value such as `30` (default unit is bytes), `7 MB`, or `42.5 kb`.
393 /// Otherwise, returns an `Ok(value_in_bytes)` if found, or `None`.
394 pub fn get_byte_size(
395 &self,
396 section: &[u8],
397 item: &[u8],
398 ) -> Result<Option<u64>, ConfigValueParseError> {
399 self.get_parse(section, item, "byte quantity", values::parse_byte_size)
400 }
401
402 /// Returns an `Err` if the first value found is not a valid boolean.
403 /// Otherwise, returns an `Ok(option)`, where `option` is the boolean if
404 /// found, or `None`.
405 pub fn get_option(
406 &self,
407 section: &[u8],
408 item: &[u8],
409 ) -> Result<Option<bool>, ConfigValueParseError> {
410 self.get_parse(section, item, "boolean", values::parse_bool)
411 }
412
413 /// Returns the corresponding boolean in the config. Returns `Ok(false)`
414 /// if the value is not found, an `Err` if it's not a valid boolean.
415 pub fn get_bool(
416 &self,
417 section: &[u8],
418 item: &[u8],
419 ) -> Result<bool, ConfigValueParseError> {
420 Ok(self.get_option(section, item)?.unwrap_or(false))
421 }
422
423 /// Returns `true` if the extension is enabled, `false` otherwise
424 pub fn is_extension_enabled(&self, extension: &[u8]) -> bool {
425 let value = self.get(b"extensions", extension);
426 match value {
427 Some(c) => !c.starts_with(b"!"),
428 None => false,
429 }
430 }
431
432 /// If there is an `item` value in `section`, parse and return a list of
433 /// byte strings.
434 pub fn get_list(
435 &self,
436 section: &[u8],
437 item: &[u8],
438 ) -> Option<Vec<Vec<u8>>> {
439 self.get(section, item).map(values::parse_list)
440 }
441
442 /// Returns the raw value bytes of the first one found, or `None`.
443 pub fn get(&self, section: &[u8], item: &[u8]) -> Option<&[u8]> {
444 self.get_inner(section, item)
445 .map(|(_, value)| value.bytes.as_ref())
446 }
447
448 /// Returns the raw value bytes of the first one found, or `None`.
449 pub fn get_with_origin(
450 &self,
451 section: &[u8],
452 item: &[u8],
453 ) -> Option<(&[u8], &ConfigOrigin)> {
454 self.get_inner(section, item)
455 .map(|(layer, value)| (value.bytes.as_ref(), &layer.origin))
456 }
457
458 /// Returns the layer and the value of the first one found, or `None`.
459 fn get_inner(
460 &self,
461 section: &[u8],
462 item: &[u8],
463 ) -> Option<(&ConfigLayer, &ConfigValue)> {
464 // Filter out the config items that are hidden by [PLAIN].
465 // This differs from python hg where we delete them from the config.
466 let should_ignore = should_ignore(&self.plain, &section, &item);
467 for layer in self.layers.iter().rev() {
468 if !layer.trusted {
469 continue;
470 }
471 // The [PLAIN] config should not affect the defaults.
472 //
473 // However, PLAIN should also affect the "tweaked" defaults (unless
474 // "tweakdefault" is part of "HGPLAINEXCEPT").
475 //
476 // In practice the tweak-default layer is only added when it is
477 // relevant, so we can safely always take it into
478 // account here.
479 if should_ignore && !(layer.origin == ConfigOrigin::Tweakdefaults)
480 {
481 continue;
482 }
483 if let Some(v) = layer.get(&section, &item) {
484 return Some((&layer, v));
485 }
486 }
487 None
488 }
489
490 /// Return all keys defined for the given section
491 pub fn get_section_keys(&self, section: &[u8]) -> HashSet<&[u8]> {
492 self.layers
493 .iter()
494 .flat_map(|layer| layer.iter_keys(section))
495 .collect()
496 }
497
498 /// Returns whether any key is defined in the given section
499 pub fn has_non_empty_section(&self, section: &[u8]) -> bool {
500 self.layers
501 .iter()
502 .any(|layer| layer.has_non_empty_section(section))
503 }
504
505 /// Yields (key, value) pairs for everything in the given section
506 pub fn iter_section<'a>(
507 &'a self,
508 section: &'a [u8],
509 ) -> impl Iterator<Item = (&[u8], &[u8])> + 'a {
510 // TODO: Use `Iterator`’s `.peekable()` when its `peek_mut` is
511 // available:
512 // https://doc.rust-lang.org/nightly/std/iter/struct.Peekable.html#method.peek_mut
513 struct Peekable<I: Iterator> {
514 iter: I,
515 /// Remember a peeked value, even if it was None.
516 peeked: Option<Option<I::Item>>,
517 }
518
519 impl<I: Iterator> Peekable<I> {
520 fn new(iter: I) -> Self {
521 Self { iter, peeked: None }
522 }
523
524 fn next(&mut self) {
525 self.peeked = None
526 }
527
528 fn peek_mut(&mut self) -> Option<&mut I::Item> {
529 let iter = &mut self.iter;
530 self.peeked.get_or_insert_with(|| iter.next()).as_mut()
531 }
532 }
533
534 // Deduplicate keys redefined in multiple layers
535 let mut keys_already_seen = HashSet::new();
536 let mut key_is_new =
537 move |&(key, _value): &(&'a [u8], &'a [u8])| -> bool {
538 keys_already_seen.insert(key)
539 };
540 // This is similar to `flat_map` + `filter_map`, except with a single
541 // closure that owns `key_is_new` (and therefore the
542 // `keys_already_seen` set):
543 let mut layer_iters = Peekable::new(
544 self.layers
545 .iter()
546 .rev()
547 .map(move |layer| layer.iter_section(section)),
548 );
549 std::iter::from_fn(move || loop {
550 if let Some(pair) = layer_iters.peek_mut()?.find(&mut key_is_new) {
551 return Some(pair);
552 } else {
553 layer_iters.next();
554 }
555 })
556 }
557
558 /// Get raw values bytes from all layers (even untrusted ones) in order
559 /// of precedence.
560 #[cfg(test)]
561 fn get_all(&self, section: &[u8], item: &[u8]) -> Vec<&[u8]> {
562 let mut res = vec![];
563 for layer in self.layers.iter().rev() {
564 if let Some(v) = layer.get(&section, &item) {
565 res.push(v.bytes.as_ref());
566 }
567 }
568 res
569 }
570
571 // a config layer that's introduced by ui.tweakdefaults
572 fn tweakdefaults_layer() -> ConfigLayer {
573 let mut layer = ConfigLayer::new(ConfigOrigin::Tweakdefaults);
574
575 let mut add = |section: &[u8], item: &[u8], value: &[u8]| {
576 layer.add(
577 section[..].into(),
578 item[..].into(),
579 value[..].into(),
580 None,
581 );
582 };
583 // duplication of [tweakrc] from [ui.py]
584 add(b"ui", b"rollback", b"False");
585 add(b"ui", b"statuscopies", b"yes");
586 add(b"ui", b"interface", b"curses");
587 add(b"ui", b"relative-paths", b"yes");
588 add(b"commands", b"grep.all-files", b"True");
589 add(b"commands", b"update.check", b"noconflict");
590 add(b"commands", b"status.verbose", b"True");
591 add(b"commands", b"resolve.explicit-re-merge", b"True");
592 add(b"git", b"git", b"1");
593 add(b"git", b"showfunc", b"1");
594 add(b"git", b"word-diff", b"1");
595 return layer;
596 }
597
598 // introduce the tweaked defaults as implied by ui.tweakdefaults
599 pub fn tweakdefaults<'a>(&mut self) -> () {
600 self.layers.insert(0, Config::tweakdefaults_layer());
601 }
602 }
603
604 #[cfg(test)]
605 mod tests {
606 use super::*;
607 use pretty_assertions::assert_eq;
608 use std::fs::File;
609 use std::io::Write;
610
611 #[test]
612 fn test_include_layer_ordering() {
613 let tmpdir = tempfile::tempdir().unwrap();
614 let tmpdir_path = tmpdir.path();
615 let mut included_file =
616 File::create(&tmpdir_path.join("included.rc")).unwrap();
617
618 included_file.write_all(b"[section]\nitem=value1").unwrap();
619 let base_config_path = tmpdir_path.join("base.rc");
620 let mut config_file = File::create(&base_config_path).unwrap();
621 let data =
622 b"[section]\nitem=value0\n%include included.rc\nitem=value2\n\
623 [section2]\ncount = 4\nsize = 1.5 KB\nnot-count = 1.5\nnot-size = 1 ub";
624 config_file.write_all(data).unwrap();
625
626 let sources = vec![ConfigSource::AbsPath(base_config_path)];
627 let config = Config::load_from_explicit_sources(sources)
628 .expect("expected valid config");
629
630 let (_, value) = config.get_inner(b"section", b"item").unwrap();
631 assert_eq!(
632 value,
633 &ConfigValue {
634 bytes: b"value2".to_vec(),
635 line: Some(4)
636 }
637 );
638
639 let value = config.get(b"section", b"item").unwrap();
640 assert_eq!(value, b"value2",);
641 assert_eq!(
642 config.get_all(b"section", b"item"),
643 [b"value2", b"value1", b"value0"]
644 );
645
646 assert_eq!(config.get_u32(b"section2", b"count").unwrap(), Some(4));
647 assert_eq!(
648 config.get_byte_size(b"section2", b"size").unwrap(),
649 Some(1024 + 512)
650 );
651 assert!(config.get_u32(b"section2", b"not-count").is_err());
652 assert!(config.get_byte_size(b"section2", b"not-size").is_err());
653 }
654 }
@@ -94,11 +94,7 impl ConfigLayer {
94 94
95 95 /// Returns whether this layer comes from `--config` CLI arguments
96 96 pub(crate) fn is_from_command_line(&self) -> bool {
97 if let ConfigOrigin::CommandLine = self.origin {
98 true
99 } else {
100 false
101 }
97 matches!(self.origin, ConfigOrigin::CommandLine)
102 98 }
103 99
104 100 /// Add an entry to the config, overwriting the old one if already present.
@@ -111,13 +107,13 impl ConfigLayer {
111 107 ) {
112 108 self.sections
113 109 .entry(section)
114 .or_insert_with(|| HashMap::new())
110 .or_insert_with(HashMap::new)
115 111 .insert(item, ConfigValue { bytes: value, line });
116 112 }
117 113
118 114 /// Returns the config value in `<section>.<item>` if it exists
119 115 pub fn get(&self, section: &[u8], item: &[u8]) -> Option<&ConfigValue> {
120 Some(self.sections.get(section)?.get(item)?)
116 self.sections.get(section)?.get(item)
121 117 }
122 118
123 119 /// Returns the keys defined in the given section
@@ -171,7 +167,7 impl ConfigLayer {
171 167
172 168 while let Some((index, bytes)) = lines_iter.next() {
173 169 let line = Some(index + 1);
174 if let Some(m) = INCLUDE_RE.captures(&bytes) {
170 if let Some(m) = INCLUDE_RE.captures(bytes) {
175 171 let filename_bytes = &m[1];
176 172 let filename_bytes = crate::utils::expand_vars(filename_bytes);
177 173 // `Path::parent` only fails for the root directory,
@@ -205,18 +201,18 impl ConfigLayer {
205 201 }
206 202 }
207 203 }
208 } else if let Some(_) = EMPTY_RE.captures(&bytes) {
209 } else if let Some(m) = SECTION_RE.captures(&bytes) {
204 } else if EMPTY_RE.captures(bytes).is_some() {
205 } else if let Some(m) = SECTION_RE.captures(bytes) {
210 206 section = m[1].to_vec();
211 } else if let Some(m) = ITEM_RE.captures(&bytes) {
207 } else if let Some(m) = ITEM_RE.captures(bytes) {
212 208 let item = m[1].to_vec();
213 209 let mut value = m[2].to_vec();
214 210 loop {
215 211 match lines_iter.peek() {
216 212 None => break,
217 213 Some((_, v)) => {
218 if let Some(_) = COMMENT_RE.captures(&v) {
219 } else if let Some(_) = CONT_RE.captures(&v) {
214 if COMMENT_RE.captures(v).is_some() {
215 } else if CONT_RE.captures(v).is_some() {
220 216 value.extend(b"\n");
221 217 value.extend(&m[1]);
222 218 } else {
@@ -227,7 +223,7 impl ConfigLayer {
227 223 lines_iter.next();
228 224 }
229 225 current_layer.add(section.clone(), item, value, line);
230 } else if let Some(m) = UNSET_RE.captures(&bytes) {
226 } else if let Some(m) = UNSET_RE.captures(bytes) {
231 227 if let Some(map) = current_layer.sections.get_mut(&section) {
232 228 map.remove(&m[1]);
233 229 }
@@ -261,7 +257,7 impl DisplayBytes for ConfigLayer {
261 257 sections.sort_by(|e0, e1| e0.0.cmp(e1.0));
262 258
263 259 for (section, items) in sections.into_iter() {
264 let mut items: Vec<_> = items.into_iter().collect();
260 let mut items: Vec<_> = items.iter().collect();
265 261 items.sort_by(|e0, e1| e0.0.cmp(e1.0));
266 262
267 263 for (item, config_entry) in items {
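Several hunks above are mechanical clippy cleanups: `if let ... { true } else { false }` becomes `matches!`, `if let Some(_) = x {}` becomes `x.is_some()`, and `or_insert_with(|| HashMap::new())` passes the constructor directly. A compact sketch of the three rewrites::

    use std::collections::HashMap;

    fn idioms(opt: Option<u32>, map: &mut HashMap<u32, Vec<u32>>) -> bool {
        // Instead of: if let Some(_) = opt { true } else { false }
        let present = opt.is_some();
        // Instead of: if let Some(2) = opt { true } else { false }
        let is_two = matches!(opt, Some(2));
        // Passing the constructor avoids a redundant closure.
        map.entry(0).or_insert_with(Vec::new).push(1);
        present && is_two
    }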
@@ -9,10 +9,625
9 9
10 10 //! Mercurial config parsing and interfaces.
11 11
12 mod config;
13 12 mod layer;
14 13 mod plain_info;
15 14 mod values;
16 pub use config::{Config, ConfigSource, ConfigValueParseError};
17 15 pub use layer::{ConfigError, ConfigOrigin, ConfigParseError};
18 16 pub use plain_info::PlainInfo;
17
18 use self::layer::ConfigLayer;
19 use self::layer::ConfigValue;
20 use crate::errors::{HgResultExt, IoResultExt};
21 use crate::utils::files::get_bytes_from_os_str;
22 use format_bytes::{write_bytes, DisplayBytes};
23 use std::collections::HashSet;
24 use std::env;
25 use std::fmt;
26 use std::path::{Path, PathBuf};
27 use std::str;
28
29 /// Holds the config values for the current repository
30 /// TODO update this docstring once we support more sources
31 #[derive(Clone)]
32 pub struct Config {
33 layers: Vec<layer::ConfigLayer>,
34 plain: PlainInfo,
35 }
36
37 impl DisplayBytes for Config {
38 fn display_bytes(
39 &self,
40 out: &mut dyn std::io::Write,
41 ) -> std::io::Result<()> {
42 for (index, layer) in self.layers.iter().rev().enumerate() {
43 write_bytes!(
44 out,
45 b"==== Layer {} (trusted: {}) ====\n{}",
46 index,
47 if layer.trusted {
48 &b"yes"[..]
49 } else {
50 &b"no"[..]
51 },
52 layer
53 )?;
54 }
55 Ok(())
56 }
57 }
58
59 pub enum ConfigSource {
60 /// Absolute path to a config file
61 AbsPath(PathBuf),
62 /// Already parsed (from the CLI, env, Python resources, etc.)
63 Parsed(layer::ConfigLayer),
64 }
65
66 #[derive(Debug)]
67 pub struct ConfigValueParseError {
68 pub origin: ConfigOrigin,
69 pub line: Option<usize>,
70 pub section: Vec<u8>,
71 pub item: Vec<u8>,
72 pub value: Vec<u8>,
73 pub expected_type: &'static str,
74 }
75
76 impl fmt::Display for ConfigValueParseError {
77 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
78 // TODO: add origin and line number information, here and in
79 // corresponding python code
80 write!(
81 f,
82 "config error: {}.{} is not a {} ('{}')",
83 String::from_utf8_lossy(&self.section),
84 String::from_utf8_lossy(&self.item),
85 self.expected_type,
86 String::from_utf8_lossy(&self.value)
87 )
88 }
89 }
90
91 /// Returns true if the config item is disabled by PLAIN or PLAINEXCEPT
92 fn should_ignore(plain: &PlainInfo, section: &[u8], item: &[u8]) -> bool {
93 // duplication with [_applyconfig] in [ui.py].
94 if !plain.is_plain() {
95 return false;
96 }
97 if section == b"alias" {
98 return plain.plainalias();
99 }
100 if section == b"revsetalias" {
101 return plain.plainrevsetalias();
102 }
103 if section == b"templatealias" {
104 return plain.plaintemplatealias();
105 }
106 if section == b"ui" {
107 let to_delete: &[&[u8]] = &[
108 b"debug",
109 b"fallbackencoding",
110 b"quiet",
111 b"slash",
112 b"logtemplate",
113 b"message-output",
114 b"statuscopies",
115 b"style",
116 b"traceback",
117 b"verbose",
118 ];
119 return to_delete.contains(&item);
120 }
121 let sections_to_delete: &[&[u8]] =
122 &[b"defaults", b"commands", b"command-templates"];
123 sections_to_delete.contains(&section)
124 }
125
126 impl Config {
127 /// The configuration to use when printing configuration-loading errors
128 pub fn empty() -> Self {
129 Self {
130 layers: Vec::new(),
131 plain: PlainInfo::empty(),
132 }
133 }
134
135 /// Load system and user configuration from various files.
136 ///
137 /// This is also affected by some environment variables.
138 pub fn load_non_repo() -> Result<Self, ConfigError> {
139 let mut config = Self::empty();
140 let opt_rc_path = env::var_os("HGRCPATH");
141 // HGRCPATH replaces system config
142 if opt_rc_path.is_none() {
143 config.add_system_config()?
144 }
145
146 config.add_for_environment_variable("EDITOR", b"ui", b"editor");
147 config.add_for_environment_variable("VISUAL", b"ui", b"editor");
148 config.add_for_environment_variable("PAGER", b"pager", b"pager");
149
150 // These are set by `run-tests.py --rhg` to enable fallback for the
151 // entire test suite. Alternatives would be setting configuration
152 // through `$HGRCPATH` but some tests override that, or changing the
153 // `hg` shell alias to include `--config` but that disrupts tests that
154 // print command lines and check expected output.
155 config.add_for_environment_variable(
156 "RHG_ON_UNSUPPORTED",
157 b"rhg",
158 b"on-unsupported",
159 );
160 config.add_for_environment_variable(
161 "RHG_FALLBACK_EXECUTABLE",
162 b"rhg",
163 b"fallback-executable",
164 );
165
166 // HGRCPATH replaces user config
167 if opt_rc_path.is_none() {
168 config.add_user_config()?
169 }
170 if let Some(rc_path) = &opt_rc_path {
171 for path in env::split_paths(rc_path) {
172 if !path.as_os_str().is_empty() {
173 if path.is_dir() {
174 config.add_trusted_dir(&path)?
175 } else {
176 config.add_trusted_file(&path)?
177 }
178 }
179 }
180 }
181 Ok(config)
182 }
183
184 pub fn load_cli_args(
185 &mut self,
186 cli_config_args: impl IntoIterator<Item = impl AsRef<[u8]>>,
187 color_arg: Option<Vec<u8>>,
188 ) -> Result<(), ConfigError> {
189 if let Some(layer) = ConfigLayer::parse_cli_args(cli_config_args)? {
190 self.layers.push(layer)
191 }
192 if let Some(arg) = color_arg {
193 let mut layer = ConfigLayer::new(ConfigOrigin::CommandLineColor);
194 layer.add(b"ui"[..].into(), b"color"[..].into(), arg, None);
195 self.layers.push(layer)
196 }
197 Ok(())
198 }
199
200 fn add_trusted_dir(&mut self, path: &Path) -> Result<(), ConfigError> {
201 if let Some(entries) = std::fs::read_dir(path)
202 .when_reading_file(path)
203 .io_not_found_as_none()?
204 {
205 let mut file_paths = entries
206 .map(|result| {
207 result.when_reading_file(path).map(|entry| entry.path())
208 })
209 .collect::<Result<Vec<_>, _>>()?;
210 file_paths.sort();
211 for file_path in &file_paths {
212 if file_path.extension() == Some(std::ffi::OsStr::new("rc")) {
213 self.add_trusted_file(file_path)?
214 }
215 }
216 }
217 Ok(())
218 }
219
220 fn add_trusted_file(&mut self, path: &Path) -> Result<(), ConfigError> {
221 if let Some(data) = std::fs::read(path)
222 .when_reading_file(path)
223 .io_not_found_as_none()?
224 {
225 self.layers.extend(ConfigLayer::parse(path, &data)?)
226 }
227 Ok(())
228 }
229
230 fn add_for_environment_variable(
231 &mut self,
232 var: &str,
233 section: &[u8],
234 key: &[u8],
235 ) {
236 if let Some(value) = env::var_os(var) {
237 let origin = layer::ConfigOrigin::Environment(var.into());
238 let mut layer = ConfigLayer::new(origin);
239 layer.add(
240 section.to_owned(),
241 key.to_owned(),
242 get_bytes_from_os_str(value),
243 None,
244 );
245 self.layers.push(layer)
246 }
247 }
248
249 #[cfg(unix)] // TODO: other platforms
250 fn add_system_config(&mut self) -> Result<(), ConfigError> {
251 let mut add_for_prefix = |prefix: &Path| -> Result<(), ConfigError> {
252 let etc = prefix.join("etc").join("mercurial");
253 self.add_trusted_file(&etc.join("hgrc"))?;
254 self.add_trusted_dir(&etc.join("hgrc.d"))
255 };
256 let root = Path::new("/");
257 // TODO: use `std::env::args_os().next().unwrap()` a.k.a. argv[0]
258 // instead? TODO: can this be a relative path?
259 let hg = crate::utils::current_exe()?;
260 // TODO: this order (per-installation then per-system) matches
261 // `systemrcpath()` in `mercurial/scmposix.py`, but
262 // `mercurial/helptext/config.txt` suggests it should be reversed
263 if let Some(installation_prefix) = hg.parent().and_then(Path::parent) {
264 if installation_prefix != root {
265 add_for_prefix(installation_prefix)?
266 }
267 }
268 add_for_prefix(root)?;
269 Ok(())
270 }
271
272 #[cfg(unix)] // TODO: other platforms
273 fn add_user_config(&mut self) -> Result<(), ConfigError> {
274 let opt_home = home::home_dir();
275 if let Some(home) = &opt_home {
276 self.add_trusted_file(&home.join(".hgrc"))?
277 }
278 let darwin = cfg!(any(target_os = "macos", target_os = "ios"));
279 if !darwin {
280 if let Some(config_home) = env::var_os("XDG_CONFIG_HOME")
281 .map(PathBuf::from)
282 .or_else(|| opt_home.map(|home| home.join(".config")))
283 {
284 self.add_trusted_file(&config_home.join("hg").join("hgrc"))?
285 }
286 }
287 Ok(())
288 }
289
290 /// Loads in order, which means that the precedence is the same
291 /// as the order of `sources`.
292 pub fn load_from_explicit_sources(
293 sources: Vec<ConfigSource>,
294 ) -> Result<Self, ConfigError> {
295 let mut layers = vec![];
296
297 for source in sources.into_iter() {
298 match source {
299 ConfigSource::Parsed(c) => layers.push(c),
300 ConfigSource::AbsPath(c) => {
301 // TODO check if it should be trusted
302 // mercurial/ui.py:427
303 let data = match std::fs::read(&c) {
304 Err(_) => continue, // same as the python code
305 Ok(data) => data,
306 };
307 layers.extend(ConfigLayer::parse(&c, &data)?)
308 }
309 }
310 }
311
312 Ok(Config {
313 layers,
314 plain: PlainInfo::empty(),
315 })
316 }
317
318 /// Loads the per-repository config into a new `Config` which is combined
319 /// with `self`.
320 pub(crate) fn combine_with_repo(
321 &self,
322 repo_config_files: &[PathBuf],
323 ) -> Result<Self, ConfigError> {
324 let (cli_layers, other_layers) = self
325 .layers
326 .iter()
327 .cloned()
328 .partition(ConfigLayer::is_from_command_line);
329
330 let mut repo_config = Self {
331 layers: other_layers,
332 plain: PlainInfo::empty(),
333 };
334 for path in repo_config_files {
335 // TODO: check if this file should be trusted:
336 // `mercurial/ui.py:427`
337 repo_config.add_trusted_file(path)?;
338 }
339 repo_config.layers.extend(cli_layers);
340 Ok(repo_config)
341 }
342
343 pub fn apply_plain(&mut self, plain: PlainInfo) {
344 self.plain = plain;
345 }
346
347 fn get_parse<'config, T: 'config>(
348 &'config self,
349 section: &[u8],
350 item: &[u8],
351 expected_type: &'static str,
352 parse: impl Fn(&'config [u8]) -> Option<T>,
353 ) -> Result<Option<T>, ConfigValueParseError> {
354 match self.get_inner(section, item) {
355 Some((layer, v)) => match parse(&v.bytes) {
356 Some(b) => Ok(Some(b)),
357 None => Err(ConfigValueParseError {
358 origin: layer.origin.to_owned(),
359 line: v.line,
360 value: v.bytes.to_owned(),
361 section: section.to_owned(),
362 item: item.to_owned(),
363 expected_type,
364 }),
365 },
366 None => Ok(None),
367 }
368 }
369
370 /// Returns an `Err` if the first value found is not a valid UTF-8 string.
371 /// Otherwise, returns an `Ok(value)` if found, or `None`.
372 pub fn get_str(
373 &self,
374 section: &[u8],
375 item: &[u8],
376 ) -> Result<Option<&str>, ConfigValueParseError> {
377 self.get_parse(section, item, "ASCII or UTF-8 string", |value| {
378 str::from_utf8(value).ok()
379 })
380 }
381
382 /// Returns an `Err` if the first value found is not a valid unsigned
383 /// integer. Otherwise, returns an `Ok(value)` if found, or `None`.
384 pub fn get_u32(
385 &self,
386 section: &[u8],
387 item: &[u8],
388 ) -> Result<Option<u32>, ConfigValueParseError> {
389 self.get_parse(section, item, "valid integer", |value| {
390 str::from_utf8(value).ok()?.parse().ok()
391 })
392 }
393
394 /// Returns an `Err` if the first value found is not a valid file size
395 /// value such as `30` (default unit is bytes), `7 MB`, or `42.5 kb`.
396 /// Otherwise, returns an `Ok(value_in_bytes)` if found, or `None`.
397 pub fn get_byte_size(
398 &self,
399 section: &[u8],
400 item: &[u8],
401 ) -> Result<Option<u64>, ConfigValueParseError> {
402 self.get_parse(section, item, "byte quantity", values::parse_byte_size)
403 }
404
405 /// Returns an `Err` if the first value found is not a valid boolean.
406 /// Otherwise, returns an `Ok(option)`, where `option` is the boolean if
407 /// found, or `None`.
408 pub fn get_option(
409 &self,
410 section: &[u8],
411 item: &[u8],
412 ) -> Result<Option<bool>, ConfigValueParseError> {
413 self.get_parse(section, item, "boolean", values::parse_bool)
414 }
415
416 /// Returns the corresponding boolean in the config. Returns `Ok(false)`
417 /// if the value is not found, an `Err` if it's not a valid boolean.
418 pub fn get_bool(
419 &self,
420 section: &[u8],
421 item: &[u8],
422 ) -> Result<bool, ConfigValueParseError> {
423 Ok(self.get_option(section, item)?.unwrap_or(false))
424 }
425
426 /// Returns `true` if the extension is enabled, `false` otherwise
427 pub fn is_extension_enabled(&self, extension: &[u8]) -> bool {
428 let value = self.get(b"extensions", extension);
429 match value {
430 Some(c) => !c.starts_with(b"!"),
431 None => false,
432 }
433 }
434
435 /// If there is an `item` value in `section`, parse and return a list of
436 /// byte strings.
437 pub fn get_list(
438 &self,
439 section: &[u8],
440 item: &[u8],
441 ) -> Option<Vec<Vec<u8>>> {
442 self.get(section, item).map(values::parse_list)
443 }
444
445 /// Returns the raw value bytes of the first one found, or `None`.
446 pub fn get(&self, section: &[u8], item: &[u8]) -> Option<&[u8]> {
447 self.get_inner(section, item)
448 .map(|(_, value)| value.bytes.as_ref())
449 }
450
451 /// Returns the raw value bytes of the first one found, or `None`.
452 pub fn get_with_origin(
453 &self,
454 section: &[u8],
455 item: &[u8],
456 ) -> Option<(&[u8], &ConfigOrigin)> {
457 self.get_inner(section, item)
458 .map(|(layer, value)| (value.bytes.as_ref(), &layer.origin))
459 }
460
461 /// Returns the layer and the value of the first one found, or `None`.
462 fn get_inner(
463 &self,
464 section: &[u8],
465 item: &[u8],
466 ) -> Option<(&ConfigLayer, &ConfigValue)> {
467 // Filter out the config items that are hidden by [PLAIN].
468 // This differs from python hg where we delete them from the config.
469 let should_ignore = should_ignore(&self.plain, section, item);
470 for layer in self.layers.iter().rev() {
471 if !layer.trusted {
472 continue;
473 }
474 // The [PLAIN] config should not affect the defaults.
475 //
476 // However, PLAIN should also affect the "tweaked" defaults (unless
477 // "tweakdefault" is part of "HGPLAINEXCEPT").
478 //
479 // In practice the tweak-default layer is only added when it is
480 // relevant, so we can safely always take it into
481 // account here.
482 if should_ignore && !(layer.origin == ConfigOrigin::Tweakdefaults)
483 {
484 continue;
485 }
486 if let Some(v) = layer.get(section, item) {
487 return Some((layer, v));
488 }
489 }
490 None
491 }
492
493 /// Return all keys defined for the given section
494 pub fn get_section_keys(&self, section: &[u8]) -> HashSet<&[u8]> {
495 self.layers
496 .iter()
497 .flat_map(|layer| layer.iter_keys(section))
498 .collect()
499 }
500
501 /// Returns whether any key is defined in the given section
502 pub fn has_non_empty_section(&self, section: &[u8]) -> bool {
503 self.layers
504 .iter()
505 .any(|layer| layer.has_non_empty_section(section))
506 }
507
508 /// Yields (key, value) pairs for everything in the given section
509 pub fn iter_section<'a>(
510 &'a self,
511 section: &'a [u8],
512 ) -> impl Iterator<Item = (&[u8], &[u8])> + 'a {
513 // Deduplicate keys redefined in multiple layers
514 let mut keys_already_seen = HashSet::new();
515 let mut key_is_new =
516 move |&(key, _value): &(&'a [u8], &'a [u8])| -> bool {
517 keys_already_seen.insert(key)
518 };
519 // This is similar to `flat_map` + `filter_map`, except with a single
520 // closure that owns `key_is_new` (and therefore the
521 // `keys_already_seen` set):
522 let mut layer_iters = self
523 .layers
524 .iter()
525 .rev()
526 .map(move |layer| layer.iter_section(section))
527 .peekable();
528 std::iter::from_fn(move || loop {
529 if let Some(pair) = layer_iters.peek_mut()?.find(&mut key_is_new) {
530 return Some(pair);
531 } else {
532 layer_iters.next();
533 }
534 })
535 }
536
537 /// Get raw values bytes from all layers (even untrusted ones) in order
538 /// of precedence.
539 #[cfg(test)]
540 fn get_all(&self, section: &[u8], item: &[u8]) -> Vec<&[u8]> {
541 let mut res = vec![];
542 for layer in self.layers.iter().rev() {
543 if let Some(v) = layer.get(section, item) {
544 res.push(v.bytes.as_ref());
545 }
546 }
547 res
548 }
549
550 // a config layer that's introduced by ui.tweakdefaults
551 fn tweakdefaults_layer() -> ConfigLayer {
552 let mut layer = ConfigLayer::new(ConfigOrigin::Tweakdefaults);
553
554 let mut add = |section: &[u8], item: &[u8], value: &[u8]| {
555 layer.add(
556 section[..].into(),
557 item[..].into(),
558 value[..].into(),
559 None,
560 );
561 };
562 // duplication of [tweakrc] from [ui.py]
563 add(b"ui", b"rollback", b"False");
564 add(b"ui", b"statuscopies", b"yes");
565 add(b"ui", b"interface", b"curses");
566 add(b"ui", b"relative-paths", b"yes");
567 add(b"commands", b"grep.all-files", b"True");
568 add(b"commands", b"update.check", b"noconflict");
569 add(b"commands", b"status.verbose", b"True");
570 add(b"commands", b"resolve.explicit-re-merge", b"True");
571 add(b"git", b"git", b"1");
572 add(b"git", b"showfunc", b"1");
573 add(b"git", b"word-diff", b"1");
574 layer
575 }
576
577 // introduce the tweaked defaults as implied by ui.tweakdefaults
578 pub fn tweakdefaults(&mut self) {
579 self.layers.insert(0, Config::tweakdefaults_layer());
580 }
581 }
582
583 #[cfg(test)]
584 mod tests {
585 use super::*;
586 use pretty_assertions::assert_eq;
587 use std::fs::File;
588 use std::io::Write;
589
590 #[test]
591 fn test_include_layer_ordering() {
592 let tmpdir = tempfile::tempdir().unwrap();
593 let tmpdir_path = tmpdir.path();
594 let mut included_file =
595 File::create(&tmpdir_path.join("included.rc")).unwrap();
596
597 included_file.write_all(b"[section]\nitem=value1").unwrap();
598 let base_config_path = tmpdir_path.join("base.rc");
599 let mut config_file = File::create(&base_config_path).unwrap();
600 let data =
601 b"[section]\nitem=value0\n%include included.rc\nitem=value2\n\
602 [section2]\ncount = 4\nsize = 1.5 KB\nnot-count = 1.5\nnot-size = 1 ub";
603 config_file.write_all(data).unwrap();
604
605 let sources = vec![ConfigSource::AbsPath(base_config_path)];
606 let config = Config::load_from_explicit_sources(sources)
607 .expect("expected valid config");
608
609 let (_, value) = config.get_inner(b"section", b"item").unwrap();
610 assert_eq!(
611 value,
612 &ConfigValue {
613 bytes: b"value2".to_vec(),
614 line: Some(4)
615 }
616 );
617
618 let value = config.get(b"section", b"item").unwrap();
619 assert_eq!(value, b"value2",);
620 assert_eq!(
621 config.get_all(b"section", b"item"),
622 [b"value2", b"value1", b"value0"]
623 );
624
625 assert_eq!(config.get_u32(b"section2", b"count").unwrap(), Some(4));
626 assert_eq!(
627 config.get_byte_size(b"section2", b"size").unwrap(),
628 Some(1024 + 512)
629 );
630 assert!(config.get_u32(b"section2", b"not-count").is_err());
631 assert!(config.get_byte_size(b"section2", b"not-size").is_err());
632 }
633 }
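Taken together, the typed getters above form a small API over the layered, bytes-oriented config. A minimal usage sketch, assuming the crate is linked as `hg` with this module public at `hg::config`; the `ui.editor` and `section2.size` keys are illustrative::

    use hg::config::{Config, ConfigValueParseError};

    fn report(config: &Config) -> Result<(), ConfigValueParseError> {
        // Layers are scanned from highest precedence down; first match wins.
        let editor: Option<&str> = config.get_str(b"ui", b"editor")?;
        // Absent boolean keys default to false rather than erroring.
        let verbose: bool = config.get_bool(b"ui", b"verbose")?;
        // Unit suffixes are understood: a value of `1.5 KB` yields Some(1536).
        let size: Option<u64> = config.get_byte_size(b"section2", b"size")?;
        println!("{editor:?} {verbose} {size:?}");
        Ok(())
    }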
@@ -30,10 +30,8 pub(super) fn parse_byte_size(value: &[u
30 30 ("b", 1 << 0), // Needs to be last
31 31 ];
32 32 for &(unit, multiplier) in UNITS {
33 // TODO: use `value.strip_suffix(unit)` when we require Rust 1.45+
34 if value.ends_with(unit) {
35 let value_before_unit = &value[..value.len() - unit.len()];
36 let float: f64 = value_before_unit.trim().parse().ok()?;
33 if let Some(value) = value.strip_suffix(unit) {
34 let float: f64 = value.trim().parse().ok()?;
37 35 if float >= 0.0 {
38 36 return Some((float * multiplier as f64).round() as u64);
39 37 } else {
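The cleanup above uses `strip_suffix` (stable since Rust 1.45, hence the deleted TODO), which fuses the `ends_with` test and the manual slicing into one fallible step. A self-contained sketch of the same shape, reduced to a single hypothetical unit::

    fn parse_kb(value: &str) -> Option<u64> {
        // Some("1.5 ") for "1.5 kb"; None when the suffix is absent.
        let number = value.strip_suffix("kb")?;
        let float: f64 = number.trim().parse().ok()?;
        if float >= 0.0 {
            Some((float * 1024.0).round() as u64)
        } else {
            None
        }
    }

    // parse_kb("1.5 kb") == Some(1536), matching the byte-size test above.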
@@ -202,11 +200,7 fn parse_list_without_trim_start(input:
202 200
203 201 // https://docs.python.org/3/library/stdtypes.html?#bytes.isspace
204 202 fn is_space(byte: u8) -> bool {
205 if let b' ' | b'\t' | b'\n' | b'\r' | b'\x0b' | b'\x0c' = byte {
206 true
207 } else {
208 false
209 }
203 matches!(byte, b' ' | b'\t' | b'\n' | b'\r' | b'\x0b' | b'\x0c')
210 204 }
211 205 }
212 206
@@ -59,7 +59,7 impl CopySource {
59 59 Self {
60 60 rev,
61 61 path: winner.path,
62 overwritten: overwritten,
62 overwritten,
63 63 }
64 64 }
65 65
@@ -489,7 +489,7 fn chain_changes<'a>(
489 489 if cs1 == cs2 {
490 490 cs1.mark_delete(current_rev);
491 491 } else {
492 cs1.mark_delete_with_pair(current_rev, &cs2);
492 cs1.mark_delete_with_pair(current_rev, cs2);
493 493 }
494 494 e2.insert(cs1.clone());
495 495 }
@@ -513,15 +513,14 fn add_one_copy(
513 513 ) {
514 514 let dest = path_map.tokenize(path_dest);
515 515 let source = path_map.tokenize(path_source);
516 let entry;
517 if let Some(v) = base_copies.get(&source) {
518 entry = match &v.path {
516 let entry = if let Some(v) = base_copies.get(&source) {
517 match &v.path {
519 518 Some(path) => Some((*(path)).to_owned()),
520 519 None => Some(source.to_owned()),
521 520 }
522 521 } else {
523 entry = Some(source.to_owned());
524 }
522 Some(source.to_owned())
523 };
525 524 // Each new entry is introduced by the children, we
526 525 // record this information as we will need it to take
527 526 // the right decision when merging conflicting copy
@@ -563,17 +562,15 fn merge_copies_dict(
563 562 MergePick::Major | MergePick::Any => (src_major, src_minor),
564 563 MergePick::Minor => (src_minor, src_major),
565 564 };
566 MergeResult::UseNewValue(CopySource::new_from_merge(
565 MergeResult::New(CopySource::new_from_merge(
567 566 current_merge,
568 567 winner,
569 568 loser,
570 569 ))
571 570 } else {
572 571 match pick {
573 MergePick::Any | MergePick::Major => {
574 MergeResult::UseRightValue
575 }
576 MergePick::Minor => MergeResult::UseLeftValue,
572 MergePick::Any | MergePick::Major => MergeResult::Right,
573 MergePick::Minor => MergeResult::Left,
577 574 }
578 575 }
579 576 })
@@ -613,7 +610,7 fn compare_value(
613 610 // eventually.
614 611 (MergePick::Minor, true)
615 612 } else if src_major.path == src_minor.path {
616 debug_assert!(src_major.rev != src_major.rev);
613 debug_assert!(src_major.rev != src_minor.rev);
617 614 // we have the same value, but from another source;
618 615 if src_major.is_overwritten_by(src_minor) {
619 616 (MergePick::Minor, false)
@@ -623,7 +620,7 fn compare_value(
623 620 (MergePick::Any, true)
624 621 }
625 622 } else {
626 debug_assert!(src_major.rev != src_major.rev);
623 debug_assert!(src_major.rev != src_minor.rev);
627 624 let action = merge_case_for_dest();
628 625 if src_minor.path.is_some()
629 626 && src_major.path.is_none()
@@ -118,7 +118,7 fn test_combine_changeset_copies() {
118 118 // keys to copy source values. Note: the arrows for map literal syntax
119 119 // point **backwards** compared to the logical direction of copy!
120 120
121 use crate::NULL_REVISION as NULL;
121 use crate::revlog::NULL_REVISION as NULL;
122 122 use Action::*;
123 123 use MergeCase::*;
124 124
@@ -181,7 +181,7 mod tests {
181 181 let mut revs: HashSet<Revision> = revs.iter().cloned().collect();
182 182 retain_heads(graph, &mut revs)?;
183 183 let mut as_vec: Vec<Revision> = revs.iter().cloned().collect();
184 as_vec.sort();
184 as_vec.sort_unstable();
185 185 Ok(as_vec)
186 186 }
187 187
@@ -206,7 +206,7 mod tests {
206 206 ) -> Result<Vec<Revision>, GraphError> {
207 207 let heads = heads(graph, revs.iter())?;
208 208 let mut as_vec: Vec<Revision> = heads.iter().cloned().collect();
209 as_vec.sort();
209 as_vec.sort_unstable();
210 210 Ok(as_vec)
211 211 }
212 212
@@ -231,7 +231,7 mod tests {
231 231 ) -> Result<Vec<Revision>, GraphError> {
232 232 let set: HashSet<_> = revs.iter().cloned().collect();
233 233 let mut as_vec = roots(graph, &set)?;
234 as_vec.sort();
234 as_vec.sort_unstable();
235 235 Ok(as_vec)
236 236 }
237 237
@@ -32,7 +32,7 impl DirstateParents {
32 32 };
33 33
34 34 pub fn is_merge(&self) -> bool {
35 return !(self.p2 == NULL_NODE);
35 !(self.p2 == NULL_NODE)
36 36 }
37 37 }
38 38
@@ -232,7 +232,7 mod tests {
232 232 #[test]
233 233 fn test_delete_path_empty_path() {
234 234 let mut map =
235 DirsMultiset::from_manifest(&vec![HgPathBuf::new()]).unwrap();
235 DirsMultiset::from_manifest(&[HgPathBuf::new()]).unwrap();
236 236 let path = HgPath::new(b"");
237 237 assert_eq!(Ok(()), map.delete_path(path));
238 238 assert_eq!(
@@ -1,7 +1,6
1 1 use crate::dirstate_tree::on_disk::DirstateV2ParseError;
2 2 use crate::errors::HgError;
3 3 use bitflags::bitflags;
4 use std::convert::{TryFrom, TryInto};
5 4 use std::fs;
6 5 use std::io;
7 6 use std::time::{SystemTime, UNIX_EPOCH};
@@ -181,11 +180,7 impl TruncatedTimestamp {
181 180 if self.truncated_seconds != other.truncated_seconds {
182 181 false
183 182 } else if self.nanoseconds == 0 || other.nanoseconds == 0 {
184 if self.second_ambiguous {
185 false
186 } else {
187 true
188 }
183 !self.second_ambiguous
189 184 } else {
190 185 self.nanoseconds == other.nanoseconds
191 186 }
@@ -423,6 +418,8 impl DirstateEntry {
423 418 }
424 419
425 420 pub fn maybe_clean(&self) -> bool {
421 #[allow(clippy::if_same_then_else)]
422 #[allow(clippy::needless_bool)]
426 423 if !self.flags.contains(Flags::WDIR_TRACKED) {
427 424 false
428 425 } else if !self.flags.contains(Flags::P1_TRACKED) {
@@ -512,6 +509,8 impl DirstateEntry {
512 509 // TODO: return an Option instead?
513 510 panic!("Accessing v1_mtime of an untracked DirstateEntry")
514 511 }
512
513 #[allow(clippy::if_same_then_else)]
515 514 if self.removed() {
516 515 0
517 516 } else if self.flags.contains(Flags::P2_INFO) {
@@ -703,9 +702,9 impl TryFrom<u8> for EntryState {
703 702 }
704 703 }
705 704
706 impl Into<u8> for EntryState {
707 fn into(self) -> u8 {
708 match self {
705 impl From<EntryState> for u8 {
706 fn from(val: EntryState) -> Self {
707 match val {
709 708 EntryState::Normal => b'n',
710 709 EntryState::Added => b'a',
711 710 EntryState::Removed => b'r',
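
Replacing `impl Into<u8> for EntryState` with `impl From<EntryState> for u8` follows the standard-library guidance: thanks to the blanket `impl<T, U: From<T>> Into<U> for T`, a `From` impl also provides `Into` for free, while the reverse does not hold. A reduced sketch of the same pattern on a toy enum (not the real `EntryState`):

    enum State {
        Normal,
        Added,
    }

    impl From<State> for u8 {
        fn from(val: State) -> Self {
            match val {
                State::Normal => b'n',
                State::Added => b'a',
            }
        }
    }

    fn main() {
        // Both spellings work thanks to the blanket impl in std:
        let byte: u8 = State::Added.into();
        assert_eq!(byte, b'a');
        assert_eq!(u8::from(State::Normal), b'n');
    }
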
@@ -8,8 +8,6 use crate::utils::hg_path::HgPath;
8 8 use crate::{dirstate::EntryState, DirstateEntry, DirstateParents};
9 9 use byteorder::{BigEndian, WriteBytesExt};
10 10 use bytes_cast::{unaligned, BytesCast};
11 use micro_timer::timed;
12 use std::convert::TryFrom;
13 11
14 12 /// Parents are stored in the dirstate as byte hashes.
15 13 pub const PARENT_SIZE: usize = 20;
@@ -30,7 +28,7 pub fn parse_dirstate_parents(
30 28 Ok(parents)
31 29 }
32 30
33 #[timed]
31 #[logging_timer::time("trace")]
34 32 pub fn parse_dirstate(contents: &[u8]) -> Result<ParseResult, HgError> {
35 33 let mut copies = Vec::new();
36 34 let mut entries = Vec::new();
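
This hunk (and the matching ones below) trades the `#[timed]` attribute from the now-removed `micro-timer` dependency for `logging_timer`, which reports through the standard `log` facade at an explicit level. A minimal usage sketch; the crate names here are assumptions for illustration, not part of this changeset:

    // Assumed dependencies: logging_timer and env_logger (both on crates.io).
    #[logging_timer::time("trace")] // emits an "Elapsed=..." record at trace level
    fn count_lines(data: &[u8]) -> usize {
        data.iter().filter(|&&b| b == b'\n').count()
    }

    fn main() {
        env_logger::init(); // run with RUST_LOG=trace to see the timer output
        assert_eq!(count_lines(b"a\nb\n"), 2);
    }
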
@@ -1,5 +1,4
1 1 use bytes_cast::BytesCast;
2 use micro_timer::timed;
3 2 use std::borrow::Cow;
4 3 use std::path::PathBuf;
5 4
@@ -16,6 +15,7 use crate::dirstate::ParentFileData;
16 15 use crate::dirstate::StateMapIter;
17 16 use crate::dirstate::TruncatedTimestamp;
18 17 use crate::matchers::Matcher;
18 use crate::utils::filter_map_results;
19 19 use crate::utils::hg_path::{HgPath, HgPathBuf};
20 20 use crate::DirstateEntry;
21 21 use crate::DirstateError;
@@ -321,9 +321,7 impl<'tree, 'on_disk> NodeRef<'tree, 'on
321 321 on_disk: &'on_disk [u8],
322 322 ) -> Result<Option<&'tree HgPath>, DirstateV2ParseError> {
323 323 match self {
324 NodeRef::InMemory(_path, node) => {
325 Ok(node.copy_source.as_ref().map(|s| &**s))
326 }
324 NodeRef::InMemory(_path, node) => Ok(node.copy_source.as_deref()),
327 325 NodeRef::OnDisk(node) => node.copy_source(on_disk),
328 326 }
329 327 }
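
`Option::as_deref` is the idiomatic spelling of `.as_ref().map(|s| &**s)`: it borrows the `Option` and derefs the contents, turning e.g. `&Option<String>` into `Option<&str>`. In isolation:

    fn main() {
        let copy_source: Option<String> = Some(String::from("src/lib.rs"));
        // Equivalent to copy_source.as_ref().map(|s| &**s):
        let borrowed: Option<&str> = copy_source.as_deref();
        assert_eq!(borrowed, Some("src/lib.rs"));
    }
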
@@ -341,9 +339,9 impl<'tree, 'on_disk> NodeRef<'tree, 'on
341 339 Cow::Owned(in_memory) => BorrowedPath::InMemory(in_memory),
342 340 })
343 341 }
344 NodeRef::OnDisk(node) => node
345 .copy_source(on_disk)?
346 .map(|source| BorrowedPath::OnDisk(source)),
342 NodeRef::OnDisk(node) => {
343 node.copy_source(on_disk)?.map(BorrowedPath::OnDisk)
344 }
347 345 })
348 346 }
349 347
@@ -419,10 +417,7 impl Default for NodeData {
419 417
420 418 impl NodeData {
421 419 fn has_entry(&self) -> bool {
422 match self {
423 NodeData::Entry(_) => true,
424 _ => false,
425 }
420 matches!(self, NodeData::Entry(_))
426 421 }
427 422
428 423 fn as_entry(&self) -> Option<&DirstateEntry> {
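
The `matches!` macro (stable since Rust 1.42) collapses the two-arm boolean `match` into one expression; this is what clippy's `match_like_matches_macro` lint suggests. A self-contained sketch with a stand-in enum:

    enum NodeData {
        Entry(u32),
        CachedDirectory,
        None,
    }

    fn has_entry(data: &NodeData) -> bool {
        // Expands to a match returning true for the listed pattern,
        // false for everything else.
        matches!(data, NodeData::Entry(_))
    }

    fn main() {
        assert!(has_entry(&NodeData::Entry(7)));
        assert!(!has_entry(&NodeData::CachedDirectory));
        assert!(!has_entry(&NodeData::None));
    }
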
@@ -454,7 +449,7 impl<'on_disk> DirstateMap<'on_disk> {
454 449 }
455 450 }
456 451
457 #[timed]
452 #[logging_timer::time("trace")]
458 453 pub fn new_v2(
459 454 on_disk: &'on_disk [u8],
460 455 data_size: usize,
@@ -467,7 +462,7 impl<'on_disk> DirstateMap<'on_disk> {
467 462 }
468 463 }
469 464
470 #[timed]
465 #[logging_timer::time("trace")]
471 466 pub fn new_v1(
472 467 on_disk: &'on_disk [u8],
473 468 ) -> Result<(Self, Option<DirstateParents>), DirstateError> {
@@ -510,7 +505,7 impl<'on_disk> DirstateMap<'on_disk> {
510 505 Ok(())
511 506 },
512 507 )?;
513 let parents = Some(parents.clone());
508 let parents = Some(*parents);
514 509
515 510 Ok((map, parents))
516 511 }
@@ -656,6 +651,7 impl<'on_disk> DirstateMap<'on_disk> {
656 651 }
657 652 }
658 653
654 #[allow(clippy::too_many_arguments)]
659 655 fn reset_state(
660 656 &mut self,
661 657 filename: &HgPath,
@@ -681,10 +677,8 impl<'on_disk> DirstateMap<'on_disk> {
681 677 .checked_sub(1)
682 678 .expect("tracked count to be >= 0");
683 679 }
684 } else {
685 if wc_tracked {
686 ancestor.tracked_descendants_count += 1;
687 }
680 } else if wc_tracked {
681 ancestor.tracked_descendants_count += 1;
688 682 }
689 683 })?;
690 684
@@ -734,7 +728,7 impl<'on_disk> DirstateMap<'on_disk> {
734 728 ancestor.tracked_descendants_count += tracked_count_increment;
735 729 })?;
736 730 if let Some(old_entry) = old_entry_opt {
737 let mut e = old_entry.clone();
731 let mut e = old_entry;
738 732 if e.tracked() {
739 733 // XXX
740 734 // This is probably overkill for most cases, but we need this to

@@ -775,7 +769,7 impl<'on_disk> DirstateMap<'on_disk> {
775 769 .expect("tracked_descendants_count should be >= 0");
776 770 })?
777 771 .expect("node should exist");
778 let mut new_entry = old_entry.clone();
772 let mut new_entry = old_entry;
779 773 new_entry.set_untracked();
780 774 node.data = NodeData::Entry(new_entry);
781 775 Ok(())
@@ -803,7 +797,7 impl<'on_disk> DirstateMap<'on_disk> {
803 797 }
804 798 })?
805 799 .expect("node should exist");
806 let mut new_entry = old_entry.clone();
800 let mut new_entry = old_entry;
807 801 new_entry.set_clean(mode, size, mtime);
808 802 node.data = NodeData::Entry(new_entry);
809 803 Ok(())
@@ -912,32 +906,14 impl<'on_disk> DirstateMap<'on_disk> {
912 906 })
913 907 }
914 908
915 fn count_dropped_path(unreachable_bytes: &mut u32, path: &Cow<HgPath>) {
909 fn count_dropped_path(unreachable_bytes: &mut u32, path: Cow<HgPath>) {
916 910 if let Cow::Borrowed(path) = path {
917 911 *unreachable_bytes += path.len() as u32
918 912 }
919 913 }
920 914 }
921 915
922 /// Like `Iterator::filter_map`, but over a fallible iterator of `Result`s.
923 ///
924 /// The callback is only called for incoming `Ok` values. Errors are passed
925 /// through as-is. In order to let it use the `?` operator the callback is
926 /// expected to return a `Result` of `Option`, instead of an `Option` of
927 /// `Result`.
928 fn filter_map_results<'a, I, F, A, B, E>(
929 iter: I,
930 f: F,
931 ) -> impl Iterator<Item = Result<B, E>> + 'a
932 where
933 I: Iterator<Item = Result<A, E>> + 'a,
934 F: Fn(A) -> Result<Option<B>, E> + 'a,
935 {
936 iter.filter_map(move |result| match result {
937 Ok(node) => f(node).transpose(),
938 Err(e) => Some(Err(e)),
939 })
940 }
916 type DebugDirstateTuple<'a> = (&'a HgPath, (u8, i32, i32, i32));
941 917
942 918 impl OwningDirstateMap {
943 919 pub fn clear(&mut self) {
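
`filter_map_results` moves from this module to `crate::utils` (it is imported from there in the `dirstate_map` and `list_tracked_files` hunks), so the helper itself survives unchanged. A self-contained sketch of its behavior, reconstructed from the doc comment removed above:

    /// Only calls `f` for `Ok` items; `Err` items pass through untouched.
    fn filter_map_results<I, F, A, B, E>(
        iter: I,
        f: F,
    ) -> impl Iterator<Item = Result<B, E>>
    where
        I: Iterator<Item = Result<A, E>>,
        F: Fn(A) -> Result<Option<B>, E>,
    {
        iter.filter_map(move |result| match result {
            // Result<Option<B>, E> -> Option<Result<B, E>>:
            Ok(item) => f(item).transpose(),
            Err(e) => Some(Err(e)),
        })
    }

    fn main() {
        let input: Vec<Result<i32, String>> =
            vec![Ok(1), Ok(2), Err("boom".into()), Ok(3)];
        // Keep even numbers; errors are forwarded as-is.
        let out: Vec<_> = filter_map_results(input.into_iter(), |n| {
            Ok(if n % 2 == 0 { Some(n) } else { None })
        })
        .collect();
        assert_eq!(out, vec![Ok(2), Err("boom".to_string())]);
    }
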
@@ -1124,7 +1100,10 impl OwningDirstateMap {
1124 1100 }
1125 1101 let mut had_copy_source = false;
1126 1102 if let Some(source) = &node.copy_source {
1127 DirstateMap::count_dropped_path(unreachable_bytes, source);
1103 DirstateMap::count_dropped_path(
1104 unreachable_bytes,
1105 Cow::Borrowed(source),
1106 );
1128 1107 had_copy_source = true;
1129 1108 node.copy_source = None
1130 1109 }
@@ -1144,7 +1123,7 impl OwningDirstateMap {
1144 1123 nodes.remove_entry(first_path_component).unwrap();
1145 1124 DirstateMap::count_dropped_path(
1146 1125 unreachable_bytes,
1147 key.full_path(),
1126 Cow::Borrowed(key.full_path()),
1148 1127 )
1149 1128 }
1150 1129 Ok(Some((dropped, remove)))
@@ -1208,7 +1187,7 impl OwningDirstateMap {
1208 1187 })
1209 1188 }
1210 1189
1211 #[timed]
1190 #[logging_timer::time("trace")]
1212 1191 pub fn pack_v1(
1213 1192 &self,
1214 1193 parents: DirstateParents,
@@ -1248,7 +1227,7 impl OwningDirstateMap {
1248 1227 /// appended to the existing data file whose content is at
1249 1228 /// `map.on_disk` (true), instead of written to a new data file
1250 1229 /// (false), and the previous size of data on disk.
1251 #[timed]
1230 #[logging_timer::time("trace")]
1252 1231 pub fn pack_v2(
1253 1232 &self,
1254 1233 can_append: bool,
@@ -1343,7 +1322,10 impl OwningDirstateMap {
1343 1322 *count = count
1344 1323 .checked_sub(1)
1345 1324 .expect("nodes_with_copy_source_count should be >= 0");
1346 DirstateMap::count_dropped_path(unreachable_bytes, source);
1325 DirstateMap::count_dropped_path(
1326 unreachable_bytes,
1327 Cow::Borrowed(source),
1328 );
1347 1329 }
1348 1330 node.copy_source.take().map(Cow::into_owned)
1349 1331 }))
@@ -1356,7 +1338,7 impl OwningDirstateMap {
1356 1338 value: &HgPath,
1357 1339 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
1358 1340 self.with_dmap_mut(|map| {
1359 let node = map.get_or_insert_node(&key, |_ancestor| {})?;
1341 let node = map.get_or_insert_node(key, |_ancestor| {})?;
1360 1342 let had_copy_source = node.copy_source.is_none();
1361 1343 let old = node
1362 1344 .copy_source
@@ -1374,6 +1356,10 impl OwningDirstateMap {
1374 1356 map.nodes_with_entry_count as usize
1375 1357 }
1376 1358
1359 pub fn is_empty(&self) -> bool {
1360 self.len() == 0
1361 }
1362
1377 1363 pub fn contains_key(
1378 1364 &self,
1379 1365 key: &HgPath,
@@ -1467,12 +1453,8 impl OwningDirstateMap {
1467 1453 &self,
1468 1454 all: bool,
1469 1455 ) -> Box<
1470 dyn Iterator<
1471 Item = Result<
1472 (&HgPath, (u8, i32, i32, i32)),
1473 DirstateV2ParseError,
1474 >,
1475 > + Send
1456 dyn Iterator<Item = Result<DebugDirstateTuple, DirstateV2ParseError>>
1457 + Send
1476 1458 + '_,
1477 1459 > {
1478 1460 let map = self.get_map();
@@ -1856,11 +1838,8 mod tests {
1856 1838 map.set_untracked(p(b"some/nested/removed"))?;
1857 1839 assert_eq!(map.get_map().unreachable_bytes, 0);
1858 1840
1859 match map.get_map().root {
1860 ChildNodes::InMemory(_) => {
1861 panic!("root should not have been mutated")
1862 }
1863 _ => (),
1841 if let ChildNodes::InMemory(_) = map.get_map().root {
1842 panic!("root should not have been mutated")
1864 1843 }
1865 1844 // We haven't mutated enough (nothing, actually), we should still be in
1866 1845 // the append strategy
@@ -1871,9 +1850,8 mod tests {
1871 1850 let unreachable_bytes = map.get_map().unreachable_bytes;
1872 1851 assert!(unreachable_bytes > 0);
1873 1852
1874 match map.get_map().root {
1875 ChildNodes::OnDisk(_) => panic!("root should have been mutated"),
1876 _ => (),
1853 if let ChildNodes::OnDisk(_) = map.get_map().root {
1854 panic!("root should have been mutated")
1877 1855 }
1878 1856
1879 1857 // This should not mutate the structure either, since `root` has
@@ -1881,22 +1859,20 mod tests {
1881 1859 map.set_untracked(p(b"merged"))?;
1882 1860 assert_eq!(map.get_map().unreachable_bytes, unreachable_bytes);
1883 1861
1884 match map.get_map().get_node(p(b"other/added_with_p2"))?.unwrap() {
1885 NodeRef::InMemory(_, _) => {
1886 panic!("'other/added_with_p2' should not have been mutated")
1887 }
1888 _ => (),
1862 if let NodeRef::InMemory(_, _) =
1863 map.get_map().get_node(p(b"other/added_with_p2"))?.unwrap()
1864 {
1865 panic!("'other/added_with_p2' should not have been mutated")
1889 1866 }
1890 1867 // But this should, since it's in a different path
1891 1868 // than `<root>some/nested/add`
1892 1869 map.set_untracked(p(b"other/added_with_p2"))?;
1893 1870 assert!(map.get_map().unreachable_bytes > unreachable_bytes);
1894 1871
1895 match map.get_map().get_node(p(b"other/added_with_p2"))?.unwrap() {
1896 NodeRef::OnDisk(_) => {
1897 panic!("'other/added_with_p2' should have been mutated")
1898 }
1899 _ => (),
1872 if let NodeRef::OnDisk(_) =
1873 map.get_map().get_node(p(b"other/added_with_p2"))?.unwrap()
1874 {
1875 panic!("'other/added_with_p2' should have been mutated")
1900 1876 }
1901 1877
1902 1878 // We have rewritten most of the tree, we should create a new file
@@ -17,7 +17,6 use bytes_cast::BytesCast;
17 17 use format_bytes::format_bytes;
18 18 use rand::Rng;
19 19 use std::borrow::Cow;
20 use std::convert::{TryFrom, TryInto};
21 20 use std::fmt::Write;
22 21
23 22 /// Added at the start of `.hg/dirstate` when the "v2" format is used.
@@ -247,11 +246,9 impl<'on_disk> Docket<'on_disk> {
247 246 pub fn parents(&self) -> DirstateParents {
248 247 use crate::Node;
249 248 let p1 = Node::try_from(&self.header.parent_1[..USED_NODE_ID_BYTES])
250 .unwrap()
251 .clone();
249 .unwrap();
252 250 let p2 = Node::try_from(&self.header.parent_2[..USED_NODE_ID_BYTES])
253 .unwrap()
254 .clone();
251 .unwrap();
255 252 DirstateParents { p1, p2 }
256 253 }
257 254
@@ -323,7 +320,7 impl Node {
323 320 read_hg_path(on_disk, self.full_path)
324 321 }
325 322
326 pub(super) fn base_name_start<'on_disk>(
323 pub(super) fn base_name_start(
327 324 &self,
328 325 ) -> Result<usize, DirstateV2ParseError> {
329 326 let start = self.base_name_start.get();
@@ -356,7 +353,7 impl Node {
356 353 ))
357 354 }
358 355
359 pub(super) fn has_copy_source<'on_disk>(&self) -> bool {
356 pub(super) fn has_copy_source(&self) -> bool {
360 357 self.copy_source.start.get() != 0
361 358 }
362 359
@@ -415,12 +412,12 impl Node {
415 412 } else {
416 413 libc::S_IFREG
417 414 };
418 let permisions = if self.flags().contains(Flags::MODE_EXEC_PERM) {
415 let permissions = if self.flags().contains(Flags::MODE_EXEC_PERM) {
419 416 0o755
420 417 } else {
421 418 0o644
422 419 };
423 (file_type | permisions).into()
420 file_type | permissions
424 421 }
425 422
426 423 fn mtime(&self) -> Result<TruncatedTimestamp, DirstateV2ParseError> {
@@ -602,32 +599,6 where
602 599 .map(|(slice, _rest)| slice)
603 600 }
604 601
605 pub(crate) fn for_each_tracked_path<'on_disk>(
606 on_disk: &'on_disk [u8],
607 metadata: &[u8],
608 mut f: impl FnMut(&'on_disk HgPath),
609 ) -> Result<(), DirstateV2ParseError> {
610 let (meta, _) = TreeMetadata::from_bytes(metadata).map_err(|e| {
611 DirstateV2ParseError::new(format!("when parsing tree metadata, {}", e))
612 })?;
613 fn recur<'on_disk>(
614 on_disk: &'on_disk [u8],
615 nodes: ChildNodes,
616 f: &mut impl FnMut(&'on_disk HgPath),
617 ) -> Result<(), DirstateV2ParseError> {
618 for node in read_nodes(on_disk, nodes)? {
619 if let Some(entry) = node.entry()? {
620 if entry.tracked() {
621 f(node.full_path(on_disk)?)
622 }
623 }
624 recur(on_disk, node.children, f)?
625 }
626 Ok(())
627 }
628 recur(on_disk, meta.root_nodes, &mut f)
629 }
630
631 602 /// Returns new data and metadata, together with whether that data should be
632 603 /// appended to the existing data file whose content is at
633 604 /// `dirstate_map.on_disk` (true), instead of written to a new data file
@@ -24,7 +24,7 impl OwningDirstateMap {
24 24
25 25 OwningDirstateMapBuilder {
26 26 on_disk,
27 map_builder: |bytes| DirstateMap::empty(&bytes),
27 map_builder: |bytes| DirstateMap::empty(bytes),
28 28 }
29 29 .build()
30 30 }
@@ -42,7 +42,7 impl OwningDirstateMap {
42 42 OwningDirstateMapTryBuilder {
43 43 on_disk,
44 44 map_builder: |bytes| {
45 DirstateMap::new_v1(&bytes).map(|(dmap, p)| {
45 DirstateMap::new_v1(bytes).map(|(dmap, p)| {
46 46 parents = p.unwrap_or(DirstateParents::NULL);
47 47 dmap
48 48 })
@@ -66,7 +66,7 impl OwningDirstateMap {
66 66 OwningDirstateMapTryBuilder {
67 67 on_disk,
68 68 map_builder: |bytes| {
69 DirstateMap::new_v2(&bytes, data_size, metadata)
69 DirstateMap::new_v2(bytes, data_size, metadata)
70 70 },
71 71 }
72 72 .try_build()
@@ -15,12 +15,10 use crate::utils::files::get_path_from_b
15 15 use crate::utils::hg_path::HgPath;
16 16 use crate::BadMatch;
17 17 use crate::DirstateStatus;
18 use crate::HgPathBuf;
19 18 use crate::HgPathCow;
20 19 use crate::PatternFileWarning;
21 20 use crate::StatusError;
22 21 use crate::StatusOptions;
23 use micro_timer::timed;
24 22 use once_cell::sync::OnceCell;
25 23 use rayon::prelude::*;
26 24 use sha1::{Digest, Sha1};
@@ -40,7 +38,7 use std::time::SystemTime;
40 38 /// and its use of `itertools::merge_join_by`. When reaching a path that only
41 39 /// exists in one of the two trees, depending on information requested by
42 40 /// `options` we may need to traverse the remaining subtree.
43 #[timed]
41 #[logging_timer::time("trace")]
44 42 pub fn status<'dirstate>(
45 43 dmap: &'dirstate mut DirstateMap,
46 44 matcher: &(dyn Matcher + Sync),
@@ -147,7 +145,6 pub fn status<'dirstate>(
147 145 let hg_path = &BorrowedPath::OnDisk(HgPath::new(""));
148 146 let has_ignored_ancestor = HasIgnoredAncestor::create(None, hg_path);
149 147 let root_cached_mtime = None;
150 let root_dir_metadata = None;
151 148 // If the path we have for the repository root is a symlink, do follow it.
152 149 // (As opposed to symlinks within the working directory which are not
153 150 // followed, using `std::fs::symlink_metadata`.)
@@ -155,8 +152,12 pub fn status<'dirstate>(
155 152 &has_ignored_ancestor,
156 153 dmap.root.as_ref(),
157 154 hg_path,
158 &root_dir,
159 root_dir_metadata,
155 &DirEntry {
156 hg_path: Cow::Borrowed(HgPath::new(b"")),
157 fs_path: Cow::Borrowed(root_dir),
158 symlink_metadata: None,
159 file_type: FakeFileType::Directory,
160 },
160 161 root_cached_mtime,
161 162 is_at_repo_root,
162 163 )?;
@@ -244,7 +245,7 impl<'a> HasIgnoredAncestor<'a> {
244 245 None => false,
245 246 Some(parent) => {
246 247 *(parent.cache.get_or_init(|| {
247 parent.force(ignore_fn) || ignore_fn(&self.path)
248 parent.force(ignore_fn) || ignore_fn(self.path)
248 249 }))
249 250 }
250 251 }
@@ -340,7 +341,7 impl<'a, 'tree, 'on_disk> StatusCommon<'
340 341 /// need to call `read_dir`.
341 342 fn can_skip_fs_readdir(
342 343 &self,
343 directory_metadata: Option<&std::fs::Metadata>,
344 directory_entry: &DirEntry,
344 345 cached_directory_mtime: Option<TruncatedTimestamp>,
345 346 ) -> bool {
346 347 if !self.options.list_unknown && !self.options.list_ignored {
@@ -356,9 +357,9 impl<'a, 'tree, 'on_disk> StatusCommon<'
356 357 // The dirstate contains a cached mtime for this directory, set
357 358 // by a previous run of the `status` algorithm which found this
358 359 // directory eligible for `read_dir` caching.
359 if let Some(meta) = directory_metadata {
360 if let Ok(meta) = directory_entry.symlink_metadata() {
360 361 if cached_mtime
361 .likely_equal_to_mtime_of(meta)
362 .likely_equal_to_mtime_of(&meta)
362 363 .unwrap_or(false)
363 364 {
364 365 // The mtime of that directory has not changed
@@ -379,33 +380,48 impl<'a, 'tree, 'on_disk> StatusCommon<'
379 380 has_ignored_ancestor: &'ancestor HasIgnoredAncestor<'ancestor>,
380 381 dirstate_nodes: ChildNodesRef<'tree, 'on_disk>,
381 382 directory_hg_path: &BorrowedPath<'tree, 'on_disk>,
382 directory_fs_path: &Path,
383 directory_metadata: Option<&std::fs::Metadata>,
383 directory_entry: &DirEntry,
384 384 cached_directory_mtime: Option<TruncatedTimestamp>,
385 385 is_at_repo_root: bool,
386 386 ) -> Result<bool, DirstateV2ParseError> {
387 if self.can_skip_fs_readdir(directory_metadata, cached_directory_mtime)
388 {
387 if self.can_skip_fs_readdir(directory_entry, cached_directory_mtime) {
389 388 dirstate_nodes
390 389 .par_iter()
391 390 .map(|dirstate_node| {
392 let fs_path = directory_fs_path.join(get_path_from_bytes(
391 let fs_path = &directory_entry.fs_path;
392 let fs_path = fs_path.join(get_path_from_bytes(
393 393 dirstate_node.base_name(self.dmap.on_disk)?.as_bytes(),
394 394 ));
395 395 match std::fs::symlink_metadata(&fs_path) {
396 Ok(fs_metadata) => self.traverse_fs_and_dirstate(
397 &fs_path,
398 &fs_metadata,
399 dirstate_node,
400 has_ignored_ancestor,
401 ),
396 Ok(fs_metadata) => {
397 let file_type =
398 match fs_metadata.file_type().try_into() {
399 Ok(file_type) => file_type,
400 Err(_) => return Ok(()),
401 };
402 let entry = DirEntry {
403 hg_path: Cow::Borrowed(
404 dirstate_node
405 .full_path(self.dmap.on_disk)?,
406 ),
407 fs_path: Cow::Borrowed(&fs_path),
408 symlink_metadata: Some(fs_metadata),
409 file_type,
410 };
411 self.traverse_fs_and_dirstate(
412 &entry,
413 dirstate_node,
414 has_ignored_ancestor,
415 )
416 }
402 417 Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
403 418 self.traverse_dirstate_only(dirstate_node)
404 419 }
405 420 Err(error) => {
406 421 let hg_path =
407 422 dirstate_node.full_path(self.dmap.on_disk)?;
408 Ok(self.io_error(error, hg_path))
423 self.io_error(error, hg_path);
424 Ok(())
409 425 }
410 426 }
411 427 })
@@ -419,7 +435,7 impl<'a, 'tree, 'on_disk> StatusCommon<'
419 435
420 436 let mut fs_entries = if let Ok(entries) = self.read_dir(
421 437 directory_hg_path,
422 directory_fs_path,
438 &directory_entry.fs_path,
423 439 is_at_repo_root,
424 440 ) {
425 441 entries
@@ -435,7 +451,7 impl<'a, 'tree, 'on_disk> StatusCommon<'
435 451 let dirstate_nodes = dirstate_nodes.sorted();
436 452 // `sort_unstable_by_key` doesn’t allow keys borrowing from the value:
437 453 // https://github.com/rust-lang/rust/issues/34162
438 fs_entries.sort_unstable_by(|e1, e2| e1.base_name.cmp(&e2.base_name));
454 fs_entries.sort_unstable_by(|e1, e2| e1.hg_path.cmp(&e2.hg_path));
439 455
440 456 // Propagate here any error that would happen inside the comparison
441 457 // callback below
@@ -451,35 +467,31 impl<'a, 'tree, 'on_disk> StatusCommon<'
451 467 dirstate_node
452 468 .base_name(self.dmap.on_disk)
453 469 .unwrap()
454 .cmp(&fs_entry.base_name)
470 .cmp(&fs_entry.hg_path)
455 471 },
456 472 )
457 473 .par_bridge()
458 474 .map(|pair| {
459 475 use itertools::EitherOrBoth::*;
460 let has_dirstate_node_or_is_ignored;
461 match pair {
476 let has_dirstate_node_or_is_ignored = match pair {
462 477 Both(dirstate_node, fs_entry) => {
463 478 self.traverse_fs_and_dirstate(
464 &fs_entry.full_path,
465 &fs_entry.metadata,
479 fs_entry,
466 480 dirstate_node,
467 481 has_ignored_ancestor,
468 482 )?;
469 has_dirstate_node_or_is_ignored = true
483 true
470 484 }
471 485 Left(dirstate_node) => {
472 486 self.traverse_dirstate_only(dirstate_node)?;
473 has_dirstate_node_or_is_ignored = true;
487 true
474 488 }
475 Right(fs_entry) => {
476 has_dirstate_node_or_is_ignored = self.traverse_fs_only(
477 has_ignored_ancestor.force(&self.ignore_fn),
478 directory_hg_path,
479 fs_entry,
480 )
481 }
482 }
489 Right(fs_entry) => self.traverse_fs_only(
490 has_ignored_ancestor.force(&self.ignore_fn),
491 directory_hg_path,
492 fs_entry,
493 ),
494 };
483 495 Ok(has_dirstate_node_or_is_ignored)
484 496 })
485 497 .try_reduce(|| true, |a, b| Ok(a && b))
@@ -487,23 +499,21 impl<'a, 'tree, 'on_disk> StatusCommon<'
487 499
488 500 fn traverse_fs_and_dirstate<'ancestor>(
489 501 &self,
490 fs_path: &Path,
491 fs_metadata: &std::fs::Metadata,
502 fs_entry: &DirEntry,
492 503 dirstate_node: NodeRef<'tree, 'on_disk>,
493 504 has_ignored_ancestor: &'ancestor HasIgnoredAncestor<'ancestor>,
494 505 ) -> Result<(), DirstateV2ParseError> {
495 506 let outdated_dircache =
496 507 self.check_for_outdated_directory_cache(&dirstate_node)?;
497 508 let hg_path = &dirstate_node.full_path_borrowed(self.dmap.on_disk)?;
498 let file_type = fs_metadata.file_type();
499 let file_or_symlink = file_type.is_file() || file_type.is_symlink();
509 let file_or_symlink = fs_entry.is_file() || fs_entry.is_symlink();
500 510 if !file_or_symlink {
501 511 // If we previously had a file here, it was removed (with
502 512 // `hg rm` or similar) or deleted before it could be
503 513 // replaced by a directory or something else.
504 514 self.mark_removed_or_deleted_if_file(&dirstate_node)?;
505 515 }
506 if file_type.is_dir() {
516 if fs_entry.is_dir() {
507 517 if self.options.collect_traversed_dirs {
508 518 self.outcome
509 519 .lock()
@@ -512,7 +522,7 impl<'a, 'tree, 'on_disk> StatusCommon<'
512 522 .push(hg_path.detach_from_tree())
513 523 }
514 524 let is_ignored = HasIgnoredAncestor::create(
515 Some(&has_ignored_ancestor),
525 Some(has_ignored_ancestor),
516 526 hg_path,
517 527 );
518 528 let is_at_repo_root = false;
@@ -521,26 +531,25 impl<'a, 'tree, 'on_disk> StatusCommon<'
521 531 &is_ignored,
522 532 dirstate_node.children(self.dmap.on_disk)?,
523 533 hg_path,
524 fs_path,
525 Some(fs_metadata),
534 fs_entry,
526 535 dirstate_node.cached_directory_mtime()?,
527 536 is_at_repo_root,
528 537 )?;
529 538 self.maybe_save_directory_mtime(
530 539 children_all_have_dirstate_node_or_are_ignored,
531 fs_metadata,
540 fs_entry,
532 541 dirstate_node,
533 542 outdated_dircache,
534 543 )?
535 544 } else {
536 if file_or_symlink && self.matcher.matches(&hg_path) {
545 if file_or_symlink && self.matcher.matches(hg_path) {
537 546 if let Some(entry) = dirstate_node.entry()? {
538 547 if !entry.any_tracked() {
539 548 // Forward-compat if we start tracking unknown/ignored
540 549 // files for caching reasons
541 550 self.mark_unknown_or_ignored(
542 551 has_ignored_ancestor.force(&self.ignore_fn),
543 &hg_path,
552 hg_path,
544 553 );
545 554 }
546 555 if entry.added() {
@@ -550,7 +559,7 impl<'a, 'tree, 'on_disk> StatusCommon<'
550 559 } else if entry.modified() {
551 560 self.push_outcome(Outcome::Modified, &dirstate_node)?;
552 561 } else {
553 self.handle_normal_file(&dirstate_node, fs_metadata)?;
562 self.handle_normal_file(&dirstate_node, fs_entry)?;
554 563 }
555 564 } else {
556 565 // `node.entry.is_none()` indicates a "directory"
@@ -578,7 +587,7 impl<'a, 'tree, 'on_disk> StatusCommon<'
578 587 fn maybe_save_directory_mtime(
579 588 &self,
580 589 children_all_have_dirstate_node_or_are_ignored: bool,
581 directory_metadata: &std::fs::Metadata,
590 directory_entry: &DirEntry,
582 591 dirstate_node: NodeRef<'tree, 'on_disk>,
583 592 outdated_directory_cache: bool,
584 593 ) -> Result<(), DirstateV2ParseError> {
@@ -605,14 +614,17 impl<'a, 'tree, 'on_disk> StatusCommon<'
605 614 // resolution based on the filesystem (for example ext3
606 615 // only stores integer seconds), kernel (see
607 616 // https://stackoverflow.com/a/14393315/1162888), etc.
608 let directory_mtime = if let Ok(option) =
609 TruncatedTimestamp::for_reliable_mtime_of(
610 directory_metadata,
611 status_start,
612 ) {
613 if let Some(directory_mtime) = option {
614 directory_mtime
615 } else {
617 let metadata = match directory_entry.symlink_metadata() {
618 Ok(meta) => meta,
619 Err(_) => return Ok(()),
620 };
621
622 let directory_mtime = match TruncatedTimestamp::for_reliable_mtime_of(
623 &metadata,
624 status_start,
625 ) {
626 Ok(Some(directory_mtime)) => directory_mtime,
627 Ok(None) => {
616 628 // The directory was modified too recently,
617 629 // don’t cache its `read_dir` results.
618 630 //
@@ -630,9 +642,10 impl<'a, 'tree, 'on_disk> StatusCommon<'
630 642 // by the same script.
631 643 return Ok(());
632 644 }
633 } else {
634 // OS/libc does not support mtime?
635 return Ok(());
645 Err(_) => {
646 // OS/libc does not support mtime?
647 return Ok(());
648 }
636 649 };
637 650 // We’ve observed (through `status_start`) that time has
638 651 // “progressed” since `directory_mtime`, so any further
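
Rewriting the nested `if let Ok(option) = … { if let Some(x) = option { … } }` as one `match` over `Ok(Some(_))` / `Ok(None)` / `Err(_)` makes the two early-return cases read side by side. The shape in isolation, with stand-in types:

    // Stand-in for TruncatedTimestamp::for_reliable_mtime_of.
    fn reliable_mtime() -> Result<Option<u64>, &'static str> {
        Ok(Some(1_700_000_000))
    }

    fn maybe_cache() -> Option<u64> {
        let directory_mtime = match reliable_mtime() {
            Ok(Some(mtime)) => mtime, // a usable, reliable mtime
            Ok(None) => return None,  // modified too recently; don't cache
            Err(_) => return None,    // OS/libc does not support mtime
        };
        Some(directory_mtime)
    }

    fn main() {
        assert_eq!(maybe_cache(), Some(1_700_000_000));
    }
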
@@ -671,18 +684,23 impl<'a, 'tree, 'on_disk> StatusCommon<'
671 684 fn handle_normal_file(
672 685 &self,
673 686 dirstate_node: &NodeRef<'tree, 'on_disk>,
674 fs_metadata: &std::fs::Metadata,
687 fs_entry: &DirEntry,
675 688 ) -> Result<(), DirstateV2ParseError> {
676 689 // Keep the low 31 bits
677 690 fn truncate_u64(value: u64) -> i32 {
678 691 (value & 0x7FFF_FFFF) as i32
679 692 }
680 693
694 let fs_metadata = match fs_entry.symlink_metadata() {
695 Ok(meta) => meta,
696 Err(_) => return Ok(()),
697 };
698
681 699 let entry = dirstate_node
682 700 .entry()?
683 701 .expect("handle_normal_file called with entry-less node");
684 702 let mode_changed =
685 || self.options.check_exec && entry.mode_changed(fs_metadata);
703 || self.options.check_exec && entry.mode_changed(&fs_metadata);
686 704 let size = entry.size();
687 705 let size_changed = size != truncate_u64(fs_metadata.len());
688 706 if size >= 0 && size_changed && fs_metadata.file_type().is_symlink() {
@@ -695,19 +713,20 impl<'a, 'tree, 'on_disk> StatusCommon<'
695 713 {
696 714 self.push_outcome(Outcome::Modified, dirstate_node)?
697 715 } else {
698 let mtime_looks_clean;
699 if let Some(dirstate_mtime) = entry.truncated_mtime() {
700 let fs_mtime = TruncatedTimestamp::for_mtime_of(fs_metadata)
716 let mtime_looks_clean = if let Some(dirstate_mtime) =
717 entry.truncated_mtime()
718 {
719 let fs_mtime = TruncatedTimestamp::for_mtime_of(&fs_metadata)
701 720 .expect("OS/libc does not support mtime?");
702 721 // There might be a change in the future if for example the
703 722 // internal clock become off while process run, but this is a
704 723 // case where the issues the user would face
705 724 // would be a lot worse and there is nothing we
706 725 // can really do.
707 mtime_looks_clean = fs_mtime.likely_equal(dirstate_mtime)
726 fs_mtime.likely_equal(dirstate_mtime)
708 727 } else {
709 728 // No mtime in the dirstate entry
710 mtime_looks_clean = false
729 false
711 730 };
712 731 if !mtime_looks_clean {
713 732 self.push_outcome(Outcome::Unsure, dirstate_node)?
@@ -751,7 +770,7 impl<'a, 'tree, 'on_disk> StatusCommon<'
751 770 if entry.removed() {
752 771 self.push_outcome(Outcome::Removed, dirstate_node)?
753 772 } else {
754 self.push_outcome(Outcome::Deleted, &dirstate_node)?
773 self.push_outcome(Outcome::Deleted, dirstate_node)?
755 774 }
756 775 }
757 776 }
@@ -767,10 +786,9 impl<'a, 'tree, 'on_disk> StatusCommon<'
767 786 directory_hg_path: &HgPath,
768 787 fs_entry: &DirEntry,
769 788 ) -> bool {
770 let hg_path = directory_hg_path.join(&fs_entry.base_name);
771 let file_type = fs_entry.metadata.file_type();
772 let file_or_symlink = file_type.is_file() || file_type.is_symlink();
773 if file_type.is_dir() {
789 let hg_path = directory_hg_path.join(&fs_entry.hg_path);
790 let file_or_symlink = fs_entry.is_file() || fs_entry.is_symlink();
791 if fs_entry.is_dir() {
774 792 let is_ignored =
775 793 has_ignored_ancestor || (self.ignore_fn)(&hg_path);
776 794 let traverse_children = if is_ignored {
@@ -783,11 +801,9 impl<'a, 'tree, 'on_disk> StatusCommon<'
783 801 };
784 802 if traverse_children {
785 803 let is_at_repo_root = false;
786 if let Ok(children_fs_entries) = self.read_dir(
787 &hg_path,
788 &fs_entry.full_path,
789 is_at_repo_root,
790 ) {
804 if let Ok(children_fs_entries) =
805 self.read_dir(&hg_path, &fs_entry.fs_path, is_at_repo_root)
806 {
791 807 children_fs_entries.par_iter().for_each(|child_fs_entry| {
792 808 self.traverse_fs_only(
793 809 is_ignored,
@@ -801,26 +817,24 impl<'a, 'tree, 'on_disk> StatusCommon<'
801 817 }
802 818 }
803 819 is_ignored
820 } else if file_or_symlink {
821 if self.matcher.matches(&hg_path) {
822 self.mark_unknown_or_ignored(
823 has_ignored_ancestor,
824 &BorrowedPath::InMemory(&hg_path),
825 )
826 } else {
827 // We haven’t computed whether this path is ignored. It
828 // might not be, and a future run of status might have a
829 // different matcher that matches it. So treat it as not
830 // ignored. That is, inhibit readdir caching of the parent
831 // directory.
832 false
833 }
804 834 } else {
805 if file_or_symlink {
806 if self.matcher.matches(&hg_path) {
807 self.mark_unknown_or_ignored(
808 has_ignored_ancestor,
809 &BorrowedPath::InMemory(&hg_path),
810 )
811 } else {
812 // We haven’t computed whether this path is ignored. It
813 // might not be, and a future run of status might have a
814 // different matcher that matches it. So treat it as not
815 // ignored. That is, inhibit readdir caching of the parent
816 // directory.
817 false
818 }
819 } else {
820 // This is neither a directory, a plain file, or a symlink.
821 // Treat it like an ignored file.
822 true
823 }
835 // This is neither a directory, a plain file, nor a symlink.
836 // Treat it like an ignored file.
837 true
824 838 }
825 839 }
826 840
@@ -830,7 +844,7 impl<'a, 'tree, 'on_disk> StatusCommon<'
830 844 has_ignored_ancestor: bool,
831 845 hg_path: &BorrowedPath<'_, 'on_disk>,
832 846 ) -> bool {
833 let is_ignored = has_ignored_ancestor || (self.ignore_fn)(&hg_path);
847 let is_ignored = has_ignored_ancestor || (self.ignore_fn)(hg_path);
834 848 if is_ignored {
835 849 if self.options.list_ignored {
836 850 self.push_outcome_without_copy_source(
@@ -838,27 +852,53 impl<'a, 'tree, 'on_disk> StatusCommon<'
838 852 hg_path,
839 853 )
840 854 }
841 } else {
842 if self.options.list_unknown {
843 self.push_outcome_without_copy_source(
844 Outcome::Unknown,
845 hg_path,
846 )
847 }
855 } else if self.options.list_unknown {
856 self.push_outcome_without_copy_source(Outcome::Unknown, hg_path)
848 857 }
849 858 is_ignored
850 859 }
851 860 }
852 861
853 struct DirEntry {
854 base_name: HgPathBuf,
855 full_path: PathBuf,
856 metadata: std::fs::Metadata,
862 /// Since [`std::fs::FileType`] cannot be built directly, we emulate what we
863 /// care about.
864 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
865 enum FakeFileType {
866 File,
867 Directory,
868 Symlink,
857 869 }
858 870
859 impl DirEntry {
860 /// Returns **unsorted** entries in the given directory, with name and
861 /// metadata.
871 impl TryFrom<std::fs::FileType> for FakeFileType {
872 type Error = ();
873
874 fn try_from(f: std::fs::FileType) -> Result<Self, Self::Error> {
875 if f.is_dir() {
876 Ok(Self::Directory)
877 } else if f.is_file() {
878 Ok(Self::File)
879 } else if f.is_symlink() {
880 Ok(Self::Symlink)
881 } else {
882 // Things like FIFO etc.
883 Err(())
884 }
885 }
886 }
887
888 struct DirEntry<'a> {
889 /// Path as stored in the dirstate, or just the filename for optimization.
890 hg_path: HgPathCow<'a>,
891 /// Filesystem path
892 fs_path: Cow<'a, Path>,
893 /// Lazily computed
894 symlink_metadata: Option<std::fs::Metadata>,
895 /// Already computed for ergonomics.
896 file_type: FakeFileType,
897 }
898
899 impl<'a> DirEntry<'a> {
900 /// Returns **unsorted** entries in the given directory, with name,
901 /// metadata and file type.
862 902 ///
863 903 /// If a `.hg` sub-directory is encountered:
864 904 ///
@@ -872,7 +912,7 impl DirEntry {
872 912 let mut results = Vec::new();
873 913 for entry in read_dir_path.read_dir()? {
874 914 let entry = entry?;
875 let metadata = match entry.metadata() {
915 let file_type = match entry.file_type() {
876 916 Ok(v) => v,
877 917 Err(e) => {
878 918 // race with file deletion?
@@ -889,7 +929,7 impl DirEntry {
889 929 if is_at_repo_root {
890 930 // Skip the repo’s own .hg (might be a symlink)
891 931 continue;
892 } else if metadata.is_dir() {
932 } else if file_type.is_dir() {
893 933 // A .hg sub-directory at another location means a subrepo,
894 934 // skip it entirely.
895 935 return Ok(Vec::new());
@@ -900,15 +940,40 impl DirEntry {
900 940 } else {
901 941 entry.path()
902 942 };
903 let base_name = get_bytes_from_os_string(file_name).into();
943 let filename =
944 Cow::Owned(get_bytes_from_os_string(file_name).into());
945 let file_type = match FakeFileType::try_from(file_type) {
946 Ok(file_type) => file_type,
947 Err(_) => continue,
948 };
904 949 results.push(DirEntry {
905 base_name,
906 full_path,
907 metadata,
950 hg_path: filename,
951 fs_path: Cow::Owned(full_path.to_path_buf()),
952 symlink_metadata: None,
953 file_type,
908 954 })
909 955 }
910 956 Ok(results)
911 957 }
958
959 fn symlink_metadata(&self) -> Result<std::fs::Metadata, std::io::Error> {
960 match &self.symlink_metadata {
961 Some(meta) => Ok(meta.clone()),
962 None => std::fs::symlink_metadata(&self.fs_path),
963 }
964 }
965
966 fn is_dir(&self) -> bool {
967 self.file_type == FakeFileType::Directory
968 }
969
970 fn is_file(&self) -> bool {
971 self.file_type == FakeFileType::File
972 }
973
974 fn is_symlink(&self) -> bool {
975 self.file_type == FakeFileType::Symlink
976 }
912 977 }
913 978
914 979 /// Return the `mtime` of a temporary file newly-created in the `.hg` directory
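
The reworked `DirEntry` keeps the cheap file type that `read_dir` already hands out and defers the full stat until `symlink_metadata()` is actually needed, instead of paying one `metadata()` syscall per entry up front. A reduced sketch of that lazy pattern (hypothetical `Entry` struct, standard `std::fs` calls):

    use std::borrow::Cow;
    use std::fs::Metadata;
    use std::io;
    use std::path::Path;

    struct Entry<'a> {
        fs_path: Cow<'a, Path>,
        // None until requested; read_dir only provided the file type.
        symlink_metadata: Option<Metadata>,
    }

    impl<'a> Entry<'a> {
        fn symlink_metadata(&self) -> io::Result<Metadata> {
            match &self.symlink_metadata {
                Some(meta) => Ok(meta.clone()), // already cached
                None => std::fs::symlink_metadata(&self.fs_path), // lazy stat
            }
        }
    }

    fn main() -> io::Result<()> {
        let entry = Entry {
            fs_path: Cow::Borrowed(Path::new(".")),
            symlink_metadata: None,
        };
        println!("is dir: {}", entry.symlink_metadata()?.is_dir());
        Ok(())
    }
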
@@ -194,7 +194,7 impl<G: Graph + Clone> PartialDiscovery<
194 194 size: usize,
195 195 ) -> Vec<Revision> {
196 196 if !self.randomize {
197 sample.sort();
197 sample.sort_unstable();
198 198 sample.truncate(size);
199 199 return sample;
200 200 }
@@ -513,14 +513,14 mod tests {
513 513 ) -> Vec<Revision> {
514 514 let mut as_vec: Vec<Revision> =
515 515 disco.undecided.as_ref().unwrap().iter().cloned().collect();
516 as_vec.sort();
516 as_vec.sort_unstable();
517 517 as_vec
518 518 }
519 519
520 520 fn sorted_missing(disco: &PartialDiscovery<SampleGraph>) -> Vec<Revision> {
521 521 let mut as_vec: Vec<Revision> =
522 522 disco.missing.iter().cloned().collect();
523 as_vec.sort();
523 as_vec.sort_unstable();
524 524 as_vec
525 525 }
526 526
@@ -529,7 +529,7 mod tests {
529 529 ) -> Result<Vec<Revision>, GraphError> {
530 530 let mut as_vec: Vec<Revision> =
531 531 disco.common_heads()?.iter().cloned().collect();
532 as_vec.sort();
532 as_vec.sort_unstable();
533 533 Ok(as_vec)
534 534 }
535 535
@@ -621,7 +621,7 mod tests {
621 621 disco.undecided = Some((1..=13).collect());
622 622
623 623 let mut sample_vec = disco.take_quick_sample(vec![], 4)?;
624 sample_vec.sort();
624 sample_vec.sort_unstable();
625 625 assert_eq!(sample_vec, vec![10, 11, 12, 13]);
626 626 Ok(())
627 627 }
@@ -632,7 +632,7 mod tests {
632 632 disco.ensure_undecided()?;
633 633
634 634 let mut sample_vec = disco.take_quick_sample(vec![12], 4)?;
635 sample_vec.sort();
635 sample_vec.sort_unstable();
636 636 // r12's only parent is r9, whose unique grand-parent through the
637 637 // diamond shape is r4. This ends there because the distance from r4
638 638 // to the root is only 3.
@@ -650,11 +650,11 mod tests {
650 650 assert_eq!(cache.get(&10).cloned(), None);
651 651
652 652 let mut children_4 = cache.get(&4).cloned().unwrap();
653 children_4.sort();
653 children_4.sort_unstable();
654 654 assert_eq!(children_4, vec![5, 6, 7]);
655 655
656 656 let mut children_7 = cache.get(&7).cloned().unwrap();
657 children_7.sort();
657 children_7.sort_unstable();
658 658 assert_eq!(children_7, vec![9, 11]);
659 659
660 660 Ok(())
@@ -684,7 +684,7 mod tests {
684 684 let (sample_set, size) = disco.bidirectional_sample(7)?;
685 685 assert_eq!(size, 7);
686 686 let mut sample: Vec<Revision> = sample_set.into_iter().collect();
687 sample.sort();
687 sample.sort_unstable();
688 688 // our DAG is a bit too small for the results to be really interesting
689 689 // at least it shows that
690 690 // - we went both ways
@@ -313,7 +313,7 pub fn build_single_regex(
313 313 PatternSyntax::RootGlob
314 314 | PatternSyntax::Path
315 315 | PatternSyntax::RelGlob
316 | PatternSyntax::RootFiles => normalize_path_bytes(&pattern),
316 | PatternSyntax::RootFiles => normalize_path_bytes(pattern),
317 317 PatternSyntax::Include | PatternSyntax::SubInclude => {
318 318 return Err(PatternError::NonRegexPattern(entry.clone()))
319 319 }
@@ -368,7 +368,7 pub fn parse_pattern_file_contents(
368 368 let mut warnings: Vec<PatternFileWarning> = vec![];
369 369
370 370 let mut current_syntax =
371 default_syntax_override.unwrap_or(b"relre:".as_ref());
371 default_syntax_override.unwrap_or_else(|| b"relre:".as_ref());
372 372
373 373 for (line_number, mut line) in lines.split(|c| *c == b'\n').enumerate() {
374 374 let line_number = line_number + 1;
@@ -402,7 +402,7 pub fn parse_pattern_file_contents(
402 402 continue;
403 403 }
404 404
405 let mut line_syntax: &[u8] = &current_syntax;
405 let mut line_syntax: &[u8] = current_syntax;
406 406
407 407 for (s, rels) in SYNTAXES.iter() {
408 408 if let Some(rest) = line.drop_prefix(rels) {
@@ -418,7 +418,7 pub fn parse_pattern_file_contents(
418 418 }
419 419
420 420 inputs.push(IgnorePattern::new(
421 parse_pattern_syntax(&line_syntax).map_err(|e| match e {
421 parse_pattern_syntax(line_syntax).map_err(|e| match e {
422 422 PatternError::UnsupportedSyntax(syntax) => {
423 423 PatternError::UnsupportedSyntaxInFile(
424 424 syntax,
@@ -428,7 +428,7 pub fn parse_pattern_file_contents(
428 428 }
429 429 _ => e,
430 430 })?,
431 &line,
431 line,
432 432 file_path,
433 433 ));
434 434 }
@@ -502,7 +502,7 pub fn get_patterns_from_file(
502 502 }
503 503 PatternSyntax::SubInclude => {
504 504 let mut sub_include = SubInclude::new(
505 &root_dir,
505 root_dir,
506 506 &entry.pattern,
507 507 &entry.source,
508 508 )?;
@@ -564,11 +564,11 impl SubInclude {
564 564 let prefix = canonical_path(root_dir, root_dir, new_root)?;
565 565
566 566 Ok(Self {
567 prefix: path_to_hg_path_buf(prefix).and_then(|mut p| {
567 prefix: path_to_hg_path_buf(prefix).map(|mut p| {
568 568 if !p.is_empty() {
569 569 p.push_byte(b'/');
570 570 }
571 Ok(p)
571 p
572 572 })?,
573 573 path: path.to_owned(),
574 574 root: new_root.to_owned(),
@@ -581,14 +581,14 impl SubInclude {
581 581 /// phase.
582 582 pub fn filter_subincludes(
583 583 ignore_patterns: Vec<IgnorePattern>,
584 ) -> Result<(Vec<Box<SubInclude>>, Vec<IgnorePattern>), HgPathError> {
584 ) -> Result<(Vec<SubInclude>, Vec<IgnorePattern>), HgPathError> {
585 585 let mut subincludes = vec![];
586 586 let mut others = vec![];
587 587
588 588 for pattern in ignore_patterns {
589 589 if let PatternSyntax::ExpandedSubInclude(sub_include) = pattern.syntax
590 590 {
591 subincludes.push(sub_include);
591 subincludes.push(*sub_include);
592 592 } else {
593 593 others.push(pattern)
594 594 }
@@ -30,6 +30,7 pub mod matchers;
30 30 pub mod repo;
31 31 pub mod revlog;
32 32 pub use revlog::*;
33 pub mod checkexec;
33 34 pub mod config;
34 35 pub mod lock;
35 36 pub mod logging;
@@ -47,10 +48,6 use std::collections::HashMap;
47 48 use std::fmt;
48 49 use twox_hash::RandomXxHashBuilder64;
49 50
50 /// This is a contract between the `micro-timer` crate and us, to expose
51 /// the `log` crate as `crate::log`.
52 use log;
53
54 51 pub type LineNumber = usize;
55 52
56 53 /// Rust's default hasher is too slow because it tries to prevent collision
@@ -2,7 +2,6
2 2
3 3 use crate::errors::HgError;
4 4 use crate::errors::HgResultExt;
5 use crate::utils::StrExt;
6 5 use crate::vfs::Vfs;
7 6 use std::io;
8 7 use std::io::ErrorKind;
@@ -107,8 +106,8 fn unlock(hg_vfs: Vfs, lock_filename: &s
107 106 /// running anymore.
108 107 fn lock_should_be_broken(data: &Option<String>) -> bool {
109 108 (|| -> Option<bool> {
110 let (prefix, pid) = data.as_ref()?.split_2(':')?;
111 if prefix != &*LOCK_PREFIX {
109 let (prefix, pid) = data.as_ref()?.split_once(':')?;
110 if prefix != *LOCK_PREFIX {
112 111 return Some(false);
113 112 }
114 113 let process_is_running;
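
The custom `StrExt::split_2` helper is retired in favor of `str::split_once`, stable since Rust 1.52 and equivalent for this lock-file parsing. In isolation (illustrative lock-data value):

    fn main() {
        let data = "hostname/prefix:12345";
        // Splits on the first ':' only; None when the separator is absent.
        let (prefix, pid) = data.split_once(':').unwrap();
        assert_eq!(prefix, "hostname/prefix");
        assert_eq!(pid, "12345");
        assert_eq!("no-separator".split_once(':'), None);
    }
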
@@ -145,6 +144,8 lazy_static::lazy_static! {
145 144
146 145 /// Same as https://github.com/python/cpython/blob/v3.10.0/Modules/socketmodule.c#L5414
147 146 const BUFFER_SIZE: usize = 1024;
147 // This cast is *needed* for platforms with signed chars
148 #[allow(clippy::unnecessary_cast)]
148 149 let mut buffer = [0 as libc::c_char; BUFFER_SIZE];
149 150 let hostname_bytes = unsafe {
150 151 let result = libc::gethostname(buffer.as_mut_ptr(), BUFFER_SIZE);
@@ -27,12 +27,9 use crate::filepatterns::normalize_path_
27 27 use std::borrow::ToOwned;
28 28 use std::collections::HashSet;
29 29 use std::fmt::{Display, Error, Formatter};
30 use std::iter::FromIterator;
31 30 use std::ops::Deref;
32 31 use std::path::{Path, PathBuf};
33 32
34 use micro_timer::timed;
35
36 33 #[derive(Debug, PartialEq)]
37 34 pub enum VisitChildrenSet {
38 35 /// Don't visit anything
@@ -305,11 +302,11 impl<'a> Matcher for IncludeMatcher<'a>
305 302 }
306 303
307 304 fn matches(&self, filename: &HgPath) -> bool {
308 (self.match_fn)(filename.as_ref())
305 (self.match_fn)(filename)
309 306 }
310 307
311 308 fn visit_children_set(&self, directory: &HgPath) -> VisitChildrenSet {
312 let dir = directory.as_ref();
309 let dir = directory;
313 310 if self.prefix && self.roots.contains(dir) {
314 311 return VisitChildrenSet::Recursive;
315 312 }
@@ -321,11 +318,11 impl<'a> Matcher for IncludeMatcher<'a>
321 318 return VisitChildrenSet::This;
322 319 }
323 320
324 if self.parents.contains(directory.as_ref()) {
321 if self.parents.contains(dir.as_ref()) {
325 322 let multiset = self.get_all_parents_children();
326 323 if let Some(children) = multiset.get(dir) {
327 324 return VisitChildrenSet::Set(
328 children.into_iter().map(HgPathBuf::from).collect(),
325 children.iter().map(HgPathBuf::from).collect(),
329 326 );
330 327 }
331 328 }
@@ -449,7 +446,7 impl Matcher for IntersectionMatcher {
449 446 VisitChildrenSet::This
450 447 }
451 448 (VisitChildrenSet::Set(m1), VisitChildrenSet::Set(m2)) => {
452 let set: HashSet<_> = m1.intersection(&m2).cloned().collect();
449 let set: HashSet<_> = m1.intersection(m2).cloned().collect();
453 450 if set.is_empty() {
454 451 VisitChildrenSet::Empty
455 452 } else {
@@ -612,7 +609,7 impl RegexMatcher {
612 609 /// This can fail when the pattern is invalid or not supported by the
613 610 /// underlying engine (the `regex` crate), for instance anything with
614 611 /// back-references.
615 #[timed]
612 #[logging_timer::time("trace")]
616 613 fn re_matcher(pattern: &[u8]) -> PatternResult<RegexMatcher> {
617 614 use std::io::Write;
618 615
@@ -702,10 +699,9 fn roots_and_dirs(
702 699 PatternSyntax::RootGlob | PatternSyntax::Glob => {
703 700 let mut root = HgPathBuf::new();
704 701 for p in pattern.split(|c| *c == b'/') {
705 if p.iter().any(|c| match *c {
706 b'[' | b'{' | b'*' | b'?' => true,
707 _ => false,
708 }) {
702 if p.iter()
703 .any(|c| matches!(*c, b'[' | b'{' | b'*' | b'?'))
704 {
709 705 break;
710 706 }
711 707 root.push(HgPathBuf::from_bytes(p).as_ref());
@@ -783,10 +779,10 fn roots_dirs_and_parents(
783 779
784 780 /// Returns a function that checks whether a given file (in the general sense)
785 781 /// should be matched.
786 fn build_match<'a, 'b>(
782 fn build_match<'a>(
787 783 ignore_patterns: Vec<IgnorePattern>,
788 ) -> PatternResult<(Vec<u8>, IgnoreFnType<'b>)> {
789 let mut match_funcs: Vec<IgnoreFnType<'b>> = vec![];
784 ) -> PatternResult<(Vec<u8>, IgnoreFnType<'a>)> {
785 let mut match_funcs: Vec<IgnoreFnType<'a>> = vec![];
790 786 // For debugging and printing
791 787 let mut patterns = vec![];
792 788
@@ -924,9 +920,8 impl<'a> IncludeMatcher<'a> {
924 920 dirs,
925 921 parents,
926 922 } = roots_dirs_and_parents(&ignore_patterns)?;
927 let prefix = ignore_patterns.iter().all(|k| match k.syntax {
928 PatternSyntax::Path | PatternSyntax::RelPath => true,
929 _ => false,
923 let prefix = ignore_patterns.iter().all(|k| {
924 matches!(k.syntax, PatternSyntax::Path | PatternSyntax::RelPath)
930 925 });
931 926 let (patterns, match_fn) = build_match(ignore_patterns)?;
932 927
@@ -37,12 +37,14 pub fn matcher(
37 37 }
38 38 // Treat "narrowspec does not exist" the same as "narrowspec file exists
39 39 // and is empty".
40 let store_spec = repo.store_vfs().try_read(FILENAME)?.unwrap_or(vec![]);
41 let working_copy_spec =
42 repo.hg_vfs().try_read(DIRSTATE_FILENAME)?.unwrap_or(vec![]);
40 let store_spec = repo.store_vfs().try_read(FILENAME)?.unwrap_or_default();
41 let working_copy_spec = repo
42 .hg_vfs()
43 .try_read(DIRSTATE_FILENAME)?
44 .unwrap_or_default();
43 45 if store_spec != working_copy_spec {
44 46 return Err(HgError::abort(
45 "working copy's narrowspec is stale",
47 "abort: working copy's narrowspec is stale",
46 48 exit_codes::STATE_ERROR,
47 49 Some("run 'hg tracked --update-working-copy'".into()),
48 50 )
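
`unwrap_or(vec![])` constructs its fallback before `unwrap_or` is even called, whereas `unwrap_or_default()` (or `unwrap_or_else(Vec::new)`) builds it only on the `None` path; clippy's `or_fun_call` family of lints points the same way. The cost is trivial for `Vec::new`, but the idiom is worth keeping consistent:

    fn main() {
        // Treat "file does not exist" the same as "file exists and is empty":
        let missing: Option<Vec<u8>> = None;
        let spec = missing.unwrap_or_default();
        assert!(spec.is_empty());

        let present = Some(vec![1u8, 2]).unwrap_or_default();
        assert_eq!(present, vec![1, 2]);
    }
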
@@ -6,8 +6,8
6 6 // GNU General Public License version 2 or any later version.
7 7
8 8 use crate::repo::Repo;
9 use crate::revlog::revlog::RevlogError;
10 9 use crate::revlog::Node;
10 use crate::revlog::RevlogError;
11 11
12 12 use crate::utils::hg_path::HgPath;
13 13
@@ -53,10 +53,13 fn find_item<'a>(
53 53 }
54 54 }
55 55
56 // Tuple of (found, missing) paths in the manifest
57 type ManifestQueryResponse<'a> = (Vec<(&'a HgPath, Node)>, Vec<&'a HgPath>);
58
56 59 fn find_files_in_manifest<'query>(
57 60 manifest: &Manifest,
58 61 query: impl Iterator<Item = &'query HgPath>,
59 ) -> Result<(Vec<(&'query HgPath, Node)>, Vec<&'query HgPath>), HgError> {
62 ) -> Result<ManifestQueryResponse<'query>, HgError> {
60 63 let mut manifest = put_back(manifest.iter());
61 64 let mut res = vec![];
62 65 let mut missing = vec![];
@@ -67,7 +70,7 fn find_files_in_manifest<'query>(
67 70 Some(item) => res.push((file, item)),
68 71 }
69 72 }
70 return Ok((res, missing));
73 Ok((res, missing))
71 74 }
72 75
73 76 /// Output the given revision of files
@@ -91,10 +94,8 pub fn cat<'a>(
91 94
92 95 files.sort_unstable();
93 96
94 let (found, missing) = find_files_in_manifest(
95 &manifest,
96 files.into_iter().map(|f| f.as_ref()),
97 )?;
97 let (found, missing) =
98 find_files_in_manifest(&manifest, files.into_iter())?;
98 99
99 100 for (file_path, file_node) in found {
100 101 found_any = true;
@@ -7,7 +7,7
7 7
8 8 use crate::repo::Repo;
9 9 use crate::requirements;
10 use crate::revlog::revlog::{Revlog, RevlogError};
10 use crate::revlog::{Revlog, RevlogError};
11 11
12 12 /// Kind of data to debug
13 13 #[derive(Debug, Copy, Clone)]
@@ -5,78 +5,41
5 5 // This software may be used and distributed according to the terms of the
6 6 // GNU General Public License version 2 or any later version.
7 7
8 use crate::dirstate::parsers::parse_dirstate_entries;
9 use crate::dirstate_tree::on_disk::{for_each_tracked_path, read_docket};
10 8 use crate::errors::HgError;
9 use crate::matchers::Matcher;
11 10 use crate::repo::Repo;
12 11 use crate::revlog::manifest::Manifest;
13 use crate::revlog::revlog::RevlogError;
12 use crate::revlog::RevlogError;
13 use crate::utils::filter_map_results;
14 14 use crate::utils::hg_path::HgPath;
15 use crate::DirstateError;
16 use rayon::prelude::*;
17
18 /// List files under Mercurial control in the working directory
19 /// by reading the dirstate
20 pub struct Dirstate {
21 /// The `dirstate` content.
22 content: Vec<u8>,
23 v2_metadata: Option<Vec<u8>>,
24 }
25
26 impl Dirstate {
27 pub fn new(repo: &Repo) -> Result<Self, HgError> {
28 let mut content = repo.hg_vfs().read("dirstate")?;
29 let v2_metadata = if repo.has_dirstate_v2() {
30 let docket = read_docket(&content)?;
31 let meta = docket.tree_metadata().to_vec();
32 content = repo.hg_vfs().read(docket.data_filename())?;
33 Some(meta)
34 } else {
35 None
36 };
37 Ok(Self {
38 content,
39 v2_metadata,
40 })
41 }
42
43 pub fn tracked_files(&self) -> Result<Vec<&HgPath>, DirstateError> {
44 let mut files = Vec::new();
45 if !self.content.is_empty() {
46 if let Some(meta) = &self.v2_metadata {
47 for_each_tracked_path(&self.content, meta, |path| {
48 files.push(path)
49 })?
50 } else {
51 let _parents = parse_dirstate_entries(
52 &self.content,
53 |path, entry, _copy_source| {
54 if entry.tracked() {
55 files.push(path)
56 }
57 Ok(())
58 },
59 )?;
60 }
61 }
62 files.par_sort_unstable();
63 Ok(files)
64 }
65 }
66 15
67 16 /// List files under Mercurial control at a given revision.
68 17 pub fn list_rev_tracked_files(
69 18 repo: &Repo,
70 19 revset: &str,
20 narrow_matcher: Box<dyn Matcher>,
71 21 ) -> Result<FilesForRev, RevlogError> {
72 22 let rev = crate::revset::resolve_single(revset, repo)?;
73 Ok(FilesForRev(repo.manifest_for_rev(rev)?))
23 Ok(FilesForRev {
24 manifest: repo.manifest_for_rev(rev)?,
25 narrow_matcher,
26 })
74 27 }
75 28
76 pub struct FilesForRev(Manifest);
29 pub struct FilesForRev {
30 manifest: Manifest,
31 narrow_matcher: Box<dyn Matcher>,
32 }
77 33
78 34 impl FilesForRev {
79 35 pub fn iter(&self) -> impl Iterator<Item = Result<&HgPath, HgError>> {
80 self.0.iter().map(|entry| Ok(entry?.path))
36 filter_map_results(self.manifest.iter(), |entry| {
37 let path = entry.path;
38 Ok(if self.narrow_matcher.matches(path) {
39 Some(path)
40 } else {
41 None
42 })
43 })
81 44 }
82 45 }
@@ -7,5 +7,4 mod debugdata;
7 7 mod list_tracked_files;
8 8 pub use cat::{cat, CatOutput};
9 9 pub use debugdata::{debug_data, DebugDataKind};
10 pub use list_tracked_files::Dirstate;
11 10 pub use list_tracked_files::{list_rev_tracked_files, FilesForRev};
@@ -8,7 +8,7 use crate::errors::{HgError, IoResultExt
8 8 use crate::lock::{try_with_lock_no_wait, LockError};
9 9 use crate::manifest::{Manifest, Manifestlog};
10 10 use crate::revlog::filelog::Filelog;
11 use crate::revlog::revlog::RevlogError;
11 use crate::revlog::RevlogError;
12 12 use crate::utils::files::get_path_from_bytes;
13 13 use crate::utils::hg_path::HgPath;
14 14 use crate::utils::SliceExt;
@@ -68,9 +68,9 impl Repo {
68 68 return Ok(ancestor.to_path_buf());
69 69 }
70 70 }
71 return Err(RepoError::NotFound {
71 Err(RepoError::NotFound {
72 72 at: current_directory,
73 });
73 })
74 74 }
75 75
76 76 /// Find a repository, either at the given path (which must contain a `.hg`
@@ -87,13 +87,11 impl Repo {
87 87 ) -> Result<Self, RepoError> {
88 88 if let Some(root) = explicit_path {
89 89 if is_dir(root.join(".hg"))? {
90 Self::new_at_path(root.to_owned(), config)
90 Self::new_at_path(root, config)
91 91 } else if is_file(&root)? {
92 92 Err(HgError::unsupported("bundle repository").into())
93 93 } else {
94 Err(RepoError::NotFound {
95 at: root.to_owned(),
96 })
94 Err(RepoError::NotFound { at: root })
97 95 }
98 96 } else {
99 97 let root = Self::find_repo_root()?;
@@ -108,9 +106,8 impl Repo {
108 106 ) -> Result<Self, RepoError> {
109 107 let dot_hg = working_directory.join(".hg");
110 108
111 let mut repo_config_files = Vec::new();
112 repo_config_files.push(dot_hg.join("hgrc"));
113 repo_config_files.push(dot_hg.join("hgrc-not-shared"));
109 let mut repo_config_files =
110 vec![dot_hg.join("hgrc"), dot_hg.join("hgrc-not-shared")];
114 111
115 112 let hg_vfs = Vfs { base: &dot_hg };
116 113 let mut reqs = requirements::load_if_exists(hg_vfs)?;
@@ -254,7 +251,7 impl Repo {
254 251 .hg_vfs()
255 252 .read("dirstate")
256 253 .io_not_found_as_none()?
257 .unwrap_or(Vec::new()))
254 .unwrap_or_default())
258 255 }
259 256
260 257 pub fn dirstate_parents(&self) -> Result<DirstateParents, HgError> {
@@ -277,8 +274,7 impl Repo {
277 274 .set(Some(docket.uuid.to_owned()));
278 275 docket.parents()
279 276 } else {
280 crate::dirstate::parsers::parse_dirstate_parents(&dirstate)?
281 .clone()
277 *crate::dirstate::parsers::parse_dirstate_parents(&dirstate)?
282 278 };
283 279 self.dirstate_parents.set(parents);
284 280 Ok(parents)
@@ -1,7 +1,7
1 1 use crate::errors::HgError;
2 use crate::revlog::revlog::{Revlog, RevlogEntry, RevlogError};
3 2 use crate::revlog::Revision;
4 3 use crate::revlog::{Node, NodePrefix};
4 use crate::revlog::{Revlog, RevlogEntry, RevlogError};
5 5 use crate::utils::hg_path::HgPath;
6 6 use crate::vfs::Vfs;
7 7 use itertools::Itertools;
@@ -165,7 +165,7 impl<'changelog> ChangelogRevisionData<'
165 165 pub fn files(&self) -> impl Iterator<Item = &HgPath> {
166 166 self.bytes[self.timestamp_end + 1..self.files_end]
167 167 .split(|b| b == &b'\n')
168 .map(|path| HgPath::new(path))
168 .map(HgPath::new)
169 169 }
170 170
171 171 /// The change description.
@@ -1,10 +1,10
1 1 use crate::errors::HgError;
2 2 use crate::repo::Repo;
3 3 use crate::revlog::path_encode::path_encode;
4 use crate::revlog::revlog::RevlogEntry;
5 use crate::revlog::revlog::{Revlog, RevlogError};
6 4 use crate::revlog::NodePrefix;
7 5 use crate::revlog::Revision;
6 use crate::revlog::RevlogEntry;
7 use crate::revlog::{Revlog, RevlogError};
8 8 use crate::utils::files::get_path_from_bytes;
9 9 use crate::utils::hg_path::HgPath;
10 10 use crate::utils::SliceExt;
@@ -49,7 +49,7 impl Filelog {
49 49 file_rev: Revision,
50 50 ) -> Result<FilelogRevisionData, RevlogError> {
51 51 let data: Vec<u8> = self.revlog.get_rev_data(file_rev)?.into_owned();
52 Ok(FilelogRevisionData(data.into()))
52 Ok(FilelogRevisionData(data))
53 53 }
54 54
55 55 /// The given node ID is that of the file as found in a filelog, not of a
@@ -161,7 +161,7 impl FilelogEntry<'_> {
161 161 // this `FilelogEntry` does not have such metadata:
162 162 let file_data_len = uncompressed_len;
163 163
164 return file_data_len != other_len;
164 file_data_len != other_len
165 165 }
166 166
167 167 pub fn data(&self) -> Result<FilelogRevisionData, HgError> {
@@ -1,4 +1,3
1 use std::convert::TryInto;
2 1 use std::ops::Deref;
3 2
4 3 use byteorder::{BigEndian, ByteOrder};
@@ -22,11 +21,11 pub struct IndexHeaderFlags {
22 21 impl IndexHeaderFlags {
23 22 /// Corresponds to FLAG_INLINE_DATA in python
24 23 pub fn is_inline(self) -> bool {
25 return self.flags & 1 != 0;
24 self.flags & 1 != 0
26 25 }
27 26 /// Corresponds to FLAG_GENERALDELTA in python
28 27 pub fn uses_generaldelta(self) -> bool {
29 return self.flags & 2 != 0;
28 self.flags & 2 != 0
30 29 }
31 30 }
32 31
@@ -36,9 +35,9 impl IndexHeader {
36 35 fn format_flags(&self) -> IndexHeaderFlags {
37 36 // No "unknown flags" check here, unlike in python. Maybe there should
38 37 // be.
39 return IndexHeaderFlags {
38 IndexHeaderFlags {
40 39 flags: BigEndian::read_u16(&self.header_bytes[0..2]),
41 };
40 }
42 41 }
43 42
44 43 /// The only revlog version currently supported by rhg.
@@ -46,7 +45,7 impl IndexHeader {
46 45
47 46 /// Corresponds to `_format_version` in Python.
48 47 fn format_version(&self) -> u16 {
49 return BigEndian::read_u16(&self.header_bytes[2..4]);
48 BigEndian::read_u16(&self.header_bytes[2..4])
50 49 }
51 50
52 51 const EMPTY_INDEX_HEADER: IndexHeader = IndexHeader {
@@ -60,7 +59,7 impl IndexHeader {
60 59 };
61 60
62 61 fn parse(index_bytes: &[u8]) -> Result<IndexHeader, HgError> {
63 if index_bytes.len() == 0 {
62 if index_bytes.is_empty() {
64 63 return Ok(IndexHeader::EMPTY_INDEX_HEADER);
65 64 }
66 65 if index_bytes.len() < 4 {
@@ -68,13 +67,13 impl IndexHeader {
68 67 "corrupted revlog: can't read the index format header",
69 68 ));
70 69 }
71 return Ok(IndexHeader {
70 Ok(IndexHeader {
72 71 header_bytes: {
73 72 let bytes: [u8; 4] =
74 73 index_bytes[0..4].try_into().expect("impossible");
75 74 bytes
76 75 },
77 });
76 })
78 77 }
79 78 }
80 79
@@ -128,8 +127,7 impl Index {
128 127 uses_generaldelta,
129 128 })
130 129 } else {
131 Err(HgError::corrupted("unexpected inline revlog length")
132 .into())
130 Err(HgError::corrupted("unexpected inline revlog length"))
133 131 }
134 132 } else {
135 133 Ok(Self {
@@ -327,6 +325,7 mod tests {
327 325
328 326 #[cfg(test)]
329 327 impl IndexEntryBuilder {
328 #[allow(clippy::new_without_default)]
330 329 pub fn new() -> Self {
331 330 Self {
332 331 is_first: false,
@@ -466,8 +465,8 mod tests {
466 465 .with_inline(false)
467 466 .build();
468 467
469 assert_eq!(is_inline(&bytes), false);
470 assert_eq!(uses_generaldelta(&bytes), false);
468 assert!(!is_inline(&bytes));
469 assert!(!uses_generaldelta(&bytes));
471 470 }
472 471
473 472 #[test]
@@ -478,8 +477,8 mod tests {
478 477 .with_inline(true)
479 478 .build();
480 479
481 assert_eq!(is_inline(&bytes), true);
482 assert_eq!(uses_generaldelta(&bytes), false);
480 assert!(is_inline(&bytes));
481 assert!(!uses_generaldelta(&bytes));
483 482 }
484 483
485 484 #[test]
@@ -490,8 +489,8 mod tests {
490 489 .with_inline(true)
491 490 .build();
492 491
493 assert_eq!(is_inline(&bytes), true);
494 assert_eq!(uses_generaldelta(&bytes), true);
492 assert!(is_inline(&bytes));
493 assert!(uses_generaldelta(&bytes));
495 494 }
496 495
497 496 #[test]
@@ -1,7 +1,7
1 1 use crate::errors::HgError;
2 use crate::revlog::revlog::{Revlog, RevlogError};
3 2 use crate::revlog::Revision;
4 3 use crate::revlog::{Node, NodePrefix};
4 use crate::revlog::{Revlog, RevlogError};
5 5 use crate::utils::hg_path::HgPath;
6 6 use crate::utils::SliceExt;
7 7 use crate::vfs::Vfs;
@@ -1,4 +1,4
1 // Copyright 2018-2020 Georges Racinet <georges.racinet@octobus.net>
1 // Copyright 2018-2023 Georges Racinet <georges.racinet@octobus.net>
2 2 // and Mercurial contributors
3 3 //
4 4 // This software may be used and distributed according to the terms of the
@@ -15,7 +15,22 pub mod filelog;
15 15 pub mod index;
16 16 pub mod manifest;
17 17 pub mod patch;
18 pub mod revlog;
18
19 use std::borrow::Cow;
20 use std::io::Read;
21 use std::ops::Deref;
22 use std::path::Path;
23
24 use flate2::read::ZlibDecoder;
25 use sha1::{Digest, Sha1};
26 use zstd;
27
28 use self::node::{NODE_BYTES_LENGTH, NULL_NODE};
29 use self::nodemap_docket::NodeMapDocket;
30 use super::index::Index;
31 use super::nodemap::{NodeMap, NodeMapError};
32 use crate::errors::HgError;
33 use crate::vfs::Vfs;
19 34
20 35 /// Mercurial revision numbers
21 36 ///
@@ -70,3 +85,626 pub trait RevlogIndex {
70 85 /// `NULL_REVISION` is not considered to be out of bounds.
71 86 fn node(&self, rev: Revision) -> Option<&Node>;
72 87 }
88
89 const REVISION_FLAG_CENSORED: u16 = 1 << 15;
90 const REVISION_FLAG_ELLIPSIS: u16 = 1 << 14;
91 const REVISION_FLAG_EXTSTORED: u16 = 1 << 13;
92 const REVISION_FLAG_HASCOPIESINFO: u16 = 1 << 12;
93
94 // Keep this in sync with REVIDX_KNOWN_FLAGS in
95 // mercurial/revlogutils/flagutil.py
96 const REVIDX_KNOWN_FLAGS: u16 = REVISION_FLAG_CENSORED
97 | REVISION_FLAG_ELLIPSIS
98 | REVISION_FLAG_EXTSTORED
99 | REVISION_FLAG_HASCOPIESINFO;
100
101 const NULL_REVLOG_ENTRY_FLAGS: u16 = 0;
102
103 #[derive(Debug, derive_more::From)]
104 pub enum RevlogError {
105 InvalidRevision,
106 /// Working directory is not supported
107 WDirUnsupported,
108 /// Found more than one entry whose ID match the requested prefix
109 AmbiguousPrefix,
110 #[from]
111 Other(HgError),
112 }
113
114 impl From<NodeMapError> for RevlogError {
115 fn from(error: NodeMapError) -> Self {
116 match error {
117 NodeMapError::MultipleResults => RevlogError::AmbiguousPrefix,
118 NodeMapError::RevisionNotInIndex(rev) => RevlogError::corrupted(
119 format!("nodemap point to revision {} not in index", rev),
120 ),
121 }
122 }
123 }
124
125 fn corrupted<S: AsRef<str>>(context: S) -> HgError {
126 HgError::corrupted(format!("corrupted revlog, {}", context.as_ref()))
127 }
128
129 impl RevlogError {
130 fn corrupted<S: AsRef<str>>(context: S) -> Self {
131 RevlogError::Other(corrupted(context))
132 }
133 }
134
135 /// Read-only implementation of revlog.
136 pub struct Revlog {
137 /// When index and data are not interleaved: bytes of the revlog index.
138 /// When index and data are interleaved: bytes of the revlog index and
139 /// data.
140 index: Index,
141 /// When index and data are not interleaved: bytes of the revlog data
142 data_bytes: Option<Box<dyn Deref<Target = [u8]> + Send>>,
143 /// When present on disk: the persistent nodemap for this revlog
144 nodemap: Option<nodemap::NodeTree>,
145 }
146
147 impl Revlog {
148 /// Open a revlog index file.
149 ///
150 /// It will also open the associated data file if index and data are not
151 /// interleaved.
152 pub fn open(
153 store_vfs: &Vfs,
154 index_path: impl AsRef<Path>,
155 data_path: Option<&Path>,
156 use_nodemap: bool,
157 ) -> Result<Self, HgError> {
158 let index_path = index_path.as_ref();
159 let index = {
160 match store_vfs.mmap_open_opt(&index_path)? {
161 None => Index::new(Box::new(vec![])),
162 Some(index_mmap) => {
163 let index = Index::new(Box::new(index_mmap))?;
164 Ok(index)
165 }
166 }
167 }?;
168
169 let default_data_path = index_path.with_extension("d");
170
171 // A type annotation is required: without it the compiler
172 // won't recognize Mmap as Deref<Target = [u8]>
173 let data_bytes: Option<Box<dyn Deref<Target = [u8]> + Send>> =
174 if index.is_inline() {
175 None
176 } else {
177 let data_path = data_path.unwrap_or(&default_data_path);
178 let data_mmap = store_vfs.mmap_open(data_path)?;
179 Some(Box::new(data_mmap))
180 };
181
182 let nodemap = if index.is_inline() || !use_nodemap {
183 None
184 } else {
185 NodeMapDocket::read_from_file(store_vfs, index_path)?.map(
186 |(docket, data)| {
187 nodemap::NodeTree::load_bytes(
188 Box::new(data),
189 docket.data_length,
190 )
191 },
192 )
193 };
194
195 Ok(Revlog {
196 index,
197 data_bytes,
198 nodemap,
199 })
200 }
201
202 /// Return number of entries of the `Revlog`.
203 pub fn len(&self) -> usize {
204 self.index.len()
205 }
206
207 /// Returns `true` if the `Revlog` has zero entries.
208 pub fn is_empty(&self) -> bool {
209 self.index.is_empty()
210 }
211
212 /// Returns the node ID for the given revision number, if it exists in this
213 /// revlog
214 pub fn node_from_rev(&self, rev: Revision) -> Option<&Node> {
215 if rev == NULL_REVISION {
216 return Some(&NULL_NODE);
217 }
218 Some(self.index.get_entry(rev)?.hash())
219 }
220
221 /// Return the revision number for the given node ID, if it exists in this
222 /// revlog
223 pub fn rev_from_node(
224 &self,
225 node: NodePrefix,
226 ) -> Result<Revision, RevlogError> {
227 if node.is_prefix_of(&NULL_NODE) {
228 return Ok(NULL_REVISION);
229 }
230
231 if let Some(nodemap) = &self.nodemap {
232 return nodemap
233 .find_bin(&self.index, node)?
234 .ok_or(RevlogError::InvalidRevision);
235 }
236
237 // Fallback to linear scan when a persistent nodemap is not present.
238 // This happens when the persistent-nodemap experimental feature is not
239 // enabled, or for small revlogs.
240 //
241 // TODO: consider building a non-persistent nodemap in memory to
242 // optimize these cases.
243 let mut found_by_prefix = None;
244 for rev in (0..self.len() as Revision).rev() {
245 let index_entry = self.index.get_entry(rev).ok_or_else(|| {
246 HgError::corrupted(
247 "revlog references a revision not in the index",
248 )
249 })?;
250 if node == *index_entry.hash() {
251 return Ok(rev);
252 }
253 if node.is_prefix_of(index_entry.hash()) {
254 if found_by_prefix.is_some() {
255 return Err(RevlogError::AmbiguousPrefix);
256 }
257 found_by_prefix = Some(rev)
258 }
259 }
260 found_by_prefix.ok_or(RevlogError::InvalidRevision)
261 }
262
263 /// Returns whether the given revision exists in this revlog.
264 pub fn has_rev(&self, rev: Revision) -> bool {
265 self.index.get_entry(rev).is_some()
266 }
267
268 /// Return the full data associated to a revision.
269 ///
270 /// All entries required to build the final data out of deltas will be
271 /// retrieved as needed, and the deltas will be applied to the initial
272 /// snapshot to rebuild the final data.
273 pub fn get_rev_data(
274 &self,
275 rev: Revision,
276 ) -> Result<Cow<[u8]>, RevlogError> {
277 if rev == NULL_REVISION {
278 return Ok(Cow::Borrowed(&[]));
279 };
280 Ok(self.get_entry(rev)?.data()?)
281 }
282
283 /// Check the hash of some given data against the recorded hash.
284 pub fn check_hash(
285 &self,
286 p1: Revision,
287 p2: Revision,
288 expected: &[u8],
289 data: &[u8],
290 ) -> bool {
291 let e1 = self.index.get_entry(p1);
292 let h1 = match e1 {
293 Some(ref entry) => entry.hash(),
294 None => &NULL_NODE,
295 };
296 let e2 = self.index.get_entry(p2);
297 let h2 = match e2 {
298 Some(ref entry) => entry.hash(),
299 None => &NULL_NODE,
300 };
301
302 hash(data, h1.as_bytes(), h2.as_bytes()) == expected
303 }
304
305 /// Build the full data of a revision out of its snapshot
306 /// and its deltas.
307 fn build_data_from_deltas(
308 snapshot: RevlogEntry,
309 deltas: &[RevlogEntry],
310 ) -> Result<Vec<u8>, HgError> {
311 let snapshot = snapshot.data_chunk()?;
312 let deltas = deltas
313 .iter()
314 .rev()
315 .map(RevlogEntry::data_chunk)
316 .collect::<Result<Vec<_>, _>>()?;
317 let patches: Vec<_> =
318 deltas.iter().map(|d| patch::PatchList::new(d)).collect();
319 let patch = patch::fold_patch_lists(&patches);
320 Ok(patch.apply(&snapshot))
321 }
322
323 /// Return the revlog data.
324 fn data(&self) -> &[u8] {
325 match &self.data_bytes {
326 Some(data_bytes) => data_bytes,
327 None => panic!(
328 "forgot to load the data or trying to access inline data"
329 ),
330 }
331 }
332
333 pub fn make_null_entry(&self) -> RevlogEntry {
334 RevlogEntry {
335 revlog: self,
336 rev: NULL_REVISION,
337 bytes: b"",
338 compressed_len: 0,
339 uncompressed_len: 0,
340 base_rev_or_base_of_delta_chain: None,
341 p1: NULL_REVISION,
342 p2: NULL_REVISION,
343 flags: NULL_REVLOG_ENTRY_FLAGS,
344 hash: NULL_NODE,
345 }
346 }
347
348 /// Get an entry of the revlog.
349 pub fn get_entry(
350 &self,
351 rev: Revision,
352 ) -> Result<RevlogEntry, RevlogError> {
353 if rev == NULL_REVISION {
354 return Ok(self.make_null_entry());
355 }
356 let index_entry = self
357 .index
358 .get_entry(rev)
359 .ok_or(RevlogError::InvalidRevision)?;
360 let start = index_entry.offset();
361 let end = start + index_entry.compressed_len() as usize;
362 let data = if self.index.is_inline() {
363 self.index.data(start, end)
364 } else {
365 &self.data()[start..end]
366 };
367 let entry = RevlogEntry {
368 revlog: self,
369 rev,
370 bytes: data,
371 compressed_len: index_entry.compressed_len(),
372 uncompressed_len: index_entry.uncompressed_len(),
373 base_rev_or_base_of_delta_chain: if index_entry
374 .base_revision_or_base_of_delta_chain()
375 == rev
376 {
377 None
378 } else {
379 Some(index_entry.base_revision_or_base_of_delta_chain())
380 },
381 p1: index_entry.p1(),
382 p2: index_entry.p2(),
383 flags: index_entry.flags(),
384 hash: *index_entry.hash(),
385 };
386 Ok(entry)
387 }
388
389 /// When resolving internal references within the revlog, any errors
390 /// should be reported as corruption, instead of e.g. "invalid revision"
391 fn get_entry_internal(
392 &self,
393 rev: Revision,
394 ) -> Result<RevlogEntry, HgError> {
395 self.get_entry(rev)
396 .map_err(|_| corrupted(format!("revision {} out of range", rev)))
397 }
398 }
399
400 /// The revlog entry's bytes and the necessary information to extract
401 /// the entry's data.
402 #[derive(Clone)]
403 pub struct RevlogEntry<'a> {
404 revlog: &'a Revlog,
405 rev: Revision,
406 bytes: &'a [u8],
407 compressed_len: u32,
408 uncompressed_len: i32,
409 base_rev_or_base_of_delta_chain: Option<Revision>,
410 p1: Revision,
411 p2: Revision,
412 flags: u16,
413 hash: Node,
414 }
415
416 impl<'a> RevlogEntry<'a> {
417 pub fn revision(&self) -> Revision {
418 self.rev
419 }
420
421 pub fn node(&self) -> &Node {
422 &self.hash
423 }
424
425 pub fn uncompressed_len(&self) -> Option<u32> {
426 u32::try_from(self.uncompressed_len).ok()
427 }
428
429 pub fn has_p1(&self) -> bool {
430 self.p1 != NULL_REVISION
431 }
432
433 pub fn p1_entry(&self) -> Result<Option<RevlogEntry>, RevlogError> {
434 if self.p1 == NULL_REVISION {
435 Ok(None)
436 } else {
437 Ok(Some(self.revlog.get_entry(self.p1)?))
438 }
439 }
440
441 pub fn p2_entry(&self) -> Result<Option<RevlogEntry>, RevlogError> {
442 if self.p2 == NULL_REVISION {
443 Ok(None)
444 } else {
445 Ok(Some(self.revlog.get_entry(self.p2)?))
446 }
447 }
448
449 pub fn p1(&self) -> Option<Revision> {
450 if self.p1 == NULL_REVISION {
451 None
452 } else {
453 Some(self.p1)
454 }
455 }
456
457 pub fn p2(&self) -> Option<Revision> {
458 if self.p2 == NULL_REVISION {
459 None
460 } else {
461 Some(self.p2)
462 }
463 }
464
465 pub fn is_censored(&self) -> bool {
466 (self.flags & REVISION_FLAG_CENSORED) != 0
467 }
468
469 pub fn has_length_affecting_flag_processor(&self) -> bool {
470 // Relevant Python code: revlog.size()
471 // note: ELLIPSIS is known to not change the content
472 (self.flags & (REVIDX_KNOWN_FLAGS ^ REVISION_FLAG_ELLIPSIS)) != 0
473 }
474
475 /// The data for this entry, after resolving deltas if any.
476 pub fn rawdata(&self) -> Result<Cow<'a, [u8]>, HgError> {
477 let mut entry = self.clone();
478 let mut delta_chain = vec![];
479
480 // The meaning of `base_rev_or_base_of_delta_chain` depends on
481 // generaldelta. See the doc on `ENTRY_DELTA_BASE` in
482 // `mercurial/revlogutils/constants.py` and the code in
483 // [_chaininfo] and in [index_deltachain].
484 let uses_generaldelta = self.revlog.index.uses_generaldelta();
485 while let Some(base_rev) = entry.base_rev_or_base_of_delta_chain {
486 let base_rev = if uses_generaldelta {
487 base_rev
488 } else {
489 entry.rev - 1
490 };
491 delta_chain.push(entry);
492 entry = self.revlog.get_entry_internal(base_rev)?;
493 }
494
495 let data = if delta_chain.is_empty() {
496 entry.data_chunk()?
497 } else {
498 Revlog::build_data_from_deltas(entry, &delta_chain)?.into()
499 };
500
501 Ok(data)
502 }
503
504 fn check_data(
505 &self,
506 data: Cow<'a, [u8]>,
507 ) -> Result<Cow<'a, [u8]>, HgError> {
508 if self.revlog.check_hash(
509 self.p1,
510 self.p2,
511 self.hash.as_bytes(),
512 &data,
513 ) {
514 Ok(data)
515 } else {
516 if (self.flags & REVISION_FLAG_ELLIPSIS) != 0 {
517 return Err(HgError::unsupported(
518 "ellipsis revisions are not supported by rhg",
519 ));
520 }
521 Err(corrupted(format!(
522 "hash check failed for revision {}",
523 self.rev
524 )))
525 }
526 }
527
528 pub fn data(&self) -> Result<Cow<'a, [u8]>, HgError> {
529 let data = self.rawdata()?;
530 if self.is_censored() {
531 return Err(HgError::CensoredNodeError);
532 }
533 self.check_data(data)
534 }
535
536 /// Extract the data contained in the entry.
537 /// This may be a delta. (See `is_delta`.)
538 fn data_chunk(&self) -> Result<Cow<'a, [u8]>, HgError> {
539 if self.bytes.is_empty() {
540 return Ok(Cow::Borrowed(&[]));
541 }
542 match self.bytes[0] {
543 // Revision data is the entirety of the entry, including this
544 // header.
545 b'\0' => Ok(Cow::Borrowed(self.bytes)),
546 // Raw revision data follows.
547 b'u' => Ok(Cow::Borrowed(&self.bytes[1..])),
548 // zlib (RFC 1950) data.
549 b'x' => Ok(Cow::Owned(self.uncompressed_zlib_data()?)),
550 // zstd data.
551 b'\x28' => Ok(Cow::Owned(self.uncompressed_zstd_data()?)),
552 // A proper new format should have had a repo/store requirement.
553 format_type => Err(corrupted(format!(
554 "unknown compression header '{}'",
555 format_type
556 ))),
557 }
558 }
559
560 fn uncompressed_zlib_data(&self) -> Result<Vec<u8>, HgError> {
561 let mut decoder = ZlibDecoder::new(self.bytes);
562 if self.is_delta() {
563 let mut buf = Vec::with_capacity(self.compressed_len as usize);
564 decoder
565 .read_to_end(&mut buf)
566 .map_err(|e| corrupted(e.to_string()))?;
567 Ok(buf)
568 } else {
569 let cap = self.uncompressed_len.max(0) as usize;
570 let mut buf = vec![0; cap];
571 decoder
572 .read_exact(&mut buf)
573 .map_err(|e| corrupted(e.to_string()))?;
574 Ok(buf)
575 }
576 }
577
578 fn uncompressed_zstd_data(&self) -> Result<Vec<u8>, HgError> {
579 if self.is_delta() {
580 let mut buf = Vec::with_capacity(self.compressed_len as usize);
581 zstd::stream::copy_decode(self.bytes, &mut buf)
582 .map_err(|e| corrupted(e.to_string()))?;
583 Ok(buf)
584 } else {
585 let cap = self.uncompressed_len.max(0) as usize;
586 let mut buf = vec![0; cap];
587 let len = zstd::bulk::decompress_to_buffer(self.bytes, &mut buf)
588 .map_err(|e| corrupted(e.to_string()))?;
589 if len != self.uncompressed_len as usize {
590 Err(corrupted("uncompressed length does not match"))
591 } else {
592 Ok(buf)
593 }
594 }
595 }
596
597 /// Tells whether the entry is a snapshot or a delta
598 /// (this influences decompression).
599 fn is_delta(&self) -> bool {
600 self.base_rev_or_base_of_delta_chain.is_some()
601 }
602 }
603
604 /// Calculate the hash of a revision given its data and its parents.
605 fn hash(
606 data: &[u8],
607 p1_hash: &[u8],
608 p2_hash: &[u8],
609 ) -> [u8; NODE_BYTES_LENGTH] {
610 let mut hasher = Sha1::new();
611 let (a, b) = (p1_hash, p2_hash);
612 if a > b {
613 hasher.update(b);
614 hasher.update(a);
615 } else {
616 hasher.update(a);
617 hasher.update(b);
618 }
619 hasher.update(data);
620 *hasher.finalize().as_ref()
621 }
622
623 #[cfg(test)]
624 mod tests {
625 use super::*;
626 use crate::index::{IndexEntryBuilder, INDEX_ENTRY_SIZE};
627 use itertools::Itertools;
628
629 #[test]
630 fn test_empty() {
631 let temp = tempfile::tempdir().unwrap();
632 let vfs = Vfs { base: temp.path() };
633 std::fs::write(temp.path().join("foo.i"), b"").unwrap();
634 let revlog = Revlog::open(&vfs, "foo.i", None, false).unwrap();
635 assert!(revlog.is_empty());
636 assert_eq!(revlog.len(), 0);
637 assert!(revlog.get_entry(0).is_err());
638 assert!(!revlog.has_rev(0));
639 }
640
641 #[test]
642 fn test_inline() {
643 let temp = tempfile::tempdir().unwrap();
644 let vfs = Vfs { base: temp.path() };
645 let node0 = Node::from_hex("2ed2a3912a0b24502043eae84ee4b279c18b90dd")
646 .unwrap();
647 let node1 = Node::from_hex("b004912a8510032a0350a74daa2803dadfb00e12")
648 .unwrap();
649 let node2 = Node::from_hex("dd6ad206e907be60927b5a3117b97dffb2590582")
650 .unwrap();
651 let entry0_bytes = IndexEntryBuilder::new()
652 .is_first(true)
653 .with_version(1)
654 .with_inline(true)
655 .with_offset(INDEX_ENTRY_SIZE)
656 .with_node(node0)
657 .build();
658 let entry1_bytes = IndexEntryBuilder::new()
659 .with_offset(INDEX_ENTRY_SIZE)
660 .with_node(node1)
661 .build();
662 let entry2_bytes = IndexEntryBuilder::new()
663 .with_offset(INDEX_ENTRY_SIZE)
664 .with_p1(0)
665 .with_p2(1)
666 .with_node(node2)
667 .build();
668 let contents = vec![entry0_bytes, entry1_bytes, entry2_bytes]
669 .into_iter()
670 .flatten()
671 .collect_vec();
672 std::fs::write(temp.path().join("foo.i"), contents).unwrap();
673 let revlog = Revlog::open(&vfs, "foo.i", None, false).unwrap();
674
675 let entry0 = revlog.get_entry(0).ok().unwrap();
676 assert_eq!(entry0.revision(), 0);
677 assert_eq!(*entry0.node(), node0);
678 assert!(!entry0.has_p1());
679 assert_eq!(entry0.p1(), None);
680 assert_eq!(entry0.p2(), None);
681 let p1_entry = entry0.p1_entry().unwrap();
682 assert!(p1_entry.is_none());
683 let p2_entry = entry0.p2_entry().unwrap();
684 assert!(p2_entry.is_none());
685
686 let entry1 = revlog.get_entry(1).ok().unwrap();
687 assert_eq!(entry1.revision(), 1);
688 assert_eq!(*entry1.node(), node1);
689 assert!(!entry1.has_p1());
690 assert_eq!(entry1.p1(), None);
691 assert_eq!(entry1.p2(), None);
692 let p1_entry = entry1.p1_entry().unwrap();
693 assert!(p1_entry.is_none());
694 let p2_entry = entry1.p2_entry().unwrap();
695 assert!(p2_entry.is_none());
696
697 let entry2 = revlog.get_entry(2).ok().unwrap();
698 assert_eq!(entry2.revision(), 2);
699 assert_eq!(*entry2.node(), node2);
700 assert!(entry2.has_p1());
701 assert_eq!(entry2.p1(), Some(0));
702 assert_eq!(entry2.p2(), Some(1));
703 let p1_entry = entry2.p1_entry().unwrap();
704 assert!(p1_entry.is_some());
705 assert_eq!(p1_entry.unwrap().revision(), 0);
706 let p2_entry = entry2.p2_entry().unwrap();
707 assert!(p2_entry.is_some());
708 assert_eq!(p2_entry.unwrap().revision(), 1);
709 }
710 }
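A detail worth noting in the new `hash` helper above: the two parent hashes are fed to SHA-1 in sorted order, so the resulting node ID is independent of which parent is p1 and which is p2. A minimal sketch of that property, reusing the ordering logic under the same `sha1` crate (illustrative only, not code from the patch):

    use sha1::{Digest, Sha1};

    fn node_hash(data: &[u8], p1: &[u8; 20], p2: &[u8; 20]) -> [u8; 20] {
        let mut hasher = Sha1::new();
        // Sort the parents before hashing, exactly as the code above does.
        let (a, b) = if p1 > p2 { (p2, p1) } else { (p1, p2) };
        hasher.update(a);
        hasher.update(b);
        hasher.update(data);
        *hasher.finalize().as_ref()
    }

    fn main() {
        let (p1, p2) = ([1u8; 20], [2u8; 20]);
        // Swapping the parents yields the same node hash.
        assert_eq!(node_hash(b"data", &p1, &p2), node_hash(b"data", &p2, &p1));
    }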
@@ -10,7 +10,6
10 10
11 11 use crate::errors::HgError;
12 12 use bytes_cast::BytesCast;
13 use std::convert::{TryFrom, TryInto};
14 13 use std::fmt;
15 14
16 15 /// The length in bytes of a `Node`
@@ -315,7 +314,7 impl From<Node> for NodePrefix {
315 314
316 315 impl PartialEq<Node> for NodePrefix {
317 316 fn eq(&self, other: &Node) -> bool {
318 Self::from(*other) == *self
317 self.data == other.data && self.nybbles_len() == other.nybbles_len()
319 318 }
320 319 }
321 320
@@ -71,7 +71,7 pub trait NodeMap {
71 71 ///
72 72 /// If several Revisions match the given prefix, a [`MultipleResults`]
73 73 /// error is returned.
74 fn find_bin<'a>(
74 fn find_bin(
75 75 &self,
76 76 idx: &impl RevlogIndex,
77 77 prefix: NodePrefix,
@@ -88,7 +88,7 pub trait NodeMap {
88 88 ///
89 89 /// If several Revisions match the given prefix, a [`MultipleResults`]
90 90 /// error is returned.
91 fn unique_prefix_len_bin<'a>(
91 fn unique_prefix_len_bin(
92 92 &self,
93 93 idx: &impl RevlogIndex,
94 94 node_prefix: NodePrefix,
@@ -249,7 +249,7 fn has_prefix_or_none(
249 249 rev: Revision,
250 250 ) -> Result<Option<Revision>, NodeMapError> {
251 251 idx.node(rev)
252 .ok_or_else(|| NodeMapError::RevisionNotInIndex(rev))
252 .ok_or(NodeMapError::RevisionNotInIndex(rev))
253 253 .map(|node| {
254 254 if prefix.is_prefix_of(node) {
255 255 Some(rev)
@@ -468,7 +468,7 impl NodeTree {
468 468 if let Element::Rev(old_rev) = deepest.element {
469 469 let old_node = index
470 470 .node(old_rev)
471 .ok_or_else(|| NodeMapError::RevisionNotInIndex(old_rev))?;
471 .ok_or(NodeMapError::RevisionNotInIndex(old_rev))?;
472 472 if old_node == node {
473 473 return Ok(()); // avoid creating lots of useless blocks
474 474 }
@@ -865,7 +865,7 mod tests {
865 865 hex: &str,
866 866 ) -> Result<(), NodeMapError> {
867 867 let node = pad_node(hex);
868 self.index.insert(rev, node.clone());
868 self.index.insert(rev, node);
869 869 self.nt.insert(&self.index, &node, rev)?;
870 870 Ok(())
871 871 }
@@ -887,13 +887,13 mod tests {
887 887 /// Drain `added` and restart a new one
888 888 fn commit(self) -> Self {
889 889 let mut as_vec: Vec<Block> =
890 self.nt.readonly.iter().map(|block| block.clone()).collect();
890 self.nt.readonly.iter().copied().collect();
891 891 as_vec.extend(self.nt.growable);
892 892 as_vec.push(self.nt.root);
893 893
894 894 Self {
895 895 index: self.index,
896 nt: NodeTree::from(as_vec).into(),
896 nt: NodeTree::from(as_vec),
897 897 }
898 898 }
899 899 }
@@ -967,15 +967,15 mod tests {
967 967 let idx = &mut nt_idx.index;
968 968
969 969 let node0_hex = hex_pad_right("444444");
970 let mut node1_hex = hex_pad_right("444444").clone();
970 let mut node1_hex = hex_pad_right("444444");
971 971 node1_hex.pop();
972 972 node1_hex.push('5');
973 973 let node0 = Node::from_hex(&node0_hex).unwrap();
974 974 let node1 = Node::from_hex(&node1_hex).unwrap();
975 975
976 idx.insert(0, node0.clone());
976 idx.insert(0, node0);
977 977 nt.insert(idx, &node0, 0)?;
978 idx.insert(1, node1.clone());
978 idx.insert(1, node1);
979 979 nt.insert(idx, &node1, 1)?;
980 980
981 981 assert_eq!(nt.find_bin(idx, (&node0).into())?, Some(0));
@@ -3,7 +3,6 use bytes_cast::{unaligned, BytesCast};
3 3 use memmap2::Mmap;
4 4 use std::path::{Path, PathBuf};
5 5
6 use crate::utils::strip_suffix;
7 6 use crate::vfs::Vfs;
8 7
9 8 const ONDISK_VERSION: u8 = 1;
@@ -97,8 +96,9 fn rawdata_path(docket_path: &Path, uid:
97 96 .expect("expected a base name")
98 97 .to_str()
99 98 .expect("expected an ASCII file name in the store");
100 let prefix = strip_suffix(docket_name, ".n.a")
101 .or_else(|| strip_suffix(docket_name, ".n"))
99 let prefix = docket_name
100 .strip_suffix(".n.a")
101 .or_else(|| docket_name.strip_suffix(".n"))
102 102 .expect("expected docket path in .n or .n.a");
103 103 let name = format!("{}-{}.nd", prefix, uid);
104 104 docket_path
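The hunk above drops the crate-local `strip_suffix` helper in favor of `str::strip_suffix`, which has been in the standard library since Rust 1.45. A quick illustration of the chained fallback used for the docket name (hypothetical file name):

    fn main() {
        // Try the ".n.a" suffix first, then fall back to ".n", as above.
        let docket_name = "00changelog.n";
        let prefix = docket_name
            .strip_suffix(".n.a")
            .or_else(|| docket_name.strip_suffix(".n"))
            .expect("expected docket path in .n or .n.a");
        assert_eq!(prefix, "00changelog");
    }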
@@ -2,6 +2,7 use sha1::{Digest, Sha1};
2 2
3 3 #[derive(PartialEq, Debug)]
4 4 #[allow(non_camel_case_types)]
5 #[allow(clippy::upper_case_acronyms)]
5 6 enum path_state {
6 7 START, /* first byte of a path component */
7 8 A, /* "AUX" */
@@ -27,6 +28,7 enum path_state {
27 28
28 29 /* state machine for dir-encoding */
29 30 #[allow(non_camel_case_types)]
31 #[allow(clippy::upper_case_acronyms)]
30 32 enum dir_state {
31 33 DDOT,
32 34 DH,
@@ -34,65 +36,104 enum dir_state {
34 36 DDEFAULT,
35 37 }
36 38
39 trait Sink {
40 fn write_byte(&mut self, c: u8);
41 fn write_bytes(&mut self, c: &[u8]);
42 }
43
37 44 fn inset(bitset: &[u32; 8], c: u8) -> bool {
38 45 bitset[(c as usize) >> 5] & (1 << (c & 31)) != 0
39 46 }
40 47
41 fn charcopy(dest: Option<&mut [u8]>, destlen: &mut usize, c: u8) {
42 if let Some(slice) = dest {
43 slice[*destlen] = c
44 }
45 *destlen += 1
48 const MAXENCODE: usize = 4096 * 4;
49
50 struct DestArr<const N: usize> {
51 buf: [u8; N],
52 pub len: usize,
46 53 }
47 54
48 fn memcopy(dest: Option<&mut [u8]>, destlen: &mut usize, src: &[u8]) {
49 if let Some(slice) = dest {
50 slice[*destlen..*destlen + src.len()].copy_from_slice(src)
55 impl<const N: usize> DestArr<N> {
56 pub fn create() -> Self {
57 DestArr {
58 buf: [0; N],
59 len: 0,
60 }
51 61 }
52 *destlen += src.len();
62
63 pub fn contents(&self) -> &[u8] {
64 &self.buf[..self.len]
65 }
53 66 }
54 67
55 fn rewrap_option<'a, 'b: 'a>(
56 x: &'a mut Option<&'b mut [u8]>,
57 ) -> Option<&'a mut [u8]> {
58 match x {
59 None => None,
60 Some(y) => Some(y),
68 impl<const N: usize> Sink for DestArr<N> {
69 fn write_byte(&mut self, c: u8) {
70 self.buf[self.len] = c;
71 self.len += 1;
72 }
73
74 fn write_bytes(&mut self, src: &[u8]) {
75 self.buf[self.len..self.len + src.len()].copy_from_slice(src);
76 self.len += src.len();
61 77 }
62 78 }
63 79
64 fn hexencode<'a>(mut dest: Option<&'a mut [u8]>, destlen: &mut usize, c: u8) {
80 struct MeasureDest {
81 pub len: usize,
82 }
83
84 impl Sink for Vec<u8> {
85 fn write_byte(&mut self, c: u8) {
86 self.push(c)
87 }
88
89 fn write_bytes(&mut self, src: &[u8]) {
90 self.extend_from_slice(src)
91 }
92 }
93
94 impl MeasureDest {
95 fn create() -> Self {
96 Self { len: 0 }
97 }
98 }
99
100 impl Sink for MeasureDest {
101 fn write_byte(&mut self, _c: u8) {
102 self.len += 1;
103 }
104
105 fn write_bytes(&mut self, src: &[u8]) {
106 self.len += src.len();
107 }
108 }
109
110 fn hexencode(dest: &mut impl Sink, c: u8) {
65 111 let hexdigit = b"0123456789abcdef";
66 charcopy(
67 rewrap_option(&mut dest),
68 destlen,
69 hexdigit[(c as usize) >> 4],
70 );
71 charcopy(dest, destlen, hexdigit[(c as usize) & 15]);
112 dest.write_byte(hexdigit[(c as usize) >> 4]);
113 dest.write_byte(hexdigit[(c as usize) & 15]);
72 114 }
73 115
74 116 /* 3-byte escape: tilde followed by two hex digits */
75 fn escape3(mut dest: Option<&mut [u8]>, destlen: &mut usize, c: u8) {
76 charcopy(rewrap_option(&mut dest), destlen, b'~');
77 hexencode(dest, destlen, c);
117 fn escape3(dest: &mut impl Sink, c: u8) {
118 dest.write_byte(b'~');
119 hexencode(dest, c);
78 120 }
79 121
80 fn encode_dir(mut dest: Option<&mut [u8]>, src: &[u8]) -> usize {
122 fn encode_dir(dest: &mut impl Sink, src: &[u8]) {
81 123 let mut state = dir_state::DDEFAULT;
82 124 let mut i = 0;
83 let mut destlen = 0;
84 125
85 126 while i < src.len() {
86 127 match state {
87 128 dir_state::DDOT => match src[i] {
88 129 b'd' | b'i' => {
89 130 state = dir_state::DHGDI;
90 charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
131 dest.write_byte(src[i]);
91 132 i += 1;
92 133 }
93 134 b'h' => {
94 135 state = dir_state::DH;
95 charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
136 dest.write_byte(src[i]);
96 137 i += 1;
97 138 }
98 139 _ => {
@@ -102,7 +143,7 fn encode_dir(mut dest: Option<&mut [u8]
102 143 dir_state::DH => {
103 144 if src[i] == b'g' {
104 145 state = dir_state::DHGDI;
105 charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
146 dest.write_byte(src[i]);
106 147 i += 1;
107 148 } else {
108 149 state = dir_state::DDEFAULT;
@@ -110,8 +151,8 fn encode_dir(mut dest: Option<&mut [u8]
110 151 }
111 152 dir_state::DHGDI => {
112 153 if src[i] == b'/' {
113 memcopy(rewrap_option(&mut dest), &mut destlen, b".hg");
114 charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
154 dest.write_bytes(b".hg");
155 dest.write_byte(src[i]);
115 156 i += 1;
116 157 }
117 158 state = dir_state::DDEFAULT;
@@ -120,66 +161,64 fn encode_dir(mut dest: Option<&mut [u8]
120 161 if src[i] == b'.' {
121 162 state = dir_state::DDOT
122 163 }
123 charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
164 dest.write_byte(src[i]);
124 165 i += 1;
125 166 }
126 167 }
127 168 }
128 destlen
129 169 }
130 170
131 171 fn _encode(
132 172 twobytes: &[u32; 8],
133 173 onebyte: &[u32; 8],
134 mut dest: Option<&mut [u8]>,
174 dest: &mut impl Sink,
135 175 src: &[u8],
136 176 encodedir: bool,
137 ) -> usize {
177 ) {
138 178 let mut state = path_state::START;
139 179 let mut i = 0;
140 let mut destlen = 0;
141 180 let len = src.len();
142 181
143 182 while i < len {
144 183 match state {
145 184 path_state::START => match src[i] {
146 185 b'/' => {
147 charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
186 dest.write_byte(src[i]);
148 187 i += 1;
149 188 }
150 189 b'.' => {
151 190 state = path_state::LDOT;
152 escape3(rewrap_option(&mut dest), &mut destlen, src[i]);
191 escape3(dest, src[i]);
153 192 i += 1;
154 193 }
155 194 b' ' => {
156 195 state = path_state::DEFAULT;
157 escape3(rewrap_option(&mut dest), &mut destlen, src[i]);
196 escape3(dest, src[i]);
158 197 i += 1;
159 198 }
160 199 b'a' => {
161 200 state = path_state::A;
162 charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
201 dest.write_byte(src[i]);
163 202 i += 1;
164 203 }
165 204 b'c' => {
166 205 state = path_state::C;
167 charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
206 dest.write_byte(src[i]);
168 207 i += 1;
169 208 }
170 209 b'l' => {
171 210 state = path_state::L;
172 charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
211 dest.write_byte(src[i]);
173 212 i += 1;
174 213 }
175 214 b'n' => {
176 215 state = path_state::N;
177 charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
216 dest.write_byte(src[i]);
178 217 i += 1;
179 218 }
180 219 b'p' => {
181 220 state = path_state::P;
182 charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
221 dest.write_byte(src[i]);
183 222 i += 1;
184 223 }
185 224 _ => {
@@ -189,7 +228,7 fn _encode(
189 228 path_state::A => {
190 229 if src[i] == b'u' {
191 230 state = path_state::AU;
192 charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
231 dest.write_byte(src[i]);
193 232 i += 1;
194 233 } else {
195 234 state = path_state::DEFAULT;
@@ -206,18 +245,14 fn _encode(
206 245 path_state::THIRD => {
207 246 state = path_state::DEFAULT;
208 247 match src[i] {
209 b'.' | b'/' | b'\0' => escape3(
210 rewrap_option(&mut dest),
211 &mut destlen,
212 src[i - 1],
213 ),
248 b'.' | b'/' | b'\0' => escape3(dest, src[i - 1]),
214 249 _ => i -= 1,
215 250 }
216 251 }
217 252 path_state::C => {
218 253 if src[i] == b'o' {
219 254 state = path_state::CO;
220 charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
255 dest.write_byte(src[i]);
221 256 i += 1;
222 257 } else {
223 258 state = path_state::DEFAULT;
@@ -240,41 +275,25 fn _encode(
240 275 i += 1;
241 276 } else {
242 277 state = path_state::DEFAULT;
243 charcopy(
244 rewrap_option(&mut dest),
245 &mut destlen,
246 src[i - 1],
247 );
278 dest.write_byte(src[i - 1]);
248 279 }
249 280 }
250 281 path_state::COMLPTn => {
251 282 state = path_state::DEFAULT;
252 283 match src[i] {
253 284 b'.' | b'/' | b'\0' => {
254 escape3(
255 rewrap_option(&mut dest),
256 &mut destlen,
257 src[i - 2],
258 );
259 charcopy(
260 rewrap_option(&mut dest),
261 &mut destlen,
262 src[i - 1],
263 );
285 escape3(dest, src[i - 2]);
286 dest.write_byte(src[i - 1]);
264 287 }
265 288 _ => {
266 memcopy(
267 rewrap_option(&mut dest),
268 &mut destlen,
269 &src[i - 2..i],
270 );
289 dest.write_bytes(&src[i - 2..i]);
271 290 }
272 291 }
273 292 }
274 293 path_state::L => {
275 294 if src[i] == b'p' {
276 295 state = path_state::LP;
277 charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
296 dest.write_byte(src[i]);
278 297 i += 1;
279 298 } else {
280 299 state = path_state::DEFAULT;
@@ -291,7 +310,7 fn _encode(
291 310 path_state::N => {
292 311 if src[i] == b'u' {
293 312 state = path_state::NU;
294 charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
313 dest.write_byte(src[i]);
295 314 i += 1;
296 315 } else {
297 316 state = path_state::DEFAULT;
@@ -308,7 +327,7 fn _encode(
308 327 path_state::P => {
309 328 if src[i] == b'r' {
310 329 state = path_state::PR;
311 charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
330 dest.write_byte(src[i]);
312 331 i += 1;
313 332 } else {
314 333 state = path_state::DEFAULT;
@@ -325,12 +344,12 fn _encode(
325 344 path_state::LDOT => match src[i] {
326 345 b'd' | b'i' => {
327 346 state = path_state::HGDI;
328 charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
347 dest.write_byte(src[i]);
329 348 i += 1;
330 349 }
331 350 b'h' => {
332 351 state = path_state::H;
333 charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
352 dest.write_byte(src[i]);
334 353 i += 1;
335 354 }
336 355 _ => {
@@ -340,30 +359,30 fn _encode(
340 359 path_state::DOT => match src[i] {
341 360 b'/' | b'\0' => {
342 361 state = path_state::START;
343 memcopy(rewrap_option(&mut dest), &mut destlen, b"~2e");
344 charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
362 dest.write_bytes(b"~2e");
363 dest.write_byte(src[i]);
345 364 i += 1;
346 365 }
347 366 b'd' | b'i' => {
348 367 state = path_state::HGDI;
349 charcopy(rewrap_option(&mut dest), &mut destlen, b'.');
350 charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
368 dest.write_byte(b'.');
369 dest.write_byte(src[i]);
351 370 i += 1;
352 371 }
353 372 b'h' => {
354 373 state = path_state::H;
355 memcopy(rewrap_option(&mut dest), &mut destlen, b".h");
374 dest.write_bytes(b".h");
356 375 i += 1;
357 376 }
358 377 _ => {
359 378 state = path_state::DEFAULT;
360 charcopy(rewrap_option(&mut dest), &mut destlen, b'.');
379 dest.write_byte(b'.');
361 380 }
362 381 },
363 382 path_state::H => {
364 383 if src[i] == b'g' {
365 384 state = path_state::HGDI;
366 charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
385 dest.write_byte(src[i]);
367 386 i += 1;
368 387 } else {
369 388 state = path_state::DEFAULT;
@@ -373,13 +392,9 fn _encode(
373 392 if src[i] == b'/' {
374 393 state = path_state::START;
375 394 if encodedir {
376 memcopy(
377 rewrap_option(&mut dest),
378 &mut destlen,
379 b".hg",
380 );
395 dest.write_bytes(b".hg");
381 396 }
382 charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
397 dest.write_byte(src[i]);
383 398 i += 1
384 399 } else {
385 400 state = path_state::DEFAULT;
@@ -388,18 +403,18 fn _encode(
388 403 path_state::SPACE => match src[i] {
389 404 b'/' | b'\0' => {
390 405 state = path_state::START;
391 memcopy(rewrap_option(&mut dest), &mut destlen, b"~20");
392 charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
406 dest.write_bytes(b"~20");
407 dest.write_byte(src[i]);
393 408 i += 1;
394 409 }
395 410 _ => {
396 411 state = path_state::DEFAULT;
397 charcopy(rewrap_option(&mut dest), &mut destlen, b' ');
412 dest.write_byte(b' ');
398 413 }
399 414 },
400 415 path_state::DEFAULT => {
401 416 while i != len && inset(onebyte, src[i]) {
402 charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
417 dest.write_byte(src[i]);
403 418 i += 1;
404 419 }
405 420 if i == len {
@@ -416,17 +431,13 fn _encode(
416 431 }
417 432 b'/' => {
418 433 state = path_state::START;
419 charcopy(rewrap_option(&mut dest), &mut destlen, b'/');
434 dest.write_byte(b'/');
420 435 i += 1;
421 436 }
422 437 _ => {
423 438 if inset(onebyte, src[i]) {
424 439 loop {
425 charcopy(
426 rewrap_option(&mut dest),
427 &mut destlen,
428 src[i],
429 );
440 dest.write_byte(src[i]);
430 441 i += 1;
431 442 if !(i < len && inset(onebyte, src[i])) {
432 443 break;
@@ -435,22 +446,14 fn _encode(
435 446 } else if inset(twobytes, src[i]) {
436 447 let c = src[i];
437 448 i += 1;
438 charcopy(
439 rewrap_option(&mut dest),
440 &mut destlen,
441 b'_',
442 );
443 charcopy(
444 rewrap_option(&mut dest),
445 &mut destlen,
446 if c == b'_' { b'_' } else { c + 32 },
447 );
449 dest.write_byte(b'_');
450 dest.write_byte(if c == b'_' {
451 b'_'
452 } else {
453 c + 32
454 });
448 455 } else {
449 escape3(
450 rewrap_option(&mut dest),
451 &mut destlen,
452 src[i],
453 );
456 escape3(dest, src[i]);
454 457 i += 1;
455 458 }
456 459 }
@@ -462,17 +465,13 fn _encode(
462 465 path_state::START => (),
463 466 path_state::A => (),
464 467 path_state::AU => (),
465 path_state::THIRD => {
466 escape3(rewrap_option(&mut dest), &mut destlen, src[i - 1])
467 }
468 path_state::THIRD => escape3(dest, src[i - 1]),
468 469 path_state::C => (),
469 470 path_state::CO => (),
470 path_state::COMLPT => {
471 charcopy(rewrap_option(&mut dest), &mut destlen, src[i - 1])
472 }
471 path_state::COMLPT => dest.write_byte(src[i - 1]),
473 472 path_state::COMLPTn => {
474 escape3(rewrap_option(&mut dest), &mut destlen, src[i - 2]);
475 charcopy(rewrap_option(&mut dest), &mut destlen, src[i - 1]);
473 escape3(dest, src[i - 2]);
474 dest.write_byte(src[i - 1]);
476 475 }
477 476 path_state::L => (),
478 477 path_state::LP => (),
@@ -482,19 +481,18 fn _encode(
482 481 path_state::PR => (),
483 482 path_state::LDOT => (),
484 483 path_state::DOT => {
485 memcopy(rewrap_option(&mut dest), &mut destlen, b"~2e");
484 dest.write_bytes(b"~2e");
486 485 }
487 486 path_state::H => (),
488 487 path_state::HGDI => (),
489 488 path_state::SPACE => {
490 memcopy(rewrap_option(&mut dest), &mut destlen, b"~20");
489 dest.write_bytes(b"~20");
491 490 }
492 491 path_state::DEFAULT => (),
493 };
494 destlen
492 }
495 493 }
496 494
497 fn basic_encode(dest: Option<&mut [u8]>, src: &[u8]) -> usize {
495 fn basic_encode(dest: &mut impl Sink, src: &[u8]) {
498 496 let twobytes: [u32; 8] = [0, 0, 0x87ff_fffe, 0, 0, 0, 0, 0];
499 497 let onebyte: [u32; 8] =
500 498 [1, 0x2bff_3bfa, 0x6800_0001, 0x2fff_ffff, 0, 0, 0, 0];
@@ -503,24 +501,22 fn basic_encode(dest: Option<&mut [u8]>,
503 501
504 502 const MAXSTOREPATHLEN: usize = 120;
505 503
506 fn lower_encode(mut dest: Option<&mut [u8]>, src: &[u8]) -> usize {
504 fn lower_encode(dest: &mut impl Sink, src: &[u8]) {
507 505 let onebyte: [u32; 8] =
508 506 [1, 0x2bff_fbfb, 0xe800_0001, 0x2fff_ffff, 0, 0, 0, 0];
509 507 let lower: [u32; 8] = [0, 0, 0x07ff_fffe, 0, 0, 0, 0, 0];
510 let mut destlen = 0;
511 508 for c in src {
512 509 if inset(&onebyte, *c) {
513 charcopy(rewrap_option(&mut dest), &mut destlen, *c)
510 dest.write_byte(*c)
514 511 } else if inset(&lower, *c) {
515 charcopy(rewrap_option(&mut dest), &mut destlen, *c + 32)
512 dest.write_byte(*c + 32)
516 513 } else {
517 escape3(rewrap_option(&mut dest), &mut destlen, *c)
514 escape3(dest, *c)
518 515 }
519 516 }
520 destlen
521 517 }
522 518
523 fn aux_encode(dest: Option<&mut [u8]>, src: &[u8]) -> usize {
519 fn aux_encode(dest: &mut impl Sink, src: &[u8]) {
524 520 let twobytes = [0; 8];
525 521 let onebyte: [u32; 8] = [!0, 0xffff_3ffe, !0, !0, !0, !0, !0, !0];
526 522 _encode(&twobytes, &onebyte, dest, src, false)
@@ -529,118 +525,98 fn aux_encode(dest: Option<&mut [u8]>, s
529 525 fn hash_mangle(src: &[u8], sha: &[u8]) -> Vec<u8> {
530 526 let dirprefixlen = 8;
531 527 let maxshortdirslen = 68;
532 let mut destlen = 0;
533 528
534 529 let last_slash = src.iter().rposition(|b| *b == b'/');
535 let last_dot: Option<usize> = {
536 let s = last_slash.unwrap_or(0);
537 src[s..]
538 .iter()
539 .rposition(|b| *b == b'.')
540 .and_then(|i| Some(i + s))
530 let basename_start = match last_slash {
531 Some(slash) => slash + 1,
532 None => 0,
533 };
534 let basename = &src[basename_start..];
535 let ext = match basename.iter().rposition(|b| *b == b'.') {
536 None => &[],
537 Some(dot) => &basename[dot..],
541 538 };
542 539
543 let mut dest = vec![0; MAXSTOREPATHLEN];
544 memcopy(Some(&mut dest), &mut destlen, b"dh/");
540 let mut dest = Vec::with_capacity(MAXSTOREPATHLEN);
541 dest.write_bytes(b"dh/");
545 542
546 {
547 let mut first = true;
548 for slice in src[..last_slash.unwrap_or_else(|| src.len())]
549 .split(|b| *b == b'/')
550 {
543 if let Some(last_slash) = last_slash {
544 for slice in src[..last_slash].split(|b| *b == b'/') {
551 545 let slice = &slice[..std::cmp::min(slice.len(), dirprefixlen)];
552 if destlen + (slice.len() + if first { 0 } else { 1 })
553 > maxshortdirslen + 3
554 {
546 if dest.len() + slice.len() > maxshortdirslen + 3 {
555 547 break;
556 548 } else {
557 if !first {
558 charcopy(Some(&mut dest), &mut destlen, b'/')
559 };
560 memcopy(Some(&mut dest), &mut destlen, slice);
561 if dest[destlen - 1] == b'.' || dest[destlen - 1] == b' ' {
562 dest[destlen - 1] = b'_'
563 }
549 dest.write_bytes(slice);
564 550 }
565 first = false;
566 }
567 if !first {
568 charcopy(Some(&mut dest), &mut destlen, b'/');
551 dest.write_byte(b'/');
569 552 }
570 553 }
571 554
572 let used = destlen + 40 + {
573 if let Some(l) = last_dot {
574 src.len() - l
575 } else {
576 0
577 }
578 };
555 let used = dest.len() + 40 + ext.len();
579 556
580 557 if MAXSTOREPATHLEN > used {
581 558 let slop = MAXSTOREPATHLEN - used;
582 let basenamelen = match last_slash {
583 Some(l) => src.len() - l - 1,
584 None => src.len(),
585 };
586 let basenamelen = std::cmp::min(basenamelen, slop);
587 if basenamelen > 0 {
588 let start = match last_slash {
589 Some(l) => l + 1,
590 None => 0,
591 };
592 memcopy(
593 Some(&mut dest),
594 &mut destlen,
595 &src[start..][..basenamelen],
596 )
597 }
559 let len = std::cmp::min(basename.len(), slop);
560 dest.write_bytes(&basename[..len])
598 561 }
599 562 for c in sha {
600 hexencode(Some(&mut dest), &mut destlen, *c);
601 }
602 if let Some(l) = last_dot {
603 memcopy(Some(&mut dest), &mut destlen, &src[l..]);
563 hexencode(&mut dest, *c);
604 564 }
605 if destlen == dest.len() {
606 dest
607 } else {
608 // sometimes the path are shorter than MAXSTOREPATHLEN
609 dest[..destlen].to_vec()
610 }
565 dest.write_bytes(ext);
566 dest.shrink_to_fit();
567 dest
611 568 }
612 569
613 const MAXENCODE: usize = 4096 * 4;
614 570 fn hash_encode(src: &[u8]) -> Vec<u8> {
615 let dired = &mut [0; MAXENCODE];
616 let lowered = &mut [0; MAXENCODE];
617 let auxed = &mut [0; MAXENCODE];
571 let mut dired: DestArr<MAXENCODE> = DestArr::create();
572 let mut lowered: DestArr<MAXENCODE> = DestArr::create();
573 let mut auxed: DestArr<MAXENCODE> = DestArr::create();
618 574 let baselen = (src.len() - 5) * 3;
619 575 if baselen >= MAXENCODE {
620 576 panic!("path_encode::hash_encode: string too long: {}", baselen)
621 577 };
622 let dirlen = encode_dir(Some(&mut dired[..]), src);
623 let sha = Sha1::digest(&dired[..dirlen]);
624 let lowerlen = lower_encode(Some(&mut lowered[..]), &dired[..dirlen][5..]);
625 let auxlen = aux_encode(Some(&mut auxed[..]), &lowered[..lowerlen]);
626 hash_mangle(&auxed[..auxlen], &sha)
578 encode_dir(&mut dired, src);
579 let sha = Sha1::digest(dired.contents());
580 lower_encode(&mut lowered, &dired.contents()[5..]);
581 aux_encode(&mut auxed, lowered.contents());
582 hash_mangle(auxed.contents(), &sha)
627 583 }
628 584
629 585 pub fn path_encode(path: &[u8]) -> Vec<u8> {
630 586 let newlen = if path.len() <= MAXSTOREPATHLEN {
631 basic_encode(None, path)
587 let mut measure = MeasureDest::create();
588 basic_encode(&mut measure, path);
589 measure.len
632 590 } else {
633 MAXSTOREPATHLEN + 1
591 return hash_encode(path);
634 592 };
635 593 if newlen <= MAXSTOREPATHLEN {
636 594 if newlen == path.len() {
637 595 path.to_vec()
638 596 } else {
639 let mut res = vec![0; newlen];
640 basic_encode(Some(&mut res), path);
641 res
597 let mut dest = Vec::with_capacity(newlen);
598 basic_encode(&mut dest, path);
599 assert!(dest.len() == newlen);
600 dest
642 601 }
643 602 } else {
644 hash_encode(&path)
603 hash_encode(path)
645 604 }
646 605 }
606
607 #[cfg(test)]
608 mod tests {
609 use super::*;
610 use crate::utils::hg_path::HgPathBuf;
611
612 #[test]
613 fn test_long_filename_at_root() {
614 let input = b"data/ABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJ.i";
615 let expected = b"dh/abcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghij.i708243a2237a7afae259ea3545a72a2ef11c247b.i";
616 let res = path_encode(input);
617 assert_eq!(
618 HgPathBuf::from_bytes(&res),
619 HgPathBuf::from_bytes(expected)
620 );
621 }
622 }
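The rewrite above replaces the C-style `Option<&mut [u8]>` buffer threading with a small `Sink` trait, so one encoder body serves three destinations: a counting sink that only measures, a fixed-size `DestArr`, and a growable `Vec<u8>`. A self-contained sketch of the measure-then-write pattern (hypothetical names, simplified encoder):

    trait Sink {
        fn write_byte(&mut self, c: u8);
        fn write_bytes(&mut self, c: &[u8]);
    }

    /// Counting sink: records how many bytes would be written.
    struct Measure {
        len: usize,
    }

    impl Sink for Measure {
        fn write_byte(&mut self, _c: u8) {
            self.len += 1;
        }
        fn write_bytes(&mut self, src: &[u8]) {
            self.len += src.len();
        }
    }

    impl Sink for Vec<u8> {
        fn write_byte(&mut self, c: u8) {
            self.push(c)
        }
        fn write_bytes(&mut self, src: &[u8]) {
            self.extend_from_slice(src)
        }
    }

    // Any encoder written against `Sink` works unchanged for both passes.
    fn encode(dest: &mut impl Sink, src: &[u8]) {
        for &b in src {
            dest.write_byte(b);
        }
    }

    fn main() {
        let input = b"some/path";
        let mut measure = Measure { len: 0 };
        encode(&mut measure, input); // first pass: compute the size
        let mut out = Vec::with_capacity(measure.len);
        encode(&mut out, input); // second pass: produce the bytes
        assert_eq!(out.len(), measure.len);
    }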
@@ -4,9 +4,9
4 4
5 5 use crate::errors::HgError;
6 6 use crate::repo::Repo;
7 use crate::revlog::revlog::{Revlog, RevlogError};
8 7 use crate::revlog::NodePrefix;
9 8 use crate::revlog::{Revision, NULL_REVISION, WORKING_DIRECTORY_HEX};
9 use crate::revlog::{Revlog, RevlogError};
10 10 use crate::Node;
11 11
12 12 /// Resolve a query string into a single revision.
@@ -21,7 +21,7 pub fn resolve_single(
21 21 match input {
22 22 "." => {
23 23 let p1 = repo.dirstate_parents()?.p1;
24 return Ok(changelog.revlog.rev_from_node(p1.into())?);
24 return changelog.revlog.rev_from_node(p1.into());
25 25 }
26 26 "null" => return Ok(NULL_REVISION),
27 27 _ => {}
@@ -33,7 +33,7 pub fn resolve_single(
33 33 let msg = format!("cannot parse revset '{}'", input);
34 34 Err(HgError::unsupported(msg).into())
35 35 }
36 result => return result,
36 result => result,
37 37 }
38 38 }
39 39
@@ -164,7 +164,7 pub(crate) fn parse_config(
164 164 fn read_temporary_includes(
165 165 repo: &Repo,
166 166 ) -> Result<Vec<Vec<u8>>, SparseConfigError> {
167 let raw = repo.hg_vfs().try_read("tempsparse")?.unwrap_or(vec![]);
167 let raw = repo.hg_vfs().try_read("tempsparse")?.unwrap_or_default();
168 168 if raw.is_empty() {
169 169 return Ok(vec![]);
170 170 }
@@ -179,7 +179,7 fn patterns_for_rev(
179 179 if !repo.has_sparse() {
180 180 return Ok(None);
181 181 }
182 let raw = repo.hg_vfs().try_read("sparse")?.unwrap_or(vec![]);
182 let raw = repo.hg_vfs().try_read("sparse")?.unwrap_or_default();
183 183
184 184 if raw.is_empty() {
185 185 return Ok(None);
@@ -200,9 +200,10 fn patterns_for_rev(
200 200 let output =
201 201 cat(repo, &rev.to_string(), vec![HgPath::new(&profile)])
202 202 .map_err(|_| {
203 HgError::corrupted(format!(
203 HgError::corrupted(
204 204 "dirstate points to non-existent parent node"
205 ))
205 .to_string(),
206 )
206 207 })?;
207 208 if output.results.is_empty() {
208 209 config.warnings.push(SparseWarning::ProfileNotFound {
@@ -252,9 +253,9 pub fn matcher(
252 253 repo.changelog()?
253 254 .rev_from_node(parents.p1.into())
254 255 .map_err(|_| {
255 HgError::corrupted(format!(
256 "dirstate points to non-existent parent node"
257 ))
256 HgError::corrupted(
257 "dirstate points to non-existent parent node".to_string(),
258 )
258 259 })?;
259 260 if p1_rev != NULL_REVISION {
260 261 revs.push(p1_rev)
@@ -263,9 +264,9 pub fn matcher(
263 264 repo.changelog()?
264 265 .rev_from_node(parents.p2.into())
265 266 .map_err(|_| {
266 HgError::corrupted(format!(
267 "dirstate points to non-existent parent node"
268 ))
267 HgError::corrupted(
268 "dirstate points to non-existent parent node".to_string(),
269 )
269 270 })?;
270 271 if p2_rev != NULL_REVISION {
271 272 revs.push(p2_rev)
@@ -325,7 +326,7 fn force_include_matcher(
325 326 }
326 327 let forced_include_matcher = IncludeMatcher::new(
327 328 temp_includes
328 .into_iter()
329 .iter()
329 330 .map(|include| {
330 331 IgnorePattern::new(PatternSyntax::Path, include, Path::new(""))
331 332 })
@@ -137,26 +137,8 impl SliceExt for [u8] {
137 137 }
138 138
139 139 fn split_2_by_slice(&self, separator: &[u8]) -> Option<(&[u8], &[u8])> {
140 if let Some(pos) = find_slice_in_slice(self, separator) {
141 Some((&self[..pos], &self[pos + separator.len()..]))
142 } else {
143 None
144 }
145 }
146 }
147
148 pub trait StrExt {
149 // TODO: Use https://doc.rust-lang.org/nightly/std/primitive.str.html#method.split_once
150 // once we require Rust 1.52+
151 fn split_2(&self, separator: char) -> Option<(&str, &str)>;
152 }
153
154 impl StrExt for str {
155 fn split_2(&self, separator: char) -> Option<(&str, &str)> {
156 let mut iter = self.splitn(2, separator);
157 let a = iter.next()?;
158 let b = iter.next()?;
159 Some((a, b))
140 find_slice_in_slice(self, separator)
141 .map(|pos| (&self[..pos], &self[pos + separator.len()..]))
160 142 }
161 143 }
162 144
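The removed `StrExt::split_2` shim existed only because the crate once supported Rust older than 1.52; `str::split_once` now covers it directly, and `split_2_by_slice` collapses to an `Option::map` over the search result. For example:

    fn main() {
        // str::split_once replaces the deleted StrExt::split_2 helper.
        assert_eq!("key=value".split_once('='), Some(("key", "value")));
        assert_eq!("no-separator".split_once('='), None);
    }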
@@ -211,28 +193,20 impl<'a> Escaped for &'a HgPath {
211 193 }
212 194 }
213 195
214 // TODO: use the str method when we require Rust 1.45
215 pub(crate) fn strip_suffix<'a>(s: &'a str, suffix: &str) -> Option<&'a str> {
216 if s.ends_with(suffix) {
217 Some(&s[..s.len() - suffix.len()])
218 } else {
219 None
220 }
221 }
222
223 196 #[cfg(unix)]
224 197 pub fn shell_quote(value: &[u8]) -> Vec<u8> {
225 // TODO: Use the `matches!` macro when we require Rust 1.42+
226 if value.iter().all(|&byte| match byte {
227 b'a'..=b'z'
228 | b'A'..=b'Z'
229 | b'0'..=b'9'
230 | b'.'
231 | b'_'
232 | b'/'
233 | b'+'
234 | b'-' => true,
235 _ => false,
198 if value.iter().all(|&byte| {
199 matches!(
200 byte,
201 b'a'..=b'z'
202 | b'A'..=b'Z'
203 | b'0'..=b'9'
204 | b'.'
205 | b'_'
206 | b'/'
207 | b'+'
208 | b'-'
209 )
236 210 }) {
237 211 value.to_owned()
238 212 } else {
@@ -317,9 +291,9 fn test_expand_vars() {
317 291 }
318 292
319 293 pub(crate) enum MergeResult<V> {
320 UseLeftValue,
321 UseRightValue,
322 UseNewValue(V),
294 Left,
295 Right,
296 New(V),
323 297 }
324 298
325 299 /// Return the union of the two given maps,
@@ -360,10 +334,10 where
360 334 ordmap_union_with_merge_by_iter(right, left, |key, a, b| {
361 335 // Also swapped in `merge` arguments:
362 336 match merge(key, b, a) {
363 MergeResult::UseNewValue(v) => MergeResult::UseNewValue(v),
337 MergeResult::New(v) => MergeResult::New(v),
364 338 // … and swap back in `merge` result:
365 MergeResult::UseLeftValue => MergeResult::UseRightValue,
366 MergeResult::UseRightValue => MergeResult::UseLeftValue,
339 MergeResult::Left => MergeResult::Right,
340 MergeResult::Right => MergeResult::Left,
367 341 }
368 342 })
369 343 } else {
@@ -388,11 +362,11 where
388 362 left.insert(key, right_value);
389 363 }
390 364 Some(left_value) => match merge(&key, left_value, &right_value) {
391 MergeResult::UseLeftValue => {}
392 MergeResult::UseRightValue => {
365 MergeResult::Left => {}
366 MergeResult::Right => {
393 367 left.insert(key, right_value);
394 368 }
395 MergeResult::UseNewValue(new_value) => {
369 MergeResult::New(new_value) => {
396 370 left.insert(key, new_value);
397 371 }
398 372 },
@@ -417,7 +391,7 where
417 391 // TODO: if/when https://github.com/bodil/im-rs/pull/168 is accepted,
418 392 // change these from `Vec<(K, V)>` to `Vec<(&K, Cow<V>)>`
419 393 // with `left_updates` only borrowing from `right` and `right_updates` from
420 // `left`, and with `Cow::Owned` used for `MergeResult::UseNewValue`.
394 // `left`, and with `Cow::Owned` used for `MergeResult::New`.
421 395 //
422 396 // This would allow moving all `.clone()` calls to after we’ve decided
423 397 // which of `right_updates` or `left_updates` to use
@@ -438,13 +412,13 where
438 412 old: (key, left_value),
439 413 new: (_, right_value),
440 414 } => match merge(key, left_value, right_value) {
441 MergeResult::UseLeftValue => {
415 MergeResult::Left => {
442 416 right_updates.push((key.clone(), left_value.clone()))
443 417 }
444 MergeResult::UseRightValue => {
418 MergeResult::Right => {
445 419 left_updates.push((key.clone(), right_value.clone()))
446 420 }
447 MergeResult::UseNewValue(new_value) => {
421 MergeResult::New(new_value) => {
448 422 left_updates.push((key.clone(), new_value.clone()));
449 423 right_updates.push((key.clone(), new_value))
450 424 }
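The shorter `MergeResult` variant names read naturally at the call sites above: a merge callback inspects both values and tells the union which side to keep, or hands back a freshly built value. A hypothetical, self-contained callback in the new naming (not from the patch):

    enum MergeResult<V> {
        Left,
        Right,
        New(V),
    }

    // Keep the larger value; on a tie, synthesize a combined one.
    fn merge_max(_key: &str, left: &u32, right: &u32) -> MergeResult<u32> {
        if left > right {
            MergeResult::Left
        } else if right > left {
            MergeResult::Right
        } else {
            MergeResult::New(left + right)
        }
    }

    fn main() {
        assert!(matches!(merge_max("k", &2, &1), MergeResult::Left));
        assert!(matches!(merge_max("k", &1, &2), MergeResult::Right));
        assert!(matches!(merge_max("k", &2, &2), MergeResult::New(4)));
    }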
@@ -503,3 +477,23 where
503 477 Ok(())
504 478 }
505 479 }
480
481 /// Like `Iterator::filter_map`, but over a fallible iterator of `Result`s.
482 ///
483 /// The callback is only called for incoming `Ok` values. Errors are passed
484 /// through as-is. In order to let it use the `?` operator the callback is
485 /// expected to return a `Result` of `Option`, instead of an `Option` of
486 /// `Result`.
487 pub fn filter_map_results<'a, I, F, A, B, E>(
488 iter: I,
489 f: F,
490 ) -> impl Iterator<Item = Result<B, E>> + 'a
491 where
492 I: Iterator<Item = Result<A, E>> + 'a,
493 F: Fn(A) -> Result<Option<B>, E> + 'a,
494 {
495 iter.filter_map(move |result| match result {
496 Ok(node) => f(node).transpose(),
497 Err(e) => Some(Err(e)),
498 })
499 }
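Since `filter_map_results` is new in this series, a quick usage sketch may help (hypothetical data; assumes the function is in scope, e.g. via `use hg::utils::filter_map_results`): keep the even numbers from a fallible stream and let errors flow through untouched:

    fn main() {
        let input: Vec<Result<&str, String>> =
            vec![Ok("1"), Ok("2"), Err("boom".to_string()), Ok("4")];
        let evens: Vec<Result<u32, String>> =
            filter_map_results(input.into_iter(), |s| {
                let n: u32 = s.parse().map_err(|_| "bad int".to_string())?;
                Ok(if n % 2 == 0 { Some(n) } else { None })
            })
            .collect();
        // Ok("1") is filtered out, "2" and "4" are kept, the error passes through.
        assert_eq!(evens, vec![Ok(2), Err("boom".to_string()), Ok(4)]);
    }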
@@ -230,7 +230,7 pub fn canonical_path(
230 230 // TODO hint to the user about using --cwd
231 231 // Bubble up the responsibility to Python for now
232 232 Err(HgPathError::NotUnderRoot {
233 path: original_name.to_owned(),
233 path: original_name,
234 234 root: root.to_owned(),
235 235 })
236 236 }
@@ -424,7 +424,7 mod tests {
424 424 assert_eq!(
425 425 canonical_path(&root, Path::new(""), &beneath_repo),
426 426 Err(HgPathError::NotUnderRoot {
427 path: beneath_repo.to_owned(),
427 path: beneath_repo,
428 428 root: root.to_owned()
429 429 })
430 430 );
@@ -8,7 +8,6
8 8 use crate::utils::SliceExt;
9 9 use std::borrow::Borrow;
10 10 use std::borrow::Cow;
11 use std::convert::TryFrom;
12 11 use std::ffi::{OsStr, OsString};
13 12 use std::fmt;
14 13 use std::ops::Deref;
@@ -206,7 +205,7 impl HgPath {
206 205 /// ```
207 206 pub fn split_filename(&self) -> (&Self, &Self) {
208 207 match &self.inner.iter().rposition(|c| *c == b'/') {
209 None => (HgPath::new(""), &self),
208 None => (HgPath::new(""), self),
210 209 Some(size) => (
211 210 HgPath::new(&self.inner[..*size]),
212 211 HgPath::new(&self.inner[*size + 1..]),
@@ -327,7 +326,7 impl HgPath {
327 326 #[cfg(unix)]
328 327 /// Split a pathname into drive and path. On Posix, drive is always empty.
329 328 pub fn split_drive(&self) -> (&HgPath, &HgPath) {
330 (HgPath::new(b""), &self)
329 (HgPath::new(b""), self)
331 330 }
332 331
333 332 /// Checks for errors in the path, short-circuiting at the first one.
@@ -397,7 +396,7 impl HgPathBuf {
397 396 Default::default()
398 397 }
399 398
400 pub fn push<T: ?Sized + AsRef<HgPath>>(&mut self, other: &T) -> () {
399 pub fn push<T: ?Sized + AsRef<HgPath>>(&mut self, other: &T) {
401 400 if !self.inner.is_empty() && self.inner.last() != Some(&b'/') {
402 401 self.inner.push(b'/');
403 402 }
@@ -432,7 +431,7 impl Deref for HgPathBuf {
432 431
433 432 #[inline]
434 433 fn deref(&self) -> &HgPath {
435 &HgPath::new(&self.inner)
434 HgPath::new(&self.inner)
436 435 }
437 436 }
438 437
@@ -442,15 +441,15 impl<T: ?Sized + AsRef<HgPath>> From<&T>
442 441 }
443 442 }
444 443
445 impl Into<Vec<u8>> for HgPathBuf {
446 fn into(self) -> Vec<u8> {
447 self.inner
444 impl From<HgPathBuf> for Vec<u8> {
445 fn from(val: HgPathBuf) -> Self {
446 val.inner
448 447 }
449 448 }
450 449
451 450 impl Borrow<HgPath> for HgPathBuf {
452 451 fn borrow(&self) -> &HgPath {
453 &HgPath::new(self.as_bytes())
452 HgPath::new(self.as_bytes())
454 453 }
455 454 }
456 455
@@ -492,7 +491,7 pub fn hg_path_to_os_string<P: AsRef<HgP
492 491 #[cfg(unix)]
493 492 {
494 493 use std::os::unix::ffi::OsStrExt;
495 os_str = std::ffi::OsStr::from_bytes(&hg_path.as_ref().as_bytes());
494 os_str = std::ffi::OsStr::from_bytes(hg_path.as_ref().as_bytes());
496 495 }
497 496 // TODO Handle other platforms
498 497 // TODO: convert from WTF8 to Windows MBCS (ANSI encoding).
@@ -512,7 +511,7 pub fn os_string_to_hg_path_buf<S: AsRef
512 511 #[cfg(unix)]
513 512 {
514 513 use std::os::unix::ffi::OsStrExt;
515 buf = HgPathBuf::from_bytes(&os_string.as_ref().as_bytes());
514 buf = HgPathBuf::from_bytes(os_string.as_ref().as_bytes());
516 515 }
517 516 // TODO Handle other platforms
518 517 // TODO: convert from WTF8 to Windows MBCS (ANSI encoding).
@@ -529,7 +528,7 pub fn path_to_hg_path_buf<P: AsRef<Path
529 528 #[cfg(unix)]
530 529 {
531 530 use std::os::unix::ffi::OsStrExt;
532 buf = HgPathBuf::from_bytes(&os_str.as_bytes());
531 buf = HgPathBuf::from_bytes(os_str.as_bytes());
533 532 }
534 533 // TODO Handle other platforms
535 534 // TODO: convert from WTF8 to Windows MBCS (ANSI encoding).
@@ -48,7 +48,7 impl Vfs<'_> {
48 48 match self.read(relative_path) {
49 49 Err(e) => match &e {
50 50 HgError::IoError { error, .. } => match error.kind() {
51 ErrorKind::NotFound => return Ok(None),
51 ErrorKind::NotFound => Ok(None),
52 52 _ => Err(e),
53 53 },
54 54 _ => Err(e),
@@ -38,7 +38,7 fn build_random_graph(
38 38 // p2 is a random revision lower than i and different from p1
39 39 let mut p2 = rng.gen_range(0..i - 1) as Revision;
40 40 if p2 >= p1 {
41 p2 = p2 + 1;
41 p2 += 1;
42 42 }
43 43 vg.push([p1, p2]);
44 44 } else if rng.gen_bool(prevprob) {
@@ -53,7 +53,7 fn build_random_graph(
53 53 /// Compute the ancestors set of all revisions of a VecGraph
54 54 fn ancestors_sets(vg: &VecGraph) -> Vec<HashSet<Revision>> {
55 55 let mut ancs: Vec<HashSet<Revision>> = Vec::new();
56 for i in 0..vg.len() {
56 (0..vg.len()).for_each(|i| {
57 57 let mut ancs_i = HashSet::new();
58 58 ancs_i.insert(i as Revision);
59 59 for p in vg[i].iter().cloned() {
@@ -62,7 +62,7 fn ancestors_sets(vg: &VecGraph) -> Vec<
62 62 }
63 63 }
64 64 ancs.push(ancs_i);
65 }
65 });
66 66 ancs
67 67 }
68 68
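
The loop above is rewritten as an iterator combinator, the shape clippy's range-loop lints nudge toward when the index itself is still needed. An equivalent standalone sketch (the `sum_lengths` helper is hypothetical):

fn sum_lengths(items: &[Vec<u32>]) -> usize {
    let mut total = 0;
    // Same shape as the `ancestors_sets` rewrite: iterate the indices
    // with a combinator instead of a bare `for i in 0..len` loop.
    (0..items.len()).for_each(|i| {
        total += items[i].len();
    });
    total
}
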
@@ -95,9 +95,9 impl<'a> NaiveMissingAncestors<'a> {
95 95 random_seed: &str,
96 96 ) -> Self {
97 97 Self {
98 ancestors_sets: ancestors_sets,
98 ancestors_sets,
99 99 bases: bases.clone(),
100 graph: graph,
100 graph,
101 101 history: vec![MissingAncestorsAction::InitialBases(bases.clone())],
102 102 random_seed: random_seed.into(),
103 103 }
@@ -116,7 +116,7 impl<'a> NaiveMissingAncestors<'a> {
116 116 for base in self.bases.iter().cloned() {
117 117 if base != NULL_REVISION {
118 118 for rev in &self.ancestors_sets[base as usize] {
119 revs.remove(&rev);
119 revs.remove(rev);
120 120 }
121 121 }
122 122 }
@@ -140,12 +140,12 impl<'a> NaiveMissingAncestors<'a> {
140 140 for base in self.bases.iter().cloned() {
141 141 if base != NULL_REVISION {
142 142 for rev in &self.ancestors_sets[base as usize] {
143 missing.remove(&rev);
143 missing.remove(rev);
144 144 }
145 145 }
146 146 }
147 147 let mut res: Vec<Revision> = missing.iter().cloned().collect();
148 res.sort();
148 res.sort_unstable();
149 149 res
150 150 }
151 151
@@ -196,7 +196,7 fn sample_revs<R: RngCore>(
196 196 let nb = min(maxrev as usize, log_normal.sample(rng).floor() as usize);
197 197
198 198 let dist = Uniform::from(NULL_REVISION..maxrev);
199 return rng.sample_iter(&dist).take(nb).collect();
199 rng.sample_iter(&dist).take(nb).collect()
200 200 }
201 201
202 202 /// Produces the hexadecimal representation of a slice of bytes
@@ -2,18 +2,18
2 2 name = "hg-cpython"
3 3 version = "0.1.0"
4 4 authors = ["Georges Racinet <gracinet@anybox.fr>"]
5 edition = "2018"
5 edition = "2021"
6 6
7 7 [lib]
8 8 name='rusthg'
9 9 crate-type = ["cdylib"]
10 10
11 11 [dependencies]
12 cpython = { version = "0.7.0", features = ["extension-module"] }
13 crossbeam-channel = "0.5.2"
12 cpython = { version = "0.7.1", features = ["extension-module"] }
13 crossbeam-channel = "0.5.6"
14 14 hg-core = { path = "../hg-core"}
15 libc = "0.2.119"
16 log = "0.4.14"
17 env_logger = "0.9.0"
15 libc = "0.2.137"
16 log = "0.4.17"
17 env_logger = "0.9.3"
18 18 stable_deref_trait = "1.2.0"
19 19 vcsgraph = "0.2.0"
@@ -10,7 +10,6
10 10
11 11 use cpython::{ObjectProtocol, PyObject, PyResult, Python};
12 12 use hg::Revision;
13 use std::iter::FromIterator;
14 13
15 14 /// Utility function to convert a Python iterable into various collections
16 15 ///
@@ -103,7 +103,7 pub fn combine_changeset_copies_wrapper(
103 103 // thread can drop it. Otherwise the GIL would be implicitly
104 104 // acquired here through `impl Drop for PyBytes`.
105 105 if let Some(bytes) = opt_bytes {
106 if let Err(_) = pybytes_sender.send(bytes.unwrap()) {
106 if pybytes_sender.send(bytes.unwrap()).is_err() {
107 107 // The channel is disconnected, meaning the parent
108 108 // thread panicked or returned
109 109 // early through
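
The `if let Err(_) = ... {}` above becomes `.is_err()`, per clippy's `redundant_pattern_matching` lint. The same idiom in isolation (hypothetical `notify` function), assuming a disconnected receiver is the only way `send` fails:

use std::sync::mpsc::Sender;

fn notify(sender: &Sender<u32>) {
    if sender.send(1).is_err() {
        // The channel is disconnected: the receiving side hung up,
        // so there is nobody left to notify.
    }
}
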
@@ -98,7 +98,7 py_class!(pub class Dirs |py| {
98 98
99 99 def __contains__(&self, item: PyObject) -> PyResult<bool> {
100 100 Ok(self.inner(py).borrow().contains(HgPath::new(
101 item.extract::<PyBytes>(py)?.data(py).as_ref(),
101 item.extract::<PyBytes>(py)?.data(py),
102 102 )))
103 103 }
104 104 });
@@ -9,7 +9,6
9 9 //! `hg-core` package.
10 10
11 11 use std::cell::{RefCell, RefMut};
12 use std::convert::TryInto;
13 12
14 13 use cpython::{
15 14 exc, PyBool, PyBytes, PyClone, PyDict, PyErr, PyList, PyNone, PyObject,
@@ -105,9 +104,7 py_class!(pub class DirstateMap |py| {
105 104 let bytes = f.extract::<PyBytes>(py)?;
106 105 let path = HgPath::new(bytes.data(py));
107 106 let res = self.inner(py).borrow_mut().set_tracked(path);
108 let was_tracked = res.or_else(|_| {
109 Err(PyErr::new::<exc::OSError, _>(py, "Dirstate error".to_string()))
110 })?;
107 let was_tracked = res.map_err(|_| PyErr::new::<exc::OSError, _>(py, "Dirstate error".to_string()))?;
111 108 Ok(was_tracked.to_py_object(py))
112 109 }
113 110
@@ -115,9 +112,7 py_class!(pub class DirstateMap |py| {
115 112 let bytes = f.extract::<PyBytes>(py)?;
116 113 let path = HgPath::new(bytes.data(py));
117 114 let res = self.inner(py).borrow_mut().set_untracked(path);
118 let was_tracked = res.or_else(|_| {
119 Err(PyErr::new::<exc::OSError, _>(py, "Dirstate error".to_string()))
120 })?;
115 let was_tracked = res.map_err(|_| PyErr::new::<exc::OSError, _>(py, "Dirstate error".to_string()))?;
121 116 Ok(was_tracked.to_py_object(py))
122 117 }
123 118
@@ -137,9 +132,7 py_class!(pub class DirstateMap |py| {
137 132 let res = self.inner(py).borrow_mut().set_clean(
138 133 path, mode, size, timestamp,
139 134 );
140 res.or_else(|_| {
141 Err(PyErr::new::<exc::OSError, _>(py, "Dirstate error".to_string()))
142 })?;
135 res.map_err(|_| PyErr::new::<exc::OSError, _>(py, "Dirstate error".to_string()))?;
143 136 Ok(PyNone)
144 137 }
145 138
@@ -147,9 +140,7 py_class!(pub class DirstateMap |py| {
147 140 let bytes = f.extract::<PyBytes>(py)?;
148 141 let path = HgPath::new(bytes.data(py));
149 142 let res = self.inner(py).borrow_mut().set_possibly_dirty(path);
150 res.or_else(|_| {
151 Err(PyErr::new::<exc::OSError, _>(py, "Dirstate error".to_string()))
152 })?;
143 res.map_err(|_| PyErr::new::<exc::OSError, _>(py, "Dirstate error".to_string()))?;
153 144 Ok(PyNone)
154 145 }
155 146
@@ -196,9 +187,7 py_class!(pub class DirstateMap |py| {
196 187 has_meaningful_mtime,
197 188 parent_file_data,
198 189 );
199 res.or_else(|_| {
200 Err(PyErr::new::<exc::OSError, _>(py, "Dirstate error".to_string()))
201 })?;
190 res.map_err(|_| PyErr::new::<exc::OSError, _>(py, "Dirstate error".to_string()))?;
202 191 Ok(PyNone)
203 192 }
204 193
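
The hunks above collapse `res.or_else(|_| Err(...))?` into `res.map_err(|_| ...)?`. The two are equivalent on the error path, but `map_err` only touches the `Err` variant and states the intent directly. A minimal sketch (hypothetical `parse_port`):

fn parse_port(input: &str) -> Result<u16, String> {
    // Equivalent to:
    //   input.parse::<u16>().or_else(|_| Err(format!("bad port: {}", input)))
    input.parse::<u16>().map_err(|_| format!("bad port: {}", input))
}
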
@@ -40,7 +40,7 py_class!(pub class DirstateItem |py| {
40 40 }
41 41 }
42 42 let entry = DirstateEntry::from_v2_data(DirstateV2Data {
43 wc_tracked: wc_tracked,
43 wc_tracked,
44 44 p1_tracked,
45 45 p2_info,
46 46 mode_size: mode_size_opt,
@@ -151,6 +151,10 py_class!(pub class DirstateItem |py| {
151 151 Ok(self.entry(py).get().added())
152 152 }
153 153
154 @property
155 def modified(&self) -> PyResult<bool> {
156 Ok(self.entry(py).get().modified())
157 }
154 158
155 159 @property
156 160 def p2_info(&self) -> PyResult<bool> {
@@ -72,12 +72,11 fn collect_bad_matches(
72 72 for (path, bad_match) in collection.iter() {
73 73 let message = match bad_match {
74 74 BadMatch::OsError(code) => get_error_message(*code)?,
75 BadMatch::BadType(bad_type) => format!(
76 "unsupported file type (type is {})",
77 bad_type.to_string()
78 )
79 .to_py_object(py)
80 .into_object(),
75 BadMatch::BadType(bad_type) => {
76 format!("unsupported file type (type is {})", bad_type)
77 .to_py_object(py)
78 .into_object()
79 }
81 80 };
82 81 list.append(
83 82 py,
@@ -18,6 +18,11
18 18 //! >>> ancestor.__doc__
19 19 //! 'Generic DAG ancestor algorithms - Rust implementation'
20 20 //! ```
21 #![allow(clippy::too_many_arguments)] // rust-cpython macros
22 #![allow(clippy::zero_ptr)] // rust-cpython macros
23 #![allow(clippy::needless_update)] // rust-cpython macros
24 #![allow(clippy::manual_strip)] // rust-cpython macros
25 #![allow(clippy::type_complexity)] // rust-cpython macros
21 26
22 27 /// This crate uses nested private macros, `extern crate` is still needed in
23 28 /// 2018 edition.
@@ -47,6 +47,7 fn require_send<T: Send>() {}
47 47
48 48 #[allow(unused)]
49 49 fn static_assert_pybytes_is_send() {
50 #[allow(clippy::no_effect)]
50 51 require_send::<PyBytes>;
51 52 }
52 53
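
The `static_assert_pybytes_is_send` function above relies on a zero-cost trick: merely naming `require_send::<PyBytes>` forces the compiler to check the `Send` bound, and the new `allow(clippy::no_effect)` silences the lint about the otherwise useless expression. The same trick with a std type:

fn require_send<T: Send>() {}

#[allow(unused)]
fn static_assert_string_is_send() {
    // Never executed at runtime; instantiating the generic is enough
    // to make `String: Send` a compile-time requirement.
    #[allow(clippy::no_effect)]
    require_send::<String>;
}
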
@@ -144,9 +144,9 py_class!(pub class MixedIndex |py| {
144 144 // __delitem__ is both for `del idx[r]` and `del idx[r1:r2]`
145 145 self.cindex(py).borrow().inner().del_item(py, key)?;
146 146 let mut opt = self.get_nodetree(py)?.borrow_mut();
147 let mut nt = opt.as_mut().unwrap();
147 let nt = opt.as_mut().unwrap();
148 148 nt.invalidate_all();
149 self.fill_nodemap(py, &mut nt)?;
149 self.fill_nodemap(py, nt)?;
150 150 Ok(())
151 151 }
152 152
@@ -1,7 +1,6
1 1 use cpython::exc::ValueError;
2 2 use cpython::{PyBytes, PyDict, PyErr, PyObject, PyResult, PyTuple, Python};
3 3 use hg::revlog::Node;
4 use std::convert::TryFrom;
5 4
6 5 #[allow(unused)]
7 6 pub fn print_python_trace(py: Python) -> PyResult<PyObject> {
@@ -5,21 +5,21 authors = [
5 5 "Antoine Cezar <antoine.cezar@octobus.net>",
6 6 "Raphaël Gomès <raphael.gomes@octobus.net>",
7 7 ]
8 edition = "2018"
8 edition = "2021"
9 9
10 10 [dependencies]
11 11 atty = "0.2.14"
12 12 hg-core = { path = "../hg-core"}
13 chrono = "0.4.19"
14 clap = "2.34.0"
13 chrono = "0.4.23"
14 clap = { version = "4.0.24", features = ["cargo"] }
15 15 derive_more = "0.99.17"
16 home = "0.5.3"
16 home = "0.5.4"
17 17 lazy_static = "1.4.0"
18 log = "0.4.14"
19 micro-timer = "0.4.0"
20 regex = "1.5.5"
21 env_logger = "0.9.0"
18 log = "0.4.17"
19 logging_timer = "1.1.0"
20 regex = "1.7.0"
21 env_logger = "0.9.3"
22 22 format-bytes = "0.3.0"
23 23 users = "0.11.0"
24 which = "4.2.5"
24 which = "4.3.0"
25 25 rayon = "1.6.1"
@@ -205,16 +205,14 impl ColorMode {
205 205 return Err(HgError::unsupported("debug color mode"));
206 206 }
207 207 let auto = enabled == b"auto";
208 let always;
209 if !auto {
208 let always = if !auto {
210 209 let enabled_bool = config.get_bool(b"ui", b"color")?;
211 210 if !enabled_bool {
212 211 return Ok(None);
213 212 }
214 always = enabled == b"always"
215 || *origin == ConfigOrigin::CommandLineColor
213 enabled == b"always" || *origin == ConfigOrigin::CommandLineColor
216 214 } else {
217 always = false
215 false
218 216 };
219 217 let formatted = always
220 218 || (std::env::var_os("TERM").unwrap_or_default() != "dumb"
@@ -245,11 +243,8 pub struct ColorConfig {
245 243 impl ColorConfig {
246 244 // Similar to _modesetup in mercurial/color.py
247 245 pub fn new(config: &Config) -> Result<Option<Self>, HgError> {
248 Ok(match ColorMode::get(config)? {
249 None => None,
250 Some(ColorMode::Ansi) => Some(ColorConfig {
251 styles: effects_from_config(config),
252 }),
253 })
246 Ok(ColorMode::get(config)?.map(|ColorMode::Ansi| ColorConfig {
247 styles: effects_from_config(config),
248 }))
254 249 }
255 250 }
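
The `ColorConfig::new` rewrite above replaces a `match` over `Option<ColorMode>` with `Option::map`, destructuring the single-variant enum directly in the closure's parameter pattern; since `Ansi` is the only variant, the pattern is irrefutable and legal in that position. A standalone sketch with hypothetical `Mode`/`Styles` types:

enum Mode {
    Ansi,
}

struct Styles(u32);

fn config_for(mode: Option<Mode>) -> Option<Styles> {
    // `|Mode::Ansi|` is an irrefutable pattern because `Ansi` is the
    // only variant, so the closure can destructure it directly.
    mode.map(|Mode::Ansi| Styles(0))
}
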
@@ -3,35 +3,34 use clap::Arg;
3 3 use format_bytes::format_bytes;
4 4 use hg::operations::cat;
5 5 use hg::utils::hg_path::HgPathBuf;
6 use micro_timer::timed;
7 use std::convert::TryFrom;
6 use std::ffi::OsString;
7 use std::os::unix::prelude::OsStrExt;
8 8
9 9 pub const HELP_TEXT: &str = "
10 10 Output the current or given revision of files
11 11 ";
12 12
13 pub fn args() -> clap::App<'static, 'static> {
14 clap::SubCommand::with_name("cat")
13 pub fn args() -> clap::Command {
14 clap::command!("cat")
15 15 .arg(
16 Arg::with_name("rev")
16 Arg::new("rev")
17 17 .help("search the repository as it is in REV")
18 .short("-r")
19 .long("--rev")
20 .value_name("REV")
21 .takes_value(true),
18 .short('r')
19 .long("rev")
20 .value_name("REV"),
22 21 )
23 22 .arg(
24 clap::Arg::with_name("files")
23 clap::Arg::new("files")
25 24 .required(true)
26 .multiple(true)
27 .empty_values(false)
25 .num_args(1..)
28 26 .value_name("FILE")
27 .value_parser(clap::value_parser!(std::ffi::OsString))
29 28 .help("Files to output"),
30 29 )
31 30 .about(HELP_TEXT)
32 31 }
33 32
34 #[timed]
33 #[logging_timer::time("trace")]
35 34 pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
36 35 let cat_enabled_default = true;
37 36 let cat_enabled = invocation.config.get_option(b"rhg", b"cat")?;
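
The `cat` subcommand above shows most of the clap 2 → 4 builder migration in one place: `SubCommand::with_name` becomes `clap::command!`/`Command::new`, `Arg::with_name` becomes `Arg::new`, `short` takes a `char` without the dash, `long` drops the leading dashes, `takes_value(true)` is implied by `value_name`, and `multiple(true)` becomes `num_args(1..)`. A compact clap 4 sketch of the same shape (the `demo` command is hypothetical, not an rhg subcommand):

use clap::{Arg, Command};

fn args() -> Command {
    Command::new("demo")
        .arg(
            Arg::new("rev")
                .short('r') // a char in clap 4, not the string "-r"
                .long("rev") // no leading "--"
                .value_name("REV"), // implies the option takes a value
        )
        .arg(
            Arg::new("files")
                .required(true)
                .num_args(1..) // replaces .multiple(true)
                .value_name("FILE"),
        )
}
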
@@ -42,11 +41,15 pub fn run(invocation: &crate::CliInvoca
42 41 ));
43 42 }
44 43
45 let rev = invocation.subcommand_args.value_of("rev");
46 let file_args = match invocation.subcommand_args.values_of("files") {
47 Some(files) => files.collect(),
48 None => vec![],
49 };
44 let rev = invocation.subcommand_args.get_one::<String>("rev");
45 let file_args =
46 match invocation.subcommand_args.get_many::<OsString>("files") {
47 Some(files) => files
48 .filter(|s| !s.is_empty())
49 .map(|s| s.as_os_str())
50 .collect(),
51 None => vec![],
52 };
50 53
51 54 let repo = invocation.repo?;
52 55 let cwd = hg::utils::current_dir()?;
@@ -54,8 +57,8 pub fn run(invocation: &crate::CliInvoca
54 57 let working_directory = cwd.join(working_directory); // Make it absolute
55 58
56 59 let mut files = vec![];
57 for file in file_args.iter() {
58 if file.starts_with("set:") {
60 for file in file_args {
61 if file.as_bytes().starts_with(b"set:") {
59 62 let message = "fileset";
60 63 return Err(CommandError::unsupported(message));
61 64 }
@@ -63,7 +66,7 pub fn run(invocation: &crate::CliInvoca
63 66 let normalized = cwd.join(&file);
64 67 // TODO: actually normalize `..` path segments etc?
65 68 let dotted = normalized.components().any(|c| c.as_os_str() == "..");
66 if file == &"." || dotted {
69 if file.as_bytes() == b"." || dotted {
67 70 let message = "`..` or `.` path segment";
68 71 return Err(CommandError::unsupported(message));
69 72 }
@@ -75,7 +78,7 pub fn run(invocation: &crate::CliInvoca
75 78 .map_err(|_| {
76 79 CommandError::abort(format!(
77 80 "abort: {} not under root '{}'\n(consider using '--cwd {}')",
78 file,
81 String::from_utf8_lossy(file.as_bytes()),
79 82 working_directory.display(),
80 83 relative_path.display(),
81 84 ))
@@ -92,7 +95,7 pub fn run(invocation: &crate::CliInvoca
92 95 None => format!("{:x}", repo.dirstate_parents()?.p1),
93 96 };
94 97
95 let output = cat(&repo, &rev, files).map_err(|e| (e, rev.as_str()))?;
98 let output = cat(repo, &rev, files).map_err(|e| (e, rev.as_str()))?;
96 99 for (_file, contents) in output.results {
97 100 invocation.ui.write_stdout(&contents)?;
98 101 }
@@ -8,14 +8,13 pub const HELP_TEXT: &str = "
8 8 With one argument of the form section.name, print just the value of that config item.
9 9 ";
10 10
11 pub fn args() -> clap::App<'static, 'static> {
12 clap::SubCommand::with_name("config")
11 pub fn args() -> clap::Command {
12 clap::command!("config")
13 13 .arg(
14 Arg::with_name("name")
14 Arg::new("name")
15 15 .help("the section.name to print")
16 16 .value_name("NAME")
17 .required(true)
18 .takes_value(true),
17 .required(true),
19 18 )
20 19 .about(HELP_TEXT)
21 20 }
@@ -23,7 +22,7 pub fn args() -> clap::App<'static, 'sta
23 22 pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
24 23 let (section, name) = invocation
25 24 .subcommand_args
26 .value_of("name")
25 .get_one::<String>("name")
27 26 .expect("missing required CLI argument")
28 27 .as_bytes()
29 28 .split_2(b'.')
@@ -2,33 +2,32 use crate::error::CommandError;
2 2 use clap::Arg;
3 3 use clap::ArgGroup;
4 4 use hg::operations::{debug_data, DebugDataKind};
5 use micro_timer::timed;
6 5
7 6 pub const HELP_TEXT: &str = "
8 7 Dump the contents of a data file revision
9 8 ";
10 9
11 pub fn args() -> clap::App<'static, 'static> {
12 clap::SubCommand::with_name("debugdata")
10 pub fn args() -> clap::Command {
11 clap::command!("debugdata")
13 12 .arg(
14 Arg::with_name("changelog")
13 Arg::new("changelog")
15 14 .help("open changelog")
16 .short("-c")
17 .long("--changelog"),
15 .short('c')
16 .action(clap::ArgAction::SetTrue),
18 17 )
19 18 .arg(
20 Arg::with_name("manifest")
19 Arg::new("manifest")
21 20 .help("open manifest")
22 .short("-m")
23 .long("--manifest"),
21 .short('m')
22 .action(clap::ArgAction::SetTrue),
24 23 )
25 24 .group(
26 ArgGroup::with_name("")
25 ArgGroup::new("revlog")
27 26 .args(&["changelog", "manifest"])
28 27 .required(true),
29 28 )
30 29 .arg(
31 Arg::with_name("rev")
30 Arg::new("rev")
32 31 .help("revision")
33 32 .required(true)
34 33 .value_name("REV"),
@@ -36,23 +35,25 pub fn args() -> clap::App<'static, 'sta
36 35 .about(HELP_TEXT)
37 36 }
38 37
39 #[timed]
38 #[logging_timer::time("trace")]
40 39 pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
41 40 let args = invocation.subcommand_args;
42 41 let rev = args
43 .value_of("rev")
42 .get_one::<String>("rev")
44 43 .expect("rev should be a required argument");
45 let kind =
46 match (args.is_present("changelog"), args.is_present("manifest")) {
47 (true, false) => DebugDataKind::Changelog,
48 (false, true) => DebugDataKind::Manifest,
49 (true, true) => {
50 unreachable!("Should not happen since options are exclusive")
51 }
52 (false, false) => {
53 unreachable!("Should not happen since options are required")
54 }
55 };
44 let kind = match (
45 args.get_one::<bool>("changelog").unwrap(),
46 args.get_one::<bool>("manifest").unwrap(),
47 ) {
48 (true, false) => DebugDataKind::Changelog,
49 (false, true) => DebugDataKind::Manifest,
50 (true, true) => {
51 unreachable!("Should not happen since options are exclusive")
52 }
53 (false, false) => {
54 unreachable!("Should not happen since options are required")
55 }
56 };
56 57
57 58 let repo = invocation.repo?;
58 59 if repo.has_narrow() {
@@ -60,7 +61,7 pub fn run(invocation: &crate::CliInvoca
60 61 "support for ellipsis nodes is missing and repo has narrow enabled",
61 62 ));
62 63 }
63 let data = debug_data(repo, rev, kind).map_err(|e| (e, rev))?;
64 let data = debug_data(repo, rev, kind).map_err(|e| (e, rev.as_ref()))?;
64 65
65 66 let mut stdout = invocation.ui.stdout_buffer();
66 67 stdout.write_all(&data)?;
@@ -1,5 +1,4
1 1 use crate::error::CommandError;
2 use clap::SubCommand;
3 2 use hg;
4 3 use hg::matchers::get_ignore_matcher;
5 4 use hg::StatusError;
@@ -13,8 +12,8 This is a pure Rust version of `hg debug
13 12 Some options might be missing, check the list below.
14 13 ";
15 14
16 pub fn args() -> clap::App<'static, 'static> {
17 SubCommand::with_name("debugignorerhg").about(HELP_TEXT)
15 pub fn args() -> clap::Command {
16 clap::command!("debugignorerhg").about(HELP_TEXT)
18 17 }
19 18
20 19 pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
@@ -24,10 +23,10 pub fn run(invocation: &crate::CliInvoca
24 23
25 24 let (ignore_matcher, warnings) = get_ignore_matcher(
26 25 vec![ignore_file],
27 &repo.working_directory_path().to_owned(),
26 repo.working_directory_path(),
28 27 &mut |_source, _pattern_bytes| (),
29 28 )
30 .map_err(|e| StatusError::from(e))?;
29 .map_err(StatusError::from)?;
31 30
32 31 if !warnings.is_empty() {
33 32 warn!("Pattern warnings: {:?}", &warnings);
@@ -4,8 +4,8 pub const HELP_TEXT: &str = "
4 4 Print the current repo requirements.
5 5 ";
6 6
7 pub fn args() -> clap::App<'static, 'static> {
8 clap::SubCommand::with_name("debugrequirements").about(HELP_TEXT)
7 pub fn args() -> clap::Command {
8 clap::command!("debugrequirements").about(HELP_TEXT)
9 9 }
10 10
11 11 pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
@@ -1,19 +1,21
1 use std::os::unix::prelude::OsStrExt;
1 use std::{
2 ffi::{OsStr, OsString},
3 os::unix::prelude::OsStrExt,
4 };
2 5
3 6 use crate::error::CommandError;
4 use clap::SubCommand;
5 7 use hg::{self, utils::hg_path::HgPath};
6 8
7 9 pub const HELP_TEXT: &str = "";
8 10
9 pub fn args() -> clap::App<'static, 'static> {
10 SubCommand::with_name("debugrhgsparse")
11 pub fn args() -> clap::Command {
12 clap::command!("debugrhgsparse")
11 13 .arg(
12 clap::Arg::with_name("files")
14 clap::Arg::new("files")
15 .value_name("FILES")
13 16 .required(true)
14 .multiple(true)
15 .empty_values(false)
16 .value_name("FILES")
17 .num_args(1..)
18 .value_parser(clap::value_parser!(std::ffi::OsString))
17 19 .help("Files to check against sparse profile"),
18 20 )
19 21 .about(HELP_TEXT)
@@ -22,9 +24,13 pub fn args() -> clap::App<'static, 'sta
22 24 pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
23 25 let repo = invocation.repo?;
24 26
25 let (matcher, _warnings) = hg::sparse::matcher(&repo).unwrap();
26 let files = invocation.subcommand_args.values_of_os("files");
27 let (matcher, _warnings) = hg::sparse::matcher(repo).unwrap();
28 let files = invocation.subcommand_args.get_many::<OsString>("files");
27 29 if let Some(files) = files {
30 let files: Vec<&OsStr> = files
31 .filter(|s| !s.is_empty())
32 .map(|s| s.as_os_str())
33 .collect();
28 34 for file in files {
29 35 invocation.ui.write_stdout(b"matches: ")?;
30 36 invocation.ui.write_stdout(
@@ -1,12 +1,13
1 1 use crate::error::CommandError;
2 use crate::ui::Ui;
2 use crate::ui::{print_narrow_sparse_warnings, Ui};
3 3 use crate::utils::path_utils::RelativizePaths;
4 4 use clap::Arg;
5 use hg::errors::HgError;
5 use hg::narrow;
6 6 use hg::operations::list_rev_tracked_files;
7 use hg::operations::Dirstate;
8 7 use hg::repo::Repo;
8 use hg::utils::filter_map_results;
9 9 use hg::utils::hg_path::HgPath;
10 use rayon::prelude::*;
10 11
11 12 pub const HELP_TEXT: &str = "
12 13 List tracked files.
@@ -14,15 +15,14 List tracked files.
14 15 Returns 0 on success.
15 16 ";
16 17
17 pub fn args() -> clap::App<'static, 'static> {
18 clap::SubCommand::with_name("files")
18 pub fn args() -> clap::Command {
19 clap::command!("files")
19 20 .arg(
20 Arg::with_name("rev")
21 Arg::new("rev")
21 22 .help("search the repository as it is in REV")
22 .short("-r")
23 .long("--revision")
24 .value_name("REV")
25 .takes_value(true),
23 .short('r')
24 .long("revision")
25 .value_name("REV"),
26 26 )
27 27 .about(HELP_TEXT)
28 28 }
@@ -35,7 +35,7 pub fn run(invocation: &crate::CliInvoca
35 35 ));
36 36 }
37 37
38 let rev = invocation.subcommand_args.value_of("rev");
38 let rev = invocation.subcommand_args.get_one::<String>("rev");
39 39
40 40 let repo = invocation.repo?;
41 41
@@ -51,36 +51,45 pub fn run(invocation: &crate::CliInvoca
51 51 ));
52 52 }
53 53
54 let (narrow_matcher, narrow_warnings) = narrow::matcher(repo)?;
55 print_narrow_sparse_warnings(&narrow_warnings, &[], invocation.ui, repo)?;
56
54 57 if let Some(rev) = rev {
55 if repo.has_narrow() {
56 return Err(CommandError::unsupported(
57 "rhg files -r <rev> is not supported in narrow clones",
58 ));
59 }
60 let files = list_rev_tracked_files(repo, rev).map_err(|e| (e, rev))?;
58 let files = list_rev_tracked_files(repo, rev, narrow_matcher)
59 .map_err(|e| (e, rev.as_ref()))?;
61 60 display_files(invocation.ui, repo, files.iter())
62 61 } else {
63 // The dirstate always reflects the sparse narrowspec, so if
64 // we only have sparse without narrow all is fine.
65 // If we have narrow, then [hg files] needs to check if
66 // the store narrowspec is in sync with the one of the dirstate,
67 // so we can't support that without explicit code.
68 if repo.has_narrow() {
69 return Err(CommandError::unsupported(
70 "rhg files is not supported in narrow clones",
71 ));
72 }
73 let distate = Dirstate::new(repo)?;
74 let files = distate.tracked_files()?;
75 display_files(invocation.ui, repo, files.into_iter().map(Ok))
62 // The dirstate always reflects the sparse narrowspec.
63 let dirstate = repo.dirstate_map()?;
64 let files_res: Result<Vec<_>, _> =
65 filter_map_results(dirstate.iter(), |(path, entry)| {
66 Ok(if entry.tracked() && narrow_matcher.matches(path) {
67 Some(path)
68 } else {
69 None
70 })
71 })
72 .collect();
73
74 let mut files = files_res?;
75 files.par_sort_unstable();
76
77 display_files(
78 invocation.ui,
79 repo,
80 files.into_iter().map::<Result<_, CommandError>, _>(Ok),
81 )
76 82 }
77 83 }
78 84
79 fn display_files<'a>(
85 fn display_files<'a, E>(
80 86 ui: &Ui,
81 87 repo: &Repo,
82 files: impl IntoIterator<Item = Result<&'a HgPath, HgError>>,
83 ) -> Result<(), CommandError> {
88 files: impl IntoIterator<Item = Result<&'a HgPath, E>>,
89 ) -> Result<(), CommandError>
90 where
91 CommandError: From<E>,
92 {
84 93 let mut stdout = ui.stdout_buffer();
85 94 let mut any = false;
86 95
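
The `files` rework above walks the dirstate with `filter_map_results`, keeping tracked paths accepted by the narrow matcher and letting parse errors flow through to the `collect`. A hedged sketch of that combinator's shape, assuming the input iterator yields `Result` items (the real helper lives in `hg::utils` and its signature may differ):

fn filter_map_results<T, U, E>(
    iter: impl Iterator<Item = Result<T, E>>,
    mut f: impl FnMut(T) -> Result<Option<U>, E>,
) -> impl Iterator<Item = Result<U, E>> {
    iter.filter_map(move |res| match res.and_then(&mut f) {
        // The mapped value survives the filter...
        Ok(Some(v)) => Some(Ok(v)),
        // ...filtered-out items disappear...
        Ok(None) => None,
        // ...and errors are passed through for the caller to collect.
        Err(e) => Some(Err(e)),
    })
}
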
@@ -9,8 +9,8 Print the root directory of the current
9 9 Returns 0 on success.
10 10 ";
11 11
12 pub fn args() -> clap::App<'static, 'static> {
13 clap::SubCommand::with_name("root").about(HELP_TEXT)
12 pub fn args() -> clap::Command {
13 clap::command!("root").about(HELP_TEXT)
14 14 }
15 15
16 16 pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
@@ -6,9 +6,11
6 6 // GNU General Public License version 2 or any later version.
7 7
8 8 use crate::error::CommandError;
9 use crate::ui::Ui;
9 use crate::ui::{
10 format_pattern_file_warning, print_narrow_sparse_warnings, Ui,
11 };
10 12 use crate::utils::path_utils::RelativizePaths;
11 use clap::{Arg, SubCommand};
13 use clap::Arg;
12 14 use format_bytes::format_bytes;
13 15 use hg::config::Config;
14 16 use hg::dirstate::has_exec_bit;
@@ -20,7 +22,6 use hg::manifest::Manifest;
20 22 use hg::matchers::{AlwaysMatcher, IntersectionMatcher};
21 23 use hg::repo::Repo;
22 24 use hg::utils::files::get_bytes_from_os_string;
23 use hg::utils::files::get_bytes_from_path;
24 25 use hg::utils::files::get_path_from_bytes;
25 26 use hg::utils::hg_path::{hg_path_to_path_buf, HgPath};
26 27 use hg::DirstateStatus;
@@ -41,75 +42,86 This is a pure Rust version of `hg statu
41 42 Some options might be missing, check the list below.
42 43 ";
43 44
44 pub fn args() -> clap::App<'static, 'static> {
45 SubCommand::with_name("status")
45 pub fn args() -> clap::Command {
46 clap::command!("status")
46 47 .alias("st")
47 48 .about(HELP_TEXT)
48 49 .arg(
49 Arg::with_name("all")
50 Arg::new("all")
50 51 .help("show status of all files")
51 .short("-A")
52 .long("--all"),
52 .short('A')
53 .action(clap::ArgAction::SetTrue)
54 .long("all"),
53 55 )
54 56 .arg(
55 Arg::with_name("modified")
57 Arg::new("modified")
56 58 .help("show only modified files")
57 .short("-m")
58 .long("--modified"),
59 .short('m')
60 .action(clap::ArgAction::SetTrue)
61 .long("modified"),
59 62 )
60 63 .arg(
61 Arg::with_name("added")
64 Arg::new("added")
62 65 .help("show only added files")
63 .short("-a")
64 .long("--added"),
66 .short('a')
67 .action(clap::ArgAction::SetTrue)
68 .long("added"),
65 69 )
66 70 .arg(
67 Arg::with_name("removed")
71 Arg::new("removed")
68 72 .help("show only removed files")
69 .short("-r")
70 .long("--removed"),
73 .short('r')
74 .action(clap::ArgAction::SetTrue)
75 .long("removed"),
71 76 )
72 77 .arg(
73 Arg::with_name("clean")
78 Arg::new("clean")
74 79 .help("show only clean files")
75 .short("-c")
76 .long("--clean"),
80 .short('c')
81 .action(clap::ArgAction::SetTrue)
82 .long("clean"),
77 83 )
78 84 .arg(
79 Arg::with_name("deleted")
85 Arg::new("deleted")
80 86 .help("show only deleted files")
81 .short("-d")
82 .long("--deleted"),
87 .short('d')
88 .action(clap::ArgAction::SetTrue)
89 .long("deleted"),
83 90 )
84 91 .arg(
85 Arg::with_name("unknown")
92 Arg::new("unknown")
86 93 .help("show only unknown (not tracked) files")
87 .short("-u")
88 .long("--unknown"),
94 .short('u')
95 .action(clap::ArgAction::SetTrue)
96 .long("unknown"),
89 97 )
90 98 .arg(
91 Arg::with_name("ignored")
99 Arg::new("ignored")
92 100 .help("show only ignored files")
93 .short("-i")
94 .long("--ignored"),
101 .short('i')
102 .action(clap::ArgAction::SetTrue)
103 .long("ignored"),
95 104 )
96 105 .arg(
97 Arg::with_name("copies")
106 Arg::new("copies")
98 107 .help("show source of copied files (DEFAULT: ui.statuscopies)")
99 .short("-C")
100 .long("--copies"),
108 .short('C')
109 .action(clap::ArgAction::SetTrue)
110 .long("copies"),
101 111 )
102 112 .arg(
103 Arg::with_name("no-status")
113 Arg::new("no-status")
104 114 .help("hide status prefix")
105 .short("-n")
106 .long("--no-status"),
115 .short('n')
116 .action(clap::ArgAction::SetTrue)
117 .long("no-status"),
107 118 )
108 119 .arg(
109 Arg::with_name("verbose")
120 Arg::new("verbose")
110 121 .help("enable additional output")
111 .short("-v")
112 .long("--verbose"),
122 .short('v')
123 .action(clap::ArgAction::SetTrue)
124 .long("verbose"),
113 125 )
114 126 }
115 127
@@ -158,7 +170,7 impl DisplayStates {
158 170 }
159 171
160 172 fn has_unfinished_merge(repo: &Repo) -> Result<bool, CommandError> {
161 return Ok(repo.dirstate_parents()?.is_merge());
173 Ok(repo.dirstate_parents()?.is_merge())
162 174 }
163 175
164 176 fn has_unfinished_state(repo: &Repo) -> Result<bool, CommandError> {
@@ -181,7 +193,7 fn has_unfinished_state(repo: &Repo) ->
181 193 return Ok(true);
182 194 }
183 195 }
184 return Ok(false);
196 Ok(false)
185 197 }
186 198
187 199 pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
@@ -200,25 +212,25 pub fn run(invocation: &crate::CliInvoca
200 212 let config = invocation.config;
201 213 let args = invocation.subcommand_args;
202 214
203 let verbose = !args.is_present("print0")
204 && (args.is_present("verbose")
205 || config.get_bool(b"ui", b"verbose")?
206 || config.get_bool(b"commands", b"status.verbose")?);
215 // TODO add `!args.get_flag("print0") &&` when we support `print0`
216 let verbose = args.get_flag("verbose")
217 || config.get_bool(b"ui", b"verbose")?
218 || config.get_bool(b"commands", b"status.verbose")?;
207 219
208 let all = args.is_present("all");
220 let all = args.get_flag("all");
209 221 let display_states = if all {
210 222 // TODO when implementing `--quiet`: it excludes clean files
211 223 // from `--all`
212 224 ALL_DISPLAY_STATES
213 225 } else {
214 226 let requested = DisplayStates {
215 modified: args.is_present("modified"),
216 added: args.is_present("added"),
217 removed: args.is_present("removed"),
218 clean: args.is_present("clean"),
219 deleted: args.is_present("deleted"),
220 unknown: args.is_present("unknown"),
221 ignored: args.is_present("ignored"),
227 modified: args.get_flag("modified"),
228 added: args.get_flag("added"),
229 removed: args.get_flag("removed"),
230 clean: args.get_flag("clean"),
231 deleted: args.get_flag("deleted"),
232 unknown: args.get_flag("unknown"),
233 ignored: args.get_flag("ignored"),
222 234 };
223 235 if requested.is_empty() {
224 236 DEFAULT_DISPLAY_STATES
@@ -226,27 +238,25 pub fn run(invocation: &crate::CliInvoca
226 238 requested
227 239 }
228 240 };
229 let no_status = args.is_present("no-status");
241 let no_status = args.get_flag("no-status");
230 242 let list_copies = all
231 || args.is_present("copies")
243 || args.get_flag("copies")
232 244 || config.get_bool(b"ui", b"statuscopies")?;
233 245
234 246 let repo = invocation.repo?;
235 247
236 if verbose {
237 if has_unfinished_state(repo)? {
238 return Err(CommandError::unsupported(
239 "verbose status output is not supported by rhg (and is needed because we're in an unfinished operation)",
240 ));
241 };
248 if verbose && has_unfinished_state(repo)? {
249 return Err(CommandError::unsupported(
250 "verbose status output is not supported by rhg (and is needed because we're in an unfinished operation)",
251 ));
242 252 }
243 253
244 254 let mut dmap = repo.dirstate_map_mut()?;
245 255
256 let check_exec = hg::checkexec::check_exec(repo.working_directory_path());
257
246 258 let options = StatusOptions {
247 // we're currently supporting file systems with exec flags only
248 // anyway
249 check_exec: true,
259 check_exec,
250 260 list_clean: display_states.clean,
251 261 list_unknown: display_states.unknown,
252 262 list_ignored: display_states.ignored,
@@ -260,7 +270,7 pub fn run(invocation: &crate::CliInvoca
260 270 let after_status = |res: StatusResult| -> Result<_, CommandError> {
261 271 let (mut ds_status, pattern_warnings) = res?;
262 272 for warning in pattern_warnings {
263 ui.write_stderr(&print_pattern_file_warning(&warning, &repo))?;
273 ui.write_stderr(&format_pattern_file_warning(&warning, repo))?;
264 274 }
265 275
266 276 for (path, error) in ds_status.bad {
@@ -301,6 +311,7 pub fn run(invocation: &crate::CliInvoca
301 311 unsure_is_modified(
302 312 working_directory_vfs,
303 313 store_vfs,
314 check_exec,
304 315 &manifest,
305 316 &to_check.path,
306 317 )
@@ -375,31 +386,12 pub fn run(invocation: &crate::CliInvoca
375 386 (false, false) => Box::new(AlwaysMatcher),
376 387 };
377 388
378 for warning in narrow_warnings.into_iter().chain(sparse_warnings) {
379 match &warning {
380 sparse::SparseWarning::RootWarning { context, line } => {
381 let msg = format_bytes!(
382 b"warning: {} profile cannot use paths \
383 starting with /, ignoring {}\n",
384 context,
385 line
386 );
387 ui.write_stderr(&msg)?;
388 }
389 sparse::SparseWarning::ProfileNotFound { profile, rev } => {
390 let msg = format_bytes!(
391 b"warning: sparse profile '{}' not found \
392 in rev {} - ignoring it\n",
393 profile,
394 rev
395 );
396 ui.write_stderr(&msg)?;
397 }
398 sparse::SparseWarning::Pattern(e) => {
399 ui.write_stderr(&print_pattern_file_warning(e, &repo))?;
400 }
401 }
402 }
389 print_narrow_sparse_warnings(
390 &narrow_warnings,
391 &sparse_warnings,
392 ui,
393 repo,
394 )?;
403 395 let (fixup, mut dirstate_write_needed, filesystem_time_at_status_start) =
404 396 dmap.with_status(
405 397 matcher.as_ref(),
@@ -543,6 +535,7 impl DisplayStatusPaths<'_> {
543 535 fn unsure_is_modified(
544 536 working_directory_vfs: hg::vfs::Vfs,
545 537 store_vfs: hg::vfs::Vfs,
538 check_exec: bool,
546 539 manifest: &Manifest,
547 540 hg_path: &HgPath,
548 541 ) -> Result<bool, HgError> {
@@ -550,20 +543,30 fn unsure_is_modified(
550 543 let fs_path = hg_path_to_path_buf(hg_path).expect("HgPath conversion");
551 544 let fs_metadata = vfs.symlink_metadata(&fs_path)?;
552 545 let is_symlink = fs_metadata.file_type().is_symlink();
546
547 let entry = manifest
548 .find_by_path(hg_path)?
549 .expect("ambiguous file not in p1");
550
553 551 // TODO: Also account for `FALLBACK_SYMLINK` and `FALLBACK_EXEC` from the
554 552 // dirstate
555 553 let fs_flags = if is_symlink {
556 554 Some(b'l')
557 } else if has_exec_bit(&fs_metadata) {
555 } else if check_exec && has_exec_bit(&fs_metadata) {
558 556 Some(b'x')
559 557 } else {
560 558 None
561 559 };
562 560
563 let entry = manifest
564 .find_by_path(hg_path)?
565 .expect("ambiguous file not in p1");
566 if entry.flags != fs_flags {
561 let entry_flags = if check_exec {
562 entry.flags
563 } else if entry.flags == Some(b'x') {
564 None
565 } else {
566 entry.flags
567 };
568
569 if entry_flags != fs_flags {
567 570 return Ok(true);
568 571 }
569 572 let filelog = hg::filelog::Filelog::open_vfs(&store_vfs, hg_path)?;
@@ -571,8 +574,8 fn unsure_is_modified(
571 574 let file_node = entry.node_id()?;
572 575 let filelog_entry = filelog.entry_for_node(file_node).map_err(|_| {
573 576 HgError::corrupted(format!(
574 "filelog missing node {:?} from manifest",
575 file_node
577 "filelog {:?} missing node {:?} from manifest",
578 hg_path, file_node
576 579 ))
577 580 })?;
578 581 if filelog_entry.file_data_len_not_equal_to(fs_len) {
@@ -596,30 +599,3 fn unsure_is_modified(
596 599 };
597 600 Ok(p1_contents != &*fs_contents)
598 601 }
599
600 fn print_pattern_file_warning(
601 warning: &PatternFileWarning,
602 repo: &Repo,
603 ) -> Vec<u8> {
604 match warning {
605 PatternFileWarning::InvalidSyntax(path, syntax) => format_bytes!(
606 b"{}: ignoring invalid syntax '{}'\n",
607 get_bytes_from_path(path),
608 &*syntax
609 ),
610 PatternFileWarning::NoSuchFile(path) => {
611 let path = if let Ok(relative) =
612 path.strip_prefix(repo.working_directory_path())
613 {
614 relative
615 } else {
616 &*path
617 };
618 format_bytes!(
619 b"skipping unreadable pattern file '{}': \
620 No such file or directory\n",
621 get_bytes_from_path(path),
622 )
623 }
624 }
625 }
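
The `unsure_is_modified` change above threads `check_exec` through so that, on filesystems without exec-bit support, an `x` flag recorded in the manifest is masked out before the comparison; otherwise every executable file would look modified on such filesystems. The comparison logic in isolation (hypothetical `flags_differ` helper):

fn flags_differ(
    manifest_flags: Option<u8>,
    fs_flags: Option<u8>,
    check_exec: bool,
) -> bool {
    // Without exec-bit support, `Some(b'x')` in the manifest can never
    // be observed on disk, so treat it as "no flag" before comparing.
    let manifest_flags = if !check_exec && manifest_flags == Some(b'x') {
        None
    } else {
        manifest_flags
    };
    manifest_flags != fs_flags
}
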
@@ -7,7 +7,7 use hg::dirstate_tree::on_disk::Dirstate
7 7 use hg::errors::HgError;
8 8 use hg::exit_codes;
9 9 use hg::repo::RepoError;
10 use hg::revlog::revlog::RevlogError;
10 use hg::revlog::RevlogError;
11 11 use hg::sparse::SparseConfigError;
12 12 use hg::utils::files::get_bytes_from_path;
13 13 use hg::{DirstateError, DirstateMapError, StatusError};
@@ -50,7 +50,7 impl CommandError {
50 50 // of error messages to handle non-UTF-8 filenames etc:
51 51 // https://www.mercurial-scm.org/wiki/EncodingStrategy#Mixing_output
52 52 message: utf8_to_local(message.as_ref()).into(),
53 detailed_exit_code: detailed_exit_code,
53 detailed_exit_code,
54 54 hint: None,
55 55 }
56 56 }
@@ -1,10 +1,7
1 1 extern crate log;
2 2 use crate::error::CommandError;
3 3 use crate::ui::{local_to_utf8, Ui};
4 use clap::App;
5 use clap::AppSettings;
6 use clap::Arg;
7 use clap::ArgMatches;
4 use clap::{command, Arg, ArgMatches};
8 5 use format_bytes::{format_bytes, join};
9 6 use hg::config::{Config, ConfigSource, PlainInfo};
10 7 use hg::repo::{Repo, RepoError};
@@ -35,55 +32,47 fn main_with_result(
35 32 ) -> Result<(), CommandError> {
36 33 check_unsupported(config, repo)?;
37 34
38 let app = App::new("rhg")
39 .global_setting(AppSettings::AllowInvalidUtf8)
40 .global_setting(AppSettings::DisableVersion)
41 .setting(AppSettings::SubcommandRequired)
42 .setting(AppSettings::VersionlessSubcommands)
35 let app = command!()
36 .subcommand_required(true)
43 37 .arg(
44 Arg::with_name("repository")
38 Arg::new("repository")
45 39 .help("repository root directory")
46 .short("-R")
47 .long("--repository")
40 .short('R')
48 41 .value_name("REPO")
49 .takes_value(true)
50 42 // Both ok: `hg -R ./foo log` or `hg log -R ./foo`
51 43 .global(true),
52 44 )
53 45 .arg(
54 Arg::with_name("config")
46 Arg::new("config")
55 47 .help("set/override config option (use 'section.name=value')")
56 .long("--config")
57 48 .value_name("CONFIG")
58 .takes_value(true)
59 49 .global(true)
50 .long("config")
60 51 // Ok: `--config section.key1=val --config section.key2=val2`
61 .multiple(true)
62 52 // Not ok: `--config section.key1=val section.key2=val2`
63 .number_of_values(1),
53 .action(clap::ArgAction::Append),
64 54 )
65 55 .arg(
66 Arg::with_name("cwd")
56 Arg::new("cwd")
67 57 .help("change working directory")
68 .long("--cwd")
69 58 .value_name("DIR")
70 .takes_value(true)
59 .long("cwd")
71 60 .global(true),
72 61 )
73 62 .arg(
74 Arg::with_name("color")
63 Arg::new("color")
75 64 .help("when to colorize (boolean, always, auto, never, or debug)")
76 .long("--color")
77 65 .value_name("TYPE")
78 .takes_value(true)
66 .long("color")
79 67 .global(true),
80 68 )
81 69 .version("0.0.1");
82 70 let app = add_subcommand_args(app);
83 71
84 let matches = app.clone().get_matches_from_safe(argv.iter())?;
72 let matches = app.try_get_matches_from(argv.iter())?;
85 73
86 let (subcommand_name, subcommand_matches) = matches.subcommand();
74 let (subcommand_name, subcommand_args) =
75 matches.subcommand().expect("subcommand required");
87 76
88 77 // Mercurial allows users to define "defaults" for commands, fallback
89 78 // if a default is detected for the current command
@@ -104,9 +93,7 fn main_with_result(
104 93 }
105 94 }
106 95 let run = subcommand_run_fn(subcommand_name)
107 .expect("unknown subcommand name from clap despite AppSettings::SubcommandRequired");
108 let subcommand_args = subcommand_matches
109 .expect("no subcommand arguments from clap despite AppSettings::SubcommandRequired");
96 .expect("unknown subcommand name from clap despite Command::subcommand_required");
110 97
111 98 let invocation = CliInvocation {
112 99 ui,
@@ -216,7 +203,7 fn rhg_main(argv: Vec<OsString>) -> ! {
216 203 // Same as `_matchscheme` in `mercurial/util.py`
217 204 regex::bytes::Regex::new("^[a-zA-Z0-9+.\\-]+:").unwrap();
218 205 }
219 if SCHEME_RE.is_match(&repo_path_bytes) {
206 if SCHEME_RE.is_match(repo_path_bytes) {
220 207 exit(
221 208 &argv,
222 209 &initial_current_dir,
@@ -236,7 +223,7 fn rhg_main(argv: Vec<OsString>) -> ! {
236 223 )
237 224 }
238 225 }
239 let repo_arg = early_args.repo.unwrap_or(Vec::new());
226 let repo_arg = early_args.repo.unwrap_or_default();
240 227 let repo_path: Option<PathBuf> = {
241 228 if repo_arg.is_empty() {
242 229 None
@@ -267,7 +254,7 fn rhg_main(argv: Vec<OsString>) -> ! {
267 254 let non_repo_config_val = {
268 255 let non_repo_val = non_repo_config.get(b"paths", &repo_arg);
269 256 match &non_repo_val {
270 Some(val) if val.len() > 0 => home::home_dir()
257 Some(val) if !val.is_empty() => home::home_dir()
271 258 .unwrap_or_else(|| PathBuf::from("~"))
272 259 .join(get_path_from_bytes(val))
273 260 .canonicalize()
@@ -283,7 +270,7 fn rhg_main(argv: Vec<OsString>) -> ! {
283 270 Some(val) => {
284 271 let local_config_val = val.get(b"paths", &repo_arg);
285 272 match &local_config_val {
286 Some(val) if val.len() > 0 => {
273 Some(val) if !val.is_empty() => {
287 274 // presence of a local_config assures that
288 275 // current_dir
289 276 // wont result in an Error
@@ -297,7 +284,8 fn rhg_main(argv: Vec<OsString>) -> ! {
297 284 }
298 285 }
299 286 };
300 config_val.or(Some(get_path_from_bytes(&repo_arg).to_path_buf()))
287 config_val
288 .or_else(|| Some(get_path_from_bytes(&repo_arg).to_path_buf()))
301 289 }
302 290 };
303 291
@@ -317,7 +305,7 fn rhg_main(argv: Vec<OsString>) -> ! {
317 305 )
318 306 };
319 307 let early_exit = |config: &Config, error: CommandError| -> ! {
320 simple_exit(&Ui::new_infallible(config), &config, Err(error))
308 simple_exit(&Ui::new_infallible(config), config, Err(error))
321 309 };
322 310 let repo_result = match Repo::find(&non_repo_config, repo_path.to_owned())
323 311 {
@@ -341,13 +329,13 fn rhg_main(argv: Vec<OsString>) -> ! {
341 329 && config_cow
342 330 .as_ref()
343 331 .get_bool(b"ui", b"tweakdefaults")
344 .unwrap_or_else(|error| early_exit(&config, error.into()))
332 .unwrap_or_else(|error| early_exit(config, error.into()))
345 333 {
346 334 config_cow.to_mut().tweakdefaults()
347 335 };
348 336 let config = config_cow.as_ref();
349 let ui = Ui::new(&config)
350 .unwrap_or_else(|error| early_exit(&config, error.into()));
337 let ui = Ui::new(config)
338 .unwrap_or_else(|error| early_exit(config, error.into()));
351 339
352 340 if let Ok(true) = config.get_bool(b"rhg", b"fallback-immediately") {
353 341 exit(
@@ -373,7 +361,7 fn rhg_main(argv: Vec<OsString>) -> ! {
373 361 repo_result.as_ref(),
374 362 config,
375 363 );
376 simple_exit(&ui, &config, result)
364 simple_exit(&ui, config, result)
377 365 }
378 366
379 367 fn main() -> ! {
@@ -435,9 +423,9 fn exit<'a>(
435 423 }
436 424 Some(executable) => executable,
437 425 };
438 let executable_path = get_path_from_bytes(&executable);
426 let executable_path = get_path_from_bytes(executable);
439 427 let this_executable = args.next().expect("expected argv[0] to exist");
440 if executable_path == &PathBuf::from(this_executable) {
428 if executable_path == *this_executable {
441 429 // Avoid spawning infinitely many processes until resource
442 430 // exhaustion.
443 431 let _ = ui.write_stderr(&format_bytes!(
@@ -535,7 +523,7 macro_rules! subcommands {
535 523 )+
536 524 }
537 525
538 fn add_subcommand_args<'a, 'b>(app: App<'a, 'b>) -> App<'a, 'b> {
526 fn add_subcommand_args(app: clap::Command) -> clap::Command {
539 527 app
540 528 $(
541 529 .subcommand(commands::$command::args())
@@ -569,7 +557,7 subcommands! {
569 557
570 558 pub struct CliInvocation<'a> {
571 559 ui: &'a Ui,
572 subcommand_args: &'a ArgMatches<'a>,
560 subcommand_args: &'a ArgMatches,
573 561 config: &'a Config,
574 562 /// References inside `Result` is a bit peculiar but allow
575 563 /// `invocation.repo?` to work out with `&CliInvocation` since this
@@ -752,6 +740,7 fn check_extensions(config: &Config) ->
752 740 }
753 741
754 742 /// Array of tuples of (auto upgrade conf, feature conf, local requirement)
743 #[allow(clippy::type_complexity)]
755 744 const AUTO_UPGRADES: &[((&str, &str), (&str, &str), &str)] = &[
756 745 (
757 746 ("format", "use-share-safe.automatic-upgrade-of-mismatching-repositories"),
@@ -1,10 +1,15
1 1 use crate::color::ColorConfig;
2 2 use crate::color::Effect;
3 use crate::error::CommandError;
3 4 use format_bytes::format_bytes;
4 5 use format_bytes::write_bytes;
5 6 use hg::config::Config;
6 7 use hg::config::PlainInfo;
7 8 use hg::errors::HgError;
9 use hg::repo::Repo;
10 use hg::sparse;
11 use hg::utils::files::get_bytes_from_path;
12 use hg::PatternFileWarning;
8 13 use std::borrow::Cow;
9 14 use std::io;
10 15 use std::io::{ErrorKind, Write};
@@ -223,3 +228,68 fn isatty(config: &Config) -> Result<boo
223 228 atty::is(atty::Stream::Stdout)
224 229 })
225 230 }
231
232 /// Return the formatted bytestring corresponding to a pattern file warning,
233 /// as expected by the CLI.
234 pub(crate) fn format_pattern_file_warning(
235 warning: &PatternFileWarning,
236 repo: &Repo,
237 ) -> Vec<u8> {
238 match warning {
239 PatternFileWarning::InvalidSyntax(path, syntax) => format_bytes!(
240 b"{}: ignoring invalid syntax '{}'\n",
241 get_bytes_from_path(path),
242 &*syntax
243 ),
244 PatternFileWarning::NoSuchFile(path) => {
245 let path = if let Ok(relative) =
246 path.strip_prefix(repo.working_directory_path())
247 {
248 relative
249 } else {
250 &*path
251 };
252 format_bytes!(
253 b"skipping unreadable pattern file '{}': \
254 No such file or directory\n",
255 get_bytes_from_path(path),
256 )
257 }
258 }
259 }
260
261 /// Print with `Ui` the formatted bytestring corresponding to a
262 /// sparse/narrow warning, as expected by the CLI.
263 pub(crate) fn print_narrow_sparse_warnings(
264 narrow_warnings: &[sparse::SparseWarning],
265 sparse_warnings: &[sparse::SparseWarning],
266 ui: &Ui,
267 repo: &Repo,
268 ) -> Result<(), CommandError> {
269 for warning in narrow_warnings.iter().chain(sparse_warnings) {
270 match &warning {
271 sparse::SparseWarning::RootWarning { context, line } => {
272 let msg = format_bytes!(
273 b"warning: {} profile cannot use paths \
274 starting with /, ignoring {}\n",
275 context,
276 line
277 );
278 ui.write_stderr(&msg)?;
279 }
280 sparse::SparseWarning::ProfileNotFound { profile, rev } => {
281 let msg = format_bytes!(
282 b"warning: sparse profile '{}' not found \
283 in rev {} - ignoring it\n",
284 profile,
285 rev
286 );
287 ui.write_stderr(&msg)?;
288 }
289 sparse::SparseWarning::Pattern(e) => {
290 ui.write_stderr(&format_pattern_file_warning(e, repo))?;
291 }
292 }
293 }
294 Ok(())
295 }
@@ -23,7 +23,7 impl RelativizePaths {
23 23 let repo_root = repo.working_directory_path();
24 24 let repo_root = cwd.join(repo_root); // Make it absolute
25 25 let repo_root_hgpath =
26 HgPathBuf::from(get_bytes_from_path(repo_root.to_owned()));
26 HgPathBuf::from(get_bytes_from_path(&repo_root));
27 27
28 28 if let Ok(cwd_relative_to_repo) = cwd.strip_prefix(&repo_root) {
29 29 // The current directory is inside the repo, so we can work with
@@ -131,11 +131,7 from distutils.errors import (
131 131 DistutilsError,
132 132 DistutilsExecError,
133 133 )
134 from distutils.sysconfig import get_python_inc, get_config_var
135 from distutils.version import StrictVersion
136
137 # Explain to distutils.StrictVersion how our release candidates are versioned
138 StrictVersion.version_re = re.compile(r'^(\d+)\.(\d+)(\.(\d+))?-?(rc(\d+))?$')
134 from distutils.sysconfig import get_python_inc
139 135
140 136
141 137 def write_if_changed(path, content):
@@ -1504,11 +1500,13 class RustStandaloneExtension(RustExtens
1504 1500 target = [target_dir]
1505 1501 target.extend(self.name.split('.'))
1506 1502 target[-1] += DYLIB_SUFFIX
1503 target = os.path.join(*target)
1504 os.makedirs(os.path.dirname(target), exist_ok=True)
1507 1505 shutil.copy2(
1508 1506 os.path.join(
1509 1507 self.rusttargetdir, self.dylibname + self.rustdylibsuffix()
1510 1508 ),
1511 os.path.join(*target),
1509 target,
1512 1510 )
1513 1511
1514 1512
@@ -1653,6 +1651,10 packagedata = {
1653 1651 'mercurial.helptext.internals': [
1654 1652 '*.txt',
1655 1653 ],
1654 'mercurial.thirdparty.attr': [
1655 '*.pyi',
1656 'py.typed',
1657 ],
1656 1658 }
1657 1659
1658 1660
@@ -1738,39 +1740,6 if os.name == 'nt':
1738 1740 # form W.X.Y.Z, where W,X,Y,Z are numbers in the range 0..65535
1739 1741 setupversion = setupversion.split(r'+', 1)[0]
1740 1742
1741 if sys.platform == 'darwin' and os.path.exists('/usr/bin/xcodebuild'):
1742 version = runcmd(['/usr/bin/xcodebuild', '-version'], {})[1].splitlines()
1743 if version:
1744 version = version[0].decode('utf-8')
1745 xcode4 = version.startswith('Xcode') and StrictVersion(
1746 version.split()[1]
1747 ) >= StrictVersion('4.0')
1748 xcode51 = re.match(r'^Xcode\s+5\.1', version) is not None
1749 else:
1750 # xcodebuild returns empty on OS X Lion with XCode 4.3 not
1751 # installed, but instead with only command-line tools. Assume
1752 # that only happens on >= Lion, thus no PPC support.
1753 xcode4 = True
1754 xcode51 = False
1755
1756 # XCode 4.0 dropped support for ppc architecture, which is hardcoded in
1757 # distutils.sysconfig
1758 if xcode4:
1759 os.environ['ARCHFLAGS'] = ''
1760
1761 # XCode 5.1 changes clang such that it now fails to compile if the
1762 # -mno-fused-madd flag is passed, but the version of Python shipped with
1763 # OS X 10.9 Mavericks includes this flag. This causes problems in all
1764 # C extension modules, and a bug has been filed upstream at
1765 # http://bugs.python.org/issue21244. We also need to patch this here
1766 # so Mercurial can continue to compile in the meantime.
1767 if xcode51:
1768 cflags = get_config_var('CFLAGS')
1769 if cflags and re.search(r'-mno-fused-madd\b', cflags) is not None:
1770 os.environ['CFLAGS'] = (
1771 os.environ.get('CFLAGS', '') + ' -Qunused-arguments'
1772 )
1773
1774 1743 setup(
1775 1744 name='mercurial',
1776 1745 version=setupversion,
@@ -32,17 +32,10 import os
32 32 import re
33 33 import sys
34 34
35 # Python 3 adapters
36 ispy3 = sys.version_info[0] >= 3
37 if ispy3:
38 35
39 def iterbytes(s):
40 for i in range(len(s)):
41 yield s[i : i + 1]
42
43
44 else:
45 iterbytes = iter
36 def iterbytes(s):
37 for i in range(len(s)):
38 yield s[i : i + 1]
46 39
47 40
48 41 def visit(opts, filenames, outfile):
@@ -1,7 +1,7
1 1 #!/usr/bin/env python
2 2
3 """This does HTTP GET requests given a host:port and path and returns
4 a subset of the headers plus the body of the result."""
3 """This does HTTP requests (GET by default) given a host:port and path and
4 returns a subset of the headers plus the body of the result."""
5 5
6 6
7 7 import argparse
@@ -39,6 +39,7 parser.add_argument(
39 39 'value is <header>=<value>',
40 40 )
41 41 parser.add_argument('--bodyfile', help='Write HTTP response body to a file')
42 parser.add_argument('--method', default='GET', help='HTTP method to use')
42 43 parser.add_argument('host')
43 44 parser.add_argument('path')
44 45 parser.add_argument('show', nargs='*')
@@ -54,7 +55,7 requestheaders = args.requestheader
54 55 tag = None
55 56
56 57
57 def request(host, path, show):
58 def request(method, host, path, show):
58 59 assert not path.startswith('/'), path
59 60 global tag
60 61 headers = {}
@@ -68,7 +69,7 def request(host, path, show):
68 69 headers[key] = value
69 70
70 71 conn = httplib.HTTPConnection(host)
71 conn.request("GET", '/' + path, None, headers)
72 conn.request(method, '/' + path, None, headers)
72 73 response = conn.getresponse()
73 74 stdout.write(
74 75 b'%d %s\n' % (response.status, response.reason.encode('ascii'))
@@ -121,9 +122,9 def request(host, path, show):
121 122 return response.status
122 123
123 124
124 status = request(args.host, args.path, args.show)
125 status = request(args.method, args.host, args.path, args.show)
125 126 if twice:
126 status = request(args.host, args.path, args.show)
127 status = request(args.method, args.host, args.path, args.show)
127 128
128 129 if 200 <= status <= 305:
129 130 sys.exit(0)
@@ -15,10 +15,10 def wrapcapable(orig, self, name, *args,
15 15 if name in b'$CAP'.split(b' '):
16 16 return False
17 17 return orig(self, name, *args, **kwargs)
18 def wrappeer(orig, self):
18 def wrappeer(orig, self, path=None):
19 19 # Since we're disabling some newer features, we need to make sure local
20 20 # repos add in the legacy features again.
21 return localrepo.locallegacypeer(self)
21 return localrepo.locallegacypeer(self, path=path)
22 22 EOF
23 23
24 24 echo '[extensions]' >> $HGRCPATH
@@ -19,7 +19,7 def getflogheads(ui, repo, path):
19 19 Used for testing purpose
20 20 """
21 21
22 dest = urlutil.get_unique_pull_path(b'getflogheads', repo, ui)[0]
22 dest = urlutil.get_unique_pull_path_obj(b'getflogheads', ui)
23 23 peer = hg.peer(repo, {}, dest)
24 24
25 25 try:
@@ -272,14 +272,11 def checkportisavailable(port):
272 272 with contextlib.closing(socket.socket(family, socket.SOCK_STREAM)) as s:
273 273 s.bind(('localhost', port))
274 274 return True
275 except PermissionError:
276 return False
275 277 except socket.error as exc:
276 278 if WINDOWS and exc.errno == errno.WSAEACCES:
277 279 return False
278 # TODO: make a proper exception handler after dropping py2. This
279 # works because socket.error is an alias for OSError on py3,
280 # which is also the baseclass of PermissionError.
281 elif isinstance(exc, PermissionError):
282 return False
283 280 if exc.errno not in (
284 281 errno.EADDRINUSE,
285 282 errno.EADDRNOTAVAIL,
@@ -3255,6 +3252,18 class TestRunner:
3255 3252 # adds an extension to HGRC. Also include run-test.py directory to
3256 3253 # import modules like heredoctest.
3257 3254 pypath = [self._pythondir, self._testdir, runtestdir]
3255
3256 # Setting PYTHONPATH with an activated venv causes the modules installed
3257 # in it to be ignored. Therefore, include the related paths in sys.path
3258 # in PYTHONPATH.
3259 virtual_env = osenvironb.get(b"VIRTUAL_ENV")
3260 if virtual_env:
3261 virtual_env = os.path.join(virtual_env, b'')
3262 for p in sys.path:
3263 p = _sys2bytes(p)
3264 if p.startswith(virtual_env):
3265 pypath.append(p)
3266
3258 3267 # We have to augment PYTHONPATH, rather than simply replacing
3259 3268 # it, in case external libraries are only available via current
3260 3269 # PYTHONPATH. (In particular, the Subversion bindings on OS X
@@ -116,11 +116,11 Extension disabled for lack of a hook
116 116 bundle2-output-part: "phase-heads" 24 bytes payload
117 117 bundle2-input-bundle: with-transaction
118 118 bundle2-input-part: "replycaps" supported
119 bundle2-input-part: total payload size 207
119 bundle2-input-part: total payload size * (glob)
120 120 bundle2-input-part: "check:phases" supported
121 bundle2-input-part: total payload size 24
121 bundle2-input-part: total payload size * (glob)
122 122 bundle2-input-part: "check:updated-heads" supported
123 bundle2-input-part: total payload size 20
123 bundle2-input-part: total payload size * (glob)
124 124 bundle2-input-part: "changegroup" (params: 1 mandatory) supported
125 125 adding changesets
126 126 add changeset ef1ea85a6374
@@ -131,9 +131,9 Extension disabled for lack of a hook
131 131 adding foo/Bar/file.txt revisions
132 132 adding foo/file.txt revisions
133 133 adding quux/file.py revisions
134 bundle2-input-part: total payload size 1553
134 bundle2-input-part: total payload size * (glob)
135 135 bundle2-input-part: "phase-heads" supported
136 bundle2-input-part: total payload size 24
136 bundle2-input-part: total payload size * (glob)
137 137 bundle2-input-bundle: 5 parts total
138 138 updating the branch cache
139 139 added 3 changesets with 3 changes to 3 files
@@ -182,11 +182,11 Extension disabled for lack of acl.sourc
182 182 bundle2-output-part: "phase-heads" 24 bytes payload
183 183 bundle2-input-bundle: with-transaction
184 184 bundle2-input-part: "replycaps" supported
185 bundle2-input-part: total payload size 207
185 bundle2-input-part: total payload size * (glob)
186 186 bundle2-input-part: "check:phases" supported
187 bundle2-input-part: total payload size 24
187 bundle2-input-part: total payload size * (glob)
188 188 bundle2-input-part: "check:updated-heads" supported
189 bundle2-input-part: total payload size 20
189 bundle2-input-part: total payload size * (glob)
190 190 invalid branch cache (served): tip differs
191 191 bundle2-input-part: "changegroup" (params: 1 mandatory) supported
192 192 adding changesets
@@ -200,9 +200,9 Extension disabled for lack of acl.sourc
200 200 adding quux/file.py revisions
201 201 calling hook pretxnchangegroup.acl: hgext.acl.hook
202 202 acl: changes have source "push" - skipping
203 bundle2-input-part: total payload size 1553
203 bundle2-input-part: total payload size * (glob)
204 204 bundle2-input-part: "phase-heads" supported
205 bundle2-input-part: total payload size 24
205 bundle2-input-part: total payload size * (glob)
206 206 bundle2-input-bundle: 5 parts total
207 207 truncating cache/rbc-revs-v1 to 8
208 208 updating the branch cache
@@ -252,11 +252,11 No [acl.allow]/[acl.deny]
252 252 bundle2-output-part: "phase-heads" 24 bytes payload
253 253 bundle2-input-bundle: with-transaction
254 254 bundle2-input-part: "replycaps" supported
255 bundle2-input-part: total payload size 207
255 bundle2-input-part: total payload size * (glob)
256 256 bundle2-input-part: "check:phases" supported
257 bundle2-input-part: total payload size 24
257 bundle2-input-part: total payload size * (glob)
258 258 bundle2-input-part: "check:updated-heads" supported
259 bundle2-input-part: total payload size 20
259 bundle2-input-part: total payload size * (glob)
260 260 invalid branch cache (served): tip differs
261 261 bundle2-input-part: "changegroup" (params: 1 mandatory) supported
262 262 adding changesets
@@ -280,9 +280,9 No [acl.allow]/[acl.deny]
280 280 acl: path access granted: "f9cafe1212c8"
281 281 acl: branch access granted: "911600dab2ae" on branch "default"
282 282 acl: path access granted: "911600dab2ae"
283 bundle2-input-part: total payload size 1553
283 bundle2-input-part: total payload size * (glob)
284 284 bundle2-input-part: "phase-heads" supported
285 bundle2-input-part: total payload size 24
285 bundle2-input-part: total payload size * (glob)
286 286 bundle2-input-bundle: 5 parts total
287 287 truncating cache/rbc-revs-v1 to 8
288 288 updating the branch cache
@@ -332,11 +332,11 Empty [acl.allow]
332 332 bundle2-output-part: "phase-heads" 24 bytes payload
333 333 bundle2-input-bundle: with-transaction
334 334 bundle2-input-part: "replycaps" supported
335 bundle2-input-part: total payload size 207
335 bundle2-input-part: total payload size * (glob)
336 336 bundle2-input-part: "check:phases" supported
337 bundle2-input-part: total payload size 24
337 bundle2-input-part: total payload size * (glob)
338 338 bundle2-input-part: "check:updated-heads" supported
339 bundle2-input-part: total payload size 20
339 bundle2-input-part: total payload size * (glob)
340 340 invalid branch cache (served): tip differs
341 341 bundle2-input-part: "changegroup" (params: 1 mandatory) supported
342 342 adding changesets
@@ -356,8 +356,8 Empty [acl.allow]
356 356 acl: acl.deny not enabled
357 357 acl: branch access granted: "ef1ea85a6374" on branch "default"
358 358 error: pretxnchangegroup.acl hook failed: acl: user "fred" not allowed on "foo/file.txt" (changeset "ef1ea85a6374")
359 bundle2-input-part: total payload size 1553
360 bundle2-input-part: total payload size 24
359 bundle2-input-part: total payload size * (glob)
360 bundle2-input-part: total payload size * (glob)
361 361 bundle2-input-bundle: 5 parts total
362 362 transaction abort!
363 363 rollback completed
@@ -403,11 +403,11 fred is allowed inside foo/
403 403 bundle2-output-part: "phase-heads" 24 bytes payload
404 404 bundle2-input-bundle: with-transaction
405 405 bundle2-input-part: "replycaps" supported
406 bundle2-input-part: total payload size 207
406 bundle2-input-part: total payload size * (glob)
407 407 bundle2-input-part: "check:phases" supported
408 bundle2-input-part: total payload size 24
408 bundle2-input-part: total payload size * (glob)
409 409 bundle2-input-part: "check:updated-heads" supported
410 bundle2-input-part: total payload size 20
410 bundle2-input-part: total payload size * (glob)
411 411 invalid branch cache (served): tip differs
412 412 bundle2-input-part: "changegroup" (params: 1 mandatory) supported
413 413 adding changesets
@@ -431,8 +431,8 fred is allowed inside foo/
431 431 acl: path access granted: "f9cafe1212c8"
432 432 acl: branch access granted: "911600dab2ae" on branch "default"
433 433 error: pretxnchangegroup.acl hook failed: acl: user "fred" not allowed on "quux/file.py" (changeset "911600dab2ae")
434 bundle2-input-part: total payload size 1553
435 bundle2-input-part: total payload size 24
434 bundle2-input-part: total payload size * (glob)
435 bundle2-input-part: total payload size * (glob)
436 436 bundle2-input-bundle: 5 parts total
437 437 transaction abort!
438 438 rollback completed
@@ -478,11 +478,11 Empty [acl.deny]
478 478 bundle2-output-part: "phase-heads" 24 bytes payload
479 479 bundle2-input-bundle: with-transaction
480 480 bundle2-input-part: "replycaps" supported
481 bundle2-input-part: total payload size 207
481 bundle2-input-part: total payload size * (glob)
482 482 bundle2-input-part: "check:phases" supported
483 bundle2-input-part: total payload size 24
483 bundle2-input-part: total payload size * (glob)
484 484 bundle2-input-part: "check:updated-heads" supported
485 bundle2-input-part: total payload size 20
485 bundle2-input-part: total payload size * (glob)
486 486 invalid branch cache (served): tip differs
487 487 bundle2-input-part: "changegroup" (params: 1 mandatory) supported
488 488 adding changesets
@@ -502,8 +502,8 Empty [acl.deny]
502 502 acl: acl.deny enabled, 0 entries for user barney
503 503 acl: branch access granted: "ef1ea85a6374" on branch "default"
504 504 error: pretxnchangegroup.acl hook failed: acl: user "barney" not allowed on "foo/file.txt" (changeset "ef1ea85a6374")
505 bundle2-input-part: total payload size 1553
506 bundle2-input-part: total payload size 24
505 bundle2-input-part: total payload size * (glob)
506 bundle2-input-part: total payload size * (glob)
507 507 bundle2-input-bundle: 5 parts total
508 508 transaction abort!
509 509 rollback completed
@@ -550,11 +550,11 fred is allowed inside foo/, but not foo
550 550 bundle2-output-part: "phase-heads" 24 bytes payload
551 551 bundle2-input-bundle: with-transaction
552 552 bundle2-input-part: "replycaps" supported
553 bundle2-input-part: total payload size 207
553 bundle2-input-part: total payload size * (glob)
554 554 bundle2-input-part: "check:phases" supported
555 bundle2-input-part: total payload size 24
555 bundle2-input-part: total payload size * (glob)
556 556 bundle2-input-part: "check:updated-heads" supported
557 bundle2-input-part: total payload size 20
557 bundle2-input-part: total payload size * (glob)
558 558 invalid branch cache (served): tip differs
559 559 bundle2-input-part: "changegroup" (params: 1 mandatory) supported
560 560 adding changesets
@@ -578,8 +578,8 fred is allowed inside foo/, but not foo
578 578 acl: path access granted: "f9cafe1212c8"
579 579 acl: branch access granted: "911600dab2ae" on branch "default"
580 580 error: pretxnchangegroup.acl hook failed: acl: user "fred" not allowed on "quux/file.py" (changeset "911600dab2ae")
581 bundle2-input-part: total payload size 1553
582 bundle2-input-part: total payload size 24
581 bundle2-input-part: total payload size * (glob)
582 bundle2-input-part: total payload size * (glob)
583 583 bundle2-input-bundle: 5 parts total
584 584 transaction abort!
585 585 rollback completed
@@ -627,11 +627,11 fred is allowed inside foo/, but not foo
627 627 bundle2-output-part: "phase-heads" 24 bytes payload
628 628 bundle2-input-bundle: with-transaction
629 629 bundle2-input-part: "replycaps" supported
630 bundle2-input-part: total payload size 207
630 bundle2-input-part: total payload size * (glob)
631 631 bundle2-input-part: "check:phases" supported
632 bundle2-input-part: total payload size 24
632 bundle2-input-part: total payload size * (glob)
633 633 bundle2-input-part: "check:updated-heads" supported
634 bundle2-input-part: total payload size 20
634 bundle2-input-part: total payload size * (glob)
635 635 invalid branch cache (served): tip differs
636 636 bundle2-input-part: "changegroup" (params: 1 mandatory) supported
637 637 adding changesets
@@ -653,8 +653,8 fred is allowed inside foo/, but not foo
653 653 acl: path access granted: "ef1ea85a6374"
654 654 acl: branch access granted: "f9cafe1212c8" on branch "default"
655 655 error: pretxnchangegroup.acl hook failed: acl: user "fred" denied on "foo/Bar/file.txt" (changeset "f9cafe1212c8")
656 bundle2-input-part: total payload size 1553
657 bundle2-input-part: total payload size 24
656 bundle2-input-part: total payload size * (glob)
657 bundle2-input-part: total payload size * (glob)
658 658 bundle2-input-bundle: 5 parts total
659 659 transaction abort!
660 660 rollback completed
@@ -701,11 +701,11 fred is allowed inside foo/, but not foo
701 701 bundle2-output-part: "phase-heads" 24 bytes payload
702 702 bundle2-input-bundle: with-transaction
703 703 bundle2-input-part: "replycaps" supported
704 bundle2-input-part: total payload size 207
704 bundle2-input-part: total payload size * (glob)
705 705 bundle2-input-part: "check:phases" supported
706 bundle2-input-part: total payload size 24
706 bundle2-input-part: total payload size * (glob)
707 707 bundle2-input-part: "check:updated-heads" supported
708 bundle2-input-part: total payload size 20
708 bundle2-input-part: total payload size * (glob)
709 709 invalid branch cache (served): tip differs
710 710 bundle2-input-part: "changegroup" (params: 1 mandatory) supported
711 711 adding changesets
@@ -725,8 +725,8 fred is allowed inside foo/, but not foo
725 725 acl: acl.deny enabled, 0 entries for user barney
726 726 acl: branch access granted: "ef1ea85a6374" on branch "default"
727 727 error: pretxnchangegroup.acl hook failed: acl: user "barney" not allowed on "foo/file.txt" (changeset "ef1ea85a6374")
728 bundle2-input-part: total payload size 1553
729 bundle2-input-part: total payload size 24
728 bundle2-input-part: total payload size * (glob)
729 bundle2-input-part: total payload size * (glob)
730 730 bundle2-input-bundle: 5 parts total
731 731 transaction abort!
732 732 rollback completed
@@ -776,13 +776,13 fred is not blocked from moving bookmark
776 776 bundle2-output-part: "bookmarks" 37 bytes payload
777 777 bundle2-input-bundle: with-transaction
778 778 bundle2-input-part: "replycaps" supported
779 bundle2-input-part: total payload size 207
779 bundle2-input-part: total payload size * (glob)
780 780 bundle2-input-part: "check:bookmarks" supported
781 bundle2-input-part: total payload size 37
781 bundle2-input-part: total payload size * (glob)
782 782 bundle2-input-part: "check:phases" supported
783 bundle2-input-part: total payload size 24
783 bundle2-input-part: total payload size * (glob)
784 784 bundle2-input-part: "check:updated-heads" supported
785 bundle2-input-part: total payload size 20
785 bundle2-input-part: total payload size * (glob)
786 786 invalid branch cache (served): tip differs
787 787 bundle2-input-part: "changegroup" (params: 1 mandatory) supported
788 788 adding changesets
@@ -798,11 +798,11 fred is not blocked from moving bookmark
798 798 acl: acl.deny enabled, 2 entries for user fred
799 799 acl: branch access granted: "ef1ea85a6374" on branch "default"
800 800 acl: path access granted: "ef1ea85a6374"
801 bundle2-input-part: total payload size 520
801 bundle2-input-part: total payload size * (glob)
802 802 bundle2-input-part: "phase-heads" supported
803 bundle2-input-part: total payload size 24
803 bundle2-input-part: total payload size * (glob)
804 804 bundle2-input-part: "bookmarks" supported
805 bundle2-input-part: total payload size 37
805 bundle2-input-part: total payload size * (glob)
806 806 calling hook prepushkey.acl: hgext.acl.hook
807 807 acl: checking access for user "fred"
808 808 acl: acl.allow.bookmarks not enabled
@@ -865,13 +865,13 fred is not allowed to move bookmarks
865 865 bundle2-output-part: "bookmarks" 37 bytes payload
866 866 bundle2-input-bundle: with-transaction
867 867 bundle2-input-part: "replycaps" supported
868 bundle2-input-part: total payload size 207
868 bundle2-input-part: total payload size * (glob)
869 869 bundle2-input-part: "check:bookmarks" supported
870 bundle2-input-part: total payload size 37
870 bundle2-input-part: total payload size * (glob)
871 871 bundle2-input-part: "check:phases" supported
872 bundle2-input-part: total payload size 24
872 bundle2-input-part: total payload size * (glob)
873 873 bundle2-input-part: "check:updated-heads" supported
874 bundle2-input-part: total payload size 20
874 bundle2-input-part: total payload size * (glob)
875 875 invalid branch cache (served): tip differs
876 876 bundle2-input-part: "changegroup" (params: 1 mandatory) supported
877 877 adding changesets
@@ -887,11 +887,11 fred is not allowed to move bookmarks
887 887 acl: acl.deny enabled, 2 entries for user fred
888 888 acl: branch access granted: "ef1ea85a6374" on branch "default"
889 889 acl: path access granted: "ef1ea85a6374"
890 bundle2-input-part: total payload size 520
890 bundle2-input-part: total payload size * (glob)
891 891 bundle2-input-part: "phase-heads" supported
892 bundle2-input-part: total payload size 24
892 bundle2-input-part: total payload size * (glob)
893 893 bundle2-input-part: "bookmarks" supported
894 bundle2-input-part: total payload size 37
894 bundle2-input-part: total payload size * (glob)
895 895 calling hook prepushkey.acl: hgext.acl.hook
896 896 acl: checking access for user "fred"
897 897 acl: acl.allow.bookmarks not enabled
@@ -954,11 +954,11 barney is allowed everywhere
954 954 bundle2-output-part: "phase-heads" 24 bytes payload
955 955 bundle2-input-bundle: with-transaction
956 956 bundle2-input-part: "replycaps" supported
957 bundle2-input-part: total payload size 207
957 bundle2-input-part: total payload size * (glob)
958 958 bundle2-input-part: "check:phases" supported
959 bundle2-input-part: total payload size 24
959 bundle2-input-part: total payload size * (glob)
960 960 bundle2-input-part: "check:updated-heads" supported
961 bundle2-input-part: total payload size 20
961 bundle2-input-part: total payload size * (glob)
962 962 invalid branch cache (served): tip differs
963 963 bundle2-input-part: "changegroup" (params: 1 mandatory) supported
964 964 adding changesets
@@ -982,9 +982,9 barney is allowed everywhere
982 982 acl: path access granted: "f9cafe1212c8"
983 983 acl: branch access granted: "911600dab2ae" on branch "default"
984 984 acl: path access granted: "911600dab2ae"
985 bundle2-input-part: total payload size 1553
985 bundle2-input-part: total payload size * (glob)
986 986 bundle2-input-part: "phase-heads" supported
987 bundle2-input-part: total payload size 24
987 bundle2-input-part: total payload size * (glob)
988 988 bundle2-input-bundle: 5 parts total
989 989 updating the branch cache
990 990 added 3 changesets with 3 changes to 3 files
@@ -1040,11 +1040,11 wilma can change files with a .txt exten
1040 1040 bundle2-output-part: "phase-heads" 24 bytes payload
1041 1041 bundle2-input-bundle: with-transaction
1042 1042 bundle2-input-part: "replycaps" supported
1043 bundle2-input-part: total payload size 207
1043 bundle2-input-part: total payload size * (glob)
1044 1044 bundle2-input-part: "check:phases" supported
1045 bundle2-input-part: total payload size 24
1045 bundle2-input-part: total payload size * (glob)
1046 1046 bundle2-input-part: "check:updated-heads" supported
1047 bundle2-input-part: total payload size 20
1047 bundle2-input-part: total payload size * (glob)
1048 1048 invalid branch cache (served): tip differs
1049 1049 bundle2-input-part: "changegroup" (params: 1 mandatory) supported
1050 1050 adding changesets
@@ -1068,8 +1068,8 wilma can change files with a .txt exten
1068 1068 acl: path access granted: "f9cafe1212c8"
1069 1069 acl: branch access granted: "911600dab2ae" on branch "default"
1070 1070 error: pretxnchangegroup.acl hook failed: acl: user "wilma" not allowed on "quux/file.py" (changeset "911600dab2ae")
1071 bundle2-input-part: total payload size 1553
1072 bundle2-input-part: total payload size 24
1071 bundle2-input-part: total payload size * (glob)
1072 bundle2-input-part: total payload size * (glob)
1073 1073 bundle2-input-bundle: 5 parts total
1074 1074 transaction abort!
1075 1075 rollback completed
@@ -1124,11 +1124,11 file specified by acl.config does not ex
1124 1124 bundle2-output-part: "phase-heads" 24 bytes payload
1125 1125 bundle2-input-bundle: with-transaction
1126 1126 bundle2-input-part: "replycaps" supported
1127 bundle2-input-part: total payload size 207
1127 bundle2-input-part: total payload size * (glob)
1128 1128 bundle2-input-part: "check:phases" supported
1129 bundle2-input-part: total payload size 24
1129 bundle2-input-part: total payload size * (glob)
1130 1130 bundle2-input-part: "check:updated-heads" supported
1131 bundle2-input-part: total payload size 20
1131 bundle2-input-part: total payload size * (glob)
1132 1132 invalid branch cache (served): tip differs
1133 1133 bundle2-input-part: "changegroup" (params: 1 mandatory) supported
1134 1134 adding changesets
@@ -1143,8 +1143,8 file specified by acl.config does not ex
1143 1143 calling hook pretxnchangegroup.acl: hgext.acl.hook
1144 1144 acl: checking access for user "barney"
1145 1145 error: pretxnchangegroup.acl hook raised an exception: [Errno *] * (glob)
1146 bundle2-input-part: total payload size 1553
1147 bundle2-input-part: total payload size 24
1146 bundle2-input-part: total payload size * (glob)
1147 bundle2-input-part: total payload size * (glob)
1148 1148 bundle2-input-bundle: 5 parts total
1149 1149 transaction abort!
1150 1150 rollback completed
@@ -1202,11 +1202,11 betty is allowed inside foo/ by a acl.co
1202 1202 bundle2-output-part: "phase-heads" 24 bytes payload
1203 1203 bundle2-input-bundle: with-transaction
1204 1204 bundle2-input-part: "replycaps" supported
1205 bundle2-input-part: total payload size 207
1205 bundle2-input-part: total payload size * (glob)
1206 1206 bundle2-input-part: "check:phases" supported
1207 bundle2-input-part: total payload size 24
1207 bundle2-input-part: total payload size * (glob)
1208 1208 bundle2-input-part: "check:updated-heads" supported
1209 bundle2-input-part: total payload size 20
1209 bundle2-input-part: total payload size * (glob)
1210 1210 invalid branch cache (served): tip differs
1211 1211 bundle2-input-part: "changegroup" (params: 1 mandatory) supported
1212 1212 adding changesets
@@ -1230,8 +1230,8 betty is allowed inside foo/ by a acl.co
1230 1230 acl: path access granted: "f9cafe1212c8"
1231 1231 acl: branch access granted: "911600dab2ae" on branch "default"
1232 1232 error: pretxnchangegroup.acl hook failed: acl: user "betty" not allowed on "quux/file.py" (changeset "911600dab2ae")
1233 bundle2-input-part: total payload size 1553
1234 bundle2-input-part: total payload size 24
1233 bundle2-input-part: total payload size * (glob)
1234 bundle2-input-part: total payload size * (glob)
1235 1235 bundle2-input-bundle: 5 parts total
1236 1236 transaction abort!
1237 1237 rollback completed
@@ -1291,11 +1291,11 acl.config can set only [acl.allow]/[acl
1291 1291 bundle2-output-part: "phase-heads" 24 bytes payload
1292 1292 bundle2-input-bundle: with-transaction
1293 1293 bundle2-input-part: "replycaps" supported
1294 bundle2-input-part: total payload size 207
1294 bundle2-input-part: total payload size * (glob)
1295 1295 bundle2-input-part: "check:phases" supported
1296 bundle2-input-part: total payload size 24
1296 bundle2-input-part: total payload size * (glob)
1297 1297 bundle2-input-part: "check:updated-heads" supported
1298 bundle2-input-part: total payload size 20
1298 bundle2-input-part: total payload size * (glob)
1299 1299 invalid branch cache (served): tip differs
1300 1300 bundle2-input-part: "changegroup" (params: 1 mandatory) supported
1301 1301 adding changesets
@@ -1319,9 +1319,9 acl.config can set only [acl.allow]/[acl
1319 1319 acl: path access granted: "f9cafe1212c8"
1320 1320 acl: branch access granted: "911600dab2ae" on branch "default"
1321 1321 acl: path access granted: "911600dab2ae"
1322 bundle2-input-part: total payload size 1553
1322 bundle2-input-part: total payload size * (glob)
1323 1323 bundle2-input-part: "phase-heads" supported
1324 bundle2-input-part: total payload size 24
1324 bundle2-input-part: total payload size * (glob)
1325 1325 bundle2-input-bundle: 5 parts total
1326 1326 updating the branch cache
1327 1327 added 3 changesets with 3 changes to 3 files
@@ -1381,11 +1381,11 fred is always allowed
1381 1381 bundle2-output-part: "phase-heads" 24 bytes payload
1382 1382 bundle2-input-bundle: with-transaction
1383 1383 bundle2-input-part: "replycaps" supported
1384 bundle2-input-part: total payload size 207
1384 bundle2-input-part: total payload size * (glob)
1385 1385 bundle2-input-part: "check:phases" supported
1386 bundle2-input-part: total payload size 24
1386 bundle2-input-part: total payload size * (glob)
1387 1387 bundle2-input-part: "check:updated-heads" supported
1388 bundle2-input-part: total payload size 20
1388 bundle2-input-part: total payload size * (glob)
1389 1389 invalid branch cache (served): tip differs
1390 1390 bundle2-input-part: "changegroup" (params: 1 mandatory) supported
1391 1391 adding changesets
@@ -1409,9 +1409,9 fred is always allowed
1409 1409 acl: path access granted: "f9cafe1212c8"
1410 1410 acl: branch access granted: "911600dab2ae" on branch "default"
1411 1411 acl: path access granted: "911600dab2ae"
1412 bundle2-input-part: total payload size 1553
1412 bundle2-input-part: total payload size * (glob)
1413 1413 bundle2-input-part: "phase-heads" supported
1414 bundle2-input-part: total payload size 24
1414 bundle2-input-part: total payload size * (glob)
1415 1415 bundle2-input-bundle: 5 parts total
1416 1416 truncating cache/rbc-revs-v1 to 8
1417 1417 updating the branch cache
@@ -1468,11 +1468,11 no one is allowed inside foo/Bar/
1468 1468 bundle2-output-part: "phase-heads" 24 bytes payload
1469 1469 bundle2-input-bundle: with-transaction
1470 1470 bundle2-input-part: "replycaps" supported
1471 bundle2-input-part: total payload size 207
1471 bundle2-input-part: total payload size * (glob)
1472 1472 bundle2-input-part: "check:phases" supported
1473 bundle2-input-part: total payload size 24
1473 bundle2-input-part: total payload size * (glob)
1474 1474 bundle2-input-part: "check:updated-heads" supported
1475 bundle2-input-part: total payload size 20
1475 bundle2-input-part: total payload size * (glob)
1476 1476 invalid branch cache (served): tip differs
1477 1477 bundle2-input-part: "changegroup" (params: 1 mandatory) supported
1478 1478 adding changesets
@@ -1494,8 +1494,8 no one is allowed inside foo/Bar/
1494 1494 acl: path access granted: "ef1ea85a6374"
1495 1495 acl: branch access granted: "f9cafe1212c8" on branch "default"
1496 1496 error: pretxnchangegroup.acl hook failed: acl: user "fred" denied on "foo/Bar/file.txt" (changeset "f9cafe1212c8")
1497 bundle2-input-part: total payload size 1553
1498 bundle2-input-part: total payload size 24
1497 bundle2-input-part: total payload size * (glob)
1498 bundle2-input-part: total payload size * (glob)
1499 1499 bundle2-input-bundle: 5 parts total
1500 1500 transaction abort!
1501 1501 rollback completed
@@ -1551,11 +1551,11 OS-level groups
1551 1551 bundle2-output-part: "phase-heads" 24 bytes payload
1552 1552 bundle2-input-bundle: with-transaction
1553 1553 bundle2-input-part: "replycaps" supported
1554 bundle2-input-part: total payload size 207
1554 bundle2-input-part: total payload size * (glob)
1555 1555 bundle2-input-part: "check:phases" supported
1556 bundle2-input-part: total payload size 24
1556 bundle2-input-part: total payload size * (glob)
1557 1557 bundle2-input-part: "check:updated-heads" supported
1558 bundle2-input-part: total payload size 20
1558 bundle2-input-part: total payload size * (glob)
1559 1559 invalid branch cache (served): tip differs
1560 1560 bundle2-input-part: "changegroup" (params: 1 mandatory) supported
1561 1561 adding changesets
@@ -1580,9 +1580,9 OS-level groups
1580 1580 acl: path access granted: "f9cafe1212c8"
1581 1581 acl: branch access granted: "911600dab2ae" on branch "default"
1582 1582 acl: path access granted: "911600dab2ae"
1583 bundle2-input-part: total payload size 1553
1583 bundle2-input-part: total payload size * (glob)
1584 1584 bundle2-input-part: "phase-heads" supported
1585 bundle2-input-part: total payload size 24
1585 bundle2-input-part: total payload size * (glob)
1586 1586 bundle2-input-bundle: 5 parts total
1587 1587 updating the branch cache
1588 1588 added 3 changesets with 3 changes to 3 files
@@ -1638,11 +1638,11 OS-level groups
1638 1638 bundle2-output-part: "phase-heads" 24 bytes payload
1639 1639 bundle2-input-bundle: with-transaction
1640 1640 bundle2-input-part: "replycaps" supported
1641 bundle2-input-part: total payload size 207
1641 bundle2-input-part: total payload size * (glob)
1642 1642 bundle2-input-part: "check:phases" supported
1643 bundle2-input-part: total payload size 24
1643 bundle2-input-part: total payload size * (glob)
1644 1644 bundle2-input-part: "check:updated-heads" supported
1645 bundle2-input-part: total payload size 20
1645 bundle2-input-part: total payload size * (glob)
1646 1646 invalid branch cache (served): tip differs
1647 1647 bundle2-input-part: "changegroup" (params: 1 mandatory) supported
1648 1648 adding changesets
@@ -1666,8 +1666,8 OS-level groups
1666 1666 acl: path access granted: "ef1ea85a6374"
1667 1667 acl: branch access granted: "f9cafe1212c8" on branch "default"
1668 1668 error: pretxnchangegroup.acl hook failed: acl: user "fred" denied on "foo/Bar/file.txt" (changeset "f9cafe1212c8")
1669 bundle2-input-part: total payload size 1553
1670 bundle2-input-part: total payload size 24
1669 bundle2-input-part: total payload size * (glob)
1670 bundle2-input-part: total payload size * (glob)
1671 1671 bundle2-input-bundle: 5 parts total
1672 1672 transaction abort!
1673 1673 rollback completed
@@ -1761,11 +1761,11 No branch acls specified
1761 1761 bundle2-output-part: "phase-heads" 48 bytes payload
1762 1762 bundle2-input-bundle: with-transaction
1763 1763 bundle2-input-part: "replycaps" supported
1764 bundle2-input-part: total payload size 207
1764 bundle2-input-part: total payload size * (glob)
1765 1765 bundle2-input-part: "check:phases" supported
1766 bundle2-input-part: total payload size 48
1766 bundle2-input-part: total payload size * (glob)
1767 1767 bundle2-input-part: "check:updated-heads" supported
1768 bundle2-input-part: total payload size 40
1768 bundle2-input-part: total payload size * (glob)
1769 1769 bundle2-input-part: "changegroup" (params: 1 mandatory) supported
1770 1770 adding changesets
1771 1771 add changeset ef1ea85a6374
@@ -1792,9 +1792,9 No branch acls specified
1792 1792 acl: path access granted: "911600dab2ae"
1793 1793 acl: branch access granted: "e8fc755d4d82" on branch "foobar"
1794 1794 acl: path access granted: "e8fc755d4d82"
1795 bundle2-input-part: total payload size 2068
1795 bundle2-input-part: total payload size * (glob)
1796 1796 bundle2-input-part: "phase-heads" supported
1797 bundle2-input-part: total payload size 48
1797 bundle2-input-part: total payload size * (glob)
1798 1798 bundle2-input-bundle: 5 parts total
1799 1799 updating the branch cache
1800 1800 invalid branch cache (served.hidden): tip differs
@@ -1848,11 +1848,11 Branch acl deny test
1848 1848 bundle2-output-part: "phase-heads" 48 bytes payload
1849 1849 bundle2-input-bundle: with-transaction
1850 1850 bundle2-input-part: "replycaps" supported
1851 bundle2-input-part: total payload size 207
1851 bundle2-input-part: total payload size * (glob)
1852 1852 bundle2-input-part: "check:phases" supported
1853 bundle2-input-part: total payload size 48
1853 bundle2-input-part: total payload size * (glob)
1854 1854 bundle2-input-part: "check:updated-heads" supported
1855 bundle2-input-part: total payload size 40
1855 bundle2-input-part: total payload size * (glob)
1856 1856 bundle2-input-part: "changegroup" (params: 1 mandatory) supported
1857 1857 adding changesets
1858 1858 add changeset ef1ea85a6374
@@ -1878,8 +1878,8 Branch acl deny test
1878 1878 acl: branch access granted: "911600dab2ae" on branch "default"
1879 1879 acl: path access granted: "911600dab2ae"
1880 1880 error: pretxnchangegroup.acl hook failed: acl: user "astro" denied on branch "foobar" (changeset "e8fc755d4d82")
1881 bundle2-input-part: total payload size 2068
1882 bundle2-input-part: total payload size 48
1881 bundle2-input-part: total payload size * (glob)
1882 bundle2-input-part: total payload size * (glob)
1883 1883 bundle2-input-bundle: 5 parts total
1884 1884 transaction abort!
1885 1885 rollback completed
@@ -1926,11 +1926,11 Branch acl empty allow test
1926 1926 bundle2-output-part: "phase-heads" 48 bytes payload
1927 1927 bundle2-input-bundle: with-transaction
1928 1928 bundle2-input-part: "replycaps" supported
1929 bundle2-input-part: total payload size 207
1929 bundle2-input-part: total payload size * (glob)
1930 1930 bundle2-input-part: "check:phases" supported
1931 bundle2-input-part: total payload size 48
1931 bundle2-input-part: total payload size * (glob)
1932 1932 bundle2-input-part: "check:updated-heads" supported
1933 bundle2-input-part: total payload size 40
1933 bundle2-input-part: total payload size * (glob)
1934 1934 bundle2-input-part: "changegroup" (params: 1 mandatory) supported
1935 1935 adding changesets
1936 1936 add changeset ef1ea85a6374
@@ -1950,8 +1950,8 Branch acl empty allow test
1950 1950 acl: acl.allow not enabled
1951 1951 acl: acl.deny not enabled
1952 1952 error: pretxnchangegroup.acl hook failed: acl: user "astro" not allowed on branch "default" (changeset "ef1ea85a6374")
1953 bundle2-input-part: total payload size 2068
1954 bundle2-input-part: total payload size 48
1953 bundle2-input-part: total payload size * (glob)
1954 bundle2-input-part: total payload size * (glob)
1955 1955 bundle2-input-bundle: 5 parts total
1956 1956 transaction abort!
1957 1957 rollback completed
@@ -2000,11 +2000,11 Branch acl allow other
2000 2000 bundle2-output-part: "phase-heads" 48 bytes payload
2001 2001 bundle2-input-bundle: with-transaction
2002 2002 bundle2-input-part: "replycaps" supported
2003 bundle2-input-part: total payload size 207
2003 bundle2-input-part: total payload size * (glob)
2004 2004 bundle2-input-part: "check:phases" supported
2005 bundle2-input-part: total payload size 48
2005 bundle2-input-part: total payload size * (glob)
2006 2006 bundle2-input-part: "check:updated-heads" supported
2007 bundle2-input-part: total payload size 40
2007 bundle2-input-part: total payload size * (glob)
2008 2008 bundle2-input-part: "changegroup" (params: 1 mandatory) supported
2009 2009 adding changesets
2010 2010 add changeset ef1ea85a6374
@@ -2024,8 +2024,8 Branch acl allow other
2024 2024 acl: acl.allow not enabled
2025 2025 acl: acl.deny not enabled
2026 2026 error: pretxnchangegroup.acl hook failed: acl: user "astro" not allowed on branch "default" (changeset "ef1ea85a6374")
2027 bundle2-input-part: total payload size 2068
2028 bundle2-input-part: total payload size 48
2027 bundle2-input-part: total payload size * (glob)
2028 bundle2-input-part: total payload size * (glob)
2029 2029 bundle2-input-bundle: 5 parts total
2030 2030 transaction abort!
2031 2031 rollback completed
@@ -2068,11 +2068,11 Branch acl allow other
2068 2068 bundle2-output-part: "phase-heads" 48 bytes payload
2069 2069 bundle2-input-bundle: with-transaction
2070 2070 bundle2-input-part: "replycaps" supported
2071 bundle2-input-part: total payload size 207
2071 bundle2-input-part: total payload size * (glob)
2072 2072 bundle2-input-part: "check:phases" supported
2073 bundle2-input-part: total payload size 48
2073 bundle2-input-part: total payload size * (glob)
2074 2074 bundle2-input-part: "check:updated-heads" supported
2075 bundle2-input-part: total payload size 40
2075 bundle2-input-part: total payload size * (glob)
2076 2076 bundle2-input-part: "changegroup" (params: 1 mandatory) supported
2077 2077 adding changesets
2078 2078 add changeset ef1ea85a6374
@@ -2099,9 +2099,9 Branch acl allow other
2099 2099 acl: path access granted: "911600dab2ae"
2100 2100 acl: branch access granted: "e8fc755d4d82" on branch "foobar"
2101 2101 acl: path access granted: "e8fc755d4d82"
2102 bundle2-input-part: total payload size 2068
2102 bundle2-input-part: total payload size * (glob)
2103 2103 bundle2-input-part: "phase-heads" supported
2104 bundle2-input-part: total payload size 48
2104 bundle2-input-part: total payload size * (glob)
2105 2105 bundle2-input-bundle: 5 parts total
2106 2106 updating the branch cache
2107 2107 invalid branch cache (served.hidden): tip differs
@@ -2160,11 +2160,11 push foobar into the remote
2160 2160 bundle2-output-part: "phase-heads" 48 bytes payload
2161 2161 bundle2-input-bundle: with-transaction
2162 2162 bundle2-input-part: "replycaps" supported
2163 bundle2-input-part: total payload size 207
2163 bundle2-input-part: total payload size * (glob)
2164 2164 bundle2-input-part: "check:phases" supported
2165 bundle2-input-part: total payload size 48
2165 bundle2-input-part: total payload size * (glob)
2166 2166 bundle2-input-part: "check:updated-heads" supported
2167 bundle2-input-part: total payload size 40
2167 bundle2-input-part: total payload size * (glob)
2168 2168 bundle2-input-part: "changegroup" (params: 1 mandatory) supported
2169 2169 adding changesets
2170 2170 add changeset ef1ea85a6374
@@ -2191,9 +2191,9 push foobar into the remote
2191 2191 acl: path access granted: "911600dab2ae"
2192 2192 acl: branch access granted: "e8fc755d4d82" on branch "foobar"
2193 2193 acl: path access granted: "e8fc755d4d82"
2194 bundle2-input-part: total payload size 2068
2194 bundle2-input-part: total payload size * (glob)
2195 2195 bundle2-input-part: "phase-heads" supported
2196 bundle2-input-part: total payload size 48
2196 bundle2-input-part: total payload size * (glob)
2197 2197 bundle2-input-bundle: 5 parts total
2198 2198 updating the branch cache
2199 2199 invalid branch cache (served.hidden): tip differs
@@ -2251,11 +2251,11 Branch acl conflicting deny
2251 2251 bundle2-output-part: "phase-heads" 48 bytes payload
2252 2252 bundle2-input-bundle: with-transaction
2253 2253 bundle2-input-part: "replycaps" supported
2254 bundle2-input-part: total payload size 207
2254 bundle2-input-part: total payload size * (glob)
2255 2255 bundle2-input-part: "check:phases" supported
2256 bundle2-input-part: total payload size 48
2256 bundle2-input-part: total payload size * (glob)
2257 2257 bundle2-input-part: "check:updated-heads" supported
2258 bundle2-input-part: total payload size 40
2258 bundle2-input-part: total payload size * (glob)
2259 2259 bundle2-input-part: "changegroup" (params: 1 mandatory) supported
2260 2260 adding changesets
2261 2261 add changeset ef1ea85a6374
@@ -2275,8 +2275,8 Branch acl conflicting deny
2275 2275 acl: acl.allow not enabled
2276 2276 acl: acl.deny not enabled
2277 2277 error: pretxnchangegroup.acl hook failed: acl: user "george" denied on branch "default" (changeset "ef1ea85a6374")
2278 bundle2-input-part: total payload size 2068
2279 bundle2-input-part: total payload size 48
2278 bundle2-input-part: total payload size * (glob)
2279 bundle2-input-part: total payload size * (glob)
2280 2280 bundle2-input-bundle: 5 parts total
2281 2281 transaction abort!
2282 2282 rollback completed
@@ -2324,11 +2324,11 User 'astro' must not be denied
2324 2324 bundle2-output-part: "phase-heads" 48 bytes payload
2325 2325 bundle2-input-bundle: with-transaction
2326 2326 bundle2-input-part: "replycaps" supported
2327 bundle2-input-part: total payload size 207
2327 bundle2-input-part: total payload size * (glob)
2328 2328 bundle2-input-part: "check:phases" supported
2329 bundle2-input-part: total payload size 48
2329 bundle2-input-part: total payload size * (glob)
2330 2330 bundle2-input-part: "check:updated-heads" supported
2331 bundle2-input-part: total payload size 40
2331 bundle2-input-part: total payload size * (glob)
2332 2332 bundle2-input-part: "changegroup" (params: 1 mandatory) supported
2333 2333 adding changesets
2334 2334 add changeset ef1ea85a6374
@@ -2355,9 +2355,9 User 'astro' must not be denied
2355 2355 acl: path access granted: "911600dab2ae"
2356 2356 acl: branch access granted: "e8fc755d4d82" on branch "foobar"
2357 2357 acl: path access granted: "e8fc755d4d82"
2358 bundle2-input-part: total payload size 2068
2358 bundle2-input-part: total payload size * (glob)
2359 2359 bundle2-input-part: "phase-heads" supported
2360 bundle2-input-part: total payload size 48
2360 bundle2-input-part: total payload size * (glob)
2361 2361 bundle2-input-bundle: 5 parts total
2362 2362 updating the branch cache
2363 2363 invalid branch cache (served.hidden): tip differs
@@ -2409,11 +2409,11 Non-astro users must be denied
2409 2409 bundle2-output-part: "phase-heads" 48 bytes payload
2410 2410 bundle2-input-bundle: with-transaction
2411 2411 bundle2-input-part: "replycaps" supported
2412 bundle2-input-part: total payload size 207
2412 bundle2-input-part: total payload size * (glob)
2413 2413 bundle2-input-part: "check:phases" supported
2414 bundle2-input-part: total payload size 48
2414 bundle2-input-part: total payload size * (glob)
2415 2415 bundle2-input-part: "check:updated-heads" supported
2416 bundle2-input-part: total payload size 40
2416 bundle2-input-part: total payload size * (glob)
2417 2417 bundle2-input-part: "changegroup" (params: 1 mandatory) supported
2418 2418 adding changesets
2419 2419 add changeset ef1ea85a6374
@@ -2433,8 +2433,8 Non-astro users must be denied
2433 2433 acl: acl.allow not enabled
2434 2434 acl: acl.deny not enabled
2435 2435 error: pretxnchangegroup.acl hook failed: acl: user "george" denied on branch "default" (changeset "ef1ea85a6374")
2436 bundle2-input-part: total payload size 2068
2437 bundle2-input-part: total payload size 48
2436 bundle2-input-part: total payload size * (glob)
2437 bundle2-input-part: total payload size * (glob)
2438 2438 bundle2-input-bundle: 5 parts total
2439 2439 transaction abort!
2440 2440 rollback completed
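When expected output drifts like this across dozens of transcripts, the blocks are normally regenerated rather than edited by hand: Mercurial's in-tree runner can re-run a test and offer to fold the observed output back into the .t file. A sketch, assuming the standard runner in the tests/ directory:

  $ cd tests
  $ ./run-tests.py --interactive test-acl.t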
@@ -119,6 +119,7 help
119 119 --close-branch mark a branch head as closed
120 120 --amend amend the parent of the working directory
121 121 -s --secret use the secret phase for committing
122 --draft use the draft phase for committing
122 123 -e --edit invoke editor on commit messages
123 124 -i --interactive use interactive mode
124 125 -I --include PATTERN [+] include names matching the given patterns
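The new flag rounds out the phase controls at commit time: a fresh changeset normally lands in the phase named by the `phases.new-commit` config option (draft by default), `-s/--secret` forces secret, and `--draft` presumably forces draft even under a secret-by-default configuration. A hedged sketch of the interaction:

  $ hg --config phases.new-commit=secret commit --draft -m 'stays draft'
  $ hg log -r . -T '{phase}\n'
  draft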
@@ -190,6 +190,7 broken repositories will refuse to push
190 190 checking manifests
191 191 crosschecking files in changesets and manifests
192 192 checking files
193 checking dirstate
193 194 checked 5 changesets with 12 changes to 4 files
194 195 checking subrepo links
195 196 subrepo 't' not found in revision 04aa62396ec6
@@ -560,6 +560,12 Close branch
560 560 close=1
561 561 phase=secret
562 562
563 `hg amend --draft` sets phase to draft
564
565 $ hg amend --draft -m declassified
566 $ hg log --limit 1 -T 'phase={phase}\n'
567 phase=draft
568
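Before the flag existed, the same effect took a separate phase move after the amend; `hg phase --draft` on the result is the rough equivalent (secret to draft moves toward public, so no --force is needed):

  $ hg amend -m declassified
  $ hg phase --draft -r .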
563 569 $ cd ..
564 570
565 571 Corner case of amend from issue6157:
@@ -53,7 +53,7 another bad extension
53 53
54 54 $ hg -q help help 2>&1 |grep extension
55 55 *** failed to import extension "badext" from $TESTTMP/badext.py: bit bucket overflow
56 *** failed to import extension "badext2": No module named 'badext2' (py3 !)
56 *** failed to import extension "badext2": No module named 'badext2'
57 57
58 58 show traceback
59 59
@@ -61,15 +61,15 show traceback
61 61 *** failed to import extension "badext" from $TESTTMP/badext.py: bit bucket overflow
62 62 Traceback (most recent call last):
63 63 Exception: bit bucket overflow
64 *** failed to import extension "badext2": No module named 'badext2' (py3 !)
64 *** failed to import extension "badext2": No module named 'badext2'
65 65 Traceback (most recent call last):
66 ImportError: No module named 'hgext.badext2' (py3 no-py36 !)
66 ImportError: No module named 'hgext.badext2' (no-py36 !)
67 67 ModuleNotFoundError: No module named 'hgext.badext2' (py36 !)
68 Traceback (most recent call last): (py3 !)
69 ImportError: No module named 'hgext3rd.badext2' (py3 no-py36 !)
68 Traceback (most recent call last):
69 ImportError: No module named 'hgext3rd.badext2' (no-py36 !)
70 70 ModuleNotFoundError: No module named 'hgext3rd.badext2' (py36 !)
71 Traceback (most recent call last): (py3 !)
72 ImportError: No module named 'badext2' (py3 no-py36 !)
71 Traceback (most recent call last):
72 ImportError: No module named 'badext2' (no-py36 !)
73 73 ModuleNotFoundError: No module named 'badext2' (py36 !)
74 74
75 75 names of extensions failed to load can be accessed via extensions.notloaded()
@@ -111,25 +111,25 show traceback for ImportError of hgext.
111 111 YYYY/MM/DD HH:MM:SS (PID)> - loading extension: badext2
112 112 YYYY/MM/DD HH:MM:SS (PID)> - could not import hgext.badext2 (No module named *badext2*): trying hgext3rd.badext2 (glob)
113 113 Traceback (most recent call last):
114 ImportError: No module named 'hgext.badext2' (py3 no-py36 !)
114 ImportError: No module named 'hgext.badext2' (no-py36 !)
115 115 ModuleNotFoundError: No module named 'hgext.badext2' (py36 !)
116 116 YYYY/MM/DD HH:MM:SS (PID)> - could not import hgext3rd.badext2 (No module named *badext2*): trying badext2 (glob)
117 117 Traceback (most recent call last):
118 ImportError: No module named 'hgext.badext2' (py3 no-py36 !)
118 ImportError: No module named 'hgext.badext2' (no-py36 !)
119 119 ModuleNotFoundError: No module named 'hgext.badext2' (py36 !)
120 Traceback (most recent call last): (py3 !)
121 ImportError: No module named 'hgext3rd.badext2' (py3 no-py36 !)
120 Traceback (most recent call last):
121 ImportError: No module named 'hgext3rd.badext2' (no-py36 !)
122 122 ModuleNotFoundError: No module named 'hgext3rd.badext2' (py36 !)
123 *** failed to import extension "badext2": No module named 'badext2' (py3 !)
123 *** failed to import extension "badext2": No module named 'badext2'
124 124 Traceback (most recent call last):
125 ImportError: No module named 'hgext.badext2' (py3 no-py36 !)
125 ImportError: No module named 'hgext.badext2' (no-py36 !)
126 126 ModuleNotFoundError: No module named 'hgext.badext2' (py36 !)
127 Traceback (most recent call last): (py3 !)
128 ImportError: No module named 'hgext3rd.badext2' (py3 no-py36 !)
127 Traceback (most recent call last):
128 ImportError: No module named 'hgext3rd.badext2' (no-py36 !)
129 129 ModuleNotFoundError: No module named 'hgext3rd.badext2' (py36 !)
130 Traceback (most recent call last): (py3 !)
130 Traceback (most recent call last):
131 131 ModuleNotFoundError: No module named 'badext2' (py36 !)
132 ImportError: No module named 'badext2' (py3 no-py36 !)
132 ImportError: No module named 'badext2' (no-py36 !)
133 133 YYYY/MM/DD HH:MM:SS (PID)> > loaded 2 extensions, total time * (glob)
134 134 YYYY/MM/DD HH:MM:SS (PID)> - loading configtable attributes
135 135 YYYY/MM/DD HH:MM:SS (PID)> - executing uisetup hooks
@@ -157,7 +157,7 confirm that there's no crash when an ex
157 157
158 158 $ hg help --keyword baddocext
159 159 *** failed to import extension "badext" from $TESTTMP/badext.py: bit bucket overflow
160 *** failed to import extension "badext2": No module named 'badext2' (py3 !)
160 *** failed to import extension "badext2": No module named 'badext2'
161 161 Topics:
162 162
163 163 extensions Using Additional Features
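The dropped `(py3 !)` markers are .t feature gates: a trailing `(feature !)` makes the line required only when the harness reports that feature, and lets it be absent otherwise, which is how one expected-output block used to serve both Python major versions. With only Python 3 left, the gates that still carry information are the interpreter-version splits, as in:

  ImportError: No module named 'badext2' (no-py36 !)
  ModuleNotFoundError: No module named 'badext2' (py36 !)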
@@ -121,6 +121,7 Verify should succeed:
121 121 checking manifests
122 122 crosschecking files in changesets and manifests
123 123 checking files
124 checking dirstate
124 125 checked 1 changesets with 1 changes to 1 files
125 126
126 127 Repository root:
@@ -575,8 +575,9 test rollback
575 575
576 576 $ echo foo > f1
577 577 $ hg bookmark tmp-rollback
578 $ hg ci -Amr
578 $ hg add .
579 579 adding f1
580 $ hg ci -mr
580 581 $ hg bookmarks
581 582 X2 1:925d80f479bb
582 583 Y 2:db815d6d32e6
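The old spelling only worked through short-option clustering: `-Amr` parses as `-A` (--addremove) plus `-m` with argument `r`, i.e. a commit whose message is the single letter r. The split form keeps the behaviour but makes both steps explicit:

  $ hg commit -Amr    # same as: hg commit --addremove -m r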
@@ -1125,8 +1126,6 repositories visible to an external hook
1125 1126 $ hg add a
1126 1127 $ hg commit -m '#0'
1127 1128 $ hg --config hooks.pretxnclose="sh $TESTTMP/savepending.sh" bookmarks INVISIBLE
1128 transaction abort!
1129 rollback completed
1130 1129 abort: pretxnclose hook exited with status 1
1131 1130 [40]
1132 1131 $ cp .hg/bookmarks.pending.saved .hg/bookmarks.pending
@@ -1158,8 +1157,6 repositories visible to an external hook
1158 1157 x y 2:db815d6d32e6
1159 1158 @unrelated
1160 1159 no bookmarks set
1161 transaction abort!
1162 rollback completed
1163 1160 abort: pretxnclose hook exited with status 1
1164 1161 [40]
1165 1162
@@ -1242,8 +1239,6 add hooks:
1242 1239 attempt to create on a default changeset
1243 1240
1244 1241 $ hg bookmark -r 81dcce76aa0b NEW
1245 transaction abort!
1246 rollback completed
1247 1242 abort: pretxnclose-bookmark.force-public hook exited with status 1
1248 1243 [40]
1249 1244
@@ -1254,7 +1249,5 create on a public changeset
1254 1249 move to the other branch
1255 1250
1256 1251 $ hg bookmark -f -r 125c9a1d6df6 NEW
1257 transaction abort!
1258 rollback completed
1259 1252 abort: pretxnclose-bookmark.force-forward hook exited with status 1
1260 1253 [40]
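The removed lines track quieter veto handling for pretxnclose hooks: the transaction is still undone when the hook fails, but the abort/rollback chatter no longer appears in normal output, leaving only the hook's abort message. Any failing hook exercises the path; a minimal sketch, with an illustrative hook name:

  [hooks]
  pretxnclose.deny-all = false

  $ hg bookmark NEW
  abort: pretxnclose.deny-all hook exited with status 1
  [40]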
@@ -17,7 +17,7
17 17 > hg -R test bundle -r "$i" test-"$i".hg test-"$i"
18 18 > cd test-"$i"
19 19 > hg unbundle ../test-"$i".hg
20 > hg verify
20 > hg verify -q
21 21 > hg tip -q
22 22 > cd ..
23 23 > done
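With --quiet, verify prints only problems, so a healthy repository produces no output at all; switching these transcripts to `hg verify -q` insulates them from changes in the progress lines (such as the newly added "checking dirstate" step seen in earlier hunks) without weakening the check:

  $ hg verify -q    # no output: the repository is clean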
@@ -29,11 +29,6
29 29 added 1 changesets with 1 changes to 1 files
30 30 new changesets bfaf4b5cbf01 (1 drafts)
31 31 (run 'hg update' to get a working copy)
32 checking changesets
33 checking manifests
34 crosschecking files in changesets and manifests
35 checking files
36 checked 1 changesets with 1 changes to 1 files
37 32 0:bfaf4b5cbf01
38 33 searching for changes
39 34 2 changesets found
@@ -43,11 +38,6
43 38 added 2 changesets with 2 changes to 1 files
44 39 new changesets bfaf4b5cbf01:21f32785131f (2 drafts)
45 40 (run 'hg update' to get a working copy)
46 checking changesets
47 checking manifests
48 crosschecking files in changesets and manifests
49 checking files
50 checked 2 changesets with 2 changes to 1 files
51 41 1:21f32785131f
52 42 searching for changes
53 43 3 changesets found
@@ -57,11 +47,6
57 47 added 3 changesets with 3 changes to 1 files
58 48 new changesets bfaf4b5cbf01:4ce51a113780 (3 drafts)
59 49 (run 'hg update' to get a working copy)
60 checking changesets
61 checking manifests
62 crosschecking files in changesets and manifests
63 checking files
64 checked 3 changesets with 3 changes to 1 files
65 50 2:4ce51a113780
66 51 searching for changes
67 52 4 changesets found
@@ -71,11 +56,6
71 56 added 4 changesets with 4 changes to 1 files
72 57 new changesets bfaf4b5cbf01:93ee6ab32777 (4 drafts)
73 58 (run 'hg update' to get a working copy)
74 checking changesets
75 checking manifests
76 crosschecking files in changesets and manifests
77 checking files
78 checked 4 changesets with 4 changes to 1 files
79 59 3:93ee6ab32777
80 60 searching for changes
81 61 2 changesets found
@@ -85,11 +65,6
85 65 added 2 changesets with 2 changes to 1 files
86 66 new changesets bfaf4b5cbf01:c70afb1ee985 (2 drafts)
87 67 (run 'hg update' to get a working copy)
88 checking changesets
89 checking manifests
90 crosschecking files in changesets and manifests
91 checking files
92 checked 2 changesets with 2 changes to 1 files
93 68 1:c70afb1ee985
94 69 searching for changes
95 70 3 changesets found
@@ -99,11 +74,6
99 74 added 3 changesets with 3 changes to 1 files
100 75 new changesets bfaf4b5cbf01:f03ae5a9b979 (3 drafts)
101 76 (run 'hg update' to get a working copy)
102 checking changesets
103 checking manifests
104 crosschecking files in changesets and manifests
105 checking files
106 checked 3 changesets with 3 changes to 1 files
107 77 2:f03ae5a9b979
108 78 searching for changes
109 79 4 changesets found
@@ -113,11 +83,6
113 83 added 4 changesets with 5 changes to 2 files
114 84 new changesets bfaf4b5cbf01:095cb14b1b4d (4 drafts)
115 85 (run 'hg update' to get a working copy)
116 checking changesets
117 checking manifests
118 crosschecking files in changesets and manifests
119 checking files
120 checked 4 changesets with 5 changes to 2 files
121 86 3:095cb14b1b4d
122 87 searching for changes
123 88 5 changesets found
@@ -127,11 +92,6
127 92 added 5 changesets with 6 changes to 3 files
128 93 new changesets bfaf4b5cbf01:faa2e4234c7a (5 drafts)
129 94 (run 'hg update' to get a working copy)
130 checking changesets
131 checking manifests
132 crosschecking files in changesets and manifests
133 checking files
134 checked 5 changesets with 6 changes to 3 files
135 95 4:faa2e4234c7a
136 96 searching for changes
137 97 5 changesets found
@@ -141,11 +101,6
141 101 added 5 changesets with 5 changes to 2 files
142 102 new changesets bfaf4b5cbf01:916f1afdef90 (5 drafts)
143 103 (run 'hg update' to get a working copy)
144 checking changesets
145 checking manifests
146 crosschecking files in changesets and manifests
147 checking files
148 checked 5 changesets with 5 changes to 2 files
149 104 4:916f1afdef90
150 105 $ cd test-8
151 106 $ hg pull ../test-7
@@ -158,12 +113,7
158 113 new changesets c70afb1ee985:faa2e4234c7a
159 114 1 local changesets published
160 115 (run 'hg heads' to see heads, 'hg merge' to merge)
161 $ hg verify
162 checking changesets
163 checking manifests
164 crosschecking files in changesets and manifests
165 checking files
166 checked 9 changesets with 7 changes to 4 files
116 $ hg verify -q
167 117 $ hg rollback
168 118 repository tip rolled back to revision 4 (undo pull)
169 119 $ cd ..
@@ -243,12 +193,7 revision 8
243 193
244 194 $ hg tip -q
245 195 8:916f1afdef90
246 $ hg verify
247 checking changesets
248 checking manifests
249 crosschecking files in changesets and manifests
250 checking files
251 checked 9 changesets with 7 changes to 4 files
196 $ hg verify -q
252 197 $ hg rollback
253 198 repository tip rolled back to revision 2 (undo unbundle)
254 199
@@ -268,12 +213,7 revision 4
268 213
269 214 $ hg tip -q
270 215 4:916f1afdef90
271 $ hg verify
272 checking changesets
273 checking manifests
274 crosschecking files in changesets and manifests
275 checking files
276 checked 5 changesets with 5 changes to 2 files
216 $ hg verify -q
277 217 $ hg rollback
278 218 repository tip rolled back to revision 2 (undo unbundle)
279 219 $ hg unbundle ../test-bundle-branch2.hg
@@ -288,12 +228,7 revision 6
288 228
289 229 $ hg tip -q
290 230 6:faa2e4234c7a
291 $ hg verify
292 checking changesets
293 checking manifests
294 crosschecking files in changesets and manifests
295 checking files
296 checked 7 changesets with 6 changes to 3 files
231 $ hg verify -q
297 232 $ hg rollback
298 233 repository tip rolled back to revision 2 (undo unbundle)
299 234 $ hg unbundle ../test-bundle-cset-7.hg
@@ -308,12 +243,7 revision 4
308 243
309 244 $ hg tip -q
310 245 4:916f1afdef90
311 $ hg verify
312 checking changesets
313 checking manifests
314 crosschecking files in changesets and manifests
315 checking files
316 checked 5 changesets with 5 changes to 2 files
246 $ hg verify -q
317 247
318 248 $ cd ../test
319 249 $ hg merge 7
@@ -342,11 +272,6 revision 9
342 272
343 273 $ hg tip -q
344 274 9:03fc0b0e347c
345 $ hg verify
346 checking changesets
347 checking manifests
348 crosschecking files in changesets and manifests
349 checking files
350 checked 10 changesets with 7 changes to 4 files
275 $ hg verify -q
351 276
352 277 $ cd ..
@@ -28,12 +28,7 Setting up test
28 28 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
29 29 $ hg mv afile anotherfile
30 30 $ hg commit -m "0.3m"
31 $ hg verify
32 checking changesets
33 checking manifests
34 crosschecking files in changesets and manifests
35 checking files
36 checked 9 changesets with 7 changes to 4 files
31 $ hg verify -q
37 32 $ cd ..
38 33 $ hg init empty
39 34
@@ -70,12 +65,7 Verify empty
70 65
71 66 $ hg -R empty heads
72 67 [1]
73 $ hg -R empty verify
74 checking changesets
75 checking manifests
76 crosschecking files in changesets and manifests
77 checking files
78 checked 0 changesets with 0 changes to 0 files
68 $ hg -R empty verify -q
79 69
80 70 #if repobundlerepo
81 71
@@ -853,12 +843,7 full history bundle, refuses to verify n
853 843
854 844 but, regular verify must continue to work
855 845
856 $ hg -R orig verify
857 checking changesets
858 checking manifests
859 crosschecking files in changesets and manifests
860 checking files
861 checked 2 changesets with 2 changes to 2 files
846 $ hg -R orig verify -q
862 847
863 848 #if repobundlerepo
864 849 diff against bundle
@@ -939,12 +924,7 bundle single branch
939 924
940 925 $ hg clone -q -r0 . part2
941 926 $ hg -q -R part2 pull bundle.hg
942 $ hg -R part2 verify
943 checking changesets
944 checking manifests
945 crosschecking files in changesets and manifests
946 checking files
947 checked 3 changesets with 5 changes to 4 files
927 $ hg -R part2 verify -q
948 928 #endif
949 929
950 930 == Test bundling no commits
@@ -1039,6 +1019,24 Test the option that create and no-delta
1039 1019 $ hg bundle -a --config devel.bundle.delta=full ./full.hg
1040 1020 3 changesets found
1041 1021
1022
1023 Test the debug statistic when building a bundle
1024 -----------------------------------------------
1025
1026 $ hg bundle -a ./default.hg --config debug.bundling-stats=yes
1027 3 changesets found
1028 DEBUG-BUNDLING: revisions: 9
1029 DEBUG-BUNDLING: changelog: 3
1030 DEBUG-BUNDLING: manifest: 3
1031 DEBUG-BUNDLING: files: 3 (for 3 revlogs)
1032 DEBUG-BUNDLING: deltas:
1033 DEBUG-BUNDLING: from-storage: 2 (100% of available 2)
1034 DEBUG-BUNDLING: computed: 7
1035 DEBUG-BUNDLING: full: 7 (100% of native 7)
1036 DEBUG-BUNDLING: changelog: 3 (100% of native 3)
1037 DEBUG-BUNDLING: manifests: 1 (100% of native 1)
1038 DEBUG-BUNDLING: files: 3 (100% of native 3)
1039
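`debug.bundling-stats` is an ordinary config option, so the one-shot `--config` form used above has a persistent hgrc equivalent:

  [debug]
  bundling-stats = yes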
1042 1040 Test the debug output when applying delta
1043 1041 -----------------------------------------
1044 1042
@@ -1048,18 +1046,62 Test the debug output when applying delt
1048 1046 > --config storage.revlog.reuse-external-delta=no \
1049 1047 > --config storage.revlog.reuse-external-delta-parent=no
1050 1048 adding changesets
1051 DBG-DELTAS: CHANGELOG: rev=0: search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
1052 DBG-DELTAS: CHANGELOG: rev=1: search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=0 p2-chain-length=-1 - duration=* (glob)
1053 DBG-DELTAS: CHANGELOG: rev=2: search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=0 p2-chain-length=-1 - duration=* (glob)
1049 DBG-DELTAS: CHANGELOG: rev=0: delta-base=0 is-cached=1 - search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
1050 DBG-DELTAS: CHANGELOG: rev=1: delta-base=1 is-cached=1 - search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=0 p2-chain-length=-1 - duration=* (glob)
1051 DBG-DELTAS: CHANGELOG: rev=2: delta-base=2 is-cached=1 - search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=0 p2-chain-length=-1 - duration=* (glob)
1054 1052 adding manifests
1055 DBG-DELTAS: MANIFESTLOG: rev=0: search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
1056 DBG-DELTAS: MANIFESTLOG: rev=1: search-rounds=1 try-count=1 - delta-type=delta snap-depth=0 - p1-chain-length=0 p2-chain-length=-1 - duration=* (glob)
1057 DBG-DELTAS: MANIFESTLOG: rev=2: search-rounds=1 try-count=1 - delta-type=delta snap-depth=0 - p1-chain-length=1 p2-chain-length=-1 - duration=* (glob)
1053 DBG-DELTAS: MANIFESTLOG: rev=0: delta-base=0 is-cached=1 - search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
1054 DBG-DELTAS: MANIFESTLOG: rev=1: delta-base=0 is-cached=1 - search-rounds=1 try-count=1 - delta-type=delta snap-depth=0 - p1-chain-length=0 p2-chain-length=-1 - duration=* (glob)
1055 DBG-DELTAS: MANIFESTLOG: rev=2: delta-base=1 is-cached=1 - search-rounds=1 try-count=1 - delta-type=delta snap-depth=0 - p1-chain-length=1 p2-chain-length=-1 - duration=* (glob)
1058 1056 adding file changes
1059 DBG-DELTAS: FILELOG:a: rev=0: search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
1060 DBG-DELTAS: FILELOG:b: rev=0: search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
1061 DBG-DELTAS: FILELOG:c: rev=0: search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
1057 DBG-DELTAS: FILELOG:a: rev=0: delta-base=0 is-cached=1 - search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
1058 DBG-DELTAS: FILELOG:b: rev=0: delta-base=0 is-cached=1 - search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
1059 DBG-DELTAS: FILELOG:c: rev=0: delta-base=0 is-cached=1 - search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
1062 1060 added 3 changesets with 3 changes to 3 files
1063 1061 new changesets 4fe08cd4693e:4652c276ac4f (3 drafts)
1064 1062 (run 'hg update' to get a working copy)
1065 1063
1064
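Judging by the output above, the extra DBG-DELTAS fields identify how each incoming delta was stored: `delta-base` is the revision number the stored delta applies against, and `is-cached=1` appears to mean the delta carried by the bundle was reused rather than recomputed. Reading one line as an annotated example:

  DBG-DELTAS: MANIFESTLOG: rev=2: delta-base=1 is-cached=1 - search-rounds=1 try-count=1 ...
    rev=2          revision being stored
    delta-base=1   base revision of the stored delta
    is-cached=1    the bundle's own delta was reused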
1065 Test the debug statistic when applying a bundle
1066 -----------------------------------------------
1067
1068 $ hg init bar
1069 $ hg -R bar unbundle ./default.hg --config debug.unbundling-stats=yes
1070 adding changesets
1071 adding manifests
1072 adding file changes
1073 DEBUG-UNBUNDLING: revisions: 9
1074 DEBUG-UNBUNDLING: changelog: 3 ( 33%)
1075 DEBUG-UNBUNDLING: manifests: 3 ( 33%)
1076 DEBUG-UNBUNDLING: files: 3 ( 33%)
1077 DEBUG-UNBUNDLING: total-time: ?????????????? seconds (glob)
1078 DEBUG-UNBUNDLING: changelog: ?????????????? seconds (???%) (glob)
1079 DEBUG-UNBUNDLING: manifests: ?????????????? seconds (???%) (glob)
1080 DEBUG-UNBUNDLING: files: ?????????????? seconds (???%) (glob)
1081 DEBUG-UNBUNDLING: type-count:
1082 DEBUG-UNBUNDLING: changelog:
1083 DEBUG-UNBUNDLING: full: 3
1084 DEBUG-UNBUNDLING: cached: 3 (100%)
1085 DEBUG-UNBUNDLING: manifests:
1086 DEBUG-UNBUNDLING: full: 1
1087 DEBUG-UNBUNDLING: cached: 1 (100%)
1088 DEBUG-UNBUNDLING: delta: 2
1089 DEBUG-UNBUNDLING: cached: 2 (100%)
1090 DEBUG-UNBUNDLING: files:
1091 DEBUG-UNBUNDLING: full: 3
1092 DEBUG-UNBUNDLING: cached: 3 (100%)
1093 DEBUG-UNBUNDLING: type-time:
1094 DEBUG-UNBUNDLING: changelog:
1095 DEBUG-UNBUNDLING: full: ?????????????? seconds (???% of total) (glob)
1096 DEBUG-UNBUNDLING: cached: ?????????????? seconds (???% of total) (glob)
1097 DEBUG-UNBUNDLING: manifests:
1098 DEBUG-UNBUNDLING: full: ?????????????? seconds (???% of total) (glob)
1099 DEBUG-UNBUNDLING: cached: ?????????????? seconds (???% of total) (glob)
1100 DEBUG-UNBUNDLING: delta: ?????????????? seconds (???% of total) (glob)
1101 DEBUG-UNBUNDLING: cached: ?????????????? seconds (???% of total) (glob)
1102 DEBUG-UNBUNDLING: files:
1103 DEBUG-UNBUNDLING: full: ?????????????? seconds (???% of total) (glob)
1104 DEBUG-UNBUNDLING: cached: ?????????????? seconds (???% of total) (glob)
1105 added 3 changesets with 3 changes to 3 files
1106 new changesets 4fe08cd4693e:4652c276ac4f (3 drafts)
1107 (run 'hg update' to get a working copy)
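
Note: the report above is switched on by the `--config debug.unbundling-stats=yes` flag shown on the command line. The count percentages are plain integer shares of the total revision count; a small illustrative Python sketch of that arithmetic (names and formatting are approximate, not the actual unbundling code):

    # 3 changelog + 3 manifest + 3 file revisions -> 9 total, 33% each
    counts = {'changelog': 3, 'manifests': 3, 'files': 3}
    total = sum(counts.values())
    print('DEBUG-UNBUNDLING: revisions: %d' % total)
    for kind, n in counts.items():
        print('DEBUG-UNBUNDLING:   %s: %d (%3d%%)' % (kind, n, n * 100 // total))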
@@ -739,12 +739,10 Check output capture control.
739 739 $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
740 740 pushing to ssh://user@dummy/other
741 741 searching for changes
742 remote: Fail early! (no-py3 chg !)
743 742 remote: adding changesets
744 743 remote: adding manifests
745 744 remote: adding file changes
746 remote: Fail early! (py3 !)
747 remote: Fail early! (no-py3 no-chg !)
745 remote: Fail early!
748 746 remote: transaction abort!
749 747 remote: Cleaning up the mess...
750 748 remote: rollback completed
@@ -175,6 +175,7 Repo fails verification due to censorshi
175 175 checking files
176 176 target@1: censored file data
177 177 target@2: censored file data
178 not checking dirstate because of previous errors
178 179 checked 5 changesets with 7 changes to 2 files
179 180 2 integrity errors encountered!
180 181 (first damaged changeset appears to be 1)
@@ -205,12 +206,7 Set censor policy to ignore in trusted $
205 206
206 207 Repo passes verification with warnings with explicit config
207 208
208 $ hg verify
209 checking changesets
210 checking manifests
211 crosschecking files in changesets and manifests
212 checking files
213 checked 5 changesets with 7 changes to 2 files
209 $ hg verify -q
214 210
215 211 May update to revision with censored data with explicit config
216 212
@@ -330,24 +326,14 Repo with censored nodes can be cloned a
330 326 $ hg cat -r $C1 target | head -n 10
331 327 $ hg cat -r 0 target | head -n 10
332 328 Initially untainted file
333 $ hg verify
334 checking changesets
335 checking manifests
336 crosschecking files in changesets and manifests
337 checking files
338 checked 12 changesets with 13 changes to 2 files
329 $ hg verify -q
339 330
340 331 Repo cloned before tainted content introduced can pull censored nodes
341 332
342 333 $ cd ../rpull
343 334 $ hg cat -r tip target | head -n 10
344 335 Initially untainted file
345 $ hg verify
346 checking changesets
347 checking manifests
348 crosschecking files in changesets and manifests
349 checking files
350 checked 1 changesets with 2 changes to 2 files
336 $ hg verify -q
351 337 $ hg pull -r $H1 -r $H2
352 338 pulling from $TESTTMP/r
353 339 searching for changes
@@ -369,12 +355,7 Repo cloned before tainted content intro
369 355 $ hg cat -r $C1 target | head -n 10
370 356 $ hg cat -r 0 target | head -n 10
371 357 Initially untainted file
372 $ hg verify
373 checking changesets
374 checking manifests
375 crosschecking files in changesets and manifests
376 checking files
377 checked 12 changesets with 13 changes to 2 files
358 $ hg verify -q
378 359
379 360 Censored nodes can be pushed if they censor previously unexchanged nodes
380 361
@@ -429,12 +410,7 Censored nodes can be bundled up and unb
429 410 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
430 411 $ cat target | head -n 10
431 412 Re-sanitized; nothing to see here
432 $ hg verify
433 checking changesets
434 checking manifests
435 crosschecking files in changesets and manifests
436 checking files
437 checked 14 changesets with 15 changes to 2 files
413 $ hg verify -q
438 414
439 415 Grepping only warns, doesn't error out
440 416
@@ -488,12 +464,7 Censored nodes can be imported on top of
488 464 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
489 465 $ cat target | head -n 10
490 466 Re-sanitized; nothing to see here
491 $ hg verify
492 checking changesets
493 checking manifests
494 crosschecking files in changesets and manifests
495 checking files
496 checked 14 changesets with 15 changes to 2 files
467 $ hg verify -q
497 468 $ cd ../r
498 469
499 470 Can import bundle where first revision of a file is censored
@@ -43,11 +43,6 start a commit...
43 43 see what happened
44 44
45 45 $ wait
46 $ hg verify
47 checking changesets
48 checking manifests
49 crosschecking files in changesets and manifests
50 checking files
51 checked 2 changesets with 2 changes to 1 files
46 $ hg verify -q
52 47
53 48 $ cd ..
@@ -66,12 +66,7
66 66 5 7 09bb521d218d de68e904d169 000000000000
67 67 6 8 1fde233dfb0f f54c32f13478 000000000000
68 68
69 $ hg verify
70 checking changesets
71 checking manifests
72 crosschecking files in changesets and manifests
73 checking files
74 checked 9 changesets with 7 changes to 4 files
69 $ hg verify -q
75 70
76 71 $ cd ..
77 72
@@ -80,7 +75,7
80 75 > echo ---- hg clone -r "$i" test test-"$i"
81 76 > hg clone -r "$i" test test-"$i"
82 77 > cd test-"$i"
83 > hg verify
78 > hg verify -q
84 79 > cd ..
85 80 > done
86 81
@@ -92,11 +87,6
92 87 new changesets f9ee2f85a263
93 88 updating to branch default
94 89 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
95 checking changesets
96 checking manifests
97 crosschecking files in changesets and manifests
98 checking files
99 checked 1 changesets with 1 changes to 1 files
100 90
101 91 ---- hg clone -r 1 test test-1
102 92 adding changesets
@@ -106,11 +96,6
106 96 new changesets f9ee2f85a263:34c2bf6b0626
107 97 updating to branch default
108 98 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
109 checking changesets
110 checking manifests
111 crosschecking files in changesets and manifests
112 checking files
113 checked 2 changesets with 2 changes to 1 files
114 99
115 100 ---- hg clone -r 2 test test-2
116 101 adding changesets
@@ -120,11 +105,6
120 105 new changesets f9ee2f85a263:e38ba6f5b7e0
121 106 updating to branch default
122 107 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
123 checking changesets
124 checking manifests
125 crosschecking files in changesets and manifests
126 checking files
127 checked 3 changesets with 3 changes to 1 files
128 108
129 109 ---- hg clone -r 3 test test-3
130 110 adding changesets
@@ -134,11 +114,6
134 114 new changesets f9ee2f85a263:eebf5a27f8ca
135 115 updating to branch default
136 116 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
137 checking changesets
138 checking manifests
139 crosschecking files in changesets and manifests
140 checking files
141 checked 4 changesets with 4 changes to 1 files
142 117
143 118 ---- hg clone -r 4 test test-4
144 119 adding changesets
@@ -148,11 +123,6
148 123 new changesets f9ee2f85a263:095197eb4973
149 124 updating to branch default
150 125 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
151 checking changesets
152 checking manifests
153 crosschecking files in changesets and manifests
154 checking files
155 checked 2 changesets with 2 changes to 1 files
156 126
157 127 ---- hg clone -r 5 test test-5
158 128 adding changesets
@@ -162,11 +132,6
162 132 new changesets f9ee2f85a263:1bb50a9436a7
163 133 updating to branch default
164 134 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
165 checking changesets
166 checking manifests
167 crosschecking files in changesets and manifests
168 checking files
169 checked 3 changesets with 3 changes to 1 files
170 135
171 136 ---- hg clone -r 6 test test-6
172 137 adding changesets
@@ -176,11 +141,6
176 141 new changesets f9ee2f85a263:7373c1169842
177 142 updating to branch default
178 143 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
179 checking changesets
180 checking manifests
181 crosschecking files in changesets and manifests
182 checking files
183 checked 4 changesets with 5 changes to 2 files
184 144
185 145 ---- hg clone -r 7 test test-7
186 146 adding changesets
@@ -190,11 +150,6
190 150 new changesets f9ee2f85a263:a6a34bfa0076
191 151 updating to branch default
192 152 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
193 checking changesets
194 checking manifests
195 crosschecking files in changesets and manifests
196 checking files
197 checked 5 changesets with 6 changes to 3 files
198 153
199 154 ---- hg clone -r 8 test test-8
200 155 adding changesets
@@ -204,11 +159,6
204 159 new changesets f9ee2f85a263:aa35859c02ea
205 160 updating to branch default
206 161 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
207 checking changesets
208 checking manifests
209 crosschecking files in changesets and manifests
210 checking files
211 checked 5 changesets with 5 changes to 2 files
212 162
213 163 $ cd test-8
214 164 $ hg pull ../test-7
@@ -220,12 +170,7
220 170 added 4 changesets with 2 changes to 3 files (+1 heads)
221 171 new changesets 095197eb4973:a6a34bfa0076
222 172 (run 'hg heads' to see heads, 'hg merge' to merge)
223 $ hg verify
224 checking changesets
225 checking manifests
226 crosschecking files in changesets and manifests
227 checking files
228 checked 9 changesets with 7 changes to 4 files
173 $ hg verify -q
229 174 $ cd ..
230 175
231 176 $ hg clone test test-9
@@ -110,12 +110,7 tests, and dot-encode need the store ena
110 110 new changesets 96ee1d7354c4:06ddac466af5
111 111 updating to branch default
112 112 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
113 $ hg verify -R server-no-store
114 checking changesets
115 checking manifests
116 crosschecking files in changesets and manifests
117 checking files
118 checked 5004 changesets with 1088 changes to 1088 files
113 $ hg verify -R server-no-store -q
119 114 $ hg -R server serve -p $HGPORT -d --pid-file=hg-1.pid --error errors-1.txt
120 115 $ cat hg-1.pid > $DAEMON_PIDS
121 116 $ hg -R server-no-store serve -p $HGPORT2 -d --pid-file=hg-2.pid --error errors-2.txt
@@ -129,12 +124,7 store → no-store cloning
129 124
130 125 $ hg clone --quiet --stream -U http://localhost:$HGPORT clone-remove-store --config format.usestore=no
131 126 $ cat errors-1.txt
132 $ hg -R clone-remove-store verify
133 checking changesets
134 checking manifests
135 crosschecking files in changesets and manifests
136 checking files
137 checked 5004 changesets with 1088 changes to 1088 files
127 $ hg -R clone-remove-store verify -q
138 128 $ hg debugrequires -R clone-remove-store | grep store
139 129 [1]
140 130
@@ -143,12 +133,7 no-store → store cloning
143 133
144 134 $ hg clone --quiet --stream -U http://localhost:$HGPORT2 clone-add-store --config format.usestore=yes
145 135 $ cat errors-2.txt
146 $ hg -R clone-add-store verify
147 checking changesets
148 checking manifests
149 crosschecking files in changesets and manifests
150 checking files
151 checked 5004 changesets with 1088 changes to 1088 files
136 $ hg -R clone-add-store verify -q
152 137 $ hg debugrequires -R clone-add-store | grep store
153 138 store
154 139
@@ -171,12 +156,7 Test streaming from/to repository withou
171 156 new changesets 96ee1d7354c4:06ddac466af5
172 157 updating to branch default
173 158 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
174 $ hg verify -R server-no-fncache
175 checking changesets
176 checking manifests
177 crosschecking files in changesets and manifests
178 checking files
179 checked 5004 changesets with 1088 changes to 1088 files
159 $ hg verify -R server-no-fncache -q
180 160 $ hg -R server serve -p $HGPORT -d --pid-file=hg-1.pid --error errors-1.txt
181 161 $ cat hg-1.pid > $DAEMON_PIDS
182 162 $ hg -R server-no-fncache serve -p $HGPORT2 -d --pid-file=hg-2.pid --error errors-2.txt
@@ -190,12 +170,7 fncache → no-fncache cloning
190 170
191 171 $ hg clone --quiet --stream -U http://localhost:$HGPORT clone-remove-fncache --config format.usefncache=no
192 172 $ cat errors-1.txt
193 $ hg -R clone-remove-fncache verify
194 checking changesets
195 checking manifests
196 crosschecking files in changesets and manifests
197 checking files
198 checked 5004 changesets with 1088 changes to 1088 files
173 $ hg -R clone-remove-fncache verify -q
199 174 $ hg debugrequires -R clone-remove-fncache | grep fncache
200 175 [1]
201 176
@@ -204,12 +179,7 no-fncache → fncache cloning
204 179
205 180 $ hg clone --quiet --stream -U http://localhost:$HGPORT2 clone-add-fncache --config format.usefncache=yes
206 181 $ cat errors-2.txt
207 $ hg -R clone-add-fncache verify
208 checking changesets
209 checking manifests
210 crosschecking files in changesets and manifests
211 checking files
212 checked 5004 changesets with 1088 changes to 1088 files
182 $ hg -R clone-add-fncache verify -q
213 183 $ hg debugrequires -R clone-add-fncache | grep fncache
214 184 fncache
215 185
@@ -231,12 +201,7 Test streaming from/to repository withou
231 201 new changesets 96ee1d7354c4:06ddac466af5
232 202 updating to branch default
233 203 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
234 $ hg verify -R server-no-dotencode
235 checking changesets
236 checking manifests
237 crosschecking files in changesets and manifests
238 checking files
239 checked 5004 changesets with 1088 changes to 1088 files
204 $ hg verify -R server-no-dotencode -q
240 205 $ hg -R server serve -p $HGPORT -d --pid-file=hg-1.pid --error errors-1.txt
241 206 $ cat hg-1.pid > $DAEMON_PIDS
242 207 $ hg -R server-no-dotencode serve -p $HGPORT2 -d --pid-file=hg-2.pid --error errors-2.txt
@@ -250,12 +215,7 dotencode → no-dotencode cloning
250 215
251 216 $ hg clone --quiet --stream -U http://localhost:$HGPORT clone-remove-dotencode --config format.dotencode=no
252 217 $ cat errors-1.txt
253 $ hg -R clone-remove-dotencode verify
254 checking changesets
255 checking manifests
256 crosschecking files in changesets and manifests
257 checking files
258 checked 5004 changesets with 1088 changes to 1088 files
218 $ hg -R clone-remove-dotencode verify -q
259 219 $ hg debugrequires -R clone-remove-dotencode | grep dotencode
260 220 [1]
261 221
@@ -264,12 +224,7 no-dotencode → dotencode cloning
264 224
265 225 $ hg clone --quiet --stream -U http://localhost:$HGPORT2 clone-add-dotencode --config format.dotencode=yes
266 226 $ cat errors-2.txt
267 $ hg -R clone-add-dotencode verify
268 checking changesets
269 checking manifests
270 crosschecking files in changesets and manifests
271 checking files
272 checked 5004 changesets with 1088 changes to 1088 files
227 $ hg -R clone-add-dotencode verify -q
273 228 $ hg debugrequires -R clone-add-dotencode | grep dotencode
274 229 dotencode
275 230
@@ -289,12 +244,7 The resulting clone should not use share
289 244 $ cat hg-1.pid > $DAEMON_PIDS
290 245
291 246 $ hg clone --quiet --stream -U http://localhost:$HGPORT clone-from-share
292 $ hg -R clone-from-share verify
293 checking changesets
294 checking manifests
295 crosschecking files in changesets and manifests
296 checking files
297 checked 5004 changesets with 1088 changes to 1088 files
247 $ hg -R clone-from-share verify -q
298 248 $ hg debugrequires -R clone-from-share | egrep 'share$'
299 249 [1]
300 250
@@ -313,12 +263,7 Test streaming from/to repository withou
313 263 new changesets 96ee1d7354c4:06ddac466af5
314 264 updating to branch default
315 265 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
316 $ hg verify -R server-no-share-safe
317 checking changesets
318 checking manifests
319 crosschecking files in changesets and manifests
320 checking files
321 checked 5004 changesets with 1088 changes to 1088 files
266 $ hg verify -R server-no-share-safe -q
322 267 $ hg -R server serve -p $HGPORT -d --pid-file=hg-1.pid --error errors-1.txt
323 268 $ cat hg-1.pid > $DAEMON_PIDS
324 269 $ hg -R server-no-share-safe serve -p $HGPORT2 -d --pid-file=hg-2.pid --error errors-2.txt
@@ -332,12 +277,7 share-safe → no-share-safe cloning
332 277
333 278 $ hg clone --quiet --stream -U http://localhost:$HGPORT clone-remove-share-safe --config format.use-share-safe=no
334 279 $ cat errors-1.txt
335 $ hg -R clone-remove-share-safe verify
336 checking changesets
337 checking manifests
338 crosschecking files in changesets and manifests
339 checking files
340 checked 5004 changesets with 1088 changes to 1088 files
280 $ hg -R clone-remove-share-safe verify -q
341 281 $ hg debugrequires -R clone-remove-share-safe | grep share-safe
342 282 [1]
343 283
@@ -346,12 +286,7 no-share-safe → share-safe cloning
346 286
347 287 $ hg clone --quiet --stream -U http://localhost:$HGPORT2 clone-add-share-safe --config format.use-share-safe=yes
348 288 $ cat errors-2.txt
349 $ hg -R clone-add-share-safe verify
350 checking changesets
351 checking manifests
352 crosschecking files in changesets and manifests
353 checking files
354 checked 5004 changesets with 1088 changes to 1088 files
289 $ hg -R clone-add-share-safe verify -q
355 290 $ hg debugrequires -R clone-add-share-safe | grep share-safe
356 291 share-safe
357 292
@@ -374,12 +309,7 persistent nodemap affects revlog, but t
374 309 new changesets 96ee1d7354c4:06ddac466af5
375 310 updating to branch default
376 311 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
377 $ hg verify -R server-no-persistent-nodemap
378 checking changesets
379 checking manifests
380 crosschecking files in changesets and manifests
381 checking files
382 checked 5004 changesets with 1088 changes to 1088 files
312 $ hg verify -R server-no-persistent-nodemap -q
383 313 $ hg -R server serve -p $HGPORT -d --pid-file=hg-1.pid --error errors-1.txt
384 314 $ cat hg-1.pid > $DAEMON_PIDS
385 315 $ hg -R server-no-persistent-nodemap serve -p $HGPORT2 -d --pid-file=hg-2.pid --error errors-2.txt
@@ -401,12 +331,7 persistent-nodemap → no-persistent-nodemap cloning
401 331
402 332 $ hg clone --quiet --stream -U http://localhost:$HGPORT clone-remove-persistent-nodemap --config format.use-persistent-nodemap=no
403 333 $ cat errors-1.txt
404 $ hg -R clone-remove-persistent-nodemap verify
405 checking changesets
406 checking manifests
407 crosschecking files in changesets and manifests
408 checking files
409 checked 5004 changesets with 1088 changes to 1088 files
334 $ hg -R clone-remove-persistent-nodemap verify -q
410 335 $ hg debugrequires -R clone-remove-persistent-nodemap | grep persistent-nodemap
411 336 [1]
412 337
@@ -421,12 +346,7 no-persistent-nodemap → persistent-nodemap cloning
421 346
422 347 $ hg clone --quiet --stream -U http://localhost:$HGPORT2 clone-add-persistent-nodemap --config format.use-persistent-nodemap=yes
423 348 $ cat errors-2.txt
424 $ hg -R clone-add-persistent-nodemap verify
425 checking changesets
426 checking manifests
427 crosschecking files in changesets and manifests
428 checking files
429 checked 5004 changesets with 1088 changes to 1088 files
349 $ hg -R clone-add-persistent-nodemap verify -q
430 350 $ hg debugrequires -R clone-add-persistent-nodemap | grep persistent-nodemap
431 351 persistent-nodemap
432 352
@@ -94,12 +94,7 This is present here to reuse the testin
94 94
95 95 Check that the clone went well
96 96
97 $ hg verify -R local-clone
98 checking changesets
99 checking manifests
100 crosschecking files in changesets and manifests
101 checking files
102 checked 3 changesets with 1088 changes to 1088 files
97 $ hg verify -R local-clone -q
103 98
104 99 Check uncompressed
105 100 ==================
@@ -651,12 +646,7 clone it
651 646 updating to branch default
652 647 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
653 648 #endif
654 $ hg verify -R with-bookmarks
655 checking changesets
656 checking manifests
657 crosschecking files in changesets and manifests
658 checking files
659 checked 3 changesets with 1088 changes to 1088 files
649 $ hg verify -R with-bookmarks -q
660 650 $ hg -R with-bookmarks bookmarks
661 651 some-bookmark 2:5223b5e3265f
662 652
@@ -692,12 +682,7 Clone as publishing
692 682 updating to branch default
693 683 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
694 684 #endif
695 $ hg verify -R phase-publish
696 checking changesets
697 checking manifests
698 crosschecking files in changesets and manifests
699 checking files
700 checked 3 changesets with 1088 changes to 1088 files
685 $ hg verify -R phase-publish -q
701 686 $ hg -R phase-publish phase -r 'all()'
702 687 0: public
703 688 1: public
@@ -747,12 +732,7 stream v1 unsuitable for non-publishing
747 732 1: draft
748 733 2: draft
749 734 #endif
750 $ hg verify -R phase-no-publish
751 checking changesets
752 checking manifests
753 crosschecking files in changesets and manifests
754 checking files
755 checked 3 changesets with 1088 changes to 1088 files
735 $ hg verify -R phase-no-publish -q
756 736
757 737 $ killdaemons.py
758 738
@@ -801,12 +781,7 Clone non-publishing with obsolescence
801 781 0: draft
802 782 $ hg debugobsolete -R with-obsolescence
803 783 8c206a663911c1f97f2f9d7382e417ae55872cfa 0 {5223b5e3265f0df40bb743da62249413d74ac70f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
804 $ hg verify -R with-obsolescence
805 checking changesets
806 checking manifests
807 crosschecking files in changesets and manifests
808 checking files
809 checked 4 changesets with 1089 changes to 1088 files
784 $ hg verify -R with-obsolescence -q
810 785
811 786 $ hg clone -U --stream --config experimental.evolution=0 http://localhost:$HGPORT with-obsolescence-no-evolution
812 787 streaming all changes
@@ -59,12 +59,7 Ensure branchcache got copied over:
59 59
60 60 $ cat a
61 61 a
62 $ hg verify
63 checking changesets
64 checking manifests
65 crosschecking files in changesets and manifests
66 checking files
67 checked 11 changesets with 11 changes to 2 files
62 $ hg verify -q
68 63
69 64 Invalid dest '' must abort:
70 65
@@ -122,12 +117,7 Ensure branchcache got copied over:
122 117
123 118 $ cat a 2>/dev/null || echo "a not present"
124 119 a not present
125 $ hg verify
126 checking changesets
127 checking manifests
128 crosschecking files in changesets and manifests
129 checking files
130 checked 11 changesets with 11 changes to 2 files
120 $ hg verify -q
131 121
132 122 Default destination:
133 123
@@ -167,12 +157,7 Use --pull:
167 157 new changesets acb14030fe0a:a7949464abda
168 158 updating to branch default
169 159 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
170 $ hg -R g verify
171 checking changesets
172 checking manifests
173 crosschecking files in changesets and manifests
174 checking files
175 checked 11 changesets with 11 changes to 2 files
160 $ hg -R g verify -q
176 161
177 162 Invalid dest '' with --pull must abort (issue2528):
178 163
@@ -541,6 +541,7 changelog and manifest would have invali
541 541 checking manifests
542 542 crosschecking files in changesets and manifests
543 543 checking files
544 checking dirstate
544 545 checked 2 changesets with 2 changes to 1 files
545 546 $ hg revert --no-backup -aq
546 547
@@ -825,6 +826,7 structured message channel:
825 826 message: '\xa6Ditem@Cpos\xf6EtopicMcrosscheckingEtotal\xf6DtypeHprogressDunit@'
826 827 message: '\xa2DdataOchecking files\nDtypeFstatus'
827 828 message: '\xa6Ditem@Cpos\xf6EtopicHcheckingEtotal\xf6DtypeHprogressDunit@'
829 message: '\xa2DdataRchecking dirstate\nDtypeFstatus'
828 830 message: '\xa2DdataX/checked 0 changesets with 0 changes to 0 files\nDtypeFstatus'
829 831
830 832 >>> from hgclient import checkwith, readchannel, runcommand, stringio
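
Note: each "message:" line in this hunk is a CBOR-encoded map sent over the structured message channel; the added line decodes to {b'data': b'checking dirstate\n', b'type': b'status'}. A byte-by-byte check in plain Python (no Mercurial APIs involved):

    # CBOR: 0xa2 = map(2); 0x44 ('D') = 4-byte string; 0x52 ('R') = 18-byte
    # string; 0x46 ('F') = 6-byte string.
    msg = b'\xa2DdataRchecking dirstate\nDtypeFstatus'
    assert msg[0] == 0xA2                        # map with two entries
    assert msg[1:6] == b'Ddata'                  # key b'data'
    assert msg[6:25] == b'Rchecking dirstate\n'  # 18-byte value
    assert msg[25:30] == b'Dtype'                # key b'type'
    assert msg[30:] == b'Fstatus'                # value b'status'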
@@ -123,13 +123,13 No changes, just a different message:
123 123 uncompressed size of bundle content:
124 124 254 (changelog)
125 125 163 (manifests)
126 131 a
126 133 a
127 127 saved backup bundle to $TESTTMP/repo/.hg/strip-backup/47343646fa3d-c2758885-amend.hg
128 128 1 changesets found
129 129 uncompressed size of bundle content:
130 130 250 (changelog)
131 131 163 (manifests)
132 131 a
132 133 a
133 133 adding branch
134 134 adding changesets
135 135 adding manifests
@@ -267,13 +267,13 then, test editing custom commit message
267 267 uncompressed size of bundle content:
268 268 249 (changelog)
269 269 163 (manifests)
270 133 a
270 135 a
271 271 saved backup bundle to $TESTTMP/repo/.hg/strip-backup/a9a13940fc03-7c2e8674-amend.hg
272 272 1 changesets found
273 273 uncompressed size of bundle content:
274 274 257 (changelog)
275 275 163 (manifests)
276 133 a
276 135 a
277 277 adding branch
278 278 adding changesets
279 279 adding manifests
@@ -303,13 +303,13 Same, but with changes in working dir (d
303 303 uncompressed size of bundle content:
304 304 257 (changelog)
305 305 163 (manifests)
306 133 a
306 135 a
307 307 saved backup bundle to $TESTTMP/repo/.hg/strip-backup/64a124ba1b44-10374b8f-amend.hg
308 308 1 changesets found
309 309 uncompressed size of bundle content:
310 310 257 (changelog)
311 311 163 (manifests)
312 135 a
312 137 a
313 313 adding branch
314 314 adding changesets
315 315 adding manifests
@@ -77,6 +77,7 Show debug commands if there are no othe
77 77 debug-delta-find
78 78 debug-repair-issue6528
79 79 debug-revlog-index
80 debug-revlog-stats
80 81 debugancestor
81 82 debugantivirusrunning
82 83 debugapplystreamclonebundle
@@ -264,13 +265,14 Show all commands + options
264 265 bundle: exact, force, rev, branch, base, all, type, ssh, remotecmd, insecure
265 266 cat: output, rev, decode, include, exclude, template
266 267 clone: noupdate, updaterev, rev, branch, pull, uncompressed, stream, ssh, remotecmd, insecure
267 commit: addremove, close-branch, amend, secret, edit, force-close-branch, interactive, include, exclude, message, logfile, date, user, subrepos
268 commit: addremove, close-branch, amend, secret, draft, edit, force-close-branch, interactive, include, exclude, message, logfile, date, user, subrepos
268 269 config: untrusted, exp-all-known, edit, local, source, shared, non-shared, global, template
269 270 continue: dry-run
270 271 copy: forget, after, at-rev, force, include, exclude, dry-run
271 debug-delta-find: changelog, manifest, dir, template
272 debug-delta-find: changelog, manifest, dir, template, source
272 273 debug-repair-issue6528: to-report, from-report, paranoid, dry-run
273 274 debug-revlog-index: changelog, manifest, dir, template
275 debug-revlog-stats: changelog, manifest, filelogs, template
274 276 debugancestor:
275 277 debugantivirusrunning:
276 278 debugapplystreamclonebundle:
@@ -326,7 +328,7 Show all commands + options
326 328 debugrevspec: optimize, show-revs, show-set, show-stage, no-optimized, verify-optimized
327 329 debugserve: sshstdio, logiofd, logiofile
328 330 debugsetparents:
329 debugshell:
331 debugshell: command
330 332 debugsidedata: changelog, manifest, dir
331 333 debugssl:
332 334 debugstrip: rev, force, no-backup, nobackup, , keep, bookmark, soft
@@ -42,8 +42,10 f.close()
42 42 os.utime('foo', (1000, 1000))
43 43
44 44 # add+commit 'foo'
45 repo[None].add([b'foo'])
46 repo.commit(text=b'commit1', date=b"0 0")
45 with repo.wlock(), repo.lock(), repo.transaction(b'test-context'):
46 with repo.dirstate.changing_files(repo):
47 repo[None].add([b'foo'])
48 repo.commit(text=b'commit1', date=b"0 0")
47 49
48 50 d = repo[None][b'foo'].date()
49 51 if os.name == 'nt':
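
Note: the hunks in this file migrate bare dirstate mutations to the newer API: callers now hold the wlock, the store lock, and an open transaction, and wrap file-set changes in dirstate.changing_files(). A minimal self-contained sketch of the pattern, assuming an existing repository in the current directory; treat the exact nesting of commit() as a sketch, not as authoritative:

    from mercurial import hg, ui as uimod

    repo = hg.repository(uimod.ui.load())  # open the repo in cwd
    with repo.wlock(), repo.lock(), repo.transaction(b'example'):
        # file-set mutations must happen inside changing_files()
        with repo.dirstate.changing_files(repo):
            repo[None].add([b'foo'])
        repo.commit(text=b'add foo', date=b'0 0')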
@@ -108,16 +110,20 actx2 = repo[b'.']
108 110
109 111 repo.wwrite(b'bar-m', b'bar-m\n', b'')
110 112 repo.wwrite(b'bar-r', b'bar-r\n', b'')
111 repo[None].add([b'bar-m', b'bar-r'])
112 repo.commit(text=b'add bar-m, bar-r', date=b"0 0")
113 with repo.wlock(), repo.lock(), repo.transaction(b'test-context'):
114 with repo.dirstate.changing_files(repo):
115 repo[None].add([b'bar-m', b'bar-r'])
116 repo.commit(text=b'add bar-m, bar-r', date=b"0 0")
113 117
114 118 # ancestor "wcctx ~ 1"
115 119 actx1 = repo[b'.']
116 120
117 121 repo.wwrite(b'bar-m', b'bar-m bar-m\n', b'')
118 122 repo.wwrite(b'bar-a', b'bar-a\n', b'')
119 repo[None].add([b'bar-a'])
120 repo[None].forget([b'bar-r'])
123 with repo.wlock(), repo.lock(), repo.transaction(b'test-context'):
124 with repo.dirstate.changing_files(repo):
125 repo[None].add([b'bar-a'])
126 repo[None].forget([b'bar-r'])
121 127
122 128 # status at this point:
123 129 # M bar-m
@@ -237,7 +243,8 for i in [b'1', b'2', b'3']:
237 243 with repo.wlock(), repo.lock(), repo.transaction(b'test'):
238 244 with open(b'4', 'wb') as f:
239 245 f.write(b'4')
240 repo.dirstate.set_tracked(b'4')
246 with repo.dirstate.changing_files(repo):
247 repo.dirstate.set_tracked(b'4')
241 248 repo.commit(b'4')
242 249 revsbefore = len(repo.changelog)
243 250 repo.invalidate(clearfilecache=True)
@@ -14,12 +14,7
14 14
15 15 $ echo adding more to file a >> a
16 16 $ hg commit -m third
17 $ hg verify
18 checking changesets
19 checking manifests
20 crosschecking files in changesets and manifests
21 checking files
22 checked 3 changesets with 3 changes to 1 files
17 $ hg verify -q
23 18
24 19 Dumping revlog of file a to stdout:
25 20 $ "$PYTHON" "$CONTRIBDIR/dumprevlog" .hg/store/data/a.i
@@ -79,12 +74,7 Rebuild fncache with clone --pull:
79 74
80 75 Verify:
81 76
82 $ hg -R repo-c verify
83 checking changesets
84 checking manifests
85 crosschecking files in changesets and manifests
86 checking files
87 checked 3 changesets with 3 changes to 1 files
77 $ hg -R repo-c verify -q
88 78
89 79 Compare repos:
90 80
@@ -307,7 +307,7 error case are ignored
307 307 malformatted run limit entry, missing "-": 500
308 308 ! wall * comb * user * sys * (best of 5) (glob)
309 309 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
310 malformatted run limit entry, could not convert string to float: 'aaa': aaa-12 (py3 !)
310 malformatted run limit entry, could not convert string to float: 'aaa': aaa-12
311 311 ! wall * comb * user * sys * (best of 5) (glob)
312 312 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
313 313 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
@@ -292,12 +292,12 ensure that the filemap contains duplica
292 292 $ rm -rf source/.hg/store/data/dir/file4
293 293 #endif
294 294 $ hg -q convert --filemap renames.fmap --datesort source dummydest
295 abort: data/dir/file3@e96dce0bc6a217656a3a410e5e6bec2c4f42bf7c: no match found (reporevlogstore !)
295 abort: dir/file3@e96dce0bc6a217656a3a410e5e6bec2c4f42bf7c: no match found (reporevlogstore !)
296 296 abort: data/dir/file3/index@e96dce0bc6a2: no node (reposimplestore !)
297 297 [50]
298 298 $ hg -q convert --filemap renames.fmap --datesort --config convert.hg.ignoreerrors=1 source renames.repo
299 ignoring: data/dir/file3@e96dce0bc6a217656a3a410e5e6bec2c4f42bf7c: no match found (reporevlogstore !)
300 ignoring: data/dir/file4@6edd55f559cdce67132b12ca09e09cee08b60442: no match found (reporevlogstore !)
299 ignoring: dir/file3@e96dce0bc6a217656a3a410e5e6bec2c4f42bf7c: no match found (reporevlogstore !)
300 ignoring: dir/file4@6edd55f559cdce67132b12ca09e09cee08b60442: no match found (reporevlogstore !)
301 301 ignoring: data/dir/file3/index@e96dce0bc6a2: no node (reposimplestore !)
302 302 ignoring: data/dir/file4/index@6edd55f559cd: no node (reposimplestore !)
303 303 $ hg up -q -R renames.repo
@@ -312,12 +312,7 ensure that the filemap contains duplica
312 312 |
313 313 o 0 "0: add foo baz dir/" files: dir2/dir3/file dir2/dir3/subdir/file3 foo2
314 314
315 $ hg -R renames.repo verify
316 checking changesets
317 checking manifests
318 crosschecking files in changesets and manifests
319 checking files
320 checked 5 changesets with 7 changes to 4 files
315 $ hg -R renames.repo verify -q
321 316
322 317 $ hg -R renames.repo manifest --debug
323 318 d43feacba7a4f1f2080dde4a4b985bd8a0236d46 644 copied2
@@ -182,18 +182,13 break it
182 182 sorting...
183 183 converting...
184 184 4 init
185 ignoring: data/b@1e88685f5ddec574a34c70af492f95b6debc8741: no match found (reporevlogstore !)
185 ignoring: b@1e88685f5ddec574a34c70af492f95b6debc8741: no match found (reporevlogstore !)
186 186 ignoring: data/b/index@1e88685f5dde: no node (reposimplestore !)
187 187 3 changeall
188 188 2 changebagain
189 189 1 merge
190 190 0 moveb
191 $ hg -R fixed verify
192 checking changesets
193 checking manifests
194 crosschecking files in changesets and manifests
195 checking files
196 checked 5 changesets with 5 changes to 3 files
191 $ hg -R fixed verify -q
197 192
198 193 manifest -r 0
199 194
@@ -96,12 +96,7 this should show the rename information
96 96 $ hg cat a > asum
97 97 $ md5sum.py asum
98 98 60b725f10c9c85c70d97880dfe8191b3 asum
99 $ hg verify
100 checking changesets
101 checking manifests
102 crosschecking files in changesets and manifests
103 checking files
104 checked 2 changesets with 2 changes to 2 files
99 $ hg verify -q
105 100
106 101 $ cd ..
107 102
@@ -39,6 +39,9
39 39 chunks size : 191
40 40 0x75 (u) : 191 (100.00%)
41 41
42
43 total-stored-content: 188 bytes
44
42 45 avg chain length : 0
43 46 max chain length : 0
44 47 max chain reach : 67
@@ -74,6 +77,9
74 77 empty : 0 ( 0.00%)
75 78 0x75 (u) : 88 (100.00%)
76 79
80
81 total-stored-content: 86 bytes
82
77 83 avg chain length : 0
78 84 max chain length : 0
79 85 max chain reach : 44
@@ -107,6 +113,9
107 113 chunks size : 3
108 114 0x75 (u) : 3 (100.00%)
109 115
116
117 total-stored-content: 2 bytes
118
110 119 avg chain length : 0
111 120 max chain length : 0
112 121 max chain reach : 3
@@ -212,7 +221,7 debugdelta chain basic output
212 221 {
213 222 "chainid": 1,
214 223 "chainlen": 1,
215 "chainratio": 1.0232558139534884, (py3 !)
224 "chainratio": 1.0232558139534884,
216 225 "chainsize": 44,
217 226 "compsize": 44,
218 227 "deltatype": "base",
@@ -252,7 +261,7 debugdelta chain basic output
252 261 {
253 262 "chainid": 3,
254 263 "chainlen": 1,
255 "chainratio": 1.0232558139534884, (py3 !)
264 "chainratio": 1.0232558139534884,
256 265 "chainsize": 44,
257 266 "compsize": 44,
258 267 "deltatype": "base",
@@ -293,7 +302,7 debugdelta chain with sparse read enable
293 302 {
294 303 "chainid": 1,
295 304 "chainlen": 1,
296 "chainratio": 1.0232558139534884, (py3 !)
305 "chainratio": 1.0232558139534884,
297 306 "chainsize": 44,
298 307 "compsize": 44,
299 308 "deltatype": "base",
@@ -333,7 +342,7 debugdelta chain with sparse read enable
333 342 {
334 343 "chainid": 3,
335 344 "chainlen": 1,
336 "chainratio": 1.0232558139534884, (py3 !)
345 "chainratio": 1.0232558139534884,
337 346 "chainsize": 44,
338 347 "compsize": 44,
339 348 "deltatype": "base",
@@ -715,3 +724,8 Test debugpeer
715 724 pushable: yes
716 725
717 726 #endif
727
728 Test debugshell
729
730 $ hg debugshell -c 'ui.write(b"%s\n" % ui.username())'
731 test
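
Note: this exercises the new "command" option that the completion hunk above adds to debugshell. Another hedged one-liner, assuming repo is exposed in the shell namespace alongside ui:

    $ hg debugshell -c 'ui.write(b"%d revisions\n" % len(repo))'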
@@ -8,7 +8,6 import sys
8 8 import types
9 9
10 10 # Don't import pycompat because it has too many side-effects.
11 ispy3 = sys.version_info[0] >= 3
12 11 ispy311 = (sys.version_info.major, sys.version_info.minor) >= (3, 11)
13 12
14 13 # Only run if demandimport is allowed
@@ -25,14 +24,11 if sys.flags.optimize:
25 24 if sys.version_info[0:2] == (3, 5):
26 25 sys.exit(80)
27 26
28 if ispy3:
29 from importlib.util import _LazyModule
27 from importlib.util import _LazyModule
30 28
31 try:
32 from importlib.util import _Module as moduletype
33 except ImportError:
34 moduletype = types.ModuleType
35 else:
29 try:
30 from importlib.util import _Module as moduletype
31 except ImportError:
36 32 moduletype = types.ModuleType
37 33
38 34 if os.name != 'nt':
@@ -68,10 +64,7 from mercurial import node
68 64
69 65 # We use assert instead of a unittest test case because having imports inside
70 66 # functions changes behavior of the demand importer.
71 if ispy3:
72 assert not isinstance(node, _LazyModule)
73 else:
74 assert f(node) == "<module 'mercurial.node' from '?'>", f(node)
67 assert not isinstance(node, _LazyModule)
75 68
76 69 # now enable it for real
77 70 del os.environ['HGDEMANDIMPORT']
@@ -81,11 +74,8 demandimport.enable()
81 74 assert 'mercurial.error' not in sys.modules
82 75 from mercurial import error as errorproxy
83 76
84 if ispy3:
85 assert isinstance(errorproxy, _LazyModule)
86 assert f(errorproxy) == "<module 'mercurial.error' from '?'>", f(errorproxy)
87 else:
88 assert f(errorproxy) == "<unloaded module 'error'>", f(errorproxy)
77 assert isinstance(errorproxy, _LazyModule)
78 assert f(errorproxy) == "<module 'mercurial.error' from '?'>", f(errorproxy)
89 79
90 80 doc = ' '.join(errorproxy.__doc__.split()[:3])
91 81 assert doc == 'Mercurial exceptions. This', doc
@@ -96,22 +86,16 assert errorproxy.__name__ == 'mercurial
96 86 name = errorproxy.__dict__['__name__']
97 87 assert name == 'mercurial.error', name
98 88
99 if ispy3:
100 assert not isinstance(errorproxy, _LazyModule)
101 assert f(errorproxy) == "<module 'mercurial.error' from '?'>", f(errorproxy)
102 else:
103 assert f(errorproxy) == "<proxied module 'error'>", f(errorproxy)
89 assert not isinstance(errorproxy, _LazyModule)
90 assert f(errorproxy) == "<module 'mercurial.error' from '?'>", f(errorproxy)
104 91
105 92 import os
106 93
107 if ispy3:
108 assert not isinstance(os, _LazyModule)
109 if ispy311:
110 assert f(os) == "<module 'os' (frozen)>", f(os)
111 else:
112 assert f(os) == "<module 'os' from '?'>", f(os)
94 assert not isinstance(os, _LazyModule)
95 if ispy311:
96 assert f(os) == "<module 'os' (frozen)>", f(os)
113 97 else:
114 assert f(os) == "<unloaded module 'os'>", f(os)
98 assert f(os) == "<module 'os' from '?'>", f(os)
115 99
116 100 assert f(os.system) == '<built-in function system>', f(os.system)
117 101 if ispy311:
@@ -122,13 +106,10 else:
122 106 assert 'mercurial.utils.procutil' not in sys.modules
123 107 from mercurial.utils import procutil
124 108
125 if ispy3:
126 assert isinstance(procutil, _LazyModule)
127 assert f(procutil) == "<module 'mercurial.utils.procutil' from '?'>", f(
128 procutil
129 )
130 else:
131 assert f(procutil) == "<unloaded module 'procutil'>", f(procutil)
109 assert isinstance(procutil, _LazyModule)
110 assert f(procutil) == "<module 'mercurial.utils.procutil' from '?'>", f(
111 procutil
112 )
132 113
133 114 assert f(procutil.system) == '<function system at 0x?>', f(procutil.system)
134 115 assert procutil.__class__ == moduletype, procutil.__class__
@@ -140,84 +121,51 assert f(procutil.system) == '<function
140 121 assert 'mercurial.hgweb' not in sys.modules
141 122 from mercurial import hgweb
142 123
143 if ispy3:
144 assert isinstance(hgweb, _LazyModule)
145 assert f(hgweb) == "<module 'mercurial.hgweb' from '?'>", f(hgweb)
146 assert isinstance(hgweb.hgweb_mod, _LazyModule)
147 assert (
148 f(hgweb.hgweb_mod) == "<module 'mercurial.hgweb.hgweb_mod' from '?'>"
149 ), f(hgweb.hgweb_mod)
150 else:
151 assert f(hgweb) == "<unloaded module 'hgweb'>", f(hgweb)
152 assert f(hgweb.hgweb_mod) == "<unloaded module 'hgweb_mod'>", f(
153 hgweb.hgweb_mod
154 )
124 assert isinstance(hgweb, _LazyModule)
125 assert f(hgweb) == "<module 'mercurial.hgweb' from '?'>", f(hgweb)
126 assert isinstance(hgweb.hgweb_mod, _LazyModule)
127 assert f(hgweb.hgweb_mod) == "<module 'mercurial.hgweb.hgweb_mod' from '?'>", f(
128 hgweb.hgweb_mod
129 )
155 130
156 131 assert f(hgweb) == "<module 'mercurial.hgweb' from '?'>", f(hgweb)
157 132
158 133 import re as fred
159 134
160 if ispy3:
161 assert not isinstance(fred, _LazyModule)
162 assert f(fred) == "<module 're' from '?'>"
163 else:
164 assert f(fred) == "<unloaded module 're'>", f(fred)
135 assert not isinstance(fred, _LazyModule)
136 assert f(fred) == "<module 're' from '?'>"
165 137
166 138 import re as remod
167 139
168 if ispy3:
169 assert not isinstance(remod, _LazyModule)
170 assert f(remod) == "<module 're' from '?'>"
171 else:
172 assert f(remod) == "<unloaded module 're'>", f(remod)
140 assert not isinstance(remod, _LazyModule)
141 assert f(remod) == "<module 're' from '?'>"
173 142
174 143 import sys as re
175 144
176 if ispy3:
177 assert not isinstance(re, _LazyModule)
178 assert f(re) == "<module 'sys' (built-in)>"
179 else:
180 assert f(re) == "<unloaded module 'sys'>", f(re)
145 assert not isinstance(re, _LazyModule)
146 assert f(re) == "<module 'sys' (built-in)>"
181 147
182 if ispy3:
183 assert not isinstance(fred, _LazyModule)
184 assert f(fred) == "<module 're' from '?'>", f(fred)
185 else:
186 assert f(fred) == "<unloaded module 're'>", f(fred)
148 assert not isinstance(fred, _LazyModule)
149 assert f(fred) == "<module 're' from '?'>", f(fred)
187 150
188 151 assert f(fred.sub) == '<function sub at 0x?>', f(fred.sub)
189 152
190 if ispy3:
191 assert not isinstance(fred, _LazyModule)
192 assert f(fred) == "<module 're' from '?'>", f(fred)
193 else:
194 assert f(fred) == "<proxied module 're'>", f(fred)
153 assert not isinstance(fred, _LazyModule)
154 assert f(fred) == "<module 're' from '?'>", f(fred)
195 155
196 156 remod.escape # use remod
197 157 assert f(remod) == "<module 're' from '?'>", f(remod)
198 158
199 if ispy3:
200 assert not isinstance(re, _LazyModule)
201 assert f(re) == "<module 'sys' (built-in)>"
202 assert f(type(re.stderr)) == "<class '_io.TextIOWrapper'>", f(
203 type(re.stderr)
204 )
205 assert f(re) == "<module 'sys' (built-in)>"
206 else:
207 assert f(re) == "<unloaded module 'sys'>", f(re)
208 assert f(re.stderr) == "<open file '<whatever>', mode 'w' at 0x?>", f(
209 re.stderr
210 )
211 assert f(re) == "<proxied module 'sys'>", f(re)
159 assert not isinstance(re, _LazyModule)
160 assert f(re) == "<module 'sys' (built-in)>"
161 assert f(type(re.stderr)) == "<class '_io.TextIOWrapper'>", f(type(re.stderr))
162 assert f(re) == "<module 'sys' (built-in)>"
212 163
213 164 assert 'telnetlib' not in sys.modules
214 165 import telnetlib
215 166
216 if ispy3:
217 assert isinstance(telnetlib, _LazyModule)
218 assert f(telnetlib) == "<module 'telnetlib' from '?'>"
219 else:
220 assert f(telnetlib) == "<unloaded module 'telnetlib'>", f(telnetlib)
167 assert isinstance(telnetlib, _LazyModule)
168 assert f(telnetlib) == "<module 'telnetlib' from '?'>"
221 169
222 170 try:
223 171 from telnetlib import unknownattr
@@ -240,3 +188,11 assert 'ftplib' not in sys.modules
240 188 zipfileimp = __import__('ftplib', globals(), locals(), ['unknownattr'])
241 189 assert f(zipfileimp) == "<module 'ftplib' from '?'>", f(zipfileimp)
242 190 assert not util.safehasattr(zipfileimp, 'unknownattr')
191
192
193 # test deactivation for issue6725
194 del sys.modules['telnetlib']
195 with demandimport.deactivated():
196 import telnetlib
197 assert telnetlib.__loader__ == telnetlib.__spec__.loader
198 assert telnetlib.__loader__.get_resource_reader
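
Note: the added assertions cover demandimport.deactivated() (issue6725), which temporarily turns lazy importing off so the import happens eagerly and the module keeps a working loader. A hedged standalone sketch of the same idea:

    import sys
    from mercurial import demandimport

    demandimport.enable()
    with demandimport.deactivated():
        import telnetlib  # imported for real, not as a lazy stub
    assert telnetlib.__loader__ == telnetlib.__spec__.loader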
@@ -2,6 +2,9 Set up
2 2
3 3 $ hg init repo
4 4 $ cd repo
5 $ echo a > a
6 $ hg add a
7 $ hg commit -m a
5 8
6 9 Try to import an empty patch
7 10
@@ -7,8 +7,6 import re
7 7 import subprocess
8 8 import sys
9 9
10 ispy3 = sys.version_info[0] >= 3
11
12 10 if 'TERM' in os.environ:
13 11 del os.environ['TERM']
14 12
@@ -40,9 +38,7 def testmod(name, optionflags=0, testtar
40 38
41 39 # minimal copy of doctest.testmod()
42 40 finder = doctest.DocTestFinder()
43 checker = None
44 if ispy3:
45 checker = py3docchecker()
41 checker = py3docchecker()
46 42 runner = doctest.DocTestRunner(checker=checker, optionflags=optionflags)
47 43 for test in finder.find(mod, name):
48 44 runner.run(test)
@@ -91,8 +87,7 for f in files:
91 87 if not re.search(br'\n\s*>>>', fh.read()):
92 88 continue
93 89
94 if ispy3:
95 f = f.decode()
90 f = f.decode()
96 91
97 92 modname = f.replace('.py', '').replace('\\', '.').replace('/', '.')
98 93
@@ -9,12 +9,7 Try some commands:
9 9 $ hg grep wah
10 10 [1]
11 11 $ hg manifest
12 $ hg verify
13 checking changesets
14 checking manifests
15 crosschecking files in changesets and manifests
16 checking files
17 checked 0 changesets with 0 changes to 0 files
12 $ hg verify -q
18 13
19 14 Check the basic files created:
20 15
@@ -37,16 +32,10 Poke at a clone:
37 32 updating to branch default
38 33 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
39 34 $ cd b
40 $ hg verify
41 checking changesets
42 checking manifests
43 crosschecking files in changesets and manifests
44 checking files
45 checked 0 changesets with 0 changes to 0 files
35 $ hg verify -q
46 36 $ ls .hg
47 37 00changelog.i
48 38 cache
49 dirstate
50 39 hgrc
51 40 requires
52 41 store
@@ -93,9 +93,4 revision 4
93 93 0 0 2ed2a3912a0b 000000000000 000000000000
94 94 1 1 79d7492df40a 2ed2a3912a0b 000000000000
95 95
96 $ hg verify
97 checking changesets
98 checking manifests
99 crosschecking files in changesets and manifests
100 checking files
101 checked 5 changesets with 4 changes to 2 files
96 $ hg verify -q
@@ -574,9 +574,9 Python 3's lazy importer verifies module
574 574 module stub. Our custom lazy importer for Python 2 always returns a stub.
575 575
576 576 $ (PYTHONPATH=${PYTHONPATH}${PATHSEP}${TESTTMP}; hg --config extensions.checkrelativity=$TESTTMP/checkrelativity.py checkrelativity) || true
577 *** failed to import extension "checkrelativity" from $TESTTMP/checkrelativity.py: No module named 'extlibroot.lsub1.lsub2.notexist' (py3 !)
578 hg: unknown command 'checkrelativity' (py3 !)
579 (use 'hg help' for a list of commands) (py3 !)
577 *** failed to import extension "checkrelativity" from $TESTTMP/checkrelativity.py: No module named 'extlibroot.lsub1.lsub2.notexist'
578 hg: unknown command 'checkrelativity'
579 (use 'hg help' for a list of commands)
580 580
581 581 #endif
582 582
@@ -1863,7 +1863,7 Prohibit the use of unicode strings as t
1863 1863 > test_unicode_default_value = $TESTTMP/test_unicode_default_value.py
1864 1864 > EOF
1865 1865 $ hg -R $TESTTMP/opt-unicode-default dummy
1866 *** failed to import extension "test_unicode_default_value" from $TESTTMP/test_unicode_default_value.py: unicode 'value' found in cmdtable.dummy (py3 !)
1866 *** failed to import extension "test_unicode_default_value" from $TESTTMP/test_unicode_default_value.py: unicode 'value' found in cmdtable.dummy
1867 1867 *** (use b'' to make it byte string)
1868 1868 hg: unknown command 'dummy'
1869 1869 (did you mean summary?)
@@ -135,11 +135,6 Everything should be clean now:
135 135
136 136 $ hg status
137 137
138 $ hg verify
139 checking changesets
140 checking manifests
141 crosschecking files in changesets and manifests
142 checking files
143 checked 4 changesets with 10 changes to 4 files
138 $ hg verify -q
144 139
145 140 $ cd ..
@@ -165,7 +165,7 def fakeuncacheable():
165 165
166 166 def test_filecache_synced():
167 167 # test old behavior that caused filecached properties to go out of sync
168 os.system('hg init && echo a >> a && hg ci -qAm.')
168 os.system('hg init && echo a >> a && hg add a && hg ci -qm.')
169 169 repo = hg.repository(uimod.ui.load())
170 170 # first rollback clears the filecache, but changelog to stays in __dict__
171 171 repo.rollback()
@@ -213,11 +213,11 Ensure the data got to the server OK
213 213 File "*/mercurial/revlogutils/flagutil.py", line *, in insertflagprocessor (glob) (no-pyoxidizer !)
214 214 File "mercurial.revlogutils.flagutil", line *, in insertflagprocessor (glob) (pyoxidizer !)
215 215 raise error.Abort(msg)
216 mercurial.error.Abort: cannot register multiple processors on flag '0x8'. (py3 !)
216 mercurial.error.Abort: cannot register multiple processors on flag '0x8'.
217 217 *** failed to set up extension duplicate: cannot register multiple processors on flag '0x8'.
218 218 $ hg st 2>&1 | egrep 'cannot register multiple processors|flagprocessorext'
219 219 File "*/tests/flagprocessorext.py", line *, in extsetup (glob)
220 mercurial.error.Abort: cannot register multiple processors on flag '0x8'. (py3 !)
220 mercurial.error.Abort: cannot register multiple processors on flag '0x8'.
221 221 *** failed to set up extension duplicate: cannot register multiple processors on flag '0x8'.
222 222 File "*/tests/flagprocessorext.py", line *, in b64decode (glob)
223 223
@@ -49,12 +49,7 Testing a.i.hg/c:
49 49
50 50 Testing verify:
51 51
52 $ hg verify
53 checking changesets
54 checking manifests
55 crosschecking files in changesets and manifests
56 checking files
57 checked 3 changesets with 3 changes to 3 files
52 $ hg verify -q
58 53
59 54 $ rm .hg/store/fncache
60 55
@@ -66,6 +61,7 Testing verify:
66 61 warning: revlog 'data/a.i' not in fncache!
67 62 warning: revlog 'data/a.i.hg/c.i' not in fncache!
68 63 warning: revlog 'data/a.i/b.i' not in fncache!
64 checking dirstate
69 65 checked 3 changesets with 3 changes to 3 files
70 66 3 warnings encountered!
71 67 hint: run "hg debugrebuildfncache" to recover from corrupt fncache
@@ -78,12 +74,7 Follow the hint to make sure it works
78 74 adding data/a.i/b.i
79 75 3 items added, 0 removed from fncache
80 76
81 $ hg verify
82 checking changesets
83 checking manifests
84 crosschecking files in changesets and manifests
85 checking files
86 checked 3 changesets with 3 changes to 3 files
77 $ hg verify -q
87 78
88 79 $ cd ..
89 80
@@ -112,12 +103,10 Non store repo:
112 103 .hg/phaseroots
113 104 .hg/requires
114 105 .hg/undo
115 .hg/undo.backup.dirstate
116 106 .hg/undo.backupfiles
117 107 .hg/undo.bookmarks
118 108 .hg/undo.branch
119 109 .hg/undo.desc
120 .hg/undo.dirstate
121 110 .hg/undo.phaseroots
122 111 .hg/wcache
123 112 .hg/wcache/checkisexec (execbit !)
@@ -156,11 +145,9 Non fncache repo:
156 145 .hg/store/undo
157 146 .hg/store/undo.backupfiles
158 147 .hg/store/undo.phaseroots
159 .hg/undo.backup.dirstate
160 148 .hg/undo.bookmarks
161 149 .hg/undo.branch
162 150 .hg/undo.desc
163 .hg/undo.dirstate
164 151 .hg/wcache
165 152 .hg/wcache/checkisexec (execbit !)
166 153 .hg/wcache/checklink (symlink !)
@@ -313,6 +300,7 Aborted transactions can be recovered la
313 300
314 301 $ cat > ../exceptionext.py <<EOF
315 302 > import os
303 > import signal
316 304 > from mercurial import (
317 305 > commands,
318 306 > error,
@@ -324,19 +312,14 Aborted transactions can be recovered la
324 312 > def trwrapper(orig, self, *args, **kwargs):
325 313 > tr = orig(self, *args, **kwargs)
326 314 > def fail(tr):
327 > raise error.Abort(b"forced transaction failure")
315 > os.kill(os.getpid(), signal.SIGKILL)
328 316 > # zzz prefix to ensure it sorted after store.write
329 317 > tr.addfinalize(b'zzz-forcefails', fail)
330 318 > return tr
331 319 >
332 > def abortwrapper(orig, self, *args, **kwargs):
333 > raise error.Abort(b"forced transaction failure")
334 >
335 320 > def uisetup(ui):
336 321 > extensions.wrapfunction(localrepo.localrepository, 'transaction',
337 322 > trwrapper)
338 > extensions.wrapfunction(transaction.transaction, '_abort',
339 > abortwrapper)
340 323 >
341 324 > cmdtable = {}
342 325 >
@@ -348,8 +331,12 Clean cached versions
348 331
349 332 $ hg up -q 1
350 333 $ touch z
351 $ hg ci -qAm z 2>/dev/null
352 [255]
334 # Cannot rely on the return code value as chg uses a different one.
335 # So we use a `|| echo` trick
336 # XXX-CHG fixing chg behavior would be nice here.
337 $ hg ci -qAm z || echo "He's Dead, Jim." 2>/dev/null
338 Killed (?)
339 He's Dead, Jim.
353 340 $ cat .hg/store/fncache | sort
354 341 data/y.i
355 342 data/z.i
@@ -359,6 +346,7 Clean cached versions
359 346 checking manifests
360 347 crosschecking files in changesets and manifests
361 348 checking files
349 checking dirstate
362 350 checked 1 changesets with 1 changes to 1 files
363 351 $ cat .hg/store/fncache
364 352 data/y.i
@@ -142,6 +142,12 diff even works transparently in both sy
142 142 alpha
143 143 +blah
144 144
145 status --all shows all files, including clean:
146 $ hg status --all
147 M alpha
148 ? gamma
149 C beta
150
145 151 Remove a file, it shows as such:
146 152 $ rm alpha
147 153 $ hg status
@@ -306,7 +312,7 This covers gitlog._partialmatch()
306 312
307 313 $ hg log -r dead
308 314 abort: unknown revision 'dead'
309 [255]
315 [10]
310 316
311 317 This covers changelog.findmissing()
312 318 $ hg merge --preview 3d9be8deba43
@@ -272,7 +272,7 Testing --traceback:
272 272 #if no-chg no-rhg
273 273 $ hg --cwd c --config x --traceback id 2>&1 | grep -i 'traceback'
274 274 Traceback (most recent call last):
275 Traceback (most recent call last): (py3 !)
275 Traceback (most recent call last):
276 276 #else
277 277 Traceback for '--config' errors not supported with chg.
278 278 $ hg --cwd c --config x --traceback id 2>&1 | grep -i 'traceback'
@@ -151,12 +151,7 Create a non-inlined filelog in r3:
151 151
152 152 Push to repo r1 should break up most hardlinks in r2:
153 153
154 $ hg -R r2 verify
155 checking changesets
156 checking manifests
157 crosschecking files in changesets and manifests
158 checking files
159 checked 2 changesets with 2 changes to 2 files
154 $ hg -R r2 verify -q
160 155
161 156 $ cd r3
162 157 $ hg push
@@ -182,13 +177,7 Push to repo r1 should break up most har
182 177 1 r2/.hg/store/fncache
183 178 #endif
184 179
185 $ hg -R r2 verify
186 checking changesets
187 checking manifests
188 crosschecking files in changesets and manifests
189 checking files
190 checked 2 changesets with 2 changes to 2 files
191
180 $ hg -R r2 verify -q
192 181
193 182 $ cd r1
194 183 $ hg up
@@ -272,11 +261,10 r4 has hardlinks in the working dir (not
272 261 2 r4/.hg/store/undo.backup.phaseroots
273 262 2 r4/.hg/store/undo.backupfiles
274 263 2 r4/.hg/store/undo.phaseroots
275 [24] r4/\.hg/undo\.backup\.dirstate (re)
264 2 r4/\.hg/undo\.backup\.dirstate (re)
276 265 2 r4/.hg/undo.bookmarks
277 266 2 r4/.hg/undo.branch
278 267 2 r4/.hg/undo.desc
279 [24] r4/\.hg/undo\.dirstate (re)
280 268 2 r4/.hg/wcache/checkisexec (execbit !)
281 269 2 r4/.hg/wcache/checklink-target (symlink !)
282 270 2 r4/.hg/wcache/checknoexec (execbit !)
@@ -288,9 +276,9 r4 has hardlinks in the working dir (not
288 276
289 277 Update back to revision 12 in r4 should break hardlink of file f1 and f3:
290 278 #if hardlink-whitelisted
291 $ nlinksdir r4/.hg/undo.backup.dirstate r4/.hg/undo.dirstate
292 4 r4/.hg/undo.backup.dirstate
293 4 r4/.hg/undo.dirstate
279 $ nlinksdir r4/.hg/undo.backup.dirstate r4/.hg/dirstate
280 2 r4/.hg/dirstate
281 2 r4/.hg/undo.backup.dirstate
294 282 #endif
295 283
296 284
@@ -330,11 +318,10 Update back to revision 12 in r4 should
330 318 2 r4/.hg/store/undo.backup.phaseroots
331 319 2 r4/.hg/store/undo.backupfiles
332 320 2 r4/.hg/store/undo.phaseroots
333 [24] r4/\.hg/undo\.backup\.dirstate (re)
321 2 r4/\.hg/undo\.backup\.dirstate (re)
334 322 2 r4/.hg/undo.bookmarks
335 323 2 r4/.hg/undo.branch
336 324 2 r4/.hg/undo.desc
337 [24] r4/\.hg/undo\.dirstate (re)
338 325 2 r4/.hg/wcache/checkisexec (execbit !)
339 326 2 r4/.hg/wcache/checklink-target (symlink !)
340 327 2 r4/.hg/wcache/checknoexec (execbit !)
@@ -346,9 +333,9 Update back to revision 12 in r4 should
346 333 2 r4/f3 (no-execbit !)
347 334
348 335 #if hardlink-whitelisted
349 $ nlinksdir r4/.hg/undo.backup.dirstate r4/.hg/undo.dirstate
350 4 r4/.hg/undo.backup.dirstate
351 4 r4/.hg/undo.dirstate
336 $ nlinksdir r4/.hg/undo.backup.dirstate r4/.hg/dirstate
337 1 r4/.hg/dirstate
338 2 r4/.hg/undo.backup.dirstate
352 339 #endif
353 340
354 341 Test hardlinking outside hg:
@@ -985,6 +985,8 Test list of internal help commands
985 985 details.
986 986 debug-revlog-index
987 987 dump index data for a revlog
988 debug-revlog-stats
989 display statistics about revlogs in the store
988 990 debugancestor
989 991 find the ancestor revision of two revisions in a given index
990 992 debugantivirusrunning
@@ -2170,8 +2172,11 Test dynamic list of merge tools only sh
2170 2172
2171 2173 ":union"
2172 2174 Uses the internal non-interactive simple merge algorithm for merging
2173 files. It will use both left and right sides for conflict regions. No
2174 markers are inserted.
2175 files. It will use both local and other sides for conflict regions by
2176 adding local on top of other. No markers are inserted.
2177
2178 ":union-other-first"
2179 Like :union, but add other on top of local.
2175 2180
2176 2181 Internal tools are always available and do not require a GUI but will by
2177 2182 default not handle symlinks or binary files. See next section for detail
@@ -644,6 +644,15 test that prepushkey can prevent incomin
644 644 HG_TXNNAME=push
645 645 HG_URL=file:$TESTTMP/a
646 646
647 txnabort Python hook: bundle2,changes,source,txnid,txnname,url
648 txnabort hook: HG_BUNDLE2=1
649 HG_HOOKNAME=txnabort.1
650 HG_HOOKTYPE=txnabort
651 HG_SOURCE=push
652 HG_TXNID=TXN:$ID$
653 HG_TXNNAME=push
654 HG_URL=file:$TESTTMP/a
655
647 656 abort: prepushkey hook exited with status 1
648 657 [40]
649 658 $ cd ../a
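
Note: the new block shows a txnabort hook firing while the rejected push is rolled back, with the same transaction environment as the surrounding hooks. A hedged sketch of wiring one up (path, module, and function names are illustrative), registered as txnabort.1 = python:myhooks.py:report under [hooks]:

    # myhooks.py -- illustrative txnabort hook printing the keyword
    # argument names it receives, like the "txnabort Python hook:" line.
    def report(ui, repo, **kwargs):
        names = b','.join(sorted(k.encode('ascii') for k in kwargs))
        ui.write(b'txnabort Python hook: %s\n' % names)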
@@ -975,19 +984,19 test python hooks
975 984 Traceback (most recent call last):
976 985 SyntaxError: * (glob)
977 986 exception from second failed import attempt:
978 Traceback (most recent call last): (py3 !)
979 SyntaxError: * (glob) (py3 !)
980 987 Traceback (most recent call last):
981 ImportError: No module named 'hgext_syntaxerror' (py3 no-py36 !)
988 SyntaxError: * (glob)
989 Traceback (most recent call last):
990 ImportError: No module named 'hgext_syntaxerror' (no-py36 !)
982 991 ModuleNotFoundError: No module named 'hgext_syntaxerror' (py36 !)
983 992 Traceback (most recent call last):
984 SyntaxError: * (glob) (py3 !)
985 Traceback (most recent call last): (py3 !)
986 ImportError: No module named 'hgext_syntaxerror' (py3 no-py36 !)
993 SyntaxError: * (glob)
994 Traceback (most recent call last):
995 ImportError: No module named 'hgext_syntaxerror' (no-py36 !)
987 996 ModuleNotFoundError: No module named 'hgext_syntaxerror' (py36 !)
988 Traceback (most recent call last): (py3 !)
997 Traceback (most recent call last):
989 998 raise error.HookLoadError( (py38 !)
990 mercurial.error.HookLoadError: preoutgoing.syntaxerror hook is invalid: import of "syntaxerror" failed (py3 !)
999 mercurial.error.HookLoadError: preoutgoing.syntaxerror hook is invalid: import of "syntaxerror" failed
991 1000 abort: preoutgoing.syntaxerror hook is invalid: import of "syntaxerror" failed
992 1001
993 1002 $ echo '[hooks]' > ../a/.hg/hgrc
@@ -1120,7 +1129,7 test python hook configured with python:
1120 1129
1121 1130 $ hg id
1122 1131 loading pre-identify.npmd hook failed:
1123 abort: No module named 'repo' (py3 !)
1132 abort: No module named 'repo'
1124 1133 [255]
1125 1134
1126 1135 $ cd ../../b
@@ -1140,24 +1149,24 make sure --traceback works on hook impo
1140 1149 $ hg --traceback commit -ma 2>&1 | egrep '^exception|ImportError|ModuleNotFoundError|Traceback|HookLoadError|abort'
1141 1150 exception from first failed import attempt:
1142 1151 Traceback (most recent call last):
1143 ImportError: No module named 'somebogusmodule' (py3 no-py36 !)
1152 ImportError: No module named 'somebogusmodule' (no-py36 !)
1144 1153 ModuleNotFoundError: No module named 'somebogusmodule' (py36 !)
1145 1154 exception from second failed import attempt:
1146 Traceback (most recent call last): (py3 !)
1147 ImportError: No module named 'somebogusmodule' (py3 no-py36 !)
1148 ModuleNotFoundError: No module named 'somebogusmodule' (py36 !)
1149 Traceback (most recent call last): (py3 !)
1150 ImportError: No module named 'hgext_importfail' (py3 no-py36 !)
1151 ModuleNotFoundError: No module named 'hgext_importfail' (py36 !)
1152 Traceback (most recent call last): (py3 !)
1153 ImportError: No module named 'somebogusmodule' (py3 no-py36 !)
1155 Traceback (most recent call last):
1156 ImportError: No module named 'somebogusmodule' (no-py36 !)
1154 1157 ModuleNotFoundError: No module named 'somebogusmodule' (py36 !)
1155 1158 Traceback (most recent call last):
1156 ImportError: No module named 'hgext_importfail' (py3 no-py36 !)
1159 ImportError: No module named 'hgext_importfail' (no-py36 !)
1160 ModuleNotFoundError: No module named 'hgext_importfail' (py36 !)
1161 Traceback (most recent call last):
1162 ImportError: No module named 'somebogusmodule' (no-py36 !)
1163 ModuleNotFoundError: No module named 'somebogusmodule' (py36 !)
1164 Traceback (most recent call last):
1165 ImportError: No module named 'hgext_importfail' (no-py36 !)
1157 1166 ModuleNotFoundError: No module named 'hgext_importfail' (py36 !)
1158 1167 Traceback (most recent call last):
1159 1168 raise error.HookLoadError( (py38 !)
1160 mercurial.error.HookLoadError: precommit.importfail hook is invalid: import of "importfail" failed (py3 !)
1169 mercurial.error.HookLoadError: precommit.importfail hook is invalid: import of "importfail" failed
1161 1170 abort: precommit.importfail hook is invalid: import of "importfail" failed
1162 1171
1163 1172 Issue1827: Hooks Update & Commit not completely post operation
@@ -132,8 +132,8 Failure on subsequent HTTP request on th
132 132 readline(*) -> (2) \r\n (glob)
133 133 sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py36 !)
134 134 sendall(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py36 !)
135 write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py3 no-py36 !)
136 write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py3 no-py36 !)
135 write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (no-py36 !)
136 write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (no-py36 !)
137 137 readline(~) -> (26) GET /?cmd=batch HTTP/1.1\r\n (glob)
138 138 readline(*) -> (1?) Accept-Encoding* (glob)
139 139 read limit reached; closing socket
@@ -174,8 +174,8 Failure to read getbundle HTTP request
174 174 readline(*) -> (2) \r\n (glob)
175 175 sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py36 !)
176 176 sendall(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py36 !)
177 write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py3 no-py36 !)
178 write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py3 no-py36 !)
177 write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (no-py36 !)
178 write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (no-py36 !)
179 179 readline(~) -> (26) GET /?cmd=batch HTTP/1.1\r\n (glob)
180 180 readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
181 181 readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
@@ -193,8 +193,8 Failure to read getbundle HTTP request
193 193 readline(*) -> (2) \r\n (glob)
194 194 sendall(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (py36 !)
195 195 sendall(42) -> 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n; (py36 !)
196 write(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (py3 no-py36 !)
197 write(42) -> 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n; (py3 no-py36 !)
196 write(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (no-py36 !)
197 write(42) -> 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n; (no-py36 !)
198 198 readline(24 from ~) -> (*) GET /?cmd=getbundle HTTP* (glob)
199 199 read limit reached; closing socket
200 200 readline(~) -> (30) GET /?cmd=getbundle HTTP/1.1\r\n
@@ -230,8 +230,8 Now do a variation using POST to send ar
230 230 readline(*) -> (2) \r\n (glob)
231 231 sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py36 !)
232 232 sendall(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx httppostargs known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py36 !)
233 write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py3 no-py36 !)
234 write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx httppostargs known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py3 no-py36 !)
233 write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (no-py36 !)
234 write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx httppostargs known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (no-py36 !)
235 235 readline(~) -> (27) POST /?cmd=batch HTTP/1.1\r\n (glob)
236 236 readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
237 237 readline(*) -> (41) content-type: application/mercurial-0.1\r\n (glob)
@@ -256,7 +256,7 Now do a variation using POST to send ar
256 256 Traceback (most recent call last):
257 257 Exception: connection closed after receiving N bytes
258 258
259 write(126) -> HTTP/1.1 500 Internal Server Error\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nTransfer-Encoding: chunked\r\n\r\n (py3 no-py36 !)
259 write(126) -> HTTP/1.1 500 Internal Server Error\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nTransfer-Encoding: chunked\r\n\r\n (no-py36 !)
260 260
261 261 $ rm -f error.log
262 262
@@ -283,13 +283,13 Server sends a single character from the
283 283 readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
284 284 readline(*) -> (2) \r\n (glob)
285 285 sendall(1 from 160) -> (0) H (py36 !)
286 write(1 from 160) -> (0) H (py3 no-py36 !)
286 write(1 from 160) -> (0) H (no-py36 !)
287 287 write limit reached; closing socket
288 288 $LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=capabilities': (glob)
289 289 Traceback (most recent call last):
290 290 Exception: connection closed after sending N bytes
291 291
292 write(286) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\nHTTP/1.1 500 Internal Server Error\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nTransfer-Encoding: chunked\r\n\r\n (glob) (py3 no-py36 !)
292 write(286) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\nHTTP/1.1 500 Internal Server Error\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nTransfer-Encoding: chunked\r\n\r\n (glob) (no-py36 !)
293 293
294 294 $ rm -f error.log
295 295
@@ -317,8 +317,8 Server sends an incomplete capabilities
317 317 readline(*) -> (2) \r\n (glob)
318 318 sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py36 !)
319 319 sendall(20 from *) -> (0) batch branchmap bund (glob) (py36 !)
320 write(160) -> (20) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py3 no-py36 !)
321 write(20 from *) -> (0) batch branchmap bund (glob) (py3 no-py36 !)
320 write(160) -> (20) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (no-py36 !)
321 write(20 from *) -> (0) batch branchmap bund (glob) (no-py36 !)
322 322 write limit reached; closing socket
323 323 $LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=capabilities': (glob)
324 324 Traceback (most recent call last):
@@ -356,8 +356,8 TODO this output is horrible
356 356 readline(*) -> (2) \r\n (glob)
357 357 sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py36 !)
358 358 sendall(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py36 !)
359 write(160) -> (568) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py3 no-py36 !)
360 write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py3 no-py36 !)
359 write(160) -> (568) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (no-py36 !)
360 write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (no-py36 !)
361 361 readline(~) -> (26) GET /?cmd=batch HTTP/1.1\r\n
362 362 readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
363 363 readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
@@ -368,13 +368,13 TODO this output is horrible
368 368 readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
369 369 readline(*) -> (2) \r\n (glob)
370 370 sendall(118 from 159) -> (0) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: applicat (py36 !)
371 write(118 from 159) -> (0) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: applicat (py3 no-py36 !)
371 write(118 from 159) -> (0) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: applicat (no-py36 !)
372 372 write limit reached; closing socket
373 373 $LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=batch': (glob)
374 374 Traceback (most recent call last):
375 375 Exception: connection closed after sending N bytes
376 376
377 write(285) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\nHTTP/1.1 500 Internal Server Error\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nTransfer-Encoding: chunked\r\n\r\n (py3 no-py36 !)
377 write(285) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\nHTTP/1.1 500 Internal Server Error\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nTransfer-Encoding: chunked\r\n\r\n (no-py36 !)
378 378
379 379 $ rm -f error.log
380 380
@@ -402,8 +402,8 Server sends an incomplete HTTP response
402 402 readline(*) -> (2) \r\n (glob)
403 403 sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py36 !)
404 404 sendall(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py36 !)
405 write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py3 no-py36 !)
406 write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py3 no-py36 !)
405 write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (no-py36 !)
406 write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (no-py36 !)
407 407 readline(~) -> (26) GET /?cmd=batch HTTP/1.1\r\n
408 408 readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
409 409 readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
@@ -415,8 +415,8 Server sends an incomplete HTTP response
415 415 readline(*) -> (2) \r\n (glob)
416 416 sendall(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (py36 !)
417 417 sendall(24 from 42) -> (0) 96ee1d7354c4ad7372047672 (py36 !)
418 write(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (py3 no-py36 !)
419 write(24 from 42) -> (0) 96ee1d7354c4ad7372047672 (py3 no-py36 !)
418 write(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (no-py36 !)
419 write(24 from 42) -> (0) 96ee1d7354c4ad7372047672 (no-py36 !)
420 420 write limit reached; closing socket
421 421 $LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=batch': (glob)
422 422 Traceback (most recent call last):
@@ -455,8 +455,8 TODO this output is terrible
455 455 readline(*) -> (2) \r\n (glob)
456 456 sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py36 !)
457 457 sendall(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py36 !)
458 write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py3 no-py36 !)
459 write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py3 no-py36 !)
458 write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (no-py36 !)
459 write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (no-py36 !)
460 460 readline(~) -> (26) GET /?cmd=batch HTTP/1.1\r\n
461 461 readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
462 462 readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
@@ -468,8 +468,8 TODO this output is terrible
468 468 readline(*) -> (2) \r\n (glob)
469 469 sendall(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (py36 !)
470 470 sendall(42) -> 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n; (py36 !)
471 write(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (py3 no-py36 !)
472 write(42) -> 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n; (py3 no-py36 !)
471 write(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (no-py36 !)
472 write(42) -> 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n; (no-py36 !)
473 473 readline(~) -> (30) GET /?cmd=getbundle HTTP/1.1\r\n
474 474 readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
475 475 readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
@@ -480,13 +480,13 TODO this output is terrible
480 480 readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
481 481 readline(*) -> (2) \r\n (glob)
482 482 sendall(129 from 167) -> (0) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercuri (py36 !)
483 write(129 from 167) -> (0) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercuri (py3 no-py36 !)
483 write(129 from 167) -> (0) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercuri (no-py36 !)
484 484 write limit reached; closing socket
485 485 $LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
486 486 Traceback (most recent call last):
487 487 Exception: connection closed after sending N bytes
488 488
489 write(293) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\nHTTP/1.1 500 Internal Server Error\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nTransfer-Encoding: chunked\r\n\r\n (py3 no-py36 !)
489 write(293) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\nHTTP/1.1 500 Internal Server Error\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nTransfer-Encoding: chunked\r\n\r\n (no-py36 !)
490 490
491 491 $ rm -f error.log
492 492
@@ -522,7 +522,7 Server stops before it sends transfer en
522 522 $LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
523 523 Traceback (most recent call last):
524 524 Exception: connection closed after sending N bytes
525 write(293) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\nHTTP/1.1 500 Internal Server Error\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nTransfer-Encoding: chunked\r\n\r\n (py3 !)
525 write(293) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\nHTTP/1.1 500 Internal Server Error\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nTransfer-Encoding: chunked\r\n\r\n
526 526
527 527 #endif
528 528
@@ -553,8 +553,8 Server sends empty HTTP body for getbund
553 553 readline(*) -> (2) \r\n (glob)
554 554 sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py36 !)
555 555 sendall(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py36 !)
556 write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py3 no-py36 !)
557 write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py3 no-py36 !)
556 write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (no-py36 !)
557 write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (no-py36 !)
558 558 readline(~) -> (26) GET /?cmd=batch HTTP/1.1\r\n
559 559 readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
560 560 readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
@@ -566,8 +566,8 Server sends empty HTTP body for getbund
566 566 readline(*) -> (2) \r\n (glob)
567 567 sendall(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (py36 !)
568 568 sendall(42) -> 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n; (py36 !)
569 write(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (py3 no-py36 !)
570 write(42) -> 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n; (py3 no-py36 !)
569 write(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (no-py36 !)
570 write(42) -> 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n; (no-py36 !)
571 571 readline(~) -> (30) GET /?cmd=getbundle HTTP/1.1\r\n
572 572 readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
573 573 readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
@@ -578,13 +578,13 Server sends empty HTTP body for getbund
578 578 readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
579 579 readline(*) -> (2) \r\n (glob)
580 580 sendall(167 from 167) -> (0) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n (py36 !)
581 write(167 from 167) -> (0) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n (py3 no-py36 !)
581 write(167 from 167) -> (0) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n (no-py36 !)
582 582 write limit reached; closing socket
583 583 $LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
584 584 Traceback (most recent call last):
585 585 Exception: connection closed after sending N bytes
586 586
587 write(293) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\nHTTP/1.1 500 Internal Server Error\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nTransfer-Encoding: chunked\r\n\r\n (py3 no-py36 !)
587 write(293) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\nHTTP/1.1 500 Internal Server Error\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nTransfer-Encoding: chunked\r\n\r\n (no-py36 !)
588 588
589 589 $ rm -f error.log
590 590
@@ -613,8 +613,8 Server sends partial compression string
613 613 readline(*) -> (2) \r\n (glob)
614 614 sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py36 !)
615 615 sendall(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py36 !)
616 write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py3 no-py36 !)
617 write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py3 no-py36 !)
616 write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (no-py36 !)
617 write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (no-py36 !)
618 618 readline(~) -> (26) GET /?cmd=batch HTTP/1.1\r\n
619 619 readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
620 620 readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
@@ -626,7 +626,7 Server sends partial compression string
626 626 readline(*) -> (2) \r\n (glob)
627 627 sendall(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (py36 !)
628 628 sendall(42) -> 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n; (py36 !)
629 write(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (py3 no-py36 !)
629 write(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (no-py36 !)
630 630 readline(~) -> (30) GET /?cmd=getbundle HTTP/1.1\r\n
631 631 readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
632 632 readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
@@ -640,7 +640,7 Server sends partial compression string
640 640 sendall(6) -> 1\\r\\n\x04\\r\\n (esc) (py36 !)
641 641 sendall(9) -> 4\r\nnone\r\n (py36 !)
642 642 sendall(9 from 9) -> (0) 4\r\nHG20\r\n (py36 !)
643 write(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n (py3 no-py36 !)
643 write(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n (no-py36 !)
644 644 write limit reached; closing socket
645 645 $LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
646 646 Traceback (most recent call last):
@@ -679,8 +679,8 Server sends partial bundle2 header magi
679 679
680 680 #else
681 681 $ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -11
682 readline(~) -> (2) \r\n (py3 !)
683 write(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n (py3 !)
682 readline(~) -> (2) \r\n
683 write(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n
684 684 write(6) -> 1\\r\\n\x04\\r\\n (esc)
685 685 write(9) -> 4\r\nnone\r\n
686 686 write(6 from 9) -> (0) 4\r\nHG2
@@ -724,8 +724,8 Server sends incomplete bundle2 stream p
724 724
725 725 #else
726 726 $ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -12
727 readline(~) -> (2) \r\n (py3 !)
728 write(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n (py3 !)
727 readline(~) -> (2) \r\n
728 write(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n
729 729 write(41) -> Content-Type: application/mercurial-0.2\r\n
730 730 write(6) -> 1\\r\\n\x04\\r\\n (esc)
731 731 write(9) -> 4\r\nnone\r\n
@@ -771,8 +771,8 Server stops after bundle2 stream param
771 771
772 772 #else
773 773 $ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -12
774 readline(~) -> (2) \r\n (py3 !)
775 write(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n (py3 !)
774 readline(~) -> (2) \r\n
775 write(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n
776 776 write(41) -> Content-Type: application/mercurial-0.2\r\n
777 777 write(6) -> 1\\r\\n\x04\\r\\n (esc)
778 778 write(9) -> 4\r\nnone\r\n
@@ -820,8 +820,8 Server stops sending after bundle2 part
820 820 #else
821 821
822 822 $ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -13
823 readline(~) -> (2) \r\n (py3 !)
824 write(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n (py3 !)
823 readline(~) -> (2) \r\n
824 write(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n
825 825 write(41) -> Content-Type: application/mercurial-0.2\r\n
826 826 write(6) -> 1\\r\\n\x04\\r\\n (esc)
827 827 write(9) -> 4\r\nnone\r\n
@@ -873,8 +873,8 Server stops sending after bundle2 part
873 873
874 874 #else
875 875 $ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -14
876 readline(~) -> (2) \r\n (py3 !)
877 write(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n (py3 !)
876 readline(~) -> (2) \r\n
877 write(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n
878 878 write(41) -> Content-Type: application/mercurial-0.2\r\n
879 879 write(6) -> 1\\r\\n\x04\\r\\n (esc)
880 880 write(9) -> 4\r\nnone\r\n
@@ -929,7 +929,7 Server stops after bundle2 part payload
929 929
930 930 #else
931 931 $ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -15
932 write(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n (py3 !)
932 write(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n
933 933 write(28) -> Transfer-Encoding: chunked\r\n
934 934 write(6) -> 1\\r\\n\x04\\r\\n (esc)
935 935 write(9) -> 4\r\nnone\r\n
@@ -986,8 +986,8 Server stops sending in middle of bundle
986 986
987 987 #else
988 988 $ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -16
989 readline(~) -> (2) \r\n (py3 !)
990 write(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n (py3 !)
989 readline(~) -> (2) \r\n
990 write(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n
991 991 write(41) -> Content-Type: application/mercurial-0.2\r\n
992 992 write(6) -> 1\\r\\n\x04\\r\\n (esc)
993 993 write(9) -> 4\r\nnone\r\n
@@ -45,12 +45,7 clone via stream
45 45 no changes found
46 46 updating to branch default
47 47 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
48 $ hg verify -R copy
49 checking changesets
50 checking manifests
51 crosschecking files in changesets and manifests
52 checking files
53 checked 1 changesets with 4 changes to 4 files
48 $ hg verify -R copy -q
54 49 #endif
55 50
56 51 try to clone via stream, should use pull instead
@@ -99,12 +94,7 clone via pull
99 94 new changesets 8b6053c928fe
100 95 updating to branch default
101 96 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
102 $ hg verify -R copy-pull
103 checking changesets
104 checking manifests
105 crosschecking files in changesets and manifests
106 checking files
107 checked 1 changesets with 4 changes to 4 files
97 $ hg verify -R copy-pull -q
108 98 $ cd test
109 99 $ echo bar > bar
110 100 $ hg commit -A -d '1 0' -m 2
@@ -25,7 +25,7 clone remote via stream
25 25 $ for i in 0 1 2 3 4 5 6 7 8; do
26 26 > hg clone -r "$i" http://localhost:$HGPORT/ test-"$i"
27 27 > if cd test-"$i"; then
28 > hg verify
28 > hg verify -q
29 29 > cd ..
30 30 > fi
31 31 > done
@@ -36,11 +36,6 clone remote via stream
36 36 new changesets bfaf4b5cbf01
37 37 updating to branch default
38 38 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
39 checking changesets
40 checking manifests
41 crosschecking files in changesets and manifests
42 checking files
43 checked 1 changesets with 1 changes to 1 files
44 39 adding changesets
45 40 adding manifests
46 41 adding file changes
@@ -48,11 +43,6 clone remote via stream
48 43 new changesets bfaf4b5cbf01:21f32785131f
49 44 updating to branch default
50 45 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
51 checking changesets
52 checking manifests
53 crosschecking files in changesets and manifests
54 checking files
55 checked 2 changesets with 2 changes to 1 files
56 46 adding changesets
57 47 adding manifests
58 48 adding file changes
@@ -60,11 +50,6 clone remote via stream
60 50 new changesets bfaf4b5cbf01:4ce51a113780
61 51 updating to branch default
62 52 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
63 checking changesets
64 checking manifests
65 crosschecking files in changesets and manifests
66 checking files
67 checked 3 changesets with 3 changes to 1 files
68 53 adding changesets
69 54 adding manifests
70 55 adding file changes
@@ -72,11 +57,6 clone remote via stream
72 57 new changesets bfaf4b5cbf01:93ee6ab32777
73 58 updating to branch default
74 59 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
75 checking changesets
76 checking manifests
77 crosschecking files in changesets and manifests
78 checking files
79 checked 4 changesets with 4 changes to 1 files
80 60 adding changesets
81 61 adding manifests
82 62 adding file changes
@@ -84,11 +64,6 clone remote via stream
84 64 new changesets bfaf4b5cbf01:c70afb1ee985
85 65 updating to branch default
86 66 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
87 checking changesets
88 checking manifests
89 crosschecking files in changesets and manifests
90 checking files
91 checked 2 changesets with 2 changes to 1 files
92 67 adding changesets
93 68 adding manifests
94 69 adding file changes
@@ -96,11 +71,6 clone remote via stream
96 71 new changesets bfaf4b5cbf01:f03ae5a9b979
97 72 updating to branch default
98 73 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
99 checking changesets
100 checking manifests
101 crosschecking files in changesets and manifests
102 checking files
103 checked 3 changesets with 3 changes to 1 files
104 74 adding changesets
105 75 adding manifests
106 76 adding file changes
@@ -108,11 +78,6 clone remote via stream
108 78 new changesets bfaf4b5cbf01:095cb14b1b4d
109 79 updating to branch default
110 80 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
111 checking changesets
112 checking manifests
113 crosschecking files in changesets and manifests
114 checking files
115 checked 4 changesets with 5 changes to 2 files
116 81 adding changesets
117 82 adding manifests
118 83 adding file changes
@@ -120,11 +85,6 clone remote via stream
120 85 new changesets bfaf4b5cbf01:faa2e4234c7a
121 86 updating to branch default
122 87 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
123 checking changesets
124 checking manifests
125 crosschecking files in changesets and manifests
126 checking files
127 checked 5 changesets with 6 changes to 3 files
128 88 adding changesets
129 89 adding manifests
130 90 adding file changes
@@ -132,11 +92,6 clone remote via stream
132 92 new changesets bfaf4b5cbf01:916f1afdef90
133 93 updating to branch default
134 94 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
135 checking changesets
136 checking manifests
137 crosschecking files in changesets and manifests
138 checking files
139 checked 5 changesets with 5 changes to 2 files
140 95 $ cd test-8
141 96 $ hg pull ../test-7
142 97 pulling from ../test-7
@@ -147,12 +102,7 clone remote via stream
147 102 added 4 changesets with 2 changes to 3 files (+1 heads)
148 103 new changesets c70afb1ee985:faa2e4234c7a
149 104 (run 'hg heads' to see heads, 'hg merge' to merge)
150 $ hg verify
151 checking changesets
152 checking manifests
153 crosschecking files in changesets and manifests
154 checking files
155 checked 9 changesets with 7 changes to 4 files
105 $ hg verify -q
156 106 $ cd ..
157 107 $ cd test-1
158 108 $ hg pull -r 4 http://localhost:$HGPORT/
@@ -164,12 +114,7 clone remote via stream
164 114 added 1 changesets with 0 changes to 0 files (+1 heads)
165 115 new changesets c70afb1ee985
166 116 (run 'hg heads' to see heads, 'hg merge' to merge)
167 $ hg verify
168 checking changesets
169 checking manifests
170 crosschecking files in changesets and manifests
171 checking files
172 checked 3 changesets with 2 changes to 1 files
117 $ hg verify -q
173 118 $ hg pull http://localhost:$HGPORT/
174 119 pulling from http://localhost:$HGPORT/
175 120 searching for changes
@@ -190,12 +135,7 clone remote via stream
190 135 added 2 changesets with 0 changes to 0 files (+1 heads)
191 136 new changesets c70afb1ee985:f03ae5a9b979
192 137 (run 'hg heads' to see heads, 'hg merge' to merge)
193 $ hg verify
194 checking changesets
195 checking manifests
196 crosschecking files in changesets and manifests
197 checking files
198 checked 5 changesets with 3 changes to 1 files
138 $ hg verify -q
199 139 $ hg pull http://localhost:$HGPORT/
200 140 pulling from http://localhost:$HGPORT/
201 141 searching for changes
@@ -205,12 +145,7 clone remote via stream
205 145 added 4 changesets with 4 changes to 4 files
206 146 new changesets 93ee6ab32777:916f1afdef90
207 147 (run 'hg update' to get a working copy)
208 $ hg verify
209 checking changesets
210 checking manifests
211 crosschecking files in changesets and manifests
212 checking files
213 checked 9 changesets with 7 changes to 4 files
148 $ hg verify -q
214 149 $ cd ..
215 150
216 151 no default destination if url has no path:
@@ -22,12 +22,7 url for proxy, stream
22 22 updating to branch default
23 23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 24 $ cd b
25 $ hg verify
26 checking changesets
27 checking manifests
28 crosschecking files in changesets and manifests
29 checking files
30 checked 1 changesets with 1 changes to 1 files
25 $ hg verify -q
31 26 $ cd ..
32 27
33 28 url for proxy, pull
@@ -42,12 +37,7 url for proxy, pull
42 37 updating to branch default
43 38 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
44 39 $ cd b-pull
45 $ hg verify
46 checking changesets
47 checking manifests
48 crosschecking files in changesets and manifests
49 checking files
50 checked 1 changesets with 1 changes to 1 files
40 $ hg verify -q
51 41 $ cd ..
52 42
53 43 host:port for proxy
@@ -34,12 +34,7 clone via stream
34 34 transferred * bytes in * seconds (*/sec) (glob)
35 35 updating to branch default
36 36 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
37 $ hg verify -R copy
38 checking changesets
39 checking manifests
40 crosschecking files in changesets and manifests
41 checking files
42 checked 1 changesets with 4 changes to 4 files
37 $ hg verify -R copy -q
43 38 #endif
44 39
45 40 try to clone via stream, should use pull instead
@@ -88,12 +83,7 clone via pull
88 83 new changesets 8b6053c928fe
89 84 updating to branch default
90 85 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
91 $ hg verify -R copy-pull
92 checking changesets
93 checking manifests
94 crosschecking files in changesets and manifests
95 checking files
96 checked 1 changesets with 4 changes to 4 files
86 $ hg verify -R copy-pull -q
97 87 $ cd test
98 88 $ echo bar > bar
99 89 $ hg commit -A -d '1 0' -m 2
@@ -137,12 +137,7 Inability to verify peer certificate wil
137 137 new changesets 8b6053c928fe
138 138 updating to branch default
139 139 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
140 $ hg verify -R copy-pull
141 checking changesets
142 checking manifests
143 crosschecking files in changesets and manifests
144 checking files
145 checked 1 changesets with 4 changes to 4 files
140 $ hg verify -R copy-pull -q
146 141 $ cd test
147 142 $ echo bar > bar
148 143 $ hg commit -A -d '1 0' -m 2
@@ -159,9 +159,4 Test that --exact on a bad header doesn'
159 159 rollback completed
160 160 abort: patch is damaged or loses information
161 161 [255]
162 $ hg verify
163 checking changesets
164 checking manifests
165 crosschecking files in changesets and manifests
166 checking files
167 checked 2 changesets with 2 changes to 1 files
162 $ hg verify -q
@@ -7,12 +7,7
7 7 > hg commit -A -m $i
8 8 > done
9 9 adding foo
10 $ hg verify
11 checking changesets
12 checking manifests
13 crosschecking files in changesets and manifests
14 checking files
15 checked 9 changesets with 9 changes to 1 files
10 $ hg verify -q
16 11 $ hg serve -p $HGPORT -d --pid-file=hg.pid
17 12 $ cat hg.pid >> $DAEMON_PIDS
18 13 $ cd ..
@@ -365,12 +360,7 test outgoing
365 360 > echo $i >> foo
366 361 > hg commit -A -m $i
367 362 > done
368 $ hg verify
369 checking changesets
370 checking manifests
371 crosschecking files in changesets and manifests
372 checking files
373 checked 14 changesets with 14 changes to 1 files
363 $ hg verify -q
374 364 $ cd ..
375 365 $ hg -R test-dev outgoing test
376 366 comparing with test
@@ -46,8 +46,8 multihead push works.
46 46 remote: bc22f9a30a82 multihead1
47 47 remote: ee4802bf6864 multihead2
48 48 $ scratchnodes
49 bc22f9a30a821118244deacbd732e394ed0b686c ab1bc557aa090a9e4145512c734b6e8a828393a5
50 ee4802bf6864326a6b3dcfff5a03abc2a0a69b8f ab1bc557aa090a9e4145512c734b6e8a828393a5
49 bc22f9a30a821118244deacbd732e394ed0b686c de1b7d132ba98f0172cd974e3e69dfa80faa335c
50 ee4802bf6864326a6b3dcfff5a03abc2a0a69b8f de1b7d132ba98f0172cd974e3e69dfa80faa335c
51 51
52 52 Create two new scratch bookmarks
53 53 $ hg up 0
@@ -95,11 +95,9 new directories are setgid
95 95 00660 ./.hg/store/undo
96 96 00660 ./.hg/store/undo.backupfiles
97 97 00660 ./.hg/store/undo.phaseroots
98 00660 ./.hg/undo.backup.dirstate
99 98 00660 ./.hg/undo.bookmarks
100 99 00660 ./.hg/undo.branch
101 100 00660 ./.hg/undo.desc
102 00660 ./.hg/undo.dirstate
103 101 00770 ./.hg/wcache/
104 102 00711 ./.hg/wcache/checkisexec
105 103 007.. ./.hg/wcache/checklink (re)
@@ -137,7 +135,6 group can still write everything
137 135 00660 ../push/.hg/cache/branch2-base
138 136 00660 ../push/.hg/cache/rbc-names-v1
139 137 00660 ../push/.hg/cache/rbc-revs-v1
140 00660 ../push/.hg/dirstate
141 138 00660 ../push/.hg/requires
142 139 00770 ../push/.hg/store/
143 140 00660 ../push/.hg/store/00changelog.i
@@ -160,7 +157,6 group can still write everything
160 157 00660 ../push/.hg/undo.bookmarks
161 158 00660 ../push/.hg/undo.branch
162 159 00660 ../push/.hg/undo.desc
163 00660 ../push/.hg/undo.dirstate
164 160 00770 ../push/.hg/wcache/
165 161
166 162
@@ -3,7 +3,7 hg debuginstall
3 3 checking encoding (ascii)...
4 4 checking Python executable (*) (glob)
5 5 checking Python implementation (*) (glob)
6 checking Python version (3.*) (glob) (py3 !)
6 checking Python version (3.*) (glob)
7 7 checking Python lib (.*[Ll]ib.*)... (re) (no-pyoxidizer !)
8 8 checking Python lib (.*pyoxidizer.*)... (re) (pyoxidizer !)
9 9 checking Python security support (*) (glob)
@@ -68,7 +68,7 hg debuginstall with no username
68 68 checking encoding (ascii)...
69 69 checking Python executable (*) (glob)
70 70 checking Python implementation (*) (glob)
71 checking Python version (3.*) (glob) (py3 !)
71 checking Python version (3.*) (glob)
72 72 checking Python lib (.*[Ll]ib.*)... (re) (no-pyoxidizer !)
73 73 checking Python lib (.*pyoxidizer.*)... (re) (pyoxidizer !)
74 74 checking Python security support (*) (glob)
@@ -118,7 +118,7 path variables are expanded (~ is the sa
118 118 checking encoding (ascii)...
119 119 checking Python executable (*) (glob)
120 120 checking Python implementation (*) (glob)
121 checking Python version (3.*) (glob) (py3 !)
121 checking Python version (3.*) (glob)
122 122 checking Python lib (.*[Ll]ib.*)... (re) (no-pyoxidizer !)
123 123 checking Python lib (.*pyoxidizer.*)... (re) (pyoxidizer !)
124 124 checking Python security support (*) (glob)
@@ -148,7 +148,7 not found (this is intentionally using b
148 148 checking encoding (ascii)...
149 149 checking Python executable (*) (glob)
150 150 checking Python implementation (*) (glob)
151 checking Python version (3.*) (glob) (py3 !)
151 checking Python version (3.*) (glob)
152 152 checking Python lib (.*[Ll]ib.*)... (re) (no-pyoxidizer !)
153 153 checking Python lib (.*pyoxidizer.*)... (re) (pyoxidizer !)
154 154 checking Python security support (*) (glob)
@@ -238,42 +238,3 since it's bin on most platforms but Scr
238 238 checking username (test)
239 239 no problems detected
240 240 #endif
241
242 #if virtualenv no-py3 network-io no-pyoxidizer
243
244 Note: --no-site-packages is the default for all versions enabled by hghave
245
246 $ "$PYTHON" -m virtualenv installenv >> pip.log
247 DEPRECATION: Python 2.7 will reach the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 won't be maintained after that date. A future version of pip will drop support for Python 2.7. (?)
248 DEPRECATION: Python 2.7 will reach the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 won't be maintained after that date. A future version of pip will drop support for Python 2.7. More details about Python 2 support in pip, can be found at https://pip.pypa.io/en/latest/development/release-process/#python-2-support (?)
249
250 Note: we use this weird path to run pip and hg to avoid platform differences,
251 since it's bin on most platforms but Scripts on Windows.
252 $ ./installenv/*/pip install $TESTDIR/.. >> pip.log
253 DEPRECATION: Python 2.7 will reach the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 won't be maintained after that date. A future version of pip will drop support for Python 2.7. (?)
254 DEPRECATION: Python 2.7 will reach the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 won't be maintained after that date. A future version of pip will drop support for Python 2.7. More details about Python 2 support in pip, can be found at https://pip.pypa.io/en/latest/development/release-process/#python-2-support (?)
255 DEPRECATION: Python 2.7 reached the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 is no longer maintained. pip 21.0 will drop support for Python 2.7 in January 2021. More details about Python 2 support in pip can be found at https://pip.pypa.io/en/latest/development/release-process/#python-2-support pip 21.0 will remove support for this functionality. (?)
256 $ ./installenv/*/hg debuginstall || cat pip.log
257 checking encoding (ascii)...
258 checking Python executable (*) (glob)
259 checking Python implementation (*) (glob)
260 checking Python version (2.*) (glob)
261 checking Python lib (*)... (glob)
262 checking Python security support (*) (glob)
263 TLS 1.2 not supported by Python install; network connections lack modern security (?)
264 SNI not supported by Python install; may have connectivity issues with some servers (?)
265 checking Rust extensions \((installed|missing)\) (re)
266 checking Mercurial version (*) (glob)
267 checking Mercurial custom build (*) (glob)
268 checking module policy (*) (glob)
269 checking installed modules (*/mercurial)... (glob)
270 checking registered compression engines (*) (glob)
271 checking available compression engines (*) (glob)
272 checking available compression engines for wire protocol (*) (glob)
273 checking "re2" regexp engine \((available|missing)\) (re)
274 checking templates ($TESTTMP/installenv/*/site-packages/mercurial/templates)... (glob)
275 checking default template ($TESTTMP/installenv/*/site-packages/mercurial/templates/map-cmdline.default) (glob)
276 checking commit editor... (*) (glob)
277 checking username (test)
278 no problems detected
279 #endif
@@ -37,12 +37,7 https://bz.mercurial-scm.org/1175
37 37 updating the branch cache
38 38 committed changeset 5:83a687e8a97c80992ba385bbfd766be181bfb1d1
39 39
40 $ hg verify
41 checking changesets
42 checking manifests
43 crosschecking files in changesets and manifests
44 checking files
45 checked 6 changesets with 4 changes to 4 files
40 $ hg verify -q
46 41
47 42 $ hg export --git tip
48 43 # HG changeset patch
@@ -25,13 +25,7 recover, explicit verify
25 25 abort: abandoned transaction found
26 26 (run 'hg recover' to clean up transaction)
27 27 [255]
28 $ hg recover --verify
29 rolling back interrupted transaction
30 checking changesets
31 checking manifests
32 crosschecking files in changesets and manifests
33 checking files
34 checked 1 changesets with 1 changes to 1 files
28 $ hg recover --verify -q
35 29
36 30 recover, no verify
37 31
@@ -492,7 +492,8 rollback and revert expansion
492 492 $ echo '$Id$' > y
493 493 $ echo '$Id$' > z
494 494 $ hg add y
495 $ hg commit -Am "rollback only" z
495 $ hg add z
496 $ hg commit -m "rollback only" z
496 497 $ cat z
497 498 $Id: z,v 45a5d3adce53 1970/01/01 00:00:00 test $
498 499 $ hg --verbose rollback
@@ -838,12 +839,7 Stat, verify and show custom expansion (
838 839
839 840 $ hg status
840 841 ? c
841 $ hg verify
842 checking changesets
843 checking manifests
844 crosschecking files in changesets and manifests
845 checking files
846 checked 3 changesets with 4 changes to 3 files
842 $ hg verify -q
847 843 $ cat a b
848 844 expand $Id: a bb948857c743 Thu, 01 Jan 1970 00:00:02 +0000 user $
849 845 do not process $Id:
@@ -771,14 +771,26 wdir(), but a matching revision is detec
771 771 $ hg log -qr 'file("set:exec()")'
772 772 9:be1b433a65b1
773 773
774 Test a fatal error interrupting an update. Verify that status reports dirty
775 files correctly after an interrupted update. Also verify that checking all
776 hashes reveals it isn't clean.
774 Test a fatal error interrupting an update
775 -----------------------------------------
776
777 In a previous version this test was tasked to:
778 | verify that status reports dirty files correctly after an interrupted
779 | update. Also verify that checking all hashes reveals it isn't clean.
780
781 In the meantime, improvements to the update logic mean it is much harder to get the dirstate file written too early. So the original intent seems "fine".
782
783 However, it shows another error where the standin file for large1 seems to be
784 silently updated, confusing the general logic. This seems to have been broken
785 before our updates and the test is marked as such.
777 786
778 787 Start with clean dirstates:
779 788 $ hg up --quiet --clean --rev "8^"
780 789 $ sleep 1
790 $ cat large1
791 large1 in #3
781 792 $ hg st
793
782 794 Update standins without updating largefiles - large1 is modified and largeX is
783 795 added:
784 796 $ cat << EOF > ../crashupdatelfiles.py
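The heredoc body written into crashupdatelfiles.py falls outside this hunk, so it is not shown. As an assumption-level reconstruction only, a test extension of this kind can wrap the largefiles update entry point and fail partway through; everything below except the documented extension hooks is hypothetical and is not the test's actual body:

# crashupdatelfiles.py (hypothetical sketch)
from mercurial import error, extensions
from hgext.largefiles import lfcommands

def _crash(orig, *args, **kwargs):
    # Fail after standins are written but before the largefiles
    # themselves are updated, leaving the working copy inconsistent.
    raise error.Abort(b'simulated crash while updating largefiles')

def uisetup(ui):
    extensions.wrapfunction(lfcommands, 'updatelfiles', _crash)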
@@ -790,18 +802,25 added:
790 802 $ hg up -Cr "8" --config extensions.crashupdatelfiles=../crashupdatelfiles.py
791 803 [254]
792 804 Check large1 content and status ... and that update will undo modifications:
805 $ hg id
806 d65e59e952a9+ (known-bad-output !)
807 d65e59e952a9 (missing-correct-output !)
793 808 $ cat large1
794 809 large1 in #3
795 810 $ hg st
796 M large1
797 ! largeX
798 $ hg up -Cr .
811 $ hg up -Cr 8
799 812 getting changed largefiles
800 2 largefiles updated, 0 removed
813 1 largefiles updated, 0 removed (known-bad-output !)
814 2 largefiles updated, 0 removed (missing-correct-output !)
801 815 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
802 816 $ cat large1
803 manually modified before 'hg transplant --continue'
817 large1 in #3 (known-bad-output !)
818 manually modified before 'hg transplant --continue' (missing-correct-output !)
804 819 $ hg st
820 M large1 (known-bad-output !)
821
822 $ hg revert --all --no-backup
823 reverting .hglf/large1 (known-bad-output !)
805 824 Force largefiles rehashing and check that all changes have been caught by
806 825 status and update:
807 826 $ rm .hg/largefiles/dirstate
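For context on the annotations above: in Mercurial's ".t" test format, an
expected-output line may end with a feature gate in parentheses, and the
"(known-bad-output !)" / "(missing-correct-output !)" pair is the convention
for pinning down a known defect without failing the suite. The first line
records what the current, buggy code prints; the second records what a fixed
Mercurial should print. A minimal sketch of the pattern, reusing the "hg id"
expectation from this hunk:

  $ hg id
  d65e59e952a9+ (known-bad-output !)
  d65e59e952a9 (missing-correct-output !)

Once the underlying bug is fixed, the annotated pair collapses back into a
single plain expectation line.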
@@ -151,14 +151,7 largefiles clients refuse to push largef
151 151 $ hg commit -m "m2"
152 152 Invoking status precommit hook
153 153 A f2
154 $ hg verify --large
155 checking changesets
156 checking manifests
157 crosschecking files in changesets and manifests
158 checking files
159 checked 2 changesets with 2 changes to 2 files
160 searching 1 changesets for largefiles
161 verified existence of 1 revisions of 1 largefiles
154 $ hg verify --large -q
162 155 $ hg serve --config extensions.largefiles=! -R ../r6 -d -p $HGPORT --pid-file ../hg.pid
163 156 $ cat ../hg.pid >> $DAEMON_PIDS
164 157 $ hg push http://localhost:$HGPORT
@@ -249,6 +242,7 test 'verify' with remotestore:
249 242 checking manifests
250 243 crosschecking files in changesets and manifests
251 244 checking files
245 checking dirstate
252 246 checked 1 changesets with 1 changes to 1 files
253 247 searching 1 changesets for largefiles
254 248 changeset 0:cf03e5bb9936: f1 missing
@@ -280,14 +274,7 largefiles pulled on update - a largefil
280 274 $ [ ! -f http-clone/.hg/largefiles/02a439e5c31c526465ab1a0ca1f431f76b827b90 ]
281 275 $ [ ! -f http-clone/f1 ]
282 276 $ [ ! -f http-clone-usercache ]
283 $ hg -R http-clone verify --large --lfc
284 checking changesets
285 checking manifests
286 crosschecking files in changesets and manifests
287 checking files
288 checked 1 changesets with 1 changes to 1 files
289 searching 1 changesets for largefiles
290 verified contents of 1 revisions of 1 largefiles
277 $ hg -R http-clone verify --large --lfc -q
291 278 $ hg -R http-clone up -Cqr null
292 279
293 280 largefiles pulled on update - no server side problems:
@@ -343,14 +330,7 largefiles should batch verify remote ca
343 330 adding file changes
344 331 added 2 changesets with 2 changes to 2 files
345 332 new changesets 567253b0f523:04d19c27a332 (2 drafts)
346 $ hg -R batchverifyclone verify --large --lfa
347 checking changesets
348 checking manifests
349 crosschecking files in changesets and manifests
350 checking files
351 checked 2 changesets with 2 changes to 2 files
352 searching 2 changesets for largefiles
353 verified existence of 2 revisions of 2 largefiles
333 $ hg -R batchverifyclone verify --large --lfa -q
354 334 $ tail -1 access.log
355 335 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=statlfile+sha%3D972a1a11f19934401291cc99117ec614933374ce%3Bstatlfile+sha%3Dc801c9cfe94400963fcb683246217d5db77f9a9a x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
356 336 $ hg -R batchverifyclone update
@@ -381,14 +361,7 available locally.
381 361 added 1 changesets with 1 changes to 1 files
382 362 new changesets 6bba8cb6935d (1 drafts)
383 363 (run 'hg update' to get a working copy)
384 $ hg -R batchverifyclone verify --lfa
385 checking changesets
386 checking manifests
387 crosschecking files in changesets and manifests
388 checking files
389 checked 3 changesets with 3 changes to 3 files
390 searching 3 changesets for largefiles
391 verified existence of 3 revisions of 3 largefiles
364 $ hg -R batchverifyclone verify --lfa -q
392 365 $ tail -1 access.log
393 366 $LOCALIP - - [$LOGDATE$] "GET /?cmd=statlfile HTTP/1.1" 200 - x-hgarg-1:sha=c8559c3c9cfb42131794b7d8009230403b9b454c x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
394 367
@@ -1029,14 +1029,7 Test cloning with --all-largefiles flag
1029 1029 2 largefiles updated, 0 removed
1030 1030 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
1031 1031 8 additional largefiles cached
1032 $ hg -R a-clone1 verify --large --lfa --lfc
1033 checking changesets
1034 checking manifests
1035 crosschecking files in changesets and manifests
1036 checking files
1037 checked 8 changesets with 24 changes to 10 files
1038 searching 8 changesets for largefiles
1039 verified contents of 13 revisions of 6 largefiles
1032 $ hg -R a-clone1 verify --large --lfa --lfc -q
1040 1033 $ hg -R a-clone1 sum
1041 1034 parent: 1:ce8896473775
1042 1035 edit files
@@ -1122,7 +1115,7 redo pull with --lfrev and check it pull
1122 1115 6 changesets found
1123 1116 uncompressed size of bundle content:
1124 1117 1389 (changelog)
1125 1599 (manifests)
1118 1698 (manifests)
1126 1119 254 .hglf/large1
1127 1120 564 .hglf/large3
1128 1121 572 .hglf/sub/large4
@@ -1552,6 +1545,7 revert some files to an older revision
1552 1545 checking manifests
1553 1546 crosschecking files in changesets and manifests
1554 1547 checking files
1548 checking dirstate
1555 1549 checked 10 changesets with 28 changes to 10 files
1556 1550 searching 1 changesets for largefiles
1557 1551 verified existence of 3 revisions of 3 largefiles
@@ -1561,15 +1555,8 and make sure that this is caught:
1561 1555
1562 1556 $ mv $TESTTMP/d/.hg/largefiles/e166e74c7303192238d60af5a9c4ce9bef0b7928 .
1563 1557 $ rm .hg/largefiles/e166e74c7303192238d60af5a9c4ce9bef0b7928
1564 $ hg verify --large
1565 checking changesets
1566 checking manifests
1567 crosschecking files in changesets and manifests
1568 checking files
1569 checked 10 changesets with 28 changes to 10 files
1570 searching 1 changesets for largefiles
1558 $ hg verify --large -q
1571 1559 changeset 9:598410d3eb9a: sub/large4 references missing $TESTTMP/d/.hg/largefiles/e166e74c7303192238d60af5a9c4ce9bef0b7928
1572 verified existence of 3 revisions of 3 largefiles
1573 1560 [1]
1574 1561
1575 1562 - introduce corruption and make sure that it is caught when checking content:
@@ -345,6 +345,7 process.
345 345 checking manifests
346 346 crosschecking files in changesets and manifests
347 347 checking files
348 checking dirstate
348 349 checked 8 changesets with 13 changes to 9 files
349 350 searching 7 changesets for largefiles
350 351 changeset 0:d4892ec57ce2: large references missing $TESTTMP/largefiles-repo-hg/.hg/largefiles/2e000fa7e85759c7f4c254d4d9c33ef481e459a7
@@ -357,7 +357,7 Test a checksum failure during the proce
357 357 $LOCALIP - - [$ERRDATE$] HG error: super(badstore, self).download(oid, src, contentlength)
358 358 $LOCALIP - - [$ERRDATE$] HG error: raise LfsCorruptionError( (glob) (py38 !)
359 359 $LOCALIP - - [$ERRDATE$] HG error: _(b'corrupt remote lfs object: %s') % oid (glob) (no-py38 !)
360 $LOCALIP - - [$ERRDATE$] HG error: hgext.lfs.blobstore.LfsCorruptionError: corrupt remote lfs object: b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c (py3 !)
360 $LOCALIP - - [$ERRDATE$] HG error: hgext.lfs.blobstore.LfsCorruptionError: corrupt remote lfs object: b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c (glob)
361 361 $LOCALIP - - [$ERRDATE$] HG error: (glob)
362 362 $LOCALIP - - [$ERRDATE$] Exception happened during processing request '/.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d': (glob)
363 363 Traceback (most recent call last):
@@ -388,7 +388,7 Test a checksum failure during the proce
388 388 $LOCALIP - - [$ERRDATE$] HG error: blobstore._verify(oid, b'dummy content') (glob)
389 389 $LOCALIP - - [$ERRDATE$] HG error: raise LfsCorruptionError( (glob) (py38 !)
390 390 $LOCALIP - - [$ERRDATE$] HG error: hint=_(b'run hg verify'), (glob) (no-py38 !)
391 $LOCALIP - - [$ERRDATE$] HG error: hgext.lfs.blobstore.LfsCorruptionError: detected corrupt lfs object: 276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d (py3 !)
391 $LOCALIP - - [$ERRDATE$] HG error: hgext.lfs.blobstore.LfsCorruptionError: detected corrupt lfs object: 276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d (glob)
392 392 $LOCALIP - - [$ERRDATE$] HG error: (glob)
393 393
394 394 Basic Authorization headers are returned by the Batch API, and sent back with
@@ -787,8 +787,9 Repo with damaged lfs objects in any rev
787 787 checking manifests
788 788 crosschecking files in changesets and manifests
789 789 checking files
790 l@1: unpacking 46a2f24864bc: integrity check failed on data/l:0
791 large@0: unpacking 2c531e0992ff: integrity check failed on data/large:0
790 l@1: unpacking 46a2f24864bc: integrity check failed on l:0
791 large@0: unpacking 2c531e0992ff: integrity check failed on large:0
792 not checking dirstate because of previous errors
792 793 checked 5 changesets with 10 changes to 4 files
793 794 2 integrity errors encountered!
794 795 (first damaged changeset appears to be 0)
@@ -851,6 +852,7 blob, and the output shows that it isn't
851 852 checking files
852 853 lfs: found 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b in the local lfs store
853 854 lfs blob sha256:66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e renamed large -> l
855 checking dirstate
854 856 checked 5 changesets with 10 changes to 4 files
855 857
856 858 Verify will not try to download lfs blobs, if told not to by the config option
@@ -865,6 +867,7 Verify will not try to download lfs blob
865 867 checking files
866 868 lfs: found 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b in the local lfs store
867 869 lfs blob sha256:66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e renamed large -> l
870 checking dirstate
868 871 checked 5 changesets with 10 changes to 4 files
869 872
870 873 Verify will copy/link all lfs objects into the local store that aren't already
@@ -885,6 +888,7 the (uncorrupted) remote store.
885 888 lfs: found 89b6070915a3d573ff3599d1cda305bc5e38549b15c4847ab034169da66e1ca8 in the local lfs store
886 889 lfs: adding b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c to the usercache
887 890 lfs: found b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c in the local lfs store
891 checking dirstate
888 892 checked 5 changesets with 10 changes to 4 files
889 893
890 894 Verify will not copy/link a corrupted file from the usercache into the local
@@ -897,11 +901,12 store, and poison it. (The verify with
897 901 checking manifests
898 902 crosschecking files in changesets and manifests
899 903 checking files
900 l@1: unpacking 46a2f24864bc: integrity check failed on data/l:0
904 l@1: unpacking 46a2f24864bc: integrity check failed on l:0
901 905 lfs: found 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b in the local lfs store
902 large@0: unpacking 2c531e0992ff: integrity check failed on data/large:0
906 large@0: unpacking 2c531e0992ff: integrity check failed on large:0
903 907 lfs: found 89b6070915a3d573ff3599d1cda305bc5e38549b15c4847ab034169da66e1ca8 in the local lfs store
904 908 lfs: found b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c in the local lfs store
909 not checking dirstate because of previous errors
905 910 checked 5 changesets with 10 changes to 4 files
906 911 2 integrity errors encountered!
907 912 (first damaged changeset appears to be 0)
@@ -917,6 +922,7 store, and poison it. (The verify with
917 922 lfs: found 66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e in the local lfs store
918 923 lfs: found 89b6070915a3d573ff3599d1cda305bc5e38549b15c4847ab034169da66e1ca8 in the local lfs store
919 924 lfs: found b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c in the local lfs store
925 checking dirstate
920 926 checked 5 changesets with 10 changes to 4 files
921 927
922 928 Damaging a file required by the update destination fails the update.
@@ -941,8 +947,9 usercache or local store.
941 947 checking manifests
942 948 crosschecking files in changesets and manifests
943 949 checking files
944 l@1: unpacking 46a2f24864bc: integrity check failed on data/l:0
945 large@0: unpacking 2c531e0992ff: integrity check failed on data/large:0
950 l@1: unpacking 46a2f24864bc: integrity check failed on l:0
951 large@0: unpacking 2c531e0992ff: integrity check failed on large:0
952 not checking dirstate because of previous errors
946 953 checked 5 changesets with 10 changes to 4 files
947 954 2 integrity errors encountered!
948 955 (first damaged changeset appears to be 0)
@@ -967,11 +974,12 avoids the corrupt lfs object in the ori
967 974 checking manifests
968 975 crosschecking files in changesets and manifests
969 976 checking files
970 l@1: unpacking 46a2f24864bc: integrity check failed on data/l:0
977 l@1: unpacking 46a2f24864bc: integrity check failed on l:0
971 978 lfs: found 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b in the local lfs store
972 large@0: unpacking 2c531e0992ff: integrity check failed on data/large:0
979 large@0: unpacking 2c531e0992ff: integrity check failed on large:0
973 980 lfs: found 89b6070915a3d573ff3599d1cda305bc5e38549b15c4847ab034169da66e1ca8 in the local lfs store
974 981 lfs: found b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c in the local lfs store
982 not checking dirstate because of previous errors
975 983 checked 5 changesets with 10 changes to 4 files
976 984 2 integrity errors encountered!
977 985 (first damaged changeset appears to be 0)
@@ -987,7 +995,7 avoids the corrupt lfs object in the ori
987 995 Accessing a corrupt file will complain
988 996
989 997 $ hg --cwd fromcorrupt2 cat -r 0 large
990 abort: integrity check failed on data/large:0
998 abort: integrity check failed on large:0
991 999 [50]
992 1000
993 1001 lfs -> normal -> lfs round trip conversions are possible. The 'none()'
@@ -246,12 +246,7 Pure removes should actually remove all
246 246
247 247 $ hg up -qC .
248 248
249 $ hg verify
250 checking changesets
251 checking manifests
252 crosschecking files in changesets and manifests
253 checking files
254 checked 2 changesets with 8 changes to 8 files
249 $ hg verify -q
255 250
256 251 $ hg rollback -q --config ui.rollback=True
257 252 $ hg rm b.txt d.txt
@@ -270,12 +265,7 A mix of adds and removes should remove
270 265 ccc.txt\x00149da44f2a4e14f488b7bd4157945a9837408c00 (esc)
271 266 e.txt\x00149da44f2a4e14f488b7bd4157945a9837408c00 (esc)
272 267
273 $ hg verify
274 checking changesets
275 checking manifests
276 crosschecking files in changesets and manifests
277 checking files
278 checked 2 changesets with 9 changes to 9 files
268 $ hg verify -q
279 269 $ cd ..
280 270
281 271 Test manifest cache interaction with shares
@@ -140,3 +140,23 Merge using internal:union tool:
140 140 third line
141 141 line 4b
142 142 line 4a
143
144 Merge using internal:union-other-first tool:
145
146 $ hg update -C 4
147 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
148
149 $ echo "[merge-patterns]" > .hg/hgrc
150 $ echo "* = internal:union-other-first" >> .hg/hgrc
151
152 $ hg merge 3
153 merging f
154 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
155 (branch merge, don't forget to commit)
156
157 $ cat f
158 line 1
159 line 2
160 third line
161 line 4a
162 line 4b
@@ -101,4 +101,5 Checking that repository has all the req
101 101 checking directory manifests (tree !)
102 102 crosschecking files in changesets and manifests
103 103 checking files
104 checking dirstate
104 105 checked 40 changesets with 1 changes to 1 files
@@ -164,12 +164,7 Check that the resulting history is vali
164 164 remote: adding file changes
165 165 remote: added 4 changesets with 4 changes to 2 files
166 166 $ cd ../master
167 $ hg verify
168 checking changesets
169 checking manifests
170 crosschecking files in changesets and manifests
171 checking files
172 checked 8 changesets with 10 changes to 3 files
167 $ hg verify -q
173 168
174 169 Cannot push to wider repo if change affects paths in wider repo that are
175 170 not also in narrower repo
@@ -218,8 +213,8 TODO: lfs shouldn't abort like this
218 213 remote: adding manifests
219 214 remote: adding file changes
220 215 remote: added 1 changesets with 0 changes to 0 files (no-lfs-on !)
221 remote: error: pretxnchangegroup.lfs hook raised an exception: data/inside2/f@f59b4e0218355383d2789196f1092abcf2262b0c: no match found (lfs-on !)
216 remote: error: pretxnchangegroup.lfs hook raised an exception: inside2/f@f59b4e0218355383d2789196f1092abcf2262b0c: no match found (lfs-on !)
222 217 remote: transaction abort! (lfs-on !)
223 218 remote: rollback completed (lfs-on !)
224 remote: abort: data/inside2/f@f59b4e0218355383d2789196f1092abcf2262b0c: no match found (lfs-on !)
219 remote: abort: inside2/f@f59b4e0218355383d2789196f1092abcf2262b0c: no match found (lfs-on !)
225 220 abort: stream ended unexpectedly (got 0 bytes, expected 4) (lfs-on !)
@@ -74,7 +74,7 have this method available in narrowhg p
74 74 > narrowspec.copytoworkingcopy(repo)
75 75 > newmatcher = narrowspec.match(repo.root, includes, excludes)
76 76 > added = matchmod.differencematcher(newmatcher, currentmatcher)
77 > with repo.dirstate.parentchange():
77 > with repo.dirstate.changing_parents(repo):
78 78 > for f in repo[b'.'].manifest().walk(added):
79 79 > repo.dirstate.update_file(
80 80 > f,
@@ -161,13 +161,7 We should also be able to unshare withou
161 161 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
162 162 $ cd share-unshare
163 163 $ hg unshare
164 $ hg verify
165 checking changesets
166 checking manifests
167 checking directory manifests (tree !)
168 crosschecking files in changesets and manifests
169 checking files
170 checked 11 changesets with 3 changes to 3 files
164 $ hg verify -q
171 165 $ cd ..
172 166
173 167 Dirstate should be left alone when upgrading from version of hg that didn't support narrow+share
@@ -274,13 +274,7 make narrow clone with every third node.
274 274 I path:d3
275 275 I path:d6
276 276 I path:d9
277 $ hg verify
278 checking changesets
279 checking manifests
280 checking directory manifests (tree !)
281 crosschecking files in changesets and manifests
282 checking files
283 checked 11 changesets with 4 changes to 4 files
277 $ hg verify -q
284 278 $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
285 279 10: add d10/f
286 280 9: add d9/f
@@ -321,13 +315,7 make narrow clone with every third node.
321 315
322 316 Verify shouldn't claim the repo is corrupt after a widen.
323 317
324 $ hg verify
325 checking changesets
326 checking manifests
327 checking directory manifests (tree !)
328 crosschecking files in changesets and manifests
329 checking files
330 checked 11 changesets with 5 changes to 5 files
318 $ hg verify -q
331 319
332 320 Widening preserves parent of local commit
333 321
@@ -280,13 +280,7 make narrow clone with every third node.
280 280 I path:d3
281 281 I path:d6
282 282 I path:d9
283 $ hg verify
284 checking changesets
285 checking manifests
286 checking directory manifests (tree !)
287 crosschecking files in changesets and manifests
288 checking files
289 checked 8 changesets with 4 changes to 4 files
283 $ hg verify -q
290 284 $ hg l
291 285 @ ...7: add d10/f
292 286 |
@@ -340,13 +334,7 make narrow clone with every third node.
340 334
341 335 Verify shouldn't claim the repo is corrupt after a widen.
342 336
343 $ hg verify
344 checking changesets
345 checking manifests
346 checking directory manifests (tree !)
347 crosschecking files in changesets and manifests
348 checking files
349 checked 9 changesets with 5 changes to 5 files
337 $ hg verify -q
350 338
351 339 Widening preserves parent of local commit
352 340
@@ -467,7 +467,7 non-ascii content and truncation of mult
467 467 Content-Transfer-Encoding: 8bit
468 468 X-Test: foo
469 469 Date: * (glob)
470 Subject: =?utf-8?b?w6AuLi4=?= (py3 !)
470 Subject: =?utf-8?b?w6AuLi4=?=
471 471 From: test@test.com
472 472 X-Hg-Notification: changeset 0f25f9c22b4c
473 473 Message-Id: <*> (glob)
@@ -47,12 +47,7 Push it. The bundle should not refer to
47 47 adding manifests
48 48 adding file changes
49 49 added 2 changesets with 2 changes to 2 files
50 $ hg -R ../other verify
51 checking changesets
52 checking manifests
53 crosschecking files in changesets and manifests
54 checking files
55 checked 2 changesets with 2 changes to 2 files
50 $ hg -R ../other verify -q
56 51
57 52 Adding a changeset going extinct locally
58 53 ------------------------------------------
@@ -513,7 +513,7 mime encoded mbox (base64):
513 513 X-Mercurial-Series-Id: <f81ef97829467e868fc4.240@test-hostname>
514 514 User-Agent: Mercurial-patchbomb/* (glob)
515 515 Date: Thu, 01 Jan 1970 00:04:00 +0000
516 From: =?iso-8859-1?q?Q?= <quux> (py3 !)
516 From: =?iso-8859-1?q?Q?= <quux>
517 517 To: foo
518 518 Cc: bar
519 519
@@ -2398,9 +2398,9 test multi-address parsing:
2398 2398 User-Agent: Mercurial-patchbomb/* (glob)
2399 2399 Date: Tue, 01 Jan 1980 00:01:00 +0000
2400 2400 From: quux
2401 To: =?iso-8859-1?q?spam?= <spam>, eggs, toast (py3 !)
2402 Cc: foo, bar@example.com, =?iso-8859-1?q?A=2C_B_=3C=3E?= <a@example.com> (py3 !)
2403 Bcc: =?iso-8859-1?q?Quux=2C_A=2E?= <quux> (py3 !)
2401 To: =?iso-8859-1?q?spam?= <spam>, eggs, toast
2402 Cc: foo, bar@example.com, =?iso-8859-1?q?A=2C_B_=3C=3E?= <a@example.com>
2403 Bcc: =?iso-8859-1?q?Quux=2C_A=2E?= <quux>
2404 2404
2405 2405 # HG changeset patch
2406 2406 # User test
@@ -2717,7 +2717,7 Test without revisions specified
2717 2717 MIME-Version: 1.0
2718 2718 Content-Type: text/plain; charset="iso-8859-1"
2719 2719 Content-Transfer-Encoding: quoted-printable
2720 Subject: =?utf-8?b?W1BBVENIIDIgb2YgNl0gw6dh?= (py3 !)
2720 Subject: =?utf-8?b?W1BBVENIIDIgb2YgNl0gw6dh?=
2721 2721 X-Mercurial-Node: f81ef97829467e868fc405fccbcfa66217e4d3e6
2722 2722 X-Mercurial-Series-Index: 2
2723 2723 X-Mercurial-Series-Total: 6
@@ -19,31 +19,17
19 19
20 20 $ hg commit -m "1"
21 21
22 $ hg verify
23 checking changesets
24 checking manifests
25 crosschecking files in changesets and manifests
26 checking files
27 checked 1 changesets with 1 changes to 1 files
22 $ hg verify -q
28 23
29 24 $ chmod -r .hg/store/data/a.i
30 25
31 $ hg verify
32 checking changesets
33 checking manifests
34 crosschecking files in changesets and manifests
35 checking files
26 $ hg verify -q
36 27 abort: Permission denied: '$TESTTMP/t/.hg/store/data/a.i'
37 28 [255]
38 29
39 30 $ chmod +r .hg/store/data/a.i
40 31
41 $ hg verify
42 checking changesets
43 checking manifests
44 crosschecking files in changesets and manifests
45 checking files
46 checked 1 changesets with 1 changes to 1 files
32 $ hg verify -q
47 33
48 34 $ chmod -w .hg/store/data/a.i
49 35
@@ -9,7 +9,7
9 9 > txnclose-phase.test = sh $TESTTMP/hook.sh
10 10 > EOF
11 11
12 $ hglog() { hg log --template "{rev} {phaseidx} {desc}\n" $*; }
12 $ hglog() { hg log -G --template "{rev} {phaseidx} {desc}\n" $*; }
13 13 $ mkcommit() {
14 14 > echo "$1" > "$1"
15 15 > hg add "$1"
@@ -36,7 +36,8 Cannot change null revision phase
36 36 New commits are draft by default
37 37
38 38 $ hglog
39 0 1 A
39 @ 0 1 A
40
40 41
41 42 Following commits are draft too
42 43
@@ -45,8 +46,10 Following commit are draft too
45 46 test-hook-close-phase: 27547f69f25460a52fff66ad004e58da7ad3fb56: -> draft
46 47
47 48 $ hglog
48 1 1 B
49 0 1 A
49 @ 1 1 B
50 |
51 o 0 1 A
52
50 53
51 54 Working directory phase is secret when its parent is secret.
52 55
@@ -103,8 +106,10 Draft commit are properly created over p
103 106 $ hg phase
104 107 1: public
105 108 $ hglog
106 1 0 B
107 0 0 A
109 @ 1 0 B
110 |
111 o 0 0 A
112
108 113
109 114 $ mkcommit C
110 115 test-debug-phase: new rev 2: x -> 1
@@ -114,10 +119,14 Draft commit are properly created over p
114 119 test-hook-close-phase: b3325c91a4d916bcc4cdc83ea3fe4ece46a42f6e: -> draft
115 120
116 121 $ hglog
117 3 1 D
118 2 1 C
119 1 0 B
120 0 0 A
122 @ 3 1 D
123 |
124 o 2 1 C
125 |
126 o 1 0 B
127 |
128 o 0 0 A
129
121 130
122 131 Test creating changeset as secret
123 132
@@ -125,11 +134,16 Test creating changeset as secret
125 134 test-debug-phase: new rev 4: x -> 2
126 135 test-hook-close-phase: a603bfb5a83e312131cebcd05353c217d4d21dde: -> secret
127 136 $ hglog
128 4 2 E
129 3 1 D
130 2 1 C
131 1 0 B
132 0 0 A
137 @ 4 2 E
138 |
139 o 3 1 D
140 |
141 o 2 1 C
142 |
143 o 1 0 B
144 |
145 o 0 0 A
146
133 147
134 148 Test the secret property is inherited
135 149
@@ -137,12 +151,18 Test the secret property is inherited
137 151 test-debug-phase: new rev 5: x -> 2
138 152 test-hook-close-phase: a030c6be5127abc010fcbff1851536552e6951a8: -> secret
139 153 $ hglog
140 5 2 H
141 4 2 E
142 3 1 D
143 2 1 C
144 1 0 B
145 0 0 A
154 @ 5 2 H
155 |
156 o 4 2 E
157 |
158 o 3 1 D
159 |
160 o 2 1 C
161 |
162 o 1 0 B
163 |
164 o 0 0 A
165
146 166
147 167 Even on merge
148 168
@@ -152,13 +172,20 Even on merge
152 172 created new head
153 173 test-hook-close-phase: cf9fe039dfd67e829edf6522a45de057b5c86519: -> draft
154 174 $ hglog
155 6 1 B'
156 5 2 H
157 4 2 E
158 3 1 D
159 2 1 C
160 1 0 B
161 0 0 A
175 @ 6 1 B'
176 |
177 | o 5 2 H
178 | |
179 | o 4 2 E
180 | |
181 | o 3 1 D
182 | |
183 | o 2 1 C
184 |/
185 o 1 0 B
186 |
187 o 0 0 A
188
162 189 $ hg merge 4 # E
163 190 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
164 191 (branch merge, don't forget to commit)
@@ -170,14 +197,22 Even on merge
170 197 test-hook-close-phase: 17a481b3bccb796c0521ae97903d81c52bfee4af: -> secret
171 198
172 199 $ hglog
173 7 2 merge B' and E
174 6 1 B'
175 5 2 H
176 4 2 E
177 3 1 D
178 2 1 C
179 1 0 B
180 0 0 A
200 @ 7 2 merge B' and E
201 |\
202 | o 6 1 B'
203 | |
204 +---o 5 2 H
205 | |
206 o | 4 2 E
207 | |
208 o | 3 1 D
209 | |
210 o | 2 1 C
211 |/
212 o 1 0 B
213 |
214 o 0 0 A
215
181 216
182 217 Test secret changeset are not pushed
183 218
@@ -221,21 +256,34 Test secret changeset are not pushed
221 256 test-hook-close-phase: b3325c91a4d916bcc4cdc83ea3fe4ece46a42f6e: -> draft
222 257 test-hook-close-phase: cf9fe039dfd67e829edf6522a45de057b5c86519: -> draft
223 258 $ hglog
224 7 2 merge B' and E
225 6 1 B'
226 5 2 H
227 4 2 E
228 3 1 D
229 2 1 C
230 1 0 B
231 0 0 A
259 @ 7 2 merge B' and E
260 |\
261 | o 6 1 B'
262 | |
263 +---o 5 2 H
264 | |
265 o | 4 2 E
266 | |
267 o | 3 1 D
268 | |
269 o | 2 1 C
270 |/
271 o 1 0 B
272 |
273 o 0 0 A
274
232 275 $ cd ../push-dest
233 276 $ hglog
234 4 1 B'
235 3 1 D
236 2 1 C
237 1 0 B
238 0 0 A
277 o 4 1 B'
278 |
279 | o 3 1 D
280 | |
281 | o 2 1 C
282 |/
283 o 1 0 B
284 |
285 o 0 0 A
286
239 287
240 288 (Issue3303)
241 289 Check that remote secret changesets are ignored when checking creation of remote heads
@@ -328,11 +376,16 Test secret changeset are not pull
328 376 test-hook-close-phase: cf9fe039dfd67e829edf6522a45de057b5c86519: -> public
329 377 (run 'hg heads' to see heads, 'hg merge' to merge)
330 378 $ hglog
331 4 0 B'
332 3 0 D
333 2 0 C
334 1 0 B
335 0 0 A
379 o 4 0 B'
380 |
381 | o 3 0 D
382 | |
383 | o 2 0 C
384 |/
385 o 1 0 B
386 |
387 o 0 0 A
388
336 389 $ cd ..
337 390
338 391 But secret changesets can still be bundled explicitly
@@ -357,11 +410,16 Test secret changeset are not cloned
357 410 test-hook-close-phase: b3325c91a4d916bcc4cdc83ea3fe4ece46a42f6e: -> public
358 411 test-hook-close-phase: cf9fe039dfd67e829edf6522a45de057b5c86519: -> public
359 412 $ hglog -R clone-dest
360 4 0 B'
361 3 0 D
362 2 0 C
363 1 0 B
364 0 0 A
413 o 4 0 B'
414 |
415 | o 3 0 D
416 | |
417 | o 2 0 C
418 |/
419 o 1 0 B
420 |
421 o 0 0 A
422
365 423
366 424 Test summary
367 425
@@ -385,16 +443,28 Test revset
385 443
386 444 $ cd initialrepo
387 445 $ hglog -r 'public()'
388 0 0 A
389 1 0 B
446 o 1 0 B
447 |
448 o 0 0 A
449
390 450 $ hglog -r 'draft()'
391 2 1 C
392 3 1 D
393 6 1 B'
451 o 6 1 B'
452 |
453 ~
454 o 3 1 D
455 |
456 o 2 1 C
457 |
458 ~
394 459 $ hglog -r 'secret()'
395 4 2 E
396 5 2 H
397 7 2 merge B' and E
460 @ 7 2 merge B' and E
461 |\
462 | ~
463 | o 5 2 H
464 |/
465 o 4 2 E
466 |
467 ~
398 468
399 469 test that phases are displayed in log at debug level
400 470
@@ -730,12 +800,7 test verify repo containing hidden chang
730 800 because repo.cancopy() is False
731 801
732 802 $ cd ../initialrepo
733 $ hg verify
734 checking changesets
735 checking manifests
736 crosschecking files in changesets and manifests
737 checking files
738 checked 8 changesets with 7 changes to 7 files
803 $ hg verify -q
739 804
740 805 $ cd ..
741 806
@@ -753,8 +818,6 repositories visible to an external hook
753 818 $ hg phase 6
754 819 6: draft
755 820 $ hg --config hooks.pretxnclose="sh $TESTTMP/savepending.sh" phase -f -s 6
756 transaction abort!
757 rollback completed
758 821 abort: pretxnclose hook exited with status 1
759 822 [40]
760 823 $ cp .hg/store/phaseroots.pending.saved .hg/store/phaseroots.pending
@@ -776,8 +839,6 repositories visible to an external hook
776 839 7: secret
777 840 @push-dest
778 841 6: draft
779 transaction abort!
780 rollback completed
781 842 abort: pretxnclose hook exited with status 1
782 843 [40]
783 844
@@ -850,13 +911,9 Install a hook that prevent b3325c91a4d9
850 911 Try various actions. Only the draft move should succeed
851 912
852 913 $ hg phase --public b3325c91a4d9
853 transaction abort!
854 rollback completed
855 914 abort: pretxnclose-phase.nopublish_D hook exited with status 1
856 915 [40]
857 916 $ hg phase --public a603bfb5a83e
858 transaction abort!
859 rollback completed
860 917 abort: pretxnclose-phase.nopublish_D hook exited with status 1
861 918 [40]
862 919 $ hg phase --draft 17a481b3bccb
@@ -867,8 +924,6 Try various actions. only the draft move
867 924 test-hook-close-phase: a603bfb5a83e312131cebcd05353c217d4d21dde: secret -> draft
868 925 test-hook-close-phase: 17a481b3bccb796c0521ae97903d81c52bfee4af: secret -> draft
869 926 $ hg phase --public 17a481b3bccb
870 transaction abort!
871 rollback completed
872 927 abort: pretxnclose-phase.nopublish_D hook exited with status 1
873 928 [40]
874 929
@@ -1047,3 +1102,30 But what about obsoleted changesets?
1047 1102 $ hg up tip
1048 1103 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
1049 1104 $ cd ..
1105
1106 Testing that command line flags override configuration
1107
1108 $ hg init commit-overrides
1109 $ cd commit-overrides
1110
1111 `hg commit --draft` overrides new-commit=secret
1112
1113 $ mkcommit A --config phases.new-commit='secret' --draft
1114 test-debug-phase: new rev 0: x -> 1
1115 test-hook-close-phase: 4a2df7238c3b48766b5e22fafbb8a2f506ec8256: -> draft
1116 $ hglog
1117 @ 0 1 A
1118
1119
1120 `hg commit --secret` overrides new-commit=draft
1121
1122 $ mkcommit B --config phases.new-commit='draft' --secret
1123 test-debug-phase: new rev 1: x -> 2
1124 test-hook-close-phase: 27547f69f25460a52fff66ad004e58da7ad3fb56: -> secret
1125 $ hglog
1126 @ 1 2 B
1127 |
1128 o 0 1 A
1129
1130
1131 $ cd ..
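As a usage note for the override checks above: the same phases can be obtained
from configuration alone, without --draft or --secret. A minimal sketch,
assuming a fresh repository outside the hooked setup used in this test (names
are illustrative):

  $ hg init phase-config-demo
  $ cd phase-config-demo
  $ echo A > A
  $ hg add A
  $ hg commit -m A --config phases.new-commit=secret
  $ hg phase -r .
  0: secret

The point of the test above is that the explicit flags win over exactly this
phases.new-commit value.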
@@ -33,8 +33,6 Test pullbundle functionality
33 33
34 34 $ cd repo
35 35 $ cat <<EOF > .hg/hgrc
36 > [server]
37 > pullbundle = True
38 36 > [experimental]
39 37 > evolution = True
40 38 > [extensions]
@@ -8,12 +8,7
8 8 adding foo
9 9 $ hg commit -m 1
10 10
11 $ hg verify
12 checking changesets
13 checking manifests
14 crosschecking files in changesets and manifests
15 checking files
16 checked 1 changesets with 1 changes to 1 files
11 $ hg verify -q
17 12
18 13 $ hg serve -p $HGPORT -d --pid-file=hg.pid
19 14 $ cat hg.pid >> $DAEMON_PIDS
@@ -30,12 +25,7
30 25 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
31 26
32 27 $ cd copy
33 $ hg verify
34 checking changesets
35 checking manifests
36 crosschecking files in changesets and manifests
37 checking files
38 checked 1 changesets with 1 changes to 1 files
28 $ hg verify -q
39 29
40 30 $ hg co
41 31 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -23,11 +23,6
23 23 $ chmod +w a/.hg/store # let test clean up
24 24
25 25 $ cd b
26 $ hg verify
27 checking changesets
28 checking manifests
29 crosschecking files in changesets and manifests
30 checking files
31 checked 1 changesets with 1 changes to 1 files
26 $ hg verify -q
32 27
33 28 $ cd ..
@@ -65,11 +65,6 start a pull...
65 65 see the result
66 66
67 67 $ wait
68 $ hg verify
69 checking changesets
70 checking manifests
71 crosschecking files in changesets and manifests
72 checking files
73 checked 11 changesets with 11 changes to 1 files
68 $ hg verify -q
74 69
75 70 $ cd ..
@@ -18,7 +18,7 Testing of the '--rev' flag
18 18 > echo
19 19 > hg init test-revflag-"$i"
20 20 > hg -R test-revflag push -r "$i" test-revflag-"$i"
21 > hg -R test-revflag-"$i" verify
21 > hg -R test-revflag-"$i" verify -q
22 22 > done
23 23
24 24 pushing to test-revflag-0
@@ -27,11 +27,6 Testing of the '--rev' flag
27 27 adding manifests
28 28 adding file changes
29 29 added 1 changesets with 1 changes to 1 files
30 checking changesets
31 checking manifests
32 crosschecking files in changesets and manifests
33 checking files
34 checked 1 changesets with 1 changes to 1 files
35 30
36 31 pushing to test-revflag-1
37 32 searching for changes
@@ -39,11 +34,6 Testing of the '--rev' flag
39 34 adding manifests
40 35 adding file changes
41 36 added 2 changesets with 2 changes to 1 files
42 checking changesets
43 checking manifests
44 crosschecking files in changesets and manifests
45 checking files
46 checked 2 changesets with 2 changes to 1 files
47 37
48 38 pushing to test-revflag-2
49 39 searching for changes
@@ -51,11 +41,6 Testing of the '--rev' flag
51 41 adding manifests
52 42 adding file changes
53 43 added 3 changesets with 3 changes to 1 files
54 checking changesets
55 checking manifests
56 crosschecking files in changesets and manifests
57 checking files
58 checked 3 changesets with 3 changes to 1 files
59 44
60 45 pushing to test-revflag-3
61 46 searching for changes
@@ -63,11 +48,6 Testing of the '--rev' flag
63 48 adding manifests
64 49 adding file changes
65 50 added 4 changesets with 4 changes to 1 files
66 checking changesets
67 checking manifests
68 crosschecking files in changesets and manifests
69 checking files
70 checked 4 changesets with 4 changes to 1 files
71 51
72 52 pushing to test-revflag-4
73 53 searching for changes
@@ -75,11 +55,6 Testing of the '--rev' flag
75 55 adding manifests
76 56 adding file changes
77 57 added 2 changesets with 2 changes to 1 files
78 checking changesets
79 checking manifests
80 crosschecking files in changesets and manifests
81 checking files
82 checked 2 changesets with 2 changes to 1 files
83 58
84 59 pushing to test-revflag-5
85 60 searching for changes
@@ -87,11 +62,6 Testing of the '--rev' flag
87 62 adding manifests
88 63 adding file changes
89 64 added 3 changesets with 3 changes to 1 files
90 checking changesets
91 checking manifests
92 crosschecking files in changesets and manifests
93 checking files
94 checked 3 changesets with 3 changes to 1 files
95 65
96 66 pushing to test-revflag-6
97 67 searching for changes
@@ -99,11 +69,6 Testing of the '--rev' flag
99 69 adding manifests
100 70 adding file changes
101 71 added 4 changesets with 5 changes to 2 files
102 checking changesets
103 checking manifests
104 crosschecking files in changesets and manifests
105 checking files
106 checked 4 changesets with 5 changes to 2 files
107 72
108 73 pushing to test-revflag-7
109 74 searching for changes
@@ -111,11 +76,6 Testing of the '--rev' flag
111 76 adding manifests
112 77 adding file changes
113 78 added 5 changesets with 6 changes to 3 files
114 checking changesets
115 checking manifests
116 crosschecking files in changesets and manifests
117 checking files
118 checked 5 changesets with 6 changes to 3 files
119 79
120 80 pushing to test-revflag-8
121 81 searching for changes
@@ -123,11 +83,6 Testing of the '--rev' flag
123 83 adding manifests
124 84 adding file changes
125 85 added 5 changesets with 5 changes to 2 files
126 checking changesets
127 checking manifests
128 crosschecking files in changesets and manifests
129 checking files
130 checked 5 changesets with 5 changes to 2 files
131 86
132 87 $ cd test-revflag-8
133 88
@@ -141,12 +96,7 Testing of the '--rev' flag
141 96 new changesets c70afb1ee985:faa2e4234c7a
142 97 (run 'hg heads' to see heads, 'hg merge' to merge)
143 98
144 $ hg verify
145 checking changesets
146 checking manifests
147 crosschecking files in changesets and manifests
148 checking files
149 checked 9 changesets with 7 changes to 4 files
99 $ hg verify -q
150 100
151 101 $ cd ..
152 102
@@ -189,13 +139,9 Test spurious filelog entries:
189 139
190 140 Expected to fail:
191 141
192 $ hg verify
193 checking changesets
194 checking manifests
195 crosschecking files in changesets and manifests
196 checking files
142 $ hg verify -q
197 143 beta@1: dddc47b3ba30 not in manifests
198 checked 2 changesets with 4 changes to 2 files
144 not checking dirstate because of previous errors
199 145 1 integrity errors encountered!
200 146 (first damaged changeset appears to be 1)
201 147 [1]
@@ -224,13 +170,9 Test missing filelog entries:
224 170
225 171 Expected to fail:
226 172
227 $ hg verify
228 checking changesets
229 checking manifests
230 crosschecking files in changesets and manifests
231 checking files
173 $ hg verify -q
232 174 beta@1: manifest refers to unknown revision dddc47b3ba30
233 checked 2 changesets with 2 changes to 2 files
175 not checking dirstate because of previous errors
234 176 1 integrity errors encountered!
235 177 (first damaged changeset appears to be 1)
236 178 [1]
@@ -68,6 +68,7 help record (record)
68 68 --close-branch mark a branch head as closed
69 69 --amend amend the parent of the working directory
70 70 -s --secret use the secret phase for committing
71 --draft use the draft phase for committing
71 72 -e --edit invoke editor on commit messages
72 73 -I --include PATTERN [+] include names matching the given patterns
73 74 -X --exclude PATTERN [+] exclude names matching the given patterns
@@ -6,8 +6,12 Test situations that "should" only be re
6 6 - something (that doesn't respect the lock file) writing to the .hg directory
7 7 while we're running
8 8
9 $ hg init a
10 $ cd a
9
10 Initial setup
11 -------------
12
13 $ hg init base-repo
14 $ cd base-repo
11 15
12 16 $ cat > "$TESTTMP_FORWARD_SLASH/waitlock_editor.sh" <<EOF
13 17 > [ -n "\${WAITLOCK_ANNOUNCE:-}" ] && touch "\${WAITLOCK_ANNOUNCE}"
@@ -26,46 +30,63 this all starts, so let's make one.
26 30 $ echo r0 > r0
27 31 $ hg commit -qAm 'r0'
28 32
33 $ cd ..
34 $ cp -R base-repo main-client
35 $ cp -R base-repo racing-client
36
37 $ mkdir sync
38 $ EDITOR_STARTED="$TESTTMP_FORWARD_SLASH/sync/.editor_started"
39 $ MISCHIEF_MANAGED="$TESTTMP_FORWARD_SLASH/sync/.mischief_managed"
40 $ JOBS_FINISHED="$TESTTMP_FORWARD_SLASH/sync/.jobs_finished"
41
42 Actual test
43 -----------
44
29 45 Start an hg commit that will take a while
30 $ EDITOR_STARTED="$TESTTMP_FORWARD_SLASH/a/.editor_started"
31 $ MISCHIEF_MANAGED="$TESTTMP_FORWARD_SLASH/a/.mischief_managed"
32 $ JOBS_FINISHED="$TESTTMP_FORWARD_SLASH/a/.jobs_finished"
46
47 $ cd main-client
33 48
34 49 #if fail-if-detected
35 $ cat >> .hg/hgrc << EOF
50 $ cat >> $HGRCPATH << EOF
36 51 > [debug]
37 52 > revlog.verifyposition.changelog = fail
38 53 > EOF
39 54 #endif
40 55
41 $ cat >> .hg/hgrc << EOF
42 > [ui]
43 > editor=sh $TESTTMP_FORWARD_SLASH/waitlock_editor.sh
44 > EOF
45
46 56 $ echo foo > foo
47 $ (unset HGEDITOR;
48 > WAITLOCK_ANNOUNCE="${EDITOR_STARTED}" \
49 > WAITLOCK_FILE="${MISCHIEF_MANAGED}" \
50 > hg commit -qAm 'r1 (foo)' --edit foo > .foo_commit_out 2>&1 ; touch "${JOBS_FINISHED}") &
57 $ (
58 > unset HGEDITOR;
59 > WAITLOCK_ANNOUNCE="${EDITOR_STARTED}" \
60 > WAITLOCK_FILE="${MISCHIEF_MANAGED}" \
61 > hg commit -qAm 'r1 (foo)' --edit foo \
62 > --config ui.editor="sh $TESTTMP_FORWARD_SLASH/waitlock_editor.sh" \
63 > > .foo_commit_out 2>&1 ;\
64 > touch "${JOBS_FINISHED}"
65 > ) &
51 66
52 67 Wait for the "editor" to actually start
53 68 $ sh "$RUNTESTDIR_FORWARD_SLASH/testlib/wait-on-file" 5 "${EDITOR_STARTED}"
54 69
55 $ cat >> .hg/hgrc << EOF
56 > [ui]
57 > editor=
58 > EOF
59 70
60 Break the locks, and make another commit.
61 $ hg debuglocks -LW
71 Do a concurrent edit
72 $ cd ../racing-client
73 $ touch ../pre-race
74 $ sleep 1
62 75 $ echo bar > bar
63 $ hg commit -qAm 'r2 (bar)' bar
64 $ hg debugrevlogindex -c
76 $ hg --repository ../racing-client commit -qAm 'r2 (bar)' bar
77 $ hg --repository ../racing-client debugrevlogindex -c
65 78 rev linkrev nodeid p1 p2
66 79 0 0 222799e2f90b 000000000000 000000000000
67 80 1 1 6f124f6007a0 222799e2f90b 000000000000
68 81
82 We simulate a network FS race by overwriting the raced repo's content with the
83 new content of the files changed in the racing repository (see the note after this hunk).
84
85 $ for x in `find . -type f -newer ../pre-race`; do
86 > cp $x ../main-client/$x
87 > done
88 $ cd ../main-client
89
69 90 Awaken the editor from that first commit
70 91 $ touch "${MISCHIEF_MANAGED}"
71 92 And wait for it to finish
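A note on the timestamp trick in the race simulation above: "find -newer"
selects files whose modification time is later than the marker file's, and the
"sleep 1" between creating the marker and committing guards against
filesystems that store mtimes with only one-second granularity. A standalone
sketch of the same pattern (paths are hypothetical):

  $ touch marker
  $ sleep 1
  $ mkdir worktree
  $ echo change > worktree/file
  $ find worktree -type f -newer marker
  worktree/file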
@@ -85,10 +106,10 happen for the changelog (the linkrev sh
85 106
86 107 #if fail-if-detected
87 108 $ cat .foo_commit_out
109 note: commit message saved in .hg/last-message.txt
110 note: use 'hg commit --logfile .hg/last-message.txt --edit' to reuse it
88 111 transaction abort!
89 112 rollback completed
90 note: commit message saved in .hg/last-message.txt
91 note: use 'hg commit --logfile .hg/last-message.txt --edit' to reuse it
92 113 abort: 00changelog.i: file cursor at position 249, expected 121
93 114 And no corruption in the changelog.
94 115 $ hg debugrevlogindex -c
@@ -393,7 +393,6 New operations are blocked with the corr
393 393 .hg/merge/state
394 394 .hg/rebasestate
395 395 .hg/undo.backup.dirstate
396 .hg/undo.dirstate
397 396 .hg/updatestate
398 397
399 398 $ hg rebase -s 3 -d tip
@@ -315,7 +315,7 Check that the right ancestors is used w
315 315 adding manifests
316 316 adding file changes
317 317 adding f1.txt revisions
318 bundle2-input-part: total payload size 1686
318 bundle2-input-part: total payload size 1739
319 319 bundle2-input-part: "cache:rev-branch-cache" (advisory) supported
320 320 bundle2-input-part: total payload size 74
321 321 bundle2-input-part: "phase-heads" supported
@@ -168,8 +168,6 rebase can then be continued
168 168 rebasing 1:112478962961 B "B"
169 169 rebasing 3:26805aba1e60 C "C"
170 170 rebasing 5:f585351a92f8 D tip "D"
171 transaction abort!
172 rollback completed
173 171 abort: edit failed: false exited with status 1
174 172 [250]
175 173 $ hg tglog
@@ -17,7 +17,7
17 17 > try:
18 18 > for file in pats:
19 19 > if opts.get('normal_lookup'):
20 > with repo.dirstate.parentchange():
20 > with repo.dirstate.changing_parents(repo):
21 21 > repo.dirstate.update_file(
22 22 > file,
23 23 > p1_tracked=True,
@@ -51,6 +51,7 Record help
51 51 --close-branch mark a branch head as closed
52 52 --amend amend the parent of the working directory
53 53 -s --secret use the secret phase for committing
54 --draft use the draft phase for committing
54 55 -e --edit invoke editor on commit messages
55 56 -I --include PATTERN [+] include names matching the given patterns
56 57 -X --exclude PATTERN [+] exclude names matching the given patterns
@@ -38,7 +38,7 Verify corrupt cache error message
38 38 $ chmod u+w $CACHEDIR/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0
39 39 $ echo x > $CACHEDIR/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0
40 40 $ hg up tip 2>&1 | egrep "^[^ ].*unexpected remotefilelog"
41 hgext.remotefilelog.shallowutil.BadRemotefilelogHeader: unexpected remotefilelog header: illegal format (py3 !)
41 abort: unexpected remotefilelog header: illegal format
42 42
43 43 Verify detection and remediation when remotefilelog.validatecachelog is set
44 44
@@ -66,6 +66,7
66 66 (expected 1)
67 67 b@?: 736c29771fba not in manifests
68 68 warning: orphan data file 'data/c.i'
69 not checking dirstate because of previous errors
69 70 checked 2 changesets with 3 changes to 2 files
70 71 2 warnings encountered!
71 72 2 integrity errors encountered!
@@ -79,6 +80,7
79 80 checking manifests
80 81 crosschecking files in changesets and manifests
81 82 checking files
83 checking dirstate
82 84 checked 2 changesets with 2 changes to 2 files
83 85 $ teststrip 0 2 r .hg/store/data/b.i
84 86 % before update 0, strip 2
@@ -93,6 +95,7
93 95 checking manifests
94 96 crosschecking files in changesets and manifests
95 97 checking files
98 checking dirstate
96 99 checked 4 changesets with 4 changes to 3 files
97 100 % journal contents
98 101 (no journal)
@@ -124,6 +127,7
124 127 b@?: rev 1 points to nonexistent changeset 2
125 128 (expected 1)
126 129 c@?: rev 0 points to nonexistent changeset 3
130 not checking dirstate because of previous errors
127 131 checked 2 changesets with 4 changes to 3 files
128 132 1 warnings encountered!
129 133 7 integrity errors encountered!
@@ -138,6 +142,7
138 142 checking manifests
139 143 crosschecking files in changesets and manifests
140 144 checking files
145 checking dirstate
141 146 checked 2 changesets with 2 changes to 2 files
142 147
143 148 $ cd ..
@@ -19,8 +19,10 def addcommit(name, time):
19 19 f = open(name, 'wb')
20 20 f.write(b'%s\n' % name)
21 21 f.close()
22 repo[None].add([name])
23 commit(name, time)
22 with repo.wlock():
23 with repo.dirstate.changing_files(repo):
24 repo[None].add([name])
25 commit(name, time)
24 26
25 27
26 28 def update(rev):
@@ -1,7 +1,6
1 1 # test revlog interaction about raw data (flagprocessor)
2 2
3 3
4 import collections
5 4 import hashlib
6 5 import sys
7 6
@@ -54,10 +53,6 tvfs.options = {
54 53 b'sparse-revlog': True,
55 54 }
56 55
57 # The test wants to control whether to use delta explicitly, based on
58 # "storedeltachains".
59 revlog.revlog._isgooddeltainfo = lambda self, d, textlen: self._storedeltachains
60
61 56
62 57 def abort(msg):
63 58 print('abort: %s' % msg)
@@ -471,21 +466,21 def issnapshottest(rlog):
471 466 print(' got: %s' % result)
472 467
473 468
474 snapshotmapall = {0: [6, 8, 11, 17, 19, 25], 8: [21], -1: [0, 30]}
475 snapshotmap15 = {0: [17, 19, 25], 8: [21], -1: [30]}
469 snapshotmapall = {0: {6, 8, 11, 17, 19, 25}, 8: {21}, -1: {0, 30}}
470 snapshotmap15 = {0: {17, 19, 25}, 8: {21}, -1: {30}}
476 471
477 472
478 473 def findsnapshottest(rlog):
479 resultall = collections.defaultdict(list)
480 deltas._findsnapshots(rlog, resultall, 0)
481 resultall = dict(resultall.items())
474 cache = deltas.SnapshotCache()
475 cache.update(rlog)
476 resultall = dict(cache.snapshots)
482 477 if resultall != snapshotmapall:
483 478 print('snapshot map differ:')
484 479 print(' expected: %s' % snapshotmapall)
485 480 print(' got: %s' % resultall)
486 result15 = collections.defaultdict(list)
487 deltas._findsnapshots(rlog, result15, 15)
488 result15 = dict(result15.items())
481 cache15 = deltas.SnapshotCache()
482 cache15.update(rlog, 15)
483 result15 = dict(cache15.snapshots)
489 484 if result15 != snapshotmap15:
490 485 print('snapshot map differ:')
491 486 print(' expected: %s' % snapshotmap15)
@@ -117,16 +117,6 The two repository should be identical,
117 117 hg verify should be happy
118 118 -------------------------
119 119
120 $ hg verify
121 checking changesets
122 checking manifests
123 crosschecking files in changesets and manifests
124 checking files
125 checked 1 changesets with 1 changes to 1 files
120 $ hg verify -q
126 121
127 $ hg verify -R ../cloned-repo
128 checking changesets
129 checking manifests
130 crosschecking files in changesets and manifests
131 checking files
132 checked 1 changesets with 1 changes to 1 files
122 $ hg verify -R ../cloned-repo -q
@@ -75,16 +75,25 TODO: bad error message
75 75 $ "$real_hg" cat -r "$tip" hide
76 76 [1]
77 77
78 A naive implementation of [rhg files] leaks the paths that are supposed to be
79 hidden by narrow, so we just fall back to hg.
78 A naive implementation of `rhg files` would leak the paths that are supposed
79 to be hidden by narrow.
80 80
81 81 $ $NO_FALLBACK rhg files -r "$tip"
82 unsupported feature: rhg files -r <rev> is not supported in narrow clones
83 [252]
82 dir1/x
83 dir1/y
84 84 $ "$real_hg" files -r "$tip"
85 85 dir1/x
86 86 dir1/y
87 87
88 The working copy version works with narrow correctly
89
90 $ $NO_FALLBACK rhg files
91 dir1/x
92 dir1/y
93 $ "$real_hg" files
94 dir1/x
95 dir1/y
96
88 97 Hg status needs to do some filtering based on narrow spec
89 98
90 99 $ mkdir dir2
@@ -96,12 +105,7 Adding "orphaned" index files:
96 105
97 106 $ (cd ..; cp repo-sparse/.hg/store/data/hide.i repo-narrow/.hg/store/data/hide.i)
98 107 $ (cd ..; mkdir repo-narrow/.hg/store/data/dir2; cp repo-sparse/.hg/store/data/dir2/z.i repo-narrow/.hg/store/data/dir2/z.i)
99 $ "$real_hg" verify
100 checking changesets
101 checking manifests
102 crosschecking files in changesets and manifests
103 checking files
104 checked 1 changesets with 2 changes to 2 files
108 $ "$real_hg" verify -q
105 109
106 110 $ "$real_hg" files -r "$tip"
107 111 dir1/x
@@ -4,12 +4,11
4 4
5 5 Unimplemented command
6 6 $ $NO_FALLBACK rhg unimplemented-command
7 unsupported feature: error: Found argument 'unimplemented-command' which wasn't expected, or isn't valid in this context
7 unsupported feature: error: The subcommand 'unimplemented-command' wasn't recognized
8 8
9 USAGE:
10 rhg [OPTIONS] <SUBCOMMAND>
9 Usage: rhg [OPTIONS] <COMMAND>
11 10
12 For more information try --help
11 For more information try '--help'
13 12
14 13 [252]
15 14 $ rhg unimplemented-command --config rhg.on-unsupported=abort-silent
@@ -159,10 +158,11 Fallback to Python
159 158 $ $NO_FALLBACK rhg cat original --exclude="*.rs"
160 159 unsupported feature: error: Found argument '--exclude' which wasn't expected, or isn't valid in this context
161 160
162 USAGE:
163 rhg cat [OPTIONS] <FILE>...
161 If you tried to supply '--exclude' as a value rather than a flag, use '-- --exclude'
164 162
165 For more information try --help
163 Usage: rhg cat <FILE>...
164
165 For more information try '--help'
166 166
167 167 [252]
168 168 $ rhg cat original --exclude="*.rs"
@@ -190,10 +190,11 Check that `fallback-immediately` overri
190 190 Blocking recursive fallback. The 'rhg.fallback-executable = rhg' config points to `rhg` itself.
191 191 unsupported feature: error: Found argument '--exclude' which wasn't expected, or isn't valid in this context
192 192
193 USAGE:
194 rhg cat [OPTIONS] <FILE>...
193 If you tried to supply '--exclude' as a value rather than a flag, use '-- --exclude'
195 194
196 For more information try --help
195 Usage: rhg cat <FILE>...
196
197 For more information try '--help'
197 198
198 199 [252]
199 200
@@ -2,14 +2,9 setup repo
2 2 $ hg init t
3 3 $ cd t
4 4 $ echo a > a
5 $ hg commit -Am'add a'
6 adding a
7 $ hg verify
8 checking changesets
9 checking manifests
10 crosschecking files in changesets and manifests
11 checking files
12 checked 1 changesets with 1 changes to 1 files
5 $ hg add a
6 $ hg commit -m 'add a'
7 $ hg verify -q
13 8 $ hg parents
14 9 changeset: 0:1f0dee641bb7
15 10 tag: tip
@@ -23,12 +18,7 rollback to null revision
23 18 $ hg rollback
24 19 repository tip rolled back to revision -1 (undo commit)
25 20 working directory now based on revision -1
26 $ hg verify
27 checking changesets
28 checking manifests
29 crosschecking files in changesets and manifests
30 checking files
31 checked 0 changesets with 0 changes to 0 files
21 $ hg verify -q
32 22 $ hg parents
33 23 $ hg status
34 24 A a
@@ -75,21 +65,45 working dir unaffected by rollback: do n
75 65 $ hg commit -m'modify a again'
76 66 $ echo b > b
77 67 $ hg bookmark bar -r default #making bar active, before the transaction
78 $ hg commit -Am'add b'
79 adding b
80 $ hg log --template '{rev} {branch} {desc|firstline}\n'
81 2 test add b
82 1 test modify a again
83 0 default add a again
68 $ hg log -G --template '{rev} [{branch}] ({bookmarks}) {desc|firstline}\n'
69 @ 1 [test] (foo) modify a again
70 |
71 o 0 [default] (bar) add a again
72
73 $ hg add b
74 $ hg commit -m'add b'
75 $ hg log -G --template '{rev} [{branch}] ({bookmarks}) {desc|firstline}\n'
76 @ 2 [test] (foo) add b
77 |
78 o 1 [test] () modify a again
79 |
80 o 0 [default] (bar) add a again
81
84 82 $ hg update bar
85 83 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
86 84 (activating bookmark bar)
87 85 $ cat .hg/undo.branch ; echo
88 86 test
87 $ hg log -G --template '{rev} [{branch}] ({bookmarks}) {desc|firstline}\n'
88 o 2 [test] (foo) add b
89 |
90 o 1 [test] () modify a again
91 |
92 @ 0 [default] (bar) add a again
93
94 $ hg rollback
95 abort: rollback of last commit while not checked out may lose data
96 (use -f to force)
97 [255]
89 98 $ hg rollback -f
90 99 repository tip rolled back to revision 1 (undo commit)
91 100 $ hg id -n
92 101 0
102 $ hg log -G --template '{rev} [{branch}] ({bookmarks}) {desc|firstline}\n'
103 o 1 [test] (foo) modify a again
104 |
105 @ 0 [default] (bar) add a again
106
93 107 $ hg branch
94 108 default
95 109 $ cat .hg/bookmarks.current ; echo
@@ -186,19 +200,14 same again, but emulate an old client th
186 200 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
187 201 $ hg rollback
188 202 rolling back unknown transaction
203 working directory now based on revision 0
189 204 $ cat a
190 205 a
191 206
192 207 corrupt journal test
193 208 $ echo "foo" > .hg/store/journal
194 $ hg recover --verify
195 rolling back interrupted transaction
209 $ hg recover --verify -q
196 210 couldn't read journal entry 'foo\n'!
197 checking changesets
198 checking manifests
199 crosschecking files in changesets and manifests
200 checking files
201 checked 2 changesets with 2 changes to 1 files
202 211
203 212 rollback disabled by config
204 213 $ cat >> $HGRCPATH <<EOF
@@ -433,12 +442,7 An I/O error writing "rollback completed
433 442 abort: pretxncommit hook exited with status 1
434 443 [40]
435 444
436 $ hg verify
437 checking changesets
438 checking manifests
439 crosschecking files in changesets and manifests
440 checking files
441 checked 1 changesets with 1 changes to 1 files
445 $ hg verify -q
442 446
443 447 $ cd ..
444 448
@@ -458,11 +462,6 of a transaction.
458 462
459 463 $ hg --config ui.ioerrors=pretxncommit,pretxnclose,txnclose,txnabort,msgabort,msgrollback commit -m 'multiple errors'
460 464
461 $ hg verify
462 checking changesets
463 checking manifests
464 crosschecking files in changesets and manifests
465 checking files
466 checked 2 changesets with 2 changes to 1 files
465 $ hg verify -q
467 466
468 467 $ cd ..
@@ -2086,4 +2086,4 Test that a proper "python" has been set
2086 2086 $ ./test-py3.py
2087 2087 3.* (glob)
2088 2088 $ ./test-py.py
2089 3.* (glob) (py3 !)
2089 3.* (glob)
@@ -99,8 +99,6 Therefore, this test scenario ignores ch
99 99 bm2 2:c2e0ac586386 (svfs !)
100 100 * bm3 2:c2e0ac586386
101 101 bmX 2:c2e0ac586386 (vfs !)
102 transaction abort!
103 rollback completed
104 102 abort: pretxnclose hook exited with status 1
105 103 [40]
106 104 $ hg book bm1
@@ -124,8 +122,6 src), because (1) HG_PENDING refers only
124 122 bm2 2:c2e0ac586386 (svfs !)
125 123 bm3 2:c2e0ac586386
126 124 * bmX 2:c2e0ac586386
127 transaction abort!
128 rollback completed
129 125 abort: pretxnclose hook exited with status 1
130 126 [40]
131 127 $ hg book bm3
@@ -1600,6 +1600,7 shelve --list --patch should work even w
1600 1600 $ rm -r .hg/shelve*
1601 1601
1602 1602 #if phasebased
1603 $ cp $HGRCPATH $TESTTMP/hgrc-saved
1603 1604 $ cat <<EOF >> $HGRCPATH
1604 1605 > [shelve]
1605 1606 > store = strip
@@ -1628,3 +1629,32 Override the disabling, re-enabling phas
1628 1629 #if stripbased
1629 1630 $ hg log --hidden --template '{user}\n'
1630 1631 #endif
1632
1633 clean up
1634
1635 #if phasebased
1636 $ mv $TESTTMP/hgrc-saved $HGRCPATH
1637 #endif
1638
1639 changed files should be reachable in all shelves
1640
1641 create an extension that emits changed files
1642
1643 $ cat > shelve-changed-files.py << EOF
1644 > """Command to emit changed files for a shelf"""
1645 >
1646 > from mercurial import registrar, shelve
1647 >
1648 > cmdtable = {}
1649 > command = registrar.command(cmdtable)
1650 >
1651 >
1652 > @command(b'shelve-changed-files')
1653 > def shelve_changed_files(ui, repo, name):
1654 > shelf = shelve.ShelfDir(repo).get(name)
1655 > for file in shelf.changed_files(ui, repo):
1656 > ui.write(file + b'\n')
1657 > EOF
1658
1659 $ hg --config extensions.shelve-changed-files=shelve-changed-files.py shelve-changed-files default
1660 somefile.py
@@ -5,12 +5,7
5 5 adding foo
6 6 $ hg commit -m "1"
7 7
8 $ hg verify
9 checking changesets
10 checking manifests
11 crosschecking files in changesets and manifests
12 checking files
13 checked 1 changesets with 1 changes to 1 files
8 $ hg verify -q
14 9
15 10 $ hg clone . ../branch
16 11 updating to branch default
@@ -34,12 +29,7
34 29 1 local changesets published
35 30 (run 'hg update' to get a working copy)
36 31
37 $ hg verify
38 checking changesets
39 checking manifests
40 crosschecking files in changesets and manifests
41 checking files
42 checked 2 changesets with 2 changes to 1 files
32 $ hg verify -q
43 33
44 34 $ hg co
45 35 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -105,11 +105,11 repeatedly while some of it changes rare
105 105 delta : 0 (100.00%)
106 106 snapshot : 383 ( 7.66%)
107 107 lvl-0 : 3 ( 0.06%)
108 lvl-1 : 18 ( 0.36%)
109 lvl-2 : 62 ( 1.24%)
110 lvl-3 : 108 ( 2.16%)
111 lvl-4 : 191 ( 3.82%)
112 lvl-5 : 1 ( 0.02%)
108 lvl-1 : 18 ( 0.36%) non-ancestor-bases: 9 (50.00%)
109 lvl-2 : 62 ( 1.24%) non-ancestor-bases: 58 (93.55%)
110 lvl-3 : 108 ( 2.16%) non-ancestor-bases: 108 (100.00%)
111 lvl-4 : 191 ( 3.82%) non-ancestor-bases: 180 (94.24%)
112 lvl-5 : 1 ( 0.02%) non-ancestor-bases: 1 (100.00%)
113 113 deltas : 4618 (92.34%)
114 114 revision size : 58616973
115 115 snapshot : 9247844 (15.78%)
@@ -126,6 +126,9 repeatedly while some of it changes rare
126 126 chunks size : 58616973
127 127 0x28 : 58616973 (100.00%)
128 128
129
130 total-stored-content: 1 732 705 361 bytes
131
129 132 avg chain length : 9
130 133 max chain length : 15
131 134 max chain reach : 27366701
@@ -144,9 +147,11 repeatedly while some of it changes rare
144 147 deltas against prev : 3906 (84.58%)
145 148 where prev = p1 : 3906 (100.00%)
146 149 where prev = p2 : 0 ( 0.00%)
147 other : 0 ( 0.00%)
150 other-ancestor : 0 ( 0.00%)
151 unrelated : 0 ( 0.00%)
148 152 deltas against p1 : 649 (14.05%)
149 153 deltas against p2 : 63 ( 1.36%)
154 deltas against ancs : 0 ( 0.00%)
150 155 deltas against other : 0 ( 0.00%)
151 156
152 157
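A note on the new non-ancestor-bases columns above: each reports, per snapshot level, how many snapshots use a delta base that is not an ancestor, and the percentages are consistent with dividing by that level's own snapshot count. A quick Python check of the arithmetic, with the numbers copied from the hunk above:

    levels = {1: (18, 9), 2: (62, 58), 3: (108, 108), 4: (191, 180), 5: (1, 1)}
    for lvl, (total, non_anc) in sorted(levels.items()):
        # non-ancestor count divided by that level's snapshot count
        print("lvl-%d: %d/%d = %.2f%%" % (lvl, non_anc, total, 100.0 * non_anc / total))
    # prints 50.00%, 93.55%, 100.00%, 94.24%, 100.00% -- matching the output above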
@@ -159,7 +164,7 Test `debug-delta-find`
159 164 4971 4970 -1 3 5 4930 snap 19179 346472 427596 1.23414 15994877 15567281 36.40652 427596 179288 1.00000 5
160 165 $ hg debug-delta-find SPARSE-REVLOG-TEST-FILE 4971
161 166 DBG-DELTAS-SEARCH: SEARCH rev=4971
162 DBG-DELTAS-SEARCH: ROUND #1 - 2 candidates - search-down
167 DBG-DELTAS-SEARCH: ROUND #1 - 1 candidates - search-down
163 168 DBG-DELTAS-SEARCH: CANDIDATE: rev=4962
164 169 DBG-DELTAS-SEARCH: type=snapshot-4
165 170 DBG-DELTAS-SEARCH: size=18296
@@ -167,11 +172,43 Test `debug-delta-find`
167 172 DBG-DELTAS-SEARCH: uncompressed-delta-size=30377
168 173 DBG-DELTAS-SEARCH: delta-search-time=* (glob)
169 174 DBG-DELTAS-SEARCH: DELTA: length=16872 (BAD)
170 DBG-DELTAS-SEARCH: CANDIDATE: rev=4971
175 DBG-DELTAS-SEARCH: ROUND #2 - 1 candidates - search-down
176 DBG-DELTAS-SEARCH: CANDIDATE: rev=4930
177 DBG-DELTAS-SEARCH: type=snapshot-3
178 DBG-DELTAS-SEARCH: size=39228
179 DBG-DELTAS-SEARCH: base=4799
180 DBG-DELTAS-SEARCH: uncompressed-delta-size=33050
181 DBG-DELTAS-SEARCH: delta-search-time=* (glob)
182 DBG-DELTAS-SEARCH: DELTA: length=19179 (GOOD)
183 DBG-DELTAS-SEARCH: ROUND #3 - 1 candidates - refine-down
184 DBG-DELTAS-SEARCH: CONTENDER: rev=4930 - length=19179
185 DBG-DELTAS-SEARCH: CANDIDATE: rev=4799
186 DBG-DELTAS-SEARCH: type=snapshot-2
187 DBG-DELTAS-SEARCH: size=50213
188 DBG-DELTAS-SEARCH: base=4623
189 DBG-DELTAS-SEARCH: uncompressed-delta-size=82661
190 DBG-DELTAS-SEARCH: delta-search-time=* (glob)
191 DBG-DELTAS-SEARCH: DELTA: length=49132 (BAD)
192 DBG-DELTAS: FILELOG:SPARSE-REVLOG-TEST-FILE: rev=4971: delta-base=4930 is-cached=0 - search-rounds=3 try-count=3 - delta-type=snapshot snap-depth=4 - p1-chain-length=15 p2-chain-length=-1 - duration=* (glob)
193
194 $ cat << EOF >>.hg/hgrc
195 > [storage]
196 > revlog.optimize-delta-parent-choice = no
197 > revlog.reuse-external-delta = yes
198 > EOF
199
200 $ hg debug-delta-find SPARSE-REVLOG-TEST-FILE 4971 --quiet
201 DBG-DELTAS: FILELOG:SPARSE-REVLOG-TEST-FILE: rev=4971: delta-base=4930 is-cached=0 - search-rounds=3 try-count=3 - delta-type=snapshot snap-depth=4 - p1-chain-length=15 p2-chain-length=-1 - duration=* (glob)
202 $ hg debug-delta-find SPARSE-REVLOG-TEST-FILE 4971 --source full
203 DBG-DELTAS-SEARCH: SEARCH rev=4971
204 DBG-DELTAS-SEARCH: ROUND #1 - 1 candidates - search-down
205 DBG-DELTAS-SEARCH: CANDIDATE: rev=4962
171 206 DBG-DELTAS-SEARCH: type=snapshot-4
172 DBG-DELTAS-SEARCH: size=19179
207 DBG-DELTAS-SEARCH: size=18296
173 208 DBG-DELTAS-SEARCH: base=4930
174 DBG-DELTAS-SEARCH: TOO-HIGH
209 DBG-DELTAS-SEARCH: uncompressed-delta-size=30377
210 DBG-DELTAS-SEARCH: delta-search-time=* (glob)
211 DBG-DELTAS-SEARCH: DELTA: length=16872 (BAD)
175 212 DBG-DELTAS-SEARCH: ROUND #2 - 1 candidates - search-down
176 213 DBG-DELTAS-SEARCH: CANDIDATE: rev=4930
177 214 DBG-DELTAS-SEARCH: type=snapshot-3
@@ -189,6 +226,101 Test `debug-delta-find`
189 226 DBG-DELTAS-SEARCH: uncompressed-delta-size=82661
190 227 DBG-DELTAS-SEARCH: delta-search-time=* (glob)
191 228 DBG-DELTAS-SEARCH: DELTA: length=49132 (BAD)
192 DBG-DELTAS: FILELOG:SPARSE-REVLOG-TEST-FILE: rev=4971: search-rounds=3 try-count=3 - delta-type=snapshot snap-depth=4 - p1-chain-length=15 p2-chain-length=-1 - duration=* (glob)
229 DBG-DELTAS: FILELOG:SPARSE-REVLOG-TEST-FILE: rev=4971: delta-base=4930 is-cached=0 - search-rounds=3 try-count=3 - delta-type=snapshot snap-depth=4 - p1-chain-length=15 p2-chain-length=-1 - duration=* (glob)
230 $ hg debug-delta-find SPARSE-REVLOG-TEST-FILE 4971 --source storage
231 DBG-DELTAS-SEARCH: SEARCH rev=4971
232 DBG-DELTAS-SEARCH: ROUND #1 - 1 candidates - cached-delta
233 DBG-DELTAS-SEARCH: CANDIDATE: rev=4930
234 DBG-DELTAS-SEARCH: type=snapshot-3
235 DBG-DELTAS-SEARCH: size=39228
236 DBG-DELTAS-SEARCH: base=4799
237 DBG-DELTAS-SEARCH: uncompressed-delta-size=33050
238 DBG-DELTAS-SEARCH: delta-search-time=* (glob)
239 DBG-DELTAS-SEARCH: DELTA: length=19179 (GOOD)
240 DBG-DELTAS: FILELOG:SPARSE-REVLOG-TEST-FILE: rev=4971: delta-base=4930 is-cached=1 - search-rounds=1 try-count=1 - delta-type=snapshot snap-depth=4 - p1-chain-length=15 p2-chain-length=-1 - duration=* (glob)
241 $ hg debug-delta-find SPARSE-REVLOG-TEST-FILE 4971 --source p1
242 DBG-DELTAS-SEARCH: SEARCH rev=4971
243 DBG-DELTAS-SEARCH: ROUND #1 - 1 candidates - search-down
244 DBG-DELTAS-SEARCH: CANDIDATE: rev=4962
245 DBG-DELTAS-SEARCH: type=snapshot-4
246 DBG-DELTAS-SEARCH: size=18296
247 DBG-DELTAS-SEARCH: base=4930
248 DBG-DELTAS-SEARCH: uncompressed-delta-size=30377
249 DBG-DELTAS-SEARCH: delta-search-time=* (glob)
250 DBG-DELTAS-SEARCH: DELTA: length=16872 (BAD)
251 DBG-DELTAS-SEARCH: ROUND #2 - 1 candidates - search-down
252 DBG-DELTAS-SEARCH: CANDIDATE: rev=4930
253 DBG-DELTAS-SEARCH: type=snapshot-3
254 DBG-DELTAS-SEARCH: size=39228
255 DBG-DELTAS-SEARCH: base=4799
256 DBG-DELTAS-SEARCH: uncompressed-delta-size=33050
257 DBG-DELTAS-SEARCH: delta-search-time=* (glob)
258 DBG-DELTAS-SEARCH: DELTA: length=19179 (GOOD)
259 DBG-DELTAS-SEARCH: ROUND #3 - 1 candidates - refine-down
260 DBG-DELTAS-SEARCH: CONTENDER: rev=4930 - length=19179
261 DBG-DELTAS-SEARCH: CANDIDATE: rev=4799
262 DBG-DELTAS-SEARCH: type=snapshot-2
263 DBG-DELTAS-SEARCH: size=50213
264 DBG-DELTAS-SEARCH: base=4623
265 DBG-DELTAS-SEARCH: uncompressed-delta-size=82661
266 DBG-DELTAS-SEARCH: delta-search-time=* (glob)
267 DBG-DELTAS-SEARCH: DELTA: length=49132 (BAD)
268 DBG-DELTAS: FILELOG:SPARSE-REVLOG-TEST-FILE: rev=4971: delta-base=4930 is-cached=0 - search-rounds=3 try-count=3 - delta-type=snapshot snap-depth=4 - p1-chain-length=15 p2-chain-length=-1 - duration=* (glob)
269 $ hg debug-delta-find SPARSE-REVLOG-TEST-FILE 4971 --source p2
270 DBG-DELTAS-SEARCH: SEARCH rev=4971
271 DBG-DELTAS-SEARCH: ROUND #1 - 1 candidates - search-down
272 DBG-DELTAS-SEARCH: CANDIDATE: rev=4962
273 DBG-DELTAS-SEARCH: type=snapshot-4
274 DBG-DELTAS-SEARCH: size=18296
275 DBG-DELTAS-SEARCH: base=4930
276 DBG-DELTAS-SEARCH: uncompressed-delta-size=30377
277 DBG-DELTAS-SEARCH: delta-search-time=* (glob)
278 DBG-DELTAS-SEARCH: DELTA: length=16872 (BAD)
279 DBG-DELTAS-SEARCH: ROUND #2 - 1 candidates - search-down
280 DBG-DELTAS-SEARCH: CANDIDATE: rev=4930
281 DBG-DELTAS-SEARCH: type=snapshot-3
282 DBG-DELTAS-SEARCH: size=39228
283 DBG-DELTAS-SEARCH: base=4799
284 DBG-DELTAS-SEARCH: uncompressed-delta-size=33050
285 DBG-DELTAS-SEARCH: delta-search-time=* (glob)
286 DBG-DELTAS-SEARCH: DELTA: length=19179 (GOOD)
287 DBG-DELTAS-SEARCH: ROUND #3 - 1 candidates - refine-down
288 DBG-DELTAS-SEARCH: CONTENDER: rev=4930 - length=19179
289 DBG-DELTAS-SEARCH: CANDIDATE: rev=4799
290 DBG-DELTAS-SEARCH: type=snapshot-2
291 DBG-DELTAS-SEARCH: size=50213
292 DBG-DELTAS-SEARCH: base=4623
293 DBG-DELTAS-SEARCH: uncompressed-delta-size=82661
294 DBG-DELTAS-SEARCH: delta-search-time=* (glob)
295 DBG-DELTAS-SEARCH: DELTA: length=49132 (BAD)
296 DBG-DELTAS: FILELOG:SPARSE-REVLOG-TEST-FILE: rev=4971: delta-base=4930 is-cached=0 - search-rounds=3 try-count=3 - delta-type=snapshot snap-depth=4 - p1-chain-length=15 p2-chain-length=-1 - duration=* (glob)
297 $ hg debug-delta-find SPARSE-REVLOG-TEST-FILE 4971 --source prev
298 DBG-DELTAS-SEARCH: SEARCH rev=4971
299 DBG-DELTAS-SEARCH: ROUND #1 - 1 candidates - search-down
300 DBG-DELTAS-SEARCH: CANDIDATE: rev=4962
301 DBG-DELTAS-SEARCH: type=snapshot-4
302 DBG-DELTAS-SEARCH: size=18296
303 DBG-DELTAS-SEARCH: base=4930
304 DBG-DELTAS-SEARCH: uncompressed-delta-size=30377
305 DBG-DELTAS-SEARCH: delta-search-time=* (glob)
306 DBG-DELTAS-SEARCH: DELTA: length=16872 (BAD)
307 DBG-DELTAS-SEARCH: ROUND #2 - 1 candidates - search-down
308 DBG-DELTAS-SEARCH: CANDIDATE: rev=4930
309 DBG-DELTAS-SEARCH: type=snapshot-3
310 DBG-DELTAS-SEARCH: size=39228
311 DBG-DELTAS-SEARCH: base=4799
312 DBG-DELTAS-SEARCH: uncompressed-delta-size=33050
313 DBG-DELTAS-SEARCH: delta-search-time=* (glob)
314 DBG-DELTAS-SEARCH: DELTA: length=19179 (GOOD)
315 DBG-DELTAS-SEARCH: ROUND #3 - 1 candidates - refine-down
316 DBG-DELTAS-SEARCH: CONTENDER: rev=4930 - length=19179
317 DBG-DELTAS-SEARCH: CANDIDATE: rev=4799
318 DBG-DELTAS-SEARCH: type=snapshot-2
319 DBG-DELTAS-SEARCH: size=50213
320 DBG-DELTAS-SEARCH: base=4623
321 DBG-DELTAS-SEARCH: uncompressed-delta-size=82661
322 DBG-DELTAS-SEARCH: delta-search-time=* (glob)
323 DBG-DELTAS-SEARCH: DELTA: length=49132 (BAD)
324 DBG-DELTAS: FILELOG:SPARSE-REVLOG-TEST-FILE: rev=4971: delta-base=4930 is-cached=0 - search-rounds=3 try-count=3 - delta-type=snapshot snap-depth=4 - p1-chain-length=15 p2-chain-length=-1 - duration=* (glob)
193 325
194 326 $ cd ..
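An illustrative reading of the DBG-DELTAS-SEARCH rounds above (a sketch of the log's shape, not Mercurial's actual implementation): the search walks snapshot levels downward, trying rev 4962 (snapshot-4) first, then its base 4930 (snapshot-3), keeping the first delta judged GOOD as the contender; a final refine-down round tries the contender's own base 4799 (snapshot-2) and drops it when its delta is worse. A minimal sketch with the numbers taken from the log, where the verdict column stands in for the real search's size and chain-length constraints:

    rounds = [
        ("search-down", 4962, 16872, "BAD"),   # rejected by the search's constraints
        ("search-down", 4930, 19179, "GOOD"),  # accepted, becomes the contender
        ("refine-down", 4799, 49132, "BAD"),   # worse than the contender, dropped
    ]
    contender = None
    for kind, rev, delta_len, verdict in rounds:
        if verdict == "GOOD":
            contender = (rev, delta_len)
    print(contender)  # (4930, 19179), matching delta-base=4930 in the summary line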
@@ -156,8 +156,6 was always recording three commits, one
156 156 record change 3/3 to 'a'?
157 157 (enter ? for help) [Ynesfdaq?] y
158 158
159 transaction abort!
160 rollback completed
161 159 abort: edit failed: false exited with status 1
162 160 [250]
163 161 $ hg status
@@ -71,12 +71,7 clone remote via stream
71 71 updating to branch default
72 72 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
73 73 $ cd local-stream
74 $ hg verify
75 checking changesets
76 checking manifests
77 crosschecking files in changesets and manifests
78 checking files
79 checked 3 changesets with 2 changes to 2 files
74 $ hg verify -q
80 75 $ hg branches
81 76 default 0:1160648e36ce
82 77 $ cd $TESTTMP
@@ -117,12 +112,7 clone remote via pull
117 112 verify
118 113
119 114 $ cd local
120 $ hg verify
121 checking changesets
122 checking manifests
123 crosschecking files in changesets and manifests
124 checking files
125 checked 3 changesets with 2 changes to 2 files
115 $ hg verify -q
126 116 $ cat >> .hg/hgrc <<EOF
127 117 > [hooks]
128 118 > changegroup = sh -c "printenv.py --line changegroup-in-local 0 ../dummylog"
@@ -214,12 +204,7 check remote tip
214 204 date: Thu Jan 01 00:00:00 1970 +0000
215 205 summary: add
216 206
217 $ hg verify
218 checking changesets
219 checking manifests
220 crosschecking files in changesets and manifests
221 checking files
222 checked 4 changesets with 3 changes to 2 files
207 $ hg verify -q
223 208 $ hg cat -r tip foo
224 209 bleah
225 210 $ echo z > z
@@ -292,10 +277,8 push should succeed even though it has a
292 277 remote: adding changesets
293 278 remote: adding manifests
294 279 remote: adding file changes
295 remote: added 1 changesets with 1 changes to 1 files (py3 !)
296 remote: added 1 changesets with 1 changes to 1 files (no-py3 no-chg !)
280 remote: added 1 changesets with 1 changes to 1 files
297 281 remote: KABOOM
298 remote: added 1 changesets with 1 changes to 1 files (no-py3 chg !)
299 282 $ hg -R ../remote heads
300 283 changeset: 5:1383141674ec
301 284 tag: tip
@@ -462,10 +445,8 stderr from remote commands should be pr
462 445 remote: adding changesets
463 446 remote: adding manifests
464 447 remote: adding file changes
465 remote: added 1 changesets with 1 changes to 1 files (py3 !)
466 remote: added 1 changesets with 1 changes to 1 files (no-py3 no-chg !)
448 remote: added 1 changesets with 1 changes to 1 files
467 449 remote: KABOOM
468 remote: added 1 changesets with 1 changes to 1 files (no-py3 chg !)
469 450 local stdout
470 451
471 452 debug output
@@ -20,7 +20,7 clone remote via stream
20 20 $ for i in 0 1 2 3 4 5 6 7 8; do
21 21 > hg clone --stream -r "$i" ssh://user@dummy/remote test-"$i"
22 22 > if cd test-"$i"; then
23 > hg verify
23 > hg verify -q
24 24 > cd ..
25 25 > fi
26 26 > done
@@ -31,11 +31,6 clone remote via stream
31 31 new changesets bfaf4b5cbf01
32 32 updating to branch default
33 33 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
34 checking changesets
35 checking manifests
36 crosschecking files in changesets and manifests
37 checking files
38 checked 1 changesets with 1 changes to 1 files
39 34 adding changesets
40 35 adding manifests
41 36 adding file changes
@@ -43,11 +38,6 clone remote via stream
43 38 new changesets bfaf4b5cbf01:21f32785131f
44 39 updating to branch default
45 40 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
46 checking changesets
47 checking manifests
48 crosschecking files in changesets and manifests
49 checking files
50 checked 2 changesets with 2 changes to 1 files
51 41 adding changesets
52 42 adding manifests
53 43 adding file changes
@@ -55,11 +45,6 clone remote via stream
55 45 new changesets bfaf4b5cbf01:4ce51a113780
56 46 updating to branch default
57 47 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
58 checking changesets
59 checking manifests
60 crosschecking files in changesets and manifests
61 checking files
62 checked 3 changesets with 3 changes to 1 files
63 48 adding changesets
64 49 adding manifests
65 50 adding file changes
@@ -67,11 +52,6 clone remote via stream
67 52 new changesets bfaf4b5cbf01:93ee6ab32777
68 53 updating to branch default
69 54 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
70 checking changesets
71 checking manifests
72 crosschecking files in changesets and manifests
73 checking files
74 checked 4 changesets with 4 changes to 1 files
75 55 adding changesets
76 56 adding manifests
77 57 adding file changes
@@ -79,11 +59,6 clone remote via stream
79 59 new changesets bfaf4b5cbf01:c70afb1ee985
80 60 updating to branch default
81 61 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
82 checking changesets
83 checking manifests
84 crosschecking files in changesets and manifests
85 checking files
86 checked 2 changesets with 2 changes to 1 files
87 62 adding changesets
88 63 adding manifests
89 64 adding file changes
@@ -91,11 +66,6 clone remote via stream
91 66 new changesets bfaf4b5cbf01:f03ae5a9b979
92 67 updating to branch default
93 68 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
94 checking changesets
95 checking manifests
96 crosschecking files in changesets and manifests
97 checking files
98 checked 3 changesets with 3 changes to 1 files
99 69 adding changesets
100 70 adding manifests
101 71 adding file changes
@@ -103,11 +73,6 clone remote via stream
103 73 new changesets bfaf4b5cbf01:095cb14b1b4d
104 74 updating to branch default
105 75 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
106 checking changesets
107 checking manifests
108 crosschecking files in changesets and manifests
109 checking files
110 checked 4 changesets with 5 changes to 2 files
111 76 adding changesets
112 77 adding manifests
113 78 adding file changes
@@ -115,11 +80,6 clone remote via stream
115 80 new changesets bfaf4b5cbf01:faa2e4234c7a
116 81 updating to branch default
117 82 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
118 checking changesets
119 checking manifests
120 crosschecking files in changesets and manifests
121 checking files
122 checked 5 changesets with 6 changes to 3 files
123 83 adding changesets
124 84 adding manifests
125 85 adding file changes
@@ -127,11 +87,6 clone remote via stream
127 87 new changesets bfaf4b5cbf01:916f1afdef90
128 88 updating to branch default
129 89 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
130 checking changesets
131 checking manifests
132 crosschecking files in changesets and manifests
133 checking files
134 checked 5 changesets with 5 changes to 2 files
135 90 $ cd test-8
136 91 $ hg pull ../test-7
137 92 pulling from ../test-7
@@ -142,12 +97,7 clone remote via stream
142 97 added 4 changesets with 2 changes to 3 files (+1 heads)
143 98 new changesets c70afb1ee985:faa2e4234c7a
144 99 (run 'hg heads' to see heads, 'hg merge' to merge)
145 $ hg verify
146 checking changesets
147 checking manifests
148 crosschecking files in changesets and manifests
149 checking files
150 checked 9 changesets with 7 changes to 4 files
100 $ hg verify -q
151 101 $ cd ..
152 102 $ cd test-1
153 103 $ hg pull -r 4 ssh://user@dummy/remote
@@ -159,12 +109,7 clone remote via stream
159 109 added 1 changesets with 0 changes to 0 files (+1 heads)
160 110 new changesets c70afb1ee985
161 111 (run 'hg heads' to see heads, 'hg merge' to merge)
162 $ hg verify
163 checking changesets
164 checking manifests
165 crosschecking files in changesets and manifests
166 checking files
167 checked 3 changesets with 2 changes to 1 files
112 $ hg verify -q
168 113 $ hg pull ssh://user@dummy/remote
169 114 pulling from ssh://user@dummy/remote
170 115 searching for changes
@@ -185,12 +130,7 clone remote via stream
185 130 added 2 changesets with 0 changes to 0 files (+1 heads)
186 131 new changesets c70afb1ee985:f03ae5a9b979
187 132 (run 'hg heads' to see heads, 'hg merge' to merge)
188 $ hg verify
189 checking changesets
190 checking manifests
191 crosschecking files in changesets and manifests
192 checking files
193 checked 5 changesets with 3 changes to 1 files
133 $ hg verify -q
194 134 $ hg pull ssh://user@dummy/remote
195 135 pulling from ssh://user@dummy/remote
196 136 searching for changes
@@ -200,11 +140,6 clone remote via stream
200 140 added 4 changesets with 4 changes to 4 files
201 141 new changesets 93ee6ab32777:916f1afdef90
202 142 (run 'hg update' to get a working copy)
203 $ hg verify
204 checking changesets
205 checking manifests
206 crosschecking files in changesets and manifests
207 checking files
208 checked 9 changesets with 7 changes to 4 files
143 $ hg verify -q
209 144
210 145 $ cd ..
@@ -61,12 +61,7 clone remote via stream
61 61 updating to branch default
62 62 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
63 63 $ cd local-stream
64 $ hg verify
65 checking changesets
66 checking manifests
67 crosschecking files in changesets and manifests
68 checking files
69 checked 3 changesets with 2 changes to 2 files
64 $ hg verify -q
70 65 $ hg branches
71 66 default 0:1160648e36ce
72 67 $ cd $TESTTMP
@@ -103,12 +98,7 clone remote via pull
103 98 verify
104 99
105 100 $ cd local
106 $ hg verify
107 checking changesets
108 checking manifests
109 crosschecking files in changesets and manifests
110 checking files
111 checked 3 changesets with 2 changes to 2 files
101 $ hg verify -q
112 102 $ cat >> .hg/hgrc <<EOF
113 103 > [hooks]
114 104 > changegroup = sh -c "printenv.py changegroup-in-local 0 ../dummylog"
@@ -200,12 +190,7 check remote tip
200 190 date: Thu Jan 01 00:00:00 1970 +0000
201 191 summary: add
202 192
203 $ hg verify
204 checking changesets
205 checking manifests
206 crosschecking files in changesets and manifests
207 checking files
208 checked 4 changesets with 3 changes to 2 files
193 $ hg verify -q
209 194 $ hg cat -r tip foo
210 195 bleah
211 196 $ echo z > z
@@ -289,11 +274,9 push should succeed even though it has a
289 274 remote: adding changesets
290 275 remote: adding manifests
291 276 remote: adding file changes
292 remote: added 1 changesets with 1 changes to 1 files (py3 !)
293 remote: added 1 changesets with 1 changes to 1 files (no-py3 no-chg !)
277 remote: added 1 changesets with 1 changes to 1 files
294 278 remote: KABOOM
295 279 remote: KABOOM IN PROCESS
296 remote: added 1 changesets with 1 changes to 1 files (no-py3 chg !)
297 280 $ hg -R ../remote heads
298 281 changeset: 5:1383141674ec
299 282 tag: tip
@@ -323,7 +306,7 try again with remote chg, which should
323 306 remote: adding changesets
324 307 remote: adding manifests
325 308 remote: adding file changes
326 remote: added 1 changesets with 1 changes to 1 files (py3 !)
309 remote: added 1 changesets with 1 changes to 1 files
327 310 remote: KABOOM
328 311 remote: KABOOM IN PROCESS
329 312
@@ -514,11 +497,9 stderr from remote commands should be pr
514 497 remote: adding changesets
515 498 remote: adding manifests
516 499 remote: adding file changes
517 remote: added 1 changesets with 1 changes to 1 files (py3 !)
518 remote: added 1 changesets with 1 changes to 1 files (no-py3 no-chg !)
500 remote: added 1 changesets with 1 changes to 1 files
519 501 remote: KABOOM
520 502 remote: KABOOM IN PROCESS
521 remote: added 1 changesets with 1 changes to 1 files (no-py3 chg !)
522 503 local stdout
523 504
524 505 debug output
@@ -38,12 +38,7 one pull
38 38 updating to branch default
39 39 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
40 40 $ cd local
41 $ hg verify
42 checking changesets
43 checking manifests
44 crosschecking files in changesets and manifests
45 checking files
46 checked 1 changesets with 2 changes to 2 files
41 $ hg verify -q
47 42 $ cat bar
48 43 foo
49 44 $ cd ../remote
@@ -134,13 +129,7 test with "/" URI (issue747) and subrepo
134 129 new changesets be090ea66256:322ea90975df
135 130 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
136 131 $ cd local2
137 $ hg verify
138 checking changesets
139 checking manifests
140 crosschecking files in changesets and manifests
141 checking files
142 checked 1 changesets with 3 changes to 3 files
143 checking subrepo links
132 $ hg verify -q
144 133 $ cat a
145 134 a
146 135 $ hg paths
@@ -155,12 +144,7 test with empty repo (issue965)
155 144 updating to branch default
156 145 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
157 146 $ cd local3
158 $ hg verify
159 checking changesets
160 checking manifests
161 crosschecking files in changesets and manifests
162 checking files
163 checked 0 changesets with 0 changes to 0 files
147 $ hg verify -q
164 148 $ hg paths
165 149 default = static-http://localhost:$HGPORT/remotempty
166 150
@@ -80,35 +80,20 2 1 0 2 0 1 2
80 80 > echo "% Trying to strip revision $i"
81 81 > hg --cwd $i strip $i
82 82 > echo "% Verifying"
83 > hg --cwd $i verify
83 > hg --cwd $i verify -q
84 84 > echo
85 85 > done
86 86 % Trying to strip revision 0
87 87 saved backup bundle to $TESTTMP/files/0/.hg/strip-backup/cbb8c2f0a2e3-239800b9-backup.hg
88 88 % Verifying
89 checking changesets
90 checking manifests
91 crosschecking files in changesets and manifests
92 checking files
93 checked 2 changesets with 12 changes to 6 files
94 89
95 90 % Trying to strip revision 1
96 91 saved backup bundle to $TESTTMP/files/1/.hg/strip-backup/124ecc0cbec9-6104543f-backup.hg
97 92 % Verifying
98 checking changesets
99 checking manifests
100 crosschecking files in changesets and manifests
101 checking files
102 checked 2 changesets with 12 changes to 6 files
103 93
104 94 % Trying to strip revision 2
105 95 saved backup bundle to $TESTTMP/files/2/.hg/strip-backup/f6439b304a1a-c6505a5f-backup.hg
106 96 % Verifying
107 checking changesets
108 checking manifests
109 crosschecking files in changesets and manifests
110 checking files
111 checked 2 changesets with 12 changes to 6 files
112 97
113 98 $ cd ..
114 99
@@ -139,26 +124,16 Do a similar test where the manifest rev
139 124 > echo "% Trying to strip revision $i"
140 125 > hg --cwd $i strip $i
141 126 > echo "% Verifying"
142 > hg --cwd $i verify
127 > hg --cwd $i verify -q
143 128 > echo
144 129 > done
145 130 % Trying to strip revision 2
146 131 saved backup bundle to $TESTTMP/manifests/2/.hg/strip-backup/f3015ad03c03-4d98bdc2-backup.hg
147 132 % Verifying
148 checking changesets
149 checking manifests
150 crosschecking files in changesets and manifests
151 checking files
152 checked 3 changesets with 3 changes to 2 files
153 133
154 134 % Trying to strip revision 3
155 135 saved backup bundle to $TESTTMP/manifests/3/.hg/strip-backup/9632aa303aa4-69192e3f-backup.hg
156 136 % Verifying
157 checking changesets
158 checking manifests
159 crosschecking files in changesets and manifests
160 checking files
161 checked 3 changesets with 3 changes to 2 files
162 137
163 138 $ cd ..
164 139
@@ -194,27 +169,16 Now a similar test for a non-root manife
194 169 > echo "% Trying to strip revision $i"
195 170 > hg --cwd $i strip $i
196 171 > echo "% Verifying"
197 > hg --cwd $i verify
172 > hg --cwd $i verify -q
198 173 > echo
199 174 > done
200 175 % Trying to strip revision 2
201 176 saved backup bundle to $TESTTMP/treemanifests/2/.hg/strip-backup/145f5c75f9ac-a105cfbe-backup.hg
202 177 % Verifying
203 checking changesets
204 checking manifests
205 checking directory manifests
206 crosschecking files in changesets and manifests
207 checking files
208 checked 3 changesets with 4 changes to 3 files
209 178
210 179 % Trying to strip revision 3
211 180 saved backup bundle to $TESTTMP/treemanifests/3/.hg/strip-backup/e4e3de5c3cb2-f4c70376-backup.hg
212 181 % Verifying
213 checking changesets
214 checking manifests
215 checking directory manifests
216 crosschecking files in changesets and manifests
217 checking files
218 checked 3 changesets with 4 changes to 3 files
219 182
183
220 184 $ cd ..
@@ -111,13 +111,7 verify will warn if locked-in subrepo re
111 111
112 112 $ hg ci -m "amended subrepo (again)"
113 113 $ hg --config extensions.strip= --hidden strip -R subrepo -qr 'tip' --config devel.strip-obsmarkers=no
114 $ hg verify
115 checking changesets
116 checking manifests
117 crosschecking files in changesets and manifests
118 checking files
119 checked 5 changesets with 5 changes to 2 files
120 checking subrepo links
114 $ hg verify -q
121 115 subrepo 'subrepo' is hidden in revision a66de08943b6
122 116 subrepo 'subrepo' is hidden in revision 674d05939c1e
123 117 subrepo 'subrepo' not found in revision a7d05d9055a4
@@ -125,13 +119,7 verify will warn if locked-in subrepo re
125 119 verifying shouldn't init a new subrepo if the reference doesn't exist
126 120
127 121 $ mv subrepo b
128 $ hg verify
129 checking changesets
130 checking manifests
131 crosschecking files in changesets and manifests
132 checking files
133 checked 5 changesets with 5 changes to 2 files
134 checking subrepo links
122 $ hg verify -q
135 123 0: repository $TESTTMP/repo/subrepo not found
136 124 1: repository $TESTTMP/repo/subrepo not found
137 125 3: repository $TESTTMP/repo/subrepo not found
@@ -134,8 +134,6 not the `wlock`, then get aborted on a s
134 134 $ hg phase --rev 0
135 135 0: draft
136 136 $ cat ../log.err
137 transaction abort!
138 rollback completed
139 137 abort: pretxnclose.test hook exited with status 1
140 138
141 139 Actual testing
@@ -153,7 +151,7 Changing tracked file
153 151 $ touch $TESTTMP/transaction-continue
154 152 $ wait
155 153 $ hg status
156 R default_a (missing-correct-output !)
154 R default_a
157 155 $ hg revert --all --quiet
158 156
159 157 Changing branch from default
@@ -204,10 +202,8 updating working copy
204 202 $ touch $TESTTMP/transaction-continue
205 203 $ wait
206 204 $ hg log --rev . -T '{desc}\n'
207 babar_l (missing-correct-output !)
208 babar_m (known-bad-output !)
205 babar_l
209 206 $ hg st
210 ! babar_m (known-bad-output !)
211 207
212 208 $ hg purge --no-confirm
213 209 $ hg up --quiet babar
@@ -399,13 +399,7 Pushing to an empty repo works
399 399 added 11 changesets with 15 changes to 10 files (+3 heads)
400 400 $ hg debugrequires -R clone | grep treemanifest
401 401 treemanifest
402 $ hg -R clone verify
403 checking changesets
404 checking manifests
405 checking directory manifests
406 crosschecking files in changesets and manifests
407 checking files
408 checked 11 changesets with 15 changes to 10 files
402 $ hg -R clone verify -q
409 403
410 404 Create deeper repo with tree manifests.
411 405
@@ -567,13 +561,7 Add some more changes to the deep repo
567 561 $ hg ci -m troz
568 562
569 563 Verify works
570 $ hg verify
571 checking changesets
572 checking manifests
573 checking directory manifests
574 crosschecking files in changesets and manifests
575 checking files
576 checked 4 changesets with 18 changes to 8 files
564 $ hg verify -q
577 565
578 566 #if repofncache
579 567 Dirlogs are included in fncache
@@ -631,6 +619,7 Verify reports missing dirlog
631 619 b/bar/orange/fly/housefly.txt@0: in changeset but not in manifest
632 620 b/foo/apple/bees/flower.py@0: in changeset but not in manifest
633 621 checking files
622 not checking dirstate because of previous errors
634 623 checked 4 changesets with 18 changes to 8 files
635 624 6 warnings encountered! (reporevlogstore !)
636 625 9 integrity errors encountered!
@@ -656,6 +645,7 Verify reports missing dirlog entry
656 645 (expected None)
657 646 crosschecking files in changesets and manifests
658 647 checking files
648 not checking dirstate because of previous errors
659 649 checked 4 changesets with 18 changes to 8 files
660 650 2 warnings encountered!
661 651 8 integrity errors encountered!
@@ -707,13 +697,7 Tree manifest revlogs exist.
707 697 deepclone/.hg/store/meta/~2e_a/00manifest.i (reporevlogstore !)
708 698 Verify passes.
709 699 $ cd deepclone
710 $ hg verify
711 checking changesets
712 checking manifests
713 checking directory manifests
714 crosschecking files in changesets and manifests
715 checking files
716 checked 4 changesets with 18 changes to 8 files
700 $ hg verify -q
717 701 $ cd ..
718 702
719 703 #if reporevlogstore
@@ -755,33 +739,15 Create clones using old repo formats to
755 739
756 740 Local clone with basicstore
757 741 $ hg clone -U deeprepo-basicstore local-clone-basicstore
758 $ hg -R local-clone-basicstore verify
759 checking changesets
760 checking manifests
761 checking directory manifests
762 crosschecking files in changesets and manifests
763 checking files
764 checked 4 changesets with 18 changes to 8 files
742 $ hg -R local-clone-basicstore verify -q
765 743
766 744 Local clone with encodedstore
767 745 $ hg clone -U deeprepo-encodedstore local-clone-encodedstore
768 $ hg -R local-clone-encodedstore verify
769 checking changesets
770 checking manifests
771 checking directory manifests
772 crosschecking files in changesets and manifests
773 checking files
774 checked 4 changesets with 18 changes to 8 files
746 $ hg -R local-clone-encodedstore verify -q
775 747
776 748 Local clone with fncachestore
777 749 $ hg clone -U deeprepo local-clone-fncachestore
778 $ hg -R local-clone-fncachestore verify
779 checking changesets
780 checking manifests
781 checking directory manifests
782 crosschecking files in changesets and manifests
783 checking files
784 checked 4 changesets with 18 changes to 8 files
750 $ hg -R local-clone-fncachestore verify -q
785 751
786 752 Stream clone with basicstore
787 753 $ hg clone --config experimental.changegroup3=True --stream -U \
@@ -789,13 +755,7 Stream clone with basicstore
789 755 streaming all changes
790 756 28 files to transfer, * of data (glob)
791 757 transferred * in * seconds (*) (glob)
792 $ hg -R stream-clone-basicstore verify
793 checking changesets
794 checking manifests
795 checking directory manifests
796 crosschecking files in changesets and manifests
797 checking files
798 checked 4 changesets with 18 changes to 8 files
758 $ hg -R stream-clone-basicstore verify -q
799 759
800 760 Stream clone with encodedstore
801 761 $ hg clone --config experimental.changegroup3=True --stream -U \
@@ -803,13 +763,7 Stream clone with encodedstore
803 763 streaming all changes
804 764 28 files to transfer, * of data (glob)
805 765 transferred * in * seconds (*) (glob)
806 $ hg -R stream-clone-encodedstore verify
807 checking changesets
808 checking manifests
809 checking directory manifests
810 crosschecking files in changesets and manifests
811 checking files
812 checked 4 changesets with 18 changes to 8 files
766 $ hg -R stream-clone-encodedstore verify -q
813 767
814 768 Stream clone with fncachestore
815 769 $ hg clone --config experimental.changegroup3=True --stream -U \
@@ -817,13 +771,7 Stream clone with fncachestore
817 771 streaming all changes
818 772 22 files to transfer, * of data (glob)
819 773 transferred * in * seconds (*) (glob)
820 $ hg -R stream-clone-fncachestore verify
821 checking changesets
822 checking manifests
823 checking directory manifests
824 crosschecking files in changesets and manifests
825 checking files
826 checked 4 changesets with 18 changes to 8 files
774 $ hg -R stream-clone-fncachestore verify -q
827 775
828 776 Packed bundle
829 777 $ hg -R deeprepo debugcreatestreamclonebundle repo-packed.hg
@@ -363,13 +363,7 Testing whether unamend retains copies o
363 363 $ hg mv c wat
364 364 $ hg unamend
365 365
366 $ hg verify -v
367 repository uses revlog format 1
368 checking changesets
369 checking manifests
370 crosschecking files in changesets and manifests
371 checking files
372 checked 28 changesets with 16 changes to 11 files
366 $ hg verify -q
373 367
374 368 Retained copies in new predecessor commit
375 369
@@ -133,12 +133,7 union repos can be cloned ... and clones
133 133 $ hg -R repo3 paths
134 134 default = union:repo1+repo2
135 135
136 $ hg -R repo3 verify
137 checking changesets
138 checking manifests
139 crosschecking files in changesets and manifests
140 checking files
141 checked 6 changesets with 11 changes to 6 files
136 $ hg -R repo3 verify -q
142 137
143 138 $ hg -R repo3 heads --template '{rev}:{node|short} {desc|firstline}\n'
144 139 5:2f0d178c469c repo2-3
@@ -853,12 +853,7 manifest should be generaldelta
853 853
854 854 verify should be happy
855 855
856 $ hg verify
857 checking changesets
858 checking manifests
859 crosschecking files in changesets and manifests
860 checking files
861 checked 3 changesets with 3 changes to 3 files
856 $ hg verify -q
862 857
863 858 old store should be backed up
864 859
@@ -972,7 +967,7 We can restrict optimization to some rev
972 967 Check that the repo still works fine
973 968
974 969 $ hg log -G --stat
975 @ changeset: 2:fca376863211 (py3 !)
970 @ changeset: 2:fca376863211
976 971 | tag: tip
977 972 | parent: 0:ba592bf28da2
978 973 | user: test
@@ -995,12 +990,7 Check that the repo still works fine
995 990
996 991
997 992
998 $ hg verify
999 checking changesets
1000 checking manifests
1001 crosschecking files in changesets and manifests
1002 checking files
1003 checked 3 changesets with 3 changes to 3 files
993 $ hg verify -q
1004 994
1005 995 Check we can select negatively
1006 996
@@ -1047,12 +1037,7 Check we can select negatively
1047 1037 store replacement complete; repository was inconsistent for *s (glob)
1048 1038 finalizing requirements file and making repository readable again
1049 1039 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1050 $ hg verify
1051 checking changesets
1052 checking manifests
1053 crosschecking files in changesets and manifests
1054 checking files
1055 checked 3 changesets with 3 changes to 3 files
1040 $ hg verify -q
1056 1041
1057 1042 Check that we can select changelog only
1058 1043
@@ -1098,12 +1083,7 Check that we can select changelog only
1098 1083 store replacement complete; repository was inconsistent for *s (glob)
1099 1084 finalizing requirements file and making repository readable again
1100 1085 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1101 $ hg verify
1102 checking changesets
1103 checking manifests
1104 crosschecking files in changesets and manifests
1105 checking files
1106 checked 3 changesets with 3 changes to 3 files
1086 $ hg verify -q
1107 1087
1108 1088 Check that we can select filelog only
1109 1089
@@ -1149,12 +1129,7 Check that we can select filelog only
1149 1129 store replacement complete; repository was inconsistent for *s (glob)
1150 1130 finalizing requirements file and making repository readable again
1151 1131 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1152 $ hg verify
1153 checking changesets
1154 checking manifests
1155 crosschecking files in changesets and manifests
1156 checking files
1157 checked 3 changesets with 3 changes to 3 files
1132 $ hg verify -q
1158 1133
1159 1134
1160 1135 Check you can't skip revlog clone during important format downgrade
@@ -1224,12 +1199,7 Check you can't skip revlog clone during
1224 1199 store replacement complete; repository was inconsistent for *s (glob)
1225 1200 finalizing requirements file and making repository readable again
1226 1201 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1227 $ hg verify
1228 checking changesets
1229 checking manifests
1230 crosschecking files in changesets and manifests
1231 checking files
1232 checked 3 changesets with 3 changes to 3 files
1202 $ hg verify -q
1233 1203
1234 1204 Check you can't skip revlog clone during important format upgrade
1235 1205
@@ -1285,12 +1255,7 Check you can't skip revlog clone during
1285 1255 store replacement complete; repository was inconsistent for *s (glob)
1286 1256 finalizing requirements file and making repository readable again
1287 1257 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1288 $ hg verify
1289 checking changesets
1290 checking manifests
1291 crosschecking files in changesets and manifests
1292 checking files
1293 checked 3 changesets with 3 changes to 3 files
1258 $ hg verify -q
1294 1259
1295 1260 $ cd ..
1296 1261
@@ -1413,12 +1378,7 Check upgrading a large file repository
1413 1378 lfs
1414 1379 $ find .hg/store/lfs -type f
1415 1380 .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1416 $ hg verify
1417 checking changesets
1418 checking manifests
1419 crosschecking files in changesets and manifests
1420 checking files
1421 checked 2 changesets with 2 changes to 2 files
1381 $ hg verify -q
1422 1382 $ hg debugdata lfs.bin 0
1423 1383 version https://git-lfs.github.com/spec/v1
1424 1384 oid sha256:d0beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
@@ -52,7 +52,7 Test largefile URL
52 52 $ hg -R server debuglfput null.txt
53 53 a57b57b39ee4dc3da1e03526596007f480ecdbe8
54 54
55 $ hg --traceback debugdownload "largefile://a57b57b39ee4dc3da1e03526596007f480ecdbe8" --config paths.default=http://localhost:$HGPORT/
55 $ hg debugdownload "largefile://a57b57b39ee4dc3da1e03526596007f480ecdbe8" --config paths.default=http://localhost:$HGPORT/
56 56 1 0000000000000000000000000000000000000000
57 57
58 58 from within a repository
@@ -50,7 +50,7 def mocktimer(incr=0.1, *additional_targ
50 50
51 51 # attr.s default factory for util.timedstats.start binds the timer we
52 52 # need to mock out.
53 _start_default = (util.timedcmstats.start.default, 'factory')
53 _start_default = (util.timedcmstats.__attrs_attrs__.start.default, 'factory')
54 54
55 55
56 56 @contextlib.contextmanager
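The rewritten _start_default above reaches the default through __attrs_attrs__, the per-class field metadata that the attrs package stores for classes declared with attr.s. A minimal sketch, assuming the third-party attrs package (timedcmstats below is a hypothetical stand-in for util.timedcmstats):

    import attr
    import time

    @attr.s
    class timedcmstats(object):
        start = attr.ib(default=attr.Factory(time.perf_counter))

    # __attrs_attrs__ is a named-tuple-like object; fields are reachable by
    # name, and a Factory default exposes its callable on the .factory
    # attribute, which is what the (default, 'factory') pair above hands to
    # the mock.
    field = timedcmstats.__attrs_attrs__.start
    assert isinstance(field.default, attr.Factory)
    print(field.default.factory)  # the callable the test needs to mock out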
@@ -20,6 +20,7 verify
20 20 checking manifests
21 21 crosschecking files in changesets and manifests
22 22 checking files
23 checking dirstate
23 24 checked 1 changesets with 3 changes to 3 files
24 25
25 26 verify with journal
@@ -31,6 +32,7 verify with journal
31 32 checking manifests
32 33 crosschecking files in changesets and manifests
33 34 checking files
35 checking dirstate
34 36 checked 1 changesets with 3 changes to 3 files
35 37 $ rm .hg/store/journal
36 38
@@ -55,6 +57,7 introduce some bugs in repo
55 57 warning: revlog 'data/bar.txt.i' not in fncache!
56 58 0: empty or missing bar.txt
57 59 bar.txt@0: manifest refers to unknown revision 256559129457
60 not checking dirstate because of previous errors
58 61 checked 1 changesets with 0 changes to 3 files
59 62 3 warnings encountered!
60 63 hint: run "hg debugrebuildfncache" to recover from corrupt fncache
@@ -83,6 +86,7 Entire changelog missing
83 86 0: empty or missing changelog
84 87 manifest@0: d0b6632564d4 not in changesets
85 88 manifest@1: 941fc4534185 not in changesets
89 not checking dirstate because of previous errors
86 90 3 integrity errors encountered!
87 91 (first damaged changeset appears to be 0)
88 92 [1]
@@ -93,6 +97,7 Entire manifest log missing
93 97 $ rm .hg/store/00manifest.*
94 98 $ hg verify -q
95 99 0: empty or missing manifest
100 not checking dirstate because of previous errors
96 101 1 integrity errors encountered!
97 102 (first damaged changeset appears to be 0)
98 103 [1]
@@ -106,6 +111,7 Entire filelog missing
106 111 0: empty or missing file
107 112 file@0: manifest refers to unknown revision 362fef284ce2
108 113 file@1: manifest refers to unknown revision c10f2164107d
114 not checking dirstate because of previous errors
109 115 1 warnings encountered!
110 116 hint: run "hg debugrebuildfncache" to recover from corrupt fncache
111 117 3 integrity errors encountered!
@@ -119,7 +125,13 Entire changelog and manifest log missin
119 125 $ rm .hg/store/00manifest.*
120 126 $ hg verify -q
121 127 warning: orphan data file 'data/file.i'
128 warning: ignoring unknown working parent c5ddb05ab828!
129 file marked as tracked in p1 (000000000000) but not in manifest1
122 130 1 warnings encountered!
131 1 integrity errors encountered!
132 dirstate inconsistent with current parent's manifest
133 1 dirstate errors
134 [1]
123 135 $ cp -R .hg/store-full/. .hg/store
124 136
125 137 Entire changelog and filelog missing
@@ -134,6 +146,7 Entire changelog and filelog missing
134 146 ?: empty or missing file
135 147 file@0: manifest refers to unknown revision 362fef284ce2
136 148 file@1: manifest refers to unknown revision c10f2164107d
149 not checking dirstate because of previous errors
137 150 1 warnings encountered!
138 151 hint: run "hg debugrebuildfncache" to recover from corrupt fncache
139 152 6 integrity errors encountered!
@@ -149,6 +162,7 Entire manifest log and filelog missing
149 162 0: empty or missing manifest
150 163 warning: revlog 'data/file.i' not in fncache!
151 164 0: empty or missing file
165 not checking dirstate because of previous errors
152 166 1 warnings encountered!
153 167 hint: run "hg debugrebuildfncache" to recover from corrupt fncache
154 168 2 integrity errors encountered!
@@ -164,6 +178,7 Changelog missing entry
164 178 manifest@?: 941fc4534185 not in changesets
165 179 file@?: rev 1 points to nonexistent changeset 1
166 180 (expected 0)
181 not checking dirstate because of previous errors
167 182 1 warnings encountered!
168 183 3 integrity errors encountered!
169 184 [1]
@@ -175,6 +190,7 Manifest log missing entry
175 190 $ hg verify -q
176 191 manifest@1: changeset refers to unknown revision 941fc4534185
177 192 file@1: c10f2164107d not in manifests
193 not checking dirstate because of previous errors
178 194 2 integrity errors encountered!
179 195 (first damaged changeset appears to be 1)
180 196 [1]
@@ -185,6 +201,7 Filelog missing entry
185 201 $ cp -f .hg/store-partial/data/file.* .hg/store/data
186 202 $ hg verify -q
187 203 file@1: manifest refers to unknown revision c10f2164107d
204 not checking dirstate because of previous errors
188 205 1 integrity errors encountered!
189 206 (first damaged changeset appears to be 1)
190 207 [1]
@@ -198,6 +215,7 Changelog and manifest log missing entry
198 215 file@?: rev 1 points to nonexistent changeset 1
199 216 (expected 0)
200 217 file@?: c10f2164107d not in manifests
218 not checking dirstate because of previous errors
201 219 1 warnings encountered!
202 220 2 integrity errors encountered!
203 221 [1]
@@ -211,6 +229,7 Changelog and filelog missing entry
211 229 manifest@?: rev 1 points to nonexistent changeset 1
212 230 manifest@?: 941fc4534185 not in changesets
213 231 file@?: manifest refers to unknown revision c10f2164107d
232 not checking dirstate because of previous errors
214 233 3 integrity errors encountered!
215 234 [1]
216 235 $ cp -R .hg/store-full/. .hg/store
@@ -221,6 +240,7 Manifest and filelog missing entry
221 240 $ cp -f .hg/store-partial/data/file.* .hg/store/data
222 241 $ hg verify -q
223 242 manifest@1: changeset refers to unknown revision 941fc4534185
243 not checking dirstate because of previous errors
224 244 1 integrity errors encountered!
225 245 (first damaged changeset appears to be 1)
226 246 [1]
@@ -236,6 +256,7 Corrupt changelog base node to cause fai
236 256 manifest@?: d0b6632564d4 not in changesets
237 257 file@?: rev 0 points to unexpected changeset 0
238 258 (expected 1)
259 not checking dirstate because of previous errors
239 260 1 warnings encountered!
240 261 4 integrity errors encountered!
241 262 (first damaged changeset appears to be 0)
@@ -249,6 +270,7 Corrupt manifest log base node to cause
249 270 $ hg verify -q
250 271 manifest@0: reading delta d0b6632564d4: * (glob)
251 272 file@0: 362fef284ce2 not in manifests
273 not checking dirstate because of previous errors
252 274 2 integrity errors encountered!
253 275 (first damaged changeset appears to be 0)
254 276 [1]
@@ -260,6 +282,7 Corrupt filelog base node to cause failu
260 282 > 2> /dev/null
261 283 $ hg verify -q
262 284 file@0: unpacking 362fef284ce2: * (glob)
285 not checking dirstate because of previous errors
263 286 1 integrity errors encountered!
264 287 (first damaged changeset appears to be 0)
265 288 [1]
@@ -275,12 +298,7 test changelog without a manifest
275 298 marked working directory as branch foo
276 299 (branches are permanent and global, did you want a bookmark?)
277 300 $ hg ci -m branchfoo
278 $ hg verify
279 checking changesets
280 checking manifests
281 crosschecking files in changesets and manifests
282 checking files
283 checked 1 changesets with 0 changes to 0 files
301 $ hg verify -q
284 302
285 303 test revlog corruption
286 304
@@ -292,14 +310,10 test revlog corruption
292 310 $ dd if=.hg/store/data/a.i of=start bs=1 count=20 2>/dev/null
293 311 $ cat start b > .hg/store/data/a.i
294 312
295 $ hg verify
296 checking changesets
297 checking manifests
298 crosschecking files in changesets and manifests
299 checking files
300 a@1: broken revlog! (index data/a is corrupted)
313 $ hg verify -q
314 a@1: broken revlog! (index a is corrupted)
301 315 warning: orphan data file 'data/a.i'
302 checked 2 changesets with 0 changes to 1 files
316 not checking dirstate because of previous errors
303 317 1 warnings encountered!
304 318 1 integrity errors encountered!
305 319 (first damaged changeset appears to be 1)
@@ -317,6 +331,7 test revlog format 0
317 331 checking manifests
318 332 crosschecking files in changesets and manifests
319 333 checking files
334 checking dirstate
320 335 checked 1 changesets with 1 changes to 1 files
321 336 $ cd ..
322 337
@@ -330,12 +345,7 test flag processor and skipflags
330 345 > EOF
331 346 $ echo '[BASE64]content' > base64
332 347 $ hg commit -Aqm 'flag processor content' base64
333 $ hg verify
334 checking changesets
335 checking manifests
336 crosschecking files in changesets and manifests
337 checking files
338 checked 1 changesets with 1 changes to 1 files
348 $ hg verify -q
339 349
340 350 $ cat >> $TESTTMP/break-base64.py <<EOF
341 351 > import base64
@@ -345,20 +355,11 test flag processor and skipflags
345 355 > breakbase64=$TESTTMP/break-base64.py
346 356 > EOF
347 357
348 $ hg verify
349 checking changesets
350 checking manifests
351 crosschecking files in changesets and manifests
352 checking files
353 base64@0: unpacking 794cee7777cb: integrity check failed on data/base64:0
354 checked 1 changesets with 1 changes to 1 files
358 $ hg verify -q
359 base64@0: unpacking 794cee7777cb: integrity check failed on base64:0
360 not checking dirstate because of previous errors
355 361 1 integrity errors encountered!
356 362 (first damaged changeset appears to be 0)
357 363 [1]
358 $ hg verify --config verify.skipflags=2147483647
359 checking changesets
360 checking manifests
361 crosschecking files in changesets and manifests
362 checking files
363 checked 1 changesets with 1 changes to 1 files
364 $ hg verify --config verify.skipflags=2147483647 -q
364 365
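An aside on the skipflags value used just above: 2147483647 is 2**31 - 1, a mask with every low bit set, which comfortably covers all revlog flag bits (the removed Rust code further down defines them as 16-bit values, e.g. REVISION_FLAG_CENSORED = 1 << 15), so verification skips flag processing entirely:

    # pure arithmetic check of the mask used by the test above
    assert 2147483647 == (1 << 31) - 1 == 0x7fffffff
    assert 2147483647 & (1 << 15)  # the highest revlog flag bit is covered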
@@ -86,9 +86,9 Known exception should be caught, but pr
86 86 $ hg --config "extensions.t=$abspath" --config 'worker.numcpus=8' \
87 87 > test 100000.0 abort --traceback 2>&1 | egrep '(WorkerError|Abort)'
88 88 raise error.Abort(b'known exception')
89 mercurial.error.Abort: known exception (py3 !)
89 mercurial.error.Abort: known exception
90 90 raise error.WorkerError(status)
91 mercurial.error.WorkerError: 255 (py3 !)
91 mercurial.error.WorkerError: 255
92 92
93 93 Traceback must be printed for unknown exceptions
94 94
@@ -1,96 +0,0
1 # dirstateguard.py - class to allow restoring dirstate after failure
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
7
8
9 import os
10 from .i18n import _
11
12 from . import (
13 error,
14 narrowspec,
15 requirements,
16 util,
17 )
18
19
20 class dirstateguard(util.transactional):
21 """Restore dirstate at unexpected failure.
22
23 At the construction, this class does:
24
25 - write current ``repo.dirstate`` out, and
26 - save ``.hg/dirstate`` into the backup file
27
28 This restores ``.hg/dirstate`` from backup file, if ``release()``
29 is invoked before ``close()``.
30
31 This just removes the backup file at ``close()`` before ``release()``.
32 """
33
34 def __init__(self, repo, name):
35 self._repo = repo
36 self._active = False
37 self._closed = False
38
39 def getname(prefix):
40 fd, fname = repo.vfs.mkstemp(prefix=prefix)
41 os.close(fd)
42 return fname
43
44 self._backupname = getname(b'dirstate.backup.%s.' % name)
45 repo.dirstate.savebackup(repo.currenttransaction(), self._backupname)
46 # Don't make this the empty string, things may join it with stuff and
47 # blindly try to unlink it, which could be bad.
48 self._narrowspecbackupname = None
49 if requirements.NARROW_REQUIREMENT in repo.requirements:
50 self._narrowspecbackupname = getname(
51 b'narrowspec.backup.%s.' % name
52 )
53 narrowspec.savewcbackup(repo, self._narrowspecbackupname)
54 self._active = True
55
56 def __del__(self):
57 if self._active: # still active
58 # this may occur, even if this class is used correctly:
59 # for example, releasing other resources like transaction
60 # may raise exception before ``dirstateguard.release`` in
61 # ``release(tr, ....)``.
62 self._abort()
63
64 def close(self):
65 if not self._active: # already inactivated
66 msg = (
67 _(b"can't close already inactivated backup: %s")
68 % self._backupname
69 )
70 raise error.Abort(msg)
71
72 self._repo.dirstate.clearbackup(
73 self._repo.currenttransaction(), self._backupname
74 )
75 if self._narrowspecbackupname:
76 narrowspec.clearwcbackup(self._repo, self._narrowspecbackupname)
77 self._active = False
78 self._closed = True
79
80 def _abort(self):
81 if self._narrowspecbackupname:
82 narrowspec.restorewcbackup(self._repo, self._narrowspecbackupname)
83 self._repo.dirstate.restorebackup(
84 self._repo.currenttransaction(), self._backupname
85 )
86 self._active = False
87
88 def release(self):
89 if not self._closed:
90 if not self._active: # already inactivated
91 msg = (
92 _(b"can't release already inactivated backup: %s")
93 % self._backupname
94 )
95 raise error.Abort(msg)
96 self._abort()
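A hypothetical usage sketch of the class removed above, following its own docstring (mutate_dirstate stands in for whatever dirstate-changing work a caller wrapped): close() on success discards the backup, while release() invoked before close() restores .hg/dirstate from it.

    guard = dirstateguard(repo, b'myoperation')
    try:
        mutate_dirstate(repo)  # hypothetical dirstate-mutating operation
        guard.close()          # success: discard the backup file
    finally:
        guard.release()        # if close() never ran, restore from the backup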
@@ -1,644 +0,0
1 use std::borrow::Cow;
2 use std::convert::TryFrom;
3 use std::io::Read;
4 use std::ops::Deref;
5 use std::path::Path;
6
7 use flate2::read::ZlibDecoder;
8 use sha1::{Digest, Sha1};
9 use zstd;
10
11 use super::index::Index;
12 use super::node::{NodePrefix, NODE_BYTES_LENGTH, NULL_NODE};
13 use super::nodemap;
14 use super::nodemap::{NodeMap, NodeMapError};
15 use super::nodemap_docket::NodeMapDocket;
16 use super::patch;
17 use crate::errors::HgError;
18 use crate::revlog::Revision;
19 use crate::vfs::Vfs;
20 use crate::{Node, NULL_REVISION};
21
22 const REVISION_FLAG_CENSORED: u16 = 1 << 15;
23 const REVISION_FLAG_ELLIPSIS: u16 = 1 << 14;
24 const REVISION_FLAG_EXTSTORED: u16 = 1 << 13;
25 const REVISION_FLAG_HASCOPIESINFO: u16 = 1 << 12;
26
27 // Keep this in sync with REVIDX_KNOWN_FLAGS in
28 // mercurial/revlogutils/flagutil.py
29 const REVIDX_KNOWN_FLAGS: u16 = REVISION_FLAG_CENSORED
30 | REVISION_FLAG_ELLIPSIS
31 | REVISION_FLAG_EXTSTORED
32 | REVISION_FLAG_HASCOPIESINFO;
33
34 const NULL_REVLOG_ENTRY_FLAGS: u16 = 0;
35
36 #[derive(Debug, derive_more::From)]
37 pub enum RevlogError {
38 InvalidRevision,
39 /// Working directory is not supported
40 WDirUnsupported,
41 /// Found more than one entry whose ID match the requested prefix
42 AmbiguousPrefix,
43 #[from]
44 Other(HgError),
45 }
46
47 impl From<NodeMapError> for RevlogError {
48 fn from(error: NodeMapError) -> Self {
49 match error {
50 NodeMapError::MultipleResults => RevlogError::AmbiguousPrefix,
51 NodeMapError::RevisionNotInIndex(rev) => RevlogError::corrupted(
52 format!("nodemap point to revision {} not in index", rev),
53 ),
54 }
55 }
56 }
57
58 fn corrupted<S: AsRef<str>>(context: S) -> HgError {
59 HgError::corrupted(format!("corrupted revlog, {}", context.as_ref()))
60 }
61
62 impl RevlogError {
63 fn corrupted<S: AsRef<str>>(context: S) -> Self {
64 RevlogError::Other(corrupted(context))
65 }
66 }
67
68 /// Read only implementation of revlog.
69 pub struct Revlog {
70 /// When index and data are not interleaved: bytes of the revlog index.
71 /// When index and data are interleaved: bytes of the revlog index and
72 /// data.
73 index: Index,
74 /// When index and data are not interleaved: bytes of the revlog data
75 data_bytes: Option<Box<dyn Deref<Target = [u8]> + Send>>,
76 /// When present on disk: the persistent nodemap for this revlog
77 nodemap: Option<nodemap::NodeTree>,
78 }
79
80 impl Revlog {
81 /// Open a revlog index file.
82 ///
83 /// It will also open the associated data file if index and data are not
84 /// interleaved.
85 pub fn open(
86 store_vfs: &Vfs,
87 index_path: impl AsRef<Path>,
88 data_path: Option<&Path>,
89 use_nodemap: bool,
90 ) -> Result<Self, HgError> {
91 let index_path = index_path.as_ref();
92 let index = {
93 match store_vfs.mmap_open_opt(&index_path)? {
94 None => Index::new(Box::new(vec![])),
95 Some(index_mmap) => {
96 let index = Index::new(Box::new(index_mmap))?;
97 Ok(index)
98 }
99 }
100 }?;
101
102 let default_data_path = index_path.with_extension("d");
103
104 // type annotation required
105 // won't recognize Mmap as Deref<Target = [u8]>
106 let data_bytes: Option<Box<dyn Deref<Target = [u8]> + Send>> =
107 if index.is_inline() {
108 None
109 } else {
110 let data_path = data_path.unwrap_or(&default_data_path);
111 let data_mmap = store_vfs.mmap_open(data_path)?;
112 Some(Box::new(data_mmap))
113 };
114
115 let nodemap = if index.is_inline() {
116 None
117 } else if !use_nodemap {
118 None
119 } else {
120 NodeMapDocket::read_from_file(store_vfs, index_path)?.map(
121 |(docket, data)| {
122 nodemap::NodeTree::load_bytes(
123 Box::new(data),
124 docket.data_length,
125 )
126 },
127 )
128 };
129
130 Ok(Revlog {
131 index,
132 data_bytes,
133 nodemap,
134 })
135 }
136
137 /// Return number of entries of the `Revlog`.
138 pub fn len(&self) -> usize {
139 self.index.len()
140 }
141
142 /// Returns `true` if the `Revlog` has zero `entries`.
143 pub fn is_empty(&self) -> bool {
144 self.index.is_empty()
145 }
146
147 /// Returns the node ID for the given revision number, if it exists in this
148 /// revlog
149 pub fn node_from_rev(&self, rev: Revision) -> Option<&Node> {
150 if rev == NULL_REVISION {
151 return Some(&NULL_NODE);
152 }
153 Some(self.index.get_entry(rev)?.hash())
154 }
155
156 /// Return the revision number for the given node ID, if it exists in this
157 /// revlog
158 pub fn rev_from_node(
159 &self,
160 node: NodePrefix,
161 ) -> Result<Revision, RevlogError> {
162 if node.is_prefix_of(&NULL_NODE) {
163 return Ok(NULL_REVISION);
164 }
165
166 if let Some(nodemap) = &self.nodemap {
167 return nodemap
168 .find_bin(&self.index, node)?
169 .ok_or(RevlogError::InvalidRevision);
170 }
171
172 // Fallback to linear scan when a persistent nodemap is not present.
173 // This happens when the persistent-nodemap experimental feature is not
174 // enabled, or for small revlogs.
175 //
176 // TODO: consider building a non-persistent nodemap in memory to
177 // optimize these cases.
178 let mut found_by_prefix = None;
179 for rev in (0..self.len() as Revision).rev() {
180 let index_entry =
181 self.index.get_entry(rev).ok_or(HgError::corrupted(
182 "revlog references a revision not in the index",
183 ))?;
184 if node == *index_entry.hash() {
185 return Ok(rev);
186 }
187 if node.is_prefix_of(index_entry.hash()) {
188 if found_by_prefix.is_some() {
189 return Err(RevlogError::AmbiguousPrefix);
190 }
191 found_by_prefix = Some(rev)
192 }
193 }
194 found_by_prefix.ok_or(RevlogError::InvalidRevision)
195 }
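
A hedged sketch of prefix lookup on top of `rev_from_node`; it assumes a `NodePrefix::from_hex` constructor in this crate's `node` module (treat that name as an assumption):

// Resolve an abbreviated hex node id to a revision number. An unknown
// prefix surfaces as `InvalidRevision` and an ambiguous one as
// `AmbiguousPrefix`, exactly as in the scan above.
fn rev_for_hex_prefix(
    revlog: &Revlog,
    hex: &str,
) -> Result<Revision, RevlogError> {
    let prefix = NodePrefix::from_hex(hex)
        .map_err(|_| RevlogError::InvalidRevision)?;
    revlog.rev_from_node(prefix)
}
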
196
197 /// Returns whether the given revision exists in this revlog.
198 pub fn has_rev(&self, rev: Revision) -> bool {
199 self.index.get_entry(rev).is_some()
200 }
201
202 /// Return the full data associated with a revision.
203 ///
204 /// All entries required to build the final data out of deltas will be
205 /// retrieved as needed, and the deltas will be applied to the initial
206 /// snapshot to rebuild the final data.
207 pub fn get_rev_data(
208 &self,
209 rev: Revision,
210 ) -> Result<Cow<[u8]>, RevlogError> {
211 if rev == NULL_REVISION {
212 return Ok(Cow::Borrowed(&[]));
213 }
214 Ok(self.get_entry(rev)?.data()?)
215 }
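
Combining the accessors above, a short sketch that reads the fully resolved data of the tip revision (the tip computation is an illustrative assumption):

// Fetch the resolved data of the last revision in the revlog.
fn tip_data(revlog: &Revlog) -> Result<Vec<u8>, RevlogError> {
    if revlog.is_empty() {
        return Ok(Vec::new());
    }
    let tip = revlog.len() as Revision - 1;
    Ok(revlog.get_rev_data(tip)?.into_owned())
}
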
216
217 /// Check the hash of some given data against the recorded hash.
218 pub fn check_hash(
219 &self,
220 p1: Revision,
221 p2: Revision,
222 expected: &[u8],
223 data: &[u8],
224 ) -> bool {
225 let e1 = self.index.get_entry(p1);
226 let h1 = match e1 {
227 Some(ref entry) => entry.hash(),
228 None => &NULL_NODE,
229 };
230 let e2 = self.index.get_entry(p2);
231 let h2 = match e2 {
232 Some(ref entry) => entry.hash(),
233 None => &NULL_NODE,
234 };
235
236 &hash(data, h1.as_bytes(), h2.as_bytes()) == expected
237 }
238
239 /// Build the full data of a revision out of its snapshot
240 /// and its deltas.
241 fn build_data_from_deltas(
242 snapshot: RevlogEntry,
243 deltas: &[RevlogEntry],
244 ) -> Result<Vec<u8>, HgError> {
245 let snapshot = snapshot.data_chunk()?;
246 let deltas = deltas
247 .iter()
248 .rev()
249 .map(RevlogEntry::data_chunk)
250 .collect::<Result<Vec<_>, _>>()?;
251 let patches: Vec<_> =
252 deltas.iter().map(|d| patch::PatchList::new(d)).collect();
253 let patch = patch::fold_patch_lists(&patches);
254 Ok(patch.apply(&snapshot))
255 }
256
257 /// Return the revlog data.
258 fn data(&self) -> &[u8] {
259 match self.data_bytes {
260 Some(ref data_bytes) => &data_bytes,
261 None => panic!(
262 "forgot to load the data or trying to access inline data"
263 ),
264 }
265 }
266
267 pub fn make_null_entry(&self) -> RevlogEntry {
268 RevlogEntry {
269 revlog: self,
270 rev: NULL_REVISION,
271 bytes: b"",
272 compressed_len: 0,
273 uncompressed_len: 0,
274 base_rev_or_base_of_delta_chain: None,
275 p1: NULL_REVISION,
276 p2: NULL_REVISION,
277 flags: NULL_REVLOG_ENTRY_FLAGS,
278 hash: NULL_NODE,
279 }
280 }
281
282 /// Get an entry of the revlog.
283 pub fn get_entry(
284 &self,
285 rev: Revision,
286 ) -> Result<RevlogEntry, RevlogError> {
287 if rev == NULL_REVISION {
288 return Ok(self.make_null_entry());
289 }
290 let index_entry = self
291 .index
292 .get_entry(rev)
293 .ok_or(RevlogError::InvalidRevision)?;
294 let start = index_entry.offset();
295 let end = start + index_entry.compressed_len() as usize;
296 let data = if self.index.is_inline() {
297 self.index.data(start, end)
298 } else {
299 &self.data()[start..end]
300 };
301 let entry = RevlogEntry {
302 revlog: self,
303 rev,
304 bytes: data,
305 compressed_len: index_entry.compressed_len(),
306 uncompressed_len: index_entry.uncompressed_len(),
307 base_rev_or_base_of_delta_chain: if index_entry
308 .base_revision_or_base_of_delta_chain()
309 == rev
310 {
311 None
312 } else {
313 Some(index_entry.base_revision_or_base_of_delta_chain())
314 },
315 p1: index_entry.p1(),
316 p2: index_entry.p2(),
317 flags: index_entry.flags(),
318 hash: *index_entry.hash(),
319 };
320 Ok(entry)
321 }
322
323 /// When resolving internal references within the revlog, any error
324 /// should be reported as corruption instead of e.g. "invalid revision".
325 fn get_entry_internal(
326 &self,
327 rev: Revision,
328 ) -> Result<RevlogEntry, HgError> {
329 self.get_entry(rev)
330 .map_err(|_| corrupted(format!("revision {} out of range", rev)))
331 }
332 }
333
334 /// The revlog entry's bytes and the necessary information to extract
335 /// the entry's data.
336 #[derive(Clone)]
337 pub struct RevlogEntry<'a> {
338 revlog: &'a Revlog,
339 rev: Revision,
340 bytes: &'a [u8],
341 compressed_len: u32,
342 uncompressed_len: i32,
343 base_rev_or_base_of_delta_chain: Option<Revision>,
344 p1: Revision,
345 p2: Revision,
346 flags: u16,
347 hash: Node,
348 }
349
350 impl<'a> RevlogEntry<'a> {
351 pub fn revision(&self) -> Revision {
352 self.rev
353 }
354
355 pub fn node(&self) -> &Node {
356 &self.hash
357 }
358
359 pub fn uncompressed_len(&self) -> Option<u32> {
360 u32::try_from(self.uncompressed_len).ok()
361 }
362
363 pub fn has_p1(&self) -> bool {
364 self.p1 != NULL_REVISION
365 }
366
367 pub fn p1_entry(&self) -> Result<Option<RevlogEntry>, RevlogError> {
368 if self.p1 == NULL_REVISION {
369 Ok(None)
370 } else {
371 Ok(Some(self.revlog.get_entry(self.p1)?))
372 }
373 }
374
375 pub fn p2_entry(&self) -> Result<Option<RevlogEntry>, RevlogError> {
376 if self.p2 == NULL_REVISION {
377 Ok(None)
378 } else {
379 Ok(Some(self.revlog.get_entry(self.p2)?))
380 }
381 }
382
383 pub fn p1(&self) -> Option<Revision> {
384 if self.p1 == NULL_REVISION {
385 None
386 } else {
387 Some(self.p1)
388 }
389 }
390
391 pub fn p2(&self) -> Option<Revision> {
392 if self.p2 == NULL_REVISION {
393 None
394 } else {
395 Some(self.p2)
396 }
397 }
398
399 pub fn is_censored(&self) -> bool {
400 (self.flags & REVISION_FLAG_CENSORED) != 0
401 }
402
403 pub fn has_length_affecting_flag_processor(&self) -> bool {
404 // Relevant Python code: revlog.size()
405 // note: ELLIPSIS is known to not change the content
406 (self.flags & (REVIDX_KNOWN_FLAGS ^ REVISION_FLAG_ELLIPSIS)) != 0
407 }
408
409 /// The data for this entry, after resolving deltas if any.
410 pub fn rawdata(&self) -> Result<Cow<'a, [u8]>, HgError> {
411 let mut entry = self.clone();
412 let mut delta_chain = vec![];
413
414 // The meaning of `base_rev_or_base_of_delta_chain` depends on
415 // generaldelta. See the doc on `ENTRY_DELTA_BASE` in
416 // `mercurial/revlogutils/constants.py` and the code in
417 // [_chaininfo] and in [index_deltachain].
418 let uses_generaldelta = self.revlog.index.uses_generaldelta();
419 while let Some(base_rev) = entry.base_rev_or_base_of_delta_chain {
420 let base_rev = if uses_generaldelta {
421 base_rev
422 } else {
423 entry.rev - 1
424 };
425 delta_chain.push(entry);
426 entry = self.revlog.get_entry_internal(base_rev)?;
427 }
428
429 let data = if delta_chain.is_empty() {
430 entry.data_chunk()?
431 } else {
432 Revlog::build_data_from_deltas(entry, &delta_chain)?.into()
433 };
434
435 Ok(data)
436 }
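
The base-revision rule in the loop above is easy to misread, so here it is restated in isolation (names are illustrative, not part of the crate's API):

// With generaldelta the stored base revision is authoritative; without
// it a delta always applies on top of the immediately preceding revision.
fn delta_base(
    uses_generaldelta: bool,
    rev: Revision,
    stored_base: Revision,
) -> Revision {
    if uses_generaldelta {
        stored_base
    } else {
        rev - 1
    }
}
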
437
438 fn check_data(
439 &self,
440 data: Cow<'a, [u8]>,
441 ) -> Result<Cow<'a, [u8]>, HgError> {
442 if self.revlog.check_hash(
443 self.p1,
444 self.p2,
445 self.hash.as_bytes(),
446 &data,
447 ) {
448 Ok(data)
449 } else {
450 if (self.flags & REVISION_FLAG_ELLIPSIS) != 0 {
451 return Err(HgError::unsupported(
452 "ellipsis revisions are not supported by rhg",
453 ));
454 }
455 Err(corrupted(format!(
456 "hash check failed for revision {}",
457 self.rev
458 )))
459 }
460 }
461
462 pub fn data(&self) -> Result<Cow<'a, [u8]>, HgError> {
463 let data = self.rawdata()?;
464 if self.is_censored() {
465 return Err(HgError::CensoredNodeError);
466 }
467 self.check_data(data)
468 }
469
470 /// Extract the data contained in the entry.
471 /// This may be a delta. (See `is_delta`.)
472 fn data_chunk(&self) -> Result<Cow<'a, [u8]>, HgError> {
473 if self.bytes.is_empty() {
474 return Ok(Cow::Borrowed(&[]));
475 }
476 match self.bytes[0] {
477 // Revision data is the entirety of the entry, including this
478 // header.
479 b'\0' => Ok(Cow::Borrowed(self.bytes)),
480 // Raw revision data follows.
481 b'u' => Ok(Cow::Borrowed(&self.bytes[1..])),
482 // zlib (RFC 1950) data.
483 b'x' => Ok(Cow::Owned(self.uncompressed_zlib_data()?)),
484 // zstd data.
485 b'\x28' => Ok(Cow::Owned(self.uncompressed_zstd_data()?)),
486 // A proper new format should have had a repo/store requirement.
487 format_type => Err(corrupted(format!(
488 "unknown compression header '{}'",
489 format_type
490 ))),
491 }
492 }
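
The single header byte is the entire codec negotiation; this helper restates the dispatch above for reference (illustrative only, the actual decoding stays in `data_chunk`):

// Map a chunk's first byte to the codec it selects.
fn codec_for_header(header: u8) -> &'static str {
    match header {
        b'\0' => "plain, the header byte is part of the data",
        b'u' => "plain, the data starts after the header byte",
        b'x' => "zlib (RFC 1950; 0x78 is the common zlib first byte)",
        b'\x28' => "zstd (0x28 is the first byte of the zstd magic)",
        _ => "unknown, reported as corruption",
    }
}
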
493
494 fn uncompressed_zlib_data(&self) -> Result<Vec<u8>, HgError> {
495 let mut decoder = ZlibDecoder::new(self.bytes);
496 if self.is_delta() {
497 let mut buf = Vec::with_capacity(self.compressed_len as usize);
498 decoder
499 .read_to_end(&mut buf)
500 .map_err(|e| corrupted(e.to_string()))?;
501 Ok(buf)
502 } else {
503 let cap = self.uncompressed_len.max(0) as usize;
504 let mut buf = vec![0; cap];
505 decoder
506 .read_exact(&mut buf)
507 .map_err(|e| corrupted(e.to_string()))?;
508 Ok(buf)
509 }
510 }
511
512 fn uncompressed_zstd_data(&self) -> Result<Vec<u8>, HgError> {
513 if self.is_delta() {
514 let mut buf = Vec::with_capacity(self.compressed_len as usize);
515 zstd::stream::copy_decode(self.bytes, &mut buf)
516 .map_err(|e| corrupted(e.to_string()))?;
517 Ok(buf)
518 } else {
519 let cap = self.uncompressed_len.max(0) as usize;
520 let mut buf = vec![0; cap];
521 let len = zstd::block::decompress_to_buffer(self.bytes, &mut buf)
522 .map_err(|e| corrupted(e.to_string()))?;
523 if len != self.uncompressed_len as usize {
524 Err(corrupted("uncompressed length does not match"))
525 } else {
526 Ok(buf)
527 }
528 }
529 }
530
531 /// Tell whether the entry is a snapshot or a delta
532 /// (this influences decompression).
533 fn is_delta(&self) -> bool {
534 self.base_rev_or_base_of_delta_chain.is_some()
535 }
536 }
537
538 /// Calculate the hash of a revision given its data and its parents.
539 fn hash(
540 data: &[u8],
541 p1_hash: &[u8],
542 p2_hash: &[u8],
543 ) -> [u8; NODE_BYTES_LENGTH] {
544 let mut hasher = Sha1::new();
545 let (a, b) = (p1_hash, p2_hash);
546 if a > b {
547 hasher.update(b);
548 hasher.update(a);
549 } else {
550 hasher.update(a);
551 hasher.update(b);
552 }
553 hasher.update(data);
554 *hasher.finalize().as_ref()
555 }
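
Since the parent hashes are fed to SHA-1 in sorted order, the resulting node id is symmetric in p1 and p2; a small test-style sketch that could live in the module below:

#[test]
fn hash_is_symmetric_in_parents() {
    let p1 = [0x01u8; NODE_BYTES_LENGTH];
    let p2 = [0x02u8; NODE_BYTES_LENGTH];
    assert_eq!(hash(b"data", &p1, &p2), hash(b"data", &p2, &p1));
}
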
556
557 #[cfg(test)]
558 mod tests {
559 use super::*;
560 use crate::index::{IndexEntryBuilder, INDEX_ENTRY_SIZE};
561 use itertools::Itertools;
562
563 #[test]
564 fn test_empty() {
565 let temp = tempfile::tempdir().unwrap();
566 let vfs = Vfs { base: temp.path() };
567 std::fs::write(temp.path().join("foo.i"), b"").unwrap();
568 let revlog = Revlog::open(&vfs, "foo.i", None, false).unwrap();
569 assert!(revlog.is_empty());
570 assert_eq!(revlog.len(), 0);
571 assert!(revlog.get_entry(0).is_err());
572 assert!(!revlog.has_rev(0));
573 }
574
575 #[test]
576 fn test_inline() {
577 let temp = tempfile::tempdir().unwrap();
578 let vfs = Vfs { base: temp.path() };
579 let node0 = Node::from_hex("2ed2a3912a0b24502043eae84ee4b279c18b90dd")
580 .unwrap();
581 let node1 = Node::from_hex("b004912a8510032a0350a74daa2803dadfb00e12")
582 .unwrap();
583 let node2 = Node::from_hex("dd6ad206e907be60927b5a3117b97dffb2590582")
584 .unwrap();
585 let entry0_bytes = IndexEntryBuilder::new()
586 .is_first(true)
587 .with_version(1)
588 .with_inline(true)
589 .with_offset(INDEX_ENTRY_SIZE)
590 .with_node(node0)
591 .build();
592 let entry1_bytes = IndexEntryBuilder::new()
593 .with_offset(INDEX_ENTRY_SIZE)
594 .with_node(node1)
595 .build();
596 let entry2_bytes = IndexEntryBuilder::new()
597 .with_offset(INDEX_ENTRY_SIZE)
598 .with_p1(0)
599 .with_p2(1)
600 .with_node(node2)
601 .build();
602 let contents = vec![entry0_bytes, entry1_bytes, entry2_bytes]
603 .into_iter()
604 .flatten()
605 .collect_vec();
606 std::fs::write(temp.path().join("foo.i"), contents).unwrap();
607 let revlog = Revlog::open(&vfs, "foo.i", None, false).unwrap();
608
609 let entry0 = revlog.get_entry(0).ok().unwrap();
610 assert_eq!(entry0.revision(), 0);
611 assert_eq!(*entry0.node(), node0);
612 assert!(!entry0.has_p1());
613 assert_eq!(entry0.p1(), None);
614 assert_eq!(entry0.p2(), None);
615 let p1_entry = entry0.p1_entry().unwrap();
616 assert!(p1_entry.is_none());
617 let p2_entry = entry0.p2_entry().unwrap();
618 assert!(p2_entry.is_none());
619
620 let entry1 = revlog.get_entry(1).ok().unwrap();
621 assert_eq!(entry1.revision(), 1);
622 assert_eq!(*entry1.node(), node1);
623 assert!(!entry1.has_p1());
624 assert_eq!(entry1.p1(), None);
625 assert_eq!(entry1.p2(), None);
626 let p1_entry = entry1.p1_entry().unwrap();
627 assert!(p1_entry.is_none());
628 let p2_entry = entry1.p2_entry().unwrap();
629 assert!(p2_entry.is_none());
630
631 let entry2 = revlog.get_entry(2).ok().unwrap();
632 assert_eq!(entry2.revision(), 2);
633 assert_eq!(*entry2.node(), node2);
634 assert!(entry2.has_p1());
635 assert_eq!(entry2.p1(), Some(0));
636 assert_eq!(entry2.p2(), Some(1));
637 let p1_entry = entry2.p1_entry().unwrap();
638 assert!(p1_entry.is_some());
639 assert_eq!(p1_entry.unwrap().revision(), 0);
640 let p2_entry = entry2.p2_entry().unwrap();
641 assert!(p2_entry.is_some());
642 assert_eq!(p2_entry.unwrap().revision(), 1);
643 }
644 }