@@ -0,0 +1,87 b'' | |||||
# Copyright Mercurial Contributors
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import functools
import stat


rangemask = 0x7FFFFFFF


@functools.total_ordering
class timestamp(tuple):
    """
    A Unix timestamp with optional nanoseconds precision,
    modulo 2**31 seconds.

    A 2-tuple containing:

    `truncated_seconds`: seconds since the Unix epoch,
    truncated to its lower 31 bits

    `subsecond_nanoseconds`: number of nanoseconds since `truncated_seconds`.
    When this is zero, the sub-second precision is considered unknown.
    """

    def __new__(cls, value):
        truncated_seconds, subsec_nanos = value
        value = (truncated_seconds & rangemask, subsec_nanos)
        return super(timestamp, cls).__new__(cls, value)

    def __eq__(self, other):
        self_secs, self_subsec_nanos = self
        other_secs, other_subsec_nanos = other
        return self_secs == other_secs and (
            self_subsec_nanos == other_subsec_nanos
            or self_subsec_nanos == 0
            or other_subsec_nanos == 0
        )

    def __gt__(self, other):
        self_secs, self_subsec_nanos = self
        other_secs, other_subsec_nanos = other
        if self_secs > other_secs:
            return True
        if self_secs < other_secs:
            return False
        if self_subsec_nanos == 0 or other_subsec_nanos == 0:
            # they are considered equal, so not "greater than"
            return False
        return self_subsec_nanos > other_subsec_nanos


def zero():
    """
    Returns the `timestamp` at the Unix epoch.
    """
    return tuple.__new__(timestamp, (0, 0))


def mtime_of(stat_result):
    """
    Takes an `os.stat_result`-like object and returns a `timestamp` object
    for its modification time.
    """
    try:
        # TODO: add this attribute to `osutil.stat` objects,
        # see `mercurial/cext/osutil.c`.
        #
        # This attribute is also not available on Python 2.
        nanos = stat_result.st_mtime_ns
    except AttributeError:
        # https://docs.python.org/2/library/os.html#os.stat_float_times
        # "For compatibility with older Python versions,
        # accessing stat_result as a tuple always returns integers."
        secs = stat_result[stat.ST_MTIME]

        subsec_nanos = 0
    else:
        billion = int(1e9)
        secs = nanos // billion
        subsec_nanos = nanos % billion

    return timestamp((secs, subsec_nanos))
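
For illustration (not part of the diff): assuming the module above is
importable as `timestamp`, a zero sub-second component means "precision
unknown", so such a value compares equal to any timestamp with the same
truncated seconds::

    import os

    from timestamp import mtime_of, timestamp

    ts = mtime_of(os.stat('some-file'))  # hypothetical path
    secs = ts[0]
    # Unknown sub-second precision compares equal, and never "greater than":
    assert timestamp((secs, 0)) == ts
    assert not (timestamp((secs, 0)) > ts)
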
@@ -0,0 +1,414 b'' | |||||
# v2.py - Pure-Python implementation of the dirstate-v2 file format
#
# Copyright Mercurial Contributors
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import struct

from ..thirdparty import attr
from .. import error, policy

parsers = policy.importmod('parsers')


# Must match the constant of the same name in
# `rust/hg-core/src/dirstate_tree/on_disk.rs`
TREE_METADATA_SIZE = 44
NODE_SIZE = 44


# Must match the `TreeMetadata` Rust struct in
# `rust/hg-core/src/dirstate_tree/on_disk.rs`. See doc-comments there.
#
# * 4 bytes: start offset of root nodes
# * 4 bytes: number of root nodes
# * 4 bytes: total number of nodes in the tree that have an entry
# * 4 bytes: total number of nodes in the tree that have a copy source
# * 4 bytes: number of bytes in the data file that are not used anymore
# * 4 bytes: unused
# * 20 bytes: SHA-1 hash of ignore patterns
TREE_METADATA = struct.Struct('>LLLLL4s20s')


# Must match the `Node` Rust struct in
# `rust/hg-core/src/dirstate_tree/on_disk.rs`. See doc-comments there.
#
# * 4 bytes: start offset of full path
# * 2 bytes: length of the full path
# * 2 bytes: length within the full path before its "base name"
# * 4 bytes: start offset of the copy source if any, or zero for no copy source
# * 2 bytes: length of the copy source if any, or unused
# * 4 bytes: start offset of child nodes
# * 4 bytes: number of child nodes
# * 4 bytes: number of descendant nodes that have an entry
# * 4 bytes: number of descendant nodes that have a "tracked" state
# * 2 bytes: flags
# * 4 bytes: expected size
# * 4 bytes: mtime seconds
# * 4 bytes: mtime nanoseconds
NODE = struct.Struct('>LHHLHLLLLHlll')


assert TREE_METADATA_SIZE == TREE_METADATA.size
assert NODE_SIZE == NODE.size

# match constant in mercurial/pure/parsers.py
DIRSTATE_V2_DIRECTORY = 1 << 5


def parse_dirstate(map, copy_map, data, tree_metadata):
    """parse a full v2-dirstate from binary data into dictionaries:

    - map: a {path: entry} mapping that will be filled
    - copy_map: a {path: copy-source} mapping that will be filled
    - data: a binary blob containing v2 node data
    - tree_metadata: a binary blob of the top-level node (from the docket)
    """
    (
        root_nodes_start,
        root_nodes_len,
        _nodes_with_entry_count,
        _nodes_with_copy_source_count,
        _unreachable_bytes,
        _unused,
        _ignore_patterns_hash,
    ) = TREE_METADATA.unpack(tree_metadata)
    parse_nodes(map, copy_map, data, root_nodes_start, root_nodes_len)


def parse_nodes(map, copy_map, data, start, len):
    """parse <len> nodes from <data> starting at offset <start>

    This is used by parse_dirstate to recursively fill `map` and `copy_map`.

    All directory-specific information is ignored and does not need any
    processing (DIRECTORY, ALL_UNKNOWN_RECORDED, ALL_IGNORED_RECORDED)
    """
    for i in range(len):
        node_start = start + NODE_SIZE * i
        node_bytes = slice_with_len(data, node_start, NODE_SIZE)
        (
            path_start,
            path_len,
            _basename_start,
            copy_source_start,
            copy_source_len,
            children_start,
            children_count,
            _descendants_with_entry_count,
            _tracked_descendants_count,
            flags,
            size,
            mtime_s,
            mtime_ns,
        ) = NODE.unpack(node_bytes)

        # Parse child nodes of this node recursively
        parse_nodes(map, copy_map, data, children_start, children_count)

        item = parsers.DirstateItem.from_v2_data(flags, size, mtime_s, mtime_ns)
        if not item.any_tracked:
            continue
        path = slice_with_len(data, path_start, path_len)
        map[path] = item
        if copy_source_start:
            copy_map[path] = slice_with_len(
                data, copy_source_start, copy_source_len
            )


def slice_with_len(data, start, len):
    return data[start : start + len]


@attr.s
class Node(object):
    path = attr.ib()
    entry = attr.ib()
    parent = attr.ib(default=None)
    children_count = attr.ib(default=0)
    children_offset = attr.ib(default=0)
    descendants_with_entry = attr.ib(default=0)
    tracked_descendants = attr.ib(default=0)

    def pack(self, copy_map, paths_offset):
        path = self.path
        copy = copy_map.get(path)
        entry = self.entry

        path_start = paths_offset
        path_len = len(path)
        basename_start = path.rfind(b'/') + 1  # 0 if rfind returns -1
        if copy is not None:
            copy_source_start = paths_offset + len(path)
            copy_source_len = len(copy)
        else:
            copy_source_start = 0
            copy_source_len = 0
        if entry is not None:
            flags, size, mtime_s, mtime_ns = entry.v2_data()
        else:
            # There are no mtime-cached directories in the Python
            # implementation
            flags = DIRSTATE_V2_DIRECTORY
            size = 0
            mtime_s = 0
            mtime_ns = 0
        return NODE.pack(
            path_start,
            path_len,
            basename_start,
            copy_source_start,
            copy_source_len,
            self.children_offset,
            self.children_count,
            self.descendants_with_entry,
            self.tracked_descendants,
            flags,
            size,
            mtime_s,
            mtime_ns,
        )


def pack_dirstate(map, copy_map, now):
    """
    Pack `map` and `copy_map` into the dirstate v2 binary format and return
    the bytearray.
    `now` is a timestamp of the current filesystem time used to detect race
    conditions in writing the dirstate to disk, see inline comment.

    The on-disk format expects a tree-like structure where the leaves are
    written first (and sorted per-directory), going up levels until the root
    node and writing that one to the docket. See more details on the on-disk
    format in `mercurial/helptext/internals/dirstate-v2`.

    Since both `map` and `copy_map` are flat dicts we need to figure out the
    hierarchy. This algorithm does so without having to build the entire tree
    in memory: it only keeps the minimum number of nodes around to satisfy the
    format.

    # Algorithm explanation

    This explanation does not talk about the different counters for tracked
    descendants and storing the copies, but that work is pretty simple once
    this algorithm is in place.

    ## Building a subtree

    First, sort `map`: this makes it so the leaves of the tree are contiguous
    per directory (i.e. a/b/c and a/b/d will be next to each other in the
    list), and enables us to use the ordering of folders to have a "cursor"
    of the current folder we're in without ever going twice into the same
    branch of the tree. The cursor is a node that remembers its parent and
    any information relevant to the format (see the `Node` class), building
    the relevant part of the tree lazily.
    Then, for each file in `map`, move the cursor into the tree to the
    corresponding folder of the file: for example, if the very first file
    is "a/b/c", we start from `Node[""]`, create `Node["a"]` which points to
    its parent `Node[""]`, then create `Node["a/b"]`, which points to its
    parent `Node["a"]`. These nodes are kept around in a stack.
    If the next file in `map` is in the same subtree ("a/b/d" or "a/b/e/f"),
    we add it to the stack and keep looping with the same logic of creating
    the tree nodes as needed. If however the next file in `map` is *not* in
    the same subtree ("a/other", if we're still in the "a/b" folder), then we
    know that the subtree we're in is complete.

    ## Writing the subtree

    We have the entire subtree in the stack, so we start writing it to disk
    folder by folder. The way we write a folder is to pop the stack into a
    list until the folder changes, then reverse this list of direct children
    (to satisfy the format requirement that children be sorted). This process
    repeats until we hit the "other" subtree.

    An example:
        a
        dir1/b
        dir1/c
        dir2/dir3/d
        dir2/dir3/e
        dir2/f

    Would have us:
        - add to the stack until "dir2/dir3/e"
        - realize that "dir2/f" is in a different subtree
        - pop "dir2/dir3/e", "dir2/dir3/d", reverse them so they're sorted
          and pack them since the next entry is "dir2/dir3"
        - go back up to "dir2"
        - add "dir2/f" to the stack
        - realize we're done with the map
        - pop "dir2/f", "dir2/dir3" from the stack, reverse and pack them
        - go up to the root node, do the same to write "a", "dir1" and
          "dir2" in that order

    ## Special case for the root node

    The root node is not serialized in the format, but its information is
    written to the docket. Again, see more details on the on-disk format in
    `mercurial/helptext/internals/dirstate-v2`.
    """
    data = bytearray()
    root_nodes_start = 0
    root_nodes_len = 0
    nodes_with_entry_count = 0
    nodes_with_copy_source_count = 0
    # Will always be 0 since this implementation always re-writes everything
    # to disk
    unreachable_bytes = 0
    unused = b'\x00' * 4
    # This is an optimization that's only useful for the Rust implementation
    ignore_patterns_hash = b'\x00' * 20

    if len(map) == 0:
        tree_metadata = TREE_METADATA.pack(
            root_nodes_start,
            root_nodes_len,
            nodes_with_entry_count,
            nodes_with_copy_source_count,
            unreachable_bytes,
            unused,
            ignore_patterns_hash,
        )
        return data, tree_metadata

    sorted_map = sorted(map.items(), key=lambda x: x[0])

    # Use a stack so we only have to keep the nodes we currently need,
    # instead of building the entire tree in memory
    stack = []
    current_node = Node(b"", None)
    stack.append(current_node)

    for index, (path, entry) in enumerate(sorted_map, 1):
        if entry.need_delay(now):
            # The file was last modified "simultaneously" with the current
            # write to dirstate (i.e. within the same second for file-
            # systems with a granularity of 1 sec). This commonly happens
            # for at least a couple of files on 'update'.
            # The user could change the file without changing its size
            # within the same second. Invalidate the file's mtime in
            # dirstate, forcing future 'status' calls to compare the
            # contents of the file if the size is the same. This prevents
            # mistakenly treating such files as clean.
            entry.set_possibly_dirty()
        nodes_with_entry_count += 1
        if path in copy_map:
            nodes_with_copy_source_count += 1
        current_folder = get_folder(path)
        current_node = move_to_correct_node_in_tree(
            current_folder, current_node, stack
        )

        current_node.children_count += 1
        # Entries from `map` are never `None`
        if entry.tracked:
            current_node.tracked_descendants += 1
        current_node.descendants_with_entry += 1
        stack.append(Node(path, entry, current_node))

        should_pack = True
        next_path = None
        if index < len(sorted_map):
            # Determine if the next entry is in the same sub-tree, if so
            # don't pack yet
            next_path = sorted_map[index][0]
            should_pack = not get_folder(next_path).startswith(current_folder)
        if should_pack:
            pack_directory_children(current_node, copy_map, data, stack)
            while stack and current_node.path != b"":
                # Go up the tree and write until we reach the folder of the
                # next entry (if any, otherwise the root)
                parent = current_node.parent
                in_parent_folder_of_next_entry = next_path is not None and (
                    get_folder(next_path).startswith(get_folder(stack[-1].path))
                )
                if parent is None or in_parent_folder_of_next_entry:
                    break
                pack_directory_children(parent, copy_map, data, stack)
                current_node = parent

    # Special case for the root node since we don't write it to disk, only
    # its children to the docket
    current_node = stack.pop()
    assert current_node.path == b"", current_node.path
    assert len(stack) == 0, len(stack)

    tree_metadata = TREE_METADATA.pack(
        current_node.children_offset,
        current_node.children_count,
        nodes_with_entry_count,
        nodes_with_copy_source_count,
        unreachable_bytes,
        unused,
        ignore_patterns_hash,
    )

    return data, tree_metadata


def get_folder(path):
    """
    Return the folder of the given path, or an empty string for root paths.
    """
    return path.rsplit(b'/', 1)[0] if b'/' in path else b''


def move_to_correct_node_in_tree(target_folder, current_node, stack):
    """
    Move inside the dirstate node tree to the node corresponding to
    `target_folder`, creating the missing nodes along the way if needed.
    """
    while target_folder != current_node.path:
        if target_folder.startswith(current_node.path):
            # We need to go down a folder
            prefix = target_folder[len(current_node.path) :].lstrip(b'/')
            subfolder_name = prefix.split(b'/', 1)[0]
            if current_node.path:
                subfolder_path = current_node.path + b'/' + subfolder_name
            else:
                subfolder_path = subfolder_name
            next_node = stack[-1]
            if next_node.path == target_folder:
                # This folder is now a file and only contains removed
                # entries; merge with the last node
                current_node = next_node
            else:
                current_node.children_count += 1
                current_node = Node(subfolder_path, None, current_node)
                stack.append(current_node)
        else:
            # We need to go up a folder
            current_node = current_node.parent
    return current_node


def pack_directory_children(node, copy_map, data, stack):
    """
    Write the binary representation of the direct sorted children of `node`
    to `data`
    """
    direct_children = []

    while stack[-1].path != b"" and get_folder(stack[-1].path) == node.path:
        direct_children.append(stack.pop())
    if not direct_children:
        raise error.ProgrammingError(b"no direct children for %r" % node.path)

    # Reverse the popped children to get the correct sorted order
    direct_children.reverse()
    packed_children = bytearray()
    # Write the paths to `data`. Pack child nodes but don't write them yet
    for child in direct_children:
        packed = child.pack(copy_map=copy_map, paths_offset=len(data))
        packed_children.extend(packed)
        data.extend(child.path)
        data.extend(copy_map.get(child.path, b""))
        node.tracked_descendants += child.tracked_descendants
        node.descendants_with_entry += child.descendants_with_entry
    # Write the fixed-size child nodes all together
    node.children_offset = len(data)
    data.extend(packed_children)
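
As an illustration of the property `pack_dirstate` relies on (standalone
sketch, not part of the diff): sorting the flat path list keeps the files of
each directory contiguous, so the stack-based cursor never revisits a branch
of the tree::

    def get_folder(path):
        return path.rsplit(b'/', 1)[0] if b'/' in path else b''

    paths = [b"dir2/dir3/d", b"a", b"dir1/c", b"dir2/f", b"dir1/b",
             b"dir2/dir3/e"]
    for path in sorted(paths):
        print(get_folder(path), path)
    # The folders come out grouped: b'' for b'a', then b'dir1' twice,
    # b'dir2/dir3' twice, and finally b'dir2' for b'dir2/f'.
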
@@ -0,0 +1,616 b'' | |||||
The *dirstate* is what Mercurial uses internally to track
the state of files in the working directory,
such as set by commands like `hg add` and `hg rm`.
It also contains some cached data that help make `hg status` faster.
The name refers both to `.hg/dirstate` on the filesystem
and the corresponding data structure in memory while a Mercurial process
is running.

The original file format, retroactively dubbed `dirstate-v1`,
is described at https://www.mercurial-scm.org/wiki/DirState.
It is made of a flat sequence of unordered variable-size entries,
so accessing any information in it requires parsing all of it.
Similarly, saving changes requires rewriting the entire file.

The newer `dirstate-v2` file format is designed to fix these limitations
and make `hg status` faster.

User guide
==========

Compatibility
-------------

The file format is experimental and may still change.
Different versions of Mercurial may not be compatible with each other
when working on a local repository that uses this format.
When using an incompatible version with the experimental format,
anything can happen including data corruption.

Since the dirstate is entirely local and not relevant to the wire protocol,
`dirstate-v2` does not affect compatibility with remote Mercurial versions.

When `share-safe` is enabled, different repositories sharing the same store
can use different dirstate formats.

Enabling `dirstate-v2` for new local repositories
-------------------------------------------------

When creating a new local repository such as with `hg init` or `hg clone`,
the `exp-dirstate-v2` boolean in the `format` configuration section
controls whether to use this file format.
This is disabled by default as of this writing.
To enable it for a single repository, run for example::

    $ hg init my-project --config format.exp-dirstate-v2=1

Checking the format of an existing local repository
---------------------------------------------------

The `debugformat` command prints information about
which of multiple optional formats are used in the current repository,
including `dirstate-v2`::

    $ hg debugformat
    format-variant     repo
    fncache:            yes
    dirstate-v2:        yes
    […]

Upgrading or downgrading an existing local repository
-----------------------------------------------------

The `debugupgrade` command does various upgrades or downgrades
on a local repository
based on the current Mercurial version and on configuration.
The same `format.exp-dirstate-v2` configuration is used again.

Example to upgrade::

    $ hg debugupgrade --config format.exp-dirstate-v2=1

Example to downgrade to `dirstate-v1`::

    $ hg debugupgrade --config format.exp-dirstate-v2=0

Both of these commands do nothing but print a list of proposed changes,
which may include changes unrelated to the dirstate.
Those other changes are controlled by their own configuration keys.
Add `--run` to a command to actually apply the proposed changes.

Backups of `.hg/requires` and `.hg/dirstate` are created
in a `.hg/upgradebackup.*` directory.
If something goes wrong, restoring those files should undo the change.

Note that upgrading affects compatibility with older versions of Mercurial
as noted above.
This can be relevant when a repository’s files are on a USB drive
or some other removable media, or shared over the network, etc.

Internal filesystem representation
==================================

Requirements file
-----------------

The `.hg/requires` file indicates which of various optional file formats
are used by a given repository.
Mercurial aborts when seeing a requirement it does not know about,
which avoids older versions accidentally messing up a repository
that uses a format that was introduced later.
For versions that do support a format, the presence or absence of
the corresponding requirement indicates whether to use that format.

When the file contains an `exp-dirstate-v2` line,
the `dirstate-v2` format is used.
With no such line `dirstate-v1` is used.

High level description
----------------------

Whereas `dirstate-v1` uses a single `.hg/dirstate` file,
in `dirstate-v2` that file is a "docket" file
that only contains some metadata
and points to a separate data file named `.hg/dirstate.{ID}`,
where `{ID}` is a random identifier.

This separation allows making data files append-only
and therefore safer to memory-map.
Creating a new data file (occasionally to clean up unused data)
can be done with a different ID
without disrupting another Mercurial process
that could still be using the previous data file.

Both files have a format designed to reduce the need for parsing,
by using fixed-size binary components as much as possible.
For data that is not fixed-size,
references to other parts of a file can be made by storing "pseudo-pointers":
integers counted in bytes from the start of a file.
For read-only access no data structure is needed,
only a bytes buffer (possibly memory-mapped directly from the filesystem)
with specific parts read on demand.
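
For example, a minimal sketch of such read-only access in Python (the data
file name here is hypothetical; the real one comes from the docket)::

    import mmap

    with open('.hg/dirstate.abc12345', 'rb') as f:
        buf = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
    # Slicing only touches the mapped pages actually needed:
    start, length = 0, 44  # e.g. a pseudo-pointer and a node size
    some_bytes = buf[start : start + length]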

The data file contains "nodes" organized in a tree.
Each node represents a file or directory inside the working directory
or its parent changeset.
This tree has the same structure as the filesystem,
so a node representing a directory has child nodes representing
the files and subdirectories contained directly in that directory.

The docket file format
----------------------

This is implemented in `rust/hg-core/src/dirstate_tree/on_disk.rs`
and `mercurial/dirstateutils/docket.py`.

Components of the docket file are found at fixed offsets,
counted in bytes from the start of the file:

* Offset 0:
  The 12-byte marker string "dirstate-v2\n" ending with a newline character.
  This makes it easier to tell a dirstate-v2 file from a dirstate-v1 file,
  although it is not strictly necessary
  since `.hg/requires` determines which format to use.

* Offset 12:
  The changeset node ID of the first parent of the working directory,
  as up to 32 binary bytes.
  If a node ID is shorter (20 bytes for SHA-1),
  it is start-aligned and the rest of the bytes are set to zero.

* Offset 44:
  The changeset node ID of the second parent of the working directory,
  or all zeros if there isn’t one.
  Also 32 binary bytes.

* Offset 76:
  Tree metadata on 44 bytes, described below.
  Its separation in this documentation from the rest of the docket
  reflects a detail of the current implementation.
  Since tree metadata is also made of fields at fixed offsets, those could
  be inlined here by adding 76 bytes to each offset.

* Offset 120:
  The used size of the data file, as a 32-bit big-endian integer.
  The actual size of the data file may be larger
  (if another Mercurial process is appending to it
  but has not updated the docket yet).
  That extra data must be ignored.

* Offset 124:
  The length of the data file identifier, as an 8-bit integer.

* Offset 125:
  The data file identifier.

* Any additional data is currently ignored, and dropped when updating the
  file.
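
For illustration, a standalone sketch (not the actual `docket.py` API;
Python 3 byte indexing assumed) that reads those fixed offsets::

    import struct

    def parse_docket(docket_bytes):
        assert docket_bytes[:12] == b"dirstate-v2\n"
        parent_1 = docket_bytes[12:44]
        parent_2 = docket_bytes[44:76]
        tree_metadata = docket_bytes[76:120]
        (data_used_size,) = struct.unpack('>L', docket_bytes[120:124])
        id_length = docket_bytes[124]  # indexing bytes yields an int
        data_file_id = docket_bytes[125 : 125 + id_length]
        return (parent_1, parent_2, tree_metadata,
                data_used_size, data_file_id)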

Tree metadata in the docket file
--------------------------------

Tree metadata is similarly made of components at fixed offsets.
These offsets are counted in bytes from the start of tree metadata,
which is 76 bytes after the start of the docket file.

This metadata can be thought of as the singular root of the tree
formed by nodes in the data file.

* Offset 0:
  Pseudo-pointer to the start of root nodes,
  counted in bytes from the start of the data file,
  as a 32-bit big-endian integer.
  These nodes describe files and directories found directly
  at the root of the working directory.

* Offset 4:
  Number of root nodes, as a 32-bit big-endian integer.

* Offset 8:
  Total number of nodes in the entire tree that "have a dirstate entry",
  as a 32-bit big-endian integer.
  Those nodes represent files that would be present at all in `dirstate-v1`.
  This is typically less than the total number of nodes.
  This counter is used to implement `len(dirstatemap)`.

* Offset 12:
  Number of nodes in the entire tree that have a copy source,
  as a 32-bit big-endian integer.
  At the next commit, these files are recorded
  as having been copied or moved/renamed from that source.
  (A move is recorded as a copy and separate removal of the source.)
  This counter is used to implement `len(dirstatemap.copymap)`.

* Offset 16:
  An estimation of how many bytes of the data file
  (within its used size) are unused, as a 32-bit big-endian integer.
  When appending to an existing data file,
  some existing nodes or paths can be unreachable from the new root
  but they still take up space.
  This counter is used to decide when to write a new data file from scratch
  instead of appending to an existing one,
  in order to get rid of that unreachable data
  and avoid unbounded file size growth.

* Offset 20:
  These four bytes are currently ignored
  and reset to zero when updating a docket file.
  This is an attempt at forward compatibility:
  future Mercurial versions could use this as a bit field
  to indicate that a dirstate has additional data or constraints.
  Finding a dirstate file with the relevant bit unset indicates that
  it was written by a then-older version
  which is not aware of that future change.

* Offset 24:
  Either 20 zero bytes, or a SHA-1 hash as 20 binary bytes.
  When present, the hash is of ignore patterns
  that were used for some previous run of the `status` algorithm.

* (Offset 44: end of tree metadata)
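
These fields correspond to the `TREE_METADATA` struct format in
`mercurial/dirstateutils/v2.py`; a standalone sketch of unpacking them from
a docket::

    import struct

    TREE_METADATA = struct.Struct('>LLLLL4s20s')  # 44 bytes

    def parse_tree_metadata(docket_bytes):
        meta = docket_bytes[76 : 76 + TREE_METADATA.size]
        (root_nodes_start, root_nodes_count, nodes_with_entry,
         nodes_with_copy_source, unreachable_bytes, _unused,
         ignore_patterns_hash) = TREE_METADATA.unpack(meta)
        return root_nodes_start, root_nodes_count, ignore_patterns_hash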

Optional hash of ignore patterns
--------------------------------

The implementation of `status` at `rust/hg-core/src/dirstate_tree/status.rs`
has been optimized such that its run time is dominated by calls
to `stat` for reading the filesystem metadata of a file or directory,
and to `readdir` for listing the contents of a directory.
In some cases the algorithm can skip calls to `readdir`
(saving significant time)
because the dirstate already contains enough of the relevant information
to build the correct `status` results.

The default configuration of `hg status` is to list unknown files
but not ignored files.
In this case, it matters for the `readdir`-skipping optimization
if a given file used to be ignored but became unknown
because `.hgignore` changed.
To detect the possibility of such a change,
the tree metadata contains an optional hash of all ignore patterns.

We define:

* "Root" ignore files as:

  - `.hgignore` at the root of the repository if it exists
  - And all files from `ui.ignore.*` config.

  This set of files is sorted by the string representation of their path.

* The "expanded contents" of an ignore file is the byte string made
  by the concatenation of its contents followed by the "expanded contents"
  of other files included with `include:` or `subinclude:` directives,
  in inclusion order. This definition is recursive, as included files can
  themselves include more files.

This hash is defined as the SHA-1 of the concatenation (in sorted
order) of the "expanded contents" of each "root" ignore file.
(Note that computing this does not require actually concatenating
into a single contiguous byte sequence.
Instead a SHA-1 hasher object can be created
and fed separate chunks one by one.)
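
A minimal sketch of that incremental hashing (the recursive expansion of
`include:`/`subinclude:` directives is elided here)::

    import hashlib

    def ignore_patterns_hash(sorted_root_ignore_paths):
        hasher = hashlib.sha1()
        for path in sorted_root_ignore_paths:
            with open(path, 'rb') as f:
                # A full implementation would feed the recursively
                # "expanded contents" instead of the raw file contents.
                hasher.update(f.read())
        return hasher.digest()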

The data file format
--------------------

This is implemented in `rust/hg-core/src/dirstate_tree/on_disk.rs`
and `mercurial/dirstateutils/v2.py`.

The data file contains two types of data: paths and nodes.

Paths and nodes can be organized in any order in the file, except that
sibling nodes must be next to each other and sorted by their path.
Contiguity lets the parent refer to them all
by their count and a single pseudo-pointer,
instead of storing one pseudo-pointer per child node.
Sorting allows using binary search to find a child node with a given name
in `O(log(n))` byte sequence comparisons.

The current implementation writes paths and child nodes before a given node
for ease of figuring out the value of pseudo-pointers by the time they are
to be written, but this is not an obligation and readers must not rely on it.

A path is stored as a byte string anywhere in the file, without delimiter.
It is referred to by one or more nodes by a pseudo-pointer to its start, and
its length in bytes. Since there is no delimiter,
when a path is a substring of another the same bytes could be reused,
although the implementation does not exploit this as of this writing.

A node is stored on 44 bytes with components at fixed offsets. Paths and
child nodes relevant to a node are stored externally and referenced through
pseudo-pointers.

All integers are stored in big-endian. All pseudo-pointers are 32-bit
integers counting bytes from the start of the data file. Path lengths and
positions are 16-bit integers, also counted in bytes.

Node components are:

* Offset 0:
  Pseudo-pointer to the full path of this node,
  from the working directory root.

* Offset 4:
  Length of the full path.

* Offset 6:
  Position of the last `/` path separator within the full path,
  in bytes from the start of the full path,
  or zero if there isn’t one.
  The part of the full path after this position is the "base name".
  Since sibling nodes have the same parent, only their base names vary
  and need to be considered when doing binary search to find a given path.

* Offset 8:
  Pseudo-pointer to the "copy source" path for this node,
  or zero if there is no copy source.

* Offset 12:
  Length of the copy source path, or zero if there isn’t one.

* Offset 14:
  Pseudo-pointer to the start of child nodes.

* Offset 18:
  Number of child nodes, as a 32-bit integer.
  They occupy 44 times this number of bytes
  (not counting space for paths, and further descendants).

* Offset 22:
  Number as a 32-bit integer of descendant nodes in this subtree,
  not including this node itself,
  that "have a dirstate entry".
  Those nodes represent files that would be present at all in `dirstate-v1`.
  This is typically less than the total number of descendants.
  This counter is used to implement `has_dir`.

* Offset 26:
  Number as a 32-bit integer of descendant nodes in this subtree,
  not including this node itself,
  that represent files tracked in the working directory.
  (For example, `hg rm` makes a file untracked.)
  This counter is used to implement `has_tracked_dir`.

* Offset 30:
  A `flags` field that packs some boolean values as bits of a 16-bit
  integer. Starting from least-significant, bit masks are::

    WDIR_TRACKED = 1 << 0
    P1_TRACKED = 1 << 1
    P2_INFO = 1 << 2
    MODE_EXEC_PERM = 1 << 3
    MODE_IS_SYMLINK = 1 << 4
    HAS_FALLBACK_EXEC = 1 << 5
    FALLBACK_EXEC = 1 << 6
    HAS_FALLBACK_SYMLINK = 1 << 7
    FALLBACK_SYMLINK = 1 << 8
    EXPECTED_STATE_IS_MODIFIED = 1 << 9
    HAS_MODE_AND_SIZE = 1 << 10
    HAS_MTIME = 1 << 11
    MTIME_SECOND_AMBIGUOUS = 1 << 12
    DIRECTORY = 1 << 13
    ALL_UNKNOWN_RECORDED = 1 << 14
    ALL_IGNORED_RECORDED = 1 << 15

  The meaning of each bit is described below.

  Other bits are unset.
  They may be assigned meaning in the future,
  with the limitation that Mercurial versions that pre-date such meaning
  will always reset those bits to unset when writing nodes.
  (A new node is written for any mutation in its subtree,
  leaving the bytes of the old node unreachable
  until the data file is rewritten entirely.)

* Offset 32:
  A `size` field described below, as a 32-bit integer.
  Unlike in dirstate-v1, negative values are not used.

* Offset 36:
  The seconds component of an `mtime` field described below,
  as a 32-bit integer.
  Unlike in dirstate-v1, negative values are not used.
  When `mtime` is used, this is the number of seconds since the Unix epoch
  truncated to its lower 31 bits.

* Offset 40:
  The nanoseconds component of an `mtime` field described below,
  as a 32-bit integer.
  When `mtime` is used,
  this is the number of nanoseconds since `mtime.seconds`,
  always strictly less than one billion.

  This may be zero if more precision is not available.
  (This can happen because of limitations in any of Mercurial, Python,
  libc, the operating system, …)

  When comparing two mtimes and either has this component set to zero,
  the sub-second precision of both should be ignored.
  False positives when checking mtime equality due to clock resolution
  are always possible and the status algorithm needs to deal with them,
  but having too many false negatives could be harmful too.

* (Offset 44: end of this node)
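
This layout matches the `NODE` struct format in
`mercurial/dirstateutils/v2.py`; a standalone sketch of decoding one node::

    import struct

    NODE = struct.Struct('>LHHLHLLLLHlll')  # 44 bytes

    def read_node(data, offset):
        (path_start, path_len, basename_start,
         copy_source_start, copy_source_len,
         children_start, children_count,
         descendants_with_entry, tracked_descendants,
         flags, size, mtime_s, mtime_ns) = NODE.unpack(
            data[offset : offset + NODE.size]
        )
        full_path = data[path_start : path_start + path_len]
        return full_path, flags, children_start, children_count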

The meaning of the boolean values packed in `flags` is:

`WDIR_TRACKED`
    Set if the working directory contains a tracked file at this node’s
    path. This is typically set and unset by `hg add` and `hg rm`.

`P1_TRACKED`
    Set if the working directory’s first parent changeset
    (whose node identifier is found in tree metadata)
    contains a tracked file at this node’s path.
    This is a cache to reduce manifest lookups.

`P2_INFO`
    Set if the file has been involved in some merge operation.
    Either because it was actually merged,
    or because the version in the second parent (p2) was ahead,
    or because some rename moved it there.
    In either case `hg status` will want it displayed as modified.

Files that would be mentioned at all in the `dirstate-v1` file format
have a node with at least one of the above three bits set in `dirstate-v2`.
Let’s call these files "tracked anywhere",
and "untracked" the nodes with all three of these bits unset.
Untracked nodes are typically for directories:
they hold child nodes and form the tree structure.
Although implementations should strive to clean up nodes
that are entirely unused, other untracked nodes may also exist.
For example, a future version of Mercurial might in some cases
add nodes for untracked files or/and ignored files in the working directory
in order to optimize `hg status`
by enabling it to skip `readdir` in more cases.
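
For instance, a sketch of that "tracked anywhere" test using the bit masks
listed earlier::

    WDIR_TRACKED = 1 << 0
    P1_TRACKED = 1 << 1
    P2_INFO = 1 << 2

    def tracked_anywhere(flags):
        # Untracked nodes (typically directories) have all three bits unset.
        return bool(flags & (WDIR_TRACKED | P1_TRACKED | P2_INFO))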

`HAS_MODE_AND_SIZE`
    Must be unset for untracked nodes.
    For files tracked anywhere, if this is set:
    - The `size` field is the expected file size,
      in bytes truncated to its lower 31 bits.
    - The expected execute permission for the file’s owner
      is given by `MODE_EXEC_PERM`
    - The expected file type is given by `MODE_IS_SYMLINK`:
      a symbolic link if set, or a normal file if unset.
    If this is unset the expected size, permission, and file type are
    unknown. The `size` field is unused (set to zero).

`HAS_MTIME`
    The node contains a "valid" last modification time in the `mtime` field.

    It means the `mtime` was already strictly in the past when observed,
    meaning that later changes cannot happen in the same clock tick
    and must cause a different modification time
    (unless the system clock jumps back and we get unlucky,
    which is not impossible but deemed unlikely enough).

    This means that if `std::fs::symlink_metadata` later reports
    the same modification time
    and ignored patterns haven’t changed,
    we can assume the node to be unchanged on disk.

    The `mtime` field can then be used to skip more expensive lookup when
    checking the status of "tracked" nodes.

    It can also be set for nodes where `DIRECTORY` is set.
    See `DIRECTORY` documentation for details.

`DIRECTORY`
    When set, this entry will match a directory that exists or existed on
    the file system.

    * When `HAS_MTIME` is set a directory has been seen on the file system
      and `mtime` matches its last modification time. However, `HAS_MTIME`
      not being set does not indicate the lack of a directory on the file
      system.

    * When not tracked anywhere, this node does not represent an ignored or
      unknown file on disk.

    If `HAS_MTIME` is set
    and `mtime` matches the last modification time of the directory on disk,
    the directory is unchanged
    and we can skip calling `std::fs::read_dir` again for this directory,
    and iterate child dirstate nodes instead.
    (as long as `ALL_UNKNOWN_RECORDED` and `ALL_IGNORED_RECORDED` are taken
    into account)

`MODE_EXEC_PERM`
    Must be unset if `HAS_MODE_AND_SIZE` is unset.
    If `HAS_MODE_AND_SIZE` is set,
    this indicates whether the file’s owner is expected
    to have execute permission.

    Beware that on systems without filesystem support for this information,
    the value stored in the dirstate might be wrong and should not be
    relied on.

`MODE_IS_SYMLINK`
    Must be unset if `HAS_MODE_AND_SIZE` is unset.
    If `HAS_MODE_AND_SIZE` is set,
    this indicates whether the file is expected to be a symlink
    as opposed to a normal file.

    Beware that on systems without filesystem support for this information,
    the value stored in the dirstate might be wrong and should not be
    relied on.

`EXPECTED_STATE_IS_MODIFIED`
    Must be unset for untracked nodes.
    For:
    - a file tracked anywhere
    - that has expected metadata (`HAS_MODE_AND_SIZE` and `HAS_MTIME`)
    - if that metadata matches
      metadata found in the working directory with `stat`
    This bit indicates the status of the file.
    If set, the status is modified. If unset, it is clean.

    In cases where `hg status` needs to read the contents of a file
    because metadata is ambiguous, this bit lets it record the result
    if the result is modified so that a future run of `hg status`
    does not need to do the same again.
    It is valid to never set this bit,
    and consider expected metadata ambiguous if it is set.

`ALL_UNKNOWN_RECORDED`
    If set, all "unknown" children existing on disk (at the time of the last
    status) have been recorded and the `mtime` associated with
    `DIRECTORY` can be used for optimization even when "unknown" files
    are listed.

    Note that the number of recorded "unknown" children can still be zero
    if none were present.

    Also note that having this flag unset does not imply that no "unknown"
    children have been recorded. Some might be present, but there is no
    guarantee that it will be all of them.

`ALL_IGNORED_RECORDED`
    If set, all "ignored" children existing on disk (at the time of the last
    status) have been recorded and the `mtime` associated with
    `DIRECTORY` can be used for optimization even when "ignored" files
    are listed.

    Note that the number of recorded "ignored" children can still be zero
    if none were present.

    Also note that having this flag unset does not imply that no "ignored"
    children have been recorded. Some might be present, but there is no
    guarantee that it will be all of them.

`HAS_FALLBACK_EXEC`
    If this flag is set, the entry carries "fallback" information for the
    executable bit in the `FALLBACK_EXEC` flag.

    Fallback information can be stored in the dirstate to keep track of
    filesystem attributes tracked by Mercurial when the underlying file
    system or operating system does not support that property (e.g.
    Windows).

`FALLBACK_EXEC`
    Should be ignored if `HAS_FALLBACK_EXEC` is unset. If set, the file for
    this entry should be considered executable if that information cannot
    be extracted from the file system. If unset it should be considered
    non-executable instead.

`HAS_FALLBACK_SYMLINK`
    If this flag is set, the entry carries "fallback" information for
    symbolic link status in the `FALLBACK_SYMLINK` flag.

    Fallback information can be stored in the dirstate to keep track of
    filesystem attributes tracked by Mercurial when the underlying file
    system or operating system does not support that property (e.g.
    Windows).

`FALLBACK_SYMLINK`
    Should be ignored if `HAS_FALLBACK_SYMLINK` is unset. If set, the file
    for this entry should be considered a symlink if that information
    cannot be extracted from the file system. If unset it should be
    considered a normal file instead.

`MTIME_SECOND_AMBIGUOUS`
    This flag is relevant only when `HAS_MTIME` is set. When set, the
    `mtime` stored in the entry is only valid for comparison with timestamps
    that have nanosecond information. If the available timestamp does not
    carry nanosecond information, the `mtime` should be ignored and no
    optimisation can be applied.
@@ -0,0 +1,72 b'' | |||||
|
1 | == New Features == | |||
|
2 | * `debugrebuildfncache` now has an option to rebuild only the index files | |||
|
3 | * a new `bookmarks.mode` path option have been introduced to control the | |||
|
4 | bookmark update strategy during exchange with a peer. See `hg help paths` for | |||
|
5 | details. | |||
|
6 | * a new `bookmarks.mirror` option has been introduced. See `hg help bookmarks` | |||
|
7 | for details. | |||
|
8 | * more commands support detailed exit codes when config `ui.detailed-exit-codes` is enabled | |||
|
9 | ||||
|
10 | == Default Format Change == | |||
|
11 | ||||
|
12 | == New Experimental Features == | |||
|
13 | ||||
|
14 | * '''Major feature''': version 2 of the dirstate is available (the first version is as old as Mercurial itself). It allows for much faster working copy inspection (status, diff, commit, update, etc.) and richer information (symlink and exec info on Windows, etc.). The format has been frozen with room for some future evolution and the current implementations (Python, Python + C, Python + Rust or pure Rust) should be compatible with any future change or optimization that the format allows. You can get more information [[https://www.mercurial-scm.org/repo/hg/file/tip/mercurial/helptext/internals/dirstate-v2.txt | in the internal documentation]] | |||
|
15 | * Added a new `web.full-garbage-collection-rate` to control performance. See | |||
|
16 | de2e04fe4897a554b9ef433167f11ea4feb2e09c for more information | |||
|
17 | * Added a new `histedit.later-commits-first` option to affect the ordering of commits in `chistedit` to match the order in `hg log -G`. It will affect the text-based version before graduating from experimental. | |||
|
18 | ||||
|
19 | == Bug Fixes == | |||
|
20 | ||||
|
21 | * `hg fix --working-dir` now correctly works when in an uncommitted merge state | |||
|
22 | * Unintentional duplicated calls to `hg fix`'s internals were removed, making it potentially much faster | |||
|
23 | * `rhg cat` can be called without a revision | |||
|
24 | * `rhg cat` can be called with the `.` revision | |||
|
25 | * `rhg cat` is more robust than before with regards to edge cases. Some still remain like a tag or bookmark that is ambiguous with a nodeid prefix, only nodeids (prefixed or not) are supported as of now. | |||
|
26 | * `rhg cat` is even faster | |||
|
27 | * `rhg` (Rust fast-path for `hg`) now supports the full config list syntax | |||
|
28 | * `rhg` now parses some corner-cases for revsets correctly | |||
|
29 | * Fixed an `fsmonitor` on Python 3 during exception handling | |||
|
30 | * Lots of Windows fixes | |||
|
31 | * Lots of miscellaneous other fixes | |||
|
32 | * Removed a CPython-specific compatibility hack to improve support for alternative Python implementations | |||
|
33 | ||||
|
34 | == Backwards Compatibility Changes == | |||
|
35 | ||||
|
36 | ||||
|
37 | == Internal API Changes == | |||
|
38 | ||||
|
39 | The following functions have been removed: | |||
|
40 | ||||
|
41 | * `dirstate.normal` | |||
|
42 | * `dirstate.normallookup` | |||
|
43 | * `dirstate.otherparent` | |||
|
44 | * `dirstate.add` | |||
|
45 | * `dirstate.addfile` | |||
|
46 | * `dirstate.remove` | |||
|
47 | * `dirstate.drop` | |||
|
48 | * `dirstate.dropfile` | |||
|
49 | * `dirstate.__getitem__` | |||
|
50 | * `dirstatemap.nonnormalentries` | |||
|
51 | * `dirstatemap.nonnormalset` | |||
|
52 | * `dirstatemap.otherparentset` | |||
|
53 | * `dirstatemap.non_normal_or_other_parent_paths` | |||
|
54 | * `dirstateitem.dm_nonnormal` | |||
|
55 | * `dirstateitem.dm_otherparent` | |||
|
56 | * `dirstateitem.merged_removed` | |||
|
57 | * `dirstateitem.from_p2` | |||
|
58 | * `dirstateitem.merged` | |||
|
59 | * `dirstateitem.new_merged` | |||
|
60 | * `dirstateitem.new_added` | |||
|
61 | * `dirstateitem.new_from_p2` | |||
|
62 | * `dirstateitem.new_possibly_dirty` | |||
|
63 | * `dirstateitem.new_normal` | |||
|
64 | * `dirstateitem.from_p2_removed` | |||
|
65 | ||||
|
66 | Miscellaneous: | |||
|
67 | ||||
|
68 | * `wireprotov1peer`'s `batchable` is now a simple function and not a generator | |||
|
69 | anymore | |||
|
70 | * The Rust extensions (and by extension the experimental `rhg status`) only use a tree-based dirstate in-memory, even when using dirstate-v1. See bf8837e3d7cec40fe649c47163a3154dda03fa16 for more details | |||
|
71 | * The Rust minimum supported version is now 1.48.0, in accordance with our policy of keeping up with Debian stable | |||
|
72 | * The test harness plays nicer with the NixOS sandbox
@@ -0,0 +1,643 b'' | |||||
|
1 | use crate::dirstate_tree::on_disk::DirstateV2ParseError; | |||
|
2 | use crate::errors::HgError; | |||
|
3 | use bitflags::bitflags; | |||
|
4 | use std::convert::{TryFrom, TryInto}; | |||
|
5 | use std::fs; | |||
|
6 | use std::io; | |||
|
7 | use std::time::{SystemTime, UNIX_EPOCH}; | |||
|
8 | ||||
|
9 | #[derive(Copy, Clone, Debug, Eq, PartialEq)] | |||
|
10 | pub enum EntryState { | |||
|
11 | Normal, | |||
|
12 | Added, | |||
|
13 | Removed, | |||
|
14 | Merged, | |||
|
15 | } | |||
|
16 | ||||
|
17 | /// `size` and `mtime.seconds` are truncated to 31 bits. | |||
|
18 | /// | |||
|
19 | /// TODO: double-check status algorithm correctness for files | |||
|
20 | /// larger than 2 GiB or modified after 2038. | |||
|
21 | #[derive(Debug, Copy, Clone)] | |||
|
22 | pub struct DirstateEntry { | |||
|
23 | pub(crate) flags: Flags, | |||
|
24 | mode_size: Option<(u32, u32)>, | |||
|
25 | mtime: Option<TruncatedTimestamp>, | |||
|
26 | } | |||
|
27 | ||||
|
28 | bitflags! { | |||
|
29 | pub(crate) struct Flags: u8 { | |||
|
30 | const WDIR_TRACKED = 1 << 0; | |||
|
31 | const P1_TRACKED = 1 << 1; | |||
|
32 | const P2_INFO = 1 << 2; | |||
|
33 | const HAS_FALLBACK_EXEC = 1 << 3; | |||
|
34 | const FALLBACK_EXEC = 1 << 4; | |||
|
35 | const HAS_FALLBACK_SYMLINK = 1 << 5; | |||
|
36 | const FALLBACK_SYMLINK = 1 << 6; | |||
|
37 | } | |||
|
38 | } | |||
|
39 | ||||
|
40 | /// A Unix timestamp with nanosecond precision | |||
|
41 | #[derive(Debug, Copy, Clone)] | |||
|
42 | pub struct TruncatedTimestamp { | |||
|
43 | truncated_seconds: u32, | |||
|
44 | /// Always in the `0 .. 1_000_000_000` range. | |||
|
45 | nanoseconds: u32, | |||
|
46 | } | |||
|
47 | ||||
|
48 | impl TruncatedTimestamp { | |||
|
49 | /// Constructs from a timestamp potentially outside of the supported range, | |||
|
50 | /// and truncates the seconds component to its lower 31 bits. | |||
|
51 | /// | |||
|
52 | /// Panics if the nanoseconds component is not in the expected range. | |||
|
53 | pub fn new_truncate(seconds: i64, nanoseconds: u32) -> Self { | |||
|
54 | assert!(nanoseconds < NSEC_PER_SEC); | |||
|
55 | Self { | |||
|
56 | truncated_seconds: seconds as u32 & RANGE_MASK_31BIT, | |||
|
57 | nanoseconds, | |||
|
58 | } | |||
|
59 | } | |||
|
60 | ||||
|
61 | /// Construct from components. Returns an error if they are not in the | |||
|
62 | /// expected range. | |||
|
63 | pub fn from_already_truncated( | |||
|
64 | truncated_seconds: u32, | |||
|
65 | nanoseconds: u32, | |||
|
66 | ) -> Result<Self, DirstateV2ParseError> { | |||
|
67 | if truncated_seconds & !RANGE_MASK_31BIT == 0 | |||
|
68 | && nanoseconds < NSEC_PER_SEC | |||
|
69 | { | |||
|
70 | Ok(Self { | |||
|
71 | truncated_seconds, | |||
|
72 | nanoseconds, | |||
|
73 | }) | |||
|
74 | } else { | |||
|
75 | Err(DirstateV2ParseError) | |||
|
76 | } | |||
|
77 | } | |||
|
78 | ||||
|
79 | pub fn for_mtime_of(metadata: &fs::Metadata) -> io::Result<Self> { | |||
|
80 | #[cfg(unix)] | |||
|
81 | { | |||
|
82 | use std::os::unix::fs::MetadataExt; | |||
|
83 | let seconds = metadata.mtime(); | |||
|
84 | // i64 -> u32 with value always in the `0 .. NSEC_PER_SEC` range | |||
|
85 | let nanoseconds = metadata.mtime_nsec().try_into().unwrap(); | |||
|
86 | Ok(Self::new_truncate(seconds, nanoseconds)) | |||
|
87 | } | |||
|
88 | #[cfg(not(unix))] | |||
|
89 | { | |||
|
90 | metadata.modified().map(Self::from) | |||
|
91 | } | |||
|
92 | } | |||
|
93 | ||||
|
94 | /// The lower 31 bits of the number of seconds since the epoch. | |||
|
95 | pub fn truncated_seconds(&self) -> u32 { | |||
|
96 | self.truncated_seconds | |||
|
97 | } | |||
|
98 | ||||
|
99 | /// The sub-second component of this timestamp, in nanoseconds. | |||
|
100 | /// Always in the `0 .. 1_000_000_000` range. | |||
|
101 | /// | |||
|
102 | /// This timestamp is after `(seconds, 0)` by this many nanoseconds. | |||
|
103 | pub fn nanoseconds(&self) -> u32 { | |||
|
104 | self.nanoseconds | |||
|
105 | } | |||
|
106 | ||||
|
107 | /// Returns whether two timestamps are equal modulo 2**31 seconds. | |||
|
108 | /// | |||
|
109 | /// If this returns `true`, the original values converted from `SystemTime` | |||
|
110 | /// or given to `new_truncate` were very likely equal. A false positive is | |||
|
111 | /// possible if they were exactly a multiple of 2**31 seconds apart (around | |||
|
112 | /// 68 years). This is deemed very unlikely to happen by chance, especially | |||
|
113 | /// on filesystems that support sub-second precision. | |||
|
114 | /// | |||
|
115 | /// If someone is manipulating the modification times of some files to | |||
|
116 | /// intentionally make `hg status` return incorrect results, not truncating | |||
|
117 | /// wouldn’t help much since they can set exactly the expected timestamp. | |||
|
118 | /// | |||
|
119 | /// Sub-second precision is ignored if it is zero in either value. | |||
|
120 | /// Some APIs simply return zero when more precision is not available. | |||
|
121 | /// When comparing values from different sources, if only one is truncated | |||
|
122 | /// in that way, doing a simple comparison would cause many false | |||
|
123 | /// negatives. | |||
|
124 | pub fn likely_equal(self, other: Self) -> bool { | |||
|
125 | self.truncated_seconds == other.truncated_seconds | |||
|
126 | && (self.nanoseconds == other.nanoseconds | |||
|
127 | || self.nanoseconds == 0 | |||
|
128 | || other.nanoseconds == 0) | |||
|
129 | } | |||
|
130 | ||||
|
131 | pub fn likely_equal_to_mtime_of( | |||
|
132 | self, | |||
|
133 | metadata: &fs::Metadata, | |||
|
134 | ) -> io::Result<bool> { | |||
|
135 | Ok(self.likely_equal(Self::for_mtime_of(metadata)?)) | |||
|
136 | } | |||
|
137 | } | |||
|
138 | ||||
|
139 | impl From<SystemTime> for TruncatedTimestamp { | |||
|
140 | fn from(system_time: SystemTime) -> Self { | |||
|
141 | // On Unix, `SystemTime` is a wrapper for the `timespec` C struct: | |||
|
142 | // https://www.gnu.org/software/libc/manual/html_node/Time-Types.html#index-struct-timespec | |||
|
143 | // We want to effectively access its fields, but the Rust standard | |||
|
144 | // library does not expose them. The best we can do is: | |||
|
145 | let seconds; | |||
|
146 | let nanoseconds; | |||
|
147 | match system_time.duration_since(UNIX_EPOCH) { | |||
|
148 | Ok(duration) => { | |||
|
149 | seconds = duration.as_secs() as i64; | |||
|
150 | nanoseconds = duration.subsec_nanos(); | |||
|
151 | } | |||
|
152 | Err(error) => { | |||
|
153 | // `system_time` is before `UNIX_EPOCH`. | |||
|
154 | // We need to undo this algorithm: | |||
|
155 | // https://github.com/rust-lang/rust/blob/6bed1f0bc3cc50c10aab26d5f94b16a00776b8a5/library/std/src/sys/unix/time.rs#L40-L41 | |||
|
156 | let negative = error.duration(); | |||
|
157 | let negative_secs = negative.as_secs() as i64; | |||
|
158 | let negative_nanos = negative.subsec_nanos(); | |||
|
159 | if negative_nanos == 0 { | |||
|
160 | seconds = -negative_secs; | |||
|
161 | nanoseconds = 0; | |||
|
162 | } else { | |||
|
163 | // For example if `system_time` was 4.3 seconds before | |||
|
164 | // the Unix epoch we get a Duration that represents | |||
|
165 | // `(-4, -0.3)` but we want `(-5, +0.7)`: | |||
|
166 | seconds = -1 - negative_secs; | |||
|
167 | nanoseconds = NSEC_PER_SEC - negative_nanos; | |||
|
168 | } | |||
|
169 | } | |||
|
170 | }; | |||
|
171 | Self::new_truncate(seconds, nanoseconds) | |||
|
172 | } | |||
|
173 | } | |||
|
174 | ||||
|
175 | const NSEC_PER_SEC: u32 = 1_000_000_000; | |||
|
176 | const RANGE_MASK_31BIT: u32 = 0x7FFF_FFFF; | |||
|
177 | ||||
|
178 | pub const MTIME_UNSET: i32 = -1; | |||
|
179 | ||||
|
180 | /// A `DirstateEntry` with a size of `-2` means that it was merged from the | |||
|
181 | /// other parent. This allows revert to pick the right status back during a | |||
|
182 | /// merge. | |||
|
183 | pub const SIZE_FROM_OTHER_PARENT: i32 = -2; | |||
|
184 | /// A special value used for internal representation of special case in | |||
|
185 | /// dirstate v1 format. | |||
|
186 | pub const SIZE_NON_NORMAL: i32 = -1; | |||
|
187 | ||||
|
188 | impl DirstateEntry { | |||
|
189 | pub fn from_v2_data( | |||
|
190 | wdir_tracked: bool, | |||
|
191 | p1_tracked: bool, | |||
|
192 | p2_info: bool, | |||
|
193 | mode_size: Option<(u32, u32)>, | |||
|
194 | mtime: Option<TruncatedTimestamp>, | |||
|
195 | fallback_exec: Option<bool>, | |||
|
196 | fallback_symlink: Option<bool>, | |||
|
197 | ) -> Self { | |||
|
198 | if let Some((mode, size)) = mode_size { | |||
|
199 | // TODO: return an error for out of range values? | |||
|
200 | assert!(mode & !RANGE_MASK_31BIT == 0); | |||
|
201 | assert!(size & !RANGE_MASK_31BIT == 0); | |||
|
202 | } | |||
|
203 | let mut flags = Flags::empty(); | |||
|
204 | flags.set(Flags::WDIR_TRACKED, wdir_tracked); | |||
|
205 | flags.set(Flags::P1_TRACKED, p1_tracked); | |||
|
206 | flags.set(Flags::P2_INFO, p2_info); | |||
|
207 | if let Some(exec) = fallback_exec { | |||
|
208 | flags.insert(Flags::HAS_FALLBACK_EXEC); | |||
|
209 | if exec { | |||
|
210 | flags.insert(Flags::FALLBACK_EXEC); | |||
|
211 | } | |||
|
212 | } | |||
|
213 | if let Some(exec) = fallback_symlink { | |||
|
214 | flags.insert(Flags::HAS_FALLBACK_SYMLINK); | |||
|
215 | if exec { | |||
|
216 | flags.insert(Flags::FALLBACK_SYMLINK); | |||
|
217 | } | |||
|
218 | } | |||
|
219 | Self { | |||
|
220 | flags, | |||
|
221 | mode_size, | |||
|
222 | mtime, | |||
|
223 | } | |||
|
224 | } | |||
|
225 | ||||
|
226 | pub fn from_v1_data( | |||
|
227 | state: EntryState, | |||
|
228 | mode: i32, | |||
|
229 | size: i32, | |||
|
230 | mtime: i32, | |||
|
231 | ) -> Self { | |||
|
232 | match state { | |||
|
233 | EntryState::Normal => { | |||
|
234 | if size == SIZE_FROM_OTHER_PARENT { | |||
|
235 | Self { | |||
|
236 | // might be missing P1_TRACKED | |||
|
237 | flags: Flags::WDIR_TRACKED | Flags::P2_INFO, | |||
|
238 | mode_size: None, | |||
|
239 | mtime: None, | |||
|
240 | } | |||
|
241 | } else if size == SIZE_NON_NORMAL { | |||
|
242 | Self { | |||
|
243 | flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED, | |||
|
244 | mode_size: None, | |||
|
245 | mtime: None, | |||
|
246 | } | |||
|
247 | } else if mtime == MTIME_UNSET { | |||
|
248 | // TODO: return an error for negative values? | |||
|
249 | let mode = u32::try_from(mode).unwrap(); | |||
|
250 | let size = u32::try_from(size).unwrap(); | |||
|
251 | Self { | |||
|
252 | flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED, | |||
|
253 | mode_size: Some((mode, size)), | |||
|
254 | mtime: None, | |||
|
255 | } | |||
|
256 | } else { | |||
|
257 | // TODO: return an error for negative values? | |||
|
258 | let mode = u32::try_from(mode).unwrap(); | |||
|
259 | let size = u32::try_from(size).unwrap(); | |||
|
260 | let mtime = u32::try_from(mtime).unwrap(); | |||
|
261 | let mtime = | |||
|
262 | TruncatedTimestamp::from_already_truncated(mtime, 0) | |||
|
263 | .unwrap(); | |||
|
264 | Self { | |||
|
265 | flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED, | |||
|
266 | mode_size: Some((mode, size)), | |||
|
267 | mtime: Some(mtime), | |||
|
268 | } | |||
|
269 | } | |||
|
270 | } | |||
|
271 | EntryState::Added => Self { | |||
|
272 | flags: Flags::WDIR_TRACKED, | |||
|
273 | mode_size: None, | |||
|
274 | mtime: None, | |||
|
275 | }, | |||
|
276 | EntryState::Removed => Self { | |||
|
277 | flags: if size == SIZE_NON_NORMAL { | |||
|
278 | Flags::P1_TRACKED | Flags::P2_INFO | |||
|
279 | } else if size == SIZE_FROM_OTHER_PARENT { | |||
|
280 | // We don’t know if P1_TRACKED should be set (file history) | |||
|
281 | Flags::P2_INFO | |||
|
282 | } else { | |||
|
283 | Flags::P1_TRACKED | |||
|
284 | }, | |||
|
285 | mode_size: None, | |||
|
286 | mtime: None, | |||
|
287 | }, | |||
|
288 | EntryState::Merged => Self { | |||
|
289 | flags: Flags::WDIR_TRACKED | |||
|
290 | | Flags::P1_TRACKED // might not be true because of rename ? | |||
|
291 | | Flags::P2_INFO, // might not be true because of rename ? | |||
|
292 | mode_size: None, | |||
|
293 | mtime: None, | |||
|
294 | }, | |||
|
295 | } | |||
|
296 | } | |||
|
297 | ||||
|
298 | /// Creates a new entry in "removed" state. | |||
|
299 | /// | |||
|
300 | /// `size` is expected to be zero, `SIZE_NON_NORMAL`, or | |||
|
301 | /// `SIZE_FROM_OTHER_PARENT` | |||
|
302 | pub fn new_removed(size: i32) -> Self { | |||
|
303 | Self::from_v1_data(EntryState::Removed, 0, size, 0) | |||
|
304 | } | |||
|
305 | ||||
|
306 | pub fn tracked(&self) -> bool { | |||
|
307 | self.flags.contains(Flags::WDIR_TRACKED) | |||
|
308 | } | |||
|
309 | ||||
|
310 | pub fn p1_tracked(&self) -> bool { | |||
|
311 | self.flags.contains(Flags::P1_TRACKED) | |||
|
312 | } | |||
|
313 | ||||
|
314 | fn in_either_parent(&self) -> bool { | |||
|
315 | self.flags.intersects(Flags::P1_TRACKED | Flags::P2_INFO) | |||
|
316 | } | |||
|
317 | ||||
|
318 | pub fn removed(&self) -> bool { | |||
|
319 | self.in_either_parent() && !self.flags.contains(Flags::WDIR_TRACKED) | |||
|
320 | } | |||
|
321 | ||||
|
322 | pub fn p2_info(&self) -> bool { | |||
|
323 | self.flags.contains(Flags::WDIR_TRACKED | Flags::P2_INFO) | |||
|
324 | } | |||
|
325 | ||||
|
326 | pub fn added(&self) -> bool { | |||
|
327 | self.flags.contains(Flags::WDIR_TRACKED) && !self.in_either_parent() | |||
|
328 | } | |||
|
329 | ||||
|
330 | pub fn maybe_clean(&self) -> bool { | |||
|
331 | if !self.flags.contains(Flags::WDIR_TRACKED) { | |||
|
332 | false | |||
|
333 | } else if !self.flags.contains(Flags::P1_TRACKED) { | |||
|
334 | false | |||
|
335 | } else if self.flags.contains(Flags::P2_INFO) { | |||
|
336 | false | |||
|
337 | } else { | |||
|
338 | true | |||
|
339 | } | |||
|
340 | } | |||
|
341 | ||||
|
342 | pub fn any_tracked(&self) -> bool { | |||
|
343 | self.flags.intersects( | |||
|
344 | Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO, | |||
|
345 | ) | |||
|
346 | } | |||
|
347 | ||||
|
348 | /// Returns `(wdir_tracked, p1_tracked, p2_info, mode_size, mtime)` | |||
|
349 | pub(crate) fn v2_data( | |||
|
350 | &self, | |||
|
351 | ) -> ( | |||
|
352 | bool, | |||
|
353 | bool, | |||
|
354 | bool, | |||
|
355 | Option<(u32, u32)>, | |||
|
356 | Option<TruncatedTimestamp>, | |||
|
357 | Option<bool>, | |||
|
358 | Option<bool>, | |||
|
359 | ) { | |||
|
360 | if !self.any_tracked() { | |||
|
361 | // TODO: return an Option instead? | |||
|
362 | panic!("Accessing v2_data of an untracked DirstateEntry") | |||
|
363 | } | |||
|
364 | let wdir_tracked = self.flags.contains(Flags::WDIR_TRACKED); | |||
|
365 | let p1_tracked = self.flags.contains(Flags::P1_TRACKED); | |||
|
366 | let p2_info = self.flags.contains(Flags::P2_INFO); | |||
|
367 | let mode_size = self.mode_size; | |||
|
368 | let mtime = self.mtime; | |||
|
369 | ( | |||
|
370 | wdir_tracked, | |||
|
371 | p1_tracked, | |||
|
372 | p2_info, | |||
|
373 | mode_size, | |||
|
374 | mtime, | |||
|
375 | self.get_fallback_exec(), | |||
|
376 | self.get_fallback_symlink(), | |||
|
377 | ) | |||
|
378 | } | |||
|
379 | ||||
|
380 | fn v1_state(&self) -> EntryState { | |||
|
381 | if !self.any_tracked() { | |||
|
382 | // TODO: return an Option instead? | |||
|
383 | panic!("Accessing v1_state of an untracked DirstateEntry") | |||
|
384 | } | |||
|
385 | if self.removed() { | |||
|
386 | EntryState::Removed | |||
|
387 | } else if self | |||
|
388 | .flags | |||
|
389 | .contains(Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO) | |||
|
390 | { | |||
|
391 | EntryState::Merged | |||
|
392 | } else if self.added() { | |||
|
393 | EntryState::Added | |||
|
394 | } else { | |||
|
395 | EntryState::Normal | |||
|
396 | } | |||
|
397 | } | |||
|
398 | ||||
|
399 | fn v1_mode(&self) -> i32 { | |||
|
400 | if let Some((mode, _size)) = self.mode_size { | |||
|
401 | i32::try_from(mode).unwrap() | |||
|
402 | } else { | |||
|
403 | 0 | |||
|
404 | } | |||
|
405 | } | |||
|
406 | ||||
|
407 | fn v1_size(&self) -> i32 { | |||
|
408 | if !self.any_tracked() { | |||
|
409 | // TODO: return an Option instead? | |||
|
410 | panic!("Accessing v1_size of an untracked DirstateEntry") | |||
|
411 | } | |||
|
412 | if self.removed() | |||
|
413 | && self.flags.contains(Flags::P1_TRACKED | Flags::P2_INFO) | |||
|
414 | { | |||
|
415 | SIZE_NON_NORMAL | |||
|
416 | } else if self.flags.contains(Flags::P2_INFO) { | |||
|
417 | SIZE_FROM_OTHER_PARENT | |||
|
418 | } else if self.removed() { | |||
|
419 | 0 | |||
|
420 | } else if self.added() { | |||
|
421 | SIZE_NON_NORMAL | |||
|
422 | } else if let Some((_mode, size)) = self.mode_size { | |||
|
423 | i32::try_from(size).unwrap() | |||
|
424 | } else { | |||
|
425 | SIZE_NON_NORMAL | |||
|
426 | } | |||
|
427 | } | |||
|
428 | ||||
|
429 | fn v1_mtime(&self) -> i32 { | |||
|
430 | if !self.any_tracked() { | |||
|
431 | // TODO: return an Option instead? | |||
|
432 | panic!("Accessing v1_mtime of an untracked DirstateEntry") | |||
|
433 | } | |||
|
434 | if self.removed() { | |||
|
435 | 0 | |||
|
436 | } else if self.flags.contains(Flags::P2_INFO) { | |||
|
437 | MTIME_UNSET | |||
|
438 | } else if !self.flags.contains(Flags::P1_TRACKED) { | |||
|
439 | MTIME_UNSET | |||
|
440 | } else if let Some(mtime) = self.mtime { | |||
|
441 | i32::try_from(mtime.truncated_seconds()).unwrap() | |||
|
442 | } else { | |||
|
443 | MTIME_UNSET | |||
|
444 | } | |||
|
445 | } | |||
|
446 | ||||
|
447 | // TODO: return `Option<EntryState>`? None when `!self.any_tracked` | |||
|
448 | pub fn state(&self) -> EntryState { | |||
|
449 | self.v1_state() | |||
|
450 | } | |||
|
451 | ||||
|
452 | // TODO: return Option? | |||
|
453 | pub fn mode(&self) -> i32 { | |||
|
454 | self.v1_mode() | |||
|
455 | } | |||
|
456 | ||||
|
457 | // TODO: return Option? | |||
|
458 | pub fn size(&self) -> i32 { | |||
|
459 | self.v1_size() | |||
|
460 | } | |||
|
461 | ||||
|
462 | // TODO: return Option? | |||
|
463 | pub fn mtime(&self) -> i32 { | |||
|
464 | self.v1_mtime() | |||
|
465 | } | |||
|
466 | ||||
|
467 | pub fn get_fallback_exec(&self) -> Option<bool> { | |||
|
468 | if self.flags.contains(Flags::HAS_FALLBACK_EXEC) { | |||
|
469 | Some(self.flags.contains(Flags::FALLBACK_EXEC)) | |||
|
470 | } else { | |||
|
471 | None | |||
|
472 | } | |||
|
473 | } | |||
|
474 | ||||
|
475 | pub fn set_fallback_exec(&mut self, value: Option<bool>) { | |||
|
476 | match value { | |||
|
477 | None => { | |||
|
478 | self.flags.remove(Flags::HAS_FALLBACK_EXEC); | |||
|
479 | self.flags.remove(Flags::FALLBACK_EXEC); | |||
|
480 | } | |||
|
481 | Some(exec) => { | |||
|
482 | self.flags.insert(Flags::HAS_FALLBACK_EXEC); | |||
|
483 | if exec { | |||
|
484 | self.flags.insert(Flags::FALLBACK_EXEC); | |||
|
485 | } | |||
|
486 | } | |||
|
487 | } | |||
|
488 | } | |||
|
489 | ||||
|
490 | pub fn get_fallback_symlink(&self) -> Option<bool> { | |||
|
491 | if self.flags.contains(Flags::HAS_FALLBACK_SYMLINK) { | |||
|
492 | Some(self.flags.contains(Flags::FALLBACK_SYMLINK)) | |||
|
493 | } else { | |||
|
494 | None | |||
|
495 | } | |||
|
496 | } | |||
|
497 | ||||
|
498 | pub fn set_fallback_symlink(&mut self, value: Option<bool>) { | |||
|
499 | match value { | |||
|
500 | None => { | |||
|
501 | self.flags.remove(Flags::HAS_FALLBACK_SYMLINK); | |||
|
502 | self.flags.remove(Flags::FALLBACK_SYMLINK); | |||
|
503 | } | |||
|
504 | Some(symlink) => { | |||
|
505 | self.flags.insert(Flags::HAS_FALLBACK_SYMLINK); | |||
|
506 | if symlink { | |||
|
507 | self.flags.insert(Flags::FALLBACK_SYMLINK); | |||
|
508 | } | |||
|
509 | } | |||
|
510 | } | |||
|
511 | } | |||
|
512 | ||||
|
513 | pub fn truncated_mtime(&self) -> Option<TruncatedTimestamp> { | |||
|
514 | self.mtime | |||
|
515 | } | |||
|
516 | ||||
|
517 | pub fn drop_merge_data(&mut self) { | |||
|
518 | if self.flags.contains(Flags::P2_INFO) { | |||
|
519 | self.flags.remove(Flags::P2_INFO); | |||
|
520 | self.mode_size = None; | |||
|
521 | self.mtime = None; | |||
|
522 | } | |||
|
523 | } | |||
|
524 | ||||
|
525 | pub fn set_possibly_dirty(&mut self) { | |||
|
526 | self.mtime = None | |||
|
527 | } | |||
|
528 | ||||
|
529 | pub fn set_clean( | |||
|
530 | &mut self, | |||
|
531 | mode: u32, | |||
|
532 | size: u32, | |||
|
533 | mtime: TruncatedTimestamp, | |||
|
534 | ) { | |||
|
535 | let size = size & RANGE_MASK_31BIT; | |||
|
536 | self.flags.insert(Flags::WDIR_TRACKED | Flags::P1_TRACKED); | |||
|
537 | self.mode_size = Some((mode, size)); | |||
|
538 | self.mtime = Some(mtime); | |||
|
539 | } | |||
|
540 | ||||
|
541 | pub fn set_tracked(&mut self) { | |||
|
542 | self.flags.insert(Flags::WDIR_TRACKED); | |||
|
543 | // `set_tracked` replaces various `normallookup` calls, so we mark | |||
|
544 | // the file as needing lookup | |||
|
545 | // | |||
|
546 | // Consider dropping this in the future in favor of something less | |||
|
547 | // broad. | |||
|
548 | self.mtime = None; | |||
|
549 | } | |||
|
550 | ||||
|
551 | pub fn set_untracked(&mut self) { | |||
|
552 | self.flags.remove(Flags::WDIR_TRACKED); | |||
|
553 | self.mode_size = None; | |||
|
554 | self.mtime = None; | |||
|
555 | } | |||
|
556 | ||||
|
557 | /// Returns `(state, mode, size, mtime)` for the purpose of serialization | |||
|
558 | /// in the dirstate-v1 format. | |||
|
559 | /// | |||
|
560 | /// This includes marker values such as `mtime == -1`. In the future we may | |||
|
561 | /// want to not represent these cases that way in memory, but serialization | |||
|
562 | /// will need to keep the same format. | |||
|
563 | pub fn v1_data(&self) -> (u8, i32, i32, i32) { | |||
|
564 | ( | |||
|
565 | self.v1_state().into(), | |||
|
566 | self.v1_mode(), | |||
|
567 | self.v1_size(), | |||
|
568 | self.v1_mtime(), | |||
|
569 | ) | |||
|
570 | } | |||
|
571 | ||||
|
572 | pub(crate) fn is_from_other_parent(&self) -> bool { | |||
|
573 | self.state() == EntryState::Normal | |||
|
574 | && self.size() == SIZE_FROM_OTHER_PARENT | |||
|
575 | } | |||
|
576 | ||||
|
577 | // TODO: other platforms | |||
|
578 | #[cfg(unix)] | |||
|
579 | pub fn mode_changed( | |||
|
580 | &self, | |||
|
581 | filesystem_metadata: &std::fs::Metadata, | |||
|
582 | ) -> bool { | |||
|
583 | use std::os::unix::fs::MetadataExt; | |||
|
584 | const EXEC_BIT_MASK: u32 = 0o100; | |||
|
585 | let dirstate_exec_bit = (self.mode() as u32) & EXEC_BIT_MASK; | |||
|
586 | let fs_exec_bit = filesystem_metadata.mode() & EXEC_BIT_MASK; | |||
|
587 | dirstate_exec_bit != fs_exec_bit | |||
|
588 | } | |||
|
589 | ||||
|
590 | /// Returns a `(state, mode, size, mtime)` tuple as for | |||
|
591 | /// `DirstateMapMethods::debug_iter`. | |||
|
592 | pub fn debug_tuple(&self) -> (u8, i32, i32, i32) { | |||
|
593 | (self.state().into(), self.mode(), self.size(), self.mtime()) | |||
|
594 | } | |||
|
595 | ||||
|
596 | /// True if the stored mtime would be ambiguous with the current time | |||
|
597 | pub fn need_delay(&self, now: TruncatedTimestamp) -> bool { | |||
|
598 | if let Some(mtime) = self.mtime { | |||
|
599 | self.state() == EntryState::Normal | |||
|
600 | && mtime.truncated_seconds() == now.truncated_seconds() | |||
|
601 | } else { | |||
|
602 | false | |||
|
603 | } | |||
|
604 | } | |||
|
605 | } | |||
|
606 | ||||
|
607 | impl EntryState { | |||
|
608 | pub fn is_tracked(self) -> bool { | |||
|
609 | use EntryState::*; | |||
|
610 | match self { | |||
|
611 | Normal | Added | Merged => true, | |||
|
612 | Removed => false, | |||
|
613 | } | |||
|
614 | } | |||
|
615 | } | |||
|
616 | ||||
|
617 | impl TryFrom<u8> for EntryState { | |||
|
618 | type Error = HgError; | |||
|
619 | ||||
|
620 | fn try_from(value: u8) -> Result<Self, Self::Error> { | |||
|
621 | match value { | |||
|
622 | b'n' => Ok(EntryState::Normal), | |||
|
623 | b'a' => Ok(EntryState::Added), | |||
|
624 | b'r' => Ok(EntryState::Removed), | |||
|
625 | b'm' => Ok(EntryState::Merged), | |||
|
626 | _ => Err(HgError::CorruptedRepository(format!( | |||
|
627 | "Incorrect dirstate entry state {}", | |||
|
628 | value | |||
|
629 | ))), | |||
|
630 | } | |||
|
631 | } | |||
|
632 | } | |||
|
633 | ||||
|
634 | impl Into<u8> for EntryState { | |||
|
635 | fn into(self) -> u8 { | |||
|
636 | match self { | |||
|
637 | EntryState::Normal => b'n', | |||
|
638 | EntryState::Added => b'a', | |||
|
639 | EntryState::Removed => b'r', | |||
|
640 | EntryState::Merged => b'm', | |||
|
641 | } | |||
|
642 | } | |||
|
643 | } |
@@ -37,9 +37,9 b' botocore==1.12.243 \\' | |||||
37 | --hash=sha256:397585a7881230274afb8d1877ef69a661b0a311745cd324f14a052fb2a2863a \ |
|
37 | --hash=sha256:397585a7881230274afb8d1877ef69a661b0a311745cd324f14a052fb2a2863a \ | |
38 | --hash=sha256:4496f8da89cb496462a831897ad248e13e431d9fa7e41e06d426fd6658ab6e59 \ |
|
38 | --hash=sha256:4496f8da89cb496462a831897ad248e13e431d9fa7e41e06d426fd6658ab6e59 \ | |
39 | # via boto3, s3transfer |
|
39 | # via boto3, s3transfer | |
40 | certifi==20 |
|
40 | certifi==2021.5.30 \ | |
41 | --hash=sha256:e4f3620cfea4f83eedc95b24abd9cd56f3c4b146dd0177e83a21b4eb49e21e50 \ |
|
41 | --hash=sha256:2bbf76fd432960138b3ef6dda3dde0544f27cbf8546c458e60baf371917ba9ee \ | |
42 | --hash=sha256:fd7c7c74727ddcf00e9acd26bba8da604ffec95bf1c2144e67aff7a8b50e6cef \ |
|
42 | --hash=sha256:50b1e4f8446b06f41be7dd6338db18e0990601dce795c2b1686458aa7e8fa7d8 \ | |
43 | # via requests |
|
43 | # via requests | |
44 | cffi==1.12.3 \ |
|
44 | cffi==1.12.3 \ | |
45 | --hash=sha256:041c81822e9f84b1d9c401182e174996f0bae9991f33725d059b771744290774 \ |
|
45 | --hash=sha256:041c81822e9f84b1d9c401182e174996f0bae9991f33725d059b771744290774 \ |
@@ -4,9 +4,9 b'' | |||||
4 | # |
|
4 | # | |
5 | # pip-compile --generate-hashes --output-file=contrib/packaging/requirements-windows-py2.txt contrib/packaging/requirements-windows.txt.in |
|
5 | # pip-compile --generate-hashes --output-file=contrib/packaging/requirements-windows-py2.txt contrib/packaging/requirements-windows.txt.in | |
6 | # |
|
6 | # | |
7 | certifi==202 |
|
7 | certifi==2021.5.30 \ | |
8 | --hash=sha256:5930595817496dd21bb8dc35dad090f1c2cd0adfaf21204bf6732ca5d8ee34d3 \ |
|
8 | --hash=sha256:2bbf76fd432960138b3ef6dda3dde0544f27cbf8546c458e60baf371917ba9ee \ | |
9 | --hash=sha256:8fc0819f1f30ba15bdb34cceffb9ef04d99f420f68eb75d901e9560b8749fc41 \ |
|
9 | --hash=sha256:50b1e4f8446b06f41be7dd6338db18e0990601dce795c2b1686458aa7e8fa7d8 \ | |
10 | # via dulwich |
|
10 | # via dulwich | |
11 | configparser==4.0.2 \ |
|
11 | configparser==4.0.2 \ | |
12 | --hash=sha256:254c1d9c79f60c45dfde850850883d5aaa7f19a23f13561243a050d5a7c3fe4c \ |
|
12 | --hash=sha256:254c1d9c79f60c45dfde850850883d5aaa7f19a23f13561243a050d5a7c3fe4c \ |
@@ -16,9 +16,9 b' cached-property==1.5.2 \\' | |||||
16 | --hash=sha256:9fa5755838eecbb2d234c3aa390bd80fbd3ac6b6869109bfc1b499f7bd89a130 \ |
|
16 | --hash=sha256:9fa5755838eecbb2d234c3aa390bd80fbd3ac6b6869109bfc1b499f7bd89a130 \ | |
17 | --hash=sha256:df4f613cf7ad9a588cc381aaf4a512d26265ecebd5eb9e1ba12f1319eb85a6a0 \ |
|
17 | --hash=sha256:df4f613cf7ad9a588cc381aaf4a512d26265ecebd5eb9e1ba12f1319eb85a6a0 \ | |
18 | # via pygit2 |
|
18 | # via pygit2 | |
19 | certifi==202 |
|
19 | certifi==2021.5.30 \ | |
20 | --hash=sha256:5930595817496dd21bb8dc35dad090f1c2cd0adfaf21204bf6732ca5d8ee34d3 \ |
|
20 | --hash=sha256:2bbf76fd432960138b3ef6dda3dde0544f27cbf8546c458e60baf371917ba9ee \ | |
21 | --hash=sha256:8fc0819f1f30ba15bdb34cceffb9ef04d99f420f68eb75d901e9560b8749fc41 \ |
|
21 | --hash=sha256:50b1e4f8446b06f41be7dd6338db18e0990601dce795c2b1686458aa7e8fa7d8 \ | |
22 | # via dulwich |
|
22 | # via dulwich | |
23 | cffi==1.14.4 \ |
|
23 | cffi==1.14.4 \ | |
24 | --hash=sha256:00a1ba5e2e95684448de9b89888ccd02c98d512064b4cb987d48f4b40aa0421e \ |
|
24 | --hash=sha256:00a1ba5e2e95684448de9b89888ccd02c98d512064b4cb987d48f4b40aa0421e \ |
@@ -57,10 +57,10 b' from mercurial import (' | |||||
57 | diffutil, |
|
57 | diffutil, | |
58 | error, |
|
58 | error, | |
59 | hg, |
|
59 | hg, | |
|
60 | logcmdutil, | |||
60 | patch, |
|
61 | patch, | |
61 | pycompat, |
|
62 | pycompat, | |
62 | registrar, |
|
63 | registrar, | |
63 | scmutil, |
|
|||
64 | ) |
|
64 | ) | |
65 | from mercurial.utils import dateutil |
|
65 | from mercurial.utils import dateutil | |
66 |
|
66 | |||
@@ -180,7 +180,7 b' def analyze(ui, repo, *revs, **opts):' | |||||
180 |
|
180 | |||
181 | # If a mercurial repo is available, also model the commit history. |
|
181 | # If a mercurial repo is available, also model the commit history. | |
182 | if repo: |
|
182 | if repo: | |
183 | revs = scmutil.revrange(repo, revs) |
|
183 | revs = logcmdutil.revrange(repo, revs) | |
184 | revs.sort() |
|
184 | revs.sort() | |
185 |
|
185 | |||
186 | progress = ui.makeprogress( |
|
186 | progress = ui.makeprogress( |
@@ -35,6 +35,7 b' from mercurial.node import short' | |||||
35 |
|
35 | |||
36 | from mercurial import ( |
|
36 | from mercurial import ( | |
37 | error, |
|
37 | error, | |
|
38 | logcmdutil, | |||
38 | registrar, |
|
39 | registrar, | |
39 | scmutil, |
|
40 | scmutil, | |
40 | ) |
|
41 | ) | |
@@ -84,7 +85,7 b" def _docensor(ui, repo, path, rev=b'', t" | |||||
84 | if not len(flog): |
|
85 | if not len(flog): | |
85 | raise error.Abort(_(b'cannot censor file with no history')) |
|
86 | raise error.Abort(_(b'cannot censor file with no history')) | |
86 |
|
87 | |||
87 | rev = scmutil.revsingle(repo, rev, rev).rev() |
|
88 | rev = logcmdutil.revsingle(repo, rev, rev).rev() | |
88 | try: |
|
89 | try: | |
89 | ctx = repo[rev] |
|
90 | ctx = repo[rev] | |
90 | except KeyError: |
|
91 | except KeyError: |
@@ -22,7 +22,6 b' from mercurial import (' | |||||
22 | logcmdutil, |
|
22 | logcmdutil, | |
23 | pycompat, |
|
23 | pycompat, | |
24 | registrar, |
|
24 | registrar, | |
25 | scmutil, |
|
|||
26 | ) |
|
25 | ) | |
27 |
|
26 | |||
28 | templateopts = cmdutil.templateopts |
|
27 | templateopts = cmdutil.templateopts | |
@@ -71,7 +70,7 b' def children(ui, repo, file_=None, **opt' | |||||
71 | """ |
|
70 | """ | |
72 | opts = pycompat.byteskwargs(opts) |
|
71 | opts = pycompat.byteskwargs(opts) | |
73 | rev = opts.get(b'rev') |
|
72 | rev = opts.get(b'rev') | |
74 | ctx = scmutil.revsingle(repo, rev) |
|
73 | ctx = logcmdutil.revsingle(repo, rev) | |
75 | if file_: |
|
74 | if file_: | |
76 | fctx = repo.filectx(file_, changeid=ctx.rev()) |
|
75 | fctx = repo.filectx(file_, changeid=ctx.rev()) | |
77 | childctxs = [fcctx.changectx() for fcctx in fctx.children()] |
|
76 | childctxs = [fcctx.changectx() for fcctx in fctx.children()] |
@@ -13,9 +13,9 b' from mercurial import (' | |||||
13 | cmdutil, |
|
13 | cmdutil, | |
14 | context, |
|
14 | context, | |
15 | error, |
|
15 | error, | |
|
16 | logcmdutil, | |||
16 | pycompat, |
|
17 | pycompat, | |
17 | registrar, |
|
18 | registrar, | |
18 | scmutil, |
|
|||
19 | ) |
|
19 | ) | |
20 |
|
20 | |||
21 | cmdtable = {} |
|
21 | cmdtable = {} | |
@@ -68,7 +68,7 b' def close_branch(ui, repo, *revs, **opts' | |||||
68 | opts = pycompat.byteskwargs(opts) |
|
68 | opts = pycompat.byteskwargs(opts) | |
69 |
|
69 | |||
70 | revs += tuple(opts.get(b'rev', [])) |
|
70 | revs += tuple(opts.get(b'rev', [])) | |
71 | revs = scmutil.revrange(repo, revs) |
|
71 | revs = logcmdutil.revrange(repo, revs) | |
72 |
|
72 | |||
73 | if not revs: |
|
73 | if not revs: | |
74 | raise error.Abort(_(b'no revisions specified')) |
|
74 | raise error.Abort(_(b'no revisions specified')) |
@@ -36,10 +36,10 b' from mercurial import (' | |||||
36 | exchange, |
|
36 | exchange, | |
37 | hg, |
|
37 | hg, | |
38 | lock as lockmod, |
|
38 | lock as lockmod, | |
|
39 | logcmdutil, | |||
39 | merge as mergemod, |
|
40 | merge as mergemod, | |
40 | phases, |
|
41 | phases, | |
41 | pycompat, |
|
42 | pycompat, | |
42 | scmutil, |
|
|||
43 | util, |
|
43 | util, | |
44 | ) |
|
44 | ) | |
45 | from mercurial.utils import dateutil |
|
45 | from mercurial.utils import dateutil | |
@@ -145,7 +145,7 b' class mercurial_sink(common.converter_si' | |||||
145 | _(b'pulling from %s into %s\n') % (pbranch, branch) |
|
145 | _(b'pulling from %s into %s\n') % (pbranch, branch) | |
146 | ) |
|
146 | ) | |
147 | exchange.pull( |
|
147 | exchange.pull( | |
148 | self.repo, prepo, [prepo.lookup(h) for h in heads] |
|
148 | self.repo, prepo, heads=[prepo.lookup(h) for h in heads] | |
149 | ) |
|
149 | ) | |
150 | self.before() |
|
150 | self.before() | |
151 |
|
151 | |||
@@ -564,7 +564,7 b' class mercurial_source(common.converter_' | |||||
564 | ) |
|
564 | ) | |
565 | nodes = set() |
|
565 | nodes = set() | |
566 | parents = set() |
|
566 | parents = set() | |
567 | for r in scmutil.revrange(self.repo, [hgrevs]): |
|
567 | for r in logcmdutil.revrange(self.repo, [hgrevs]): | |
568 | ctx = self.repo[r] |
|
568 | ctx = self.repo[r] | |
569 | nodes.add(ctx.node()) |
|
569 | nodes.add(ctx.node()) | |
570 | parents.update(p.node() for p in ctx.parents()) |
|
570 | parents.update(p.node() for p in ctx.parents()) |
@@ -423,7 +423,7 b' def reposetup(ui, repo):' | |||||
423 | try: |
|
423 | try: | |
424 | wlock = self.wlock() |
|
424 | wlock = self.wlock() | |
425 | for f in self.dirstate: |
|
425 | for f in self.dirstate: | |
426 | if self.dirstate[f] != b'n': |
|
426 | if not self.dirstate.get_entry(f).maybe_clean: | |
427 | continue |
|
427 | continue | |
428 | if oldeol is not None: |
|
428 | if oldeol is not None: | |
429 | if not oldeol.match(f) and not neweol.match(f): |
|
429 | if not oldeol.match(f) and not neweol.match(f): |
@@ -101,6 +101,7 b' from mercurial import (' | |||||
101 | error, |
|
101 | error, | |
102 | filemerge, |
|
102 | filemerge, | |
103 | formatter, |
|
103 | formatter, | |
|
104 | logcmdutil, | |||
104 | pycompat, |
|
105 | pycompat, | |
105 | registrar, |
|
106 | registrar, | |
106 | scmutil, |
|
107 | scmutil, | |
@@ -558,17 +559,17 b' def dodiff(ui, repo, cmdline, pats, opts' | |||||
558 | do3way = b'$parent2' in cmdline |
|
559 | do3way = b'$parent2' in cmdline | |
559 |
|
560 | |||
560 | if change: |
|
561 | if change: | |
561 | ctx2 = scmutil.revsingle(repo, change, None) |
|
562 | ctx2 = logcmdutil.revsingle(repo, change, None) | |
562 | ctx1a, ctx1b = ctx2.p1(), ctx2.p2() |
|
563 | ctx1a, ctx1b = ctx2.p1(), ctx2.p2() | |
563 | elif from_rev or to_rev: |
|
564 | elif from_rev or to_rev: | |
564 | repo = scmutil.unhidehashlikerevs( |
|
565 | repo = scmutil.unhidehashlikerevs( | |
565 | repo, [from_rev] + [to_rev], b'nowarn' |
|
566 | repo, [from_rev] + [to_rev], b'nowarn' | |
566 | ) |
|
567 | ) | |
567 | ctx1a = scmutil.revsingle(repo, from_rev, None) |
|
568 | ctx1a = logcmdutil.revsingle(repo, from_rev, None) | |
568 | ctx1b = repo[nullrev] |
|
569 | ctx1b = repo[nullrev] | |
569 | ctx2 = scmutil.revsingle(repo, to_rev, None) |
|
570 | ctx2 = logcmdutil.revsingle(repo, to_rev, None) | |
570 | else: |
|
571 | else: | |
571 | ctx1a, ctx2 = scmutil.revpair(repo, revs) |
|
572 | ctx1a, ctx2 = logcmdutil.revpair(repo, revs) | |
572 | if not revs: |
|
573 | if not revs: | |
573 | ctx1b = repo[None].p2() |
|
574 | ctx1b = repo[None].p2() | |
574 | else: |
|
575 | else: |
@@ -15,6 +15,7 b' from mercurial import (' | |||||
15 | encoding, |
|
15 | encoding, | |
16 | error, |
|
16 | error, | |
17 | extensions, |
|
17 | extensions, | |
|
18 | logcmdutil, | |||
18 | patch, |
|
19 | patch, | |
19 | pycompat, |
|
20 | pycompat, | |
20 | registrar, |
|
21 | registrar, | |
@@ -75,7 +76,7 b' def _matchpaths(repo, rev, pats, opts, a' | |||||
75 | def bad(x, y): |
|
76 | def bad(x, y): | |
76 | raise error.Abort(b"%s: %s" % (x, y)) |
|
77 | raise error.Abort(b"%s: %s" % (x, y)) | |
77 |
|
78 | |||
78 | ctx = scmutil.revsingle(repo, rev) |
|
79 | ctx = logcmdutil.revsingle(repo, rev) | |
79 | m = scmutil.match(ctx, pats, opts, badfn=bad) |
|
80 | m = scmutil.match(ctx, pats, opts, badfn=bad) | |
80 | for p in ctx.walk(m): |
|
81 | for p in ctx.walk(m): | |
81 | yield p |
|
82 | yield p | |
@@ -317,7 +318,7 b' def debugbuildannotatecache(ui, repo, *p' | |||||
317 | ) |
|
318 | ) | |
318 | if ui.configbool(b'fastannotate', b'unfilteredrepo'): |
|
319 | if ui.configbool(b'fastannotate', b'unfilteredrepo'): | |
319 | repo = repo.unfiltered() |
|
320 | repo = repo.unfiltered() | |
320 | ctx = scmutil.revsingle(repo, rev) |
|
321 | ctx = logcmdutil.revsingle(repo, rev) | |
321 | m = scmutil.match(ctx, pats, opts) |
|
322 | m = scmutil.match(ctx, pats, opts) | |
322 | paths = list(ctx.walk(m)) |
|
323 | paths = list(ctx.walk(m)) | |
323 | if util.safehasattr(repo, 'prefetchfastannotate'): |
|
324 | if util.safehasattr(repo, 'prefetchfastannotate'): |
@@ -140,12 +140,10 b' def peersetup(ui, peer):' | |||||
140 | def getannotate(self, path, lastnode=None): |
|
140 | def getannotate(self, path, lastnode=None): | |
141 | if not self.capable(b'getannotate'): |
|
141 | if not self.capable(b'getannotate'): | |
142 | ui.warn(_(b'remote peer cannot provide annotate cache\n')) |
|
142 | ui.warn(_(b'remote peer cannot provide annotate cache\n')) | |
143 | yield None, None |
|
143 | return None, None | |
144 | else: |
|
144 | else: | |
145 | args = {b'path': path, b'lastnode': lastnode or b''} |
|
145 | args = {b'path': path, b'lastnode': lastnode or b''} | |
146 | f = wireprotov1peer.future() |
|
146 | return args, _parseresponse | |
147 | yield args, f |
|
|||
148 | yield _parseresponse(f.value) |
|
|||
149 |
|
147 | |||
150 | peer.__class__ = fastannotatepeer |
|
148 | peer.__class__ = fastannotatepeer | |
151 |
|
149 |
@@ -15,6 +15,7 b' from mercurial.node import hex, nullrev' | |||||
15 | from mercurial.utils import stringutil |
|
15 | from mercurial.utils import stringutil | |
16 | from mercurial import ( |
|
16 | from mercurial import ( | |
17 | error, |
|
17 | error, | |
|
18 | logcmdutil, | |||
18 | pycompat, |
|
19 | pycompat, | |
19 | registrar, |
|
20 | registrar, | |
20 | scmutil, |
|
21 | scmutil, | |
@@ -182,7 +183,7 b' def fastexport(ui, repo, *revs, **opts):' | |||||
182 | if not revs: |
|
183 | if not revs: | |
183 | revs = scmutil.revrange(repo, [b":"]) |
|
184 | revs = scmutil.revrange(repo, [b":"]) | |
184 | else: |
|
185 | else: | |
185 | revs = scmutil.revrange(repo, revs) |
|
186 | revs = logcmdutil.revrange(repo, revs) | |
186 | if not revs: |
|
187 | if not revs: | |
187 | raise error.Abort(_(b"no revisions matched")) |
|
188 | raise error.Abort(_(b"no revisions matched")) | |
188 | authorfile = opts.get(b"authormap") |
|
189 | authorfile = opts.get(b"authormap") |
@@ -144,6 +144,7 b' from mercurial import (' | |||||
144 | context, |
|
144 | context, | |
145 | copies, |
|
145 | copies, | |
146 | error, |
|
146 | error, | |
|
147 | logcmdutil, | |||
147 | match as matchmod, |
|
148 | match as matchmod, | |
148 | mdiff, |
|
149 | mdiff, | |
149 | merge, |
|
150 | merge, | |
@@ -283,20 +284,29 b' def fix(ui, repo, *pats, **opts):' | |||||
283 | # There are no data dependencies between the workers fixing each file |
|
284 | # There are no data dependencies between the workers fixing each file | |
284 | # revision, so we can use all available parallelism. |
|
285 | # revision, so we can use all available parallelism. | |
285 | def getfixes(items): |
|
286 | def getfixes(items): | |
286 | for rev, path in items: |
|
287 | for srcrev, path, dstrevs in items: | |
287 | ctx = repo[rev] |
|
288 | ctx = repo[srcrev] | |
288 | olddata = ctx[path].data() |
|
289 | olddata = ctx[path].data() | |
289 | metadata, newdata = fixfile( |
|
290 | metadata, newdata = fixfile( | |
290 | ui, repo, opts, fixers, ctx, path, basepaths, basectxs[rev] |
|
291 | ui, | |
|
292 | repo, | |||
|
293 | opts, | |||
|
294 | fixers, | |||
|
295 | ctx, | |||
|
296 | path, | |||
|
297 | basepaths, | |||
|
298 | basectxs[srcrev], | |||
291 | ) |
|
299 | ) | |
292 | # Don't waste memory/time passing unchanged content back, but |
|
300 | # We ungroup the work items now, because the code that consumes | |
293 | # produce one result per item either way. |
|
301 | # these results has to handle each dstrev separately, and in | |
294 | yield ( |
|
302 | # topological order. Because these are handled in topological | |
295 | rev, |
|
303 | # order, it's important that we pass around references to | |
296 | path, |
|
304 | # "newdata" instead of copying it. Otherwise, we would be | |
297 | metadata, |
|
305 | # keeping more copies of file content in memory at a time than | |
298 | newdata if newdata != olddata else None, |
|
306 | # if we hadn't bothered to group/deduplicate the work items. | |
299 | ) |
|
307 | data = newdata if newdata != olddata else None | |
|
308 | for dstrev in dstrevs: | |||
|
309 | yield (dstrev, path, metadata, data) | |||
300 |
|
310 | |||
301 | results = worker.worker( |
|
311 | results = worker.worker( | |
302 | ui, 1.0, getfixes, tuple(), workqueue, threadsafe=False |
|
312 | ui, 1.0, getfixes, tuple(), workqueue, threadsafe=False | |
@@ -376,23 +386,32 b' def cleanup(repo, replacements, wdirwrit' | |||||
376 |
|
386 | |||
377 |
|
387 | |||
378 | def getworkqueue(ui, repo, pats, opts, revstofix, basectxs): |
|
388 | def getworkqueue(ui, repo, pats, opts, revstofix, basectxs): | |
379 | """Constructs |
|
389 | """Constructs a list of files to fix and which revisions each fix applies to | |
380 |
|
390 | |||
381 | It is up to the caller how to consume the work items, and the only |
|
391 | To avoid duplicating work, there is usually only one work item for each file | |
382 | dependence between them is that replacement revisions must be committed in |
|
392 | revision that might need to be fixed. There can be multiple work items per | |
383 | topological order. Each work item represents a file in the working copy or |
|
393 | file revision if the same file needs to be fixed in multiple changesets with | |
384 | in some revision that should be fixed and written back to the working copy |
|
394 | different baserevs. Each work item also contains a list of changesets where | |
385 | or into a replacement revision. |
|
395 | the file's data should be replaced with the fixed data. The work items for | |
|
396 | earlier changesets come earlier in the work queue, to improve pipelining by | |||
|
397 | allowing the first changeset to be replaced while fixes are still being | |||
|
398 | computed for later changesets. | |||
386 |
|
399 | |||
387 | Work items for the same revision are grouped together, so that a worker |
|
400 | Also returned is a map from changesets to the count of work items that might | |
388 | pool starting with the first N items in parallel is likely to finish the |
|
401 | affect each changeset. This is used later to count when all of a changeset's | |
389 | first revision's work before other revisions. This can allow us to write |
|
402 | work items have been finished, without having to inspect the remaining work | |
390 | the result to disk and reduce memory footprint. At time of writing, the |
|
403 | queue in each worker subprocess. | |
391 | partition strategy in worker.py seems favorable to this. We also sort the |
|
404 | ||
392 | items by ascending revision number to match the order in which we commit |
|
405 | The example work item (1, "foo/bar.txt", (1, 2, 3)) means that the data of | |
393 | the fixes later. |
|
406 | bar.txt should be read from revision 1, then fixed, and written back to | |
|
407 | revisions 1, 2 and 3. Revision 1 is called the "srcrev" and the list of | |||
|
408 | revisions is called the "dstrevs". In practice the srcrev is always one of | |||
|
409 | the dstrevs, and we make that choice when constructing the work item so that | |||
|
410 | the choice can't be made inconsistently later on. The dstrevs should all | |||
|
411 | have the same file revision for the given path, so the choice of srcrev is | |||
|
412 | arbitrary. The wdirrev can be a dstrev and a srcrev. | |||
394 | """ |
|
413 | """ | |
395 | workqueue = [] |
|
414 | dstrevmap = collections.defaultdict(list) | |
|
415 | numitems = collections.defaultdict(int) | |
397 | maxfilesize = ui.configbytes(b'fix', b'maxfilesize') |
|
416 | maxfilesize = ui.configbytes(b'fix', b'maxfilesize') | |
398 | for rev in sorted(revstofix): |
|
417 | for rev in sorted(revstofix): | |
@@ -410,8 +429,21 b' def getworkqueue(ui, repo, pats, opts, r' | |||||
410 | % (util.bytecount(maxfilesize), path) |
|
429 | % (util.bytecount(maxfilesize), path) | |
411 | ) |
|
430 | ) | |
412 | continue |
|
431 | continue | |
413 | workqueue.append((rev, path)) |
|
432 | baserevs = tuple(ctx.rev() for ctx in basectxs[rev]) | |
433 | dstrevmap[(fctx.filerev(), baserevs, path)].append(rev) | |||
414 | numitems[rev] += 1 |
|
434 | numitems[rev] += 1 | |
|
435 | workqueue = [ | |||
|
436 | (min(dstrevs), path, dstrevs) | |||
|
437 | for (_filerev, _baserevs, path), dstrevs in dstrevmap.items() | |||
|
438 | ] | |||
|
439 | # Move work items for earlier changesets to the front of the queue, so we | |||
|
440 | # might be able to replace those changesets (in topological order) while | |||
|
441 | # we're still processing later work items. Note the min() in the previous | |||
|
442 | # expression, which means we don't need a custom comparator here. The path | |||
|
443 | # is also important in the sort order to make the output order stable. There | |||
|
444 | # are some situations where this doesn't help much, but some situations | |||
|
445 | # where it lets us buffer O(1) files instead of O(n) files. | |||
|
446 | workqueue.sort() | |||
415 | return workqueue, numitems |
|
447 | return workqueue, numitems | |
416 |
|
448 | |||
417 |
|
449 | |||
@@ -420,7 +452,7 b' def getrevstofix(ui, repo, opts):' | |||||
420 | if opts[b'all']: |
|
452 | if opts[b'all']: | |
421 | revs = repo.revs(b'(not public() and not obsolete()) or wdir()') |
|
453 | revs = repo.revs(b'(not public() and not obsolete()) or wdir()') | |
422 | elif opts[b'source']: |
|
454 | elif opts[b'source']: | |
423 | source_revs = scmutil.revrange(repo, opts[b'source']) |
|
455 | source_revs = logcmdutil.revrange(repo, opts[b'source']) | |
424 | revs = set(repo.revs(b'(%ld::) - obsolete()', source_revs)) |
|
456 | revs = set(repo.revs(b'(%ld::) - obsolete()', source_revs)) | |
425 | if wdirrev in source_revs: |
|
457 | if wdirrev in source_revs: | |
426 | # `wdir()::` is currently empty, so manually add wdir |
|
458 | # `wdir()::` is currently empty, so manually add wdir | |
@@ -428,7 +460,7 b' def getrevstofix(ui, repo, opts):' | |||||
428 | if repo[b'.'].rev() in revs: |
|
460 | if repo[b'.'].rev() in revs: | |
429 | revs.add(wdirrev) |
|
461 | revs.add(wdirrev) | |
430 | else: |
|
462 | else: | |
431 | revs = set(scmutil.revrange(repo, opts[b'rev'])) |
|
463 | revs = set(logcmdutil.revrange(repo, opts[b'rev'])) | |
432 | if opts.get(b'working_dir'): |
|
464 | if opts.get(b'working_dir'): | |
433 | revs.add(wdirrev) |
|
465 | revs.add(wdirrev) | |
434 | for rev in revs: |
|
466 | for rev in revs: | |
@@ -516,9 +548,9 b' def getbasepaths(repo, opts, workqueue, ' | |||||
516 | return {} |
|
548 | return {} | |
517 |
|
549 | |||
518 | basepaths = {} |
|
550 | basepaths = {} | |
519 | for rev, path in workqueue: |
|
551 | for srcrev, path, _dstrevs in workqueue: | |
520 | fixctx = repo[rev] |
|
552 | fixctx = repo[srcrev] | |
521 | for basectx in basectxs[rev]: |
|
553 | for basectx in basectxs[srcrev]: | |
522 | basepath = copies.pathcopies(basectx, fixctx).get(path, path) |
|
554 | basepath = copies.pathcopies(basectx, fixctx).get(path, path) | |
523 | if basepath in basectx: |
|
555 | if basepath in basectx: | |
524 | basepaths[(basectx.rev(), fixctx.rev(), path)] = basepath |
|
556 | basepaths[(basectx.rev(), fixctx.rev(), path)] = basepath | |
@@ -618,7 +650,7 b' def getbasectxs(repo, opts, revstofix):' | |||||
618 | # The --base flag overrides the usual logic, and we give every revision |
|
650 | # The --base flag overrides the usual logic, and we give every revision | |
619 | # exactly the set of baserevs that the user specified. |
|
651 | # exactly the set of baserevs that the user specified. | |
620 | if opts.get(b'base'): |
|
652 | if opts.get(b'base'): | |
621 | baserevs = set(scmutil.revrange(repo, opts.get(b'base'))) |
|
653 | baserevs = set(logcmdutil.revrange(repo, opts.get(b'base'))) | |
622 | if not baserevs: |
|
654 | if not baserevs: | |
623 | baserevs = {nullrev} |
|
655 | baserevs = {nullrev} | |
624 | basectxs = {repo[rev] for rev in baserevs} |
|
656 | basectxs = {repo[rev] for rev in baserevs} | |
@@ -641,10 +673,10 b' def _prefetchfiles(repo, workqueue, base' | |||||
641 | toprefetch = set() |
|
673 | toprefetch = set() | |
642 |
|
674 | |||
643 | # Prefetch the files that will be fixed. |
|
675 | # Prefetch the files that will be fixed. | |
644 | for rev, path in workqueue: |
|
676 | for srcrev, path, _dstrevs in workqueue: | |
645 | if rev == wdirrev: |
|
677 | if srcrev == wdirrev: | |
646 | continue |
|
678 | continue | |
647 | toprefetch.add((rev, path)) |
|
679 | toprefetch.add((srcrev, path)) | |
648 |
|
680 | |||
649 | # Prefetch the base contents for lineranges(). |
|
681 | # Prefetch the base contents for lineranges(). | |
650 | for (baserev, fixrev, path), basepath in basepaths.items(): |
|
682 | for (baserev, fixrev, path), basepath in basepaths.items(): |
@@ -333,7 +333,11 b' def overridewalk(orig, self, match, subr' | |||||
333 | # for better performance, directly access the inner dirstate map if the |
|
333 | # for better performance, directly access the inner dirstate map if the | |
334 | # standard dirstate implementation is in use. |
|
334 | # standard dirstate implementation is in use. | |
335 | dmap = dmap._map |
|
335 | dmap = dmap._map | |
336 | nonnormalset = self._map.nonnormalset |
|
336 | nonnormalset = { | |
|
337 | f | |||
|
338 | for f, e in self._map.items() | |||
|
339 | if e.v1_state() != "n" or e.v1_mtime() == -1 | |||
|
340 | } | |||
337 |
|
341 | |||
338 | copymap = self._map.copymap |
|
342 | copymap = self._map.copymap | |
339 | getkind = stat.S_IFMT |
|
343 | getkind = stat.S_IFMT | |
@@ -560,8 +564,8 b' def overridestatus(' | |||||
560 | for i, (s1, s2) in enumerate(zip(l1, l2)): |
|
564 | for i, (s1, s2) in enumerate(zip(l1, l2)): | |
561 | if set(s1) != set(s2): |
|
565 | if set(s1) != set(s2): | |
562 | f.write(b'sets at position %d are unequal\n' % i) |
|
566 | f.write(b'sets at position %d are unequal\n' % i) | |
563 | f.write(b'watchman returned: %s\n' % s1) |
|
567 | f.write(b'watchman returned: %r\n' % s1) | |
564 | f.write(b'stat returned: %s\n' % s2) |
|
568 | f.write(b'stat returned: %r\n' % s2) | |
565 | finally: |
|
569 | finally: | |
566 | f.close() |
|
570 | f.close() | |
567 |
|
571 |
@@ -282,6 +282,11 b' configitem(' | |||||
282 | default=None, |
|
282 | default=None, | |
283 | ) |
|
283 | ) | |
284 | configitem(b'histedit', b'summary-template', default=b'{rev} {desc|firstline}') |
|
284 | configitem(b'histedit', b'summary-template', default=b'{rev} {desc|firstline}') | |
|
285 | # TODO: Teach the text-based histedit interface to respect this config option | |||
|
286 | # before we make it non-experimental. | |||
|
287 | configitem( | |||
|
288 | b'histedit', b'later-commits-first', default=False, experimental=True | |||
|
289 | ) | |||
285 |
|
290 | |||
286 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
291 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for | |
287 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
292 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should | |
@@ -749,7 +754,7 b' def _isdirtywc(repo):' | |||||
749 |
|
754 | |||
750 |
|
755 | |||
751 | def abortdirty(): |
|
756 | def abortdirty(): | |
752 | raise error.Abort( |
|
757 | raise error.StateError( | |
753 | _(b'working copy has pending changes'), |
|
758 | _(b'working copy has pending changes'), | |
754 | hint=_( |
|
759 | hint=_( | |
755 | b'amend, commit, or revert them and run histedit ' |
|
760 | b'amend, commit, or revert them and run histedit ' | |
@@ -1052,12 +1057,12 b' def findoutgoing(ui, repo, remote=None, ' | |||||
1052 |
|
1057 | |||
1053 | outgoing = discovery.findcommonoutgoing(repo, other, revs, force=force) |
|
1058 | outgoing = discovery.findcommonoutgoing(repo, other, revs, force=force) | |
1054 | if not outgoing.missing: |
|
1059 | if not outgoing.missing: | |
1055 | raise error.Abort(_(b'no outgoing ancestors')) |
|
1060 | raise error.StateError(_(b'no outgoing ancestors')) | |
1056 | roots = list(repo.revs(b"roots(%ln)", outgoing.missing)) |
|
1061 | roots = list(repo.revs(b"roots(%ln)", outgoing.missing)) | |
1057 | if len(roots) > 1: |
|
1062 | if len(roots) > 1: | |
1058 | msg = _(b'there are ambiguous outgoing revisions') |
|
1063 | msg = _(b'there are ambiguous outgoing revisions') | |
1059 | hint = _(b"see 'hg help histedit' for more detail") |
|
1064 | hint = _(b"see 'hg help histedit' for more detail") | |
1060 | raise error.Abort(msg, hint=hint) |
|
1065 | raise error.StateError(msg, hint=hint) | |
1061 | return repo[roots[0]].node() |
|
1066 | return repo[roots[0]].node() | |
1062 |
|
1067 | |||
1063 |
|
1068 | |||
@@ -1193,166 +1198,6 b' class histeditrule(object):'
         return self.conflicts
 
 
-# ============ EVENTS ===============
-def movecursor(state, oldpos, newpos):
-    """Change the rule/changeset that the cursor is pointing to, regardless of
-    current mode (you can switch between patches from the view patch window)."""
-    state[b'pos'] = newpos
-
-    mode, _ = state[b'mode']
-    if mode == MODE_RULES:
-        # Scroll through the list by updating the view for MODE_RULES, so that
-        # even if we are not currently viewing the rules, switching back will
-        # result in the cursor's rule being visible.
-        modestate = state[b'modes'][MODE_RULES]
-        if newpos < modestate[b'line_offset']:
-            modestate[b'line_offset'] = newpos
-        elif newpos > modestate[b'line_offset'] + state[b'page_height'] - 1:
-            modestate[b'line_offset'] = newpos - state[b'page_height'] + 1
-
-    # Reset the patch view region to the top of the new patch.
-    state[b'modes'][MODE_PATCH][b'line_offset'] = 0
-
-
-def changemode(state, mode):
-    curmode, _ = state[b'mode']
-    state[b'mode'] = (mode, curmode)
-    if mode == MODE_PATCH:
-        state[b'modes'][MODE_PATCH][b'patchcontents'] = patchcontents(state)
-
-
-def makeselection(state, pos):
-    state[b'selected'] = pos
-
-
-def swap(state, oldpos, newpos):
-    """Swap two positions and calculate necessary conflicts in
-    O(|newpos-oldpos|) time"""
-
-    rules = state[b'rules']
-    assert 0 <= oldpos < len(rules) and 0 <= newpos < len(rules)
-
-    rules[oldpos], rules[newpos] = rules[newpos], rules[oldpos]
-
-    # TODO: swap should not know about histeditrule's internals
-    rules[newpos].pos = newpos
-    rules[oldpos].pos = oldpos
-
-    start = min(oldpos, newpos)
-    end = max(oldpos, newpos)
-    for r in pycompat.xrange(start, end + 1):
-        rules[newpos].checkconflicts(rules[r])
-        rules[oldpos].checkconflicts(rules[r])
-
-    if state[b'selected']:
-        makeselection(state, newpos)
-
-
-def changeaction(state, pos, action):
-    """Change the action state on the given position to the new action"""
-    rules = state[b'rules']
-    assert 0 <= pos < len(rules)
-    rules[pos].action = action
-
-
-def cycleaction(state, pos, next=False):
-    """Changes the action state the next or the previous action from
-    the action list"""
-    rules = state[b'rules']
-    assert 0 <= pos < len(rules)
-    current = rules[pos].action
-
-    assert current in KEY_LIST
-
-    index = KEY_LIST.index(current)
-    if next:
-        index += 1
-    else:
-        index -= 1
-    changeaction(state, pos, KEY_LIST[index % len(KEY_LIST)])
-
-
-def changeview(state, delta, unit):
-    """Change the region of whatever is being viewed (a patch or the list of
-    changesets). 'delta' is an amount (+/- 1) and 'unit' is 'page' or 'line'."""
-    mode, _ = state[b'mode']
-    if mode != MODE_PATCH:
-        return
-    mode_state = state[b'modes'][mode]
-    num_lines = len(mode_state[b'patchcontents'])
-    page_height = state[b'page_height']
-    unit = page_height if unit == b'page' else 1
-    num_pages = 1 + (num_lines - 1) // page_height
-    max_offset = (num_pages - 1) * page_height
-    newline = mode_state[b'line_offset'] + delta * unit
-    mode_state[b'line_offset'] = max(0, min(max_offset, newline))
-
-
-def event(state, ch):
-    """Change state based on the current character input
-
-    This takes the current state and based on the current character input from
-    the user we change the state.
-    """
-    selected = state[b'selected']
-    oldpos = state[b'pos']
-    rules = state[b'rules']
-
-    if ch in (curses.KEY_RESIZE, b"KEY_RESIZE"):
-        return E_RESIZE
-
-    lookup_ch = ch
-    if ch is not None and b'0' <= ch <= b'9':
-        lookup_ch = b'0'
-
-    curmode, prevmode = state[b'mode']
-    action = KEYTABLE[curmode].get(
-        lookup_ch, KEYTABLE[b'global'].get(lookup_ch)
-    )
-    if action is None:
-        return
-    if action in (b'down', b'move-down'):
-        newpos = min(oldpos + 1, len(rules) - 1)
-        movecursor(state, oldpos, newpos)
-        if selected is not None or action == b'move-down':
-            swap(state, oldpos, newpos)
-    elif action in (b'up', b'move-up'):
-        newpos = max(0, oldpos - 1)
-        movecursor(state, oldpos, newpos)
-        if selected is not None or action == b'move-up':
-            swap(state, oldpos, newpos)
-    elif action == b'next-action':
-        cycleaction(state, oldpos, next=True)
-    elif action == b'prev-action':
-        cycleaction(state, oldpos, next=False)
-    elif action == b'select':
-        selected = oldpos if selected is None else None
-        makeselection(state, selected)
-    elif action == b'goto' and int(ch) < len(rules) and len(rules) <= 10:
-        newrule = next((r for r in rules if r.origpos == int(ch)))
-        movecursor(state, oldpos, newrule.pos)
-        if selected is not None:
-            swap(state, oldpos, newrule.pos)
-    elif action.startswith(b'action-'):
-        changeaction(state, oldpos, action[7:])
-    elif action == b'showpatch':
-        changemode(state, MODE_PATCH if curmode != MODE_PATCH else prevmode)
-    elif action == b'help':
-        changemode(state, MODE_HELP if curmode != MODE_HELP else prevmode)
-    elif action == b'quit':
-        return E_QUIT
-    elif action == b'histedit':
-        return E_HISTEDIT
-    elif action == b'page-down':
-        return E_PAGEDOWN
-    elif action == b'page-up':
-        return E_PAGEUP
-    elif action == b'line-down':
-        return E_LINEDOWN
-    elif action == b'line-up':
-        return E_LINEUP
-
-
 def makecommands(rules):
     """Returns a list of commands consumable by histedit --commands based on
     our list of rules"""
@@ -1390,52 +1235,38 b' def _trunc_tail(line, n):'
     return line[: n - 2] + b' >'
 
 
-def patchcontents(state):
-    repo = state[b'repo']
-    rule = state[b'rules'][state[b'pos']]
-    displayer = logcmdutil.changesetdisplayer(
-        repo.ui, repo, {b"patch": True, b"template": b"status"}, buffered=True
-    )
-    overrides = {(b'ui', b'verbose'): True}
-    with repo.ui.configoverride(overrides, source=b'histedit'):
-        displayer.show(rule.ctx)
-        displayer.close()
-    return displayer.hunk[rule.ctx.rev()].splitlines()
-
-
-def _chisteditmain(repo, rules, stdscr):
-    try:
-        curses.use_default_colors()
-    except curses.error:
-        pass
-
-    # initialize color pattern
-    curses.init_pair(COLOR_HELP, curses.COLOR_WHITE, curses.COLOR_BLUE)
-    curses.init_pair(COLOR_SELECTED, curses.COLOR_BLACK, curses.COLOR_WHITE)
-    curses.init_pair(COLOR_WARN, curses.COLOR_BLACK, curses.COLOR_YELLOW)
-    curses.init_pair(COLOR_OK, curses.COLOR_BLACK, curses.COLOR_GREEN)
-    curses.init_pair(COLOR_CURRENT, curses.COLOR_WHITE, curses.COLOR_MAGENTA)
-    curses.init_pair(COLOR_DIFF_ADD_LINE, curses.COLOR_GREEN, -1)
-    curses.init_pair(COLOR_DIFF_DEL_LINE, curses.COLOR_RED, -1)
-    curses.init_pair(COLOR_DIFF_OFFSET, curses.COLOR_MAGENTA, -1)
-    curses.init_pair(COLOR_ROLL, curses.COLOR_RED, -1)
-    curses.init_pair(
-        COLOR_ROLL_CURRENT, curses.COLOR_BLACK, curses.COLOR_MAGENTA
-    )
-    curses.init_pair(COLOR_ROLL_SELECTED, curses.COLOR_RED, curses.COLOR_WHITE)
-
-    # don't display the cursor
-    try:
-        curses.curs_set(0)
-    except curses.error:
-        pass
-
-    def rendercommit(win, state):
+class _chistedit_state(object):
+    def __init__(
+        self,
+        repo,
+        rules,
+        stdscr,
+    ):
+        self.repo = repo
+        self.rules = rules
+        self.stdscr = stdscr
+        self.later_on_top = repo.ui.configbool(
+            b'histedit', b'later-commits-first'
+        )
+        # The current item in display order, initialized to point to the top
+        # of the screen.
+        self.pos = 0
+        self.selected = None
+        self.mode = (MODE_INIT, MODE_INIT)
+        self.page_height = None
+        self.modes = {
+            MODE_RULES: {
+                b'line_offset': 0,
+            },
+            MODE_PATCH: {
+                b'line_offset': 0,
+            },
+        }
+
+    def render_commit(self, win):
         """Renders the commit window that shows the log of the current selected
         commit"""
-        pos = state[b'pos']
-        rules = state[b'rules']
-        rule = rules[pos]
+        rule = self.rules[self.display_pos_to_rule_pos(self.pos)]
 
         ctx = rule.ctx
         win.box()
@@ -1449,7 +1280,7 b' def _chisteditmain(repo, rules, stdscr):'
         line = b"user: %s" % ctx.user()
         win.addstr(2, 1, line[:length])
 
-        bms = repo.nodebookmarks(ctx.node())
+        bms = self.repo.nodebookmarks(ctx.node())
         line = b"bookmark: %s" % b' '.join(bms)
         win.addstr(3, 1, line[:length])
 
@@ -1481,8 +1312,8 b' def _chisteditmain(repo, rules, stdscr):'
             win.addstr(y, 1, conflictstr[:length])
         win.noutrefresh()
 
-    def helplines(mode):
-        if mode == MODE_PATCH:
+    def helplines(self):
+        if self.mode[0] == MODE_PATCH:
             help = b"""\
 ?: help, k/up: line up, j/down: line down, v: stop viewing patch
 pgup: prev page, space/pgdn: next page, c: commit, q: abort
@@ -1495,40 +1326,70 b' pgup/K: move patch up, pgdn/J: move patc'
 """
         return help.splitlines()
 
-    def renderhelp(win, state):
+    def render_help(self, win):
         maxy, maxx = win.getmaxyx()
-        mode, _ = state[b'mode']
-        for y, line in enumerate(helplines(mode)):
+        for y, line in enumerate(self.helplines()):
             if y >= maxy:
                 break
             addln(win, y, 0, line, curses.color_pair(COLOR_HELP))
         win.noutrefresh()
 
-    def renderrules(rulesscr, state):
-        rules = state[b'rules']
-        pos = state[b'pos']
-        selected = state[b'selected']
-        start = state[b'modes'][MODE_RULES][b'line_offset']
-
-        conflicts = [r.ctx for r in rules if r.conflicts]
+    def layout(self):
+        maxy, maxx = self.stdscr.getmaxyx()
+        helplen = len(self.helplines())
+        mainlen = maxy - helplen - 12
+        if mainlen < 1:
+            raise error.Abort(
+                _(b"terminal dimensions %d by %d too small for curses histedit")
+                % (maxy, maxx),
+                hint=_(
+                    b"enlarge your terminal or use --config ui.interface=text"
+                ),
+            )
+        return {
+            b'commit': (12, maxx),
+            b'help': (helplen, maxx),
+            b'main': (mainlen, maxx),
+        }
+
+    def display_pos_to_rule_pos(self, display_pos):
+        """Converts a position in display order to rule order.
+
+        The `display_pos` is the order from the top in display order, not
+        considering which items are currently visible on the screen. Thus,
+        `display_pos=0` is the item at the top (possibly after scrolling to
+        the top)
+        """
+        if self.later_on_top:
+            return len(self.rules) - 1 - display_pos
+        else:
+            return display_pos
+
+    def render_rules(self, rulesscr):
+        start = self.modes[MODE_RULES][b'line_offset']
+
+        conflicts = [r.ctx for r in self.rules if r.conflicts]
         if len(conflicts) > 0:
             line = b"potential conflict in %s" % b','.join(
                 map(pycompat.bytestr, conflicts)
             )
             addln(rulesscr, -1, 0, line, curses.color_pair(COLOR_WARN))
 
-        for y, rule in enumerate(rules[start:]):
-            if y >= state[b'page_height']:
-                break
+        for display_pos in range(start, len(self.rules)):
+            y = display_pos - start
+            if y < 0 or y >= self.page_height:
+                continue
+            rule_pos = self.display_pos_to_rule_pos(display_pos)
+            rule = self.rules[rule_pos]
             if len(rule.conflicts) > 0:
                 rulesscr.addstr(y, 0, b" ", curses.color_pair(COLOR_WARN))
             else:
                 rulesscr.addstr(y, 0, b" ", curses.COLOR_BLACK)
 
-            if y + start == selected:
+            if display_pos == self.selected:
                 rollcolor = COLOR_ROLL_SELECTED
                 addln(rulesscr, y, 2, rule, curses.color_pair(COLOR_SELECTED))
-            elif y + start == pos:
+            elif display_pos == self.pos:
                 rollcolor = COLOR_ROLL_CURRENT
                 addln(
                     rulesscr,
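The new `display_pos_to_rule_pos` above is the single point where screen order meets rule order when `later-commits-first` is set. The same arithmetic, restated as a standalone hypothetical helper to make the reversal explicit:

    def display_pos_to_rule_pos(display_pos, num_rules, later_on_top):
        """Map a screen row (0 = top) to an index into the rules list."""
        if later_on_top:
            # Reversed presentation: the top row shows the last rule.
            return num_rules - 1 - display_pos
        return display_pos

    # The reversal is its own inverse, so the same helper converts both ways.
    assert display_pos_to_rule_pos(0, 5, later_on_top=True) == 4
    assert display_pos_to_rule_pos(4, 5, later_on_top=True) == 0
    assert display_pos_to_rule_pos(2, 5, later_on_top=False) == 2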
@@ -1551,7 +1412,7 b' pgup/K: move patch up, pgdn/J: move patc'
 
         rulesscr.noutrefresh()
 
-    def renderstring(win, output, diffcolors=False):
+    def render_string(self, win, output, diffcolors=False):
         maxy, maxx = win.getmaxyx()
         length = min(maxy - 1, len(output))
         for y in range(0, length):
@@ -1573,77 +1434,239 b' pgup/K: move patch up, pgdn/J: move patc'
             win.addstr(y, 0, line)
         win.noutrefresh()
 
-    def renderpatch(win, state):
-        start = state[b'modes'][MODE_PATCH][b'line_offset']
-        content = state[b'modes'][MODE_PATCH][b'patchcontents']
-        renderstring(win, content[start:], diffcolors=True)
+    def render_patch(self, win):
+        start = self.modes[MODE_PATCH][b'line_offset']
+        content = self.modes[MODE_PATCH][b'patchcontents']
+        self.render_string(win, content[start:], diffcolors=True)
 
-    def layout(mode):
-        maxy, maxx = stdscr.getmaxyx()
-        helplen = len(helplines(mode))
-        mainlen = maxy - helplen - 12
-        if mainlen < 1:
-            raise error.Abort(
-                _(b"terminal dimensions %d by %d too small for curses histedit")
-                % (maxy, maxx),
-                hint=_(
-                    b"enlarge your terminal or use --config ui.interface=text"
-                ),
-            )
-        return {
-            b'commit': (12, maxx),
-            b'help': (helplen, maxx),
-            b'main': (mainlen, maxx),
-        }
+    def event(self, ch):
+        """Change state based on the current character input
+
+        This takes the current state and based on the current character input from
+        the user we change the state.
+        """
+        oldpos = self.pos
+
+        if ch in (curses.KEY_RESIZE, b"KEY_RESIZE"):
+            return E_RESIZE
+
+        lookup_ch = ch
+        if ch is not None and b'0' <= ch <= b'9':
+            lookup_ch = b'0'
+
+        curmode, prevmode = self.mode
+        action = KEYTABLE[curmode].get(
+            lookup_ch, KEYTABLE[b'global'].get(lookup_ch)
+        )
+        if action is None:
+            return
+        if action in (b'down', b'move-down'):
+            newpos = min(oldpos + 1, len(self.rules) - 1)
+            self.move_cursor(oldpos, newpos)
+            if self.selected is not None or action == b'move-down':
+                self.swap(oldpos, newpos)
+        elif action in (b'up', b'move-up'):
+            newpos = max(0, oldpos - 1)
+            self.move_cursor(oldpos, newpos)
+            if self.selected is not None or action == b'move-up':
+                self.swap(oldpos, newpos)
+        elif action == b'next-action':
+            self.cycle_action(oldpos, next=True)
+        elif action == b'prev-action':
+            self.cycle_action(oldpos, next=False)
+        elif action == b'select':
+            self.selected = oldpos if self.selected is None else None
+            self.make_selection(self.selected)
+        elif action == b'goto' and int(ch) < len(self.rules) <= 10:
+            newrule = next((r for r in self.rules if r.origpos == int(ch)))
+            self.move_cursor(oldpos, newrule.pos)
+            if self.selected is not None:
+                self.swap(oldpos, newrule.pos)
+        elif action.startswith(b'action-'):
+            self.change_action(oldpos, action[7:])
+        elif action == b'showpatch':
+            self.change_mode(MODE_PATCH if curmode != MODE_PATCH else prevmode)
+        elif action == b'help':
+            self.change_mode(MODE_HELP if curmode != MODE_HELP else prevmode)
+        elif action == b'quit':
+            return E_QUIT
+        elif action == b'histedit':
+            return E_HISTEDIT
+        elif action == b'page-down':
+            return E_PAGEDOWN
+        elif action == b'page-up':
+            return E_PAGEUP
+        elif action == b'line-down':
+            return E_LINEDOWN
+        elif action == b'line-up':
+            return E_LINEUP
+
+    def patch_contents(self):
+        repo = self.repo
+        rule = self.rules[self.display_pos_to_rule_pos(self.pos)]
+        displayer = logcmdutil.changesetdisplayer(
+            repo.ui,
+            repo,
+            {b"patch": True, b"template": b"status"},
+            buffered=True,
+        )
+        overrides = {(b'ui', b'verbose'): True}
+        with repo.ui.configoverride(overrides, source=b'histedit'):
+            displayer.show(rule.ctx)
+            displayer.close()
+        return displayer.hunk[rule.ctx.rev()].splitlines()
+
+    def move_cursor(self, oldpos, newpos):
+        """Change the rule/changeset that the cursor is pointing to, regardless of
+        current mode (you can switch between patches from the view patch window)."""
+        self.pos = newpos
+
+        mode, _ = self.mode
+        if mode == MODE_RULES:
+            # Scroll through the list by updating the view for MODE_RULES, so that
+            # even if we are not currently viewing the rules, switching back will
+            # result in the cursor's rule being visible.
+            modestate = self.modes[MODE_RULES]
+            if newpos < modestate[b'line_offset']:
+                modestate[b'line_offset'] = newpos
+            elif newpos > modestate[b'line_offset'] + self.page_height - 1:
+                modestate[b'line_offset'] = newpos - self.page_height + 1
+
+        # Reset the patch view region to the top of the new patch.
+        self.modes[MODE_PATCH][b'line_offset'] = 0
+
+    def change_mode(self, mode):
+        curmode, _ = self.mode
+        self.mode = (mode, curmode)
+        if mode == MODE_PATCH:
+            self.modes[MODE_PATCH][b'patchcontents'] = self.patch_contents()
+
+    def make_selection(self, pos):
+        self.selected = pos
+
+    def swap(self, oldpos, newpos):
+        """Swap two positions and calculate necessary conflicts in
+        O(|newpos-oldpos|) time"""
+        old_rule_pos = self.display_pos_to_rule_pos(oldpos)
+        new_rule_pos = self.display_pos_to_rule_pos(newpos)
+
+        rules = self.rules
+        assert 0 <= old_rule_pos < len(rules) and 0 <= new_rule_pos < len(rules)
+
+        rules[old_rule_pos], rules[new_rule_pos] = (
+            rules[new_rule_pos],
+            rules[old_rule_pos],
+        )
+
+        # TODO: swap should not know about histeditrule's internals
+        rules[new_rule_pos].pos = new_rule_pos
+        rules[old_rule_pos].pos = old_rule_pos
+
+        start = min(old_rule_pos, new_rule_pos)
+        end = max(old_rule_pos, new_rule_pos)
+        for r in pycompat.xrange(start, end + 1):
+            rules[new_rule_pos].checkconflicts(rules[r])
+            rules[old_rule_pos].checkconflicts(rules[r])
+
+        if self.selected:
+            self.make_selection(newpos)
+
+    def change_action(self, pos, action):
+        """Change the action state on the given position to the new action"""
+        assert 0 <= pos < len(self.rules)
+        self.rules[pos].action = action
+
+    def cycle_action(self, pos, next=False):
+        """Changes the action state the next or the previous action from
+        the action list"""
+        assert 0 <= pos < len(self.rules)
+        current = self.rules[pos].action
+
+        assert current in KEY_LIST
+
+        index = KEY_LIST.index(current)
+        if next:
+            index += 1
+        else:
+            index -= 1
+        self.change_action(pos, KEY_LIST[index % len(KEY_LIST)])
+
+    def change_view(self, delta, unit):
+        """Change the region of whatever is being viewed (a patch or the list of
+        changesets). 'delta' is an amount (+/- 1) and 'unit' is 'page' or 'line'."""
+        mode, _ = self.mode
+        if mode != MODE_PATCH:
+            return
+        mode_state = self.modes[mode]
+        num_lines = len(mode_state[b'patchcontents'])
+        page_height = self.page_height
+        unit = page_height if unit == b'page' else 1
+        num_pages = 1 + (num_lines - 1) // page_height
+        max_offset = (num_pages - 1) * page_height
+        newline = mode_state[b'line_offset'] + delta * unit
+        mode_state[b'line_offset'] = max(0, min(max_offset, newline))
+
+
+def _chisteditmain(repo, rules, stdscr):
+    try:
+        curses.use_default_colors()
+    except curses.error:
+        pass
+
+    # initialize color pattern
+    curses.init_pair(COLOR_HELP, curses.COLOR_WHITE, curses.COLOR_BLUE)
+    curses.init_pair(COLOR_SELECTED, curses.COLOR_BLACK, curses.COLOR_WHITE)
+    curses.init_pair(COLOR_WARN, curses.COLOR_BLACK, curses.COLOR_YELLOW)
+    curses.init_pair(COLOR_OK, curses.COLOR_BLACK, curses.COLOR_GREEN)
+    curses.init_pair(COLOR_CURRENT, curses.COLOR_WHITE, curses.COLOR_MAGENTA)
+    curses.init_pair(COLOR_DIFF_ADD_LINE, curses.COLOR_GREEN, -1)
+    curses.init_pair(COLOR_DIFF_DEL_LINE, curses.COLOR_RED, -1)
+    curses.init_pair(COLOR_DIFF_OFFSET, curses.COLOR_MAGENTA, -1)
+    curses.init_pair(COLOR_ROLL, curses.COLOR_RED, -1)
+    curses.init_pair(
+        COLOR_ROLL_CURRENT, curses.COLOR_BLACK, curses.COLOR_MAGENTA
+    )
+    curses.init_pair(COLOR_ROLL_SELECTED, curses.COLOR_RED, curses.COLOR_WHITE)
+
+    # don't display the cursor
+    try:
+        curses.curs_set(0)
+    except curses.error:
+        pass
 
     def drawvertwin(size, y, x):
         win = curses.newwin(size[0], size[1], y, x)
         y += size[0]
         return win, y, x
 
-    state = {
-        b'pos': 0,
-        b'rules': rules,
-        b'selected': None,
-        b'mode': (MODE_INIT, MODE_INIT),
-        b'page_height': None,
-        b'modes': {
-            MODE_RULES: {
-                b'line_offset': 0,
-            },
-            MODE_PATCH: {
-                b'line_offset': 0,
-            },
-        },
-        b'repo': repo,
-    }
+    state = _chistedit_state(repo, rules, stdscr)
 
     # eventloop
     ch = None
     stdscr.clear()
     stdscr.refresh()
     while True:
-        oldmode, unused = state[b'mode']
+        oldmode, unused = state.mode
         if oldmode == MODE_INIT:
-            changemode(state, MODE_RULES)
-        e = event(state, ch)
+            state.change_mode(MODE_RULES)
+        e = state.event(ch)
 
         if e == E_QUIT:
             return False
         if e == E_HISTEDIT:
-            return state[b'rules']
+            return state.rules
         else:
             if e == E_RESIZE:
                 size = screen_size()
                 if size != stdscr.getmaxyx():
                     curses.resizeterm(*size)
 
-            curmode, unused = state[b'mode']
-            sizes = layout(curmode)
+            sizes = state.layout()
+            curmode, unused = state.mode
             if curmode != oldmode:
-                state[b'page_height'] = sizes[b'main'][0]
+                state.page_height = sizes[b'main'][0]
                 # Adjust the view to fit the current screen size.
-                movecursor(state, state[b'pos'], state[b'pos'])
+                state.move_cursor(state.pos, state.pos)
 
             # Pack the windows against the top, each pane spread across the
             # full width of the screen.
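`change_view` above clamps the scroll offset to whole pages, which is easy to misread inside the method. The same formula isolated into a hypothetical helper, with a couple of checks:

    def clamped_offset(line_offset, delta, unit, num_lines, page_height):
        # One "unit" is either a full page or a single line.
        step = page_height if unit == 'page' else 1
        # ceil(num_lines / page_height), written with integer arithmetic.
        num_pages = 1 + (num_lines - 1) // page_height
        # The largest reachable offset is the start of the final page.
        max_offset = (num_pages - 1) * page_height
        return max(0, min(max_offset, line_offset + delta * step))

    assert clamped_offset(0, +1, 'page', num_lines=25, page_height=10) == 10
    assert clamped_offset(20, +1, 'page', num_lines=25, page_height=10) == 20  # already on last page
    assert clamped_offset(0, -1, 'line', num_lines=25, page_height=10) == 0   # clamped at the top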
@@ -1654,26 +1677,26 b' pgup/K: move patch up, pgdn/J: move patc'
 
         if e in (E_PAGEDOWN, E_PAGEUP, E_LINEDOWN, E_LINEUP):
             if e == E_PAGEDOWN:
-                changeview(state, +1, b'page')
+                state.change_view(+1, b'page')
             elif e == E_PAGEUP:
-                changeview(state, -1, b'page')
+                state.change_view(-1, b'page')
             elif e == E_LINEDOWN:
-                changeview(state, +1, b'line')
+                state.change_view(+1, b'line')
             elif e == E_LINEUP:
-                changeview(state, -1, b'line')
+                state.change_view(-1, b'line')
 
         # start rendering
         commitwin.erase()
         helpwin.erase()
         mainwin.erase()
         if curmode == MODE_PATCH:
-            renderpatch(mainwin, state)
+            state.render_patch(mainwin)
         elif curmode == MODE_HELP:
-            renderstring(mainwin, __doc__.strip().splitlines())
+            state.render_string(mainwin, __doc__.strip().splitlines())
         else:
-            renderrules(mainwin, state)
-            rendercommit(commitwin, state)
-        renderhelp(helpwin, state)
+            state.render_rules(mainwin)
+            state.render_commit(commitwin)
+        state.render_help(helpwin)
         curses.doupdate()
         # done rendering
         ch = encoding.strtolocal(stdscr.getkey())
@@ -1697,26 +1720,19 b' def _chistedit(ui, repo, freeargs, opts)'
     cmdutil.checkunfinished(repo)
     cmdutil.bailifchanged(repo)
 
-    if os.path.exists(os.path.join(repo.path, b'histedit-state')):
-        raise error.Abort(
-            _(
-                b'history edit already in progress, try '
-                b'--continue or --abort'
-            )
-        )
     revs.extend(freeargs)
     if not revs:
         defaultrev = destutil.desthistedit(ui, repo)
         if defaultrev is not None:
             revs.append(defaultrev)
     if len(revs) != 1:
-        raise error.Abort(
+        raise error.InputError(
             _(b'histedit requires exactly one ancestor revision')
         )
 
-    rr = list(repo.set(b'roots(%ld)', scmutil.revrange(repo, revs)))
+    rr = list(repo.set(b'roots(%ld)', logcmdutil.revrange(repo, revs)))
     if len(rr) != 1:
-        raise error.Abort(
+        raise error.InputError(
             _(
                 b'The specified revisions must have '
                 b'exactly one common root'
@@ -1727,15 +1743,15 b' def _chistedit(ui, repo, freeargs, opts)'
     topmost = repo.dirstate.p1()
     revs = between(repo, root, topmost, keep)
     if not revs:
-        raise error.Abort(
+        raise error.InputError(
             _(b'%s is not an ancestor of working directory') % short(root)
         )
 
-    ctxs = []
+    rules = []
     for i, r in enumerate(revs):
-        ctxs.append(histeditrule(ui, repo[r], i))
+        rules.append(histeditrule(ui, repo[r], i))
     with util.with_lc_ctype():
-        rc = curses.wrapper(functools.partial(_chisteditmain, repo, ctxs))
+        rc = curses.wrapper(functools.partial(_chisteditmain, repo, rules))
     curses.echo()
     curses.endwin()
     if rc is False:
@@ -1928,12 +1944,12 b' def _readfile(ui, path):'
         return f.read()
 
 
-def _validateargs(ui, repo, state, freeargs, opts, goal, rules, revs):
+def _validateargs(ui, repo, freeargs, opts, goal, rules, revs):
     # TODO only abort if we try to histedit mq patches, not just
     # blanket if mq patches are applied somewhere
     mq = getattr(repo, 'mq', None)
     if mq and mq.applied:
-        raise error.Abort(_(b'source has mq patches applied'))
+        raise error.StateError(_(b'source has mq patches applied'))
 
     # basic argument incompatibility processing
     outg = opts.get(b'outgoing')
@@ -1941,31 +1957,26 b' def _validateargs(ui, repo, state, freea'
     abort = opts.get(b'abort')
     force = opts.get(b'force')
     if force and not outg:
-        raise error.Abort(_(b'--force only allowed with --outgoing'))
+        raise error.InputError(_(b'--force only allowed with --outgoing'))
     if goal == b'continue':
         if any((outg, abort, revs, freeargs, rules, editplan)):
-            raise error.Abort(_(b'no arguments allowed with --continue'))
+            raise error.InputError(_(b'no arguments allowed with --continue'))
     elif goal == b'abort':
         if any((outg, revs, freeargs, rules, editplan)):
-            raise error.Abort(_(b'no arguments allowed with --abort'))
+            raise error.InputError(_(b'no arguments allowed with --abort'))
     elif goal == b'edit-plan':
         if any((outg, revs, freeargs)):
-            raise error.Abort(
+            raise error.InputError(
                 _(b'only --commands argument allowed with --edit-plan')
             )
     else:
-        if state.inprogress():
-            raise error.Abort(
-                _(
-                    b'history edit already in progress, try '
-                    b'--continue or --abort'
-                )
-            )
         if outg:
             if revs:
-                raise error.Abort(_(b'no revisions allowed with --outgoing'))
+                raise error.InputError(
+                    _(b'no revisions allowed with --outgoing')
+                )
             if len(freeargs) > 1:
-                raise error.Abort(
+                raise error.InputError(
                     _(b'only one repo argument allowed with --outgoing')
                 )
         else:
@@ -1976,7 +1987,7 b' def _validateargs(ui, repo, state, freea'
             revs.append(defaultrev)
 
         if len(revs) != 1:
-            raise error.Abort(
+            raise error.InputError(
                 _(b'histedit requires exactly one ancestor revision')
             )
 
@@ -1990,11 +2001,11 b' def _histedit(ui, repo, state, freeargs,'
     rules = opts.get(b'commands', b'')
     state.keep = opts.get(b'keep', False)
 
-    _validateargs(ui, repo, state, freeargs, opts, goal, rules, revs)
+    _validateargs(ui, repo, freeargs, opts, goal, rules, revs)
 
     hastags = False
     if revs:
-        revs = scmutil.revrange(repo, revs)
+        revs = logcmdutil.revrange(repo, revs)
         ctxs = [repo[rev] for rev in revs]
         for ctx in ctxs:
             tags = [tag for tag in ctx.tags() if tag != b'tip']
@@ -2009,7 +2020,7 b' def _histedit(ui, repo, state, freeargs,'
             ),
             default=1,
         ):
-            raise error.Abort(_(b'histedit cancelled\n'))
+            raise error.CanceledError(_(b'histedit cancelled\n'))
     # rebuild state
     if goal == goalcontinue:
         state.read()
@@ -2217,9 +2228,9 b' def _newhistedit(ui, repo, state, revs, '
         remote = None
         root = findoutgoing(ui, repo, remote, force, opts)
     else:
-        rr = list(repo.set(b'roots(%ld)', scmutil.revrange(repo, revs)))
+        rr = list(repo.set(b'roots(%ld)', logcmdutil.revrange(repo, revs)))
         if len(rr) != 1:
-            raise error.Abort(
+            raise error.InputError(
                 _(
                     b'The specified revisions must have '
                     b'exactly one common root'
@@ -2229,7 +2240,7 b' def _newhistedit(ui, repo, state, revs, '
 
     revs = between(repo, root, topmost, state.keep)
     if not revs:
-        raise error.Abort(
+        raise error.InputError(
             _(b'%s is not an ancestor of working directory') % short(root)
         )
 
@@ -2259,7 +2270,7 b' def _newhistedit(ui, repo, state, revs, '
             followcopies=False,
         )
     except error.Abort:
-        raise error.Abort(
+        raise error.StateError(
             _(
                 b"untracked files in working directory conflict with files in %s"
             )
@@ -2337,7 +2348,9 b' def between(repo, old, new, keep):'
     if revs and not keep:
         rewriteutil.precheck(repo, revs, b'edit')
         if repo.revs(b'(%ld) and merge()', revs):
-            raise error.Abort(_(b'cannot edit history that contains merges'))
+            raise error.StateError(
+                _(b'cannot edit history that contains merges')
+            )
     return pycompat.maplist(repo.changelog.node, revs)
 
 
@@ -431,18 +431,19 b' def localrepolistkeys(orig, self, namesp'
     @wireprotov1peer.batchable
     def listkeyspatterns(self, namespace, patterns):
         if not self.capable(b'pushkey'):
-            yield {}, None
-        f = wireprotov1peer.future()
+            return {}, None
         self.ui.debug(b'preparing listkeys for "%s"\n' % namespace)
-        yield {
+
+        def decode(d):
+            self.ui.debug(
+                b'received listkey for "%s": %i bytes\n' % (namespace, len(d))
+            )
+            return pushkey.decodekeys(d)
+
+        return {
             b'namespace': encoding.fromlocal(namespace),
             b'patterns': wireprototypes.encodelist(patterns),
-        }
-        d = f.value
-        self.ui.debug(
-            b'received listkey for "%s": %i bytes\n' % (namespace, len(d))
-        )
-        yield pushkey.decodekeys(d)
+        }, decode
 
 
 def _readbundlerevs(bundlerepo):
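The hunk above follows a wider wireproto change: `@batchable` methods stop being generators that yield a `future` and instead return a `(value, decode)` pair, where `decode` is applied to the raw wire response. A toy model of that calling convention under assumed names (`listkeyspatterns_args`, `run_call`, `fake_send` are all illustrative, not the real wireprotov1peer API):

    def listkeyspatterns_args(namespace, patterns, capable=True):
        # New-style contract: return (value, decode). A None decode means
        # `value` is already the final result (the `return {}, None` early
        # exit above); otherwise `value` is the request to send and `decode`
        # turns the raw response into the result.
        if not capable:
            return {}, None

        def decode(raw):
            return dict(line.split(b'=', 1) for line in raw.split(b'\n') if line)

        return {b'namespace': namespace, b'patterns': patterns}, decode

    def run_call(fn, *args, send=None):
        value, decode = fn(*args)
        if decode is None:
            return value
        return decode(send(value))

    fake_send = lambda request: b'bookmarks/stable=abc123\n'
    out = run_call(listkeyspatterns_args, b'bookmarks', [b'stable*'], send=fake_send)
    assert out == {b'bookmarks/stable': b'abc123'}
    assert run_call(listkeyspatterns_args, b'bookmarks', [], False) == {}

Separating request construction from response decoding lets a batcher send all requests in one round trip and decode the replies afterwards, which is why the generator/future dance could be dropped.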
@@ -26,6 +26,7 b' from mercurial import ('
     exthelper,
     hg,
     lock,
+    logcmdutil,
     match as matchmod,
     pycompat,
     scmutil,
@@ -540,7 +541,7 b' def updatelfiles('
                 expecthash = lfutil.readasstandin(wctx[standin])
                 if expecthash != b'':
                     if lfile not in wctx:  # not switched to normal file
-                        if repo.dirstate[standin] != b'?':
+                        if repo.dirstate.get_entry(standin).any_tracked:
                             wvfs.unlinkpath(lfile, ignoremissing=True)
                         else:
                             dropped.add(lfile)
@@ -568,7 +569,7 b' def updatelfiles('
                     removed += 1
 
         # largefile processing might be slow and be interrupted - be prepared
-        lfdirstate.write()
+        lfdirstate.write(repo.currenttransaction())
 
         if lfiles:
             lfiles = [f for f in lfiles if f not in dropped]
@@ -577,7 +578,7 b' def updatelfiles('
                 repo.wvfs.unlinkpath(lfutil.standin(f))
                 # This needs to happen for dropped files, otherwise they stay in
                 # the M state.
-                lfdirstate._drop(f)
+                lfdirstate._map.reset_state(f)
 
         statuswriter(_(b'getting changed largefiles\n'))
         cachelfiles(ui, repo, None, lfiles)
@@ -618,7 +619,7 b' def updatelfiles('
 
             lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
 
-        lfdirstate.write()
+        lfdirstate.write(repo.currenttransaction())
         if lfiles:
             statuswriter(
                 _(b'%d largefiles updated, %d removed\n') % (updated, removed)
@@ -657,7 +658,7 b' def lfpull(ui, repo, source=b"default", '
     revs = opts.get('rev', [])
     if not revs:
         raise error.Abort(_(b'no revisions specified'))
-    revs = scmutil.revrange(repo, revs)
+    revs = logcmdutil.revrange(repo, revs)
 
     numcached = 0
     for rev in revs:
@@ -191,10 +191,12 b' class largefilesdirstate(dirstate.dirsta'
     def _ignore(self, f):
         return False
 
-    def write(self, tr=False):
+    def write(self, tr):
         # (1) disable PENDING mode always
         #     (lfdirstate isn't yet managed as a part of the transaction)
         # (2) avoid develwarn 'use dirstate.write with ....'
+        if tr:
+            tr.addbackup(b'largefiles/dirstate', location=b'plain')
         super(largefilesdirstate, self).write(None)
@@ -269,7 +271,7 b' def listlfiles(repo, rev=None, matcher=N'
     return [
         splitstandin(f)
         for f in repo[rev].walk(matcher)
-        if rev is not None or repo.dirstate[f] != b'?'
+        if rev is not None or repo.dirstate.get_entry(f).any_tracked
     ]
 
 
@@ -558,24 +560,14 b' def synclfdirstate(repo, lfdirstate, lfi'
     if lfstandin not in repo.dirstate:
         lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=False)
     else:
-        stat = repo.dirstate._map[lfstandin]
-        state, mtime = stat.state, stat.mtime
-        if state == b'n':
-            if normallookup or mtime < 0 or not repo.wvfs.exists(lfile):
-                # state 'n' doesn't ensure 'clean' in this case
-                lfdirstate.update_file(
-                    lfile, p1_tracked=True, wc_tracked=True, possibly_dirty=True
-                )
-            else:
-                lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=True)
-        elif state == b'm':
-            lfdirstate.update_file(
-                lfile, p1_tracked=True, wc_tracked=True, merged=True
-            )
-        elif state == b'r':
-            lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=False)
-        elif state == b'a':
-            lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=True)
+        entry = repo.dirstate.get_entry(lfstandin)
+        lfdirstate.update_file(
+            lfile,
+            wc_tracked=entry.tracked,
+            p1_tracked=entry.p1_tracked,
+            p2_info=entry.p2_info,
+            possibly_dirty=True,
+        )
 
 
 def markcommitted(orig, ctx, node):
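The `get_entry()` migrations in these largefiles hunks all follow one translation: from the old one-letter dirstate states (`'n'`, `'a'`, `'r'`, `'m'`, `'?'`) to boolean properties on the returned entry. A toy entry restating that mapping; property names follow the diff, but this is not the real DirstateItem:

    class Entry(object):
        def __init__(self, p1_tracked=False, p2_info=False, wc_tracked=False):
            self.p1_tracked = p1_tracked  # present in the first parent
            self.p2_info = p2_info        # merge/second-parent information
            self.tracked = wc_tracked     # tracked in the working copy

        @property
        def removed(self):                # old state 'r'
            return (self.p1_tracked or self.p2_info) and not self.tracked

        @property
        def added(self):                  # old state 'a'
            return self.tracked and not (self.p1_tracked or self.p2_info)

        @property
        def any_tracked(self):            # anything but old state '?'
            return self.tracked or self.p1_tracked or self.p2_info

    # old checks                      ->  new checks used in the hunks
    assert Entry(p1_tracked=True).removed                        # dirstate[f] == b'r'
    assert Entry().any_tracked is False                          # dirstate[f] == b'?'
    assert Entry(p1_tracked=True, wc_tracked=True).any_tracked   # dirstate[f] != b'?'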
@@ -598,7 +590,7 b' def markcommitted(orig, ctx, node):'
         lfile = splitstandin(f)
         if lfile is not None:
             synclfdirstate(repo, lfdirstate, lfile, False)
-    lfdirstate.write()
+    lfdirstate.write(repo.currenttransaction())
 
     # As part of committing, copy all of the largefiles into the cache.
     #
@@ -713,7 +705,7 b' def updatestandinsbymatch(repo, match):'
     lfdirstate = openlfdirstate(ui, repo)
     for fstandin in standins:
         lfile = splitstandin(fstandin)
-        if lfdirstate[lfile] != b'r':
+        if lfdirstate.get_entry(lfile).tracked:
             updatestandin(repo, lfile, fstandin)
 
     # Cook up a new matcher that only matches regular files or
@@ -737,10 +729,10 b' def updatestandinsbymatch(repo, match):'
         # standin removal, drop the normal file if it is unknown to dirstate.
         # Thus, skip plain largefile names but keep the standin.
         if f in lfiles or fstandin in standins:
-            if repo.dirstate[fstandin] != b'r':
-                if repo.dirstate[f] != b'r':
+            if not repo.dirstate.get_entry(fstandin).removed:
+                if not repo.dirstate.get_entry(f).removed:
                     continue
-            elif repo.dirstate[f] == b'?':
+            elif not repo.dirstate.get_entry(f).any_tracked:
                 continue
 
         actualfiles.append(f)
@@ -151,7 +151,7 b' def addlargefiles(ui, repo, isaddremove,'
                 )
                 standins.append(standinname)
                 lfdirstate.set_tracked(f)
-            lfdirstate.write()
+            lfdirstate.write(repo.currenttransaction())
             bad += [
                 lfutil.splitstandin(f)
                 for f in repo[None].add(standins)
@@ -229,7 +229,7 b' def removelargefiles(ui, repo, isaddremo'
             for f in remove:
                 lfdirstate.set_untracked(lfutil.splitstandin(f))
 
-            lfdirstate.write()
+            lfdirstate.write(repo.currenttransaction())
 
     return result
 
@@ -659,7 +659,7 b' def mergerecordupdates(orig, repo, actio'
                 )
                 # make sure lfile doesn't get synclfdirstate'd as normal
                 lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=True)
-        lfdirstate.write()
+        lfdirstate.write(repo.currenttransaction())
 
     return orig(repo, actions, branchmerge, getfiledata)
 
@@ -864,7 +864,7 b' def overridecopy(orig, ui, repo, pats, o'
                 util.copyfile(repo.wjoin(srclfile), repo.wjoin(destlfile))
 
                 lfdirstate.set_tracked(destlfile)
-        lfdirstate.write()
+        lfdirstate.write(repo.currenttransaction())
     except error.Abort as e:
         if e.message != _(b'no files to copy'):
             raise e
@@ -896,7 +896,7 b' def overriderevert(orig, ui, repo, ctx, '
     with repo.wlock():
         lfdirstate = lfutil.openlfdirstate(ui, repo)
         s = lfutil.lfdirstatestatus(lfdirstate, repo)
-        lfdirstate.write()
+        lfdirstate.write(repo.currenttransaction())
         for lfile in s.modified:
             lfutil.updatestandin(repo, lfile, lfutil.standin(lfile))
         for lfile in s.deleted:
@@ -934,7 +934,7 b' def overriderevert(orig, ui, repo, ctx, '
                 standin = lfutil.standin(f)
                 if standin in ctx or standin in mctx:
                     matchfiles.append(standin)
-                elif standin in wctx or lfdirstate[f] == b'r':
+                elif standin in wctx or lfdirstate.get_entry(f).removed:
                     continue
                 else:
                     matchfiles.append(f)
@@ -1000,7 +1000,7 b' def overridepull(orig, ui, repo, source='
         numcached = 0
         repo.firstpulled = revsprepull  # for pulled() revset expression
         try:
-            for rev in scmutil.revrange(repo, lfrevs):
+            for rev in logcmdutil.revrange(repo, lfrevs):
                 ui.note(_(b'pulling largefiles for revision %d\n') % rev)
                 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
                 numcached += len(cached)
@@ -1027,7 +1027,7 b' def overridepush(orig, ui, repo, *args, '
     lfrevs = kwargs.pop('lfrev', None)
     if lfrevs:
         opargs = kwargs.setdefault('opargs', {})
-        opargs[b'lfrevs'] = scmutil.revrange(repo, lfrevs)
+        opargs[b'lfrevs'] = logcmdutil.revrange(repo, lfrevs)
     return orig(ui, repo, *args, **kwargs)
 
 
@@ -1383,7 +1383,7 b' def cmdutilforget('
     lfdirstate = lfutil.openlfdirstate(ui, repo)
     for f in forget:
         lfdirstate.set_untracked(f)
-    lfdirstate.write()
+    lfdirstate.write(repo.currenttransaction())
     standins = [lfutil.standin(f) for f in forget]
     for f in standins:
         repo.wvfs.unlinkpath(f, ignoremissing=True)
@@ -1591,8 +1591,12 b' def overridepurge(orig, ui, repo, *dirs,'
         node1, node2, match, ignored, clean, unknown, listsubrepos
     )
     lfdirstate = lfutil.openlfdirstate(ui, repo)
-    unknown = [f for f in r.unknown if lfdirstate[f] == b'?']
-    ignored = [f for f in r.ignored if lfdirstate[f] == b'?']
+    unknown = [
+        f for f in r.unknown if not lfdirstate.get_entry(f).any_tracked
+    ]
+    ignored = [
+        f for f in r.ignored if not lfdirstate.get_entry(f).any_tracked
+    ]
     return scmutil.status(
         r.modified, r.added, r.removed, r.deleted, unknown, ignored, r.clean
     )
@@ -1609,7 +1613,7 b' def overriderollback(orig, ui, repo, **o'
         orphans = {
             f
             for f in repo.dirstate
-            if lfutil.isstandin(f) and repo.dirstate[f] != b'r'
+            if lfutil.isstandin(f) and not repo.dirstate.get_entry(f).removed
         }
         result = orig(ui, repo, **opts)
         after = repo.dirstate.parents()
@@ -1620,7 +1624,7 b' def overriderollback(orig, ui, repo, **o'
             for f in repo.dirstate:
                 if lfutil.isstandin(f):
                     orphans.discard(f)
-                    if repo.dirstate[f] == b'r':
+                    if repo.dirstate.get_entry(f).removed:
                         repo.wvfs.unlinkpath(f, ignoremissing=True)
                     elif f in pctx:
                         fctx = pctx[f]
@@ -1632,18 +1636,6 b' def overriderollback(orig, ui, repo, **o' | |||||
1632 | for standin in orphans: |
|
1636 | for standin in orphans: | |
1633 | repo.wvfs.unlinkpath(standin, ignoremissing=True) |
|
1637 | repo.wvfs.unlinkpath(standin, ignoremissing=True) | |
1634 |
|
1638 | |||
1635 | lfdirstate = lfutil.openlfdirstate(ui, repo) |
|
|||
1636 | with lfdirstate.parentchange(): |
|
|||
1637 | orphans = set(lfdirstate) |
|
|||
1638 | lfiles = lfutil.listlfiles(repo) |
|
|||
1639 | for file in lfiles: |
|
|||
1640 | lfutil.synclfdirstate(repo, lfdirstate, file, True) |
|
|||
1641 | orphans.discard(file) |
|
|||
1642 | for lfile in orphans: |
|
|||
1643 | lfdirstate.update_file( |
|
|||
1644 | lfile, p1_tracked=False, wc_tracked=False |
|
|||
1645 | ) |
|
|||
1646 | lfdirstate.write() |
|
|||
1647 | return result |
|
1639 | return result | |
1648 |
|
1640 | |||
1649 |
|
1641 | |||
@@ -1663,7 +1655,7 b' def overridetransplant(orig, ui, repo, *' | |||||
1663 | @eh.wrapcommand(b'cat') |
|
1655 | @eh.wrapcommand(b'cat') | |
1664 | def overridecat(orig, ui, repo, file1, *pats, **opts): |
|
1656 | def overridecat(orig, ui, repo, file1, *pats, **opts): | |
1665 | opts = pycompat.byteskwargs(opts) |
|
1657 | opts = pycompat.byteskwargs(opts) | |
1666 | ctx = scmutil.revsingle(repo, opts.get(b'rev')) |
1658 | ctx = logcmdutil.revsingle(repo, opts.get(b'rev')) | |
1667 | err = 1 |
|
1659 | err = 1 | |
1668 | notbad = set() |
|
1660 | notbad = set() | |
1669 | m = scmutil.match(ctx, (file1,) + pats, opts) |
|
1661 | m = scmutil.match(ctx, (file1,) + pats, opts) | |
@@ -1787,10 +1779,8 b' def mergeupdate(orig, repo, node, branch' | |||||
1787 | # mark all clean largefiles as dirty, just in case the update gets |
|
1779 | # mark all clean largefiles as dirty, just in case the update gets | |
1788 | # interrupted before largefiles and lfdirstate are synchronized |
|
1780 | # interrupted before largefiles and lfdirstate are synchronized | |
1789 | for lfile in oldclean: |
|
1781 | for lfile in oldclean: | |
1790 | entry = lfdirstate._map.get(lfile) |
|
|||
1791 | assert not (entry.merged_removed or entry.from_p2_removed) |
|
|||
1792 | lfdirstate.set_possibly_dirty(lfile) |
|
1782 | lfdirstate.set_possibly_dirty(lfile) | |
1793 | lfdirstate.write() |
|
1783 | lfdirstate.write(repo.currenttransaction()) | |
1794 |
|
1784 | |||
1795 | oldstandins = lfutil.getstandinsstate(repo) |
|
1785 | oldstandins = lfutil.getstandinsstate(repo) | |
1796 | wc = kwargs.get('wc') |
|
1786 | wc = kwargs.get('wc') | |
@@ -1810,7 +1800,7 b' def mergeupdate(orig, repo, node, branch' | |||||
1810 | # all the ones that didn't change as clean |
|
1800 | # all the ones that didn't change as clean | |
1811 | for lfile in oldclean.difference(filelist): |
|
1801 | for lfile in oldclean.difference(filelist): | |
1812 | lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=True) |
|
1802 | lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=True) | |
1813 | lfdirstate.write() |
|
1803 | lfdirstate.write(repo.currenttransaction()) | |
1814 |
|
1804 | |||
1815 | if branchmerge or force or partial: |
|
1805 | if branchmerge or force or partial: | |
1816 | filelist.extend(s.deleted + s.removed) |
|
1806 | filelist.extend(s.deleted + s.removed) |
@@ -184,17 +184,18 b' def wirereposetup(ui, repo):' | |||||
184 |
|
184 | |||
185 | @wireprotov1peer.batchable |
|
185 | @wireprotov1peer.batchable | |
186 | def statlfile(self, sha): |
|
186 | def statlfile(self, sha): | |
187 | f = wireprotov1peer.future() |
|
187 | def decode(d): | |
|
188 | try: | |||
|
189 | return int(d) | |||
|
190 | except (ValueError, urlerr.httperror): | |||
|
191 | # If the server returns anything but an integer followed by a | |||
|
192 | # newline, newline, it's not speaking our language; if we get | |||
|
193 | # an HTTP error, we can't be sure the largefile is present; | |||
|
194 | # either way, consider it missing. | |||
|
195 | return 2 | |||
|
196 | ||||
188 | result = {b'sha': sha} |
|
197 | result = {b'sha': sha} | |
189 | yield result, f |
198 | return result, decode | |
190 | try: |
|
|||
191 | yield int(f.value) |
|
|||
192 | except (ValueError, urlerr.httperror): |
|
|||
193 | # If the server returns anything but an integer followed by a |
|
|||
194 | # newline, newline, it's not speaking our language; if we get |
|
|||
195 | # an HTTP error, we can't be sure the largefile is present; |
|
|||
196 | # either way, consider it missing. |
|
|||
197 | yield 2 |
|
|||
198 |
|
199 | |||
199 | repo.__class__ = lfileswirerepository |
|
200 | repo.__class__ = lfileswirerepository | |
200 |
|
201 |
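Note: `@wireprotov1peer.batchable` changes contract in this hunk: instead of creating a `future`, yielding the encoded arguments, and then yielding the decoded result, a batchable method now returns an `(encoded_args, decode)` pair and the peer applies `decode` to the raw wire response. A self-contained sketch in the new style (the class is illustrative, not a real peer; the real hunk also treats HTTP errors as "missing"):

    from mercurial import wireprotov1peer

    class examplepeer(object):
        @wireprotov1peer.batchable
        def statlfile(self, sha):
            def decode(d):
                try:
                    return int(d)
                except ValueError:
                    # Anything but an integer reply means "missing".
                    return 2
            return {b'sha': sha}, decode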
@@ -310,7 +310,7 b' def reposetup(ui, repo):' | |||||
310 | ] |
|
310 | ] | |
311 |
|
311 | |||
312 | if gotlock: |
|
312 | if gotlock: | |
313 | lfdirstate.write() |
|
313 | lfdirstate.write(self.currenttransaction()) | |
314 |
|
314 | |||
315 | self.lfstatus = True |
|
315 | self.lfstatus = True | |
316 | return scmutil.status(*result) |
|
316 | return scmutil.status(*result) |
@@ -137,6 +137,7 b' from mercurial import (' | |||||
137 | filelog, |
|
137 | filelog, | |
138 | filesetlang, |
|
138 | filesetlang, | |
139 | localrepo, |
|
139 | localrepo, | |
|
140 | logcmdutil, | |||
140 | minifileset, |
|
141 | minifileset, | |
141 | pycompat, |
|
142 | pycompat, | |
142 | revlog, |
|
143 | revlog, | |
@@ -417,7 +418,7 b' def lfsfiles(context, mapping):' | |||||
417 | def debuglfsupload(ui, repo, **opts): |
|
418 | def debuglfsupload(ui, repo, **opts): | |
418 | """upload lfs blobs added by the working copy parent or given revisions""" |
|
419 | """upload lfs blobs added by the working copy parent or given revisions""" | |
419 | revs = opts.get('rev', []) |
|
420 | revs = opts.get('rev', []) | |
420 | pointers = wrapper.extractpointers(repo, scmutil.revrange(repo, revs)) |
421 | pointers = wrapper.extractpointers(repo, logcmdutil.revrange(repo, revs)) | |
421 | wrapper.uploadblobs(repo, pointers) |
|
422 | wrapper.uploadblobs(repo, pointers) | |
422 |
|
423 | |||
423 |
|
424 |
@@ -1241,7 +1241,7 b' class queue(object):' | |||||
1241 | if opts.get(b'rev'): |
|
1241 | if opts.get(b'rev'): | |
1242 | if not self.applied: |
|
1242 | if not self.applied: | |
1243 | raise error.Abort(_(b'no patches applied')) |
|
1243 | raise error.Abort(_(b'no patches applied')) | |
1244 | revs = scmutil.revrange(repo, opts.get(b'rev')) |
1244 | revs = logcmdutil.revrange(repo, opts.get(b'rev')) | |
1245 | revs.sort() |
|
1245 | revs.sort() | |
1246 | revpatches = self._revpatches(repo, revs) |
|
1246 | revpatches = self._revpatches(repo, revs) | |
1247 | realpatches += revpatches |
|
1247 | realpatches += revpatches | |
@@ -1267,9 +1267,9 b' class queue(object):' | |||||
1267 | if any((b'.hgsubstate' in files for files in mar)): |
|
1267 | if any((b'.hgsubstate' in files for files in mar)): | |
1268 | return # already listed up |
|
1268 | return # already listed up | |
1269 | # not yet listed up |
|
1269 | # not yet listed up | |
1270 | if substatestate in b'a?': |
1270 | if substatestate.added or not substatestate.any_tracked: | |
1271 | mar[1].append(b'.hgsubstate') |
|
1271 | mar[1].append(b'.hgsubstate') | |
1272 | elif substatestate in b'r': |
1272 | elif substatestate.removed: | |
1273 | mar[2].append(b'.hgsubstate') |
|
1273 | mar[2].append(b'.hgsubstate') | |
1274 | else: # modified |
|
1274 | else: # modified | |
1275 | mar[0].append(b'.hgsubstate') |
|
1275 | mar[0].append(b'.hgsubstate') | |
@@ -1377,7 +1377,7 b' class queue(object):' | |||||
1377 | self.checkpatchname(patchfn) |
|
1377 | self.checkpatchname(patchfn) | |
1378 | inclsubs = checksubstate(repo) |
|
1378 | inclsubs = checksubstate(repo) | |
1379 | if inclsubs: |
|
1379 | if inclsubs: | |
1380 | substatestate = repo.dirstate[b'.hgsubstate'] |
1380 | substatestate = repo.dirstate.get_entry(b'.hgsubstate') | |
1381 | if opts.get(b'include') or opts.get(b'exclude') or pats: |
|
1381 | if opts.get(b'include') or opts.get(b'exclude') or pats: | |
1382 | # detect missing files in pats |
|
1382 | # detect missing files in pats | |
1383 | def badfn(f, msg): |
|
1383 | def badfn(f, msg): | |
@@ -1908,7 +1908,7 b' class queue(object):' | |||||
1908 |
|
1908 | |||
1909 | inclsubs = checksubstate(repo, patchparent) |
|
1909 | inclsubs = checksubstate(repo, patchparent) | |
1910 | if inclsubs: |
|
1910 | if inclsubs: | |
1911 | substatestate = repo.dirstate[b'.hgsubstate'] |
1911 | substatestate = repo.dirstate.get_entry(b'.hgsubstate') | |
1912 |
|
1912 | |||
1913 | ph = patchheader(self.join(patchfn), self.plainmode) |
|
1913 | ph = patchheader(self.join(patchfn), self.plainmode) | |
1914 | diffopts = self.diffopts( |
|
1914 | diffopts = self.diffopts( | |
@@ -2417,7 +2417,7 b' class queue(object):' | |||||
2417 | raise error.Abort( |
|
2417 | raise error.Abort( | |
2418 | _(b'option "-r" not valid when importing files') |
|
2418 | _(b'option "-r" not valid when importing files') | |
2419 | ) |
|
2419 | ) | |
2420 | rev = scmutil.revrange(repo, rev) |
2420 | rev = logcmdutil.revrange(repo, rev) | |
2421 | rev.sort(reverse=True) |
|
2421 | rev.sort(reverse=True) | |
2422 | elif not files: |
|
2422 | elif not files: | |
2423 | raise error.Abort(_(b'no files or revisions specified')) |
|
2423 | raise error.Abort(_(b'no files or revisions specified')) | |
@@ -3638,7 +3638,7 b' def rename(ui, repo, patch, name=None, *' | |||||
3638 | if r and patch in r.dirstate: |
|
3638 | if r and patch in r.dirstate: | |
3639 | wctx = r[None] |
|
3639 | wctx = r[None] | |
3640 | with r.wlock(): |
|
3640 | with r.wlock(): | |
3641 | if r.dirstate[patch] == b'a': |
3641 | if r.dirstate.get_entry(patch).added: | |
3642 | r.dirstate.set_untracked(patch) |
|
3642 | r.dirstate.set_untracked(patch) | |
3643 | r.dirstate.set_tracked(name) |
|
3643 | r.dirstate.set_tracked(name) | |
3644 | else: |
|
3644 | else: | |
@@ -3878,7 +3878,7 b' def finish(ui, repo, *revrange, **opts):' | |||||
3878 | ui.status(_(b'no patches applied\n')) |
|
3878 | ui.status(_(b'no patches applied\n')) | |
3879 | return 0 |
|
3879 | return 0 | |
3880 |
|
3880 | |||
3881 | revs = scmutil.revrange(repo, revrange) |
3881 | revs = logcmdutil.revrange(repo, revrange) | |
3882 | if repo[b'.'].rev() in revs and repo[None].files(): |
|
3882 | if repo[b'.'].rev() in revs and repo[None].files(): | |
3883 | ui.warn(_(b'warning: uncommitted changes in the working directory\n')) |
|
3883 | ui.warn(_(b'warning: uncommitted changes in the working directory\n')) | |
3884 | # queue.finish may changes phases but leave the responsibility to lock the |
|
3884 | # queue.finish may changes phases but leave the responsibility to lock the |
@@ -289,7 +289,7 b' def _narrow(' | |||||
289 | repair.strip(ui, unfi, tostrip, topic=b'narrow', backup=backup) |
|
289 | repair.strip(ui, unfi, tostrip, topic=b'narrow', backup=backup) | |
290 |
|
290 | |||
291 | todelete = [] |
|
291 | todelete = [] | |
292 | for t, f, f2, size in repo.store.datafiles(): |
292 | for t, f, size in repo.store.datafiles(): | |
293 | if f.startswith(b'data/'): |
|
293 | if f.startswith(b'data/'): | |
294 | file = f[5:-2] |
|
294 | file = f[5:-2] | |
295 | if not newmatch(file): |
|
295 | if not newmatch(file): |
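Note: assuming the old tuple shape reconstructed above, `store.datafiles()` entries shrink from `(filetype, unencoded, encoded, size)` to `(filetype, unencoded, size)`; the encoded name is no longer exposed to callers. Sketch of the new unpacking:

    def total_data_size(repo):
        # Three-field entries after this change; the encoded name is gone.
        return sum(size for t, name, size in repo.store.datafiles())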
@@ -91,6 +91,7 b' from mercurial import (' | |||||
91 | error, |
|
91 | error, | |
92 | formatter, |
|
92 | formatter, | |
93 | hg, |
|
93 | hg, | |
|
94 | logcmdutil, | |||
94 | mail, |
|
95 | mail, | |
95 | patch, |
|
96 | patch, | |
96 | pycompat, |
|
97 | pycompat, | |
@@ -812,7 +813,7 b' def email(ui, repo, *revs, **opts):' | |||||
812 | raise error.Abort(_(b"bookmark '%s' not found") % bookmark) |
|
813 | raise error.Abort(_(b"bookmark '%s' not found") % bookmark) | |
813 | revs = scmutil.bookmarkrevs(repo, bookmark) |
|
814 | revs = scmutil.bookmarkrevs(repo, bookmark) | |
814 |
|
815 | |||
815 | revs = scmutil.revrange(repo, revs) |
816 | revs = logcmdutil.revrange(repo, revs) | |
816 | if outgoing: |
|
817 | if outgoing: | |
817 | revs = _getoutgoing(repo, dest, revs) |
|
818 | revs = _getoutgoing(repo, dest, revs) | |
818 | if bundle: |
|
819 | if bundle: |
@@ -1354,7 +1354,7 b' def phabsend(ui, repo, *revs, **opts):' | |||||
1354 | """ |
|
1354 | """ | |
1355 | opts = pycompat.byteskwargs(opts) |
|
1355 | opts = pycompat.byteskwargs(opts) | |
1356 | revs = list(revs) + opts.get(b'rev', []) |
|
1356 | revs = list(revs) + opts.get(b'rev', []) | |
1357 | revs = scmutil.revrange(repo, revs) |
1357 | revs = logcmdutil.revrange(repo, revs) | |
1358 | revs.sort() # ascending order to preserve topological parent/child in phab |
|
1358 | revs.sort() # ascending order to preserve topological parent/child in phab | |
1359 |
|
1359 | |||
1360 | if not revs: |
|
1360 | if not revs: | |
@@ -2276,7 +2276,7 b' def phabupdate(ui, repo, *specs, **opts)' | |||||
2276 | if specs: |
|
2276 | if specs: | |
2277 | raise error.InputError(_(b'cannot specify both DREVSPEC and --rev')) |
|
2277 | raise error.InputError(_(b'cannot specify both DREVSPEC and --rev')) | |
2278 |
|
2278 | |||
2279 | drevmap = getdrevmap(repo, scmutil.revrange(repo, [revs])) |
2279 | drevmap = getdrevmap(repo, logcmdutil.revrange(repo, [revs])) | |
2280 | specs = [] |
|
2280 | specs = [] | |
2281 | unknown = [] |
|
2281 | unknown = [] | |
2282 | for r, d in pycompat.iteritems(drevmap): |
|
2282 | for r, d in pycompat.iteritems(drevmap): |
@@ -35,6 +35,7 b' from mercurial import (' | |||||
35 | dirstateguard, |
|
35 | dirstateguard, | |
36 | error, |
|
36 | error, | |
37 | extensions, |
|
37 | extensions, | |
|
38 | logcmdutil, | |||
38 | merge as mergemod, |
|
39 | merge as mergemod, | |
39 | mergestate as mergestatemod, |
|
40 | mergestate as mergestatemod, | |
40 | mergeutil, |
|
41 | mergeutil, | |
@@ -1302,19 +1303,19 b' def _definedestmap(ui, repo, inmemory, d' | |||||
1302 | dest = None |
|
1303 | dest = None | |
1303 |
|
1304 | |||
1304 | if revf: |
|
1305 | if revf: | |
1305 | rebaseset = scmutil.revrange(repo, revf) |
1306 | rebaseset = logcmdutil.revrange(repo, revf) | |
1306 | if not rebaseset: |
|
1307 | if not rebaseset: | |
1307 | ui.status(_(b'empty "rev" revision set - nothing to rebase\n')) |
|
1308 | ui.status(_(b'empty "rev" revision set - nothing to rebase\n')) | |
1308 | return None |
|
1309 | return None | |
1309 | elif srcf: |
|
1310 | elif srcf: | |
1310 | src = scmutil.revrange(repo, srcf) |
1311 | src = logcmdutil.revrange(repo, srcf) | |
1311 | if not src: |
|
1312 | if not src: | |
1312 | ui.status(_(b'empty "source" revision set - nothing to rebase\n')) |
|
1313 | ui.status(_(b'empty "source" revision set - nothing to rebase\n')) | |
1313 | return None |
|
1314 | return None | |
1314 | # `+ (%ld)` to work around `wdir()::` being empty |
|
1315 | # `+ (%ld)` to work around `wdir()::` being empty | |
1315 | rebaseset = repo.revs(b'(%ld):: + (%ld)', src, src) |
|
1316 | rebaseset = repo.revs(b'(%ld):: + (%ld)', src, src) | |
1316 | else: |
|
1317 | else: | |
1317 | base = scmutil.revrange(repo, basef or [b'.']) |
1318 | base = logcmdutil.revrange(repo, basef or [b'.']) | |
1318 | if not base: |
|
1319 | if not base: | |
1319 | ui.status( |
|
1320 | ui.status( | |
1320 | _(b'empty "base" revision set - ' b"can't compute rebase set\n") |
|
1321 | _(b'empty "base" revision set - ' b"can't compute rebase set\n") | |
@@ -1322,7 +1323,7 b' def _definedestmap(ui, repo, inmemory, d' | |||||
1322 | return None |
|
1323 | return None | |
1323 | if destf: |
|
1324 | if destf: | |
1324 | # --base does not support multiple destinations |
|
1325 | # --base does not support multiple destinations | |
1325 | dest = scmutil.revsingle(repo, destf) |
1326 | dest = logcmdutil.revsingle(repo, destf) | |
1326 | else: |
|
1327 | else: | |
1327 | dest = repo[_destrebase(repo, base, destspace=destspace)] |
|
1328 | dest = repo[_destrebase(repo, base, destspace=destspace)] | |
1328 | destf = bytes(dest) |
|
1329 | destf = bytes(dest) |
@@ -24,10 +24,10 b' from mercurial import (' | |||||
24 | cmdutil, |
|
24 | cmdutil, | |
25 | config, |
|
25 | config, | |
26 | error, |
|
26 | error, | |
|
27 | logcmdutil, | |||
27 | minirst, |
|
28 | minirst, | |
28 | pycompat, |
|
29 | pycompat, | |
29 | registrar, |
|
30 | registrar, | |
30 | scmutil, |
|
|||
31 | util, |
|
31 | util, | |
32 | ) |
|
32 | ) | |
33 | from mercurial.utils import ( |
|
33 | from mercurial.utils import ( | |
@@ -676,7 +676,7 b' def releasenotes(ui, repo, file_=None, *' | |||||
676 | return _getadmonitionlist(ui, sections) |
|
676 | return _getadmonitionlist(ui, sections) | |
677 |
|
677 | |||
678 | rev = opts.get(b'rev') |
|
678 | rev = opts.get(b'rev') | |
679 | revs = scmutil.revrange(repo, [rev or b'not public()']) |
679 | revs = logcmdutil.revrange(repo, [rev or b'not public()']) | |
680 | if opts.get(b'check'): |
|
680 | if opts.get(b'check'): | |
681 | return checkadmonitions(ui, repo, sections.names(), revs) |
|
681 | return checkadmonitions(ui, repo, sections.names(), revs) | |
682 |
|
682 |
@@ -378,7 +378,7 b' class manifestrevlogstore(object):' | |||||
378 | ledger.markdataentry(self, treename, node) |
|
378 | ledger.markdataentry(self, treename, node) | |
379 | ledger.markhistoryentry(self, treename, node) |
|
379 | ledger.markhistoryentry(self, treename, node) | |
380 |
|
380 | |||
381 | for t, path, encoded, size in self._store.datafiles(): |
381 | for t, path, size in self._store.datafiles(): | |
382 | if path[:5] != b'meta/' or path[-2:] != b'.i': |
|
382 | if path[:5] != b'meta/' or path[-2:] != b'.i': | |
383 | continue |
|
383 | continue | |
384 |
|
384 |
@@ -63,12 +63,14 b' def peersetup(ui, peer):' | |||||
63 | raise error.Abort( |
|
63 | raise error.Abort( | |
64 | b'configured remotefile server does not support getfile' |
|
64 | b'configured remotefile server does not support getfile' | |
65 | ) |
|
65 | ) | |
66 | f = wireprotov1peer.future() |
|
66 | ||
67 | yield {b'file': file, b'node': node}, f |
|
67 | def decode(d): | |
68 | code, data = f.value.split(b'\0', 1) |
68 | code, data = d.split(b'\0', 1) | |
69 | if int(code): |
|
69 | if int(code): | |
70 | raise error.LookupError(file, node, data) |
|
70 | raise error.LookupError(file, node, data) | |
71 | yield data |
71 | return data | |
|
72 | ||||
|
73 | return {b'file': file, b'node': node}, decode | |||
72 |
|
74 | |||
73 | @wireprotov1peer.batchable |
|
75 | @wireprotov1peer.batchable | |
74 | def x_rfl_getflogheads(self, path): |
|
76 | def x_rfl_getflogheads(self, path): | |
@@ -77,10 +79,11 b' def peersetup(ui, peer):' | |||||
77 | b'configured remotefile server does not ' |
|
79 | b'configured remotefile server does not ' | |
78 | b'support getflogheads' |
|
80 | b'support getflogheads' | |
79 | ) |
|
81 | ) | |
80 | f = wireprotov1peer.future() |
|
82 | ||
81 | yield {b'path': path}, f |
|
83 | def decode(d): | |
82 | heads = f.value.split(b'\n') if f.value else [] |
84 | return d.split(b'\n') if d else [] | |
83 | yield heads |
|
85 | ||
|
86 | return {b'path': path}, decode | |||
84 |
|
87 | |||
85 | def _updatecallstreamopts(self, command, opts): |
|
88 | def _updatecallstreamopts(self, command, opts): | |
86 | if command != b'getbundle': |
|
89 | if command != b'getbundle': |
@@ -166,24 +166,24 b' def onetimesetup(ui):' | |||||
166 | n = util.pconvert(fp[striplen:]) |
|
166 | n = util.pconvert(fp[striplen:]) | |
167 | d = store.decodedir(n) |
|
167 | d = store.decodedir(n) | |
168 | t = store.FILETYPE_OTHER |
|
168 | t = store.FILETYPE_OTHER | |
169 | yield (t, d, n, st.st_size) |
169 | yield (t, d, st.st_size) | |
170 | if kind == stat.S_IFDIR: |
|
170 | if kind == stat.S_IFDIR: | |
171 | visit.append(fp) |
|
171 | visit.append(fp) | |
172 |
|
172 | |||
173 | if scmutil.istreemanifest(repo): |
|
173 | if scmutil.istreemanifest(repo): | |
174 | for (t, u, e, s) in repo.store.datafiles(): |
174 | for (t, u, s) in repo.store.datafiles(): | |
175 | if u.startswith(b'meta/') and ( |
|
175 | if u.startswith(b'meta/') and ( | |
176 | u.endswith(b'.i') or u.endswith(b'.d') |
|
176 | u.endswith(b'.i') or u.endswith(b'.d') | |
177 | ): |
|
177 | ): | |
178 | yield (t, u, e, s) |
178 | yield (t, u, s) | |
179 |
|
179 | |||
180 | # Return .d and .i files that do not match the shallow pattern |
|
180 | # Return .d and .i files that do not match the shallow pattern | |
181 | match = state.match |
|
181 | match = state.match | |
182 | if match and not match.always(): |
|
182 | if match and not match.always(): | |
183 | for (t, u, e, s) in repo.store.datafiles(): |
183 | for (t, u, s) in repo.store.datafiles(): | |
184 | f = u[5:-2] # trim data/... and .i/.d |
|
184 | f = u[5:-2] # trim data/... and .i/.d | |
185 | if not state.match(f): |
|
185 | if not state.match(f): | |
186 | yield (t, u, e, s) |
186 | yield (t, u, s) | |
187 |
|
187 | |||
188 | for x in repo.store.topfiles(): |
|
188 | for x in repo.store.topfiles(): | |
189 | if state.noflatmf and x[1][:11] == b'00manifest.': |
|
189 | if state.noflatmf and x[1][:11] == b'00manifest.': |
@@ -255,14 +255,9 b' def _setupdirstate(ui):' | |||||
255 |
|
255 | |||
256 | # Prevent adding files that are outside the sparse checkout |
|
256 | # Prevent adding files that are outside the sparse checkout | |
257 | editfuncs = [ |
|
257 | editfuncs = [ | |
258 | b'normal', |
|
|||
259 | b'set_tracked', |
|
258 | b'set_tracked', | |
260 | b'set_untracked', |
|
259 | b'set_untracked', | |
261 | b'add', |
|
|||
262 | b'normallookup', |
|
|||
263 | b'copy', |
|
260 | b'copy', | |
264 | b'remove', |
|
|||
265 | b'merge', |
|
|||
266 | ] |
|
261 | ] | |
267 | hint = _( |
|
262 | hint = _( | |
268 | b'include file with `hg debugsparse --include <pattern>` or use ' |
|
263 | b'include file with `hg debugsparse --include <pattern>` or use ' |
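Note: with `normal`, `add`, `normallookup`, `remove`, and `merge` gone from the dirstate's public mutators, the sparse guard only needs to wrap the surviving entry points. A sketch of the wrapping idea (`guarded` and `allowed` are hypothetical names, not the extension's real helpers):

    EDITFUNCS = [b'set_tracked', b'set_untracked', b'copy']

    def guarded(func, allowed):
        # Wrap one dirstate mutator so it refuses paths outside the
        # sparse checkout; `allowed` decides membership.
        def inner(self, path, *args, **kwargs):
            if not allowed(path):
                raise ValueError(b'%s is outside the sparse checkout' % path)
            return func(self, path, *args, **kwargs)
        return inner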
@@ -22,6 +22,7 b' from mercurial import (' | |||||
22 | commands, |
|
22 | commands, | |
23 | error, |
|
23 | error, | |
24 | hg, |
|
24 | hg, | |
|
25 | logcmdutil, | |||
25 | pycompat, |
|
26 | pycompat, | |
26 | registrar, |
|
27 | registrar, | |
27 | revsetlang, |
|
28 | revsetlang, | |
@@ -75,7 +76,7 b' def split(ui, repo, *revs, **opts):' | |||||
75 | # If the rebase somehow runs into conflicts, make sure |
|
76 | # If the rebase somehow runs into conflicts, make sure | |
76 | # we close the transaction so the user can continue it. |
|
77 | # we close the transaction so the user can continue it. | |
77 | with util.acceptintervention(tr): |
|
78 | with util.acceptintervention(tr): | |
78 | revs = scmutil.revrange(repo, revlist or [b'.']) |
79 | revs = logcmdutil.revrange(repo, revlist or [b'.']) | |
79 | if len(revs) > 1: |
|
80 | if len(revs) > 1: | |
80 | raise error.InputError(_(b'cannot split multiple revisions')) |
|
81 | raise error.InputError(_(b'cannot split multiple revisions')) | |
81 |
|
82 |
@@ -37,7 +37,6 b' from mercurial import (' | |||||
37 | pycompat, |
|
37 | pycompat, | |
38 | registrar, |
|
38 | registrar, | |
39 | revset, |
|
39 | revset, | |
40 | scmutil, |
|
|||
41 | smartset, |
|
40 | smartset, | |
42 | state as statemod, |
|
41 | state as statemod, | |
43 | util, |
|
42 | util, | |
@@ -845,7 +844,7 b' def _dotransplant(ui, repo, *revs, **opt' | |||||
845 | if opts.get(b'prune'): |
|
844 | if opts.get(b'prune'): | |
846 | prune = { |
|
845 | prune = { | |
847 | source[r].node() |
|
846 | source[r].node() | |
848 | for r in scmutil.revrange(source, opts.get(b'prune')) |
847 | for r in logcmdutil.revrange(source, opts.get(b'prune')) | |
849 | } |
|
848 | } | |
850 | matchfn = lambda x: tf(x) and x not in prune |
|
849 | matchfn = lambda x: tf(x) and x not in prune | |
851 | else: |
|
850 | else: | |
@@ -853,7 +852,7 b' def _dotransplant(ui, repo, *revs, **opt' | |||||
853 | merges = pycompat.maplist(source.lookup, opts.get(b'merge', ())) |
|
852 | merges = pycompat.maplist(source.lookup, opts.get(b'merge', ())) | |
854 | revmap = {} |
|
853 | revmap = {} | |
855 | if revs: |
|
854 | if revs: | |
856 | for r in scmutil.revrange(source, revs): |
855 | for r in logcmdutil.revrange(source, revs): | |
857 | revmap[int(r)] = source[r].node() |
|
856 | revmap[int(r)] = source[r].node() | |
858 | elif opts.get(b'all') or not merges: |
|
857 | elif opts.get(b'all') or not merges: | |
859 | if source != repo: |
|
858 | if source != repo: |
@@ -29,6 +29,8 b' from . import (' | |||||
29 | vfs as vfsmod, |
|
29 | vfs as vfsmod, | |
30 | ) |
|
30 | ) | |
31 |
|
31 | |||
|
32 | from .utils import stringutil | |||
|
33 | ||||
32 | stringio = util.stringio |
|
34 | stringio = util.stringio | |
33 |
|
35 | |||
34 | # from unzip source code: |
|
36 | # from unzip source code: | |
@@ -196,7 +198,7 b' class tarit(object):' | |||||
196 | name, pycompat.sysstr(mode + kind), fileobj |
|
198 | name, pycompat.sysstr(mode + kind), fileobj | |
197 | ) |
|
199 | ) | |
198 | except tarfile.CompressionError as e: |
|
200 | except tarfile.CompressionError as e: | |
199 | raise error.Abort(pycompat.bytestr(e)) |
201 | raise error.Abort(stringutil.forcebytestr(e)) | |
200 |
|
202 | |||
201 | if isinstance(dest, bytes): |
|
203 | if isinstance(dest, bytes): | |
202 | self.z = taropen(b'w:', name=dest) |
|
204 | self.z = taropen(b'w:', name=dest) |
@@ -1,5 +1,5 b'' | |||||
1 | #ifndef _HG_BDIFF_H_ |
1 | #ifndef HG_BDIFF_H | |
2 | #define _HG_BDIFF_H_ |
2 | #define HG_BDIFF_H | |
3 |
|
3 | |||
4 | #include "compat.h" |
|
4 | #include "compat.h" | |
5 |
|
5 |
@@ -1,5 +1,5 b'' | |||||
1 | #ifndef _HG_BITMANIPULATION_H_ |
1 | #ifndef HG_BITMANIPULATION_H | |
2 | #define _HG_BITMANIPULATION_H_ |
2 | #define HG_BITMANIPULATION_H | |
3 |
|
3 | |||
4 | #include <string.h> |
|
4 | #include <string.h> | |
5 |
|
5 |
@@ -680,8 +680,25 b' def binarydecode(repo, stream):' | |||||
680 | return books |
|
680 | return books | |
681 |
|
681 | |||
682 |
|
682 | |||
683 | def updatefromremote(ui, repo, remotemarks, path, trfunc, explicit=()): |
683 | def mirroring_remote(ui, repo, remotemarks): | |
684 | ui.debug(b"checking for updated bookmarks\n") |
|
684 | """computes the bookmark changes that set the local bookmarks to | |
|
685 | remotemarks""" | |||
|
686 | changed = [] | |||
|
687 | localmarks = repo._bookmarks | |||
|
688 | for (b, id) in pycompat.iteritems(remotemarks): | |||
|
689 | if id != localmarks.get(b, None) and id in repo: | |||
|
690 | changed.append((b, id, ui.debug, _(b"updating bookmark %s\n") % b)) | |||
|
691 | for b in localmarks: | |||
|
692 | if b not in remotemarks: | |||
|
693 | changed.append( | |||
|
694 | (b, None, ui.debug, _(b"removing bookmark %s\n") % b) | |||
|
695 | ) | |||
|
696 | return changed | |||
|
697 | ||||
|
698 | ||||
|
699 | def merging_from_remote(ui, repo, remotemarks, path, explicit=()): | |||
|
700 | """computes the bookmark changes that merge remote bookmarks into the | |||
|
701 | local bookmarks, based on comparebookmarks""" | |||
685 | localmarks = repo._bookmarks |
|
702 | localmarks = repo._bookmarks | |
686 | ( |
|
703 | ( | |
687 | addsrc, |
|
704 | addsrc, | |
@@ -752,6 +769,20 b' def updatefromremote(ui, repo, remotemar' | |||||
752 | _(b"remote bookmark %s points to locally missing %s\n") |
|
769 | _(b"remote bookmark %s points to locally missing %s\n") | |
753 | % (b, hex(scid)[:12]) |
|
770 | % (b, hex(scid)[:12]) | |
754 | ) |
|
771 | ) | |
|
772 | return changed | |||
|
773 | ||||
|
774 | ||||
|
775 | def updatefromremote( | |||
|
776 | ui, repo, remotemarks, path, trfunc, explicit=(), mode=None | |||
|
777 | ): | |||
|
778 | if mode == b'ignore': | |||
|
779 | # This should move to an higher level to avoid fetching bookmark at all | |||
|
780 | return | |||
|
781 | ui.debug(b"checking for updated bookmarks\n") | |||
|
782 | if mode == b'mirror': | |||
|
783 | changed = mirroring_remote(ui, repo, remotemarks) | |||
|
784 | else: | |||
|
785 | changed = merging_from_remote(ui, repo, remotemarks, path, explicit) | |||
755 |
|
786 | |||
756 | if changed: |
|
787 | if changed: | |
757 | tr = trfunc() |
|
788 | tr = trfunc() | |
@@ -760,11 +791,14 b' def updatefromremote(ui, repo, remotemar' | |||||
760 | for b, node, writer, msg in sorted(changed, key=key): |
|
791 | for b, node, writer, msg in sorted(changed, key=key): | |
761 | changes.append((b, node)) |
|
792 | changes.append((b, node)) | |
762 | writer(msg) |
|
793 | writer(msg) | |
763 | localmarks.applychanges(repo, tr, changes) |
794 | repo._bookmarks.applychanges(repo, tr, changes) | |
764 |
|
795 | |||
765 |
|
796 | |||
766 | def incoming(ui, repo, peer): |
|
797 | def incoming(ui, repo, peer, mode=None): | |
767 | """Show bookmarks incoming from other to repo""" |
|
798 | """Show bookmarks incoming from other to repo""" | |
|
799 | if mode == b'ignore': | |||
|
800 | ui.status(_(b"bookmarks exchange disabled with this path\n")) | |||
|
801 | return 0 | |||
768 | ui.status(_(b"searching for changed bookmarks\n")) |
|
802 | ui.status(_(b"searching for changed bookmarks\n")) | |
769 |
|
803 | |||
770 | with peer.commandexecutor() as e: |
|
804 | with peer.commandexecutor() as e: | |
@@ -777,9 +811,6 b' def incoming(ui, repo, peer):' | |||||
777 | ).result() |
|
811 | ).result() | |
778 | ) |
|
812 | ) | |
779 |
|
813 | |||
780 | r = comparebookmarks(repo, remotemarks, repo._bookmarks) |
|
|||
781 | addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r |
|
|||
782 |
|
||||
783 | incomings = [] |
|
814 | incomings = [] | |
784 | if ui.debugflag: |
|
815 | if ui.debugflag: | |
785 | getid = lambda id: id |
|
816 | getid = lambda id: id | |
@@ -795,18 +826,36 b' def incoming(ui, repo, peer):' | |||||
795 | def add(b, id, st): |
|
826 | def add(b, id, st): | |
796 | incomings.append(b" %-25s %s\n" % (b, getid(id))) |
|
827 | incomings.append(b" %-25s %s\n" % (b, getid(id))) | |
797 |
|
828 | |||
798 | for b, scid, dcid in addsrc: |
|
829 | if mode == b'mirror': | |
799 | # i18n: "added" refers to a bookmark |
|
830 | localmarks = repo._bookmarks | |
800 | add(b, hex(scid), _(b'added')) |
|
831 | allmarks = set(remotemarks.keys()) | set(localmarks.keys()) | |
801 | for b, scid, dcid in advsrc: |
|
832 | for b in sorted(allmarks): | |
802 | # i18n: "advanced" refers to a bookmark |
|
833 | loc = localmarks.get(b) | |
803 | add(b, hex(scid), _(b'advanced')) |
|
834 | rem = remotemarks.get(b) | |
804 | for b, scid, dcid in diverge: |
|
835 | if loc == rem: | |
805 | # i18n: "diverged" refers to a bookmark |
|
836 | continue | |
806 | add(b, hex(scid), _(b'diverged')) |
|
837 | elif loc is None: | |
807 | for b, scid, dcid in differ: |
|
838 | add(b, hex(rem), _(b'added')) | |
808 | # i18n: "changed" refers to a bookmark |
|
839 | elif rem is None: | |
809 | add(b, hex(scid), _(b'changed')) |
840 | add(b, hex(repo.nullid), _(b'removed')) | |
|
841 | else: | |||
|
842 | add(b, hex(rem), _(b'changed')) | |||
|
843 | else: | |||
|
844 | r = comparebookmarks(repo, remotemarks, repo._bookmarks) | |||
|
845 | addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r | |||
|
846 | ||||
|
847 | for b, scid, dcid in addsrc: | |||
|
848 | # i18n: "added" refers to a bookmark | |||
|
849 | add(b, hex(scid), _(b'added')) | |||
|
850 | for b, scid, dcid in advsrc: | |||
|
851 | # i18n: "advanced" refers to a bookmark | |||
|
852 | add(b, hex(scid), _(b'advanced')) | |||
|
853 | for b, scid, dcid in diverge: | |||
|
854 | # i18n: "diverged" refers to a bookmark | |||
|
855 | add(b, hex(scid), _(b'diverged')) | |||
|
856 | for b, scid, dcid in differ: | |||
|
857 | # i18n: "changed" refers to a bookmark | |||
|
858 | add(b, hex(scid), _(b'changed')) | |||
810 |
|
859 | |||
811 | if not incomings: |
|
860 | if not incomings: | |
812 | ui.status(_(b"no changed bookmarks found\n")) |
|
861 | ui.status(_(b"no changed bookmarks found\n")) |
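Note: bookmark exchange grows a per-path mode here: `ignore` skips bookmark exchange entirely, `mirror` makes the local bookmarks an exact copy of the remote side, and the default keeps the old `comparebookmarks`-based merge. A simplified sketch of the mirror planning (the real `mirroring_remote` above also requires the remote node to be known locally before moving a bookmark):

    def plan_mirror(local, remote):
        # local and remote map bookmark name -> node.
        changes = []
        for name, node in remote.items():
            if local.get(name) != node:
                changes.append((name, node))   # create or move
        for name in local:
            if name not in remote:
                changes.append((name, None))   # delete
        return changes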
@@ -699,7 +699,9 b' def getremotechanges(' | |||||
699 | }, |
|
699 | }, | |
700 | ).result() |
|
700 | ).result() | |
701 |
|
701 | |||
702 | pullop = exchange.pulloperation(bundlerepo, peer, heads=reponodes) |
702 | pullop = exchange.pulloperation( | |
|
703 | bundlerepo, peer, path=None, heads=reponodes | |||
|
704 | ) | |||
703 | pullop.trmanager = bundletransactionmanager() |
|
705 | pullop.trmanager = bundletransactionmanager() | |
704 | exchange._pullapplyphases(pullop, remotephases) |
|
706 | exchange._pullapplyphases(pullop, remotephases) | |
705 |
|
707 |
@@ -264,7 +264,7 b' PyObject *make_file_foldmap(PyObject *se' | |||||
264 | } |
|
264 | } | |
265 |
|
265 | |||
266 | tuple = (dirstateItemObject *)v; |
|
266 | tuple = (dirstateItemObject *)v; | |
267 | if (tuple->state != 'r') { |
|
267 | if (tuple->flags | dirstate_flag_wc_tracked) { | |
268 | PyObject *normed; |
|
268 | PyObject *normed; | |
269 | if (table != NULL) { |
|
269 | if (table != NULL) { | |
270 | normed = _asciitransform(k, table, |
|
270 | normed = _asciitransform(k, table, |
@@ -161,7 +161,7 b' bail:' | |||||
161 | return ret; |
|
161 | return ret; | |
162 | } |
|
162 | } | |
163 |
|
163 | |||
164 | static int dirs_fromdict(PyObject *dirs, PyObject *source, char skipchar) |
164 | static int dirs_fromdict(PyObject *dirs, PyObject *source, bool only_tracked) | |
165 | { |
|
165 | { | |
166 | PyObject *key, *value; |
|
166 | PyObject *key, *value; | |
167 | Py_ssize_t pos = 0; |
|
167 | Py_ssize_t pos = 0; | |
@@ -171,13 +171,14 b' static int dirs_fromdict(PyObject *dirs,' | |||||
171 | PyErr_SetString(PyExc_TypeError, "expected string key"); |
|
171 | PyErr_SetString(PyExc_TypeError, "expected string key"); | |
172 | return -1; |
|
172 | return -1; | |
173 | } |
|
173 | } | |
174 | if (skipchar) { |
|
174 | if (only_tracked) { | |
175 | if (!dirstate_tuple_check(value)) { |
|
175 | if (!dirstate_tuple_check(value)) { | |
176 | PyErr_SetString(PyExc_TypeError, |
|
176 | PyErr_SetString(PyExc_TypeError, | |
177 | "expected a dirstate tuple"); |
|
177 | "expected a dirstate tuple"); | |
178 | return -1; |
|
178 | return -1; | |
179 | } |
|
179 | } | |
180 | if (((dirstateItemObject *)value)->state == skipchar) |
180 | if (!(((dirstateItemObject *)value)->flags & | |
|
181 | dirstate_flag_wc_tracked)) | |||
181 | continue; |
|
182 | continue; | |
182 | } |
|
183 | } | |
183 |
|
184 | |||
@@ -218,15 +219,17 b' static int dirs_fromiter(PyObject *dirs,' | |||||
218 | * Calculate a refcounted set of directory names for the files in a |
|
219 | * Calculate a refcounted set of directory names for the files in a | |
219 | * dirstate. |
|
220 | * dirstate. | |
220 | */ |
|
221 | */ | |
221 | static int dirs_init(dirsObject *self, PyObject *args) |
|
222 | static int dirs_init(dirsObject *self, PyObject *args, PyObject *kwargs) | |
222 | { |
|
223 | { | |
223 | PyObject *dirs = NULL, *source = NULL; |
|
224 | PyObject *dirs = NULL, *source = NULL; | |
224 | char skipchar = 0; |
|
225 | int only_tracked = 0; | |
225 | int ret = -1; |
|
226 | int ret = -1; | |
|
227 | static char *keywords_name[] = {"map", "only_tracked", NULL}; | |||
226 |
|
228 | |||
227 | self->dict = NULL; |
|
229 | self->dict = NULL; | |
228 |
|
230 | |||
229 | if (!PyArg_ParseTuple(args, "|Oc:__init__", &source, &skipchar)) |
231 | if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|Oi:__init__", | |
|
232 | keywords_name, &source, &only_tracked)) | |||
230 | return -1; |
|
233 | return -1; | |
231 |
|
234 | |||
232 | dirs = PyDict_New(); |
|
235 | dirs = PyDict_New(); | |
@@ -237,10 +240,10 b' static int dirs_init(dirsObject *self, P' | |||||
237 | if (source == NULL) |
|
240 | if (source == NULL) | |
238 | ret = 0; |
|
241 | ret = 0; | |
239 | else if (PyDict_Check(source)) |
|
242 | else if (PyDict_Check(source)) | |
240 | ret = dirs_fromdict(dirs, source, skipchar); |
243 | ret = dirs_fromdict(dirs, source, (bool)only_tracked); | |
241 | else if (skipchar) |
|
244 | else if (only_tracked) | |
242 | PyErr_SetString(PyExc_ValueError, |
|
245 | PyErr_SetString(PyExc_ValueError, | |
243 | "skip character is only supported " |
246 | "`only_tracked` is only supported " | |
244 | "with a dict source"); |
|
247 | "with a dict source"); | |
245 | else |
|
248 | else | |
246 | ret = dirs_fromiter(dirs, source); |
|
249 | ret = dirs_fromiter(dirs, source); |
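Note: the `dirs` constructor drops the v1-specific skip character in favour of an `only_tracked` keyword that filters on the `wc_tracked` flag of each `DirstateItem`. A usage sketch, assuming the C extensions are built and importable:

    from mercurial.cext.parsers import dirs

    # old: dirs(dirstate_map, b'r')        # skip entries in state 'r'
    # new: keep only directories of files tracked in the working copy
    d = dirs({}, only_tracked=True)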
@@ -44,42 +44,98 b' static PyObject *dict_new_presized(PyObj' | |||||
44 | return _dict_new_presized(expected_size); |
|
44 | return _dict_new_presized(expected_size); | |
45 | } |
|
45 | } | |
46 |
|
46 | |||
47 | static inline dirstateItemObject *make_dirstate_item(char state, int mode, |
|
|||
48 | int size, int mtime) |
|
|||
49 | { |
|
|||
50 | dirstateItemObject *t = |
|
|||
51 | PyObject_New(dirstateItemObject, &dirstateItemType); |
|
|||
52 | if (!t) { |
|
|||
53 | return NULL; |
|
|||
54 | } |
|
|||
55 | t->state = state; |
|
|||
56 | t->mode = mode; |
|
|||
57 | t->size = size; |
|
|||
58 | t->mtime = mtime; |
|
|||
59 | return t; |
|
|||
60 | } |
|
|||
61 |
|
||||
62 | static PyObject *dirstate_item_new(PyTypeObject *subtype, PyObject *args, |
|
47 | static PyObject *dirstate_item_new(PyTypeObject *subtype, PyObject *args, | |
63 | PyObject *kwds) |
|
48 | PyObject *kwds) | |
64 | { |
|
49 | { | |
65 | /* We do all the initialization here and not a tp_init function because |
|
50 | /* We do all the initialization here and not a tp_init function because | |
66 | * dirstate_item is immutable. */ |
|
51 | * dirstate_item is immutable. */ | |
67 | dirstateItemObject *t; |
|
52 | dirstateItemObject *t; | |
68 | char state; |
|
53 | int wc_tracked; | |
69 | int size, mode, mtime; |
|
54 | int p1_tracked; | |
70 | if (!PyArg_ParseTuple(args, "ciii", &state, &mode, &size, &mtime)) { |
|
55 | int p2_info; | |
|
56 | int has_meaningful_data; | |||
|
57 | int has_meaningful_mtime; | |||
|
58 | int mode; | |||
|
59 | int size; | |||
|
60 | int mtime_s; | |||
|
61 | int mtime_ns; | |||
|
62 | PyObject *parentfiledata; | |||
|
63 | PyObject *fallback_exec; | |||
|
64 | PyObject *fallback_symlink; | |||
|
65 | static char *keywords_name[] = { | |||
|
66 | "wc_tracked", "p1_tracked", "p2_info", | |||
|
67 | "has_meaningful_data", "has_meaningful_mtime", "parentfiledata", | |||
|
68 | "fallback_exec", "fallback_symlink", NULL, | |||
|
69 | }; | |||
|
70 | wc_tracked = 0; | |||
|
71 | p1_tracked = 0; | |||
|
72 | p2_info = 0; | |||
|
73 | has_meaningful_mtime = 1; | |||
|
74 | has_meaningful_data = 1; | |||
|
75 | parentfiledata = Py_None; | |||
|
76 | fallback_exec = Py_None; | |||
|
77 | fallback_symlink = Py_None; | |||
|
78 | if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iiiiiOOO", keywords_name, | |||
|
79 | &wc_tracked, &p1_tracked, &p2_info, | |||
|
80 | &has_meaningful_data, | |||
|
81 | &has_meaningful_mtime, &parentfiledata, | |||
|
82 | &fallback_exec, &fallback_symlink)) { | |||
71 | return NULL; |
|
83 | return NULL; | |
72 | } |
|
84 | } | |
73 |
|
||||
74 | t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1); |
|
85 | t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1); | |
75 | if (!t) { |
|
86 | if (!t) { | |
76 | return NULL; |
|
87 | return NULL; | |
77 | } |
|
88 | } | |
78 | t->state = state; |
|
89 | ||
79 | t->mode = mode; |
|
90 | t->flags = 0; | |
80 | t->size = size; |
|
91 | if (wc_tracked) { | |
81 | t->mtime = mtime; |
|
92 | t->flags |= dirstate_flag_wc_tracked; | |
|
93 | } | |||
|
94 | if (p1_tracked) { | |||
|
95 | t->flags |= dirstate_flag_p1_tracked; | |||
|
96 | } | |||
|
97 | if (p2_info) { | |||
|
98 | t->flags |= dirstate_flag_p2_info; | |||
|
99 | } | |||
|
100 | ||||
|
101 | if (fallback_exec != Py_None) { | |||
|
102 | t->flags |= dirstate_flag_has_fallback_exec; | |||
|
103 | if (PyObject_IsTrue(fallback_exec)) { | |||
|
104 | t->flags |= dirstate_flag_fallback_exec; | |||
|
105 | } | |||
|
106 | } | |||
|
107 | if (fallback_symlink != Py_None) { | |||
|
108 | t->flags |= dirstate_flag_has_fallback_symlink; | |||
|
109 | if (PyObject_IsTrue(fallback_symlink)) { | |||
|
110 | t->flags |= dirstate_flag_fallback_symlink; | |||
|
111 | } | |||
|
112 | } | |||
82 |
|
113 | |||
|
114 | if (parentfiledata != Py_None) { | |||
|
115 | if (!PyArg_ParseTuple(parentfiledata, "ii(ii)", &mode, &size, | |||
|
116 | &mtime_s, &mtime_ns)) { | |||
|
117 | return NULL; | |||
|
118 | } | |||
|
119 | } else { | |||
|
120 | has_meaningful_data = 0; | |||
|
121 | has_meaningful_mtime = 0; | |||
|
122 | } | |||
|
123 | if (has_meaningful_data) { | |||
|
124 | t->flags |= dirstate_flag_has_meaningful_data; | |||
|
125 | t->mode = mode; | |||
|
126 | t->size = size; | |||
|
127 | } else { | |||
|
128 | t->mode = 0; | |||
|
129 | t->size = 0; | |||
|
130 | } | |||
|
131 | if (has_meaningful_mtime) { | |||
|
132 | t->flags |= dirstate_flag_has_mtime; | |||
|
133 | t->mtime_s = mtime_s; | |||
|
134 | t->mtime_ns = mtime_ns; | |||
|
135 | } else { | |||
|
136 | t->mtime_s = 0; | |||
|
137 | t->mtime_ns = 0; | |||
|
138 | } | |||
83 | return (PyObject *)t; |
|
139 | return (PyObject *)t; | |
84 | } |
|
140 | } | |
85 |
|
141 | |||
@@ -88,92 +144,201 b' static void dirstate_item_dealloc(PyObje' | |||||
88 | PyObject_Del(o); |
|
144 | PyObject_Del(o); | |
89 | } |
|
145 | } | |
90 |
|
146 | |||
91 | static Py_ssize_t dirstate_item_length(PyObject *o) |
|
147 | static inline bool dirstate_item_c_tracked(dirstateItemObject *self) | |
|
148 | { | |||
|
149 | return (self->flags & dirstate_flag_wc_tracked); | |||
|
150 | } | |||
|
151 | ||||
|
152 | static inline bool dirstate_item_c_any_tracked(dirstateItemObject *self) | |||
92 | { |
|
153 | { | |
93 | return 4; |
|
154 | const int mask = dirstate_flag_wc_tracked | dirstate_flag_p1_tracked | | |
|
155 | dirstate_flag_p2_info; | |||
|
156 | return (self->flags & mask); | |||
|
157 | } | |||
|
158 | ||||
|
159 | static inline bool dirstate_item_c_added(dirstateItemObject *self) | |||
|
160 | { | |||
|
161 | const int mask = (dirstate_flag_wc_tracked | dirstate_flag_p1_tracked | | |||
|
162 | dirstate_flag_p2_info); | |||
|
163 | const int target = dirstate_flag_wc_tracked; | |||
|
164 | return (self->flags & mask) == target; | |||
94 | } |
|
165 | } | |
95 |
|
166 | |||
96 | static PyObject *dirstate_item_item(PyObject *o, Py_ssize_t i) |
|
167 | static inline bool dirstate_item_c_removed(dirstateItemObject *self) | |
|
168 | { | |||
|
169 | if (self->flags & dirstate_flag_wc_tracked) { | |||
|
170 | return false; | |||
|
171 | } | |||
|
172 | return (self->flags & | |||
|
173 | (dirstate_flag_p1_tracked | dirstate_flag_p2_info)); | |||
|
174 | } | |||
|
175 | ||||
|
176 | static inline bool dirstate_item_c_merged(dirstateItemObject *self) | |||
97 | { |
|
177 | { | |
98 | dirstateItemObject *t = (dirstateItemObject *)o; |
|
178 | return ((self->flags & dirstate_flag_wc_tracked) && | |
99 | switch (i) { |
|
179 | (self->flags & dirstate_flag_p1_tracked) && | |
100 | case 0: |
|
180 | (self->flags & dirstate_flag_p2_info)); | |
101 | return PyBytes_FromStringAndSize(&t->state, 1); |
|
181 | } | |
102 | case 1: |
|
182 | ||
103 | return PyInt_FromLong(t->mode); |
|
183 | static inline bool dirstate_item_c_from_p2(dirstateItemObject *self) | |
104 | case 2: |
|
184 | { | |
105 | return PyInt_FromLong(t->size); |
|
185 | return ((self->flags & dirstate_flag_wc_tracked) && | |
106 | case 3: |
|
186 | !(self->flags & dirstate_flag_p1_tracked) && | |
107 | return PyInt_FromLong(t->mtime); |
|
187 | (self->flags & dirstate_flag_p2_info)); | |
108 | default: |
|
188 | } | |
109 | PyErr_SetString(PyExc_IndexError, "index out of range"); |
|
189 | ||
110 | return NULL; |
|
190 | static inline char dirstate_item_c_v1_state(dirstateItemObject *self) | |
|
191 | { | |||
|
192 | if (dirstate_item_c_removed(self)) { | |||
|
193 | return 'r'; | |||
|
194 | } else if (dirstate_item_c_merged(self)) { | |||
|
195 | return 'm'; | |||
|
196 | } else if (dirstate_item_c_added(self)) { | |||
|
197 | return 'a'; | |||
|
198 | } else { | |||
|
199 | return 'n'; | |||
111 | } |
|
200 | } | |
112 | } |
|
201 | } | |
113 |
|
202 | |||
114 | static PySequenceMethods dirstate_item_sq = { |
|
203 | static inline bool dirstate_item_c_has_fallback_exec(dirstateItemObject *self) | |
115 | dirstate_item_length, /* sq_length */ |
|
204 | { | |
116 | 0, /* sq_concat */ |
|
205 | return (bool)self->flags & dirstate_flag_has_fallback_exec; | |
117 | 0, /* sq_repeat */ |
|
206 | } | |
118 | dirstate_item_item, /* sq_item */ |
|
207 | ||
119 | 0, /* sq_ass_item */ |
|
208 | static inline bool | |
120 | 0, /* sq_contains */ |
|
209 | dirstate_item_c_has_fallback_symlink(dirstateItemObject *self) | |
121 | 0, /* sq_inplace_concat */ |
|
210 | { | |
122 | 0 /* sq_inplace_repeat */ |
|
211 | return (bool)self->flags & dirstate_flag_has_fallback_symlink; | |
|
212 | } | |||
|
213 | ||||
|
214 | static inline int dirstate_item_c_v1_mode(dirstateItemObject *self) | |||
|
215 | { | |||
|
216 | if (self->flags & dirstate_flag_has_meaningful_data) { | |||
|
217 | return self->mode; | |||
|
218 | } else { | |||
|
219 | return 0; | |||
|
220 | } | |||
|
221 | } | |||
|
222 | ||||
|
223 | static inline int dirstate_item_c_v1_size(dirstateItemObject *self) | |||
|
224 | { | |||
|
225 | if (!(self->flags & dirstate_flag_wc_tracked) && | |||
|
226 | (self->flags & dirstate_flag_p2_info)) { | |||
|
227 | if (self->flags & dirstate_flag_p1_tracked) { | |||
|
228 | return dirstate_v1_nonnormal; | |||
|
229 | } else { | |||
|
230 | return dirstate_v1_from_p2; | |||
|
231 | } | |||
|
232 | } else if (dirstate_item_c_removed(self)) { | |||
|
233 | return 0; | |||
|
234 | } else if (self->flags & dirstate_flag_p2_info) { | |||
|
235 | return dirstate_v1_from_p2; | |||
|
236 | } else if (dirstate_item_c_added(self)) { | |||
|
237 | return dirstate_v1_nonnormal; | |||
|
238 | } else if (self->flags & dirstate_flag_has_meaningful_data) { | |||
|
239 | return self->size; | |||
|
240 | } else { | |||
|
241 | return dirstate_v1_nonnormal; | |||
|
242 | } | |||
|
243 | } | |||
|
244 | ||||
|
245 | static inline int dirstate_item_c_v1_mtime(dirstateItemObject *self) | |||
|
246 | { | |||
|
247 | if (dirstate_item_c_removed(self)) { | |||
|
248 | return 0; | |||
|
249 | } else if (!(self->flags & dirstate_flag_has_mtime) || | |||
|
250 | !(self->flags & dirstate_flag_p1_tracked) || | |||
|
251 | !(self->flags & dirstate_flag_wc_tracked) || | |||
|
252 | (self->flags & dirstate_flag_p2_info)) { | |||
|
253 | return ambiguous_time; | |||
|
254 | } else { | |||
|
255 | return self->mtime_s; | |||
|
256 | } | |||
|
257 | } | |||
|
258 | ||||
|
259 | static PyObject *dirstate_item_v2_data(dirstateItemObject *self) | |||
|
260 | { | |||
|
261 | int flags = self->flags; | |||
|
262 | int mode = dirstate_item_c_v1_mode(self); | |||
|
263 | #ifdef S_IXUSR | |||
|
264 | /* This is for platforms with an exec bit */ | |||
|
265 | if ((mode & S_IXUSR) != 0) { | |||
|
266 | flags |= dirstate_flag_mode_exec_perm; | |||
|
267 | } else { | |||
|
268 | flags &= ~dirstate_flag_mode_exec_perm; | |||
|
269 | } | |||
|
270 | #else | |||
|
271 | flags &= ~dirstate_flag_mode_exec_perm; | |||
|
272 | #endif | |||
|
273 | #ifdef S_ISLNK | |||
|
274 | /* This is for platforms with support for symlinks */ | |||
|
275 | if (S_ISLNK(mode)) { | |||
|
276 | flags |= dirstate_flag_mode_is_symlink; | |||
|
277 | } else { | |||
|
278 | flags &= ~dirstate_flag_mode_is_symlink; | |||
|
279 | } | |||
|
280 | #else | |||
|
281 | flags &= ~dirstate_flag_mode_is_symlink; | |||
|
282 | #endif | |||
|
283 | return Py_BuildValue("iiii", flags, self->size, self->mtime_s, | |||
|
284 | self->mtime_ns); | |||
123 | }; |
|
285 | }; | |
124 |
|
286 | |||
125 | static PyObject *dirstate_item_v1_state(dirstateItemObject *self) |
|
287 | static PyObject *dirstate_item_v1_state(dirstateItemObject *self) | |
126 | { |
|
288 | { | |
127 | return PyBytes_FromStringAndSize(&self->state, 1); |
|
289 | char state = dirstate_item_c_v1_state(self); | |
|
290 | return PyBytes_FromStringAndSize(&state, 1); | |||
128 | }; |
|
291 | }; | |
129 |
|
292 | |||
130 | static PyObject *dirstate_item_v1_mode(dirstateItemObject *self) |
|
293 | static PyObject *dirstate_item_v1_mode(dirstateItemObject *self) | |
131 | { |
|
294 | { | |
132 | return PyInt_FromLong(self->mode); |
295 | return PyInt_FromLong(dirstate_item_c_v1_mode(self)); | |
133 | }; |
|
296 | }; | |
134 |
|
297 | |||
135 | static PyObject *dirstate_item_v1_size(dirstateItemObject *self) |
|
298 | static PyObject *dirstate_item_v1_size(dirstateItemObject *self) | |
136 | { |
|
299 | { | |
137 | return PyInt_FromLong(self->size); |
300 | return PyInt_FromLong(dirstate_item_c_v1_size(self)); | |
138 | }; |
|
301 | }; | |
139 |
|
302 | |||
140 | static PyObject *dirstate_item_v1_mtime(dirstateItemObject *self) |
|
303 | static PyObject *dirstate_item_v1_mtime(dirstateItemObject *self) | |
141 | { |
|
304 | { | |
142 | return PyInt_FromLong(self->mtime); |
305 | return PyInt_FromLong(dirstate_item_c_v1_mtime(self)); | |
143 | }; |
|
306 | }; | |
144 |
|
307 | |||
145 | static PyObject *dm_nonnormal(dirstateItemObject *self) |
308 | static PyObject *dirstate_item_need_delay(dirstateItemObject *self, | |
|
309 | PyObject *now) | |||
146 | { |
|
310 | { | |
147 | if (self->state != 'n' || self->mtime == ambiguous_time) { |
|
311 | int now_s; | |
148 | Py_RETURN_TRUE; |
|
312 | int now_ns; | |
149 | } else { |
|
313 | if (!PyArg_ParseTuple(now, "ii", &now_s, &now_ns)) { | |
150 | Py_RETURN_FALSE; |
|
314 | return NULL; | |
151 | } |
|
315 | } | |
152 | }; |
|
316 | if (dirstate_item_c_v1_state(self) == 'n' && self->mtime_s == now_s) { | |
153 | static PyObject *dm_otherparent(dirstateItemObject *self) |
|
|||
154 | { |
|
|||
155 | if (self->size == dirstate_v1_from_p2) { |
|
|||
156 | Py_RETURN_TRUE; |
|
317 | Py_RETURN_TRUE; | |
157 | } else { |
|
318 | } else { | |
158 | Py_RETURN_FALSE; |
|
319 | Py_RETURN_FALSE; | |
159 | } |
|
320 | } | |
160 | }; |
|
321 | }; | |
161 |
|
322 | |||
162 | static PyObject *dirstate_item_need_delay(dirstateItemObject *self, |
323 | static PyObject *dirstate_item_mtime_likely_equal_to(dirstateItemObject *self, | |
163 | PyObject *value) |
324 | PyObject *other) | |
164 | { |
|
325 | { | |
165 | long now; |
|
326 | int other_s; | |
166 | if (!pylong_to_long(value, &now)) { |
|
327 | int other_ns; | |
|
328 | if (!PyArg_ParseTuple(other, "ii", &other_s, &other_ns)) { | |||
167 | return NULL; |
|
329 | return NULL; | |
168 | } |
|
330 | } | |
169 | if (self->state == 'n' && self->mtime == now) { |
|
331 | if ((self->flags & dirstate_flag_has_mtime) && | |
|
332 | self->mtime_s == other_s && | |||
|
333 | (self->mtime_ns == other_ns || self->mtime_ns == 0 || | |||
|
334 | other_ns == 0)) { | |||
170 | Py_RETURN_TRUE; |
|
335 | Py_RETURN_TRUE; | |
171 | } else { |
|
336 | } else { | |
172 | Py_RETURN_FALSE; |
|
337 | Py_RETURN_FALSE; | |
173 | } |
|
338 | } | |
174 | }; |
|
339 | }; | |
175 |
|
340 | |||
176 |
/* This will never change since it's bound to V1 |
|
341 | /* This will never change since it's bound to V1 | |
177 | */ |
|
342 | */ | |
178 | static inline dirstateItemObject * |
|
343 | static inline dirstateItemObject * | |
179 | dirstate_item_from_v1_data(char state, int mode, int size, int mtime) |
|
344 | dirstate_item_from_v1_data(char state, int mode, int size, int mtime) | |
@@ -183,10 +348,56 b' dirstate_item_from_v1_data(char state, i' | |||||
183 | if (!t) { |
|
348 | if (!t) { | |
184 | return NULL; |
|
349 | return NULL; | |
185 | } |
|
350 | } | |
186 | t->state = state; |
|
351 | t->flags = 0; | |
187 | t->mode = mode; |
352 | t->mode = 0; | |
188 | t->size = size; |
353 | t->size = 0; | |
189 | t->mtime = mtime; |
354 | t->mtime_s = 0; | |
|
355 | t->mtime_ns = 0; | |||
|
356 | ||||
|
357 | if (state == 'm') { | |||
|
358 | t->flags = (dirstate_flag_wc_tracked | | |||
|
359 | dirstate_flag_p1_tracked | dirstate_flag_p2_info); | |||
|
360 | } else if (state == 'a') { | |||
|
361 | t->flags = dirstate_flag_wc_tracked; | |||
|
362 | } else if (state == 'r') { | |||
|
363 | if (size == dirstate_v1_nonnormal) { | |||
|
364 | t->flags = | |||
|
365 | dirstate_flag_p1_tracked | dirstate_flag_p2_info; | |||
|
366 | } else if (size == dirstate_v1_from_p2) { | |||
|
367 | t->flags = dirstate_flag_p2_info; | |||
|
368 | } else { | |||
|
369 | t->flags = dirstate_flag_p1_tracked; | |||
|
370 | } | |||
|
371 | } else if (state == 'n') { | |||
|
372 | if (size == dirstate_v1_from_p2) { | |||
|
373 | t->flags = | |||
|
374 | dirstate_flag_wc_tracked | dirstate_flag_p2_info; | |||
|
375 | } else if (size == dirstate_v1_nonnormal) { | |||
|
376 | t->flags = | |||
|
377 | dirstate_flag_wc_tracked | dirstate_flag_p1_tracked; | |||
|
378 | } else if (mtime == ambiguous_time) { | |||
|
379 | t->flags = (dirstate_flag_wc_tracked | | |||
|
380 | dirstate_flag_p1_tracked | | |||
|
381 | dirstate_flag_has_meaningful_data); | |||
|
382 | t->mode = mode; | |||
|
383 | t->size = size; | |||
|
384 | } else { | |||
|
385 | t->flags = (dirstate_flag_wc_tracked | | |||
|
386 | dirstate_flag_p1_tracked | | |||
|
387 | dirstate_flag_has_meaningful_data | | |||
|
388 | dirstate_flag_has_mtime); | |||
|
389 | t->mode = mode; | |||
|
390 | t->size = size; | |||
|
391 | t->mtime_s = mtime; | |||
|
392 | } | |||
|
393 | } else { | |||
|
394 | PyErr_Format(PyExc_RuntimeError, | |||
|
395 | "unknown state: `%c` (%d, %d, %d)", state, mode, | |||
|
396 | size, mtime, NULL); | |||
|
397 | Py_DECREF(t); | |||
|
398 | return NULL; | |||
|
399 | } | |||
|
400 | ||||
190 | return t; |
|
401 | return t; | |
191 | } |
|
402 | } | |
192 |
|
403 | |||
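Note: `dirstate_item_from_v1_data()` above encodes the whole v1-to-flags mapping: the one-letter state plus the magic v1 size values determine the three tracking bits. A Python mirror of that mapping, kept deliberately minimal to make the bit semantics explicit (all names are local to this sketch):

    def flags_from_v1(state, size):
        DIRSTATE_V1_FROM_P2 = -2
        DIRSTATE_V1_NONNORMAL = -1
        wc = p1 = p2 = False
        if state == b'm':
            wc = p1 = p2 = True
        elif state == b'a':
            wc = True
        elif state == b'r':
            if size == DIRSTATE_V1_NONNORMAL:
                p1 = p2 = True
            elif size == DIRSTATE_V1_FROM_P2:
                p2 = True
            else:
                p1 = True
        elif state == b'n':
            wc = True
            p2 = size == DIRSTATE_V1_FROM_P2
            p1 = not p2
        # (wc_tracked, p1_tracked, p2_info)
        return (wc, p1, p2)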
@@ -196,22 +407,52 b' static PyObject *dirstate_item_from_v1_m' | |||||
196 | { |
|
407 | { | |
197 | /* We do all the initialization here and not a tp_init function because |
|
408 | /* We do all the initialization here and not a tp_init function because | |
198 | * dirstate_item is immutable. */ |
|
409 | * dirstate_item is immutable. */ | |
199 | dirstateItemObject *t; |
|
|||
200 | char state; |
|
410 | char state; | |
201 | int size, mode, mtime; |
|
411 | int size, mode, mtime; | |
202 | if (!PyArg_ParseTuple(args, "ciii", &state, &mode, &size, &mtime)) { |
|
412 | if (!PyArg_ParseTuple(args, "ciii", &state, &mode, &size, &mtime)) { | |
203 | return NULL; |
|
413 | return NULL; | |
204 | } |
|
414 | } | |
|
415 | return (PyObject *)dirstate_item_from_v1_data(state, mode, size, mtime); | |||
|
416 | }; | |||
205 |
|
417 | |||
206 | t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1); |
|
418 | static PyObject *dirstate_item_from_v2_meth(PyTypeObject *subtype, | |
|
419 | PyObject *args) | |||
|
420 | { | |||
|
421 | dirstateItemObject *t = | |||
|
422 | PyObject_New(dirstateItemObject, &dirstateItemType); | |||
207 | if (!t) { |
|
423 | if (!t) { | |
208 | return NULL; |
|
424 | return NULL; | |
209 | } |
|
425 | } | |
210 | t->state = state; |
|
426 | if (!PyArg_ParseTuple(args, "iiii", &t->flags, &t->size, &t->mtime_s, | |
211 | t->mode = mode; |
|
427 | &t->mtime_ns)) { | |
212 | t->size = size; |
|
428 | return NULL; | |
213 | t->mtime = mtime; |
|
429 | } | |
214 |
|
430 | if (t->flags & dirstate_flag_expected_state_is_modified) { | ||
|
431 | t->flags &= ~(dirstate_flag_expected_state_is_modified | | |||
|
432 | dirstate_flag_has_meaningful_data | | |||
|
433 | dirstate_flag_has_mtime); | |||
|
434 | } | |||
|
435 | if (t->flags & dirstate_flag_mtime_second_ambiguous) { | |||
|
436 | /* The current code is not able to do the more subtle comparison | |||
|
437 | * that the MTIME_SECOND_AMBIGUOUS requires. So we ignore the | |||
|
438 | * mtime */ | |||
|
439 | t->flags &= ~(dirstate_flag_mtime_second_ambiguous | | |||
|
440 | dirstate_flag_has_meaningful_data | | |||
|
441 | dirstate_flag_has_mtime); | |||
|
442 | } | |||
|
443 | t->mode = 0; | |||
|
444 | if (t->flags & dirstate_flag_has_meaningful_data) { | |||
|
445 | if (t->flags & dirstate_flag_mode_exec_perm) { | |||
|
446 | t->mode = 0755; | |||
|
447 | } else { | |||
|
448 | t->mode = 0644; | |||
|
449 | } | |||
|
450 | if (t->flags & dirstate_flag_mode_is_symlink) { | |||
|
451 | t->mode |= S_IFLNK; | |||
|
452 | } else { | |||
|
453 | t->mode |= S_IFREG; | |||
|
454 | } | |||
|
455 | } | |||
215 | return (PyObject *)t; |
|
456 | return (PyObject *)t; | |
216 | }; |
|
457 | }; | |
217 |
|
458 | |||
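Note: `from_v2_data` canonicalizes the file mode from two flag bits, as shown above: the exec-permission bit picks 0755 versus 0644, and the symlink bit picks the file type. A Python restatement of that reconstruction:

    import stat

    def mode_from_v2_flags(exec_perm, is_symlink):
        # Permissions are canonicalized; the original mode is not stored.
        mode = 0o755 if exec_perm else 0o644
        mode |= stat.S_IFLNK if is_symlink else stat.S_IFREG
        return mode

    assert mode_from_v2_flags(False, False) == 0o100644  # plain file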
@@ -219,11 +460,62 b' static PyObject *dirstate_item_from_v1_m' | |||||
219 | to make sure it is correct. */ |
|
460 | to make sure it is correct. */ | |
220 | static PyObject *dirstate_item_set_possibly_dirty(dirstateItemObject *self) |
|
461 | static PyObject *dirstate_item_set_possibly_dirty(dirstateItemObject *self) | |
221 | { |
|
462 | { | |
222 | self->mtime = ambiguous_time; |
|
463 | self->flags &= ~dirstate_flag_has_mtime; | |
|
464 | Py_RETURN_NONE; | |||
|
465 | } | |||
|
466 | ||||
|
467 | /* See docstring of the python implementation for details */ | |||
|
468 | static PyObject *dirstate_item_set_clean(dirstateItemObject *self, | |||
|
469 | PyObject *args) | |||
|
470 | { | |||
|
471 | int size, mode, mtime_s, mtime_ns; | |||
|
472 | if (!PyArg_ParseTuple(args, "ii(ii)", &mode, &size, &mtime_s, | |||
|
473 | &mtime_ns)) { | |||
|
474 | return NULL; | |||
|
475 | } | |||
|
476 | self->flags = dirstate_flag_wc_tracked | dirstate_flag_p1_tracked | | |||
|
477 | dirstate_flag_has_meaningful_data | | |||
|
478 | dirstate_flag_has_mtime; | |||
|
479 | self->mode = mode; | |||
|
480 | self->size = size; | |||
|
481 | self->mtime_s = mtime_s; | |||
|
482 | self->mtime_ns = mtime_ns; | |||
223 | Py_RETURN_NONE; |
|
483 | Py_RETURN_NONE; | |
224 | } |
|
484 | } | |
225 |
|
485 | |||
|
486 | static PyObject *dirstate_item_set_tracked(dirstateItemObject *self) | |||
|
487 | { | |||
|
488 | self->flags |= dirstate_flag_wc_tracked; | |||
|
489 | self->flags &= ~dirstate_flag_has_mtime; | |||
|
490 | Py_RETURN_NONE; | |||
|
491 | } | |||
|
492 | ||||
|
493 | static PyObject *dirstate_item_set_untracked(dirstateItemObject *self) | |||
|
494 | { | |||
|
495 | self->flags &= ~dirstate_flag_wc_tracked; | |||
|
496 | self->mode = 0; | |||
|
497 | self->size = 0; | |||
|
498 | self->mtime_s = 0; | |||
|
499 | self->mtime_ns = 0; | |||
|
500 | Py_RETURN_NONE; | |||
|
501 | } | |||
|
502 | ||||
|
503 | static PyObject *dirstate_item_drop_merge_data(dirstateItemObject *self) | |||
|
504 | { | |||
|
505 | if (self->flags & dirstate_flag_p2_info) { | |||
|
506 | self->flags &= ~(dirstate_flag_p2_info | | |||
|
507 | dirstate_flag_has_meaningful_data | | |||
|
508 | dirstate_flag_has_mtime); | |||
|
509 | self->mode = 0; | |||
|
510 | self->size = 0; | |||
|
511 | self->mtime_s = 0; | |||
|
512 | self->mtime_ns = 0; | |||
|
513 | } | |||
|
514 | Py_RETURN_NONE; | |||
|
515 | } | |||
226 | static PyMethodDef dirstate_item_methods[] = { |
|
516 | static PyMethodDef dirstate_item_methods[] = { | |
|
517 | {"v2_data", (PyCFunction)dirstate_item_v2_data, METH_NOARGS, | |||
|
518 | "return data suitable for v2 serialization"}, | |||
227 | {"v1_state", (PyCFunction)dirstate_item_v1_state, METH_NOARGS, |
|
519 | {"v1_state", (PyCFunction)dirstate_item_v1_state, METH_NOARGS, | |
228 | "return a \"state\" suitable for v1 serialization"}, |
|
520 | "return a \"state\" suitable for v1 serialization"}, | |
229 | {"v1_mode", (PyCFunction)dirstate_item_v1_mode, METH_NOARGS, |
|
521 | {"v1_mode", (PyCFunction)dirstate_item_v1_mode, METH_NOARGS, | |
@@ -234,40 +526,134 b' static PyMethodDef dirstate_item_methods' | |||||
234 | "return a \"mtime\" suitable for v1 serialization"}, |
|
526 | "return a \"mtime\" suitable for v1 serialization"}, | |
235 | {"need_delay", (PyCFunction)dirstate_item_need_delay, METH_O, |
|
527 | {"need_delay", (PyCFunction)dirstate_item_need_delay, METH_O, | |
236 | "True if the stored mtime would be ambiguous with the current time"}, |
|
528 | "True if the stored mtime would be ambiguous with the current time"}, | |
237 | {"from_v1_data", (PyCFunction)dirstate_item_from_v1_meth, METH_O, |
|
529 | {"mtime_likely_equal_to", (PyCFunction)dirstate_item_mtime_likely_equal_to, | |
238 | "build a new DirstateItem object from V1 data"}, |
|
530 | METH_O, "True if the stored mtime is likely equal to the given mtime"}, | |
|
531 | {"from_v1_data", (PyCFunction)dirstate_item_from_v1_meth, | |||
|
532 | METH_VARARGS | METH_CLASS, "build a new DirstateItem object from V1 data"}, | |||
|
533 | {"from_v2_data", (PyCFunction)dirstate_item_from_v2_meth, | |||
|
534 | METH_VARARGS | METH_CLASS, "build a new DirstateItem object from V2 data"}, | |||
239 | {"set_possibly_dirty", (PyCFunction)dirstate_item_set_possibly_dirty, |
|
535 | {"set_possibly_dirty", (PyCFunction)dirstate_item_set_possibly_dirty, | |
240 | METH_NOARGS, "mark a file as \"possibly dirty\""}, |
|
536 | METH_NOARGS, "mark a file as \"possibly dirty\""}, | |
241 | {"dm_nonnormal", (PyCFunction)dim_nonnormal, METH_NOARGS, |
|
537 | {"set_clean", (PyCFunction)dirstate_item_set_clean, METH_VARARGS, | |
242 | "True is the entry is non-normal in the dirstatemap sense"}, |
|
538 | "mark a file as \"clean\""}, | |
243 | {"dm_otherparent", (PyCFunction)dim_otherparent, METH_NOARGS, |
|
539 | {"set_tracked", (PyCFunction)dirstate_item_set_tracked, METH_NOARGS, | |
244 | "True is the entry is `otherparent` in the dirstatemap sense"}, |
|
540 | "mark a file as \"tracked\""}, | |
|
541 | {"set_untracked", (PyCFunction)dirstate_item_set_untracked, METH_NOARGS, | |||
|
542 | "mark a file as \"untracked\""}, | |||
|
543 | {"drop_merge_data", (PyCFunction)dirstate_item_drop_merge_data, METH_NOARGS, | |||
|
544 | "remove all \"merge-only\" from a DirstateItem"}, | |||
245 | {NULL} /* Sentinel */ |
|
545 | {NULL} /* Sentinel */ | |
246 | }; |
|
546 | }; | |
247 |
|
547 | |||
248 | static PyObject *dirstate_item_get_mode(dirstateItemObject *self) |
|
548 | static PyObject *dirstate_item_get_mode(dirstateItemObject *self) | |
249 | { |
|
549 | { | |
250 | return PyInt_FromLong(self->mode); |
|
550 | return PyInt_FromLong(dirstate_item_c_v1_mode(self)); | |
251 | }; |
|
551 | }; | |
252 |
|
552 | |||
253 | static PyObject *dirstate_item_get_size(dirstateItemObject *self) |
|
553 | static PyObject *dirstate_item_get_size(dirstateItemObject *self) | |
254 | { |
|
554 | { | |
255 | return PyInt_FromLong(self->size); |
|
555 | return PyInt_FromLong(dirstate_item_c_v1_size(self)); | |
256 | }; |
|
556 | }; | |
257 |
|
557 | |||
258 | static PyObject *dirstate_item_get_mtime(dirstateItemObject *self) |
|
558 | static PyObject *dirstate_item_get_mtime(dirstateItemObject *self) | |
259 | { |
|
559 | { | |
260 | return PyInt_FromLong(self->mtime); |
|
560 | return PyInt_FromLong(dirstate_item_c_v1_mtime(self)); | |
261 | }; |
|
561 | }; | |
262 |
|
562 | |||
263 | static PyObject *dirstate_item_get_state(dirstateItemObject *self) |
|
563 | static PyObject *dirstate_item_get_state(dirstateItemObject *self) | |
264 | { |
|
564 | { | |
265 | return PyBytes_FromStringAndSize(&self->state, 1); |
|
565 | char state = dirstate_item_c_v1_state(self); | |
|
566 | return PyBytes_FromStringAndSize(&state, 1); | |||
|
567 | }; | |||
|
568 | ||||
|
569 | static PyObject *dirstate_item_get_has_fallback_exec(dirstateItemObject *self) | |||
|
570 | { | |||
|
571 | if (dirstate_item_c_has_fallback_exec(self)) { | |||
|
572 | Py_RETURN_TRUE; | |||
|
573 | } else { | |||
|
574 | Py_RETURN_FALSE; | |||
|
575 | } | |||
|
576 | }; | |||
|
577 | ||||
|
578 | static PyObject *dirstate_item_get_fallback_exec(dirstateItemObject *self) | |||
|
579 | { | |||
|
580 | if (dirstate_item_c_has_fallback_exec(self)) { | |||
|
581 | if (self->flags & dirstate_flag_fallback_exec) { | |||
|
582 | Py_RETURN_TRUE; | |||
|
583 | } else { | |||
|
584 | Py_RETURN_FALSE; | |||
|
585 | } | |||
|
586 | } else { | |||
|
587 | Py_RETURN_NONE; | |||
|
588 | } | |||
|
589 | }; | |||
|
590 | ||||
|
591 | static int dirstate_item_set_fallback_exec(dirstateItemObject *self, | |||
|
592 | PyObject *value) | |||
|
593 | { | |||
|
594 | if ((value == Py_None) || (value == NULL)) { | |||
|
595 | self->flags &= ~dirstate_flag_has_fallback_exec; | |||
|
596 | } else { | |||
|
597 | self->flags |= dirstate_flag_has_fallback_exec; | |||
|
598 | if (PyObject_IsTrue(value)) { | |||
|
599 | self->flags |= dirstate_flag_fallback_exec; | |||
|
600 | } else { | |||
|
601 | self->flags &= ~dirstate_flag_fallback_exec; | |||
|
602 | } | |||
|
603 | } | |||
|
604 | return 0; | |||
|
605 | }; | |||
|
606 | ||||
|
607 | static PyObject * | |||
|
608 | dirstate_item_get_has_fallback_symlink(dirstateItemObject *self) | |||
|
609 | { | |||
|
610 | if (dirstate_item_c_has_fallback_symlink(self)) { | |||
|
611 | Py_RETURN_TRUE; | |||
|
612 | } else { | |||
|
613 | Py_RETURN_FALSE; | |||
|
614 | } | |||
|
615 | }; | |||
|
616 | ||||
|
617 | static PyObject *dirstate_item_get_fallback_symlink(dirstateItemObject *self) | |||
|
618 | { | |||
|
619 | if (dirstate_item_c_has_fallback_symlink(self)) { | |||
|
620 | if (self->flags & dirstate_flag_fallback_symlink) { | |||
|
621 | Py_RETURN_TRUE; | |||
|
622 | } else { | |||
|
623 | Py_RETURN_FALSE; | |||
|
624 | } | |||
|
625 | } else { | |||
|
626 | Py_RETURN_NONE; | |||
|
627 | } | |||
|
628 | }; | |||
|
629 | ||||
|
630 | static int dirstate_item_set_fallback_symlink(dirstateItemObject *self, | |||
|
631 | PyObject *value) | |||
|
632 | { | |||
|
633 | if ((value == Py_None) || (value == NULL)) { | |||
|
634 | self->flags &= ~dirstate_flag_has_fallback_symlink; | |||
|
635 | } else { | |||
|
636 | self->flags |= dirstate_flag_has_fallback_symlink; | |||
|
637 | if (PyObject_IsTrue(value)) { | |||
|
638 | self->flags |= dirstate_flag_fallback_symlink; | |||
|
639 | } else { | |||
|
640 | self->flags &= ~dirstate_flag_fallback_symlink; | |||
|
641 | } | |||
|
642 | } | |||
|
643 | return 0; | |||
266 | }; |
|
644 | }; | |
267 |
|
645 | |||
268 | static PyObject *dirstate_item_get_tracked(dirstateItemObject *self) |
|
646 | static PyObject *dirstate_item_get_tracked(dirstateItemObject *self) | |
269 | { |
|
647 | { | |
270 | if (self->state == 'a' || self->state == 'm' || self->state == 'n') { |
|
648 | if (dirstate_item_c_tracked(self)) { | |
|
649 | Py_RETURN_TRUE; | |||
|
650 | } else { | |||
|
651 | Py_RETURN_FALSE; | |||
|
652 | } | |||
|
653 | }; | |||
|
654 | static PyObject *dirstate_item_get_p1_tracked(dirstateItemObject *self) | |||
|
655 | { | |||
|
656 | if (self->flags & dirstate_flag_p1_tracked) { | |||
271 | Py_RETURN_TRUE; |
|
657 | Py_RETURN_TRUE; | |
272 | } else { |
|
658 | } else { | |
273 | Py_RETURN_FALSE; |
|
659 | Py_RETURN_FALSE; | |
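
`fallback_exec` and `fallback_symlink` above are tri-state attributes packed into two bits each: a HAS_* bit records whether a fallback value exists at all, and a second bit carries the value. A minimal Python sketch of the same encoding, with flag values taken from the dirstate.h hunk further down (the C setter additionally treats attribute deletion like None):

    HAS_FALLBACK_EXEC = 1 << 5
    FALLBACK_EXEC = 1 << 6

    def get_fallback_exec(flags):
        # None means "nothing recorded"; otherwise a real boolean.
        if not flags & HAS_FALLBACK_EXEC:
            return None
        return bool(flags & FALLBACK_EXEC)

    def set_fallback_exec(flags, value):
        if value is None:
            return flags & ~HAS_FALLBACK_EXEC
        flags |= HAS_FALLBACK_EXEC
        return flags | FALLBACK_EXEC if value else flags & ~FALLBACK_EXEC
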
@@ -276,7 +662,17 b' static PyObject *dirstate_item_get_track' | |||||
276 |
|
662 | |||
277 | static PyObject *dirstate_item_get_added(dirstateItemObject *self) |
|
663 | static PyObject *dirstate_item_get_added(dirstateItemObject *self) | |
278 | { |
|
664 | { | |
279 | if (self->state == 'a') { |
|
665 | if (dirstate_item_c_added(self)) { | |
|
666 | Py_RETURN_TRUE; | |||
|
667 | } else { | |||
|
668 | Py_RETURN_FALSE; | |||
|
669 | } | |||
|
670 | }; | |||
|
671 | ||||
|
672 | static PyObject *dirstate_item_get_p2_info(dirstateItemObject *self) | |||
|
673 | { | |||
|
674 | if (self->flags & dirstate_flag_wc_tracked && | |||
|
675 | self->flags & dirstate_flag_p2_info) { | |||
280 | Py_RETURN_TRUE; |
|
676 | Py_RETURN_TRUE; | |
281 | } else { |
|
677 | } else { | |
282 | Py_RETURN_FALSE; |
|
678 | Py_RETURN_FALSE; | |
@@ -285,16 +681,7 b' static PyObject *dirstate_item_get_added' | |||||
285 |
|
681 | |||
286 | static PyObject *dirstate_item_get_merged(dirstateItemObject *self) |
|
682 | static PyObject *dirstate_item_get_merged(dirstateItemObject *self) | |
287 | { |
|
683 | { | |
288 | if (self->state == 'm') { |
|
684 | if (dirstate_item_c_merged(self)) { | |
289 | Py_RETURN_TRUE; |
|
|||
290 | } else { |
|
|||
291 | Py_RETURN_FALSE; |
|
|||
292 | } |
|
|||
293 | }; |
|
|||
294 |
|
||||
295 | static PyObject *dirstate_item_get_merged_removed(dirstateItemObject *self) |
|
|||
296 | { |
|
|||
297 | if (self->state == 'r' && self->size == dirstate_v1_nonnormal) { |
|
|||
298 | Py_RETURN_TRUE; |
|
685 | Py_RETURN_TRUE; | |
299 | } else { |
|
686 | } else { | |
300 | Py_RETURN_FALSE; |
|
687 | Py_RETURN_FALSE; | |
@@ -303,16 +690,29 b' static PyObject *dirstate_item_get_merge' | |||||
303 |
|
690 | |||
304 | static PyObject *dirstate_item_get_from_p2(dirstateItemObject *self) |
|
691 | static PyObject *dirstate_item_get_from_p2(dirstateItemObject *self) | |
305 | { |
|
692 | { | |
306 | if (self->state == 'n' && self->size == dirstate_v1_from_p2) { |
|
693 | if (dirstate_item_c_from_p2(self)) { | |
307 | Py_RETURN_TRUE; |
|
694 | Py_RETURN_TRUE; | |
308 | } else { |
|
695 | } else { | |
309 | Py_RETURN_FALSE; |
|
696 | Py_RETURN_FALSE; | |
310 | } |
|
697 | } | |
311 | }; |
|
698 | }; | |
312 |
|
699 | |||
313 | static PyObject *dirstate_item_get_from_p2_removed(dirstateItemObject *self) |
|
700 | static PyObject *dirstate_item_get_maybe_clean(dirstateItemObject *self) | |
314 | { |
|
701 | { | |
315 | if (self->state == 'r' && self->size == dirstate_v1_from_p2) { |
|
702 | if (!(self->flags & dirstate_flag_wc_tracked)) { | |
|
703 | Py_RETURN_FALSE; | |||
|
704 | } else if (!(self->flags & dirstate_flag_p1_tracked)) { | |||
|
705 | Py_RETURN_FALSE; | |||
|
706 | } else if (self->flags & dirstate_flag_p2_info) { | |||
|
707 | Py_RETURN_FALSE; | |||
|
708 | } else { | |||
|
709 | Py_RETURN_TRUE; | |||
|
710 | } | |||
|
711 | }; | |||
|
712 | ||||
|
713 | static PyObject *dirstate_item_get_any_tracked(dirstateItemObject *self) | |||
|
714 | { | |||
|
715 | if (dirstate_item_c_any_tracked(self)) { | |||
316 | Py_RETURN_TRUE; |
|
716 | Py_RETURN_TRUE; | |
317 | } else { |
|
717 | } else { | |
318 | Py_RETURN_FALSE; |
|
718 | Py_RETURN_FALSE; | |
@@ -321,7 +721,7 b' static PyObject *dirstate_item_get_from_' | |||||
321 |
|
721 | |||
322 | static PyObject *dirstate_item_get_removed(dirstateItemObject *self) |
|
722 | static PyObject *dirstate_item_get_removed(dirstateItemObject *self) | |
323 | { |
|
723 | { | |
324 | if (self->state == 'r') { |
|
724 | if (dirstate_item_c_removed(self)) { | |
325 | Py_RETURN_TRUE; |
|
725 | Py_RETURN_TRUE; | |
326 | } else { |
|
726 | } else { | |
327 | Py_RETURN_FALSE; |
|
727 | Py_RETURN_FALSE; | |
@@ -333,14 +733,25 b' static PyGetSetDef dirstate_item_getset[' | |||||
333 | {"size", (getter)dirstate_item_get_size, NULL, "size", NULL}, |
|
733 | {"size", (getter)dirstate_item_get_size, NULL, "size", NULL}, | |
334 | {"mtime", (getter)dirstate_item_get_mtime, NULL, "mtime", NULL}, |
|
734 | {"mtime", (getter)dirstate_item_get_mtime, NULL, "mtime", NULL}, | |
335 | {"state", (getter)dirstate_item_get_state, NULL, "state", NULL}, |
|
735 | {"state", (getter)dirstate_item_get_state, NULL, "state", NULL}, | |
|
736 | {"has_fallback_exec", (getter)dirstate_item_get_has_fallback_exec, NULL, | |||
|
737 | "has_fallback_exec", NULL}, | |||
|
738 | {"fallback_exec", (getter)dirstate_item_get_fallback_exec, | |||
|
739 | (setter)dirstate_item_set_fallback_exec, "fallback_exec", NULL}, | |||
|
740 | {"has_fallback_symlink", (getter)dirstate_item_get_has_fallback_symlink, | |||
|
741 | NULL, "has_fallback_symlink", NULL}, | |||
|
742 | {"fallback_symlink", (getter)dirstate_item_get_fallback_symlink, | |||
|
743 | (setter)dirstate_item_set_fallback_symlink, "fallback_symlink", NULL}, | |||
336 | {"tracked", (getter)dirstate_item_get_tracked, NULL, "tracked", NULL}, |
|
744 | {"tracked", (getter)dirstate_item_get_tracked, NULL, "tracked", NULL}, | |
|
745 | {"p1_tracked", (getter)dirstate_item_get_p1_tracked, NULL, "p1_tracked", | |||
|
746 | NULL}, | |||
337 | {"added", (getter)dirstate_item_get_added, NULL, "added", NULL}, |
|
747 | {"added", (getter)dirstate_item_get_added, NULL, "added", NULL}, | |
338 |
{" |
|
748 | {"p2_info", (getter)dirstate_item_get_p2_info, NULL, "p2_info", NULL}, | |
339 | "merged_removed", NULL}, |
|
|||
340 | {"merged", (getter)dirstate_item_get_merged, NULL, "merged", NULL}, |
|
749 | {"merged", (getter)dirstate_item_get_merged, NULL, "merged", NULL}, | |
341 | {"from_p2_removed", (getter)dirstate_item_get_from_p2_removed, NULL, |
|
|||
342 | "from_p2_removed", NULL}, |
|
|||
343 | {"from_p2", (getter)dirstate_item_get_from_p2, NULL, "from_p2", NULL}, |
|
750 | {"from_p2", (getter)dirstate_item_get_from_p2, NULL, "from_p2", NULL}, | |
|
751 | {"maybe_clean", (getter)dirstate_item_get_maybe_clean, NULL, "maybe_clean", | |||
|
752 | NULL}, | |||
|
753 | {"any_tracked", (getter)dirstate_item_get_any_tracked, NULL, "any_tracked", | |||
|
754 | NULL}, | |||
344 | {"removed", (getter)dirstate_item_get_removed, NULL, "removed", NULL}, |
|
755 | {"removed", (getter)dirstate_item_get_removed, NULL, "removed", NULL}, | |
345 | {NULL} /* Sentinel */ |
|
756 | {NULL} /* Sentinel */ | |
346 | }; |
|
757 | }; | |
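
Every boolean getter above reduces to the three tracking bits `wc_tracked`, `p1_tracked` and `p2_info`. For orientation, a hedged sketch of how those bits map back to the old one-letter v1 states; this is what the `dirstate_item_c_v1_state` helper used above has to compute (the helper's actual source is outside this diff):

    def v1_state(wc_tracked, p1_tracked, p2_info):
        if not wc_tracked:
            # known to a parent but gone from the working copy
            return b'r' if (p1_tracked or p2_info) else b'?'
        if not p1_tracked and not p2_info:
            return b'a'  # freshly added
        # merge information present in p1 -> 'm', otherwise plain 'n'
        return b'm' if (p1_tracked and p2_info) else b'n'
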
@@ -357,7 +768,7 b' PyTypeObject dirstateItemType = {' | |||||
357 | 0, /* tp_compare */ |
|
768 | 0, /* tp_compare */ | |
358 | 0, /* tp_repr */ |
|
769 | 0, /* tp_repr */ | |
359 | 0, /* tp_as_number */ |
|
770 | 0, /* tp_as_number */ | |
360 | &dirstate_item_sq, /* tp_as_sequence */ |
|
771 | 0, /* tp_as_sequence */ | |
361 | 0, /* tp_as_mapping */ |
|
772 | 0, /* tp_as_mapping */ | |
362 | 0, /* tp_hash */ |
|
773 | 0, /* tp_hash */ | |
363 | 0, /* tp_call */ |
|
774 | 0, /* tp_call */ | |
@@ -441,6 +852,8 b' static PyObject *parse_dirstate(PyObject' | |||||
441 |
|
852 | |||
442 | entry = (PyObject *)dirstate_item_from_v1_data(state, mode, |
|
853 | entry = (PyObject *)dirstate_item_from_v1_data(state, mode, | |
443 | size, mtime); |
|
854 | size, mtime); | |
|
855 | if (!entry) | |||
|
856 | goto quit; | |||
444 | cpos = memchr(cur, 0, flen); |
|
857 | cpos = memchr(cur, 0, flen); | |
445 | if (cpos) { |
|
858 | if (cpos) { | |
446 | fname = PyBytes_FromStringAndSize(cur, cpos - cur); |
|
859 | fname = PyBytes_FromStringAndSize(cur, cpos - cur); | |
@@ -476,68 +889,6 b' quit:' | |||||
476 | } |
|
889 | } | |
477 |
|
890 | |||
478 | /* |
|
891 | /* | |
479 | * Build a set of non-normal and other parent entries from the dirstate dmap |
|
|||
480 | */ |
|
|||
481 | static PyObject *nonnormalotherparententries(PyObject *self, PyObject *args) |
|
|||
482 | { |
|
|||
483 | PyObject *dmap, *fname, *v; |
|
|||
484 | PyObject *nonnset = NULL, *otherpset = NULL, *result = NULL; |
|
|||
485 | Py_ssize_t pos; |
|
|||
486 |
|
||||
487 | if (!PyArg_ParseTuple(args, "O!:nonnormalentries", &PyDict_Type, |
|
|||
488 | &dmap)) { |
|
|||
489 | goto bail; |
|
|||
490 | } |
|
|||
491 |
|
||||
492 | nonnset = PySet_New(NULL); |
|
|||
493 | if (nonnset == NULL) { |
|
|||
494 | goto bail; |
|
|||
495 | } |
|
|||
496 |
|
||||
497 | otherpset = PySet_New(NULL); |
|
|||
498 | if (otherpset == NULL) { |
|
|||
499 | goto bail; |
|
|||
500 | } |
|
|||
501 |
|
||||
502 | pos = 0; |
|
|||
503 | while (PyDict_Next(dmap, &pos, &fname, &v)) { |
|
|||
504 | dirstateItemObject *t; |
|
|||
505 | if (!dirstate_tuple_check(v)) { |
|
|||
506 | PyErr_SetString(PyExc_TypeError, |
|
|||
507 | "expected a dirstate tuple"); |
|
|||
508 | goto bail; |
|
|||
509 | } |
|
|||
510 | t = (dirstateItemObject *)v; |
|
|||
511 |
|
||||
512 | if (t->state == 'n' && t->size == -2) { |
|
|||
513 | if (PySet_Add(otherpset, fname) == -1) { |
|
|||
514 | goto bail; |
|
|||
515 | } |
|
|||
516 | } |
|
|||
517 |
|
||||
518 | if (t->state == 'n' && t->mtime != -1) { |
|
|||
519 | continue; |
|
|||
520 | } |
|
|||
521 | if (PySet_Add(nonnset, fname) == -1) { |
|
|||
522 | goto bail; |
|
|||
523 | } |
|
|||
524 | } |
|
|||
525 |
|
||||
526 | result = Py_BuildValue("(OO)", nonnset, otherpset); |
|
|||
527 | if (result == NULL) { |
|
|||
528 | goto bail; |
|
|||
529 | } |
|
|||
530 | Py_DECREF(nonnset); |
|
|||
531 | Py_DECREF(otherpset); |
|
|||
532 | return result; |
|
|||
533 | bail: |
|
|||
534 | Py_XDECREF(nonnset); |
|
|||
535 | Py_XDECREF(otherpset); |
|
|||
536 | Py_XDECREF(result); |
|
|||
537 | return NULL; |
|
|||
538 | } |
|
|||
539 |
|
||||
540 | /* |
|
|||
541 | * Efficiently pack a dirstate object into its on-disk format. |
|
892 | * Efficiently pack a dirstate object into its on-disk format. | |
542 | */ |
|
893 | */ | |
543 | static PyObject *pack_dirstate(PyObject *self, PyObject *args) |
|
894 | static PyObject *pack_dirstate(PyObject *self, PyObject *args) | |
@@ -547,11 +898,12 b' static PyObject *pack_dirstate(PyObject ' | |||||
547 | Py_ssize_t nbytes, pos, l; |
|
898 | Py_ssize_t nbytes, pos, l; | |
548 | PyObject *k, *v = NULL, *pn; |
|
899 | PyObject *k, *v = NULL, *pn; | |
549 | char *p, *s; |
|
900 | char *p, *s; | |
550 | int now; |
|
901 | int now_s; | |
|
902 | int now_ns; | |||
551 |
|
903 | |||
552 | if (!PyArg_ParseTuple(args, "O!O!O!i:pack_dirstate", &PyDict_Type, &map, |
|
904 | if (!PyArg_ParseTuple(args, "O!O!O!(ii):pack_dirstate", &PyDict_Type, | |
553 | &PyDict_Type, ©map, &PyTuple_Type, &pl, |
|
905 | &map, &PyDict_Type, ©map, &PyTuple_Type, &pl, | |
554 | &now)) { |
|
906 | &now_s, &now_ns)) { | |
555 | return NULL; |
|
907 | return NULL; | |
556 | } |
|
908 | } | |
557 |
|
909 | |||
@@ -616,15 +968,15 b' static PyObject *pack_dirstate(PyObject ' | |||||
616 | } |
|
968 | } | |
617 | tuple = (dirstateItemObject *)v; |
|
969 | tuple = (dirstateItemObject *)v; | |
618 |
|
970 | |||
619 | state = tuple->state; |
|
971 | state = dirstate_item_c_v1_state(tuple); | |
620 | mode = tuple->mode; |
|
972 | mode = dirstate_item_c_v1_mode(tuple); | |
621 | size = tuple->size; |
|
973 | size = dirstate_item_c_v1_size(tuple); | |
622 | mtime = tuple->mtime; |
|
974 | mtime = dirstate_item_c_v1_mtime(tuple); | |
623 | if (state == 'n' && mtime == now) { |
|
975 | if (state == 'n' && tuple->mtime_s == now_s) { | |
624 | /* See pure/parsers.py:pack_dirstate for why we do |
|
976 | /* See pure/parsers.py:pack_dirstate for why we do | |
625 | * this. */ |
|
977 | * this. */ | |
626 | mtime = -1; |
|
978 | mtime = -1; | |
627 |
mtime_unset = (PyObject *) |
|
979 | mtime_unset = (PyObject *)dirstate_item_from_v1_data( | |
628 | state, mode, size, mtime); |
|
980 | state, mode, size, mtime); | |
629 | if (!mtime_unset) { |
|
981 | if (!mtime_unset) { | |
630 | goto bail; |
|
982 | goto bail; | |
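
`pack_dirstate` now receives the current time as a `(seconds, nanoseconds)` pair, but the ambiguity test still compares whole seconds only: an entry written during the second we are packing could be modified again within that same second without changing its mtime, so its stored mtime is invalidated. A hedged sketch of the idea (see pure/parsers.py:pack_dirstate for the authoritative rationale):

    def mtime_to_store(state, mtime_s, now_s):
        if state == b'n' and mtime_s == now_s:
            # Too recent to trust: force a content comparison next time.
            return -1
        return mtime_s
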
@@ -869,9 +1221,6 b' PyObject *parse_index2(PyObject *self, P' | |||||
869 |
|
1221 | |||
870 | static PyMethodDef methods[] = { |
|
1222 | static PyMethodDef methods[] = { | |
871 | {"pack_dirstate", pack_dirstate, METH_VARARGS, "pack a dirstate\n"}, |
|
1223 | {"pack_dirstate", pack_dirstate, METH_VARARGS, "pack a dirstate\n"}, | |
872 | {"nonnormalotherparententries", nonnormalotherparententries, METH_VARARGS, |
|
|||
873 | "create a set containing non-normal and other parent entries of given " |
|
|||
874 | "dirstate\n"}, |
|
|||
875 | {"parse_dirstate", parse_dirstate, METH_VARARGS, "parse a dirstate\n"}, |
|
1224 | {"parse_dirstate", parse_dirstate, METH_VARARGS, "parse a dirstate\n"}, | |
876 | {"parse_index2", (PyCFunction)parse_index2, METH_VARARGS | METH_KEYWORDS, |
|
1225 | {"parse_index2", (PyCFunction)parse_index2, METH_VARARGS | METH_KEYWORDS, | |
877 | "parse a revlog index\n"}, |
|
1226 | "parse a revlog index\n"}, | |
@@ -899,7 +1248,6 b' static const int version = 20;' | |||||
899 |
|
1248 | |||
900 | static void module_init(PyObject *mod) |
|
1249 | static void module_init(PyObject *mod) | |
901 | { |
|
1250 | { | |
902 | PyObject *capsule = NULL; |
|
|||
903 | PyModule_AddIntConstant(mod, "version", version); |
|
1251 | PyModule_AddIntConstant(mod, "version", version); | |
904 |
|
1252 | |||
905 | /* This module constant has two purposes. First, it lets us unit test |
|
1253 | /* This module constant has two purposes. First, it lets us unit test | |
@@ -916,12 +1264,6 b' static void module_init(PyObject *mod)' | |||||
916 | manifest_module_init(mod); |
|
1264 | manifest_module_init(mod); | |
917 | revlog_module_init(mod); |
|
1265 | revlog_module_init(mod); | |
918 |
|
1266 | |||
919 | capsule = PyCapsule_New( |
|
|||
920 | make_dirstate_item, |
|
|||
921 | "mercurial.cext.parsers.make_dirstate_item_CAPI", NULL); |
|
|||
922 | if (capsule != NULL) |
|
|||
923 | PyModule_AddObject(mod, "make_dirstate_item_CAPI", capsule); |
|
|||
924 |
|
||||
925 | if (PyType_Ready(&dirstateItemType) < 0) { |
|
1267 | if (PyType_Ready(&dirstateItemType) < 0) { | |
926 | return; |
|
1268 | return; | |
927 | } |
|
1269 | } |
@@ -24,13 +24,31 b'' | |||||
24 | /* clang-format off */ |
|
24 | /* clang-format off */ | |
25 | typedef struct { |
|
25 | typedef struct { | |
26 | PyObject_HEAD |
|
26 | PyObject_HEAD | |
27 | char state; |
|
27 | int flags; | |
28 | int mode; |
|
28 | int mode; | |
29 | int size; |
|
29 | int size; | |
30 | int mtime; |
|
30 | int mtime_s; | |
|
31 | int mtime_ns; | |||
31 | } dirstateItemObject; |
|
32 | } dirstateItemObject; | |
32 | /* clang-format on */ |
|
33 | /* clang-format on */ | |
33 |
|
34 | |||
|
35 | static const int dirstate_flag_wc_tracked = 1 << 0; | |||
|
36 | static const int dirstate_flag_p1_tracked = 1 << 1; | |||
|
37 | static const int dirstate_flag_p2_info = 1 << 2; | |||
|
38 | static const int dirstate_flag_mode_exec_perm = 1 << 3; | |||
|
39 | static const int dirstate_flag_mode_is_symlink = 1 << 4; | |||
|
40 | static const int dirstate_flag_has_fallback_exec = 1 << 5; | |||
|
41 | static const int dirstate_flag_fallback_exec = 1 << 6; | |||
|
42 | static const int dirstate_flag_has_fallback_symlink = 1 << 7; | |||
|
43 | static const int dirstate_flag_fallback_symlink = 1 << 8; | |||
|
44 | static const int dirstate_flag_expected_state_is_modified = 1 << 9; | |||
|
45 | static const int dirstate_flag_has_meaningful_data = 1 << 10; | |||
|
46 | static const int dirstate_flag_has_mtime = 1 << 11; | |||
|
47 | static const int dirstate_flag_mtime_second_ambiguous = 1 << 12; | |||
|
48 | static const int dirstate_flag_directory = 1 << 13; | |||
|
49 | static const int dirstate_flag_all_unknown_recorded = 1 << 14; | |||
|
50 | static const int dirstate_flag_all_ignored_recorded = 1 << 15; | |||
|
51 | ||||
34 | extern PyTypeObject dirstateItemType; |
|
52 | extern PyTypeObject dirstateItemType; | |
35 | #define dirstate_tuple_check(op) (Py_TYPE(op) == &dirstateItemType) |
|
53 | #define dirstate_tuple_check(op) (Py_TYPE(op) == &dirstateItemType) | |
36 |
|
54 |
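
For readers following along from Python, the new bit constants translate one-to-one; a mirror of the header as an enum.IntFlag (illustrative only, not part of the patch):

    import enum

    class DirstateV2Flags(enum.IntFlag):
        WC_TRACKED = 1 << 0
        P1_TRACKED = 1 << 1
        P2_INFO = 1 << 2
        MODE_EXEC_PERM = 1 << 3
        MODE_IS_SYMLINK = 1 << 4
        HAS_FALLBACK_EXEC = 1 << 5
        FALLBACK_EXEC = 1 << 6
        HAS_FALLBACK_SYMLINK = 1 << 7
        FALLBACK_SYMLINK = 1 << 8
        EXPECTED_STATE_IS_MODIFIED = 1 << 9
        HAS_MEANINGFUL_DATA = 1 << 10
        HAS_MTIME = 1 << 11
        MTIME_SECOND_AMBIGUOUS = 1 << 12
        DIRECTORY = 1 << 13
        ALL_UNKNOWN_RECORDED = 1 << 14
        ALL_IGNORED_RECORDED = 1 << 15
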
@@ -626,7 +626,7 b' def dorecord(' | |||||
626 | for realname, tmpname in pycompat.iteritems(backups): |
|
626 | for realname, tmpname in pycompat.iteritems(backups): | |
627 | ui.debug(b'restoring %r to %r\n' % (tmpname, realname)) |
|
627 | ui.debug(b'restoring %r to %r\n' % (tmpname, realname)) | |
628 |
|
628 | |||
629 |
if dirstate |
|
629 | if dirstate.get_entry(realname).maybe_clean: | |
630 | # without normallookup, restoring timestamp |
|
630 | # without normallookup, restoring timestamp | |
631 | # may cause partially committed files |
|
631 | # may cause partially committed files | |
632 | # to be treated as unmodified |
|
632 | # to be treated as unmodified | |
@@ -987,7 +987,7 b' def changebranch(ui, repo, revs, label, ' | |||||
987 | with repo.wlock(), repo.lock(), repo.transaction(b'branches'): |
|
987 | with repo.wlock(), repo.lock(), repo.transaction(b'branches'): | |
988 | # abort in case of uncommitted merge or dirty wdir |
|
988 | # abort in case of uncommitted merge or dirty wdir | |
989 | bailifchanged(repo) |
|
989 | bailifchanged(repo) | |
990 |
revs = |
|
990 | revs = logcmdutil.revrange(repo, revs) | |
991 | if not revs: |
|
991 | if not revs: | |
992 | raise error.InputError(b"empty revision set") |
|
992 | raise error.InputError(b"empty revision set") | |
993 | roots = repo.revs(b'roots(%ld)', revs) |
|
993 | roots = repo.revs(b'roots(%ld)', revs) | |
@@ -1480,7 +1480,7 b' def copy(ui, repo, pats, opts, rename=Fa' | |||||
1480 | # TODO: Remove this restriction and make it also create the copy |
|
1480 | # TODO: Remove this restriction and make it also create the copy | |
1481 | # targets (and remove the rename source if rename==True). |
|
1481 | # targets (and remove the rename source if rename==True). | |
1482 | raise error.InputError(_(b'--at-rev requires --after')) |
|
1482 | raise error.InputError(_(b'--at-rev requires --after')) | |
1483 |
ctx = |
|
1483 | ctx = logcmdutil.revsingle(repo, rev) | |
1484 | if len(ctx.parents()) > 1: |
|
1484 | if len(ctx.parents()) > 1: | |
1485 | raise error.InputError( |
|
1485 | raise error.InputError( | |
1486 | _(b'cannot mark/unmark copy in merge commit') |
|
1486 | _(b'cannot mark/unmark copy in merge commit') | |
@@ -1642,7 +1642,9 b' def copy(ui, repo, pats, opts, rename=Fa' | |||||
1642 | reltarget = repo.pathto(abstarget, cwd) |
|
1642 | reltarget = repo.pathto(abstarget, cwd) | |
1643 | target = repo.wjoin(abstarget) |
|
1643 | target = repo.wjoin(abstarget) | |
1644 | src = repo.wjoin(abssrc) |
|
1644 | src = repo.wjoin(abssrc) | |
1645 | state = repo.dirstate[abstarget] |
|
|
1645 | entry = repo.dirstate.get_entry(abstarget) | |
|
1646 | ||||
|
1647 | already_commited = entry.tracked and not entry.added | |||
1646 |
|
1648 | |||
1647 | scmutil.checkportable(ui, abstarget) |
|
1649 | scmutil.checkportable(ui, abstarget) | |
1648 |
|
1650 | |||
@@ -1672,30 +1674,48 b' def copy(ui, repo, pats, opts, rename=Fa' | |||||
1672 | exists = False |
|
1674 | exists = False | |
1673 | samefile = True |
|
1675 | samefile = True | |
1674 |
|
1676 | |||
1675 | if not after and exists or after and state in b'mn': |
|
1677 | if not after and exists or after and already_commited: | |
1676 | if not opts[b'force']: |
|
1678 | if not opts[b'force']: | |
1677 | if state in b'mn': |
|
1679 | if already_commited: | |
1678 | msg = _(b'%s: not overwriting - file already committed\n') |
|
1680 | msg = _(b'%s: not overwriting - file already committed\n') | |
1679 | if after: |
|
1681 | # Check if the target was added in the parent and the | |
1680 | flags = b'--after --force' |
|
1682 | # source already existed in the grandparent. | |
|
1683 | looks_like_copy_in_pctx = abstarget in pctx and any( | |||
|
1684 | abssrc in gpctx and abstarget not in gpctx | |||
|
1685 | for gpctx in pctx.parents() | |||
|
1686 | ) | |||
|
1687 | if looks_like_copy_in_pctx: | |||
|
1688 | if rename: | |||
|
1689 | hint = _( | |||
|
1690 | b"('hg rename --at-rev .' to record the rename " | |||
|
1691 | b"in the parent of the working copy)\n" | |||
|
1692 | ) | |||
|
1693 | else: | |||
|
1694 | hint = _( | |||
|
1695 | b"('hg copy --at-rev .' to record the copy in " | |||
|
1696 | b"the parent of the working copy)\n" | |||
|
1697 | ) | |||
1681 | else: |
|
1698 | else: | |
1682 | flags = b'--force' |
|
1699 | if after: | |
1683 | if rename: |
|
1700 | flags = b'--after --force' | |
1684 | hint = ( |
|
1701 | else: | |
1685 | _( |
|
1702 | flags = b'--force' | |
1686 | b"('hg rename %s' to replace the file by " |
|
1703 | if rename: | |
1687 | b'recording a rename)\n' |
|
1704 | hint = ( | |
|
1705 | _( | |||
|
1706 | b"('hg rename %s' to replace the file by " | |||
|
1707 | b'recording a rename)\n' | |||
|
1708 | ) | |||
|
1709 | % flags | |||
1688 | ) |
|
1710 | ) | |
1689 | % flags |
|
1711 | else: | |
1690 | ) |
|
1712 | hint = ( | |
1691 | else: |
|
1713 | _( | |
1692 | hint = ( |
|
1714 | b"('hg copy %s' to replace the file by " | |
1693 | _( |
|
1715 | b'recording a copy)\n' | |
1694 | b"('hg copy %s' to replace the file by " |
|
1716 | ) | |
1695 | b'recording a copy)\n' |
|
1717 | % flags | |
1696 | ) |
|
1718 | ) | |
1697 | % flags |
|
|||
1698 | ) |
|
|||
1699 | else: |
|
1719 | else: | |
1700 | msg = _(b'%s: not overwriting - file exists\n') |
|
1720 | msg = _(b'%s: not overwriting - file exists\n') | |
1701 | if rename: |
|
1721 | if rename: | |
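
The new hint above fires when the failed copy already looks recorded in the working copy's parent: the target exists in `pctx` while some grandparent knows the source but not the target. A toy illustration, with sets standing in for changectx file membership and made-up file names:

    pctx_files = {b'new.txt'}   # files in the working copy's parent
    gpctx_files = {b'old.txt'}  # files in one grandparent

    abssrc, abstarget = b'old.txt', b'new.txt'
    looks_like_copy_in_pctx = abstarget in pctx_files and (
        abssrc in gpctx_files and abstarget not in gpctx_files
    )
    assert looks_like_copy_in_pctx  # -> suggest 'hg copy --at-rev .'
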
@@ -3350,7 +3370,11 b' def revert(ui, repo, ctx, *pats, **opts)' | |||||
3350 | for f in localchanges: |
|
3370 | for f in localchanges: | |
3351 | src = repo.dirstate.copied(f) |
|
3371 | src = repo.dirstate.copied(f) | |
3352 | # XXX should we check for rename down to target node? |
|
3372 | # XXX should we check for rename down to target node? | |
3353 | if src and src not in names and repo.dirstate[src] == b'r': |
|
3373 | if ( | |
|
3374 | src | |||
|
3375 | and src not in names | |||
|
3376 | and repo.dirstate.get_entry(src).removed | |||
|
3377 | ): | |||
3354 | dsremoved.add(src) |
|
3378 | dsremoved.add(src) | |
3355 | names[src] = True |
|
3379 | names[src] = True | |
3356 |
|
3380 | |||
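
This hunk shows the recurring migration pattern of the dirstate half of the series: one-letter state comparisons become property reads on the entry object returned by `get_entry()`. A hedged before/after sketch:

    def is_removed(dirstate, f):
        # old spelling: return dirstate[f] == b'r'
        return dirstate.get_entry(f).removed
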
@@ -3364,12 +3388,12 b' def revert(ui, repo, ctx, *pats, **opts)' | |||||
3364 | # distinguish between file to forget and the other |
|
3388 | # distinguish between file to forget and the other | |
3365 | added = set() |
|
3389 | added = set() | |
3366 | for abs in dsadded: |
|
3390 | for abs in dsadded: | |
3367 | if repo.dirstate[abs] != b'a': |
|
3391 | if not repo.dirstate.get_entry(abs).added: | |
3368 | added.add(abs) |
|
3392 | added.add(abs) | |
3369 | dsadded -= added |
|
3393 | dsadded -= added | |
3370 |
|
3394 | |||
3371 | for abs in deladded: |
|
3395 | for abs in deladded: | |
3372 | if repo.dirstate[abs] == b'a': |
|
3396 | if repo.dirstate.get_entry(abs).added: | |
3373 | dsadded.add(abs) |
|
3397 | dsadded.add(abs) | |
3374 | deladded -= dsadded |
|
3398 | deladded -= dsadded | |
3375 |
|
3399 |
@@ -445,7 +445,7 b' def annotate(ui, repo, *pats, **opts):' | |||||
445 | rev = opts.get(b'rev') |
|
445 | rev = opts.get(b'rev') | |
446 | if rev: |
|
446 | if rev: | |
447 | repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn') |
|
447 | repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn') | |
448 | ctx = scmutil.revsingle(repo, rev) |
|
448 | ctx = logcmdutil.revsingle(repo, rev) | |
449 |
|
449 | |||
450 | ui.pager(b'annotate') |
|
450 | ui.pager(b'annotate') | |
451 | rootfm = ui.formatter(b'annotate', opts) |
|
451 | rootfm = ui.formatter(b'annotate', opts) | |
@@ -526,7 +526,7 b' def annotate(ui, repo, *pats, **opts):' | |||||
526 | ) |
|
526 | ) | |
527 |
|
527 | |||
528 | def bad(x, y): |
|
528 | def bad(x, y): | |
529 | raise error.Abort(b"%s: %s" % (x, y)) |
|
529 | raise error.InputError(b"%s: %s" % (x, y)) | |
530 |
|
530 | |||
531 | m = scmutil.match(ctx, pats, opts, badfn=bad) |
|
531 | m = scmutil.match(ctx, pats, opts, badfn=bad) | |
532 |
|
532 | |||
@@ -536,7 +536,7 b' def annotate(ui, repo, *pats, **opts):' | |||||
536 | ) |
|
536 | ) | |
537 | skiprevs = opts.get(b'skip') |
|
537 | skiprevs = opts.get(b'skip') | |
538 | if skiprevs: |
|
538 | if skiprevs: | |
539 | skiprevs = scmutil.revrange(repo, skiprevs) |
|
539 | skiprevs = logcmdutil.revrange(repo, skiprevs) | |
540 |
|
540 | |||
541 | uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True) |
|
541 | uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True) | |
542 | for abs in ctx.walk(m): |
|
542 | for abs in ctx.walk(m): | |
@@ -649,7 +649,7 b' def archive(ui, repo, dest, **opts):' | |||||
649 | rev = opts.get(b'rev') |
|
649 | rev = opts.get(b'rev') | |
650 | if rev: |
|
650 | if rev: | |
651 | repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn') |
|
651 | repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn') | |
652 | ctx = scmutil.revsingle(repo, rev) |
|
652 | ctx = logcmdutil.revsingle(repo, rev) | |
653 | if not ctx: |
|
653 | if not ctx: | |
654 | raise error.InputError( |
|
654 | raise error.InputError( | |
655 | _(b'no working directory: please specify a revision') |
|
655 | _(b'no working directory: please specify a revision') | |
@@ -791,7 +791,7 b' def _dobackout(ui, repo, node=None, rev=' | |||||
791 |
|
791 | |||
792 | cmdutil.checkunfinished(repo) |
|
792 | cmdutil.checkunfinished(repo) | |
793 | cmdutil.bailifchanged(repo) |
|
793 | cmdutil.bailifchanged(repo) | |
794 | ctx = scmutil.revsingle(repo, rev) |
|
794 | ctx = logcmdutil.revsingle(repo, rev) | |
795 | node = ctx.node() |
|
795 | node = ctx.node() | |
796 |
|
796 | |||
797 | op1, op2 = repo.dirstate.parents() |
|
797 | op1, op2 = repo.dirstate.parents() | |
@@ -1037,7 +1037,7 b' def bisect(' | |||||
1037 | state = hbisect.load_state(repo) |
|
1037 | state = hbisect.load_state(repo) | |
1038 |
|
1038 | |||
1039 | if rev: |
|
1039 | if rev: | |
1040 | nodes = [repo[i].node() for i in scmutil.revrange(repo, rev)] |
|
1040 | nodes = [repo[i].node() for i in logcmdutil.revrange(repo, rev)] | |
1041 | else: |
|
1041 | else: | |
1042 | nodes = [repo.lookup(b'.')] |
|
1042 | nodes = [repo.lookup(b'.')] | |
1043 |
|
1043 | |||
@@ -1081,7 +1081,7 b' def bisect(' | |||||
1081 | raise error.StateError(_(b'current bisect revision is a merge')) |
|
1081 | raise error.StateError(_(b'current bisect revision is a merge')) | |
1082 | if rev: |
|
1082 | if rev: | |
1083 | if not nodes: |
|
1083 | if not nodes: | |
1084 | raise error.Abort(_(b'empty revision set')) |
|
1084 | raise error.InputError(_(b'empty revision set')) | |
1085 | node = repo[nodes[-1]].node() |
|
1085 | node = repo[nodes[-1]].node() | |
1086 | with hbisect.restore_state(repo, state, node): |
|
1086 | with hbisect.restore_state(repo, state, node): | |
1087 | while changesets: |
|
1087 | while changesets: | |
@@ -1424,7 +1424,7 b' def branches(ui, repo, active=False, clo' | |||||
1424 | revs = opts.get(b'rev') |
|
1424 | revs = opts.get(b'rev') | |
1425 | selectedbranches = None |
|
1425 | selectedbranches = None | |
1426 | if revs: |
|
1426 | if revs: | |
1427 | revs = scmutil.revrange(repo, revs) |
|
1427 | revs = logcmdutil.revrange(repo, revs) | |
1428 | getbi = repo.revbranchcache().branchinfo |
|
1428 | getbi = repo.revbranchcache().branchinfo | |
1429 | selectedbranches = {getbi(r)[0] for r in revs} |
|
1429 | selectedbranches = {getbi(r)[0] for r in revs} | |
1430 |
|
1430 | |||
@@ -1558,7 +1558,7 b' def bundle(ui, repo, fname, *dests, **op' | |||||
1558 | revs = None |
|
1558 | revs = None | |
1559 | if b'rev' in opts: |
|
1559 | if b'rev' in opts: | |
1560 | revstrings = opts[b'rev'] |
|
1560 | revstrings = opts[b'rev'] | |
1561 | revs = scmutil.revrange(repo, revstrings) |
|
1561 | revs = logcmdutil.revrange(repo, revstrings) | |
1562 | if revstrings and not revs: |
|
1562 | if revstrings and not revs: | |
1563 | raise error.InputError(_(b'no commits to bundle')) |
|
1563 | raise error.InputError(_(b'no commits to bundle')) | |
1564 |
|
1564 | |||
@@ -1590,7 +1590,7 b' def bundle(ui, repo, fname, *dests, **op' | |||||
1590 | ui.warn(_(b"ignoring --base because --all was specified\n")) |
|
1590 | ui.warn(_(b"ignoring --base because --all was specified\n")) | |
1591 | base = [nullrev] |
|
1591 | base = [nullrev] | |
1592 | else: |
|
1592 | else: | |
1593 | base = scmutil.revrange(repo, opts.get(b'base')) |
|
1593 | base = logcmdutil.revrange(repo, opts.get(b'base')) | |
1594 | if cgversion not in changegroup.supportedoutgoingversions(repo): |
|
1594 | if cgversion not in changegroup.supportedoutgoingversions(repo): | |
1595 | raise error.Abort( |
|
1595 | raise error.Abort( | |
1596 | _(b"repository does not support bundle version %s") % cgversion |
|
1596 | _(b"repository does not support bundle version %s") % cgversion | |
@@ -1761,7 +1761,7 b' def cat(ui, repo, file1, *pats, **opts):' | |||||
1761 | rev = opts.get(b'rev') |
|
1761 | rev = opts.get(b'rev') | |
1762 | if rev: |
|
1762 | if rev: | |
1763 | repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn') |
|
1763 | repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn') | |
1764 | ctx = scmutil.revsingle(repo, rev) |
|
1764 | ctx = logcmdutil.revsingle(repo, rev) | |
1765 | m = scmutil.match(ctx, (file1,) + pats, opts) |
|
1765 | m = scmutil.match(ctx, (file1,) + pats, opts) | |
1766 | fntemplate = opts.pop(b'output', b'') |
|
1766 | fntemplate = opts.pop(b'output', b'') | |
1767 | if cmdutil.isstdiofilename(fntemplate): |
|
1767 | if cmdutil.isstdiofilename(fntemplate): | |
@@ -2600,17 +2600,17 b' def diff(ui, repo, *pats, **opts):' | |||||
2600 | cmdutil.check_incompatible_arguments(opts, b'to', [b'rev', b'change']) |
|
2600 | cmdutil.check_incompatible_arguments(opts, b'to', [b'rev', b'change']) | |
2601 | if change: |
|
2601 | if change: | |
2602 | repo = scmutil.unhidehashlikerevs(repo, [change], b'nowarn') |
|
2602 | repo = scmutil.unhidehashlikerevs(repo, [change], b'nowarn') | |
2603 | ctx2 = scmutil.revsingle(repo, change, None) |
|
2603 | ctx2 = logcmdutil.revsingle(repo, change, None) | |
2604 | ctx1 = logcmdutil.diff_parent(ctx2) |
|
2604 | ctx1 = logcmdutil.diff_parent(ctx2) | |
2605 | elif from_rev or to_rev: |
|
2605 | elif from_rev or to_rev: | |
2606 | repo = scmutil.unhidehashlikerevs( |
|
2606 | repo = scmutil.unhidehashlikerevs( | |
2607 | repo, [from_rev] + [to_rev], b'nowarn' |
|
2607 | repo, [from_rev] + [to_rev], b'nowarn' | |
2608 | ) |
|
2608 | ) | |
2609 | ctx1 = scmutil.revsingle(repo, from_rev, None) |
|
2609 | ctx1 = logcmdutil.revsingle(repo, from_rev, None) | |
2610 | ctx2 = scmutil.revsingle(repo, to_rev, None) |
|
2610 | ctx2 = logcmdutil.revsingle(repo, to_rev, None) | |
2611 | else: |
|
2611 | else: | |
2612 | repo = scmutil.unhidehashlikerevs(repo, revs, b'nowarn') |
|
2612 | repo = scmutil.unhidehashlikerevs(repo, revs, b'nowarn') | |
2613 | ctx1, ctx2 = scmutil.revpair(repo, revs) |
|
2613 | ctx1, ctx2 = logcmdutil.revpair(repo, revs) | |
2614 |
|
2614 | |||
2615 | if reverse: |
|
2615 | if reverse: | |
2616 | ctxleft = ctx2 |
|
2616 | ctxleft = ctx2 | |
@@ -2753,7 +2753,7 b' def export(ui, repo, *changesets, **opts' | |||||
2753 | changesets = [b'.'] |
|
2753 | changesets = [b'.'] | |
2754 |
|
2754 | |||
2755 | repo = scmutil.unhidehashlikerevs(repo, changesets, b'nowarn') |
|
2755 | repo = scmutil.unhidehashlikerevs(repo, changesets, b'nowarn') | |
2756 | revs = scmutil.revrange(repo, changesets) |
|
2756 | revs = logcmdutil.revrange(repo, changesets) | |
2757 |
|
2757 | |||
2758 | if not revs: |
|
2758 | if not revs: | |
2759 | raise error.InputError(_(b"export requires at least one changeset")) |
|
2759 | raise error.InputError(_(b"export requires at least one changeset")) | |
@@ -2864,7 +2864,7 b' def files(ui, repo, *pats, **opts):' | |||||
2864 | rev = opts.get(b'rev') |
|
2864 | rev = opts.get(b'rev') | |
2865 | if rev: |
|
2865 | if rev: | |
2866 | repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn') |
|
2866 | repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn') | |
2867 | ctx = scmutil.revsingle(repo, rev, None) |
|
2867 | ctx = logcmdutil.revsingle(repo, rev, None) | |
2868 |
|
2868 | |||
2869 | end = b'\n' |
|
2869 | end = b'\n' | |
2870 | if opts.get(b'print0'): |
|
2870 | if opts.get(b'print0'): | |
@@ -3170,12 +3170,12 b' def _dograft(ui, repo, *revs, **opts):' | |||||
3170 | raise error.InputError(_(b'no revisions specified')) |
|
3170 | raise error.InputError(_(b'no revisions specified')) | |
3171 | cmdutil.checkunfinished(repo) |
|
3171 | cmdutil.checkunfinished(repo) | |
3172 | cmdutil.bailifchanged(repo) |
|
3172 | cmdutil.bailifchanged(repo) | |
3173 | revs = scmutil.revrange(repo, revs) |
|
3173 | revs = logcmdutil.revrange(repo, revs) | |
3174 |
|
3174 | |||
3175 | skipped = set() |
|
3175 | skipped = set() | |
3176 | basectx = None |
|
3176 | basectx = None | |
3177 | if opts.get('base'): |
|
3177 | if opts.get('base'): | |
3178 | basectx = scmutil.revsingle(repo, opts['base'], None) |
|
3178 | basectx = logcmdutil.revsingle(repo, opts['base'], None) | |
3179 | if basectx is None: |
|
3179 | if basectx is None: | |
3180 | # check for merges |
|
3180 | # check for merges | |
3181 | for rev in repo.revs(b'%ld and merge()', revs): |
|
3181 | for rev in repo.revs(b'%ld and merge()', revs): | |
@@ -3696,7 +3696,7 b' def heads(ui, repo, *branchrevs, **opts)' | |||||
3696 | rev = opts.get(b'rev') |
|
3696 | rev = opts.get(b'rev') | |
3697 | if rev: |
|
3697 | if rev: | |
3698 | repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn') |
|
3698 | repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn') | |
3699 | start = scmutil.revsingle(repo, rev, None).node() |
|
3699 | start = logcmdutil.revsingle(repo, rev, None).node() | |
3700 |
|
3700 | |||
3701 | if opts.get(b'topo'): |
|
3701 | if opts.get(b'topo'): | |
3702 | heads = [repo[h] for h in repo.heads(start)] |
|
3702 | heads = [repo[h] for h in repo.heads(start)] | |
@@ -3708,7 +3708,7 b' def heads(ui, repo, *branchrevs, **opts)' | |||||
3708 |
|
3708 | |||
3709 | if branchrevs: |
|
3709 | if branchrevs: | |
3710 | branches = { |
|
3710 | branches = { | |
3711 | repo[r].branch() for r in scmutil.revrange(repo, branchrevs) |
|
3711 | repo[r].branch() for r in logcmdutil.revrange(repo, branchrevs) | |
3712 | } |
|
3712 | } | |
3713 | heads = [h for h in heads if h.branch() in branches] |
|
3713 | heads = [h for h in heads if h.branch() in branches] | |
3714 |
|
3714 | |||
@@ -3932,7 +3932,7 b' def identify(' | |||||
3932 | else: |
|
3932 | else: | |
3933 | if rev: |
|
3933 | if rev: | |
3934 | repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn') |
|
3934 | repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn') | |
3935 | ctx = scmutil.revsingle(repo, rev, None) |
|
3935 | ctx = logcmdutil.revsingle(repo, rev, None) | |
3936 |
|
3936 | |||
3937 | if ctx.rev() is None: |
|
3937 | if ctx.rev() is None: | |
3938 | ctx = repo[None] |
|
3938 | ctx = repo[None] | |
@@ -4346,8 +4346,11 b' def incoming(ui, repo, source=b"default"' | |||||
4346 | cmdutil.check_incompatible_arguments(opts, b'subrepos', [b'bundle']) |
|
4346 | cmdutil.check_incompatible_arguments(opts, b'subrepos', [b'bundle']) | |
4347 |
|
4347 | |||
4348 | if opts.get(b'bookmarks'): |
|
4348 | if opts.get(b'bookmarks'): | |
4349 | srcs = urlutil.get_pull_paths(repo, ui, [source], opts.get(b'branch')) |
|
4349 | srcs = urlutil.get_pull_paths(repo, ui, [source]) | |
4350 | for source, branches in srcs: |
|
4350 | for path in srcs: | |
|
4351 | source, branches = urlutil.parseurl( | |||
|
4352 | path.rawloc, opts.get(b'branch') | |||
|
4353 | ) | |||
4351 | other = hg.peer(repo, opts, source) |
|
4354 | other = hg.peer(repo, opts, source) | |
4352 | try: |
|
4355 | try: | |
4353 | if b'bookmarks' not in other.listkeys(b'namespaces'): |
|
4356 | if b'bookmarks' not in other.listkeys(b'namespaces'): | |
@@ -4357,7 +4360,9 b' def incoming(ui, repo, source=b"default"' | |||||
4357 | ui.status( |
|
4360 | ui.status( | |
4358 | _(b'comparing with %s\n') % urlutil.hidepassword(source) |
|
4361 | _(b'comparing with %s\n') % urlutil.hidepassword(source) | |
4359 | ) |
|
4362 | ) | |
4360 | return bookmarks.incoming(ui, repo, other) |
|
4363 | return bookmarks.incoming( | |
|
4364 | ui, repo, other, mode=path.bookmarks_mode | |||
|
4365 | ) | |||
4361 | finally: |
|
4366 | finally: | |
4362 | other.close() |
|
4367 | other.close() | |
4363 |
|
4368 | |||
@@ -4445,7 +4450,7 b' def locate(ui, repo, *pats, **opts):' | |||||
4445 | end = b'\0' |
|
4450 | end = b'\0' | |
4446 | else: |
|
4451 | else: | |
4447 | end = b'\n' |
|
4452 | end = b'\n' | |
4448 | ctx = scmutil.revsingle(repo, opts.get(b'rev'), None) |
|
4453 | ctx = logcmdutil.revsingle(repo, opts.get(b'rev'), None) | |
4449 |
|
4454 | |||
4450 | ret = 1 |
|
4455 | ret = 1 | |
4451 | m = scmutil.match( |
|
4456 | m = scmutil.match( | |
@@ -4790,7 +4795,7 b' def manifest(ui, repo, node=None, rev=No' | |||||
4790 | mode = {b'l': b'644', b'x': b'755', b'': b'644', b't': b'755'} |
|
4795 | mode = {b'l': b'644', b'x': b'755', b'': b'644', b't': b'755'} | |
4791 | if node: |
|
4796 | if node: | |
4792 | repo = scmutil.unhidehashlikerevs(repo, [node], b'nowarn') |
|
4797 | repo = scmutil.unhidehashlikerevs(repo, [node], b'nowarn') | |
4793 | ctx = scmutil.revsingle(repo, node) |
|
4798 | ctx = logcmdutil.revsingle(repo, node) | |
4794 | mf = ctx.manifest() |
|
4799 | mf = ctx.manifest() | |
4795 | ui.pager(b'manifest') |
|
4800 | ui.pager(b'manifest') | |
4796 | for f in ctx: |
|
4801 | for f in ctx: | |
@@ -4877,7 +4882,7 b' def merge(ui, repo, node=None, **opts):' | |||||
4877 | node = opts.get(b'rev') |
|
4882 | node = opts.get(b'rev') | |
4878 |
|
4883 | |||
4879 | if node: |
|
4884 | if node: | |
4880 | ctx = scmutil.revsingle(repo, node) |
|
4885 | ctx = logcmdutil.revsingle(repo, node) | |
4881 | else: |
|
4886 | else: | |
4882 | if ui.configbool(b'commands', b'merge.require-rev'): |
|
4887 | if ui.configbool(b'commands', b'merge.require-rev'): | |
4883 | raise error.InputError( |
|
4888 | raise error.InputError( | |
@@ -5056,7 +5061,7 b' def parents(ui, repo, file_=None, **opts' | |||||
5056 | rev = opts.get(b'rev') |
|
5061 | rev = opts.get(b'rev') | |
5057 | if rev: |
|
5062 | if rev: | |
5058 | repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn') |
|
5063 | repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn') | |
5059 | ctx = scmutil.revsingle(repo, rev, None) |
|
5064 | ctx = logcmdutil.revsingle(repo, rev, None) | |
5060 |
|
5065 | |||
5061 | if file_: |
|
5066 | if file_: | |
5062 | m = scmutil.match(ctx, (file_,), opts) |
|
5067 | m = scmutil.match(ctx, (file_,), opts) | |
@@ -5219,13 +5224,13 b' def phase(ui, repo, *revs, **opts):' | |||||
5219 | # look for specified revision |
|
5224 | # look for specified revision | |
5220 | revs = list(revs) |
|
5225 | revs = list(revs) | |
5221 | revs.extend(opts[b'rev']) |
|
5226 | revs.extend(opts[b'rev']) | |
5222 | if not revs: |
|
5227 | if revs: | |
|
5228 | revs = logcmdutil.revrange(repo, revs) | |||
|
5229 | else: | |||
5223 | # display both parents as the second parent phase can influence |
|
5230 | # display both parents as the second parent phase can influence | |
5224 | # the phase of a merge commit |
|
5231 | # the phase of a merge commit | |
5225 | revs = [c.rev() for c in repo[None].parents()] |
|
5232 | revs = [c.rev() for c in repo[None].parents()] | |
5226 |
|
5233 | |||
5227 | revs = scmutil.revrange(repo, revs) |
|
|||
5228 |
|
||||
5229 | ret = 0 |
|
5234 | ret = 0 | |
5230 | if targetphase is None: |
|
5235 | if targetphase is None: | |
5231 | # display |
|
5236 | # display | |
@@ -5393,8 +5398,8 b' def pull(ui, repo, *sources, **opts):' | |||||
5393 | hint = _(b'use hg pull followed by hg update DEST') |
|
5398 | hint = _(b'use hg pull followed by hg update DEST') | |
5394 | raise error.InputError(msg, hint=hint) |
|
5399 | raise error.InputError(msg, hint=hint) | |
5395 |
|
5400 | |||
5396 | sources = urlutil.get_pull_paths(repo, ui, sources, opts.get(b'branch')) |
|
|
5401 | for path in urlutil.get_pull_paths(repo, ui, sources): | |
5397 | for source, branches in sources: |
|
5402 | source, branches = urlutil.parseurl(path.rawloc, opts.get(b'branch')) | |
5398 | ui.status(_(b'pulling from %s\n') % urlutil.hidepassword(source)) |
|
5403 | ui.status(_(b'pulling from %s\n') % urlutil.hidepassword(source)) | |
5399 | ui.flush() |
|
5404 | ui.flush() | |
5400 | other = hg.peer(repo, opts, source) |
|
5405 | other = hg.peer(repo, opts, source) | |
@@ -5451,6 +5456,7 b' def pull(ui, repo, *sources, **opts):' | |||||
5451 | modheads = exchange.pull( |
|
5456 | modheads = exchange.pull( | |
5452 | repo, |
|
5457 | repo, | |
5453 | other, |
|
5458 | other, | |
|
5459 | path=path, | |||
5454 | heads=nodes, |
|
5460 | heads=nodes, | |
5455 | force=opts.get(b'force'), |
|
5461 | force=opts.get(b'force'), | |
5456 | bookmarks=opts.get(b'bookmark', ()), |
|
5462 | bookmarks=opts.get(b'bookmark', ()), | |
@@ -5735,7 +5741,7 b' def push(ui, repo, *dests, **opts):' | |||||
5735 |
|
5741 | |||
5736 | try: |
|
5742 | try: | |
5737 | if revs: |
|
5743 | if revs: | |
5738 | revs = [repo[r].node() for r in scmutil.revrange(repo, revs)] |
|
5744 | revs = [repo[r].node() for r in logcmdutil.revrange(repo, revs)] | |
5739 | if not revs: |
|
5745 | if not revs: | |
5740 | raise error.InputError( |
|
5746 | raise error.InputError( | |
5741 | _(b"specified revisions evaluate to an empty set"), |
|
5747 | _(b"specified revisions evaluate to an empty set"), | |
@@ -6347,7 +6353,7 b' def revert(ui, repo, *pats, **opts):' | |||||
6347 | rev = opts.get(b'rev') |
|
6353 | rev = opts.get(b'rev') | |
6348 | if rev: |
|
6354 | if rev: | |
6349 | repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn') |
|
6355 | repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn') | |
6350 | ctx = scmutil.revsingle(repo, rev) |
|
6356 | ctx = logcmdutil.revsingle(repo, rev) | |
6351 |
|
6357 | |||
6352 | if not ( |
|
6358 | if not ( | |
6353 | pats |
|
6359 | pats | |
@@ -6905,11 +6911,11 b' def status(ui, repo, *pats, **opts):' | |||||
6905 | raise error.InputError(msg) |
|
6911 | raise error.InputError(msg) | |
6906 | elif change: |
|
6912 | elif change: | |
6907 | repo = scmutil.unhidehashlikerevs(repo, [change], b'nowarn') |
|
6913 | repo = scmutil.unhidehashlikerevs(repo, [change], b'nowarn') | |
6908 | ctx2 = scmutil.revsingle(repo, change, None) |
|
6914 | ctx2 = logcmdutil.revsingle(repo, change, None) | |
6909 | ctx1 = ctx2.p1() |
|
6915 | ctx1 = ctx2.p1() | |
6910 | else: |
|
6916 | else: | |
6911 | repo = scmutil.unhidehashlikerevs(repo, revs, b'nowarn') |
|
6917 | repo = scmutil.unhidehashlikerevs(repo, revs, b'nowarn') | |
6912 | ctx1, ctx2 = scmutil.revpair(repo, revs) |
|
6918 | ctx1, ctx2 = logcmdutil.revpair(repo, revs) | |
6913 |
|
6919 | |||
6914 | forcerelativevalue = None |
|
6920 | forcerelativevalue = None | |
6915 | if ui.hasconfig(b'commands', b'status.relative'): |
|
6921 | if ui.hasconfig(b'commands', b'status.relative'): | |
@@ -7453,7 +7459,7 b' def tag(ui, repo, name1, *names, **opts)' | |||||
7453 | b'(use -f to force)' |
|
7459 | b'(use -f to force)' | |
7454 | ) |
|
7460 | ) | |
7455 | ) |
|
7461 | ) | |
7456 | node = scmutil.revsingle(repo, rev_).node() |
|
7462 | node = logcmdutil.revsingle(repo, rev_).node() | |
7457 |
|
7463 | |||
7458 | if not message: |
|
7464 | if not message: | |
7459 | # we don't translate commit messages |
|
7465 | # we don't translate commit messages | |
@@ -7477,7 +7483,7 b' def tag(ui, repo, name1, *names, **opts)' | |||||
7477 | # don't allow tagging the null rev |
|
7483 | # don't allow tagging the null rev | |
7478 | if ( |
|
7484 | if ( | |
7479 | not opts.get(b'remove') |
|
7485 | not opts.get(b'remove') | |
7480 | and scmutil.revsingle(repo, rev_).rev() == nullrev |
|
7486 | and logcmdutil.revsingle(repo, rev_).rev() == nullrev | |
7481 | ): |
|
7487 | ): | |
7482 | raise error.InputError(_(b"cannot tag null revision")) |
|
7488 | raise error.InputError(_(b"cannot tag null revision")) | |
7483 |
|
7489 | |||
@@ -7840,7 +7846,7 b' def update(ui, repo, node=None, **opts):' | |||||
7840 | brev = rev |
|
7846 | brev = rev | |
7841 | if rev: |
|
7847 | if rev: | |
7842 | repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn') |
|
7848 | repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn') | |
7843 | ctx = scmutil.revsingle(repo, rev, default=None) |
|
7849 | ctx = logcmdutil.revsingle(repo, rev, default=None) | |
7844 | rev = ctx.rev() |
|
7850 | rev = ctx.rev() | |
7845 | hidden = ctx.hidden() |
|
7851 | hidden = ctx.hidden() | |
7846 | overrides = {(b'ui', b'forcemerge'): opts.get('tool', b'')} |
|
7852 | overrides = {(b'ui', b'forcemerge'): opts.get('tool', b'')} |
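
The commands.py hunks above are all the same mechanical substitution: revset resolution moves from scmutil to logcmdutil with unchanged call signatures. A hedged sketch of the new call shape:

    from mercurial import logcmdutil

    def resolve(repo, rev, revs):
        ctx = logcmdutil.revsingle(repo, rev, None)  # one changectx
        selection = logcmdutil.revrange(repo, revs)  # a set of revisions
        return ctx, selection
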
@@ -1,5 +1,5 b'' | |||||
1 | #ifndef _HG_COMPAT_H_ |
|
1 | #ifndef HG_COMPAT_H | |
2 | #define _HG_COMPAT_H_ |
|
2 | #define HG_COMPAT_H | |
3 |
|
3 | |||
4 | #ifdef _WIN32 |
|
4 | #ifdef _WIN32 | |
5 | #ifdef _MSC_VER |
|
5 | #ifdef _MSC_VER |
@@ -959,11 +959,6 b' coreconfigitem(' | |||||
959 | ) |
|
959 | ) | |
960 | coreconfigitem( |
|
960 | coreconfigitem( | |
961 | b'experimental', |
|
961 | b'experimental', | |
962 | b'dirstate-tree.in-memory', |
|
|||
963 | default=False, |
|
|||
964 | ) |
|
|||
965 | coreconfigitem( |
|
|||
966 | b'experimental', |
|
|||
967 | b'editortmpinhg', |
|
962 | b'editortmpinhg', | |
968 | default=False, |
|
963 | default=False, | |
969 | ) |
|
964 | ) | |
@@ -1266,6 +1261,11 b' coreconfigitem(' | |||||
1266 | ) |
|
1261 | ) | |
1267 | coreconfigitem( |
|
1262 | coreconfigitem( | |
1268 | b'experimental', |
|
1263 | b'experimental', | |
|
1264 | b'web.full-garbage-collection-rate', | |||
|
1265 | default=1, # still forcing a full collection on each request | |||
|
1266 | ) | |||
|
1267 | coreconfigitem( | |||
|
1268 | b'experimental', | |||
1269 | b'worker.wdir-get-thread-safe', |
|
1269 | b'worker.wdir-get-thread-safe', | |
1270 | default=False, |
|
1270 | default=False, | |
1271 | ) |
|
1271 | ) | |
@@ -1306,7 +1306,7 b' coreconfigitem(' | |||||
1306 | # Enable this dirstate format *when creating a new repository*. |
|
1306 | # Enable this dirstate format *when creating a new repository*. | |
1307 | # Which format to use for existing repos is controlled by .hg/requires |
|
1307 | # Which format to use for existing repos is controlled by .hg/requires | |
1308 | b'format', |
|
1308 | b'format', | |
1309 | b'exp-dirstate-v2', |
|
1309 | b'exp-rc-dirstate-v2', | |
1310 | default=False, |
|
1310 | default=False, | |
1311 | experimental=True, |
|
1311 | experimental=True, | |
1312 | ) |
|
1312 | ) | |
@@ -1880,6 +1880,13 b' coreconfigitem(' | |||||
1880 | default=b'skip', |
|
1880 | default=b'skip', | |
1881 | experimental=True, |
|
1881 | experimental=True, | |
1882 | ) |
|
1882 | ) | |
|
1883 | # experimental as long as format.exp-rc-dirstate-v2 is. | |||
|
1884 | coreconfigitem( | |||
|
1885 | b'storage', | |||
|
1886 | b'dirstate-v2.slow-path', | |||
|
1887 | default=b"abort", | |||
|
1888 | experimental=True, | |||
|
1889 | ) | |||
1883 | coreconfigitem( |
|
1890 | coreconfigitem( | |
1884 | b'storage', |
|
1891 | b'storage', | |
1885 | b'new-repo-backend', |
|
1892 | b'new-repo-backend', |
@@ -1551,11 +1551,11 b' class workingctx(committablectx):' | |||||
1551 | def __iter__(self): |
|
1551 | def __iter__(self): | |
1552 | d = self._repo.dirstate |
|
1552 | d = self._repo.dirstate | |
1553 | for f in d: |
|
1553 | for f in d: | |
1554 | if d[f] != b'r': |
|
1554 | if d.get_entry(f).tracked: | |
1555 | yield f |
|
1555 | yield f | |
1556 |
|
1556 | |||
1557 | def __contains__(self, key): |
|
1557 | def __contains__(self, key): | |
1558 |
return self._repo.dirstate |
|
1558 | return self._repo.dirstate.get_entry(key).tracked | |
1559 |
|
1559 | |||
1560 | def hex(self): |
|
1560 | def hex(self): | |
1561 | return self._repo.nodeconstants.wdirhex |
|
1561 | return self._repo.nodeconstants.wdirhex | |
@@ -2017,7 +2017,7 b' class workingctx(committablectx):' | |||||
2017 | def matches(self, match): |
|
2017 | def matches(self, match): | |
2018 | match = self._repo.narrowmatch(match) |
|
2018 | match = self._repo.narrowmatch(match) | |
2019 | ds = self._repo.dirstate |
|
2019 | ds = self._repo.dirstate | |
2020 | return sorted(f for f in ds.matches(match) if ds[f] != b'r') |
|
2020 | return sorted(f for f in ds.matches(match) if ds.get_entry(f).tracked) | |
2021 |
|
2021 | |||
2022 | def markcommitted(self, node): |
|
2022 | def markcommitted(self, node): | |
2023 | with self._repo.dirstate.parentchange(): |
|
2023 | with self._repo.dirstate.parentchange(): |
@@ -94,7 +94,7 b' def _dirstatecopies(repo, match=None):' | |||||
94 | ds = repo.dirstate |
|
94 | ds = repo.dirstate | |
95 | c = ds.copies().copy() |
|
95 | c = ds.copies().copy() | |
96 | for k in list(c): |
|
96 | for k in list(c): | |
97 | if ds[k] not in b'anm' or (match and not match(k)): |
|
97 | if not ds.get_entry(k).tracked or (match and not match(k)): | |
98 | del c[k] |
|
98 | del c[k] | |
99 | return c |
|
99 | return c | |
100 |
|
100 |
@@ -506,7 +506,7 b' def debugcapabilities(ui, path, **opts):' | |||||
506 | ) |
|
506 | ) | |
507 | def debugchangedfiles(ui, repo, rev, **opts): |
|
507 | def debugchangedfiles(ui, repo, rev, **opts): | |
508 | """list the stored files changes for a revision""" |
|
508 | """list the stored files changes for a revision""" | |
509 |
ctx = |
|
509 | ctx = logcmdutil.revsingle(repo, rev, None) | |
510 | files = None |
|
510 | files = None | |
511 |
|
511 | |||
512 | if opts['compute']: |
|
512 | if opts['compute']: | |
@@ -550,24 +550,9 b' def debugcheckstate(ui, repo):' | |||||
550 | m1 = repo[parent1].manifest() |
|
550 | m1 = repo[parent1].manifest() | |
551 | m2 = repo[parent2].manifest() |
|
551 | m2 = repo[parent2].manifest() | |
552 | errors = 0 |
|
552 | errors = 0 | |
553 | for f in repo.dirstate: |
|
553 | for err in repo.dirstate.verify(m1, m2): | |
554 | state = repo.dirstate[f] |
|
554 | ui.warn(err[0] % err[1:]) | |
555 | if state in b"nr" and f not in m1: |
|
555 | errors += 1 | |
556 | ui.warn(_(b"%s in state %s, but not in manifest1\n") % (f, state)) |
|
|||
557 | errors += 1 |
|
|||
558 | if state in b"a" and f in m1: |
|
|||
559 | ui.warn(_(b"%s in state %s, but also in manifest1\n") % (f, state)) |
|
|||
560 | errors += 1 |
|
|||
561 | if state in b"m" and f not in m1 and f not in m2: |
|
|||
562 | ui.warn( |
|
|||
563 | _(b"%s in state %s, but not in either manifest\n") % (f, state) |
|
|||
564 | ) |
|
|||
565 | errors += 1 |
|
|||
566 | for f in m1: |
|
|||
567 | state = repo.dirstate[f] |
|
|||
568 | if state not in b"nrm": |
|
|||
569 | ui.warn(_(b"%s in manifest1, but listed as state %s") % (f, state)) |
|
|||
570 | errors += 1 |
|
|||
571 | if errors: |
|
556 | if errors: | |
572 | errstr = _(b".hg/dirstate inconsistent with current parent's manifest") |
|
557 | errstr = _(b".hg/dirstate inconsistent with current parent's manifest") | |
573 | raise error.Abort(errstr) |
|
558 | raise error.Abort(errstr) | |
@@ -962,35 +947,29 b' def debugstate(ui, repo, **opts):'
     datesort = opts.get('datesort')

     if datesort:
-        keyfunc = lambda x: (
-            x[1].v1_mtime(),
-            x[0],
-        )
+
+        def keyfunc(entry):
+            filename, _state, _mode, _size, mtime = entry
+            return (mtime, filename)
+
     else:
         keyfunc = None  # sort by filename
-    if opts['all']:
-        entries = list(repo.dirstate._map.debug_iter())
-    else:
-        entries = list(pycompat.iteritems(repo.dirstate))
+    entries = list(repo.dirstate._map.debug_iter(all=opts['all']))
     entries.sort(key=keyfunc)
-    for file_, ent in entries:
-        if ent.v1_mtime() == -1:
+    for entry in entries:
+        filename, state, mode, size, mtime = entry
+        if mtime == -1:
             timestr = b'unset               '
         elif nodates:
             timestr = b'set                 '
         else:
-            timestr = time.strftime(
-                "%Y-%m-%d %H:%M:%S ", time.localtime(ent.v1_mtime())
-            )
+            timestr = time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime(mtime))
             timestr = encoding.strtolocal(timestr)
-        if ent.v1_mode() & 0o20000:
+        if mode & 0o20000:
             mode = b'lnk'
         else:
-            mode = b'%3o' % (ent.v1_mode() & 0o777 & ~util.umask)
-        ui.write(
-            b"%c %s %10d %s%s\n"
-            % (ent.v1_state(), mode, ent.v1_size(), timestr, file_)
-        )
+            mode = b'%3o' % (mode & 0o777 & ~util.umask)
+        ui.write(b"%c %s %10d %s%s\n" % (state, mode, size, timestr, filename))
     for f in repo.dirstate.copies():
         ui.write(_(b"copy: %s -> %s\n") % (repo.dirstate.copied(f), f))

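The debugstate rewrite leans on a changed `debug_iter` contract: the method now takes the `all` flag directly and yields plain `(filename, state, mode, size, mtime)` tuples instead of `(filename, item)` pairs with `v1_*()` accessors. A consuming sketch, assuming `all=True` also surfaces directory-only nodes (something only the dirstate-v2 format stores):

def dump_entries(dmap):
    # dmap stands in for repo.dirstate._map (illustrative name)
    for entry in dmap.debug_iter(all=True):
        filename, state, mode, size, mtime = entry
        print(filename, state, mode, size, mtime)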
@@ -1103,7 +1082,7 b' def debugdiscovery(ui, repo, remoteurl=b'
         ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(remoteurl))
     else:
         branches = (None, [])
-        remote_filtered_revs = scmutil.revrange(
+        remote_filtered_revs = logcmdutil.revrange(
             unfi, [b"not (::(%s))" % remote_revs]
         )
         remote_filtered_revs = frozenset(remote_filtered_revs)
@@ -1117,7 +1096,7 b' def debugdiscovery(ui, repo, remoteurl=b'
         remote._repo = remote._repo.filtered(b'debug-discovery-remote-filter')

     if local_revs:
-        local_filtered_revs = scmutil.revrange(
+        local_filtered_revs = logcmdutil.revrange(
             unfi, [b"not (::(%s))" % local_revs]
         )
         local_filtered_revs = frozenset(local_filtered_revs)
@@ -1155,7 +1134,7 b' def debugdiscovery(ui, repo, remoteurl=b'
     def doit(pushedrevs, remoteheads, remote=remote):
         nodes = None
         if pushedrevs:
-            revs = scmutil.revrange(repo, pushedrevs)
+            revs = logcmdutil.revrange(repo, pushedrevs)
             nodes = [repo[r].node() for r in revs]
         common, any, hds = setdiscovery.findcommonheads(
             ui, repo, remote, ancestorsof=nodes, audit=data
@@ -1394,7 +1373,7 b' def debugfileset(ui, repo, expr, **opts)'

     fileset.symbols  # force import of fileset so we have predicates to optimize
     opts = pycompat.byteskwargs(opts)
-    ctx = scmutil.revsingle(repo, opts.get(b'rev'), None)
+    ctx = logcmdutil.revsingle(repo, opts.get(b'rev'), None)

     stages = [
         (b'parsed', pycompat.identity),
@@ -1495,8 +1474,8 b' def debug_repair_issue6528(ui, repo, **o'
     filename.

     Note that this does *not* mean that this repairs future affected revisions,
-    that needs a separate fix at the exchange level that hasn't been written yet
-    (as of 5.9rc0).
+    that needs a separate fix at the exchange level that was introduced in
+    Mercurial 5.9.1.

     There is a `--paranoid` flag to test that the fast implementation is correct
     by checking it against the slow implementation. Since this matter is quite
@@ -2614,7 +2593,7 b' def debugobsolete(ui, repo, precursor=No'
             l.release()
     else:
         if opts[b'rev']:
-            revs = scmutil.revrange(repo, opts[b'rev'])
+            revs = logcmdutil.revrange(repo, opts[b'rev'])
             nodes = [repo[r].node() for r in revs]
             markers = list(
                 obsutil.getmarkers(
@@ -2981,16 +2960,28 b' def debugrebuilddirstate(ui, repo, rev, '
     dirstatefiles = set(dirstate)
     manifestonly = manifestfiles - dirstatefiles
     dsonly = dirstatefiles - manifestfiles
-    dsnotadded = {f for f in dsonly if dirstate[f] != b'a'}
+    dsnotadded = {f for f in dsonly if not dirstate.get_entry(f).added}
     changedfiles = manifestonly | dsnotadded

     dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)


-@command(b'debugrebuildfncache', [], b'')
-def debugrebuildfncache(ui, repo):
+@command(
+    b'debugrebuildfncache',
+    [
+        (
+            b'',
+            b'only-data',
+            False,
+            _(b'only look for wrong .d files (much faster)'),
+        )
+    ],
+    b'',
+)
+def debugrebuildfncache(ui, repo, **opts):
     """rebuild the fncache file"""
-    repair.rebuildfncache(ui, repo)
+    opts = pycompat.byteskwargs(opts)
+    repair.rebuildfncache(ui, repo, opts.get(b"only_data"))


 @command(
@@ -4018,7 +4009,7 b' def debugsuccessorssets(ui, repo, *revs,'
     cache = {}
     ctx2str = bytes
     node2str = short
-    for rev in scmutil.revrange(repo, revs):
+    for rev in logcmdutil.revrange(repo, revs):
         ctx = repo[rev]
         ui.write(b'%s\n' % ctx2str(ctx))
         for succsset in obsutil.successorssets(
@@ -4077,7 +4068,7 b' def debugtemplate(ui, repo, tmpl, **opts'
         raise error.RepoError(
             _(b'there is no Mercurial repository here (.hg not found)')
         )
-    revs = scmutil.revrange(repo, opts['rev'])
+    revs = logcmdutil.revrange(repo, opts['rev'])

     props = {}
     for d in opts['define']:
@@ -31,6 +31,10 b' from . import ('
     util,
 )

+from .dirstateutils import (
+    timestamp,
+)
+
 from .interfaces import (
     dirstate as intdirstate,
     util as interfaceutil,
@@ -39,13 +43,13 b' from .interfaces import ('
 parsers = policy.importmod('parsers')
 rustmod = policy.importrust('dirstate')

-SUPPORTS_DIRSTATE_V2 = rustmod is not None
+HAS_FAST_DIRSTATE_V2 = rustmod is not None

 propertycache = util.propertycache
 filecache = scmutil.filecache
 _rangemask = dirstatemap.rangemask

-DirstateItem = parsers.DirstateItem
+DirstateItem = dirstatemap.DirstateItem


 class repocache(filecache):
@@ -66,7 +70,7 b' def _getfsnow(vfs):'
     '''Get "now" timestamp on filesystem'''
     tmpfd, tmpname = vfs.mkstemp()
     try:
-        return os.fstat(tmpfd)[stat.ST_MTIME]
+        return timestamp.mtime_of(os.fstat(tmpfd))
     finally:
         os.close(tmpfd)
         vfs.unlink(tmpname)
@@ -122,7 +126,7 b' class dirstate(object):'
         # UNC path pointing to root share (issue4557)
         self._rootdir = pathutil.normasprefix(root)
         self._dirty = False
-        self._lastnormaltime = 0
+        self._lastnormaltime = timestamp.zero()
         self._ui = ui
         self._filecache = {}
         self._parentwriters = 0
@@ -130,7 +134,6 b' class dirstate(object):'
         self._pendingfilename = b'%s.pending' % self._filename
         self._plchangecallbacks = {}
         self._origpl = None
-        self._updatedfiles = set()
         self._mapcls = dirstatemap.dirstatemap
         # Access and cache cwd early, so we don't access it for the first time
         # after a working-copy update caused it to not exist (accessing it then
@@ -239,44 +242,59 b' class dirstate(object):'
         return self._rootdir + f

     def flagfunc(self, buildfallback):
-        if self._checklink and self._checkexec:
-
-            def f(x):
-                try:
-                    st = os.lstat(self._join(x))
-                    if util.statislink(st):
-                        return b'l'
-                    if util.statisexec(st):
-                        return b'x'
-                except OSError:
-                    pass
-                return b''
-
-            return f
-
-        fallback = buildfallback()
-        if self._checklink:
-
-            def f(x):
-                if os.path.islink(self._join(x)):
-                    return b'l'
-                if b'x' in fallback(x):
-                    return b'x'
-                return b''
-
-            return f
-        if self._checkexec:
-
-            def f(x):
-                if b'l' in fallback(x):
-                    return b'l'
-                if util.isexec(self._join(x)):
-                    return b'x'
-                return b''
-
-            return f
-        else:
-            return fallback
+        """build a callable that returns flags associated with a filename
+
+        The information is extracted from three possible layers:
+        1. the file system if it supports the information
+        2. the "fallback" information stored in the dirstate if any
+        3. a more expensive mechanism inferring the flags from the parents.
+        """
+
+        # small hack to cache the result of buildfallback()
+        fallback_func = []
+
+        def get_flags(x):
+            entry = None
+            fallback_value = None
+            try:
+                st = os.lstat(self._join(x))
+            except OSError:
+                return b''
+
+            if self._checklink:
+                if util.statislink(st):
+                    return b'l'
+            else:
+                entry = self.get_entry(x)
+                if entry.has_fallback_symlink:
+                    if entry.fallback_symlink:
+                        return b'l'
+                else:
+                    if not fallback_func:
+                        fallback_func.append(buildfallback())
+                    fallback_value = fallback_func[0](x)
+                    if b'l' in fallback_value:
+                        return b'l'
+
+            if self._checkexec:
+                if util.statisexec(st):
+                    return b'x'
+            else:
+                if entry is None:
+                    entry = self.get_entry(x)
+                if entry.has_fallback_exec:
+                    if entry.fallback_exec:
+                        return b'x'
+                else:
+                    if fallback_value is None:
+                        if not fallback_func:
+                            fallback_func.append(buildfallback())
+                        fallback_value = fallback_func[0](x)
+                    if b'x' in fallback_value:
+                        return b'x'
+            return b''
+
+        return get_flags

     @propertycache
     def _cwd(self):
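The rewrite merges three specialized closures into a single `get_flags` and makes the expensive `buildfallback()` (which infers flags from the parents) lazy: it only runs when a path actually needs layer 3, and its result is cached in a one-element list so the nested function can update it, since `nonlocal` is unavailable on Python 2, which this file still supported. A sketch of that closure-cell idiom:

def make_lazy(expensive_build):
    cache = []  # stays empty until the first call that needs it

    def get(x):
        if not cache:
            cache.append(expensive_build())  # runs at most once
        return cache[0](x)

    return get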
@@ -328,11 +346,20 b' class dirstate(object):'
         consider migrating all user of this to going through the dirstate entry
         instead.
         """
+        msg = b"don't use dirstate[file], use dirstate.get_entry(file)"
+        util.nouideprecwarn(msg, b'6.1', stacklevel=2)
         entry = self._map.get(key)
         if entry is not None:
             return entry.state
         return b'?'

+    def get_entry(self, path):
+        """return a DirstateItem for the associated path"""
+        entry = self._map.get(path)
+        if entry is None:
+            return DirstateItem()
+        return entry
+
     def __contains__(self, key):
         return key in self._map

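`get_entry` deliberately never returns None: an unknown path comes back as a fresh `DirstateItem()`, whose state properties all read false, so callers can chain attribute access without a missing-key check. A short sketch under that assumption:

def is_known(ds, path):
    # ds: a dirstate (illustrative name); no None check is needed
    entry = ds.get_entry(path)
    return entry.tracked or entry.added or entry.removed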
@@ -344,9 +371,6 b' class dirstate(object):'

     iteritems = items

-    def directories(self):
-        return self._map.directories()
-
     def parents(self):
         return [self._validate(p) for p in self._pl]

@@ -385,32 +409,10 b' class dirstate(object):'
         oldp2 = self._pl[1]
         if self._origpl is None:
             self._origpl = self._pl
-        self._map.setparents(p1, p2)
-        copies = {}
-        if (
-            oldp2 != self._nodeconstants.nullid
-            and p2 == self._nodeconstants.nullid
-        ):
-            candidatefiles = self._map.non_normal_or_other_parent_paths()
-
-            for f in candidatefiles:
-                s = self._map.get(f)
-                if s is None:
-                    continue
-
-                # Discard "merged" markers when moving away from a merge state
-                if s.merged:
-                    source = self._map.copymap.get(f)
-                    if source:
-                        copies[f] = source
-                    self._normallookup(f)
-                # Also fix up otherparent markers
-                elif s.from_p2:
-                    source = self._map.copymap.get(f)
-                    if source:
-                        copies[f] = source
-                    self._add(f)
-            return copies
+        nullid = self._nodeconstants.nullid
+        # True if we need to fold p2 related state back to a linear case
+        fold_p2 = oldp2 != nullid and p2 == nullid
+        return self._map.setparents(p1, p2, fold_p2=fold_p2)

     def setbranch(self, branch):
         self.__class__._branch.set(self, encoding.fromlocal(branch))
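The copy-fixup loop that used to live in `setparents` moves into the map layer; the Python side now only decides whether p2-related state must be folded back to the linear case. The condition in isolation (the nullid value here is illustrative):

NULLID = b'\x00' * 20  # stand-in for self._nodeconstants.nullid

def needs_fold(oldp2, p2):
    # a merge parent existed before and is gone now: fold p2 state back
    return oldp2 != NULLID and p2 == NULLID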
@@ -438,9 +440,8 b' class dirstate(object):'
         for a in ("_map", "_branch", "_ignore"):
             if a in self.__dict__:
                 delattr(self, a)
-        self._lastnormaltime = 0
+        self._lastnormaltime = timestamp.zero()
         self._dirty = False
-        self._updatedfiles.clear()
         self._parentwriters = 0
         self._origpl = None

@@ -451,10 +452,8 b' class dirstate(object):'
         self._dirty = True
         if source is not None:
             self._map.copymap[dest] = source
-            self._updatedfiles.add(source)
-            self._updatedfiles.add(dest)
-        elif self._map.copymap.pop(dest, None):
-            self._updatedfiles.add(dest)
+        else:
+            self._map.copymap.pop(dest, None)

     def copied(self, file):
         return self._map.copymap.get(file, None)
@@ -471,18 +470,11 b' class dirstate(object):'

         return True the file was previously untracked, False otherwise.
         """
+        self._dirty = True
         entry = self._map.get(filename)
-        if entry is None:
-            self._add(filename)
-            return True
-        elif not entry.tracked:
-            self._normallookup(filename)
-            return True
-        # XXX This is probably overkill for more case, but we need this to
-        # fully replace the `normallookup` call with `set_tracked` one.
-        # Consider smoothing this in the future.
-        self.set_possibly_dirty(filename)
-        return False
+        if entry is None or not entry.tracked:
+            self._check_new_tracked_filename(filename)
+        return self._map.set_tracked(filename)

     @requires_no_parents_change
     def set_untracked(self, filename):
@@ -493,28 +485,32 b' class dirstate(object):'

         return True the file was previously tracked, False otherwise.
         """
-        entry = self._map.get(filename)
-        if entry is None:
-            return False
-        elif entry.added:
-            self._drop(filename)
-            return True
-        else:
-            self._remove(filename)
-            return True
+        ret = self._map.set_untracked(filename)
+        if ret:
+            self._dirty = True
+        return ret

     @requires_no_parents_change
     def set_clean(self, filename, parentfiledata=None):
         """record that the current state of the file on disk is known to be clean"""
         self._dirty = True
-        self._updatedfiles.add(filename)
-        self._normal(filename, parentfiledata=parentfiledata)
+        if parentfiledata:
+            (mode, size, mtime) = parentfiledata
+        else:
+            (mode, size, mtime) = self._get_filedata(filename)
+        if not self._map[filename].tracked:
+            self._check_new_tracked_filename(filename)
+        self._map.set_clean(filename, mode, size, mtime)
+        if mtime > self._lastnormaltime:
+            # Remember the most recent modification timeslot for status(),
+            # to make sure we won't miss future size-preserving file content
+            # modifications that happen within the same timeslot.
+            self._lastnormaltime = mtime

     @requires_no_parents_change
     def set_possibly_dirty(self, filename):
         """record that the current state of the file on disk is unknown"""
         self._dirty = True
-        self._updatedfiles.add(filename)
         self._map.set_possibly_dirty(filename)

     @requires_parents_change
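`set_clean` keeps the `_lastnormaltime` bookkeeping formerly buried in the private `_normal` helper. The race it guards, in isolation: a file rewritten within the same mtime slot in which it was recorded clean could preserve its size and go unnoticed, so `status()` must re-check such files by content. A sketch of the test:

def must_recheck(entry_mtime, last_normal_time):
    # equal timestamps mean "modified in the very slot where clean state
    # was recorded"; only a content comparison is safe then
    return entry_mtime == last_normal_time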
@@ -539,35 +535,26 b' class dirstate(object):'
             wc_tracked = False
         else:
             wc_tracked = entry.tracked
-        possibly_dirty = False
-        if p1_tracked and wc_tracked:
-            # the underlying reference might have changed, we will have to
-            # check it.
-            possibly_dirty = True
-        elif not (p1_tracked or wc_tracked):
+        if not (p1_tracked or wc_tracked):
             # the file is no longer relevant to anyone
-            self._drop(filename)
+            if self._map.get(filename) is not None:
+                self._map.reset_state(filename)
+                self._dirty = True
         elif (not p1_tracked) and wc_tracked:
             if entry is not None and entry.added:
                 return  # avoid dropping copy information (maybe?)
-        elif p1_tracked and not wc_tracked:
-            pass
-        else:
-            assert False, 'unreachable'

-        # this mean we are doing call for file we do not really care about the
-        # data (eg: added or removed), however this should be a minor overhead
-        # compared to the overall update process calling this.
         parentfiledata = None
-        if wc_tracked:
+        if wc_tracked and p1_tracked:
             parentfiledata = self._get_filedata(filename)

-        self._updatedfiles.add(filename)
         self._map.reset_state(
             filename,
             wc_tracked,
             p1_tracked,
-            possibly_dirty=possibly_dirty,
+            # the underlying reference might have changed, we will have to
+            # check it.
+            has_meaningful_mtime=False,
             parentfiledata=parentfiledata,
         )
         if (
@@ -585,10 +572,7 b' class dirstate(object):'
         filename,
         wc_tracked,
         p1_tracked,
-        p2_tracked=False,
-        merged=False,
-        clean_p1=False,
-        clean_p2=False,
+        p2_info=False,
         possibly_dirty=False,
         parentfiledata=None,
     ):
@@ -603,47 +587,26 b' class dirstate(object):'
         depending of what information ends up being relevant and useful to
         other processing.
         """
-        if merged and (clean_p1 or clean_p2):
-            msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
-            raise error.ProgrammingError(msg)

         # note: I do not think we need to double check name clash here since we
         # are in a update/merge case that should already have taken care of
         # this. The test agrees

         self._dirty = True
-        self._updatedfiles.add(filename)

         need_parent_file_data = (
-            not (possibly_dirty or clean_p2 or merged)
-            and wc_tracked
-            and p1_tracked
+            not possibly_dirty and not p2_info and wc_tracked and p1_tracked
         )

-        # this mean we are doing call for file we do not really care about the
-        # data (eg: added or removed), however this should be a minor overhead
-        # compared to the overall update process calling this.
-        if need_parent_file_data:
-            if parentfiledata is None:
-                parentfiledata = self._get_filedata(filename)
-            mtime = parentfiledata[2]
-
-            if mtime > self._lastnormaltime:
-                # Remember the most recent modification timeslot for
-                # status(), to make sure we won't miss future
-                # size-preserving file content modifications that happen
-                # within the same timeslot.
-                self._lastnormaltime = mtime
+        if need_parent_file_data and parentfiledata is None:
+            parentfiledata = self._get_filedata(filename)

         self._map.reset_state(
             filename,
             wc_tracked,
             p1_tracked,
-            p2_tracked=p2_tracked,
-            merged=merged,
-            clean_p1=clean_p1,
-            clean_p2=clean_p2,
-            possibly_dirty=possibly_dirty,
+            p2_info=p2_info,
+            has_meaningful_mtime=not possibly_dirty,
             parentfiledata=parentfiledata,
         )
         if (
@@ -655,263 +618,30 b' class dirstate(object):'
             # modifications that happen within the same timeslot.
             self._lastnormaltime = parentfiledata[2]

-    def _addpath(
-        self,
-        f,
-        mode=0,
-        size=None,
-        mtime=None,
-        added=False,
-        merged=False,
-        from_p2=False,
-        possibly_dirty=False,
-    ):
-        entry = self._map.get(f)
-        if added or entry is not None and entry.removed:
-            scmutil.checkfilename(f)
-            if self._map.hastrackeddir(f):
-                msg = _(b'directory %r already in dirstate')
-                msg %= pycompat.bytestr(f)
-                raise error.Abort(msg)
-            # shadows
-            for d in pathutil.finddirs(f):
-                if self._map.hastrackeddir(d):
-                    break
-                entry = self._map.get(d)
-                if entry is not None and not entry.removed:
-                    msg = _(b'file %r in dirstate clashes with %r')
-                    msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
-                    raise error.Abort(msg)
-        self._dirty = True
-        self._updatedfiles.add(f)
-        self._map.addfile(
-            f,
-            mode=mode,
-            size=size,
-            mtime=mtime,
-            added=added,
-            merged=merged,
-            from_p2=from_p2,
-            possibly_dirty=possibly_dirty,
-        )
+    def _check_new_tracked_filename(self, filename):
+        scmutil.checkfilename(filename)
+        if self._map.hastrackeddir(filename):
+            msg = _(b'directory %r already in dirstate')
+            msg %= pycompat.bytestr(filename)
+            raise error.Abort(msg)
+        # shadows
+        for d in pathutil.finddirs(filename):
+            if self._map.hastrackeddir(d):
+                break
+            entry = self._map.get(d)
+            if entry is not None and not entry.removed:
+                msg = _(b'file %r in dirstate clashes with %r')
+                msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
+                raise error.Abort(msg)

     def _get_filedata(self, filename):
         """returns"""
         s = os.lstat(self._join(filename))
         mode = s.st_mode
         size = s.st_size
-        mtime = s[stat.ST_MTIME]
+        mtime = timestamp.mtime_of(s)
         return (mode, size, mtime)

-    def normal(self, f, parentfiledata=None):
-        """Mark a file normal and clean.
-
-        parentfiledata: (mode, size, mtime) of the clean file
-
-        parentfiledata should be computed from memory (for mode,
-        size), as or close as possible from the point where we
-        determined the file was clean, to limit the risk of the
-        file having been changed by an external process between the
-        moment where the file was determined to be clean and now."""
-        if self.pendingparentchange():
-            util.nouideprecwarn(
-                b"do not use `normal` inside of update/merge context."
-                b" Use `update_file` or `update_file_p1`",
-                b'6.0',
-                stacklevel=2,
-            )
-        else:
-            util.nouideprecwarn(
-                b"do not use `normal` outside of update/merge context."
-                b" Use `set_tracked`",
-                b'6.0',
-                stacklevel=2,
-            )
-        self._normal(f, parentfiledata=parentfiledata)
-
-    def _normal(self, f, parentfiledata=None):
-        if parentfiledata:
-            (mode, size, mtime) = parentfiledata
-        else:
-            (mode, size, mtime) = self._get_filedata(f)
-        self._addpath(f, mode=mode, size=size, mtime=mtime)
-        self._map.copymap.pop(f, None)
-        if f in self._map.nonnormalset:
-            self._map.nonnormalset.remove(f)
-        if mtime > self._lastnormaltime:
-            # Remember the most recent modification timeslot for status(),
-            # to make sure we won't miss future size-preserving file content
-            # modifications that happen within the same timeslot.
-            self._lastnormaltime = mtime
-
-    def normallookup(self, f):
-        '''Mark a file normal, but possibly dirty.'''
-        if self.pendingparentchange():
-            util.nouideprecwarn(
-                b"do not use `normallookup` inside of update/merge context."
-                b" Use `update_file` or `update_file_p1`",
-                b'6.0',
-                stacklevel=2,
-            )
-        else:
-            util.nouideprecwarn(
-                b"do not use `normallookup` outside of update/merge context."
-                b" Use `set_possibly_dirty` or `set_tracked`",
-                b'6.0',
-                stacklevel=2,
-            )
-        self._normallookup(f)
-
-    def _normallookup(self, f):
-        '''Mark a file normal, but possibly dirty.'''
-        if self.in_merge:
-            # if there is a merge going on and the file was either
-            # "merged" or coming from other parent (-2) before
-            # being removed, restore that state.
-            entry = self._map.get(f)
-            if entry is not None:
-                # XXX this should probably be dealt with a a lower level
-                # (see `merged_removed` and `from_p2_removed`)
-                if entry.merged_removed or entry.from_p2_removed:
-                    source = self._map.copymap.get(f)
-                    if entry.merged_removed:
-                        self._merge(f)
-                    elif entry.from_p2_removed:
-                        self._otherparent(f)
-                    if source is not None:
-                        self.copy(source, f)
-                    return
-                elif entry.merged or entry.from_p2:
-                    return
-        self._addpath(f, possibly_dirty=True)
-        self._map.copymap.pop(f, None)
-
-    def otherparent(self, f):
-        '''Mark as coming from the other parent, always dirty.'''
-        if self.pendingparentchange():
-            util.nouideprecwarn(
-                b"do not use `otherparent` inside of update/merge context."
-                b" Use `update_file` or `update_file_p1`",
-                b'6.0',
-                stacklevel=2,
-            )
-        else:
-            util.nouideprecwarn(
-                b"do not use `otherparent` outside of update/merge context."
-                b"It should have been set by the update/merge code",
-                b'6.0',
-                stacklevel=2,
-            )
-        self._otherparent(f)
-
-    def _otherparent(self, f):
-        if not self.in_merge:
-            msg = _(b"setting %r to other parent only allowed in merges") % f
-            raise error.Abort(msg)
-        entry = self._map.get(f)
-        if entry is not None and entry.tracked:
-            # merge-like
-            self._addpath(f, merged=True)
-        else:
-            # add-like
-            self._addpath(f, from_p2=True)
-        self._map.copymap.pop(f, None)
-
-    def add(self, f):
-        '''Mark a file added.'''
-        if self.pendingparentchange():
-            util.nouideprecwarn(
-                b"do not use `add` inside of update/merge context."
-                b" Use `update_file`",
-                b'6.0',
-                stacklevel=2,
-            )
-        else:
-            util.nouideprecwarn(
-                b"do not use `add` outside of update/merge context."
-                b" Use `set_tracked`",
-                b'6.0',
-                stacklevel=2,
-            )
-        self._add(f)
-
-    def _add(self, filename):
-        """internal function to mark a file as added"""
-        self._addpath(filename, added=True)
-        self._map.copymap.pop(filename, None)
-
-    def remove(self, f):
-        '''Mark a file removed'''
-        if self.pendingparentchange():
-            util.nouideprecwarn(
-                b"do not use `remove` insde of update/merge context."
-                b" Use `update_file` or `update_file_p1`",
-                b'6.0',
-                stacklevel=2,
-            )
-        else:
-            util.nouideprecwarn(
-                b"do not use `remove` outside of update/merge context."
-                b" Use `set_untracked`",
-                b'6.0',
-                stacklevel=2,
-            )
-        self._remove(f)
-
-    def _remove(self, filename):
-        """internal function to mark a file removed"""
-        self._dirty = True
-        self._updatedfiles.add(filename)
-        self._map.removefile(filename, in_merge=self.in_merge)
-
-    def merge(self, f):
-        '''Mark a file merged.'''
-        if self.pendingparentchange():
-            util.nouideprecwarn(
-                b"do not use `merge` inside of update/merge context."
-                b" Use `update_file`",
-                b'6.0',
-                stacklevel=2,
-            )
-        else:
-            util.nouideprecwarn(
-                b"do not use `merge` outside of update/merge context."
-                b"It should have been set by the update/merge code",
-                b'6.0',
-                stacklevel=2,
-            )
-        self._merge(f)
-
-    def _merge(self, f):
-        if not self.in_merge:
-            return self._normallookup(f)
-        return self._otherparent(f)
-
-    def drop(self, f):
-        '''Drop a file from the dirstate'''
-        if self.pendingparentchange():
-            util.nouideprecwarn(
-                b"do not use `drop` inside of update/merge context."
-                b" Use `update_file`",
-                b'6.0',
-                stacklevel=2,
-            )
-        else:
-            util.nouideprecwarn(
-                b"do not use `drop` outside of update/merge context."
-                b" Use `set_untracked`",
-                b'6.0',
-                stacklevel=2,
-            )
-        self._drop(f)
-
-    def _drop(self, filename):
-        """internal function to drop a file from the dirstate"""
-        if self._map.dropfile(filename):
-            self._dirty = True
-            self._updatedfiles.add(filename)
-            self._map.copymap.pop(filename, None)
-
     def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
         if exists is None:
             exists = os.path.lexists(os.path.join(self._root, path))
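This hunk deletes the deprecated per-state mutators (normal, normallookup, otherparent, add, remove, merge, drop and their underscore twins) wholesale; only the name-clash check survives, extracted as `_check_new_tracked_filename`. For callers, the migration collapses to the two explicit-tracking entry points outside update/merge (names per the deprecation messages removed above):

def migrate_tracking(ds, f, track):
    # ds: a dirstate, f: a path (illustrative parameters)
    if track:
        ds.set_tracked(f)    # replaces add() / normal() / normallookup()
    else:
        ds.set_untracked(f)  # replaces remove() / drop()
    # inside update/merge, callers use ds.update_file(...) instead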
@@ -990,8 +720,7 b' class dirstate(object):'

     def clear(self):
         self._map.clear()
-        self._lastnormaltime = 0
-        self._updatedfiles.clear()
+        self._lastnormaltime = timestamp.zero()
         self._dirty = True

     def rebuild(self, parent, allfiles, changedfiles=None):
997 | def rebuild(self, parent, allfiles, changedfiles=None): |
|
726 | def rebuild(self, parent, allfiles, changedfiles=None): | |
@@ -1022,9 +751,17 b' class dirstate(object):' | |||||
1022 | self._map.setparents(parent, self._nodeconstants.nullid) |
|
751 | self._map.setparents(parent, self._nodeconstants.nullid) | |
1023 |
|
752 | |||
1024 | for f in to_lookup: |
|
753 | for f in to_lookup: | |
1025 | self._normallookup(f) |
|
754 | ||
|
755 | if self.in_merge: | |||
|
756 | self.set_tracked(f) | |||
|
757 | else: | |||
|
758 | self._map.reset_state( | |||
|
759 | f, | |||
|
760 | wc_tracked=True, | |||
|
761 | p1_tracked=True, | |||
|
762 | ) | |||
1026 | for f in to_drop: |
|
763 | for f in to_drop: | |
1027 |
self._ |
|
764 | self._map.reset_state(f) | |
1028 |
|
765 | |||
1029 | self._dirty = True |
|
766 | self._dirty = True | |
1030 |
|
767 | |||
@@ -1048,19 +785,14 b' class dirstate(object):'
             # See also the wiki page below for detail:
             # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

-            # emulate dropping timestamp in 'parsers.pack_dirstate'
+            # record when mtime start to be ambiguous
             now = _getfsnow(self._opener)
-            self._map.clearambiguoustimes(self._updatedfiles, now)
-
-            # emulate that all 'dirstate.normal' results are written out
-            self._lastnormaltime = 0
-            self._updatedfiles.clear()

             # delay writing in-memory changes out
             tr.addfilegenerator(
                 b'dirstate',
                 (self._filename,),
-                lambda f: self._writedirstate(tr, f),
+                lambda f: self._writedirstate(tr, f, now=now),
                 location=b'plain',
             )
             return
@@ -1079,7 +811,7 b' class dirstate(object):'
         """
         self._plchangecallbacks[category] = callback

-    def _writedirstate(self, tr, st):
+    def _writedirstate(self, tr, st, now=None):
         # notify callbacks about parents change
         if self._origpl is not None and self._origpl != self._pl:
             for c, callback in sorted(
@@ -1087,9 +819,11 b' class dirstate(object):'
             ):
                 callback(self, self._origpl, self._pl)
             self._origpl = None
-        # use the modification time of the newly created temporary file as the
-        # filesystem's notion of 'now'
-        now = util.fstat(st)[stat.ST_MTIME] & _rangemask
+
+        if now is None:
+            # use the modification time of the newly created temporary file as the
+            # filesystem's notion of 'now'
+            now = timestamp.mtime_of(util.fstat(st))

         # enough 'delaywrite' prevents 'pack_dirstate' from dropping
         # timestamp of each entries in dirstate, because of 'now > mtime'
@@ -1106,11 +840,12 b' class dirstate(object):'
                 start = int(clock) - (int(clock) % delaywrite)
                 end = start + delaywrite
                 time.sleep(end - clock)
-                now = end  # trust our estimate that the end is near now
+                # trust our estimate that the end is near now
+                now = timestamp.timestamp((end, 0))
                 break

         self._map.write(tr, st, now)
-        self._lastnormaltime = 0
+        self._lastnormaltime = timestamp.zero()
         self._dirty = False

     def _dirignore(self, f):
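In the delaywrite branch, "now" is rebuilt as a whole-second timestamp with zero (that is, unknown) nanoseconds after sleeping past the ambiguous slot, since the computed `end` only has second precision. The rounding in isolation:

import time

def wait_out_timeslot(delaywrite):
    clock = time.time()
    start = int(clock) - (int(clock) % delaywrite)
    end = start + delaywrite
    time.sleep(end - clock)  # sleep until the ambiguous slot has passed
    return (end, 0)          # seconds only; sub-second part unknown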
@@ -1503,7 +1238,7 b' class dirstate(object):'
             traversed,
             dirty,
         ) = rustmod.status(
-            self._map._rustmap,
+            self._map._map,
             matcher,
             self._rootdir,
             self._ignorefiles(),
@@ -1624,6 +1359,7 b' class dirstate(object):'
         mexact = match.exact
         dirignore = self._dirignore
         checkexec = self._checkexec
+        checklink = self._checklink
         copymap = self._map.copymap
         lastnormaltime = self._lastnormaltime

@@ -1643,34 +1379,35 b' class dirstate(object):'
                 uadd(fn)
                 continue

-            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
-            # written like that for performance reasons. dmap[fn] is not a
-            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
-            # opcode has fast paths when the value to be unpacked is a tuple or
-            # a list, but falls back to creating a full-fledged iterator in
-            # general. That is much slower than simply accessing and storing the
-            # tuple members one by one.
             t = dget(fn)
             mode = t.mode
             size = t.size
-            time = t.mtime

             if not st and t.tracked:
                 dadd(fn)
-            elif t.merged:
+            elif t.p2_info:
                 madd(fn)
             elif t.added:
                 aadd(fn)
             elif t.removed:
                 radd(fn)
             elif t.tracked:
-                if (
+                if not checklink and t.has_fallback_symlink:
+                    # If the file system does not support symlink, the mode
+                    # might not be correctly stored in the dirstate, so do not
+                    # trust it.
+                    ladd(fn)
+                elif not checkexec and t.has_fallback_exec:
+                    # If the file system does not support exec bits, the mode
+                    # might not be correctly stored in the dirstate, so do not
+                    # trust it.
+                    ladd(fn)
+                elif (
                     size >= 0
                     and (
                         (size != st.st_size and size != st.st_size & _rangemask)
                         or ((mode ^ st.st_mode) & 0o100 and checkexec)
                     )
-                    or t.from_p2
                     or fn in copymap
                 ):
                     if stat.S_ISLNK(st.st_mode) and size != st.st_size:
@@ -1679,12 +1416,9 b' class dirstate(object):'
                         ladd(fn)
                     else:
                         madd(fn)
-                elif (
-                    time != st[stat.ST_MTIME]
-                    and time != st[stat.ST_MTIME] & _rangemask
-                ):
+                elif not t.mtime_likely_equal_to(timestamp.mtime_of(st)):
                     ladd(fn)
-                elif st[stat.ST_MTIME] == lastnormaltime:
+                elif timestamp.mtime_of(st) == lastnormaltime:
                     # fn may have just been marked as normal and it may have
                     # changed in the same second without changing its size.
                     # This can happen if we quickly do multiple commits.
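The two new `ladd` branches handle filesystems that cannot represent a flag natively: when the symlink or exec bit exists only as dirstate fallback data, the on-disk mode cannot be trusted, so the file is routed to the lookup list for a content check. The routing in isolation:

def route(t, checkexec, checklink):
    # t: a dirstate entry; check*: filesystem capabilities (illustrative)
    if not checklink and t.has_fallback_symlink:
        return 'lookup'  # symlinkness is only in the dirstate: recheck
    if not checkexec and t.has_fallback_exec:
        return 'lookup'  # exec bit is only in the dirstate: recheck
    return 'stat-compare'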
@@ -1703,7 +1437,7 b' class dirstate(object):'
         """
         dmap = self._map
         if rustmod is not None:
-            dmap = self._map._rustmap
+            dmap = self._map._map

         if match.always():
             return dmap.keys()
@@ -1778,3 +1512,22 b' class dirstate(object):'
     def clearbackup(self, tr, backupname):
         '''Clear backup file'''
         self._opener.unlink(backupname)
+
+    def verify(self, m1, m2):
+        """check the dirstate content again the parent manifest and yield errors"""
+        missing_from_p1 = b"%s in state %s, but not in manifest1\n"
+        unexpected_in_p1 = b"%s in state %s, but also in manifest1\n"
+        missing_from_ps = b"%s in state %s, but not in either manifest\n"
+        missing_from_ds = b"%s in manifest1, but listed as state %s\n"
+        for f, entry in self.items():
+            state = entry.state
+            if state in b"nr" and f not in m1:
+                yield (missing_from_p1, f, state)
+            if state in b"a" and f in m1:
+                yield (unexpected_in_p1, f, state)
+            if state in b"m" and f not in m1 and f not in m2:
+                yield (missing_from_ps, f, state)
+        for f in m1:
+            state = self.get_entry(f).state
+            if state not in b"nrm":
+                yield (missing_from_ds, f, state)
@@ -20,6 +20,7 b' from . import ('

 from .dirstateutils import (
     docket as docketmod,
+    v2,
 )

 parsers = policy.importmod('parsers')
@@ -27,22 +28,276 b" rustmod = policy.importrust('dirstate')" | |||||
27 |
|
28 | |||
28 | propertycache = util.propertycache |
|
29 | propertycache = util.propertycache | |
29 |
|
30 | |||
30 | DirstateItem = parsers.DirstateItem |
|
31 | if rustmod is None: | |
31 |
|
32 | DirstateItem = parsers.DirstateItem | ||
32 |
|
33 | else: | ||
33 | # a special value used internally for `size` if the file come from the other parent |
|
34 | DirstateItem = rustmod.DirstateItem | |
34 | FROM_P2 = -2 |
|
|||
35 |
|
||||
36 | # a special value used internally for `size` if the file is modified/merged/added |
|
|||
37 | NONNORMAL = -1 |
|
|||
38 |
|
||||
39 | # a special value used internally for `time` if the time is ambigeous |
|
|||
40 | AMBIGUOUS_TIME = -1 |
|
|||
41 |
|
35 | |||
42 | rangemask = 0x7FFFFFFF |
|
36 | rangemask = 0x7FFFFFFF | |
43 |
|
37 | |||
44 |
|
38 | |||
45 | class dirstatemap(object): |
|
39 | class _dirstatemapcommon(object): | |
|
40 | """ | |||
|
41 | Methods that are identical for both implementations of the dirstatemap | |||
|
42 | class, with and without Rust extensions enabled. | |||
|
43 | """ | |||
|
44 | ||||
|
45 | # please pytype | |||
|
46 | ||||
|
47 | _map = None | |||
|
48 | copymap = None | |||
|
49 | ||||
|
50 | def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2): | |||
|
51 | self._use_dirstate_v2 = use_dirstate_v2 | |||
|
52 | self._nodeconstants = nodeconstants | |||
|
53 | self._ui = ui | |||
|
54 | self._opener = opener | |||
|
55 | self._root = root | |||
|
56 | self._filename = b'dirstate' | |||
|
57 | self._nodelen = 20 # Also update Rust code when changing this! | |||
|
58 | self._parents = None | |||
|
59 | self._dirtyparents = False | |||
|
60 | self._docket = None | |||
|
61 | ||||
|
62 | # for consistent view between _pl() and _read() invocations | |||
|
63 | self._pendingmode = None | |||
|
64 | ||||
|
65 | def preload(self): | |||
|
66 | """Loads the underlying data, if it's not already loaded""" | |||
|
67 | self._map | |||
|
68 | ||||
|
69 | def get(self, key, default=None): | |||
|
70 | return self._map.get(key, default) | |||
|
71 | ||||
|
72 | def __len__(self): | |||
|
73 | return len(self._map) | |||
|
74 | ||||
|
75 | def __iter__(self): | |||
|
76 | return iter(self._map) | |||
|
77 | ||||
|
78 | def __contains__(self, key): | |||
|
79 | return key in self._map | |||
|
80 | ||||
|
81 | def __getitem__(self, item): | |||
|
82 | return self._map[item] | |||
|
83 | ||||
|
84 | ### sub-class utility method | |||
|
85 | # | |||
|
86 | # Use to allow for generic implementation of some method while still coping | |||
|
87 | # with minor difference between implementation. | |||
|
88 | ||||
|
89 | def _dirs_incr(self, filename, old_entry=None): | |||
|
90 | """incremente the dirstate counter if applicable | |||
|
91 | ||||
|
92 | This might be a no-op for some subclass who deal with directory | |||
|
93 | tracking in a different way. | |||
|
94 | """ | |||
|
95 | ||||
|
96 | def _dirs_decr(self, filename, old_entry=None, remove_variant=False): | |||
|
97 | """decremente the dirstate counter if applicable | |||
|
98 | ||||
|
99 | This might be a no-op for some subclass who deal with directory | |||
|
100 | tracking in a different way. | |||
|
101 | """ | |||
|
102 | ||||
|
103 | def _refresh_entry(self, f, entry): | |||
|
104 | """record updated state of an entry""" | |||
|
105 | ||||
|
106 | def _insert_entry(self, f, entry): | |||
|
107 | """add a new dirstate entry (or replace an unrelated one) | |||
|
108 | ||||
|
109 | The fact it is actually new is the responsability of the caller | |||
|
110 | """ | |||
|
111 | ||||
|
112 | def _drop_entry(self, f): | |||
|
113 | """remove any entry for file f | |||
|
114 | ||||
|
115 | This should also drop associated copy information | |||
|
116 | ||||
|
117 | The fact we actually need to drop it is the responsability of the caller""" | |||
|
118 | ||||
|
119 | ### method to manipulate the entries | |||
|
120 | ||||
|
121 | def set_possibly_dirty(self, filename): | |||
|
122 | """record that the current state of the file on disk is unknown""" | |||
|
123 | entry = self[filename] | |||
|
124 | entry.set_possibly_dirty() | |||
|
125 | self._refresh_entry(filename, entry) | |||
|
126 | ||||
|
127 | def set_clean(self, filename, mode, size, mtime): | |||
|
128 | """mark a file as back to a clean state""" | |||
|
129 | entry = self[filename] | |||
|
130 | size = size & rangemask | |||
|
131 | entry.set_clean(mode, size, mtime) | |||
|
132 | self._refresh_entry(filename, entry) | |||
|
133 | self.copymap.pop(filename, None) | |||
|
134 | ||||
|
135 | def set_tracked(self, filename): | |||
|
136 | new = False | |||
|
137 | entry = self.get(filename) | |||
|
138 | if entry is None: | |||
|
139 | self._dirs_incr(filename) | |||
|
140 | entry = DirstateItem( | |||
|
141 | wc_tracked=True, | |||
|
142 | ) | |||
|
143 | ||||
|
144 | self._insert_entry(filename, entry) | |||
|
145 | new = True | |||
|
146 | elif not entry.tracked: | |||
|
147 | self._dirs_incr(filename, entry) | |||
|
148 | entry.set_tracked() | |||
|
149 | self._refresh_entry(filename, entry) | |||
|
150 | new = True | |||
|
151 | else: | |||
|
152 | # XXX This is probably overkill for more case, but we need this to | |||
|
153 | # fully replace the `normallookup` call with `set_tracked` one. | |||
|
154 | # Consider smoothing this in the future. | |||
|
155 | entry.set_possibly_dirty() | |||
|
156 | self._refresh_entry(filename, entry) | |||
|
157 | return new | |||
|
158 | ||||
|
159 | def set_untracked(self, f): | |||
|
160 | """Mark a file as no longer tracked in the dirstate map""" | |||
|
161 | entry = self.get(f) | |||
|
162 | if entry is None: | |||
|
163 | return False | |||
|
164 | else: | |||
|
165 | self._dirs_decr(f, old_entry=entry, remove_variant=not entry.added) | |||
|
166 | if not entry.p2_info: | |||
|
167 | self.copymap.pop(f, None) | |||
|
168 | entry.set_untracked() | |||
|
169 | self._refresh_entry(f, entry) | |||
|
170 | return True | |||
|
171 | ||||
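`set_tracked` and `set_untracked` below are the dirstate-map halves of `hg add` and `hg forget`. A rough usage sketch, assuming `dmap` is a dirstate map instance and the path is already normalized:

    # start tracking; True only when the file was not tracked before
    started = dmap.set_tracked(b'path/to/file')

    # stop tracking; False when the file was not known at all
    stopped = dmap.set_untracked(b'path/to/file')

The boolean results let callers report "already tracked" or "not tracked" without a second lookup.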
|
172 | def reset_state( | |||
|
173 | self, | |||
|
174 | filename, | |||
|
175 | wc_tracked=False, | |||
|
176 | p1_tracked=False, | |||
|
177 | p2_info=False, | |||
|
178 | has_meaningful_mtime=True, | |||
|
179 | has_meaningful_data=True, | |||
|
180 | parentfiledata=None, | |||
|
181 | ): | |||
|
182 | """Set a entry to a given state, diregarding all previous state | |||
|
183 | ||||
|
184 | This is to be used by the part of the dirstate API dedicated to | |||
|
185 | adjusting the dirstate after an update/merge. | |||
|
186 | ||||
|
187 | note: calling this might result in no entry existing at all if the | |||
|
188 | dirstate map does not see any point in having one for this file | |||
|
189 | anymore. | |||
|
190 | """ | |||
|
191 | # copy information is now outdated | |||
|
192 | # (maybe new information should be passed directly to this function) | |||
|
193 | self.copymap.pop(filename, None) | |||
|
194 | ||||
|
195 | if not (p1_tracked or p2_info or wc_tracked): | |||
|
196 | old_entry = self._map.get(filename) | |||
|
197 | self._drop_entry(filename) | |||
|
198 | self._dirs_decr(filename, old_entry=old_entry) | |||
|
199 | return | |||
|
200 | ||||
|
201 | old_entry = self._map.get(filename) | |||
|
202 | self._dirs_incr(filename, old_entry) | |||
|
203 | entry = DirstateItem( | |||
|
204 | wc_tracked=wc_tracked, | |||
|
205 | p1_tracked=p1_tracked, | |||
|
206 | p2_info=p2_info, | |||
|
207 | has_meaningful_mtime=has_meaningful_mtime, | |||
|
208 | parentfiledata=parentfiledata, | |||
|
209 | ) | |||
|
210 | self._insert_entry(filename, entry) | |||
|
211 | ||||
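For example, after an update the higher layer might record a file as clean in the new parent, or wipe one that no longer exists anywhere. A sketch with placeholder values:

    # known in p1 and in the working copy, with cached stat data:
    dmap.reset_state(
        b'f',
        wc_tracked=True,
        p1_tracked=True,
        parentfiledata=(mode, size, mtime),
    )

    # tracked nowhere: with all three flags left False the entry is dropped
    dmap.reset_state(b'f')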
|
212 | ### disk interaction | |||
|
213 | ||||
|
214 | def _opendirstatefile(self): | |||
|
215 | fp, mode = txnutil.trypending(self._root, self._opener, self._filename) | |||
|
216 | if self._pendingmode is not None and self._pendingmode != mode: | |||
|
217 | fp.close() | |||
|
218 | raise error.Abort( | |||
|
219 | _(b'working directory state may be changed in parallel') | |||
|
220 | ) | |||
|
221 | self._pendingmode = mode | |||
|
222 | return fp | |||
|
223 | ||||
|
224 | def _readdirstatefile(self, size=-1): | |||
|
225 | try: | |||
|
226 | with self._opendirstatefile() as fp: | |||
|
227 | return fp.read(size) | |||
|
228 | except IOError as err: | |||
|
229 | if err.errno != errno.ENOENT: | |||
|
230 | raise | |||
|
231 | # File doesn't exist, so the current state is empty | |||
|
232 | return b'' | |||
|
233 | ||||
|
234 | @property | |||
|
235 | def docket(self): | |||
|
236 | if not self._docket: | |||
|
237 | if not self._use_dirstate_v2: | |||
|
238 | raise error.ProgrammingError( | |||
|
239 | b'dirstate only has a docket in v2 format' | |||
|
240 | ) | |||
|
241 | self._docket = docketmod.DirstateDocket.parse( | |||
|
242 | self._readdirstatefile(), self._nodeconstants | |||
|
243 | ) | |||
|
244 | return self._docket | |||
|
245 | ||||
|
246 | def write_v2_no_append(self, tr, st, meta, packed): | |||
|
247 | old_docket = self.docket | |||
|
248 | new_docket = docketmod.DirstateDocket.with_new_uuid( | |||
|
249 | self.parents(), len(packed), meta | |||
|
250 | ) | |||
|
251 | data_filename = new_docket.data_filename() | |||
|
252 | if tr: | |||
|
253 | tr.add(data_filename, 0) | |||
|
254 | self._opener.write(data_filename, packed) | |||
|
255 | # Write the new docket after the new data file has been | |||
|
256 | # written. Because `st` was opened with `atomictemp=True`, | |||
|
257 | # the actual `.hg/dirstate` file is only affected on close. | |||
|
258 | st.write(new_docket.serialize()) | |||
|
259 | st.close() | |||
|
260 | # Remove the old data file after the new docket pointing to | |||
|
261 | # the new data file was written. | |||
|
262 | if old_docket.uuid: | |||
|
263 | data_filename = old_docket.data_filename() | |||
|
264 | unlink = lambda _tr=None: self._opener.unlink(data_filename) | |||
|
265 | if tr: | |||
|
266 | category = b"dirstate-v2-clean-" + old_docket.uuid | |||
|
267 | tr.addpostclose(category, unlink) | |||
|
268 | else: | |||
|
269 | unlink() | |||
|
270 | self._docket = new_docket | |||
|
271 | ||||
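The ordering in `write_v2_no_append` is what makes the rewrite crash-safe: readers only ever reach data through the docket, so at every instant `.hg/dirstate` names a fully written data file. Condensed to its three steps (a restatement of the method above, not additional API):

    opener.write(new_data_filename, packed)  # 1. new data under a fresh uuid
    st.write(new_docket.serialize())         # 2. docket swap; atomictemp makes
    st.close()                               #    the rename happen on close
    opener.unlink(old_data_filename)         # 3. old data is now unreachable

A crash between any two steps leaves either the old or the new state fully readable.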
|
272 | ### reading/setting parents | |||
|
273 | ||||
|
274 | def parents(self): | |||
|
275 | if not self._parents: | |||
|
276 | if self._use_dirstate_v2: | |||
|
277 | self._parents = self.docket.parents | |||
|
278 | else: | |||
|
279 | read_len = self._nodelen * 2 | |||
|
280 | st = self._readdirstatefile(read_len) | |||
|
281 | l = len(st) | |||
|
282 | if l == read_len: | |||
|
283 | self._parents = ( | |||
|
284 | st[: self._nodelen], | |||
|
285 | st[self._nodelen : 2 * self._nodelen], | |||
|
286 | ) | |||
|
287 | elif l == 0: | |||
|
288 | self._parents = ( | |||
|
289 | self._nodeconstants.nullid, | |||
|
290 | self._nodeconstants.nullid, | |||
|
291 | ) | |||
|
292 | else: | |||
|
293 | raise error.Abort( | |||
|
294 | _(b'working directory state appears damaged!') | |||
|
295 | ) | |||
|
296 | ||||
|
297 | return self._parents | |||
|
298 | ||||
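In the v1 branch of `parents` above, the parents are simply the first two binary node IDs of the file, so the logic amounts to the following standalone sketch (20-byte hashes assumed):

    nodelen = 20
    header = data[: 2 * nodelen]
    if len(header) == 2 * nodelen:
        p1, p2 = header[:nodelen], header[nodelen:]
    elif not header:
        p1 = p2 = b'\0' * nodelen  # empty file: null parents
    else:
        raise ValueError('dirstate header is truncated')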
|
299 | ||||
|
300 | class dirstatemap(_dirstatemapcommon): | |||
46 | """Map encapsulating the dirstate's contents. |
|
301 | """Map encapsulating the dirstate's contents. | |
47 |
|
302 | |||
48 | The dirstate contains the following state: |
|
303 | The dirstate contains the following state: | |
@@ -56,19 +311,19 b' class dirstatemap(object):' | |||||
56 | - the state map maps filenames to tuples of (state, mode, size, mtime), |
|
311 | - the state map maps filenames to tuples of (state, mode, size, mtime), | |
57 | where state is a single character representing 'normal', 'added', |
|
312 | where state is a single character representing 'normal', 'added', | |
58 | 'removed', or 'merged'. It is read by treating the dirstate as a |
|
313 | 'removed', or 'merged'. It is read by treating the dirstate as a | |
59 | dict. File state is updated by calling the `addfile`, `removefile` and
|
314 | dict. File state is updated by calling various methods (see each | |
60 | `dropfile` methods. |
|
315 | method's documentation for details): | |
|
316 | ||||
|
317 | - `reset_state` | |||
|
318 | - `set_tracked` | |||
|
319 | - `set_untracked` | |||
|
320 | - `set_clean` | |||
|
321 | - `set_possibly_dirty` | |||
61 |
|
322 | |||
62 | - `copymap` maps destination filenames to their source filename. |
|
323 | - `copymap` maps destination filenames to their source filename. | |
63 |
|
324 | |||
64 | The dirstate also provides the following views onto the state: |
|
325 | The dirstate also provides the following views onto the state: | |
65 |
|
326 | |||
66 | - `nonnormalset` is a set of the filenames that have state other |
|
|||
67 | than 'normal', or are normal but have an mtime of -1 ('normallookup'). |
|
|||
68 |
|
||||
69 | - `otherparentset` is a set of the filenames that are marked as coming |
|
|||
70 | from the second parent when the dirstate is currently being merged. |
|
|||
71 |
|
||||
72 | - `filefoldmap` is a dict mapping normalized filenames to the denormalized |
|
327 | - `filefoldmap` is a dict mapping normalized filenames to the denormalized | |
73 | form that they appear as in the dirstate. |
|
328 | form that they appear as in the dirstate. | |
74 |
|
329 | |||
@@ -76,22 +331,7 b' class dirstatemap(object):' | |||||
76 | denormalized form that they appear as in the dirstate. |
|
331 | denormalized form that they appear as in the dirstate. | |
77 | """ |
|
332 | """ | |
78 |
|
333 | |||
79 | def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2): |
|
334 | ### Core data storage and access | |
80 | self._ui = ui |
|
|||
81 | self._opener = opener |
|
|||
82 | self._root = root |
|
|||
83 | self._filename = b'dirstate' |
|
|||
84 | self._nodelen = 20 |
|
|||
85 | self._nodeconstants = nodeconstants |
|
|||
86 | assert ( |
|
|||
87 | not use_dirstate_v2 |
|
|||
88 | ), "should have detected unsupported requirement" |
|
|||
89 |
|
||||
90 | self._parents = None |
|
|||
91 | self._dirtyparents = False |
|
|||
92 |
|
||||
93 | # for consistent view between _pl() and _read() invocations |
|
|||
94 | self._pendingmode = None |
|
|||
95 |
|
335 | |||
96 | @propertycache |
|
336 | @propertycache | |
97 | def _map(self): |
|
337 | def _map(self): | |
@@ -113,8 +353,6 b' class dirstatemap(object):' | |||||
113 | util.clearcachedproperty(self, b"_alldirs") |
|
353 | util.clearcachedproperty(self, b"_alldirs") | |
114 | util.clearcachedproperty(self, b"filefoldmap") |
|
354 | util.clearcachedproperty(self, b"filefoldmap") | |
115 | util.clearcachedproperty(self, b"dirfoldmap") |
|
355 | util.clearcachedproperty(self, b"dirfoldmap") | |
116 | util.clearcachedproperty(self, b"nonnormalset") |
|
|||
117 | util.clearcachedproperty(self, b"otherparentset") |
|
|||
118 |
|
356 | |||
119 | def items(self): |
|
357 | def items(self): | |
120 | return pycompat.iteritems(self._map) |
|
358 | return pycompat.iteritems(self._map) | |
@@ -122,29 +360,109 b' class dirstatemap(object):' | |||||
122 | # forward for python2,3 compat |
|
360 | # forward for python2,3 compat | |
123 | iteritems = items |
|
361 | iteritems = items | |
124 |
|
362 | |||
125 | debug_iter = items |
|
363 | def debug_iter(self, all): | |
126 |
|
364 | """ | ||
127 | def __len__(self): |
|
365 | Return an iterator of (filename, state, mode, size, mtime) tuples | |
128 | return len(self._map) |
|
|||
129 |
|
||||
130 | def __iter__(self): |
|
|||
131 | return iter(self._map) |
|
|||
132 |
|
|
366 | ||
133 | def get(self, key, default=None): |
|
367 | `all` is unused when Rust is not enabled | |
134 | return self._map.get(key, default) |
|
368 | """ | |
135 |
|
369 | for (filename, item) in self.items(): | ||
136 | def __contains__(self, key): |
|
370 | yield (filename, item.state, item.mode, item.size, item.mtime) | |
137 | return key in self._map |
|
|||
138 |
|
||||
139 | def __getitem__(self, key): |
|
|||
140 | return self._map[key] |
|
|||
141 |
|
371 | |||
142 | def keys(self): |
|
372 | def keys(self): | |
143 | return self._map.keys() |
|
373 | return self._map.keys() | |
144 |
|
374 | |||
145 | def preload(self): |
|
375 | ### reading/setting parents | |
146 | """Loads the underlying data, if it's not already loaded""" |
|
376 | ||
|
377 | def setparents(self, p1, p2, fold_p2=False): | |||
|
378 | self._parents = (p1, p2) | |||
|
379 | self._dirtyparents = True | |||
|
380 | copies = {} | |||
|
381 | if fold_p2: | |||
|
382 | for f, s in pycompat.iteritems(self._map): | |||
|
383 | # Discard "merged" markers when moving away from a merge state | |||
|
384 | if s.p2_info: | |||
|
385 | source = self.copymap.pop(f, None) | |||
|
386 | if source: | |||
|
387 | copies[f] = source | |||
|
388 | s.drop_merge_data() | |||
|
389 | return copies | |||
|
390 | ||||
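`fold_p2` is used when leaving a merge: each entry that still carries second-parent information loses it, and its recorded copy source is returned so the caller can re-record it against the single remaining parent. Roughly, as a usage sketch:

    copies = dmap.setparents(p1, nodeconstants.nullid, fold_p2=True)
    for dest, source in copies.items():
        # re-apply the copy information the fold displaced
        ...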
|
391 | ### disk interaction | |||
|
392 | ||||
|
393 | def read(self): | |||
|
394 | # ignore HG_PENDING because identity is used only for writing | |||
|
395 | self.identity = util.filestat.frompath( | |||
|
396 | self._opener.join(self._filename) | |||
|
397 | ) | |||
|
398 | ||||
|
399 | if self._use_dirstate_v2: | |||
|
400 | if not self.docket.uuid: | |||
|
401 | return | |||
|
402 | st = self._opener.read(self.docket.data_filename()) | |||
|
403 | else: | |||
|
404 | st = self._readdirstatefile() | |||
|
405 | ||||
|
406 | if not st: | |||
|
407 | return | |||
|
408 | ||||
|
409 | # TODO: adjust this estimate for dirstate-v2 | |||
|
410 | if util.safehasattr(parsers, b'dict_new_presized'): | |||
|
411 | # Make an estimate of the number of files in the dirstate based on | |||
|
412 | # its size. This trades wasting some memory for avoiding costly | |||
|
413 | # resizes. Each entry has a prefix of 17 bytes followed by one or | |||
|
414 | # two path names. Studies on various large-scale real-world repositories | |||
|
415 | # found 54 bytes a reasonable upper limit for the average path names. | |||
|
416 | # Copy entries are ignored for the sake of this estimate. | |||
|
417 | self._map = parsers.dict_new_presized(len(st) // 71) | |||
|
418 | ||||
|
419 | # Python's garbage collector triggers a GC each time a certain number | |||
|
420 | # of container objects (the number being defined by | |||
|
421 | # gc.get_threshold()) are allocated. parse_dirstate creates a tuple | |||
|
422 | # for each file in the dirstate. The C version then immediately marks | |||
|
423 | # them as not to be tracked by the collector. However, this has no | |||
|
424 | # effect on when GCs are triggered, only on what objects the GC looks | |||
|
425 | # into. This means that O(number of files) GCs are unavoidable. | |||
|
426 | # Depending on when in the process's lifetime the dirstate is parsed, | |||
|
427 | # this can get very expensive. As a workaround, disable GC while | |||
|
428 | # parsing the dirstate. | |||
|
429 | # | |||
|
430 | # (we cannot decorate the function directly since it is in a C module) | |||
|
431 | if self._use_dirstate_v2: | |||
|
432 | p = self.docket.parents | |||
|
433 | meta = self.docket.tree_metadata | |||
|
434 | parse_dirstate = util.nogc(v2.parse_dirstate) | |||
|
435 | parse_dirstate(self._map, self.copymap, st, meta) | |||
|
436 | else: | |||
|
437 | parse_dirstate = util.nogc(parsers.parse_dirstate) | |||
|
438 | p = parse_dirstate(self._map, self.copymap, st) | |||
|
439 | if not self._dirtyparents: | |||
|
440 | self.setparents(*p) | |||
|
441 | ||||
|
442 | # Avoid excess attribute lookups by fast pathing certain checks | |||
|
443 | self.__contains__ = self._map.__contains__ | |||
|
444 | self.__getitem__ = self._map.__getitem__ | |||
|
445 | self.get = self._map.get | |||
|
446 | ||||
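The `util.nogc` wrapper used in `read` above implements exactly the workaround its comment describes: run a single call with the collector paused. The pattern can be reproduced with the standard library alone (a sketch, not Mercurial's actual helper):

    import functools
    import gc

    def nogc(func):
        """Run func with the cyclic garbage collector disabled."""

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            was_enabled = gc.isenabled()
            gc.disable()
            try:
                return func(*args, **kwargs)
            finally:
                if was_enabled:
                    gc.enable()

        return wrapper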
|
447 | def write(self, tr, st, now): | |||
|
448 | if self._use_dirstate_v2: | |||
|
449 | packed, meta = v2.pack_dirstate(self._map, self.copymap, now) | |||
|
450 | self.write_v2_no_append(tr, st, meta, packed) | |||
|
451 | else: | |||
|
452 | packed = parsers.pack_dirstate( | |||
|
453 | self._map, self.copymap, self.parents(), now | |||
|
454 | ) | |||
|
455 | st.write(packed) | |||
|
456 | st.close() | |||
|
457 | self._dirtyparents = False | |||
|
458 | ||||
|
459 | @propertycache | |||
|
460 | def identity(self): | |||
147 | self._map |
|
461 | self._map | |
|
462 | return self.identity | |||
|
463 | ||||
|
464 | ### code related to maintaining and accessing "extra" property | |||
|
465 | # (e.g. "has_dir") | |||
148 |
|
466 | |||
149 | def _dirs_incr(self, filename, old_entry=None): |
|
467 | def _dirs_incr(self, filename, old_entry=None): | |
150 | """incremente the dirstate counter if applicable""" |
|
468 | """incremente the dirstate counter if applicable""" | |
@@ -168,200 +486,6 b' class dirstatemap(object):' | |||||
168 | normed = util.normcase(filename) |
|
486 | normed = util.normcase(filename) | |
169 | self.filefoldmap.pop(normed, None) |
|
487 | self.filefoldmap.pop(normed, None) | |
170 |
|
488 | |||
171 | def set_possibly_dirty(self, filename): |
|
|||
172 | """record that the current state of the file on disk is unknown""" |
|
|||
173 | self[filename].set_possibly_dirty() |
|
|||
174 |
|
||||
175 | def addfile( |
|
|||
176 | self, |
|
|||
177 | f, |
|
|||
178 | mode=0, |
|
|||
179 | size=None, |
|
|||
180 | mtime=None, |
|
|||
181 | added=False, |
|
|||
182 | merged=False, |
|
|||
183 | from_p2=False, |
|
|||
184 | possibly_dirty=False, |
|
|||
185 | ): |
|
|||
186 | """Add a tracked file to the dirstate.""" |
|
|||
187 | if added: |
|
|||
188 | assert not merged |
|
|||
189 | assert not possibly_dirty |
|
|||
190 | assert not from_p2 |
|
|||
191 | state = b'a' |
|
|||
192 | size = NONNORMAL |
|
|||
193 | mtime = AMBIGUOUS_TIME |
|
|||
194 | elif merged: |
|
|||
195 | assert not possibly_dirty |
|
|||
196 | assert not from_p2 |
|
|||
197 | state = b'm' |
|
|||
198 | size = FROM_P2 |
|
|||
199 | mtime = AMBIGUOUS_TIME |
|
|||
200 | elif from_p2: |
|
|||
201 | assert not possibly_dirty |
|
|||
202 | state = b'n' |
|
|||
203 | size = FROM_P2 |
|
|||
204 | mtime = AMBIGUOUS_TIME |
|
|||
205 | elif possibly_dirty: |
|
|||
206 | state = b'n' |
|
|||
207 | size = NONNORMAL |
|
|||
208 | mtime = AMBIGUOUS_TIME |
|
|||
209 | else: |
|
|||
210 | assert size != FROM_P2 |
|
|||
211 | assert size != NONNORMAL |
|
|||
212 | assert size is not None |
|
|||
213 | assert mtime is not None |
|
|||
214 |
|
||||
215 | state = b'n' |
|
|||
216 | size = size & rangemask |
|
|||
217 | mtime = mtime & rangemask |
|
|||
218 | assert state is not None |
|
|||
219 | assert size is not None |
|
|||
220 | assert mtime is not None |
|
|||
221 | old_entry = self.get(f) |
|
|||
222 | self._dirs_incr(f, old_entry) |
|
|||
223 | e = self._map[f] = DirstateItem(state, mode, size, mtime) |
|
|||
224 | if e.dm_nonnormal: |
|
|||
225 | self.nonnormalset.add(f) |
|
|||
226 | if e.dm_otherparent: |
|
|||
227 | self.otherparentset.add(f) |
|
|||
228 |
|
||||
229 | def reset_state( |
|
|||
230 | self, |
|
|||
231 | filename, |
|
|||
232 | wc_tracked, |
|
|||
233 | p1_tracked, |
|
|||
234 | p2_tracked=False, |
|
|||
235 | merged=False, |
|
|||
236 | clean_p1=False, |
|
|||
237 | clean_p2=False, |
|
|||
238 | possibly_dirty=False, |
|
|||
239 | parentfiledata=None, |
|
|||
240 | ): |
|
|||
241 | """Set a entry to a given state, diregarding all previous state |
|
|||
242 |
|
||||
243 | This is to be used by the part of the dirstate API dedicated to |
|
|||
244 | adjusting the dirstate after an update/merge.
|
|||
245 |
|
||||
246 | note: calling this might result in no entry existing at all if the
|
|||
247 | dirstate map does not see any point in having one for this file
|
|||
248 | anymore. |
|
|||
249 | """ |
|
|||
250 | if merged and (clean_p1 or clean_p2): |
|
|||
251 | msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`' |
|
|||
252 | raise error.ProgrammingError(msg) |
|
|||
253 | # copy information is now outdated
|
|||
254 | # (maybe new information should be passed directly to this function)
|
|||
255 | self.copymap.pop(filename, None) |
|
|||
256 |
|
||||
257 | if not (p1_tracked or p2_tracked or wc_tracked): |
|
|||
258 | self.dropfile(filename) |
|
|||
259 | elif merged: |
|
|||
260 | # XXX might be merged and removed ? |
|
|||
261 | entry = self.get(filename) |
|
|||
262 | if entry is not None and entry.tracked: |
|
|||
263 | # XXX mostly replicate dirstate.other parent. We should get |
|
|||
264 | # the higher layer to pass us more reliable data where `merged` |
|
|||
265 | # actually means merged. Dropping the else clause will show
|
|||
266 | # failure in `test-graft.t` |
|
|||
267 | self.addfile(filename, merged=True) |
|
|||
268 | else: |
|
|||
269 | self.addfile(filename, from_p2=True) |
|
|||
270 | elif not (p1_tracked or p2_tracked) and wc_tracked: |
|
|||
271 | self.addfile(filename, added=True, possibly_dirty=possibly_dirty) |
|
|||
272 | elif (p1_tracked or p2_tracked) and not wc_tracked: |
|
|||
273 | # XXX might be merged and removed ? |
|
|||
274 | old_entry = self._map.get(filename) |
|
|||
275 | self._dirs_decr(filename, old_entry=old_entry, remove_variant=True) |
|
|||
276 | self._map[filename] = DirstateItem(b'r', 0, 0, 0) |
|
|||
277 | self.nonnormalset.add(filename) |
|
|||
278 | elif clean_p2 and wc_tracked: |
|
|||
279 | if p1_tracked or self.get(filename) is not None: |
|
|||
280 | # XXX the `self.get` call is catching some case in |
|
|||
281 | # `test-merge-remove.t` where the file is tracked in p1, the |
|
|||
282 | # p1_tracked argument is False. |
|
|||
283 | # |
|
|||
284 | # In addition, this seems to be a case where the file is marked |
|
|||
285 | # as merged without actually being the result of a merge |
|
|||
286 | # action. So things are not ideal here.
|
|||
287 | self.addfile(filename, merged=True) |
|
|||
288 | else: |
|
|||
289 | self.addfile(filename, from_p2=True) |
|
|||
290 | elif not p1_tracked and p2_tracked and wc_tracked: |
|
|||
291 | self.addfile(filename, from_p2=True, possibly_dirty=possibly_dirty) |
|
|||
292 | elif possibly_dirty: |
|
|||
293 | self.addfile(filename, possibly_dirty=possibly_dirty) |
|
|||
294 | elif wc_tracked: |
|
|||
295 | # this is a "normal" file |
|
|||
296 | if parentfiledata is None: |
|
|||
297 | msg = b'failed to pass parentfiledata for a normal file: %s' |
|
|||
298 | msg %= filename |
|
|||
299 | raise error.ProgrammingError(msg) |
|
|||
300 | mode, size, mtime = parentfiledata |
|
|||
301 | self.addfile(filename, mode=mode, size=size, mtime=mtime) |
|
|||
302 | self.nonnormalset.discard(filename) |
|
|||
303 | else: |
|
|||
304 | assert False, 'unreachable' |
|
|||
305 |
|
||||
306 | def removefile(self, f, in_merge=False): |
|
|||
307 | """ |
|
|||
308 | Mark a file as removed in the dirstate. |
|
|||
309 |
|
||||
310 | The `size` parameter is used to store sentinel values that indicate |
|
|||
311 | the file's previous state. In the future, we should refactor this |
|
|||
312 | to be more explicit about what that state is. |
|
|||
313 | """ |
|
|||
314 | entry = self.get(f) |
|
|||
315 | size = 0 |
|
|||
316 | if in_merge: |
|
|||
317 | # XXX we should not be able to have 'm' state and 'FROM_P2' if not |
|
|||
318 | # during a merge. So I (marmoute) am not sure we need the |
|
|||
319 | # conditional at all. Double checking this with an assert
|
|||
320 | # would be nice. |
|
|||
321 | if entry is not None: |
|
|||
322 | # backup the previous state |
|
|||
323 | if entry.merged: # merge |
|
|||
324 | size = NONNORMAL |
|
|||
325 | elif entry.from_p2: |
|
|||
326 | size = FROM_P2 |
|
|||
327 | self.otherparentset.add(f) |
|
|||
328 | if entry is not None and not (entry.merged or entry.from_p2): |
|
|||
329 | self.copymap.pop(f, None) |
|
|||
330 | self._dirs_decr(f, old_entry=entry, remove_variant=True) |
|
|||
331 | self._map[f] = DirstateItem(b'r', 0, size, 0) |
|
|||
332 | self.nonnormalset.add(f) |
|
|||
333 |
|
||||
334 | def dropfile(self, f): |
|
|||
335 | """ |
|
|||
336 | Remove a file from the dirstate. Returns True if the file was |
|
|||
337 | previously recorded. |
|
|||
338 | """ |
|
|||
339 | old_entry = self._map.pop(f, None) |
|
|||
340 | self._dirs_decr(f, old_entry=old_entry) |
|
|||
341 | self.nonnormalset.discard(f) |
|
|||
342 | return old_entry is not None |
|
|||
343 |
|
||||
344 | def clearambiguoustimes(self, files, now): |
|
|||
345 | for f in files: |
|
|||
346 | e = self.get(f) |
|
|||
347 | if e is not None and e.need_delay(now): |
|
|||
348 | e.set_possibly_dirty() |
|
|||
349 | self.nonnormalset.add(f) |
|
|||
350 |
|
||||
351 | def nonnormalentries(self): |
|
|||
352 | '''Compute the nonnormal dirstate entries from the dmap''' |
|
|||
353 | try: |
|
|||
354 | return parsers.nonnormalotherparententries(self._map) |
|
|||
355 | except AttributeError: |
|
|||
356 | nonnorm = set() |
|
|||
357 | otherparent = set() |
|
|||
358 | for fname, e in pycompat.iteritems(self._map): |
|
|||
359 | if e.dm_nonnormal: |
|
|||
360 | nonnorm.add(fname) |
|
|||
361 | if e.from_p2: |
|
|||
362 | otherparent.add(fname) |
|
|||
363 | return nonnorm, otherparent |
|
|||
364 |
|
||||
365 | @propertycache |
|
489 | @propertycache | |
366 | def filefoldmap(self): |
|
490 | def filefoldmap(self): | |
367 | """Returns a dictionary mapping normalized case paths to their |
|
491 | """Returns a dictionary mapping normalized case paths to their | |
@@ -384,6 +508,14 b' class dirstatemap(object):' | |||||
384 | f[b'.'] = b'.' # prevents useless util.fspath() invocation |
|
508 | f[b'.'] = b'.' # prevents useless util.fspath() invocation | |
385 | return f |
|
509 | return f | |
386 |
|
510 | |||
|
511 | @propertycache | |||
|
512 | def dirfoldmap(self): | |||
|
513 | f = {} | |||
|
514 | normcase = util.normcase | |||
|
515 | for name in self._dirs: | |||
|
516 | f[normcase(name)] = name | |||
|
517 | return f | |||
|
518 | ||||
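`dirfoldmap` mirrors `filefoldmap` one level up: both exist so that case-insensitive filesystems can map whatever case the user typed back to the case recorded in the dirstate. A toy illustration with invented paths:

    dirfoldmap = {b'foo/bar': b'Foo/BAR'}  # normcase(name) -> stored name
    typed = util.normcase(b'FOO/bar')      # -> b'foo/bar' on such systems
    stored = dirfoldmap[typed]             # -> b'Foo/BAR'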
387 | def hastrackeddir(self, d): |
|
519 | def hastrackeddir(self, d): | |
388 | """ |
|
520 | """ | |
389 | Returns True if the dirstate contains a tracked (not removed) file |
|
521 | Returns True if the dirstate contains a tracked (not removed) file | |
@@ -400,393 +532,34 b' class dirstatemap(object):' | |||||
400 |
|
532 | |||
401 | @propertycache |
|
533 | @propertycache | |
402 | def _dirs(self): |
|
534 | def _dirs(self): | |
403 | return pathutil.dirs(self._map,
|
535 | return pathutil.dirs(self._map, only_tracked=True) | |
404 |
|
536 | |||
405 | @propertycache |
|
537 | @propertycache | |
406 | def _alldirs(self): |
|
538 | def _alldirs(self): | |
407 | return pathutil.dirs(self._map) |
|
539 | return pathutil.dirs(self._map) | |
408 |
|
540 | |||
409 | def _opendirstatefile(self): |
|
541 | ### code related to manipulation of entries and copy-sources | |
410 | fp, mode = txnutil.trypending(self._root, self._opener, self._filename) |
|
|||
411 | if self._pendingmode is not None and self._pendingmode != mode: |
|
|||
412 | fp.close() |
|
|||
413 | raise error.Abort( |
|
|||
414 | _(b'working directory state may be changed in parallel')
|
|||
415 | ) |
|
|||
416 | self._pendingmode = mode |
|
|||
417 | return fp |
|
|||
418 |
|
||||
419 | def parents(self): |
|
|||
420 | if not self._parents: |
|
|||
421 | try: |
|
|||
422 | fp = self._opendirstatefile() |
|
|||
423 | st = fp.read(2 * self._nodelen) |
|
|||
424 | fp.close() |
|
|||
425 | except IOError as err: |
|
|||
426 | if err.errno != errno.ENOENT: |
|
|||
427 | raise |
|
|||
428 | # File doesn't exist, so the current state is empty |
|
|||
429 | st = b'' |
|
|||
430 |
|
542 | |||
431 | l = len(st) |
|
543 | def _refresh_entry(self, f, entry): | |
432 | if l == self._nodelen * 2: |
|
544 | if not entry.any_tracked: | |
433 | self._parents = ( |
|
545 | self._map.pop(f, None) | |
434 | st[: self._nodelen], |
|
|||
435 | st[self._nodelen : 2 * self._nodelen], |
|
|||
436 | ) |
|
|||
437 | elif l == 0: |
|
|||
438 | self._parents = ( |
|
|||
439 | self._nodeconstants.nullid, |
|
|||
440 | self._nodeconstants.nullid, |
|
|||
441 | ) |
|
|||
442 | else: |
|
|||
443 | raise error.Abort( |
|
|||
444 | _(b'working directory state appears damaged!') |
|
|||
445 | ) |
|
|||
446 |
|
||||
447 | return self._parents |
|
|||
448 |
|
||||
449 | def setparents(self, p1, p2): |
|
|||
450 | self._parents = (p1, p2) |
|
|||
451 | self._dirtyparents = True |
|
|||
452 |
|
||||
453 | def read(self): |
|
|||
454 | # ignore HG_PENDING because identity is used only for writing |
|
|||
455 | self.identity = util.filestat.frompath( |
|
|||
456 | self._opener.join(self._filename) |
|
|||
457 | ) |
|
|||
458 |
|
||||
459 | try: |
|
|||
460 | fp = self._opendirstatefile() |
|
|||
461 | try: |
|
|||
462 | st = fp.read() |
|
|||
463 | finally: |
|
|||
464 | fp.close() |
|
|||
465 | except IOError as err: |
|
|||
466 | if err.errno != errno.ENOENT: |
|
|||
467 | raise |
|
|||
468 | return |
|
|||
469 | if not st: |
|
|||
470 | return |
|
|||
471 |
|
546 | |||
472 | if util.safehasattr(parsers, b'dict_new_presized'): |
|
547 | def _insert_entry(self, f, entry): | |
473 | # Make an estimate of the number of files in the dirstate based on |
|
548 | self._map[f] = entry | |
474 | # its size. This trades wasting some memory for avoiding costly |
|
|||
475 | # resizes. Each entry has a prefix of 17 bytes followed by one or
|
|||
476 | # two path names. Studies on various large-scale real-world repositories |
|
|||
477 | # found 54 bytes a reasonable upper limit for the average path names. |
|
|||
478 | # Copy entries are ignored for the sake of this estimate. |
|
|||
479 | self._map = parsers.dict_new_presized(len(st) // 71) |
|
|||
480 |
|
||||
481 | # Python's garbage collector triggers a GC each time a certain number |
|
|||
482 | # of container objects (the number being defined by |
|
|||
483 | # gc.get_threshold()) are allocated. parse_dirstate creates a tuple |
|
|||
484 | # for each file in the dirstate. The C version then immediately marks |
|
|||
485 | # them as not to be tracked by the collector. However, this has no |
|
|||
486 | # effect on when GCs are triggered, only on what objects the GC looks |
|
|||
487 | # into. This means that O(number of files) GCs are unavoidable. |
|
|||
488 | # Depending on when in the process's lifetime the dirstate is parsed, |
|
|||
489 | # this can get very expensive. As a workaround, disable GC while |
|
|||
490 | # parsing the dirstate. |
|
|||
491 | # |
|
|||
492 | # (we cannot decorate the function directly since it is in a C module) |
|
|||
493 | parse_dirstate = util.nogc(parsers.parse_dirstate) |
|
|||
494 | p = parse_dirstate(self._map, self.copymap, st) |
|
|||
495 | if not self._dirtyparents: |
|
|||
496 | self.setparents(*p) |
|
|||
497 |
|
||||
498 | # Avoid excess attribute lookups by fast pathing certain checks |
|
|||
499 | self.__contains__ = self._map.__contains__ |
|
|||
500 | self.__getitem__ = self._map.__getitem__ |
|
|||
501 | self.get = self._map.get |
|
|||
502 |
|
549 | |||
503 | def write(self, tr, st, now):
|
550 | def _drop_entry(self, f): | |
504 | st.write( |
|
551 | self._map.pop(f, None) | |
505 | parsers.pack_dirstate(self._map, self.copymap, self.parents(), now) |
|
552 | self.copymap.pop(f, None) | |
506 | ) |
|
|||
507 | st.close() |
|
|||
508 | self._dirtyparents = False |
|
|||
509 | self.nonnormalset, self.otherparentset = self.nonnormalentries() |
|
|||
510 |
|
||||
511 | @propertycache |
|
|||
512 | def nonnormalset(self): |
|
|||
513 | nonnorm, otherparents = self.nonnormalentries() |
|
|||
514 | self.otherparentset = otherparents |
|
|||
515 | return nonnorm |
|
|||
516 |
|
||||
517 | @propertycache |
|
|||
518 | def otherparentset(self): |
|
|||
519 | nonnorm, otherparents = self.nonnormalentries() |
|
|||
520 | self.nonnormalset = nonnorm |
|
|||
521 | return otherparents |
|
|||
522 |
|
||||
523 | def non_normal_or_other_parent_paths(self): |
|
|||
524 | return self.nonnormalset.union(self.otherparentset) |
|
|||
525 |
|
||||
526 | @propertycache |
|
|||
527 | def identity(self): |
|
|||
528 | self._map |
|
|||
529 | return self.identity |
|
|||
530 |
|
||||
531 | @propertycache |
|
|||
532 | def dirfoldmap(self): |
|
|||
533 | f = {} |
|
|||
534 | normcase = util.normcase |
|
|||
535 | for name in self._dirs: |
|
|||
536 | f[normcase(name)] = name |
|
|||
537 | return f |
|
|||
538 |
|
553 | |||
539 |
|
554 | |||
540 | if rustmod is not None: |
|
555 | if rustmod is not None: | |
541 |
|
556 | |||
542 | class dirstatemap(object):
|
557 | class dirstatemap(_dirstatemapcommon): | |
543 | def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2): |
|
|||
544 | self._use_dirstate_v2 = use_dirstate_v2 |
|
|||
545 | self._nodeconstants = nodeconstants |
|
|||
546 | self._ui = ui |
|
|||
547 | self._opener = opener |
|
|||
548 | self._root = root |
|
|||
549 | self._filename = b'dirstate' |
|
|||
550 | self._nodelen = 20 # Also update Rust code when changing this! |
|
|||
551 | self._parents = None |
|
|||
552 | self._dirtyparents = False |
|
|||
553 | self._docket = None |
|
|||
554 |
|
||||
555 | # for consistent view between _pl() and _read() invocations |
|
|||
556 | self._pendingmode = None |
|
|||
557 |
|
||||
558 | self._use_dirstate_tree = self._ui.configbool( |
|
|||
559 | b"experimental", |
|
|||
560 | b"dirstate-tree.in-memory", |
|
|||
561 | False, |
|
|||
562 | ) |
|
|||
563 |
|
||||
564 | def addfile( |
|
|||
565 | self, |
|
|||
566 | f, |
|
|||
567 | mode=0, |
|
|||
568 | size=None, |
|
|||
569 | mtime=None, |
|
|||
570 | added=False, |
|
|||
571 | merged=False, |
|
|||
572 | from_p2=False, |
|
|||
573 | possibly_dirty=False, |
|
|||
574 | ): |
|
|||
575 | return self._rustmap.addfile( |
|
|||
576 | f, |
|
|||
577 | mode, |
|
|||
578 | size, |
|
|||
579 | mtime, |
|
|||
580 | added, |
|
|||
581 | merged, |
|
|||
582 | from_p2, |
|
|||
583 | possibly_dirty, |
|
|||
584 | ) |
|
|||
585 |
|
||||
586 | def reset_state( |
|
|||
587 | self, |
|
|||
588 | filename, |
|
|||
589 | wc_tracked, |
|
|||
590 | p1_tracked, |
|
|||
591 | p2_tracked=False, |
|
|||
592 | merged=False, |
|
|||
593 | clean_p1=False, |
|
|||
594 | clean_p2=False, |
|
|||
595 | possibly_dirty=False, |
|
|||
596 | parentfiledata=None, |
|
|||
597 | ): |
|
|||
598 | """Set a entry to a given state, disregarding all previous state |
|
|||
599 |
|
||||
600 | This is to be used by the part of the dirstate API dedicated to |
|
|||
601 | adjusting the dirstate after an update/merge.
|
|||
602 |
|
||||
603 | note: calling this might result in no entry existing at all if the
|
|||
604 | dirstate map does not see any point in having one for this file
|
|||
605 | anymore. |
|
|||
606 | """ |
|
|||
607 | if merged and (clean_p1 or clean_p2): |
|
|||
608 | msg = ( |
|
|||
609 | b'`merged` argument incompatible with `clean_p1`/`clean_p2`' |
|
|||
610 | ) |
|
|||
611 | raise error.ProgrammingError(msg) |
|
|||
612 | # copy information is now outdated
|
|||
613 | # (maybe new information should be passed directly to this function)
|
|||
614 | self.copymap.pop(filename, None) |
|
|||
615 |
|
558 | |||
616 | if not (p1_tracked or p2_tracked or wc_tracked): |
|
559 | ### Core data storage and access | |
617 | self.dropfile(filename) |
|
|||
618 | elif merged: |
|
|||
619 | # XXX might be merged and removed ? |
|
|||
620 | entry = self.get(filename) |
|
|||
621 | if entry is not None and entry.tracked: |
|
|||
622 | # XXX mostly replicate dirstate.other parent. We should get |
|
|||
623 | # the higher layer to pass us more reliable data where `merged` |
|
|||
624 | # actually means merged. Dropping the else clause will show
|
|||
625 | # failure in `test-graft.t` |
|
|||
626 | self.addfile(filename, merged=True) |
|
|||
627 | else: |
|
|||
628 | self.addfile(filename, from_p2=True) |
|
|||
629 | elif not (p1_tracked or p2_tracked) and wc_tracked: |
|
|||
630 | self.addfile( |
|
|||
631 | filename, added=True, possibly_dirty=possibly_dirty |
|
|||
632 | ) |
|
|||
633 | elif (p1_tracked or p2_tracked) and not wc_tracked: |
|
|||
634 | # XXX might be merged and removed ? |
|
|||
635 | self[filename] = DirstateItem(b'r', 0, 0, 0) |
|
|||
636 | self.nonnormalset.add(filename) |
|
|||
637 | elif clean_p2 and wc_tracked: |
|
|||
638 | if p1_tracked or self.get(filename) is not None: |
|
|||
639 | # XXX the `self.get` call is catching some case in |
|
|||
640 | # `test-merge-remove.t` where the file is tracked in p1, the |
|
|||
641 | # p1_tracked argument is False. |
|
|||
642 | # |
|
|||
643 | # In addition, this seems to be a case where the file is marked |
|
|||
644 | # as merged without actually being the result of a merge |
|
|||
645 | # action. So things are not ideal here.
|
|||
646 | self.addfile(filename, merged=True) |
|
|||
647 | else: |
|
|||
648 | self.addfile(filename, from_p2=True) |
|
|||
649 | elif not p1_tracked and p2_tracked and wc_tracked: |
|
|||
650 | self.addfile( |
|
|||
651 | filename, from_p2=True, possibly_dirty=possibly_dirty |
|
|||
652 | ) |
|
|||
653 | elif possibly_dirty: |
|
|||
654 | self.addfile(filename, possibly_dirty=possibly_dirty) |
|
|||
655 | elif wc_tracked: |
|
|||
656 | # this is a "normal" file |
|
|||
657 | if parentfiledata is None: |
|
|||
658 | msg = b'failed to pass parentfiledata for a normal file: %s' |
|
|||
659 | msg %= filename |
|
|||
660 | raise error.ProgrammingError(msg) |
|
|||
661 | mode, size, mtime = parentfiledata |
|
|||
662 | self.addfile(filename, mode=mode, size=size, mtime=mtime) |
|
|||
663 | self.nonnormalset.discard(filename) |
|
|||
664 | else: |
|
|||
665 | assert False, 'unreachable' |
|
|||
666 |
|
||||
667 | def removefile(self, *args, **kwargs): |
|
|||
668 | return self._rustmap.removefile(*args, **kwargs) |
|
|||
669 |
|
||||
670 | def dropfile(self, *args, **kwargs): |
|
|||
671 | return self._rustmap.dropfile(*args, **kwargs) |
|
|||
672 |
|
||||
673 | def clearambiguoustimes(self, *args, **kwargs): |
|
|||
674 | return self._rustmap.clearambiguoustimes(*args, **kwargs) |
|
|||
675 |
|
||||
676 | def nonnormalentries(self): |
|
|||
677 | return self._rustmap.nonnormalentries() |
|
|||
678 |
|
||||
679 | def get(self, *args, **kwargs): |
|
|||
680 | return self._rustmap.get(*args, **kwargs) |
|
|||
681 |
|
||||
682 | @property |
|
|||
683 | def copymap(self): |
|
|||
684 | return self._rustmap.copymap() |
|
|||
685 |
|
||||
686 | def directories(self): |
|
|||
687 | return self._rustmap.directories() |
|
|||
688 |
|
||||
689 | def debug_iter(self): |
|
|||
690 | return self._rustmap.debug_iter() |
|
|||
691 |
|
||||
692 | def preload(self): |
|
|||
693 | self._rustmap |
|
|||
694 |
|
||||
695 | def clear(self): |
|
|||
696 | self._rustmap.clear() |
|
|||
697 | self.setparents( |
|
|||
698 | self._nodeconstants.nullid, self._nodeconstants.nullid |
|
|||
699 | ) |
|
|||
700 | util.clearcachedproperty(self, b"_dirs") |
|
|||
701 | util.clearcachedproperty(self, b"_alldirs") |
|
|||
702 | util.clearcachedproperty(self, b"dirfoldmap") |
|
|||
703 |
|
||||
704 | def items(self): |
|
|||
705 | return self._rustmap.items() |
|
|||
706 |
|
||||
707 | def keys(self): |
|
|||
708 | return iter(self._rustmap) |
|
|||
709 |
|
||||
710 | def __contains__(self, key): |
|
|||
711 | return key in self._rustmap |
|
|||
712 |
|
||||
713 | def __getitem__(self, item): |
|
|||
714 | return self._rustmap[item] |
|
|||
715 |
|
||||
716 | def __len__(self): |
|
|||
717 | return len(self._rustmap) |
|
|||
718 |
|
||||
719 | def __iter__(self): |
|
|||
720 | return iter(self._rustmap) |
|
|||
721 |
|
||||
722 | # forward for python2,3 compat |
|
|||
723 | iteritems = items |
|
|||
724 |
|
||||
725 | def _opendirstatefile(self): |
|
|||
726 | fp, mode = txnutil.trypending( |
|
|||
727 | self._root, self._opener, self._filename |
|
|||
728 | ) |
|
|||
729 | if self._pendingmode is not None and self._pendingmode != mode: |
|
|||
730 | fp.close() |
|
|||
731 | raise error.Abort( |
|
|||
732 | _(b'working directory state may be changed in parallel')
|
|||
733 | ) |
|
|||
734 | self._pendingmode = mode |
|
|||
735 | return fp |
|
|||
736 |
|
||||
737 | def _readdirstatefile(self, size=-1): |
|
|||
738 | try: |
|
|||
739 | with self._opendirstatefile() as fp: |
|
|||
740 | return fp.read(size) |
|
|||
741 | except IOError as err: |
|
|||
742 | if err.errno != errno.ENOENT: |
|
|||
743 | raise |
|
|||
744 | # File doesn't exist, so the current state is empty |
|
|||
745 | return b'' |
|
|||
746 |
|
||||
747 | def setparents(self, p1, p2): |
|
|||
748 | self._parents = (p1, p2) |
|
|||
749 | self._dirtyparents = True |
|
|||
750 |
|
||||
751 | def parents(self): |
|
|||
752 | if not self._parents: |
|
|||
753 | if self._use_dirstate_v2: |
|
|||
754 | self._parents = self.docket.parents |
|
|||
755 | else: |
|
|||
756 | read_len = self._nodelen * 2 |
|
|||
757 | st = self._readdirstatefile(read_len) |
|
|||
758 | l = len(st) |
|
|||
759 | if l == read_len: |
|
|||
760 | self._parents = ( |
|
|||
761 | st[: self._nodelen], |
|
|||
762 | st[self._nodelen : 2 * self._nodelen], |
|
|||
763 | ) |
|
|||
764 | elif l == 0: |
|
|||
765 | self._parents = ( |
|
|||
766 | self._nodeconstants.nullid, |
|
|||
767 | self._nodeconstants.nullid, |
|
|||
768 | ) |
|
|||
769 | else: |
|
|||
770 | raise error.Abort( |
|
|||
771 | _(b'working directory state appears damaged!') |
|
|||
772 | ) |
|
|||
773 |
|
||||
774 | return self._parents |
|
|||
775 |
|
||||
776 | @property |
|
|||
777 | def docket(self): |
|
|||
778 | if not self._docket: |
|
|||
779 | if not self._use_dirstate_v2: |
|
|||
780 | raise error.ProgrammingError( |
|
|||
781 | b'dirstate only has a docket in v2 format' |
|
|||
782 | ) |
|
|||
783 | self._docket = docketmod.DirstateDocket.parse( |
|
|||
784 | self._readdirstatefile(), self._nodeconstants |
|
|||
785 | ) |
|
|||
786 | return self._docket |
|
|||
787 |
|
560 | |||
788 | @propertycache |
|
561 | @propertycache | |
789 | def _rustmap(self):
|
562 | def _map(self): | |
790 | """ |
|
563 | """ | |
791 | Fills the Dirstatemap when called. |
|
564 | Fills the Dirstatemap when called. | |
792 | """ |
|
565 | """ | |
@@ -801,27 +574,91 b' if rustmod is not None:' | |||||
801 | data = self._opener.read(self.docket.data_filename()) |
|
574 | data = self._opener.read(self.docket.data_filename()) | |
802 | else: |
|
575 | else: | |
803 | data = b'' |
|
576 | data = b'' | |
804 | self._rustmap = rustmod.DirstateMap.new_v2(
|
577 | self._map = rustmod.DirstateMap.new_v2( | |
805 | data, self.docket.data_size, self.docket.tree_metadata |
|
578 | data, self.docket.data_size, self.docket.tree_metadata | |
806 | ) |
|
579 | ) | |
807 | parents = self.docket.parents |
|
580 | parents = self.docket.parents | |
808 | else: |
|
581 | else: | |
809 | self._rustmap, parents = rustmod.DirstateMap.new_v1(
|
582 | self._map, parents = rustmod.DirstateMap.new_v1( | |
810 | self._use_dirstate_tree, self._readdirstatefile()
|
583 | self._readdirstatefile() | |
811 | ) |
|
584 | ) | |
812 |
|
585 | |||
813 | if parents and not self._dirtyparents: |
|
586 | if parents and not self._dirtyparents: | |
814 | self.setparents(*parents) |
|
587 | self.setparents(*parents) | |
815 |
|
588 | |||
816 | self.__contains__ = self._rustmap.__contains__
|
589 | self.__contains__ = self._map.__contains__ | |
817 | self.__getitem__ = self._rustmap.__getitem__
|
590 | self.__getitem__ = self._map.__getitem__ | |
818 | self.get = self._rustmap.get
|
591 | self.get = self._map.get | |
819 | return self._rustmap
|
592 | return self._map | |
|
593 | ||||
|
594 | @property | |||
|
595 | def copymap(self): | |||
|
596 | return self._map.copymap() | |||
|
597 | ||||
|
598 | def debug_iter(self, all): | |||
|
599 | """ | |||
|
600 | Return an iterator of (filename, state, mode, size, mtime) tuples | |||
|
601 | ||||
|
602 | `all`: also include, with `state == b' '`, dirstate tree nodes that | |||
|
603 | don't have an associated `DirstateItem`. | |||
|
604 | ||||
|
605 | """ | |||
|
606 | return self._map.debug_iter(all) | |||
|
607 | ||||
|
608 | def clear(self): | |||
|
609 | self._map.clear() | |||
|
610 | self.setparents( | |||
|
611 | self._nodeconstants.nullid, self._nodeconstants.nullid | |||
|
612 | ) | |||
|
613 | util.clearcachedproperty(self, b"_dirs") | |||
|
614 | util.clearcachedproperty(self, b"_alldirs") | |||
|
615 | util.clearcachedproperty(self, b"dirfoldmap") | |||
|
616 | ||||
|
617 | def items(self): | |||
|
618 | return self._map.items() | |||
|
619 | ||||
|
620 | # forward for python2,3 compat | |||
|
621 | iteritems = items | |||
|
622 | ||||
|
623 | def keys(self): | |||
|
624 | return iter(self._map) | |||
|
625 | ||||
|
626 | ### reading/setting parents | |||
|
627 | ||||
|
628 | def setparents(self, p1, p2, fold_p2=False): | |||
|
629 | self._parents = (p1, p2) | |||
|
630 | self._dirtyparents = True | |||
|
631 | copies = {} | |||
|
632 | if fold_p2: | |||
|
633 | # Collect into an intermediate list to avoid a `RuntimeError` | |||
|
634 | # exception due to mutation during iteration. | |||
|
635 | # TODO: move this whole loop to Rust where `iter_mut` | |||
|
636 | # enables in-place mutation of elements of a collection while | |||
|
637 | # iterating it, without mutating the collection itself. | |||
|
638 | files_with_p2_info = [ | |||
|
639 | f for f, s in self._map.items() if s.p2_info | |||
|
640 | ] | |||
|
641 | rust_map = self._map | |||
|
642 | for f in files_with_p2_info: | |||
|
643 | e = rust_map.get(f) | |||
|
644 | source = self.copymap.pop(f, None) | |||
|
645 | if source: | |||
|
646 | copies[f] = source | |||
|
647 | e.drop_merge_data() | |||
|
648 | rust_map.set_dirstate_item(f, e) | |||
|
649 | return copies | |||
|
650 | ||||
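The intermediate list matters because the underlying map, like a Python dict, refuses key-set changes while it is being iterated. The pitfall and its fix in miniature:

    d = {b'a': 1, b'b': 2}
    # for f in d: d.pop(f)  would raise:
    # RuntimeError: dictionary changed size during iteration
    for f in list(d):  # snapshot the keys first, as done above
        d.pop(f, None)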
|
651 | ### disk interaction | |||
|
652 | ||||
|
653 | @propertycache | |||
|
654 | def identity(self): | |||
|
655 | self._map | |||
|
656 | return self.identity | |||
820 |
|
657 | |||
821 | def write(self, tr, st, now): |
|
658 | def write(self, tr, st, now): | |
822 | if not self._use_dirstate_v2: |
|
659 | if not self._use_dirstate_v2: | |
823 | p1, p2 = self.parents() |
|
660 | p1, p2 = self.parents() | |
824 | packed = self._rustmap.write_v1(p1, p2, now)
|
661 | packed = self._map.write_v1(p1, p2, now) | |
825 | st.write(packed) |
|
662 | st.write(packed) | |
826 | st.close() |
|
663 | st.close() | |
827 | self._dirtyparents = False |
|
664 | self._dirtyparents = False | |
@@ -829,7 +666,7 b' if rustmod is not None:' | |||||
829 |
|
666 | |||
830 | # We can only append to an existing data file if there is one |
|
667 | # We can only append to an existing data file if there is one | |
831 | can_append = self.docket.uuid is not None |
|
668 | can_append = self.docket.uuid is not None | |
832 | packed, meta, append = self._rustmap.write_v2(now, can_append)
|
669 | packed, meta, append = self._map.write_v2(now, can_append) | |
833 | if append: |
|
670 | if append: | |
834 | docket = self.docket |
|
671 | docket = self.docket | |
835 | data_filename = docket.data_filename() |
|
672 | data_filename = docket.data_filename() | |
@@ -847,79 +684,49 b' if rustmod is not None:' | |||||
847 | st.write(docket.serialize()) |
|
684 | st.write(docket.serialize()) | |
848 | st.close() |
|
685 | st.close() | |
849 | else: |
|
686 | else: | |
850 | old_docket = self.docket |
|
687 | self.write_v2_no_append(tr, st, meta, packed) | |
851 | new_docket = docketmod.DirstateDocket.with_new_uuid( |
|
|||
852 | self.parents(), len(packed), meta |
|
|||
853 | ) |
|
|||
854 | data_filename = new_docket.data_filename() |
|
|||
855 | if tr: |
|
|||
856 | tr.add(data_filename, 0) |
|
|||
857 | self._opener.write(data_filename, packed) |
|
|||
858 | # Write the new docket after the new data file has been |
|
|||
859 | # written. Because `st` was opened with `atomictemp=True`, |
|
|||
860 | # the actual `.hg/dirstate` file is only affected on close. |
|
|||
861 | st.write(new_docket.serialize()) |
|
|||
862 | st.close() |
|
|||
863 | # Remove the old data file after the new docket pointing to |
|
|||
864 | # the new data file was written. |
|
|||
865 | if old_docket.uuid: |
|
|||
866 | data_filename = old_docket.data_filename() |
|
|||
867 | unlink = lambda _tr=None: self._opener.unlink(data_filename) |
|
|||
868 | if tr: |
|
|||
869 | category = b"dirstate-v2-clean-" + old_docket.uuid |
|
|||
870 | tr.addpostclose(category, unlink) |
|
|||
871 | else: |
|
|||
872 | unlink() |
|
|||
873 | self._docket = new_docket |
|
|||
874 | # Reload from the newly-written file |
|
688 | # Reload from the newly-written file | |
875 | util.clearcachedproperty(self, b"_rustmap")
|
689 | util.clearcachedproperty(self, b"_map") | |
876 | self._dirtyparents = False |
|
690 | self._dirtyparents = False | |
877 |
|
691 | |||
|
692 | ### code related to maintaining and accessing "extra" property | |||
|
693 | # (e.g. "has_dir") | |||
|
694 | ||||
878 | @propertycache |
|
695 | @propertycache | |
879 | def filefoldmap(self): |
|
696 | def filefoldmap(self): | |
880 | """Returns a dictionary mapping normalized case paths to their |
|
697 | """Returns a dictionary mapping normalized case paths to their | |
881 | non-normalized versions. |
|
698 | non-normalized versions. | |
882 | """ |
|
699 | """ | |
883 | return self._rustmap.filefoldmapasdict()
|
700 | return self._map.filefoldmapasdict() | |
884 |
|
701 | |||
885 | def hastrackeddir(self, d): |
|
702 | def hastrackeddir(self, d): | |
886 | return self._rustmap.hastrackeddir(d)
|
703 | return self._map.hastrackeddir(d) | |
887 |
|
704 | |||
888 | def hasdir(self, d): |
|
705 | def hasdir(self, d): | |
889 | return self._rustmap.hasdir(d)
|
706 | return self._map.hasdir(d) | |
890 |
|
||||
891 | @propertycache |
|
|||
892 | def identity(self): |
|
|||
893 | self._rustmap |
|
|||
894 | return self.identity |
|
|||
895 |
|
||||
896 | @property |
|
|||
897 | def nonnormalset(self): |
|
|||
898 | nonnorm = self._rustmap.non_normal_entries() |
|
|||
899 | return nonnorm |
|
|||
900 |
|
||||
901 | @propertycache |
|
|||
902 | def otherparentset(self): |
|
|||
903 | otherparents = self._rustmap.other_parent_entries() |
|
|||
904 | return otherparents |
|
|||
905 |
|
||||
906 | def non_normal_or_other_parent_paths(self): |
|
|||
907 | return self._rustmap.non_normal_or_other_parent_paths() |
|
|||
908 |
|
707 | |||
909 | @propertycache |
|
708 | @propertycache | |
910 | def dirfoldmap(self): |
|
709 | def dirfoldmap(self): | |
911 | f = {} |
|
710 | f = {} | |
912 | normcase = util.normcase |
|
711 | normcase = util.normcase | |
913 | for name in self._rustmap.tracked_dirs():
|
712 | for name in self._map.tracked_dirs(): | |
914 | f[normcase(name)] = name |
|
713 | f[normcase(name)] = name | |
915 | return f |
|
714 | return f | |
916 |
|
715 | |||
917 | def set_possibly_dirty(self, filename): |
|
716 | ### code related to manipulation of entries and copy-sources | |
918 | """record that the current state of the file on disk is unknown""" |
|
717 | ||
919 | entry = self[filename] |
|
718 | def _refresh_entry(self, f, entry): | |
920 | entry.set_possibly_dirty() |
|
719 | if not entry.any_tracked: | |
921 | self._rustmap.set_v1(filename, entry) |
|
720 | self._map.drop_item_and_copy_source(f) | |
|
721 | else: | |||
|
722 | self._map.addfile(f, entry) | |||
|
723 | ||||
|
724 | def _insert_entry(self, f, entry): | |||
|
725 | self._map.addfile(f, entry) | |||
|
726 | ||||
|
727 | def _drop_entry(self, f): | |||
|
728 | self._map.drop_item_and_copy_source(f) | |||
922 |
|
729 | |||
923 | def __setitem__(self, key, value): |
|
730 | def __setitem__(self, key, value): | |
924 | assert isinstance(value, DirstateItem) |
|
731 | assert isinstance(value, DirstateItem) | |
925 | self._rustmap.set_dirstate_item(key, value)
|
732 | self._map.set_dirstate_item(key, value) |
@@ -10,31 +10,27 b' from __future__ import absolute_import' | |||||
10 | import struct |
|
10 | import struct | |
11 |
|
11 | |||
12 | from ..revlogutils import docket as docket_mod |
|
12 | from ..revlogutils import docket as docket_mod | |
13 |
|
13 | from . import v2 | ||
14 |
|
14 | |||
15 | V2_FORMAT_MARKER = b"dirstate-v2\n" |
|
15 | V2_FORMAT_MARKER = b"dirstate-v2\n" | |
16 |
|
16 | |||
17 | # Must match the constant of the same name in |
|
|||
18 | # `rust/hg-core/src/dirstate_tree/on_disk.rs` |
|
|||
19 | TREE_METADATA_SIZE = 44 |
|
|||
20 |
|
||||
21 | # * 12 bytes: format marker |
|
17 | # * 12 bytes: format marker | |
22 | # * 32 bytes: node ID of the working directory's first parent |
|
18 | # * 32 bytes: node ID of the working directory's first parent | |
23 | # * 32 bytes: node ID of the working directory's second parent |
|
19 | # * 32 bytes: node ID of the working directory's second parent | |
|
20 | # * {TREE_METADATA_SIZE} bytes: tree metadata, parsed separately | |||
24 | # * 4 bytes: big-endian used size of the data file |
|
21 | # * 4 bytes: big-endian used size of the data file | |
25 | # * {TREE_METADATA_SIZE} bytes: tree metadata, parsed separately |
|
|||
26 | # * 1 byte: length of the data file's UUID |
|
22 | # * 1 byte: length of the data file's UUID | |
27 | # * variable: data file's UUID |
|
23 | # * variable: data file's UUID | |
28 | # |
|
24 | # | |
29 | # Node IDs are null-padded if shorter than 32 bytes. |
|
25 | # Node IDs are null-padded if shorter than 32 bytes. | |
30 | # A data file shorter than the specified used size is corrupted (truncated) |
|
26 | # A data file shorter than the specified used size is corrupted (truncated) | |
31 | HEADER = struct.Struct( |
|
27 | HEADER = struct.Struct( | |
32 | ">{}s32s32sL{}sB".format(len(V2_FORMAT_MARKER), TREE_METADATA_SIZE)
|
28 | ">{}s32s32s{}sLB".format(len(V2_FORMAT_MARKER), v2.TREE_METADATA_SIZE) | |
33 | ) |
|
29 | ) | |
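With the reordered fields the fixed part of the docket is 12 + 32 + 32 + 44 + 4 + 1 = 125 bytes, followed by the variable-length UUID. A quick standalone check, hard-coding 44 for `v2.TREE_METADATA_SIZE` (the value the constant had before moving to `v2.py`):

    import struct

    V2_FORMAT_MARKER = b"dirstate-v2\n"  # 12 bytes
    HEADER = struct.Struct(
        ">{}s32s32s{}sLB".format(len(V2_FORMAT_MARKER), 44)
    )
    assert HEADER.size == 12 + 32 + 32 + 44 + 4 + 1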
34 |
|
30 | |||
35 |
|
31 | |||
36 | class DirstateDocket(object): |
|
32 | class DirstateDocket(object): | |
37 | data_filename_pattern = b'dirstate.%s.d'
|
33 | data_filename_pattern = b'dirstate.%s' | |
38 |
|
34 | |||
39 | def __init__(self, parents, data_size, tree_metadata, uuid): |
|
35 | def __init__(self, parents, data_size, tree_metadata, uuid): | |
40 | self.parents = parents |
|
36 | self.parents = parents | |
@@ -51,7 +47,7 b' class DirstateDocket(object):' | |||||
51 | if not data: |
|
47 | if not data: | |
52 | parents = (nodeconstants.nullid, nodeconstants.nullid) |
|
48 | parents = (nodeconstants.nullid, nodeconstants.nullid) | |
53 | return cls(parents, 0, b'', None) |
|
49 | return cls(parents, 0, b'', None) | |
54 | marker, p1, p2, data_size, meta, uuid_size = HEADER.unpack_from(data)
|
50 | marker, p1, p2, meta, data_size, uuid_size = HEADER.unpack_from(data) | |
55 | if marker != V2_FORMAT_MARKER: |
|
51 | if marker != V2_FORMAT_MARKER: | |
56 | raise ValueError("expected dirstate-v2 marker") |
|
52 | raise ValueError("expected dirstate-v2 marker") | |
57 | uuid = data[HEADER.size : HEADER.size + uuid_size] |
|
53 | uuid = data[HEADER.size : HEADER.size + uuid_size] | |
@@ -65,8 +61,8 b' class DirstateDocket(object):' | |||||
65 | V2_FORMAT_MARKER, |
|
61 | V2_FORMAT_MARKER, | |
66 | p1, |
|
62 | p1, | |
67 | p2, |
|
63 | p2, | |
|
64 | self.tree_metadata, | |||
68 | self.data_size, |
|
65 | self.data_size, | |
69 | self.tree_metadata, |
|
|||
70 | len(self.uuid), |
|
66 | len(self.uuid), | |
71 | ) |
|
67 | ) | |
72 | return header + self.uuid |
|
68 | return header + self.uuid |
@@ -253,7 +253,7 b' def dispatch(req):' | |||||
253 | status = -1 |
|
253 | status = -1 | |
254 |
|
254 | |||
255 | ret = _flushstdio(req.ui, err) |
|
255 | ret = _flushstdio(req.ui, err) | |
256 | if ret: |
|
256 | if ret and not status: | |
257 | status = ret |
|
257 | status = ret | |
258 | return status |
|
258 | return status | |
259 |
|
259 |
@@ -240,7 +240,9 b' def fromlocal(s):' | |||||
240 | b"decoding near '%s': %s!" % (sub, pycompat.bytestr(inst)) |
|
240 | b"decoding near '%s': %s!" % (sub, pycompat.bytestr(inst)) | |
241 | ) |
|
241 | ) | |
242 | except LookupError as k: |
|
242 | except LookupError as k: | |
243 | raise error.Abort(k, hint=b"please check your locale settings") |
|
243 | raise error.Abort( | |
|
244 | pycompat.bytestr(k), hint=b"please check your locale settings" | |||
|
245 | ) | |||
244 |
|
246 | |||
245 |
|
247 | |||
246 | def unitolocal(u): |
|
248 | def unitolocal(u): | |
@@ -306,7 +308,9 b' def lower(s):' | |||||
306 | except UnicodeError: |
|
308 | except UnicodeError: | |
307 | return s.lower() # we don't know how to fold this except in ASCII |
|
309 | return s.lower() # we don't know how to fold this except in ASCII | |
308 | except LookupError as k: |
|
310 | except LookupError as k: | |
309 | raise error.Abort(k, hint=b"please check your locale settings") |
|
311 | raise error.Abort( | |
|
312 | pycompat.bytestr(k), hint=b"please check your locale settings" | |||
|
313 | ) | |||
310 |
|
314 | |||
311 |
|
315 | |||
312 | def upper(s): |
|
316 | def upper(s): | |
@@ -333,7 +337,9 b' def upperfallback(s):' | |||||
333 | except UnicodeError: |
|
337 | except UnicodeError: | |
334 | return s.upper() # we don't know how to fold this except in ASCII |
|
338 | return s.upper() # we don't know how to fold this except in ASCII | |
335 | except LookupError as k: |
|
339 | except LookupError as k: | |
336 | raise error.Abort(k, hint=b"please check your locale settings") |
|
340 | raise error.Abort( | |
|
341 | pycompat.bytestr(k), hint=b"please check your locale settings" | |||
|
342 | ) | |||
337 |
|
343 | |||
338 |
|
344 | |||
339 | if not _nativeenviron: |
|
345 | if not _nativeenviron: |
@@ -31,6 +31,7 b' if pycompat.TYPE_CHECKING:' | |||||
31 |
|
31 | |||
32 |
|
32 | |||
33 | def _tobytes(exc): |
|
33 | def _tobytes(exc): | |
|
34 | # type: (...) -> bytes | |||
34 | """Byte-stringify exception in the same way as BaseException_str()""" |
|
35 | """Byte-stringify exception in the same way as BaseException_str()""" | |
35 | if not exc.args: |
|
36 | if not exc.args: | |
36 | return b'' |
|
37 | return b'' | |
@@ -47,7 +48,7 b' class Hint(object):' | |||||
47 | """ |
|
48 | """ | |
48 |
|
49 | |||
49 | def __init__(self, *args, **kw): |
|
50 | def __init__(self, *args, **kw): | |
50 | self.hint = kw.pop('hint', None) |
|
51 | self.hint = kw.pop('hint', None) # type: Optional[bytes] | |
51 | super(Hint, self).__init__(*args, **kw) |
|
52 | super(Hint, self).__init__(*args, **kw) | |
52 |
|
53 | |||
53 |
|
54 | |||
@@ -71,6 +72,7 b' class Error(Hint, Exception):' | |||||
71 | if pycompat.ispy3: |
|
72 | if pycompat.ispy3: | |
72 |
|
73 | |||
73 | def __str__(self): |
|
74 | def __str__(self): | |
|
75 | # type: () -> str | |||
74 | # the output would be unreadable if the message was translated, |
|
76 | # the output would be unreadable if the message was translated, | |
75 | # but do not replace it with encoding.strfromlocal(), which |
|
77 | # but do not replace it with encoding.strfromlocal(), which | |
76 | # may raise another exception. |
|
78 | # may raise another exception. | |
@@ -105,6 +107,7 b' class RevlogError(StorageError):' | |||||
105 |
|
107 | |||
106 | class SidedataHashError(RevlogError): |
|
108 | class SidedataHashError(RevlogError): | |
107 | def __init__(self, key, expected, got): |
|
109 | def __init__(self, key, expected, got): | |
|
110 | # type: (int, bytes, bytes) -> None | |||
108 | self.hint = None |
|
111 | self.hint = None | |
109 | self.sidedatakey = key |
|
112 | self.sidedatakey = key | |
110 | self.expecteddigest = expected |
|
113 | self.expecteddigest = expected | |
@@ -117,6 +120,7 b' class FilteredIndexError(IndexError):' | |||||
117 |
|
120 | |||
118 | class LookupError(RevlogError, KeyError): |
|
121 | class LookupError(RevlogError, KeyError): | |
119 | def __init__(self, name, index, message): |
|
122 | def __init__(self, name, index, message): | |
|
123 | # type: (bytes, bytes, bytes) -> None | |||
120 | self.name = name |
|
124 | self.name = name | |
121 | self.index = index |
|
125 | self.index = index | |
122 | # this can't be called 'message' because at least some installs of |
|
126 | # this can't be called 'message' because at least some installs of | |
@@ -343,6 +347,7 b' class OutOfBandError(RemoteError):' | |||||
343 | """Exception raised when a remote repo reports failure""" |
|
347 | """Exception raised when a remote repo reports failure""" | |
344 |
|
348 | |||
345 | def __init__(self, message=None, hint=None): |
|
349 | def __init__(self, message=None, hint=None): | |
|
350 | # type: (Optional[bytes], Optional[bytes]) -> None | |||
346 | from .i18n import _ |
|
351 | from .i18n import _ | |
347 |
|
352 | |||
348 | if message: |
|
353 | if message: |
@@ -1386,11 +1386,16 b' class pulloperation(object):' | |||||
1386 | includepats=None, |
|
1386 | includepats=None, | |
1387 | excludepats=None, |
|
1387 | excludepats=None, | |
1388 | depth=None, |
|
1388 | depth=None, | |
|
1389 | path=None, | |||
1389 | ): |
|
1390 | ): | |
1390 | # repo we pull into |
|
1391 | # repo we pull into | |
1391 | self.repo = repo |
|
1392 | self.repo = repo | |
1392 | # repo we pull from |
|
1393 | # repo we pull from | |
1393 | self.remote = remote |
|
1394 | self.remote = remote | |
|
1395 | # path object used to build this remote | |||
|
1396 | # | |||
|
1397 | # Ideally, the remote peer would carry that directly. | |||
|
1398 | self.remote_path = path | |||
1394 | # revision we try to pull (None is "all") |
|
1399 | # revision we try to pull (None is "all") | |
1395 | self.heads = heads |
|
1400 | self.heads = heads | |
1396 | # bookmark pulled explicitly |
|
1401 | # bookmark pulled explicitly | |
@@ -1556,6 +1561,7 b' def add_confirm_callback(repo, pullop):' | |||||
1556 | def pull( |
|
1561 | def pull( | |
1557 | repo, |
|
1562 | repo, | |
1558 | remote, |
|
1563 | remote, | |
|
1564 | path=None, | |||
1559 | heads=None, |
|
1565 | heads=None, | |
1560 | force=False, |
|
1566 | force=False, | |
1561 | bookmarks=(), |
|
1567 | bookmarks=(), | |
@@ -1611,8 +1617,9 b' def pull(' | |||||
1611 | pullop = pulloperation( |
|
1617 | pullop = pulloperation( | |
1612 | repo, |
|
1618 | repo, | |
1613 | remote, |
|
1619 | remote, | |
1614 | heads, |
|
1620 | path=path, | |
1615 | force, |
|
1621 | heads=heads, | |
|
1622 | force=force, | |||
1616 | bookmarks=bookmarks, |
|
1623 | bookmarks=bookmarks, | |
1617 | streamclonerequested=streamclonerequested, |
|
1624 | streamclonerequested=streamclonerequested, | |
1618 | includepats=includepats, |
|
1625 | includepats=includepats, | |
@@ -2021,6 +2028,9 b' def _pullbookmarks(pullop):' | |||||
2021 | pullop.stepsdone.add(b'bookmarks') |
|
2028 | pullop.stepsdone.add(b'bookmarks') | |
2022 | repo = pullop.repo |
|
2029 | repo = pullop.repo | |
2023 | remotebookmarks = pullop.remotebookmarks |
|
2030 | remotebookmarks = pullop.remotebookmarks | |
|
2031 | bookmarks_mode = None | |||
|
2032 | if pullop.remote_path is not None: | |||
|
2033 | bookmarks_mode = pullop.remote_path.bookmarks_mode | |||
2024 | bookmod.updatefromremote( |
|
2034 | bookmod.updatefromremote( | |
2025 | repo.ui, |
|
2035 | repo.ui, | |
2026 | repo, |
|
2036 | repo, | |
@@ -2028,6 +2038,7 b' def _pullbookmarks(pullop):' | |||||
2028 | pullop.remote.url(), |
|
2038 | pullop.remote.url(), | |
2029 | pullop.gettransaction, |
|
2039 | pullop.gettransaction, | |
2030 | explicit=pullop.explicitbookmarks, |
|
2040 | explicit=pullop.explicitbookmarks, | |
|
2041 | mode=bookmarks_mode, | |||
2031 | ) |
|
2042 | ) | |
2032 |
|
2043 | |||
2033 |
|
2044 |
@@ -224,8 +224,12 b' def load(ui, name, path, loadingtime=Non' | |||||
224 | minver = getattr(mod, 'minimumhgversion', None) |
|
224 | minver = getattr(mod, 'minimumhgversion', None) | |
225 | if minver: |
|
225 | if minver: | |
226 | curver = util.versiontuple(n=2) |
|
226 | curver = util.versiontuple(n=2) | |
|
227 | extmin = util.versiontuple(stringutil.forcebytestr(minver), 2) | |||
227 |
|
228 | |||
228 | if None in curver or util.versiontuple(minver, 2) > curver: |
|
229 | if None in extmin: | |
|
230 | extmin = (extmin[0] or 0, extmin[1] or 0) | |||
|
231 | ||||
|
232 | if None in curver or extmin > curver: | |||
229 | msg = _( |
|
233 | msg = _( | |
230 | b'(third party extension %s requires version %s or newer ' |
|
234 | b'(third party extension %s requires version %s or newer ' | |
231 | b'of Mercurial (current: %s); disabling)\n' |
|
235 | b'of Mercurial (current: %s); disabling)\n' |
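The reworked comparison avoids ordering None against int, which raises TypeError on Python 3: versiontuple() pads missing components with None, so the extension's declared minimum is normalized to zeros before the tuple comparison. A hedged illustration (version strings invented):

    from mercurial import util

    curver = util.versiontuple(n=2)         # e.g. (6, 0)
    extmin = util.versiontuple(b'4', 2)     # bare major version -> (4, None)
    if None in extmin:
        extmin = (extmin[0] or 0, extmin[1] or 0)  # -> (4, 0)
    too_old = None in curver or extmin > curver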
@@ -365,6 +365,11 b' internalstable = sorted(' | |||||
365 | loaddoc(b'config', subdir=b'internals'), |
|
365 | loaddoc(b'config', subdir=b'internals'), | |
366 | ), |
|
366 | ), | |
367 | ( |
|
367 | ( | |
|
368 | [b'dirstate-v2'], | |||
|
369 | _(b'dirstate-v2 file format'), | |||
|
370 | loaddoc(b'dirstate-v2', subdir=b'internals'), | |||
|
371 | ), | |||
|
372 | ( | |||
368 | [b'extensions', b'extension'], |
|
373 | [b'extensions', b'extension'], | |
369 | _(b'Extension API'), |
|
374 | _(b'Extension API'), | |
370 | loaddoc(b'extensions', subdir=b'internals'), |
|
375 | loaddoc(b'extensions', subdir=b'internals'), |
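With the topic registered above, the new format documentation becomes reachable the same way as the other internals topics:

    $ hg help internals.dirstate-v2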
@@ -1748,6 +1748,18 b' The following sub-options can be defined' | |||||
1748 | Revsets specifying bookmarks will not result in the bookmark being |
|
1748 | Revsets specifying bookmarks will not result in the bookmark being | |
1749 | pushed. |
|
1749 | pushed. | |
1750 |
|
1750 | |||
|
1751 | ``bookmarks.mode`` | |||
|
1752 | How bookmarks will be dealt with during the exchange. It supports the following values: | |||
|
1753 | ||||
|
1754 | - ``default``: the default behavior, local and remote bookmarks are "merged" | |||
|
1755 | on push/pull. | |||
|
1756 | ||||
|
1757 | - ``mirror``: when pulling, replace local bookmarks by remote bookmarks. This | |||
|
1758 | is useful to replicate a repository, or as an optimization. | |||
|
1759 | ||||
|
1760 | - ``ignore``: ignore bookmarks during exchange. | |||
|
1761 | (This currently only affects pulling) | |||
|
1762 | ||||
1751 | The following special named paths exist: |
|
1763 | The following special named paths exist: | |
1752 |
|
1764 | |||
1753 | ``default`` |
|
1765 | ``default`` |
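Being a path sub-option, ``bookmarks.mode`` is set per remote in the configuration; a hedged hgrc example (path name and URL invented):

    [paths]
    upstream = https://example.com/repo
    upstream:bookmarks.mode = mirror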
@@ -942,7 +942,7 b' def clone(' | |||||
942 | exchange.pull( |
|
942 | exchange.pull( | |
943 | local, |
|
943 | local, | |
944 | srcpeer, |
|
944 | srcpeer, | |
945 | revs, |
|
945 | heads=revs, | |
946 | streamclonerequested=stream, |
|
946 | streamclonerequested=stream, | |
947 | includepats=storeincludepats, |
|
947 | includepats=storeincludepats, | |
948 | excludepats=storeexcludepats, |
|
948 | excludepats=storeexcludepats, | |
@@ -1261,13 +1261,14 b' def _incoming(' | |||||
1261 | (remoterepo, incomingchangesetlist, displayer) parameters, |
|
1261 | (remoterepo, incomingchangesetlist, displayer) parameters, | |
1262 | and is supposed to contain only code that can't be unified. |
|
1262 | and is supposed to contain only code that can't be unified. | |
1263 | """ |
|
1263 | """ | |
1264 | srcs = urlutil.get_pull_paths(repo, ui, [source], opts.get(b'branch')) |
|
1264 | srcs = urlutil.get_pull_paths(repo, ui, [source]) | |
1265 | srcs = list(srcs) |
|
1265 | srcs = list(srcs) | |
1266 | if len(srcs) != 1: |
|
1266 | if len(srcs) != 1: | |
1267 | msg = _(b'for now, incoming supports only a single source, %d provided') |
|
1267 | msg = _(b'for now, incoming supports only a single source, %d provided') | |
1268 | msg %= len(srcs) |
|
1268 | msg %= len(srcs) | |
1269 | raise error.Abort(msg) |
|
1269 | raise error.Abort(msg) | |
1270 | source, branches = srcs[0] |
|
1270 | path = srcs[0] | |
|
1271 | source, branches = urlutil.parseurl(path.rawloc, opts.get(b'branch')) | |||
1271 | if subpath is not None: |
|
1272 | if subpath is not None: | |
1272 | subpath = urlutil.url(subpath) |
|
1273 | subpath = urlutil.url(subpath) | |
1273 | if subpath.isabs(): |
|
1274 | if subpath.isabs(): | |
@@ -1285,7 +1286,7 b' def _incoming(' | |||||
1285 | if revs: |
|
1286 | if revs: | |
1286 | revs = [other.lookup(rev) for rev in revs] |
|
1287 | revs = [other.lookup(rev) for rev in revs] | |
1287 | other, chlist, cleanupfn = bundlerepo.getremotechanges( |
|
1288 | other, chlist, cleanupfn = bundlerepo.getremotechanges( | |
1288 | ui, repo, other, revs, opts[b"bundle"], opts[b"force"] |
|
1289 | ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force") | |
1289 | ) |
|
1290 | ) | |
1290 |
|
1291 | |||
1291 | if not chlist: |
|
1292 | if not chlist: | |
@@ -1352,7 +1353,7 b' def _outgoing(ui, repo, dests, opts, sub' | |||||
1352 | ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest)) |
|
1353 | ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest)) | |
1353 | revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev')) |
|
1354 | revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev')) | |
1354 | if revs: |
|
1355 | if revs: | |
1355 | revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)] |
|
1356 | revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)] | |
1356 |
|
1357 | |||
1357 | other = peer(repo, opts, dest) |
|
1358 | other = peer(repo, opts, dest) | |
1358 | try: |
|
1359 | try: |
@@ -285,6 +285,7 b' class hgwebdir(object):' | |||||
285 | self.lastrefresh = 0 |
|
285 | self.lastrefresh = 0 | |
286 | self.motd = None |
|
286 | self.motd = None | |
287 | self.refresh() |
|
287 | self.refresh() | |
|
288 | self.requests_count = 0 | |||
288 | if not baseui: |
|
289 | if not baseui: | |
289 | # set up environment for new ui |
|
290 | # set up environment for new ui | |
290 | extensions.loadall(self.ui) |
|
291 | extensions.loadall(self.ui) | |
@@ -341,6 +342,10 b' class hgwebdir(object):' | |||||
341 |
|
342 | |||
342 | self.repos = repos |
|
343 | self.repos = repos | |
343 | self.ui = u |
|
344 | self.ui = u | |
|
345 | self.gc_full_collect_rate = self.ui.configint( | |||
|
346 | b'experimental', b'web.full-garbage-collection-rate' | |||
|
347 | ) | |||
|
348 | self.gc_full_collections_done = 0 | |||
344 | encoding.encoding = self.ui.config(b'web', b'encoding') |
|
349 | encoding.encoding = self.ui.config(b'web', b'encoding') | |
345 | self.style = self.ui.config(b'web', b'style') |
|
350 | self.style = self.ui.config(b'web', b'style') | |
346 | self.templatepath = self.ui.config( |
|
351 | self.templatepath = self.ui.config( | |
@@ -383,12 +388,27 b' class hgwebdir(object):' | |||||
383 | finally: |
|
388 | finally: | |
384 | # There are known cycles in localrepository that prevent |
|
389 | # There are known cycles in localrepository that prevent | |
385 | # those objects (and tons of held references) from being |
|
390 | # those objects (and tons of held references) from being | |
386 | # collected through normal refcounting. We mitigate those |
|
391 | # collected through normal refcounting. | |
387 | # leaks by performing an explicit GC on every request. |
|
392 | # In some cases, the resulting memory consumption can | |
388 | # TODO remove this once leaks are fixed. |
|
393 | # be tamed by performing explicit garbage collections. | |
389 | # TODO only run this on requests that create localrepository |
|
394 | # In presence of actual leaks or big long-lived caches, the | |
390 | # instances instead of every request. |
|
395 | # impact on performance of such collections can become a | |
391 | gc.collect() |
|
396 | # problem, hence the rate shouldn't be set too low. | |
|
397 | # See "Collecting the oldest generation" in | |||
|
398 | # https://devguide.python.org/garbage_collector | |||
|
399 | # for more about such trade-offs. | |||
|
400 | rate = self.gc_full_collect_rate | |||
|
401 | ||||
|
402 | # this is not thread safe, but the consequence (skipping | |||
|
403 | # a garbage collection) is arguably better than risking | |||
|
404 | # to have several threads perform a collection in parallel | |||
|
405 | # (long useless wait on all threads). | |||
|
406 | self.requests_count += 1 | |||
|
407 | if rate > 0 and self.requests_count % rate == 0: | |||
|
408 | gc.collect() | |||
|
409 | self.gc_full_collections_done += 1 | |||
|
410 | else: | |||
|
411 | gc.collect(generation=1) | |||
392 |
|
412 | |||
393 | def _runwsgi(self, req, res): |
|
413 | def _runwsgi(self, req, res): | |
394 | try: |
|
414 | try: |
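Condensed, the new policy amortizes collection cost: every request pays only for a cheap generation-1 pass, and every N-th request (N taken from ``experimental.web.full-garbage-collection-rate``) pays for a full collection. A standalone sketch of the same logic, with the counters threaded through a dict for brevity:

    import gc

    def collect_after_request(state, rate):
        # deliberately not thread safe, mirroring the trade-off above:
        # occasionally skipping a collection beats serializing all threads
        state['requests_count'] += 1
        if rate > 0 and state['requests_count'] % rate == 0:
            gc.collect()              # full collection, all generations
        else:
            gc.collect(generation=1)  # cheaper partial collection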
@@ -132,36 +132,6 b' class idirstate(interfaceutil.Interface)' | |||||
132 | def copies(): |
|
132 | def copies(): | |
133 | pass |
|
133 | pass | |
134 |
|
134 | |||
135 | def normal(f, parentfiledata=None): |
|
|||
136 | """Mark a file normal and clean. |
|
|||
137 |
|
||||
138 | parentfiledata: (mode, size, mtime) of the clean file |
|
|||
139 |
|
||||
140 | parentfiledata should be computed from memory (for mode, |
|
|||
141 | size), as or close as possible from the point where we |
|
|||
142 | determined the file was clean, to limit the risk of the |
|
|||
143 | file having been changed by an external process between the |
|
|||
144 | moment where the file was determined to be clean and now.""" |
|
|||
145 | pass |
|
|||
146 |
|
||||
147 | def normallookup(f): |
|
|||
148 | '''Mark a file normal, but possibly dirty.''' |
|
|||
149 |
|
||||
150 | def otherparent(f): |
|
|||
151 | '''Mark as coming from the other parent, always dirty.''' |
|
|||
152 |
|
||||
153 | def add(f): |
|
|||
154 | '''Mark a file added.''' |
|
|||
155 |
|
||||
156 | def remove(f): |
|
|||
157 | '''Mark a file removed.''' |
|
|||
158 |
|
||||
159 | def merge(f): |
|
|||
160 | '''Mark a file merged.''' |
|
|||
161 |
|
||||
162 | def drop(f): |
|
|||
163 | '''Drop a file from the dirstate''' |
|
|||
164 |
|
||||
165 | def normalize(path, isknown=False, ignoremissing=False): |
|
135 | def normalize(path, isknown=False, ignoremissing=False): | |
166 | """ |
|
136 | """ | |
167 | normalize the case of a pathname when on a casefolding filesystem |
|
137 | normalize the case of a pathname when on a casefolding filesystem |
@@ -917,9 +917,6 b' def gathersupportedrequirements(ui):' | |||||
917 | # Start with all requirements supported by this file. |
|
917 | # Start with all requirements supported by this file. | |
918 | supported = set(localrepository._basesupported) |
|
918 | supported = set(localrepository._basesupported) | |
919 |
|
919 | |||
920 | if dirstate.SUPPORTS_DIRSTATE_V2: |
|
|||
921 | supported.add(requirementsmod.DIRSTATE_V2_REQUIREMENT) |
|
|||
922 |
|
||||
923 | # Execute ``featuresetupfuncs`` entries if they belong to an extension |
|
920 | # Execute ``featuresetupfuncs`` entries if they belong to an extension | |
924 | # relevant to this ui instance. |
|
921 | # relevant to this ui instance. | |
925 | modules = {m.__name__ for n, m in extensions.extensions(ui)} |
|
922 | modules = {m.__name__ for n, m in extensions.extensions(ui)} | |
@@ -1177,6 +1174,32 b' def resolverevlogstorevfsoptions(ui, req' | |||||
1177 | if slow_path == b'abort': |
|
1174 | if slow_path == b'abort': | |
1178 | raise error.Abort(msg, hint=hint) |
|
1175 | raise error.Abort(msg, hint=hint) | |
1179 | options[b'persistent-nodemap'] = True |
|
1176 | options[b'persistent-nodemap'] = True | |
|
1177 | if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements: | |||
|
1178 | slow_path = ui.config(b'storage', b'dirstate-v2.slow-path') | |||
|
1179 | if slow_path not in (b'allow', b'warn', b'abort'): | |||
|
1180 | default = ui.config_default(b'storage', b'dirstate-v2.slow-path') | |||
|
1181 | msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n') | |||
|
1182 | ui.warn(msg % slow_path) | |||
|
1183 | if not ui.quiet: | |||
|
1184 | ui.warn(_(b'falling back to default value: %s\n') % default) | |||
|
1185 | slow_path = default | |||
|
1186 | ||||
|
1187 | msg = _( | |||
|
1188 | b"accessing `dirstate-v2` repository without associated " | |||
|
1189 | b"fast implementation." | |||
|
1190 | ) | |||
|
1191 | hint = _( | |||
|
1192 | b"check `hg help config.format.exp-rc-dirstate-v2` " b"for details" | |||
|
1193 | ) | |||
|
1194 | if not dirstate.HAS_FAST_DIRSTATE_V2: | |||
|
1195 | if slow_path == b'warn': | |||
|
1196 | msg = b"warning: " + msg + b'\n' | |||
|
1197 | ui.warn(msg) | |||
|
1198 | if not ui.quiet: | |||
|
1199 | hint = b'(' + hint + b')\n' | |||
|
1200 | ui.warn(hint) | |||
|
1201 | if slow_path == b'abort': | |||
|
1202 | raise error.Abort(msg, hint=hint) | |||
1180 | if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'): |
|
1203 | if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'): | |
1181 | options[b'persistent-nodemap.mmap'] = True |
|
1204 | options[b'persistent-nodemap.mmap'] = True | |
1182 | if ui.configbool(b'devel', b'persistent-nodemap'): |
|
1205 | if ui.configbool(b'devel', b'persistent-nodemap'): | |
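In configuration terms, the check above reads as follows; a hedged hgrc sketch using the three values the code accepts:

    [format]
    exp-rc-dirstate-v2 = yes

    [storage]
    # behavior when no fast (Rust) dirstate-v2 implementation is available:
    # one of allow, warn, abort
    dirstate-v2.slow-path = warn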
@@ -1266,6 +1289,7 b' class localrepository(object):' | |||||
1266 | requirementsmod.NODEMAP_REQUIREMENT, |
|
1289 | requirementsmod.NODEMAP_REQUIREMENT, | |
1267 | bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT, |
|
1290 | bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT, | |
1268 | requirementsmod.SHARESAFE_REQUIREMENT, |
|
1291 | requirementsmod.SHARESAFE_REQUIREMENT, | |
|
1292 | requirementsmod.DIRSTATE_V2_REQUIREMENT, | |||
1269 | } |
|
1293 | } | |
1270 | _basesupported = supportedformats | { |
|
1294 | _basesupported = supportedformats | { | |
1271 | requirementsmod.STORE_REQUIREMENT, |
|
1295 | requirementsmod.STORE_REQUIREMENT, | |
@@ -3606,18 +3630,10 b' def newreporequirements(ui, createopts):' | |||||
3606 | if ui.configbool(b'format', b'sparse-revlog'): |
|
3630 | if ui.configbool(b'format', b'sparse-revlog'): | |
3607 | requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT) |
|
3631 | requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT) | |
3608 |
|
3632 | |||
3609 | # experimental config: format.exp-dirstate-v2 |
|
3633 | # experimental config: format.exp-rc-dirstate-v2 | |
3610 | # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py` |
|
3634 | # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py` | |
3611 | if ui.configbool(b'format', b'exp-dirstate-v2'): |
|
3635 | if ui.configbool(b'format', b'exp-rc-dirstate-v2'): | |
3612 | if dirstate.SUPPORTS_DIRSTATE_V2: |
|
3636 | requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT) | |
3613 | requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT) |
|
|||
3614 | else: |
|
|||
3615 | raise error.Abort( |
|
|||
3616 | _( |
|
|||
3617 | b"dirstate v2 format requested by config " |
|
|||
3618 | b"but not supported (requires Rust extensions)" |
|
|||
3619 | ) |
|
|||
3620 | ) |
|
|||
3621 |
|
3637 | |||
3622 | # experimental config: format.exp-use-copies-side-data-changeset |
|
3638 | # experimental config: format.exp-use-copies-side-data-changeset | |
3623 | if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'): |
|
3639 | if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'): |
@@ -46,13 +46,12 b' if pycompat.TYPE_CHECKING:' | |||||
46 | Any, |
|
46 | Any, | |
47 | Callable, |
|
47 | Callable, | |
48 | Dict, |
|
48 | Dict, | |
49 | List, |
|
|||
50 | Optional, |
|
49 | Optional, | |
51 | Sequence, |
|
50 | Sequence, | |
52 | Tuple, |
|
51 | Tuple, | |
53 | ) |
|
52 | ) | |
54 |
|
53 | |||
55 | for t in (Any, Callable, Dict, List, Optional, Tuple): |
|
54 | for t in (Any, Callable, Dict, Optional, Tuple): | |
56 | assert t |
|
55 | assert t | |
57 |
|
56 | |||
58 |
|
57 | |||
@@ -714,43 +713,43 b' class walkopts(object):' | |||||
714 | """ |
|
713 | """ | |
715 |
|
714 | |||
716 | # raw command-line parameters, which a matcher will be built from |
|
715 | # raw command-line parameters, which a matcher will be built from | |
717 | pats = attr.ib() |
|
716 | pats = attr.ib() | |
718 | opts = attr.ib() |
|
717 | opts = attr.ib() | |
719 |
|
718 | |||
720 | # a list of revset expressions to be traversed; if follow, it specifies |
|
719 | # a list of revset expressions to be traversed; if follow, it specifies | |
721 | # the start revisions |
|
720 | # the start revisions | |
722 | revspec = attr.ib() |
|
721 | revspec = attr.ib() | |
723 |
|
722 | |||
724 | # miscellaneous queries to filter revisions (see "hg help log" for details) |
|
723 | # miscellaneous queries to filter revisions (see "hg help log" for details) | |
725 | bookmarks = attr.ib(default=attr.Factory(list)) |
|
724 | bookmarks = attr.ib(default=attr.Factory(list)) | |
726 | branches = attr.ib(default=attr.Factory(list)) |
|
725 | branches = attr.ib(default=attr.Factory(list)) | |
727 | date = attr.ib(default=None) |
|
726 | date = attr.ib(default=None) | |
728 | keywords = attr.ib(default=attr.Factory(list)) |
|
727 | keywords = attr.ib(default=attr.Factory(list)) | |
729 | no_merges = attr.ib(default=False) |
|
728 | no_merges = attr.ib(default=False) | |
730 | only_merges = attr.ib(default=False) |
|
729 | only_merges = attr.ib(default=False) | |
731 | prune_ancestors = attr.ib(default=attr.Factory(list)) |
|
730 | prune_ancestors = attr.ib(default=attr.Factory(list)) | |
732 | users = attr.ib(default=attr.Factory(list)) |
|
731 | users = attr.ib(default=attr.Factory(list)) | |
733 |
|
732 | |||
734 | # miscellaneous matcher arguments |
|
733 | # miscellaneous matcher arguments | |
735 | include_pats = attr.ib(default=attr.Factory(list)) |
|
734 | include_pats = attr.ib(default=attr.Factory(list)) | |
736 | exclude_pats = attr.ib(default=attr.Factory(list)) |
|
735 | exclude_pats = attr.ib(default=attr.Factory(list)) | |
737 |
|
736 | |||
738 | # 0: no follow, 1: follow first, 2: follow both parents |
|
737 | # 0: no follow, 1: follow first, 2: follow both parents | |
739 | follow = attr.ib(default=0) |
|
738 | follow = attr.ib(default=0) | |
740 |
|
739 | |||
741 | # do not attempt filelog-based traversal, which may be fast but cannot |
|
740 | # do not attempt filelog-based traversal, which may be fast but cannot | |
742 | # include revisions where files were removed |
|
741 | # include revisions where files were removed | |
743 | force_changelog_traversal = attr.ib(default=False) |
|
742 | force_changelog_traversal = attr.ib(default=False) | |
744 |
|
743 | |||
745 | # filter revisions by file patterns, which should be disabled only if |
|
744 | # filter revisions by file patterns, which should be disabled only if | |
746 | # you want to include revisions where files were unmodified |
|
745 | # you want to include revisions where files were unmodified | |
747 | filter_revisions_by_pats = attr.ib(default=True) |
|
746 | filter_revisions_by_pats = attr.ib(default=True) | |
748 |
|
747 | |||
749 | # sort revisions prior to traversal: 'desc', 'topo', or None |
|
748 | # sort revisions prior to traversal: 'desc', 'topo', or None | |
750 | sort_revisions = attr.ib(default=None) |
|
749 | sort_revisions = attr.ib(default=None) | |
751 |
|
750 | |||
752 | # limit number of changes displayed; None means unlimited |
|
751 | # limit number of changes displayed; None means unlimited | |
753 | limit = attr.ib(default=None) |
|
752 | limit = attr.ib(default=None) | |
754 |
|
753 | |||
755 |
|
754 | |||
756 | def parseopts(ui, pats, opts): |
|
755 | def parseopts(ui, pats, opts): | |
@@ -913,6 +912,42 b' def _makenofollowfilematcher(repo, pats,' | |||||
913 | return None |
|
912 | return None | |
914 |
|
913 | |||
915 |
|
914 | |||
|
915 | def revsingle(repo, revspec, default=b'.', localalias=None): | |||
|
916 | """Resolves user-provided revset(s) into a single revision. | |||
|
917 | ||||
|
918 | This just wraps the lower-level scmutil.revsingle() in order to raise an | |||
|
919 | exception indicating user error. | |||
|
920 | """ | |||
|
921 | try: | |||
|
922 | return scmutil.revsingle(repo, revspec, default, localalias) | |||
|
923 | except error.RepoLookupError as e: | |||
|
924 | raise error.InputError(e.args[0], hint=e.hint) | |||
|
925 | ||||
|
926 | ||||
|
927 | def revpair(repo, revs): | |||
|
928 | """Resolves user-provided revset(s) into two revisions. | |||
|
929 | ||||
|
930 | This just wraps the lower-level scmutil.revpair() in order to raise an | |||
|
931 | exception indicating user error. | |||
|
932 | """ | |||
|
933 | try: | |||
|
934 | return scmutil.revpair(repo, revs) | |||
|
935 | except error.RepoLookupError as e: | |||
|
936 | raise error.InputError(e.args[0], hint=e.hint) | |||
|
937 | ||||
|
938 | ||||
|
939 | def revrange(repo, specs, localalias=None): | |||
|
940 | """Resolves user-provided revset(s). | |||
|
941 | ||||
|
942 | This just wraps the lower-level scmutil.revrange() in order to raise an | |||
|
943 | exception indicating user error. | |||
|
944 | """ | |||
|
945 | try: | |||
|
946 | return scmutil.revrange(repo, specs, localalias) | |||
|
947 | except error.RepoLookupError as e: | |||
|
948 | raise error.InputError(e.args[0], hint=e.hint) | |||
|
949 | ||||
|
950 | ||||
916 | _opt2logrevset = { |
|
951 | _opt2logrevset = { | |
917 | b'no_merges': (b'not merge()', None), |
|
952 | b'no_merges': (b'not merge()', None), | |
918 | b'only_merges': (b'merge()', None), |
|
953 | b'only_merges': (b'merge()', None), | |
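These wrappers let command-level code keep a single entry point while converting a failed revset lookup into InputError, the exception class reserved for user mistakes. A hedged usage sketch (`repo`, `ui`, and the revset are placeholders):

    from mercurial import error, logcmdutil

    try:
        revs = logcmdutil.revrange(repo, [b'::no-such-symbol'])
    except error.InputError as inst:
        ui.warn(b'invalid revset: %s\n' % inst.args[0])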
@@ -988,7 +1023,7 b' def _makerevset(repo, wopts, slowpath):' | |||||
988 | def _initialrevs(repo, wopts): |
|
1023 | def _initialrevs(repo, wopts): | |
989 | """Return the initial set of revisions to be filtered or followed""" |
|
1024 | """Return the initial set of revisions to be filtered or followed""" | |
990 | if wopts.revspec: |
|
1025 | if wopts.revspec: | |
991 | revs = scmutil.revrange(repo, wopts.revspec) |
|
1026 | revs = revrange(repo, wopts.revspec) | |
992 | elif wopts.follow and repo.dirstate.p1() == repo.nullid: |
|
1027 | elif wopts.follow and repo.dirstate.p1() == repo.nullid: | |
993 | revs = smartset.baseset() |
|
1028 | revs = smartset.baseset() | |
994 | elif wopts.follow: |
|
1029 | elif wopts.follow: |
@@ -9,13 +9,13 b' from __future__ import absolute_import' | |||||
9 |
|
9 | |||
10 | import collections |
|
10 | import collections | |
11 | import errno |
|
11 | import errno | |
12 | import stat |
|
|||
13 | import struct |
|
12 | import struct | |
14 |
|
13 | |||
15 | from .i18n import _ |
|
14 | from .i18n import _ | |
16 | from .node import nullrev |
|
15 | from .node import nullrev | |
17 | from .thirdparty import attr |
|
16 | from .thirdparty import attr | |
18 | from .utils import stringutil |
|
17 | from .utils import stringutil | |
|
18 | from .dirstateutils import timestamp | |||
19 | from . import ( |
|
19 | from . import ( | |
20 | copies, |
|
20 | copies, | |
21 | encoding, |
|
21 | encoding, | |
@@ -1406,8 +1406,9 b' def batchget(repo, mctx, wctx, wantfiled' | |||||
1406 | if wantfiledata: |
|
1406 | if wantfiledata: | |
1407 | s = wfctx.lstat() |
|
1407 | s = wfctx.lstat() | |
1408 | mode = s.st_mode |
|
1408 | mode = s.st_mode | |
1409 | mtime = s[stat.ST_MTIME] |
|
1409 | mtime = timestamp.mtime_of(s) | |
1410 | filedata[f] = (mode, size, mtime) # for dirstate.normal |
|
1410 | # for dirstate.update_file's parentfiledata argument: | |
|
1411 | filedata[f] = (mode, size, mtime) | |||
1411 | if i == 100: |
|
1412 | if i == 100: | |
1412 | yield False, (i, f) |
|
1413 | yield False, (i, f) | |
1413 | i = 0 |
|
1414 | i = 0 |
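The net effect is that the cached triple now carries a (seconds, nanoseconds) timestamp object rather than an integer mtime. A hedged sketch of how such a triple is derived from an lstat result:

    import os
    from mercurial.dirstateutils import timestamp

    st = os.lstat(__file__)  # stand-in for the working-copy file
    parentfiledata = (st.st_mode, st.st_size, timestamp.mtime_of(st))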
@@ -796,12 +796,13 b' def recordupdates(repo, actions, branchm' | |||||
796 | for f, args, msg in actions.get(ACTION_GET, []): |
|
796 | for f, args, msg in actions.get(ACTION_GET, []): | |
797 | if branchmerge: |
|
797 | if branchmerge: | |
798 | # tracked in p1 can be True also but update_file should not care |
|
798 | # tracked in p1 can be True also but update_file should not care | |
|
799 | old_entry = repo.dirstate.get_entry(f) | |||
|
800 | p1_tracked = old_entry.any_tracked and not old_entry.added | |||
799 | repo.dirstate.update_file( |
|
801 | repo.dirstate.update_file( | |
800 | f, |
|
802 | f, | |
801 | p1_tracked=False, |
|
803 | p1_tracked=p1_tracked, | |
802 | p2_tracked=True, |
|
|||
803 | wc_tracked=True, |
|
804 | wc_tracked=True, | |
804 | clean_p2=True, |
|
805 | p2_info=True, | |
805 | ) |
|
806 | ) | |
806 | else: |
|
807 | else: | |
807 | parentfiledata = getfiledata[f] if getfiledata else None |
|
808 | parentfiledata = getfiledata[f] if getfiledata else None | |
@@ -818,8 +819,12 b' def recordupdates(repo, actions, branchm' | |||||
818 | if branchmerge: |
|
819 | if branchmerge: | |
819 | # We've done a branch merge, mark this file as merged |
|
820 | # We've done a branch merge, mark this file as merged | |
820 | # so that we properly record the merger later |
|
821 | # so that we properly record the merger later | |
|
822 | p1_tracked = f1 == f | |||
821 | repo.dirstate.update_file( |
|
823 | repo.dirstate.update_file( | |
822 | f, p1_tracked=True, wc_tracked=True, merged=True |
|
824 | f, | |
|
825 | p1_tracked=p1_tracked, | |||
|
826 | wc_tracked=True, | |||
|
827 | p2_info=True, | |||
823 | ) |
|
828 | ) | |
824 | if f1 != f2: # copy/rename |
|
829 | if f1 != f2: # copy/rename | |
825 | if move: |
|
830 | if move: |
@@ -1,5 +1,5 b'' | |||||
1 | #ifndef _HG_MPATCH_H_ |
|
1 | #ifndef HG_MPATCH_H | |
2 | #define _HG_MPATCH_H_ |
|
2 | #define HG_MPATCH_H | |
3 |
|
3 | |||
4 | #define MPATCH_ERR_NO_MEM -3 |
|
4 | #define MPATCH_ERR_NO_MEM -3 | |
5 | #define MPATCH_ERR_CANNOT_BE_DECODED -2 |
|
5 | #define MPATCH_ERR_CANNOT_BE_DECODED -2 |
@@ -299,7 +299,7 b' def checkworkingcopynarrowspec(repo):' | |||||
299 | storespec = repo.svfs.tryread(FILENAME) |
|
299 | storespec = repo.svfs.tryread(FILENAME) | |
300 | wcspec = repo.vfs.tryread(DIRSTATE_FILENAME) |
|
300 | wcspec = repo.vfs.tryread(DIRSTATE_FILENAME) | |
301 | if wcspec != storespec: |
|
301 | if wcspec != storespec: | |
302 | raise error.Abort( |
|
302 | raise error.StateError( | |
303 | _(b"working copy's narrowspec is stale"), |
|
303 | _(b"working copy's narrowspec is stale"), | |
304 | hint=_(b"run 'hg tracked --update-working-copy'"), |
|
304 | hint=_(b"run 'hg tracked --update-working-copy'"), | |
305 | ) |
|
305 | ) |
@@ -21,7 +21,6 b' from __future__ import absolute_import, ' | |||||
21 | from .i18n import _ |
|
21 | from .i18n import _ | |
22 | from . import ( |
|
22 | from . import ( | |
23 | error, |
|
23 | error, | |
24 | pycompat, |
|
|||
25 | util, |
|
24 | util, | |
26 | ) |
|
25 | ) | |
27 | from .utils import stringutil |
|
26 | from .utils import stringutil | |
@@ -216,7 +215,11 b' def unescapestr(s):' | |||||
216 | return stringutil.unescapestr(s) |
|
215 | return stringutil.unescapestr(s) | |
217 | except ValueError as e: |
|
216 | except ValueError as e: | |
218 | # mangle Python's exception into our format |
|
217 | # mangle Python's exception into our format | |
219 | raise error.ParseError(pycompat.bytestr(e).lower()) |
|
218 | # TODO: remove this suppression. For some reason, pytype 2021.09.09 | |
|
219 | # thinks .lower() is being called on Union[ValueError, bytes]. | |||
|
220 | # pytype: disable=attribute-error | |||
|
221 | raise error.ParseError(stringutil.forcebytestr(e).lower()) | |||
|
222 | # pytype: enable=attribute-error | |||
220 |
|
223 | |||
221 |
|
224 | |||
222 | def _prettyformat(tree, leafnodes, level, lines): |
|
225 | def _prettyformat(tree, leafnodes, level, lines): |
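forcebytestr() is the safer conversion here because the ValueError text is a native str that may not be ASCII-only. A small sketch that exercises the same path (the malformed escape is invented):

    from mercurial.utils import stringutil

    try:
        stringutil.unescapestr(b'\\x')  # truncated \x escape -> ValueError
    except ValueError as e:
        message = stringutil.forcebytestr(e).lower()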
@@ -550,7 +550,9 b' class workingbackend(fsbackend):' | |||||
550 | self.copied = [] |
|
550 | self.copied = [] | |
551 |
|
551 | |||
552 | def _checkknown(self, fname): |
|
552 | def _checkknown(self, fname): | |
553 | if self.repo.dirstate[fname] == b'?' and self.exists(fname): |
|
553 | if not self.repo.dirstate.get_entry(fname).any_tracked and self.exists( | |
|
554 | fname | |||
|
555 | ): | |||
554 | raise PatchError(_(b'cannot patch %s: file is not tracked') % fname) |
|
556 | raise PatchError(_(b'cannot patch %s: file is not tracked') % fname) | |
555 |
|
557 | |||
556 | def setfile(self, fname, data, mode, copysource): |
|
558 | def setfile(self, fname, data, mode, copysource): |
@@ -315,20 +315,19 b' def finddirs(path):' | |||||
315 | class dirs(object): |
|
315 | class dirs(object): | |
316 | '''a multiset of directory names from a set of file paths''' |
|
316 | '''a multiset of directory names from a set of file paths''' | |
317 |
|
317 | |||
318 | def __init__(self, map, skip=None): |
|
318 | def __init__(self, map, only_tracked=False): | |
319 | """ |
|
319 | """ | |
320 | a dict map indicates a dirstate while a list indicates a manifest |
|
320 | a dict map indicates a dirstate while a list indicates a manifest | |
321 | """ |
|
321 | """ | |
322 | self._dirs = {} |
|
322 | self._dirs = {} | |
323 | addpath = self.addpath |
|
323 | addpath = self.addpath | |
324 | if isinstance(map, dict) and skip is not None: |
|
324 | if isinstance(map, dict) and only_tracked: | |
325 | for f, s in pycompat.iteritems(map): |
|
325 | for f, s in pycompat.iteritems(map): | |
326 | if s.state != skip: |
|
326 | if s.state != b'r': | |
327 | addpath(f) |
|
327 | addpath(f) | |
328 | elif skip is not None: |
|
328 | elif only_tracked: | |
329 | raise error.ProgrammingError( |
|
329 | msg = b"`only_tracked` is only supported with a dict source" | |
330 | b"skip character is only supported with a dict source" |
|
330 | raise error.ProgrammingError(msg) | |
331 | ) |
|
|||
332 | else: |
|
331 | else: | |
333 | for f in map: |
|
332 | for f in map: | |
334 | addpath(f) |
|
333 | addpath(f) |
@@ -7,6 +7,7 b'' | |||||
7 |
|
7 | |||
8 | from __future__ import absolute_import |
|
8 | from __future__ import absolute_import | |
9 |
|
9 | |||
|
10 | import stat | |||
10 | import struct |
|
11 | import struct | |
11 | import zlib |
|
12 | import zlib | |
12 |
|
13 | |||
@@ -43,29 +44,143 b' NONNORMAL = -1' | |||||
43 | # a special value used internally for `time` if the time is ambiguous |
|
44 | # a special value used internally for `time` if the time is ambiguous | |
44 | AMBIGUOUS_TIME = -1 |
|
45 | AMBIGUOUS_TIME = -1 | |
45 |
|
46 | |||
|
47 | # Bits of the `flags` byte inside a node in the file format | |||
|
48 | DIRSTATE_V2_WDIR_TRACKED = 1 << 0 | |||
|
49 | DIRSTATE_V2_P1_TRACKED = 1 << 1 | |||
|
50 | DIRSTATE_V2_P2_INFO = 1 << 2 | |||
|
51 | DIRSTATE_V2_MODE_EXEC_PERM = 1 << 3 | |||
|
52 | DIRSTATE_V2_MODE_IS_SYMLINK = 1 << 4 | |||
|
53 | DIRSTATE_V2_HAS_FALLBACK_EXEC = 1 << 5 | |||
|
54 | DIRSTATE_V2_FALLBACK_EXEC = 1 << 6 | |||
|
55 | DIRSTATE_V2_HAS_FALLBACK_SYMLINK = 1 << 7 | |||
|
56 | DIRSTATE_V2_FALLBACK_SYMLINK = 1 << 8 | |||
|
57 | DIRSTATE_V2_EXPECTED_STATE_IS_MODIFIED = 1 << 9 | |||
|
58 | DIRSTATE_V2_HAS_MODE_AND_SIZE = 1 << 10 | |||
|
59 | DIRSTATE_V2_HAS_MTIME = 1 << 11 | |||
|
60 | DIRSTATE_V2_MTIME_SECOND_AMBIGUOUS = 1 << 12 | |||
|
61 | DIRSTATE_V2_DIRECTORY = 1 << 13 | |||
|
62 | DIRSTATE_V2_ALL_UNKNOWN_RECORDED = 1 << 14 | |||
|
63 | DIRSTATE_V2_ALL_IGNORED_RECORDED = 1 << 15 | |||
|
64 | ||||
46 |
|
65 | |||
47 | @attr.s(slots=True, init=False) |
|
66 | @attr.s(slots=True, init=False) | |
48 | class DirstateItem(object): |
|
67 | class DirstateItem(object): | |
49 | """represent a dirstate entry |
|
68 | """represent a dirstate entry | |
50 |
|
69 | |||
51 | It contains: |
|
70 | It holds multiple attributes | |
|
71 | ||||
|
72 | # about file tracking | |||
|
73 | - wc_tracked: is the file tracked by the working copy | |||
|
74 | - p1_tracked: is the file tracked in working copy first parent | |||
|
75 | - p2_info: the file has been involved in some merge operation. Either | |||
|
76 | because it was actually merged, or because the p2 version was | |||
|
77 | ahead, or because some rename moved it there. In either case | |||
|
78 | `hg status` will want it displayed as modified. | |||
52 |
|
79 | |||
53 | - state (one of 'n', 'a', 'r', 'm') |
|
80 | # about the file state expected from p1 manifest: | |
54 | - mode, |
|
81 | - mode: the file mode in p1 | |
55 | - size, |
|
82 | - size: the file size in p1 | |
56 | - mtime, |
|
83 | ||
|
84 | These values can be set to None, which means we don't have a meaningful value | |||
|
85 | to compare with. Either because we don't really care about them as the | |||
|
86 | `status` is known without having to look at the disk or because we don't | |||
|
87 | know these right now and a full comparison will be needed to find out if | |||
|
88 | the file is clean. | |||
|
89 | ||||
|
90 | # about the file state on disk last time we saw it: | |||
|
91 | - mtime: the last known clean mtime for the file. | |||
|
92 | ||||
|
93 | This value can be set to None if no cacheable state exists. Either because we | |||
|
94 | do not care (see previous section) or because we could not cache something | |||
|
95 | yet. | |||
57 | """ |
|
96 | """ | |
58 |
|
97 | |||
59 | _state = attr.ib() |
|
98 | _wc_tracked = attr.ib() | |
|
99 | _p1_tracked = attr.ib() | |||
|
100 | _p2_info = attr.ib() | |||
60 | _mode = attr.ib() |
|
101 | _mode = attr.ib() | |
61 | _size = attr.ib() |
|
102 | _size = attr.ib() | |
62 | _mtime = attr.ib() |
|
103 | _mtime_s = attr.ib() | |
|
104 | _mtime_ns = attr.ib() | |||
|
105 | _fallback_exec = attr.ib() | |||
|
106 | _fallback_symlink = attr.ib() | |||
|
107 | ||||
|
108 | def __init__( | |||
|
109 | self, | |||
|
110 | wc_tracked=False, | |||
|
111 | p1_tracked=False, | |||
|
112 | p2_info=False, | |||
|
113 | has_meaningful_data=True, | |||
|
114 | has_meaningful_mtime=True, | |||
|
115 | parentfiledata=None, | |||
|
116 | fallback_exec=None, | |||
|
117 | fallback_symlink=None, | |||
|
118 | ): | |||
|
119 | self._wc_tracked = wc_tracked | |||
|
120 | self._p1_tracked = p1_tracked | |||
|
121 | self._p2_info = p2_info | |||
|
122 | ||||
|
123 | self._fallback_exec = fallback_exec | |||
|
124 | self._fallback_symlink = fallback_symlink | |||
|
125 | ||||
|
126 | self._mode = None | |||
|
127 | self._size = None | |||
|
128 | self._mtime_s = None | |||
|
129 | self._mtime_ns = None | |||
|
130 | if parentfiledata is None: | |||
|
131 | has_meaningful_mtime = False | |||
|
132 | has_meaningful_data = False | |||
|
133 | if has_meaningful_data: | |||
|
134 | self._mode = parentfiledata[0] | |||
|
135 | self._size = parentfiledata[1] | |||
|
136 | if has_meaningful_mtime: | |||
|
137 | self._mtime_s, self._mtime_ns = parentfiledata[2] | |||
63 |
|
138 | |||
64 | def __init__(self, state, mode, size, mtime): |
|
139 | @classmethod | |
65 | self._state = state |
|
140 | def from_v2_data(cls, flags, size, mtime_s, mtime_ns): | |
66 | self._mode = mode |
|
141 | """Build a new DirstateItem object from V2 data""" | |
67 | self._size = size |
|
142 | has_mode_size = bool(flags & DIRSTATE_V2_HAS_MODE_AND_SIZE) | |
68 | self._mtime = mtime |
|
143 | has_meaningful_mtime = bool(flags & DIRSTATE_V2_HAS_MTIME) | |
|
144 | if flags & DIRSTATE_V2_MTIME_SECOND_AMBIGUOUS: | |||
|
145 | # The current code is not able to do the more subtle comparison that the | |||
|
146 | # MTIME_SECOND_AMBIGUOUS requires. So we ignore the mtime | |||
|
147 | has_meaningful_mtime = False | |||
|
148 | mode = None | |||
|
149 | ||||
|
150 | if flags & DIRSTATE_V2_EXPECTED_STATE_IS_MODIFIED: | |||
|
151 | # we do not have support for this flag in the code yet, | |||
|
152 | # force a lookup for this file. | |||
|
153 | has_mode_size = False | |||
|
154 | has_meaningful_mtime = False | |||
|
155 | ||||
|
156 | fallback_exec = None | |||
|
157 | if flags & DIRSTATE_V2_HAS_FALLBACK_EXEC: | |||
|
158 | fallback_exec = flags & DIRSTATE_V2_FALLBACK_EXEC | |||
|
159 | ||||
|
160 | fallback_symlink = None | |||
|
161 | if flags & DIRSTATE_V2_HAS_FALLBACK_SYMLINK: | |||
|
162 | fallback_symlink = flags & DIRSTATE_V2_FALLBACK_SYMLINK | |||
|
163 | ||||
|
164 | if has_mode_size: | |||
|
165 | assert stat.S_IXUSR == 0o100 | |||
|
166 | if flags & DIRSTATE_V2_MODE_EXEC_PERM: | |||
|
167 | mode = 0o755 | |||
|
168 | else: | |||
|
169 | mode = 0o644 | |||
|
170 | if flags & DIRSTATE_V2_MODE_IS_SYMLINK: | |||
|
171 | mode |= stat.S_IFLNK | |||
|
172 | else: | |||
|
173 | mode |= stat.S_IFREG | |||
|
174 | return cls( | |||
|
175 | wc_tracked=bool(flags & DIRSTATE_V2_WDIR_TRACKED), | |||
|
176 | p1_tracked=bool(flags & DIRSTATE_V2_P1_TRACKED), | |||
|
177 | p2_info=bool(flags & DIRSTATE_V2_P2_INFO), | |||
|
178 | has_meaningful_data=has_mode_size, | |||
|
179 | has_meaningful_mtime=has_meaningful_mtime, | |||
|
180 | parentfiledata=(mode, size, (mtime_s, mtime_ns)), | |||
|
181 | fallback_exec=fallback_exec, | |||
|
182 | fallback_symlink=fallback_symlink, | |||
|
183 | ) | |||
69 |
|
184 | |||
70 | @classmethod |
|
185 | @classmethod | |
71 | def from_v1_data(cls, state, mode, size, mtime): |
|
186 | def from_v1_data(cls, state, mode, size, mtime): | |
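The constants defined at the top of this file make the first field of every on-disk node a plain bitset; a hedged decoding example, assuming those constants are in scope (the flags value is fabricated):

    flags = DIRSTATE_V2_WDIR_TRACKED | DIRSTATE_V2_P1_TRACKED
    wc_tracked = bool(flags & DIRSTATE_V2_WDIR_TRACKED)          # True
    p1_tracked = bool(flags & DIRSTATE_V2_P1_TRACKED)            # True
    p2_info = bool(flags & DIRSTATE_V2_P2_INFO)                  # False
    has_mode_size = bool(flags & DIRSTATE_V2_HAS_MODE_AND_SIZE)  # False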
@@ -74,12 +189,41 b' class DirstateItem(object):' | |||||
74 | Since the dirstate-v1 format is frozen, the signature of this function |
|
189 | Since the dirstate-v1 format is frozen, the signature of this function | |
75 | is not expected to change, unlike the __init__ one. |
|
190 | is not expected to change, unlike the __init__ one. | |
76 | """ |
|
191 | """ | |
77 | return cls( |
|
192 | if state == b'm': | |
78 | state=state, |
|
193 | return cls(wc_tracked=True, p1_tracked=True, p2_info=True) | |
79 | mode=mode, |
|
194 | elif state == b'a': | |
80 | size=size, |
|
195 | return cls(wc_tracked=True) | |
81 | mtime=mtime, |
|
196 | elif state == b'r': | |
82 | ) |
|
197 | if size == NONNORMAL: | |
|
198 | p1_tracked = True | |||
|
199 | p2_info = True | |||
|
200 | elif size == FROM_P2: | |||
|
201 | p1_tracked = False | |||
|
202 | p2_info = True | |||
|
203 | else: | |||
|
204 | p1_tracked = True | |||
|
205 | p2_info = False | |||
|
206 | return cls(p1_tracked=p1_tracked, p2_info=p2_info) | |||
|
207 | elif state == b'n': | |||
|
208 | if size == FROM_P2: | |||
|
209 | return cls(wc_tracked=True, p2_info=True) | |||
|
210 | elif size == NONNORMAL: | |||
|
211 | return cls(wc_tracked=True, p1_tracked=True) | |||
|
212 | elif mtime == AMBIGUOUS_TIME: | |||
|
213 | return cls( | |||
|
214 | wc_tracked=True, | |||
|
215 | p1_tracked=True, | |||
|
216 | has_meaningful_mtime=False, | |||
|
217 | parentfiledata=(mode, size, (42, 0)), | |||
|
218 | ) | |||
|
219 | else: | |||
|
220 | return cls( | |||
|
221 | wc_tracked=True, | |||
|
222 | p1_tracked=True, | |||
|
223 | parentfiledata=(mode, size, (mtime, 0)), | |||
|
224 | ) | |||
|
225 | else: | |||
|
226 | raise RuntimeError(b'unknown state: %s' % state) | |||
83 |
|
227 | |||
84 | def set_possibly_dirty(self): |
|
228 | def set_possibly_dirty(self): | |
85 | """Mark a file as "possibly dirty" |
|
229 | """Mark a file as "possibly dirty" | |
@@ -87,39 +231,80 b' class DirstateItem(object):' | |||||
87 | This means the next status call will have to actually check its content |
|
231 | This means the next status call will have to actually check its content | |
88 | to make sure it is correct. |
|
232 | to make sure it is correct. | |
89 | """ |
|
233 | """ | |
90 | self._mtime = AMBIGUOUS_TIME |
|
234 | self._mtime_s = None | |
|
235 | self._mtime_ns = None | |||
|
236 | ||||
|
237 | def set_clean(self, mode, size, mtime): | |||
|
238 | """mark a file as "clean" cancelling potential "possibly dirty call" | |||
|
239 | ||||
|
240 | Note: this function is a descendant of `dirstate.normal` and is | |||
|
241 | currently expected to be called on "normal" entries only. There is no | |||
|
242 | reason for this not to change in the future as long as the code is | |||
|
243 | updated to preserve the proper state of the non-normal files. | |||
|
244 | """ | |||
|
245 | self._wc_tracked = True | |||
|
246 | self._p1_tracked = True | |||
|
247 | self._mode = mode | |||
|
248 | self._size = size | |||
|
249 | self._mtime_s, self._mtime_ns = mtime | |||
|
250 | ||||
|
251 | def set_tracked(self): | |||
|
252 | """mark a file as tracked in the working copy | |||
91 |
|
|
253 | ||
92 | def __getitem__(self, idx): |
|
254 | This will ultimately be called by command like `hg add`. | |
93 | if idx == 0 or idx == -4: |
|
255 | """ | |
94 | msg = b"do not use item[x], use item.state" |
|
256 | self._wc_tracked = True | |
95 | util.nouideprecwarn(msg, b'6.0', stacklevel=2) |
|
257 | # `set_tracked` is replacing various `normallookup` call. So we mark | |
96 | return self._state |
|
258 | # the files as needing lookup | |
97 | elif idx == 1 or idx == -3: |
|
259 | # | |
98 | msg = b"do not use item[x], use item.mode" |
|
260 | # Consider dropping this in the future in favor of something less broad. | |
99 | util.nouideprecwarn(msg, b'6.0', stacklevel=2) |
|
261 | self._mtime_s = None | |
100 | return self._mode |
|
262 | self._mtime_ns = None | |
101 | elif idx == 2 or idx == -2: |
|
263 | ||
102 | msg = b"do not use item[x], use item.size" |
|
264 | def set_untracked(self): | |
103 | util.nouideprecwarn(msg, b'6.0', stacklevel=2) |
|
265 | """mark a file as untracked in the working copy | |
104 | return self._size |
|
266 | ||
105 | elif idx == 3 or idx == -1: |
|
267 | This will ultimately be called by command like `hg remove`. | |
106 | msg = b"do not use item[x], use item.mtime" |
|
268 | """ | |
107 | util.nouideprecwarn(msg, b'6.0', stacklevel=2) |
|
269 | self._wc_tracked = False | |
108 | return self._mtime |
|
270 | self._mode = None | |
109 |
|
|
271 | self._size = None | |
110 | raise IndexError(idx) |
|
272 | self._mtime_s = None | |
|
273 | self._mtime_ns = None | |||
|
274 | ||||
|
275 | def drop_merge_data(self): | |||
|
276 | """remove all "merge-only" from a DirstateItem | |||
|
277 | ||||
|
278 | This is to be call by the dirstatemap code when the second parent is dropped | |||
|
279 | """ | |||
|
280 | if self._p2_info: | |||
|
281 | self._p2_info = False | |||
|
282 | self._mode = None | |||
|
283 | self._size = None | |||
|
284 | self._mtime_s = None | |||
|
285 | self._mtime_ns = None | |||
111 |
|
286 | |||
112 | @property |
|
287 | @property | |
113 | def mode(self): |
|
288 | def mode(self): | |
114 | return self._mode |
|
289 | return self.v1_mode() | |
115 |
|
290 | |||
116 | @property |
|
291 | @property | |
117 | def size(self): |
|
292 | def size(self): | |
118 | return self._size |
|
293 | return self.v1_size() | |
119 |
|
294 | |||
120 | @property |
|
295 | @property | |
121 | def mtime(self): |
|
296 | def mtime(self): | |
122 | return self._mtime |
|
297 | return self.v1_mtime() | |
|
298 | ||||
|
299 | def mtime_likely_equal_to(self, other_mtime): | |||
|
300 | self_sec = self._mtime_s | |||
|
301 | if self_sec is None: | |||
|
302 | return False | |||
|
303 | self_ns = self._mtime_ns | |||
|
304 | other_sec, other_ns = other_mtime | |||
|
305 | return self_sec == other_sec and ( | |||
|
306 | self_ns == other_ns or self_ns == 0 or other_ns == 0 | |||
|
307 | ) | |||
123 |
|
308 | |||
124 | @property |
|
309 | @property | |
125 | def state(self): |
|
310 | def state(self): | |
@@ -134,94 +319,224 b' class DirstateItem(object):' | |||||
134 | dirstatev1 format. It would make sense to ultimately deprecate it in |
|
319 | dirstatev1 format. It would make sense to ultimately deprecate it in | |
135 | favor of the more "semantic" attributes. |
|
320 | favor of the more "semantic" attributes. | |
136 | """ |
|
321 | """ | |
137 | return self._state |
|
322 | if not self.any_tracked: | |
|
323 | return b'?' | |||
|
324 | return self.v1_state() | |||
|
325 | ||||
|
326 | @property | |||
|
327 | def has_fallback_exec(self): | |||
|
328 | """True if "fallback" information are available for the "exec" bit | |||
|
329 | ||||
|
330 | Fallback information can be stored in the dirstate to keep track of | |||
|
331 | filesystem attribute tracked by Mercurial when the underlying file | |||
|
332 | system or operating system does not support that property, (e.g. | |||
|
333 | Windows). | |||
|
334 | ||||
|
335 | Not all version of the dirstate on-disk storage support preserving this | |||
|
336 | information. | |||
|
337 | """ | |||
|
338 | return self._fallback_exec is not None | |||
|
339 | ||||
|
340 | @property | |||
|
341 | def fallback_exec(self): | |||
|
342 | """ "fallback" information for the executable bit | |||
|
343 | ||||
|
344 | True if the file should be considered executable when we cannot get | |||
|
345 | this information from the files system. False if it should be | |||
|
346 | considered non-executable. | |||
|
347 | ||||
|
348 | See has_fallback_exec for details.""" | |||
|
349 | return self._fallback_exec | |||
|
350 | ||||
|
351 | @fallback_exec.setter | |||
|
352 | def set_fallback_exec(self, value): | |||
|
353 | """control "fallback" executable bit | |||
|
354 | ||||
|
355 | Set to: | |||
|
356 | - True if the file should be considered executable, | |||
|
357 | - False if the file should be considered non-executable, | |||
|
358 | - None if we do not have valid fallback data. | |||
|
359 | ||||
|
360 | See has_fallback_exec for details.""" | |||
|
361 | if value is None: | |||
|
362 | self._fallback_exec = None | |||
|
363 | else: | |||
|
364 | self._fallback_exec = bool(value) | |||
|
365 | ||||
|
366 | @property | |||
|
367 | def has_fallback_symlink(self): | |||
|
368 | """True if "fallback" information are available for symlink status | |||
|
369 | ||||
|
370 | Fallback information can be stored in the dirstate to keep track of | |||
|
371 | filesystem attribute tracked by Mercurial when the underlying file | |||
|
372 | system or operating system does not support that property, (e.g. | |||
|
373 | Windows). | |||
|
374 | ||||
|
375 | Not all version of the dirstate on-disk storage support preserving this | |||
|
376 | information.""" | |||
|
377 | return self._fallback_symlink is not None | |||
|
378 | ||||
|
379 | @property | |||
|
380 | def fallback_symlink(self): | |||
|
381 | """ "fallback" information for symlink status | |||
|
382 | ||||
|
383 | True if the file should be considered a symlink when we cannot get | |||
|
384 | this information from the file system. False if it should be | |||
|
385 | considered not a symlink. | |||
|
386 | ||||
|
387 | See has_fallback_exec for details.""" | |||
|
388 | return self._fallback_symlink | |||
|
389 | ||||
|
390 | @fallback_symlink.setter | |||
|
391 | def set_fallback_symlink(self, value): | |||
|
392 | """control "fallback" symlink status | |||
|
393 | ||||
|
394 | Set to: | |||
|
395 | - True if the file should be considered a symlink, | |||
|
396 | - False if the file should be considered not a symlink, | |||
|
397 | - None if we do not have valid fallback data. | |||
|
398 | ||||
|
399 | See has_fallback_symlink for details.""" | |||
|
400 | if value is None: | |||
|
401 | self._fallback_symlink = None | |||
|
402 | else: | |||
|
403 | self._fallback_symlink = bool(value) | |||
138 |
|
404 | |||
139 | @property |
|
405 | @property | |
140 | def tracked(self): |
|
406 | def tracked(self): | |
141 | """True is the file is tracked in the working copy""" |
|
407 | """True is the file is tracked in the working copy""" | |
142 | return self._state != b'r' |
|
408 | return self._wc_tracked | |
|
409 | ||||
|
410 | @property | |||
|
411 | def any_tracked(self): | |||
|
412 | """True is the file is tracked anywhere (wc or parents)""" | |||
|
413 | return self._wc_tracked or self._p1_tracked or self._p2_info | |||
143 |
|
414 | |||
144 | @property |
|
415 | @property | |
145 | def added(self): |
|
416 | def added(self): | |
146 | """True if the file has been added""" |
|
417 | """True if the file has been added""" | |
147 | return self._state == b'a' |
|
418 | return self._wc_tracked and not (self._p1_tracked or self._p2_info) | |
148 |
|
||||
149 | @property |
|
|||
150 | def merged(self): |
|
|||
151 | """True if the file has been merged |
|
|||
152 |
|
||||
153 | Should only be set if a merge is in progress in the dirstate |
|
|||
154 | """ |
|
|||
155 | return self._state == b'm' |
|
|||
156 |
|
419 | |||
157 | @property |
|
420 | @property | |
158 | def from_p2(self): |
|
421 | def maybe_clean(self): | |
159 | """True if the file have been fetched from p2 during the current merge |
|
422 | """True if the file has a chance to be in the "clean" state""" | |
160 |
|
423 | if not self._wc_tracked: | ||
161 | This is only True is the file is currently tracked. |
|
424 | return False | |
162 |
|
425 | elif not self._p1_tracked: | ||
163 | Should only be set if a merge is in progress in the dirstate |
|
426 | return False | |
164 | """ |
|
427 | elif self._p2_info: | |
165 | return self._state == b'n' and self._size == FROM_P2 |
|
428 | return False | |
|
429 | return True | |||
166 |
|
430 | |||
167 | @property |
|
431 | @property | |
168 | def from_p2_removed(self): |
|
432 | def p1_tracked(self): | |
169 | """True if the file has been removed, but was "from_p2" initially |
|
433 | """True if the file is tracked in the first parent manifest""" | |
|
434 | return self._p1_tracked | |||
170 |
|
435 | |||
171 | This property seems like an abstraction leakage and should probably be |
|
436 | @property | |
172 | dealt in this class (or maybe the dirstatemap) directly. |
|
437 | def p2_info(self): | |
|
438 | """True if the file needed to merge or apply any input from p2 | |||
|
439 | ||||
|
440 | See the class documentation for details. | |||
173 | """ |
|
441 | """ | |
174 | return self._state == b'r' and self._size == FROM_P2 |
|
442 | return self._wc_tracked and self._p2_info | |
175 |
|
443 | |||
176 | @property |
|
444 | @property | |
177 | def removed(self): |
|
445 | def removed(self): | |
178 | """True if the file has been removed""" |
|
446 | """True if the file has been removed""" | |
179 | return self._state == b'r' |
|
447 | return not self._wc_tracked and (self._p1_tracked or self._p2_info) | |
180 |
|
||||
181 | @property |
|
|||
182 | def merged_removed(self): |
|
|||
183 | """True if the file has been removed, but was "merged" initially |
|
|||
184 |
|
||||
185 | This property seems like an abstraction leakage and should probably be |
|
|||
186 | dealt in this class (or maybe the dirstatemap) directly. |
|
|||
187 | """ |
|
|||
188 | return self._state == b'r' and self._size == NONNORMAL |
|
|||
189 |
|
448 | |||
190 | @property |
|
449 | def v2_data(self): | |
191 | def dm_nonnormal(self): |
|
450 | """Returns (flags, mode, size, mtime) for v2 serialization""" | |
192 | """True is the entry is non-normal in the dirstatemap sense |
|
451 | flags = 0 | |
193 |
|
452 | if self._wc_tracked: | ||
194 | There is no reason for any code, but the dirstatemap one to use this. |
|
453 | flags |= DIRSTATE_V2_WDIR_TRACKED | |
195 | """ |
|
454 | if self._p1_tracked: | |
196 | return self.state != b'n' or self.mtime == AMBIGUOUS_TIME |
|
455 | flags |= DIRSTATE_V2_P1_TRACKED | |
|
456 | if self._p2_info: | |||
|
457 | flags |= DIRSTATE_V2_P2_INFO | |||
|
458 | if self._mode is not None and self._size is not None: | |||
|
459 | flags |= DIRSTATE_V2_HAS_MODE_AND_SIZE | |||
|
460 | if self.mode & stat.S_IXUSR: | |||
|
461 | flags |= DIRSTATE_V2_MODE_EXEC_PERM | |||
|
462 | if stat.S_ISLNK(self.mode): | |||
|
463 | flags |= DIRSTATE_V2_MODE_IS_SYMLINK | |||
|
464 | if self._mtime_s is not None: | |||
|
465 | flags |= DIRSTATE_V2_HAS_MTIME | |||
197 |
|
466 | |||
198 | @property |
|
467 | if self._fallback_exec is not None: | |
199 | def dm_otherparent(self): |
|
468 | flags |= DIRSTATE_V2_HAS_FALLBACK_EXEC | |
200 | """True is the entry is `otherparent` in the dirstatemap sense |
|
469 | if self._fallback_exec: | |
|
470 | flags |= DIRSTATE_V2_FALLBACK_EXEC | |||
201 |
|
471 | |||
202 | There is no reason for any code, but the dirstatemap one to use this. |
|
472 | if self._fallback_symlink is not None: | |
203 | """ |
|
473 | flags |= DIRSTATE_V2_HAS_FALLBACK_SYMLINK | |
204 | return self._size == FROM_P2 |
|
474 | if self._fallback_symlink: | |
|
475 | flags |= DIRSTATE_V2_FALLBACK_SYMLINK | |||
|
476 | ||||
|
477 | # Note: we do not need to do anything regarding | |||
|
478 | # DIRSTATE_V2_ALL_UNKNOWN_RECORDED and DIRSTATE_V2_ALL_IGNORED_RECORDED | |||
|
479 | # since we never set _DIRSTATE_V2_HAS_DIRCTORY_MTIME | |||
|
480 | return (flags, self._size or 0, self._mtime_s or 0, self._mtime_ns or 0) | |||
205 |
|
481 | |||
206 | def v1_state(self): |
|
482 | def v1_state(self): | |
207 | """return a "state" suitable for v1 serialization""" |
|
483 | """return a "state" suitable for v1 serialization""" | |
208 | return self._state |
|
484 | if not self.any_tracked: | |
|
485 | # the object has no state to record, this is -currently- | |||
|
486 | # unsupported | |||
|
487 | raise RuntimeError('untracked item') | |||
|
488 | elif self.removed: | |||
|
489 | return b'r' | |||
|
490 | elif self._p1_tracked and self._p2_info: | |||
|
491 | return b'm' | |||
|
492 | elif self.added: | |||
|
493 | return b'a' | |||
|
494 | else: | |||
|
495 | return b'n' | |||
209 |
|
496 | |||
210 | def v1_mode(self): |
|
497 | def v1_mode(self): | |
211 | """return a "mode" suitable for v1 serialization""" |
|
498 | """return a "mode" suitable for v1 serialization""" | |
212 | return self._mode |
|
499 | return self._mode if self._mode is not None else 0 | |
213 |
|
500 | |||
214 | def v1_size(self): |
|
501 | def v1_size(self): | |
215 | """return a "size" suitable for v1 serialization""" |
|
502 | """return a "size" suitable for v1 serialization""" | |
216 | return self._size |
|
503 | if not self.any_tracked: | |
|
504 | # the object has no state to record, this is -currently- | |||
|
505 | # unsupported | |||
|
506 | raise RuntimeError('untracked item') | |||
|
507 | elif self.removed and self._p1_tracked and self._p2_info: | |||
|
508 | return NONNORMAL | |||
|
509 | elif self._p2_info: | |||
|
510 | return FROM_P2 | |||
|
511 | elif self.removed: | |||
|
512 | return 0 | |||
|
513 | elif self.added: | |||
|
514 | return NONNORMAL | |||
|
515 | elif self._size is None: | |||
|
516 | return NONNORMAL | |||
|
517 | else: | |||
|
518 | return self._size | |||
217 |
|
519 | |||
218 | def v1_mtime(self): |
|
520 | def v1_mtime(self): | |
219 | """return a "mtime" suitable for v1 serialization""" |
|
521 | """return a "mtime" suitable for v1 serialization""" | |
220 |
|
|
522 | if not self.any_tracked: | |
|
523 | # the object has no state to record, this is -currently- | |||
|
524 | # unsupported | |||
|
525 | raise RuntimeError('untracked item') | |||
|
526 | elif self.removed: | |||
|
527 | return 0 | |||
|
528 | elif self._mtime_s is None: | |||
|
529 | return AMBIGUOUS_TIME | |||
|
530 | elif self._p2_info: | |||
|
531 | return AMBIGUOUS_TIME | |||
|
532 | elif not self._p1_tracked: | |||
|
533 | return AMBIGUOUS_TIME | |||
|
534 | else: | |||
|
535 | return self._mtime_s | |||
221 |
|
536 | |||
222 | def need_delay(self, now): |
|
537 | def need_delay(self, now): | |
223 | """True if the stored mtime would be ambiguous with the current time""" |
|
538 | """True if the stored mtime would be ambiguous with the current time""" | |
224 | return self._state == b'n' and self._mtime == now |
|
539 | return self.v1_state() == b'n' and self._mtime_s == now[0] | |
225 |
|
540 | |||
226 |
|
541 | |||
227 | def gettype(q): |
|
542 | def gettype(q): | |
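Reviewer note: to sanity-check the flag packing in `v2_data()` above, here is a small self-contained sketch of consuming its return tuple. The bit values below are invented placeholders for illustration; only the constant names come from the diff.

    # Illustration only: these bit assignments are hypothetical, not the
    # real dirstate-v2 on-disk values.
    DIRSTATE_V2_WDIR_TRACKED = 1 << 0
    DIRSTATE_V2_P1_TRACKED = 1 << 1
    DIRSTATE_V2_P2_INFO = 1 << 2
    DIRSTATE_V2_HAS_MODE_AND_SIZE = 1 << 3
    DIRSTATE_V2_HAS_MTIME = 1 << 4

    def describe_flags(flags):
        """Return the names of the flag bits set in `flags`, for debugging."""
        known = [
            (DIRSTATE_V2_WDIR_TRACKED, 'wdir-tracked'),
            (DIRSTATE_V2_P1_TRACKED, 'p1-tracked'),
            (DIRSTATE_V2_P2_INFO, 'p2-info'),
            (DIRSTATE_V2_HAS_MODE_AND_SIZE, 'has-mode-and-size'),
            (DIRSTATE_V2_HAS_MTIME, 'has-mtime'),
        ]
        return [name for bit, name in known if flags & bit]

    # Per the return statement above, v2_data() yields a 4-tuple:
    flags, size, mtime_s, mtime_ns = (0b10011, 1024, 1650000000, 0)
    print(describe_flags(flags))  # ['wdir-tracked', 'p1-tracked', 'has-mtime']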
@@ -589,7 +904,6 b' def parse_dirstate(dmap, copymap, st):' | |||||
 
 
 def pack_dirstate(dmap, copymap, pl, now):
-    now = int(now)
     cs = stringio()
     write = cs.write
     write(b"".join(pl))
@@ -44,6 +44,7 b' if not ispy3:' | |||||
     FileNotFoundError = OSError
 
 else:
+    import builtins
     import concurrent.futures as futures
     import http.cookiejar as cookielib
     import http.client as httplib
@@ -55,7 +56,7 b' else:' | |||||
     def future_set_exception_info(f, exc_info):
         f.set_exception(exc_info[0])
 
-    FileNotFoundError = OSError
+    FileNotFoundError = builtins.FileNotFoundError
 
 
 def identity(a):
222 | >>> assert type(t) is bytes |
|
223 | >>> assert type(t) is bytes | |
223 | """ |
|
224 | """ | |
224 |
|
225 | |||
|
226 | # Trick pytype into not demanding Iterable[int] be passed to __new__(), | |||
|
227 | # since the appropriate bytes format is done internally. | |||
|
228 | # | |||
|
229 | # https://github.com/google/pytype/issues/500 | |||
|
230 | if TYPE_CHECKING: | |||
|
231 | ||||
|
232 | def __init__(self, s=b''): | |||
|
233 | pass | |||
|
234 | ||||
225 | def __new__(cls, s=b''): |
|
235 | def __new__(cls, s=b''): | |
226 | if isinstance(s, bytestr): |
|
236 | if isinstance(s, bytestr): | |
227 | return s |
|
237 | return s |
@@ -433,7 +433,7 b' def manifestrevlogs(repo):' | |||||
     if scmutil.istreemanifest(repo):
         # This logic is safe if treemanifest isn't enabled, but also
         # pointless, so we skip it if treemanifest isn't enabled.
-        for t, unencoded, encoded, size in repo.store.datafiles():
+        for t, unencoded, size in repo.store.datafiles():
             if unencoded.startswith(b'meta/') and unencoded.endswith(
                 b'00manifest.i'
             ):
441 | yield repo.manifestlog.getstorage(dir) |
|
441 | yield repo.manifestlog.getstorage(dir) | |
442 |
|
442 | |||
443 |
|
443 | |||
444 | def rebuildfncache(ui, repo): |
|
444 | def rebuildfncache(ui, repo, only_data=False): | |
445 | """Rebuilds the fncache file from repo history. |
|
445 | """Rebuilds the fncache file from repo history. | |
446 |
|
446 | |||
447 | Missing entries will be added. Extra entries will be removed. |
|
447 | Missing entries will be added. Extra entries will be removed. | |
@@ -465,28 +465,40 b' def rebuildfncache(ui, repo):' | |||||
465 | newentries = set() |
|
465 | newentries = set() | |
466 | seenfiles = set() |
|
466 | seenfiles = set() | |
467 |
|
467 | |||
468 | progress = ui.makeprogress( |
|
468 | if only_data: | |
469 | _(b'rebuilding'), unit=_(b'changesets'), total=len(repo) |
|
469 | # Trust the listing of .i from the fncache, but not the .d. This is | |
470 | ) |
|
470 | # much faster, because we only need to stat every possible .d files, | |
471 | for rev in repo: |
|
471 | # instead of reading the full changelog | |
472 | progress.update(rev) |
|
472 | for f in fnc: | |
|
473 | if f[:5] == b'data/' and f[-2:] == b'.i': | |||
|
474 | seenfiles.add(f[5:-2]) | |||
|
475 | newentries.add(f) | |||
|
476 | dataf = f[:-2] + b'.d' | |||
|
477 | if repo.store._exists(dataf): | |||
|
478 | newentries.add(dataf) | |||
|
479 | else: | |||
|
480 | progress = ui.makeprogress( | |||
|
481 | _(b'rebuilding'), unit=_(b'changesets'), total=len(repo) | |||
|
482 | ) | |||
|
483 | for rev in repo: | |||
|
484 | progress.update(rev) | |||
473 |
|
485 | |||
474 | ctx = repo[rev] |
|
486 | ctx = repo[rev] | |
475 | for f in ctx.files(): |
|
487 | for f in ctx.files(): | |
476 | # This is to minimize I/O. |
|
488 | # This is to minimize I/O. | |
477 | if f in seenfiles: |
|
489 | if f in seenfiles: | |
478 | continue |
|
490 | continue | |
479 | seenfiles.add(f) |
|
491 | seenfiles.add(f) | |
480 |
|
492 | |||
481 | i = b'data/%s.i' % f |
|
493 | i = b'data/%s.i' % f | |
482 | d = b'data/%s.d' % f |
|
494 | d = b'data/%s.d' % f | |
483 |
|
495 | |||
484 | if repo.store._exists(i): |
|
496 | if repo.store._exists(i): | |
485 | newentries.add(i) |
|
497 | newentries.add(i) | |
486 | if repo.store._exists(d): |
|
498 | if repo.store._exists(d): | |
487 | newentries.add(d) |
|
499 | newentries.add(d) | |
488 |
|
500 | |||
489 | progress.complete() |
|
501 | progress.complete() | |
490 |
|
502 | |||
491 | if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements: |
|
503 | if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements: | |
492 | # This logic is safe if treemanifest isn't enabled, but also |
|
504 | # This logic is safe if treemanifest isn't enabled, but also |
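Reviewer note: the `only_data` fast path relies on fncache naming conventions. A standalone sketch (entries invented) of the byte slicing it performs: `f[5:-2]` strips the `data/` prefix and `.i` suffix, and `f[:-2] + b'.d'` derives the data-file name from the index name.

    # Hypothetical fncache entries, mirroring the fast path above.
    fnc = [b'data/foo.txt.i', b'data/bar/baz.c.i', b'meta/dir/00manifest.i']

    for f in fnc:
        if f[:5] == b'data/' and f[-2:] == b'.i':
            tracked = f[5:-2]       # b'foo.txt', b'bar/baz.c'
            dataf = f[:-2] + b'.d'  # b'data/foo.txt.d', b'data/bar/baz.c.d'
            print(tracked, dataf)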
@@ -12,7 +12,7 b" DOTENCODE_REQUIREMENT = b'dotencode'" | |||||
 STORE_REQUIREMENT = b'store'
 FNCACHE_REQUIREMENT = b'fncache'
 
-DIRSTATE_V2_REQUIREMENT = b'exp-rc-dirstate-v2'
+DIRSTATE_V2_REQUIREMENT = b'dirstate-v2'
 
 # When narrowing is finalized and no longer subject to format changes,
 # we should move this to just "narrow" or similar.
@@ -2581,10 +2581,15 b' class revlog(object):' | |||||
         self._enforceinlinesize(transaction)
         if self._docket is not None:
             # revlog-v2 always has 3 writing handles, help Pytype
-            self._docket.index_end = self._writinghandles[0].tell()
-            self._docket.data_end = self._writinghandles[1].tell()
-            self._docket.sidedata_end = self._writinghandles[2].tell()
+            wh1 = self._writinghandles[0]
+            wh2 = self._writinghandles[1]
+            wh3 = self._writinghandles[2]
+            assert wh1 is not None
+            assert wh2 is not None
+            assert wh3 is not None
+            self._docket.index_end = wh1.tell()
+            self._docket.data_end = wh2.tell()
+            self._docket.sidedata_end = wh3.tell()
 
         nodemaputil.setup_persistent_nodemap(transaction, self)
 
@@ -826,7 +826,7 b' def repair_issue6528(' | |||||
     with context():
         files = list(
             (file_type, path)
-            for (file_type, path, _e, _s) in repo.store.datafiles()
+            for (file_type, path, _s) in repo.store.datafiles()
             if path.endswith(b'.i') and file_type & store.FILEFLAGS_FILELOG
         )
 
@@ -689,7 +689,7 b" def revsingle(repo, revspec, default=b'." | |||||
 
     l = revrange(repo, [revspec], localalias=localalias)
     if not l:
-        raise error.Abort(_(b'empty revision set'))
+        raise error.InputError(_(b'empty revision set'))
     return repo[l.last()]
 
 
@@ -710,7 +710,7 b' def revpair(repo, revs):' | |||||
     l = revrange(repo, revs)
 
     if not l:
-        raise error.Abort(_(b'empty revision range'))
+        raise error.InputError(_(b'empty revision range'))
 
     first = l.first()
     second = l.last()
@@ -720,7 +720,7 b' def revpair(repo, revs):' | |||||
         and len(revs) >= 2
         and not all(revrange(repo, [r]) for r in revs)
     ):
-        raise error.Abort(_(b'empty revision on one side of range'))
+        raise error.InputError(_(b'empty revision on one side of range'))
 
     # if top-level is range expression, the result must always be a pair
     if first == second and len(revs) == 1 and not _pairspec(revs[0]):
@@ -1211,9 +1211,9 b' def addremove(repo, matcher, prefix, uip' | |||||
     try:
         similarity = float(opts.get(b'similarity') or 0)
     except ValueError:
-        raise error.Abort(_(b'similarity must be a number'))
+        raise error.InputError(_(b'similarity must be a number'))
     if similarity < 0 or similarity > 100:
-        raise error.Abort(_(b'similarity must be between 0 and 100'))
+        raise error.InputError(_(b'similarity must be between 0 and 100'))
     similarity /= 100.0
 
     ret = 0
@@ -1327,17 +1327,17 b' def _interestingfiles(repo, matcher):' | |||||
         full=False,
     )
     for abs, st in pycompat.iteritems(walkresults):
-        dstate = dirstate[abs]
-        if dstate == b'?' and audit_path.check(abs):
+        entry = dirstate.get_entry(abs)
+        if (not entry.any_tracked) and audit_path.check(abs):
             unknown.append(abs)
-        elif dstate != b'r' and not st:
+        elif (not entry.removed) and not st:
             deleted.append(abs)
-        elif dstate == b'r' and st:
+        elif entry.removed and st:
             forgotten.append(abs)
         # for finding renames
-        elif dstate == b'r' and not st:
+        elif entry.removed and not st:
             removed.append(abs)
-        elif dstate == b'a':
+        elif entry.added:
             added.append(abs)
 
     return added, unknown, deleted, removed, forgotten
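Reviewer note: for readers used to the old one-letter dirstate states, a toy stand-in that spells out the entry predicates this hunk switches to. The mapping is my reading of the diff, not an authoritative table.

    class FakeEntry:
        """Invented stand-in for a dirstate entry, for illustration only."""

        def __init__(self, state):
            self._state = state  # old-style one-letter state

        @property
        def any_tracked(self):
            return self._state != b'?'

        @property
        def removed(self):
            return self._state == b'r'

        @property
        def added(self):
            return self._state == b'a'

    for state in (b'?', b'r', b'a', b'n'):
        e = FakeEntry(state)
        print(state, e.any_tracked, e.removed, e.added)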
@@ -1455,10 +1455,11 b' def dirstatecopy(ui, repo, wctx, src, ds' | |||||
1455 | """ |
|
1455 | """ | |
1456 | origsrc = repo.dirstate.copied(src) or src |
|
1456 | origsrc = repo.dirstate.copied(src) or src | |
1457 | if dst == origsrc: # copying back a copy? |
|
1457 | if dst == origsrc: # copying back a copy? | |
1458 | if repo.dirstate[dst] not in b'mn' and not dryrun: |
|
1458 | entry = repo.dirstate.get_entry(dst) | |
|
1459 | if (entry.added or not entry.tracked) and not dryrun: | |||
1459 | repo.dirstate.set_tracked(dst) |
|
1460 | repo.dirstate.set_tracked(dst) | |
1460 | else: |
|
1461 | else: | |
1461 |
if repo.dirstate |
|
1462 | if repo.dirstate.get_entry(origsrc).added and origsrc == src: | |
1462 | if not ui.quiet: |
|
1463 | if not ui.quiet: | |
1463 | ui.warn( |
|
1464 | ui.warn( | |
1464 | _( |
|
1465 | _( | |
@@ -1467,7 +1468,7 b' def dirstatecopy(ui, repo, wctx, src, ds' | |||||
                     )
                     % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))
                 )
-        if repo.dirstate[dst] in b'?r' and not dryrun:
+        if not repo.dirstate.get_entry(dst).tracked and not dryrun:
             wctx.add([dst])
     elif not dryrun:
         wctx.copy(origsrc, dst)
@@ -1504,7 +1505,7 b' def movedirstate(repo, newctx, match=Non' | |||||
     }
     # Adjust the dirstate copies
     for dst, src in pycompat.iteritems(copies):
-        if src not in newctx or dst in newctx or ds[dst] != b'a':
+        if src not in newctx or dst in newctx or not ds.get_entry(dst).added:
             src = None
         ds.copy(src, dst)
     repo._quick_access_changeid_invalidate()
@@ -472,7 +472,7 b' class basicstore(object):' | |||||
         return self.path + b'/' + encodedir(f)
 
     def _walk(self, relpath, recurse):
-        '''yields (revlog_type, unencoded, encoded, size)'''
+        '''yields (revlog_type, unencoded, size)'''
         path = self.path
         if relpath:
             path += b'/' + relpath
@@ -488,7 +488,7 b' class basicstore(object):' | |||||
             rl_type = is_revlog(f, kind, st)
             if rl_type is not None:
                 n = util.pconvert(fp[striplen:])
-                l.append((rl_type, decodedir(n), n, st.st_size))
+                l.append((rl_type, decodedir(n), st.st_size))
             elif kind == stat.S_IFDIR and recurse:
                 visit.append(fp)
     l.sort()
@@ -505,26 +505,32 b' class basicstore(object):' | |||||
         rootstore = manifest.manifestrevlog(repo.nodeconstants, self.vfs)
         return manifest.manifestlog(self.vfs, repo, rootstore, storenarrowmatch)
 
-    def datafiles(self, matcher=None):
+    def datafiles(self, matcher=None, undecodable=None):
+        """Like walk, but excluding the changelog and root manifest.
+
+        When [undecodable] is None, revlogs names that can't be
+        decoded cause an exception. When it is provided, it should
+        be a list and the filenames that can't be decoded are added
+        to it instead. This is very rarely needed."""
         files = self._walk(b'data', True) + self._walk(b'meta', True)
-        for (t, u, e, s) in files:
-            yield (FILEFLAGS_FILELOG | t, u, e, s)
+        for (t, u, s) in files:
+            yield (FILEFLAGS_FILELOG | t, u, s)
 
     def topfiles(self):
         # yield manifest before changelog
         files = reversed(self._walk(b'', False))
-        for (t, u, e, s) in files:
+        for (t, u, s) in files:
             if u.startswith(b'00changelog'):
-                yield (FILEFLAGS_CHANGELOG | t, u, e, s)
+                yield (FILEFLAGS_CHANGELOG | t, u, s)
             elif u.startswith(b'00manifest'):
-                yield (FILEFLAGS_MANIFESTLOG | t, u, e, s)
+                yield (FILEFLAGS_MANIFESTLOG | t, u, s)
             else:
-                yield (FILETYPE_OTHER | t, u, e, s)
+                yield (FILETYPE_OTHER | t, u, s)
 
     def walk(self, matcher=None):
         """return file related to data storage (ie: revlogs)
 
-        yields (file_type, unencoded, encoded, size)
+        yields (file_type, unencoded, size)
 
         if a matcher is passed, storage files of only those tracked paths
         are passed with matches the matcher
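Reviewer note: a sketch of the two calling conventions the new `undecodable` parameter allows, written against any of the store classes touched by this patch.

    def check_store_names(store):
        # Default, strict mode: store.datafiles() raises error.StorageError
        # at the first revlog name that cannot be decoded.
        #
        # Collecting mode, shown here: broken names are gathered instead of
        # raising, so the walk completes and they can be reported afterwards.
        undecodable = []
        for file_type, name, size in store.datafiles(undecodable=undecodable):
            pass  # consume entries as usual
        return undecodable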
@@ -574,15 +580,20 b' class encodedstore(basicstore):' | |||||
     # However that might change so we should probably add a test and encoding
     # decoding for it too. see issue6548
 
-    def datafiles(self, matcher=None):
-        for t, a, b, size in super(encodedstore, self).datafiles():
+    def datafiles(self, matcher=None, undecodable=None):
+        for t, f1, size in super(encodedstore, self).datafiles():
             try:
-                a = decodefilename(a)
+                f2 = decodefilename(f1)
             except KeyError:
-                a = None
-            if a is not None and not _matchtrackedpath(a, matcher):
+                if undecodable is None:
+                    msg = _(b'undecodable revlog name %s') % f1
+                    raise error.StorageError(msg)
+                else:
+                    undecodable.append(f1)
+                    continue
+            if not _matchtrackedpath(f2, matcher):
                 continue
-            yield t, a, b, size
+            yield t, f2, size
 
     def join(self, f):
         return self.path + b'/' + encodefilename(f)
@@ -770,7 +781,7 b' class fncachestore(basicstore):' | |||||
     def getsize(self, path):
         return self.rawvfs.stat(path).st_size
 
-    def datafiles(self, matcher=None):
+    def datafiles(self, matcher=None, undecodable=None):
         for f in sorted(self.fncache):
             if not _matchtrackedpath(f, matcher):
                 continue
@@ -779,7 +790,7 b' class fncachestore(basicstore):' | |||||
                 t = revlog_type(f)
                 assert t is not None, f
                 t |= FILEFLAGS_FILELOG
-                yield t, f, ef, self.getsize(ef)
+                yield t, f, self.getsize(ef)
             except OSError as err:
                 if err.errno != errno.ENOENT:
                     raise
@@ -248,7 +248,7 b' def generatev1(repo):' | |||||
     # Get consistent snapshot of repo, lock during scan.
     with repo.lock():
         repo.ui.debug(b'scanning\n')
-        for file_type, name, ename, size in _walkstreamfiles(repo):
+        for file_type, name, size in _walkstreamfiles(repo):
             if size:
                 entries.append((name, size))
                 total_bytes += size
@@ -650,7 +650,7 b' def _v2_walk(repo, includes, excludes, i' | |||||
     if includes or excludes:
         matcher = narrowspec.match(repo.root, includes, excludes)
 
-    for rl_type, name, ename, size in _walkstreamfiles(repo, matcher):
+    for rl_type, name, size in _walkstreamfiles(repo, matcher):
         if size:
             ft = _fileappend
             if rl_type & store.FILEFLAGS_VOLATILE:
@@ -8,6 +8,7 b' from . import (' | |||||
     error,
     hg,
     lock as lockmod,
+    logcmdutil,
     mergestate as mergestatemod,
     pycompat,
     registrar,
@@ -178,7 +179,7 b' def debugstrip(ui, repo, *revs, **opts):' | |||||
 
     cl = repo.changelog
     revs = list(revs) + opts.get(b'rev')
-    revs = set(scmutil.revrange(repo, revs))
+    revs = set(logcmdutil.revrange(repo, revs))
 
     with repo.wlock():
         bookmarks = set(opts.get(b'bookmark'))
@@ -255,7 +256,9 b' def debugstrip(ui, repo, *revs, **opts):' | |||||
 
     # reset files that only changed in the dirstate too
     dirstate = repo.dirstate
-    dirchanges = [f for f in dirstate if dirstate[f] != b'n']
+    dirchanges = [
+        f for f in dirstate if not dirstate.get_entry(f).maybe_clean
+    ]
     changedfiles.extend(dirchanges)
 
     repo.dirstate.rebuild(urev, uctx.manifest(), changedfiles)
@@ -178,7 +178,9 b' class dirstatev2(requirementformatvarian' | |||||
 
     description = _(
         b'version 1 of the dirstate file format requires '
-        b'reading and parsing it all at once.'
+        b'reading and parsing it all at once.\n'
+        b'Version 2 has a better structure,'
+        b'better information and lighter update mechanism'
     )
 
     upgrademessage = _(b'"hg status" will be faster')
@@ -201,7 +201,7 b' def _clonerevlogs(' | |||||
 
     # Perform a pass to collect metadata. This validates we can open all
     # source files and allows a unified progress bar to be displayed.
-    for rl_type, unencoded, encoded, size in alldatafiles:
+    for rl_type, unencoded, size in alldatafiles:
         if not rl_type & store.FILEFLAGS_REVLOG_MAIN:
             continue
 
@@ -638,7 +638,6 b' def upgrade_dirstate(ui, srcrepo, upgrad' | |||||
     )
 
     assert srcrepo.dirstate._use_dirstate_v2 == (old == b'v2')
-    srcrepo.dirstate._map._use_dirstate_tree = True
     srcrepo.dirstate._map.preload()
     srcrepo.dirstate._use_dirstate_v2 = new == b'v2'
     srcrepo.dirstate._map._use_dirstate_v2 = srcrepo.dirstate._use_dirstate_v2
@@ -449,8 +449,8 b' def mmapread(fp, size=None):' | |||||
         return b''
     elif size is None:
         size = 0
+    fd = getattr(fp, 'fileno', lambda: fp)()
     try:
-        fd = getattr(fp, 'fileno', lambda: fp)()
         return mmap.mmap(fd, size, access=mmap.ACCESS_READ)
     except ValueError:
         # Empty files cannot be mmapped, but mmapread should still work. Check
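Reviewer note: hoisting the `fileno()` lookup matters because the `except ValueError` is meant only for `mmap.mmap()` failing on empty files; an error from `fileno()` itself should now propagate unchanged. A sketch of the whole helper under that reading (the empty-file fallback is my reconstruction of the truncated context, not part of this hunk):

    import mmap
    import os

    def mmapread(fp, size=None):
        if size == 0:
            # size of 0 to mmap.mmap() means "all data"; return an empty
            # buffer directly instead.
            return b''
        elif size is None:
            size = 0
        fd = getattr(fp, 'fileno', lambda: fp)()  # outside the try block
        try:
            return mmap.mmap(fd, size, access=mmap.ACCESS_READ)
        except ValueError:
            # Empty files cannot be mmapped, but mmapread should still work.
            if os.fstat(fd).st_size == 0:
                return b''
            raise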
@@ -1225,6 +1225,8 b' def versiontuple(v=None, n=4):' | |||||
     if n == 4:
         return (vints[0], vints[1], vints[2], extra)
 
+    raise error.ProgrammingError(b"invalid version part request: %d" % n)
+
 
 def cachefunc(func):
     '''cache the result of function calls'''
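Reviewer note: expected behaviour around the new guard, based on `versiontuple`'s documented return shapes (assumed; its doctests are not part of this hunk):

    from mercurial import error, util

    assert util.versiontuple(b'6.1.2', n=2) == (6, 1)
    assert util.versiontuple(b'6.1.2', n=3) == (6, 1, 2)
    assert util.versiontuple(b'6.1.2', n=4) == (6, 1, 2, None)
    try:
        util.versiontuple(b'6.1.2', n=5)  # used to fall through silently
    except error.ProgrammingError:
        pass  # out-of-range part requests now fail loudly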
@@ -57,30 +57,11 b' else:' | |||||
 try:
     # importlib.resources exists from Python 3.7; see fallback in except clause
     # further down
-    from importlib import resources
-
-    from .. import encoding
+    from importlib import resources  # pytype: disable=import-error
 
     # Force loading of the resources module
     resources.open_binary  # pytype: disable=module-attr
 
-    def open_resource(package, name):
-        return resources.open_binary(  # pytype: disable=module-attr
-            pycompat.sysstr(package), pycompat.sysstr(name)
-        )
-
-    def is_resource(package, name):
-        return resources.is_resource(  # pytype: disable=module-attr
-            pycompat.sysstr(package), encoding.strfromlocal(name)
-        )
-
-    def contents(package):
-        # pytype: disable=module-attr
-        for r in resources.contents(pycompat.sysstr(package)):
-            # pytype: enable=module-attr
-            yield encoding.strtolocal(r)
-
-
 except (ImportError, AttributeError):
     # importlib.resources was not found (almost definitely because we're on a
     # Python version before 3.7)
@@ -102,3 +83,23 b' except (ImportError, AttributeError):' | |||||
 
     for p in os.listdir(path):
         yield pycompat.fsencode(p)
+
+
+else:
+    from .. import encoding
+
+    def open_resource(package, name):
+        return resources.open_binary(  # pytype: disable=module-attr
+            pycompat.sysstr(package), pycompat.sysstr(name)
+        )
+
+    def is_resource(package, name):
+        return resources.is_resource(  # pytype: disable=module-attr
+            pycompat.sysstr(package), encoding.strfromlocal(name)
+        )
+
+    def contents(package):
+        # pytype: disable=module-attr
+        for r in resources.contents(pycompat.sysstr(package)):
+            # pytype: enable=module-attr
+            yield encoding.strtolocal(r)
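Reviewer note: the restructuring relies on `try`/`except`/`else` semantics: the `else` suite runs only when the `try` body raised nothing, so the resource helpers end up defined in exactly one branch. A minimal self-contained illustration:

    try:
        from importlib import resources  # may be missing before Python 3.7
    except ImportError:
        def backend():
            return 'os.listdir fallback'
    else:
        def backend():
            return resources.__name__  # 'importlib.resources'

    print(backend())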
@@ -503,22 +503,17 b' def get_push_paths(repo, ui, dests):' | |||||
             yield path
 
 
-def get_pull_paths(repo, ui, sources, default_branches=()):
+def get_pull_paths(repo, ui, sources):
     """yields all the `(path, branch)` selected as pull source by `sources`"""
     if not sources:
         sources = [b'default']
     for source in sources:
         if source in ui.paths:
             for p in ui.paths[source]:
-                yield parseurl(p.rawloc, default_branches)
+                yield p
         else:
-            # Try to resolve as a local path or URI.
-            path = try_path(ui, source)
-            if path is not None:
-                url = path.rawloc
-            else:
-                url = source
-            yield parseurl(url, default_branches)
+            p = path(ui, None, source, validate_path=False)
+            yield p
 
 
 def get_unique_push_path(action, repo, ui, dest=None):
@@ -771,6 +766,28 b' def pushrevpathoption(ui, path, value):' | |||||
     return value
 
 
+SUPPORTED_BOOKMARKS_MODES = {
+    b'default',
+    b'mirror',
+    b'ignore',
+}
+
+
+@pathsuboption(b'bookmarks.mode', b'bookmarks_mode')
+def bookmarks_mode_option(ui, path, value):
+    if value not in SUPPORTED_BOOKMARKS_MODES:
+        path_name = path.name
+        if path_name is None:
+            # this is an "anonymous" path, config comes from the global one
+            path_name = b'*'
+        msg = _(b'(paths.%s:bookmarks.mode has unknown value: "%s")\n')
+        msg %= (path_name, value)
+        ui.warn(msg)
+    if value == b'default':
+        value = None
+    return value
+
+
 @pathsuboption(b'multi-urls', b'multi_urls')
 def multiurls_pathoption(ui, path, value):
     res = stringutil.parsebool(value)
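Reviewer note: because the new sub-option is registered through `pathsuboption`, it should be settable per path with the usual `name:sub-option` hgrc syntax. A plausible configuration example (path names invented, modes taken from `SUPPORTED_BOOKMARKS_MODES` above):

    [paths]
    default = https://example.com/repo
    default:bookmarks.mode = mirror
    backup = ssh://backup.example.com/repo
    backup:bookmarks.mode = ignore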
@@ -818,7 +835,14 b' def _chain_path(base_path, ui, paths):' | |||||
 class path(object):
     """Represents an individual path and its configuration."""
 
-    def __init__(self, ui=None, name=None, rawloc=None, suboptions=None):
+    def __init__(
+        self,
+        ui=None,
+        name=None,
+        rawloc=None,
+        suboptions=None,
+        validate_path=True,
+    ):
         """Construct a path from its config options.
 
         ``ui`` is the ``ui`` instance the path is coming from.
@@ -856,7 +880,8 b' class path(object):' | |||||
         self.rawloc = rawloc
         self.loc = b'%s' % u
 
-        self._validate_path()
+        if validate_path:
+            self._validate_path()
 
         _path, sub_opts = ui.configsuboptions(b'paths', b'*')
         self._own_sub_opts = {}
@@ -395,12 +395,13 b' class verifier(object):' | |||||
         storefiles = set()
         subdirs = set()
         revlogv1 = self.revlogv1
-        for t, f, f2, size in repo.store.datafiles():
-            if not f:
-                self._err(None, _(b"cannot decode filename '%s'") % f2)
-            elif (size > 0 or not revlogv1) and f.startswith(b'meta/'):
+        undecodable = []
+        for t, f, size in repo.store.datafiles(undecodable=undecodable):
+            if (size > 0 or not revlogv1) and f.startswith(b'meta/'):
                 storefiles.add(_normpath(f))
                 subdirs.add(os.path.dirname(f))
+        for f in undecodable:
+            self._err(None, _(b"cannot decode filename '%s'") % f)
         subdirprogress = ui.makeprogress(
             _(b'checking'), unit=_(b'manifests'), total=len(subdirs)
         )
@@ -459,11 +460,12 b' class verifier(object):' | |||||
         ui.status(_(b"checking files\n"))
 
         storefiles = set()
-        for rl_type, f, f2, size in repo.store.datafiles():
-            if not f:
-                self._err(None, _(b"cannot decode filename '%s'") % f2)
-            elif (size > 0 or not revlogv1) and f.startswith(b'data/'):
+        undecodable = []
+        for t, f, size in repo.store.datafiles(undecodable=undecodable):
+            if (size > 0 or not revlogv1) and f.startswith(b'data/'):
                 storefiles.add(_normpath(f))
+        for f in undecodable:
+            self._err(None, _(b"cannot decode filename '%s'") % f)
 
         state = {
             # TODO this assumes revlog storage for changelog.
@@ -175,7 +175,7 b" def posixfile(name, mode=b'r', buffering" | |||||
             return mixedfilemodewrapper(fp)
 
         return fp
-    except WindowsError as err:
+    except WindowsError as err:  # pytype: disable=name-error
         # convert to a friendlier exception
         raise IOError(
             err.errno, '%s: %s' % (encoding.strfromlocal(name), err.strerror)
@@ -44,13 +44,9 b' def batchable(f):' | |||||
     def sample(self, one, two=None):
         # Build list of encoded arguments suitable for your wire protocol:
         encoded_args = [('one', encode(one),), ('two', encode(two),)]
-        # Create future for injection of encoded result:
-        encoded_res_future = future()
-        # Return encoded arguments and future:
-        yield encoded_args, encoded_res_future
-        # Assuming the future to be filled with the result from the batched
-        # request now. Decode it:
-        yield decode(encoded_res_future.value)
+        # Return it, along with a function that will receive the result
+        # from the batched request.
+        return encoded_args, decode
 
     The decorator returns a function which wraps this coroutine as a plain
     method, but adds the original method as an attribute called "batchable",
@@ -59,29 +55,19 b' def batchable(f):' | |||||
59 | """ |
|
55 | """ | |
60 |
|
56 | |||
61 | def plain(*args, **opts): |
|
57 | def plain(*args, **opts): | |
62 |
|
|
58 | encoded_args_or_res, decode = f(*args, **opts) | |
63 | encoded_args_or_res, encoded_res_future = next(batchable) |
|
59 | if not decode: | |
64 | if not encoded_res_future: |
|
|||
65 | return encoded_args_or_res # a local result in this case |
|
60 | return encoded_args_or_res # a local result in this case | |
66 | self = args[0] |
|
61 | self = args[0] | |
67 | cmd = pycompat.bytesurl(f.__name__) # ensure cmd is ascii bytestr |
|
62 | cmd = pycompat.bytesurl(f.__name__) # ensure cmd is ascii bytestr | |
68 |
encoded_res |
|
63 | encoded_res = self._submitone(cmd, encoded_args_or_res) | |
69 | return next(batchable) |
|
64 | return decode(encoded_res) | |
70 |
|
65 | |||
71 | setattr(plain, 'batchable', f) |
|
66 | setattr(plain, 'batchable', f) | |
72 | setattr(plain, '__name__', f.__name__) |
|
67 | setattr(plain, '__name__', f.__name__) | |
73 | return plain |
|
68 | return plain | |
74 |
|
69 | |||
75 |
|
70 | |||
76 | class future(object): |
|
|||
77 | '''placeholder for a value to be set later''' |
|
|||
78 |
|
||||
79 | def set(self, value): |
|
|||
80 | if util.safehasattr(self, b'value'): |
|
|||
81 | raise error.RepoError(b"future is already set") |
|
|||
82 | self.value = value |
|
|||
83 |
|
||||
84 |
|
||||
85 | def encodebatchcmds(req): |
|
71 | def encodebatchcmds(req): | |
86 | """Return a ``cmds`` argument value for the ``batch`` command.""" |
|
72 | """Return a ``cmds`` argument value for the ``batch`` command.""" | |
87 | escapearg = wireprototypes.escapebatcharg |
|
73 | escapearg = wireprototypes.escapebatcharg | |
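Reviewer note: a hypothetical peer method written against the new contract described in the updated docstring, for comparison with the removed generator style (the command name and decoding logic are invented):

    @batchable
    def fakecommand(self, key):
        if not self.capable(b'fakecommand'):
            # A (value, None) pair is a local result; nothing hits the wire.
            return False, None

        def decode(d):
            # Called later with the raw wire response for this command.
            return d.strip()

        # Encoded arguments plus the decoder replace the two yields.
        return {b'key': encoding.fromlocal(key)}, decode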
@@ -248,25 +234,18 b' class peerexecutor(object):' | |||||
             continue
 
         try:
-            batchable = fn.batchable(
+            encoded_args_or_res, decode = fn.batchable(
                 fn.__self__, **pycompat.strkwargs(args)
             )
         except Exception:
             pycompat.future_set_exception_info(f, sys.exc_info()[1:])
             return
 
-        # Encoded arguments and future holding remote result.
-        try:
-            encoded_args_or_res, fremote = next(batchable)
-        except Exception:
-            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
-            return
-
-        if not fremote:
+        if not decode:
             f.set_result(encoded_args_or_res)
         else:
             requests.append((command, encoded_args_or_res))
-            states.append((command, f, batchable, fremote))
+            states.append((command, f, batchable, decode))
 
     if not requests:
         return
@@ -319,7 +298,7 b' class peerexecutor(object):' | |||||
     def _readbatchresponse(self, states, wireresults):
         # Executes in a thread to read data off the wire.
 
-        for command, f, batchable, fremote in states:
+        for command, f, batchable, decode in states:
             # Grab raw result off the wire and teach the internal future
             # about it.
             try:
@@ -334,11 +313,8 b' class peerexecutor(object):' | |||||
                     )
                 )
             else:
-                fremote.set(remoteresult)
-
-                # And ask the coroutine to decode that value.
                 try:
-                    result = next(batchable)
+                    result = decode(remoteresult)
                 except Exception:
                     pycompat.future_set_exception_info(f, sys.exc_info()[1:])
                 else:
@@ -369,87 +345,90 b' class wirepeer(repository.peer):' | |||||
     @batchable
     def lookup(self, key):
         self.requirecap(b'lookup', _(b'look up remote revision'))
-        f = future()
-        yield {b'key': encoding.fromlocal(key)}, f
-        d = f.value
-        success, data = d[:-1].split(b" ", 1)
-        if int(success):
-            yield bin(data)
-        else:
-            self._abort(error.RepoError(data))
+
+        def decode(d):
+            success, data = d[:-1].split(b" ", 1)
+            if int(success):
+                return bin(data)
+            else:
+                self._abort(error.RepoError(data))
+
+        return {b'key': encoding.fromlocal(key)}, decode
 
     @batchable
     def heads(self):
-        f = future()
-        yield {}, f
-        d = f.value
-        try:
-            yield wireprototypes.decodelist(d[:-1])
-        except ValueError:
-            self._abort(error.ResponseError(_(b"unexpected response:"), d))
+        def decode(d):
+            try:
+                return wireprototypes.decodelist(d[:-1])
+            except ValueError:
+                self._abort(error.ResponseError(_(b"unexpected response:"), d))
+
+        return {}, decode
 
     @batchable
     def known(self, nodes):
-        f = future()
-        yield {b'nodes': wireprototypes.encodelist(nodes)}, f
-        d = f.value
-        try:
-            yield [bool(int(b)) for b in pycompat.iterbytestr(d)]
-        except ValueError:
-            self._abort(error.ResponseError(_(b"unexpected response:"), d))
+        def decode(d):
+            try:
+                return [bool(int(b)) for b in pycompat.iterbytestr(d)]
+            except ValueError:
+                self._abort(error.ResponseError(_(b"unexpected response:"), d))
+
+        return {b'nodes': wireprototypes.encodelist(nodes)}, decode
 
     @batchable
     def branchmap(self):
-        f = future()
-        yield {}, f
-        d = f.value
-        try:
-            branchmap = {}
-            for branchpart in d.splitlines():
-                branchname, branchheads = branchpart.split(b' ', 1)
-                branchname = encoding.tolocal(urlreq.unquote(branchname))
-                branchheads = wireprototypes.decodelist(branchheads)
-                branchmap[branchname] = branchheads
-            yield branchmap
-        except TypeError:
-            self._abort(error.ResponseError(_(b"unexpected response:"), d))
+        def decode(d):
+            try:
+                branchmap = {}
+                for branchpart in d.splitlines():
+                    branchname, branchheads = branchpart.split(b' ', 1)
+                    branchname = encoding.tolocal(urlreq.unquote(branchname))
+                    branchheads = wireprototypes.decodelist(branchheads)
+                    branchmap[branchname] = branchheads
+                return branchmap
+            except TypeError:
+                self._abort(error.ResponseError(_(b"unexpected response:"), d))
+
+        return {}, decode
 
     @batchable
     def listkeys(self, namespace):
         if not self.capable(b'pushkey'):
-            yield {}, None
-        f = future()
+            return {}, None
         self.ui.debug(b'preparing listkeys for "%s"\n' % namespace)
-        yield {b'namespace': encoding.fromlocal(namespace)}, f
-        d = f.value
-        self.ui.debug(
-            b'received listkey for "%s": %i bytes\n' % (namespace, len(d))
-        )
-        yield pushkeymod.decodekeys(d)
+
+        def decode(d):
+            self.ui.debug(
+                b'received listkey for "%s": %i bytes\n' % (namespace, len(d))
+            )
+            return pushkeymod.decodekeys(d)
+
+        return {b'namespace': encoding.fromlocal(namespace)}, decode
 
     @batchable
     def pushkey(self, namespace, key, old, new):
         if not self.capable(b'pushkey'):
-            yield False, None
-        f = future()
+            return False, None
         self.ui.debug(b'preparing pushkey for "%s:%s"\n' % (namespace, key))
-        yield {
+
+        def decode(d):
+            d, output = d.split(b'\n', 1)
+            try:
+                d = bool(int(d))
+            except ValueError:
+                raise error.ResponseError(
+                    _(b'push failed (unexpected response):'), d
+                )
+            for l in output.splitlines(True):
+                self.ui.status(_(b'remote: '), l)
+            return d
+
+        return {
             b'namespace': encoding.fromlocal(namespace),
             b'key': encoding.fromlocal(key),
             b'old': encoding.fromlocal(old),
             b'new': encoding.fromlocal(new),
-        }, f
-        d = f.value
-        d, output = d.split(b'\n', 1)
-        try:
-            d = bool(int(d))
-        except ValueError:
-            raise error.ResponseError(
-                _(b'push failed (unexpected response):'), d
-            )
-        for l in output.splitlines(True):
-            self.ui.status(_(b'remote: '), l)
-        yield d
+        }, decode
 
     def stream_out(self):
         return self._callstream(b'stream_out')
@@ -1579,7 +1579,7 b' def rawstorefiledata(repo, proto, files,' | |||||
 
     # TODO this is a bunch of storage layer interface abstractions because
     # it assumes revlogs.
-    for rl_type, name, encodedname, size in topfiles:
+    for rl_type, name, size in topfiles:
         # XXX use the `rl_type` for that
         if b'changelog' in files and name.startswith(b'00changelog'):
             pass
@@ -1,26 +1,16 b'' | |||||
 == New Features ==
-* `debugrebuildfncache` now has an option to rebuild only the index files
 
 == Default Format Change ==
 
 These changes affects newly created repositories (or new clone) done with
-Mercurial 6.0.
+Mercurial XXX.
 
 
 == New Experimental Features ==
 
-* Added a new `web.full-garbage-collection-rate` to control performance. See
-  de2e04fe4897a554b9ef433167f11ea4feb2e09c for more information
-
 == Bug Fixes ==
 
-* `hg fix --working-dir` now correctly works when in an uncommitted merge state
-* `rhg` (Rust fast-path for `hg`) now supports the full config list syntax
-* `rhg` now parses some corner-cases for revsets correctly
-* `hg email -o` now works again when not mentioning a revision
-* Lots of Windows fixes
-* Lots of miscellaneous other fixes
 
 == Backwards Compatibility Changes ==
 
@@ -29,15 +19,4 b' Mercurial 6.0.' | |||||
 
 The following functions have been removed:
 
-* `dirstate.normal`
-* `dirstate.normallookup`
-* `dirstate.otherparent`
-* `dirstate.add`
-* `dirstate.remove`
-* `dirstate.drop`
-* `dirstate.__getitem__`
-
 Miscellaneous:
-
-* `wireprotov1peer`'s `batchable` is now a simple function and not a generator
-  anymore
\ No newline at end of file
|
@@ -157,9 +157,9 b' dependencies = [' | |||||
 
 [[package]]
 name = "cpython"
-version = "0.6.0"
+version = "0.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8094679a4e9bfc8035572162624bc800eda35b5f9eff2537b9cd9aacc3d9782e"
+checksum = "b7d46ba8ace7f3a1d204ac5060a706d0a68de6b42eafb6a586cc08bebcffe664"
 dependencies = [
  "libc",
  "num-traits",
@@ -374,6 +374,7 b' dependencies = [' | |||||
374 | name = "hg-core" |
|
374 | name = "hg-core" | |
375 | version = "0.1.0" |
|
375 | version = "0.1.0" | |
376 | dependencies = [ |
|
376 | dependencies = [ | |
|
377 | "bitflags", | |||
377 | "byteorder", |
|
378 | "byteorder", | |
378 | "bytes-cast", |
|
379 | "bytes-cast", | |
379 | "clap", |
|
380 | "clap", | |
@@ -385,8 +386,9 b' dependencies = [' | |||||
385 | "im-rc", |
|
386 | "im-rc", | |
386 | "itertools", |
|
387 | "itertools", | |
387 | "lazy_static", |
|
388 | "lazy_static", | |
|
389 | "libc", | |||
388 | "log", |
|
390 | "log", | |
389 | "memmap", |
|
391 | "memmap2", | |
390 | "micro-timer", |
|
392 | "micro-timer", | |
391 | "pretty_assertions", |
|
393 | "pretty_assertions", | |
392 | "rand", |
|
394 | "rand", | |
@@ -396,6 +398,7 b' dependencies = [' | |||||
396 | "regex", |
|
398 | "regex", | |
397 | "same-file", |
|
399 | "same-file", | |
398 | "sha-1", |
|
400 | "sha-1", | |
|
401 | "stable_deref_trait", | |||
399 | "tempfile", |
|
402 | "tempfile", | |
400 | "twox-hash", |
|
403 | "twox-hash", | |
401 | "zstd", |
|
404 | "zstd", | |
@@ -411,6 +414,7 b' dependencies = [' | |||||
411 | "hg-core", |
|
414 | "hg-core", | |
412 | "libc", |
|
415 | "libc", | |
413 | "log", |
|
416 | "log", | |
|
417 | "stable_deref_trait", | |||
414 | ] |
|
418 | ] | |
415 |
|
419 | |||
416 | [[package]] |
|
420 | [[package]] | |
@@ -508,13 +512,13 b' source = "registry+https://github.com/ru' | |||||
 checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525"
 
 [[package]]
-name = "memmap"
-version = "0.7.0"
+name = "memmap2"
+version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b"
+checksum = "de5d3112c080d58ce560081baeaab7e1e864ca21795ddbf533d5b1842bb1ecf8"
 dependencies = [
  "libc",
- "winapi",
+ "stable_deref_trait",
 ]
 
 [[package]]
@@ -649,9 +653,9 b' dependencies = [' | |||||
 
 [[package]]
 name = "python27-sys"
-version = "0.6.0"
+version = "0.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5826ddbc5366eb0b0492040fdc25bf50bb49092c192bd45e80fb7a24dc6832ab"
+checksum = "94670354e264300dde81a5864cbb6bfc9d56ac3dcf3a278c32cb52f816f4dfd1"
 dependencies = [
  "libc",
  "regex",
@@ -659,9 +663,9 b' dependencies = [' | |||||
 
 [[package]]
 name = "python3-sys"
-version = "0.6.0"
+version = "0.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b78af21b29594951a47fc3dac9b9eff0a3f077dec2f780ee943ae16a668f3b6a"
+checksum = "b18b32e64c103d5045f44644d7ddddd65336f7a0521f6fde673240a9ecceb77e"
 dependencies = [
  "libc",
  "regex",
@@ -865,6 +869,12 b' dependencies = [' | |||||
 ]
 
 [[package]]
+name = "stable_deref_trait"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3"
+
+[[package]]
 name = "static_assertions"
 version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -74,8 +74,8 b' Example usage:' | |||||
 Developing Rust
 ===============
 
-The current version of Rust in use is ``1.41.1``, because it's what Debian
-stable has. You can use ``rustup override set 1.41.1`` at the root of the repo
+The current version of Rust in use is ``1.48.0``, because it's what Debian
+stable has. You can use ``rustup override set 1.48.0`` at the root of the repo
 to make it easier on you.
 
 Go to the ``hg-cpython`` folder::
@@ -9,6 +9,7 b' edition = "2018"' | |||||
9 | name = "hg" |
|
9 | name = "hg" | |
10 |
|
10 | |||
11 | [dependencies] |
|
11 | [dependencies] | |
|
12 | bitflags = "1.2" | |||
12 | bytes-cast = "0.2" |
|
13 | bytes-cast = "0.2" | |
13 | byteorder = "1.3.4" |
|
14 | byteorder = "1.3.4" | |
14 | derive_more = "0.99" |
|
15 | derive_more = "0.99" | |
@@ -16,6 +17,7 b' home = "0.5"' | |||||
 im-rc = "15.0.*"
 itertools = "0.9"
 lazy_static = "1.4.0"
+libc = "0.2"
 rand = "0.7.3"
 rand_pcg = "0.2.1"
 rand_distr = "0.2.2"
@@ -24,11 +26,12 b' regex = "1.3.9"' | |||||
24 | sha-1 = "0.9.6" |
|
26 | sha-1 = "0.9.6" | |
25 | twox-hash = "1.5.0" |
|
27 | twox-hash = "1.5.0" | |
26 | same-file = "1.0.6" |
|
28 | same-file = "1.0.6" | |
|
29 | stable_deref_trait = "1.2.0" | |||
27 | tempfile = "3.1.0" |
|
30 | tempfile = "3.1.0" | |
28 | crossbeam-channel = "0.4" |
|
31 | crossbeam-channel = "0.4" | |
29 | micro-timer = "0.3.0" |
|
32 | micro-timer = "0.3.0" | |
30 | log = "0.4.8" |
|
33 | log = "0.4.8" | |
31 | memmap = "0.7.0" |
|
34 | memmap2 = {version = "0.4", features = ["stable_deref_trait"]} | |
32 | zstd = "0.5.3" |
|
35 | zstd = "0.5.3" | |
33 | format-bytes = "0.2.2" |
|
36 | format-bytes = "0.2.2" | |
34 |
|
37 |
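The memmap → memmap2 switch above pairs with the new `stable_deref_trait` dependency: memmap2's optional `stable_deref_trait` feature marks `Mmap` as `StableDeref`, so a live mapping can safely back owning wrappers such as a revlog index. A minimal sketch of the new API, assuming only the two crates pinned above (the file path is illustrative):

    use memmap2::Mmap;
    use stable_deref_trait::StableDeref;
    use std::fs::File;

    // Compiles only when memmap2's "stable_deref_trait" feature is on,
    // since that feature provides `impl StableDeref for Mmap`.
    fn assert_stable<T: StableDeref>(_: &T) {}

    fn main() -> std::io::Result<()> {
        let file = File::open("some/revlog.i")?;
        // Mapping is unsafe because the underlying file may be truncated
        // or modified while the mapping is live.
        let mmap = unsafe { Mmap::map(&file)? };
        assert_stable(&mmap);
        println!("mapped {} bytes", mmap.len());
        Ok(())
    }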
@@ -5,7 +5,7 b'' | |||||
5 |
|
5 | |||
6 | //! Minimal `RevlogIndex`, readable from standard Mercurial file format |
|
6 | //! Minimal `RevlogIndex`, readable from standard Mercurial file format | |
7 | use hg::*; |
|
7 | use hg::*; | |
8 | use memmap::*; |
|
8 | use memmap2::*; | |
9 | use std::fs::File; |
|
9 | use std::fs::File; | |
10 | use std::ops::Deref; |
|
10 | use std::ops::Deref; | |
11 | use std::path::Path; |
|
11 | use std::path::Path; |
@@ -7,7 +7,7 b' use clap::*;' | |||||
7 | use hg::revlog::node::*; |
|
7 | use hg::revlog::node::*; | |
8 | use hg::revlog::nodemap::*; |
|
8 | use hg::revlog::nodemap::*; | |
9 | use hg::revlog::*; |
|
9 | use hg::revlog::*; | |
10 | use memmap::MmapOptions; |
|
10 | use memmap2::MmapOptions; | |
11 | use rand::Rng; |
|
11 | use rand::Rng; | |
12 | use std::fs::File; |
|
12 | use std::fs::File; | |
13 | use std::io; |
|
13 | use std::io; |
@@ -13,7 +13,6 b' use crate::config::layer::{' | |||||
13 | ConfigError, ConfigLayer, ConfigOrigin, ConfigValue, |
|
13 | ConfigError, ConfigLayer, ConfigOrigin, ConfigValue, | |
14 | }; |
|
14 | }; | |
15 | use crate::utils::files::get_bytes_from_os_str; |
|
15 | use crate::utils::files::get_bytes_from_os_str; | |
16 | use crate::utils::SliceExt; |
|
|||
17 | use format_bytes::{write_bytes, DisplayBytes}; |
|
16 | use format_bytes::{write_bytes, DisplayBytes}; | |
18 | use std::collections::HashSet; |
|
17 | use std::collections::HashSet; | |
19 | use std::env; |
|
18 | use std::env; | |
@@ -362,30 +361,14 b' impl Config {' | |||||
362 | Ok(self.get_option(section, item)?.unwrap_or(false)) |
|
361 | Ok(self.get_option(section, item)?.unwrap_or(false)) | |
363 | } |
|
362 | } | |
364 |
|
363 | |||
365 | /// Returns the corresponding list-value in the config if found, or `None`. |
|
364 | /// If there is an `item` value in `section`, parse and return a list of | |
366 | /// |
|
365 | /// byte strings. | |
367 | /// This is appropriate for new configuration keys. The value syntax is |
|
366 | pub fn get_list( | |
368 | /// **not** the same as most existing list-valued config, which has Python |
|
|||
369 | /// parsing implemented in `parselist()` in |
|
|||
370 | /// `mercurial/utils/stringutil.py`. Faithfully porting that parsing |
|
|||
371 | /// algorithm to Rust (including behaviors that are arguably bugs) 
|
|||
372 | /// turned out to be non-trivial and hasn’t been completed as of this |
|
|||
373 | /// writing. |
|
|||
374 | /// |
|
|||
375 | /// Instead, the "simple" syntax is: split on comma, then trim leading and |
|
|||
376 | /// trailing whitespace of each component. Quotes or backslashes are not |
|
|||
377 | /// interpreted in any way. Commas are mandatory between values. Values |
|
|||
378 | /// that contain a comma are not supported. |
|
|||
379 | pub fn get_simple_list( |
|
|||
380 | &self, |
|
367 | &self, | |
381 | section: &[u8], |
|
368 | section: &[u8], | |
382 | item: &[u8], |
|
369 | item: &[u8], | |
383 | ) -> Option<
|
370 | ) -> Option<Vec<Vec<u8>>> { | |
384 | self.get(section, item).map(
|
371 | self.get(section, item).map(values::parse_list) | |
385 | value |
|
|||
386 | .split(|&byte| byte == b',') |
|
|||
387 | .map(|component| component.trim()) |
|
|||
388 | }) |
|
|||
389 | } |
|
372 | } | |
390 |
|
373 | |||
391 | /// Returns the raw value bytes of the first one found, or `None`. |
|
374 | /// Returns the raw value bytes of the first one found, or `None`. |
@@ -8,6 +8,8 b'' | |||||
8 | //! details about where the value came from (but omits details of what’s |
|
8 | //! details about where the value came from (but omits details of what’s | |
9 | //! invalid inside the value). |
|
9 | //! invalid inside the value). | |
10 |
|
10 | |||
|
11 | use crate::utils::SliceExt; | |||
|
12 | ||||
11 | pub(super) fn parse_bool(v: &[u8]) -> Option<bool> { |
|
13 | pub(super) fn parse_bool(v: &[u8]) -> Option<bool> { | |
12 | match v.to_ascii_lowercase().as_slice() { |
|
14 | match v.to_ascii_lowercase().as_slice() { | |
13 | b"1" | b"yes" | b"true" | b"on" | b"always" => Some(true), |
|
15 | b"1" | b"yes" | b"true" | b"on" | b"always" => Some(true), | |
@@ -42,6 +44,216 b' pub(super) fn parse_byte_size(value: &[u' | |||||
42 | value.parse().ok() |
|
44 | value.parse().ok() | |
43 | } |
|
45 | } | |
44 |
|
46 | |||
|
47 | /// Parse a config value as a list of sub-values. | |||
|
48 | /// | |||
|
49 | /// Ported from `parselist` in `mercurial/utils/stringutil.py` | |||
|
50 | ||||
|
51 | // Note: keep behavior in sync with the Python one. | |||
|
52 | ||||
|
53 | // Note: this could return `Vec<Cow<[u8]>>` instead and borrow `input` when | |||
|
54 | // possible (when there are no backslash escapes) but this is probably not worth | 
|
55 | // the complexity as config is presumably not accessed inside | |||
|
56 | // performance-sensitive loops. | 
|
57 | pub(super) fn parse_list(input: &[u8]) -> Vec<Vec<u8>> { | |||
|
58 | // Port of Python’s `value.lstrip(b' ,\n')` | |||
|
59 | // TODO: is this really what we want? | |||
|
60 | let input = | |||
|
61 | input.trim_start_matches(|b| b == b' ' || b == b',' || b == b'\n'); | |||
|
62 | parse_list_without_trim_start(input) | |||
|
63 | } | |||
|
64 | ||||
|
65 | fn parse_list_without_trim_start(input: &[u8]) -> Vec<Vec<u8>> { | |||
|
66 | // Start of port of Python’s `_configlist` | |||
|
67 | let input = input.trim_end_matches(|b| b == b' ' || b == b','); | |||
|
68 | if input.is_empty() { | |||
|
69 | return Vec::new(); | |||
|
70 | } | |||
|
71 | ||||
|
72 | // Just to make “a string” less confusable with “a list of strings”. | |||
|
73 | type ByteString = Vec<u8>; | |||
|
74 | ||||
|
75 | // These correspond to Python’s… | |||
|
76 | let mut mode = ParserMode::Plain; // `parser` | |||
|
77 | let mut values = Vec::new(); // `parts[:-1]` | |||
|
78 | let mut next_value = ByteString::new(); // `parts[-1]` | |||
|
79 | let mut offset = 0; // `offset` | |||
|
80 | ||||
|
81 | // Setting `parser` to `None` is instead handled by returning immediately | |||
|
82 | enum ParserMode { | |||
|
83 | Plain, | |||
|
84 | Quoted, | |||
|
85 | } | |||
|
86 | ||||
|
87 | loop { | |||
|
88 | match mode { | |||
|
89 | ParserMode::Plain => { | |||
|
90 | // Start of port of Python’s `_parse_plain` | |||
|
91 | let mut whitespace = false; | |||
|
92 | while let Some(&byte) = input.get(offset) { | |||
|
93 | if is_space(byte) || byte == b',' { | |||
|
94 | whitespace = true; | |||
|
95 | offset += 1; | |||
|
96 | } else { | |||
|
97 | break; | |||
|
98 | } | |||
|
99 | } | |||
|
100 | if let Some(&byte) = input.get(offset) { | |||
|
101 | if whitespace { | |||
|
102 | values.push(std::mem::take(&mut next_value)) | |||
|
103 | } | |||
|
104 | if byte == b'"' && next_value.is_empty() { | |||
|
105 | mode = ParserMode::Quoted; | |||
|
106 | } else { | |||
|
107 | if byte == b'"' && next_value.ends_with(b"\\") { | |||
|
108 | next_value.pop(); | |||
|
109 | } | |||
|
110 | next_value.push(byte); | |||
|
111 | } | |||
|
112 | offset += 1; | |||
|
113 | } else { | |||
|
114 | values.push(next_value); | |||
|
115 | return values; | |||
|
116 | } | |||
|
117 | } | |||
|
118 | ParserMode::Quoted => { | |||
|
119 | // Start of port of Python’s `_parse_quote` | |||
|
120 | if let Some(&byte) = input.get(offset) { | |||
|
121 | if byte == b'"' { | |||
|
122 | // The input contains a quoted zero-length value `""` | |||
|
123 | debug_assert_eq!(next_value, b""); | |||
|
124 | values.push(std::mem::take(&mut next_value)); | |||
|
125 | offset += 1; | |||
|
126 | while let Some(&byte) = input.get(offset) { | |||
|
127 | if is_space(byte) || byte == b',' { | |||
|
128 | offset += 1; | |||
|
129 | } else { | |||
|
130 | break; | |||
|
131 | } | |||
|
132 | } | |||
|
133 | mode = ParserMode::Plain; | |||
|
134 | continue; | |||
|
135 | } | |||
|
136 | } | |||
|
137 | ||||
|
138 | while let Some(&byte) = input.get(offset) { | |||
|
139 | if byte == b'"' { | |||
|
140 | break; | |||
|
141 | } | |||
|
142 | if byte == b'\\' && input.get(offset + 1) == Some(&b'"') { | |||
|
143 | next_value.push(b'"'); | |||
|
144 | offset += 2; | |||
|
145 | } else { | |||
|
146 | next_value.push(byte); | |||
|
147 | offset += 1; | |||
|
148 | } | |||
|
149 | } | |||
|
150 | ||||
|
151 | if offset >= input.len() { | |||
|
152 | // We didn’t find a closing double-quote, | |||
|
153 | // so treat the opening one as part of an unquoted value | |||
|
154 | // instead of delimiting the start of a quoted value. | |||
|
155 | ||||
|
156 | // `next_value` may have had some backslash-escapes | |||
|
157 | // unescaped. TODO: shouldn’t we use a slice of `input` | |||
|
158 | // instead? | |||
|
159 | let mut real_values = | |||
|
160 | parse_list_without_trim_start(&next_value); | |||
|
161 | ||||
|
162 | if let Some(first) = real_values.first_mut() { | |||
|
163 | first.insert(0, b'"'); | |||
|
164 | // Drop `next_value` | |||
|
165 | values.extend(real_values) | |||
|
166 | } else { | |||
|
167 | next_value.push(b'"'); | |||
|
168 | values.push(next_value); | |||
|
169 | } | |||
|
170 | return values; | |||
|
171 | } | |||
|
172 | ||||
|
173 | // We’re not at the end of the input, which means the `while` | |||
|
174 | // loop above ended at a double quote. Skip | 
|
175 | // over that. | |||
|
176 | offset += 1; | |||
|
177 | ||||
|
178 | while let Some(&byte) = input.get(offset) { | |||
|
179 | if byte == b' ' || byte == b',' { | |||
|
180 | offset += 1; | |||
|
181 | } else { | |||
|
182 | break; | |||
|
183 | } | |||
|
184 | } | |||
|
185 | ||||
|
186 | if offset >= input.len() { | |||
|
187 | values.push(next_value); | |||
|
188 | return values; | |||
|
189 | } | |||
|
190 | ||||
|
191 | if offset + 1 == input.len() && input[offset] == b'"' { | |||
|
192 | next_value.push(b'"'); | |||
|
193 | offset += 1; | |||
|
194 | } else { | |||
|
195 | values.push(std::mem::take(&mut next_value)); | |||
|
196 | } | |||
|
197 | ||||
|
198 | mode = ParserMode::Plain; | |||
|
199 | } | |||
|
200 | } | |||
|
201 | } | |||
|
202 | ||||
|
203 | // https://docs.python.org/3/library/stdtypes.html?#bytes.isspace | |||
|
204 | fn is_space(byte: u8) -> bool { | |||
|
205 | if let b' ' | b'\t' | b'\n' | b'\r' | b'\x0b' | b'\x0c' = byte { | |||
|
206 | true | |||
|
207 | } else { | |||
|
208 | false | |||
|
209 | } | |||
|
210 | } | |||
|
211 | } | |||
|
212 | ||||
|
213 | #[test] | |||
|
214 | fn test_parse_list() { | |||
|
215 | // Make `assert_eq` error messages nicer | |||
|
216 | fn as_strings(values: &[Vec<u8>]) -> Vec<String> { | |||
|
217 | values | |||
|
218 | .iter() | |||
|
219 | .map(|v| std::str::from_utf8(v.as_ref()).unwrap().to_owned()) | |||
|
220 | .collect() | |||
|
221 | } | |||
|
222 | macro_rules! assert_parse_list { | |||
|
223 | ( $input: expr => [ $( $output: expr ),* ] ) => { | |||
|
224 | assert_eq!( | |||
|
225 | as_strings(&parse_list($input)), | |||
|
226 | as_strings(&[ $( Vec::from(&$output[..]) ),* ]), | |||
|
227 | ); | |||
|
228 | } | |||
|
229 | } | |||
|
230 | ||||
|
231 | // Keep these Rust tests in sync with the Python ones in | |||
|
232 | // `tests/test-config-parselist.py` | |||
|
233 | assert_parse_list!(b"" => []); | |||
|
234 | assert_parse_list!(b"," => []); | |||
|
235 | assert_parse_list!(b"A" => [b"A"]); | |||
|
236 | assert_parse_list!(b"B,B" => [b"B", b"B"]); | |||
|
237 | assert_parse_list!(b", C, ,C," => [b"C", b"C"]); | |||
|
238 | assert_parse_list!(b"\"" => [b"\""]); | |||
|
239 | assert_parse_list!(b"\"\"" => [b"", b""]); | |||
|
240 | assert_parse_list!(b"D,\"" => [b"D", b"\""]); | |||
|
241 | assert_parse_list!(b"E,\"\"" => [b"E", b"", b""]); | |||
|
242 | assert_parse_list!(b"\"F,F\"" => [b"F,F"]); | |||
|
243 | assert_parse_list!(b"\"G,G" => [b"\"G", b"G"]); | |||
|
244 | assert_parse_list!(b"\"H \\\",\\\"H" => [b"\"H", b",", b"H"]); | |||
|
245 | assert_parse_list!(b"I,I\"" => [b"I", b"I\""]); | |||
|
246 | assert_parse_list!(b"J,\"J" => [b"J", b"\"J"]); | |||
|
247 | assert_parse_list!(b"K K" => [b"K", b"K"]); | |||
|
248 | assert_parse_list!(b"\"K\" K" => [b"K", b"K"]); | |||
|
249 | assert_parse_list!(b"L\tL" => [b"L", b"L"]); | |||
|
250 | assert_parse_list!(b"\"L\"\tL" => [b"L", b"", b"L"]); | |||
|
251 | assert_parse_list!(b"M\x0bM" => [b"M", b"M"]); | |||
|
252 | assert_parse_list!(b"\"M\"\x0bM" => [b"M", b"", b"M"]); | |||
|
253 | assert_parse_list!(b"\"N\" , ,\"" => [b"N\""]); | |||
|
254 | assert_parse_list!(b"\" ,O, " => [b"\"", b"O"]); | |||
|
255 | } | |||
|
256 | ||||
45 | #[test] |
|
257 | #[test] | |
46 | fn test_parse_byte_size() { |
|
258 | fn test_parse_byte_size() { | |
47 | assert_eq!(parse_byte_size(b""), None); |
|
259 | assert_eq!(parse_byte_size(b""), None); |
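As a rough mental model of the plain (unquoted) path through `parse_list` above: values are separated by commas and/or whitespace, and empty components are dropped. This standalone sketch deliberately ignores the quoting and backslash rules handled by `ParserMode::Quoted`, so it only reproduces the simple test cases:

    // Simplified, standalone sketch of the unquoted splitting behavior;
    // note `is_space` above also accepts \r, \x0b and \x0c.
    fn split_plain(input: &[u8]) -> Vec<Vec<u8>> {
        input
            .split(|&b| b == b',' || b == b' ' || b == b'\t' || b == b'\n')
            .filter(|chunk| !chunk.is_empty())
            .map(|chunk| chunk.to_vec())
            .collect()
    }

    fn main() {
        assert_eq!(split_plain(b", C, ,C,"), vec![b"C".to_vec(), b"C".to_vec()]);
        assert_eq!(split_plain(b"K K"), vec![b"K".to_vec(), b"K".to_vec()]);
    }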
@@ -6,20 +6,19 b'' | |||||
6 | // GNU General Public License version 2 or any later version. |
|
6 | // GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | use crate::dirstate_tree::on_disk::DirstateV2ParseError; |
|
8 | use crate::dirstate_tree::on_disk::DirstateV2ParseError; | |
9 | use crate::errors::HgError; |
|
|||
10 | use crate::revlog::node::NULL_NODE; |
|
9 | use crate::revlog::node::NULL_NODE; | |
11 | use crate::revlog::Node; |
|
10 | use crate::revlog::Node; | |
12 | use crate::utils::hg_path::
|
11 | use crate::utils::hg_path::HgPath; | |
13 | use crate::FastHashMap; |
|
12 | use bytes_cast::BytesCast; | |
14 | use bytes_cast::{unaligned, BytesCast}; |
|
|||
15 | use std::convert::TryFrom; |
|
|||
16 |
|
13 | |||
17 | pub mod dirs_multiset; |
|
14 | pub mod dirs_multiset; | |
18 | pub mod dirstate_map; |
|
15 | pub mod entry; | |
19 | pub mod parsers; |
|
16 | pub mod parsers; | |
20 | pub mod status; |
|
17 | pub mod status; | |
21 |
|
18 | |||
22 | #[derive(Debug, PartialEq, Clone, BytesCast)] |
|
19 | pub use self::entry::*; | |
|
20 | ||||
|
21 | #[derive(Debug, PartialEq, Copy, Clone, BytesCast)] | |||
23 | #[repr(C)] |
|
22 | #[repr(C)] | |
24 | pub struct DirstateParents { |
|
23 | pub struct DirstateParents { | |
25 | pub p1: Node, |
|
24 | pub p1: Node, | |
@@ -33,69 +32,6 b' impl DirstateParents {' | |||||
33 | }; |
|
32 | }; | |
34 | } |
|
33 | } | |
35 |
|
34 | |||
36 | /// The C implementation uses all signed types. This will be an issue |
|
|||
37 | /// either when 4GB+ source files are commonplace or in 2038, whichever |
|
|||
38 | /// comes first. |
|
|||
39 | #[derive(Debug, PartialEq, Copy, Clone)] |
|
|||
40 | pub struct DirstateEntry { |
|
|||
41 | pub state: EntryState, |
|
|||
42 | pub mode: i32, |
|
|||
43 | pub mtime: i32, |
|
|||
44 | pub size: i32, |
|
|||
45 | } |
|
|||
46 |
|
||||
47 | impl DirstateEntry { |
|
|||
48 | pub fn is_non_normal(&self) -> bool { |
|
|||
49 | self.state != EntryState::Normal || self.mtime == MTIME_UNSET |
|
|||
50 | } |
|
|||
51 |
|
||||
52 | pub fn is_from_other_parent(&self) -> bool { |
|
|||
53 | self.state == EntryState::Normal && self.size == SIZE_FROM_OTHER_PARENT |
|
|||
54 | } |
|
|||
55 |
|
||||
56 | // TODO: other platforms |
|
|||
57 | #[cfg(unix)] |
|
|||
58 | pub fn mode_changed( |
|
|||
59 | &self, |
|
|||
60 | filesystem_metadata: &std::fs::Metadata, |
|
|||
61 | ) -> bool { |
|
|||
62 | use std::os::unix::fs::MetadataExt; |
|
|||
63 | const EXEC_BIT_MASK: u32 = 0o100; |
|
|||
64 | let dirstate_exec_bit = (self.mode as u32) & EXEC_BIT_MASK; |
|
|||
65 | let fs_exec_bit = filesystem_metadata.mode() & EXEC_BIT_MASK; |
|
|||
66 | dirstate_exec_bit != fs_exec_bit |
|
|||
67 | } |
|
|||
68 |
|
||||
69 | /// Returns a `(state, mode, size, mtime)` tuple as for |
|
|||
70 | /// `DirstateMapMethods::debug_iter`. |
|
|||
71 | pub fn debug_tuple(&self) -> (u8, i32, i32, i32) { |
|
|||
72 | (self.state.into(), self.mode, self.size, self.mtime) |
|
|||
73 | } |
|
|||
74 | } |
|
|||
75 |
|
||||
76 | #[derive(BytesCast)] |
|
|||
77 | #[repr(C)] |
|
|||
78 | struct RawEntry { |
|
|||
79 | state: u8, |
|
|||
80 | mode: unaligned::I32Be, |
|
|||
81 | size: unaligned::I32Be, |
|
|||
82 | mtime: unaligned::I32Be, |
|
|||
83 | length: unaligned::I32Be, |
|
|||
84 | } |
|
|||
85 |
|
||||
86 | pub const V1_RANGEMASK: i32 = 0x7FFFFFFF; |
|
|||
87 |
|
||||
88 | pub const MTIME_UNSET: i32 = -1; |
|
|||
89 |
|
||||
90 | /// A `DirstateEntry` with a size of `-2` means that it was merged from the |
|
|||
91 | /// other parent. This allows revert to pick the right status back during a |
|
|||
92 | /// merge. |
|
|||
93 | pub const SIZE_FROM_OTHER_PARENT: i32 = -2; |
|
|||
94 | /// A special value used for internal representation of special case in |
|
|||
95 | /// dirstate v1 format. |
|
|||
96 | pub const SIZE_NON_NORMAL: i32 = -1; |
|
|||
97 |
|
||||
98 | pub type StateMap = FastHashMap<HgPathBuf, DirstateEntry>; |
|
|||
99 | pub type StateMapIter<'a> = Box< |
|
35 | pub type StateMapIter<'a> = Box< | |
100 | dyn Iterator< |
|
36 | dyn Iterator< | |
101 | Item = Result<(&'a HgPath, DirstateEntry), DirstateV2ParseError>, |
|
37 | Item = Result<(&'a HgPath, DirstateEntry), DirstateV2ParseError>, | |
@@ -103,58 +39,8 b" pub type StateMapIter<'a> = Box<" | |||||
103 | + 'a, |
|
39 | + 'a, | |
104 | >; |
|
40 | >; | |
105 |
|
41 | |||
106 | pub type CopyMap = FastHashMap<HgPathBuf, HgPathBuf>; |
|
|||
107 | pub type CopyMapIter<'a> = Box< |
|
42 | pub type CopyMapIter<'a> = Box< | |
108 | dyn Iterator<Item = Result<(&'a HgPath, &'a HgPath), DirstateV2ParseError>> |
|
43 | dyn Iterator<Item = Result<(&'a HgPath, &'a HgPath), DirstateV2ParseError>> | |
109 | + Send |
|
44 | + Send | |
110 | + 'a, |
|
45 | + 'a, | |
111 | >; |
|
46 | >; | |
112 |
|
||||
113 | #[derive(Copy, Clone, Debug, Eq, PartialEq)] |
|
|||
114 | pub enum EntryState { |
|
|||
115 | Normal, |
|
|||
116 | Added, |
|
|||
117 | Removed, |
|
|||
118 | Merged, |
|
|||
119 | Unknown, |
|
|||
120 | } |
|
|||
121 |
|
||||
122 | impl EntryState { |
|
|||
123 | pub fn is_tracked(self) -> bool { |
|
|||
124 | use EntryState::*; |
|
|||
125 | match self { |
|
|||
126 | Normal | Added | Merged => true, |
|
|||
127 | Removed | Unknown => false, |
|
|||
128 | } |
|
|||
129 | } |
|
|||
130 | } |
|
|||
131 |
|
||||
132 | impl TryFrom<u8> for EntryState { |
|
|||
133 | type Error = HgError; |
|
|||
134 |
|
||||
135 | fn try_from(value: u8) -> Result<Self, Self::Error> { |
|
|||
136 | match value { |
|
|||
137 | b'n' => Ok(EntryState::Normal), |
|
|||
138 | b'a' => Ok(EntryState::Added), |
|
|||
139 | b'r' => Ok(EntryState::Removed), |
|
|||
140 | b'm' => Ok(EntryState::Merged), |
|
|||
141 | b'?' => Ok(EntryState::Unknown), |
|
|||
142 | _ => Err(HgError::CorruptedRepository(format!( |
|
|||
143 | "Incorrect dirstate entry state {}", |
|
|||
144 | value |
|
|||
145 | ))), |
|
|||
146 | } |
|
|||
147 | } |
|
|||
148 | } |
|
|||
149 |
|
||||
150 | impl Into<u8> for EntryState { |
|
|||
151 | fn into(self) -> u8 { |
|
|||
152 | match self { |
|
|||
153 | EntryState::Normal => b'n', |
|
|||
154 | EntryState::Added => b'a', |
|
|||
155 | EntryState::Removed => b'r', |
|
|||
156 | EntryState::Merged => b'm', |
|
|||
157 | EntryState::Unknown => b'?', |
|
|||
158 | } |
|
|||
159 | } |
|
|||
160 | } |
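Among the entry code moved out of this module above, `mode_changed` compares only the owner-execute bit between the recorded dirstate mode and the filesystem mode. A standalone sketch of that comparison:

    // Only bit 0o100 (owner-execute) is significant; all other mode bits
    // are ignored when deciding whether the exec flag changed.
    const EXEC_BIT_MASK: u32 = 0o100;

    fn mode_changed(dirstate_mode: i32, fs_mode: u32) -> bool {
        (dirstate_mode as u32 & EXEC_BIT_MASK) != (fs_mode & EXEC_BIT_MASK)
    }

    fn main() {
        assert!(mode_changed(0o644, 0o755)); // exec bit gained
        assert!(!mode_changed(0o644, 0o664)); // unrelated bits may differ
    }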
|
@@ -33,7 +33,7 b' impl DirsMultiset {' | |||||
33 | /// If `skip_state` is provided, skips dirstate entries with equal state. |
|
33 | /// If `skip_state` is provided, skips dirstate entries with equal state. | |
34 | pub fn from_dirstate<I, P>( |
|
34 | pub fn from_dirstate<I, P>( | |
35 | dirstate: I, |
|
35 | dirstate: I, | |
36 | skip_state: Option<EntryState>, |
|
36 | only_tracked: bool, | |
37 | ) -> Result<Self, DirstateError> |
|
37 | ) -> Result<Self, DirstateError> | |
38 | where |
|
38 | where | |
39 | I: IntoIterator< |
|
39 | I: IntoIterator< | |
@@ -48,8 +48,8 b' impl DirsMultiset {' | |||||
48 | let (filename, entry) = item?; |
|
48 | let (filename, entry) = item?; | |
49 | let filename = filename.as_ref(); |
|
49 | let filename = filename.as_ref(); | |
50 | // This `if` is optimized out of the loop |
|
50 | // This `if` is optimized out of the loop | |
51 | if let Some(skip) = skip_state { |
|
51 | if only_tracked { | |
52 | if
|
52 | if entry.state() != EntryState::Removed { | |
53 | multiset.add_path(filename)?; |
|
53 | multiset.add_path(filename)?; | |
54 | } |
|
54 | } | |
55 | } else { |
|
55 | } else { | |
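A condensed view of the new `only_tracked` filter above, expressed on the v1 state byte rather than the real `DirstateEntry` API:

    // With `only_tracked`, removed ('r') entries no longer contribute
    // their ancestor directories; otherwise every entry counts.
    fn should_count(only_tracked: bool, state: u8) -> bool {
        !only_tracked || state != b'r'
    }

    fn main() {
        assert!(should_count(false, b'r'));
        assert!(!should_count(true, b'r'));
        assert!(should_count(true, b'n'));
    }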
@@ -216,7 +216,6 b" impl<'a> DirsChildrenMultiset<'a> {" | |||||
216 | #[cfg(test)] |
|
216 | #[cfg(test)] | |
217 | mod tests { |
|
217 | mod tests { | |
218 | use super::*; |
|
218 | use super::*; | |
219 | use crate::StateMap; |
|
|||
220 |
|
219 | |||
221 | #[test] |
|
220 | #[test] | |
222 | fn test_delete_path_path_not_found() { |
|
221 | fn test_delete_path_path_not_found() { | |
@@ -341,9 +340,9 b' mod tests {' | |||||
341 | }; |
|
340 | }; | |
342 | assert_eq!(expected, new); |
|
341 | assert_eq!(expected, new); | |
343 |
|
342 | |||
344 | let new = DirsMultiset::from_dirstate( |
|
343 | let new = DirsMultiset::from_dirstate::<_, HgPathBuf>( | |
345 | StateMap::default().into_iter().map(Ok), |
|
344 | std::iter::empty(), | |
346 |
|
|
345 | false, | |
347 | ) |
|
346 | ) | |
348 | .unwrap(); |
|
347 | .unwrap(); | |
349 | let expected = DirsMultiset { |
|
348 | let expected = DirsMultiset { | |
@@ -372,12 +371,7 b' mod tests {' | |||||
372 | let input_map = ["b/x", "a/c", "a/d/x"].iter().map(|f| { |
|
371 | let input_map = ["b/x", "a/c", "a/d/x"].iter().map(|f| { | |
373 | Ok(( |
|
372 | Ok(( | |
374 | HgPathBuf::from_bytes(f.as_bytes()), |
|
373 | HgPathBuf::from_bytes(f.as_bytes()), | |
375 | DirstateEntry { |
|
374 | DirstateEntry::from_v1_data(EntryState::Normal, 0, 0, 0), | |
376 | state: EntryState::Normal, |
|
|||
377 | mode: 0, |
|
|||
378 | mtime: 0, |
|
|||
379 | size: 0, |
|
|||
380 | }, |
|
|||
381 | )) |
|
375 | )) | |
382 | }); |
|
376 | }); | |
383 | let expected_inner = [("", 2), ("a", 2), ("b", 1), ("a/d", 1)] |
|
377 | let expected_inner = [("", 2), ("a", 2), ("b", 1), ("a/d", 1)] | |
@@ -385,7 +379,7 b' mod tests {' | |||||
385 | .map(|(k, v)| (HgPathBuf::from_bytes(k.as_bytes()), *v)) |
|
379 | .map(|(k, v)| (HgPathBuf::from_bytes(k.as_bytes()), *v)) | |
386 | .collect(); |
|
380 | .collect(); | |
387 |
|
381 | |||
388 | let new = DirsMultiset::from_dirstate(input_map, 
|
382 | let new = DirsMultiset::from_dirstate(input_map, false).unwrap(); | |
389 | let expected = DirsMultiset { |
|
383 | let expected = DirsMultiset { | |
390 | inner: expected_inner, |
|
384 | inner: expected_inner, | |
391 | }; |
|
385 | }; | |
@@ -404,24 +398,17 b' mod tests {' | |||||
404 | .map(|(f, state)| { |
|
398 | .map(|(f, state)| { | |
405 | Ok(( |
|
399 | Ok(( | |
406 | HgPathBuf::from_bytes(f.as_bytes()), |
|
400 | HgPathBuf::from_bytes(f.as_bytes()), | |
407 | DirstateEntry
|
401 | DirstateEntry::from_v1_data(*state, 0, 0, 0), | |
408 | state: *state, |
|
|||
409 | mode: 0, |
|
|||
410 | mtime: 0, |
|
|||
411 | size: 0, |
|
|||
412 | }, |
|
|||
413 | )) |
|
402 | )) | |
414 | }); |
|
403 | }); | |
415 |
|
404 | |||
416 | // "a" incremented with "a/c" and "a/d/" |
|
405 | // "a" incremented with "a/c" and "a/d/" | |
417 | let expected_inner = [("", 1), ("a", 
|
406 | let expected_inner = [("", 1), ("a", 3)] | |
418 | .iter() |
|
407 | .iter() | |
419 | .map(|(k, v)| (HgPathBuf::from_bytes(k.as_bytes()), *v)) |
|
408 | .map(|(k, v)| (HgPathBuf::from_bytes(k.as_bytes()), *v)) | |
420 | .collect(); |
|
409 | .collect(); | |
421 |
|
410 | |||
422 | let new = |
|
411 | let new = DirsMultiset::from_dirstate(input_map, true).unwrap(); | |
423 | DirsMultiset::from_dirstate(input_map, Some(EntryState::Normal)) |
|
|||
424 | .unwrap(); |
|
|||
425 | let expected = DirsMultiset { |
|
412 | let expected = DirsMultiset { | |
426 | inner: expected_inner, |
|
413 | inner: expected_inner, | |
427 | }; |
|
414 | }; |
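The expected counts in these tests encode the multiset's insertion rule: each path bumps its ancestor directories from deepest to root, but stops after bumping a directory that was already present, since that directory's own ancestors were counted when it was first inserted. A standalone sketch, inferred from the tests, that reproduces the `[("", 2), ("a", 2), ("b", 1), ("a/d", 1)]` expectation:

    use std::collections::HashMap;

    fn parent(path: &str) -> &str {
        match path.rfind('/') {
            Some(i) => &path[..i],
            None => "",
        }
    }

    // Bump ancestors deepest-first, stopping once an already-known
    // directory has been bumped.
    fn add_path(counts: &mut HashMap<String, u32>, path: &str) {
        let mut dir = parent(path).to_string();
        loop {
            let count = counts.entry(dir.clone()).or_insert(0);
            *count += 1;
            if *count > 1 || dir.is_empty() {
                return;
            }
            dir = parent(&dir).to_string();
        }
    }

    fn main() {
        let mut counts = HashMap::new();
        for &path in ["b/x", "a/c", "a/d/x"].iter() {
            add_path(&mut counts, path);
        }
        assert_eq!(counts[""], 2);
        assert_eq!(counts["a"], 2);
        assert_eq!(counts["b"], 1);
        assert_eq!(counts["a/d"], 1);
    }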
@@ -5,14 +5,11 b'' | |||||
5 |
|
5 | |||
6 | use crate::errors::HgError; |
|
6 | use crate::errors::HgError; | |
7 | use crate::utils::hg_path::HgPath; |
|
7 | use crate::utils::hg_path::HgPath; | |
8 | use crate::{ |
|
8 | use crate::{dirstate::EntryState, DirstateEntry, DirstateParents}; | |
9 | dirstate::{CopyMap, EntryState, RawEntry, StateMap}, |
|
|||
10 | DirstateEntry, DirstateParents, |
|
|||
11 | }; |
|
|||
12 | use byteorder::{BigEndian, WriteBytesExt}; |
|
9 | use byteorder::{BigEndian, WriteBytesExt}; | |
13 | use bytes_cast::BytesCast; |
|
10 | use bytes_cast::{unaligned, BytesCast}; | |
14 | use micro_timer::timed; |
|
11 | use micro_timer::timed; | |
15 | use std::convert::
|
12 | use std::convert::TryFrom; | |
16 |
|
13 | |||
17 | /// Parents are stored in the dirstate as byte hashes. |
|
14 | /// Parents are stored in the dirstate as byte hashes. | |
18 | pub const PARENT_SIZE: usize = 20; |
|
15 | pub const PARENT_SIZE: usize = 20; | |
@@ -48,6 +45,16 b' pub fn parse_dirstate(contents: &[u8]) -' | |||||
48 | Ok((parents, entries, copies)) |
|
45 | Ok((parents, entries, copies)) | |
49 | } |
|
46 | } | |
50 |
|
47 | |||
|
48 | #[derive(BytesCast)] | |||
|
49 | #[repr(C)] | |||
|
50 | struct RawEntry { | |||
|
51 | state: u8, | |||
|
52 | mode: unaligned::I32Be, | |||
|
53 | size: unaligned::I32Be, | |||
|
54 | mtime: unaligned::I32Be, | |||
|
55 | length: unaligned::I32Be, | |||
|
56 | } | |||
|
57 | ||||
51 | pub fn parse_dirstate_entries<'a>( |
|
58 | pub fn parse_dirstate_entries<'a>( | |
52 | mut contents: &'a [u8], |
|
59 | mut contents: &'a [u8], | |
53 | mut each_entry: impl FnMut( |
|
60 | mut each_entry: impl FnMut( | |
@@ -63,12 +70,12 b" pub fn parse_dirstate_entries<'a>(" | |||||
63 | let (raw_entry, rest) = RawEntry::from_bytes(contents) |
|
70 | let (raw_entry, rest) = RawEntry::from_bytes(contents) | |
64 | .map_err(|_| HgError::corrupted("Overflow in dirstate."))?; |
|
71 | .map_err(|_| HgError::corrupted("Overflow in dirstate."))?; | |
65 |
|
72 | |||
66 | let entry = DirstateEntry
|
73 | let entry = DirstateEntry::from_v1_data( | |
67 |
|
|
74 | EntryState::try_from(raw_entry.state)?, | |
68 |
|
|
75 | raw_entry.mode.get(), | |
69 |
|
|
76 | raw_entry.size.get(), | |
70 |
|
|
77 | raw_entry.mtime.get(), | |
71 |
|
|
78 | ); | |
72 | let (paths, rest) = |
|
79 | let (paths, rest) = | |
73 | u8::slice_from_bytes(rest, raw_entry.length.get() as usize) |
|
80 | u8::slice_from_bytes(rest, raw_entry.length.get() as usize) | |
74 | .map_err(|_| HgError::corrupted("Overflow in dirstate."))?; |
|
81 | .map_err(|_| HgError::corrupted("Overflow in dirstate."))?; | |
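Putting the `RawEntry` layout and this loop together: a v1 entry is one state byte, four big-endian `i32`s, then `length` bytes holding the filename (and, in the v1 format, optionally a NUL plus the copy source). A standalone decoding sketch of one entry, without the error handling the real parser performs:

    fn read_i32_be(bytes: &[u8]) -> i32 {
        i32::from_be_bytes([bytes[0], bytes[1], bytes[2], bytes[3]])
    }

    // Returns the fixed fields, the variable-length path area, and
    // whatever bytes follow this entry.
    fn decode_entry(bytes: &[u8]) -> (u8, i32, i32, i32, &[u8], &[u8]) {
        let state = bytes[0];
        let mode = read_i32_be(&bytes[1..]);
        let size = read_i32_be(&bytes[5..]);
        let mtime = read_i32_be(&bytes[9..]);
        let length = read_i32_be(&bytes[13..]) as usize;
        let (paths, rest) = bytes[17..].split_at(length);
        (state, mode, size, mtime, paths, rest)
    }

    fn main() {
        let mut entry = vec![b'n'];
        for field in &[0o644, 0, 791231220, 2] {
            entry.extend_from_slice(&i32::to_be_bytes(*field));
        }
        entry.extend_from_slice(b"f1");
        let (state, mode, _size, _mtime, paths, rest) = decode_entry(&entry);
        assert_eq!((state, mode, paths), (b'n', 0o644, &b"f1"[..]));
        assert!(rest.is_empty());
    }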
@@ -114,12 +121,13 b' pub fn pack_entry(' | |||||
114 | packed: &mut Vec<u8>, |
|
121 | packed: &mut Vec<u8>, | |
115 | ) { |
|
122 | ) { | |
116 | let length = packed_filename_and_copy_source_size(filename, copy_source); |
|
123 | let length = packed_filename_and_copy_source_size(filename, copy_source); | |
|
124 | let (state, mode, size, mtime) = entry.v1_data(); | |||
117 |
|
125 | |||
118 | // Unwrapping because `impl std::io::Write for Vec<u8>` never errors |
|
126 | // Unwrapping because `impl std::io::Write for Vec<u8>` never errors | |
119 | packed.write_u8(
|
127 | packed.write_u8(state).unwrap(); | |
120 | packed.write_i32::<BigEndian>(
|
128 | packed.write_i32::<BigEndian>(mode).unwrap(); | |
121 | packed.write_i32::<BigEndian>(
|
129 | packed.write_i32::<BigEndian>(size).unwrap(); | |
122 | packed.write_i32::<BigEndian>(
|
130 | packed.write_i32::<BigEndian>(mtime).unwrap(); | |
123 | packed.write_i32::<BigEndian>(length as i32).unwrap(); |
|
131 | packed.write_i32::<BigEndian>(length as i32).unwrap(); | |
124 | packed.extend(filename.as_bytes()); |
|
132 | packed.extend(filename.as_bytes()); | |
125 | if let Some(source) = copy_source { |
|
133 | if let Some(source) = copy_source { | |
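And the inverse direction as a sketch, with `i32::to_be_bytes` standing in for the `byteorder` calls. It assumes the usual v1 `filename\0copysource` layout, so `length` covers the filename plus, when a copy source is present, the NUL separator and the source (which is what the `packed_filename_and_copy_source_size` helper above would have to compute):

    fn pack_one(
        state: u8,
        mode: i32,
        size: i32,
        mtime: i32,
        filename: &[u8],
        copy_source: Option<&[u8]>,
        packed: &mut Vec<u8>,
    ) {
        let length =
            filename.len() + copy_source.map_or(0, |source| 1 + source.len());
        packed.push(state);
        for field in &[mode, size, mtime, length as i32] {
            packed.extend_from_slice(&field.to_be_bytes());
        }
        packed.extend_from_slice(filename);
        if let Some(source) = copy_source {
            packed.push(b'\0');
            packed.extend_from_slice(source);
        }
    }

    fn main() {
        let mut packed = Vec::new();
        pack_one(b'n', 0o644, 0, 791231220, b"f1", Some(b"copyname"), &mut packed);
        // 1 state byte + 4 * 4 fixed bytes + "f1" + NUL + "copyname"
        assert_eq!(packed.len(), 1 + 16 + 2 + 1 + 8);
    }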
@@ -127,363 +135,3 b' pub fn pack_entry(' | |||||
127 | packed.extend(source.as_bytes()); |
|
135 | packed.extend(source.as_bytes()); | |
128 | } |
|
136 | } | |
129 | } |
|
137 | } | |
130 |
|
||||
131 | /// Seconds since the Unix epoch |
|
|||
132 | pub struct Timestamp(pub i64); |
|
|||
133 |
|
||||
134 | impl DirstateEntry { |
|
|||
135 | pub fn mtime_is_ambiguous(&self, now: i32) -> bool { |
|
|||
136 | self.state == EntryState::Normal && self.mtime == now |
|
|||
137 | } |
|
|||
138 |
|
||||
139 | pub fn clear_ambiguous_mtime(&mut self, now: i32) -> bool { |
|
|||
140 | let ambiguous = self.mtime_is_ambiguous(now); |
|
|||
141 | if ambiguous { |
|
|||
142 | // The file was last modified "simultaneously" with the current |
|
|||
143 | // write to dirstate (i.e. within the same second for file- |
|
|||
144 | // systems with a granularity of 1 sec). This commonly happens |
|
|||
145 | // for at least a couple of files on 'update'. |
|
|||
146 | // The user could change the file without changing its size |
|
|||
147 | // within the same second. Invalidate the file's mtime in |
|
|||
148 | // dirstate, forcing future 'status' calls to compare the |
|
|||
149 | // contents of the file if the size is the same. This prevents |
|
|||
150 | // mistakenly treating such files as clean. |
|
|||
151 | self.clear_mtime() |
|
|||
152 | } |
|
|||
153 | ambiguous |
|
|||
154 | } |
|
|||
155 |
|
||||
156 | pub fn clear_mtime(&mut self) { |
|
|||
157 | self.mtime = -1; |
|
|||
158 | } |
|
|||
159 | } |
|
|||
160 |
|
||||
161 | pub fn pack_dirstate( |
|
|||
162 | state_map: &mut StateMap, |
|
|||
163 | copy_map: &CopyMap, |
|
|||
164 | parents: DirstateParents, |
|
|||
165 | now: Timestamp, |
|
|||
166 | ) -> Result<Vec<u8>, HgError> { |
|
|||
167 | // TODO move away from i32 before 2038. |
|
|||
168 | let now: i32 = now.0.try_into().expect("time overflow"); |
|
|||
169 |
|
||||
170 | let expected_size: usize = state_map |
|
|||
171 | .iter() |
|
|||
172 | .map(|(filename, _)| { |
|
|||
173 | packed_entry_size(filename, copy_map.get(filename).map(|p| &**p)) |
|
|||
174 | }) |
|
|||
175 | .sum(); |
|
|||
176 | let expected_size = expected_size + PARENT_SIZE * 2; |
|
|||
177 |
|
||||
178 | let mut packed = Vec::with_capacity(expected_size); |
|
|||
179 |
|
||||
180 | packed.extend(parents.p1.as_bytes()); |
|
|||
181 | packed.extend(parents.p2.as_bytes()); |
|
|||
182 |
|
||||
183 | for (filename, entry) in state_map.iter_mut() { |
|
|||
184 | entry.clear_ambiguous_mtime(now); |
|
|||
185 | pack_entry( |
|
|||
186 | filename, |
|
|||
187 | entry, |
|
|||
188 | copy_map.get(filename).map(|p| &**p), |
|
|||
189 | &mut packed, |
|
|||
190 | ) |
|
|||
191 | } |
|
|||
192 |
|
||||
193 | if packed.len() != expected_size { |
|
|||
194 | return Err(HgError::CorruptedRepository(format!( |
|
|||
195 | "bad dirstate size: {} != {}", |
|
|||
196 | expected_size, |
|
|||
197 | packed.len() |
|
|||
198 | ))); |
|
|||
199 | } |
|
|||
200 |
|
||||
201 | Ok(packed) |
|
|||
202 | } |
|
|||
203 |
|
||||
204 | #[cfg(test)] |
|
|||
205 | mod tests { |
|
|||
206 | use super::*; |
|
|||
207 | use crate::{utils::hg_path::HgPathBuf, FastHashMap}; |
|
|||
208 | use pretty_assertions::assert_eq; |
|
|||
209 |
|
||||
210 | #[test] |
|
|||
211 | fn test_pack_dirstate_empty() { |
|
|||
212 | let mut state_map = StateMap::default(); |
|
|||
213 | let copymap = FastHashMap::default(); |
|
|||
214 | let parents = DirstateParents { |
|
|||
215 | p1: b"12345678910111213141".into(), |
|
|||
216 | p2: b"00000000000000000000".into(), |
|
|||
217 | }; |
|
|||
218 | let now = Timestamp(15000000); |
|
|||
219 | let expected = b"1234567891011121314100000000000000000000".to_vec(); |
|
|||
220 |
|
||||
221 | assert_eq!( |
|
|||
222 | expected, |
|
|||
223 | pack_dirstate(&mut state_map, ©map, parents, now).unwrap() |
|
|||
224 | ); |
|
|||
225 |
|
||||
226 | assert!(state_map.is_empty()) |
|
|||
227 | } |
|
|||
228 | #[test] |
|
|||
229 | fn test_pack_dirstate_one_entry() { |
|
|||
230 | let expected_state_map: StateMap = [( |
|
|||
231 | HgPathBuf::from_bytes(b"f1"), |
|
|||
232 | DirstateEntry { |
|
|||
233 | state: EntryState::Normal, |
|
|||
234 | mode: 0o644, |
|
|||
235 | size: 0, |
|
|||
236 | mtime: 791231220, |
|
|||
237 | }, |
|
|||
238 | )] |
|
|||
239 | .iter() |
|
|||
240 | .cloned() |
|
|||
241 | .collect(); |
|
|||
242 | let mut state_map = expected_state_map.clone(); |
|
|||
243 |
|
||||
244 | let copymap = FastHashMap::default(); |
|
|||
245 | let parents = DirstateParents { |
|
|||
246 | p1: b"12345678910111213141".into(), |
|
|||
247 | p2: b"00000000000000000000".into(), |
|
|||
248 | }; |
|
|||
249 | let now = Timestamp(15000000); |
|
|||
250 | let expected = [ |
|
|||
251 | 49, 50, 51, 52, 53, 54, 55, 56, 57, 49, 48, 49, 49, 49, 50, 49, |
|
|||
252 | 51, 49, 52, 49, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, |
|
|||
253 | 48, 48, 48, 48, 48, 48, 48, 48, 110, 0, 0, 1, 164, 0, 0, 0, 0, 47, |
|
|||
254 | 41, 58, 244, 0, 0, 0, 2, 102, 49, |
|
|||
255 | ] |
|
|||
256 | .to_vec(); |
|
|||
257 |
|
||||
258 | assert_eq!( |
|
|||
259 | expected, |
|
|||
260 | pack_dirstate(&mut state_map, ©map, parents, now).unwrap() |
|
|||
261 | ); |
|
|||
262 |
|
||||
263 | assert_eq!(expected_state_map, state_map); |
|
|||
264 | } |
|
|||
265 | #[test] |
|
|||
266 | fn test_pack_dirstate_one_entry_with_copy() { |
|
|||
267 | let expected_state_map: StateMap = [( |
|
|||
268 | HgPathBuf::from_bytes(b"f1"), |
|
|||
269 | DirstateEntry { |
|
|||
270 | state: EntryState::Normal, |
|
|||
271 | mode: 0o644, |
|
|||
272 | size: 0, |
|
|||
273 | mtime: 791231220, |
|
|||
274 | }, |
|
|||
275 | )] |
|
|||
276 | .iter() |
|
|||
277 | .cloned() |
|
|||
278 | .collect(); |
|
|||
279 | let mut state_map = expected_state_map.clone(); |
|
|||
280 | let mut copymap = FastHashMap::default(); |
|
|||
281 | copymap.insert( |
|
|||
282 | HgPathBuf::from_bytes(b"f1"), |
|
|||
283 | HgPathBuf::from_bytes(b"copyname"), |
|
|||
284 | ); |
|
|||
285 | let parents = DirstateParents { |
|
|||
286 | p1: b"12345678910111213141".into(), |
|
|||
287 | p2: b"00000000000000000000".into(), |
|
|||
288 | }; |
|
|||
289 | let now = Timestamp(15000000); |
|
|||
290 | let expected = [ |
|
|||
291 | 49, 50, 51, 52, 53, 54, 55, 56, 57, 49, 48, 49, 49, 49, 50, 49, |
|
|||
292 | 51, 49, 52, 49, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, |
|
|||
293 | 48, 48, 48, 48, 48, 48, 48, 48, 110, 0, 0, 1, 164, 0, 0, 0, 0, 47, |
|
|||
294 | 41, 58, 244, 0, 0, 0, 11, 102, 49, 0, 99, 111, 112, 121, 110, 97, |
|
|||
295 | 109, 101, |
|
|||
296 | ] |
|
|||
297 | .to_vec(); |
|
|||
298 |
|
||||
299 | assert_eq!( |
|
|||
300 | expected, |
|
|||
301 | pack_dirstate(&mut state_map, ©map, parents, now).unwrap() |
|
|||
302 | ); |
|
|||
303 | assert_eq!(expected_state_map, state_map); |
|
|||
304 | } |
|
|||
305 |
|
||||
306 | #[test] |
|
|||
307 | fn test_parse_pack_one_entry_with_copy() { |
|
|||
308 | let mut state_map: StateMap = [( |
|
|||
309 | HgPathBuf::from_bytes(b"f1"), |
|
|||
310 | DirstateEntry { |
|
|||
311 | state: EntryState::Normal, |
|
|||
312 | mode: 0o644, |
|
|||
313 | size: 0, |
|
|||
314 | mtime: 791231220, |
|
|||
315 | }, |
|
|||
316 | )] |
|
|||
317 | .iter() |
|
|||
318 | .cloned() |
|
|||
319 | .collect(); |
|
|||
320 | let mut copymap = FastHashMap::default(); |
|
|||
321 | copymap.insert( |
|
|||
322 | HgPathBuf::from_bytes(b"f1"), |
|
|||
323 | HgPathBuf::from_bytes(b"copyname"), |
|
|||
324 | ); |
|
|||
325 | let parents = DirstateParents { |
|
|||
326 | p1: b"12345678910111213141".into(), |
|
|||
327 | p2: b"00000000000000000000".into(), |
|
|||
328 | }; |
|
|||
329 | let now = Timestamp(15000000); |
|
|||
330 | let result = |
|
|||
331 | pack_dirstate(&mut state_map, ©map, parents.clone(), now) |
|
|||
332 | .unwrap(); |
|
|||
333 |
|
||||
334 | let (new_parents, entries, copies) = |
|
|||
335 | parse_dirstate(result.as_slice()).unwrap(); |
|
|||
336 | let new_state_map: StateMap = entries |
|
|||
337 | .into_iter() |
|
|||
338 | .map(|(path, entry)| (path.to_owned(), entry)) |
|
|||
339 | .collect(); |
|
|||
340 | let new_copy_map: CopyMap = copies |
|
|||
341 | .into_iter() |
|
|||
342 | .map(|(path, copy)| (path.to_owned(), copy.to_owned())) |
|
|||
343 | .collect(); |
|
|||
344 |
|
||||
345 | assert_eq!( |
|
|||
346 | (&parents, state_map, copymap), |
|
|||
347 | (new_parents, new_state_map, new_copy_map) |
|
|||
348 | ) |
|
|||
349 | } |
|
|||
350 |
|
||||
351 | #[test] |
|
|||
352 | fn test_parse_pack_multiple_entries_with_copy() { |
|
|||
353 | let mut state_map: StateMap = [ |
|
|||
354 | ( |
|
|||
355 | HgPathBuf::from_bytes(b"f1"), |
|
|||
356 | DirstateEntry { |
|
|||
357 | state: EntryState::Normal, |
|
|||
358 | mode: 0o644, |
|
|||
359 | size: 0, |
|
|||
360 | mtime: 791231220, |
|
|||
361 | }, |
|
|||
362 | ), |
|
|||
363 | ( |
|
|||
364 | HgPathBuf::from_bytes(b"f2"), |
|
|||
365 | DirstateEntry { |
|
|||
366 | state: EntryState::Merged, |
|
|||
367 | mode: 0o777, |
|
|||
368 | size: 1000, |
|
|||
369 | mtime: 791231220, |
|
|||
370 | }, |
|
|||
371 | ), |
|
|||
372 | ( |
|
|||
373 | HgPathBuf::from_bytes(b"f3"), |
|
|||
374 | DirstateEntry { |
|
|||
375 | state: EntryState::Removed, |
|
|||
376 | mode: 0o644, |
|
|||
377 | size: 234553, |
|
|||
378 | mtime: 791231220, |
|
|||
379 | }, |
|
|||
380 | ), |
|
|||
381 | ( |
|
|||
382 | HgPathBuf::from_bytes(b"f4\xF6"), |
|
|||
383 | DirstateEntry { |
|
|||
384 | state: EntryState::Added, |
|
|||
385 | mode: 0o644, |
|
|||
386 | size: -1, |
|
|||
387 | mtime: -1, |
|
|||
388 | }, |
|
|||
389 | ), |
|
|||
390 | ] |
|
|||
391 | .iter() |
|
|||
392 | .cloned() |
|
|||
393 | .collect(); |
|
|||
394 | let mut copymap = FastHashMap::default(); |
|
|||
395 | copymap.insert( |
|
|||
396 | HgPathBuf::from_bytes(b"f1"), |
|
|||
397 | HgPathBuf::from_bytes(b"copyname"), |
|
|||
398 | ); |
|
|||
399 | copymap.insert( |
|
|||
400 | HgPathBuf::from_bytes(b"f4\xF6"), |
|
|||
401 | HgPathBuf::from_bytes(b"copyname2"), |
|
|||
402 | ); |
|
|||
403 | let parents = DirstateParents { |
|
|||
404 | p1: b"12345678910111213141".into(), |
|
|||
405 | p2: b"00000000000000000000".into(), |
|
|||
406 | }; |
|
|||
407 | let now = Timestamp(15000000); |
|
|||
408 | let result = |
|
|||
409 | pack_dirstate(&mut state_map, ©map, parents.clone(), now) |
|
|||
410 | .unwrap(); |
|
|||
411 |
|
||||
412 | let (new_parents, entries, copies) = |
|
|||
413 | parse_dirstate(result.as_slice()).unwrap(); |
|
|||
414 | let new_state_map: StateMap = entries |
|
|||
415 | .into_iter() |
|
|||
416 | .map(|(path, entry)| (path.to_owned(), entry)) |
|
|||
417 | .collect(); |
|
|||
418 | let new_copy_map: CopyMap = copies |
|
|||
419 | .into_iter() |
|
|||
420 | .map(|(path, copy)| (path.to_owned(), copy.to_owned())) |
|
|||
421 | .collect(); |
|
|||
422 |
|
||||
423 | assert_eq!( |
|
|||
424 | (&parents, state_map, copymap), |
|
|||
425 | (new_parents, new_state_map, new_copy_map) |
|
|||
426 | ) |
|
|||
427 | } |
|
|||
428 |
|
||||
429 | #[test] |
|
|||
430 | /// https://www.mercurial-scm.org/repo/hg/rev/af3f26b6bba4 |
|
|||
431 | fn test_parse_pack_one_entry_with_copy_and_time_conflict() { |
|
|||
432 | let mut state_map: StateMap = [( |
|
|||
433 | HgPathBuf::from_bytes(b"f1"), |
|
|||
434 | DirstateEntry { |
|
|||
435 | state: EntryState::Normal, |
|
|||
436 | mode: 0o644, |
|
|||
437 | size: 0, |
|
|||
438 | mtime: 15000000, |
|
|||
439 | }, |
|
|||
440 | )] |
|
|||
441 | .iter() |
|
|||
442 | .cloned() |
|
|||
443 | .collect(); |
|
|||
444 | let mut copymap = FastHashMap::default(); |
|
|||
445 | copymap.insert( |
|
|||
446 | HgPathBuf::from_bytes(b"f1"), |
|
|||
447 | HgPathBuf::from_bytes(b"copyname"), |
|
|||
448 | ); |
|
|||
449 | let parents = DirstateParents { |
|
|||
450 | p1: b"12345678910111213141".into(), |
|
|||
451 | p2: b"00000000000000000000".into(), |
|
|||
452 | }; |
|
|||
453 | let now = Timestamp(15000000); |
|
|||
454 | let result = |
|
|||
455 | pack_dirstate(&mut state_map, ©map, parents.clone(), now) |
|
|||
456 | .unwrap(); |
|
|||
457 |
|
||||
458 | let (new_parents, entries, copies) = |
|
|||
459 | parse_dirstate(result.as_slice()).unwrap(); |
|
|||
460 | let new_state_map: StateMap = entries |
|
|||
461 | .into_iter() |
|
|||
462 | .map(|(path, entry)| (path.to_owned(), entry)) |
|
|||
463 | .collect(); |
|
|||
464 | let new_copy_map: CopyMap = copies |
|
|||
465 | .into_iter() |
|
|||
466 | .map(|(path, copy)| (path.to_owned(), copy.to_owned())) |
|
|||
467 | .collect(); |
|
|||
468 |
|
||||
469 | assert_eq!( |
|
|||
470 | ( |
|
|||
471 | &parents, |
|
|||
472 | [( |
|
|||
473 | HgPathBuf::from_bytes(b"f1"), |
|
|||
474 | DirstateEntry { |
|
|||
475 | state: EntryState::Normal, |
|
|||
476 | mode: 0o644, |
|
|||
477 | size: 0, |
|
|||
478 | mtime: -1 |
|
|||
479 | } |
|
|||
480 | )] |
|
|||
481 | .iter() |
|
|||
482 | .cloned() |
|
|||
483 | .collect::<StateMap>(), |
|
|||
484 | copymap, |
|
|||
485 | ), |
|
|||
486 | (new_parents, new_state_map, new_copy_map) |
|
|||
487 | ) |
|
|||
488 | } |
|
|||
489 | } |
|
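The removed `clear_ambiguous_mtime` above guards against a race: a file written in the same second as the dirstate itself could change again, with the same size, within that second, so its recorded mtime cannot be trusted. A sketch of the rule on plain integers, mirroring the removed code:

    // If a normal entry's mtime equals the dirstate write time, clear it
    // (-1) so the next status falls back to comparing file contents.
    fn clear_ambiguous_mtime(state_is_normal: bool, mtime: &mut i32, now: i32) -> bool {
        let ambiguous = state_is_normal && *mtime == now;
        if ambiguous {
            *mtime = -1;
        }
        ambiguous
    }

    fn main() {
        let mut mtime = 15_000_000;
        assert!(clear_ambiguous_mtime(true, &mut mtime, 15_000_000));
        assert_eq!(mtime, -1);
    }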
@@ -10,33 +10,14 b'' | |||||
10 | //! and will only be triggered in narrow cases. |
|
10 | //! and will only be triggered in narrow cases. | |
11 |
|
11 | |||
12 | use crate::dirstate_tree::on_disk::DirstateV2ParseError; |
|
12 | use crate::dirstate_tree::on_disk::DirstateV2ParseError; | |
13 | use crate::utils::path_auditor::PathAuditor; |
|
13 | ||
14 | use crate::{ |
|
14 | use crate::{ | |
15 | dirstate::SIZE_FROM_OTHER_PARENT, |
|
15 | dirstate::TruncatedTimestamp, | |
16 | filepatterns::PatternFileWarning, |
|
16 | utils::hg_path::{HgPath, HgPathError}, | |
17 | matchers::{get_ignore_function, Matcher, VisitChildrenSet}, |
|
|||
18 | utils::{ |
|
|||
19 | files::{find_dirs, HgMetadata}, |
|
|||
20 | hg_path::{ |
|
|||
21 | hg_path_to_path_buf, os_string_to_hg_path_buf, HgPath, HgPathBuf, |
|
|||
22 | HgPathError, |
|
|||
23 | }, |
|
|||
24 | }, |
|
|||
25 | CopyMap, DirstateEntry, DirstateMap, EntryState, FastHashMap, |
|
|||
26 | PatternError, |
|
17 | PatternError, | |
27 | }; |
|
18 | }; | |
28 | use lazy_static::lazy_static; |
|
19 | ||
29 | use micro_timer::timed; |
|
20 | use std::{borrow::Cow, fmt}; | |
30 | use rayon::prelude::*; |
|
|||
31 | use std::{ |
|
|||
32 | borrow::Cow, |
|
|||
33 | collections::HashSet, |
|
|||
34 | fmt, |
|
|||
35 | fs::{read_dir, DirEntry}, |
|
|||
36 | io::ErrorKind, |
|
|||
37 | ops::Deref, |
|
|||
38 | path::{Path, PathBuf}, |
|
|||
39 | }; |
|
|||
40 |
|
21 | |||
41 | /// Wrong type of file from a `BadMatch` |
|
22 | /// Wrong type of file from a `BadMatch` | |
42 | /// Note: a lot of those don't exist on all platforms. |
|
23 | /// Note: a lot of those don't exist on all platforms. | |
@@ -70,32 +51,6 b' pub enum BadMatch {' | |||||
70 | BadType(BadType), |
|
51 | BadType(BadType), | |
71 | } |
|
52 | } | |
72 |
|
53 | |||
73 | /// Enum used to dispatch new status entries into the right collections. |
|
|||
74 | /// Is similar to `crate::EntryState`, but represents the transient state of |
|
|||
75 | /// entries during the lifetime of a command. |
|
|||
76 | #[derive(Debug, Copy, Clone)] |
|
|||
77 | pub enum Dispatch { |
|
|||
78 | Unsure, |
|
|||
79 | Modified, |
|
|||
80 | Added, |
|
|||
81 | Removed, |
|
|||
82 | Deleted, |
|
|||
83 | Clean, |
|
|||
84 | Unknown, |
|
|||
85 | Ignored, |
|
|||
86 | /// Empty dispatch, the file is not worth listing |
|
|||
87 | None, |
|
|||
88 | /// Was explicitly matched but cannot be found/accessed |
|
|||
89 | Bad(BadMatch), |
|
|||
90 | Directory { |
|
|||
91 | /// True if the directory used to be a file in the dmap so we can say |
|
|||
92 | /// that it's been removed. |
|
|||
93 | was_file: bool, |
|
|||
94 | }, |
|
|||
95 | } |
|
|||
96 |
|
||||
97 | type IoResult<T> = std::io::Result<T>; |
|
|||
98 |
|
||||
99 | /// `Box<dyn Trait>` is syntactic sugar for `Box<dyn Trait + 'static>`, so add |
|
54 | /// `Box<dyn Trait>` is syntactic sugar for `Box<dyn Trait + 'static>`, so add | |
100 | /// an explicit lifetime here to not fight `'static` bounds "out of nowhere". |
|
55 | /// an explicit lifetime here to not fight `'static` bounds "out of nowhere". | |
101 | pub type IgnoreFnType<'a> = |
|
56 | pub type IgnoreFnType<'a> = | |
@@ -105,147 +60,12 b" pub type IgnoreFnType<'a> =" | |||||
105 | /// the dirstate/explicit) paths, this comes up a lot. |
|
60 | /// the dirstate/explicit) paths, this comes up a lot. | |
106 | pub type HgPathCow<'a> = Cow<'a, HgPath>; |
|
61 | pub type HgPathCow<'a> = Cow<'a, HgPath>; | |
107 |
|
62 | |||
108 | /// A path with its computed ``Dispatch`` information |
|
|||
109 | type DispatchedPath<'a> = (HgPathCow<'a>, Dispatch); |
|
|||
110 |
|
||||
111 | /// The conversion from `HgPath` to a real fs path failed. |
|
|||
112 | /// `22` is the error code for "Invalid argument" |
|
|||
113 | const INVALID_PATH_DISPATCH: Dispatch = Dispatch::Bad(BadMatch::OsError(22)); |
|
|||
114 |
|
||||
115 | /// Dates and times that are outside the 31-bit signed range are compared |
|
|||
116 | /// modulo 2^31. This should prevent hg from behaving badly with very large |
|
|||
117 | /// files or corrupt dates while still having a high probability of detecting |
|
|||
118 | /// changes. (issue2608) |
|
|||
119 | /// TODO I haven't found a way of having `b` be `Into<i32>`, since `From<u64>` |
|
|||
120 | /// is not defined for `i32`, and there is no `As` trait. This forces the |
|
|||
121 | /// caller to cast `b` as `i32`. |
|
|||
122 | fn mod_compare(a: i32, b: i32) -> bool { |
|
|||
123 | a & i32::max_value() != b & i32::max_value() |
|
|||
124 | } |
|
|||
125 |
|
||||
126 | /// Return a sorted list containing information about the entries |
|
|||
127 | /// in the directory. |
|
|||
128 | /// |
|
|||
129 | /// * `skip_dot_hg` - Return an empty vec if `path` contains a `.hg` directory |
|
|||
130 | fn list_directory( |
|
|||
131 | path: impl AsRef<Path>, |
|
|||
132 | skip_dot_hg: bool, |
|
|||
133 | ) -> std::io::Result<Vec<(HgPathBuf, DirEntry)>> { |
|
|||
134 | let mut results = vec![]; |
|
|||
135 | let entries = read_dir(path.as_ref())?; |
|
|||
136 |
|
||||
137 | for entry in entries { |
|
|||
138 | let entry = entry?; |
|
|||
139 | let filename = os_string_to_hg_path_buf(entry.file_name())?; |
|
|||
140 | let file_type = entry.file_type()?; |
|
|||
141 | if skip_dot_hg && filename.as_bytes() == b".hg" && file_type.is_dir() { |
|
|||
142 | return Ok(vec![]); |
|
|||
143 | } else { |
|
|||
144 | results.push((filename, entry)) |
|
|||
145 | } |
|
|||
146 | } |
|
|||
147 |
|
||||
148 | results.sort_unstable_by_key(|e| e.0.clone()); |
|
|||
149 | Ok(results) |
|
|||
150 | } |
|
|||
151 |
|
||||
152 | /// The file corresponding to the dirstate entry was found on the filesystem. |
|
|||
153 | fn dispatch_found( |
|
|||
154 | filename: impl AsRef<HgPath>, |
|
|||
155 | entry: DirstateEntry, |
|
|||
156 | metadata: HgMetadata, |
|
|||
157 | copy_map: &CopyMap, |
|
|||
158 | options: StatusOptions, |
|
|||
159 | ) -> Dispatch { |
|
|||
160 | let DirstateEntry { |
|
|||
161 | state, |
|
|||
162 | mode, |
|
|||
163 | mtime, |
|
|||
164 | size, |
|
|||
165 | } = entry; |
|
|||
166 |
|
||||
167 | let HgMetadata { |
|
|||
168 | st_mode, |
|
|||
169 | st_size, |
|
|||
170 | st_mtime, |
|
|||
171 | .. |
|
|||
172 | } = metadata; |
|
|||
173 |
|
||||
174 | match state { |
|
|||
175 | EntryState::Normal => { |
|
|||
176 | let size_changed = mod_compare(size, st_size as i32); |
|
|||
177 | let mode_changed = |
|
|||
178 | (mode ^ st_mode as i32) & 0o100 != 0o000 && options.check_exec; |
|
|||
179 | let metadata_changed = size >= 0 && (size_changed || mode_changed); |
|
|||
180 | let other_parent = size == SIZE_FROM_OTHER_PARENT; |
|
|||
181 |
|
||||
182 | if metadata_changed |
|
|||
183 | || other_parent |
|
|||
184 | || copy_map.contains_key(filename.as_ref()) |
|
|||
185 | { |
|
|||
186 | if metadata.is_symlink() && size_changed { |
|
|||
187 | // issue6456: Size returned may be longer due to encryption |
|
|||
188 | // on EXT-4 fscrypt. TODO maybe only do it on EXT4? |
|
|||
189 | Dispatch::Unsure |
|
|||
190 | } else { |
|
|||
191 | Dispatch::Modified |
|
|||
192 | } |
|
|||
193 | } else if mod_compare(mtime, st_mtime as i32) |
|
|||
194 | || st_mtime == options.last_normal_time |
|
|||
195 | { |
|
|||
196 | // the file may have just been marked as normal and |
|
|||
197 | // it may have changed in the same second without |
|
|||
198 | // changing its size. This can happen if we quickly |
|
|||
199 | // do multiple commits. Force lookup, so we don't |
|
|||
200 | // miss such a racy file change. |
|
|||
201 | Dispatch::Unsure |
|
|||
202 | } else if options.list_clean { |
|
|||
203 | Dispatch::Clean |
|
|||
204 | } else { |
|
|||
205 | Dispatch::None |
|
|||
206 | } |
|
|||
207 | } |
|
|||
208 | EntryState::Merged => Dispatch::Modified, |
|
|||
209 | EntryState::Added => Dispatch::Added, |
|
|||
210 | EntryState::Removed => Dispatch::Removed, |
|
|||
211 | EntryState::Unknown => Dispatch::Unknown, |
|
|||
212 | } |
|
|||
213 | } |
|
|||
214 |
|
||||
215 | /// The file corresponding to this Dirstate entry is missing. |
|
|||
216 | fn dispatch_missing(state: EntryState) -> Dispatch { |
|
|||
217 | match state { |
|
|||
218 | // File was removed from the filesystem during commands |
|
|||
219 | EntryState::Normal | EntryState::Merged | EntryState::Added => { |
|
|||
220 | Dispatch::Deleted |
|
|||
221 | } |
|
|||
222 | // File was removed, everything is normal |
|
|||
223 | EntryState::Removed => Dispatch::Removed, |
|
|||
224 | // File is unknown to Mercurial, everything is normal |
|
|||
225 | EntryState::Unknown => Dispatch::Unknown, |
|
|||
226 | } |
|
|||
227 | } |
|
|||
228 |
|
||||
229 | fn dispatch_os_error(e: &std::io::Error) -> Dispatch { |
|
|||
230 | Dispatch::Bad(BadMatch::OsError( |
|
|||
231 | e.raw_os_error().expect("expected real OS error"), |
|
|||
232 | )) |
|
|||
233 | } |
|
|||
234 |
|
||||
235 | lazy_static! { |
|
|||
236 | static ref DEFAULT_WORK: HashSet<&'static HgPath> = { |
|
|||
237 | let mut h = HashSet::new(); |
|
|||
238 | h.insert(HgPath::new(b"")); |
|
|||
239 | h |
|
|||
240 | }; |
|
|||
241 | } |
|
|||
242 |
|
||||
243 | #[derive(Debug, Copy, Clone)] |
|
63 | #[derive(Debug, Copy, Clone)] | |
244 | pub struct StatusOptions { |
|
64 | pub struct StatusOptions { | |
245 | /// Remember the most recent modification timeslot for status, to make |
|
65 | /// Remember the most recent modification timeslot for status, to make | |
246 | /// sure we won't miss future size-preserving file content modifications |
|
66 | /// sure we won't miss future size-preserving file content modifications | |
247 | /// that happen within the same timeslot. |
|
67 | /// that happen within the same timeslot. | |
248 | pub last_normal_time: 
|
68 | pub last_normal_time: TruncatedTimestamp, | |
249 | /// Whether we are on a filesystem with UNIX-like exec flags |
|
69 | /// Whether we are on a filesystem with UNIX-like exec flags | |
250 | pub check_exec: bool, |
|
70 | pub check_exec: bool, | |
251 | pub list_clean: bool, |
|
71 | pub list_clean: bool, | |
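Switching `last_normal_time` to `TruncatedTimestamp` ties status to the truncated-seconds scheme described in the removed `mod_compare` doc above: dates outside the 31-bit signed range are compared modulo 2^31 (issue2608). A sketch of that comparison:

    // Differ-modulo-2^31: masking both sides keeps comparisons stable for
    // out-of-range dates while still catching most real changes.
    fn mod_compare(a: i32, b: i32) -> bool {
        a & i32::MAX != b & i32::MAX
    }

    fn main() {
        assert!(!mod_compare(5, 5));
        // The same low bits with only the sign bit flipped compare as
        // equal under the mask.
        assert!(!mod_compare(5, 5 | i32::MIN));
    }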
@@ -325,623 +145,3 b' impl fmt::Display for StatusError {' | |||||
325 | } |
|
145 | } | |
326 | } |
|
146 | } | |
327 | } |
|
147 | } | |
328 |
|
||||
329 | /// Gives information about which files are changed in the working directory |
|
|||
330 | /// and how, compared to the revision we're based on |
|
|||
331 | pub struct Status<'a, M: ?Sized + Matcher + Sync> { |
|
|||
332 | dmap: &'a DirstateMap, |
|
|||
333 | pub(crate) matcher: &'a M, |
|
|||
334 | root_dir: PathBuf, |
|
|||
335 | pub(crate) options: StatusOptions, |
|
|||
336 | ignore_fn: IgnoreFnType<'a>, |
|
|||
337 | } |
|
|||
338 |
|
||||
339 | impl<'a, M> Status<'a, M> |
|
|||
340 | where |
|
|||
341 | M: ?Sized + Matcher + Sync, |
|
|||
342 | { |
|
|||
343 | pub fn new( |
|
|||
344 | dmap: &'a DirstateMap, |
|
|||
345 | matcher: &'a M, |
|
|||
346 | root_dir: PathBuf, |
|
|||
347 | ignore_files: Vec<PathBuf>, |
|
|||
348 | options: StatusOptions, |
|
|||
349 | ) -> StatusResult<(Self, Vec<PatternFileWarning>)> { |
|
|||
350 | // Needs to outlive `dir_ignore_fn` since it's captured. |
|
|||
351 |
|
||||
352 | let (ignore_fn, warnings): (IgnoreFnType, _) = |
|
|||
353 | if options.list_ignored || options.list_unknown { |
|
|||
354 | get_ignore_function(ignore_files, &root_dir, &mut |_| {})? |
|
|||
355 | } else { |
|
|||
356 | (Box::new(|&_| true), vec![]) |
|
|||
357 | }; |
|
|||
358 |
|
||||
359 | Ok(( |
|
|||
360 | Self { |
|
|||
361 | dmap, |
|
|||
362 | matcher, |
|
|||
363 | root_dir, |
|
|||
364 | options, |
|
|||
365 | ignore_fn, |
|
|||
366 | }, |
|
|||
367 | warnings, |
|
|||
368 | )) |
|
|||
369 | } |
|
|||
370 |
|
||||
371 | /// Is the path ignored? |
|
|||
372 | pub fn is_ignored(&self, path: impl AsRef<HgPath>) -> bool { |
|
|||
373 | (self.ignore_fn)(path.as_ref()) |
|
|||
374 | } |
|
|||
375 |
|
||||
376 | /// Is the path or one of its ancestors ignored? |
|
|||
377 | pub fn dir_ignore(&self, dir: impl AsRef<HgPath>) -> bool { |
|
|||
378 | // Only involve ignore mechanism if we're listing unknowns or ignored. |
|
|||
379 | if self.options.list_ignored || self.options.list_unknown { |
|
|||
380 | if self.is_ignored(&dir) { |
|
|||
381 | true |
|
|||
382 | } else { |
|
|||
383 | for p in find_dirs(dir.as_ref()) { |
|
|||
384 | if self.is_ignored(p) { |
|
|||
385 | return true; |
|
|||
386 | } |
|
|||
387 | } |
|
|||
388 | false |
|
|||
            }
        } else {
            true
        }
    }

    /// Get stat data about the files explicitly specified by the matcher.
    /// Returns a tuple of the directories that need to be traversed and the
    /// files with their corresponding `Dispatch`.
    /// TODO subrepos
    #[timed]
    pub fn walk_explicit(
        &self,
        traversed_sender: crossbeam_channel::Sender<HgPathBuf>,
    ) -> (Vec<DispatchedPath<'a>>, Vec<DispatchedPath<'a>>) {
        self.matcher
            .file_set()
            .unwrap_or(&DEFAULT_WORK)
            .par_iter()
            .flat_map(|&filename| -> Option<_> {
                // TODO normalization
                let normalized = filename;

                let buf = match hg_path_to_path_buf(normalized) {
                    Ok(x) => x,
                    Err(_) => {
                        return Some((
                            Cow::Borrowed(normalized),
                            INVALID_PATH_DISPATCH,
                        ))
                    }
                };
                let target = self.root_dir.join(buf);
                let st = target.symlink_metadata();
                let in_dmap = self.dmap.get(normalized);
                match st {
                    Ok(meta) => {
                        let file_type = meta.file_type();
                        return if file_type.is_file() || file_type.is_symlink()
                        {
                            if let Some(entry) = in_dmap {
                                return Some((
                                    Cow::Borrowed(normalized),
                                    dispatch_found(
                                        &normalized,
                                        *entry,
                                        HgMetadata::from_metadata(meta),
                                        &self.dmap.copy_map,
                                        self.options,
                                    ),
                                ));
                            }
                            Some((
                                Cow::Borrowed(normalized),
                                Dispatch::Unknown,
                            ))
                        } else if file_type.is_dir() {
                            if self.options.collect_traversed_dirs {
                                traversed_sender
                                    .send(normalized.to_owned())
                                    .expect("receiver should outlive sender");
                            }
                            Some((
                                Cow::Borrowed(normalized),
                                Dispatch::Directory {
                                    was_file: in_dmap.is_some(),
                                },
                            ))
                        } else {
                            Some((
                                Cow::Borrowed(normalized),
                                Dispatch::Bad(BadMatch::BadType(
                                    // TODO do more than unknown
                                    // Support for all `BadType` variant
                                    // varies greatly between platforms.
                                    // So far, no tests check the type and
                                    // this should be good enough for most
                                    // users.
                                    BadType::Unknown,
                                )),
                            ))
                        };
                    }
                    Err(_) => {
                        if let Some(entry) = in_dmap {
                            return Some((
                                Cow::Borrowed(normalized),
                                dispatch_missing(entry.state),
                            ));
                        }
                    }
                };
                None
            })
            .partition(|(_, dispatch)| match dispatch {
                Dispatch::Directory { .. } => true,
                _ => false,
            })
    }

    /// Walk the working directory recursively to look for changes compared to
    /// the current `DirstateMap`.
    ///
    /// This takes a mutable reference to the results to account for the
    /// `extend` in timings
    #[timed]
    pub fn traverse(
        &self,
        path: impl AsRef<HgPath>,
        old_results: &FastHashMap<HgPathCow<'a>, Dispatch>,
        results: &mut Vec<DispatchedPath<'a>>,
        traversed_sender: crossbeam_channel::Sender<HgPathBuf>,
    ) {
        // The traversal is done in parallel, so use a channel to gather
        // entries. `crossbeam_channel::Sender` is `Sync`, while `mpsc::Sender`
        // is not.
        let (files_transmitter, files_receiver) =
            crossbeam_channel::unbounded();

        self.traverse_dir(
            &files_transmitter,
            path,
            &old_results,
            traversed_sender,
        );

        // Disconnect the channel so the receiver stops waiting
        drop(files_transmitter);

        let new_results = files_receiver
            .into_iter()
            .par_bridge()
            .map(|(f, d)| (Cow::Owned(f), d));

        results.par_extend(new_results);
    }

    /// Dispatch a single entry (file, folder, symlink...) found during
    /// `traverse`. If the entry is a folder that needs to be traversed, it
    /// will be handled in a separate thread.
    fn handle_traversed_entry<'b>(
        &'a self,
        scope: &rayon::Scope<'b>,
        files_sender: &'b crossbeam_channel::Sender<(HgPathBuf, Dispatch)>,
        old_results: &'a FastHashMap<Cow<HgPath>, Dispatch>,
        filename: HgPathBuf,
        dir_entry: DirEntry,
        traversed_sender: crossbeam_channel::Sender<HgPathBuf>,
    ) -> IoResult<()>
    where
        'a: 'b,
    {
        let file_type = dir_entry.file_type()?;
        let entry_option = self.dmap.get(&filename);

        if filename.as_bytes() == b".hg" {
            // Could be a directory or a symlink
            return Ok(());
        }

        if file_type.is_dir() {
            self.handle_traversed_dir(
                scope,
                files_sender,
                old_results,
                entry_option,
                filename,
                traversed_sender,
            );
        } else if file_type.is_file() || file_type.is_symlink() {
            if let Some(entry) = entry_option {
                if self.matcher.matches_everything()
                    || self.matcher.matches(&filename)
                {
                    let metadata = dir_entry.metadata()?;
                    files_sender
                        .send((
                            filename.to_owned(),
                            dispatch_found(
                                &filename,
                                *entry,
                                HgMetadata::from_metadata(metadata),
                                &self.dmap.copy_map,
                                self.options,
                            ),
                        ))
                        .unwrap();
                }
            } else if (self.matcher.matches_everything()
                || self.matcher.matches(&filename))
                && !self.is_ignored(&filename)
            {
                if (self.options.list_ignored
                    || self.matcher.exact_match(&filename))
                    && self.dir_ignore(&filename)
                {
                    if self.options.list_ignored {
                        files_sender
                            .send((filename.to_owned(), Dispatch::Ignored))
                            .unwrap();
                    }
                } else if self.options.list_unknown {
                    files_sender
                        .send((filename.to_owned(), Dispatch::Unknown))
                        .unwrap();
                }
            } else if self.is_ignored(&filename) && self.options.list_ignored {
                if self.matcher.matches(&filename) {
                    files_sender
                        .send((filename.to_owned(), Dispatch::Ignored))
                        .unwrap();
                }
            }
        } else if let Some(entry) = entry_option {
            // Used to be a file or a folder, now something else.
            if self.matcher.matches_everything()
                || self.matcher.matches(&filename)
            {
                files_sender
                    .send((filename.to_owned(), dispatch_missing(entry.state)))
                    .unwrap();
            }
        }

        Ok(())
    }

    /// A directory was found in the filesystem and needs to be traversed
    fn handle_traversed_dir<'b>(
        &'a self,
        scope: &rayon::Scope<'b>,
        files_sender: &'b crossbeam_channel::Sender<(HgPathBuf, Dispatch)>,
        old_results: &'a FastHashMap<Cow<HgPath>, Dispatch>,
        entry_option: Option<&'a DirstateEntry>,
        directory: HgPathBuf,
        traversed_sender: crossbeam_channel::Sender<HgPathBuf>,
    ) where
        'a: 'b,
    {
        scope.spawn(move |_| {
            // Nested `if` until `rust-lang/rust#53668` is stable
            if let Some(entry) = entry_option {
                // Used to be a file, is now a folder
                if self.matcher.matches_everything()
                    || self.matcher.matches(&directory)
                {
                    files_sender
                        .send((
                            directory.to_owned(),
                            dispatch_missing(entry.state),
                        ))
                        .unwrap();
                }
            }
            // Do we need to traverse it?
            if !self.is_ignored(&directory) || self.options.list_ignored {
                self.traverse_dir(
                    files_sender,
                    directory,
                    &old_results,
                    traversed_sender,
                )
            }
        });
    }

    /// Decides whether the directory needs to be listed, and if so handles the
    /// entries in a separate thread.
    fn traverse_dir(
        &self,
        files_sender: &crossbeam_channel::Sender<(HgPathBuf, Dispatch)>,
        directory: impl AsRef<HgPath>,
        old_results: &FastHashMap<Cow<HgPath>, Dispatch>,
        traversed_sender: crossbeam_channel::Sender<HgPathBuf>,
    ) {
        let directory = directory.as_ref();

        if self.options.collect_traversed_dirs {
            traversed_sender
                .send(directory.to_owned())
                .expect("receiver should outlive sender");
        }

        let visit_entries = match self.matcher.visit_children_set(directory) {
            VisitChildrenSet::Empty => return,
            VisitChildrenSet::This | VisitChildrenSet::Recursive => None,
            VisitChildrenSet::Set(set) => Some(set),
        };
        let buf = match hg_path_to_path_buf(directory) {
            Ok(b) => b,
            Err(_) => {
                files_sender
                    .send((directory.to_owned(), INVALID_PATH_DISPATCH))
                    .expect("receiver should outlive sender");
                return;
            }
        };
        let dir_path = self.root_dir.join(buf);

        let skip_dot_hg = !directory.as_bytes().is_empty();
        let entries = match list_directory(dir_path, skip_dot_hg) {
            Err(e) => {
                files_sender
                    .send((directory.to_owned(), dispatch_os_error(&e)))
                    .expect("receiver should outlive sender");
                return;
            }
            Ok(entries) => entries,
        };

        rayon::scope(|scope| {
            for (filename, dir_entry) in entries {
                if let Some(ref set) = visit_entries {
                    if !set.contains(filename.deref()) {
                        continue;
                    }
                }
                // TODO normalize
                let filename = if directory.is_empty() {
                    filename.to_owned()
                } else {
                    directory.join(&filename)
                };

                if !old_results.contains_key(filename.deref()) {
                    match self.handle_traversed_entry(
                        scope,
                        files_sender,
                        old_results,
                        filename,
                        dir_entry,
                        traversed_sender.clone(),
                    ) {
                        Err(e) => {
                            files_sender
                                .send((
                                    directory.to_owned(),
                                    dispatch_os_error(&e),
                                ))
                                .expect("receiver should outlive sender");
                        }
                        Ok(_) => {}
                    }
                }
            }
        })
    }

    /// Add the files in the dirstate to the results.
    ///
    /// This takes a mutable reference to the results to account for the
    /// `extend` in timings
    #[timed]
    pub fn extend_from_dmap(&self, results: &mut Vec<DispatchedPath<'a>>) {
        results.par_extend(
            self.dmap
                .par_iter()
                .filter(|(path, _)| self.matcher.matches(path))
                .map(move |(filename, entry)| {
                    let filename: &HgPath = filename;
                    let filename_as_path = match hg_path_to_path_buf(filename)
                    {
                        Ok(f) => f,
                        Err(_) => {
                            return (
                                Cow::Borrowed(filename),
                                INVALID_PATH_DISPATCH,
                            )
                        }
                    };
                    let meta = self
                        .root_dir
                        .join(filename_as_path)
                        .symlink_metadata();
                    match meta {
                        Ok(m)
                            if !(m.file_type().is_file()
                                || m.file_type().is_symlink()) =>
                        {
                            (
                                Cow::Borrowed(filename),
                                dispatch_missing(entry.state),
                            )
                        }
                        Ok(m) => (
                            Cow::Borrowed(filename),
                            dispatch_found(
                                filename,
                                *entry,
                                HgMetadata::from_metadata(m),
                                &self.dmap.copy_map,
                                self.options,
                            ),
                        ),
                        Err(e)
                            if e.kind() == ErrorKind::NotFound
                                || e.raw_os_error() == Some(20) =>
                        {
                            // Rust does not yet have an `ErrorKind` for
                            // `NotADirectory` (errno 20)
                            // It happens if the dirstate contains `foo/bar`
                            // and foo is not a
                            // directory
                            (
                                Cow::Borrowed(filename),
                                dispatch_missing(entry.state),
                            )
                        }
                        Err(e) => {
                            (Cow::Borrowed(filename), dispatch_os_error(&e))
                        }
                    }
                }),
        );
    }

    /// Checks all files that are in the dirstate but were not found during the
    /// working directory traversal. This means that the rest must
    /// be either ignored, under a symlink or under a new nested repo.
    ///
    /// This takes a mutable reference to the results to account for the
    /// `extend` in timings
    #[timed]
    pub fn handle_unknowns(&self, results: &mut Vec<DispatchedPath<'a>>) {
        let to_visit: Vec<(&HgPath, &DirstateEntry)> =
            if results.is_empty() && self.matcher.matches_everything() {
                self.dmap.iter().map(|(f, e)| (f.deref(), e)).collect()
            } else {
                // Only convert to a hashmap if needed.
                let old_results: FastHashMap<_, _> =
                    results.iter().cloned().collect();
                self.dmap
                    .iter()
                    .filter_map(move |(f, e)| {
                        if !old_results.contains_key(f.deref())
                            && self.matcher.matches(f)
                        {
                            Some((f.deref(), e))
                        } else {
                            None
                        }
                    })
                    .collect()
            };

        let path_auditor = PathAuditor::new(&self.root_dir);

        let new_results = to_visit.into_par_iter().filter_map(
            |(filename, entry)| -> Option<_> {
                // Report ignored items in the dmap as long as they are not
                // under a symlink directory.
                if path_auditor.check(filename) {
                    // TODO normalize for case-insensitive filesystems
                    let buf = match hg_path_to_path_buf(filename) {
                        Ok(x) => x,
                        Err(_) => {
                            return Some((
                                Cow::Owned(filename.to_owned()),
                                INVALID_PATH_DISPATCH,
                            ));
                        }
                    };
                    Some((
                        Cow::Owned(filename.to_owned()),
                        match self.root_dir.join(&buf).symlink_metadata() {
                            // File was just ignored, no links, and exists
                            Ok(meta) => {
                                let metadata = HgMetadata::from_metadata(meta);
                                dispatch_found(
                                    filename,
                                    *entry,
                                    metadata,
                                    &self.dmap.copy_map,
                                    self.options,
                                )
                            }
                            // File doesn't exist
                            Err(_) => dispatch_missing(entry.state),
                        },
                    ))
                } else {
                    // It's either missing or under a symlink directory which
                    // we, in this case, report as missing.
                    Some((
                        Cow::Owned(filename.to_owned()),
                        dispatch_missing(entry.state),
                    ))
                }
            },
        );

        results.par_extend(new_results);
    }
}

#[timed]
pub fn build_response<'a>(
    results: impl IntoIterator<Item = DispatchedPath<'a>>,
    traversed: Vec<HgPathCow<'a>>,
) -> DirstateStatus<'a> {
    let mut unsure = vec![];
    let mut modified = vec![];
    let mut added = vec![];
    let mut removed = vec![];
    let mut deleted = vec![];
    let mut clean = vec![];
    let mut ignored = vec![];
    let mut unknown = vec![];
    let mut bad = vec![];

    for (filename, dispatch) in results.into_iter() {
        match dispatch {
            Dispatch::Unknown => unknown.push(filename),
            Dispatch::Unsure => unsure.push(filename),
            Dispatch::Modified => modified.push(filename),
            Dispatch::Added => added.push(filename),
            Dispatch::Removed => removed.push(filename),
            Dispatch::Deleted => deleted.push(filename),
            Dispatch::Clean => clean.push(filename),
            Dispatch::Ignored => ignored.push(filename),
            Dispatch::None => {}
            Dispatch::Bad(reason) => bad.push((filename, reason)),
            Dispatch::Directory { .. } => {}
        }
    }

    DirstateStatus {
        modified,
        added,
        removed,
        deleted,
        clean,
        ignored,
        unknown,
        bad,
        unsure,
        traversed,
        dirty: false,
    }
}

/// Get the status of files in the working directory.
///
/// This is the current entry-point for `hg-core` and is realistically unusable
/// outside of a Python context because its arguments need to provide a lot of
/// information that will not be necessary in the future.
#[timed]
pub fn status<'a>(
    dmap: &'a DirstateMap,
    matcher: &'a (dyn Matcher + Sync),
    root_dir: PathBuf,
    ignore_files: Vec<PathBuf>,
    options: StatusOptions,
) -> StatusResult<(DirstateStatus<'a>, Vec<PatternFileWarning>)> {
    let (status, warnings) =
        Status::new(dmap, matcher, root_dir, ignore_files, options)?;

    Ok((status.run()?, warnings))
}
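The `traverse`/`traverse_dir` pair above relies on one concurrency pattern worth isolating: worker tasks spawned inside a `rayon` scope push results into a `crossbeam` channel, the last sender is dropped to close the channel, and the receiver is then drained in parallel through `par_bridge`. The stand-alone sketch below shows that pattern with invented names and payloads; it is not Mercurial code, and it assumes the `rayon` and `crossbeam-channel` crates as dependencies.

use rayon::iter::{ParallelBridge, ParallelIterator};

fn main() {
    // `crossbeam_channel::Sender` is `Sync`, which is what lets every
    // spawned task hold a clone of it (unlike `std::sync::mpsc::Sender`).
    let (tx, rx) = crossbeam_channel::unbounded();

    rayon::scope(|scope| {
        for i in 0..8 {
            let tx = tx.clone();
            scope.spawn(move |_| {
                // Each "directory" worker reports one (path, is_dir) result.
                tx.send((format!("file-{}", i), i % 2 == 0))
                    .expect("receiver should outlive sender");
            });
        }
    });

    // Disconnect the channel so the receiver stops waiting.
    drop(tx);

    // Drain the channel in parallel, as `traverse` does with `par_bridge`.
    let results: Vec<_> = rx.into_iter().par_bridge().collect();
    assert_eq!(results.len(), 8);
}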
@@ -1,5 +1,5 @@
 pub mod dirstate_map;
-pub mod dispatch;
 pub mod on_disk;
+pub mod owning;
 pub mod path_with_basename;
 pub mod status;
@@ -1,23 +1,22 @@
 use bytes_cast::BytesCast;
 use micro_timer::timed;
 use std::borrow::Cow;
-use std::convert::TryInto;
 use std::path::PathBuf;
 
 use super::on_disk;
 use super::on_disk::DirstateV2ParseError;
+use super::owning::OwningDirstateMap;
 use super::path_with_basename::WithBasename;
 use crate::dirstate::parsers::pack_entry;
 use crate::dirstate::parsers::packed_entry_size;
 use crate::dirstate::parsers::parse_dirstate_entries;
-use crate::dirstate::parsers::Timestamp;
-use crate::dirstate::MTIME_UNSET;
+use crate::dirstate::CopyMapIter;
+use crate::dirstate::StateMapIter;
+use crate::dirstate::TruncatedTimestamp;
 use crate::dirstate::SIZE_FROM_OTHER_PARENT;
 use crate::dirstate::SIZE_NON_NORMAL;
-use crate::dirstate::V1_RANGEMASK;
 use crate::matchers::Matcher;
 use crate::utils::hg_path::{HgPath, HgPathBuf};
-use crate::CopyMapIter;
 use crate::DirstateEntry;
 use crate::DirstateError;
 use crate::DirstateParents;
@@ -25,7 +24,6 @@ use crate::DirstateStatus;
 use crate::EntryState;
 use crate::FastHashMap;
 use crate::PatternFileWarning;
-use crate::StateMapIter;
 use crate::StatusError;
 use crate::StatusOptions;
 
@@ -326,22 +324,17 @@ impl<'tree, 'on_disk> NodeRef<'tree, 'on
     pub(super) fn state(
         &self,
     ) -> Result<Option<EntryState>, DirstateV2ParseError> {
-        match self {
-            NodeRef::InMemory(_path, node) => {
-                Ok(node.data.as_entry().map(|entry| entry.state))
-            }
-            NodeRef::OnDisk(node) => node.state(),
-        }
+        Ok(self.entry()?.map(|e| e.state()))
     }
 
     pub(super) fn cached_directory_mtime(
         &self,
-    ) -> Option<&'tree on_disk::Timestamp> {
+    ) -> Result<Option<TruncatedTimestamp>, DirstateV2ParseError> {
         match self {
-            NodeRef::InMemory(_path, node) => match &node.data {
+            NodeRef::InMemory(_path, node) => Ok(match node.data {
                 NodeData::CachedDirectory { mtime } => Some(mtime),
                 _ => None,
-            },
+            }),
             NodeRef::OnDisk(node) => node.cached_directory_mtime(),
         }
     }
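The rewritten `state()` in this hunk collapses a two-arm `match` into a single expression by reusing the node's fallible `entry()` accessor. A toy version of that shape, with invented types, for readers less used to mixing `Result` and `Option`:

// Illustrative only: `?` unwraps the Result, `.map` handles the Option.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Entry {
    state: char,
}

impl Entry {
    fn state(&self) -> char {
        self.state
    }
}

struct Node {
    entry: Option<Entry>,
}

impl Node {
    fn entry(&self) -> Result<Option<Entry>, String> {
        Ok(self.entry)
    }

    // Same shape as `NodeRef::state` after the change.
    fn state(&self) -> Result<Option<char>, String> {
        Ok(self.entry()?.map(|e| e.state()))
    }
}

fn main() {
    let node = Node { entry: Some(Entry { state: 'n' }) };
    assert_eq!(node.state(), Ok(Some('n')));
}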
@@ -382,7 +375,7 @@ pub(super) struct Node<'on_disk> {
 
 pub(super) enum NodeData {
     Entry(DirstateEntry),
-    CachedDirectory { mtime: on_disk::Timestamp },
+    CachedDirectory { mtime: TruncatedTimestamp },
     None,
 }
 
@@ -445,7 +438,7 @@ impl<'on_disk> DirstateMap<'on_disk> {
         let parents = parse_dirstate_entries(
             map.on_disk,
             |path, entry, copy_source| {
-                let tracked = entry.state.is_tracked();
+                let tracked = entry.state().is_tracked();
                 let node = Self::get_or_insert_node(
                     map.on_disk,
                     &mut map.unreachable_bytes,
@@ -593,12 +586,13 @@ impl<'on_disk> DirstateMap<'on_disk> {
     fn add_or_remove_file(
         &mut self,
         path: &HgPath,
-        old_state: EntryState,
+        old_state: Option<EntryState>,
         new_entry: DirstateEntry,
     ) -> Result<(), DirstateV2ParseError> {
-        let had_entry = old_state != EntryState::Unknown;
+        let had_entry = old_state.is_some();
+        let was_tracked = old_state.map_or(false, |s| s.is_tracked());
         let tracked_count_increment =
-            match (old_state.is_tracked(), new_entry.state.is_tracked()) {
+            match (was_tracked, new_entry.state().is_tracked()) {
                 (false, true) => 1,
                 (true, false) => -1,
                 _ => 0,
@@ -695,34 +689,13 @@ impl<'on_disk> DirstateMap<'on_disk> {
                 path.as_ref(),
             )? {
                 if let NodeData::Entry(entry) = &mut node.data {
-                    entry.clear_mtime();
+                    entry.set_possibly_dirty();
                }
            }
        }
        Ok(())
    }
 
-    /// Return a faillilble iterator of full paths of nodes that have an
-    /// `entry` for which the given `predicate` returns true.
-    ///
-    /// Fallibility means that each iterator item is a `Result`, which may
-    /// indicate a parse error of the on-disk dirstate-v2 format. Such errors
-    /// should only happen if Mercurial is buggy or a repository is corrupted.
-    fn filter_full_paths<'tree>(
-        &'tree self,
-        predicate: impl Fn(&DirstateEntry) -> bool + 'tree,
-    ) -> impl Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + 'tree
-    {
-        filter_map_results(self.iter_nodes(), move |node| {
-            if let Some(entry) = node.entry()? {
-                if predicate(&entry) {
-                    return Ok(Some(node.full_path(self.on_disk)?));
-                }
-            }
-            Ok(None)
-        })
-    }
-
     fn count_dropped_path(unreachable_bytes: &mut u32, path: &Cow<HgPath>) {
         if let Cow::Borrowed(path) = path {
             *unreachable_bytes += path.len() as u32
@@ -750,78 +723,41 @@ where
     })
 }
 
-impl<'on_disk> super::dispatch::DirstateMapMethods for DirstateMap<'on_disk> {
-    fn clear(&mut self) {
-        self.root = Default::default();
-        self.nodes_with_entry_count = 0;
-        self.nodes_with_copy_source_count = 0;
+impl OwningDirstateMap {
+    pub fn clear(&mut self) {
+        let map = self.get_map_mut();
+        map.root = Default::default();
+        map.nodes_with_entry_count = 0;
+        map.nodes_with_copy_source_count = 0;
     }
 
-    fn set_v1(&mut self, filename: &HgPath, entry: DirstateEntry) {
-        let node =
-            self.get_or_insert(&filename).expect("no parse error in v1");
-        node.data = NodeData::Entry(entry);
-        node.children = ChildNodes::default();
-        node.copy_source = None;
-        node.descendants_with_entry_count = 0;
-        node.tracked_descendants_count = 0;
-    }
-
-    fn add_file(
+    pub fn set_entry(
         &mut self,
         filename: &HgPath,
         entry: DirstateEntry,
-        added: bool,
-        merged: bool,
-        from_p2: bool,
-        possibly_dirty: bool,
-    ) -> Result<(), DirstateError> {
-        let mut entry = entry;
-        if added {
-            assert!(!possibly_dirty);
-            assert!(!from_p2);
-            entry.state = EntryState::Added;
-            entry.size = SIZE_NON_NORMAL;
-            entry.mtime = MTIME_UNSET;
-        } else if merged {
-            assert!(!possibly_dirty);
-            assert!(!from_p2);
-            entry.state = EntryState::Merged;
-            entry.size = SIZE_FROM_OTHER_PARENT;
-            entry.mtime = MTIME_UNSET;
-        } else if from_p2 {
-            assert!(!possibly_dirty);
-            entry.state = EntryState::Normal;
-            entry.size = SIZE_FROM_OTHER_PARENT;
-            entry.mtime = MTIME_UNSET;
-        } else if possibly_dirty {
-            entry.state = EntryState::Normal;
-            entry.size = SIZE_NON_NORMAL;
-            entry.mtime = MTIME_UNSET;
-        } else {
-            entry.state = EntryState::Normal;
-            entry.size = entry.size & V1_RANGEMASK;
-            entry.mtime = entry.mtime & V1_RANGEMASK;
-        }
-
-        let old_state = match self.get(filename)? {
-            Some(e) => e.state,
-            None => EntryState::Unknown,
-        };
-
-        Ok(self.add_or_remove_file(filename, old_state, entry)?)
+    ) -> Result<(), DirstateV2ParseError> {
+        let map = self.get_map_mut();
+        map.get_or_insert(&filename)?.data = NodeData::Entry(entry);
+        Ok(())
     }
 
-    fn remove_file(
+    pub fn add_file(
+        &mut self,
+        filename: &HgPath,
+        entry: DirstateEntry,
+    ) -> Result<(), DirstateError> {
+        let old_state = self.get(filename)?.map(|e| e.state());
+        let map = self.get_map_mut();
+        Ok(map.add_or_remove_file(filename, old_state, entry)?)
+    }
+
+    pub fn remove_file(
         &mut self,
         filename: &HgPath,
         in_merge: bool,
     ) -> Result<(), DirstateError> {
         let old_entry_opt = self.get(filename)?;
-        let old_state = match old_entry_opt {
-            Some(e) => e.state,
-            None => EntryState::Unknown,
-        };
+        let old_state = old_entry_opt.map(|e| e.state());
         let mut size = 0;
         if in_merge {
             // XXX we should not be able to have 'm' state and 'FROM_P2' if not
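After this hunk, the methods live directly on `OwningDirstateMap` and all follow the same delegation shape: borrow the inner map with `get_map()` or `get_map_mut()`, then operate on that borrow. A minimal sketch of the wrapper pattern, with invented field names (the real `OwningDirstateMap` additionally owns the on-disk buffer the inner map borrows from):

struct Inner {
    nodes_with_entry_count: u32,
}

struct OwningMap {
    inner: Inner,
}

impl OwningMap {
    fn get_map(&self) -> &Inner {
        &self.inner
    }

    fn get_map_mut(&mut self) -> &mut Inner {
        &mut self.inner
    }

    // Same shape as `OwningDirstateMap::clear` in the diff: fetch the
    // inner map once, then mutate its fields through that borrow.
    fn clear(&mut self) {
        let map = self.get_map_mut();
        map.nodes_with_entry_count = 0;
    }

    fn len(&self) -> usize {
        let map = self.get_map();
        map.nodes_with_entry_count as usize
    }
}

fn main() {
    let mut map = OwningMap { inner: Inner { nodes_with_entry_count: 3 } };
    assert_eq!(map.len(), 3);
    map.clear();
    assert_eq!(map.len(), 0);
}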
@@ -830,10 +766,10 @@ impl<'on_disk> super::dispatch::Dirstate
         // would be nice.
         if let Some(old_entry) = old_entry_opt {
             // backup the previous state
-            if old_entry.state == EntryState::Merged {
+            if old_entry.state() == EntryState::Merged {
                 size = SIZE_NON_NORMAL;
-            } else if old_entry.state == EntryState::Normal
-                && old_entry.size == SIZE_FROM_OTHER_PARENT
+            } else if old_entry.state() == EntryState::Normal
+                && old_entry.size() == SIZE_FROM_OTHER_PARENT
             {
                 // other parent
                 size = SIZE_FROM_OTHER_PARENT;
@@ -843,20 +779,19 @@ impl<'on_disk> super::dispatch::Dirstate
         if size == 0 {
             self.copy_map_remove(filename)?;
         }
-        let entry = DirstateEntry {
-            state: EntryState::Removed,
-            mode: 0,
-            size,
-            mtime: 0,
-        };
-        Ok(self.add_or_remove_file(filename, old_state, entry)?)
+        let map = self.get_map_mut();
+        let entry = DirstateEntry::new_removed(size);
+        Ok(map.add_or_remove_file(filename, old_state, entry)?)
     }
 
-    fn drop_file(&mut self, filename: &HgPath) -> Result<bool, DirstateError> {
-        let old_state = match self.get(filename)? {
-            Some(e) => e.state,
-            None => EntryState::Unknown,
-        };
+    pub fn drop_entry_and_copy_source(
+        &mut self,
+        filename: &HgPath,
+    ) -> Result<(), DirstateError> {
+        let was_tracked = self
+            .get(filename)?
+            .map_or(false, |e| e.state().is_tracked());
+        let map = self.get_map_mut();
         struct Dropped {
             was_tracked: bool,
             had_entry: bool,
915 | node.data = NodeData::None |
|
850 | node.data = NodeData::None | |
916 | } |
|
851 | } | |
917 | if let Some(source) = &node.copy_source { |
|
852 | if let Some(source) = &node.copy_source { | |
918 | DirstateMap::count_dropped_path(unreachable_bytes, source) |
|
853 | DirstateMap::count_dropped_path(unreachable_bytes, source); | |
|
854 | node.copy_source = None | |||
919 | } |
|
855 | } | |
920 | dropped = Dropped { |
|
856 | dropped = Dropped { | |
921 | was_tracked: node |
|
857 | was_tracked: node | |
922 | .data |
|
858 | .data | |
923 | .as_entry() |
|
859 | .as_entry() | |
924 | .map_or(false, |entry| entry.state.is_tracked()), |
|
860 | .map_or(false, |entry| entry.state().is_tracked()), | |
925 | had_entry, |
|
861 | had_entry, | |
926 | had_copy_source: node.copy_source.take().is_some(), |
|
862 | had_copy_source: node.copy_source.take().is_some(), | |
927 | }; |
|
863 | }; | |
@@ -943,112 +879,29 @@ impl<'on_disk> super::dispatch::Dirstate
         }
 
         if let Some((dropped, _removed)) = recur(
-            self.on_disk,
-            &mut self.unreachable_bytes,
-            &mut self.root,
+            map.on_disk,
+            &mut map.unreachable_bytes,
+            &mut map.root,
             filename,
         )? {
             if dropped.had_entry {
-                self.nodes_with_entry_count -= 1
+                map.nodes_with_entry_count -= 1
             }
             if dropped.had_copy_source {
-                self.nodes_with_copy_source_count -= 1
+                map.nodes_with_copy_source_count -= 1
             }
-            Ok(dropped.had_entry)
         } else {
-            debug_assert!(!old_state.is_tracked());
-            Ok(false)
+            debug_assert!(!was_tracked);
-        }
-    }
-
-    fn clear_ambiguous_times(
-        &mut self,
-        filenames: Vec<HgPathBuf>,
-        now: i32,
-    ) -> Result<(), DirstateV2ParseError> {
-        for filename in filenames {
-            if let Some(node) = Self::get_node_mut(
-                self.on_disk,
-                &mut self.unreachable_bytes,
-                &mut self.root,
-                &filename,
-            )? {
-                if let NodeData::Entry(entry) = &mut node.data {
-                    entry.clear_ambiguous_mtime(now);
-                }
-            }
         }
         Ok(())
     }
 
-    fn non_normal_entries_contains(
-        &mut self,
-        key: &HgPath,
-    ) -> Result<bool, DirstateV2ParseError> {
-        Ok(if let Some(node) = self.get_node(key)? {
-            node.entry()?.map_or(false, |entry| entry.is_non_normal())
-        } else {
-            false
-        })
-    }
-
-    fn non_normal_entries_remove(&mut self, key: &HgPath) -> bool {
-        // Do nothing, this `DirstateMap` does not have a separate "non normal
-        // entries" set that need to be kept up to date.
-        if let Ok(Some(v)) = self.get(key) {
-            return v.is_non_normal();
-        }
-        false
-    }
-
-    fn non_normal_entries_add(&mut self, _key: &HgPath) {
-        // Do nothing, this `DirstateMap` does not have a separate "non normal
-        // entries" set that need to be kept up to date
-    }
-
-    fn non_normal_or_other_parent_paths(
-        &mut self,
-    ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
-    {
-        Box::new(self.filter_full_paths(|entry| {
-            entry.is_non_normal() || entry.is_from_other_parent()
-        }))
-    }
-
-    fn set_non_normal_other_parent_entries(&mut self, _force: bool) {
-        // Do nothing, this `DirstateMap` does not have a separate "non normal
-        // entries" and "from other parent" sets that need to be recomputed
-    }
-
-    fn iter_non_normal_paths(
-        &mut self,
-    ) -> Box<
-        dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
-    > {
-        self.iter_non_normal_paths_panic()
-    }
-
-    fn iter_non_normal_paths_panic(
-        &self,
-    ) -> Box<
-        dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
-    > {
-        Box::new(self.filter_full_paths(|entry| entry.is_non_normal()))
-    }
-
-    fn iter_other_parent_paths(
-        &mut self,
-    ) -> Box<
-        dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
-    > {
-        Box::new(self.filter_full_paths(|entry| entry.is_from_other_parent()))
-    }
-
-    fn has_tracked_dir(
-        &mut self,
+    pub fn has_tracked_dir(
+        &mut self,
         directory: &HgPath,
     ) -> Result<bool, DirstateError> {
-        if let Some(node) = self.get_node(directory)? {
+        let map = self.get_map_mut();
+        if let Some(node) = map.get_node(directory)? {
             // A node without a `DirstateEntry` was created to hold child
             // nodes, and is therefore a directory.
             let state = node.state()?;
@@ -1058,8 +911,12 @@ impl<'on_disk> super::dispatch::Dirstate
         }
     }
 
-    fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
-        if let Some(node) = self.get_node(directory)? {
+    pub fn has_dir(
+        &mut self,
+        directory: &HgPath,
+    ) -> Result<bool, DirstateError> {
+        let map = self.get_map_mut();
+        if let Some(node) = map.get_node(directory)? {
             // A node without a `DirstateEntry` was created to hold child
             // nodes, and is therefore a directory.
             let state = node.state()?;
@@ -1070,43 +927,43 @@ impl<'on_disk> super::dispatch::Dirstate
     }
 
     #[timed]
-    fn pack_v1(
+    pub fn pack_v1(
         &mut self,
         parents: DirstateParents,
-        now: Timestamp,
+        now: TruncatedTimestamp,
     ) -> Result<Vec<u8>, DirstateError> {
-        let now: i32 = now.0.try_into().expect("time overflow");
+        let map = self.get_map_mut();
         let mut ambiguous_mtimes = Vec::new();
         // Optizimation (to be measured?): pre-compute size to avoid `Vec`
         // reallocations
         let mut size = parents.as_bytes().len();
-        for node in self.iter_nodes() {
+        for node in map.iter_nodes() {
             let node = node?;
             if let Some(entry) = node.entry()? {
                 size += packed_entry_size(
-                    node.full_path(self.on_disk)?,
-                    node.copy_source(self.on_disk)?,
+                    node.full_path(map.on_disk)?,
+                    node.copy_source(map.on_disk)?,
                 );
-                if entry.mtime_is_ambiguous(now) {
+                if entry.need_delay(now) {
                     ambiguous_mtimes.push(
-                        node.full_path_borrowed(self.on_disk)?
+                        node.full_path_borrowed(map.on_disk)?
                             .detach_from_tree(),
                     )
                 }
            }
        }
-        self.clear_known_ambiguous_mtimes(&ambiguous_mtimes)?;
+        map.clear_known_ambiguous_mtimes(&ambiguous_mtimes)?;
 
         let mut packed = Vec::with_capacity(size);
         packed.extend(parents.as_bytes());
 
-        for node in self.iter_nodes() {
+        for node in map.iter_nodes() {
             let node = node?;
             if let Some(entry) = node.entry()? {
                 pack_entry(
-                    node.full_path(self.on_disk)?,
+                    node.full_path(map.on_disk)?,
                     &entry,
-                    node.copy_source(self.on_disk)?,
+                    node.copy_source(map.on_disk)?,
                     &mut packed,
                 );
             }
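The `need_delay(now)` checks in `pack_v1` (and in `pack_v2` below) deal with a classic dirstate subtlety: an mtime that falls in the same clock second as the dirstate write cannot prove a file clean, because another modification within that second would leave the mtime unchanged. The simplified predicate below is purely illustrative; the real check lives on `DirstateEntry` and works on truncated timestamps.

// A file whose recorded mtime equals "now" (at second granularity) may
// still be rewritten within the same second without its mtime changing,
// so any cached "clean" verdict for it has to be discarded or delayed.
fn mtime_is_ambiguous(entry_mtime_secs: i64, now_secs: i64) -> bool {
    entry_mtime_secs == now_secs
}

fn main() {
    assert!(mtime_is_ambiguous(1_700_000_000, 1_700_000_000));
    assert!(!mtime_is_ambiguous(1_699_999_999, 1_700_000_000));
}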
@@ -1116,23 +973,22 @@ impl<'on_disk> super::dispatch::Dirstate
 
     /// Returns new data and metadata together with whether that data should be
     /// appended to the existing data file whose content is at
-    /// `self.on_disk` (true), instead of written to a new data file
+    /// `map.on_disk` (true), instead of written to a new data file
     /// (false).
     #[timed]
-    fn pack_v2(
+    pub fn pack_v2(
         &mut self,
-        now: Timestamp,
+        now: TruncatedTimestamp,
         can_append: bool,
     ) -> Result<(Vec<u8>, Vec<u8>, bool), DirstateError> {
-        // TODO: how do we want to handle this in 2038?
-        let now: i32 = now.0.try_into().expect("time overflow");
+        let map = self.get_map_mut();
         let mut paths = Vec::new();
-        for node in self.iter_nodes() {
+        for node in map.iter_nodes() {
             let node = node?;
             if let Some(entry) = node.entry()? {
-                if entry.mtime_is_ambiguous(now) {
+                if entry.need_delay(now) {
                     paths.push(
-                        node.full_path_borrowed(self.on_disk)?
+                        node.full_path_borrowed(map.on_disk)?
                             .detach_from_tree(),
                     )
                 }
@@ -1140,12 +996,12 @@ impl<'on_disk> super::dispatch::Dirstate
         }
         // Borrow of `self` ends here since we collect cloned paths
 
-        self.clear_known_ambiguous_mtimes(&paths)?;
+        map.clear_known_ambiguous_mtimes(&paths)?;
 
-        on_disk::write(self, can_append)
+        on_disk::write(map, can_append)
     }
 
-    fn status<'a>(
+    pub fn status<'a>(
         &'a mut self,
         matcher: &'a (dyn Matcher + Sync),
         root_dir: PathBuf,
@@ -1153,119 +1009,129 @@ impl<'on_disk> super::dispatch::Dirstate
         options: StatusOptions,
     ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
     {
-        super::status::status(self, matcher, root_dir, ignore_files, options)
+        let map = self.get_map_mut();
+        super::status::status(map, matcher, root_dir, ignore_files, options)
     }
 
-    fn copy_map_len(&self) -> usize {
-        self.nodes_with_copy_source_count as usize
+    pub fn copy_map_len(&self) -> usize {
+        let map = self.get_map();
+        map.nodes_with_copy_source_count as usize
     }
 
-    fn copy_map_iter(&self) -> CopyMapIter<'_> {
-        Box::new(filter_map_results(self.iter_nodes(), move |node| {
-            Ok(if let Some(source) = node.copy_source(self.on_disk)? {
-                Some((node.full_path(self.on_disk)?, source))
+    pub fn copy_map_iter(&self) -> CopyMapIter<'_> {
+        let map = self.get_map();
+        Box::new(filter_map_results(map.iter_nodes(), move |node| {
+            Ok(if let Some(source) = node.copy_source(map.on_disk)? {
+                Some((node.full_path(map.on_disk)?, source))
             } else {
                 None
             })
         }))
     }
 
-    fn copy_map_contains_key(
+    pub fn copy_map_contains_key(
         &self,
         key: &HgPath,
     ) -> Result<bool, DirstateV2ParseError> {
-        Ok(if let Some(node) = self.get_node(key)? {
+        let map = self.get_map();
+        Ok(if let Some(node) = map.get_node(key)? {
             node.has_copy_source()
         } else {
             false
         })
     }
 
-    fn copy_map_get(
+    pub fn copy_map_get(
         &self,
         key: &HgPath,
     ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
-        if let Some(node) = self.get_node(key)? {
-            if let Some(source) = node.copy_source(self.on_disk)? {
+        let map = self.get_map();
+        if let Some(node) = map.get_node(key)? {
+            if let Some(source) = node.copy_source(map.on_disk)? {
                 return Ok(Some(source));
             }
         }
         Ok(None)
     }
 
-    fn copy_map_remove(
+    pub fn copy_map_remove(
         &mut self,
         key: &HgPath,
     ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
-        let count = &mut self.nodes_with_copy_source_count;
-        let unreachable_bytes = &mut self.unreachable_bytes;
-        Ok(Self::get_node_mut(
-            self.on_disk,
+        let map = self.get_map_mut();
+        let count = &mut map.nodes_with_copy_source_count;
+        let unreachable_bytes = &mut map.unreachable_bytes;
+        Ok(DirstateMap::get_node_mut(
+            map.on_disk,
             unreachable_bytes,
-            &mut self.root,
+            &mut map.root,
             key,
         )?
         .and_then(|node| {
             if let Some(source) = &node.copy_source {
                 *count -= 1;
-                Self::count_dropped_path(unreachable_bytes, source)
+                DirstateMap::count_dropped_path(unreachable_bytes, source);
             }
             node.copy_source.take().map(Cow::into_owned)
         }))
     }
 
-    fn copy_map_insert(
+    pub fn copy_map_insert(
         &mut self,
         key: HgPathBuf,
         value: HgPathBuf,
     ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
-        let node = Self::get_or_insert_node(
-            self.on_disk,
-            &mut self.unreachable_bytes,
-            &mut self.root,
+        let map = self.get_map_mut();
+        let node = DirstateMap::get_or_insert_node(
+            map.on_disk,
+            &mut map.unreachable_bytes,
+            &mut map.root,
             &key,
             WithBasename::to_cow_owned,
             |_ancestor| {},
         )?;
         if node.copy_source.is_none() {
-            self.nodes_with_copy_source_count += 1
+            map.nodes_with_copy_source_count += 1
        }
        Ok(node.copy_source.replace(value.into()).map(Cow::into_owned))
    }
 
-    fn len(&self) -> usize {
-        self.nodes_with_entry_count as usize
+    pub fn len(&self) -> usize {
+        let map = self.get_map();
+        map.nodes_with_entry_count as usize
     }
 
-    fn contains_key(
+    pub fn contains_key(
         &self,
         key: &HgPath,
     ) -> Result<bool, DirstateV2ParseError> {
         Ok(self.get(key)?.is_some())
     }
 
-    fn get(
+    pub fn get(
         &self,
         key: &HgPath,
     ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
-        Ok(if let Some(node) = self.get_node(key)? {
+        let map = self.get_map();
+        Ok(if let Some(node) = map.get_node(key)? {
             node.entry()?
         } else {
             None
         })
     }
 
-    fn iter(&self) -> StateMapIter<'_> {
-        Box::new(filter_map_results(self.iter_nodes(), move |node| {
+    pub fn iter(&self) -> StateMapIter<'_> {
+        let map = self.get_map();
+        Box::new(filter_map_results(map.iter_nodes(), move |node| {
             Ok(if let Some(entry) = node.entry()? {
-                Some((node.full_path(self.on_disk)?, entry))
+                Some((node.full_path(map.on_disk)?, entry))
             } else {
                 None
             })
         }))
     }
 
-    fn iter_tracked_dirs(
+    pub fn iter_tracked_dirs(
         &mut self,
     ) -> Result<
         Box<
@@ -1275,9 +1141,10 @@ impl<'on_disk> super::dispatch::Dirstate
         >,
         DirstateError,
     > {
-        let on_disk = self.on_disk;
+        let map = self.get_map_mut();
+        let on_disk = map.on_disk;
         Ok(Box::new(filter_map_results(
-            self.iter_nodes(),
+            map.iter_nodes(),
             move |node| {
                 Ok(if node.tracked_descendants_count() > 0 {
                     Some(node.full_path(on_disk)?)
@@ -1288,8 +1155,9 @@ impl<'on_disk> super::dispatch::Dirstate
         )))
     }
 
-    fn debug_iter(
+    pub fn debug_iter(
         &self,
+        all: bool,
     ) -> Box<
         dyn Iterator<
             Item = Result<
@@ -1299,16 +1167,18 @@ impl<'on_disk> super::dispatch::Dirstate
         > + Send
             + '_,
     > {
-        Box::new(self.iter_nodes().map(move |node| {
-            let node = node?;
+        let map = self.get_map();
+        Box::new(filter_map_results(map.iter_nodes(), move |node| {
             let debug_tuple = if let Some(entry) = node.entry()? {
                 entry.debug_tuple()
-            } else if let Some(mtime) = node.cached_directory_mtime() {
-                (b' ', 0, -1, mtime.seconds() as i32)
+            } else if !all {
+                return Ok(None);
+            } else if let Some(mtime) = node.cached_directory_mtime()? {
+                (b' ', 0, -1, mtime.truncated_seconds() as i32)
             } else {
                 (b' ', 0, -1, -1)
             };
-            Ok((node.full_path(self.on_disk)?, debug_tuple))
+            Ok(Some((node.full_path(map.on_disk)?, debug_tuple)))
         }))
     }
 }
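Nearly every iterator in this file now goes through `filter_map_results`, including `debug_iter` after the hunk above; its definition sits just above the `impl` block (the `})` context at the top of the `@@ -750,78` hunk is its tail). The sketch below reimplements the same idea under assumed signatures: adapt an iterator of `Result`s with a fallible filter-map, so that errors flow through untouched and `Ok(None)` items are dropped.

// Assumed reimplementation, for illustration only.
fn filter_map_results<I, F, A, B, E>(
    iter: I,
    f: F,
) -> impl Iterator<Item = Result<B, E>>
where
    I: Iterator<Item = Result<A, E>>,
    F: Fn(A) -> Result<Option<B>, E>,
{
    iter.filter_map(move |result| match result {
        // `transpose` turns Result<Option<B>, E> into Option<Result<B, E>>,
        // so Ok(None) disappears and Ok(Some(_)) / Err(_) are yielded.
        Ok(item) => f(item).transpose(),
        Err(e) => Some(Err(e)),
    })
}

fn main() {
    let items: Vec<Result<i32, &str>> = vec![Ok(1), Ok(2), Err("parse"), Ok(3)];
    // Keep odd numbers (doubled), drop evens, keep errors.
    let out: Vec<_> = filter_map_results(items.into_iter(), |n| {
        Ok(if n % 2 == 1 { Some(n * 2) } else { None })
    })
    .collect();
    assert_eq!(out, vec![Ok(2), Err("parse"), Ok(6)]);
}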
NO CONTENT: file renamed from rust/hg-cpython/src/dirstate/owning.rs to rust/hg-core/src/dirstate_tree/owning.rs
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
(tests/test-clone-uncompressed.t was renamed to tests/test-clone-stream.t; content unchanged)