##// END OF EJS Templates
merge with default
Pulkit Goyal -
r49136:a44bb185 merge 6.0rc0 stable
parent child Browse files
Show More
@@ -0,0 +1,87 b''
1 # Copyright Mercurial Contributors
2 #
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
5
6 from __future__ import absolute_import
7
8 import functools
9 import stat
10
11
rangemask = 0x7FFFFFFF


@functools.total_ordering
class timestamp(tuple):
    """
    A Unix timestamp with optional nanoseconds precision,
    modulo 2**31 seconds.

    A 2-tuple containing:

    `truncated_seconds`: seconds since the Unix epoch,
    truncated to its lower 31 bits

    `subsecond_nanoseconds`: number of nanoseconds since `truncated_seconds`.
    When this is zero, the sub-second precision is considered unknown.
    """

    def __new__(cls, value):
        # Keep only the lower 31 bits of the seconds so values round-trip
        # through the on-disk dirstate-v2 format, which stores 31-bit seconds.
        secs, nanos = value
        return super(timestamp, cls).__new__(cls, (secs & rangemask, nanos))

    def __eq__(self, other):
        self_secs, self_nanos = self
        other_secs, other_nanos = other
        if self_secs != other_secs:
            return False
        # With equal seconds, timestamps compare equal when the nanoseconds
        # match or when either side has unknown sub-second precision (zero).
        return (
            self_nanos == other_nanos
            or self_nanos == 0
            or other_nanos == 0
        )

    def __gt__(self, other):
        self_secs, self_nanos = self
        other_secs, other_nanos = other
        if self_secs != other_secs:
            return self_secs > other_secs
        if self_nanos == 0 or other_nanos == 0:
            # Unknown sub-second precision: considered equal, so not
            # "greater than".
            return False
        return self_nanos > other_nanos
55
56
def zero():
    """
    Return the `timestamp` for the Unix epoch (zero seconds, zero nanos).
    """
    # Bypass `timestamp.__new__`: no masking is needed for all-zero values.
    return tuple.__new__(timestamp, (0, 0))
62
63
def mtime_of(stat_result):
    """
    Takes an `os.stat_result`-like object and returns a `timestamp` object
    for its modification time.
    """
    # TODO: add this attribute to `osutil.stat` objects,
    # see `mercurial/cext/osutil.c`.
    #
    # This attribute is also not available on Python 2.
    nanos = getattr(stat_result, 'st_mtime_ns', None)
    if nanos is None:
        # https://docs.python.org/2/library/os.html#os.stat_float_times
        # "For compatibility with older Python versions,
        # accessing stat_result as a tuple always returns integers."
        secs = stat_result[stat.ST_MTIME]
        subsec_nanos = 0
    else:
        billion = 10 ** 9
        secs, subsec_nanos = divmod(nanos, billion)

    return timestamp((secs, subsec_nanos))
@@ -0,0 +1,414 b''
1 # v2.py - Pure-Python implementation of the dirstate-v2 file format
2 #
3 # Copyright Mercurial Contributors
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
7
8 from __future__ import absolute_import
9
10 import struct
11
12 from ..thirdparty import attr
13 from .. import error, policy
14
15 parsers = policy.importmod('parsers')
16
17
18 # Must match the constant of the same name in
19 # `rust/hg-core/src/dirstate_tree/on_disk.rs`
20 TREE_METADATA_SIZE = 44
21 NODE_SIZE = 44
22
23
24 # Must match the `TreeMetadata` Rust struct in
25 # `rust/hg-core/src/dirstate_tree/on_disk.rs`. See doc-comments there.
26 #
27 # * 4 bytes: start offset of root nodes
28 # * 4 bytes: number of root nodes
29 # * 4 bytes: total number of nodes in the tree that have an entry
30 # * 4 bytes: total number of nodes in the tree that have a copy source
31 # * 4 bytes: number of bytes in the data file that are not used anymore
32 # * 4 bytes: unused
33 # * 20 bytes: SHA-1 hash of ignore patterns
34 TREE_METADATA = struct.Struct('>LLLLL4s20s')
35
36
37 # Must match the `Node` Rust struct in
38 # `rust/hg-core/src/dirstate_tree/on_disk.rs`. See doc-comments there.
39 #
40 # * 4 bytes: start offset of full path
41 # * 2 bytes: length of the full path
42 # * 2 bytes: length within the full path before its "base name"
43 # * 4 bytes: start offset of the copy source if any, or zero for no copy source
44 # * 2 bytes: length of the copy source if any, or unused
45 # * 4 bytes: start offset of child nodes
46 # * 4 bytes: number of child nodes
47 # * 4 bytes: number of descendant nodes that have an entry
48 # * 4 bytes: number of descendant nodes that have a "tracked" state
49 # * 2 bytes: flags
50 # * 4 bytes: expected size
51 # * 4 bytes: mtime seconds
52 # * 4 bytes: mtime nanoseconds
53 NODE = struct.Struct('>LHHLHLLLLHlll')
54
55
56 assert TREE_METADATA_SIZE == TREE_METADATA.size
57 assert NODE_SIZE == NODE.size
58
59 # match constant in mercurial/pure/parsers.py
60 DIRSTATE_V2_DIRECTORY = 1 << 5
61
62
def parse_dirstate(map, copy_map, data, tree_metadata):
    """parse a full v2-dirstate from binary data into dictionaries:

    - map: a {path: entry} mapping that will be filled
    - copy_map: a {path: copy-source} mapping that will be filled
    - data: a binary blob containing v2 nodes data
    - tree_metadata: a binary blob of the top level node (from the docket)
    """
    fields = TREE_METADATA.unpack(tree_metadata)
    root_nodes_start = fields[0]
    root_nodes_len = fields[1]
    # The remaining metadata fields (node counts, unreachable bytes, unused
    # bytes, ignore-patterns hash) are not needed to rebuild the mappings.
    parse_nodes(map, copy_map, data, root_nodes_start, root_nodes_len)
81
82
def parse_nodes(map, copy_map, data, start, len):
    """parse <len> nodes from <data> starting at offset <start>

    This is used by parse_dirstate to recursively fill `map` and `copy_map`.

    All directory specific information is ignored and does not need any
    processing (DIRECTORY, ALL_UNKNOWN_RECORDED, ALL_IGNORED_RECORDED)
    """
    for index in range(len):
        node_offset = start + NODE_SIZE * index
        fields = NODE.unpack(slice_with_len(data, node_offset, NODE_SIZE))
        path_start = fields[0]
        path_len = fields[1]
        # fields[2] is the basename offset, unused when parsing
        copy_source_start = fields[3]
        copy_source_len = fields[4]
        children_start = fields[5]
        children_count = fields[6]
        # fields[7] and fields[8] are descendant counters, unused when parsing
        flags = fields[9]
        size = fields[10]
        mtime_s = fields[11]
        mtime_ns = fields[12]

        # Recurse into the child nodes of this node first
        parse_nodes(map, copy_map, data, children_start, children_count)

        item = parsers.DirstateItem.from_v2_data(flags, size, mtime_s, mtime_ns)
        if not item.any_tracked:
            # Directory-only nodes carry no entry relevant to the map
            continue
        path = slice_with_len(data, path_start, path_len)
        map[path] = item
        if copy_source_start:
            copy_map[path] = slice_with_len(
                data, copy_source_start, copy_source_len
            )
122
123
def slice_with_len(data, start, len):
    """Return the `len`-byte slice of `data` beginning at offset `start`."""
    stop = start + len
    return data[start:stop]
126
127
@attr.s
class Node(object):
    # In-memory representation of a dirstate-v2 node while packing the tree.
    path = attr.ib()
    entry = attr.ib()
    parent = attr.ib(default=None)
    children_count = attr.ib(default=0)
    children_offset = attr.ib(default=0)
    descendants_with_entry = attr.ib(default=0)
    tracked_descendants = attr.ib(default=0)

    def pack(self, copy_map, paths_offset):
        """Serialize this node into the fixed-size on-disk `NODE` format.

        `paths_offset` is the pseudo-pointer at which this node's full path
        (immediately followed by its copy source, if any) is written in the
        data file.
        """
        path = self.path
        copy = copy_map.get(path)

        path_start = paths_offset
        path_len = len(path)
        # 0 when rfind returns -1, i.e. for root-level paths
        basename_start = path.rfind(b'/') + 1
        if copy is None:
            copy_source_start = 0
            copy_source_len = 0
        else:
            # The copy source is stored right after the path itself
            copy_source_start = paths_offset + len(path)
            copy_source_len = len(copy)
        entry = self.entry
        if entry is None:
            # There are no mtime-cached directories in the Python
            # implementation
            flags = DIRSTATE_V2_DIRECTORY
            size = 0
            mtime_s = 0
            mtime_ns = 0
        else:
            flags, size, mtime_s, mtime_ns = entry.v2_data()
        return NODE.pack(
            path_start,
            path_len,
            basename_start,
            copy_source_start,
            copy_source_len,
            self.children_offset,
            self.children_count,
            self.descendants_with_entry,
            self.tracked_descendants,
            flags,
            size,
            mtime_s,
            mtime_ns,
        )
175
176
def pack_dirstate(map, copy_map, now):
    """
    Pack `map` and `copy_map` into the dirstate v2 binary format and return
    the bytearray.
    `now` is a timestamp of the current filesystem time used to detect race
    conditions in writing the dirstate to disk, see inline comment.

    The on-disk format expects a tree-like structure where the leaves are
    written first (and sorted per-directory), going up levels until the root
    node and writing that one to the docket. See more details on the on-disk
    format in `mercurial/helptext/internals/dirstate-v2`.

    Since both `map` and `copy_map` are flat dicts we need to figure out the
    hierarchy. This algorithm does so without having to build the entire tree
    in-memory: it only keeps the minimum number of nodes around to satisfy the
    format.

    # Algorithm explanation

    This explanation does not talk about the different counters for tracked
    descendants and storing the copies, but that work is pretty simple once
    this algorithm is in place.

    ## Building a subtree

    First, sort `map`: this makes it so the leaves of the tree are contiguous
    per directory (i.e. a/b/c and a/b/d will be next to each other in the list),
    and enables us to use the ordering of folders to have a "cursor" of the
    current folder we're in without ever going twice in the same branch of the
    tree. The cursor is a node that remembers its parent and any information
    relevant to the format (see the `Node` class), building the relevant part
    of the tree lazily.
    Then, for each file in `map`, move the cursor into the tree to the
    corresponding folder of the file: for example, if the very first file
    is "a/b/c", we start from `Node[""]`, create `Node["a"]` which points to
    its parent `Node[""]`, then create `Node["a/b"]`, which points to its parent
    `Node["a"]`. These nodes are kept around in a stack.
    If the next file in `map` is in the same subtree ("a/b/d" or "a/b/e/f"), we
    add it to the stack and keep looping with the same logic of creating the
    tree nodes as needed. If however the next file in `map` is *not* in the same
    subtree ("a/other", if we're still in the "a/b" folder), then we know that
    the subtree we're in is complete.

    ## Writing the subtree

    We have the entire subtree in the stack, so we start writing it to disk
    folder by folder. The way we write a folder is to pop the stack into a list
    until the folder changes, reverse this list of direct children (to satisfy
    the format requirement that children be sorted). This process repeats until
    we hit the "other" subtree.

    An example:
        a
        dir1/b
        dir1/c
        dir2/dir3/d
        dir2/dir3/e
        dir2/f

    Would have us:
        - add to the stack until "dir2/dir3/e"
        - realize that "dir2/f" is in a different subtree
        - pop "dir2/dir3/e", "dir2/dir3/d", reverse them so they're sorted and
          pack them since the next entry is "dir2/dir3"
        - go back up to "dir2"
        - add "dir2/f" to the stack
        - realize we're done with the map
        - pop "dir2/f", "dir2/dir3" from the stack, reverse and pack them
        - go up to the root node, do the same to write "a", "dir1" and "dir2" in
          that order

    ## Special case for the root node

    The root node is not serialized in the format, but its information is
    written to the docket. Again, see more details on the on-disk format in
    `mercurial/helptext/internals/dirstate-v2`.
    """
    data = bytearray()
    root_nodes_start = 0
    root_nodes_len = 0
    nodes_with_entry_count = 0
    nodes_with_copy_source_count = 0
    # Will always be 0 since this implementation always re-writes everything
    # to disk
    unreachable_bytes = 0
    unused = b'\x00' * 4
    # This is an optimization that's only useful for the Rust implementation
    ignore_patterns_hash = b'\x00' * 20

    if len(map) == 0:
        # Empty dirstate: no data, only the (all-zero) tree metadata
        tree_metadata = TREE_METADATA.pack(
            root_nodes_start,
            root_nodes_len,
            nodes_with_entry_count,
            nodes_with_copy_source_count,
            unreachable_bytes,
            unused,
            ignore_patterns_hash,
        )
        return data, tree_metadata

    # Sorting makes files of the same directory contiguous (see docstring)
    sorted_map = sorted(map.items(), key=lambda x: x[0])

    # Use a stack to not have to only remember the nodes we currently need
    # instead of building the entire tree in memory
    stack = []
    current_node = Node(b"", None)
    stack.append(current_node)

    # `index` starts at 1 so that `sorted_map[index]` is the *next* entry
    for index, (path, entry) in enumerate(sorted_map, 1):
        if entry.need_delay(now):
            # The file was last modified "simultaneously" with the current
            # write to dirstate (i.e. within the same second for file-
            # systems with a granularity of 1 sec). This commonly happens
            # for at least a couple of files on 'update'.
            # The user could change the file without changing its size
            # within the same second. Invalidate the file's mtime in
            # dirstate, forcing future 'status' calls to compare the
            # contents of the file if the size is the same. This prevents
            # mistakenly treating such files as clean.
            entry.set_possibly_dirty()
        nodes_with_entry_count += 1
        if path in copy_map:
            nodes_with_copy_source_count += 1
        current_folder = get_folder(path)
        current_node = move_to_correct_node_in_tree(
            current_folder, current_node, stack
        )

        current_node.children_count += 1
        # Entries from `map` are never `None`
        if entry.tracked:
            current_node.tracked_descendants += 1
        current_node.descendants_with_entry += 1
        stack.append(Node(path, entry, current_node))

        should_pack = True
        next_path = None
        if index < len(sorted_map):
            # Determine if the next entry is in the same sub-tree, if so don't
            # pack yet
            # NOTE(review): this `startswith` is not slash-boundary-aware, so
            # sibling folders sharing a name prefix (e.g. "a" and "ab") look
            # like the same subtree here — TODO confirm whether sorted order
            # makes this benign.
            next_path = sorted_map[index][0]
            should_pack = not get_folder(next_path).startswith(current_folder)
        if should_pack:
            pack_directory_children(current_node, copy_map, data, stack)
            while stack and current_node.path != b"":
                # Go up the tree and write until we reach the folder of the next
                # entry (if any, otherwise the root)
                parent = current_node.parent
                in_parent_folder_of_next_entry = next_path is not None and (
                    get_folder(next_path).startswith(get_folder(stack[-1].path))
                )
                if parent is None or in_parent_folder_of_next_entry:
                    break
                pack_directory_children(parent, copy_map, data, stack)
                current_node = parent

    # Special case for the root node since we don't write it to disk, only its
    # children to the docket
    current_node = stack.pop()
    assert current_node.path == b"", current_node.path
    assert len(stack) == 0, len(stack)

    tree_metadata = TREE_METADATA.pack(
        current_node.children_offset,
        current_node.children_count,
        nodes_with_entry_count,
        nodes_with_copy_source_count,
        unreachable_bytes,
        unused,
        ignore_patterns_hash,
    )

    return data, tree_metadata
351
352
def get_folder(path):
    """
    Return the folder of the path that's given, an empty string for root paths.
    """
    # rpartition yields (b'', b'', path) when there is no separator, so the
    # head is b'' exactly when `path` sits at the repository root.
    return path.rpartition(b'/')[0]
358
359
def move_to_correct_node_in_tree(target_folder, current_node, stack):
    """
    Move inside the dirstate node tree to the node corresponding to
    `target_folder`, creating the missing nodes along the way if needed.

    Returns the node for `target_folder`; any intermediate nodes created are
    pushed onto `stack` so they are packed later.
    """
    while target_folder != current_node.path:
        if target_folder.startswith(current_node.path):
            # We need to go down a folder
            # NOTE(review): prefix check is not slash-boundary-aware; see the
            # matching note in `pack_dirstate` — TODO confirm it is benign.
            prefix = target_folder[len(current_node.path) :].lstrip(b'/')
            # Only descend one level per iteration of the outer loop
            subfolder_name = prefix.split(b'/', 1)[0]
            if current_node.path:
                subfolder_path = current_node.path + b'/' + subfolder_name
            else:
                # Root node: no separator before the first component
                subfolder_path = subfolder_name
            next_node = stack[-1]
            if next_node.path == target_folder:
                # This folder is now a file and only contains removed entries
                # merge with the last node
                current_node = next_node
            else:
                current_node.children_count += 1
                current_node = Node(subfolder_path, None, current_node)
                stack.append(current_node)
        else:
            # We need to go up a folder
            current_node = current_node.parent
    return current_node
387
388
def pack_directory_children(node, copy_map, data, stack):
    """
    Write the binary representation of the direct sorted children of `node` to
    `data`

    Children are popped off `stack` (most-recently-pushed first); their
    descendant counters are folded into `node`, and `node.children_offset` is
    set to the pseudo-pointer where the packed child nodes begin.
    """
    direct_children = []

    # Pop every stacked node whose parent folder is `node` (the root sentinel
    # with path b"" is never popped here)
    while stack[-1].path != b"" and get_folder(stack[-1].path) == node.path:
        direct_children.append(stack.pop())
    if not direct_children:
        raise error.ProgrammingError(b"no direct children for %r" % node.path)

    # Popping yielded the children in reverse order; restore sorted order,
    # as required by the on-disk format
    direct_children.reverse()
    packed_children = bytearray()
    # Write the paths to `data`. Pack child nodes but don't write them yet
    for child in direct_children:
        packed = child.pack(copy_map=copy_map, paths_offset=len(data))
        packed_children.extend(packed)
        # Path and (optional) copy source are written contiguously, which is
        # what `Node.pack` assumes when computing `copy_source_start`
        data.extend(child.path)
        data.extend(copy_map.get(child.path, b""))
        node.tracked_descendants += child.tracked_descendants
        node.descendants_with_entry += child.descendants_with_entry
    # Write the fixed-size child nodes all together, so siblings stay
    # contiguous as the format requires
    node.children_offset = len(data)
    data.extend(packed_children)
This diff has been collapsed as it changes many lines, (616 lines changed) Show them Hide them
@@ -0,0 +1,616 b''
1 The *dirstate* is what Mercurial uses internally to track
2 the state of files in the working directory,
3 such as set by commands like `hg add` and `hg rm`.
4 It also contains some cached data that help make `hg status` faster.
5 The name refers both to `.hg/dirstate` on the filesystem
6 and the corresponding data structure in memory while a Mercurial process
7 is running.
8
9 The original file format, retroactively dubbed `dirstate-v1`,
10 is described at https://www.mercurial-scm.org/wiki/DirState.
11 It is made of a flat sequence of unordered variable-size entries,
12 so accessing any information in it requires parsing all of it.
13 Similarly, saving changes requires rewriting the entire file.
14
15 The newer `dirstate-v2` file format is designed to fix these limitations
16 and make `hg status` faster.
17
18 User guide
19 ==========
20
21 Compatibility
22 -------------
23
24 The file format is experimental and may still change.
25 Different versions of Mercurial may not be compatible with each other
26 when working on a local repository that uses this format.
27 When using an incompatible version with the experimental format,
28 anything can happen including data corruption.
29
30 Since the dirstate is entirely local and not relevant to the wire protocol,
31 `dirstate-v2` does not affect compatibility with remote Mercurial versions.
32
33 When `share-safe` is enabled, different repositories sharing the same store
34 can use different dirstate formats.
35
36 Enabling `dirstate-v2` for new local repositories
37 -------------------------------------------------
38
39 When creating a new local repository such as with `hg init` or `hg clone`,
40 the `exp-dirstate-v2` boolean in the `format` configuration section
41 controls whether to use this file format.
42 This is disabled by default as of this writing.
43 To enable it for a single repository, run for example::
44
45 $ hg init my-project --config format.exp-dirstate-v2=1
46
47 Checking the format of an existing local repository
48 ---------------------------------------------------
49
50 The `debugformat` commands prints information about
51 which of multiple optional formats are used in the current repository,
52 including `dirstate-v2`::
53
54 $ hg debugformat
55 format-variant repo
56 fncache: yes
57 dirstate-v2: yes
58 […]
59
60 Upgrading or downgrading an existing local repository
61 -----------------------------------------------------
62
63 The `debugupgrade` command does various upgrades or downgrades
64 on a local repository
65 based on the current Mercurial version and on configuration.
66 The same `format.exp-dirstate-v2` configuration is used again.
67
68 Example to upgrade::
69
70 $ hg debugupgrade --config format.exp-dirstate-v2=1
71
72 Example to downgrade to `dirstate-v1`::
73
74 $ hg debugupgrade --config format.exp-dirstate-v2=0
75
76 Both of these commands do nothing but print a list of proposed changes,
77 which may include changes unrelated to the dirstate.
78 Those other changes are controlled by their own configuration keys.
79 Add `--run` to a command to actually apply the proposed changes.
80
81 Backups of `.hg/requires` and `.hg/dirstate` are created
82 in a `.hg/upgradebackup.*` directory.
83 If something goes wrong, restoring those files should undo the change.
84
85 Note that upgrading affects compatibility with older versions of Mercurial
86 as noted above.
87 This can be relevant when a repository’s files are on a USB drive
88 or some other removable media, or shared over the network, etc.
89
90 Internal filesystem representation
91 ==================================
92
93 Requirements file
94 -----------------
95
96 The `.hg/requires` file indicates which of various optional file formats
97 are used by a given repository.
98 Mercurial aborts when seeing a requirement it does not know about,
99 which avoids older versions accidentally messing up a repository
100 that uses a format that was introduced later.
101 For versions that do support a format, the presence or absence of
102 the corresponding requirement indicates whether to use that format.
103
104 When the file contains a `exp-dirstate-v2` line,
105 the `dirstate-v2` format is used.
106 With no such line `dirstate-v1` is used.
107
108 High level description
109 ----------------------
110
111 Whereas `dirstate-v1` uses a single `.hg/dirstate` file,
112 in `dirstate-v2` that file is a "docket" file
113 that only contains some metadata
114 and points to separate data file named `.hg/dirstate.{ID}`,
115 where `{ID}` is a random identifier.
116
117 This separation allows making data files append-only
118 and therefore safer to memory-map.
119 Creating a new data file (occasionally to clean up unused data)
120 can be done with a different ID
121 without disrupting another Mercurial process
122 that could still be using the previous data file.
123
124 Both files have a format designed to reduce the need for parsing,
125 by using fixed-size binary components as much as possible.
126 For data that is not fixed-size,
127 references to other parts of a file can be made by storing "pseudo-pointers":
128 integers counted in bytes from the start of a file.
129 For read-only access no data structure is needed,
130 only a bytes buffer (possibly memory-mapped directly from the filesystem)
131 with specific parts read on demand.
132
133 The data file contains "nodes" organized in a tree.
134 Each node represents a file or directory inside the working directory
135 or its parent changeset.
136 This tree has the same structure as the filesystem,
137 so a node representing a directory has child nodes representing
138 the files and subdirectories contained directly in that directory.
139
140 The docket file format
141 ----------------------
142
143 This is implemented in `rust/hg-core/src/dirstate_tree/on_disk.rs`
144 and `mercurial/dirstateutils/docket.py`.
145
146 Components of the docket file are found at fixed offsets,
147 counted in bytes from the start of the file:
148
149 * Offset 0:
150 The 12-bytes marker string "dirstate-v2\n" ending with a newline character.
151 This makes it easier to tell a dirstate-v2 file from a dirstate-v1 file,
152 although it is not strictly necessary
153 since `.hg/requires` determines which format to use.
154
155 * Offset 12:
156 The changeset node ID on the first parent of the working directory,
157 as up to 32 binary bytes.
158 If a node ID is shorter (20 bytes for SHA-1),
159 it is start-aligned and the rest of the bytes are set to zero.
160
161 * Offset 44:
162 The changeset node ID on the second parent of the working directory,
163 or all zeros if there isn’t one.
164 Also 32 binary bytes.
165
166 * Offset 76:
167 Tree metadata on 44 bytes, described below.
168 Its separation in this documentation from the rest of the docket
169 reflects a detail of the current implementation.
170 Since tree metadata is also made of fields at fixed offsets, those could
171 be inlined here by adding 76 bytes to each offset.
172
173 * Offset 120:
174 The used size of the data file, as a 32-bit big-endian integer.
175 The actual size of the data file may be larger
176 (if another Mercurial process is appending to it
177 but has not updated the docket yet).
178 That extra data must be ignored.
179
180 * Offset 124:
181 The length of the data file identifier, as a 8-bit integer.
182
183 * Offset 125:
184 The data file identifier.
185
186 * Any additional data is currently ignored, and dropped when updating the file.
187
188 Tree metadata in the docket file
189 --------------------------------
190
191 Tree metadata is similarly made of components at fixed offsets.
192 These offsets are counted in bytes from the start of tree metadata,
193 which is 76 bytes after the start of the docket file.
194
195 This metadata can be thought of as the singular root of the tree
196 formed by nodes in the data file.
197
198 * Offset 0:
199 Pseudo-pointer to the start of root nodes,
200 counted in bytes from the start of the data file,
201 as a 32-bit big-endian integer.
202 These nodes describe files and directories found directly
203 at the root of the working directory.
204
205 * Offset 4:
206 Number of root nodes, as a 32-bit big-endian integer.
207
208 * Offset 8:
209 Total number of nodes in the entire tree that "have a dirstate entry",
210 as a 32-bit big-endian integer.
211 Those nodes represent files that would be present at all in `dirstate-v1`.
212 This is typically less than the total number of nodes.
213 This counter is used to implement `len(dirstatemap)`.
214
215 * Offset 12:
216 Number of nodes in the entire tree that have a copy source,
217 as a 32-bit big-endian integer.
218 At the next commit, these files are recorded
219 as having been copied or moved/renamed from that source.
220 (A move is recorded as a copy and separate removal of the source.)
221 This counter is used to implement `len(dirstatemap.copymap)`.
222
223 * Offset 16:
224 An estimation of how many bytes of the data file
225 (within its used size) are unused, as a 32-bit big-endian integer.
226 When appending to an existing data file,
227 some existing nodes or paths can be unreachable from the new root
228 but they still take up space.
229 This counter is used to decide when to write a new data file from scratch
230 instead of appending to an existing one,
231 in order to get rid of that unreachable data
232 and avoid unbounded file size growth.
233
234 * Offset 20:
235 These four bytes are currently ignored
236 and reset to zero when updating a docket file.
237 This is an attempt at forward compatibility:
238 future Mercurial versions could use this as a bit field
239 to indicate that a dirstate has additional data or constraints.
240 Finding a dirstate file with the relevant bit unset indicates that
241 it was written by a then-older version
242 which is not aware of that future change.
243
244 * Offset 24:
245 Either 20 zero bytes, or a SHA-1 hash as 20 binary bytes.
246 When present, the hash is of ignore patterns
247 that were used for some previous run of the `status` algorithm.
248
249 * (Offset 44: end of tree metadata)
250
251 Optional hash of ignore patterns
252 --------------------------------
253
254 The implementation of `status` at `rust/hg-core/src/dirstate_tree/status.rs`
255 has been optimized such that its run time is dominated by calls
256 to `stat` for reading the filesystem metadata of a file or directory,
257 and to `readdir` for listing the contents of a directory.
258 In some cases the algorithm can skip calls to `readdir`
259 (saving significant time)
260 because the dirstate already contains enough of the relevant information
261 to build the correct `status` results.
262
263 The default configuration of `hg status` is to list unknown files
264 but not ignored files.
265 In this case, it matters for the `readdir`-skipping optimization
266 if a given file used to be ignored but became unknown
267 because `.hgignore` changed.
268 To detect the possibility of such a change,
269 the tree metadata contains an optional hash of all ignore patterns.
270
271 We define:
272
273 * "Root" ignore files as:
274
275 - `.hgignore` at the root of the repository if it exists
276 - And all files from `ui.ignore.*` config.
277
278 This set of files is sorted by the string representation of their path.
279
280 * The "expanded contents" of an ignore file is the byte string made
281 by the concatenation of its contents followed by the "expanded contents"
282 of other files included with `include:` or `subinclude:` directives,
283 in inclusion order. This definition is recursive, as included files can
284 themselves include more files.
285
286 This hash is defined as the SHA-1 of the concatenation (in sorted
287 order) of the "expanded contents" of each "root" ignore file.
288 (Note that computing this does not require actually concatenating
289 into a single contiguous byte sequence.
290 Instead a SHA-1 hasher object can be created
291 and fed separate chunks one by one.)
292
293 The data file format
294 --------------------
295
296 This is implemented in `rust/hg-core/src/dirstate_tree/on_disk.rs`
297 and `mercurial/dirstateutils/v2.py`.
298
299 The data file contains two types of data: paths and nodes.
300
301 Paths and nodes can be organized in any order in the file, except that sibling
302 nodes must be next to each other and sorted by their path.
303 Contiguity lets the parent refer to them all
304 by their count and a single pseudo-pointer,
305 instead of storing one pseudo-pointer per child node.
306 Sorting allows using binary search to find a child node with a given name
307 in `O(log(n))` byte sequence comparisons.
308
309 The current implementation writes paths and child nodes before a given node
310 for ease of figuring out the value of pseudo-pointers by the time they are to
311 be written, but this is not an obligation and readers must not rely on it.
312
313 A path is stored as a byte string anywhere in the file, without delimiter.
314 It is referred to by one or more nodes by a pseudo-pointer to its start, and its
315 length in bytes. Since there is no delimiter,
316 when a path is a substring of another the same bytes could be reused,
317 although the implementation does not exploit this as of this writing.
318
319 A node is stored on 44 bytes with components at fixed offsets. Paths and
320 child nodes relevant to a node are stored externally and referenced though
321 pseudo-pointers.
322
323 All integers are stored in big-endian. All pseudo-pointers are 32-bit integers
324 counting bytes from the start of the data file. Path lengths and positions
325 are 16-bit integers, also counted in bytes.
326
327 Node components are:
328
329 * Offset 0:
330 Pseudo-pointer to the full path of this node,
331 from the working directory root.
332
333 * Offset 4:
334 Length of the full path.
335
336 * Offset 6:
337 Position of the last `/` path separator within the full path,
338 in bytes from the start of the full path,
339 or zero if there isn’t one.
340 The part of the full path after this position is the "base name".
341 Since sibling nodes have the same parent, only their base names vary
342 and needs to be considered when doing binary search to find a given path.
343
344 * Offset 8:
345 Pseudo-pointer to the "copy source" path for this node,
346 or zero if there is no copy source.
347
348 * Offset 12:
349 Length of the copy source path, or zero if there isn’t one.
350
351 * Offset 14:
352 Pseudo-pointer to the start of child nodes.
353
354 * Offset 18:
355 Number of child nodes, as a 32-bit integer.
356 They occupy 44 times this number of bytes
357 (not counting space for paths, and further descendants).
358
359 * Offset 22:
360 Number as a 32-bit integer of descendant nodes in this subtree,
361 not including this node itself,
362 that "have a dirstate entry".
363 Those nodes represent files that would be present at all in `dirstate-v1`.
364 This is typically less than the total number of descendants.
365 This counter is used to implement `has_dir`.
366
367 * Offset 26:
368 Number as a 32-bit integer of descendant nodes in this subtree,
369 not including this node itself,
370 that represent files tracked in the working directory.
371 (For example, `hg rm` makes a file untracked.)
372 This counter is used to implement `has_tracked_dir`.
373
374 * Offset 30:
375 A `flags` fields that packs some boolean values as bits of a 16-bit integer.
376 Starting from least-significant, bit masks are::
377
378 WDIR_TRACKED = 1 << 0
379 P1_TRACKED = 1 << 1
380 P2_INFO = 1 << 2
381 MODE_EXEC_PERM = 1 << 3
382 MODE_IS_SYMLINK = 1 << 4
383 HAS_FALLBACK_EXEC = 1 << 5
384 FALLBACK_EXEC = 1 << 6
385 HAS_FALLBACK_SYMLINK = 1 << 7
386 FALLBACK_SYMLINK = 1 << 8
387 EXPECTED_STATE_IS_MODIFIED = 1 << 9
388 HAS_MODE_AND_SIZE = 1 << 10
389 HAS_MTIME = 1 << 11
390 MTIME_SECOND_AMBIGUOUS = 1 << 12
391 DIRECTORY = 1 << 13
392 ALL_UNKNOWN_RECORDED = 1 << 14
393 ALL_IGNORED_RECORDED = 1 << 15
394
395 The meaning of each bit is described below.
396
397 Other bits are unset.
398 They may be assigned meaning in the future,
399 with the limitation that Mercurial versions that pre-date such meaning
400 will always reset those bits to unset when writing nodes.
401 (A new node is written for any mutation in its subtree,
402 leaving the bytes of the old node unreachable
403 until the data file is rewritten entirely.)
404
405 * Offset 32:
406 A `size` field described below, as a 32-bit integer.
407 Unlike in dirstate-v1, negative values are not used.
408
409 * Offset 36:
410 The seconds component of an `mtime` field described below,
411 as a 32-bit integer.
412 Unlike in dirstate-v1, negative values are not used.
413 When `mtime` is used, this is number of seconds since the Unix epoch
414 truncated to its lower 31 bits.
415
416 * Offset 40:
417 The nanoseconds component of an `mtime` field described below,
418 as a 32-bit integer.
419 When `mtime` is used,
420 this is the number of nanoseconds since `mtime.seconds`,
421 always strictly less than one billion.
422
423 This may be zero if more precision is not available.
424 (This can happen because of limitations in any of Mercurial, Python,
425 libc, the operating system, …)
426
427 When comparing two mtimes and either has this component set to zero,
428 the sub-second precision of both should be ignored.
429 False positives when checking mtime equality due to clock resolution
430 are always possible and the status algorithm needs to deal with them,
431 but having too many false negatives could be harmful too.
432
433 * (Offset 44: end of this node)
434
435 The meaning of the boolean values packed in `flags` is:
436
437 `WDIR_TRACKED`
438 Set if the working directory contains a tracked file at this node’s path.
439 This is typically set and unset by `hg add` and `hg rm`.
440
441 `P1_TRACKED`
442 Set if the working directory’s first parent changeset
443 (whose node identifier is found in tree metadata)
444 contains a tracked file at this node’s path.
445 This is a cache to reduce manifest lookups.
446
447 `P2_INFO`
448 Set if the file has been involved in some merge operation.
449 Either because it was actually merged,
450 or because the version in the second parent p2 was ahead,
451 or because some rename moved it there.
452 In either case `hg status` will want it displayed as modified.
453
454 Files that would be mentioned at all in the `dirstate-v1` file format
455 have a node with at least one of the above three bits set in `dirstate-v2`.
456 Let’s call these files "tracked anywhere",
457 and "untracked" the nodes with all three of these bits unset.
458 Untracked nodes are typically for directories:
459 they hold child nodes and form the tree structure.
460 Additional untracked nodes may also exist:
461 although implementations should strive to clean up nodes
462 that are entirely unused, some may remain.
463 For example, a future version of Mercurial might in some cases
464 add nodes for untracked files or/and ignored files in the working directory
465 in order to optimize `hg status`
466 by enabling it to skip `readdir` in more cases.
467
468 `HAS_MODE_AND_SIZE`
469 Must be unset for untracked nodes.
470 For files tracked anywhere, if this is set:
471 - The `size` field is the expected file size,
472 in bytes, truncated to its lower 31 bits.
473 - The expected execute permission for the file’s owner
474 is given by `MODE_EXEC_PERM`
475 - The expected file type is given by `MODE_IS_SYMLINK`:
476 a symbolic link if set, or a normal file if unset.
477 If this is unset the expected size, permission, and file type are unknown.
478 The `size` field is unused (set to zero).
479
480 `HAS_MTIME`
481 The nodes contains a "valid" last modification time in the `mtime` field.
482
483
484 It means the `mtime` was already strictly in the past when observed,
485 meaning that later changes cannot happen in the same clock tick
486 and must cause a different modification time
487 (unless the system clock jumps back and we get unlucky,
488 which is not impossible but deemed unlikely enough).
489
490 This means that if `std::fs::symlink_metadata` later reports
491 the same modification time
492 and ignored patterns haven’t changed,
493 we can assume the node to be unchanged on disk.
494
495 The `mtime` field can then be used to skip more expensive lookup when
496 checking the status of "tracked" nodes.
497
498 It can also be set for node where `DIRECTORY` is set.
499 See `DIRECTORY` documentation for details.
500
501 `DIRECTORY`
502 When set, this entry will match a directory that exists or existed on the
503 file system.
504
505 * When `HAS_MTIME` is set a directory has been seen on the file system and
506 `mtime` matches its last modification time. However, `HAS_MTIME` not being set
507 does not indicate the lack of directory on the file system.
508
509 * When not tracked anywhere, this node does not represent an ignored or
510 unknown file on disk.
511
512 If `HAS_MTIME` is set
513 and `mtime` matches the last modification time of the directory on disk,
514 the directory is unchanged
515 and we can skip calling `std::fs::read_dir` again for this directory,
516 and iterate child dirstate nodes instead.
517 (as long as `ALL_UNKNOWN_RECORDED` and `ALL_IGNORED_RECORDED` are taken
518 into account)
519
520 `MODE_EXEC_PERM`
521 Must be unset if `HAS_MODE_AND_SIZE` is unset.
522 If `HAS_MODE_AND_SIZE` is set,
523 this indicates whether the file’s owner is expected
524 to have execute permission.
525
526 Beware that on system without fs support for this information, the value
527 stored in the dirstate might be wrong and should not be relied on.
528
529 `MODE_IS_SYMLINK`
530 Must be unset if `HAS_MODE_AND_SIZE` is unset.
531 If `HAS_MODE_AND_SIZE` is set,
532 this indicates whether the file is expected to be a symlink
533 as opposed to a normal file.
534
535 Beware that on system without fs support for this information, the value
536 stored in the dirstate might be wrong and should not be relied on.
537
538 `EXPECTED_STATE_IS_MODIFIED`
539 Must be unset for untracked nodes.
540 For:
541 - a file tracked anywhere
542 - that has expected metadata (`HAS_MODE_AND_SIZE` and `HAS_MTIME`)
543 - if that metadata matches
544 metadata found in the working directory with `stat`
545 This bit indicates the status of the file.
546 If set, the status is modified. If unset, it is clean.
547
548 In cases where `hg status` needs to read the contents of a file
549 because metadata is ambiguous, this bit lets it record the result
550 if the result is modified so that a future run of `hg status`
551 does not need to do the same again.
552 It is valid to never set this bit,
553 and consider expected metadata ambiguous if it is set.
554
555 `ALL_UNKNOWN_RECORDED`
556 If set, all "unknown" children existing on disk (at the time of the last
557 status) have been recorded and the `mtime` associated with
558 `DIRECTORY` can be used for optimization even when "unknown" file
559 are listed.
560
561 Note that the number of recorded "unknown" children can still be zero if none
562 were present.
563
564 Also note that having this flag unset does not imply that no "unknown"
565 children have been recorded. Some might be present, but there is no guarantee
566 that it will be all of them.
567
568 `ALL_IGNORED_RECORDED`
569 If set, all "ignored" children existing on disk (at the time of the last
570 status) have been recorded and the `mtime` associated with
571 `DIRECTORY` can be used for optimization even when "ignored" file
572 are listed.
573
574 Note that the number of recorded "ignored" children can still be zero if none
575 were present.
576
577 Also note that having this flag unset does not imply that no "ignored"
578 children have been recorded. Some might be present, but there is no guarantee
579 that it will be all of them.
580
581 `HAS_FALLBACK_EXEC`
582 If this flag is set, the entry carries "fallback" information for the
583 executable bit in the `FALLBACK_EXEC` flag.
584
585 Fallback information can be stored in the dirstate to keep track of
586 filesystem attribute tracked by Mercurial when the underlying file
587 system or operating system does not support that property, (e.g.
588 Windows).
589
590 `FALLBACK_EXEC`
591 Should be ignored if `HAS_FALLBACK_EXEC` is unset. If set the file for this
592 entry should be considered executable if that information cannot be
593 extracted from the file system. If unset it should be considered
594 non-executable instead.
595
596 `HAS_FALLBACK_SYMLINK`
597 If this flag is set, the entry carries "fallback" information for symbolic
598 link status in the `FALLBACK_SYMLINK` flag.
599
600 Fallback information can be stored in the dirstate to keep track of
601 filesystem attribute tracked by Mercurial when the underlying file
602 system or operating system does not support that property, (e.g.
603 Windows).
604
605 `FALLBACK_SYMLINK`
606 Should be ignored if `HAS_FALLBACK_SYMLINK` is unset. If set the file for
607 this entry should be considered a symlink if that information cannot be
608 extracted from the file system. If unset it should be considered a normal
609 file instead.
610
611 `MTIME_SECOND_AMBIGUOUS`
612 This flag is relevant only when `HAS_MTIME` is set. When set, the
613 `mtime` stored in the entry is only valid for comparison with timestamps
614 that have nanosecond information. If the available timestamp does not carry
615 nanosecond information, the `mtime` should be ignored and no optimisation
616 can be applied.
@@ -0,0 +1,72 b''
1 == New Features ==
2 * `debugrebuildfncache` now has an option to rebuild only the index files
3 * a new `bookmarks.mode` path option has been introduced to control the
4 bookmark update strategy during exchange with a peer. See `hg help paths` for
5 details.
6 * a new `bookmarks.mirror` option has been introduced. See `hg help bookmarks`
7 for details.
8 * more commands support detailed exit codes when config `ui.detailed-exit-codes` is enabled
9
10 == Default Format Change ==
11
12 == New Experimental Features ==
13
14 * '''Major feature''': version 2 of the dirstate is available (the first version is as old as Mercurial itself). It allows for much faster working copy inspection (status, diff, commit, update, etc.) and richer information (symlink and exec info on Windows, etc.). The format has been frozen with room for some future evolution and the current implementations (Python, Python + C, Python + Rust or pure Rust) should be compatible with any future change or optimization that the format allows. You can get more information [[https://www.mercurial-scm.org/repo/hg/file/tip/mercurial/helptext/internals/dirstate-v2.txt | in the internal documentation]]
15 * Added a new `web.full-garbage-collection-rate` to control performance. See
16 de2e04fe4897a554b9ef433167f11ea4feb2e09c for more information
17 * Added a new `histedit.later-commits-first` option to affect the ordering of commits in `chistedit` to match the order in `hg log -G`. It will affect the text-based version before graduating from experimental.
18
19 == Bug Fixes ==
20
21 * `hg fix --working-dir` now correctly works when in an uncommitted merge state
22 * Unintentional duplicated calls to `hg fix`'s internals were removed, making it potentially much faster
23 * `rhg cat` can be called without a revision
24 * `rhg cat` can be called with the `.` revision
25 * `rhg cat` is more robust than before with regards to edge cases. Some still remain like a tag or bookmark that is ambiguous with a nodeid prefix, only nodeids (prefixed or not) are supported as of now.
26 * `rhg cat` is even faster
27 * `rhg` (Rust fast-path for `hg`) now supports the full config list syntax
28 * `rhg` now parses some corner-cases for revsets correctly
29 * Fixed an `fsmonitor` bug on Python 3 during exception handling
30 * Lots of Windows fixes
31 * Lots of miscellaneous other fixes
32 * Removed a CPython-specific compatibility hack to improve support for alternative Python implementations
33
34 == Backwards Compatibility Changes ==
35
36
37 == Internal API Changes ==
38
39 The following functions have been removed:
40
41 * `dirstate.normal`
42 * `dirstate.normallookup`
43 * `dirstate.otherparent`
44 * `dirstate.add`
45 * `dirstate.addfile`
46 * `dirstate.remove`
47 * `dirstate.drop`
48 * `dirstate.dropfile`
49 * `dirstate.__getitem__`
50 * `dirstatemap.nonnormalentries`
51 * `dirstatemap.nonnormalset`
52 * `dirstatemap.otherparentset`
53 * `dirstatemap.non_normal_or_other_parent_paths`
54 * `dirstateitem.dm_nonnormal`
55 * `dirstateitem.dm_otherparent`
56 * `dirstateitem.merged_removed`
57 * `dirstateitem.from_p2`
58 * `dirstateitem.merged`
59 * `dirstateitem.new_merged`
60 * `dirstateitem.new_added`
61 * `dirstateitem.new_from_p2`
62 * `dirstateitem.new_possibly_dirty`
63 * `dirstateitem.new_normal`
64 * `dirstateitem.from_p2_removed`
65
66 Miscellaneous:
67
68 * `wireprotov1peer`'s `batchable` is now a simple function and not a generator
69 anymore
70 * The Rust extensions (and by extension the experimental `rhg status`) only use a tree-based dirstate in-memory, even when using dirstate-v1. See bf8837e3d7cec40fe649c47163a3154dda03fa16 for more details
71 * The Rust minimum supported version is now 1.48.0 in accordance with our policy of keeping up with Debian stable
72 * The test harness plays nicer with the NixOS sandbox No newline at end of file
This diff has been collapsed as it changes many lines, (643 lines changed) Show them Hide them
@@ -0,0 +1,643 b''
1 use crate::dirstate_tree::on_disk::DirstateV2ParseError;
2 use crate::errors::HgError;
3 use bitflags::bitflags;
4 use std::convert::{TryFrom, TryInto};
5 use std::fs;
6 use std::io;
7 use std::time::{SystemTime, UNIX_EPOCH};
8
/// The state of a file as recorded in the dirstate-v1 model,
/// serialized as one of the bytes `n`, `a`, `r` or `m`
/// (see the `TryFrom<u8>` / `Into<u8>` impls below).
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum EntryState {
    Normal,
    Added,
    Removed,
    Merged,
}
16
/// A single dirstate entry (one tracked-anywhere file).
///
/// `size` and `mtime.seconds` are truncated to 31 bits.
///
/// TODO: double-check status algorithm correctness for files
/// larger than 2 GiB or modified after 2038.
#[derive(Debug, Copy, Clone)]
pub struct DirstateEntry {
    /// Tracking state in the working directory and in both parents,
    /// plus optional exec/symlink fallback information.
    pub(crate) flags: Flags,
    /// Expected `(mode, size)` when known; `None` when unknown/ambiguous.
    mode_size: Option<(u32, u32)>,
    /// Last known modification time; `None` when unknown/ambiguous.
    mtime: Option<TruncatedTimestamp>,
}
27
bitflags! {
    /// Boolean components of a `DirstateEntry`, packed in one byte.
    pub(crate) struct Flags: u8 {
        /// The working directory contains a tracked file at this path.
        const WDIR_TRACKED = 1 << 0;
        /// The working directory’s first parent tracks this path.
        const P1_TRACKED = 1 << 1;
        /// The file was involved in some merge operation.
        const P2_INFO = 1 << 2;
        /// `FALLBACK_EXEC` carries meaningful information.
        const HAS_FALLBACK_EXEC = 1 << 3;
        /// Fallback exec-permission value, for filesystems that cannot
        /// record it.
        const FALLBACK_EXEC = 1 << 4;
        /// `FALLBACK_SYMLINK` carries meaningful information.
        const HAS_FALLBACK_SYMLINK = 1 << 5;
        /// Fallback symlink-ness value, for filesystems that cannot
        /// record it.
        const FALLBACK_SYMLINK = 1 << 6;
    }
}
39
/// A Unix timestamp with nanoseconds precision,
/// whose seconds component is truncated to its lower 31 bits.
#[derive(Debug, Copy, Clone)]
pub struct TruncatedTimestamp {
    /// Seconds since the Unix epoch, keeping only the lower 31 bits.
    truncated_seconds: u32,
    /// Always in the `0 .. 1_000_000_000` range.
    nanoseconds: u32,
}
47
48 impl TruncatedTimestamp {
49 /// Constructs from a timestamp potentially outside of the supported range,
50 /// and truncate the seconds components to its lower 31 bits.
51 ///
52 /// Panics if the nanoseconds components is not in the expected range.
53 pub fn new_truncate(seconds: i64, nanoseconds: u32) -> Self {
54 assert!(nanoseconds < NSEC_PER_SEC);
55 Self {
56 truncated_seconds: seconds as u32 & RANGE_MASK_31BIT,
57 nanoseconds,
58 }
59 }
60
61 /// Construct from components. Returns an error if they are not in the
62 /// expcted range.
63 pub fn from_already_truncated(
64 truncated_seconds: u32,
65 nanoseconds: u32,
66 ) -> Result<Self, DirstateV2ParseError> {
67 if truncated_seconds & !RANGE_MASK_31BIT == 0
68 && nanoseconds < NSEC_PER_SEC
69 {
70 Ok(Self {
71 truncated_seconds,
72 nanoseconds,
73 })
74 } else {
75 Err(DirstateV2ParseError)
76 }
77 }
78
79 pub fn for_mtime_of(metadata: &fs::Metadata) -> io::Result<Self> {
80 #[cfg(unix)]
81 {
82 use std::os::unix::fs::MetadataExt;
83 let seconds = metadata.mtime();
84 // i64Β -> u32 with value always in the `0 .. NSEC_PER_SEC` range
85 let nanoseconds = metadata.mtime_nsec().try_into().unwrap();
86 Ok(Self::new_truncate(seconds, nanoseconds))
87 }
88 #[cfg(not(unix))]
89 {
90 metadata.modified().map(Self::from)
91 }
92 }
93
94 /// The lower 31 bits of the number of seconds since the epoch.
95 pub fn truncated_seconds(&self) -> u32 {
96 self.truncated_seconds
97 }
98
99 /// The sub-second component of this timestamp, in nanoseconds.
100 /// Always in the `0 .. 1_000_000_000` range.
101 ///
102 /// This timestamp is after `(seconds, 0)` by this many nanoseconds.
103 pub fn nanoseconds(&self) -> u32 {
104 self.nanoseconds
105 }
106
107 /// Returns whether two timestamps are equal modulo 2**31 seconds.
108 ///
109 /// If this returns `true`, the original values converted from `SystemTime`
110 /// or given to `new_truncate` were very likely equal. A false positive is
111 /// possible if they were exactly a multiple of 2**31 seconds apart (around
112 /// 68 years). This is deemed very unlikely to happen by chance, especially
113 /// on filesystems that support sub-second precision.
114 ///
115 /// If someone is manipulating the modification times of some files to
116 /// intentionally make `hg status` return incorrect results, not truncating
117 /// wouldn’t help much since they can set exactly the expected timestamp.
118 ///
119 /// Sub-second precision is ignored if it is zero in either value.
120 /// Some APIs simply return zero when more precision is not available.
121 /// When comparing values from different sources, if only one is truncated
122 /// in that way, doing a simple comparison would cause many false
123 /// negatives.
124 pub fn likely_equal(self, other: Self) -> bool {
125 self.truncated_seconds == other.truncated_seconds
126 && (self.nanoseconds == other.nanoseconds
127 || self.nanoseconds == 0
128 || other.nanoseconds == 0)
129 }
130
131 pub fn likely_equal_to_mtime_of(
132 self,
133 metadata: &fs::Metadata,
134 ) -> io::Result<bool> {
135 Ok(self.likely_equal(Self::for_mtime_of(metadata)?))
136 }
137 }
138
impl From<SystemTime> for TruncatedTimestamp {
    /// Converts a `SystemTime` into a truncated timestamp,
    /// correctly handling times before the Unix epoch.
    fn from(system_time: SystemTime) -> Self {
        // On Unix, `SystemTime` is a wrapper for the `timespec` C struct:
        // https://www.gnu.org/software/libc/manual/html_node/Time-Types.html#index-struct-timespec
        // We want to effectively access its fields, but the Rust standard
        // library does not expose them. The best we can do is:
        let seconds;
        let nanoseconds;
        match system_time.duration_since(UNIX_EPOCH) {
            Ok(duration) => {
                seconds = duration.as_secs() as i64;
                nanoseconds = duration.subsec_nanos();
            }
            Err(error) => {
                // `system_time` is before `UNIX_EPOCH`.
                // We need to undo this algorithm:
                // https://github.com/rust-lang/rust/blob/6bed1f0bc3cc50c10aab26d5f94b16a00776b8a5/library/std/src/sys/unix/time.rs#L40-L41
                let negative = error.duration();
                let negative_secs = negative.as_secs() as i64;
                let negative_nanos = negative.subsec_nanos();
                if negative_nanos == 0 {
                    seconds = -negative_secs;
                    nanoseconds = 0;
                } else {
                    // For example if `system_time` was 4.3 seconds before
                    // the Unix epoch we get a Duration that represents
                    // `(-4, -0.3)` but we want `(-5, +0.7)`:
                    seconds = -1 - negative_secs;
                    nanoseconds = NSEC_PER_SEC - negative_nanos;
                }
            }
        };
        Self::new_truncate(seconds, nanoseconds)
    }
}
174
/// Number of nanoseconds in one second.
const NSEC_PER_SEC: u32 = 1_000_000_000;
/// Mask keeping the lower 31 bits of a seconds or size value.
const RANGE_MASK_31BIT: u32 = 0x7FFF_FFFF;

/// Marker mtime in the dirstate-v1 format: the modification time is
/// unknown or ambiguous and the file needs to be looked up again.
pub const MTIME_UNSET: i32 = -1;

/// A `DirstateEntry` with a size of `-2` means that it was merged from the
/// other parent. This allows revert to pick the right status back during a
/// merge.
pub const SIZE_FROM_OTHER_PARENT: i32 = -2;
/// A special value used for internal representation of special case in
/// dirstate v1 format.
pub const SIZE_NON_NORMAL: i32 = -1;
187
188 impl DirstateEntry {
189 pub fn from_v2_data(
190 wdir_tracked: bool,
191 p1_tracked: bool,
192 p2_info: bool,
193 mode_size: Option<(u32, u32)>,
194 mtime: Option<TruncatedTimestamp>,
195 fallback_exec: Option<bool>,
196 fallback_symlink: Option<bool>,
197 ) -> Self {
198 if let Some((mode, size)) = mode_size {
199 // TODO: return an error for out of range values?
200 assert!(mode & !RANGE_MASK_31BIT == 0);
201 assert!(size & !RANGE_MASK_31BIT == 0);
202 }
203 let mut flags = Flags::empty();
204 flags.set(Flags::WDIR_TRACKED, wdir_tracked);
205 flags.set(Flags::P1_TRACKED, p1_tracked);
206 flags.set(Flags::P2_INFO, p2_info);
207 if let Some(exec) = fallback_exec {
208 flags.insert(Flags::HAS_FALLBACK_EXEC);
209 if exec {
210 flags.insert(Flags::FALLBACK_EXEC);
211 }
212 }
213 if let Some(exec) = fallback_symlink {
214 flags.insert(Flags::HAS_FALLBACK_SYMLINK);
215 if exec {
216 flags.insert(Flags::FALLBACK_SYMLINK);
217 }
218 }
219 Self {
220 flags,
221 mode_size,
222 mtime,
223 }
224 }
225
226 pub fn from_v1_data(
227 state: EntryState,
228 mode: i32,
229 size: i32,
230 mtime: i32,
231 ) -> Self {
232 match state {
233 EntryState::Normal => {
234 if size == SIZE_FROM_OTHER_PARENT {
235 Self {
236 // might be missing P1_TRACKED
237 flags: Flags::WDIR_TRACKED | Flags::P2_INFO,
238 mode_size: None,
239 mtime: None,
240 }
241 } else if size == SIZE_NON_NORMAL {
242 Self {
243 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
244 mode_size: None,
245 mtime: None,
246 }
247 } else if mtime == MTIME_UNSET {
248 // TODO:Β return an error for negative values?
249 let mode = u32::try_from(mode).unwrap();
250 let size = u32::try_from(size).unwrap();
251 Self {
252 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
253 mode_size: Some((mode, size)),
254 mtime: None,
255 }
256 } else {
257 // TODO:Β return an error for negative values?
258 let mode = u32::try_from(mode).unwrap();
259 let size = u32::try_from(size).unwrap();
260 let mtime = u32::try_from(mtime).unwrap();
261 let mtime =
262 TruncatedTimestamp::from_already_truncated(mtime, 0)
263 .unwrap();
264 Self {
265 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
266 mode_size: Some((mode, size)),
267 mtime: Some(mtime),
268 }
269 }
270 }
271 EntryState::Added => Self {
272 flags: Flags::WDIR_TRACKED,
273 mode_size: None,
274 mtime: None,
275 },
276 EntryState::Removed => Self {
277 flags: if size == SIZE_NON_NORMAL {
278 Flags::P1_TRACKED | Flags::P2_INFO
279 } else if size == SIZE_FROM_OTHER_PARENT {
280 // We don’t know if P1_TRACKED should be set (file history)
281 Flags::P2_INFO
282 } else {
283 Flags::P1_TRACKED
284 },
285 mode_size: None,
286 mtime: None,
287 },
288 EntryState::Merged => Self {
289 flags: Flags::WDIR_TRACKED
290 | Flags::P1_TRACKED // might not be true because of rename ?
291 | Flags::P2_INFO, // might not be true because of rename ?
292 mode_size: None,
293 mtime: None,
294 },
295 }
296 }
297
298 /// Creates a new entry in "removed" state.
299 ///
300 /// `size` is expected to be zero, `SIZE_NON_NORMAL`, or
301 /// `SIZE_FROM_OTHER_PARENT`
302 pub fn new_removed(size: i32) -> Self {
303 Self::from_v1_data(EntryState::Removed, 0, size, 0)
304 }
305
306 pub fn tracked(&self) -> bool {
307 self.flags.contains(Flags::WDIR_TRACKED)
308 }
309
310 pub fn p1_tracked(&self) -> bool {
311 self.flags.contains(Flags::P1_TRACKED)
312 }
313
314 fn in_either_parent(&self) -> bool {
315 self.flags.intersects(Flags::P1_TRACKED | Flags::P2_INFO)
316 }
317
318 pub fn removed(&self) -> bool {
319 self.in_either_parent() && !self.flags.contains(Flags::WDIR_TRACKED)
320 }
321
322 pub fn p2_info(&self) -> bool {
323 self.flags.contains(Flags::WDIR_TRACKED | Flags::P2_INFO)
324 }
325
326 pub fn added(&self) -> bool {
327 self.flags.contains(Flags::WDIR_TRACKED) && !self.in_either_parent()
328 }
329
330 pub fn maybe_clean(&self) -> bool {
331 if !self.flags.contains(Flags::WDIR_TRACKED) {
332 false
333 } else if !self.flags.contains(Flags::P1_TRACKED) {
334 false
335 } else if self.flags.contains(Flags::P2_INFO) {
336 false
337 } else {
338 true
339 }
340 }
341
342 pub fn any_tracked(&self) -> bool {
343 self.flags.intersects(
344 Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO,
345 )
346 }
347
348 /// Returns `(wdir_tracked, p1_tracked, p2_info, mode_size, mtime)`
349 pub(crate) fn v2_data(
350 &self,
351 ) -> (
352 bool,
353 bool,
354 bool,
355 Option<(u32, u32)>,
356 Option<TruncatedTimestamp>,
357 Option<bool>,
358 Option<bool>,
359 ) {
360 if !self.any_tracked() {
361 // TODO: return an Option instead?
362 panic!("Accessing v1_state of an untracked DirstateEntry")
363 }
364 let wdir_tracked = self.flags.contains(Flags::WDIR_TRACKED);
365 let p1_tracked = self.flags.contains(Flags::P1_TRACKED);
366 let p2_info = self.flags.contains(Flags::P2_INFO);
367 let mode_size = self.mode_size;
368 let mtime = self.mtime;
369 (
370 wdir_tracked,
371 p1_tracked,
372 p2_info,
373 mode_size,
374 mtime,
375 self.get_fallback_exec(),
376 self.get_fallback_symlink(),
377 )
378 }
379
380 fn v1_state(&self) -> EntryState {
381 if !self.any_tracked() {
382 // TODO: return an Option instead?
383 panic!("Accessing v1_state of an untracked DirstateEntry")
384 }
385 if self.removed() {
386 EntryState::Removed
387 } else if self
388 .flags
389 .contains(Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO)
390 {
391 EntryState::Merged
392 } else if self.added() {
393 EntryState::Added
394 } else {
395 EntryState::Normal
396 }
397 }
398
399 fn v1_mode(&self) -> i32 {
400 if let Some((mode, _size)) = self.mode_size {
401 i32::try_from(mode).unwrap()
402 } else {
403 0
404 }
405 }
406
407 fn v1_size(&self) -> i32 {
408 if !self.any_tracked() {
409 // TODO: return an Option instead?
410 panic!("Accessing v1_size of an untracked DirstateEntry")
411 }
412 if self.removed()
413 && self.flags.contains(Flags::P1_TRACKED | Flags::P2_INFO)
414 {
415 SIZE_NON_NORMAL
416 } else if self.flags.contains(Flags::P2_INFO) {
417 SIZE_FROM_OTHER_PARENT
418 } else if self.removed() {
419 0
420 } else if self.added() {
421 SIZE_NON_NORMAL
422 } else if let Some((_mode, size)) = self.mode_size {
423 i32::try_from(size).unwrap()
424 } else {
425 SIZE_NON_NORMAL
426 }
427 }
428
429 fn v1_mtime(&self) -> i32 {
430 if !self.any_tracked() {
431 // TODO: return an Option instead?
432 panic!("Accessing v1_mtime of an untracked DirstateEntry")
433 }
434 if self.removed() {
435 0
436 } else if self.flags.contains(Flags::P2_INFO) {
437 MTIME_UNSET
438 } else if !self.flags.contains(Flags::P1_TRACKED) {
439 MTIME_UNSET
440 } else if let Some(mtime) = self.mtime {
441 i32::try_from(mtime.truncated_seconds()).unwrap()
442 } else {
443 MTIME_UNSET
444 }
445 }
446
447 // TODO: return `Option<EntryState>`? None when `!self.any_tracked`
448 pub fn state(&self) -> EntryState {
449 self.v1_state()
450 }
451
452 // TODO: return Option?
453 pub fn mode(&self) -> i32 {
454 self.v1_mode()
455 }
456
457 // TODO: return Option?
458 pub fn size(&self) -> i32 {
459 self.v1_size()
460 }
461
462 // TODO: return Option?
463 pub fn mtime(&self) -> i32 {
464 self.v1_mtime()
465 }
466
467 pub fn get_fallback_exec(&self) -> Option<bool> {
468 if self.flags.contains(Flags::HAS_FALLBACK_EXEC) {
469 Some(self.flags.contains(Flags::FALLBACK_EXEC))
470 } else {
471 None
472 }
473 }
474
475 pub fn set_fallback_exec(&mut self, value: Option<bool>) {
476 match value {
477 None => {
478 self.flags.remove(Flags::HAS_FALLBACK_EXEC);
479 self.flags.remove(Flags::FALLBACK_EXEC);
480 }
481 Some(exec) => {
482 self.flags.insert(Flags::HAS_FALLBACK_EXEC);
483 if exec {
484 self.flags.insert(Flags::FALLBACK_EXEC);
485 }
486 }
487 }
488 }
489
490 pub fn get_fallback_symlink(&self) -> Option<bool> {
491 if self.flags.contains(Flags::HAS_FALLBACK_SYMLINK) {
492 Some(self.flags.contains(Flags::FALLBACK_SYMLINK))
493 } else {
494 None
495 }
496 }
497
498 pub fn set_fallback_symlink(&mut self, value: Option<bool>) {
499 match value {
500 None => {
501 self.flags.remove(Flags::HAS_FALLBACK_SYMLINK);
502 self.flags.remove(Flags::FALLBACK_SYMLINK);
503 }
504 Some(symlink) => {
505 self.flags.insert(Flags::HAS_FALLBACK_SYMLINK);
506 if symlink {
507 self.flags.insert(Flags::FALLBACK_SYMLINK);
508 }
509 }
510 }
511 }
512
513 pub fn truncated_mtime(&self) -> Option<TruncatedTimestamp> {
514 self.mtime
515 }
516
517 pub fn drop_merge_data(&mut self) {
518 if self.flags.contains(Flags::P2_INFO) {
519 self.flags.remove(Flags::P2_INFO);
520 self.mode_size = None;
521 self.mtime = None;
522 }
523 }
524
525 pub fn set_possibly_dirty(&mut self) {
526 self.mtime = None
527 }
528
529 pub fn set_clean(
530 &mut self,
531 mode: u32,
532 size: u32,
533 mtime: TruncatedTimestamp,
534 ) {
535 let size = size & RANGE_MASK_31BIT;
536 self.flags.insert(Flags::WDIR_TRACKED | Flags::P1_TRACKED);
537 self.mode_size = Some((mode, size));
538 self.mtime = Some(mtime);
539 }
540
541 pub fn set_tracked(&mut self) {
542 self.flags.insert(Flags::WDIR_TRACKED);
543 // `set_tracked` is replacing various `normallookup` call. So we mark
544 // the files as needing lookup
545 //
546 // Consider dropping this in the future in favor of something less
547 // broad.
548 self.mtime = None;
549 }
550
551 pub fn set_untracked(&mut self) {
552 self.flags.remove(Flags::WDIR_TRACKED);
553 self.mode_size = None;
554 self.mtime = None;
555 }
556
557 /// Returns `(state, mode, size, mtime)` for the puprose of serialization
558 /// in the dirstate-v1 format.
559 ///
560 /// This includes marker values such as `mtime == -1`. In the future we may
561 /// want to not represent these cases that way in memory, but serialization
562 /// will need to keep the same format.
563 pub fn v1_data(&self) -> (u8, i32, i32, i32) {
564 (
565 self.v1_state().into(),
566 self.v1_mode(),
567 self.v1_size(),
568 self.v1_mtime(),
569 )
570 }
571
572 pub(crate) fn is_from_other_parent(&self) -> bool {
573 self.state() == EntryState::Normal
574 && self.size() == SIZE_FROM_OTHER_PARENT
575 }
576
577 // TODO: other platforms
578 #[cfg(unix)]
579 pub fn mode_changed(
580 &self,
581 filesystem_metadata: &std::fs::Metadata,
582 ) -> bool {
583 use std::os::unix::fs::MetadataExt;
584 const EXEC_BIT_MASK: u32 = 0o100;
585 let dirstate_exec_bit = (self.mode() as u32) & EXEC_BIT_MASK;
586 let fs_exec_bit = filesystem_metadata.mode() & EXEC_BIT_MASK;
587 dirstate_exec_bit != fs_exec_bit
588 }
589
590 /// Returns a `(state, mode, size, mtime)` tuple as for
591 /// `DirstateMapMethods::debug_iter`.
592 pub fn debug_tuple(&self) -> (u8, i32, i32, i32) {
593 (self.state().into(), self.mode(), self.size(), self.mtime())
594 }
595
596 /// True if the stored mtime would be ambiguous with the current time
597 pub fn need_delay(&self, now: TruncatedTimestamp) -> bool {
598 if let Some(mtime) = self.mtime {
599 self.state() == EntryState::Normal
600 && mtime.truncated_seconds() == now.truncated_seconds()
601 } else {
602 false
603 }
604 }
605 }
606
607 impl EntryState {
608 pub fn is_tracked(self) -> bool {
609 use EntryState::*;
610 match self {
611 Normal | Added | Merged => true,
612 Removed => false,
613 }
614 }
615 }
616
617 impl TryFrom<u8> for EntryState {
618 type Error = HgError;
619
620 fn try_from(value: u8) -> Result<Self, Self::Error> {
621 match value {
622 b'n' => Ok(EntryState::Normal),
623 b'a' => Ok(EntryState::Added),
624 b'r' => Ok(EntryState::Removed),
625 b'm' => Ok(EntryState::Merged),
626 _ => Err(HgError::CorruptedRepository(format!(
627 "Incorrect dirstate entry state {}",
628 value
629 ))),
630 }
631 }
632 }
633
634 impl Into<u8> for EntryState {
635 fn into(self) -> u8 {
636 match self {
637 EntryState::Normal => b'n',
638 EntryState::Added => b'a',
639 EntryState::Removed => b'r',
640 EntryState::Merged => b'm',
641 }
642 }
643 }
@@ -0,0 +1,88 b''
1 use crate::errors::HgError;
2 use crate::repo::Repo;
3 use crate::revlog::path_encode::path_encode;
4 use crate::revlog::revlog::{Revlog, RevlogError};
5 use crate::revlog::NodePrefix;
6 use crate::revlog::Revision;
7 use crate::utils::files::get_path_from_bytes;
8 use crate::utils::hg_path::HgPath;
9 use crate::utils::SliceExt;
10 use std::path::PathBuf;
11
12 /// A specialized `Revlog` to work with file data logs.
13 pub struct Filelog {
14 /// The generic `revlog` format.
15 revlog: Revlog,
16 }
17
18 impl Filelog {
19 pub fn open(repo: &Repo, file_path: &HgPath) -> Result<Self, HgError> {
20 let index_path = store_path(file_path, b".i");
21 let data_path = store_path(file_path, b".d");
22 let revlog = Revlog::open(repo, index_path, Some(&data_path))?;
23 Ok(Self { revlog })
24 }
25
26 /// The given node ID is that of the file as found in a manifest, not of a
27 /// changeset.
28 pub fn data_for_node(
29 &self,
30 file_node: impl Into<NodePrefix>,
31 ) -> Result<FilelogEntry, RevlogError> {
32 let file_rev = self.revlog.rev_from_node(file_node.into())?;
33 self.data_for_rev(file_rev)
34 }
35
36 /// The given revision is that of the file as found in a manifest, not of a
37 /// changeset.
38 pub fn data_for_rev(
39 &self,
40 file_rev: Revision,
41 ) -> Result<FilelogEntry, RevlogError> {
42 let data: Vec<u8> = self.revlog.get_rev_data(file_rev)?;
43 Ok(FilelogEntry(data.into()))
44 }
45 }
46
47 fn store_path(hg_path: &HgPath, suffix: &[u8]) -> PathBuf {
48 let encoded_bytes =
49 path_encode(&[b"data/", hg_path.as_bytes(), suffix].concat());
50 get_path_from_bytes(&encoded_bytes).into()
51 }
52
/// The raw, possibly metadata-prefixed, contents of one filelog revision.
pub struct FilelogEntry(Vec<u8>);

impl FilelogEntry {
    /// Split into metadata and data
    ///
    /// Revisions carrying copy/rename metadata start with a `\x01\n`
    /// delimiter, then the metadata, then the same delimiter again,
    /// followed by the actual file data.
    pub fn split(&self) -> Result<(Option<&[u8]>, &[u8]), HgError> {
        const DELIMITER: &[u8; 2] = &[b'\x01', b'\n'];

        if let Some(rest) = self.0.drop_prefix(DELIMITER) {
            if let Some((metadata, data)) = rest.split_2_by_slice(DELIMITER) {
                Ok((Some(metadata), data))
            } else {
                // An opening delimiter without a closing one means the
                // stored revision is malformed.
                Err(HgError::corrupted(
                    "Missing metadata end delimiter in filelog entry",
                ))
            }
        } else {
            // No leading delimiter: the whole buffer is file data.
            Ok((None, &self.0))
        }
    }

    /// Returns the file contents at this revision, stripped of any metadata
    pub fn data(&self) -> Result<&[u8], HgError> {
        let (_metadata, data) = self.split()?;
        Ok(data)
    }

    /// Consume the entry, and convert it into data, discarding any metadata,
    /// if present.
    pub fn into_data(self) -> Result<Vec<u8>, HgError> {
        // With metadata present the data slice must be copied out; without
        // it, the whole owned buffer is returned as-is (no copy).
        if let (Some(_metadata), data) = self.split()? {
            Ok(data.to_owned())
        } else {
            Ok(self.0)
        }
    }
}
@@ -0,0 +1,100 b''
1 use crate::errors::{HgError, IoErrorContext, IoResultExt};
2 use memmap2::{Mmap, MmapOptions};
3 use std::io::ErrorKind;
4 use std::path::{Path, PathBuf};
5
/// Filesystem access abstraction for the contents of a given "base" diretory
#[derive(Clone, Copy)]
pub struct Vfs<'a> {
    // Every public operation resolves its argument relative to this root.
    pub(crate) base: &'a Path,
}

// Internal marker carrying the original `NotFound` error together with the
// full path it was produced for, so callers can re-contextualize it.
struct FileNotFound(std::io::Error, PathBuf);
13
impl Vfs<'_> {
    /// Returns `relative_path` resolved against the base directory.
    pub fn join(&self, relative_path: impl AsRef<Path>) -> PathBuf {
        self.base.join(relative_path)
    }

    /// Reads the whole file into memory, wrapping I/O errors with the
    /// path being read.
    pub fn read(
        &self,
        relative_path: impl AsRef<Path>,
    ) -> Result<Vec<u8>, HgError> {
        let path = self.join(relative_path);
        std::fs::read(&path).when_reading_file(&path)
    }

    /// Shared implementation for `mmap_open_opt` and `mmap_open`: a
    /// missing file is reported through the *inner* `Result` so each
    /// caller can decide whether that is an error or simply `None`.
    fn mmap_open_gen(
        &self,
        relative_path: impl AsRef<Path>,
    ) -> Result<Result<Mmap, FileNotFound>, HgError> {
        let path = self.join(relative_path);
        let file = match std::fs::File::open(&path) {
            Err(err) => {
                if let ErrorKind::NotFound = err.kind() {
                    return Ok(Err(FileNotFound(err, path)));
                };
                return (Err(err)).when_reading_file(&path);
            }
            Ok(file) => file,
        };
        // TODO: what are the safety requirements here?
        // NOTE(review): the mapping is unsound if the file is truncated or
        // rewritten in place while mapped — presumably callers only mmap
        // append-only/immutable store files; confirm.
        let mmap = unsafe { MmapOptions::new().map(&file) }
            .when_reading_file(&path)?;
        Ok(Ok(mmap))
    }

    /// Memory-maps the given file, returning `Ok(None)` when it does not
    /// exist instead of an error.
    pub fn mmap_open_opt(
        &self,
        relative_path: impl AsRef<Path>,
    ) -> Result<Option<Mmap>, HgError> {
        self.mmap_open_gen(relative_path).map(|res| res.ok())
    }

    /// Memory-maps the given file; a missing file is reported as an error
    /// with the full path attached.
    pub fn mmap_open(
        &self,
        relative_path: impl AsRef<Path>,
    ) -> Result<Mmap, HgError> {
        match self.mmap_open_gen(relative_path)? {
            Err(FileNotFound(err, path)) => Err(err).when_reading_file(&path),
            Ok(res) => Ok(res),
        }
    }

    /// Renames `relative_from` to `relative_to` (both resolved against the
    /// base), attaching both paths to any I/O error.
    pub fn rename(
        &self,
        relative_from: impl AsRef<Path>,
        relative_to: impl AsRef<Path>,
    ) -> Result<(), HgError> {
        let from = self.join(relative_from);
        let to = self.join(relative_to);
        std::fs::rename(&from, &to)
            .with_context(|| IoErrorContext::RenamingFile { from, to })
    }
}
75
76 fn fs_metadata(
77 path: impl AsRef<Path>,
78 ) -> Result<Option<std::fs::Metadata>, HgError> {
79 let path = path.as_ref();
80 match std::fs::metadata(path) {
81 Ok(meta) => Ok(Some(meta)),
82 Err(error) => match error.kind() {
83 // TODO: when we require a Rust version where `NotADirectory` is
84 // stable, invert this logic and return None for it and `NotFound`
85 // and propagate any other error.
86 ErrorKind::PermissionDenied => Err(error).with_context(|| {
87 IoErrorContext::ReadingMetadata(path.to_owned())
88 }),
89 _ => Ok(None),
90 },
91 }
92 }
93
94 pub(crate) fn is_dir(path: impl AsRef<Path>) -> Result<bool, HgError> {
95 Ok(fs_metadata(path)?.map_or(false, |meta| meta.is_dir()))
96 }
97
98 pub(crate) fn is_file(path: impl AsRef<Path>) -> Result<bool, HgError> {
99 Ok(fs_metadata(path)?.map_or(false, |meta| meta.is_file()))
100 }
@@ -0,0 +1,286 b''
1 use cpython::exc;
2 use cpython::ObjectProtocol;
3 use cpython::PyBytes;
4 use cpython::PyErr;
5 use cpython::PyNone;
6 use cpython::PyObject;
7 use cpython::PyResult;
8 use cpython::Python;
9 use cpython::PythonObject;
10 use hg::dirstate::DirstateEntry;
11 use hg::dirstate::EntryState;
12 use hg::dirstate::TruncatedTimestamp;
13 use std::cell::Cell;
14 use std::convert::TryFrom;
15
16 py_class!(pub class DirstateItem |py| {
17 data entry: Cell<DirstateEntry>;
18
19 def __new__(
20 _cls,
21 wc_tracked: bool = false,
22 p1_tracked: bool = false,
23 p2_info: bool = false,
24 has_meaningful_data: bool = true,
25 has_meaningful_mtime: bool = true,
26 parentfiledata: Option<(u32, u32, (u32, u32))> = None,
27 fallback_exec: Option<bool> = None,
28 fallback_symlink: Option<bool> = None,
29
30 ) -> PyResult<DirstateItem> {
31 let mut mode_size_opt = None;
32 let mut mtime_opt = None;
33 if let Some((mode, size, mtime)) = parentfiledata {
34 if has_meaningful_data {
35 mode_size_opt = Some((mode, size))
36 }
37 if has_meaningful_mtime {
38 mtime_opt = Some(timestamp(py, mtime)?)
39 }
40 }
41 let entry = DirstateEntry::from_v2_data(
42 wc_tracked,
43 p1_tracked,
44 p2_info,
45 mode_size_opt,
46 mtime_opt,
47 fallback_exec,
48 fallback_symlink,
49 );
50 DirstateItem::create_instance(py, Cell::new(entry))
51 }
52
53 @property
54 def state(&self) -> PyResult<PyBytes> {
55 let state_byte: u8 = self.entry(py).get().state().into();
56 Ok(PyBytes::new(py, &[state_byte]))
57 }
58
59 @property
60 def mode(&self) -> PyResult<i32> {
61 Ok(self.entry(py).get().mode())
62 }
63
64 @property
65 def size(&self) -> PyResult<i32> {
66 Ok(self.entry(py).get().size())
67 }
68
69 @property
70 def mtime(&self) -> PyResult<i32> {
71 Ok(self.entry(py).get().mtime())
72 }
73
74 @property
75 def has_fallback_exec(&self) -> PyResult<bool> {
76 match self.entry(py).get().get_fallback_exec() {
77 Some(_) => Ok(true),
78 None => Ok(false),
79 }
80 }
81
82 @property
83 def fallback_exec(&self) -> PyResult<Option<bool>> {
84 match self.entry(py).get().get_fallback_exec() {
85 Some(exec) => Ok(Some(exec)),
86 None => Ok(None),
87 }
88 }
89
90 @fallback_exec.setter
91 def set_fallback_exec(&self, value: Option<PyObject>) -> PyResult<()> {
92 match value {
93 None => {self.entry(py).get().set_fallback_exec(None);},
94 Some(value) => {
95 if value.is_none(py) {
96 self.entry(py).get().set_fallback_exec(None);
97 } else {
98 self.entry(py).get().set_fallback_exec(
99 Some(value.is_true(py)?)
100 );
101 }},
102 }
103 Ok(())
104 }
105
106 @property
107 def has_fallback_symlink(&self) -> PyResult<bool> {
108 match self.entry(py).get().get_fallback_symlink() {
109 Some(_) => Ok(true),
110 None => Ok(false),
111 }
112 }
113
114 @property
115 def fallback_symlink(&self) -> PyResult<Option<bool>> {
116 match self.entry(py).get().get_fallback_symlink() {
117 Some(symlink) => Ok(Some(symlink)),
118 None => Ok(None),
119 }
120 }
121
122 @fallback_symlink.setter
123 def set_fallback_symlink(&self, value: Option<PyObject>) -> PyResult<()> {
124 match value {
125 None => {self.entry(py).get().set_fallback_symlink(None);},
126 Some(value) => {
127 if value.is_none(py) {
128 self.entry(py).get().set_fallback_symlink(None);
129 } else {
130 self.entry(py).get().set_fallback_symlink(
131 Some(value.is_true(py)?)
132 );
133 }},
134 }
135 Ok(())
136 }
137
138 @property
139 def tracked(&self) -> PyResult<bool> {
140 Ok(self.entry(py).get().tracked())
141 }
142
143 @property
144 def p1_tracked(&self) -> PyResult<bool> {
145 Ok(self.entry(py).get().p1_tracked())
146 }
147
148 @property
149 def added(&self) -> PyResult<bool> {
150 Ok(self.entry(py).get().added())
151 }
152
153
154 @property
155 def p2_info(&self) -> PyResult<bool> {
156 Ok(self.entry(py).get().p2_info())
157 }
158
159 @property
160 def removed(&self) -> PyResult<bool> {
161 Ok(self.entry(py).get().removed())
162 }
163
164 @property
165 def maybe_clean(&self) -> PyResult<bool> {
166 Ok(self.entry(py).get().maybe_clean())
167 }
168
169 @property
170 def any_tracked(&self) -> PyResult<bool> {
171 Ok(self.entry(py).get().any_tracked())
172 }
173
174 def v1_state(&self) -> PyResult<PyBytes> {
175 let (state, _mode, _size, _mtime) = self.entry(py).get().v1_data();
176 let state_byte: u8 = state.into();
177 Ok(PyBytes::new(py, &[state_byte]))
178 }
179
180 def v1_mode(&self) -> PyResult<i32> {
181 let (_state, mode, _size, _mtime) = self.entry(py).get().v1_data();
182 Ok(mode)
183 }
184
185 def v1_size(&self) -> PyResult<i32> {
186 let (_state, _mode, size, _mtime) = self.entry(py).get().v1_data();
187 Ok(size)
188 }
189
190 def v1_mtime(&self) -> PyResult<i32> {
191 let (_state, _mode, _size, mtime) = self.entry(py).get().v1_data();
192 Ok(mtime)
193 }
194
195 def need_delay(&self, now: (u32, u32)) -> PyResult<bool> {
196 let now = timestamp(py, now)?;
197 Ok(self.entry(py).get().need_delay(now))
198 }
199
200 def mtime_likely_equal_to(&self, other: (u32, u32)) -> PyResult<bool> {
201 if let Some(mtime) = self.entry(py).get().truncated_mtime() {
202 Ok(mtime.likely_equal(timestamp(py, other)?))
203 } else {
204 Ok(false)
205 }
206 }
207
208 @classmethod
209 def from_v1_data(
210 _cls,
211 state: PyBytes,
212 mode: i32,
213 size: i32,
214 mtime: i32,
215 ) -> PyResult<Self> {
216 let state = <[u8; 1]>::try_from(state.data(py))
217 .ok()
218 .and_then(|state| EntryState::try_from(state[0]).ok())
219 .ok_or_else(|| PyErr::new::<exc::ValueError, _>(py, "invalid state"))?;
220 let entry = DirstateEntry::from_v1_data(state, mode, size, mtime);
221 DirstateItem::create_instance(py, Cell::new(entry))
222 }
223
224 def drop_merge_data(&self) -> PyResult<PyNone> {
225 self.update(py, |entry| entry.drop_merge_data());
226 Ok(PyNone)
227 }
228
229 def set_clean(
230 &self,
231 mode: u32,
232 size: u32,
233 mtime: (u32, u32),
234 ) -> PyResult<PyNone> {
235 let mtime = timestamp(py, mtime)?;
236 self.update(py, |entry| entry.set_clean(mode, size, mtime));
237 Ok(PyNone)
238 }
239
240 def set_possibly_dirty(&self) -> PyResult<PyNone> {
241 self.update(py, |entry| entry.set_possibly_dirty());
242 Ok(PyNone)
243 }
244
245 def set_tracked(&self) -> PyResult<PyNone> {
246 self.update(py, |entry| entry.set_tracked());
247 Ok(PyNone)
248 }
249
250 def set_untracked(&self) -> PyResult<PyNone> {
251 self.update(py, |entry| entry.set_untracked());
252 Ok(PyNone)
253 }
254 });
255
256 impl DirstateItem {
257 pub fn new_as_pyobject(
258 py: Python<'_>,
259 entry: DirstateEntry,
260 ) -> PyResult<PyObject> {
261 Ok(DirstateItem::create_instance(py, Cell::new(entry))?.into_object())
262 }
263
264 pub fn get_entry(&self, py: Python<'_>) -> DirstateEntry {
265 self.entry(py).get()
266 }
267
268 // TODO: Use https://doc.rust-lang.org/std/cell/struct.Cell.html#method.update instead when it’s stable
269 pub fn update(&self, py: Python<'_>, f: impl FnOnce(&mut DirstateEntry)) {
270 let mut entry = self.entry(py).get();
271 f(&mut entry);
272 self.entry(py).set(entry)
273 }
274 }
275
276 pub(crate) fn timestamp(
277 py: Python<'_>,
278 (s, ns): (u32, u32),
279 ) -> PyResult<TruncatedTimestamp> {
280 TruncatedTimestamp::from_already_truncated(s, ns).map_err(|_| {
281 PyErr::new::<exc::ValueError, _>(
282 py,
283 "expected mtime truncated to 31 bits",
284 )
285 })
286 }
@@ -0,0 +1,56 b''
1 use cpython::{PyBytes, Python};
2 use stable_deref_trait::StableDeref;
3
/// Safe abstraction over a `PyBytes` together with the `&[u8]` slice
/// that borrows it. Implements `Deref<Target = [u8]>`.
///
/// Calling `PyBytes::data` requires a GIL marker but we want to access the
/// data in a thread that (ideally) does not need to acquire the GIL.
/// This type allows separating the call an the use.
///
/// It also enables using a (wrapped) `PyBytes` in GIL-unaware generic code.
pub struct PyBytesDeref {
    #[allow(unused)]
    keep_alive: PyBytes,

    /// Borrows the buffer inside `self.keep_alive`,
    /// but the borrow-checker cannot express self-referential structs.
    // Python bytes objects are immutable and their buffer never moves,
    // so this pointer stays valid for as long as `keep_alive` is held.
    data: *const [u8],
}

impl PyBytesDeref {
    /// Captures the byte buffer of `bytes` while the GIL is held, so it
    /// can later be dereferenced without a `Python` token.
    pub fn new(py: Python, bytes: PyBytes) -> Self {
        Self {
            data: bytes.data(py),
            keep_alive: bytes,
        }
    }

    /// Gives back the owned `PyBytes`, ending the self-borrow.
    pub fn unwrap(self) -> PyBytes {
        self.keep_alive
    }
}

impl std::ops::Deref for PyBytesDeref {
    type Target = [u8];

    fn deref(&self) -> &[u8] {
        // Safety: the raw pointer is valid as long as the PyBytes is still
        // alive, and the returned slice borrows `self`.
        unsafe { &*self.data }
    }
}

// Safety: `deref` always returns the same (stable) buffer address because
// `keep_alive` is never replaced while `self` exists.
unsafe impl StableDeref for PyBytesDeref {}

fn require_send<T: Send>() {}

// Referencing the generic function with this turbofish forces the
// `PyBytes: Send` bound to be checked at compile time.
#[allow(unused)]
fn static_assert_pybytes_is_send() {
    require_send::<PyBytes>;
}

// Safety: PyBytes is Send. Raw pointers are not by default,
// but here sending one to another thread is fine since we ensure it stays
// valid.
unsafe impl Send for PyBytesDeref {}
@@ -0,0 +1,48 b''
1 // path utils module
2 //
3 // This software may be used and distributed according to the terms of the
4 // GNU General Public License version 2 or any later version.
5
6 use crate::error::CommandError;
7 use crate::ui::UiError;
8 use hg::repo::Repo;
9 use hg::utils::current_dir;
10 use hg::utils::files::{get_bytes_from_path, relativize_path};
11 use hg::utils::hg_path::HgPath;
12 use hg::utils::hg_path::HgPathBuf;
13 use std::borrow::Cow;
14
15 pub fn relativize_paths(
16 repo: &Repo,
17 paths: impl IntoIterator<Item = impl AsRef<HgPath>>,
18 mut callback: impl FnMut(Cow<[u8]>) -> Result<(), UiError>,
19 ) -> Result<(), CommandError> {
20 let cwd = current_dir()?;
21 let repo_root = repo.working_directory_path();
22 let repo_root = cwd.join(repo_root); // Make it absolute
23 let repo_root_hgpath =
24 HgPathBuf::from(get_bytes_from_path(repo_root.to_owned()));
25 let outside_repo: bool;
26 let cwd_hgpath: HgPathBuf;
27
28 if let Ok(cwd_relative_to_repo) = cwd.strip_prefix(&repo_root) {
29 // The current directory is inside the repo, so we can work with
30 // relative paths
31 outside_repo = false;
32 cwd_hgpath =
33 HgPathBuf::from(get_bytes_from_path(cwd_relative_to_repo));
34 } else {
35 outside_repo = true;
36 cwd_hgpath = HgPathBuf::from(get_bytes_from_path(cwd));
37 }
38
39 for file in paths {
40 if outside_repo {
41 let file = repo_root_hgpath.join(file.as_ref());
42 callback(relativize_path(&file, &cwd_hgpath))?;
43 } else {
44 callback(relativize_path(file.as_ref(), &cwd_hgpath))?;
45 }
46 }
47 Ok(())
48 }
@@ -0,0 +1,52 b''
1 """
2 List-valued configuration keys have an ad-hoc microsyntax. From `hg help config`:
3
4 > List values are separated by whitespace or comma, except when values are
5 > placed in double quotation marks:
6 >
7 > allow_read = "John Doe, PhD", brian, betty
8 >
9 > Quotation marks can be escaped by prefixing them with a backslash. Only
10 > quotation marks at the beginning of a word is counted as a quotation
11 > (e.g., ``foo"bar baz`` is the list of ``foo"bar`` and ``baz``).
12
13 That help documentation is fairly light on details, the actual parser has many
14 other edge cases. This test tries to cover them.
15 """
16
17 from mercurial.utils import stringutil
18
19
def assert_parselist(input, expected):
    """Check that ``stringutil.parselist(input)`` returns ``expected``,
    raising an AssertionError showing input/actual/expected otherwise."""
    actual = stringutil.parselist(input)
    if actual != expected:
        raise AssertionError(
            "parse_input(%r)\n got %r\nexpected %r"
            % (input, actual, expected)
        )
27
28
# Keep these Python tests in sync with the Rust ones in `rust/hg-core/src/config/values.rs`

# Empty input and separator-only input yield an empty list.
assert_parselist(b'', [])
assert_parselist(b',', [])
# Plain words split on commas/whitespace; empty unquoted fields are dropped.
assert_parselist(b'A', [b'A'])
assert_parselist(b'B,B', [b'B', b'B'])
assert_parselist(b', C, ,C,', [b'C', b'C'])
# Quote handling: only a quote at the start of a word opens a quotation;
# an unterminated quotation is taken literally.
assert_parselist(b'"', [b'"'])
assert_parselist(b'""', [b'', b''])
assert_parselist(b'D,"', [b'D', b'"'])
assert_parselist(b'E,""', [b'E', b'', b''])
assert_parselist(b'"F,F"', [b'F,F'])
assert_parselist(b'"G,G', [b'"G', b'G'])
assert_parselist(b'"H \\",\\"H', [b'"H', b',', b'H'])
assert_parselist(b'I,I"', [b'I', b'I"'])
assert_parselist(b'J,"J', [b'J', b'"J'])
# Whitespace separators: space, tab, and vertical tab behave alike,
# but a closing quote before whitespace produces an extra empty item.
assert_parselist(b'K K', [b'K', b'K'])
assert_parselist(b'"K" K', [b'K', b'K'])
assert_parselist(b'L\tL', [b'L', b'L'])
assert_parselist(b'"L"\tL', [b'L', b'', b'L'])
assert_parselist(b'M\x0bM', [b'M', b'M'])
assert_parselist(b'"M"\x0bM', [b'M', b'', b'M'])
# Edge cases mixing quotes with separators.
assert_parselist(b'"N" , ,"', [b'N"'])
assert_parselist(b'" ,O, ', [b'"', b'O'])
@@ -0,0 +1,27 b''
1 Test null revisions (node 0000000000000000000000000000000000000000, aka rev -1)
2 in various circumstances.
3
4 Make an empty repo:
5
6 $ hg init a
7 $ cd a
8
9 $ hg files -r 0000000000000000000000000000000000000000
10 [1]
11 $ hg files -r .
12 [1]
13
14 Add an empty commit (this makes the changelog refer to a null manifest node):
15
16
17 $ hg commit -m "init" --config ui.allowemptycommit=true
18
19 $ hg files -r .
20 [1]
21
22 Strip that empty commit (this makes the changelog file empty, as opposed to missing):
23
24 $ hg --config 'extensions.strip=' strip . > /dev/null
25
26 $ hg files -r .
27 [1]
@@ -0,0 +1,49 b''
1 from __future__ import absolute_import
2
3 import os
4 from mercurial.hgweb import hgwebdir_mod
5
# Shorthand for the class under test.
hgwebdir = hgwebdir_mod.hgwebdir

# Work inside a scratch directory so the generated config file does not
# pollute the test root.
os.mkdir(b'webdir')
os.chdir(b'webdir')

# Absolute path of the scratch directory, used to locate the config file.
webdir = os.path.realpath(b'.')
12
13
def trivial_response(req, res):
    # Stub WSGI handler: ignores its arguments and yields no body chunks.
    return []
16
17
def make_hgwebdir(gc_rate=None):
    """Build an hgwebdir instance from a minimal generated config file.

    When ``gc_rate`` is given, it is written as the experimental
    ``web.full-garbage-collection-rate`` setting. The WSGI handler is
    stubbed out so requests do no real work.
    """
    config = os.path.join(webdir, b'hgwebdir.conf')
    with open(config, 'wb') as configfile:
        configfile.write(b'[experimental]\n')
        if gc_rate is not None:
            configfile.write(
                b'web.full-garbage-collection-rate=%d\n' % gc_rate
            )
    hg_wd = hgwebdir(config)
    hg_wd._runwsgi = trivial_response
    return hg_wd
27
28
def process_requests(webdir_instance, number):
    """Drive ``number`` WSGI requests through ``webdir_instance``,
    draining each response iterator."""
    # we don't care for now about passing realistic arguments
    for _request in range(number):
        # Exhaust the response so any per-request bookkeeping runs.
        for _chunk in webdir_instance.run_wsgi(None, None):
            pass
34
35
# Rate 0 disables full collections entirely: 5 requests, 0 collections.
without_gc = make_hgwebdir(gc_rate=0)
process_requests(without_gc, 5)
assert without_gc.requests_count == 5
assert without_gc.gc_full_collections_done == 0

# Rate 2 collects every second request: 5 requests -> 2 collections.
with_gc = make_hgwebdir(gc_rate=2)
process_requests(with_gc, 5)
assert with_gc.requests_count == 5
assert with_gc.gc_full_collections_done == 2

# Default configuration collects on every request: 3 requests -> 3 collections.
with_systematic_gc = make_hgwebdir()  # default value of the setting
process_requests(with_systematic_gc, 3)
assert with_systematic_gc.requests_count == 3
assert with_systematic_gc.gc_full_collections_done == 3
@@ -37,9 +37,9 b' botocore==1.12.243 \\'
37 37 --hash=sha256:397585a7881230274afb8d1877ef69a661b0a311745cd324f14a052fb2a2863a \
38 38 --hash=sha256:4496f8da89cb496462a831897ad248e13e431d9fa7e41e06d426fd6658ab6e59 \
39 39 # via boto3, s3transfer
40 certifi==2019.9.11 \
41 --hash=sha256:e4f3620cfea4f83eedc95b24abd9cd56f3c4b146dd0177e83a21b4eb49e21e50 \
42 --hash=sha256:fd7c7c74727ddcf00e9acd26bba8da604ffec95bf1c2144e67aff7a8b50e6cef \
40 certifi==2021.5.30 \
41 --hash=sha256:2bbf76fd432960138b3ef6dda3dde0544f27cbf8546c458e60baf371917ba9ee \
42 --hash=sha256:50b1e4f8446b06f41be7dd6338db18e0990601dce795c2b1686458aa7e8fa7d8 \
43 43 # via requests
44 44 cffi==1.12.3 \
45 45 --hash=sha256:041c81822e9f84b1d9c401182e174996f0bae9991f33725d059b771744290774 \
@@ -4,9 +4,9 b''
4 4 #
5 5 # pip-compile --generate-hashes --output-file=contrib/packaging/requirements-windows-py2.txt contrib/packaging/requirements-windows.txt.in
6 6 #
7 certifi==2020.6.20 \
8 --hash=sha256:5930595817496dd21bb8dc35dad090f1c2cd0adfaf21204bf6732ca5d8ee34d3 \
9 --hash=sha256:8fc0819f1f30ba15bdb34cceffb9ef04d99f420f68eb75d901e9560b8749fc41 \
7 certifi==2021.5.30 \
8 --hash=sha256:2bbf76fd432960138b3ef6dda3dde0544f27cbf8546c458e60baf371917ba9ee \
9 --hash=sha256:50b1e4f8446b06f41be7dd6338db18e0990601dce795c2b1686458aa7e8fa7d8 \
10 10 # via dulwich
11 11 configparser==4.0.2 \
12 12 --hash=sha256:254c1d9c79f60c45dfde850850883d5aaa7f19a23f13561243a050d5a7c3fe4c \
@@ -16,9 +16,9 b' cached-property==1.5.2 \\'
16 16 --hash=sha256:9fa5755838eecbb2d234c3aa390bd80fbd3ac6b6869109bfc1b499f7bd89a130 \
17 17 --hash=sha256:df4f613cf7ad9a588cc381aaf4a512d26265ecebd5eb9e1ba12f1319eb85a6a0 \
18 18 # via pygit2
19 certifi==2020.6.20 \
20 --hash=sha256:5930595817496dd21bb8dc35dad090f1c2cd0adfaf21204bf6732ca5d8ee34d3 \
21 --hash=sha256:8fc0819f1f30ba15bdb34cceffb9ef04d99f420f68eb75d901e9560b8749fc41 \
19 certifi==2021.5.30 \
20 --hash=sha256:2bbf76fd432960138b3ef6dda3dde0544f27cbf8546c458e60baf371917ba9ee \
21 --hash=sha256:50b1e4f8446b06f41be7dd6338db18e0990601dce795c2b1686458aa7e8fa7d8 \
22 22 # via dulwich
23 23 cffi==1.14.4 \
24 24 --hash=sha256:00a1ba5e2e95684448de9b89888ccd02c98d512064b4cb987d48f4b40aa0421e \
@@ -57,10 +57,10 b' from mercurial import ('
57 57 diffutil,
58 58 error,
59 59 hg,
60 logcmdutil,
60 61 patch,
61 62 pycompat,
62 63 registrar,
63 scmutil,
64 64 )
65 65 from mercurial.utils import dateutil
66 66
@@ -180,7 +180,7 b' def analyze(ui, repo, *revs, **opts):'
180 180
181 181 # If a mercurial repo is available, also model the commit history.
182 182 if repo:
183 revs = scmutil.revrange(repo, revs)
183 revs = logcmdutil.revrange(repo, revs)
184 184 revs.sort()
185 185
186 186 progress = ui.makeprogress(
@@ -35,6 +35,7 b' from mercurial.node import short'
35 35
36 36 from mercurial import (
37 37 error,
38 logcmdutil,
38 39 registrar,
39 40 scmutil,
40 41 )
@@ -84,7 +85,7 b" def _docensor(ui, repo, path, rev=b'', t"
84 85 if not len(flog):
85 86 raise error.Abort(_(b'cannot censor file with no history'))
86 87
87 rev = scmutil.revsingle(repo, rev, rev).rev()
88 rev = logcmdutil.revsingle(repo, rev, rev).rev()
88 89 try:
89 90 ctx = repo[rev]
90 91 except KeyError:
@@ -22,7 +22,6 b' from mercurial import ('
22 22 logcmdutil,
23 23 pycompat,
24 24 registrar,
25 scmutil,
26 25 )
27 26
28 27 templateopts = cmdutil.templateopts
@@ -71,7 +70,7 b' def children(ui, repo, file_=None, **opt'
71 70 """
72 71 opts = pycompat.byteskwargs(opts)
73 72 rev = opts.get(b'rev')
74 ctx = scmutil.revsingle(repo, rev)
73 ctx = logcmdutil.revsingle(repo, rev)
75 74 if file_:
76 75 fctx = repo.filectx(file_, changeid=ctx.rev())
77 76 childctxs = [fcctx.changectx() for fcctx in fctx.children()]
@@ -13,9 +13,9 b' from mercurial import ('
13 13 cmdutil,
14 14 context,
15 15 error,
16 logcmdutil,
16 17 pycompat,
17 18 registrar,
18 scmutil,
19 19 )
20 20
21 21 cmdtable = {}
@@ -68,7 +68,7 b' def close_branch(ui, repo, *revs, **opts'
68 68 opts = pycompat.byteskwargs(opts)
69 69
70 70 revs += tuple(opts.get(b'rev', []))
71 revs = scmutil.revrange(repo, revs)
71 revs = logcmdutil.revrange(repo, revs)
72 72
73 73 if not revs:
74 74 raise error.Abort(_(b'no revisions specified'))
@@ -36,10 +36,10 b' from mercurial import ('
36 36 exchange,
37 37 hg,
38 38 lock as lockmod,
39 logcmdutil,
39 40 merge as mergemod,
40 41 phases,
41 42 pycompat,
42 scmutil,
43 43 util,
44 44 )
45 45 from mercurial.utils import dateutil
@@ -145,7 +145,7 b' class mercurial_sink(common.converter_si'
145 145 _(b'pulling from %s into %s\n') % (pbranch, branch)
146 146 )
147 147 exchange.pull(
148 self.repo, prepo, [prepo.lookup(h) for h in heads]
148 self.repo, prepo, heads=[prepo.lookup(h) for h in heads]
149 149 )
150 150 self.before()
151 151
@@ -564,7 +564,7 b' class mercurial_source(common.converter_'
564 564 )
565 565 nodes = set()
566 566 parents = set()
567 for r in scmutil.revrange(self.repo, [hgrevs]):
567 for r in logcmdutil.revrange(self.repo, [hgrevs]):
568 568 ctx = self.repo[r]
569 569 nodes.add(ctx.node())
570 570 parents.update(p.node() for p in ctx.parents())
@@ -423,7 +423,7 b' def reposetup(ui, repo):'
423 423 try:
424 424 wlock = self.wlock()
425 425 for f in self.dirstate:
426 if self.dirstate[f] != b'n':
426 if not self.dirstate.get_entry(f).maybe_clean:
427 427 continue
428 428 if oldeol is not None:
429 429 if not oldeol.match(f) and not neweol.match(f):
@@ -101,6 +101,7 b' from mercurial import ('
101 101 error,
102 102 filemerge,
103 103 formatter,
104 logcmdutil,
104 105 pycompat,
105 106 registrar,
106 107 scmutil,
@@ -558,17 +559,17 b' def dodiff(ui, repo, cmdline, pats, opts'
558 559 do3way = b'$parent2' in cmdline
559 560
560 561 if change:
561 ctx2 = scmutil.revsingle(repo, change, None)
562 ctx2 = logcmdutil.revsingle(repo, change, None)
562 563 ctx1a, ctx1b = ctx2.p1(), ctx2.p2()
563 564 elif from_rev or to_rev:
564 565 repo = scmutil.unhidehashlikerevs(
565 566 repo, [from_rev] + [to_rev], b'nowarn'
566 567 )
567 ctx1a = scmutil.revsingle(repo, from_rev, None)
568 ctx1a = logcmdutil.revsingle(repo, from_rev, None)
568 569 ctx1b = repo[nullrev]
569 ctx2 = scmutil.revsingle(repo, to_rev, None)
570 ctx2 = logcmdutil.revsingle(repo, to_rev, None)
570 571 else:
571 ctx1a, ctx2 = scmutil.revpair(repo, revs)
572 ctx1a, ctx2 = logcmdutil.revpair(repo, revs)
572 573 if not revs:
573 574 ctx1b = repo[None].p2()
574 575 else:
@@ -15,6 +15,7 b' from mercurial import ('
15 15 encoding,
16 16 error,
17 17 extensions,
18 logcmdutil,
18 19 patch,
19 20 pycompat,
20 21 registrar,
@@ -75,7 +76,7 b' def _matchpaths(repo, rev, pats, opts, a'
75 76 def bad(x, y):
76 77 raise error.Abort(b"%s: %s" % (x, y))
77 78
78 ctx = scmutil.revsingle(repo, rev)
79 ctx = logcmdutil.revsingle(repo, rev)
79 80 m = scmutil.match(ctx, pats, opts, badfn=bad)
80 81 for p in ctx.walk(m):
81 82 yield p
@@ -317,7 +318,7 b' def debugbuildannotatecache(ui, repo, *p'
317 318 )
318 319 if ui.configbool(b'fastannotate', b'unfilteredrepo'):
319 320 repo = repo.unfiltered()
320 ctx = scmutil.revsingle(repo, rev)
321 ctx = logcmdutil.revsingle(repo, rev)
321 322 m = scmutil.match(ctx, pats, opts)
322 323 paths = list(ctx.walk(m))
323 324 if util.safehasattr(repo, 'prefetchfastannotate'):
@@ -140,12 +140,10 b' def peersetup(ui, peer):'
140 140 def getannotate(self, path, lastnode=None):
141 141 if not self.capable(b'getannotate'):
142 142 ui.warn(_(b'remote peer cannot provide annotate cache\n'))
143 yield None, None
143 return None, None
144 144 else:
145 145 args = {b'path': path, b'lastnode': lastnode or b''}
146 f = wireprotov1peer.future()
147 yield args, f
148 yield _parseresponse(f.value)
146 return args, _parseresponse
149 147
150 148 peer.__class__ = fastannotatepeer
151 149
@@ -15,6 +15,7 b' from mercurial.node import hex, nullrev'
15 15 from mercurial.utils import stringutil
16 16 from mercurial import (
17 17 error,
18 logcmdutil,
18 19 pycompat,
19 20 registrar,
20 21 scmutil,
@@ -182,7 +183,7 b' def fastexport(ui, repo, *revs, **opts):'
182 183 if not revs:
183 184 revs = scmutil.revrange(repo, [b":"])
184 185 else:
185 revs = scmutil.revrange(repo, revs)
186 revs = logcmdutil.revrange(repo, revs)
186 187 if not revs:
187 188 raise error.Abort(_(b"no revisions matched"))
188 189 authorfile = opts.get(b"authormap")
@@ -144,6 +144,7 b' from mercurial import ('
144 144 context,
145 145 copies,
146 146 error,
147 logcmdutil,
147 148 match as matchmod,
148 149 mdiff,
149 150 merge,
@@ -283,20 +284,29 b' def fix(ui, repo, *pats, **opts):'
283 284 # There are no data dependencies between the workers fixing each file
284 285 # revision, so we can use all available parallelism.
285 286 def getfixes(items):
286 for rev, path in items:
287 ctx = repo[rev]
287 for srcrev, path, dstrevs in items:
288 ctx = repo[srcrev]
288 289 olddata = ctx[path].data()
289 290 metadata, newdata = fixfile(
290 ui, repo, opts, fixers, ctx, path, basepaths, basectxs[rev]
291 ui,
292 repo,
293 opts,
294 fixers,
295 ctx,
296 path,
297 basepaths,
298 basectxs[srcrev],
291 299 )
292 # Don't waste memory/time passing unchanged content back, but
293 # produce one result per item either way.
294 yield (
295 rev,
296 path,
297 metadata,
298 newdata if newdata != olddata else None,
299 )
300 # We ungroup the work items now, because the code that consumes
301 # these results has to handle each dstrev separately, and in
302 # topological order. Because these are handled in topological
303 # order, it's important that we pass around references to
304 # "newdata" instead of copying it. Otherwise, we would be
305 # keeping more copies of file content in memory at a time than
306 # if we hadn't bothered to group/deduplicate the work items.
307 data = newdata if newdata != olddata else None
308 for dstrev in dstrevs:
309 yield (dstrev, path, metadata, data)
300 310
301 311 results = worker.worker(
302 312 ui, 1.0, getfixes, tuple(), workqueue, threadsafe=False
@@ -376,23 +386,32 b' def cleanup(repo, replacements, wdirwrit'
376 386
377 387
378 388 def getworkqueue(ui, repo, pats, opts, revstofix, basectxs):
379 """Constructs the list of files to be fixed at specific revisions
389 """Constructs a list of files to fix and which revisions each fix applies to
380 390
381 It is up to the caller how to consume the work items, and the only
382 dependence between them is that replacement revisions must be committed in
383 topological order. Each work item represents a file in the working copy or
384 in some revision that should be fixed and written back to the working copy
385 or into a replacement revision.
391 To avoid duplicating work, there is usually only one work item for each file
392 revision that might need to be fixed. There can be multiple work items per
393 file revision if the same file needs to be fixed in multiple changesets with
394 different baserevs. Each work item also contains a list of changesets where
395 the file's data should be replaced with the fixed data. The work items for
396 earlier changesets come earlier in the work queue, to improve pipelining by
397 allowing the first changeset to be replaced while fixes are still being
398 computed for later changesets.
386 399
387 Work items for the same revision are grouped together, so that a worker
388 pool starting with the first N items in parallel is likely to finish the
389 first revision's work before other revisions. This can allow us to write
390 the result to disk and reduce memory footprint. At time of writing, the
391 partition strategy in worker.py seems favorable to this. We also sort the
392 items by ascending revision number to match the order in which we commit
393 the fixes later.
400 Also returned is a map from changesets to the count of work items that might
401 affect each changeset. This is used later to count when all of a changeset's
402 work items have been finished, without having to inspect the remaining work
403 queue in each worker subprocess.
404
405 The example work item (1, "foo/bar.txt", (1, 2, 3)) means that the data of
406 bar.txt should be read from revision 1, then fixed, and written back to
407 revisions 1, 2 and 3. Revision 1 is called the "srcrev" and the list of
408 revisions is called the "dstrevs". In practice the srcrev is always one of
409 the dstrevs, and we make that choice when constructing the work item so that
410 the choice can't be made inconsistently later on. The dstrevs should all
411 have the same file revision for the given path, so the choice of srcrev is
412 arbitrary. The wdirrev can be a dstrev and a srcrev.
394 413 """
395 workqueue = []
414 dstrevmap = collections.defaultdict(list)
396 415 numitems = collections.defaultdict(int)
397 416 maxfilesize = ui.configbytes(b'fix', b'maxfilesize')
398 417 for rev in sorted(revstofix):
@@ -410,8 +429,21 b' def getworkqueue(ui, repo, pats, opts, r'
410 429 % (util.bytecount(maxfilesize), path)
411 430 )
412 431 continue
413 workqueue.append((rev, path))
432 baserevs = tuple(ctx.rev() for ctx in basectxs[rev])
433 dstrevmap[(fctx.filerev(), baserevs, path)].append(rev)
414 434 numitems[rev] += 1
435 workqueue = [
436 (min(dstrevs), path, dstrevs)
437 for (_filerev, _baserevs, path), dstrevs in dstrevmap.items()
438 ]
439 # Move work items for earlier changesets to the front of the queue, so we
440 # might be able to replace those changesets (in topological order) while
441 # we're still processing later work items. Note the min() in the previous
442 # expression, which means we don't need a custom comparator here. The path
443 # is also important in the sort order to make the output order stable. There
444 # are some situations where this doesn't help much, but some situations
445 # where it lets us buffer O(1) files instead of O(n) files.
446 workqueue.sort()
415 447 return workqueue, numitems
416 448
417 449
@@ -420,7 +452,7 b' def getrevstofix(ui, repo, opts):'
420 452 if opts[b'all']:
421 453 revs = repo.revs(b'(not public() and not obsolete()) or wdir()')
422 454 elif opts[b'source']:
423 source_revs = scmutil.revrange(repo, opts[b'source'])
455 source_revs = logcmdutil.revrange(repo, opts[b'source'])
424 456 revs = set(repo.revs(b'(%ld::) - obsolete()', source_revs))
425 457 if wdirrev in source_revs:
426 458 # `wdir()::` is currently empty, so manually add wdir
@@ -428,7 +460,7 b' def getrevstofix(ui, repo, opts):'
428 460 if repo[b'.'].rev() in revs:
429 461 revs.add(wdirrev)
430 462 else:
431 revs = set(scmutil.revrange(repo, opts[b'rev']))
463 revs = set(logcmdutil.revrange(repo, opts[b'rev']))
432 464 if opts.get(b'working_dir'):
433 465 revs.add(wdirrev)
434 466 for rev in revs:
@@ -516,9 +548,9 b' def getbasepaths(repo, opts, workqueue, '
516 548 return {}
517 549
518 550 basepaths = {}
519 for rev, path in workqueue:
520 fixctx = repo[rev]
521 for basectx in basectxs[rev]:
551 for srcrev, path, _dstrevs in workqueue:
552 fixctx = repo[srcrev]
553 for basectx in basectxs[srcrev]:
522 554 basepath = copies.pathcopies(basectx, fixctx).get(path, path)
523 555 if basepath in basectx:
524 556 basepaths[(basectx.rev(), fixctx.rev(), path)] = basepath
@@ -618,7 +650,7 b' def getbasectxs(repo, opts, revstofix):'
618 650 # The --base flag overrides the usual logic, and we give every revision
619 651 # exactly the set of baserevs that the user specified.
620 652 if opts.get(b'base'):
621 baserevs = set(scmutil.revrange(repo, opts.get(b'base')))
653 baserevs = set(logcmdutil.revrange(repo, opts.get(b'base')))
622 654 if not baserevs:
623 655 baserevs = {nullrev}
624 656 basectxs = {repo[rev] for rev in baserevs}
@@ -641,10 +673,10 b' def _prefetchfiles(repo, workqueue, base'
641 673 toprefetch = set()
642 674
643 675 # Prefetch the files that will be fixed.
644 for rev, path in workqueue:
645 if rev == wdirrev:
676 for srcrev, path, _dstrevs in workqueue:
677 if srcrev == wdirrev:
646 678 continue
647 toprefetch.add((rev, path))
679 toprefetch.add((srcrev, path))
648 680
649 681 # Prefetch the base contents for lineranges().
650 682 for (baserev, fixrev, path), basepath in basepaths.items():
@@ -333,7 +333,11 b' def overridewalk(orig, self, match, subr'
333 333 # for better performance, directly access the inner dirstate map if the
334 334 # standard dirstate implementation is in use.
335 335 dmap = dmap._map
336 nonnormalset = self._map.nonnormalset
336 nonnormalset = {
337 f
338 for f, e in self._map.items()
339 if e.v1_state() != "n" or e.v1_mtime() == -1
340 }
337 341
338 342 copymap = self._map.copymap
339 343 getkind = stat.S_IFMT
@@ -560,8 +564,8 b' def overridestatus('
560 564 for i, (s1, s2) in enumerate(zip(l1, l2)):
561 565 if set(s1) != set(s2):
562 566 f.write(b'sets at position %d are unequal\n' % i)
563 f.write(b'watchman returned: %s\n' % s1)
564 f.write(b'stat returned: %s\n' % s2)
567 f.write(b'watchman returned: %r\n' % s1)
568 f.write(b'stat returned: %r\n' % s2)
565 569 finally:
566 570 f.close()
567 571
This diff has been collapsed as it changes many lines, (651 lines changed) Show them Hide them
@@ -282,6 +282,11 b' configitem('
282 282 default=None,
283 283 )
284 284 configitem(b'histedit', b'summary-template', default=b'{rev} {desc|firstline}')
285 # TODO: Teach the text-based histedit interface to respect this config option
286 # before we make it non-experimental.
287 configitem(
288 b'histedit', b'later-commits-first', default=False, experimental=True
289 )
285 290
286 291 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
287 292 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
@@ -749,7 +754,7 b' def _isdirtywc(repo):'
749 754
750 755
751 756 def abortdirty():
752 raise error.Abort(
757 raise error.StateError(
753 758 _(b'working copy has pending changes'),
754 759 hint=_(
755 760 b'amend, commit, or revert them and run histedit '
@@ -1052,12 +1057,12 b' def findoutgoing(ui, repo, remote=None, '
1052 1057
1053 1058 outgoing = discovery.findcommonoutgoing(repo, other, revs, force=force)
1054 1059 if not outgoing.missing:
1055 raise error.Abort(_(b'no outgoing ancestors'))
1060 raise error.StateError(_(b'no outgoing ancestors'))
1056 1061 roots = list(repo.revs(b"roots(%ln)", outgoing.missing))
1057 1062 if len(roots) > 1:
1058 1063 msg = _(b'there are ambiguous outgoing revisions')
1059 1064 hint = _(b"see 'hg help histedit' for more detail")
1060 raise error.Abort(msg, hint=hint)
1065 raise error.StateError(msg, hint=hint)
1061 1066 return repo[roots[0]].node()
1062 1067
1063 1068
@@ -1193,166 +1198,6 b' class histeditrule(object):'
1193 1198 return self.conflicts
1194 1199
1195 1200
1196 # ============ EVENTS ===============
1197 def movecursor(state, oldpos, newpos):
1198 """Change the rule/changeset that the cursor is pointing to, regardless of
1199 current mode (you can switch between patches from the view patch window)."""
1200 state[b'pos'] = newpos
1201
1202 mode, _ = state[b'mode']
1203 if mode == MODE_RULES:
1204 # Scroll through the list by updating the view for MODE_RULES, so that
1205 # even if we are not currently viewing the rules, switching back will
1206 # result in the cursor's rule being visible.
1207 modestate = state[b'modes'][MODE_RULES]
1208 if newpos < modestate[b'line_offset']:
1209 modestate[b'line_offset'] = newpos
1210 elif newpos > modestate[b'line_offset'] + state[b'page_height'] - 1:
1211 modestate[b'line_offset'] = newpos - state[b'page_height'] + 1
1212
1213 # Reset the patch view region to the top of the new patch.
1214 state[b'modes'][MODE_PATCH][b'line_offset'] = 0
1215
1216
1217 def changemode(state, mode):
1218 curmode, _ = state[b'mode']
1219 state[b'mode'] = (mode, curmode)
1220 if mode == MODE_PATCH:
1221 state[b'modes'][MODE_PATCH][b'patchcontents'] = patchcontents(state)
1222
1223
1224 def makeselection(state, pos):
1225 state[b'selected'] = pos
1226
1227
1228 def swap(state, oldpos, newpos):
1229 """Swap two positions and calculate necessary conflicts in
1230 O(|newpos-oldpos|) time"""
1231
1232 rules = state[b'rules']
1233 assert 0 <= oldpos < len(rules) and 0 <= newpos < len(rules)
1234
1235 rules[oldpos], rules[newpos] = rules[newpos], rules[oldpos]
1236
1237 # TODO: swap should not know about histeditrule's internals
1238 rules[newpos].pos = newpos
1239 rules[oldpos].pos = oldpos
1240
1241 start = min(oldpos, newpos)
1242 end = max(oldpos, newpos)
1243 for r in pycompat.xrange(start, end + 1):
1244 rules[newpos].checkconflicts(rules[r])
1245 rules[oldpos].checkconflicts(rules[r])
1246
1247 if state[b'selected']:
1248 makeselection(state, newpos)
1249
1250
1251 def changeaction(state, pos, action):
1252 """Change the action state on the given position to the new action"""
1253 rules = state[b'rules']
1254 assert 0 <= pos < len(rules)
1255 rules[pos].action = action
1256
1257
1258 def cycleaction(state, pos, next=False):
1259 """Changes the action state the next or the previous action from
1260 the action list"""
1261 rules = state[b'rules']
1262 assert 0 <= pos < len(rules)
1263 current = rules[pos].action
1264
1265 assert current in KEY_LIST
1266
1267 index = KEY_LIST.index(current)
1268 if next:
1269 index += 1
1270 else:
1271 index -= 1
1272 changeaction(state, pos, KEY_LIST[index % len(KEY_LIST)])
1273
1274
1275 def changeview(state, delta, unit):
1276 """Change the region of whatever is being viewed (a patch or the list of
1277 changesets). 'delta' is an amount (+/- 1) and 'unit' is 'page' or 'line'."""
1278 mode, _ = state[b'mode']
1279 if mode != MODE_PATCH:
1280 return
1281 mode_state = state[b'modes'][mode]
1282 num_lines = len(mode_state[b'patchcontents'])
1283 page_height = state[b'page_height']
1284 unit = page_height if unit == b'page' else 1
1285 num_pages = 1 + (num_lines - 1) // page_height
1286 max_offset = (num_pages - 1) * page_height
1287 newline = mode_state[b'line_offset'] + delta * unit
1288 mode_state[b'line_offset'] = max(0, min(max_offset, newline))
1289
1290
1291 def event(state, ch):
1292 """Change state based on the current character input
1293
1294 This takes the current state and based on the current character input from
1295 the user we change the state.
1296 """
1297 selected = state[b'selected']
1298 oldpos = state[b'pos']
1299 rules = state[b'rules']
1300
1301 if ch in (curses.KEY_RESIZE, b"KEY_RESIZE"):
1302 return E_RESIZE
1303
1304 lookup_ch = ch
1305 if ch is not None and b'0' <= ch <= b'9':
1306 lookup_ch = b'0'
1307
1308 curmode, prevmode = state[b'mode']
1309 action = KEYTABLE[curmode].get(
1310 lookup_ch, KEYTABLE[b'global'].get(lookup_ch)
1311 )
1312 if action is None:
1313 return
1314 if action in (b'down', b'move-down'):
1315 newpos = min(oldpos + 1, len(rules) - 1)
1316 movecursor(state, oldpos, newpos)
1317 if selected is not None or action == b'move-down':
1318 swap(state, oldpos, newpos)
1319 elif action in (b'up', b'move-up'):
1320 newpos = max(0, oldpos - 1)
1321 movecursor(state, oldpos, newpos)
1322 if selected is not None or action == b'move-up':
1323 swap(state, oldpos, newpos)
1324 elif action == b'next-action':
1325 cycleaction(state, oldpos, next=True)
1326 elif action == b'prev-action':
1327 cycleaction(state, oldpos, next=False)
1328 elif action == b'select':
1329 selected = oldpos if selected is None else None
1330 makeselection(state, selected)
1331 elif action == b'goto' and int(ch) < len(rules) and len(rules) <= 10:
1332 newrule = next((r for r in rules if r.origpos == int(ch)))
1333 movecursor(state, oldpos, newrule.pos)
1334 if selected is not None:
1335 swap(state, oldpos, newrule.pos)
1336 elif action.startswith(b'action-'):
1337 changeaction(state, oldpos, action[7:])
1338 elif action == b'showpatch':
1339 changemode(state, MODE_PATCH if curmode != MODE_PATCH else prevmode)
1340 elif action == b'help':
1341 changemode(state, MODE_HELP if curmode != MODE_HELP else prevmode)
1342 elif action == b'quit':
1343 return E_QUIT
1344 elif action == b'histedit':
1345 return E_HISTEDIT
1346 elif action == b'page-down':
1347 return E_PAGEDOWN
1348 elif action == b'page-up':
1349 return E_PAGEUP
1350 elif action == b'line-down':
1351 return E_LINEDOWN
1352 elif action == b'line-up':
1353 return E_LINEUP
1354
1355
1356 1201 def makecommands(rules):
1357 1202 """Returns a list of commands consumable by histedit --commands based on
1358 1203 our list of rules"""
@@ -1390,52 +1235,38 b' def _trunc_tail(line, n):'
1390 1235 return line[: n - 2] + b' >'
1391 1236
1392 1237
1393 def patchcontents(state):
1394 repo = state[b'repo']
1395 rule = state[b'rules'][state[b'pos']]
1396 displayer = logcmdutil.changesetdisplayer(
1397 repo.ui, repo, {b"patch": True, b"template": b"status"}, buffered=True
1398 )
1399 overrides = {(b'ui', b'verbose'): True}
1400 with repo.ui.configoverride(overrides, source=b'histedit'):
1401 displayer.show(rule.ctx)
1402 displayer.close()
1403 return displayer.hunk[rule.ctx.rev()].splitlines()
1404
1405
1406 def _chisteditmain(repo, rules, stdscr):
1407 try:
1408 curses.use_default_colors()
1409 except curses.error:
1410 pass
1411
1412 # initialize color pattern
1413 curses.init_pair(COLOR_HELP, curses.COLOR_WHITE, curses.COLOR_BLUE)
1414 curses.init_pair(COLOR_SELECTED, curses.COLOR_BLACK, curses.COLOR_WHITE)
1415 curses.init_pair(COLOR_WARN, curses.COLOR_BLACK, curses.COLOR_YELLOW)
1416 curses.init_pair(COLOR_OK, curses.COLOR_BLACK, curses.COLOR_GREEN)
1417 curses.init_pair(COLOR_CURRENT, curses.COLOR_WHITE, curses.COLOR_MAGENTA)
1418 curses.init_pair(COLOR_DIFF_ADD_LINE, curses.COLOR_GREEN, -1)
1419 curses.init_pair(COLOR_DIFF_DEL_LINE, curses.COLOR_RED, -1)
1420 curses.init_pair(COLOR_DIFF_OFFSET, curses.COLOR_MAGENTA, -1)
1421 curses.init_pair(COLOR_ROLL, curses.COLOR_RED, -1)
1422 curses.init_pair(
1423 COLOR_ROLL_CURRENT, curses.COLOR_BLACK, curses.COLOR_MAGENTA
1424 )
1425 curses.init_pair(COLOR_ROLL_SELECTED, curses.COLOR_RED, curses.COLOR_WHITE)
1426
1427 # don't display the cursor
1428 try:
1429 curses.curs_set(0)
1430 except curses.error:
1431 pass
1432
1433 def rendercommit(win, state):
1238 class _chistedit_state(object):
1239 def __init__(
1240 self,
1241 repo,
1242 rules,
1243 stdscr,
1244 ):
1245 self.repo = repo
1246 self.rules = rules
1247 self.stdscr = stdscr
1248 self.later_on_top = repo.ui.configbool(
1249 b'histedit', b'later-commits-first'
1250 )
1251 # The current item in display order, initialized to point to the top
1252 # of the screen.
1253 self.pos = 0
1254 self.selected = None
1255 self.mode = (MODE_INIT, MODE_INIT)
1256 self.page_height = None
1257 self.modes = {
1258 MODE_RULES: {
1259 b'line_offset': 0,
1260 },
1261 MODE_PATCH: {
1262 b'line_offset': 0,
1263 },
1264 }
1265
1266 def render_commit(self, win):
1434 1267 """Renders the commit window that shows the log of the current selected
1435 1268 commit"""
1436 pos = state[b'pos']
1437 rules = state[b'rules']
1438 rule = rules[pos]
1269 rule = self.rules[self.display_pos_to_rule_pos(self.pos)]
1439 1270
1440 1271 ctx = rule.ctx
1441 1272 win.box()
@@ -1449,7 +1280,7 b' def _chisteditmain(repo, rules, stdscr):'
1449 1280 line = b"user: %s" % ctx.user()
1450 1281 win.addstr(2, 1, line[:length])
1451 1282
1452 bms = repo.nodebookmarks(ctx.node())
1283 bms = self.repo.nodebookmarks(ctx.node())
1453 1284 line = b"bookmark: %s" % b' '.join(bms)
1454 1285 win.addstr(3, 1, line[:length])
1455 1286
@@ -1481,8 +1312,8 b' def _chisteditmain(repo, rules, stdscr):'
1481 1312 win.addstr(y, 1, conflictstr[:length])
1482 1313 win.noutrefresh()
1483 1314
1484 def helplines(mode):
1485 if mode == MODE_PATCH:
1315 def helplines(self):
1316 if self.mode[0] == MODE_PATCH:
1486 1317 help = b"""\
1487 1318 ?: help, k/up: line up, j/down: line down, v: stop viewing patch
1488 1319 pgup: prev page, space/pgdn: next page, c: commit, q: abort
@@ -1495,40 +1326,70 b' pgup/K: move patch up, pgdn/J: move patc'
1495 1326 """
1496 1327 return help.splitlines()
1497 1328
1498 def renderhelp(win, state):
1329 def render_help(self, win):
1499 1330 maxy, maxx = win.getmaxyx()
1500 mode, _ = state[b'mode']
1501 for y, line in enumerate(helplines(mode)):
1331 for y, line in enumerate(self.helplines()):
1502 1332 if y >= maxy:
1503 1333 break
1504 1334 addln(win, y, 0, line, curses.color_pair(COLOR_HELP))
1505 1335 win.noutrefresh()
1506 1336
1507 def renderrules(rulesscr, state):
1508 rules = state[b'rules']
1509 pos = state[b'pos']
1510 selected = state[b'selected']
1511 start = state[b'modes'][MODE_RULES][b'line_offset']
1512
1513 conflicts = [r.ctx for r in rules if r.conflicts]
1337 def layout(self):
1338 maxy, maxx = self.stdscr.getmaxyx()
1339 helplen = len(self.helplines())
1340 mainlen = maxy - helplen - 12
1341 if mainlen < 1:
1342 raise error.Abort(
1343 _(b"terminal dimensions %d by %d too small for curses histedit")
1344 % (maxy, maxx),
1345 hint=_(
1346 b"enlarge your terminal or use --config ui.interface=text"
1347 ),
1348 )
1349 return {
1350 b'commit': (12, maxx),
1351 b'help': (helplen, maxx),
1352 b'main': (mainlen, maxx),
1353 }
1354
1355 def display_pos_to_rule_pos(self, display_pos):
1356 """Converts a position in display order to rule order.
1357
1358 The `display_pos` is the order from the top in display order, not
1359 considering which items are currently visible on the screen. Thus,
1360 `display_pos=0` is the item at the top (possibly after scrolling to
1361 the top)
1362 """
1363 if self.later_on_top:
1364 return len(self.rules) - 1 - display_pos
1365 else:
1366 return display_pos
1367
1368 def render_rules(self, rulesscr):
1369 start = self.modes[MODE_RULES][b'line_offset']
1370
1371 conflicts = [r.ctx for r in self.rules if r.conflicts]
1514 1372 if len(conflicts) > 0:
1515 1373 line = b"potential conflict in %s" % b','.join(
1516 1374 map(pycompat.bytestr, conflicts)
1517 1375 )
1518 1376 addln(rulesscr, -1, 0, line, curses.color_pair(COLOR_WARN))
1519 1377
1520 for y, rule in enumerate(rules[start:]):
1521 if y >= state[b'page_height']:
1522 break
1378 for display_pos in range(start, len(self.rules)):
1379 y = display_pos - start
1380 if y < 0 or y >= self.page_height:
1381 continue
1382 rule_pos = self.display_pos_to_rule_pos(display_pos)
1383 rule = self.rules[rule_pos]
1523 1384 if len(rule.conflicts) > 0:
1524 1385 rulesscr.addstr(y, 0, b" ", curses.color_pair(COLOR_WARN))
1525 1386 else:
1526 1387 rulesscr.addstr(y, 0, b" ", curses.COLOR_BLACK)
1527 1388
1528 if y + start == selected:
1389 if display_pos == self.selected:
1529 1390 rollcolor = COLOR_ROLL_SELECTED
1530 1391 addln(rulesscr, y, 2, rule, curses.color_pair(COLOR_SELECTED))
1531 elif y + start == pos:
1392 elif display_pos == self.pos:
1532 1393 rollcolor = COLOR_ROLL_CURRENT
1533 1394 addln(
1534 1395 rulesscr,
@@ -1551,7 +1412,7 b' pgup/K: move patch up, pgdn/J: move patc'
1551 1412
1552 1413 rulesscr.noutrefresh()
1553 1414
1554 def renderstring(win, state, output, diffcolors=False):
1415 def render_string(self, win, output, diffcolors=False):
1555 1416 maxy, maxx = win.getmaxyx()
1556 1417 length = min(maxy - 1, len(output))
1557 1418 for y in range(0, length):
@@ -1573,77 +1434,239 b' pgup/K: move patch up, pgdn/J: move patc'
1573 1434 win.addstr(y, 0, line)
1574 1435 win.noutrefresh()
1575 1436
1576 def renderpatch(win, state):
1577 start = state[b'modes'][MODE_PATCH][b'line_offset']
1578 content = state[b'modes'][MODE_PATCH][b'patchcontents']
1579 renderstring(win, state, content[start:], diffcolors=True)
1580
1581 def layout(mode):
1582 maxy, maxx = stdscr.getmaxyx()
1583 helplen = len(helplines(mode))
1584 mainlen = maxy - helplen - 12
1585 if mainlen < 1:
1586 raise error.Abort(
1587 _(b"terminal dimensions %d by %d too small for curses histedit")
1588 % (maxy, maxx),
1589 hint=_(
1590 b"enlarge your terminal or use --config ui.interface=text"
1591 ),
1592 )
1593 return {
1594 b'commit': (12, maxx),
1595 b'help': (helplen, maxx),
1596 b'main': (mainlen, maxx),
1597 }
1437 def render_patch(self, win):
1438 start = self.modes[MODE_PATCH][b'line_offset']
1439 content = self.modes[MODE_PATCH][b'patchcontents']
1440 self.render_string(win, content[start:], diffcolors=True)
1441
1442 def event(self, ch):
1443 """Change state based on the current character input
1444
1445 This takes the current state and based on the current character input from
1446 the user we change the state.
1447 """
1448 oldpos = self.pos
1449
1450 if ch in (curses.KEY_RESIZE, b"KEY_RESIZE"):
1451 return E_RESIZE
1452
1453 lookup_ch = ch
1454 if ch is not None and b'0' <= ch <= b'9':
1455 lookup_ch = b'0'
1456
1457 curmode, prevmode = self.mode
1458 action = KEYTABLE[curmode].get(
1459 lookup_ch, KEYTABLE[b'global'].get(lookup_ch)
1460 )
1461 if action is None:
1462 return
1463 if action in (b'down', b'move-down'):
1464 newpos = min(oldpos + 1, len(self.rules) - 1)
1465 self.move_cursor(oldpos, newpos)
1466 if self.selected is not None or action == b'move-down':
1467 self.swap(oldpos, newpos)
1468 elif action in (b'up', b'move-up'):
1469 newpos = max(0, oldpos - 1)
1470 self.move_cursor(oldpos, newpos)
1471 if self.selected is not None or action == b'move-up':
1472 self.swap(oldpos, newpos)
1473 elif action == b'next-action':
1474 self.cycle_action(oldpos, next=True)
1475 elif action == b'prev-action':
1476 self.cycle_action(oldpos, next=False)
1477 elif action == b'select':
1478 self.selected = oldpos if self.selected is None else None
1479 self.make_selection(self.selected)
1480 elif action == b'goto' and int(ch) < len(self.rules) <= 10:
1481 newrule = next((r for r in self.rules if r.origpos == int(ch)))
1482 self.move_cursor(oldpos, newrule.pos)
1483 if self.selected is not None:
1484 self.swap(oldpos, newrule.pos)
1485 elif action.startswith(b'action-'):
1486 self.change_action(oldpos, action[7:])
1487 elif action == b'showpatch':
1488 self.change_mode(MODE_PATCH if curmode != MODE_PATCH else prevmode)
1489 elif action == b'help':
1490 self.change_mode(MODE_HELP if curmode != MODE_HELP else prevmode)
1491 elif action == b'quit':
1492 return E_QUIT
1493 elif action == b'histedit':
1494 return E_HISTEDIT
1495 elif action == b'page-down':
1496 return E_PAGEDOWN
1497 elif action == b'page-up':
1498 return E_PAGEUP
1499 elif action == b'line-down':
1500 return E_LINEDOWN
1501 elif action == b'line-up':
1502 return E_LINEUP
1503
1504 def patch_contents(self):
1505 repo = self.repo
1506 rule = self.rules[self.display_pos_to_rule_pos(self.pos)]
1507 displayer = logcmdutil.changesetdisplayer(
1508 repo.ui,
1509 repo,
1510 {b"patch": True, b"template": b"status"},
1511 buffered=True,
1512 )
1513 overrides = {(b'ui', b'verbose'): True}
1514 with repo.ui.configoverride(overrides, source=b'histedit'):
1515 displayer.show(rule.ctx)
1516 displayer.close()
1517 return displayer.hunk[rule.ctx.rev()].splitlines()
1518
1519 def move_cursor(self, oldpos, newpos):
1520 """Change the rule/changeset that the cursor is pointing to, regardless of
1521 current mode (you can switch between patches from the view patch window)."""
1522 self.pos = newpos
1523
1524 mode, _ = self.mode
1525 if mode == MODE_RULES:
1526 # Scroll through the list by updating the view for MODE_RULES, so that
1527 # even if we are not currently viewing the rules, switching back will
1528 # result in the cursor's rule being visible.
1529 modestate = self.modes[MODE_RULES]
1530 if newpos < modestate[b'line_offset']:
1531 modestate[b'line_offset'] = newpos
1532 elif newpos > modestate[b'line_offset'] + self.page_height - 1:
1533 modestate[b'line_offset'] = newpos - self.page_height + 1
1534
1535 # Reset the patch view region to the top of the new patch.
1536 self.modes[MODE_PATCH][b'line_offset'] = 0
1537
1538 def change_mode(self, mode):
1539 curmode, _ = self.mode
1540 self.mode = (mode, curmode)
1541 if mode == MODE_PATCH:
1542 self.modes[MODE_PATCH][b'patchcontents'] = self.patch_contents()
1543
1544 def make_selection(self, pos):
1545 self.selected = pos
1546
1547 def swap(self, oldpos, newpos):
1548 """Swap two positions and calculate necessary conflicts in
1549 O(|newpos-oldpos|) time"""
1550 old_rule_pos = self.display_pos_to_rule_pos(oldpos)
1551 new_rule_pos = self.display_pos_to_rule_pos(newpos)
1552
1553 rules = self.rules
1554 assert 0 <= old_rule_pos < len(rules) and 0 <= new_rule_pos < len(rules)
1555
1556 rules[old_rule_pos], rules[new_rule_pos] = (
1557 rules[new_rule_pos],
1558 rules[old_rule_pos],
1559 )
1560
1561 # TODO: swap should not know about histeditrule's internals
1562 rules[new_rule_pos].pos = new_rule_pos
1563 rules[old_rule_pos].pos = old_rule_pos
1564
1565 start = min(old_rule_pos, new_rule_pos)
1566 end = max(old_rule_pos, new_rule_pos)
1567 for r in pycompat.xrange(start, end + 1):
1568 rules[new_rule_pos].checkconflicts(rules[r])
1569 rules[old_rule_pos].checkconflicts(rules[r])
1570
1571 if self.selected:
1572 self.make_selection(newpos)
1573
1574 def change_action(self, pos, action):
1575 """Change the action state on the given position to the new action"""
1576 assert 0 <= pos < len(self.rules)
1577 self.rules[pos].action = action
1578
1579 def cycle_action(self, pos, next=False):
1580 """Changes the action state the next or the previous action from
1581 the action list"""
1582 assert 0 <= pos < len(self.rules)
1583 current = self.rules[pos].action
1584
1585 assert current in KEY_LIST
1586
1587 index = KEY_LIST.index(current)
1588 if next:
1589 index += 1
1590 else:
1591 index -= 1
1592 self.change_action(pos, KEY_LIST[index % len(KEY_LIST)])
1593
1594 def change_view(self, delta, unit):
1595 """Change the region of whatever is being viewed (a patch or the list of
1596 changesets). 'delta' is an amount (+/- 1) and 'unit' is 'page' or 'line'."""
1597 mode, _ = self.mode
1598 if mode != MODE_PATCH:
1599 return
1600 mode_state = self.modes[mode]
1601 num_lines = len(mode_state[b'patchcontents'])
1602 page_height = self.page_height
1603 unit = page_height if unit == b'page' else 1
1604 num_pages = 1 + (num_lines - 1) // page_height
1605 max_offset = (num_pages - 1) * page_height
1606 newline = mode_state[b'line_offset'] + delta * unit
1607 mode_state[b'line_offset'] = max(0, min(max_offset, newline))
1608
1609
1610 def _chisteditmain(repo, rules, stdscr):
1611 try:
1612 curses.use_default_colors()
1613 except curses.error:
1614 pass
1615
1616 # initialize color pattern
1617 curses.init_pair(COLOR_HELP, curses.COLOR_WHITE, curses.COLOR_BLUE)
1618 curses.init_pair(COLOR_SELECTED, curses.COLOR_BLACK, curses.COLOR_WHITE)
1619 curses.init_pair(COLOR_WARN, curses.COLOR_BLACK, curses.COLOR_YELLOW)
1620 curses.init_pair(COLOR_OK, curses.COLOR_BLACK, curses.COLOR_GREEN)
1621 curses.init_pair(COLOR_CURRENT, curses.COLOR_WHITE, curses.COLOR_MAGENTA)
1622 curses.init_pair(COLOR_DIFF_ADD_LINE, curses.COLOR_GREEN, -1)
1623 curses.init_pair(COLOR_DIFF_DEL_LINE, curses.COLOR_RED, -1)
1624 curses.init_pair(COLOR_DIFF_OFFSET, curses.COLOR_MAGENTA, -1)
1625 curses.init_pair(COLOR_ROLL, curses.COLOR_RED, -1)
1626 curses.init_pair(
1627 COLOR_ROLL_CURRENT, curses.COLOR_BLACK, curses.COLOR_MAGENTA
1628 )
1629 curses.init_pair(COLOR_ROLL_SELECTED, curses.COLOR_RED, curses.COLOR_WHITE)
1630
1631 # don't display the cursor
1632 try:
1633 curses.curs_set(0)
1634 except curses.error:
1635 pass
1598 1636
1599 1637 def drawvertwin(size, y, x):
1600 1638 win = curses.newwin(size[0], size[1], y, x)
1601 1639 y += size[0]
1602 1640 return win, y, x
1603 1641
1604 state = {
1605 b'pos': 0,
1606 b'rules': rules,
1607 b'selected': None,
1608 b'mode': (MODE_INIT, MODE_INIT),
1609 b'page_height': None,
1610 b'modes': {
1611 MODE_RULES: {
1612 b'line_offset': 0,
1613 },
1614 MODE_PATCH: {
1615 b'line_offset': 0,
1616 },
1617 },
1618 b'repo': repo,
1619 }
1642 state = _chistedit_state(repo, rules, stdscr)
1620 1643
1621 1644 # eventloop
1622 1645 ch = None
1623 1646 stdscr.clear()
1624 1647 stdscr.refresh()
1625 1648 while True:
1626 oldmode, unused = state[b'mode']
1649 oldmode, unused = state.mode
1627 1650 if oldmode == MODE_INIT:
1628 changemode(state, MODE_RULES)
1629 e = event(state, ch)
1651 state.change_mode(MODE_RULES)
1652 e = state.event(ch)
1630 1653
1631 1654 if e == E_QUIT:
1632 1655 return False
1633 1656 if e == E_HISTEDIT:
1634 return state[b'rules']
1657 return state.rules
1635 1658 else:
1636 1659 if e == E_RESIZE:
1637 1660 size = screen_size()
1638 1661 if size != stdscr.getmaxyx():
1639 1662 curses.resizeterm(*size)
1640 1663
1641 curmode, unused = state[b'mode']
1642 sizes = layout(curmode)
1664 sizes = state.layout()
1665 curmode, unused = state.mode
1643 1666 if curmode != oldmode:
1644 state[b'page_height'] = sizes[b'main'][0]
1667 state.page_height = sizes[b'main'][0]
1645 1668 # Adjust the view to fit the current screen size.
1646 movecursor(state, state[b'pos'], state[b'pos'])
1669 state.move_cursor(state.pos, state.pos)
1647 1670
1648 1671 # Pack the windows against the top, each pane spread across the
1649 1672 # full width of the screen.
@@ -1654,26 +1677,26 b' pgup/K: move patch up, pgdn/J: move patc'
1654 1677
1655 1678 if e in (E_PAGEDOWN, E_PAGEUP, E_LINEDOWN, E_LINEUP):
1656 1679 if e == E_PAGEDOWN:
1657 changeview(state, +1, b'page')
1680 state.change_view(+1, b'page')
1658 1681 elif e == E_PAGEUP:
1659 changeview(state, -1, b'page')
1682 state.change_view(-1, b'page')
1660 1683 elif e == E_LINEDOWN:
1661 changeview(state, +1, b'line')
1684 state.change_view(+1, b'line')
1662 1685 elif e == E_LINEUP:
1663 changeview(state, -1, b'line')
1686 state.change_view(-1, b'line')
1664 1687
1665 1688 # start rendering
1666 1689 commitwin.erase()
1667 1690 helpwin.erase()
1668 1691 mainwin.erase()
1669 1692 if curmode == MODE_PATCH:
1670 renderpatch(mainwin, state)
1693 state.render_patch(mainwin)
1671 1694 elif curmode == MODE_HELP:
1672 renderstring(mainwin, state, __doc__.strip().splitlines())
1695 state.render_string(mainwin, __doc__.strip().splitlines())
1673 1696 else:
1674 renderrules(mainwin, state)
1675 rendercommit(commitwin, state)
1676 renderhelp(helpwin, state)
1697 state.render_rules(mainwin)
1698 state.render_commit(commitwin)
1699 state.render_help(helpwin)
1677 1700 curses.doupdate()
1678 1701 # done rendering
1679 1702 ch = encoding.strtolocal(stdscr.getkey())
@@ -1697,26 +1720,19 b' def _chistedit(ui, repo, freeargs, opts)'
1697 1720 cmdutil.checkunfinished(repo)
1698 1721 cmdutil.bailifchanged(repo)
1699 1722
1700 if os.path.exists(os.path.join(repo.path, b'histedit-state')):
1701 raise error.Abort(
1702 _(
1703 b'history edit already in progress, try '
1704 b'--continue or --abort'
1705 )
1706 )
1707 1723 revs.extend(freeargs)
1708 1724 if not revs:
1709 1725 defaultrev = destutil.desthistedit(ui, repo)
1710 1726 if defaultrev is not None:
1711 1727 revs.append(defaultrev)
1712 1728 if len(revs) != 1:
1713 raise error.Abort(
1729 raise error.InputError(
1714 1730 _(b'histedit requires exactly one ancestor revision')
1715 1731 )
1716 1732
1717 rr = list(repo.set(b'roots(%ld)', scmutil.revrange(repo, revs)))
1733 rr = list(repo.set(b'roots(%ld)', logcmdutil.revrange(repo, revs)))
1718 1734 if len(rr) != 1:
1719 raise error.Abort(
1735 raise error.InputError(
1720 1736 _(
1721 1737 b'The specified revisions must have '
1722 1738 b'exactly one common root'
@@ -1727,15 +1743,15 b' def _chistedit(ui, repo, freeargs, opts)'
1727 1743 topmost = repo.dirstate.p1()
1728 1744 revs = between(repo, root, topmost, keep)
1729 1745 if not revs:
1730 raise error.Abort(
1746 raise error.InputError(
1731 1747 _(b'%s is not an ancestor of working directory') % short(root)
1732 1748 )
1733 1749
1734 ctxs = []
1750 rules = []
1735 1751 for i, r in enumerate(revs):
1736 ctxs.append(histeditrule(ui, repo[r], i))
1752 rules.append(histeditrule(ui, repo[r], i))
1737 1753 with util.with_lc_ctype():
1738 rc = curses.wrapper(functools.partial(_chisteditmain, repo, ctxs))
1754 rc = curses.wrapper(functools.partial(_chisteditmain, repo, rules))
1739 1755 curses.echo()
1740 1756 curses.endwin()
1741 1757 if rc is False:
@@ -1928,12 +1944,12 b' def _readfile(ui, path):'
1928 1944 return f.read()
1929 1945
1930 1946
1931 def _validateargs(ui, repo, state, freeargs, opts, goal, rules, revs):
1947 def _validateargs(ui, repo, freeargs, opts, goal, rules, revs):
1932 1948 # TODO only abort if we try to histedit mq patches, not just
1933 1949 # blanket if mq patches are applied somewhere
1934 1950 mq = getattr(repo, 'mq', None)
1935 1951 if mq and mq.applied:
1936 raise error.Abort(_(b'source has mq patches applied'))
1952 raise error.StateError(_(b'source has mq patches applied'))
1937 1953
1938 1954 # basic argument incompatibility processing
1939 1955 outg = opts.get(b'outgoing')
@@ -1941,31 +1957,26 b' def _validateargs(ui, repo, state, freea'
1941 1957 abort = opts.get(b'abort')
1942 1958 force = opts.get(b'force')
1943 1959 if force and not outg:
1944 raise error.Abort(_(b'--force only allowed with --outgoing'))
1960 raise error.InputError(_(b'--force only allowed with --outgoing'))
1945 1961 if goal == b'continue':
1946 1962 if any((outg, abort, revs, freeargs, rules, editplan)):
1947 raise error.Abort(_(b'no arguments allowed with --continue'))
1963 raise error.InputError(_(b'no arguments allowed with --continue'))
1948 1964 elif goal == b'abort':
1949 1965 if any((outg, revs, freeargs, rules, editplan)):
1950 raise error.Abort(_(b'no arguments allowed with --abort'))
1966 raise error.InputError(_(b'no arguments allowed with --abort'))
1951 1967 elif goal == b'edit-plan':
1952 1968 if any((outg, revs, freeargs)):
1953 raise error.Abort(
1969 raise error.InputError(
1954 1970 _(b'only --commands argument allowed with --edit-plan')
1955 1971 )
1956 1972 else:
1957 if state.inprogress():
1958 raise error.Abort(
1959 _(
1960 b'history edit already in progress, try '
1961 b'--continue or --abort'
1962 )
1963 )
1964 1973 if outg:
1965 1974 if revs:
1966 raise error.Abort(_(b'no revisions allowed with --outgoing'))
1975 raise error.InputError(
1976 _(b'no revisions allowed with --outgoing')
1977 )
1967 1978 if len(freeargs) > 1:
1968 raise error.Abort(
1979 raise error.InputError(
1969 1980 _(b'only one repo argument allowed with --outgoing')
1970 1981 )
1971 1982 else:
@@ -1976,7 +1987,7 b' def _validateargs(ui, repo, state, freea'
1976 1987 revs.append(defaultrev)
1977 1988
1978 1989 if len(revs) != 1:
1979 raise error.Abort(
1990 raise error.InputError(
1980 1991 _(b'histedit requires exactly one ancestor revision')
1981 1992 )
1982 1993
@@ -1990,11 +2001,11 b' def _histedit(ui, repo, state, freeargs,'
1990 2001 rules = opts.get(b'commands', b'')
1991 2002 state.keep = opts.get(b'keep', False)
1992 2003
1993 _validateargs(ui, repo, state, freeargs, opts, goal, rules, revs)
2004 _validateargs(ui, repo, freeargs, opts, goal, rules, revs)
1994 2005
1995 2006 hastags = False
1996 2007 if revs:
1997 revs = scmutil.revrange(repo, revs)
2008 revs = logcmdutil.revrange(repo, revs)
1998 2009 ctxs = [repo[rev] for rev in revs]
1999 2010 for ctx in ctxs:
2000 2011 tags = [tag for tag in ctx.tags() if tag != b'tip']
@@ -2009,7 +2020,7 b' def _histedit(ui, repo, state, freeargs,'
2009 2020 ),
2010 2021 default=1,
2011 2022 ):
2012 raise error.Abort(_(b'histedit cancelled\n'))
2023 raise error.CanceledError(_(b'histedit cancelled\n'))
2013 2024 # rebuild state
2014 2025 if goal == goalcontinue:
2015 2026 state.read()
@@ -2217,9 +2228,9 b' def _newhistedit(ui, repo, state, revs, '
2217 2228 remote = None
2218 2229 root = findoutgoing(ui, repo, remote, force, opts)
2219 2230 else:
2220 rr = list(repo.set(b'roots(%ld)', scmutil.revrange(repo, revs)))
2231 rr = list(repo.set(b'roots(%ld)', logcmdutil.revrange(repo, revs)))
2221 2232 if len(rr) != 1:
2222 raise error.Abort(
2233 raise error.InputError(
2223 2234 _(
2224 2235 b'The specified revisions must have '
2225 2236 b'exactly one common root'
@@ -2229,7 +2240,7 b' def _newhistedit(ui, repo, state, revs, '
2229 2240
2230 2241 revs = between(repo, root, topmost, state.keep)
2231 2242 if not revs:
2232 raise error.Abort(
2243 raise error.InputError(
2233 2244 _(b'%s is not an ancestor of working directory') % short(root)
2234 2245 )
2235 2246
@@ -2259,7 +2270,7 b' def _newhistedit(ui, repo, state, revs, '
2259 2270 followcopies=False,
2260 2271 )
2261 2272 except error.Abort:
2262 raise error.Abort(
2273 raise error.StateError(
2263 2274 _(
2264 2275 b"untracked files in working directory conflict with files in %s"
2265 2276 )
@@ -2337,7 +2348,9 b' def between(repo, old, new, keep):'
2337 2348 if revs and not keep:
2338 2349 rewriteutil.precheck(repo, revs, b'edit')
2339 2350 if repo.revs(b'(%ld) and merge()', revs):
2340 raise error.Abort(_(b'cannot edit history that contains merges'))
2351 raise error.StateError(
2352 _(b'cannot edit history that contains merges')
2353 )
2341 2354 return pycompat.maplist(repo.changelog.node, revs)
2342 2355
2343 2356
@@ -431,18 +431,19 b' def localrepolistkeys(orig, self, namesp'
431 431 @wireprotov1peer.batchable
432 432 def listkeyspatterns(self, namespace, patterns):
433 433 if not self.capable(b'pushkey'):
434 yield {}, None
435 f = wireprotov1peer.future()
434 return {}, None
436 435 self.ui.debug(b'preparing listkeys for "%s"\n' % namespace)
437 yield {
436
437 def decode(d):
438 self.ui.debug(
439 b'received listkey for "%s": %i bytes\n' % (namespace, len(d))
440 )
441 return pushkey.decodekeys(d)
442
443 return {
438 444 b'namespace': encoding.fromlocal(namespace),
439 445 b'patterns': wireprototypes.encodelist(patterns),
440 }, f
441 d = f.value
442 self.ui.debug(
443 b'received listkey for "%s": %i bytes\n' % (namespace, len(d))
444 )
445 yield pushkey.decodekeys(d)
446 }, decode
446 447
447 448
448 449 def _readbundlerevs(bundlerepo):
@@ -26,6 +26,7 b' from mercurial import ('
26 26 exthelper,
27 27 hg,
28 28 lock,
29 logcmdutil,
29 30 match as matchmod,
30 31 pycompat,
31 32 scmutil,
@@ -540,7 +541,7 b' def updatelfiles('
540 541 expecthash = lfutil.readasstandin(wctx[standin])
541 542 if expecthash != b'':
542 543 if lfile not in wctx: # not switched to normal file
543 if repo.dirstate[standin] != b'?':
544 if repo.dirstate.get_entry(standin).any_tracked:
544 545 wvfs.unlinkpath(lfile, ignoremissing=True)
545 546 else:
546 547 dropped.add(lfile)
@@ -568,7 +569,7 b' def updatelfiles('
568 569 removed += 1
569 570
570 571 # largefile processing might be slow and be interrupted - be prepared
571 lfdirstate.write()
572 lfdirstate.write(repo.currenttransaction())
572 573
573 574 if lfiles:
574 575 lfiles = [f for f in lfiles if f not in dropped]
@@ -577,7 +578,7 b' def updatelfiles('
577 578 repo.wvfs.unlinkpath(lfutil.standin(f))
578 579 # This needs to happen for dropped files, otherwise they stay in
579 580 # the M state.
580 lfdirstate._drop(f)
581 lfdirstate._map.reset_state(f)
581 582
582 583 statuswriter(_(b'getting changed largefiles\n'))
583 584 cachelfiles(ui, repo, None, lfiles)
@@ -618,7 +619,7 b' def updatelfiles('
618 619
619 620 lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
620 621
621 lfdirstate.write()
622 lfdirstate.write(repo.currenttransaction())
622 623 if lfiles:
623 624 statuswriter(
624 625 _(b'%d largefiles updated, %d removed\n') % (updated, removed)
@@ -657,7 +658,7 b' def lfpull(ui, repo, source=b"default", '
657 658 revs = opts.get('rev', [])
658 659 if not revs:
659 660 raise error.Abort(_(b'no revisions specified'))
660 revs = scmutil.revrange(repo, revs)
661 revs = logcmdutil.revrange(repo, revs)
661 662
662 663 numcached = 0
663 664 for rev in revs:
@@ -191,10 +191,12 b' class largefilesdirstate(dirstate.dirsta'
191 191 def _ignore(self, f):
192 192 return False
193 193
194 def write(self, tr=False):
194 def write(self, tr):
195 195 # (1) disable PENDING mode always
196 196 # (lfdirstate isn't yet managed as a part of the transaction)
197 197 # (2) avoid develwarn 'use dirstate.write with ....'
198 if tr:
199 tr.addbackup(b'largefiles/dirstate', location=b'plain')
198 200 super(largefilesdirstate, self).write(None)
199 201
200 202
@@ -269,7 +271,7 b' def listlfiles(repo, rev=None, matcher=N'
269 271 return [
270 272 splitstandin(f)
271 273 for f in repo[rev].walk(matcher)
272 if rev is not None or repo.dirstate[f] != b'?'
274 if rev is not None or repo.dirstate.get_entry(f).any_tracked
273 275 ]
274 276
275 277
@@ -558,24 +560,14 b' def synclfdirstate(repo, lfdirstate, lfi'
558 560 if lfstandin not in repo.dirstate:
559 561 lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=False)
560 562 else:
561 stat = repo.dirstate._map[lfstandin]
562 state, mtime = stat.state, stat.mtime
563 if state == b'n':
564 if normallookup or mtime < 0 or not repo.wvfs.exists(lfile):
565 # state 'n' doesn't ensure 'clean' in this case
566 lfdirstate.update_file(
567 lfile, p1_tracked=True, wc_tracked=True, possibly_dirty=True
568 )
569 else:
570 lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=True)
571 elif state == b'm':
572 lfdirstate.update_file(
573 lfile, p1_tracked=True, wc_tracked=True, merged=True
574 )
575 elif state == b'r':
576 lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=False)
577 elif state == b'a':
578 lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=True)
563 entry = repo.dirstate.get_entry(lfstandin)
564 lfdirstate.update_file(
565 lfile,
566 wc_tracked=entry.tracked,
567 p1_tracked=entry.p1_tracked,
568 p2_info=entry.p2_info,
569 possibly_dirty=True,
570 )
579 571
580 572
581 573 def markcommitted(orig, ctx, node):
@@ -598,7 +590,7 b' def markcommitted(orig, ctx, node):'
598 590 lfile = splitstandin(f)
599 591 if lfile is not None:
600 592 synclfdirstate(repo, lfdirstate, lfile, False)
601 lfdirstate.write()
593 lfdirstate.write(repo.currenttransaction())
602 594
603 595 # As part of committing, copy all of the largefiles into the cache.
604 596 #
@@ -713,7 +705,7 b' def updatestandinsbymatch(repo, match):'
713 705 lfdirstate = openlfdirstate(ui, repo)
714 706 for fstandin in standins:
715 707 lfile = splitstandin(fstandin)
716 if lfdirstate[lfile] != b'r':
708 if lfdirstate.get_entry(lfile).tracked:
717 709 updatestandin(repo, lfile, fstandin)
718 710
719 711 # Cook up a new matcher that only matches regular files or
@@ -737,10 +729,10 b' def updatestandinsbymatch(repo, match):'
737 729 # standin removal, drop the normal file if it is unknown to dirstate.
738 730 # Thus, skip plain largefile names but keep the standin.
739 731 if f in lfiles or fstandin in standins:
740 if repo.dirstate[fstandin] != b'r':
741 if repo.dirstate[f] != b'r':
732 if not repo.dirstate.get_entry(fstandin).removed:
733 if not repo.dirstate.get_entry(f).removed:
742 734 continue
743 elif repo.dirstate[f] == b'?':
735 elif not repo.dirstate.get_entry(f).any_tracked:
744 736 continue
745 737
746 738 actualfiles.append(f)
@@ -151,7 +151,7 b' def addlargefiles(ui, repo, isaddremove,'
151 151 )
152 152 standins.append(standinname)
153 153 lfdirstate.set_tracked(f)
154 lfdirstate.write()
154 lfdirstate.write(repo.currenttransaction())
155 155 bad += [
156 156 lfutil.splitstandin(f)
157 157 for f in repo[None].add(standins)
@@ -229,7 +229,7 b' def removelargefiles(ui, repo, isaddremo'
229 229 for f in remove:
230 230 lfdirstate.set_untracked(lfutil.splitstandin(f))
231 231
232 lfdirstate.write()
232 lfdirstate.write(repo.currenttransaction())
233 233
234 234 return result
235 235
@@ -659,7 +659,7 b' def mergerecordupdates(orig, repo, actio'
659 659 )
660 660 # make sure lfile doesn't get synclfdirstate'd as normal
661 661 lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=True)
662 lfdirstate.write()
662 lfdirstate.write(repo.currenttransaction())
663 663
664 664 return orig(repo, actions, branchmerge, getfiledata)
665 665
@@ -864,7 +864,7 b' def overridecopy(orig, ui, repo, pats, o'
864 864 util.copyfile(repo.wjoin(srclfile), repo.wjoin(destlfile))
865 865
866 866 lfdirstate.set_tracked(destlfile)
867 lfdirstate.write()
867 lfdirstate.write(repo.currenttransaction())
868 868 except error.Abort as e:
869 869 if e.message != _(b'no files to copy'):
870 870 raise e
@@ -896,7 +896,7 b' def overriderevert(orig, ui, repo, ctx, '
896 896 with repo.wlock():
897 897 lfdirstate = lfutil.openlfdirstate(ui, repo)
898 898 s = lfutil.lfdirstatestatus(lfdirstate, repo)
899 lfdirstate.write()
899 lfdirstate.write(repo.currenttransaction())
900 900 for lfile in s.modified:
901 901 lfutil.updatestandin(repo, lfile, lfutil.standin(lfile))
902 902 for lfile in s.deleted:
@@ -934,7 +934,7 b' def overriderevert(orig, ui, repo, ctx, '
934 934 standin = lfutil.standin(f)
935 935 if standin in ctx or standin in mctx:
936 936 matchfiles.append(standin)
937 elif standin in wctx or lfdirstate[f] == b'r':
937 elif standin in wctx or lfdirstate.get_entry(f).removed:
938 938 continue
939 939 else:
940 940 matchfiles.append(f)
@@ -1000,7 +1000,7 b' def overridepull(orig, ui, repo, source='
1000 1000 numcached = 0
1001 1001 repo.firstpulled = revsprepull # for pulled() revset expression
1002 1002 try:
1003 for rev in scmutil.revrange(repo, lfrevs):
1003 for rev in logcmdutil.revrange(repo, lfrevs):
1004 1004 ui.note(_(b'pulling largefiles for revision %d\n') % rev)
1005 1005 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
1006 1006 numcached += len(cached)
@@ -1027,7 +1027,7 b' def overridepush(orig, ui, repo, *args, '
1027 1027 lfrevs = kwargs.pop('lfrev', None)
1028 1028 if lfrevs:
1029 1029 opargs = kwargs.setdefault('opargs', {})
1030 opargs[b'lfrevs'] = scmutil.revrange(repo, lfrevs)
1030 opargs[b'lfrevs'] = logcmdutil.revrange(repo, lfrevs)
1031 1031 return orig(ui, repo, *args, **kwargs)
1032 1032
1033 1033
@@ -1383,7 +1383,7 b' def cmdutilforget('
1383 1383 lfdirstate = lfutil.openlfdirstate(ui, repo)
1384 1384 for f in forget:
1385 1385 lfdirstate.set_untracked(f)
1386 lfdirstate.write()
1386 lfdirstate.write(repo.currenttransaction())
1387 1387 standins = [lfutil.standin(f) for f in forget]
1388 1388 for f in standins:
1389 1389 repo.wvfs.unlinkpath(f, ignoremissing=True)
@@ -1591,8 +1591,12 b' def overridepurge(orig, ui, repo, *dirs,'
1591 1591 node1, node2, match, ignored, clean, unknown, listsubrepos
1592 1592 )
1593 1593 lfdirstate = lfutil.openlfdirstate(ui, repo)
1594 unknown = [f for f in r.unknown if lfdirstate[f] == b'?']
1595 ignored = [f for f in r.ignored if lfdirstate[f] == b'?']
1594 unknown = [
1595 f for f in r.unknown if not lfdirstate.get_entry(f).any_tracked
1596 ]
1597 ignored = [
1598 f for f in r.ignored if not lfdirstate.get_entry(f).any_tracked
1599 ]
1596 1600 return scmutil.status(
1597 1601 r.modified, r.added, r.removed, r.deleted, unknown, ignored, r.clean
1598 1602 )
@@ -1609,7 +1613,7 b' def overriderollback(orig, ui, repo, **o'
1609 1613 orphans = {
1610 1614 f
1611 1615 for f in repo.dirstate
1612 if lfutil.isstandin(f) and repo.dirstate[f] != b'r'
1616 if lfutil.isstandin(f) and not repo.dirstate.get_entry(f).removed
1613 1617 }
1614 1618 result = orig(ui, repo, **opts)
1615 1619 after = repo.dirstate.parents()
@@ -1620,7 +1624,7 b' def overriderollback(orig, ui, repo, **o'
1620 1624 for f in repo.dirstate:
1621 1625 if lfutil.isstandin(f):
1622 1626 orphans.discard(f)
1623 if repo.dirstate[f] == b'r':
1627 if repo.dirstate.get_entry(f).removed:
1624 1628 repo.wvfs.unlinkpath(f, ignoremissing=True)
1625 1629 elif f in pctx:
1626 1630 fctx = pctx[f]
@@ -1632,18 +1636,6 b' def overriderollback(orig, ui, repo, **o'
1632 1636 for standin in orphans:
1633 1637 repo.wvfs.unlinkpath(standin, ignoremissing=True)
1634 1638
1635 lfdirstate = lfutil.openlfdirstate(ui, repo)
1636 with lfdirstate.parentchange():
1637 orphans = set(lfdirstate)
1638 lfiles = lfutil.listlfiles(repo)
1639 for file in lfiles:
1640 lfutil.synclfdirstate(repo, lfdirstate, file, True)
1641 orphans.discard(file)
1642 for lfile in orphans:
1643 lfdirstate.update_file(
1644 lfile, p1_tracked=False, wc_tracked=False
1645 )
1646 lfdirstate.write()
1647 1639 return result
1648 1640
1649 1641
@@ -1663,7 +1655,7 b' def overridetransplant(orig, ui, repo, *'
1663 1655 @eh.wrapcommand(b'cat')
1664 1656 def overridecat(orig, ui, repo, file1, *pats, **opts):
1665 1657 opts = pycompat.byteskwargs(opts)
1666 ctx = scmutil.revsingle(repo, opts.get(b'rev'))
1658 ctx = logcmdutil.revsingle(repo, opts.get(b'rev'))
1667 1659 err = 1
1668 1660 notbad = set()
1669 1661 m = scmutil.match(ctx, (file1,) + pats, opts)
@@ -1787,10 +1779,8 b' def mergeupdate(orig, repo, node, branch'
1787 1779 # mark all clean largefiles as dirty, just in case the update gets
1788 1780 # interrupted before largefiles and lfdirstate are synchronized
1789 1781 for lfile in oldclean:
1790 entry = lfdirstate._map.get(lfile)
1791 assert not (entry.merged_removed or entry.from_p2_removed)
1792 1782 lfdirstate.set_possibly_dirty(lfile)
1793 lfdirstate.write()
1783 lfdirstate.write(repo.currenttransaction())
1794 1784
1795 1785 oldstandins = lfutil.getstandinsstate(repo)
1796 1786 wc = kwargs.get('wc')
@@ -1810,7 +1800,7 b' def mergeupdate(orig, repo, node, branch'
1810 1800 # all the ones that didn't change as clean
1811 1801 for lfile in oldclean.difference(filelist):
1812 1802 lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=True)
1813 lfdirstate.write()
1803 lfdirstate.write(repo.currenttransaction())
1814 1804
1815 1805 if branchmerge or force or partial:
1816 1806 filelist.extend(s.deleted + s.removed)
@@ -184,17 +184,18 b' def wirereposetup(ui, repo):'
184 184
185 185 @wireprotov1peer.batchable
186 186 def statlfile(self, sha):
187 f = wireprotov1peer.future()
187 def decode(d):
188 try:
189 return int(d)
190 except (ValueError, urlerr.httperror):
191 # If the server returns anything but an integer followed by a
192 # newline, newline, it's not speaking our language; if we get
193 # an HTTP error, we can't be sure the largefile is present;
194 # either way, consider it missing.
195 return 2
196
188 197 result = {b'sha': sha}
189 yield result, f
190 try:
191 yield int(f.value)
192 except (ValueError, urlerr.httperror):
193 # If the server returns anything but an integer followed by a
194 # newline, newline, it's not speaking our language; if we get
195 # an HTTP error, we can't be sure the largefile is present;
196 # either way, consider it missing.
197 yield 2
198 return result, decode
198 199
199 200 repo.__class__ = lfileswirerepository
200 201
@@ -310,7 +310,7 b' def reposetup(ui, repo):'
310 310 ]
311 311
312 312 if gotlock:
313 lfdirstate.write()
313 lfdirstate.write(self.currenttransaction())
314 314
315 315 self.lfstatus = True
316 316 return scmutil.status(*result)
@@ -137,6 +137,7 b' from mercurial import ('
137 137 filelog,
138 138 filesetlang,
139 139 localrepo,
140 logcmdutil,
140 141 minifileset,
141 142 pycompat,
142 143 revlog,
@@ -417,7 +418,7 b' def lfsfiles(context, mapping):'
417 418 def debuglfsupload(ui, repo, **opts):
418 419 """upload lfs blobs added by the working copy parent or given revisions"""
419 420 revs = opts.get('rev', [])
420 pointers = wrapper.extractpointers(repo, scmutil.revrange(repo, revs))
421 pointers = wrapper.extractpointers(repo, logcmdutil.revrange(repo, revs))
421 422 wrapper.uploadblobs(repo, pointers)
422 423
423 424
@@ -1241,7 +1241,7 b' class queue(object):'
1241 1241 if opts.get(b'rev'):
1242 1242 if not self.applied:
1243 1243 raise error.Abort(_(b'no patches applied'))
1244 revs = scmutil.revrange(repo, opts.get(b'rev'))
1244 revs = logcmdutil.revrange(repo, opts.get(b'rev'))
1245 1245 revs.sort()
1246 1246 revpatches = self._revpatches(repo, revs)
1247 1247 realpatches += revpatches
@@ -1267,9 +1267,9 b' class queue(object):'
1267 1267 if any((b'.hgsubstate' in files for files in mar)):
1268 1268 return # already listed up
1269 1269 # not yet listed up
1270 if substatestate in b'a?':
1270 if substatestate.added or not substatestate.any_tracked:
1271 1271 mar[1].append(b'.hgsubstate')
1272 elif substatestate in b'r':
1272 elif substatestate.removed:
1273 1273 mar[2].append(b'.hgsubstate')
1274 1274 else: # modified
1275 1275 mar[0].append(b'.hgsubstate')
@@ -1377,7 +1377,7 b' class queue(object):'
1377 1377 self.checkpatchname(patchfn)
1378 1378 inclsubs = checksubstate(repo)
1379 1379 if inclsubs:
1380 substatestate = repo.dirstate[b'.hgsubstate']
1380 substatestate = repo.dirstate.get_entry(b'.hgsubstate')
1381 1381 if opts.get(b'include') or opts.get(b'exclude') or pats:
1382 1382 # detect missing files in pats
1383 1383 def badfn(f, msg):
@@ -1908,7 +1908,7 b' class queue(object):'
1908 1908
1909 1909 inclsubs = checksubstate(repo, patchparent)
1910 1910 if inclsubs:
1911 substatestate = repo.dirstate[b'.hgsubstate']
1911 substatestate = repo.dirstate.get_entry(b'.hgsubstate')
1912 1912
1913 1913 ph = patchheader(self.join(patchfn), self.plainmode)
1914 1914 diffopts = self.diffopts(
@@ -2417,7 +2417,7 b' class queue(object):'
2417 2417 raise error.Abort(
2418 2418 _(b'option "-r" not valid when importing files')
2419 2419 )
2420 rev = scmutil.revrange(repo, rev)
2420 rev = logcmdutil.revrange(repo, rev)
2421 2421 rev.sort(reverse=True)
2422 2422 elif not files:
2423 2423 raise error.Abort(_(b'no files or revisions specified'))
@@ -3638,7 +3638,7 b' def rename(ui, repo, patch, name=None, *'
3638 3638 if r and patch in r.dirstate:
3639 3639 wctx = r[None]
3640 3640 with r.wlock():
3641 if r.dirstate[patch] == b'a':
3641 if r.dirstate.get_entry(patch).added:
3642 3642 r.dirstate.set_untracked(patch)
3643 3643 r.dirstate.set_tracked(name)
3644 3644 else:
@@ -3878,7 +3878,7 b' def finish(ui, repo, *revrange, **opts):'
3878 3878 ui.status(_(b'no patches applied\n'))
3879 3879 return 0
3880 3880
3881 revs = scmutil.revrange(repo, revrange)
3881 revs = logcmdutil.revrange(repo, revrange)
3882 3882 if repo[b'.'].rev() in revs and repo[None].files():
3883 3883 ui.warn(_(b'warning: uncommitted changes in the working directory\n'))
3884 3884 # queue.finish may changes phases but leave the responsibility to lock the
@@ -289,7 +289,7 b' def _narrow('
289 289 repair.strip(ui, unfi, tostrip, topic=b'narrow', backup=backup)
290 290
291 291 todelete = []
292 for t, f, f2, size in repo.store.datafiles():
292 for t, f, size in repo.store.datafiles():
293 293 if f.startswith(b'data/'):
294 294 file = f[5:-2]
295 295 if not newmatch(file):
@@ -91,6 +91,7 b' from mercurial import ('
91 91 error,
92 92 formatter,
93 93 hg,
94 logcmdutil,
94 95 mail,
95 96 patch,
96 97 pycompat,
@@ -812,7 +813,7 b' def email(ui, repo, *revs, **opts):'
812 813 raise error.Abort(_(b"bookmark '%s' not found") % bookmark)
813 814 revs = scmutil.bookmarkrevs(repo, bookmark)
814 815
815 revs = scmutil.revrange(repo, revs)
816 revs = logcmdutil.revrange(repo, revs)
816 817 if outgoing:
817 818 revs = _getoutgoing(repo, dest, revs)
818 819 if bundle:
@@ -1354,7 +1354,7 b' def phabsend(ui, repo, *revs, **opts):'
1354 1354 """
1355 1355 opts = pycompat.byteskwargs(opts)
1356 1356 revs = list(revs) + opts.get(b'rev', [])
1357 revs = scmutil.revrange(repo, revs)
1357 revs = logcmdutil.revrange(repo, revs)
1358 1358 revs.sort() # ascending order to preserve topological parent/child in phab
1359 1359
1360 1360 if not revs:
@@ -2276,7 +2276,7 b' def phabupdate(ui, repo, *specs, **opts)'
2276 2276 if specs:
2277 2277 raise error.InputError(_(b'cannot specify both DREVSPEC and --rev'))
2278 2278
2279 drevmap = getdrevmap(repo, scmutil.revrange(repo, [revs]))
2279 drevmap = getdrevmap(repo, logcmdutil.revrange(repo, [revs]))
2280 2280 specs = []
2281 2281 unknown = []
2282 2282 for r, d in pycompat.iteritems(drevmap):
@@ -35,6 +35,7 b' from mercurial import ('
35 35 dirstateguard,
36 36 error,
37 37 extensions,
38 logcmdutil,
38 39 merge as mergemod,
39 40 mergestate as mergestatemod,
40 41 mergeutil,
@@ -1302,19 +1303,19 b' def _definedestmap(ui, repo, inmemory, d'
1302 1303 dest = None
1303 1304
1304 1305 if revf:
1305 rebaseset = scmutil.revrange(repo, revf)
1306 rebaseset = logcmdutil.revrange(repo, revf)
1306 1307 if not rebaseset:
1307 1308 ui.status(_(b'empty "rev" revision set - nothing to rebase\n'))
1308 1309 return None
1309 1310 elif srcf:
1310 src = scmutil.revrange(repo, srcf)
1311 src = logcmdutil.revrange(repo, srcf)
1311 1312 if not src:
1312 1313 ui.status(_(b'empty "source" revision set - nothing to rebase\n'))
1313 1314 return None
1314 1315 # `+ (%ld)` to work around `wdir()::` being empty
1315 1316 rebaseset = repo.revs(b'(%ld):: + (%ld)', src, src)
1316 1317 else:
1317 base = scmutil.revrange(repo, basef or [b'.'])
1318 base = logcmdutil.revrange(repo, basef or [b'.'])
1318 1319 if not base:
1319 1320 ui.status(
1320 1321 _(b'empty "base" revision set - ' b"can't compute rebase set\n")
@@ -1322,7 +1323,7 b' def _definedestmap(ui, repo, inmemory, d'
1322 1323 return None
1323 1324 if destf:
1324 1325 # --base does not support multiple destinations
1325 dest = scmutil.revsingle(repo, destf)
1326 dest = logcmdutil.revsingle(repo, destf)
1326 1327 else:
1327 1328 dest = repo[_destrebase(repo, base, destspace=destspace)]
1328 1329 destf = bytes(dest)
@@ -24,10 +24,10 b' from mercurial import ('
24 24 cmdutil,
25 25 config,
26 26 error,
27 logcmdutil,
27 28 minirst,
28 29 pycompat,
29 30 registrar,
30 scmutil,
31 31 util,
32 32 )
33 33 from mercurial.utils import (
@@ -676,7 +676,7 b' def releasenotes(ui, repo, file_=None, *'
676 676 return _getadmonitionlist(ui, sections)
677 677
678 678 rev = opts.get(b'rev')
679 revs = scmutil.revrange(repo, [rev or b'not public()'])
679 revs = logcmdutil.revrange(repo, [rev or b'not public()'])
680 680 if opts.get(b'check'):
681 681 return checkadmonitions(ui, repo, sections.names(), revs)
682 682
@@ -378,7 +378,7 b' class manifestrevlogstore(object):'
378 378 ledger.markdataentry(self, treename, node)
379 379 ledger.markhistoryentry(self, treename, node)
380 380
381 for t, path, encoded, size in self._store.datafiles():
381 for t, path, size in self._store.datafiles():
382 382 if path[:5] != b'meta/' or path[-2:] != b'.i':
383 383 continue
384 384
@@ -63,12 +63,14 b' def peersetup(ui, peer):'
63 63 raise error.Abort(
64 64 b'configured remotefile server does not support getfile'
65 65 )
66 f = wireprotov1peer.future()
67 yield {b'file': file, b'node': node}, f
68 code, data = f.value.split(b'\0', 1)
69 if int(code):
70 raise error.LookupError(file, node, data)
71 yield data
66
67 def decode(d):
68 code, data = d.split(b'\0', 1)
69 if int(code):
70 raise error.LookupError(file, node, data)
71 return data
72
73 return {b'file': file, b'node': node}, decode
72 74
73 75 @wireprotov1peer.batchable
74 76 def x_rfl_getflogheads(self, path):
@@ -77,10 +79,11 b' def peersetup(ui, peer):'
77 79 b'configured remotefile server does not '
78 80 b'support getflogheads'
79 81 )
80 f = wireprotov1peer.future()
81 yield {b'path': path}, f
82 heads = f.value.split(b'\n') if f.value else []
83 yield heads
82
83 def decode(d):
84 return d.split(b'\n') if d else []
85
86 return {b'path': path}, decode
84 87
85 88 def _updatecallstreamopts(self, command, opts):
86 89 if command != b'getbundle':
@@ -166,24 +166,24 b' def onetimesetup(ui):'
166 166 n = util.pconvert(fp[striplen:])
167 167 d = store.decodedir(n)
168 168 t = store.FILETYPE_OTHER
169 yield (t, d, n, st.st_size)
169 yield (t, d, st.st_size)
170 170 if kind == stat.S_IFDIR:
171 171 visit.append(fp)
172 172
173 173 if scmutil.istreemanifest(repo):
174 for (t, u, e, s) in repo.store.datafiles():
174 for (t, u, s) in repo.store.datafiles():
175 175 if u.startswith(b'meta/') and (
176 176 u.endswith(b'.i') or u.endswith(b'.d')
177 177 ):
178 yield (t, u, e, s)
178 yield (t, u, s)
179 179
180 180 # Return .d and .i files that do not match the shallow pattern
181 181 match = state.match
182 182 if match and not match.always():
183 for (t, u, e, s) in repo.store.datafiles():
183 for (t, u, s) in repo.store.datafiles():
184 184 f = u[5:-2] # trim data/... and .i/.d
185 185 if not state.match(f):
186 yield (t, u, e, s)
186 yield (t, u, s)
187 187
188 188 for x in repo.store.topfiles():
189 189 if state.noflatmf and x[1][:11] == b'00manifest.':
@@ -255,14 +255,9 b' def _setupdirstate(ui):'
255 255
256 256 # Prevent adding files that are outside the sparse checkout
257 257 editfuncs = [
258 b'normal',
259 258 b'set_tracked',
260 259 b'set_untracked',
261 b'add',
262 b'normallookup',
263 260 b'copy',
264 b'remove',
265 b'merge',
266 261 ]
267 262 hint = _(
268 263 b'include file with `hg debugsparse --include <pattern>` or use '
@@ -22,6 +22,7 b' from mercurial import ('
22 22 commands,
23 23 error,
24 24 hg,
25 logcmdutil,
25 26 pycompat,
26 27 registrar,
27 28 revsetlang,
@@ -75,7 +76,7 b' def split(ui, repo, *revs, **opts):'
75 76 # If the rebase somehow runs into conflicts, make sure
76 77 # we close the transaction so the user can continue it.
77 78 with util.acceptintervention(tr):
78 revs = scmutil.revrange(repo, revlist or [b'.'])
79 revs = logcmdutil.revrange(repo, revlist or [b'.'])
79 80 if len(revs) > 1:
80 81 raise error.InputError(_(b'cannot split multiple revisions'))
81 82
@@ -37,7 +37,6 b' from mercurial import ('
37 37 pycompat,
38 38 registrar,
39 39 revset,
40 scmutil,
41 40 smartset,
42 41 state as statemod,
43 42 util,
@@ -845,7 +844,7 b' def _dotransplant(ui, repo, *revs, **opt'
845 844 if opts.get(b'prune'):
846 845 prune = {
847 846 source[r].node()
848 for r in scmutil.revrange(source, opts.get(b'prune'))
847 for r in logcmdutil.revrange(source, opts.get(b'prune'))
849 848 }
850 849 matchfn = lambda x: tf(x) and x not in prune
851 850 else:
@@ -853,7 +852,7 b' def _dotransplant(ui, repo, *revs, **opt'
853 852 merges = pycompat.maplist(source.lookup, opts.get(b'merge', ()))
854 853 revmap = {}
855 854 if revs:
856 for r in scmutil.revrange(source, revs):
855 for r in logcmdutil.revrange(source, revs):
857 856 revmap[int(r)] = source[r].node()
858 857 elif opts.get(b'all') or not merges:
859 858 if source != repo:
@@ -29,6 +29,8 b' from . import ('
29 29 vfs as vfsmod,
30 30 )
31 31
32 from .utils import stringutil
33
32 34 stringio = util.stringio
33 35
34 36 # from unzip source code:
@@ -196,7 +198,7 b' class tarit(object):'
196 198 name, pycompat.sysstr(mode + kind), fileobj
197 199 )
198 200 except tarfile.CompressionError as e:
199 raise error.Abort(pycompat.bytestr(e))
201 raise error.Abort(stringutil.forcebytestr(e))
200 202
201 203 if isinstance(dest, bytes):
202 204 self.z = taropen(b'w:', name=dest)
@@ -1,5 +1,5 b''
1 #ifndef _HG_BDIFF_H_
2 #define _HG_BDIFF_H_
1 #ifndef HG_BDIFF_H
2 #define HG_BDIFF_H
3 3
4 4 #include "compat.h"
5 5
@@ -1,5 +1,5 b''
1 #ifndef _HG_BITMANIPULATION_H_
2 #define _HG_BITMANIPULATION_H_
1 #ifndef HG_BITMANIPULATION_H
2 #define HG_BITMANIPULATION_H
3 3
4 4 #include <string.h>
5 5
@@ -680,8 +680,25 b' def binarydecode(repo, stream):'
680 680 return books
681 681
682 682
683 def updatefromremote(ui, repo, remotemarks, path, trfunc, explicit=()):
684 ui.debug(b"checking for updated bookmarks\n")
683 def mirroring_remote(ui, repo, remotemarks):
684 """computes the bookmark changes that set the local bookmarks to
685 remotemarks"""
686 changed = []
687 localmarks = repo._bookmarks
688 for (b, id) in pycompat.iteritems(remotemarks):
689 if id != localmarks.get(b, None) and id in repo:
690 changed.append((b, id, ui.debug, _(b"updating bookmark %s\n") % b))
691 for b in localmarks:
692 if b not in remotemarks:
693 changed.append(
694 (b, None, ui.debug, _(b"removing bookmark %s\n") % b)
695 )
696 return changed
697
698
699 def merging_from_remote(ui, repo, remotemarks, path, explicit=()):
700 """computes the bookmark changes that merge remote bookmarks into the
701 local bookmarks, based on comparebookmarks"""
685 702 localmarks = repo._bookmarks
686 703 (
687 704 addsrc,
@@ -752,6 +769,20 b' def updatefromremote(ui, repo, remotemar'
752 769 _(b"remote bookmark %s points to locally missing %s\n")
753 770 % (b, hex(scid)[:12])
754 771 )
772 return changed
773
774
775 def updatefromremote(
776 ui, repo, remotemarks, path, trfunc, explicit=(), mode=None
777 ):
778 if mode == b'ignore':
779 # This should move to an higher level to avoid fetching bookmark at all
780 return
781 ui.debug(b"checking for updated bookmarks\n")
782 if mode == b'mirror':
783 changed = mirroring_remote(ui, repo, remotemarks)
784 else:
785 changed = merging_from_remote(ui, repo, remotemarks, path, explicit)
755 786
756 787 if changed:
757 788 tr = trfunc()
@@ -760,11 +791,14 b' def updatefromremote(ui, repo, remotemar'
760 791 for b, node, writer, msg in sorted(changed, key=key):
761 792 changes.append((b, node))
762 793 writer(msg)
763 localmarks.applychanges(repo, tr, changes)
794 repo._bookmarks.applychanges(repo, tr, changes)
764 795
765 796
766 def incoming(ui, repo, peer):
797 def incoming(ui, repo, peer, mode=None):
767 798 """Show bookmarks incoming from other to repo"""
799 if mode == b'ignore':
800 ui.status(_(b"bookmarks exchange disabled with this path\n"))
801 return 0
768 802 ui.status(_(b"searching for changed bookmarks\n"))
769 803
770 804 with peer.commandexecutor() as e:
@@ -777,9 +811,6 b' def incoming(ui, repo, peer):'
777 811 ).result()
778 812 )
779 813
780 r = comparebookmarks(repo, remotemarks, repo._bookmarks)
781 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r
782
783 814 incomings = []
784 815 if ui.debugflag:
785 816 getid = lambda id: id
@@ -795,18 +826,36 b' def incoming(ui, repo, peer):'
795 826 def add(b, id, st):
796 827 incomings.append(b" %-25s %s\n" % (b, getid(id)))
797 828
798 for b, scid, dcid in addsrc:
799 # i18n: "added" refers to a bookmark
800 add(b, hex(scid), _(b'added'))
801 for b, scid, dcid in advsrc:
802 # i18n: "advanced" refers to a bookmark
803 add(b, hex(scid), _(b'advanced'))
804 for b, scid, dcid in diverge:
805 # i18n: "diverged" refers to a bookmark
806 add(b, hex(scid), _(b'diverged'))
807 for b, scid, dcid in differ:
808 # i18n: "changed" refers to a bookmark
809 add(b, hex(scid), _(b'changed'))
829 if mode == b'mirror':
830 localmarks = repo._bookmarks
831 allmarks = set(remotemarks.keys()) | set(localmarks.keys())
832 for b in sorted(allmarks):
833 loc = localmarks.get(b)
834 rem = remotemarks.get(b)
835 if loc == rem:
836 continue
837 elif loc is None:
838 add(b, hex(rem), _(b'added'))
839 elif rem is None:
840 add(b, hex(repo.nullid), _(b'removed'))
841 else:
842 add(b, hex(rem), _(b'changed'))
843 else:
844 r = comparebookmarks(repo, remotemarks, repo._bookmarks)
845 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r
846
847 for b, scid, dcid in addsrc:
848 # i18n: "added" refers to a bookmark
849 add(b, hex(scid), _(b'added'))
850 for b, scid, dcid in advsrc:
851 # i18n: "advanced" refers to a bookmark
852 add(b, hex(scid), _(b'advanced'))
853 for b, scid, dcid in diverge:
854 # i18n: "diverged" refers to a bookmark
855 add(b, hex(scid), _(b'diverged'))
856 for b, scid, dcid in differ:
857 # i18n: "changed" refers to a bookmark
858 add(b, hex(scid), _(b'changed'))
810 859
811 860 if not incomings:
812 861 ui.status(_(b"no changed bookmarks found\n"))
@@ -699,7 +699,9 b' def getremotechanges('
699 699 },
700 700 ).result()
701 701
702 pullop = exchange.pulloperation(bundlerepo, peer, heads=reponodes)
702 pullop = exchange.pulloperation(
703 bundlerepo, peer, path=None, heads=reponodes
704 )
703 705 pullop.trmanager = bundletransactionmanager()
704 706 exchange._pullapplyphases(pullop, remotephases)
705 707
@@ -264,7 +264,7 b' PyObject *make_file_foldmap(PyObject *se'
264 264 }
265 265
266 266 tuple = (dirstateItemObject *)v;
267 if (tuple->state != 'r') {
267 if (tuple->flags | dirstate_flag_wc_tracked) {
268 268 PyObject *normed;
269 269 if (table != NULL) {
270 270 normed = _asciitransform(k, table,
@@ -161,7 +161,7 b' bail:'
161 161 return ret;
162 162 }
163 163
164 static int dirs_fromdict(PyObject *dirs, PyObject *source, char skipchar)
164 static int dirs_fromdict(PyObject *dirs, PyObject *source, bool only_tracked)
165 165 {
166 166 PyObject *key, *value;
167 167 Py_ssize_t pos = 0;
@@ -171,13 +171,14 b' static int dirs_fromdict(PyObject *dirs,'
171 171 PyErr_SetString(PyExc_TypeError, "expected string key");
172 172 return -1;
173 173 }
174 if (skipchar) {
174 if (only_tracked) {
175 175 if (!dirstate_tuple_check(value)) {
176 176 PyErr_SetString(PyExc_TypeError,
177 177 "expected a dirstate tuple");
178 178 return -1;
179 179 }
180 if (((dirstateItemObject *)value)->state == skipchar)
180 if (!(((dirstateItemObject *)value)->flags &
181 dirstate_flag_wc_tracked))
181 182 continue;
182 183 }
183 184
@@ -218,15 +219,17 b' static int dirs_fromiter(PyObject *dirs,'
218 219 * Calculate a refcounted set of directory names for the files in a
219 220 * dirstate.
220 221 */
221 static int dirs_init(dirsObject *self, PyObject *args)
222 static int dirs_init(dirsObject *self, PyObject *args, PyObject *kwargs)
222 223 {
223 224 PyObject *dirs = NULL, *source = NULL;
224 char skipchar = 0;
225 int only_tracked = 0;
225 226 int ret = -1;
227 static char *keywords_name[] = {"map", "only_tracked", NULL};
226 228
227 229 self->dict = NULL;
228 230
229 if (!PyArg_ParseTuple(args, "|Oc:__init__", &source, &skipchar))
231 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|Oi:__init__",
232 keywords_name, &source, &only_tracked))
230 233 return -1;
231 234
232 235 dirs = PyDict_New();
@@ -237,10 +240,10 b' static int dirs_init(dirsObject *self, P'
237 240 if (source == NULL)
238 241 ret = 0;
239 242 else if (PyDict_Check(source))
240 ret = dirs_fromdict(dirs, source, skipchar);
241 else if (skipchar)
243 ret = dirs_fromdict(dirs, source, (bool)only_tracked);
244 else if (only_tracked)
242 245 PyErr_SetString(PyExc_ValueError,
243 "skip character is only supported "
246 "`only_tracked` is only supported "
244 247 "with a dict source");
245 248 else
246 249 ret = dirs_fromiter(dirs, source);
This diff has been collapsed as it changes many lines, (726 lines changed) Show them Hide them
@@ -44,42 +44,98 b' static PyObject *dict_new_presized(PyObj'
44 44 return _dict_new_presized(expected_size);
45 45 }
46 46
47 static inline dirstateItemObject *make_dirstate_item(char state, int mode,
48 int size, int mtime)
49 {
50 dirstateItemObject *t =
51 PyObject_New(dirstateItemObject, &dirstateItemType);
52 if (!t) {
53 return NULL;
54 }
55 t->state = state;
56 t->mode = mode;
57 t->size = size;
58 t->mtime = mtime;
59 return t;
60 }
61
62 47 static PyObject *dirstate_item_new(PyTypeObject *subtype, PyObject *args,
63 48 PyObject *kwds)
64 49 {
65 50 /* We do all the initialization here and not a tp_init function because
66 51 * dirstate_item is immutable. */
67 52 dirstateItemObject *t;
68 char state;
69 int size, mode, mtime;
70 if (!PyArg_ParseTuple(args, "ciii", &state, &mode, &size, &mtime)) {
53 int wc_tracked;
54 int p1_tracked;
55 int p2_info;
56 int has_meaningful_data;
57 int has_meaningful_mtime;
58 int mode;
59 int size;
60 int mtime_s;
61 int mtime_ns;
62 PyObject *parentfiledata;
63 PyObject *fallback_exec;
64 PyObject *fallback_symlink;
65 static char *keywords_name[] = {
66 "wc_tracked", "p1_tracked", "p2_info",
67 "has_meaningful_data", "has_meaningful_mtime", "parentfiledata",
68 "fallback_exec", "fallback_symlink", NULL,
69 };
70 wc_tracked = 0;
71 p1_tracked = 0;
72 p2_info = 0;
73 has_meaningful_mtime = 1;
74 has_meaningful_data = 1;
75 parentfiledata = Py_None;
76 fallback_exec = Py_None;
77 fallback_symlink = Py_None;
78 if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iiiiiOOO", keywords_name,
79 &wc_tracked, &p1_tracked, &p2_info,
80 &has_meaningful_data,
81 &has_meaningful_mtime, &parentfiledata,
82 &fallback_exec, &fallback_symlink)) {
71 83 return NULL;
72 84 }
73
74 85 t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
75 86 if (!t) {
76 87 return NULL;
77 88 }
78 t->state = state;
79 t->mode = mode;
80 t->size = size;
81 t->mtime = mtime;
89
90 t->flags = 0;
91 if (wc_tracked) {
92 t->flags |= dirstate_flag_wc_tracked;
93 }
94 if (p1_tracked) {
95 t->flags |= dirstate_flag_p1_tracked;
96 }
97 if (p2_info) {
98 t->flags |= dirstate_flag_p2_info;
99 }
100
101 if (fallback_exec != Py_None) {
102 t->flags |= dirstate_flag_has_fallback_exec;
103 if (PyObject_IsTrue(fallback_exec)) {
104 t->flags |= dirstate_flag_fallback_exec;
105 }
106 }
107 if (fallback_symlink != Py_None) {
108 t->flags |= dirstate_flag_has_fallback_symlink;
109 if (PyObject_IsTrue(fallback_symlink)) {
110 t->flags |= dirstate_flag_fallback_symlink;
111 }
112 }
82 113
114 if (parentfiledata != Py_None) {
115 if (!PyArg_ParseTuple(parentfiledata, "ii(ii)", &mode, &size,
116 &mtime_s, &mtime_ns)) {
117 return NULL;
118 }
119 } else {
120 has_meaningful_data = 0;
121 has_meaningful_mtime = 0;
122 }
123 if (has_meaningful_data) {
124 t->flags |= dirstate_flag_has_meaningful_data;
125 t->mode = mode;
126 t->size = size;
127 } else {
128 t->mode = 0;
129 t->size = 0;
130 }
131 if (has_meaningful_mtime) {
132 t->flags |= dirstate_flag_has_mtime;
133 t->mtime_s = mtime_s;
134 t->mtime_ns = mtime_ns;
135 } else {
136 t->mtime_s = 0;
137 t->mtime_ns = 0;
138 }
83 139 return (PyObject *)t;
84 140 }
85 141
@@ -88,92 +144,201 b' static void dirstate_item_dealloc(PyObje'
88 144 PyObject_Del(o);
89 145 }
90 146
91 static Py_ssize_t dirstate_item_length(PyObject *o)
147 static inline bool dirstate_item_c_tracked(dirstateItemObject *self)
148 {
149 return (self->flags & dirstate_flag_wc_tracked);
150 }
151
152 static inline bool dirstate_item_c_any_tracked(dirstateItemObject *self)
92 153 {
93 return 4;
154 const int mask = dirstate_flag_wc_tracked | dirstate_flag_p1_tracked |
155 dirstate_flag_p2_info;
156 return (self->flags & mask);
157 }
158
159 static inline bool dirstate_item_c_added(dirstateItemObject *self)
160 {
161 const int mask = (dirstate_flag_wc_tracked | dirstate_flag_p1_tracked |
162 dirstate_flag_p2_info);
163 const int target = dirstate_flag_wc_tracked;
164 return (self->flags & mask) == target;
94 165 }
95 166
96 static PyObject *dirstate_item_item(PyObject *o, Py_ssize_t i)
167 static inline bool dirstate_item_c_removed(dirstateItemObject *self)
168 {
169 if (self->flags & dirstate_flag_wc_tracked) {
170 return false;
171 }
172 return (self->flags &
173 (dirstate_flag_p1_tracked | dirstate_flag_p2_info));
174 }
175
176 static inline bool dirstate_item_c_merged(dirstateItemObject *self)
97 177 {
98 dirstateItemObject *t = (dirstateItemObject *)o;
99 switch (i) {
100 case 0:
101 return PyBytes_FromStringAndSize(&t->state, 1);
102 case 1:
103 return PyInt_FromLong(t->mode);
104 case 2:
105 return PyInt_FromLong(t->size);
106 case 3:
107 return PyInt_FromLong(t->mtime);
108 default:
109 PyErr_SetString(PyExc_IndexError, "index out of range");
110 return NULL;
178 return ((self->flags & dirstate_flag_wc_tracked) &&
179 (self->flags & dirstate_flag_p1_tracked) &&
180 (self->flags & dirstate_flag_p2_info));
181 }
182
183 static inline bool dirstate_item_c_from_p2(dirstateItemObject *self)
184 {
185 return ((self->flags & dirstate_flag_wc_tracked) &&
186 !(self->flags & dirstate_flag_p1_tracked) &&
187 (self->flags & dirstate_flag_p2_info));
188 }
189
190 static inline char dirstate_item_c_v1_state(dirstateItemObject *self)
191 {
192 if (dirstate_item_c_removed(self)) {
193 return 'r';
194 } else if (dirstate_item_c_merged(self)) {
195 return 'm';
196 } else if (dirstate_item_c_added(self)) {
197 return 'a';
198 } else {
199 return 'n';
111 200 }
112 201 }
113 202
114 static PySequenceMethods dirstate_item_sq = {
115 dirstate_item_length, /* sq_length */
116 0, /* sq_concat */
117 0, /* sq_repeat */
118 dirstate_item_item, /* sq_item */
119 0, /* sq_ass_item */
120 0, /* sq_contains */
121 0, /* sq_inplace_concat */
122 0 /* sq_inplace_repeat */
203 static inline bool dirstate_item_c_has_fallback_exec(dirstateItemObject *self)
204 {
205 return (bool)self->flags & dirstate_flag_has_fallback_exec;
206 }
207
208 static inline bool
209 dirstate_item_c_has_fallback_symlink(dirstateItemObject *self)
210 {
211 return (bool)self->flags & dirstate_flag_has_fallback_symlink;
212 }
213
214 static inline int dirstate_item_c_v1_mode(dirstateItemObject *self)
215 {
216 if (self->flags & dirstate_flag_has_meaningful_data) {
217 return self->mode;
218 } else {
219 return 0;
220 }
221 }
222
223 static inline int dirstate_item_c_v1_size(dirstateItemObject *self)
224 {
225 if (!(self->flags & dirstate_flag_wc_tracked) &&
226 (self->flags & dirstate_flag_p2_info)) {
227 if (self->flags & dirstate_flag_p1_tracked) {
228 return dirstate_v1_nonnormal;
229 } else {
230 return dirstate_v1_from_p2;
231 }
232 } else if (dirstate_item_c_removed(self)) {
233 return 0;
234 } else if (self->flags & dirstate_flag_p2_info) {
235 return dirstate_v1_from_p2;
236 } else if (dirstate_item_c_added(self)) {
237 return dirstate_v1_nonnormal;
238 } else if (self->flags & dirstate_flag_has_meaningful_data) {
239 return self->size;
240 } else {
241 return dirstate_v1_nonnormal;
242 }
243 }
244
245 static inline int dirstate_item_c_v1_mtime(dirstateItemObject *self)
246 {
247 if (dirstate_item_c_removed(self)) {
248 return 0;
249 } else if (!(self->flags & dirstate_flag_has_mtime) ||
250 !(self->flags & dirstate_flag_p1_tracked) ||
251 !(self->flags & dirstate_flag_wc_tracked) ||
252 (self->flags & dirstate_flag_p2_info)) {
253 return ambiguous_time;
254 } else {
255 return self->mtime_s;
256 }
257 }
258
259 static PyObject *dirstate_item_v2_data(dirstateItemObject *self)
260 {
261 int flags = self->flags;
262 int mode = dirstate_item_c_v1_mode(self);
263 #ifdef S_IXUSR
264 /* This is for platforms with an exec bit */
265 if ((mode & S_IXUSR) != 0) {
266 flags |= dirstate_flag_mode_exec_perm;
267 } else {
268 flags &= ~dirstate_flag_mode_exec_perm;
269 }
270 #else
271 flags &= ~dirstate_flag_mode_exec_perm;
272 #endif
273 #ifdef S_ISLNK
274 /* This is for platforms with support for symlinks */
275 if (S_ISLNK(mode)) {
276 flags |= dirstate_flag_mode_is_symlink;
277 } else {
278 flags &= ~dirstate_flag_mode_is_symlink;
279 }
280 #else
281 flags &= ~dirstate_flag_mode_is_symlink;
282 #endif
283 return Py_BuildValue("iiii", flags, self->size, self->mtime_s,
284 self->mtime_ns);
123 285 };
124 286
125 287 static PyObject *dirstate_item_v1_state(dirstateItemObject *self)
126 288 {
127 return PyBytes_FromStringAndSize(&self->state, 1);
289 char state = dirstate_item_c_v1_state(self);
290 return PyBytes_FromStringAndSize(&state, 1);
128 291 };
129 292
130 293 static PyObject *dirstate_item_v1_mode(dirstateItemObject *self)
131 294 {
132 return PyInt_FromLong(self->mode);
295 return PyInt_FromLong(dirstate_item_c_v1_mode(self));
133 296 };
134 297
135 298 static PyObject *dirstate_item_v1_size(dirstateItemObject *self)
136 299 {
137 return PyInt_FromLong(self->size);
300 return PyInt_FromLong(dirstate_item_c_v1_size(self));
138 301 };
139 302
140 303 static PyObject *dirstate_item_v1_mtime(dirstateItemObject *self)
141 304 {
142 return PyInt_FromLong(self->mtime);
305 return PyInt_FromLong(dirstate_item_c_v1_mtime(self));
143 306 };
144 307
145 static PyObject *dm_nonnormal(dirstateItemObject *self)
308 static PyObject *dirstate_item_need_delay(dirstateItemObject *self,
309 PyObject *now)
146 310 {
147 if (self->state != 'n' || self->mtime == ambiguous_time) {
148 Py_RETURN_TRUE;
149 } else {
150 Py_RETURN_FALSE;
311 int now_s;
312 int now_ns;
313 if (!PyArg_ParseTuple(now, "ii", &now_s, &now_ns)) {
314 return NULL;
151 315 }
152 };
153 static PyObject *dm_otherparent(dirstateItemObject *self)
154 {
155 if (self->size == dirstate_v1_from_p2) {
316 if (dirstate_item_c_v1_state(self) == 'n' && self->mtime_s == now_s) {
156 317 Py_RETURN_TRUE;
157 318 } else {
158 319 Py_RETURN_FALSE;
159 320 }
160 321 };
161 322
162 static PyObject *dirstate_item_need_delay(dirstateItemObject *self,
163 PyObject *value)
323 static PyObject *dirstate_item_mtime_likely_equal_to(dirstateItemObject *self,
324 PyObject *other)
164 325 {
165 long now;
166 if (!pylong_to_long(value, &now)) {
326 int other_s;
327 int other_ns;
328 if (!PyArg_ParseTuple(other, "ii", &other_s, &other_ns)) {
167 329 return NULL;
168 330 }
169 if (self->state == 'n' && self->mtime == now) {
331 if ((self->flags & dirstate_flag_has_mtime) &&
332 self->mtime_s == other_s &&
333 (self->mtime_ns == other_ns || self->mtime_ns == 0 ||
334 other_ns == 0)) {
170 335 Py_RETURN_TRUE;
171 336 } else {
172 337 Py_RETURN_FALSE;
173 338 }
174 339 };
175 340
176 /* This will never change since it's bound to V1, unlike `make_dirstate_item`
341 /* This will never change since it's bound to V1
177 342 */
178 343 static inline dirstateItemObject *
179 344 dirstate_item_from_v1_data(char state, int mode, int size, int mtime)
@@ -183,10 +348,56 b' dirstate_item_from_v1_data(char state, i'
183 348 if (!t) {
184 349 return NULL;
185 350 }
186 t->state = state;
187 t->mode = mode;
188 t->size = size;
189 t->mtime = mtime;
351 t->flags = 0;
352 t->mode = 0;
353 t->size = 0;
354 t->mtime_s = 0;
355 t->mtime_ns = 0;
356
357 if (state == 'm') {
358 t->flags = (dirstate_flag_wc_tracked |
359 dirstate_flag_p1_tracked | dirstate_flag_p2_info);
360 } else if (state == 'a') {
361 t->flags = dirstate_flag_wc_tracked;
362 } else if (state == 'r') {
363 if (size == dirstate_v1_nonnormal) {
364 t->flags =
365 dirstate_flag_p1_tracked | dirstate_flag_p2_info;
366 } else if (size == dirstate_v1_from_p2) {
367 t->flags = dirstate_flag_p2_info;
368 } else {
369 t->flags = dirstate_flag_p1_tracked;
370 }
371 } else if (state == 'n') {
372 if (size == dirstate_v1_from_p2) {
373 t->flags =
374 dirstate_flag_wc_tracked | dirstate_flag_p2_info;
375 } else if (size == dirstate_v1_nonnormal) {
376 t->flags =
377 dirstate_flag_wc_tracked | dirstate_flag_p1_tracked;
378 } else if (mtime == ambiguous_time) {
379 t->flags = (dirstate_flag_wc_tracked |
380 dirstate_flag_p1_tracked |
381 dirstate_flag_has_meaningful_data);
382 t->mode = mode;
383 t->size = size;
384 } else {
385 t->flags = (dirstate_flag_wc_tracked |
386 dirstate_flag_p1_tracked |
387 dirstate_flag_has_meaningful_data |
388 dirstate_flag_has_mtime);
389 t->mode = mode;
390 t->size = size;
391 t->mtime_s = mtime;
392 }
393 } else {
394 PyErr_Format(PyExc_RuntimeError,
395 "unknown state: `%c` (%d, %d, %d)", state, mode,
396 size, mtime, NULL);
397 Py_DECREF(t);
398 return NULL;
399 }
400
190 401 return t;
191 402 }
192 403
@@ -196,22 +407,52 b' static PyObject *dirstate_item_from_v1_m'
196 407 {
197 408 /* We do all the initialization here and not a tp_init function because
198 409 * dirstate_item is immutable. */
199 dirstateItemObject *t;
200 410 char state;
201 411 int size, mode, mtime;
202 412 if (!PyArg_ParseTuple(args, "ciii", &state, &mode, &size, &mtime)) {
203 413 return NULL;
204 414 }
415 return (PyObject *)dirstate_item_from_v1_data(state, mode, size, mtime);
416 };
205 417
206 t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
418 static PyObject *dirstate_item_from_v2_meth(PyTypeObject *subtype,
419 PyObject *args)
420 {
421 dirstateItemObject *t =
422 PyObject_New(dirstateItemObject, &dirstateItemType);
207 423 if (!t) {
208 424 return NULL;
209 425 }
210 t->state = state;
211 t->mode = mode;
212 t->size = size;
213 t->mtime = mtime;
214
426 if (!PyArg_ParseTuple(args, "iiii", &t->flags, &t->size, &t->mtime_s,
427 &t->mtime_ns)) {
428 return NULL;
429 }
430 if (t->flags & dirstate_flag_expected_state_is_modified) {
431 t->flags &= ~(dirstate_flag_expected_state_is_modified |
432 dirstate_flag_has_meaningful_data |
433 dirstate_flag_has_mtime);
434 }
435 if (t->flags & dirstate_flag_mtime_second_ambiguous) {
436 /* The current code is not able to do the more subtle comparison
437 * that the MTIME_SECOND_AMBIGUOUS requires. So we ignore the
438 * mtime */
439 t->flags &= ~(dirstate_flag_mtime_second_ambiguous |
440 dirstate_flag_has_meaningful_data |
441 dirstate_flag_has_mtime);
442 }
443 t->mode = 0;
444 if (t->flags & dirstate_flag_has_meaningful_data) {
445 if (t->flags & dirstate_flag_mode_exec_perm) {
446 t->mode = 0755;
447 } else {
448 t->mode = 0644;
449 }
450 if (t->flags & dirstate_flag_mode_is_symlink) {
451 t->mode |= S_IFLNK;
452 } else {
453 t->mode |= S_IFREG;
454 }
455 }
215 456 return (PyObject *)t;
216 457 };
217 458
@@ -219,11 +460,62 b' static PyObject *dirstate_item_from_v1_m'
219 460 to make sure it is correct. */
220 461 static PyObject *dirstate_item_set_possibly_dirty(dirstateItemObject *self)
221 462 {
222 self->mtime = ambiguous_time;
463 self->flags &= ~dirstate_flag_has_mtime;
464 Py_RETURN_NONE;
465 }
466
467 /* See docstring of the python implementation for details */
468 static PyObject *dirstate_item_set_clean(dirstateItemObject *self,
469 PyObject *args)
470 {
471 int size, mode, mtime_s, mtime_ns;
472 if (!PyArg_ParseTuple(args, "ii(ii)", &mode, &size, &mtime_s,
473 &mtime_ns)) {
474 return NULL;
475 }
476 self->flags = dirstate_flag_wc_tracked | dirstate_flag_p1_tracked |
477 dirstate_flag_has_meaningful_data |
478 dirstate_flag_has_mtime;
479 self->mode = mode;
480 self->size = size;
481 self->mtime_s = mtime_s;
482 self->mtime_ns = mtime_ns;
223 483 Py_RETURN_NONE;
224 484 }
225 485
486 static PyObject *dirstate_item_set_tracked(dirstateItemObject *self)
487 {
488 self->flags |= dirstate_flag_wc_tracked;
489 self->flags &= ~dirstate_flag_has_mtime;
490 Py_RETURN_NONE;
491 }
492
493 static PyObject *dirstate_item_set_untracked(dirstateItemObject *self)
494 {
495 self->flags &= ~dirstate_flag_wc_tracked;
496 self->mode = 0;
497 self->size = 0;
498 self->mtime_s = 0;
499 self->mtime_ns = 0;
500 Py_RETURN_NONE;
501 }
502
503 static PyObject *dirstate_item_drop_merge_data(dirstateItemObject *self)
504 {
505 if (self->flags & dirstate_flag_p2_info) {
506 self->flags &= ~(dirstate_flag_p2_info |
507 dirstate_flag_has_meaningful_data |
508 dirstate_flag_has_mtime);
509 self->mode = 0;
510 self->size = 0;
511 self->mtime_s = 0;
512 self->mtime_ns = 0;
513 }
514 Py_RETURN_NONE;
515 }
226 516 static PyMethodDef dirstate_item_methods[] = {
517 {"v2_data", (PyCFunction)dirstate_item_v2_data, METH_NOARGS,
518 "return data suitable for v2 serialization"},
227 519 {"v1_state", (PyCFunction)dirstate_item_v1_state, METH_NOARGS,
228 520 "return a \"state\" suitable for v1 serialization"},
229 521 {"v1_mode", (PyCFunction)dirstate_item_v1_mode, METH_NOARGS,
@@ -234,40 +526,134 b' static PyMethodDef dirstate_item_methods'
234 526 "return a \"mtime\" suitable for v1 serialization"},
235 527 {"need_delay", (PyCFunction)dirstate_item_need_delay, METH_O,
236 528 "True if the stored mtime would be ambiguous with the current time"},
237 {"from_v1_data", (PyCFunction)dirstate_item_from_v1_meth, METH_O,
238 "build a new DirstateItem object from V1 data"},
529 {"mtime_likely_equal_to", (PyCFunction)dirstate_item_mtime_likely_equal_to,
530 METH_O, "True if the stored mtime is likely equal to the given mtime"},
531 {"from_v1_data", (PyCFunction)dirstate_item_from_v1_meth,
532 METH_VARARGS | METH_CLASS, "build a new DirstateItem object from V1 data"},
533 {"from_v2_data", (PyCFunction)dirstate_item_from_v2_meth,
534 METH_VARARGS | METH_CLASS, "build a new DirstateItem object from V2 data"},
239 535 {"set_possibly_dirty", (PyCFunction)dirstate_item_set_possibly_dirty,
240 536 METH_NOARGS, "mark a file as \"possibly dirty\""},
241 {"dm_nonnormal", (PyCFunction)dm_nonnormal, METH_NOARGS,
242 "True is the entry is non-normal in the dirstatemap sense"},
243 {"dm_otherparent", (PyCFunction)dm_otherparent, METH_NOARGS,
244 "True is the entry is `otherparent` in the dirstatemap sense"},
537 {"set_clean", (PyCFunction)dirstate_item_set_clean, METH_VARARGS,
538 "mark a file as \"clean\""},
539 {"set_tracked", (PyCFunction)dirstate_item_set_tracked, METH_NOARGS,
540 "mark a file as \"tracked\""},
541 {"set_untracked", (PyCFunction)dirstate_item_set_untracked, METH_NOARGS,
542 "mark a file as \"untracked\""},
543 {"drop_merge_data", (PyCFunction)dirstate_item_drop_merge_data, METH_NOARGS,
544 "remove all \"merge-only\" from a DirstateItem"},
245 545 {NULL} /* Sentinel */
246 546 };
247 547
248 548 static PyObject *dirstate_item_get_mode(dirstateItemObject *self)
249 549 {
250 return PyInt_FromLong(self->mode);
550 return PyInt_FromLong(dirstate_item_c_v1_mode(self));
251 551 };
252 552
253 553 static PyObject *dirstate_item_get_size(dirstateItemObject *self)
254 554 {
255 return PyInt_FromLong(self->size);
555 return PyInt_FromLong(dirstate_item_c_v1_size(self));
256 556 };
257 557
258 558 static PyObject *dirstate_item_get_mtime(dirstateItemObject *self)
259 559 {
260 return PyInt_FromLong(self->mtime);
560 return PyInt_FromLong(dirstate_item_c_v1_mtime(self));
261 561 };
262 562
263 563 static PyObject *dirstate_item_get_state(dirstateItemObject *self)
264 564 {
265 return PyBytes_FromStringAndSize(&self->state, 1);
565 char state = dirstate_item_c_v1_state(self);
566 return PyBytes_FromStringAndSize(&state, 1);
567 };
568
569 static PyObject *dirstate_item_get_has_fallback_exec(dirstateItemObject *self)
570 {
571 if (dirstate_item_c_has_fallback_exec(self)) {
572 Py_RETURN_TRUE;
573 } else {
574 Py_RETURN_FALSE;
575 }
576 };
577
578 static PyObject *dirstate_item_get_fallback_exec(dirstateItemObject *self)
579 {
580 if (dirstate_item_c_has_fallback_exec(self)) {
581 if (self->flags & dirstate_flag_fallback_exec) {
582 Py_RETURN_TRUE;
583 } else {
584 Py_RETURN_FALSE;
585 }
586 } else {
587 Py_RETURN_NONE;
588 }
589 };
590
591 static int dirstate_item_set_fallback_exec(dirstateItemObject *self,
592 PyObject *value)
593 {
594 if ((value == Py_None) || (value == NULL)) {
595 self->flags &= ~dirstate_flag_has_fallback_exec;
596 } else {
597 self->flags |= dirstate_flag_has_fallback_exec;
598 if (PyObject_IsTrue(value)) {
599 self->flags |= dirstate_flag_fallback_exec;
600 } else {
601 self->flags &= ~dirstate_flag_fallback_exec;
602 }
603 }
604 return 0;
605 };
606
607 static PyObject *
608 dirstate_item_get_has_fallback_symlink(dirstateItemObject *self)
609 {
610 if (dirstate_item_c_has_fallback_symlink(self)) {
611 Py_RETURN_TRUE;
612 } else {
613 Py_RETURN_FALSE;
614 }
615 };
616
617 static PyObject *dirstate_item_get_fallback_symlink(dirstateItemObject *self)
618 {
619 if (dirstate_item_c_has_fallback_symlink(self)) {
620 if (self->flags & dirstate_flag_fallback_symlink) {
621 Py_RETURN_TRUE;
622 } else {
623 Py_RETURN_FALSE;
624 }
625 } else {
626 Py_RETURN_NONE;
627 }
628 };
629
630 static int dirstate_item_set_fallback_symlink(dirstateItemObject *self,
631 PyObject *value)
632 {
633 if ((value == Py_None) || (value == NULL)) {
634 self->flags &= ~dirstate_flag_has_fallback_symlink;
635 } else {
636 self->flags |= dirstate_flag_has_fallback_symlink;
637 if (PyObject_IsTrue(value)) {
638 self->flags |= dirstate_flag_fallback_symlink;
639 } else {
640 self->flags &= ~dirstate_flag_fallback_symlink;
641 }
642 }
643 return 0;
266 644 };
267 645
268 646 static PyObject *dirstate_item_get_tracked(dirstateItemObject *self)
269 647 {
270 if (self->state == 'a' || self->state == 'm' || self->state == 'n') {
648 if (dirstate_item_c_tracked(self)) {
649 Py_RETURN_TRUE;
650 } else {
651 Py_RETURN_FALSE;
652 }
653 };
654 static PyObject *dirstate_item_get_p1_tracked(dirstateItemObject *self)
655 {
656 if (self->flags & dirstate_flag_p1_tracked) {
271 657 Py_RETURN_TRUE;
272 658 } else {
273 659 Py_RETURN_FALSE;
@@ -276,7 +662,17 b' static PyObject *dirstate_item_get_track'
276 662
277 663 static PyObject *dirstate_item_get_added(dirstateItemObject *self)
278 664 {
279 if (self->state == 'a') {
665 if (dirstate_item_c_added(self)) {
666 Py_RETURN_TRUE;
667 } else {
668 Py_RETURN_FALSE;
669 }
670 };
671
672 static PyObject *dirstate_item_get_p2_info(dirstateItemObject *self)
673 {
674 if (self->flags & dirstate_flag_wc_tracked &&
675 self->flags & dirstate_flag_p2_info) {
280 676 Py_RETURN_TRUE;
281 677 } else {
282 678 Py_RETURN_FALSE;
@@ -285,16 +681,7 b' static PyObject *dirstate_item_get_added'
285 681
286 682 static PyObject *dirstate_item_get_merged(dirstateItemObject *self)
287 683 {
288 if (self->state == 'm') {
289 Py_RETURN_TRUE;
290 } else {
291 Py_RETURN_FALSE;
292 }
293 };
294
295 static PyObject *dirstate_item_get_merged_removed(dirstateItemObject *self)
296 {
297 if (self->state == 'r' && self->size == dirstate_v1_nonnormal) {
684 if (dirstate_item_c_merged(self)) {
298 685 Py_RETURN_TRUE;
299 686 } else {
300 687 Py_RETURN_FALSE;
@@ -303,16 +690,29 b' static PyObject *dirstate_item_get_merge'
303 690
304 691 static PyObject *dirstate_item_get_from_p2(dirstateItemObject *self)
305 692 {
306 if (self->state == 'n' && self->size == dirstate_v1_from_p2) {
693 if (dirstate_item_c_from_p2(self)) {
307 694 Py_RETURN_TRUE;
308 695 } else {
309 696 Py_RETURN_FALSE;
310 697 }
311 698 };
312 699
313 static PyObject *dirstate_item_get_from_p2_removed(dirstateItemObject *self)
700 static PyObject *dirstate_item_get_maybe_clean(dirstateItemObject *self)
314 701 {
315 if (self->state == 'r' && self->size == dirstate_v1_from_p2) {
702 if (!(self->flags & dirstate_flag_wc_tracked)) {
703 Py_RETURN_FALSE;
704 } else if (!(self->flags & dirstate_flag_p1_tracked)) {
705 Py_RETURN_FALSE;
706 } else if (self->flags & dirstate_flag_p2_info) {
707 Py_RETURN_FALSE;
708 } else {
709 Py_RETURN_TRUE;
710 }
711 };
712
713 static PyObject *dirstate_item_get_any_tracked(dirstateItemObject *self)
714 {
715 if (dirstate_item_c_any_tracked(self)) {
316 716 Py_RETURN_TRUE;
317 717 } else {
318 718 Py_RETURN_FALSE;
@@ -321,7 +721,7 b' static PyObject *dirstate_item_get_from_'
321 721
322 722 static PyObject *dirstate_item_get_removed(dirstateItemObject *self)
323 723 {
324 if (self->state == 'r') {
724 if (dirstate_item_c_removed(self)) {
325 725 Py_RETURN_TRUE;
326 726 } else {
327 727 Py_RETURN_FALSE;
@@ -333,14 +733,25 b' static PyGetSetDef dirstate_item_getset['
333 733 {"size", (getter)dirstate_item_get_size, NULL, "size", NULL},
334 734 {"mtime", (getter)dirstate_item_get_mtime, NULL, "mtime", NULL},
335 735 {"state", (getter)dirstate_item_get_state, NULL, "state", NULL},
736 {"has_fallback_exec", (getter)dirstate_item_get_has_fallback_exec, NULL,
737 "has_fallback_exec", NULL},
738 {"fallback_exec", (getter)dirstate_item_get_fallback_exec,
739 (setter)dirstate_item_set_fallback_exec, "fallback_exec", NULL},
740 {"has_fallback_symlink", (getter)dirstate_item_get_has_fallback_symlink,
741 NULL, "has_fallback_symlink", NULL},
742 {"fallback_symlink", (getter)dirstate_item_get_fallback_symlink,
743 (setter)dirstate_item_set_fallback_symlink, "fallback_symlink", NULL},
336 744 {"tracked", (getter)dirstate_item_get_tracked, NULL, "tracked", NULL},
745 {"p1_tracked", (getter)dirstate_item_get_p1_tracked, NULL, "p1_tracked",
746 NULL},
337 747 {"added", (getter)dirstate_item_get_added, NULL, "added", NULL},
338 {"merged_removed", (getter)dirstate_item_get_merged_removed, NULL,
339 "merged_removed", NULL},
748 {"p2_info", (getter)dirstate_item_get_p2_info, NULL, "p2_info", NULL},
340 749 {"merged", (getter)dirstate_item_get_merged, NULL, "merged", NULL},
341 {"from_p2_removed", (getter)dirstate_item_get_from_p2_removed, NULL,
342 "from_p2_removed", NULL},
343 750 {"from_p2", (getter)dirstate_item_get_from_p2, NULL, "from_p2", NULL},
751 {"maybe_clean", (getter)dirstate_item_get_maybe_clean, NULL, "maybe_clean",
752 NULL},
753 {"any_tracked", (getter)dirstate_item_get_any_tracked, NULL, "any_tracked",
754 NULL},
344 755 {"removed", (getter)dirstate_item_get_removed, NULL, "removed", NULL},
345 756 {NULL} /* Sentinel */
346 757 };
@@ -357,7 +768,7 b' PyTypeObject dirstateItemType = {'
357 768 0, /* tp_compare */
358 769 0, /* tp_repr */
359 770 0, /* tp_as_number */
360 &dirstate_item_sq, /* tp_as_sequence */
771 0, /* tp_as_sequence */
361 772 0, /* tp_as_mapping */
362 773 0, /* tp_hash */
363 774 0, /* tp_call */
@@ -441,6 +852,8 b' static PyObject *parse_dirstate(PyObject'
441 852
442 853 entry = (PyObject *)dirstate_item_from_v1_data(state, mode,
443 854 size, mtime);
855 if (!entry)
856 goto quit;
444 857 cpos = memchr(cur, 0, flen);
445 858 if (cpos) {
446 859 fname = PyBytes_FromStringAndSize(cur, cpos - cur);
@@ -476,68 +889,6 b' quit:'
476 889 }
477 890
478 891 /*
479 * Build a set of non-normal and other parent entries from the dirstate dmap
480 */
481 static PyObject *nonnormalotherparententries(PyObject *self, PyObject *args)
482 {
483 PyObject *dmap, *fname, *v;
484 PyObject *nonnset = NULL, *otherpset = NULL, *result = NULL;
485 Py_ssize_t pos;
486
487 if (!PyArg_ParseTuple(args, "O!:nonnormalentries", &PyDict_Type,
488 &dmap)) {
489 goto bail;
490 }
491
492 nonnset = PySet_New(NULL);
493 if (nonnset == NULL) {
494 goto bail;
495 }
496
497 otherpset = PySet_New(NULL);
498 if (otherpset == NULL) {
499 goto bail;
500 }
501
502 pos = 0;
503 while (PyDict_Next(dmap, &pos, &fname, &v)) {
504 dirstateItemObject *t;
505 if (!dirstate_tuple_check(v)) {
506 PyErr_SetString(PyExc_TypeError,
507 "expected a dirstate tuple");
508 goto bail;
509 }
510 t = (dirstateItemObject *)v;
511
512 if (t->state == 'n' && t->size == -2) {
513 if (PySet_Add(otherpset, fname) == -1) {
514 goto bail;
515 }
516 }
517
518 if (t->state == 'n' && t->mtime != -1) {
519 continue;
520 }
521 if (PySet_Add(nonnset, fname) == -1) {
522 goto bail;
523 }
524 }
525
526 result = Py_BuildValue("(OO)", nonnset, otherpset);
527 if (result == NULL) {
528 goto bail;
529 }
530 Py_DECREF(nonnset);
531 Py_DECREF(otherpset);
532 return result;
533 bail:
534 Py_XDECREF(nonnset);
535 Py_XDECREF(otherpset);
536 Py_XDECREF(result);
537 return NULL;
538 }
539
540 /*
541 892 * Efficiently pack a dirstate object into its on-disk format.
542 893 */
543 894 static PyObject *pack_dirstate(PyObject *self, PyObject *args)
@@ -547,11 +898,12 b' static PyObject *pack_dirstate(PyObject '
547 898 Py_ssize_t nbytes, pos, l;
548 899 PyObject *k, *v = NULL, *pn;
549 900 char *p, *s;
550 int now;
901 int now_s;
902 int now_ns;
551 903
552 if (!PyArg_ParseTuple(args, "O!O!O!i:pack_dirstate", &PyDict_Type, &map,
553 &PyDict_Type, &copymap, &PyTuple_Type, &pl,
554 &now)) {
904 if (!PyArg_ParseTuple(args, "O!O!O!(ii):pack_dirstate", &PyDict_Type,
905 &map, &PyDict_Type, &copymap, &PyTuple_Type, &pl,
906 &now_s, &now_ns)) {
555 907 return NULL;
556 908 }
557 909
@@ -616,15 +968,15 b' static PyObject *pack_dirstate(PyObject '
616 968 }
617 969 tuple = (dirstateItemObject *)v;
618 970
619 state = tuple->state;
620 mode = tuple->mode;
621 size = tuple->size;
622 mtime = tuple->mtime;
623 if (state == 'n' && mtime == now) {
971 state = dirstate_item_c_v1_state(tuple);
972 mode = dirstate_item_c_v1_mode(tuple);
973 size = dirstate_item_c_v1_size(tuple);
974 mtime = dirstate_item_c_v1_mtime(tuple);
975 if (state == 'n' && tuple->mtime_s == now_s) {
624 976 /* See pure/parsers.py:pack_dirstate for why we do
625 977 * this. */
626 978 mtime = -1;
627 mtime_unset = (PyObject *)make_dirstate_item(
979 mtime_unset = (PyObject *)dirstate_item_from_v1_data(
628 980 state, mode, size, mtime);
629 981 if (!mtime_unset) {
630 982 goto bail;
@@ -869,9 +1221,6 b' PyObject *parse_index2(PyObject *self, P'
869 1221
870 1222 static PyMethodDef methods[] = {
871 1223 {"pack_dirstate", pack_dirstate, METH_VARARGS, "pack a dirstate\n"},
872 {"nonnormalotherparententries", nonnormalotherparententries, METH_VARARGS,
873 "create a set containing non-normal and other parent entries of given "
874 "dirstate\n"},
875 1224 {"parse_dirstate", parse_dirstate, METH_VARARGS, "parse a dirstate\n"},
876 1225 {"parse_index2", (PyCFunction)parse_index2, METH_VARARGS | METH_KEYWORDS,
877 1226 "parse a revlog index\n"},
@@ -899,7 +1248,6 b' static const int version = 20;'
899 1248
900 1249 static void module_init(PyObject *mod)
901 1250 {
902 PyObject *capsule = NULL;
903 1251 PyModule_AddIntConstant(mod, "version", version);
904 1252
905 1253 /* This module constant has two purposes. First, it lets us unit test
@@ -916,12 +1264,6 b' static void module_init(PyObject *mod)'
916 1264 manifest_module_init(mod);
917 1265 revlog_module_init(mod);
918 1266
919 capsule = PyCapsule_New(
920 make_dirstate_item,
921 "mercurial.cext.parsers.make_dirstate_item_CAPI", NULL);
922 if (capsule != NULL)
923 PyModule_AddObject(mod, "make_dirstate_item_CAPI", capsule);
924
925 1267 if (PyType_Ready(&dirstateItemType) < 0) {
926 1268 return;
927 1269 }
@@ -24,13 +24,31 b''
24 24 /* clang-format off */
25 25 typedef struct {
26 26 PyObject_HEAD
27 char state;
27 int flags;
28 28 int mode;
29 29 int size;
30 int mtime;
30 int mtime_s;
31 int mtime_ns;
31 32 } dirstateItemObject;
32 33 /* clang-format on */
33 34
35 static const int dirstate_flag_wc_tracked = 1 << 0;
36 static const int dirstate_flag_p1_tracked = 1 << 1;
37 static const int dirstate_flag_p2_info = 1 << 2;
38 static const int dirstate_flag_mode_exec_perm = 1 << 3;
39 static const int dirstate_flag_mode_is_symlink = 1 << 4;
40 static const int dirstate_flag_has_fallback_exec = 1 << 5;
41 static const int dirstate_flag_fallback_exec = 1 << 6;
42 static const int dirstate_flag_has_fallback_symlink = 1 << 7;
43 static const int dirstate_flag_fallback_symlink = 1 << 8;
44 static const int dirstate_flag_expected_state_is_modified = 1 << 9;
45 static const int dirstate_flag_has_meaningful_data = 1 << 10;
46 static const int dirstate_flag_has_mtime = 1 << 11;
47 static const int dirstate_flag_mtime_second_ambiguous = 1 << 12;
48 static const int dirstate_flag_directory = 1 << 13;
49 static const int dirstate_flag_all_unknown_recorded = 1 << 14;
50 static const int dirstate_flag_all_ignored_recorded = 1 << 15;
51
34 52 extern PyTypeObject dirstateItemType;
35 53 #define dirstate_tuple_check(op) (Py_TYPE(op) == &dirstateItemType)
36 54
@@ -626,7 +626,7 b' def dorecord('
626 626 for realname, tmpname in pycompat.iteritems(backups):
627 627 ui.debug(b'restoring %r to %r\n' % (tmpname, realname))
628 628
629 if dirstate[realname] == b'n':
629 if dirstate.get_entry(realname).maybe_clean:
630 630 # without normallookup, restoring timestamp
631 631 # may cause partially committed files
632 632 # to be treated as unmodified
@@ -987,7 +987,7 b' def changebranch(ui, repo, revs, label, '
987 987 with repo.wlock(), repo.lock(), repo.transaction(b'branches'):
988 988 # abort in case of uncommitted merge or dirty wdir
989 989 bailifchanged(repo)
990 revs = scmutil.revrange(repo, revs)
990 revs = logcmdutil.revrange(repo, revs)
991 991 if not revs:
992 992 raise error.InputError(b"empty revision set")
993 993 roots = repo.revs(b'roots(%ld)', revs)
@@ -1480,7 +1480,7 b' def copy(ui, repo, pats, opts, rename=Fa'
1480 1480 # TODO: Remove this restriction and make it also create the copy
1481 1481 # targets (and remove the rename source if rename==True).
1482 1482 raise error.InputError(_(b'--at-rev requires --after'))
1483 ctx = scmutil.revsingle(repo, rev)
1483 ctx = logcmdutil.revsingle(repo, rev)
1484 1484 if len(ctx.parents()) > 1:
1485 1485 raise error.InputError(
1486 1486 _(b'cannot mark/unmark copy in merge commit')
@@ -1642,7 +1642,9 b' def copy(ui, repo, pats, opts, rename=Fa'
1642 1642 reltarget = repo.pathto(abstarget, cwd)
1643 1643 target = repo.wjoin(abstarget)
1644 1644 src = repo.wjoin(abssrc)
1645 state = repo.dirstate[abstarget]
1645 entry = repo.dirstate.get_entry(abstarget)
1646
1647 already_commited = entry.tracked and not entry.added
1646 1648
1647 1649 scmutil.checkportable(ui, abstarget)
1648 1650
@@ -1672,30 +1674,48 b' def copy(ui, repo, pats, opts, rename=Fa'
1672 1674 exists = False
1673 1675 samefile = True
1674 1676
1675 if not after and exists or after and state in b'mn':
1677 if not after and exists or after and already_commited:
1676 1678 if not opts[b'force']:
1677 if state in b'mn':
1679 if already_commited:
1678 1680 msg = _(b'%s: not overwriting - file already committed\n')
1679 if after:
1680 flags = b'--after --force'
1681 # Check if if the target was added in the parent and the
1682 # source already existed in the grandparent.
1683 looks_like_copy_in_pctx = abstarget in pctx and any(
1684 abssrc in gpctx and abstarget not in gpctx
1685 for gpctx in pctx.parents()
1686 )
1687 if looks_like_copy_in_pctx:
1688 if rename:
1689 hint = _(
1690 b"('hg rename --at-rev .' to record the rename "
1691 b"in the parent of the working copy)\n"
1692 )
1693 else:
1694 hint = _(
1695 b"('hg copy --at-rev .' to record the copy in "
1696 b"the parent of the working copy)\n"
1697 )
1681 1698 else:
1682 flags = b'--force'
1683 if rename:
1684 hint = (
1685 _(
1686 b"('hg rename %s' to replace the file by "
1687 b'recording a rename)\n'
1699 if after:
1700 flags = b'--after --force'
1701 else:
1702 flags = b'--force'
1703 if rename:
1704 hint = (
1705 _(
1706 b"('hg rename %s' to replace the file by "
1707 b'recording a rename)\n'
1708 )
1709 % flags
1688 1710 )
1689 % flags
1690 )
1691 else:
1692 hint = (
1693 _(
1694 b"('hg copy %s' to replace the file by "
1695 b'recording a copy)\n'
1711 else:
1712 hint = (
1713 _(
1714 b"('hg copy %s' to replace the file by "
1715 b'recording a copy)\n'
1716 )
1717 % flags
1696 1718 )
1697 % flags
1698 )
1699 1719 else:
1700 1720 msg = _(b'%s: not overwriting - file exists\n')
1701 1721 if rename:
@@ -3350,7 +3370,11 b' def revert(ui, repo, ctx, *pats, **opts)'
3350 3370 for f in localchanges:
3351 3371 src = repo.dirstate.copied(f)
3352 3372 # XXX should we check for rename down to target node?
3353 if src and src not in names and repo.dirstate[src] == b'r':
3373 if (
3374 src
3375 and src not in names
3376 and repo.dirstate.get_entry(src).removed
3377 ):
3354 3378 dsremoved.add(src)
3355 3379 names[src] = True
3356 3380
@@ -3364,12 +3388,12 b' def revert(ui, repo, ctx, *pats, **opts)'
3364 3388 # distinguish between file to forget and the other
3365 3389 added = set()
3366 3390 for abs in dsadded:
3367 if repo.dirstate[abs] != b'a':
3391 if not repo.dirstate.get_entry(abs).added:
3368 3392 added.add(abs)
3369 3393 dsadded -= added
3370 3394
3371 3395 for abs in deladded:
3372 if repo.dirstate[abs] == b'a':
3396 if repo.dirstate.get_entry(abs).added:
3373 3397 dsadded.add(abs)
3374 3398 deladded -= dsadded
3375 3399
@@ -445,7 +445,7 b' def annotate(ui, repo, *pats, **opts):'
445 445 rev = opts.get(b'rev')
446 446 if rev:
447 447 repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
448 ctx = scmutil.revsingle(repo, rev)
448 ctx = logcmdutil.revsingle(repo, rev)
449 449
450 450 ui.pager(b'annotate')
451 451 rootfm = ui.formatter(b'annotate', opts)
@@ -526,7 +526,7 b' def annotate(ui, repo, *pats, **opts):'
526 526 )
527 527
528 528 def bad(x, y):
529 raise error.Abort(b"%s: %s" % (x, y))
529 raise error.InputError(b"%s: %s" % (x, y))
530 530
531 531 m = scmutil.match(ctx, pats, opts, badfn=bad)
532 532
@@ -536,7 +536,7 b' def annotate(ui, repo, *pats, **opts):'
536 536 )
537 537 skiprevs = opts.get(b'skip')
538 538 if skiprevs:
539 skiprevs = scmutil.revrange(repo, skiprevs)
539 skiprevs = logcmdutil.revrange(repo, skiprevs)
540 540
541 541 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
542 542 for abs in ctx.walk(m):
@@ -649,7 +649,7 b' def archive(ui, repo, dest, **opts):'
649 649 rev = opts.get(b'rev')
650 650 if rev:
651 651 repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
652 ctx = scmutil.revsingle(repo, rev)
652 ctx = logcmdutil.revsingle(repo, rev)
653 653 if not ctx:
654 654 raise error.InputError(
655 655 _(b'no working directory: please specify a revision')
@@ -791,7 +791,7 b' def _dobackout(ui, repo, node=None, rev='
791 791
792 792 cmdutil.checkunfinished(repo)
793 793 cmdutil.bailifchanged(repo)
794 ctx = scmutil.revsingle(repo, rev)
794 ctx = logcmdutil.revsingle(repo, rev)
795 795 node = ctx.node()
796 796
797 797 op1, op2 = repo.dirstate.parents()
@@ -1037,7 +1037,7 b' def bisect('
1037 1037 state = hbisect.load_state(repo)
1038 1038
1039 1039 if rev:
1040 nodes = [repo[i].node() for i in scmutil.revrange(repo, rev)]
1040 nodes = [repo[i].node() for i in logcmdutil.revrange(repo, rev)]
1041 1041 else:
1042 1042 nodes = [repo.lookup(b'.')]
1043 1043
@@ -1081,7 +1081,7 b' def bisect('
1081 1081 raise error.StateError(_(b'current bisect revision is a merge'))
1082 1082 if rev:
1083 1083 if not nodes:
1084 raise error.Abort(_(b'empty revision set'))
1084 raise error.InputError(_(b'empty revision set'))
1085 1085 node = repo[nodes[-1]].node()
1086 1086 with hbisect.restore_state(repo, state, node):
1087 1087 while changesets:
@@ -1424,7 +1424,7 b' def branches(ui, repo, active=False, clo'
1424 1424 revs = opts.get(b'rev')
1425 1425 selectedbranches = None
1426 1426 if revs:
1427 revs = scmutil.revrange(repo, revs)
1427 revs = logcmdutil.revrange(repo, revs)
1428 1428 getbi = repo.revbranchcache().branchinfo
1429 1429 selectedbranches = {getbi(r)[0] for r in revs}
1430 1430
@@ -1558,7 +1558,7 b' def bundle(ui, repo, fname, *dests, **op'
1558 1558 revs = None
1559 1559 if b'rev' in opts:
1560 1560 revstrings = opts[b'rev']
1561 revs = scmutil.revrange(repo, revstrings)
1561 revs = logcmdutil.revrange(repo, revstrings)
1562 1562 if revstrings and not revs:
1563 1563 raise error.InputError(_(b'no commits to bundle'))
1564 1564
@@ -1590,7 +1590,7 b' def bundle(ui, repo, fname, *dests, **op'
1590 1590 ui.warn(_(b"ignoring --base because --all was specified\n"))
1591 1591 base = [nullrev]
1592 1592 else:
1593 base = scmutil.revrange(repo, opts.get(b'base'))
1593 base = logcmdutil.revrange(repo, opts.get(b'base'))
1594 1594 if cgversion not in changegroup.supportedoutgoingversions(repo):
1595 1595 raise error.Abort(
1596 1596 _(b"repository does not support bundle version %s") % cgversion
@@ -1761,7 +1761,7 b' def cat(ui, repo, file1, *pats, **opts):'
1761 1761 rev = opts.get(b'rev')
1762 1762 if rev:
1763 1763 repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
1764 ctx = scmutil.revsingle(repo, rev)
1764 ctx = logcmdutil.revsingle(repo, rev)
1765 1765 m = scmutil.match(ctx, (file1,) + pats, opts)
1766 1766 fntemplate = opts.pop(b'output', b'')
1767 1767 if cmdutil.isstdiofilename(fntemplate):
@@ -2600,17 +2600,17 b' def diff(ui, repo, *pats, **opts):'
2600 2600 cmdutil.check_incompatible_arguments(opts, b'to', [b'rev', b'change'])
2601 2601 if change:
2602 2602 repo = scmutil.unhidehashlikerevs(repo, [change], b'nowarn')
2603 ctx2 = scmutil.revsingle(repo, change, None)
2603 ctx2 = logcmdutil.revsingle(repo, change, None)
2604 2604 ctx1 = logcmdutil.diff_parent(ctx2)
2605 2605 elif from_rev or to_rev:
2606 2606 repo = scmutil.unhidehashlikerevs(
2607 2607 repo, [from_rev] + [to_rev], b'nowarn'
2608 2608 )
2609 ctx1 = scmutil.revsingle(repo, from_rev, None)
2610 ctx2 = scmutil.revsingle(repo, to_rev, None)
2609 ctx1 = logcmdutil.revsingle(repo, from_rev, None)
2610 ctx2 = logcmdutil.revsingle(repo, to_rev, None)
2611 2611 else:
2612 2612 repo = scmutil.unhidehashlikerevs(repo, revs, b'nowarn')
2613 ctx1, ctx2 = scmutil.revpair(repo, revs)
2613 ctx1, ctx2 = logcmdutil.revpair(repo, revs)
2614 2614
2615 2615 if reverse:
2616 2616 ctxleft = ctx2
@@ -2753,7 +2753,7 b' def export(ui, repo, *changesets, **opts'
2753 2753 changesets = [b'.']
2754 2754
2755 2755 repo = scmutil.unhidehashlikerevs(repo, changesets, b'nowarn')
2756 revs = scmutil.revrange(repo, changesets)
2756 revs = logcmdutil.revrange(repo, changesets)
2757 2757
2758 2758 if not revs:
2759 2759 raise error.InputError(_(b"export requires at least one changeset"))
@@ -2864,7 +2864,7 b' def files(ui, repo, *pats, **opts):'
2864 2864 rev = opts.get(b'rev')
2865 2865 if rev:
2866 2866 repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
2867 ctx = scmutil.revsingle(repo, rev, None)
2867 ctx = logcmdutil.revsingle(repo, rev, None)
2868 2868
2869 2869 end = b'\n'
2870 2870 if opts.get(b'print0'):
@@ -3170,12 +3170,12 b' def _dograft(ui, repo, *revs, **opts):'
3170 3170 raise error.InputError(_(b'no revisions specified'))
3171 3171 cmdutil.checkunfinished(repo)
3172 3172 cmdutil.bailifchanged(repo)
3173 revs = scmutil.revrange(repo, revs)
3173 revs = logcmdutil.revrange(repo, revs)
3174 3174
3175 3175 skipped = set()
3176 3176 basectx = None
3177 3177 if opts.get('base'):
3178 basectx = scmutil.revsingle(repo, opts['base'], None)
3178 basectx = logcmdutil.revsingle(repo, opts['base'], None)
3179 3179 if basectx is None:
3180 3180 # check for merges
3181 3181 for rev in repo.revs(b'%ld and merge()', revs):
@@ -3696,7 +3696,7 b' def heads(ui, repo, *branchrevs, **opts)'
3696 3696 rev = opts.get(b'rev')
3697 3697 if rev:
3698 3698 repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
3699 start = scmutil.revsingle(repo, rev, None).node()
3699 start = logcmdutil.revsingle(repo, rev, None).node()
3700 3700
3701 3701 if opts.get(b'topo'):
3702 3702 heads = [repo[h] for h in repo.heads(start)]
@@ -3708,7 +3708,7 b' def heads(ui, repo, *branchrevs, **opts)'
3708 3708
3709 3709 if branchrevs:
3710 3710 branches = {
3711 repo[r].branch() for r in scmutil.revrange(repo, branchrevs)
3711 repo[r].branch() for r in logcmdutil.revrange(repo, branchrevs)
3712 3712 }
3713 3713 heads = [h for h in heads if h.branch() in branches]
3714 3714
@@ -3932,7 +3932,7 b' def identify('
3932 3932 else:
3933 3933 if rev:
3934 3934 repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
3935 ctx = scmutil.revsingle(repo, rev, None)
3935 ctx = logcmdutil.revsingle(repo, rev, None)
3936 3936
3937 3937 if ctx.rev() is None:
3938 3938 ctx = repo[None]
@@ -4346,8 +4346,11 b' def incoming(ui, repo, source=b"default"'
4346 4346 cmdutil.check_incompatible_arguments(opts, b'subrepos', [b'bundle'])
4347 4347
4348 4348 if opts.get(b'bookmarks'):
4349 srcs = urlutil.get_pull_paths(repo, ui, [source], opts.get(b'branch'))
4350 for source, branches in srcs:
4349 srcs = urlutil.get_pull_paths(repo, ui, [source])
4350 for path in srcs:
4351 source, branches = urlutil.parseurl(
4352 path.rawloc, opts.get(b'branch')
4353 )
4351 4354 other = hg.peer(repo, opts, source)
4352 4355 try:
4353 4356 if b'bookmarks' not in other.listkeys(b'namespaces'):
@@ -4357,7 +4360,9 b' def incoming(ui, repo, source=b"default"'
4357 4360 ui.status(
4358 4361 _(b'comparing with %s\n') % urlutil.hidepassword(source)
4359 4362 )
4360 return bookmarks.incoming(ui, repo, other)
4363 return bookmarks.incoming(
4364 ui, repo, other, mode=path.bookmarks_mode
4365 )
4361 4366 finally:
4362 4367 other.close()
4363 4368
@@ -4445,7 +4450,7 b' def locate(ui, repo, *pats, **opts):'
4445 4450 end = b'\0'
4446 4451 else:
4447 4452 end = b'\n'
4448 ctx = scmutil.revsingle(repo, opts.get(b'rev'), None)
4453 ctx = logcmdutil.revsingle(repo, opts.get(b'rev'), None)
4449 4454
4450 4455 ret = 1
4451 4456 m = scmutil.match(
@@ -4790,7 +4795,7 b' def manifest(ui, repo, node=None, rev=No'
4790 4795 mode = {b'l': b'644', b'x': b'755', b'': b'644', b't': b'755'}
4791 4796 if node:
4792 4797 repo = scmutil.unhidehashlikerevs(repo, [node], b'nowarn')
4793 ctx = scmutil.revsingle(repo, node)
4798 ctx = logcmdutil.revsingle(repo, node)
4794 4799 mf = ctx.manifest()
4795 4800 ui.pager(b'manifest')
4796 4801 for f in ctx:
@@ -4877,7 +4882,7 b' def merge(ui, repo, node=None, **opts):'
4877 4882 node = opts.get(b'rev')
4878 4883
4879 4884 if node:
4880 ctx = scmutil.revsingle(repo, node)
4885 ctx = logcmdutil.revsingle(repo, node)
4881 4886 else:
4882 4887 if ui.configbool(b'commands', b'merge.require-rev'):
4883 4888 raise error.InputError(
@@ -5056,7 +5061,7 b' def parents(ui, repo, file_=None, **opts'
5056 5061 rev = opts.get(b'rev')
5057 5062 if rev:
5058 5063 repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
5059 ctx = scmutil.revsingle(repo, rev, None)
5064 ctx = logcmdutil.revsingle(repo, rev, None)
5060 5065
5061 5066 if file_:
5062 5067 m = scmutil.match(ctx, (file_,), opts)
@@ -5219,13 +5224,13 b' def phase(ui, repo, *revs, **opts):'
5219 5224 # look for specified revision
5220 5225 revs = list(revs)
5221 5226 revs.extend(opts[b'rev'])
5222 if not revs:
5227 if revs:
5228 revs = logcmdutil.revrange(repo, revs)
5229 else:
5223 5230 # display both parents as the second parent phase can influence
5224 5231 # the phase of a merge commit
5225 5232 revs = [c.rev() for c in repo[None].parents()]
5226 5233
5227 revs = scmutil.revrange(repo, revs)
5228
5229 5234 ret = 0
5230 5235 if targetphase is None:
5231 5236 # display
@@ -5393,8 +5398,8 b' def pull(ui, repo, *sources, **opts):'
5393 5398 hint = _(b'use hg pull followed by hg update DEST')
5394 5399 raise error.InputError(msg, hint=hint)
5395 5400
5396 sources = urlutil.get_pull_paths(repo, ui, sources, opts.get(b'branch'))
5397 for source, branches in sources:
5401 for path in urlutil.get_pull_paths(repo, ui, sources):
5402 source, branches = urlutil.parseurl(path.rawloc, opts.get(b'branch'))
5398 5403 ui.status(_(b'pulling from %s\n') % urlutil.hidepassword(source))
5399 5404 ui.flush()
5400 5405 other = hg.peer(repo, opts, source)
@@ -5451,6 +5456,7 b' def pull(ui, repo, *sources, **opts):'
5451 5456 modheads = exchange.pull(
5452 5457 repo,
5453 5458 other,
5459 path=path,
5454 5460 heads=nodes,
5455 5461 force=opts.get(b'force'),
5456 5462 bookmarks=opts.get(b'bookmark', ()),
@@ -5735,7 +5741,7 b' def push(ui, repo, *dests, **opts):'
5735 5741
5736 5742 try:
5737 5743 if revs:
5738 revs = [repo[r].node() for r in scmutil.revrange(repo, revs)]
5744 revs = [repo[r].node() for r in logcmdutil.revrange(repo, revs)]
5739 5745 if not revs:
5740 5746 raise error.InputError(
5741 5747 _(b"specified revisions evaluate to an empty set"),
@@ -6347,7 +6353,7 b' def revert(ui, repo, *pats, **opts):'
6347 6353 rev = opts.get(b'rev')
6348 6354 if rev:
6349 6355 repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
6350 ctx = scmutil.revsingle(repo, rev)
6356 ctx = logcmdutil.revsingle(repo, rev)
6351 6357
6352 6358 if not (
6353 6359 pats
@@ -6905,11 +6911,11 b' def status(ui, repo, *pats, **opts):'
6905 6911 raise error.InputError(msg)
6906 6912 elif change:
6907 6913 repo = scmutil.unhidehashlikerevs(repo, [change], b'nowarn')
6908 ctx2 = scmutil.revsingle(repo, change, None)
6914 ctx2 = logcmdutil.revsingle(repo, change, None)
6909 6915 ctx1 = ctx2.p1()
6910 6916 else:
6911 6917 repo = scmutil.unhidehashlikerevs(repo, revs, b'nowarn')
6912 ctx1, ctx2 = scmutil.revpair(repo, revs)
6918 ctx1, ctx2 = logcmdutil.revpair(repo, revs)
6913 6919
6914 6920 forcerelativevalue = None
6915 6921 if ui.hasconfig(b'commands', b'status.relative'):
@@ -7453,7 +7459,7 b' def tag(ui, repo, name1, *names, **opts)'
7453 7459 b'(use -f to force)'
7454 7460 )
7455 7461 )
7456 node = scmutil.revsingle(repo, rev_).node()
7462 node = logcmdutil.revsingle(repo, rev_).node()
7457 7463
7458 7464 if not message:
7459 7465 # we don't translate commit messages
@@ -7477,7 +7483,7 b' def tag(ui, repo, name1, *names, **opts)'
7477 7483 # don't allow tagging the null rev
7478 7484 if (
7479 7485 not opts.get(b'remove')
7480 and scmutil.revsingle(repo, rev_).rev() == nullrev
7486 and logcmdutil.revsingle(repo, rev_).rev() == nullrev
7481 7487 ):
7482 7488 raise error.InputError(_(b"cannot tag null revision"))
7483 7489
@@ -7840,7 +7846,7 b' def update(ui, repo, node=None, **opts):'
7840 7846 brev = rev
7841 7847 if rev:
7842 7848 repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
7843 ctx = scmutil.revsingle(repo, rev, default=None)
7849 ctx = logcmdutil.revsingle(repo, rev, default=None)
7844 7850 rev = ctx.rev()
7845 7851 hidden = ctx.hidden()
7846 7852 overrides = {(b'ui', b'forcemerge'): opts.get('tool', b'')}
@@ -1,5 +1,5 b''
1 #ifndef _HG_COMPAT_H_
2 #define _HG_COMPAT_H_
1 #ifndef HG_COMPAT_H
2 #define HG_COMPAT_H
3 3
4 4 #ifdef _WIN32
5 5 #ifdef _MSC_VER
@@ -959,11 +959,6 b' coreconfigitem('
959 959 )
960 960 coreconfigitem(
961 961 b'experimental',
962 b'dirstate-tree.in-memory',
963 default=False,
964 )
965 coreconfigitem(
966 b'experimental',
967 962 b'editortmpinhg',
968 963 default=False,
969 964 )
@@ -1266,6 +1261,11 b' coreconfigitem('
1266 1261 )
1267 1262 coreconfigitem(
1268 1263 b'experimental',
1264 b'web.full-garbage-collection-rate',
1265 default=1, # still forcing a full collection on each request
1266 )
1267 coreconfigitem(
1268 b'experimental',
1269 1269 b'worker.wdir-get-thread-safe',
1270 1270 default=False,
1271 1271 )
@@ -1306,7 +1306,7 b' coreconfigitem('
1306 1306 # Enable this dirstate format *when creating a new repository*.
1307 1307 # Which format to use for existing repos is controlled by .hg/requires
1308 1308 b'format',
1309 b'exp-dirstate-v2',
1309 b'exp-rc-dirstate-v2',
1310 1310 default=False,
1311 1311 experimental=True,
1312 1312 )
@@ -1880,6 +1880,13 b' coreconfigitem('
1880 1880 default=b'skip',
1881 1881 experimental=True,
1882 1882 )
1883 # experimental as long as format.exp-rc-dirstate-v2 is.
1884 coreconfigitem(
1885 b'storage',
1886 b'dirstate-v2.slow-path',
1887 default=b"abort",
1888 experimental=True,
1889 )
1883 1890 coreconfigitem(
1884 1891 b'storage',
1885 1892 b'new-repo-backend',
@@ -1551,11 +1551,11 b' class workingctx(committablectx):'
1551 1551 def __iter__(self):
1552 1552 d = self._repo.dirstate
1553 1553 for f in d:
1554 if d[f] != b'r':
1554 if d.get_entry(f).tracked:
1555 1555 yield f
1556 1556
1557 1557 def __contains__(self, key):
1558 return self._repo.dirstate[key] not in b"?r"
1558 return self._repo.dirstate.get_entry(key).tracked
1559 1559
1560 1560 def hex(self):
1561 1561 return self._repo.nodeconstants.wdirhex
@@ -2017,7 +2017,7 b' class workingctx(committablectx):'
2017 2017 def matches(self, match):
2018 2018 match = self._repo.narrowmatch(match)
2019 2019 ds = self._repo.dirstate
2020 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
2020 return sorted(f for f in ds.matches(match) if ds.get_entry(f).tracked)
2021 2021
2022 2022 def markcommitted(self, node):
2023 2023 with self._repo.dirstate.parentchange():
@@ -94,7 +94,7 b' def _dirstatecopies(repo, match=None):'
94 94 ds = repo.dirstate
95 95 c = ds.copies().copy()
96 96 for k in list(c):
97 if ds[k] not in b'anm' or (match and not match(k)):
97 if not ds.get_entry(k).tracked or (match and not match(k)):
98 98 del c[k]
99 99 return c
100 100
@@ -506,7 +506,7 b' def debugcapabilities(ui, path, **opts):'
506 506 )
507 507 def debugchangedfiles(ui, repo, rev, **opts):
508 508 """list the stored files changes for a revision"""
509 ctx = scmutil.revsingle(repo, rev, None)
509 ctx = logcmdutil.revsingle(repo, rev, None)
510 510 files = None
511 511
512 512 if opts['compute']:
@@ -550,24 +550,9 b' def debugcheckstate(ui, repo):'
550 550 m1 = repo[parent1].manifest()
551 551 m2 = repo[parent2].manifest()
552 552 errors = 0
553 for f in repo.dirstate:
554 state = repo.dirstate[f]
555 if state in b"nr" and f not in m1:
556 ui.warn(_(b"%s in state %s, but not in manifest1\n") % (f, state))
557 errors += 1
558 if state in b"a" and f in m1:
559 ui.warn(_(b"%s in state %s, but also in manifest1\n") % (f, state))
560 errors += 1
561 if state in b"m" and f not in m1 and f not in m2:
562 ui.warn(
563 _(b"%s in state %s, but not in either manifest\n") % (f, state)
564 )
565 errors += 1
566 for f in m1:
567 state = repo.dirstate[f]
568 if state not in b"nrm":
569 ui.warn(_(b"%s in manifest1, but listed as state %s") % (f, state))
570 errors += 1
553 for err in repo.dirstate.verify(m1, m2):
554 ui.warn(err[0] % err[1:])
555 errors += 1
571 556 if errors:
572 557 errstr = _(b".hg/dirstate inconsistent with current parent's manifest")
573 558 raise error.Abort(errstr)
@@ -962,35 +947,29 b' def debugstate(ui, repo, **opts):'
962 947 datesort = opts.get('datesort')
963 948
964 949 if datesort:
965 keyfunc = lambda x: (
966 x[1].v1_mtime(),
967 x[0],
968 ) # sort by mtime, then by filename
950
951 def keyfunc(entry):
952 filename, _state, _mode, _size, mtime = entry
953 return (mtime, filename)
954
969 955 else:
970 956 keyfunc = None # sort by filename
971 if opts['all']:
972 entries = list(repo.dirstate._map.debug_iter())
973 else:
974 entries = list(pycompat.iteritems(repo.dirstate))
957 entries = list(repo.dirstate._map.debug_iter(all=opts['all']))
975 958 entries.sort(key=keyfunc)
976 for file_, ent in entries:
977 if ent.v1_mtime() == -1:
959 for entry in entries:
960 filename, state, mode, size, mtime = entry
961 if mtime == -1:
978 962 timestr = b'unset '
979 963 elif nodates:
980 964 timestr = b'set '
981 965 else:
982 timestr = time.strftime(
983 "%Y-%m-%d %H:%M:%S ", time.localtime(ent.v1_mtime())
984 )
966 timestr = time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime(mtime))
985 967 timestr = encoding.strtolocal(timestr)
986 if ent.mode & 0o20000:
968 if mode & 0o20000:
987 969 mode = b'lnk'
988 970 else:
989 mode = b'%3o' % (ent.v1_mode() & 0o777 & ~util.umask)
990 ui.write(
991 b"%c %s %10d %s%s\n"
992 % (ent.v1_state(), mode, ent.v1_size(), timestr, file_)
993 )
971 mode = b'%3o' % (mode & 0o777 & ~util.umask)
972 ui.write(b"%c %s %10d %s%s\n" % (state, mode, size, timestr, filename))
994 973 for f in repo.dirstate.copies():
995 974 ui.write(_(b"copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
996 975
@@ -1103,7 +1082,7 b' def debugdiscovery(ui, repo, remoteurl=b'
1103 1082 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(remoteurl))
1104 1083 else:
1105 1084 branches = (None, [])
1106 remote_filtered_revs = scmutil.revrange(
1085 remote_filtered_revs = logcmdutil.revrange(
1107 1086 unfi, [b"not (::(%s))" % remote_revs]
1108 1087 )
1109 1088 remote_filtered_revs = frozenset(remote_filtered_revs)
@@ -1117,7 +1096,7 b' def debugdiscovery(ui, repo, remoteurl=b'
1117 1096 remote._repo = remote._repo.filtered(b'debug-discovery-remote-filter')
1118 1097
1119 1098 if local_revs:
1120 local_filtered_revs = scmutil.revrange(
1099 local_filtered_revs = logcmdutil.revrange(
1121 1100 unfi, [b"not (::(%s))" % local_revs]
1122 1101 )
1123 1102 local_filtered_revs = frozenset(local_filtered_revs)
@@ -1155,7 +1134,7 b' def debugdiscovery(ui, repo, remoteurl=b'
1155 1134 def doit(pushedrevs, remoteheads, remote=remote):
1156 1135 nodes = None
1157 1136 if pushedrevs:
1158 revs = scmutil.revrange(repo, pushedrevs)
1137 revs = logcmdutil.revrange(repo, pushedrevs)
1159 1138 nodes = [repo[r].node() for r in revs]
1160 1139 common, any, hds = setdiscovery.findcommonheads(
1161 1140 ui, repo, remote, ancestorsof=nodes, audit=data
@@ -1394,7 +1373,7 b' def debugfileset(ui, repo, expr, **opts)'
1394 1373
1395 1374 fileset.symbols # force import of fileset so we have predicates to optimize
1396 1375 opts = pycompat.byteskwargs(opts)
1397 ctx = scmutil.revsingle(repo, opts.get(b'rev'), None)
1376 ctx = logcmdutil.revsingle(repo, opts.get(b'rev'), None)
1398 1377
1399 1378 stages = [
1400 1379 (b'parsed', pycompat.identity),
@@ -1495,8 +1474,8 b' def debug_repair_issue6528(ui, repo, **o'
1495 1474 filename.
1496 1475
1497 1476 Note that this does *not* mean that this repairs future affected revisions,
1498 that needs a separate fix at the exchange level that hasn't been written yet
1499 (as of 5.9rc0).
1477 that needs a separate fix at the exchange level that was introduced in
1478 Mercurial 5.9.1.
1500 1479
1501 1480 There is a `--paranoid` flag to test that the fast implementation is correct
1502 1481 by checking it against the slow implementation. Since this matter is quite
@@ -2614,7 +2593,7 b' def debugobsolete(ui, repo, precursor=No'
2614 2593 l.release()
2615 2594 else:
2616 2595 if opts[b'rev']:
2617 revs = scmutil.revrange(repo, opts[b'rev'])
2596 revs = logcmdutil.revrange(repo, opts[b'rev'])
2618 2597 nodes = [repo[r].node() for r in revs]
2619 2598 markers = list(
2620 2599 obsutil.getmarkers(
@@ -2981,16 +2960,28 b' def debugrebuilddirstate(ui, repo, rev, '
2981 2960 dirstatefiles = set(dirstate)
2982 2961 manifestonly = manifestfiles - dirstatefiles
2983 2962 dsonly = dirstatefiles - manifestfiles
2984 dsnotadded = {f for f in dsonly if dirstate[f] != b'a'}
2963 dsnotadded = {f for f in dsonly if not dirstate.get_entry(f).added}
2985 2964 changedfiles = manifestonly | dsnotadded
2986 2965
2987 2966 dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
2988 2967
2989 2968
2990 @command(b'debugrebuildfncache', [], b'')
2991 def debugrebuildfncache(ui, repo):
2969 @command(
2970 b'debugrebuildfncache',
2971 [
2972 (
2973 b'',
2974 b'only-data',
2975 False,
2976 _(b'only look for wrong .d files (much faster)'),
2977 )
2978 ],
2979 b'',
2980 )
2981 def debugrebuildfncache(ui, repo, **opts):
2992 2982 """rebuild the fncache file"""
2993 repair.rebuildfncache(ui, repo)
2983 opts = pycompat.byteskwargs(opts)
2984 repair.rebuildfncache(ui, repo, opts.get(b"only_data"))
2994 2985
2995 2986
2996 2987 @command(
@@ -4018,7 +4009,7 b' def debugsuccessorssets(ui, repo, *revs,'
4018 4009 cache = {}
4019 4010 ctx2str = bytes
4020 4011 node2str = short
4021 for rev in scmutil.revrange(repo, revs):
4012 for rev in logcmdutil.revrange(repo, revs):
4022 4013 ctx = repo[rev]
4023 4014 ui.write(b'%s\n' % ctx2str(ctx))
4024 4015 for succsset in obsutil.successorssets(
@@ -4077,7 +4068,7 b' def debugtemplate(ui, repo, tmpl, **opts'
4077 4068 raise error.RepoError(
4078 4069 _(b'there is no Mercurial repository here (.hg not found)')
4079 4070 )
4080 revs = scmutil.revrange(repo, opts['rev'])
4071 revs = logcmdutil.revrange(repo, opts['rev'])
4081 4072
4082 4073 props = {}
4083 4074 for d in opts['define']:
This diff has been collapsed as it changes many lines, (603 lines changed) Show them Hide them
@@ -31,6 +31,10 b' from . import ('
31 31 util,
32 32 )
33 33
34 from .dirstateutils import (
35 timestamp,
36 )
37
34 38 from .interfaces import (
35 39 dirstate as intdirstate,
36 40 util as interfaceutil,
@@ -39,13 +43,13 b' from .interfaces import ('
39 43 parsers = policy.importmod('parsers')
40 44 rustmod = policy.importrust('dirstate')
41 45
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
46 HAS_FAST_DIRSTATE_V2 = rustmod is not None
43 47
44 48 propertycache = util.propertycache
45 49 filecache = scmutil.filecache
46 50 _rangemask = dirstatemap.rangemask
47 51
48 DirstateItem = parsers.DirstateItem
52 DirstateItem = dirstatemap.DirstateItem
49 53
50 54
51 55 class repocache(filecache):
@@ -66,7 +70,7 b' def _getfsnow(vfs):'
66 70 '''Get "now" timestamp on filesystem'''
67 71 tmpfd, tmpname = vfs.mkstemp()
68 72 try:
69 return os.fstat(tmpfd)[stat.ST_MTIME]
73 return timestamp.mtime_of(os.fstat(tmpfd))
70 74 finally:
71 75 os.close(tmpfd)
72 76 vfs.unlink(tmpname)
@@ -122,7 +126,7 b' class dirstate(object):'
122 126 # UNC path pointing to root share (issue4557)
123 127 self._rootdir = pathutil.normasprefix(root)
124 128 self._dirty = False
125 self._lastnormaltime = 0
129 self._lastnormaltime = timestamp.zero()
126 130 self._ui = ui
127 131 self._filecache = {}
128 132 self._parentwriters = 0
@@ -130,7 +134,6 b' class dirstate(object):'
130 134 self._pendingfilename = b'%s.pending' % self._filename
131 135 self._plchangecallbacks = {}
132 136 self._origpl = None
133 self._updatedfiles = set()
134 137 self._mapcls = dirstatemap.dirstatemap
135 138 # Access and cache cwd early, so we don't access it for the first time
136 139 # after a working-copy update caused it to not exist (accessing it then
@@ -239,44 +242,59 b' class dirstate(object):'
239 242 return self._rootdir + f
240 243
241 244 def flagfunc(self, buildfallback):
242 if self._checklink and self._checkexec:
245 """build a callable that returns flags associated with a filename
246
247 The information is extracted from three possible layers:
248 1. the file system if it supports the information
249 2. the "fallback" information stored in the dirstate if any
250 3. a more expensive mechanism inferring the flags from the parents.
251 """
243 252
244 def f(x):
245 try:
246 st = os.lstat(self._join(x))
247 if util.statislink(st):
248 return b'l'
249 if util.statisexec(st):
250 return b'x'
251 except OSError:
252 pass
253 # small hack to cache the result of buildfallback()
254 fallback_func = []
255
256 def get_flags(x):
257 entry = None
258 fallback_value = None
259 try:
260 st = os.lstat(self._join(x))
261 except OSError:
253 262 return b''
254 263
255 return f
256
257 fallback = buildfallback()
258 if self._checklink:
259
260 def f(x):
261 if os.path.islink(self._join(x)):
264 if self._checklink:
265 if util.statislink(st):
262 266 return b'l'
263 if b'x' in fallback(x):
264 return b'x'
265 return b''
267 else:
268 entry = self.get_entry(x)
269 if entry.has_fallback_symlink:
270 if entry.fallback_symlink:
271 return b'l'
272 else:
273 if not fallback_func:
274 fallback_func.append(buildfallback())
275 fallback_value = fallback_func[0](x)
276 if b'l' in fallback_value:
277 return b'l'
266 278
267 return f
268 if self._checkexec:
279 if self._checkexec:
280 if util.statisexec(st):
281 return b'x'
282 else:
283 if entry is None:
284 entry = self.get_entry(x)
285 if entry.has_fallback_exec:
286 if entry.fallback_exec:
287 return b'x'
288 else:
289 if fallback_value is None:
290 if not fallback_func:
291 fallback_func.append(buildfallback())
292 fallback_value = fallback_func[0](x)
293 if b'x' in fallback_value:
294 return b'x'
295 return b''
269 296
270 def f(x):
271 if b'l' in fallback(x):
272 return b'l'
273 if util.isexec(self._join(x)):
274 return b'x'
275 return b''
276
277 return f
278 else:
279 return fallback
297 return get_flags
280 298
281 299 @propertycache
282 300 def _cwd(self):
@@ -328,11 +346,20 b' class dirstate(object):'
328 346 consider migrating all user of this to going through the dirstate entry
329 347 instead.
330 348 """
349 msg = b"don't use dirstate[file], use dirstate.get_entry(file)"
350 util.nouideprecwarn(msg, b'6.1', stacklevel=2)
331 351 entry = self._map.get(key)
332 352 if entry is not None:
333 353 return entry.state
334 354 return b'?'
335 355
356 def get_entry(self, path):
357 """return a DirstateItem for the associated path"""
358 entry = self._map.get(path)
359 if entry is None:
360 return DirstateItem()
361 return entry
362
336 363 def __contains__(self, key):
337 364 return key in self._map
338 365
@@ -344,9 +371,6 b' class dirstate(object):'
344 371
345 372 iteritems = items
346 373
347 def directories(self):
348 return self._map.directories()
349
350 374 def parents(self):
351 375 return [self._validate(p) for p in self._pl]
352 376
@@ -385,32 +409,10 b' class dirstate(object):'
385 409 oldp2 = self._pl[1]
386 410 if self._origpl is None:
387 411 self._origpl = self._pl
388 self._map.setparents(p1, p2)
389 copies = {}
390 if (
391 oldp2 != self._nodeconstants.nullid
392 and p2 == self._nodeconstants.nullid
393 ):
394 candidatefiles = self._map.non_normal_or_other_parent_paths()
395
396 for f in candidatefiles:
397 s = self._map.get(f)
398 if s is None:
399 continue
400
401 # Discard "merged" markers when moving away from a merge state
402 if s.merged:
403 source = self._map.copymap.get(f)
404 if source:
405 copies[f] = source
406 self._normallookup(f)
407 # Also fix up otherparent markers
408 elif s.from_p2:
409 source = self._map.copymap.get(f)
410 if source:
411 copies[f] = source
412 self._add(f)
413 return copies
412 nullid = self._nodeconstants.nullid
413 # True if we need to fold p2 related state back to a linear case
414 fold_p2 = oldp2 != nullid and p2 == nullid
415 return self._map.setparents(p1, p2, fold_p2=fold_p2)
414 416
415 417 def setbranch(self, branch):
416 418 self.__class__._branch.set(self, encoding.fromlocal(branch))
@@ -438,9 +440,8 b' class dirstate(object):'
438 440 for a in ("_map", "_branch", "_ignore"):
439 441 if a in self.__dict__:
440 442 delattr(self, a)
441 self._lastnormaltime = 0
443 self._lastnormaltime = timestamp.zero()
442 444 self._dirty = False
443 self._updatedfiles.clear()
444 445 self._parentwriters = 0
445 446 self._origpl = None
446 447
@@ -451,10 +452,8 b' class dirstate(object):'
451 452 self._dirty = True
452 453 if source is not None:
453 454 self._map.copymap[dest] = source
454 self._updatedfiles.add(source)
455 self._updatedfiles.add(dest)
456 elif self._map.copymap.pop(dest, None):
457 self._updatedfiles.add(dest)
455 else:
456 self._map.copymap.pop(dest, None)
458 457
459 458 def copied(self, file):
460 459 return self._map.copymap.get(file, None)
@@ -471,18 +470,11 b' class dirstate(object):'
471 470
472 471 return True the file was previously untracked, False otherwise.
473 472 """
473 self._dirty = True
474 474 entry = self._map.get(filename)
475 if entry is None:
476 self._add(filename)
477 return True
478 elif not entry.tracked:
479 self._normallookup(filename)
480 return True
481 # XXX This is probably overkill for more case, but we need this to
482 # fully replace the `normallookup` call with `set_tracked` one.
483 # Consider smoothing this in the future.
484 self.set_possibly_dirty(filename)
485 return False
475 if entry is None or not entry.tracked:
476 self._check_new_tracked_filename(filename)
477 return self._map.set_tracked(filename)
486 478
487 479 @requires_no_parents_change
488 480 def set_untracked(self, filename):
@@ -493,28 +485,32 b' class dirstate(object):'
493 485
494 486 return True the file was previously tracked, False otherwise.
495 487 """
496 entry = self._map.get(filename)
497 if entry is None:
498 return False
499 elif entry.added:
500 self._drop(filename)
501 return True
502 else:
503 self._remove(filename)
504 return True
488 ret = self._map.set_untracked(filename)
489 if ret:
490 self._dirty = True
491 return ret
505 492
506 493 @requires_no_parents_change
507 494 def set_clean(self, filename, parentfiledata=None):
508 495 """record that the current state of the file on disk is known to be clean"""
509 496 self._dirty = True
510 self._updatedfiles.add(filename)
511 self._normal(filename, parentfiledata=parentfiledata)
497 if parentfiledata:
498 (mode, size, mtime) = parentfiledata
499 else:
500 (mode, size, mtime) = self._get_filedata(filename)
501 if not self._map[filename].tracked:
502 self._check_new_tracked_filename(filename)
503 self._map.set_clean(filename, mode, size, mtime)
504 if mtime > self._lastnormaltime:
505 # Remember the most recent modification timeslot for status(),
506 # to make sure we won't miss future size-preserving file content
507 # modifications that happen within the same timeslot.
508 self._lastnormaltime = mtime
512 509
513 510 @requires_no_parents_change
514 511 def set_possibly_dirty(self, filename):
515 512 """record that the current state of the file on disk is unknown"""
516 513 self._dirty = True
517 self._updatedfiles.add(filename)
518 514 self._map.set_possibly_dirty(filename)
519 515
520 516 @requires_parents_change
@@ -539,35 +535,26 b' class dirstate(object):'
539 535 wc_tracked = False
540 536 else:
541 537 wc_tracked = entry.tracked
542 possibly_dirty = False
543 if p1_tracked and wc_tracked:
544 # the underlying reference might have changed, we will have to
545 # check it.
546 possibly_dirty = True
547 elif not (p1_tracked or wc_tracked):
538 if not (p1_tracked or wc_tracked):
548 539 # the file is no longer relevant to anyone
549 self._drop(filename)
540 if self._map.get(filename) is not None:
541 self._map.reset_state(filename)
542 self._dirty = True
550 543 elif (not p1_tracked) and wc_tracked:
551 544 if entry is not None and entry.added:
552 545 return # avoid dropping copy information (maybe?)
553 elif p1_tracked and not wc_tracked:
554 pass
555 else:
556 assert False, 'unreachable'
557 546
558 # this mean we are doing call for file we do not really care about the
559 # data (eg: added or removed), however this should be a minor overhead
560 # compared to the overall update process calling this.
561 547 parentfiledata = None
562 if wc_tracked:
548 if wc_tracked and p1_tracked:
563 549 parentfiledata = self._get_filedata(filename)
564 550
565 self._updatedfiles.add(filename)
566 551 self._map.reset_state(
567 552 filename,
568 553 wc_tracked,
569 554 p1_tracked,
570 possibly_dirty=possibly_dirty,
555 # the underlying reference might have changed, we will have to
556 # check it.
557 has_meaningful_mtime=False,
571 558 parentfiledata=parentfiledata,
572 559 )
573 560 if (
@@ -585,10 +572,7 b' class dirstate(object):'
585 572 filename,
586 573 wc_tracked,
587 574 p1_tracked,
588 p2_tracked=False,
589 merged=False,
590 clean_p1=False,
591 clean_p2=False,
575 p2_info=False,
592 576 possibly_dirty=False,
593 577 parentfiledata=None,
594 578 ):
@@ -603,47 +587,26 b' class dirstate(object):'
603 587 depending of what information ends up being relevant and useful to
604 588 other processing.
605 589 """
606 if merged and (clean_p1 or clean_p2):
607 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
608 raise error.ProgrammingError(msg)
609 590
610 591 # note: I do not think we need to double check name clash here since we
611 592 # are in a update/merge case that should already have taken care of
612 593 # this. The test agrees
613 594
614 595 self._dirty = True
615 self._updatedfiles.add(filename)
616 596
617 597 need_parent_file_data = (
618 not (possibly_dirty or clean_p2 or merged)
619 and wc_tracked
620 and p1_tracked
598 not possibly_dirty and not p2_info and wc_tracked and p1_tracked
621 599 )
622 600
623 # this mean we are doing call for file we do not really care about the
624 # data (eg: added or removed), however this should be a minor overhead
625 # compared to the overall update process calling this.
626 if need_parent_file_data:
627 if parentfiledata is None:
628 parentfiledata = self._get_filedata(filename)
629 mtime = parentfiledata[2]
630
631 if mtime > self._lastnormaltime:
632 # Remember the most recent modification timeslot for
633 # status(), to make sure we won't miss future
634 # size-preserving file content modifications that happen
635 # within the same timeslot.
636 self._lastnormaltime = mtime
601 if need_parent_file_data and parentfiledata is None:
602 parentfiledata = self._get_filedata(filename)
637 603
638 604 self._map.reset_state(
639 605 filename,
640 606 wc_tracked,
641 607 p1_tracked,
642 p2_tracked=p2_tracked,
643 merged=merged,
644 clean_p1=clean_p1,
645 clean_p2=clean_p2,
646 possibly_dirty=possibly_dirty,
608 p2_info=p2_info,
609 has_meaningful_mtime=not possibly_dirty,
647 610 parentfiledata=parentfiledata,
648 611 )
649 612 if (
@@ -655,263 +618,30 b' class dirstate(object):'
655 618 # modifications that happen within the same timeslot.
656 619 self._lastnormaltime = parentfiledata[2]
657 620
658 def _addpath(
659 self,
660 f,
661 mode=0,
662 size=None,
663 mtime=None,
664 added=False,
665 merged=False,
666 from_p2=False,
667 possibly_dirty=False,
668 ):
669 entry = self._map.get(f)
670 if added or entry is not None and entry.removed:
671 scmutil.checkfilename(f)
672 if self._map.hastrackeddir(f):
673 msg = _(b'directory %r already in dirstate')
674 msg %= pycompat.bytestr(f)
621 def _check_new_tracked_filename(self, filename):
622 scmutil.checkfilename(filename)
623 if self._map.hastrackeddir(filename):
624 msg = _(b'directory %r already in dirstate')
625 msg %= pycompat.bytestr(filename)
626 raise error.Abort(msg)
627 # shadows
628 for d in pathutil.finddirs(filename):
629 if self._map.hastrackeddir(d):
630 break
631 entry = self._map.get(d)
632 if entry is not None and not entry.removed:
633 msg = _(b'file %r in dirstate clashes with %r')
634 msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
675 635 raise error.Abort(msg)
676 # shadows
677 for d in pathutil.finddirs(f):
678 if self._map.hastrackeddir(d):
679 break
680 entry = self._map.get(d)
681 if entry is not None and not entry.removed:
682 msg = _(b'file %r in dirstate clashes with %r')
683 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
684 raise error.Abort(msg)
685 self._dirty = True
686 self._updatedfiles.add(f)
687 self._map.addfile(
688 f,
689 mode=mode,
690 size=size,
691 mtime=mtime,
692 added=added,
693 merged=merged,
694 from_p2=from_p2,
695 possibly_dirty=possibly_dirty,
696 )
697 636
698 637 def _get_filedata(self, filename):
699 638 """returns"""
700 639 s = os.lstat(self._join(filename))
701 640 mode = s.st_mode
702 641 size = s.st_size
703 mtime = s[stat.ST_MTIME]
642 mtime = timestamp.mtime_of(s)
704 643 return (mode, size, mtime)
705 644
706 def normal(self, f, parentfiledata=None):
707 """Mark a file normal and clean.
708
709 parentfiledata: (mode, size, mtime) of the clean file
710
711 parentfiledata should be computed from memory (for mode,
712 size), as or close as possible from the point where we
713 determined the file was clean, to limit the risk of the
714 file having been changed by an external process between the
715 moment where the file was determined to be clean and now."""
716 if self.pendingparentchange():
717 util.nouideprecwarn(
718 b"do not use `normal` inside of update/merge context."
719 b" Use `update_file` or `update_file_p1`",
720 b'6.0',
721 stacklevel=2,
722 )
723 else:
724 util.nouideprecwarn(
725 b"do not use `normal` outside of update/merge context."
726 b" Use `set_tracked`",
727 b'6.0',
728 stacklevel=2,
729 )
730 self._normal(f, parentfiledata=parentfiledata)
731
732 def _normal(self, f, parentfiledata=None):
733 if parentfiledata:
734 (mode, size, mtime) = parentfiledata
735 else:
736 (mode, size, mtime) = self._get_filedata(f)
737 self._addpath(f, mode=mode, size=size, mtime=mtime)
738 self._map.copymap.pop(f, None)
739 if f in self._map.nonnormalset:
740 self._map.nonnormalset.remove(f)
741 if mtime > self._lastnormaltime:
742 # Remember the most recent modification timeslot for status(),
743 # to make sure we won't miss future size-preserving file content
744 # modifications that happen within the same timeslot.
745 self._lastnormaltime = mtime
746
747 def normallookup(self, f):
748 '''Mark a file normal, but possibly dirty.'''
749 if self.pendingparentchange():
750 util.nouideprecwarn(
751 b"do not use `normallookup` inside of update/merge context."
752 b" Use `update_file` or `update_file_p1`",
753 b'6.0',
754 stacklevel=2,
755 )
756 else:
757 util.nouideprecwarn(
758 b"do not use `normallookup` outside of update/merge context."
759 b" Use `set_possibly_dirty` or `set_tracked`",
760 b'6.0',
761 stacklevel=2,
762 )
763 self._normallookup(f)
764
765 def _normallookup(self, f):
766 '''Mark a file normal, but possibly dirty.'''
767 if self.in_merge:
768 # if there is a merge going on and the file was either
769 # "merged" or coming from other parent (-2) before
770 # being removed, restore that state.
771 entry = self._map.get(f)
772 if entry is not None:
773 # XXX this should probably be dealt with a a lower level
774 # (see `merged_removed` and `from_p2_removed`)
775 if entry.merged_removed or entry.from_p2_removed:
776 source = self._map.copymap.get(f)
777 if entry.merged_removed:
778 self._merge(f)
779 elif entry.from_p2_removed:
780 self._otherparent(f)
781 if source is not None:
782 self.copy(source, f)
783 return
784 elif entry.merged or entry.from_p2:
785 return
786 self._addpath(f, possibly_dirty=True)
787 self._map.copymap.pop(f, None)
788
789 def otherparent(self, f):
790 '''Mark as coming from the other parent, always dirty.'''
791 if self.pendingparentchange():
792 util.nouideprecwarn(
793 b"do not use `otherparent` inside of update/merge context."
794 b" Use `update_file` or `update_file_p1`",
795 b'6.0',
796 stacklevel=2,
797 )
798 else:
799 util.nouideprecwarn(
800 b"do not use `otherparent` outside of update/merge context."
801 b"It should have been set by the update/merge code",
802 b'6.0',
803 stacklevel=2,
804 )
805 self._otherparent(f)
806
807 def _otherparent(self, f):
808 if not self.in_merge:
809 msg = _(b"setting %r to other parent only allowed in merges") % f
810 raise error.Abort(msg)
811 entry = self._map.get(f)
812 if entry is not None and entry.tracked:
813 # merge-like
814 self._addpath(f, merged=True)
815 else:
816 # add-like
817 self._addpath(f, from_p2=True)
818 self._map.copymap.pop(f, None)
819
820 def add(self, f):
821 '''Mark a file added.'''
822 if self.pendingparentchange():
823 util.nouideprecwarn(
824 b"do not use `add` inside of update/merge context."
825 b" Use `update_file`",
826 b'6.0',
827 stacklevel=2,
828 )
829 else:
830 util.nouideprecwarn(
831 b"do not use `add` outside of update/merge context."
832 b" Use `set_tracked`",
833 b'6.0',
834 stacklevel=2,
835 )
836 self._add(f)
837
838 def _add(self, filename):
839 """internal function to mark a file as added"""
840 self._addpath(filename, added=True)
841 self._map.copymap.pop(filename, None)
842
843 def remove(self, f):
844 '''Mark a file removed'''
845 if self.pendingparentchange():
846 util.nouideprecwarn(
847 b"do not use `remove` insde of update/merge context."
848 b" Use `update_file` or `update_file_p1`",
849 b'6.0',
850 stacklevel=2,
851 )
852 else:
853 util.nouideprecwarn(
854 b"do not use `remove` outside of update/merge context."
855 b" Use `set_untracked`",
856 b'6.0',
857 stacklevel=2,
858 )
859 self._remove(f)
860
861 def _remove(self, filename):
862 """internal function to mark a file removed"""
863 self._dirty = True
864 self._updatedfiles.add(filename)
865 self._map.removefile(filename, in_merge=self.in_merge)
866
867 def merge(self, f):
868 '''Mark a file merged.'''
869 if self.pendingparentchange():
870 util.nouideprecwarn(
871 b"do not use `merge` inside of update/merge context."
872 b" Use `update_file`",
873 b'6.0',
874 stacklevel=2,
875 )
876 else:
877 util.nouideprecwarn(
878 b"do not use `merge` outside of update/merge context."
879 b"It should have been set by the update/merge code",
880 b'6.0',
881 stacklevel=2,
882 )
883 self._merge(f)
884
885 def _merge(self, f):
886 if not self.in_merge:
887 return self._normallookup(f)
888 return self._otherparent(f)
889
890 def drop(self, f):
891 '''Drop a file from the dirstate'''
892 if self.pendingparentchange():
893 util.nouideprecwarn(
894 b"do not use `drop` inside of update/merge context."
895 b" Use `update_file`",
896 b'6.0',
897 stacklevel=2,
898 )
899 else:
900 util.nouideprecwarn(
901 b"do not use `drop` outside of update/merge context."
902 b" Use `set_untracked`",
903 b'6.0',
904 stacklevel=2,
905 )
906 self._drop(f)
907
908 def _drop(self, filename):
909 """internal function to drop a file from the dirstate"""
910 if self._map.dropfile(filename):
911 self._dirty = True
912 self._updatedfiles.add(filename)
913 self._map.copymap.pop(filename, None)
914
915 645 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
916 646 if exists is None:
917 647 exists = os.path.lexists(os.path.join(self._root, path))
@@ -990,8 +720,7 b' class dirstate(object):'
990 720
991 721 def clear(self):
992 722 self._map.clear()
993 self._lastnormaltime = 0
994 self._updatedfiles.clear()
723 self._lastnormaltime = timestamp.zero()
995 724 self._dirty = True
996 725
997 726 def rebuild(self, parent, allfiles, changedfiles=None):
@@ -1022,9 +751,17 b' class dirstate(object):'
1022 751 self._map.setparents(parent, self._nodeconstants.nullid)
1023 752
1024 753 for f in to_lookup:
1025 self._normallookup(f)
754
755 if self.in_merge:
756 self.set_tracked(f)
757 else:
758 self._map.reset_state(
759 f,
760 wc_tracked=True,
761 p1_tracked=True,
762 )
1026 763 for f in to_drop:
1027 self._drop(f)
764 self._map.reset_state(f)
1028 765
1029 766 self._dirty = True
1030 767
@@ -1048,19 +785,14 b' class dirstate(object):'
1048 785 # See also the wiki page below for detail:
1049 786 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
1050 787
1051 # emulate dropping timestamp in 'parsers.pack_dirstate'
788 # record when mtime start to be ambiguous
1052 789 now = _getfsnow(self._opener)
1053 self._map.clearambiguoustimes(self._updatedfiles, now)
1054
1055 # emulate that all 'dirstate.normal' results are written out
1056 self._lastnormaltime = 0
1057 self._updatedfiles.clear()
1058 790
1059 791 # delay writing in-memory changes out
1060 792 tr.addfilegenerator(
1061 793 b'dirstate',
1062 794 (self._filename,),
1063 lambda f: self._writedirstate(tr, f),
795 lambda f: self._writedirstate(tr, f, now=now),
1064 796 location=b'plain',
1065 797 )
1066 798 return
@@ -1079,7 +811,7 b' class dirstate(object):'
1079 811 """
1080 812 self._plchangecallbacks[category] = callback
1081 813
1082 def _writedirstate(self, tr, st):
814 def _writedirstate(self, tr, st, now=None):
1083 815 # notify callbacks about parents change
1084 816 if self._origpl is not None and self._origpl != self._pl:
1085 817 for c, callback in sorted(
@@ -1087,9 +819,11 b' class dirstate(object):'
1087 819 ):
1088 820 callback(self, self._origpl, self._pl)
1089 821 self._origpl = None
1090 # use the modification time of the newly created temporary file as the
1091 # filesystem's notion of 'now'
1092 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
822
823 if now is None:
824 # use the modification time of the newly created temporary file as the
825 # filesystem's notion of 'now'
826 now = timestamp.mtime_of(util.fstat(st))
1093 827
1094 828 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
1095 829 # timestamp of each entries in dirstate, because of 'now > mtime'
@@ -1106,11 +840,12 b' class dirstate(object):'
1106 840 start = int(clock) - (int(clock) % delaywrite)
1107 841 end = start + delaywrite
1108 842 time.sleep(end - clock)
1109 now = end # trust our estimate that the end is near now
843 # trust our estimate that the end is near now
844 now = timestamp.timestamp((end, 0))
1110 845 break
1111 846
1112 847 self._map.write(tr, st, now)
1113 self._lastnormaltime = 0
848 self._lastnormaltime = timestamp.zero()
1114 849 self._dirty = False
1115 850
1116 851 def _dirignore(self, f):
@@ -1503,7 +1238,7 b' class dirstate(object):'
1503 1238 traversed,
1504 1239 dirty,
1505 1240 ) = rustmod.status(
1506 self._map._rustmap,
1241 self._map._map,
1507 1242 matcher,
1508 1243 self._rootdir,
1509 1244 self._ignorefiles(),
@@ -1624,6 +1359,7 b' class dirstate(object):'
1624 1359 mexact = match.exact
1625 1360 dirignore = self._dirignore
1626 1361 checkexec = self._checkexec
1362 checklink = self._checklink
1627 1363 copymap = self._map.copymap
1628 1364 lastnormaltime = self._lastnormaltime
1629 1365
@@ -1643,34 +1379,35 b' class dirstate(object):'
1643 1379 uadd(fn)
1644 1380 continue
1645 1381
1646 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1647 # written like that for performance reasons. dmap[fn] is not a
1648 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1649 # opcode has fast paths when the value to be unpacked is a tuple or
1650 # a list, but falls back to creating a full-fledged iterator in
1651 # general. That is much slower than simply accessing and storing the
1652 # tuple members one by one.
1653 1382 t = dget(fn)
1654 1383 mode = t.mode
1655 1384 size = t.size
1656 time = t.mtime
1657 1385
1658 1386 if not st and t.tracked:
1659 1387 dadd(fn)
1660 elif t.merged:
1388 elif t.p2_info:
1661 1389 madd(fn)
1662 1390 elif t.added:
1663 1391 aadd(fn)
1664 1392 elif t.removed:
1665 1393 radd(fn)
1666 1394 elif t.tracked:
1667 if (
1395 if not checklink and t.has_fallback_symlink:
1396 # If the file system does not support symlink, the mode
1397 # might not be correctly stored in the dirstate, so do not
1398 # trust it.
1399 ladd(fn)
1400 elif not checkexec and t.has_fallback_exec:
1401 # If the file system does not support exec bits, the mode
1402 # might not be correctly stored in the dirstate, so do not
1403 # trust it.
1404 ladd(fn)
1405 elif (
1668 1406 size >= 0
1669 1407 and (
1670 1408 (size != st.st_size and size != st.st_size & _rangemask)
1671 1409 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1672 1410 )
1673 or t.from_p2
1674 1411 or fn in copymap
1675 1412 ):
1676 1413 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
@@ -1679,12 +1416,9 b' class dirstate(object):'
1679 1416 ladd(fn)
1680 1417 else:
1681 1418 madd(fn)
1682 elif (
1683 time != st[stat.ST_MTIME]
1684 and time != st[stat.ST_MTIME] & _rangemask
1685 ):
1419 elif not t.mtime_likely_equal_to(timestamp.mtime_of(st)):
1686 1420 ladd(fn)
1687 elif st[stat.ST_MTIME] == lastnormaltime:
1421 elif timestamp.mtime_of(st) == lastnormaltime:
1688 1422 # fn may have just been marked as normal and it may have
1689 1423 # changed in the same second without changing its size.
1690 1424 # This can happen if we quickly do multiple commits.
@@ -1703,7 +1437,7 b' class dirstate(object):'
1703 1437 """
1704 1438 dmap = self._map
1705 1439 if rustmod is not None:
1706 dmap = self._map._rustmap
1440 dmap = self._map._map
1707 1441
1708 1442 if match.always():
1709 1443 return dmap.keys()
@@ -1778,3 +1512,22 b' class dirstate(object):'
1778 1512 def clearbackup(self, tr, backupname):
1779 1513 '''Clear backup file'''
1780 1514 self._opener.unlink(backupname)
1515
1516 def verify(self, m1, m2):
1517 """check the dirstate content again the parent manifest and yield errors"""
1518 missing_from_p1 = b"%s in state %s, but not in manifest1\n"
1519 unexpected_in_p1 = b"%s in state %s, but also in manifest1\n"
1520 missing_from_ps = b"%s in state %s, but not in either manifest\n"
1521 missing_from_ds = b"%s in manifest1, but listed as state %s\n"
1522 for f, entry in self.items():
1523 state = entry.state
1524 if state in b"nr" and f not in m1:
1525 yield (missing_from_p1, f, state)
1526 if state in b"a" and f in m1:
1527 yield (unexpected_in_p1, f, state)
1528 if state in b"m" and f not in m1 and f not in m2:
1529 yield (missing_from_ps, f, state)
1530 for f in m1:
1531 state = self.get_entry(f).state
1532 if state not in b"nrm":
1533 yield (missing_from_ds, f, state)
This diff has been collapsed as it changes many lines, (1173 lines changed) Show them Hide them
@@ -20,6 +20,7 b' from . import ('
20 20
21 21 from .dirstateutils import (
22 22 docket as docketmod,
23 v2,
23 24 )
24 25
25 26 parsers = policy.importmod('parsers')
@@ -27,22 +28,276 b" rustmod = policy.importrust('dirstate')"
27 28
28 29 propertycache = util.propertycache
29 30
30 DirstateItem = parsers.DirstateItem
31
32
33 # a special value used internally for `size` if the file come from the other parent
34 FROM_P2 = -2
35
36 # a special value used internally for `size` if the file is modified/merged/added
37 NONNORMAL = -1
38
39 # a special value used internally for `time` if the time is ambigeous
40 AMBIGUOUS_TIME = -1
31 if rustmod is None:
32 DirstateItem = parsers.DirstateItem
33 else:
34 DirstateItem = rustmod.DirstateItem
41 35
42 36 rangemask = 0x7FFFFFFF
43 37
44 38
45 class dirstatemap(object):
39 class _dirstatemapcommon(object):
40 """
41 Methods that are identical for both implementations of the dirstatemap
42 class, with and without Rust extensions enabled.
43 """
44
45 # please pytype
46
47 _map = None
48 copymap = None
49
50 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
51 self._use_dirstate_v2 = use_dirstate_v2
52 self._nodeconstants = nodeconstants
53 self._ui = ui
54 self._opener = opener
55 self._root = root
56 self._filename = b'dirstate'
57 self._nodelen = 20 # Also update Rust code when changing this!
58 self._parents = None
59 self._dirtyparents = False
60 self._docket = None
61
62 # for consistent view between _pl() and _read() invocations
63 self._pendingmode = None
64
65 def preload(self):
66 """Loads the underlying data, if it's not already loaded"""
67 self._map
68
69 def get(self, key, default=None):
70 return self._map.get(key, default)
71
72 def __len__(self):
73 return len(self._map)
74
75 def __iter__(self):
76 return iter(self._map)
77
78 def __contains__(self, key):
79 return key in self._map
80
81 def __getitem__(self, item):
82 return self._map[item]
83
84 ### sub-class utility method
85 #
86 # Use to allow for generic implementation of some method while still coping
87 # with minor difference between implementation.
88
89 def _dirs_incr(self, filename, old_entry=None):
90 """incremente the dirstate counter if applicable
91
92 This might be a no-op for some subclass who deal with directory
93 tracking in a different way.
94 """
95
96 def _dirs_decr(self, filename, old_entry=None, remove_variant=False):
97 """decremente the dirstate counter if applicable
98
99 This might be a no-op for some subclass who deal with directory
100 tracking in a different way.
101 """
102
103 def _refresh_entry(self, f, entry):
104 """record updated state of an entry"""
105
106 def _insert_entry(self, f, entry):
107 """add a new dirstate entry (or replace an unrelated one)
108
109 The fact it is actually new is the responsability of the caller
110 """
111
112 def _drop_entry(self, f):
113 """remove any entry for file f
114
115 This should also drop associated copy information
116
117 The fact we actually need to drop it is the responsability of the caller"""
118
119 ### method to manipulate the entries
120
121 def set_possibly_dirty(self, filename):
122 """record that the current state of the file on disk is unknown"""
123 entry = self[filename]
124 entry.set_possibly_dirty()
125 self._refresh_entry(filename, entry)
126
127 def set_clean(self, filename, mode, size, mtime):
128 """mark a file as back to a clean state"""
129 entry = self[filename]
130 size = size & rangemask
131 entry.set_clean(mode, size, mtime)
132 self._refresh_entry(filename, entry)
133 self.copymap.pop(filename, None)
134
135 def set_tracked(self, filename):
136 new = False
137 entry = self.get(filename)
138 if entry is None:
139 self._dirs_incr(filename)
140 entry = DirstateItem(
141 wc_tracked=True,
142 )
143
144 self._insert_entry(filename, entry)
145 new = True
146 elif not entry.tracked:
147 self._dirs_incr(filename, entry)
148 entry.set_tracked()
149 self._refresh_entry(filename, entry)
150 new = True
151 else:
152 # XXX This is probably overkill for more case, but we need this to
153 # fully replace the `normallookup` call with `set_tracked` one.
154 # Consider smoothing this in the future.
155 entry.set_possibly_dirty()
156 self._refresh_entry(filename, entry)
157 return new
158
159 def set_untracked(self, f):
160 """Mark a file as no longer tracked in the dirstate map"""
161 entry = self.get(f)
162 if entry is None:
163 return False
164 else:
165 self._dirs_decr(f, old_entry=entry, remove_variant=not entry.added)
166 if not entry.p2_info:
167 self.copymap.pop(f, None)
168 entry.set_untracked()
169 self._refresh_entry(f, entry)
170 return True
171
172 def reset_state(
173 self,
174 filename,
175 wc_tracked=False,
176 p1_tracked=False,
177 p2_info=False,
178 has_meaningful_mtime=True,
179 has_meaningful_data=True,
180 parentfiledata=None,
181 ):
182 """Set a entry to a given state, diregarding all previous state
183
184 This is to be used by the part of the dirstate API dedicated to
185 adjusting the dirstate after a update/merge.
186
187 note: calling this might result to no entry existing at all if the
188 dirstate map does not see any point at having one for this file
189 anymore.
190 """
191 # copy information are now outdated
192 # (maybe new information should be in directly passed to this function)
193 self.copymap.pop(filename, None)
194
195 if not (p1_tracked or p2_info or wc_tracked):
196 old_entry = self._map.get(filename)
197 self._drop_entry(filename)
198 self._dirs_decr(filename, old_entry=old_entry)
199 return
200
201 old_entry = self._map.get(filename)
202 self._dirs_incr(filename, old_entry)
203 entry = DirstateItem(
204 wc_tracked=wc_tracked,
205 p1_tracked=p1_tracked,
206 p2_info=p2_info,
207 has_meaningful_mtime=has_meaningful_mtime,
208 parentfiledata=parentfiledata,
209 )
210 self._insert_entry(filename, entry)
211
212 ### disk interaction
213
214 def _opendirstatefile(self):
215 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
216 if self._pendingmode is not None and self._pendingmode != mode:
217 fp.close()
218 raise error.Abort(
219 _(b'working directory state may be changed parallelly')
220 )
221 self._pendingmode = mode
222 return fp
223
224 def _readdirstatefile(self, size=-1):
225 try:
226 with self._opendirstatefile() as fp:
227 return fp.read(size)
228 except IOError as err:
229 if err.errno != errno.ENOENT:
230 raise
231 # File doesn't exist, so the current state is empty
232 return b''
233
234 @property
235 def docket(self):
236 if not self._docket:
237 if not self._use_dirstate_v2:
238 raise error.ProgrammingError(
239 b'dirstate only has a docket in v2 format'
240 )
241 self._docket = docketmod.DirstateDocket.parse(
242 self._readdirstatefile(), self._nodeconstants
243 )
244 return self._docket
245
246 def write_v2_no_append(self, tr, st, meta, packed):
247 old_docket = self.docket
248 new_docket = docketmod.DirstateDocket.with_new_uuid(
249 self.parents(), len(packed), meta
250 )
251 data_filename = new_docket.data_filename()
252 if tr:
253 tr.add(data_filename, 0)
254 self._opener.write(data_filename, packed)
255 # Write the new docket after the new data file has been
256 # written. Because `st` was opened with `atomictemp=True`,
257 # the actual `.hg/dirstate` file is only affected on close.
258 st.write(new_docket.serialize())
259 st.close()
260 # Remove the old data file after the new docket pointing to
261 # the new data file was written.
262 if old_docket.uuid:
263 data_filename = old_docket.data_filename()
264 unlink = lambda _tr=None: self._opener.unlink(data_filename)
265 if tr:
266 category = b"dirstate-v2-clean-" + old_docket.uuid
267 tr.addpostclose(category, unlink)
268 else:
269 unlink()
270 self._docket = new_docket
271
272 ### reading/setting parents
273
274 def parents(self):
275 if not self._parents:
276 if self._use_dirstate_v2:
277 self._parents = self.docket.parents
278 else:
279 read_len = self._nodelen * 2
280 st = self._readdirstatefile(read_len)
281 l = len(st)
282 if l == read_len:
283 self._parents = (
284 st[: self._nodelen],
285 st[self._nodelen : 2 * self._nodelen],
286 )
287 elif l == 0:
288 self._parents = (
289 self._nodeconstants.nullid,
290 self._nodeconstants.nullid,
291 )
292 else:
293 raise error.Abort(
294 _(b'working directory state appears damaged!')
295 )
296
297 return self._parents
298
299
300 class dirstatemap(_dirstatemapcommon):
46 301 """Map encapsulating the dirstate's contents.
47 302
48 303 The dirstate contains the following state:
@@ -56,19 +311,19 b' class dirstatemap(object):'
56 311 - the state map maps filenames to tuples of (state, mode, size, mtime),
57 312 where state is a single character representing 'normal', 'added',
58 313 'removed', or 'merged'. It is read by treating the dirstate as a
59 dict. File state is updated by calling the `addfile`, `removefile` and
60 `dropfile` methods.
314 dict. File state is updated by calling various methods (see each
315 documentation for details):
316
317 - `reset_state`,
318 - `set_tracked`
319 - `set_untracked`
320 - `set_clean`
321 - `set_possibly_dirty`
61 322
62 323 - `copymap` maps destination filenames to their source filename.
63 324
64 325 The dirstate also provides the following views onto the state:
65 326
66 - `nonnormalset` is a set of the filenames that have state other
67 than 'normal', or are normal but have an mtime of -1 ('normallookup').
68
69 - `otherparentset` is a set of the filenames that are marked as coming
70 from the second parent when the dirstate is currently being merged.
71
72 327 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
73 328 form that they appear as in the dirstate.
74 329
@@ -76,22 +331,7 b' class dirstatemap(object):'
76 331 denormalized form that they appear as in the dirstate.
77 332 """
78 333
79 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
80 self._ui = ui
81 self._opener = opener
82 self._root = root
83 self._filename = b'dirstate'
84 self._nodelen = 20
85 self._nodeconstants = nodeconstants
86 assert (
87 not use_dirstate_v2
88 ), "should have detected unsupported requirement"
89
90 self._parents = None
91 self._dirtyparents = False
92
93 # for consistent view between _pl() and _read() invocations
94 self._pendingmode = None
334 ### Core data storage and access
95 335
96 336 @propertycache
97 337 def _map(self):
@@ -113,8 +353,6 b' class dirstatemap(object):'
113 353 util.clearcachedproperty(self, b"_alldirs")
114 354 util.clearcachedproperty(self, b"filefoldmap")
115 355 util.clearcachedproperty(self, b"dirfoldmap")
116 util.clearcachedproperty(self, b"nonnormalset")
117 util.clearcachedproperty(self, b"otherparentset")
118 356
119 357 def items(self):
120 358 return pycompat.iteritems(self._map)
@@ -122,29 +360,109 b' class dirstatemap(object):'
122 360 # forward for python2,3 compat
123 361 iteritems = items
124 362
125 debug_iter = items
126
127 def __len__(self):
128 return len(self._map)
129
130 def __iter__(self):
131 return iter(self._map)
363 def debug_iter(self, all):
364 """
365 Return an iterator of (filename, state, mode, size, mtime) tuples
132 366
133 def get(self, key, default=None):
134 return self._map.get(key, default)
135
136 def __contains__(self, key):
137 return key in self._map
138
139 def __getitem__(self, key):
140 return self._map[key]
367 `all` is unused when Rust is not enabled
368 """
369 for (filename, item) in self.items():
370 yield (filename, item.state, item.mode, item.size, item.mtime)
141 371
142 372 def keys(self):
143 373 return self._map.keys()
144 374
145 def preload(self):
146 """Loads the underlying data, if it's not already loaded"""
375 ### reading/setting parents
376
377 def setparents(self, p1, p2, fold_p2=False):
378 self._parents = (p1, p2)
379 self._dirtyparents = True
380 copies = {}
381 if fold_p2:
382 for f, s in pycompat.iteritems(self._map):
383 # Discard "merged" markers when moving away from a merge state
384 if s.p2_info:
385 source = self.copymap.pop(f, None)
386 if source:
387 copies[f] = source
388 s.drop_merge_data()
389 return copies
390
391 ### disk interaction
392
393 def read(self):
394 # ignore HG_PENDING because identity is used only for writing
395 self.identity = util.filestat.frompath(
396 self._opener.join(self._filename)
397 )
398
399 if self._use_dirstate_v2:
400 if not self.docket.uuid:
401 return
402 st = self._opener.read(self.docket.data_filename())
403 else:
404 st = self._readdirstatefile()
405
406 if not st:
407 return
408
409 # TODO: adjust this estimate for dirstate-v2
410 if util.safehasattr(parsers, b'dict_new_presized'):
411 # Make an estimate of the number of files in the dirstate based on
412 # its size. This trades wasting some memory for avoiding costly
413 # resizes. Each entry have a prefix of 17 bytes followed by one or
414 # two path names. Studies on various large-scale real-world repositories
415 # found 54 bytes a reasonable upper limit for the average path names.
416 # Copy entries are ignored for the sake of this estimate.
417 self._map = parsers.dict_new_presized(len(st) // 71)
418
419 # Python's garbage collector triggers a GC each time a certain number
420 # of container objects (the number being defined by
421 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
422 # for each file in the dirstate. The C version then immediately marks
423 # them as not to be tracked by the collector. However, this has no
424 # effect on when GCs are triggered, only on what objects the GC looks
425 # into. This means that O(number of files) GCs are unavoidable.
426 # Depending on when in the process's lifetime the dirstate is parsed,
427 # this can get very expensive. As a workaround, disable GC while
428 # parsing the dirstate.
429 #
430 # (we cannot decorate the function directly since it is in a C module)
431 if self._use_dirstate_v2:
432 p = self.docket.parents
433 meta = self.docket.tree_metadata
434 parse_dirstate = util.nogc(v2.parse_dirstate)
435 parse_dirstate(self._map, self.copymap, st, meta)
436 else:
437 parse_dirstate = util.nogc(parsers.parse_dirstate)
438 p = parse_dirstate(self._map, self.copymap, st)
439 if not self._dirtyparents:
440 self.setparents(*p)
441
442 # Avoid excess attribute lookups by fast pathing certain checks
443 self.__contains__ = self._map.__contains__
444 self.__getitem__ = self._map.__getitem__
445 self.get = self._map.get
446
447 def write(self, tr, st, now):
448 if self._use_dirstate_v2:
449 packed, meta = v2.pack_dirstate(self._map, self.copymap, now)
450 self.write_v2_no_append(tr, st, meta, packed)
451 else:
452 packed = parsers.pack_dirstate(
453 self._map, self.copymap, self.parents(), now
454 )
455 st.write(packed)
456 st.close()
457 self._dirtyparents = False
458
459 @propertycache
460 def identity(self):
147 461 self._map
462 return self.identity
463
464 ### code related to maintaining and accessing "extra" property
465 # (e.g. "has_dir")
148 466
149 467 def _dirs_incr(self, filename, old_entry=None):
150 468 """incremente the dirstate counter if applicable"""
@@ -168,200 +486,6 b' class dirstatemap(object):'
168 486 normed = util.normcase(filename)
169 487 self.filefoldmap.pop(normed, None)
170 488
171 def set_possibly_dirty(self, filename):
172 """record that the current state of the file on disk is unknown"""
173 self[filename].set_possibly_dirty()
174
175 def addfile(
176 self,
177 f,
178 mode=0,
179 size=None,
180 mtime=None,
181 added=False,
182 merged=False,
183 from_p2=False,
184 possibly_dirty=False,
185 ):
186 """Add a tracked file to the dirstate."""
187 if added:
188 assert not merged
189 assert not possibly_dirty
190 assert not from_p2
191 state = b'a'
192 size = NONNORMAL
193 mtime = AMBIGUOUS_TIME
194 elif merged:
195 assert not possibly_dirty
196 assert not from_p2
197 state = b'm'
198 size = FROM_P2
199 mtime = AMBIGUOUS_TIME
200 elif from_p2:
201 assert not possibly_dirty
202 state = b'n'
203 size = FROM_P2
204 mtime = AMBIGUOUS_TIME
205 elif possibly_dirty:
206 state = b'n'
207 size = NONNORMAL
208 mtime = AMBIGUOUS_TIME
209 else:
210 assert size != FROM_P2
211 assert size != NONNORMAL
212 assert size is not None
213 assert mtime is not None
214
215 state = b'n'
216 size = size & rangemask
217 mtime = mtime & rangemask
218 assert state is not None
219 assert size is not None
220 assert mtime is not None
221 old_entry = self.get(f)
222 self._dirs_incr(f, old_entry)
223 e = self._map[f] = DirstateItem(state, mode, size, mtime)
224 if e.dm_nonnormal:
225 self.nonnormalset.add(f)
226 if e.dm_otherparent:
227 self.otherparentset.add(f)
228
229 def reset_state(
230 self,
231 filename,
232 wc_tracked,
233 p1_tracked,
234 p2_tracked=False,
235 merged=False,
236 clean_p1=False,
237 clean_p2=False,
238 possibly_dirty=False,
239 parentfiledata=None,
240 ):
241 """Set a entry to a given state, diregarding all previous state
242
243 This is to be used by the part of the dirstate API dedicated to
244 adjusting the dirstate after a update/merge.
245
246 note: calling this might result to no entry existing at all if the
247 dirstate map does not see any point at having one for this file
248 anymore.
249 """
250 if merged and (clean_p1 or clean_p2):
251 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
252 raise error.ProgrammingError(msg)
253 # copy information are now outdated
254 # (maybe new information should be in directly passed to this function)
255 self.copymap.pop(filename, None)
256
257 if not (p1_tracked or p2_tracked or wc_tracked):
258 self.dropfile(filename)
259 elif merged:
260 # XXX might be merged and removed ?
261 entry = self.get(filename)
262 if entry is not None and entry.tracked:
263 # XXX mostly replicate dirstate.other parent. We should get
264 # the higher layer to pass us more reliable data where `merged`
265 # actually mean merged. Dropping the else clause will show
266 # failure in `test-graft.t`
267 self.addfile(filename, merged=True)
268 else:
269 self.addfile(filename, from_p2=True)
270 elif not (p1_tracked or p2_tracked) and wc_tracked:
271 self.addfile(filename, added=True, possibly_dirty=possibly_dirty)
272 elif (p1_tracked or p2_tracked) and not wc_tracked:
273 # XXX might be merged and removed ?
274 old_entry = self._map.get(filename)
275 self._dirs_decr(filename, old_entry=old_entry, remove_variant=True)
276 self._map[filename] = DirstateItem(b'r', 0, 0, 0)
277 self.nonnormalset.add(filename)
278 elif clean_p2 and wc_tracked:
279 if p1_tracked or self.get(filename) is not None:
280 # XXX the `self.get` call is catching some case in
281 # `test-merge-remove.t` where the file is tracked in p1, the
282 # p1_tracked argument is False.
283 #
284 # In addition, this seems to be a case where the file is marked
285 # as merged without actually being the result of a merge
286 # action. So thing are not ideal here.
287 self.addfile(filename, merged=True)
288 else:
289 self.addfile(filename, from_p2=True)
290 elif not p1_tracked and p2_tracked and wc_tracked:
291 self.addfile(filename, from_p2=True, possibly_dirty=possibly_dirty)
292 elif possibly_dirty:
293 self.addfile(filename, possibly_dirty=possibly_dirty)
294 elif wc_tracked:
295 # this is a "normal" file
296 if parentfiledata is None:
297 msg = b'failed to pass parentfiledata for a normal file: %s'
298 msg %= filename
299 raise error.ProgrammingError(msg)
300 mode, size, mtime = parentfiledata
301 self.addfile(filename, mode=mode, size=size, mtime=mtime)
302 self.nonnormalset.discard(filename)
303 else:
304 assert False, 'unreachable'
305
306 def removefile(self, f, in_merge=False):
307 """
308 Mark a file as removed in the dirstate.
309
310 The `size` parameter is used to store sentinel values that indicate
311 the file's previous state. In the future, we should refactor this
312 to be more explicit about what that state is.
313 """
314 entry = self.get(f)
315 size = 0
316 if in_merge:
317 # XXX we should not be able to have 'm' state and 'FROM_P2' if not
318 # during a merge. So I (marmoute) am not sure we need the
319 # conditionnal at all. Adding double checking this with assert
320 # would be nice.
321 if entry is not None:
322 # backup the previous state
323 if entry.merged: # merge
324 size = NONNORMAL
325 elif entry.from_p2:
326 size = FROM_P2
327 self.otherparentset.add(f)
328 if entry is not None and not (entry.merged or entry.from_p2):
329 self.copymap.pop(f, None)
330 self._dirs_decr(f, old_entry=entry, remove_variant=True)
331 self._map[f] = DirstateItem(b'r', 0, size, 0)
332 self.nonnormalset.add(f)
333
334 def dropfile(self, f):
335 """
336 Remove a file from the dirstate. Returns True if the file was
337 previously recorded.
338 """
339 old_entry = self._map.pop(f, None)
340 self._dirs_decr(f, old_entry=old_entry)
341 self.nonnormalset.discard(f)
342 return old_entry is not None
343
344 def clearambiguoustimes(self, files, now):
345 for f in files:
346 e = self.get(f)
347 if e is not None and e.need_delay(now):
348 e.set_possibly_dirty()
349 self.nonnormalset.add(f)
350
351 def nonnormalentries(self):
352 '''Compute the nonnormal dirstate entries from the dmap'''
353 try:
354 return parsers.nonnormalotherparententries(self._map)
355 except AttributeError:
356 nonnorm = set()
357 otherparent = set()
358 for fname, e in pycompat.iteritems(self._map):
359 if e.dm_nonnormal:
360 nonnorm.add(fname)
361 if e.from_p2:
362 otherparent.add(fname)
363 return nonnorm, otherparent
364
365 489 @propertycache
366 490 def filefoldmap(self):
367 491 """Returns a dictionary mapping normalized case paths to their
@@ -384,6 +508,14 b' class dirstatemap(object):'
384 508 f[b'.'] = b'.' # prevents useless util.fspath() invocation
385 509 return f
386 510
511 @propertycache
512 def dirfoldmap(self):
513 f = {}
514 normcase = util.normcase
515 for name in self._dirs:
516 f[normcase(name)] = name
517 return f
518
387 519 def hastrackeddir(self, d):
388 520 """
389 521 Returns True if the dirstate contains a tracked (not removed) file
@@ -400,393 +532,34 b' class dirstatemap(object):'
400 532
401 533 @propertycache
402 534 def _dirs(self):
403 return pathutil.dirs(self._map, b'r')
535 return pathutil.dirs(self._map, only_tracked=True)
404 536
405 537 @propertycache
406 538 def _alldirs(self):
407 539 return pathutil.dirs(self._map)
408 540
409 def _opendirstatefile(self):
410 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
411 if self._pendingmode is not None and self._pendingmode != mode:
412 fp.close()
413 raise error.Abort(
414 _(b'working directory state may be changed parallelly')
415 )
416 self._pendingmode = mode
417 return fp
418
419 def parents(self):
420 if not self._parents:
421 try:
422 fp = self._opendirstatefile()
423 st = fp.read(2 * self._nodelen)
424 fp.close()
425 except IOError as err:
426 if err.errno != errno.ENOENT:
427 raise
428 # File doesn't exist, so the current state is empty
429 st = b''
541 ### code related to manipulation of entries and copy-sources
430 542
431 l = len(st)
432 if l == self._nodelen * 2:
433 self._parents = (
434 st[: self._nodelen],
435 st[self._nodelen : 2 * self._nodelen],
436 )
437 elif l == 0:
438 self._parents = (
439 self._nodeconstants.nullid,
440 self._nodeconstants.nullid,
441 )
442 else:
443 raise error.Abort(
444 _(b'working directory state appears damaged!')
445 )
446
447 return self._parents
448
449 def setparents(self, p1, p2):
450 self._parents = (p1, p2)
451 self._dirtyparents = True
452
453 def read(self):
454 # ignore HG_PENDING because identity is used only for writing
455 self.identity = util.filestat.frompath(
456 self._opener.join(self._filename)
457 )
458
459 try:
460 fp = self._opendirstatefile()
461 try:
462 st = fp.read()
463 finally:
464 fp.close()
465 except IOError as err:
466 if err.errno != errno.ENOENT:
467 raise
468 return
469 if not st:
470 return
543 def _refresh_entry(self, f, entry):
544 if not entry.any_tracked:
545 self._map.pop(f, None)
471 546
472 if util.safehasattr(parsers, b'dict_new_presized'):
473 # Make an estimate of the number of files in the dirstate based on
474 # its size. This trades wasting some memory for avoiding costly
475 # resizes. Each entry have a prefix of 17 bytes followed by one or
476 # two path names. Studies on various large-scale real-world repositories
477 # found 54 bytes a reasonable upper limit for the average path names.
478 # Copy entries are ignored for the sake of this estimate.
479 self._map = parsers.dict_new_presized(len(st) // 71)
480
481 # Python's garbage collector triggers a GC each time a certain number
482 # of container objects (the number being defined by
483 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
484 # for each file in the dirstate. The C version then immediately marks
485 # them as not to be tracked by the collector. However, this has no
486 # effect on when GCs are triggered, only on what objects the GC looks
487 # into. This means that O(number of files) GCs are unavoidable.
488 # Depending on when in the process's lifetime the dirstate is parsed,
489 # this can get very expensive. As a workaround, disable GC while
490 # parsing the dirstate.
491 #
492 # (we cannot decorate the function directly since it is in a C module)
493 parse_dirstate = util.nogc(parsers.parse_dirstate)
494 p = parse_dirstate(self._map, self.copymap, st)
495 if not self._dirtyparents:
496 self.setparents(*p)
497
498 # Avoid excess attribute lookups by fast pathing certain checks
499 self.__contains__ = self._map.__contains__
500 self.__getitem__ = self._map.__getitem__
501 self.get = self._map.get
547 def _insert_entry(self, f, entry):
548 self._map[f] = entry
502 549
503 def write(self, _tr, st, now):
504 st.write(
505 parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
506 )
507 st.close()
508 self._dirtyparents = False
509 self.nonnormalset, self.otherparentset = self.nonnormalentries()
510
511 @propertycache
512 def nonnormalset(self):
513 nonnorm, otherparents = self.nonnormalentries()
514 self.otherparentset = otherparents
515 return nonnorm
516
517 @propertycache
518 def otherparentset(self):
519 nonnorm, otherparents = self.nonnormalentries()
520 self.nonnormalset = nonnorm
521 return otherparents
522
523 def non_normal_or_other_parent_paths(self):
524 return self.nonnormalset.union(self.otherparentset)
525
526 @propertycache
527 def identity(self):
528 self._map
529 return self.identity
530
531 @propertycache
532 def dirfoldmap(self):
533 f = {}
534 normcase = util.normcase
535 for name in self._dirs:
536 f[normcase(name)] = name
537 return f
550 def _drop_entry(self, f):
551 self._map.pop(f, None)
552 self.copymap.pop(f, None)
538 553
539 554
540 555 if rustmod is not None:
541 556
542 class dirstatemap(object):
543 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
544 self._use_dirstate_v2 = use_dirstate_v2
545 self._nodeconstants = nodeconstants
546 self._ui = ui
547 self._opener = opener
548 self._root = root
549 self._filename = b'dirstate'
550 self._nodelen = 20 # Also update Rust code when changing this!
551 self._parents = None
552 self._dirtyparents = False
553 self._docket = None
554
555 # for consistent view between _pl() and _read() invocations
556 self._pendingmode = None
557
558 self._use_dirstate_tree = self._ui.configbool(
559 b"experimental",
560 b"dirstate-tree.in-memory",
561 False,
562 )
563
564 def addfile(
565 self,
566 f,
567 mode=0,
568 size=None,
569 mtime=None,
570 added=False,
571 merged=False,
572 from_p2=False,
573 possibly_dirty=False,
574 ):
575 return self._rustmap.addfile(
576 f,
577 mode,
578 size,
579 mtime,
580 added,
581 merged,
582 from_p2,
583 possibly_dirty,
584 )
585
586 def reset_state(
587 self,
588 filename,
589 wc_tracked,
590 p1_tracked,
591 p2_tracked=False,
592 merged=False,
593 clean_p1=False,
594 clean_p2=False,
595 possibly_dirty=False,
596 parentfiledata=None,
597 ):
598 """Set a entry to a given state, disregarding all previous state
599
600 This is to be used by the part of the dirstate API dedicated to
601 adjusting the dirstate after a update/merge.
602
603 note: calling this might result to no entry existing at all if the
604 dirstate map does not see any point at having one for this file
605 anymore.
606 """
607 if merged and (clean_p1 or clean_p2):
608 msg = (
609 b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
610 )
611 raise error.ProgrammingError(msg)
612 # copy information are now outdated
613 # (maybe new information should be in directly passed to this function)
614 self.copymap.pop(filename, None)
557 class dirstatemap(_dirstatemapcommon):
615 558
616 if not (p1_tracked or p2_tracked or wc_tracked):
617 self.dropfile(filename)
618 elif merged:
619 # XXX might be merged and removed ?
620 entry = self.get(filename)
621 if entry is not None and entry.tracked:
622 # XXX mostly replicate dirstate.other parent. We should get
623 # the higher layer to pass us more reliable data where `merged`
624 # actually mean merged. Dropping the else clause will show
625 # failure in `test-graft.t`
626 self.addfile(filename, merged=True)
627 else:
628 self.addfile(filename, from_p2=True)
629 elif not (p1_tracked or p2_tracked) and wc_tracked:
630 self.addfile(
631 filename, added=True, possibly_dirty=possibly_dirty
632 )
633 elif (p1_tracked or p2_tracked) and not wc_tracked:
634 # XXX might be merged and removed ?
635 self[filename] = DirstateItem(b'r', 0, 0, 0)
636 self.nonnormalset.add(filename)
637 elif clean_p2 and wc_tracked:
638 if p1_tracked or self.get(filename) is not None:
639 # XXX the `self.get` call is catching some case in
640 # `test-merge-remove.t` where the file is tracked in p1, the
641 # p1_tracked argument is False.
642 #
643 # In addition, this seems to be a case where the file is marked
644 # as merged without actually being the result of a merge
645 # action. So thing are not ideal here.
646 self.addfile(filename, merged=True)
647 else:
648 self.addfile(filename, from_p2=True)
649 elif not p1_tracked and p2_tracked and wc_tracked:
650 self.addfile(
651 filename, from_p2=True, possibly_dirty=possibly_dirty
652 )
653 elif possibly_dirty:
654 self.addfile(filename, possibly_dirty=possibly_dirty)
655 elif wc_tracked:
656 # this is a "normal" file
657 if parentfiledata is None:
658 msg = b'failed to pass parentfiledata for a normal file: %s'
659 msg %= filename
660 raise error.ProgrammingError(msg)
661 mode, size, mtime = parentfiledata
662 self.addfile(filename, mode=mode, size=size, mtime=mtime)
663 self.nonnormalset.discard(filename)
664 else:
665 assert False, 'unreachable'
666
667 def removefile(self, *args, **kwargs):
668 return self._rustmap.removefile(*args, **kwargs)
669
670 def dropfile(self, *args, **kwargs):
671 return self._rustmap.dropfile(*args, **kwargs)
672
673 def clearambiguoustimes(self, *args, **kwargs):
674 return self._rustmap.clearambiguoustimes(*args, **kwargs)
675
676 def nonnormalentries(self):
677 return self._rustmap.nonnormalentries()
678
679 def get(self, *args, **kwargs):
680 return self._rustmap.get(*args, **kwargs)
681
682 @property
683 def copymap(self):
684 return self._rustmap.copymap()
685
686 def directories(self):
687 return self._rustmap.directories()
688
689 def debug_iter(self):
690 return self._rustmap.debug_iter()
691
692 def preload(self):
693 self._rustmap
694
695 def clear(self):
696 self._rustmap.clear()
697 self.setparents(
698 self._nodeconstants.nullid, self._nodeconstants.nullid
699 )
700 util.clearcachedproperty(self, b"_dirs")
701 util.clearcachedproperty(self, b"_alldirs")
702 util.clearcachedproperty(self, b"dirfoldmap")
703
704 def items(self):
705 return self._rustmap.items()
706
707 def keys(self):
708 return iter(self._rustmap)
709
710 def __contains__(self, key):
711 return key in self._rustmap
712
713 def __getitem__(self, item):
714 return self._rustmap[item]
715
716 def __len__(self):
717 return len(self._rustmap)
718
719 def __iter__(self):
720 return iter(self._rustmap)
721
722 # forward for python2,3 compat
723 iteritems = items
724
725 def _opendirstatefile(self):
726 fp, mode = txnutil.trypending(
727 self._root, self._opener, self._filename
728 )
729 if self._pendingmode is not None and self._pendingmode != mode:
730 fp.close()
731 raise error.Abort(
732 _(b'working directory state may be changed parallelly')
733 )
734 self._pendingmode = mode
735 return fp
736
737 def _readdirstatefile(self, size=-1):
738 try:
739 with self._opendirstatefile() as fp:
740 return fp.read(size)
741 except IOError as err:
742 if err.errno != errno.ENOENT:
743 raise
744 # File doesn't exist, so the current state is empty
745 return b''
746
747 def setparents(self, p1, p2):
748 self._parents = (p1, p2)
749 self._dirtyparents = True
750
751 def parents(self):
752 if not self._parents:
753 if self._use_dirstate_v2:
754 self._parents = self.docket.parents
755 else:
756 read_len = self._nodelen * 2
757 st = self._readdirstatefile(read_len)
758 l = len(st)
759 if l == read_len:
760 self._parents = (
761 st[: self._nodelen],
762 st[self._nodelen : 2 * self._nodelen],
763 )
764 elif l == 0:
765 self._parents = (
766 self._nodeconstants.nullid,
767 self._nodeconstants.nullid,
768 )
769 else:
770 raise error.Abort(
771 _(b'working directory state appears damaged!')
772 )
773
774 return self._parents
775
776 @property
777 def docket(self):
778 if not self._docket:
779 if not self._use_dirstate_v2:
780 raise error.ProgrammingError(
781 b'dirstate only has a docket in v2 format'
782 )
783 self._docket = docketmod.DirstateDocket.parse(
784 self._readdirstatefile(), self._nodeconstants
785 )
786 return self._docket
559 ### Core data storage and access
787 560
788 561 @propertycache
789 def _rustmap(self):
562 def _map(self):
790 563 """
791 564 Fills the Dirstatemap when called.
792 565 """
@@ -801,27 +574,91 b' if rustmod is not None:'
801 574 data = self._opener.read(self.docket.data_filename())
802 575 else:
803 576 data = b''
804 self._rustmap = rustmod.DirstateMap.new_v2(
577 self._map = rustmod.DirstateMap.new_v2(
805 578 data, self.docket.data_size, self.docket.tree_metadata
806 579 )
807 580 parents = self.docket.parents
808 581 else:
809 self._rustmap, parents = rustmod.DirstateMap.new_v1(
810 self._use_dirstate_tree, self._readdirstatefile()
582 self._map, parents = rustmod.DirstateMap.new_v1(
583 self._readdirstatefile()
811 584 )
812 585
813 586 if parents and not self._dirtyparents:
814 587 self.setparents(*parents)
815 588
816 self.__contains__ = self._rustmap.__contains__
817 self.__getitem__ = self._rustmap.__getitem__
818 self.get = self._rustmap.get
819 return self._rustmap
589 self.__contains__ = self._map.__contains__
590 self.__getitem__ = self._map.__getitem__
591 self.get = self._map.get
592 return self._map
593
594 @property
595 def copymap(self):
596 return self._map.copymap()
597
598 def debug_iter(self, all):
599 """
600 Return an iterator of (filename, state, mode, size, mtime) tuples
601
602 `all`: also include with `state == b' '` dirstate tree nodes that
603 don't have an associated `DirstateItem`.
604
605 """
606 return self._map.debug_iter(all)
607
608 def clear(self):
609 self._map.clear()
610 self.setparents(
611 self._nodeconstants.nullid, self._nodeconstants.nullid
612 )
613 util.clearcachedproperty(self, b"_dirs")
614 util.clearcachedproperty(self, b"_alldirs")
615 util.clearcachedproperty(self, b"dirfoldmap")
616
617 def items(self):
618 return self._map.items()
619
620 # forward for python2,3 compat
621 iteritems = items
622
623 def keys(self):
624 return iter(self._map)
625
626 ### reading/setting parents
627
628 def setparents(self, p1, p2, fold_p2=False):
629 self._parents = (p1, p2)
630 self._dirtyparents = True
631 copies = {}
632 if fold_p2:
633 # Collect into an intermediate list to avoid a `RuntimeError`
634 # exception due to mutation during iteration.
635 # TODO: move this the whole loop to Rust where `iter_mut`
636 # enables in-place mutation of elements of a collection while
637 # iterating it, without mutating the collection itself.
638 files_with_p2_info = [
639 f for f, s in self._map.items() if s.p2_info
640 ]
641 rust_map = self._map
642 for f in files_with_p2_info:
643 e = rust_map.get(f)
644 source = self.copymap.pop(f, None)
645 if source:
646 copies[f] = source
647 e.drop_merge_data()
648 rust_map.set_dirstate_item(f, e)
649 return copies
650
651 ### disk interaction
652
653 @propertycache
654 def identity(self):
655 self._map
656 return self.identity
820 657
821 658 def write(self, tr, st, now):
822 659 if not self._use_dirstate_v2:
823 660 p1, p2 = self.parents()
824 packed = self._rustmap.write_v1(p1, p2, now)
661 packed = self._map.write_v1(p1, p2, now)
825 662 st.write(packed)
826 663 st.close()
827 664 self._dirtyparents = False
@@ -829,7 +666,7 b' if rustmod is not None:'
829 666
830 667 # We can only append to an existing data file if there is one
831 668 can_append = self.docket.uuid is not None
832 packed, meta, append = self._rustmap.write_v2(now, can_append)
669 packed, meta, append = self._map.write_v2(now, can_append)
833 670 if append:
834 671 docket = self.docket
835 672 data_filename = docket.data_filename()
@@ -847,79 +684,49 b' if rustmod is not None:'
847 684 st.write(docket.serialize())
848 685 st.close()
849 686 else:
850 old_docket = self.docket
851 new_docket = docketmod.DirstateDocket.with_new_uuid(
852 self.parents(), len(packed), meta
853 )
854 data_filename = new_docket.data_filename()
855 if tr:
856 tr.add(data_filename, 0)
857 self._opener.write(data_filename, packed)
858 # Write the new docket after the new data file has been
859 # written. Because `st` was opened with `atomictemp=True`,
860 # the actual `.hg/dirstate` file is only affected on close.
861 st.write(new_docket.serialize())
862 st.close()
863 # Remove the old data file after the new docket pointing to
864 # the new data file was written.
865 if old_docket.uuid:
866 data_filename = old_docket.data_filename()
867 unlink = lambda _tr=None: self._opener.unlink(data_filename)
868 if tr:
869 category = b"dirstate-v2-clean-" + old_docket.uuid
870 tr.addpostclose(category, unlink)
871 else:
872 unlink()
873 self._docket = new_docket
687 self.write_v2_no_append(tr, st, meta, packed)
874 688 # Reload from the newly-written file
875 util.clearcachedproperty(self, b"_rustmap")
689 util.clearcachedproperty(self, b"_map")
876 690 self._dirtyparents = False
877 691
692 ### code related to maintaining and accessing "extra" property
693 # (e.g. "has_dir")
694
878 695 @propertycache
879 696 def filefoldmap(self):
880 697 """Returns a dictionary mapping normalized case paths to their
881 698 non-normalized versions.
882 699 """
883 return self._rustmap.filefoldmapasdict()
700 return self._map.filefoldmapasdict()
884 701
885 702 def hastrackeddir(self, d):
886 return self._rustmap.hastrackeddir(d)
703 return self._map.hastrackeddir(d)
887 704
888 705 def hasdir(self, d):
889 return self._rustmap.hasdir(d)
890
891 @propertycache
892 def identity(self):
893 self._rustmap
894 return self.identity
895
896 @property
897 def nonnormalset(self):
898 nonnorm = self._rustmap.non_normal_entries()
899 return nonnorm
900
901 @propertycache
902 def otherparentset(self):
903 otherparents = self._rustmap.other_parent_entries()
904 return otherparents
905
906 def non_normal_or_other_parent_paths(self):
907 return self._rustmap.non_normal_or_other_parent_paths()
706 return self._map.hasdir(d)
908 707
909 708 @propertycache
910 709 def dirfoldmap(self):
911 710 f = {}
912 711 normcase = util.normcase
913 for name in self._rustmap.tracked_dirs():
712 for name in self._map.tracked_dirs():
914 713 f[normcase(name)] = name
915 714 return f
916 715
917 def set_possibly_dirty(self, filename):
918 """record that the current state of the file on disk is unknown"""
919 entry = self[filename]
920 entry.set_possibly_dirty()
921 self._rustmap.set_v1(filename, entry)
716 ### code related to manipulation of entries and copy-sources
717
718 def _refresh_entry(self, f, entry):
719 if not entry.any_tracked:
720 self._map.drop_item_and_copy_source(f)
721 else:
722 self._map.addfile(f, entry)
723
724 def _insert_entry(self, f, entry):
725 self._map.addfile(f, entry)
726
727 def _drop_entry(self, f):
728 self._map.drop_item_and_copy_source(f)
922 729
923 730 def __setitem__(self, key, value):
924 731 assert isinstance(value, DirstateItem)
925 self._rustmap.set_v1(key, value)
732 self._map.set_dirstate_item(key, value)
@@ -10,31 +10,27 b' from __future__ import absolute_import'
10 10 import struct
11 11
12 12 from ..revlogutils import docket as docket_mod
13
13 from . import v2
14 14
15 15 V2_FORMAT_MARKER = b"dirstate-v2\n"
16 16
17 # Must match the constant of the same name in
18 # `rust/hg-core/src/dirstate_tree/on_disk.rs`
19 TREE_METADATA_SIZE = 44
20
21 17 # * 12 bytes: format marker
22 18 # * 32 bytes: node ID of the working directory's first parent
23 19 # * 32 bytes: node ID of the working directory's second parent
20 # * {TREE_METADATA_SIZE} bytes: tree metadata, parsed separately
24 21 # * 4 bytes: big-endian used size of the data file
25 # * {TREE_METADATA_SIZE} bytes: tree metadata, parsed separately
26 22 # * 1 byte: length of the data file's UUID
27 23 # * variable: data file's UUID
28 24 #
29 25 # Node IDs are null-padded if shorter than 32 bytes.
30 26 # A data file shorter than the specified used size is corrupted (truncated)
31 27 HEADER = struct.Struct(
32 ">{}s32s32sL{}sB".format(len(V2_FORMAT_MARKER), TREE_METADATA_SIZE)
28 ">{}s32s32s{}sLB".format(len(V2_FORMAT_MARKER), v2.TREE_METADATA_SIZE)
33 29 )
34 30
35 31
36 32 class DirstateDocket(object):
37 data_filename_pattern = b'dirstate.%s.d'
33 data_filename_pattern = b'dirstate.%s'
38 34
39 35 def __init__(self, parents, data_size, tree_metadata, uuid):
40 36 self.parents = parents
@@ -51,7 +47,7 b' class DirstateDocket(object):'
51 47 if not data:
52 48 parents = (nodeconstants.nullid, nodeconstants.nullid)
53 49 return cls(parents, 0, b'', None)
54 marker, p1, p2, data_size, meta, uuid_size = HEADER.unpack_from(data)
50 marker, p1, p2, meta, data_size, uuid_size = HEADER.unpack_from(data)
55 51 if marker != V2_FORMAT_MARKER:
56 52 raise ValueError("expected dirstate-v2 marker")
57 53 uuid = data[HEADER.size : HEADER.size + uuid_size]
@@ -65,8 +61,8 b' class DirstateDocket(object):'
65 61 V2_FORMAT_MARKER,
66 62 p1,
67 63 p2,
64 self.tree_metadata,
68 65 self.data_size,
69 self.tree_metadata,
70 66 len(self.uuid),
71 67 )
72 68 return header + self.uuid
@@ -253,7 +253,7 b' def dispatch(req):'
253 253 status = -1
254 254
255 255 ret = _flushstdio(req.ui, err)
256 if ret:
256 if ret and not status:
257 257 status = ret
258 258 return status
259 259
@@ -240,7 +240,9 b' def fromlocal(s):'
240 240 b"decoding near '%s': %s!" % (sub, pycompat.bytestr(inst))
241 241 )
242 242 except LookupError as k:
243 raise error.Abort(k, hint=b"please check your locale settings")
243 raise error.Abort(
244 pycompat.bytestr(k), hint=b"please check your locale settings"
245 )
244 246
245 247
246 248 def unitolocal(u):
@@ -306,7 +308,9 b' def lower(s):'
306 308 except UnicodeError:
307 309 return s.lower() # we don't know how to fold this except in ASCII
308 310 except LookupError as k:
309 raise error.Abort(k, hint=b"please check your locale settings")
311 raise error.Abort(
312 pycompat.bytestr(k), hint=b"please check your locale settings"
313 )
310 314
311 315
312 316 def upper(s):
@@ -333,7 +337,9 b' def upperfallback(s):'
333 337 except UnicodeError:
334 338 return s.upper() # we don't know how to fold this except in ASCII
335 339 except LookupError as k:
336 raise error.Abort(k, hint=b"please check your locale settings")
340 raise error.Abort(
341 pycompat.bytestr(k), hint=b"please check your locale settings"
342 )
337 343
338 344
339 345 if not _nativeenviron:
@@ -31,6 +31,7 b' if pycompat.TYPE_CHECKING:'
31 31
32 32
33 33 def _tobytes(exc):
34 # type: (...) -> bytes
34 35 """Byte-stringify exception in the same way as BaseException_str()"""
35 36 if not exc.args:
36 37 return b''
@@ -47,7 +48,7 b' class Hint(object):'
47 48 """
48 49
49 50 def __init__(self, *args, **kw):
50 self.hint = kw.pop('hint', None)
51 self.hint = kw.pop('hint', None) # type: Optional[bytes]
51 52 super(Hint, self).__init__(*args, **kw)
52 53
53 54
@@ -71,6 +72,7 b' class Error(Hint, Exception):'
71 72 if pycompat.ispy3:
72 73
73 74 def __str__(self):
75 # type: () -> str
74 76 # the output would be unreadable if the message was translated,
75 77 # but do not replace it with encoding.strfromlocal(), which
76 78 # may raise another exception.
@@ -105,6 +107,7 b' class RevlogError(StorageError):'
105 107
106 108 class SidedataHashError(RevlogError):
107 109 def __init__(self, key, expected, got):
110 # type: (int, bytes, bytes) -> None
108 111 self.hint = None
109 112 self.sidedatakey = key
110 113 self.expecteddigest = expected
@@ -117,6 +120,7 b' class FilteredIndexError(IndexError):'
117 120
118 121 class LookupError(RevlogError, KeyError):
119 122 def __init__(self, name, index, message):
123 # type: (bytes, bytes, bytes) -> None
120 124 self.name = name
121 125 self.index = index
122 126 # this can't be called 'message' because at least some installs of
@@ -343,6 +347,7 b' class OutOfBandError(RemoteError):'
343 347 """Exception raised when a remote repo reports failure"""
344 348
345 349 def __init__(self, message=None, hint=None):
350 # type: (Optional[bytes], Optional[bytes]) -> None
346 351 from .i18n import _
347 352
348 353 if message:
@@ -1386,11 +1386,16 b' class pulloperation(object):'
1386 1386 includepats=None,
1387 1387 excludepats=None,
1388 1388 depth=None,
1389 path=None,
1389 1390 ):
1390 1391 # repo we pull into
1391 1392 self.repo = repo
1392 1393 # repo we pull from
1393 1394 self.remote = remote
1395 # path object used to build this remote
1396 #
1397 # Ideally, the remote peer would carry that directly.
1398 self.remote_path = path
1394 1399 # revision we try to pull (None is "all")
1395 1400 self.heads = heads
1396 1401 # bookmark pulled explicitly
@@ -1556,6 +1561,7 b' def add_confirm_callback(repo, pullop):'
1556 1561 def pull(
1557 1562 repo,
1558 1563 remote,
1564 path=None,
1559 1565 heads=None,
1560 1566 force=False,
1561 1567 bookmarks=(),
@@ -1611,8 +1617,9 b' def pull('
1611 1617 pullop = pulloperation(
1612 1618 repo,
1613 1619 remote,
1614 heads,
1615 force,
1620 path=path,
1621 heads=heads,
1622 force=force,
1616 1623 bookmarks=bookmarks,
1617 1624 streamclonerequested=streamclonerequested,
1618 1625 includepats=includepats,
@@ -2021,6 +2028,9 b' def _pullbookmarks(pullop):'
2021 2028 pullop.stepsdone.add(b'bookmarks')
2022 2029 repo = pullop.repo
2023 2030 remotebookmarks = pullop.remotebookmarks
2031 bookmarks_mode = None
2032 if pullop.remote_path is not None:
2033 bookmarks_mode = pullop.remote_path.bookmarks_mode
2024 2034 bookmod.updatefromremote(
2025 2035 repo.ui,
2026 2036 repo,
@@ -2028,6 +2038,7 b' def _pullbookmarks(pullop):'
2028 2038 pullop.remote.url(),
2029 2039 pullop.gettransaction,
2030 2040 explicit=pullop.explicitbookmarks,
2041 mode=bookmarks_mode,
2031 2042 )
2032 2043
2033 2044
@@ -224,8 +224,12 b' def load(ui, name, path, loadingtime=Non'
224 224 minver = getattr(mod, 'minimumhgversion', None)
225 225 if minver:
226 226 curver = util.versiontuple(n=2)
227 extmin = util.versiontuple(stringutil.forcebytestr(minver), 2)
227 228
228 if None in curver or util.versiontuple(minver, 2) > curver:
229 if None in extmin:
230 extmin = (extmin[0] or 0, extmin[1] or 0)
231
232 if None in curver or extmin > curver:
229 233 msg = _(
230 234 b'(third party extension %s requires version %s or newer '
231 235 b'of Mercurial (current: %s); disabling)\n'
@@ -365,6 +365,11 b' internalstable = sorted('
365 365 loaddoc(b'config', subdir=b'internals'),
366 366 ),
367 367 (
368 [b'dirstate-v2'],
369 _(b'dirstate-v2 file format'),
370 loaddoc(b'dirstate-v2', subdir=b'internals'),
371 ),
372 (
368 373 [b'extensions', b'extension'],
369 374 _(b'Extension API'),
370 375 loaddoc(b'extensions', subdir=b'internals'),
@@ -1748,6 +1748,18 b' The following sub-options can be defined'
1748 1748 Revsets specifying bookmarks will not result in the bookmark being
1749 1749 pushed.
1750 1750
1751 ``bookmarks.mode``
1752 How bookmark will be dealt during the exchange. It support the following value
1753
1754 - ``default``: the default behavior, local and remote bookmarks are "merged"
1755 on push/pull.
1756
1757 - ``mirror``: when pulling, replace local bookmarks by remote bookmarks. This
1758 is useful to replicate a repository, or as an optimization.
1759
1760 - ``ignore``: ignore bookmarks during exchange.
1761 (This currently only affect pulling)
1762
1751 1763 The following special named paths exist:
1752 1764
1753 1765 ``default``
@@ -942,7 +942,7 b' def clone('
942 942 exchange.pull(
943 943 local,
944 944 srcpeer,
945 revs,
945 heads=revs,
946 946 streamclonerequested=stream,
947 947 includepats=storeincludepats,
948 948 excludepats=storeexcludepats,
@@ -1261,13 +1261,14 b' def _incoming('
1261 1261 (remoterepo, incomingchangesetlist, displayer) parameters,
1262 1262 and is supposed to contain only code that can't be unified.
1263 1263 """
1264 srcs = urlutil.get_pull_paths(repo, ui, [source], opts.get(b'branch'))
1264 srcs = urlutil.get_pull_paths(repo, ui, [source])
1265 1265 srcs = list(srcs)
1266 1266 if len(srcs) != 1:
1267 1267 msg = _(b'for now, incoming supports only a single source, %d provided')
1268 1268 msg %= len(srcs)
1269 1269 raise error.Abort(msg)
1270 source, branches = srcs[0]
1270 path = srcs[0]
1271 source, branches = urlutil.parseurl(path.rawloc, opts.get(b'branch'))
1271 1272 if subpath is not None:
1272 1273 subpath = urlutil.url(subpath)
1273 1274 if subpath.isabs():
@@ -1285,7 +1286,7 b' def _incoming('
1285 1286 if revs:
1286 1287 revs = [other.lookup(rev) for rev in revs]
1287 1288 other, chlist, cleanupfn = bundlerepo.getremotechanges(
1288 ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
1289 ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
1289 1290 )
1290 1291
1291 1292 if not chlist:
@@ -1352,7 +1353,7 b' def _outgoing(ui, repo, dests, opts, sub'
1352 1353 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
1353 1354 revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
1354 1355 if revs:
1355 revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]
1356 revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]
1356 1357
1357 1358 other = peer(repo, opts, dest)
1358 1359 try:
@@ -285,6 +285,7 b' class hgwebdir(object):'
285 285 self.lastrefresh = 0
286 286 self.motd = None
287 287 self.refresh()
288 self.requests_count = 0
288 289 if not baseui:
289 290 # set up environment for new ui
290 291 extensions.loadall(self.ui)
@@ -341,6 +342,10 b' class hgwebdir(object):'
341 342
342 343 self.repos = repos
343 344 self.ui = u
345 self.gc_full_collect_rate = self.ui.configint(
346 b'experimental', b'web.full-garbage-collection-rate'
347 )
348 self.gc_full_collections_done = 0
344 349 encoding.encoding = self.ui.config(b'web', b'encoding')
345 350 self.style = self.ui.config(b'web', b'style')
346 351 self.templatepath = self.ui.config(
@@ -383,12 +388,27 b' class hgwebdir(object):'
383 388 finally:
384 389 # There are known cycles in localrepository that prevent
385 390 # those objects (and tons of held references) from being
386 # collected through normal refcounting. We mitigate those
387 # leaks by performing an explicit GC on every request.
388 # TODO remove this once leaks are fixed.
389 # TODO only run this on requests that create localrepository
390 # instances instead of every request.
391 gc.collect()
391 # collected through normal refcounting.
392 # In some cases, the resulting memory consumption can
393 # be tamed by performing explicit garbage collections.
394 # In presence of actual leaks or big long-lived caches, the
395 # impact on performance of such collections can become a
396 # problem, hence the rate shouldn't be set too low.
397 # See "Collecting the oldest generation" in
398 # https://devguide.python.org/garbage_collector
399 # for more about such trade-offs.
400 rate = self.gc_full_collect_rate
401
402 # this is not thread safe, but the consequence (skipping
403 # a garbage collection) is arguably better than risking
404 # to have several threads perform a collection in parallel
405 # (long useless wait on all threads).
406 self.requests_count += 1
407 if rate > 0 and self.requests_count % rate == 0:
408 gc.collect()
409 self.gc_full_collections_done += 1
410 else:
411 gc.collect(generation=1)
392 412
393 413 def _runwsgi(self, req, res):
394 414 try:
@@ -132,36 +132,6 b' class idirstate(interfaceutil.Interface)'
132 132 def copies():
133 133 pass
134 134
135 def normal(f, parentfiledata=None):
136 """Mark a file normal and clean.
137
138 parentfiledata: (mode, size, mtime) of the clean file
139
140 parentfiledata should be computed from memory (for mode,
141 size), as or close as possible from the point where we
142 determined the file was clean, to limit the risk of the
143 file having been changed by an external process between the
144 moment where the file was determined to be clean and now."""
145 pass
146
147 def normallookup(f):
148 '''Mark a file normal, but possibly dirty.'''
149
150 def otherparent(f):
151 '''Mark as coming from the other parent, always dirty.'''
152
153 def add(f):
154 '''Mark a file added.'''
155
156 def remove(f):
157 '''Mark a file removed.'''
158
159 def merge(f):
160 '''Mark a file merged.'''
161
162 def drop(f):
163 '''Drop a file from the dirstate'''
164
165 135 def normalize(path, isknown=False, ignoremissing=False):
166 136 """
167 137 normalize the case of a pathname when on a casefolding filesystem
@@ -917,9 +917,6 b' def gathersupportedrequirements(ui):'
917 917 # Start with all requirements supported by this file.
918 918 supported = set(localrepository._basesupported)
919 919
920 if dirstate.SUPPORTS_DIRSTATE_V2:
921 supported.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
922
923 920 # Execute ``featuresetupfuncs`` entries if they belong to an extension
924 921 # relevant to this ui instance.
925 922 modules = {m.__name__ for n, m in extensions.extensions(ui)}
@@ -1177,6 +1174,32 b' def resolverevlogstorevfsoptions(ui, req'
1177 1174 if slow_path == b'abort':
1178 1175 raise error.Abort(msg, hint=hint)
1179 1176 options[b'persistent-nodemap'] = True
1177 if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
1178 slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
1179 if slow_path not in (b'allow', b'warn', b'abort'):
1180 default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
1181 msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
1182 ui.warn(msg % slow_path)
1183 if not ui.quiet:
1184 ui.warn(_(b'falling back to default value: %s\n') % default)
1185 slow_path = default
1186
1187 msg = _(
1188 b"accessing `dirstate-v2` repository without associated "
1189 b"fast implementation."
1190 )
1191 hint = _(
1192 b"check `hg help config.format.exp-rc-dirstate-v2` " b"for details"
1193 )
1194 if not dirstate.HAS_FAST_DIRSTATE_V2:
1195 if slow_path == b'warn':
1196 msg = b"warning: " + msg + b'\n'
1197 ui.warn(msg)
1198 if not ui.quiet:
1199 hint = b'(' + hint + b')\n'
1200 ui.warn(hint)
1201 if slow_path == b'abort':
1202 raise error.Abort(msg, hint=hint)
1180 1203 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1181 1204 options[b'persistent-nodemap.mmap'] = True
1182 1205 if ui.configbool(b'devel', b'persistent-nodemap'):
@@ -1266,6 +1289,7 b' class localrepository(object):'
1266 1289 requirementsmod.NODEMAP_REQUIREMENT,
1267 1290 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1268 1291 requirementsmod.SHARESAFE_REQUIREMENT,
1292 requirementsmod.DIRSTATE_V2_REQUIREMENT,
1269 1293 }
1270 1294 _basesupported = supportedformats | {
1271 1295 requirementsmod.STORE_REQUIREMENT,
@@ -3606,18 +3630,10 b' def newreporequirements(ui, createopts):'
3606 3630 if ui.configbool(b'format', b'sparse-revlog'):
3607 3631 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3608 3632
3609 # experimental config: format.exp-dirstate-v2
3633 # experimental config: format.exp-rc-dirstate-v2
3610 3634 # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
3611 if ui.configbool(b'format', b'exp-dirstate-v2'):
3612 if dirstate.SUPPORTS_DIRSTATE_V2:
3613 requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
3614 else:
3615 raise error.Abort(
3616 _(
3617 b"dirstate v2 format requested by config "
3618 b"but not supported (requires Rust extensions)"
3619 )
3620 )
3635 if ui.configbool(b'format', b'exp-rc-dirstate-v2'):
3636 requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
3621 3637
3622 3638 # experimental config: format.exp-use-copies-side-data-changeset
3623 3639 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
@@ -46,13 +46,12 b' if pycompat.TYPE_CHECKING:'
46 46 Any,
47 47 Callable,
48 48 Dict,
49 List,
50 49 Optional,
51 50 Sequence,
52 51 Tuple,
53 52 )
54 53
55 for t in (Any, Callable, Dict, List, Optional, Tuple):
54 for t in (Any, Callable, Dict, Optional, Tuple):
56 55 assert t
57 56
58 57
@@ -714,43 +713,43 b' class walkopts(object):'
714 713 """
715 714
716 715 # raw command-line parameters, which a matcher will be built from
717 pats = attr.ib() # type: List[bytes]
718 opts = attr.ib() # type: Dict[bytes, Any]
716 pats = attr.ib()
717 opts = attr.ib()
719 718
720 719 # a list of revset expressions to be traversed; if follow, it specifies
721 720 # the start revisions
722 revspec = attr.ib() # type: List[bytes]
721 revspec = attr.ib()
723 722
724 723 # miscellaneous queries to filter revisions (see "hg help log" for details)
725 bookmarks = attr.ib(default=attr.Factory(list)) # type: List[bytes]
726 branches = attr.ib(default=attr.Factory(list)) # type: List[bytes]
727 date = attr.ib(default=None) # type: Optional[bytes]
728 keywords = attr.ib(default=attr.Factory(list)) # type: List[bytes]
729 no_merges = attr.ib(default=False) # type: bool
730 only_merges = attr.ib(default=False) # type: bool
731 prune_ancestors = attr.ib(default=attr.Factory(list)) # type: List[bytes]
732 users = attr.ib(default=attr.Factory(list)) # type: List[bytes]
724 bookmarks = attr.ib(default=attr.Factory(list))
725 branches = attr.ib(default=attr.Factory(list))
726 date = attr.ib(default=None)
727 keywords = attr.ib(default=attr.Factory(list))
728 no_merges = attr.ib(default=False)
729 only_merges = attr.ib(default=False)
730 prune_ancestors = attr.ib(default=attr.Factory(list))
731 users = attr.ib(default=attr.Factory(list))
733 732
734 733 # miscellaneous matcher arguments
735 include_pats = attr.ib(default=attr.Factory(list)) # type: List[bytes]
736 exclude_pats = attr.ib(default=attr.Factory(list)) # type: List[bytes]
734 include_pats = attr.ib(default=attr.Factory(list))
735 exclude_pats = attr.ib(default=attr.Factory(list))
737 736
738 737 # 0: no follow, 1: follow first, 2: follow both parents
739 follow = attr.ib(default=0) # type: int
738 follow = attr.ib(default=0)
740 739
741 740 # do not attempt filelog-based traversal, which may be fast but cannot
742 741 # include revisions where files were removed
743 force_changelog_traversal = attr.ib(default=False) # type: bool
742 force_changelog_traversal = attr.ib(default=False)
744 743
745 744 # filter revisions by file patterns, which should be disabled only if
746 745 # you want to include revisions where files were unmodified
747 filter_revisions_by_pats = attr.ib(default=True) # type: bool
746 filter_revisions_by_pats = attr.ib(default=True)
748 747
749 748 # sort revisions prior to traversal: 'desc', 'topo', or None
750 sort_revisions = attr.ib(default=None) # type: Optional[bytes]
749 sort_revisions = attr.ib(default=None)
751 750
752 751 # limit number of changes displayed; None means unlimited
753 limit = attr.ib(default=None) # type: Optional[int]
752 limit = attr.ib(default=None)
754 753
755 754
756 755 def parseopts(ui, pats, opts):
@@ -913,6 +912,42 b' def _makenofollowfilematcher(repo, pats,'
913 912 return None
914 913
915 914
915 def revsingle(repo, revspec, default=b'.', localalias=None):
916 """Resolves user-provided revset(s) into a single revision.
917
918 This just wraps the lower-level scmutil.revsingle() in order to raise an
919 exception indicating user error.
920 """
921 try:
922 return scmutil.revsingle(repo, revspec, default, localalias)
923 except error.RepoLookupError as e:
924 raise error.InputError(e.args[0], hint=e.hint)
925
926
927 def revpair(repo, revs):
928 """Resolves user-provided revset(s) into two revisions.
929
930 This just wraps the lower-level scmutil.revpair() in order to raise an
931 exception indicating user error.
932 """
933 try:
934 return scmutil.revpair(repo, revs)
935 except error.RepoLookupError as e:
936 raise error.InputError(e.args[0], hint=e.hint)
937
938
939 def revrange(repo, specs, localalias=None):
940 """Resolves user-provided revset(s).
941
942 This just wraps the lower-level scmutil.revrange() in order to raise an
943 exception indicating user error.
944 """
945 try:
946 return scmutil.revrange(repo, specs, localalias)
947 except error.RepoLookupError as e:
948 raise error.InputError(e.args[0], hint=e.hint)
949
950
916 951 _opt2logrevset = {
917 952 b'no_merges': (b'not merge()', None),
918 953 b'only_merges': (b'merge()', None),
@@ -988,7 +1023,7 b' def _makerevset(repo, wopts, slowpath):'
988 1023 def _initialrevs(repo, wopts):
989 1024 """Return the initial set of revisions to be filtered or followed"""
990 1025 if wopts.revspec:
991 revs = scmutil.revrange(repo, wopts.revspec)
1026 revs = revrange(repo, wopts.revspec)
992 1027 elif wopts.follow and repo.dirstate.p1() == repo.nullid:
993 1028 revs = smartset.baseset()
994 1029 elif wopts.follow:
@@ -9,13 +9,13 b' from __future__ import absolute_import'
9 9
10 10 import collections
11 11 import errno
12 import stat
13 12 import struct
14 13
15 14 from .i18n import _
16 15 from .node import nullrev
17 16 from .thirdparty import attr
18 17 from .utils import stringutil
18 from .dirstateutils import timestamp
19 19 from . import (
20 20 copies,
21 21 encoding,
@@ -1406,8 +1406,9 b' def batchget(repo, mctx, wctx, wantfiled'
1406 1406 if wantfiledata:
1407 1407 s = wfctx.lstat()
1408 1408 mode = s.st_mode
1409 mtime = s[stat.ST_MTIME]
1410 filedata[f] = (mode, size, mtime) # for dirstate.normal
1409 mtime = timestamp.mtime_of(s)
1410 # for dirstate.update_file's parentfiledata argument:
1411 filedata[f] = (mode, size, mtime)
1411 1412 if i == 100:
1412 1413 yield False, (i, f)
1413 1414 i = 0
@@ -796,12 +796,13 b' def recordupdates(repo, actions, branchm'
796 796 for f, args, msg in actions.get(ACTION_GET, []):
797 797 if branchmerge:
798 798 # tracked in p1 can be True also but update_file should not care
799 old_entry = repo.dirstate.get_entry(f)
800 p1_tracked = old_entry.any_tracked and not old_entry.added
799 801 repo.dirstate.update_file(
800 802 f,
801 p1_tracked=False,
802 p2_tracked=True,
803 p1_tracked=p1_tracked,
803 804 wc_tracked=True,
804 clean_p2=True,
805 p2_info=True,
805 806 )
806 807 else:
807 808 parentfiledata = getfiledata[f] if getfiledata else None
@@ -818,8 +819,12 b' def recordupdates(repo, actions, branchm'
818 819 if branchmerge:
819 820 # We've done a branch merge, mark this file as merged
820 821 # so that we properly record the merger later
822 p1_tracked = f1 == f
821 823 repo.dirstate.update_file(
822 f, p1_tracked=True, wc_tracked=True, merged=True
824 f,
825 p1_tracked=p1_tracked,
826 wc_tracked=True,
827 p2_info=True,
823 828 )
824 829 if f1 != f2: # copy/rename
825 830 if move:
@@ -1,5 +1,5 b''
1 #ifndef _HG_MPATCH_H_
2 #define _HG_MPATCH_H_
1 #ifndef HG_MPATCH_H
2 #define HG_MPATCH_H
3 3
4 4 #define MPATCH_ERR_NO_MEM -3
5 5 #define MPATCH_ERR_CANNOT_BE_DECODED -2
@@ -299,7 +299,7 b' def checkworkingcopynarrowspec(repo):'
299 299 storespec = repo.svfs.tryread(FILENAME)
300 300 wcspec = repo.vfs.tryread(DIRSTATE_FILENAME)
301 301 if wcspec != storespec:
302 raise error.Abort(
302 raise error.StateError(
303 303 _(b"working copy's narrowspec is stale"),
304 304 hint=_(b"run 'hg tracked --update-working-copy'"),
305 305 )
@@ -21,7 +21,6 b' from __future__ import absolute_import, '
21 21 from .i18n import _
22 22 from . import (
23 23 error,
24 pycompat,
25 24 util,
26 25 )
27 26 from .utils import stringutil
@@ -216,7 +215,11 b' def unescapestr(s):'
216 215 return stringutil.unescapestr(s)
217 216 except ValueError as e:
218 217 # mangle Python's exception into our format
219 raise error.ParseError(pycompat.bytestr(e).lower())
218 # TODO: remove this suppression. For some reason, pytype 2021.09.09
219 # thinks .lower() is being called on Union[ValueError, bytes].
220 # pytype: disable=attribute-error
221 raise error.ParseError(stringutil.forcebytestr(e).lower())
222 # pytype: enable=attribute-error
220 223
221 224
222 225 def _prettyformat(tree, leafnodes, level, lines):
@@ -550,7 +550,9 b' class workingbackend(fsbackend):'
550 550 self.copied = []
551 551
552 552 def _checkknown(self, fname):
553 if self.repo.dirstate[fname] == b'?' and self.exists(fname):
553 if not self.repo.dirstate.get_entry(fname).any_tracked and self.exists(
554 fname
555 ):
554 556 raise PatchError(_(b'cannot patch %s: file is not tracked') % fname)
555 557
556 558 def setfile(self, fname, data, mode, copysource):
@@ -315,20 +315,19 b' def finddirs(path):'
315 315 class dirs(object):
316 316 '''a multiset of directory names from a set of file paths'''
317 317
318 def __init__(self, map, skip=None):
318 def __init__(self, map, only_tracked=False):
319 319 """
320 320 a dict map indicates a dirstate while a list indicates a manifest
321 321 """
322 322 self._dirs = {}
323 323 addpath = self.addpath
324 if isinstance(map, dict) and skip is not None:
324 if isinstance(map, dict) and only_tracked:
325 325 for f, s in pycompat.iteritems(map):
326 if s.state != skip:
326 if s.state != b'r':
327 327 addpath(f)
328 elif skip is not None:
329 raise error.ProgrammingError(
330 b"skip character is only supported with a dict source"
331 )
328 elif only_tracked:
329 msg = b"`only_tracked` is only supported with a dict source"
330 raise error.ProgrammingError(msg)
332 331 else:
333 332 for f in map:
334 333 addpath(f)
This diff has been collapsed as it changes many lines, (502 lines changed) Show them Hide them
@@ -7,6 +7,7 b''
7 7
8 8 from __future__ import absolute_import
9 9
10 import stat
10 11 import struct
11 12 import zlib
12 13
@@ -43,29 +44,143 b' NONNORMAL = -1'
43 44 # a special value used internally for `time` if the time is ambigeous
44 45 AMBIGUOUS_TIME = -1
45 46
47 # Bits of the `flags` byte inside a node in the file format
48 DIRSTATE_V2_WDIR_TRACKED = 1 << 0
49 DIRSTATE_V2_P1_TRACKED = 1 << 1
50 DIRSTATE_V2_P2_INFO = 1 << 2
51 DIRSTATE_V2_MODE_EXEC_PERM = 1 << 3
52 DIRSTATE_V2_MODE_IS_SYMLINK = 1 << 4
53 DIRSTATE_V2_HAS_FALLBACK_EXEC = 1 << 5
54 DIRSTATE_V2_FALLBACK_EXEC = 1 << 6
55 DIRSTATE_V2_HAS_FALLBACK_SYMLINK = 1 << 7
56 DIRSTATE_V2_FALLBACK_SYMLINK = 1 << 8
57 DIRSTATE_V2_EXPECTED_STATE_IS_MODIFIED = 1 << 9
58 DIRSTATE_V2_HAS_MODE_AND_SIZE = 1 << 10
59 DIRSTATE_V2_HAS_MTIME = 1 << 11
60 DIRSTATE_V2_MTIME_SECOND_AMBIGUOUS = 1 << 12
61 DIRSTATE_V2_DIRECTORY = 1 << 13
62 DIRSTATE_V2_ALL_UNKNOWN_RECORDED = 1 << 14
63 DIRSTATE_V2_ALL_IGNORED_RECORDED = 1 << 15
64
46 65
47 66 @attr.s(slots=True, init=False)
48 67 class DirstateItem(object):
49 68 """represent a dirstate entry
50 69
51 It contains:
70 It hold multiple attributes
71
72 # about file tracking
73 - wc_tracked: is the file tracked by the working copy
74 - p1_tracked: is the file tracked in working copy first parent
75 - p2_info: the file has been involved in some merge operation. Either
76 because it was actually merged, or because the p2 version was
77 ahead, or because some rename moved it there. In either case
78 `hg status` will want it displayed as modified.
52 79
53 - state (one of 'n', 'a', 'r', 'm')
54 - mode,
55 - size,
56 - mtime,
80 # about the file state expected from p1 manifest:
81 - mode: the file mode in p1
82 - size: the file size in p1
83
84 These value can be set to None, which mean we don't have a meaningful value
85 to compare with. Either because we don't really care about them as there
86 `status` is known without having to look at the disk or because we don't
87 know these right now and a full comparison will be needed to find out if
88 the file is clean.
89
90 # about the file state on disk last time we saw it:
91 - mtime: the last known clean mtime for the file.
92
93 This value can be set to None if no cachable state exist. Either because we
94 do not care (see previous section) or because we could not cache something
95 yet.
57 96 """
58 97
59 _state = attr.ib()
98 _wc_tracked = attr.ib()
99 _p1_tracked = attr.ib()
100 _p2_info = attr.ib()
60 101 _mode = attr.ib()
61 102 _size = attr.ib()
62 _mtime = attr.ib()
103 _mtime_s = attr.ib()
104 _mtime_ns = attr.ib()
105 _fallback_exec = attr.ib()
106 _fallback_symlink = attr.ib()
107
108 def __init__(
109 self,
110 wc_tracked=False,
111 p1_tracked=False,
112 p2_info=False,
113 has_meaningful_data=True,
114 has_meaningful_mtime=True,
115 parentfiledata=None,
116 fallback_exec=None,
117 fallback_symlink=None,
118 ):
119 self._wc_tracked = wc_tracked
120 self._p1_tracked = p1_tracked
121 self._p2_info = p2_info
122
123 self._fallback_exec = fallback_exec
124 self._fallback_symlink = fallback_symlink
125
126 self._mode = None
127 self._size = None
128 self._mtime_s = None
129 self._mtime_ns = None
130 if parentfiledata is None:
131 has_meaningful_mtime = False
132 has_meaningful_data = False
133 if has_meaningful_data:
134 self._mode = parentfiledata[0]
135 self._size = parentfiledata[1]
136 if has_meaningful_mtime:
137 self._mtime_s, self._mtime_ns = parentfiledata[2]
63 138
64 def __init__(self, state, mode, size, mtime):
65 self._state = state
66 self._mode = mode
67 self._size = size
68 self._mtime = mtime
139 @classmethod
140 def from_v2_data(cls, flags, size, mtime_s, mtime_ns):
141 """Build a new DirstateItem object from V2 data"""
142 has_mode_size = bool(flags & DIRSTATE_V2_HAS_MODE_AND_SIZE)
143 has_meaningful_mtime = bool(flags & DIRSTATE_V2_HAS_MTIME)
144 if flags & DIRSTATE_V2_MTIME_SECOND_AMBIGUOUS:
145 # The current code is not able to do the more subtle comparison that the
146 # MTIME_SECOND_AMBIGUOUS requires. So we ignore the mtime
147 has_meaningful_mtime = False
148 mode = None
149
150 if flags & +DIRSTATE_V2_EXPECTED_STATE_IS_MODIFIED:
151 # we do not have support for this flag in the code yet,
152 # force a lookup for this file.
153 has_mode_size = False
154 has_meaningful_mtime = False
155
156 fallback_exec = None
157 if flags & DIRSTATE_V2_HAS_FALLBACK_EXEC:
158 fallback_exec = flags & DIRSTATE_V2_FALLBACK_EXEC
159
160 fallback_symlink = None
161 if flags & DIRSTATE_V2_HAS_FALLBACK_SYMLINK:
162 fallback_symlink = flags & DIRSTATE_V2_FALLBACK_SYMLINK
163
164 if has_mode_size:
165 assert stat.S_IXUSR == 0o100
166 if flags & DIRSTATE_V2_MODE_EXEC_PERM:
167 mode = 0o755
168 else:
169 mode = 0o644
170 if flags & DIRSTATE_V2_MODE_IS_SYMLINK:
171 mode |= stat.S_IFLNK
172 else:
173 mode |= stat.S_IFREG
174 return cls(
175 wc_tracked=bool(flags & DIRSTATE_V2_WDIR_TRACKED),
176 p1_tracked=bool(flags & DIRSTATE_V2_P1_TRACKED),
177 p2_info=bool(flags & DIRSTATE_V2_P2_INFO),
178 has_meaningful_data=has_mode_size,
179 has_meaningful_mtime=has_meaningful_mtime,
180 parentfiledata=(mode, size, (mtime_s, mtime_ns)),
181 fallback_exec=fallback_exec,
182 fallback_symlink=fallback_symlink,
183 )
69 184
70 185 @classmethod
71 186 def from_v1_data(cls, state, mode, size, mtime):
@@ -74,12 +189,41 b' class DirstateItem(object):'
74 189 Since the dirstate-v1 format is frozen, the signature of this function
75 190 is not expected to change, unlike the __init__ one.
76 191 """
77 return cls(
78 state=state,
79 mode=mode,
80 size=size,
81 mtime=mtime,
82 )
192 if state == b'm':
193 return cls(wc_tracked=True, p1_tracked=True, p2_info=True)
194 elif state == b'a':
195 return cls(wc_tracked=True)
196 elif state == b'r':
197 if size == NONNORMAL:
198 p1_tracked = True
199 p2_info = True
200 elif size == FROM_P2:
201 p1_tracked = False
202 p2_info = True
203 else:
204 p1_tracked = True
205 p2_info = False
206 return cls(p1_tracked=p1_tracked, p2_info=p2_info)
207 elif state == b'n':
208 if size == FROM_P2:
209 return cls(wc_tracked=True, p2_info=True)
210 elif size == NONNORMAL:
211 return cls(wc_tracked=True, p1_tracked=True)
212 elif mtime == AMBIGUOUS_TIME:
213 return cls(
214 wc_tracked=True,
215 p1_tracked=True,
216 has_meaningful_mtime=False,
217 parentfiledata=(mode, size, (42, 0)),
218 )
219 else:
220 return cls(
221 wc_tracked=True,
222 p1_tracked=True,
223 parentfiledata=(mode, size, (mtime, 0)),
224 )
225 else:
226 raise RuntimeError(b'unknown state: %s' % state)
83 227
84 228 def set_possibly_dirty(self):
85 229 """Mark a file as "possibly dirty"
@@ -87,39 +231,80 b' class DirstateItem(object):'
87 231 This means the next status call will have to actually check its content
88 232 to make sure it is correct.
89 233 """
90 self._mtime = AMBIGUOUS_TIME
234 self._mtime_s = None
235 self._mtime_ns = None
236
237 def set_clean(self, mode, size, mtime):
238 """mark a file as "clean" cancelling potential "possibly dirty call"
239
240 Note: this function is a descendant of `dirstate.normal` and is
241 currently expected to be call on "normal" entry only. There are not
242 reason for this to not change in the future as long as the ccode is
243 updated to preserve the proper state of the non-normal files.
244 """
245 self._wc_tracked = True
246 self._p1_tracked = True
247 self._mode = mode
248 self._size = size
249 self._mtime_s, self._mtime_ns = mtime
250
251 def set_tracked(self):
252 """mark a file as tracked in the working copy
91 253
92 def __getitem__(self, idx):
93 if idx == 0 or idx == -4:
94 msg = b"do not use item[x], use item.state"
95 util.nouideprecwarn(msg, b'6.0', stacklevel=2)
96 return self._state
97 elif idx == 1 or idx == -3:
98 msg = b"do not use item[x], use item.mode"
99 util.nouideprecwarn(msg, b'6.0', stacklevel=2)
100 return self._mode
101 elif idx == 2 or idx == -2:
102 msg = b"do not use item[x], use item.size"
103 util.nouideprecwarn(msg, b'6.0', stacklevel=2)
104 return self._size
105 elif idx == 3 or idx == -1:
106 msg = b"do not use item[x], use item.mtime"
107 util.nouideprecwarn(msg, b'6.0', stacklevel=2)
108 return self._mtime
109 else:
110 raise IndexError(idx)
254 This will ultimately be called by command like `hg add`.
255 """
256 self._wc_tracked = True
257 # `set_tracked` is replacing various `normallookup` call. So we mark
258 # the files as needing lookup
259 #
260 # Consider dropping this in the future in favor of something less broad.
261 self._mtime_s = None
262 self._mtime_ns = None
263
264 def set_untracked(self):
265 """mark a file as untracked in the working copy
266
267 This will ultimately be called by command like `hg remove`.
268 """
269 self._wc_tracked = False
270 self._mode = None
271 self._size = None
272 self._mtime_s = None
273 self._mtime_ns = None
274
275 def drop_merge_data(self):
276 """remove all "merge-only" from a DirstateItem
277
278 This is to be call by the dirstatemap code when the second parent is dropped
279 """
280 if self._p2_info:
281 self._p2_info = False
282 self._mode = None
283 self._size = None
284 self._mtime_s = None
285 self._mtime_ns = None
111 286
112 287 @property
113 288 def mode(self):
114 return self._mode
289 return self.v1_mode()
115 290
116 291 @property
117 292 def size(self):
118 return self._size
293 return self.v1_size()
119 294
120 295 @property
121 296 def mtime(self):
122 return self._mtime
297 return self.v1_mtime()
298
299 def mtime_likely_equal_to(self, other_mtime):
300 self_sec = self._mtime_s
301 if self_sec is None:
302 return False
303 self_ns = self._mtime_ns
304 other_sec, other_ns = other_mtime
305 return self_sec == other_sec and (
306 self_ns == other_ns or self_ns == 0 or other_ns == 0
307 )
123 308
124 309 @property
125 310 def state(self):
@@ -134,94 +319,224 b' class DirstateItem(object):'
134 319 dirstatev1 format. It would make sense to ultimately deprecate it in
135 320 favor of the more "semantic" attributes.
136 321 """
137 return self._state
322 if not self.any_tracked:
323 return b'?'
324 return self.v1_state()
325
326 @property
327 def has_fallback_exec(self):
328 """True if "fallback" information are available for the "exec" bit
329
330 Fallback information can be stored in the dirstate to keep track of
331 filesystem attribute tracked by Mercurial when the underlying file
332 system or operating system does not support that property, (e.g.
333 Windows).
334
335 Not all version of the dirstate on-disk storage support preserving this
336 information.
337 """
338 return self._fallback_exec is not None
339
340 @property
341 def fallback_exec(self):
342 """ "fallback" information for the executable bit
343
344 True if the file should be considered executable when we cannot get
345 this information from the files system. False if it should be
346 considered non-executable.
347
348 See has_fallback_exec for details."""
349 return self._fallback_exec
350
351 @fallback_exec.setter
352 def set_fallback_exec(self, value):
353 """control "fallback" executable bit
354
355 Set to:
356 - True if the file should be considered executable,
357 - False if the file should be considered non-executable,
358 - None if we do not have valid fallback data.
359
360 See has_fallback_exec for details."""
361 if value is None:
362 self._fallback_exec = None
363 else:
364 self._fallback_exec = bool(value)
365
366 @property
367 def has_fallback_symlink(self):
368 """True if "fallback" information are available for symlink status
369
370 Fallback information can be stored in the dirstate to keep track of
371 filesystem attribute tracked by Mercurial when the underlying file
372 system or operating system does not support that property, (e.g.
373 Windows).
374
375 Not all version of the dirstate on-disk storage support preserving this
376 information."""
377 return self._fallback_symlink is not None
378
379 @property
380 def fallback_symlink(self):
381 """ "fallback" information for symlink status
382
383 True if the file should be considered executable when we cannot get
384 this information from the files system. False if it should be
385 considered non-executable.
386
387 See has_fallback_exec for details."""
388 return self._fallback_symlink
389
390 @fallback_symlink.setter
391 def set_fallback_symlink(self, value):
392 """control "fallback" symlink status
393
394 Set to:
395 - True if the file should be considered a symlink,
396 - False if the file should be considered not a symlink,
397 - None if we do not have valid fallback data.
398
399 See has_fallback_symlink for details."""
400 if value is None:
401 self._fallback_symlink = None
402 else:
403 self._fallback_symlink = bool(value)
138 404
139 405 @property
140 406 def tracked(self):
141 407 """True is the file is tracked in the working copy"""
142 return self._state in b"nma"
408 return self._wc_tracked
409
410 @property
411 def any_tracked(self):
412 """True is the file is tracked anywhere (wc or parents)"""
413 return self._wc_tracked or self._p1_tracked or self._p2_info
143 414
144 415 @property
145 416 def added(self):
146 417 """True if the file has been added"""
147 return self._state == b'a'
148
149 @property
150 def merged(self):
151 """True if the file has been merged
152
153 Should only be set if a merge is in progress in the dirstate
154 """
155 return self._state == b'm'
418 return self._wc_tracked and not (self._p1_tracked or self._p2_info)
156 419
157 420 @property
158 def from_p2(self):
159 """True if the file have been fetched from p2 during the current merge
160
161 This is only True is the file is currently tracked.
162
163 Should only be set if a merge is in progress in the dirstate
164 """
165 return self._state == b'n' and self._size == FROM_P2
421 def maybe_clean(self):
422 """True if the file has a chance to be in the "clean" state"""
423 if not self._wc_tracked:
424 return False
425 elif not self._p1_tracked:
426 return False
427 elif self._p2_info:
428 return False
429 return True
166 430
167 431 @property
168 def from_p2_removed(self):
169 """True if the file has been removed, but was "from_p2" initially
432 def p1_tracked(self):
433 """True if the file is tracked in the first parent manifest"""
434 return self._p1_tracked
170 435
171 This property seems like an abstraction leakage and should probably be
172 dealt in this class (or maybe the dirstatemap) directly.
436 @property
437 def p2_info(self):
438 """True if the file needed to merge or apply any input from p2
439
440 See the class documentation for details.
173 441 """
174 return self._state == b'r' and self._size == FROM_P2
442 return self._wc_tracked and self._p2_info
175 443
176 444 @property
177 445 def removed(self):
178 446 """True if the file has been removed"""
179 return self._state == b'r'
180
181 @property
182 def merged_removed(self):
183 """True if the file has been removed, but was "merged" initially
184
185 This property seems like an abstraction leakage and should probably be
186 dealt in this class (or maybe the dirstatemap) directly.
187 """
188 return self._state == b'r' and self._size == NONNORMAL
447 return not self._wc_tracked and (self._p1_tracked or self._p2_info)
189 448
190 @property
191 def dm_nonnormal(self):
192 """True is the entry is non-normal in the dirstatemap sense
193
194 There is no reason for any code, but the dirstatemap one to use this.
195 """
196 return self.state != b'n' or self.mtime == AMBIGUOUS_TIME
449 def v2_data(self):
450 """Returns (flags, mode, size, mtime) for v2 serialization"""
451 flags = 0
452 if self._wc_tracked:
453 flags |= DIRSTATE_V2_WDIR_TRACKED
454 if self._p1_tracked:
455 flags |= DIRSTATE_V2_P1_TRACKED
456 if self._p2_info:
457 flags |= DIRSTATE_V2_P2_INFO
458 if self._mode is not None and self._size is not None:
459 flags |= DIRSTATE_V2_HAS_MODE_AND_SIZE
460 if self.mode & stat.S_IXUSR:
461 flags |= DIRSTATE_V2_MODE_EXEC_PERM
462 if stat.S_ISLNK(self.mode):
463 flags |= DIRSTATE_V2_MODE_IS_SYMLINK
464 if self._mtime_s is not None:
465 flags |= DIRSTATE_V2_HAS_MTIME
197 466
198 @property
199 def dm_otherparent(self):
200 """True is the entry is `otherparent` in the dirstatemap sense
467 if self._fallback_exec is not None:
468 flags |= DIRSTATE_V2_HAS_FALLBACK_EXEC
469 if self._fallback_exec:
470 flags |= DIRSTATE_V2_FALLBACK_EXEC
201 471
202 There is no reason for any code, but the dirstatemap one to use this.
203 """
204 return self._size == FROM_P2
472 if self._fallback_symlink is not None:
473 flags |= DIRSTATE_V2_HAS_FALLBACK_SYMLINK
474 if self._fallback_symlink:
475 flags |= DIRSTATE_V2_FALLBACK_SYMLINK
476
477 # Note: we do not need to do anything regarding
478 # DIRSTATE_V2_ALL_UNKNOWN_RECORDED and DIRSTATE_V2_ALL_IGNORED_RECORDED
479 # since we never set _DIRSTATE_V2_HAS_DIRCTORY_MTIME
480 return (flags, self._size or 0, self._mtime_s or 0, self._mtime_ns or 0)
205 481
206 482 def v1_state(self):
207 483 """return a "state" suitable for v1 serialization"""
208 return self._state
484 if not self.any_tracked:
485 # the object has no state to record, this is -currently-
486 # unsupported
487 raise RuntimeError('untracked item')
488 elif self.removed:
489 return b'r'
490 elif self._p1_tracked and self._p2_info:
491 return b'm'
492 elif self.added:
493 return b'a'
494 else:
495 return b'n'
209 496
210 497 def v1_mode(self):
211 498 """return a "mode" suitable for v1 serialization"""
212 return self._mode
499 return self._mode if self._mode is not None else 0
213 500
214 501 def v1_size(self):
215 502 """return a "size" suitable for v1 serialization"""
216 return self._size
503 if not self.any_tracked:
504 # the object has no state to record, this is -currently-
505 # unsupported
506 raise RuntimeError('untracked item')
507 elif self.removed and self._p1_tracked and self._p2_info:
508 return NONNORMAL
509 elif self._p2_info:
510 return FROM_P2
511 elif self.removed:
512 return 0
513 elif self.added:
514 return NONNORMAL
515 elif self._size is None:
516 return NONNORMAL
517 else:
518 return self._size
217 519
218 520 def v1_mtime(self):
219 521 """return a "mtime" suitable for v1 serialization"""
220 return self._mtime
522 if not self.any_tracked:
523 # the object has no state to record, this is -currently-
524 # unsupported
525 raise RuntimeError('untracked item')
526 elif self.removed:
527 return 0
528 elif self._mtime_s is None:
529 return AMBIGUOUS_TIME
530 elif self._p2_info:
531 return AMBIGUOUS_TIME
532 elif not self._p1_tracked:
533 return AMBIGUOUS_TIME
534 else:
535 return self._mtime_s
221 536
222 537 def need_delay(self, now):
223 538 """True if the stored mtime would be ambiguous with the current time"""
224 return self._state == b'n' and self._mtime == now
539 return self.v1_state() == b'n' and self._mtime_s == now[0]
225 540
226 541
227 542 def gettype(q):
@@ -589,7 +904,6 b' def parse_dirstate(dmap, copymap, st):'
589 904
590 905
591 906 def pack_dirstate(dmap, copymap, pl, now):
592 now = int(now)
593 907 cs = stringio()
594 908 write = cs.write
595 909 write(b"".join(pl))
@@ -44,6 +44,7 b' if not ispy3:'
44 44 FileNotFoundError = OSError
45 45
46 46 else:
47 import builtins
47 48 import concurrent.futures as futures
48 49 import http.cookiejar as cookielib
49 50 import http.client as httplib
@@ -55,7 +56,7 b' else:'
55 56 def future_set_exception_info(f, exc_info):
56 57 f.set_exception(exc_info[0])
57 58
58 FileNotFoundError = __builtins__['FileNotFoundError']
59 FileNotFoundError = builtins.FileNotFoundError
59 60
60 61
61 62 def identity(a):
@@ -222,6 +223,15 b' if ispy3:'
222 223 >>> assert type(t) is bytes
223 224 """
224 225
226 # Trick pytype into not demanding Iterable[int] be passed to __new__(),
227 # since the appropriate bytes format is done internally.
228 #
229 # https://github.com/google/pytype/issues/500
230 if TYPE_CHECKING:
231
232 def __init__(self, s=b''):
233 pass
234
225 235 def __new__(cls, s=b''):
226 236 if isinstance(s, bytestr):
227 237 return s
@@ -433,7 +433,7 b' def manifestrevlogs(repo):'
433 433 if scmutil.istreemanifest(repo):
434 434 # This logic is safe if treemanifest isn't enabled, but also
435 435 # pointless, so we skip it if treemanifest isn't enabled.
436 for t, unencoded, encoded, size in repo.store.datafiles():
436 for t, unencoded, size in repo.store.datafiles():
437 437 if unencoded.startswith(b'meta/') and unencoded.endswith(
438 438 b'00manifest.i'
439 439 ):
@@ -441,7 +441,7 b' def manifestrevlogs(repo):'
441 441 yield repo.manifestlog.getstorage(dir)
442 442
443 443
444 def rebuildfncache(ui, repo):
444 def rebuildfncache(ui, repo, only_data=False):
445 445 """Rebuilds the fncache file from repo history.
446 446
447 447 Missing entries will be added. Extra entries will be removed.
@@ -465,28 +465,40 b' def rebuildfncache(ui, repo):'
465 465 newentries = set()
466 466 seenfiles = set()
467 467
468 progress = ui.makeprogress(
469 _(b'rebuilding'), unit=_(b'changesets'), total=len(repo)
470 )
471 for rev in repo:
472 progress.update(rev)
468 if only_data:
469 # Trust the listing of .i from the fncache, but not the .d. This is
470 # much faster, because we only need to stat every possible .d files,
471 # instead of reading the full changelog
472 for f in fnc:
473 if f[:5] == b'data/' and f[-2:] == b'.i':
474 seenfiles.add(f[5:-2])
475 newentries.add(f)
476 dataf = f[:-2] + b'.d'
477 if repo.store._exists(dataf):
478 newentries.add(dataf)
479 else:
480 progress = ui.makeprogress(
481 _(b'rebuilding'), unit=_(b'changesets'), total=len(repo)
482 )
483 for rev in repo:
484 progress.update(rev)
473 485
474 ctx = repo[rev]
475 for f in ctx.files():
476 # This is to minimize I/O.
477 if f in seenfiles:
478 continue
479 seenfiles.add(f)
486 ctx = repo[rev]
487 for f in ctx.files():
488 # This is to minimize I/O.
489 if f in seenfiles:
490 continue
491 seenfiles.add(f)
480 492
481 i = b'data/%s.i' % f
482 d = b'data/%s.d' % f
493 i = b'data/%s.i' % f
494 d = b'data/%s.d' % f
483 495
484 if repo.store._exists(i):
485 newentries.add(i)
486 if repo.store._exists(d):
487 newentries.add(d)
496 if repo.store._exists(i):
497 newentries.add(i)
498 if repo.store._exists(d):
499 newentries.add(d)
488 500
489 progress.complete()
501 progress.complete()
490 502
491 503 if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements:
492 504 # This logic is safe if treemanifest isn't enabled, but also
@@ -12,7 +12,7 b" DOTENCODE_REQUIREMENT = b'dotencode'"
12 12 STORE_REQUIREMENT = b'store'
13 13 FNCACHE_REQUIREMENT = b'fncache'
14 14
15 DIRSTATE_V2_REQUIREMENT = b'exp-dirstate-v2'
15 DIRSTATE_V2_REQUIREMENT = b'dirstate-v2'
16 16
17 17 # When narrowing is finalized and no longer subject to format changes,
18 18 # we should move this to just "narrow" or similar.
@@ -2581,10 +2581,15 b' class revlog(object):'
2581 2581 self._enforceinlinesize(transaction)
2582 2582 if self._docket is not None:
2583 2583 # revlog-v2 always has 3 writing handles, help Pytype
2584 assert self._writinghandles[2] is not None
2585 self._docket.index_end = self._writinghandles[0].tell()
2586 self._docket.data_end = self._writinghandles[1].tell()
2587 self._docket.sidedata_end = self._writinghandles[2].tell()
2584 wh1 = self._writinghandles[0]
2585 wh2 = self._writinghandles[1]
2586 wh3 = self._writinghandles[2]
2587 assert wh1 is not None
2588 assert wh2 is not None
2589 assert wh3 is not None
2590 self._docket.index_end = wh1.tell()
2591 self._docket.data_end = wh2.tell()
2592 self._docket.sidedata_end = wh3.tell()
2588 2593
2589 2594 nodemaputil.setup_persistent_nodemap(transaction, self)
2590 2595
@@ -826,7 +826,7 b' def repair_issue6528('
826 826 with context():
827 827 files = list(
828 828 (file_type, path)
829 for (file_type, path, _e, _s) in repo.store.datafiles()
829 for (file_type, path, _s) in repo.store.datafiles()
830 830 if path.endswith(b'.i') and file_type & store.FILEFLAGS_FILELOG
831 831 )
832 832
@@ -689,7 +689,7 b" def revsingle(repo, revspec, default=b'."
689 689
690 690 l = revrange(repo, [revspec], localalias=localalias)
691 691 if not l:
692 raise error.Abort(_(b'empty revision set'))
692 raise error.InputError(_(b'empty revision set'))
693 693 return repo[l.last()]
694 694
695 695
@@ -710,7 +710,7 b' def revpair(repo, revs):'
710 710 l = revrange(repo, revs)
711 711
712 712 if not l:
713 raise error.Abort(_(b'empty revision range'))
713 raise error.InputError(_(b'empty revision range'))
714 714
715 715 first = l.first()
716 716 second = l.last()
@@ -720,7 +720,7 b' def revpair(repo, revs):'
720 720 and len(revs) >= 2
721 721 and not all(revrange(repo, [r]) for r in revs)
722 722 ):
723 raise error.Abort(_(b'empty revision on one side of range'))
723 raise error.InputError(_(b'empty revision on one side of range'))
724 724
725 725 # if top-level is range expression, the result must always be a pair
726 726 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
@@ -1211,9 +1211,9 b' def addremove(repo, matcher, prefix, uip'
1211 1211 try:
1212 1212 similarity = float(opts.get(b'similarity') or 0)
1213 1213 except ValueError:
1214 raise error.Abort(_(b'similarity must be a number'))
1214 raise error.InputError(_(b'similarity must be a number'))
1215 1215 if similarity < 0 or similarity > 100:
1216 raise error.Abort(_(b'similarity must be between 0 and 100'))
1216 raise error.InputError(_(b'similarity must be between 0 and 100'))
1217 1217 similarity /= 100.0
1218 1218
1219 1219 ret = 0
@@ -1327,17 +1327,17 b' def _interestingfiles(repo, matcher):'
1327 1327 full=False,
1328 1328 )
1329 1329 for abs, st in pycompat.iteritems(walkresults):
1330 dstate = dirstate[abs]
1331 if dstate == b'?' and audit_path.check(abs):
1330 entry = dirstate.get_entry(abs)
1331 if (not entry.any_tracked) and audit_path.check(abs):
1332 1332 unknown.append(abs)
1333 elif dstate != b'r' and not st:
1333 elif (not entry.removed) and not st:
1334 1334 deleted.append(abs)
1335 elif dstate == b'r' and st:
1335 elif entry.removed and st:
1336 1336 forgotten.append(abs)
1337 1337 # for finding renames
1338 elif dstate == b'r' and not st:
1338 elif entry.removed and not st:
1339 1339 removed.append(abs)
1340 elif dstate == b'a':
1340 elif entry.added:
1341 1341 added.append(abs)
1342 1342
1343 1343 return added, unknown, deleted, removed, forgotten
@@ -1455,10 +1455,11 b' def dirstatecopy(ui, repo, wctx, src, ds'
1455 1455 """
1456 1456 origsrc = repo.dirstate.copied(src) or src
1457 1457 if dst == origsrc: # copying back a copy?
1458 if repo.dirstate[dst] not in b'mn' and not dryrun:
1458 entry = repo.dirstate.get_entry(dst)
1459 if (entry.added or not entry.tracked) and not dryrun:
1459 1460 repo.dirstate.set_tracked(dst)
1460 1461 else:
1461 if repo.dirstate[origsrc] == b'a' and origsrc == src:
1462 if repo.dirstate.get_entry(origsrc).added and origsrc == src:
1462 1463 if not ui.quiet:
1463 1464 ui.warn(
1464 1465 _(
@@ -1467,7 +1468,7 b' def dirstatecopy(ui, repo, wctx, src, ds'
1467 1468 )
1468 1469 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))
1469 1470 )
1470 if repo.dirstate[dst] in b'?r' and not dryrun:
1471 if not repo.dirstate.get_entry(dst).tracked and not dryrun:
1471 1472 wctx.add([dst])
1472 1473 elif not dryrun:
1473 1474 wctx.copy(origsrc, dst)
@@ -1504,7 +1505,7 b' def movedirstate(repo, newctx, match=Non'
1504 1505 }
1505 1506 # Adjust the dirstate copies
1506 1507 for dst, src in pycompat.iteritems(copies):
1507 if src not in newctx or dst in newctx or ds[dst] != b'a':
1508 if src not in newctx or dst in newctx or not ds.get_entry(dst).added:
1508 1509 src = None
1509 1510 ds.copy(src, dst)
1510 1511 repo._quick_access_changeid_invalidate()
@@ -472,7 +472,7 b' class basicstore(object):'
472 472 return self.path + b'/' + encodedir(f)
473 473
474 474 def _walk(self, relpath, recurse):
475 '''yields (unencoded, encoded, size)'''
475 '''yields (revlog_type, unencoded, size)'''
476 476 path = self.path
477 477 if relpath:
478 478 path += b'/' + relpath
@@ -488,7 +488,7 b' class basicstore(object):'
488 488 rl_type = is_revlog(f, kind, st)
489 489 if rl_type is not None:
490 490 n = util.pconvert(fp[striplen:])
491 l.append((rl_type, decodedir(n), n, st.st_size))
491 l.append((rl_type, decodedir(n), st.st_size))
492 492 elif kind == stat.S_IFDIR and recurse:
493 493 visit.append(fp)
494 494 l.sort()
@@ -505,26 +505,32 b' class basicstore(object):'
505 505 rootstore = manifest.manifestrevlog(repo.nodeconstants, self.vfs)
506 506 return manifest.manifestlog(self.vfs, repo, rootstore, storenarrowmatch)
507 507
508 def datafiles(self, matcher=None):
508 def datafiles(self, matcher=None, undecodable=None):
509 """Like walk, but excluding the changelog and root manifest.
510
511 When [undecodable] is None, revlogs names that can't be
512 decoded cause an exception. When it is provided, it should
513 be a list and the filenames that can't be decoded are added
514 to it instead. This is very rarely needed."""
509 515 files = self._walk(b'data', True) + self._walk(b'meta', True)
510 for (t, u, e, s) in files:
511 yield (FILEFLAGS_FILELOG | t, u, e, s)
516 for (t, u, s) in files:
517 yield (FILEFLAGS_FILELOG | t, u, s)
512 518
513 519 def topfiles(self):
514 520 # yield manifest before changelog
515 521 files = reversed(self._walk(b'', False))
516 for (t, u, e, s) in files:
522 for (t, u, s) in files:
517 523 if u.startswith(b'00changelog'):
518 yield (FILEFLAGS_CHANGELOG | t, u, e, s)
524 yield (FILEFLAGS_CHANGELOG | t, u, s)
519 525 elif u.startswith(b'00manifest'):
520 yield (FILEFLAGS_MANIFESTLOG | t, u, e, s)
526 yield (FILEFLAGS_MANIFESTLOG | t, u, s)
521 527 else:
522 yield (FILETYPE_OTHER | t, u, e, s)
528 yield (FILETYPE_OTHER | t, u, s)
523 529
524 530 def walk(self, matcher=None):
525 531 """return file related to data storage (ie: revlogs)
526 532
527 yields (file_type, unencoded, encoded, size)
533 yields (file_type, unencoded, size)
528 534
529 535 if a matcher is passed, storage files of only those tracked paths
530 536 are passed with matches the matcher
@@ -574,15 +580,20 b' class encodedstore(basicstore):'
574 580 # However that might change so we should probably add a test and encoding
575 581 # decoding for it too. see issue6548
576 582
577 def datafiles(self, matcher=None):
578 for t, a, b, size in super(encodedstore, self).datafiles():
583 def datafiles(self, matcher=None, undecodable=None):
584 for t, f1, size in super(encodedstore, self).datafiles():
579 585 try:
580 a = decodefilename(a)
586 f2 = decodefilename(f1)
581 587 except KeyError:
582 a = None
583 if a is not None and not _matchtrackedpath(a, matcher):
588 if undecodable is None:
589 msg = _(b'undecodable revlog name %s') % f1
590 raise error.StorageError(msg)
591 else:
592 undecodable.append(f1)
593 continue
594 if not _matchtrackedpath(f2, matcher):
584 595 continue
585 yield t, a, b, size
596 yield t, f2, size
586 597
587 598 def join(self, f):
588 599 return self.path + b'/' + encodefilename(f)
@@ -770,7 +781,7 b' class fncachestore(basicstore):'
770 781 def getsize(self, path):
771 782 return self.rawvfs.stat(path).st_size
772 783
773 def datafiles(self, matcher=None):
784 def datafiles(self, matcher=None, undecodable=None):
774 785 for f in sorted(self.fncache):
775 786 if not _matchtrackedpath(f, matcher):
776 787 continue
@@ -779,7 +790,7 b' class fncachestore(basicstore):'
779 790 t = revlog_type(f)
780 791 assert t is not None, f
781 792 t |= FILEFLAGS_FILELOG
782 yield t, f, ef, self.getsize(ef)
793 yield t, f, self.getsize(ef)
783 794 except OSError as err:
784 795 if err.errno != errno.ENOENT:
785 796 raise
@@ -248,7 +248,7 b' def generatev1(repo):'
248 248 # Get consistent snapshot of repo, lock during scan.
249 249 with repo.lock():
250 250 repo.ui.debug(b'scanning\n')
251 for file_type, name, ename, size in _walkstreamfiles(repo):
251 for file_type, name, size in _walkstreamfiles(repo):
252 252 if size:
253 253 entries.append((name, size))
254 254 total_bytes += size
@@ -650,7 +650,7 b' def _v2_walk(repo, includes, excludes, i'
650 650 if includes or excludes:
651 651 matcher = narrowspec.match(repo.root, includes, excludes)
652 652
653 for rl_type, name, ename, size in _walkstreamfiles(repo, matcher):
653 for rl_type, name, size in _walkstreamfiles(repo, matcher):
654 654 if size:
655 655 ft = _fileappend
656 656 if rl_type & store.FILEFLAGS_VOLATILE:
@@ -8,6 +8,7 b' from . import ('
8 8 error,
9 9 hg,
10 10 lock as lockmod,
11 logcmdutil,
11 12 mergestate as mergestatemod,
12 13 pycompat,
13 14 registrar,
@@ -178,7 +179,7 b' def debugstrip(ui, repo, *revs, **opts):'
178 179
179 180 cl = repo.changelog
180 181 revs = list(revs) + opts.get(b'rev')
181 revs = set(scmutil.revrange(repo, revs))
182 revs = set(logcmdutil.revrange(repo, revs))
182 183
183 184 with repo.wlock():
184 185 bookmarks = set(opts.get(b'bookmark'))
@@ -255,7 +256,9 b' def debugstrip(ui, repo, *revs, **opts):'
255 256
256 257 # reset files that only changed in the dirstate too
257 258 dirstate = repo.dirstate
258 dirchanges = [f for f in dirstate if dirstate[f] != b'n']
259 dirchanges = [
260 f for f in dirstate if not dirstate.get_entry(f).maybe_clean
261 ]
259 262 changedfiles.extend(dirchanges)
260 263
261 264 repo.dirstate.rebuild(urev, uctx.manifest(), changedfiles)
@@ -178,7 +178,9 b' class dirstatev2(requirementformatvarian'
178 178
179 179 description = _(
180 180 b'version 1 of the dirstate file format requires '
181 b'reading and parsing it all at once.'
181 b'reading and parsing it all at once.\n'
182 b'Version 2 has a better structure,'
183 b'better information and lighter update mechanism'
182 184 )
183 185
184 186 upgrademessage = _(b'"hg status" will be faster')
@@ -201,7 +201,7 b' def _clonerevlogs('
201 201
202 202 # Perform a pass to collect metadata. This validates we can open all
203 203 # source files and allows a unified progress bar to be displayed.
204 for rl_type, unencoded, encoded, size in alldatafiles:
204 for rl_type, unencoded, size in alldatafiles:
205 205 if not rl_type & store.FILEFLAGS_REVLOG_MAIN:
206 206 continue
207 207
@@ -638,7 +638,6 b' def upgrade_dirstate(ui, srcrepo, upgrad'
638 638 )
639 639
640 640 assert srcrepo.dirstate._use_dirstate_v2 == (old == b'v2')
641 srcrepo.dirstate._map._use_dirstate_tree = True
642 641 srcrepo.dirstate._map.preload()
643 642 srcrepo.dirstate._use_dirstate_v2 = new == b'v2'
644 643 srcrepo.dirstate._map._use_dirstate_v2 = srcrepo.dirstate._use_dirstate_v2
@@ -449,8 +449,8 b' def mmapread(fp, size=None):'
449 449 return b''
450 450 elif size is None:
451 451 size = 0
452 fd = getattr(fp, 'fileno', lambda: fp)()
452 453 try:
453 fd = getattr(fp, 'fileno', lambda: fp)()
454 454 return mmap.mmap(fd, size, access=mmap.ACCESS_READ)
455 455 except ValueError:
456 456 # Empty files cannot be mmapped, but mmapread should still work. Check
@@ -1225,6 +1225,8 b' def versiontuple(v=None, n=4):'
1225 1225 if n == 4:
1226 1226 return (vints[0], vints[1], vints[2], extra)
1227 1227
1228 raise error.ProgrammingError(b"invalid version part request: %d" % n)
1229
1228 1230
1229 1231 def cachefunc(func):
1230 1232 '''cache the result of function calls'''
@@ -57,30 +57,11 b' else:'
57 57 try:
58 58 # importlib.resources exists from Python 3.7; see fallback in except clause
59 59 # further down
60 from importlib import resources
61
62 from .. import encoding
60 from importlib import resources # pytype: disable=import-error
63 61
64 62 # Force loading of the resources module
65 63 resources.open_binary # pytype: disable=module-attr
66 64
67 def open_resource(package, name):
68 return resources.open_binary( # pytype: disable=module-attr
69 pycompat.sysstr(package), pycompat.sysstr(name)
70 )
71
72 def is_resource(package, name):
73 return resources.is_resource( # pytype: disable=module-attr
74 pycompat.sysstr(package), encoding.strfromlocal(name)
75 )
76
77 def contents(package):
78 # pytype: disable=module-attr
79 for r in resources.contents(pycompat.sysstr(package)):
80 # pytype: enable=module-attr
81 yield encoding.strtolocal(r)
82
83
84 65 except (ImportError, AttributeError):
85 66 # importlib.resources was not found (almost definitely because we're on a
86 67 # Python version before 3.7)
@@ -102,3 +83,23 b' except (ImportError, AttributeError):'
102 83
103 84 for p in os.listdir(path):
104 85 yield pycompat.fsencode(p)
86
87
88 else:
89 from .. import encoding
90
91 def open_resource(package, name):
92 return resources.open_binary( # pytype: disable=module-attr
93 pycompat.sysstr(package), pycompat.sysstr(name)
94 )
95
96 def is_resource(package, name):
97 return resources.is_resource( # pytype: disable=module-attr
98 pycompat.sysstr(package), encoding.strfromlocal(name)
99 )
100
101 def contents(package):
102 # pytype: disable=module-attr
103 for r in resources.contents(pycompat.sysstr(package)):
104 # pytype: enable=module-attr
105 yield encoding.strtolocal(r)
@@ -503,22 +503,17 b' def get_push_paths(repo, ui, dests):'
503 503 yield path
504 504
505 505
506 def get_pull_paths(repo, ui, sources, default_branches=()):
506 def get_pull_paths(repo, ui, sources):
507 507 """yields all the `(path, branch)` selected as pull source by `sources`"""
508 508 if not sources:
509 509 sources = [b'default']
510 510 for source in sources:
511 511 if source in ui.paths:
512 512 for p in ui.paths[source]:
513 yield parseurl(p.rawloc, default_branches)
513 yield p
514 514 else:
515 # Try to resolve as a local path or URI.
516 path = try_path(ui, source)
517 if path is not None:
518 url = path.rawloc
519 else:
520 url = source
521 yield parseurl(url, default_branches)
515 p = path(ui, None, source, validate_path=False)
516 yield p
522 517
523 518
524 519 def get_unique_push_path(action, repo, ui, dest=None):
@@ -771,6 +766,28 b' def pushrevpathoption(ui, path, value):'
771 766 return value
772 767
773 768
769 SUPPORTED_BOOKMARKS_MODES = {
770 b'default',
771 b'mirror',
772 b'ignore',
773 }
774
775
776 @pathsuboption(b'bookmarks.mode', b'bookmarks_mode')
777 def bookmarks_mode_option(ui, path, value):
778 if value not in SUPPORTED_BOOKMARKS_MODES:
779 path_name = path.name
780 if path_name is None:
781 # this is an "anonymous" path, config comes from the global one
782 path_name = b'*'
783 msg = _(b'(paths.%s:bookmarks.mode has unknown value: "%s")\n')
784 msg %= (path_name, value)
785 ui.warn(msg)
786 if value == b'default':
787 value = None
788 return value
789
790
774 791 @pathsuboption(b'multi-urls', b'multi_urls')
775 792 def multiurls_pathoption(ui, path, value):
776 793 res = stringutil.parsebool(value)
@@ -818,7 +835,14 b' def _chain_path(base_path, ui, paths):'
818 835 class path(object):
819 836 """Represents an individual path and its configuration."""
820 837
821 def __init__(self, ui=None, name=None, rawloc=None, suboptions=None):
838 def __init__(
839 self,
840 ui=None,
841 name=None,
842 rawloc=None,
843 suboptions=None,
844 validate_path=True,
845 ):
822 846 """Construct a path from its config options.
823 847
824 848 ``ui`` is the ``ui`` instance the path is coming from.
@@ -856,7 +880,8 b' class path(object):'
856 880 self.rawloc = rawloc
857 881 self.loc = b'%s' % u
858 882
859 self._validate_path()
883 if validate_path:
884 self._validate_path()
860 885
861 886 _path, sub_opts = ui.configsuboptions(b'paths', b'*')
862 887 self._own_sub_opts = {}
@@ -395,12 +395,13 b' class verifier(object):'
395 395 storefiles = set()
396 396 subdirs = set()
397 397 revlogv1 = self.revlogv1
398 for t, f, f2, size in repo.store.datafiles():
399 if not f:
400 self._err(None, _(b"cannot decode filename '%s'") % f2)
401 elif (size > 0 or not revlogv1) and f.startswith(b'meta/'):
398 undecodable = []
399 for t, f, size in repo.store.datafiles(undecodable=undecodable):
400 if (size > 0 or not revlogv1) and f.startswith(b'meta/'):
402 401 storefiles.add(_normpath(f))
403 402 subdirs.add(os.path.dirname(f))
403 for f in undecodable:
404 self._err(None, _(b"cannot decode filename '%s'") % f)
404 405 subdirprogress = ui.makeprogress(
405 406 _(b'checking'), unit=_(b'manifests'), total=len(subdirs)
406 407 )
@@ -459,11 +460,12 b' class verifier(object):'
459 460 ui.status(_(b"checking files\n"))
460 461
461 462 storefiles = set()
462 for rl_type, f, f2, size in repo.store.datafiles():
463 if not f:
464 self._err(None, _(b"cannot decode filename '%s'") % f2)
465 elif (size > 0 or not revlogv1) and f.startswith(b'data/'):
463 undecodable = []
464 for t, f, size in repo.store.datafiles(undecodable=undecodable):
465 if (size > 0 or not revlogv1) and f.startswith(b'data/'):
466 466 storefiles.add(_normpath(f))
467 for f in undecodable:
468 self._err(None, _(b"cannot decode filename '%s'") % f)
467 469
468 470 state = {
469 471 # TODO this assumes revlog storage for changelog.
@@ -175,7 +175,7 b" def posixfile(name, mode=b'r', buffering"
175 175 return mixedfilemodewrapper(fp)
176 176
177 177 return fp
178 except WindowsError as err:
178 except WindowsError as err: # pytype: disable=name-error
179 179 # convert to a friendlier exception
180 180 raise IOError(
181 181 err.errno, '%s: %s' % (encoding.strfromlocal(name), err.strerror)
@@ -44,13 +44,9 b' def batchable(f):'
44 44 def sample(self, one, two=None):
45 45 # Build list of encoded arguments suitable for your wire protocol:
46 46 encoded_args = [('one', encode(one),), ('two', encode(two),)]
47 # Create future for injection of encoded result:
48 encoded_res_future = future()
49 # Return encoded arguments and future:
50 yield encoded_args, encoded_res_future
51 # Assuming the future to be filled with the result from the batched
52 # request now. Decode it:
53 yield decode(encoded_res_future.value)
47 # Return it, along with a function that will receive the result
48 # from the batched request.
49 return encoded_args, decode
54 50
55 51 The decorator returns a function which wraps this coroutine as a plain
56 52 method, but adds the original method as an attribute called "batchable",
@@ -59,29 +55,19 b' def batchable(f):'
59 55 """
60 56
61 57 def plain(*args, **opts):
62 batchable = f(*args, **opts)
63 encoded_args_or_res, encoded_res_future = next(batchable)
64 if not encoded_res_future:
58 encoded_args_or_res, decode = f(*args, **opts)
59 if not decode:
65 60 return encoded_args_or_res # a local result in this case
66 61 self = args[0]
67 62 cmd = pycompat.bytesurl(f.__name__) # ensure cmd is ascii bytestr
68 encoded_res_future.set(self._submitone(cmd, encoded_args_or_res))
69 return next(batchable)
63 encoded_res = self._submitone(cmd, encoded_args_or_res)
64 return decode(encoded_res)
70 65
71 66 setattr(plain, 'batchable', f)
72 67 setattr(plain, '__name__', f.__name__)
73 68 return plain
74 69
75 70
76 class future(object):
77 '''placeholder for a value to be set later'''
78
79 def set(self, value):
80 if util.safehasattr(self, b'value'):
81 raise error.RepoError(b"future is already set")
82 self.value = value
83
84
85 71 def encodebatchcmds(req):
86 72 """Return a ``cmds`` argument value for the ``batch`` command."""
87 73 escapearg = wireprototypes.escapebatcharg
@@ -248,25 +234,18 b' class peerexecutor(object):'
248 234 continue
249 235
250 236 try:
251 batchable = fn.batchable(
237 encoded_args_or_res, decode = fn.batchable(
252 238 fn.__self__, **pycompat.strkwargs(args)
253 239 )
254 240 except Exception:
255 241 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
256 242 return
257 243
258 # Encoded arguments and future holding remote result.
259 try:
260 encoded_args_or_res, fremote = next(batchable)
261 except Exception:
262 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
263 return
264
265 if not fremote:
244 if not decode:
266 245 f.set_result(encoded_args_or_res)
267 246 else:
268 247 requests.append((command, encoded_args_or_res))
269 states.append((command, f, batchable, fremote))
248 states.append((command, f, batchable, decode))
270 249
271 250 if not requests:
272 251 return
@@ -319,7 +298,7 b' class peerexecutor(object):'
319 298 def _readbatchresponse(self, states, wireresults):
320 299 # Executes in a thread to read data off the wire.
321 300
322 for command, f, batchable, fremote in states:
301 for command, f, batchable, decode in states:
323 302 # Grab raw result off the wire and teach the internal future
324 303 # about it.
325 304 try:
@@ -334,11 +313,8 b' class peerexecutor(object):'
334 313 )
335 314 )
336 315 else:
337 fremote.set(remoteresult)
338
339 # And ask the coroutine to decode that value.
340 316 try:
341 result = next(batchable)
317 result = decode(remoteresult)
342 318 except Exception:
343 319 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
344 320 else:
@@ -369,87 +345,90 b' class wirepeer(repository.peer):'
369 345 @batchable
370 346 def lookup(self, key):
371 347 self.requirecap(b'lookup', _(b'look up remote revision'))
372 f = future()
373 yield {b'key': encoding.fromlocal(key)}, f
374 d = f.value
375 success, data = d[:-1].split(b" ", 1)
376 if int(success):
377 yield bin(data)
378 else:
379 self._abort(error.RepoError(data))
348
349 def decode(d):
350 success, data = d[:-1].split(b" ", 1)
351 if int(success):
352 return bin(data)
353 else:
354 self._abort(error.RepoError(data))
355
356 return {b'key': encoding.fromlocal(key)}, decode
380 357
381 358 @batchable
382 359 def heads(self):
383 f = future()
384 yield {}, f
385 d = f.value
386 try:
387 yield wireprototypes.decodelist(d[:-1])
388 except ValueError:
389 self._abort(error.ResponseError(_(b"unexpected response:"), d))
360 def decode(d):
361 try:
362 return wireprototypes.decodelist(d[:-1])
363 except ValueError:
364 self._abort(error.ResponseError(_(b"unexpected response:"), d))
365
366 return {}, decode
390 367
391 368 @batchable
392 369 def known(self, nodes):
393 f = future()
394 yield {b'nodes': wireprototypes.encodelist(nodes)}, f
395 d = f.value
396 try:
397 yield [bool(int(b)) for b in pycompat.iterbytestr(d)]
398 except ValueError:
399 self._abort(error.ResponseError(_(b"unexpected response:"), d))
370 def decode(d):
371 try:
372 return [bool(int(b)) for b in pycompat.iterbytestr(d)]
373 except ValueError:
374 self._abort(error.ResponseError(_(b"unexpected response:"), d))
375
376 return {b'nodes': wireprototypes.encodelist(nodes)}, decode
400 377
401 378 @batchable
402 379 def branchmap(self):
403 f = future()
404 yield {}, f
405 d = f.value
406 try:
407 branchmap = {}
408 for branchpart in d.splitlines():
409 branchname, branchheads = branchpart.split(b' ', 1)
410 branchname = encoding.tolocal(urlreq.unquote(branchname))
411 branchheads = wireprototypes.decodelist(branchheads)
412 branchmap[branchname] = branchheads
413 yield branchmap
414 except TypeError:
415 self._abort(error.ResponseError(_(b"unexpected response:"), d))
380 def decode(d):
381 try:
382 branchmap = {}
383 for branchpart in d.splitlines():
384 branchname, branchheads = branchpart.split(b' ', 1)
385 branchname = encoding.tolocal(urlreq.unquote(branchname))
386 branchheads = wireprototypes.decodelist(branchheads)
387 branchmap[branchname] = branchheads
388 return branchmap
389 except TypeError:
390 self._abort(error.ResponseError(_(b"unexpected response:"), d))
391
392 return {}, decode
416 393
417 394 @batchable
418 395 def listkeys(self, namespace):
419 396 if not self.capable(b'pushkey'):
420 yield {}, None
421 f = future()
397 return {}, None
422 398 self.ui.debug(b'preparing listkeys for "%s"\n' % namespace)
423 yield {b'namespace': encoding.fromlocal(namespace)}, f
424 d = f.value
425 self.ui.debug(
426 b'received listkey for "%s": %i bytes\n' % (namespace, len(d))
427 )
428 yield pushkeymod.decodekeys(d)
399
400 def decode(d):
401 self.ui.debug(
402 b'received listkey for "%s": %i bytes\n' % (namespace, len(d))
403 )
404 return pushkeymod.decodekeys(d)
405
406 return {b'namespace': encoding.fromlocal(namespace)}, decode
429 407
430 408 @batchable
431 409 def pushkey(self, namespace, key, old, new):
432 410 if not self.capable(b'pushkey'):
433 yield False, None
434 f = future()
411 return False, None
435 412 self.ui.debug(b'preparing pushkey for "%s:%s"\n' % (namespace, key))
436 yield {
413
414 def decode(d):
415 d, output = d.split(b'\n', 1)
416 try:
417 d = bool(int(d))
418 except ValueError:
419 raise error.ResponseError(
420 _(b'push failed (unexpected response):'), d
421 )
422 for l in output.splitlines(True):
423 self.ui.status(_(b'remote: '), l)
424 return d
425
426 return {
437 427 b'namespace': encoding.fromlocal(namespace),
438 428 b'key': encoding.fromlocal(key),
439 429 b'old': encoding.fromlocal(old),
440 430 b'new': encoding.fromlocal(new),
441 }, f
442 d = f.value
443 d, output = d.split(b'\n', 1)
444 try:
445 d = bool(int(d))
446 except ValueError:
447 raise error.ResponseError(
448 _(b'push failed (unexpected response):'), d
449 )
450 for l in output.splitlines(True):
451 self.ui.status(_(b'remote: '), l)
452 yield d
431 }, decode
453 432
454 433 def stream_out(self):
455 434 return self._callstream(b'stream_out')
@@ -1579,7 +1579,7 b' def rawstorefiledata(repo, proto, files,'
1579 1579
1580 1580 # TODO this is a bunch of storage layer interface abstractions because
1581 1581 # it assumes revlogs.
1582 for rl_type, name, encodedname, size in topfiles:
1582 for rl_type, name, size in topfiles:
1583 1583 # XXX use the `rl_type` for that
1584 1584 if b'changelog' in files and name.startswith(b'00changelog'):
1585 1585 pass
@@ -1,26 +1,16 b''
1 1 == New Features ==
2 * `debugrebuildfncache` now has an option to rebuild only the index files
3 2
4 3
5 4 == Default Format Change ==
6 5
7 6 These changes affects newly created repositories (or new clone) done with
8 Mercurial 6.0.
7 Mercurial XXX.
9 8
10 9
11 10 == New Experimental Features ==
12 11
13 * Added a new `web.full-garbage-collection-rate` to control performance. See
14 de2e04fe4897a554b9ef433167f11ea4feb2e09c for more information
15
16 12 == Bug Fixes ==
17 13
18 * `hg fix --working-dir` now correctly works when in an uncommitted merge state
19 * `rhg` (Rust fast-path for `hg`) now supports the full config list syntax
20 * `rhg` now parses some corner-cases for revsets correctly
21 * `hg email -o` now works again when not mentioning a revision
22 * Lots of Windows fixes
23 * Lots of miscellaneous other fixes
24 14
25 15 == Backwards Compatibility Changes ==
26 16
@@ -29,15 +19,4 b' Mercurial 6.0.'
29 19
30 20 The following functions have been removed:
31 21
32 * `dirstate.normal`
33 * `dirstate.normallookup`
34 * `dirstate.otherparent`
35 * `dirstate.add`
36 * `dirstate.remove`
37 * `dirstate.drop`
38 * `dirstate.__getitem__`
39
40 22 Miscellaneous:
41
42 * `wireprotov1peer`'s `batchable` is now a simple function and not a generator
43 anymore No newline at end of file
@@ -157,9 +157,9 b' dependencies = ['
157 157
158 158 [[package]]
159 159 name = "cpython"
160 version = "0.6.0"
160 version = "0.7.0"
161 161 source = "registry+https://github.com/rust-lang/crates.io-index"
162 checksum = "8094679a4e9bfc8035572162624bc800eda35b5f9eff2537b9cd9aacc3d9782e"
162 checksum = "b7d46ba8ace7f3a1d204ac5060a706d0a68de6b42eafb6a586cc08bebcffe664"
163 163 dependencies = [
164 164 "libc",
165 165 "num-traits",
@@ -374,6 +374,7 b' dependencies = ['
374 374 name = "hg-core"
375 375 version = "0.1.0"
376 376 dependencies = [
377 "bitflags",
377 378 "byteorder",
378 379 "bytes-cast",
379 380 "clap",
@@ -385,8 +386,9 b' dependencies = ['
385 386 "im-rc",
386 387 "itertools",
387 388 "lazy_static",
389 "libc",
388 390 "log",
389 "memmap",
391 "memmap2",
390 392 "micro-timer",
391 393 "pretty_assertions",
392 394 "rand",
@@ -396,6 +398,7 b' dependencies = ['
396 398 "regex",
397 399 "same-file",
398 400 "sha-1",
401 "stable_deref_trait",
399 402 "tempfile",
400 403 "twox-hash",
401 404 "zstd",
@@ -411,6 +414,7 b' dependencies = ['
411 414 "hg-core",
412 415 "libc",
413 416 "log",
417 "stable_deref_trait",
414 418 ]
415 419
416 420 [[package]]
@@ -508,13 +512,13 b' source = "registry+https://github.com/ru'
508 512 checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525"
509 513
510 514 [[package]]
511 name = "memmap"
512 version = "0.7.0"
515 name = "memmap2"
516 version = "0.4.0"
513 517 source = "registry+https://github.com/rust-lang/crates.io-index"
514 checksum = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b"
518 checksum = "de5d3112c080d58ce560081baeaab7e1e864ca21795ddbf533d5b1842bb1ecf8"
515 519 dependencies = [
516 520 "libc",
517 "winapi",
521 "stable_deref_trait",
518 522 ]
519 523
520 524 [[package]]
@@ -649,9 +653,9 b' dependencies = ['
649 653
650 654 [[package]]
651 655 name = "python27-sys"
652 version = "0.6.0"
656 version = "0.7.0"
653 657 source = "registry+https://github.com/rust-lang/crates.io-index"
654 checksum = "5826ddbc5366eb0b0492040fdc25bf50bb49092c192bd45e80fb7a24dc6832ab"
658 checksum = "94670354e264300dde81a5864cbb6bfc9d56ac3dcf3a278c32cb52f816f4dfd1"
655 659 dependencies = [
656 660 "libc",
657 661 "regex",
@@ -659,9 +663,9 b' dependencies = ['
659 663
660 664 [[package]]
661 665 name = "python3-sys"
662 version = "0.6.0"
666 version = "0.7.0"
663 667 source = "registry+https://github.com/rust-lang/crates.io-index"
664 checksum = "b78af21b29594951a47fc3dac9b9eff0a3f077dec2f780ee943ae16a668f3b6a"
668 checksum = "b18b32e64c103d5045f44644d7ddddd65336f7a0521f6fde673240a9ecceb77e"
665 669 dependencies = [
666 670 "libc",
667 671 "regex",
@@ -865,6 +869,12 b' dependencies = ['
865 869 ]
866 870
867 871 [[package]]
872 name = "stable_deref_trait"
873 version = "1.2.0"
874 source = "registry+https://github.com/rust-lang/crates.io-index"
875 checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3"
876
877 [[package]]
868 878 name = "static_assertions"
869 879 version = "1.1.0"
870 880 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -74,8 +74,8 b' Example usage:'
74 74 Developing Rust
75 75 ===============
76 76
77 The current version of Rust in use is ``1.41.1``, because it's what Debian
78 stable has. You can use ``rustup override set 1.41.1`` at the root of the repo
77 The current version of Rust in use is ``1.48.0``, because it's what Debian
78 stable has. You can use ``rustup override set 1.48.0`` at the root of the repo
79 79 to make it easier on you.
80 80
81 81 Go to the ``hg-cpython`` folder::
@@ -9,6 +9,7 b' edition = "2018"'
9 9 name = "hg"
10 10
11 11 [dependencies]
12 bitflags = "1.2"
12 13 bytes-cast = "0.2"
13 14 byteorder = "1.3.4"
14 15 derive_more = "0.99"
@@ -16,6 +17,7 b' home = "0.5"'
16 17 im-rc = "15.0.*"
17 18 itertools = "0.9"
18 19 lazy_static = "1.4.0"
20 libc = "0.2"
19 21 rand = "0.7.3"
20 22 rand_pcg = "0.2.1"
21 23 rand_distr = "0.2.2"
@@ -24,11 +26,12 b' regex = "1.3.9"'
24 26 sha-1 = "0.9.6"
25 27 twox-hash = "1.5.0"
26 28 same-file = "1.0.6"
29 stable_deref_trait = "1.2.0"
27 30 tempfile = "3.1.0"
28 31 crossbeam-channel = "0.4"
29 32 micro-timer = "0.3.0"
30 33 log = "0.4.8"
31 memmap = "0.7.0"
34 memmap2 = {version = "0.4", features = ["stable_deref_trait"]}
32 35 zstd = "0.5.3"
33 36 format-bytes = "0.2.2"
34 37
@@ -5,7 +5,7 b''
5 5
6 6 //! Minimal `RevlogIndex`, readable from standard Mercurial file format
7 7 use hg::*;
8 use memmap::*;
8 use memmap2::*;
9 9 use std::fs::File;
10 10 use std::ops::Deref;
11 11 use std::path::Path;
@@ -7,7 +7,7 b' use clap::*;'
7 7 use hg::revlog::node::*;
8 8 use hg::revlog::nodemap::*;
9 9 use hg::revlog::*;
10 use memmap::MmapOptions;
10 use memmap2::MmapOptions;
11 11 use rand::Rng;
12 12 use std::fs::File;
13 13 use std::io;
@@ -13,7 +13,6 b' use crate::config::layer::{'
13 13 ConfigError, ConfigLayer, ConfigOrigin, ConfigValue,
14 14 };
15 15 use crate::utils::files::get_bytes_from_os_str;
16 use crate::utils::SliceExt;
17 16 use format_bytes::{write_bytes, DisplayBytes};
18 17 use std::collections::HashSet;
19 18 use std::env;
@@ -362,30 +361,14 b' impl Config {'
362 361 Ok(self.get_option(section, item)?.unwrap_or(false))
363 362 }
364 363
365 /// Returns the corresponding list-value in the config if found, or `None`.
366 ///
367 /// This is appropriate for new configuration keys. The value syntax is
368 /// **not** the same as most existing list-valued config, which has Python
369 /// parsing implemented in `parselist()` in
370 /// `mercurial/utils/stringutil.py`. Faithfully porting that parsing
371 /// algorithm to Rust (including behavior that are arguably bugs)
372 /// turned out to be non-trivial and hasn’t been completed as of this
373 /// writing.
374 ///
375 /// Instead, the "simple" syntax is: split on comma, then trim leading and
376 /// trailing whitespace of each component. Quotes or backslashes are not
377 /// interpreted in any way. Commas are mandatory between values. Values
378 /// that contain a comma are not supported.
379 pub fn get_simple_list(
364 /// If there is an `item` value in `section`, parse and return a list of
365 /// byte strings.
366 pub fn get_list(
380 367 &self,
381 368 section: &[u8],
382 369 item: &[u8],
383 ) -> Option<impl Iterator<Item = &[u8]>> {
384 self.get(section, item).map(|value| {
385 value
386 .split(|&byte| byte == b',')
387 .map(|component| component.trim())
388 })
370 ) -> Option<Vec<Vec<u8>>> {
371 self.get(section, item).map(values::parse_list)
389 372 }
390 373
391 374 /// Returns the raw value bytes of the first one found, or `None`.
@@ -8,6 +8,8 b''
8 8 //! details about where the value came from (but omits details of what’s
9 9 //! invalid inside the value).
10 10
11 use crate::utils::SliceExt;
12
11 13 pub(super) fn parse_bool(v: &[u8]) -> Option<bool> {
12 14 match v.to_ascii_lowercase().as_slice() {
13 15 b"1" | b"yes" | b"true" | b"on" | b"always" => Some(true),
@@ -42,6 +44,216 b' pub(super) fn parse_byte_size(value: &[u'
42 44 value.parse().ok()
43 45 }
44 46
47 /// Parse a config value as a list of sub-values.
48 ///
49 /// Ported from `parselist` in `mercurial/utils/stringutil.py`
50
51 // Note: keep behavior in sync with the Python one.
52
53 // Note: this could return `Vec<Cow<[u8]>>` instead and borrow `input` when
54 // possible (when there’s no backslash-escapes) but this is probably not worth
55 // the complexity as config is presumably not accessed inside
56 // preformance-sensitive loops.
57 pub(super) fn parse_list(input: &[u8]) -> Vec<Vec<u8>> {
58 // Port of Python’s `value.lstrip(b' ,\n')`
59 // TODO: is this really what we want?
60 let input =
61 input.trim_start_matches(|b| b == b' ' || b == b',' || b == b'\n');
62 parse_list_without_trim_start(input)
63 }
64
65 fn parse_list_without_trim_start(input: &[u8]) -> Vec<Vec<u8>> {
66 // Start of port of Python’s `_configlist`
67 let input = input.trim_end_matches(|b| b == b' ' || b == b',');
68 if input.is_empty() {
69 return Vec::new();
70 }
71
72 // Just to make β€œa string” less confusable with β€œa list of strings”.
73 type ByteString = Vec<u8>;
74
75 // These correspond to Python’s…
76 let mut mode = ParserMode::Plain; // `parser`
77 let mut values = Vec::new(); // `parts[:-1]`
78 let mut next_value = ByteString::new(); // `parts[-1]`
79 let mut offset = 0; // `offset`
80
81 // Setting `parser` to `None` is instead handled by returning immediately
82 enum ParserMode {
83 Plain,
84 Quoted,
85 }
86
87 loop {
88 match mode {
89 ParserMode::Plain => {
90 // Start of port of Python’s `_parse_plain`
91 let mut whitespace = false;
92 while let Some(&byte) = input.get(offset) {
93 if is_space(byte) || byte == b',' {
94 whitespace = true;
95 offset += 1;
96 } else {
97 break;
98 }
99 }
100 if let Some(&byte) = input.get(offset) {
101 if whitespace {
102 values.push(std::mem::take(&mut next_value))
103 }
104 if byte == b'"' && next_value.is_empty() {
105 mode = ParserMode::Quoted;
106 } else {
107 if byte == b'"' && next_value.ends_with(b"\\") {
108 next_value.pop();
109 }
110 next_value.push(byte);
111 }
112 offset += 1;
113 } else {
114 values.push(next_value);
115 return values;
116 }
117 }
118 ParserMode::Quoted => {
119 // Start of port of Python’s `_parse_quote`
120 if let Some(&byte) = input.get(offset) {
121 if byte == b'"' {
122 // The input contains a quoted zero-length value `""`
123 debug_assert_eq!(next_value, b"");
124 values.push(std::mem::take(&mut next_value));
125 offset += 1;
126 while let Some(&byte) = input.get(offset) {
127 if is_space(byte) || byte == b',' {
128 offset += 1;
129 } else {
130 break;
131 }
132 }
133 mode = ParserMode::Plain;
134 continue;
135 }
136 }
137
138 while let Some(&byte) = input.get(offset) {
139 if byte == b'"' {
140 break;
141 }
142 if byte == b'\\' && input.get(offset + 1) == Some(&b'"') {
143 next_value.push(b'"');
144 offset += 2;
145 } else {
146 next_value.push(byte);
147 offset += 1;
148 }
149 }
150
151 if offset >= input.len() {
152 // We didn’t find a closing double-quote,
153 // so treat the opening one as part of an unquoted value
154 // instead of delimiting the start of a quoted value.
155
156 // `next_value` may have had some backslash-escapes
157 // unescaped. TODO: shouldn’t we use a slice of `input`
158 // instead?
159 let mut real_values =
160 parse_list_without_trim_start(&next_value);
161
162 if let Some(first) = real_values.first_mut() {
163 first.insert(0, b'"');
164 // Drop `next_value`
165 values.extend(real_values)
166 } else {
167 next_value.push(b'"');
168 values.push(next_value);
169 }
170 return values;
171 }
172
173 // We’re not at the end of the input, which means the `while`
174 // loop above ended at at double quote. Skip
175 // over that.
176 offset += 1;
177
178 while let Some(&byte) = input.get(offset) {
179 if byte == b' ' || byte == b',' {
180 offset += 1;
181 } else {
182 break;
183 }
184 }
185
186 if offset >= input.len() {
187 values.push(next_value);
188 return values;
189 }
190
191 if offset + 1 == input.len() && input[offset] == b'"' {
192 next_value.push(b'"');
193 offset += 1;
194 } else {
195 values.push(std::mem::take(&mut next_value));
196 }
197
198 mode = ParserMode::Plain;
199 }
200 }
201 }
202
203 // https://docs.python.org/3/library/stdtypes.html?#bytes.isspace
204 fn is_space(byte: u8) -> bool {
205 if let b' ' | b'\t' | b'\n' | b'\r' | b'\x0b' | b'\x0c' = byte {
206 true
207 } else {
208 false
209 }
210 }
211 }
212
213 #[test]
214 fn test_parse_list() {
215 // Make `assert_eq` error messages nicer
216 fn as_strings(values: &[Vec<u8>]) -> Vec<String> {
217 values
218 .iter()
219 .map(|v| std::str::from_utf8(v.as_ref()).unwrap().to_owned())
220 .collect()
221 }
222 macro_rules! assert_parse_list {
223 ( $input: expr => [ $( $output: expr ),* ] ) => {
224 assert_eq!(
225 as_strings(&parse_list($input)),
226 as_strings(&[ $( Vec::from(&$output[..]) ),* ]),
227 );
228 }
229 }
230
231 // Keep these Rust tests in sync with the Python ones in
232 // `tests/test-config-parselist.py`
233 assert_parse_list!(b"" => []);
234 assert_parse_list!(b"," => []);
235 assert_parse_list!(b"A" => [b"A"]);
236 assert_parse_list!(b"B,B" => [b"B", b"B"]);
237 assert_parse_list!(b", C, ,C," => [b"C", b"C"]);
238 assert_parse_list!(b"\"" => [b"\""]);
239 assert_parse_list!(b"\"\"" => [b"", b""]);
240 assert_parse_list!(b"D,\"" => [b"D", b"\""]);
241 assert_parse_list!(b"E,\"\"" => [b"E", b"", b""]);
242 assert_parse_list!(b"\"F,F\"" => [b"F,F"]);
243 assert_parse_list!(b"\"G,G" => [b"\"G", b"G"]);
244 assert_parse_list!(b"\"H \\\",\\\"H" => [b"\"H", b",", b"H"]);
245 assert_parse_list!(b"I,I\"" => [b"I", b"I\""]);
246 assert_parse_list!(b"J,\"J" => [b"J", b"\"J"]);
247 assert_parse_list!(b"K K" => [b"K", b"K"]);
248 assert_parse_list!(b"\"K\" K" => [b"K", b"K"]);
249 assert_parse_list!(b"L\tL" => [b"L", b"L"]);
250 assert_parse_list!(b"\"L\"\tL" => [b"L", b"", b"L"]);
251 assert_parse_list!(b"M\x0bM" => [b"M", b"M"]);
252 assert_parse_list!(b"\"M\"\x0bM" => [b"M", b"", b"M"]);
253 assert_parse_list!(b"\"N\" , ,\"" => [b"N\""]);
254 assert_parse_list!(b"\" ,O, " => [b"\"", b"O"]);
255 }
256
45 257 #[test]
46 258 fn test_parse_byte_size() {
47 259 assert_eq!(parse_byte_size(b""), None);
@@ -6,20 +6,19 b''
6 6 // GNU General Public License version 2 or any later version.
7 7
8 8 use crate::dirstate_tree::on_disk::DirstateV2ParseError;
9 use crate::errors::HgError;
10 9 use crate::revlog::node::NULL_NODE;
11 10 use crate::revlog::Node;
12 use crate::utils::hg_path::{HgPath, HgPathBuf};
13 use crate::FastHashMap;
14 use bytes_cast::{unaligned, BytesCast};
15 use std::convert::TryFrom;
11 use crate::utils::hg_path::HgPath;
12 use bytes_cast::BytesCast;
16 13
17 14 pub mod dirs_multiset;
18 pub mod dirstate_map;
15 pub mod entry;
19 16 pub mod parsers;
20 17 pub mod status;
21 18
22 #[derive(Debug, PartialEq, Clone, BytesCast)]
19 pub use self::entry::*;
20
21 #[derive(Debug, PartialEq, Copy, Clone, BytesCast)]
23 22 #[repr(C)]
24 23 pub struct DirstateParents {
25 24 pub p1: Node,
@@ -33,69 +32,6 b' impl DirstateParents {'
33 32 };
34 33 }
35 34
36 /// The C implementation uses all signed types. This will be an issue
37 /// either when 4GB+ source files are commonplace or in 2038, whichever
38 /// comes first.
39 #[derive(Debug, PartialEq, Copy, Clone)]
40 pub struct DirstateEntry {
41 pub state: EntryState,
42 pub mode: i32,
43 pub mtime: i32,
44 pub size: i32,
45 }
46
47 impl DirstateEntry {
48 pub fn is_non_normal(&self) -> bool {
49 self.state != EntryState::Normal || self.mtime == MTIME_UNSET
50 }
51
52 pub fn is_from_other_parent(&self) -> bool {
53 self.state == EntryState::Normal && self.size == SIZE_FROM_OTHER_PARENT
54 }
55
56 // TODO: other platforms
57 #[cfg(unix)]
58 pub fn mode_changed(
59 &self,
60 filesystem_metadata: &std::fs::Metadata,
61 ) -> bool {
62 use std::os::unix::fs::MetadataExt;
63 const EXEC_BIT_MASK: u32 = 0o100;
64 let dirstate_exec_bit = (self.mode as u32) & EXEC_BIT_MASK;
65 let fs_exec_bit = filesystem_metadata.mode() & EXEC_BIT_MASK;
66 dirstate_exec_bit != fs_exec_bit
67 }
68
69 /// Returns a `(state, mode, size, mtime)` tuple as for
70 /// `DirstateMapMethods::debug_iter`.
71 pub fn debug_tuple(&self) -> (u8, i32, i32, i32) {
72 (self.state.into(), self.mode, self.size, self.mtime)
73 }
74 }
75
76 #[derive(BytesCast)]
77 #[repr(C)]
78 struct RawEntry {
79 state: u8,
80 mode: unaligned::I32Be,
81 size: unaligned::I32Be,
82 mtime: unaligned::I32Be,
83 length: unaligned::I32Be,
84 }
85
86 pub const V1_RANGEMASK: i32 = 0x7FFFFFFF;
87
88 pub const MTIME_UNSET: i32 = -1;
89
90 /// A `DirstateEntry` with a size of `-2` means that it was merged from the
91 /// other parent. This allows revert to pick the right status back during a
92 /// merge.
93 pub const SIZE_FROM_OTHER_PARENT: i32 = -2;
94 /// A special value used for internal representation of special case in
95 /// dirstate v1 format.
96 pub const SIZE_NON_NORMAL: i32 = -1;
97
98 pub type StateMap = FastHashMap<HgPathBuf, DirstateEntry>;
99 35 pub type StateMapIter<'a> = Box<
100 36 dyn Iterator<
101 37 Item = Result<(&'a HgPath, DirstateEntry), DirstateV2ParseError>,
@@ -103,58 +39,8 b" pub type StateMapIter<'a> = Box<"
103 39 + 'a,
104 40 >;
105 41
106 pub type CopyMap = FastHashMap<HgPathBuf, HgPathBuf>;
107 42 pub type CopyMapIter<'a> = Box<
108 43 dyn Iterator<Item = Result<(&'a HgPath, &'a HgPath), DirstateV2ParseError>>
109 44 + Send
110 45 + 'a,
111 46 >;
112
113 #[derive(Copy, Clone, Debug, Eq, PartialEq)]
114 pub enum EntryState {
115 Normal,
116 Added,
117 Removed,
118 Merged,
119 Unknown,
120 }
121
122 impl EntryState {
123 pub fn is_tracked(self) -> bool {
124 use EntryState::*;
125 match self {
126 Normal | Added | Merged => true,
127 Removed | Unknown => false,
128 }
129 }
130 }
131
132 impl TryFrom<u8> for EntryState {
133 type Error = HgError;
134
135 fn try_from(value: u8) -> Result<Self, Self::Error> {
136 match value {
137 b'n' => Ok(EntryState::Normal),
138 b'a' => Ok(EntryState::Added),
139 b'r' => Ok(EntryState::Removed),
140 b'm' => Ok(EntryState::Merged),
141 b'?' => Ok(EntryState::Unknown),
142 _ => Err(HgError::CorruptedRepository(format!(
143 "Incorrect dirstate entry state {}",
144 value
145 ))),
146 }
147 }
148 }
149
150 impl Into<u8> for EntryState {
151 fn into(self) -> u8 {
152 match self {
153 EntryState::Normal => b'n',
154 EntryState::Added => b'a',
155 EntryState::Removed => b'r',
156 EntryState::Merged => b'm',
157 EntryState::Unknown => b'?',
158 }
159 }
160 }
@@ -33,7 +33,7 b' impl DirsMultiset {'
33 33 /// If `skip_state` is provided, skips dirstate entries with equal state.
34 34 pub fn from_dirstate<I, P>(
35 35 dirstate: I,
36 skip_state: Option<EntryState>,
36 only_tracked: bool,
37 37 ) -> Result<Self, DirstateError>
38 38 where
39 39 I: IntoIterator<
@@ -48,8 +48,8 b' impl DirsMultiset {'
48 48 let (filename, entry) = item?;
49 49 let filename = filename.as_ref();
50 50 // This `if` is optimized out of the loop
51 if let Some(skip) = skip_state {
52 if skip != entry.state {
51 if only_tracked {
52 if entry.state() != EntryState::Removed {
53 53 multiset.add_path(filename)?;
54 54 }
55 55 } else {
@@ -216,7 +216,6 b" impl<'a> DirsChildrenMultiset<'a> {"
216 216 #[cfg(test)]
217 217 mod tests {
218 218 use super::*;
219 use crate::StateMap;
220 219
221 220 #[test]
222 221 fn test_delete_path_path_not_found() {
@@ -341,9 +340,9 b' mod tests {'
341 340 };
342 341 assert_eq!(expected, new);
343 342
344 let new = DirsMultiset::from_dirstate(
345 StateMap::default().into_iter().map(Ok),
346 None,
343 let new = DirsMultiset::from_dirstate::<_, HgPathBuf>(
344 std::iter::empty(),
345 false,
347 346 )
348 347 .unwrap();
349 348 let expected = DirsMultiset {
@@ -372,12 +371,7 b' mod tests {'
372 371 let input_map = ["b/x", "a/c", "a/d/x"].iter().map(|f| {
373 372 Ok((
374 373 HgPathBuf::from_bytes(f.as_bytes()),
375 DirstateEntry {
376 state: EntryState::Normal,
377 mode: 0,
378 mtime: 0,
379 size: 0,
380 },
374 DirstateEntry::from_v1_data(EntryState::Normal, 0, 0, 0),
381 375 ))
382 376 });
383 377 let expected_inner = [("", 2), ("a", 2), ("b", 1), ("a/d", 1)]
@@ -385,7 +379,7 b' mod tests {'
385 379 .map(|(k, v)| (HgPathBuf::from_bytes(k.as_bytes()), *v))
386 380 .collect();
387 381
388 let new = DirsMultiset::from_dirstate(input_map, None).unwrap();
382 let new = DirsMultiset::from_dirstate(input_map, false).unwrap();
389 383 let expected = DirsMultiset {
390 384 inner: expected_inner,
391 385 };
@@ -404,24 +398,17 b' mod tests {'
404 398 .map(|(f, state)| {
405 399 Ok((
406 400 HgPathBuf::from_bytes(f.as_bytes()),
407 DirstateEntry {
408 state: *state,
409 mode: 0,
410 mtime: 0,
411 size: 0,
412 },
401 DirstateEntry::from_v1_data(*state, 0, 0, 0),
413 402 ))
414 403 });
415 404
416 405 // "a" incremented with "a/c" and "a/d/"
417 let expected_inner = [("", 1), ("a", 2)]
406 let expected_inner = [("", 1), ("a", 3)]
418 407 .iter()
419 408 .map(|(k, v)| (HgPathBuf::from_bytes(k.as_bytes()), *v))
420 409 .collect();
421 410
422 let new =
423 DirsMultiset::from_dirstate(input_map, Some(EntryState::Normal))
424 .unwrap();
411 let new = DirsMultiset::from_dirstate(input_map, true).unwrap();
425 412 let expected = DirsMultiset {
426 413 inner: expected_inner,
427 414 };
@@ -5,14 +5,11 b''
5 5
6 6 use crate::errors::HgError;
7 7 use crate::utils::hg_path::HgPath;
8 use crate::{
9 dirstate::{CopyMap, EntryState, RawEntry, StateMap},
10 DirstateEntry, DirstateParents,
11 };
8 use crate::{dirstate::EntryState, DirstateEntry, DirstateParents};
12 9 use byteorder::{BigEndian, WriteBytesExt};
13 use bytes_cast::BytesCast;
10 use bytes_cast::{unaligned, BytesCast};
14 11 use micro_timer::timed;
15 use std::convert::{TryFrom, TryInto};
12 use std::convert::TryFrom;
16 13
17 14 /// Parents are stored in the dirstate as byte hashes.
18 15 pub const PARENT_SIZE: usize = 20;
@@ -48,6 +45,16 b' pub fn parse_dirstate(contents: &[u8]) -'
48 45 Ok((parents, entries, copies))
49 46 }
50 47
48 #[derive(BytesCast)]
49 #[repr(C)]
50 struct RawEntry {
51 state: u8,
52 mode: unaligned::I32Be,
53 size: unaligned::I32Be,
54 mtime: unaligned::I32Be,
55 length: unaligned::I32Be,
56 }
57
51 58 pub fn parse_dirstate_entries<'a>(
52 59 mut contents: &'a [u8],
53 60 mut each_entry: impl FnMut(
@@ -63,12 +70,12 b" pub fn parse_dirstate_entries<'a>("
63 70 let (raw_entry, rest) = RawEntry::from_bytes(contents)
64 71 .map_err(|_| HgError::corrupted("Overflow in dirstate."))?;
65 72
66 let entry = DirstateEntry {
67 state: EntryState::try_from(raw_entry.state)?,
68 mode: raw_entry.mode.get(),
69 mtime: raw_entry.mtime.get(),
70 size: raw_entry.size.get(),
71 };
73 let entry = DirstateEntry::from_v1_data(
74 EntryState::try_from(raw_entry.state)?,
75 raw_entry.mode.get(),
76 raw_entry.size.get(),
77 raw_entry.mtime.get(),
78 );
72 79 let (paths, rest) =
73 80 u8::slice_from_bytes(rest, raw_entry.length.get() as usize)
74 81 .map_err(|_| HgError::corrupted("Overflow in dirstate."))?;
@@ -114,12 +121,13 b' pub fn pack_entry('
114 121 packed: &mut Vec<u8>,
115 122 ) {
116 123 let length = packed_filename_and_copy_source_size(filename, copy_source);
124 let (state, mode, size, mtime) = entry.v1_data();
117 125
118 126 // Unwrapping because `impl std::io::Write for Vec<u8>` never errors
119 packed.write_u8(entry.state.into()).unwrap();
120 packed.write_i32::<BigEndian>(entry.mode).unwrap();
121 packed.write_i32::<BigEndian>(entry.size).unwrap();
122 packed.write_i32::<BigEndian>(entry.mtime).unwrap();
127 packed.write_u8(state).unwrap();
128 packed.write_i32::<BigEndian>(mode).unwrap();
129 packed.write_i32::<BigEndian>(size).unwrap();
130 packed.write_i32::<BigEndian>(mtime).unwrap();
123 131 packed.write_i32::<BigEndian>(length as i32).unwrap();
124 132 packed.extend(filename.as_bytes());
125 133 if let Some(source) = copy_source {
@@ -127,363 +135,3 b' pub fn pack_entry('
127 135 packed.extend(source.as_bytes());
128 136 }
129 137 }
130
131 /// Seconds since the Unix epoch
132 pub struct Timestamp(pub i64);
133
134 impl DirstateEntry {
135 pub fn mtime_is_ambiguous(&self, now: i32) -> bool {
136 self.state == EntryState::Normal && self.mtime == now
137 }
138
139 pub fn clear_ambiguous_mtime(&mut self, now: i32) -> bool {
140 let ambiguous = self.mtime_is_ambiguous(now);
141 if ambiguous {
142 // The file was last modified "simultaneously" with the current
143 // write to dirstate (i.e. within the same second for file-
144 // systems with a granularity of 1 sec). This commonly happens
145 // for at least a couple of files on 'update'.
146 // The user could change the file without changing its size
147 // within the same second. Invalidate the file's mtime in
148 // dirstate, forcing future 'status' calls to compare the
149 // contents of the file if the size is the same. This prevents
150 // mistakenly treating such files as clean.
151 self.clear_mtime()
152 }
153 ambiguous
154 }
155
156 pub fn clear_mtime(&mut self) {
157 self.mtime = -1;
158 }
159 }
160
161 pub fn pack_dirstate(
162 state_map: &mut StateMap,
163 copy_map: &CopyMap,
164 parents: DirstateParents,
165 now: Timestamp,
166 ) -> Result<Vec<u8>, HgError> {
167 // TODO move away from i32 before 2038.
168 let now: i32 = now.0.try_into().expect("time overflow");
169
170 let expected_size: usize = state_map
171 .iter()
172 .map(|(filename, _)| {
173 packed_entry_size(filename, copy_map.get(filename).map(|p| &**p))
174 })
175 .sum();
176 let expected_size = expected_size + PARENT_SIZE * 2;
177
178 let mut packed = Vec::with_capacity(expected_size);
179
180 packed.extend(parents.p1.as_bytes());
181 packed.extend(parents.p2.as_bytes());
182
183 for (filename, entry) in state_map.iter_mut() {
184 entry.clear_ambiguous_mtime(now);
185 pack_entry(
186 filename,
187 entry,
188 copy_map.get(filename).map(|p| &**p),
189 &mut packed,
190 )
191 }
192
193 if packed.len() != expected_size {
194 return Err(HgError::CorruptedRepository(format!(
195 "bad dirstate size: {} != {}",
196 expected_size,
197 packed.len()
198 )));
199 }
200
201 Ok(packed)
202 }
203
204 #[cfg(test)]
205 mod tests {
206 use super::*;
207 use crate::{utils::hg_path::HgPathBuf, FastHashMap};
208 use pretty_assertions::assert_eq;
209
210 #[test]
211 fn test_pack_dirstate_empty() {
212 let mut state_map = StateMap::default();
213 let copymap = FastHashMap::default();
214 let parents = DirstateParents {
215 p1: b"12345678910111213141".into(),
216 p2: b"00000000000000000000".into(),
217 };
218 let now = Timestamp(15000000);
219 let expected = b"1234567891011121314100000000000000000000".to_vec();
220
221 assert_eq!(
222 expected,
223 pack_dirstate(&mut state_map, &copymap, parents, now).unwrap()
224 );
225
226 assert!(state_map.is_empty())
227 }
228 #[test]
229 fn test_pack_dirstate_one_entry() {
230 let expected_state_map: StateMap = [(
231 HgPathBuf::from_bytes(b"f1"),
232 DirstateEntry {
233 state: EntryState::Normal,
234 mode: 0o644,
235 size: 0,
236 mtime: 791231220,
237 },
238 )]
239 .iter()
240 .cloned()
241 .collect();
242 let mut state_map = expected_state_map.clone();
243
244 let copymap = FastHashMap::default();
245 let parents = DirstateParents {
246 p1: b"12345678910111213141".into(),
247 p2: b"00000000000000000000".into(),
248 };
249 let now = Timestamp(15000000);
250 let expected = [
251 49, 50, 51, 52, 53, 54, 55, 56, 57, 49, 48, 49, 49, 49, 50, 49,
252 51, 49, 52, 49, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
253 48, 48, 48, 48, 48, 48, 48, 48, 110, 0, 0, 1, 164, 0, 0, 0, 0, 47,
254 41, 58, 244, 0, 0, 0, 2, 102, 49,
255 ]
256 .to_vec();
257
258 assert_eq!(
259 expected,
260 pack_dirstate(&mut state_map, &copymap, parents, now).unwrap()
261 );
262
263 assert_eq!(expected_state_map, state_map);
264 }
265 #[test]
266 fn test_pack_dirstate_one_entry_with_copy() {
267 let expected_state_map: StateMap = [(
268 HgPathBuf::from_bytes(b"f1"),
269 DirstateEntry {
270 state: EntryState::Normal,
271 mode: 0o644,
272 size: 0,
273 mtime: 791231220,
274 },
275 )]
276 .iter()
277 .cloned()
278 .collect();
279 let mut state_map = expected_state_map.clone();
280 let mut copymap = FastHashMap::default();
281 copymap.insert(
282 HgPathBuf::from_bytes(b"f1"),
283 HgPathBuf::from_bytes(b"copyname"),
284 );
285 let parents = DirstateParents {
286 p1: b"12345678910111213141".into(),
287 p2: b"00000000000000000000".into(),
288 };
289 let now = Timestamp(15000000);
290 let expected = [
291 49, 50, 51, 52, 53, 54, 55, 56, 57, 49, 48, 49, 49, 49, 50, 49,
292 51, 49, 52, 49, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
293 48, 48, 48, 48, 48, 48, 48, 48, 110, 0, 0, 1, 164, 0, 0, 0, 0, 47,
294 41, 58, 244, 0, 0, 0, 11, 102, 49, 0, 99, 111, 112, 121, 110, 97,
295 109, 101,
296 ]
297 .to_vec();
298
299 assert_eq!(
300 expected,
301 pack_dirstate(&mut state_map, &copymap, parents, now).unwrap()
302 );
303 assert_eq!(expected_state_map, state_map);
304 }
305
306 #[test]
307 fn test_parse_pack_one_entry_with_copy() {
308 let mut state_map: StateMap = [(
309 HgPathBuf::from_bytes(b"f1"),
310 DirstateEntry {
311 state: EntryState::Normal,
312 mode: 0o644,
313 size: 0,
314 mtime: 791231220,
315 },
316 )]
317 .iter()
318 .cloned()
319 .collect();
320 let mut copymap = FastHashMap::default();
321 copymap.insert(
322 HgPathBuf::from_bytes(b"f1"),
323 HgPathBuf::from_bytes(b"copyname"),
324 );
325 let parents = DirstateParents {
326 p1: b"12345678910111213141".into(),
327 p2: b"00000000000000000000".into(),
328 };
329 let now = Timestamp(15000000);
330 let result =
331 pack_dirstate(&mut state_map, &copymap, parents.clone(), now)
332 .unwrap();
333
334 let (new_parents, entries, copies) =
335 parse_dirstate(result.as_slice()).unwrap();
336 let new_state_map: StateMap = entries
337 .into_iter()
338 .map(|(path, entry)| (path.to_owned(), entry))
339 .collect();
340 let new_copy_map: CopyMap = copies
341 .into_iter()
342 .map(|(path, copy)| (path.to_owned(), copy.to_owned()))
343 .collect();
344
345 assert_eq!(
346 (&parents, state_map, copymap),
347 (new_parents, new_state_map, new_copy_map)
348 )
349 }
350
351 #[test]
352 fn test_parse_pack_multiple_entries_with_copy() {
353 let mut state_map: StateMap = [
354 (
355 HgPathBuf::from_bytes(b"f1"),
356 DirstateEntry {
357 state: EntryState::Normal,
358 mode: 0o644,
359 size: 0,
360 mtime: 791231220,
361 },
362 ),
363 (
364 HgPathBuf::from_bytes(b"f2"),
365 DirstateEntry {
366 state: EntryState::Merged,
367 mode: 0o777,
368 size: 1000,
369 mtime: 791231220,
370 },
371 ),
372 (
373 HgPathBuf::from_bytes(b"f3"),
374 DirstateEntry {
375 state: EntryState::Removed,
376 mode: 0o644,
377 size: 234553,
378 mtime: 791231220,
379 },
380 ),
381 (
382 HgPathBuf::from_bytes(b"f4\xF6"),
383 DirstateEntry {
384 state: EntryState::Added,
385 mode: 0o644,
386 size: -1,
387 mtime: -1,
388 },
389 ),
390 ]
391 .iter()
392 .cloned()
393 .collect();
394 let mut copymap = FastHashMap::default();
395 copymap.insert(
396 HgPathBuf::from_bytes(b"f1"),
397 HgPathBuf::from_bytes(b"copyname"),
398 );
399 copymap.insert(
400 HgPathBuf::from_bytes(b"f4\xF6"),
401 HgPathBuf::from_bytes(b"copyname2"),
402 );
403 let parents = DirstateParents {
404 p1: b"12345678910111213141".into(),
405 p2: b"00000000000000000000".into(),
406 };
407 let now = Timestamp(15000000);
408 let result =
409 pack_dirstate(&mut state_map, &copymap, parents.clone(), now)
410 .unwrap();
411
412 let (new_parents, entries, copies) =
413 parse_dirstate(result.as_slice()).unwrap();
414 let new_state_map: StateMap = entries
415 .into_iter()
416 .map(|(path, entry)| (path.to_owned(), entry))
417 .collect();
418 let new_copy_map: CopyMap = copies
419 .into_iter()
420 .map(|(path, copy)| (path.to_owned(), copy.to_owned()))
421 .collect();
422
423 assert_eq!(
424 (&parents, state_map, copymap),
425 (new_parents, new_state_map, new_copy_map)
426 )
427 }
428
429 #[test]
430 /// https://www.mercurial-scm.org/repo/hg/rev/af3f26b6bba4
431 fn test_parse_pack_one_entry_with_copy_and_time_conflict() {
432 let mut state_map: StateMap = [(
433 HgPathBuf::from_bytes(b"f1"),
434 DirstateEntry {
435 state: EntryState::Normal,
436 mode: 0o644,
437 size: 0,
438 mtime: 15000000,
439 },
440 )]
441 .iter()
442 .cloned()
443 .collect();
444 let mut copymap = FastHashMap::default();
445 copymap.insert(
446 HgPathBuf::from_bytes(b"f1"),
447 HgPathBuf::from_bytes(b"copyname"),
448 );
449 let parents = DirstateParents {
450 p1: b"12345678910111213141".into(),
451 p2: b"00000000000000000000".into(),
452 };
453 let now = Timestamp(15000000);
454 let result =
455 pack_dirstate(&mut state_map, &copymap, parents.clone(), now)
456 .unwrap();
457
458 let (new_parents, entries, copies) =
459 parse_dirstate(result.as_slice()).unwrap();
460 let new_state_map: StateMap = entries
461 .into_iter()
462 .map(|(path, entry)| (path.to_owned(), entry))
463 .collect();
464 let new_copy_map: CopyMap = copies
465 .into_iter()
466 .map(|(path, copy)| (path.to_owned(), copy.to_owned()))
467 .collect();
468
469 assert_eq!(
470 (
471 &parents,
472 [(
473 HgPathBuf::from_bytes(b"f1"),
474 DirstateEntry {
475 state: EntryState::Normal,
476 mode: 0o644,
477 size: 0,
478 mtime: -1
479 }
480 )]
481 .iter()
482 .cloned()
483 .collect::<StateMap>(),
484 copymap,
485 ),
486 (new_parents, new_state_map, new_copy_map)
487 )
488 }
489 }
This diff has been collapsed as it changes many lines, (812 lines changed) Show them Hide them
@@ -10,33 +10,14 b''
10 10 //! and will only be triggered in narrow cases.
11 11
12 12 use crate::dirstate_tree::on_disk::DirstateV2ParseError;
13 use crate::utils::path_auditor::PathAuditor;
13
14 14 use crate::{
15 dirstate::SIZE_FROM_OTHER_PARENT,
16 filepatterns::PatternFileWarning,
17 matchers::{get_ignore_function, Matcher, VisitChildrenSet},
18 utils::{
19 files::{find_dirs, HgMetadata},
20 hg_path::{
21 hg_path_to_path_buf, os_string_to_hg_path_buf, HgPath, HgPathBuf,
22 HgPathError,
23 },
24 },
25 CopyMap, DirstateEntry, DirstateMap, EntryState, FastHashMap,
15 dirstate::TruncatedTimestamp,
16 utils::hg_path::{HgPath, HgPathError},
26 17 PatternError,
27 18 };
28 use lazy_static::lazy_static;
29 use micro_timer::timed;
30 use rayon::prelude::*;
31 use std::{
32 borrow::Cow,
33 collections::HashSet,
34 fmt,
35 fs::{read_dir, DirEntry},
36 io::ErrorKind,
37 ops::Deref,
38 path::{Path, PathBuf},
39 };
19
20 use std::{borrow::Cow, fmt};
40 21
41 22 /// Wrong type of file from a `BadMatch`
42 23 /// Note: a lot of those don't exist on all platforms.
@@ -70,32 +51,6 b' pub enum BadMatch {'
70 51 BadType(BadType),
71 52 }
72 53
73 /// Enum used to dispatch new status entries into the right collections.
74 /// Is similar to `crate::EntryState`, but represents the transient state of
75 /// entries during the lifetime of a command.
76 #[derive(Debug, Copy, Clone)]
77 pub enum Dispatch {
78 Unsure,
79 Modified,
80 Added,
81 Removed,
82 Deleted,
83 Clean,
84 Unknown,
85 Ignored,
86 /// Empty dispatch, the file is not worth listing
87 None,
88 /// Was explicitly matched but cannot be found/accessed
89 Bad(BadMatch),
90 Directory {
91 /// True if the directory used to be a file in the dmap so we can say
92 /// that it's been removed.
93 was_file: bool,
94 },
95 }
96
97 type IoResult<T> = std::io::Result<T>;
98
99 54 /// `Box<dyn Trait>` is syntactic sugar for `Box<dyn Trait + 'static>`, so add
100 55 /// an explicit lifetime here to not fight `'static` bounds "out of nowhere".
101 56 pub type IgnoreFnType<'a> =
@@ -105,147 +60,12 b" pub type IgnoreFnType<'a> ="
105 60 /// the dirstate/explicit) paths, this comes up a lot.
106 61 pub type HgPathCow<'a> = Cow<'a, HgPath>;
107 62
108 /// A path with its computed ``Dispatch`` information
109 type DispatchedPath<'a> = (HgPathCow<'a>, Dispatch);
110
111 /// The conversion from `HgPath` to a real fs path failed.
112 /// `22` is the error code for "Invalid argument"
113 const INVALID_PATH_DISPATCH: Dispatch = Dispatch::Bad(BadMatch::OsError(22));
114
115 /// Dates and times that are outside the 31-bit signed range are compared
116 /// modulo 2^31. This should prevent hg from behaving badly with very large
117 /// files or corrupt dates while still having a high probability of detecting
118 /// changes. (issue2608)
119 /// TODO I haven't found a way of having `b` be `Into<i32>`, since `From<u64>`
120 /// is not defined for `i32`, and there is no `As` trait. This forces the
121 /// caller to cast `b` as `i32`.
122 fn mod_compare(a: i32, b: i32) -> bool {
123 a & i32::max_value() != b & i32::max_value()
124 }
125
126 /// Return a sorted list containing information about the entries
127 /// in the directory.
128 ///
129 /// * `skip_dot_hg` - Return an empty vec if `path` contains a `.hg` directory
130 fn list_directory(
131 path: impl AsRef<Path>,
132 skip_dot_hg: bool,
133 ) -> std::io::Result<Vec<(HgPathBuf, DirEntry)>> {
134 let mut results = vec![];
135 let entries = read_dir(path.as_ref())?;
136
137 for entry in entries {
138 let entry = entry?;
139 let filename = os_string_to_hg_path_buf(entry.file_name())?;
140 let file_type = entry.file_type()?;
141 if skip_dot_hg && filename.as_bytes() == b".hg" && file_type.is_dir() {
142 return Ok(vec![]);
143 } else {
144 results.push((filename, entry))
145 }
146 }
147
148 results.sort_unstable_by_key(|e| e.0.clone());
149 Ok(results)
150 }
151
152 /// The file corresponding to the dirstate entry was found on the filesystem.
153 fn dispatch_found(
154 filename: impl AsRef<HgPath>,
155 entry: DirstateEntry,
156 metadata: HgMetadata,
157 copy_map: &CopyMap,
158 options: StatusOptions,
159 ) -> Dispatch {
160 let DirstateEntry {
161 state,
162 mode,
163 mtime,
164 size,
165 } = entry;
166
167 let HgMetadata {
168 st_mode,
169 st_size,
170 st_mtime,
171 ..
172 } = metadata;
173
174 match state {
175 EntryState::Normal => {
176 let size_changed = mod_compare(size, st_size as i32);
177 let mode_changed =
178 (mode ^ st_mode as i32) & 0o100 != 0o000 && options.check_exec;
179 let metadata_changed = size >= 0 && (size_changed || mode_changed);
180 let other_parent = size == SIZE_FROM_OTHER_PARENT;
181
182 if metadata_changed
183 || other_parent
184 || copy_map.contains_key(filename.as_ref())
185 {
186 if metadata.is_symlink() && size_changed {
187 // issue6456: Size returned may be longer due to encryption
188 // on EXT-4 fscrypt. TODO maybe only do it on EXT4?
189 Dispatch::Unsure
190 } else {
191 Dispatch::Modified
192 }
193 } else if mod_compare(mtime, st_mtime as i32)
194 || st_mtime == options.last_normal_time
195 {
196 // the file may have just been marked as normal and
197 // it may have changed in the same second without
198 // changing its size. This can happen if we quickly
199 // do multiple commits. Force lookup, so we don't
200 // miss such a racy file change.
201 Dispatch::Unsure
202 } else if options.list_clean {
203 Dispatch::Clean
204 } else {
205 Dispatch::None
206 }
207 }
208 EntryState::Merged => Dispatch::Modified,
209 EntryState::Added => Dispatch::Added,
210 EntryState::Removed => Dispatch::Removed,
211 EntryState::Unknown => Dispatch::Unknown,
212 }
213 }
214
215 /// The file corresponding to this Dirstate entry is missing.
216 fn dispatch_missing(state: EntryState) -> Dispatch {
217 match state {
218 // File was removed from the filesystem during commands
219 EntryState::Normal | EntryState::Merged | EntryState::Added => {
220 Dispatch::Deleted
221 }
222 // File was removed, everything is normal
223 EntryState::Removed => Dispatch::Removed,
224 // File is unknown to Mercurial, everything is normal
225 EntryState::Unknown => Dispatch::Unknown,
226 }
227 }
228
229 fn dispatch_os_error(e: &std::io::Error) -> Dispatch {
230 Dispatch::Bad(BadMatch::OsError(
231 e.raw_os_error().expect("expected real OS error"),
232 ))
233 }
234
235 lazy_static! {
236 static ref DEFAULT_WORK: HashSet<&'static HgPath> = {
237 let mut h = HashSet::new();
238 h.insert(HgPath::new(b""));
239 h
240 };
241 }
242
243 63 #[derive(Debug, Copy, Clone)]
244 64 pub struct StatusOptions {
245 65 /// Remember the most recent modification timeslot for status, to make
246 66 /// sure we won't miss future size-preserving file content modifications
247 67 /// that happen within the same timeslot.
248 pub last_normal_time: i64,
68 pub last_normal_time: TruncatedTimestamp,
249 69 /// Whether we are on a filesystem with UNIX-like exec flags
250 70 pub check_exec: bool,
251 71 pub list_clean: bool,
@@ -325,623 +145,3 b' impl fmt::Display for StatusError {'
325 145 }
326 146 }
327 147 }
328
329 /// Gives information about which files are changed in the working directory
330 /// and how, compared to the revision we're based on
331 pub struct Status<'a, M: ?Sized + Matcher + Sync> {
332 dmap: &'a DirstateMap,
333 pub(crate) matcher: &'a M,
334 root_dir: PathBuf,
335 pub(crate) options: StatusOptions,
336 ignore_fn: IgnoreFnType<'a>,
337 }
338
339 impl<'a, M> Status<'a, M>
340 where
341 M: ?Sized + Matcher + Sync,
342 {
343 pub fn new(
344 dmap: &'a DirstateMap,
345 matcher: &'a M,
346 root_dir: PathBuf,
347 ignore_files: Vec<PathBuf>,
348 options: StatusOptions,
349 ) -> StatusResult<(Self, Vec<PatternFileWarning>)> {
350 // Needs to outlive `dir_ignore_fn` since it's captured.
351
352 let (ignore_fn, warnings): (IgnoreFnType, _) =
353 if options.list_ignored || options.list_unknown {
354 get_ignore_function(ignore_files, &root_dir, &mut |_| {})?
355 } else {
356 (Box::new(|&_| true), vec![])
357 };
358
359 Ok((
360 Self {
361 dmap,
362 matcher,
363 root_dir,
364 options,
365 ignore_fn,
366 },
367 warnings,
368 ))
369 }
370
371 /// Is the path ignored?
372 pub fn is_ignored(&self, path: impl AsRef<HgPath>) -> bool {
373 (self.ignore_fn)(path.as_ref())
374 }
375
376 /// Is the path or one of its ancestors ignored?
377 pub fn dir_ignore(&self, dir: impl AsRef<HgPath>) -> bool {
378 // Only involve ignore mechanism if we're listing unknowns or ignored.
379 if self.options.list_ignored || self.options.list_unknown {
380 if self.is_ignored(&dir) {
381 true
382 } else {
383 for p in find_dirs(dir.as_ref()) {
384 if self.is_ignored(p) {
385 return true;
386 }
387 }
388 false
389 }
390 } else {
391 true
392 }
393 }
394
395 /// Get stat data about the files explicitly specified by the matcher.
396 /// Returns a tuple of the directories that need to be traversed and the
397 /// files with their corresponding `Dispatch`.
398 /// TODO subrepos
399 #[timed]
400 pub fn walk_explicit(
401 &self,
402 traversed_sender: crossbeam_channel::Sender<HgPathBuf>,
403 ) -> (Vec<DispatchedPath<'a>>, Vec<DispatchedPath<'a>>) {
404 self.matcher
405 .file_set()
406 .unwrap_or(&DEFAULT_WORK)
407 .par_iter()
408 .flat_map(|&filename| -> Option<_> {
409 // TODO normalization
410 let normalized = filename;
411
412 let buf = match hg_path_to_path_buf(normalized) {
413 Ok(x) => x,
414 Err(_) => {
415 return Some((
416 Cow::Borrowed(normalized),
417 INVALID_PATH_DISPATCH,
418 ))
419 }
420 };
421 let target = self.root_dir.join(buf);
422 let st = target.symlink_metadata();
423 let in_dmap = self.dmap.get(normalized);
424 match st {
425 Ok(meta) => {
426 let file_type = meta.file_type();
427 return if file_type.is_file() || file_type.is_symlink()
428 {
429 if let Some(entry) = in_dmap {
430 return Some((
431 Cow::Borrowed(normalized),
432 dispatch_found(
433 &normalized,
434 *entry,
435 HgMetadata::from_metadata(meta),
436 &self.dmap.copy_map,
437 self.options,
438 ),
439 ));
440 }
441 Some((
442 Cow::Borrowed(normalized),
443 Dispatch::Unknown,
444 ))
445 } else if file_type.is_dir() {
446 if self.options.collect_traversed_dirs {
447 traversed_sender
448 .send(normalized.to_owned())
449 .expect("receiver should outlive sender");
450 }
451 Some((
452 Cow::Borrowed(normalized),
453 Dispatch::Directory {
454 was_file: in_dmap.is_some(),
455 },
456 ))
457 } else {
458 Some((
459 Cow::Borrowed(normalized),
460 Dispatch::Bad(BadMatch::BadType(
461 // TODO do more than unknown
462 // Support for all `BadType` variant
463 // varies greatly between platforms.
464 // So far, no tests check the type and
465 // this should be good enough for most
466 // users.
467 BadType::Unknown,
468 )),
469 ))
470 };
471 }
472 Err(_) => {
473 if let Some(entry) = in_dmap {
474 return Some((
475 Cow::Borrowed(normalized),
476 dispatch_missing(entry.state),
477 ));
478 }
479 }
480 };
481 None
482 })
483 .partition(|(_, dispatch)| match dispatch {
484 Dispatch::Directory { .. } => true,
485 _ => false,
486 })
487 }
488
489 /// Walk the working directory recursively to look for changes compared to
490 /// the current `DirstateMap`.
491 ///
492 /// This takes a mutable reference to the results to account for the
493 /// `extend` in timings
494 #[timed]
495 pub fn traverse(
496 &self,
497 path: impl AsRef<HgPath>,
498 old_results: &FastHashMap<HgPathCow<'a>, Dispatch>,
499 results: &mut Vec<DispatchedPath<'a>>,
500 traversed_sender: crossbeam_channel::Sender<HgPathBuf>,
501 ) {
502 // The traversal is done in parallel, so use a channel to gather
503 // entries. `crossbeam_channel::Sender` is `Sync`, while `mpsc::Sender`
504 // is not.
505 let (files_transmitter, files_receiver) =
506 crossbeam_channel::unbounded();
507
508 self.traverse_dir(
509 &files_transmitter,
510 path,
511 &old_results,
512 traversed_sender,
513 );
514
515 // Disconnect the channel so the receiver stops waiting
516 drop(files_transmitter);
517
518 let new_results = files_receiver
519 .into_iter()
520 .par_bridge()
521 .map(|(f, d)| (Cow::Owned(f), d));
522
523 results.par_extend(new_results);
524 }
525
526 /// Dispatch a single entry (file, folder, symlink...) found during
527 /// `traverse`. If the entry is a folder that needs to be traversed, it
528 /// will be handled in a separate thread.
529 fn handle_traversed_entry<'b>(
530 &'a self,
531 scope: &rayon::Scope<'b>,
532 files_sender: &'b crossbeam_channel::Sender<(HgPathBuf, Dispatch)>,
533 old_results: &'a FastHashMap<Cow<HgPath>, Dispatch>,
534 filename: HgPathBuf,
535 dir_entry: DirEntry,
536 traversed_sender: crossbeam_channel::Sender<HgPathBuf>,
537 ) -> IoResult<()>
538 where
539 'a: 'b,
540 {
541 let file_type = dir_entry.file_type()?;
542 let entry_option = self.dmap.get(&filename);
543
544 if filename.as_bytes() == b".hg" {
545 // Could be a directory or a symlink
546 return Ok(());
547 }
548
549 if file_type.is_dir() {
550 self.handle_traversed_dir(
551 scope,
552 files_sender,
553 old_results,
554 entry_option,
555 filename,
556 traversed_sender,
557 );
558 } else if file_type.is_file() || file_type.is_symlink() {
559 if let Some(entry) = entry_option {
560 if self.matcher.matches_everything()
561 || self.matcher.matches(&filename)
562 {
563 let metadata = dir_entry.metadata()?;
564 files_sender
565 .send((
566 filename.to_owned(),
567 dispatch_found(
568 &filename,
569 *entry,
570 HgMetadata::from_metadata(metadata),
571 &self.dmap.copy_map,
572 self.options,
573 ),
574 ))
575 .unwrap();
576 }
577 } else if (self.matcher.matches_everything()
578 || self.matcher.matches(&filename))
579 && !self.is_ignored(&filename)
580 {
581 if (self.options.list_ignored
582 || self.matcher.exact_match(&filename))
583 && self.dir_ignore(&filename)
584 {
585 if self.options.list_ignored {
586 files_sender
587 .send((filename.to_owned(), Dispatch::Ignored))
588 .unwrap();
589 }
590 } else if self.options.list_unknown {
591 files_sender
592 .send((filename.to_owned(), Dispatch::Unknown))
593 .unwrap();
594 }
595 } else if self.is_ignored(&filename) && self.options.list_ignored {
596 if self.matcher.matches(&filename) {
597 files_sender
598 .send((filename.to_owned(), Dispatch::Ignored))
599 .unwrap();
600 }
601 }
602 } else if let Some(entry) = entry_option {
603 // Used to be a file or a folder, now something else.
604 if self.matcher.matches_everything()
605 || self.matcher.matches(&filename)
606 {
607 files_sender
608 .send((filename.to_owned(), dispatch_missing(entry.state)))
609 .unwrap();
610 }
611 }
612
613 Ok(())
614 }
615
616 /// A directory was found in the filesystem and needs to be traversed
617 fn handle_traversed_dir<'b>(
618 &'a self,
619 scope: &rayon::Scope<'b>,
620 files_sender: &'b crossbeam_channel::Sender<(HgPathBuf, Dispatch)>,
621 old_results: &'a FastHashMap<Cow<HgPath>, Dispatch>,
622 entry_option: Option<&'a DirstateEntry>,
623 directory: HgPathBuf,
624 traversed_sender: crossbeam_channel::Sender<HgPathBuf>,
625 ) where
626 'a: 'b,
627 {
628 scope.spawn(move |_| {
629 // Nested `if` until `rust-lang/rust#53668` is stable
630 if let Some(entry) = entry_option {
631 // Used to be a file, is now a folder
632 if self.matcher.matches_everything()
633 || self.matcher.matches(&directory)
634 {
635 files_sender
636 .send((
637 directory.to_owned(),
638 dispatch_missing(entry.state),
639 ))
640 .unwrap();
641 }
642 }
643 // Do we need to traverse it?
644 if !self.is_ignored(&directory) || self.options.list_ignored {
645 self.traverse_dir(
646 files_sender,
647 directory,
648 &old_results,
649 traversed_sender,
650 )
651 }
652 });
653 }
654
655 /// Decides whether the directory needs to be listed, and if so handles the
656 /// entries in a separate thread.
657 fn traverse_dir(
658 &self,
659 files_sender: &crossbeam_channel::Sender<(HgPathBuf, Dispatch)>,
660 directory: impl AsRef<HgPath>,
661 old_results: &FastHashMap<Cow<HgPath>, Dispatch>,
662 traversed_sender: crossbeam_channel::Sender<HgPathBuf>,
663 ) {
664 let directory = directory.as_ref();
665
666 if self.options.collect_traversed_dirs {
667 traversed_sender
668 .send(directory.to_owned())
669 .expect("receiver should outlive sender");
670 }
671
672 let visit_entries = match self.matcher.visit_children_set(directory) {
673 VisitChildrenSet::Empty => return,
674 VisitChildrenSet::This | VisitChildrenSet::Recursive => None,
675 VisitChildrenSet::Set(set) => Some(set),
676 };
677 let buf = match hg_path_to_path_buf(directory) {
678 Ok(b) => b,
679 Err(_) => {
680 files_sender
681 .send((directory.to_owned(), INVALID_PATH_DISPATCH))
682 .expect("receiver should outlive sender");
683 return;
684 }
685 };
686 let dir_path = self.root_dir.join(buf);
687
688 let skip_dot_hg = !directory.as_bytes().is_empty();
689 let entries = match list_directory(dir_path, skip_dot_hg) {
690 Err(e) => {
691 files_sender
692 .send((directory.to_owned(), dispatch_os_error(&e)))
693 .expect("receiver should outlive sender");
694 return;
695 }
696 Ok(entries) => entries,
697 };
698
699 rayon::scope(|scope| {
700 for (filename, dir_entry) in entries {
701 if let Some(ref set) = visit_entries {
702 if !set.contains(filename.deref()) {
703 continue;
704 }
705 }
706 // TODO normalize
707 let filename = if directory.is_empty() {
708 filename.to_owned()
709 } else {
710 directory.join(&filename)
711 };
712
713 if !old_results.contains_key(filename.deref()) {
714 match self.handle_traversed_entry(
715 scope,
716 files_sender,
717 old_results,
718 filename,
719 dir_entry,
720 traversed_sender.clone(),
721 ) {
722 Err(e) => {
723 files_sender
724 .send((
725 directory.to_owned(),
726 dispatch_os_error(&e),
727 ))
728 .expect("receiver should outlive sender");
729 }
730 Ok(_) => {}
731 }
732 }
733 }
734 })
735 }
736
    /// Add the files in the dirstate to the results.
    ///
    /// This takes a mutable reference to the results to account for the
    /// `extend` in timings
    #[timed]
    pub fn extend_from_dmap(&self, results: &mut Vec<DispatchedPath<'a>>) {
        results.par_extend(
            self.dmap
                .par_iter()
                // Only dispatch paths that the matcher selects.
                .filter(|(path, _)| self.matcher.matches(path))
                .map(move |(filename, entry)| {
                    let filename: &HgPath = filename;
                    // A dirstate path that cannot be converted to a
                    // platform path is reported as invalid rather than
                    // looked up on disk.
                    let filename_as_path = match hg_path_to_path_buf(filename)
                    {
                        Ok(f) => f,
                        Err(_) => {
                            return (
                                Cow::Borrowed(filename),
                                INVALID_PATH_DISPATCH,
                            )
                        }
                    };
                    // `symlink_metadata` inspects the link itself, not its
                    // target, so symlinks tracked in the dirstate are seen.
                    let meta = self
                        .root_dir
                        .join(filename_as_path)
                        .symlink_metadata();
                    match meta {
                        // NOTE: this guard arm must stay before the plain
                        // `Ok(m)` arm: something exists on disk but is
                        // neither a regular file nor a symlink, so the
                        // dirstate entry is treated as missing.
                        Ok(m)
                            if !(m.file_type().is_file()
                                || m.file_type().is_symlink()) =>
                        {
                            (
                                Cow::Borrowed(filename),
                                dispatch_missing(entry.state),
                            )
                        }
                        // Regular file or symlink: compare fresh metadata
                        // against the dirstate entry.
                        Ok(m) => (
                            Cow::Borrowed(filename),
                            dispatch_found(
                                filename,
                                *entry,
                                HgMetadata::from_metadata(m),
                                &self.dmap.copy_map,
                                self.options,
                            ),
                        ),
                        Err(e)
                            if e.kind() == ErrorKind::NotFound
                                || e.raw_os_error() == Some(20) =>
                        {
                            // Rust does not yet have an `ErrorKind` for
                            // `NotADirectory` (errno 20)
                            // It happens if the dirstate contains `foo/bar`
                            // and foo is not a
                            // directory
                            (
                                Cow::Borrowed(filename),
                                dispatch_missing(entry.state),
                            )
                        }
                        // Any other I/O error is reported as-is.
                        Err(e) => {
                            (Cow::Borrowed(filename), dispatch_os_error(&e))
                        }
                    }
                }),
        );
    }
804
    /// Checks all files that are in the dirstate but were not found during the
    /// working directory traversal. This means that the rest must
    /// be either ignored, under a symlink or under a new nested repo.
    ///
    /// This takes a mutable reference to the results to account for the
    /// `extend` in timings
    #[timed]
    pub fn handle_unknowns(&self, results: &mut Vec<DispatchedPath<'a>>) {
        let to_visit: Vec<(&HgPath, &DirstateEntry)> =
            if results.is_empty() && self.matcher.matches_everything() {
                // Nothing was dispatched yet and everything matches:
                // every dirstate entry still needs to be checked.
                self.dmap.iter().map(|(f, e)| (f.deref(), e)).collect()
            } else {
                // Only convert to a hashmap if needed.
                let old_results: FastHashMap<_, _> =
                    results.iter().cloned().collect();
                // Keep only matching dirstate entries that were not
                // already dispatched during the traversal.
                self.dmap
                    .iter()
                    .filter_map(move |(f, e)| {
                        if !old_results.contains_key(f.deref())
                            && self.matcher.matches(f)
                        {
                            Some((f.deref(), e))
                        } else {
                            None
                        }
                    })
                    .collect()
            };

        let path_auditor = PathAuditor::new(&self.root_dir);

        let new_results = to_visit.into_par_iter().filter_map(
            |(filename, entry)| -> Option<_> {
                // Report ignored items in the dmap as long as they are not
                // under a symlink directory.
                if path_auditor.check(filename) {
                    // TODO normalize for case-insensitive filesystems
                    let buf = match hg_path_to_path_buf(filename) {
                        Ok(x) => x,
                        Err(_) => {
                            // Path cannot be represented on this platform.
                            return Some((
                                Cow::Owned(filename.to_owned()),
                                INVALID_PATH_DISPATCH,
                            ));
                        }
                    };
                    Some((
                        Cow::Owned(filename.to_owned()),
                        match self.root_dir.join(&buf).symlink_metadata() {
                            // File was just ignored, no links, and exists
                            Ok(meta) => {
                                let metadata = HgMetadata::from_metadata(meta);
                                dispatch_found(
                                    filename,
                                    *entry,
                                    metadata,
                                    &self.dmap.copy_map,
                                    self.options,
                                )
                            }
                            // File doesn't exist
                            Err(_) => dispatch_missing(entry.state),
                        },
                    ))
                } else {
                    // It's either missing or under a symlink directory which
                    // we, in this case, report as missing.
                    Some((
                        Cow::Owned(filename.to_owned()),
                        dispatch_missing(entry.state),
                    ))
                }
            },
        );

        results.par_extend(new_results);
    }
882 }
883
884 #[timed]
885 pub fn build_response<'a>(
886 results: impl IntoIterator<Item = DispatchedPath<'a>>,
887 traversed: Vec<HgPathCow<'a>>,
888 ) -> DirstateStatus<'a> {
889 let mut unsure = vec![];
890 let mut modified = vec![];
891 let mut added = vec![];
892 let mut removed = vec![];
893 let mut deleted = vec![];
894 let mut clean = vec![];
895 let mut ignored = vec![];
896 let mut unknown = vec![];
897 let mut bad = vec![];
898
899 for (filename, dispatch) in results.into_iter() {
900 match dispatch {
901 Dispatch::Unknown => unknown.push(filename),
902 Dispatch::Unsure => unsure.push(filename),
903 Dispatch::Modified => modified.push(filename),
904 Dispatch::Added => added.push(filename),
905 Dispatch::Removed => removed.push(filename),
906 Dispatch::Deleted => deleted.push(filename),
907 Dispatch::Clean => clean.push(filename),
908 Dispatch::Ignored => ignored.push(filename),
909 Dispatch::None => {}
910 Dispatch::Bad(reason) => bad.push((filename, reason)),
911 Dispatch::Directory { .. } => {}
912 }
913 }
914
915 DirstateStatus {
916 modified,
917 added,
918 removed,
919 deleted,
920 clean,
921 ignored,
922 unknown,
923 bad,
924 unsure,
925 traversed,
926 dirty: false,
927 }
928 }
929
930 /// Get the status of files in the working directory.
931 ///
932 /// This is the current entry-point for `hg-core` and is realistically unusable
933 /// outside of a Python context because its arguments need to provide a lot of
934 /// information that will not be necessary in the future.
935 #[timed]
936 pub fn status<'a>(
937 dmap: &'a DirstateMap,
938 matcher: &'a (dyn Matcher + Sync),
939 root_dir: PathBuf,
940 ignore_files: Vec<PathBuf>,
941 options: StatusOptions,
942 ) -> StatusResult<(DirstateStatus<'a>, Vec<PatternFileWarning>)> {
943 let (status, warnings) =
944 Status::new(dmap, matcher, root_dir, ignore_files, options)?;
945
946 Ok((status.run()?, warnings))
947 }
@@ -1,5 +1,5 b''
1 1 pub mod dirstate_map;
2 pub mod dispatch;
3 2 pub mod on_disk;
3 pub mod owning;
4 4 pub mod path_with_basename;
5 5 pub mod status;
@@ -1,23 +1,22 b''
1 1 use bytes_cast::BytesCast;
2 2 use micro_timer::timed;
3 3 use std::borrow::Cow;
4 use std::convert::TryInto;
5 4 use std::path::PathBuf;
6 5
7 6 use super::on_disk;
8 7 use super::on_disk::DirstateV2ParseError;
8 use super::owning::OwningDirstateMap;
9 9 use super::path_with_basename::WithBasename;
10 10 use crate::dirstate::parsers::pack_entry;
11 11 use crate::dirstate::parsers::packed_entry_size;
12 12 use crate::dirstate::parsers::parse_dirstate_entries;
13 use crate::dirstate::parsers::Timestamp;
14 use crate::dirstate::MTIME_UNSET;
13 use crate::dirstate::CopyMapIter;
14 use crate::dirstate::StateMapIter;
15 use crate::dirstate::TruncatedTimestamp;
15 16 use crate::dirstate::SIZE_FROM_OTHER_PARENT;
16 17 use crate::dirstate::SIZE_NON_NORMAL;
17 use crate::dirstate::V1_RANGEMASK;
18 18 use crate::matchers::Matcher;
19 19 use crate::utils::hg_path::{HgPath, HgPathBuf};
20 use crate::CopyMapIter;
21 20 use crate::DirstateEntry;
22 21 use crate::DirstateError;
23 22 use crate::DirstateParents;
@@ -25,7 +24,6 b' use crate::DirstateStatus;'
25 24 use crate::EntryState;
26 25 use crate::FastHashMap;
27 26 use crate::PatternFileWarning;
28 use crate::StateMapIter;
29 27 use crate::StatusError;
30 28 use crate::StatusOptions;
31 29
@@ -326,22 +324,17 b" impl<'tree, 'on_disk> NodeRef<'tree, 'on"
326 324 pub(super) fn state(
327 325 &self,
328 326 ) -> Result<Option<EntryState>, DirstateV2ParseError> {
329 match self {
330 NodeRef::InMemory(_path, node) => {
331 Ok(node.data.as_entry().map(|entry| entry.state))
332 }
333 NodeRef::OnDisk(node) => node.state(),
334 }
327 Ok(self.entry()?.map(|e| e.state()))
335 328 }
336 329
337 330 pub(super) fn cached_directory_mtime(
338 331 &self,
339 ) -> Option<&'tree on_disk::Timestamp> {
332 ) -> Result<Option<TruncatedTimestamp>, DirstateV2ParseError> {
340 333 match self {
341 NodeRef::InMemory(_path, node) => match &node.data {
334 NodeRef::InMemory(_path, node) => Ok(match node.data {
342 335 NodeData::CachedDirectory { mtime } => Some(mtime),
343 336 _ => None,
344 },
337 }),
345 338 NodeRef::OnDisk(node) => node.cached_directory_mtime(),
346 339 }
347 340 }
@@ -382,7 +375,7 b" pub(super) struct Node<'on_disk> {"
382 375
383 376 pub(super) enum NodeData {
384 377 Entry(DirstateEntry),
385 CachedDirectory { mtime: on_disk::Timestamp },
378 CachedDirectory { mtime: TruncatedTimestamp },
386 379 None,
387 380 }
388 381
@@ -445,7 +438,7 b" impl<'on_disk> DirstateMap<'on_disk> {"
445 438 let parents = parse_dirstate_entries(
446 439 map.on_disk,
447 440 |path, entry, copy_source| {
448 let tracked = entry.state.is_tracked();
441 let tracked = entry.state().is_tracked();
449 442 let node = Self::get_or_insert_node(
450 443 map.on_disk,
451 444 &mut map.unreachable_bytes,
@@ -593,12 +586,13 b" impl<'on_disk> DirstateMap<'on_disk> {"
593 586 fn add_or_remove_file(
594 587 &mut self,
595 588 path: &HgPath,
596 old_state: EntryState,
589 old_state: Option<EntryState>,
597 590 new_entry: DirstateEntry,
598 591 ) -> Result<(), DirstateV2ParseError> {
599 let had_entry = old_state != EntryState::Unknown;
592 let had_entry = old_state.is_some();
593 let was_tracked = old_state.map_or(false, |s| s.is_tracked());
600 594 let tracked_count_increment =
601 match (old_state.is_tracked(), new_entry.state.is_tracked()) {
595 match (was_tracked, new_entry.state().is_tracked()) {
602 596 (false, true) => 1,
603 597 (true, false) => -1,
604 598 _ => 0,
@@ -695,34 +689,13 b" impl<'on_disk> DirstateMap<'on_disk> {"
695 689 path.as_ref(),
696 690 )? {
697 691 if let NodeData::Entry(entry) = &mut node.data {
698 entry.clear_mtime();
692 entry.set_possibly_dirty();
699 693 }
700 694 }
701 695 }
702 696 Ok(())
703 697 }
704 698
705 /// Return a faillilble iterator of full paths of nodes that have an
706 /// `entry` for which the given `predicate` returns true.
707 ///
708 /// Fallibility means that each iterator item is a `Result`, which may
709 /// indicate a parse error of the on-disk dirstate-v2 format. Such errors
710 /// should only happen if Mercurial is buggy or a repository is corrupted.
711 fn filter_full_paths<'tree>(
712 &'tree self,
713 predicate: impl Fn(&DirstateEntry) -> bool + 'tree,
714 ) -> impl Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + 'tree
715 {
716 filter_map_results(self.iter_nodes(), move |node| {
717 if let Some(entry) = node.entry()? {
718 if predicate(&entry) {
719 return Ok(Some(node.full_path(self.on_disk)?));
720 }
721 }
722 Ok(None)
723 })
724 }
725
726 699 fn count_dropped_path(unreachable_bytes: &mut u32, path: &Cow<HgPath>) {
727 700 if let Cow::Borrowed(path) = path {
728 701 *unreachable_bytes += path.len() as u32
@@ -750,78 +723,41 b' where'
750 723 })
751 724 }
752 725
753 impl<'on_disk> super::dispatch::DirstateMapMethods for DirstateMap<'on_disk> {
754 fn clear(&mut self) {
755 self.root = Default::default();
756 self.nodes_with_entry_count = 0;
757 self.nodes_with_copy_source_count = 0;
726 impl OwningDirstateMap {
727 pub fn clear(&mut self) {
728 let map = self.get_map_mut();
729 map.root = Default::default();
730 map.nodes_with_entry_count = 0;
731 map.nodes_with_copy_source_count = 0;
758 732 }
759 733
760 fn set_v1(&mut self, filename: &HgPath, entry: DirstateEntry) {
761 let node =
762 self.get_or_insert(&filename).expect("no parse error in v1");
763 node.data = NodeData::Entry(entry);
764 node.children = ChildNodes::default();
765 node.copy_source = None;
766 node.descendants_with_entry_count = 0;
767 node.tracked_descendants_count = 0;
768 }
769
770 fn add_file(
734 pub fn set_entry(
771 735 &mut self,
772 736 filename: &HgPath,
773 737 entry: DirstateEntry,
774 added: bool,
775 merged: bool,
776 from_p2: bool,
777 possibly_dirty: bool,
778 ) -> Result<(), DirstateError> {
779 let mut entry = entry;
780 if added {
781 assert!(!possibly_dirty);
782 assert!(!from_p2);
783 entry.state = EntryState::Added;
784 entry.size = SIZE_NON_NORMAL;
785 entry.mtime = MTIME_UNSET;
786 } else if merged {
787 assert!(!possibly_dirty);
788 assert!(!from_p2);
789 entry.state = EntryState::Merged;
790 entry.size = SIZE_FROM_OTHER_PARENT;
791 entry.mtime = MTIME_UNSET;
792 } else if from_p2 {
793 assert!(!possibly_dirty);
794 entry.state = EntryState::Normal;
795 entry.size = SIZE_FROM_OTHER_PARENT;
796 entry.mtime = MTIME_UNSET;
797 } else if possibly_dirty {
798 entry.state = EntryState::Normal;
799 entry.size = SIZE_NON_NORMAL;
800 entry.mtime = MTIME_UNSET;
801 } else {
802 entry.state = EntryState::Normal;
803 entry.size = entry.size & V1_RANGEMASK;
804 entry.mtime = entry.mtime & V1_RANGEMASK;
805 }
806
807 let old_state = match self.get(filename)? {
808 Some(e) => e.state,
809 None => EntryState::Unknown,
810 };
811
812 Ok(self.add_or_remove_file(filename, old_state, entry)?)
738 ) -> Result<(), DirstateV2ParseError> {
739 let map = self.get_map_mut();
740 map.get_or_insert(&filename)?.data = NodeData::Entry(entry);
741 Ok(())
813 742 }
814 743
815 fn remove_file(
744 pub fn add_file(
745 &mut self,
746 filename: &HgPath,
747 entry: DirstateEntry,
748 ) -> Result<(), DirstateError> {
749 let old_state = self.get(filename)?.map(|e| e.state());
750 let map = self.get_map_mut();
751 Ok(map.add_or_remove_file(filename, old_state, entry)?)
752 }
753
754 pub fn remove_file(
816 755 &mut self,
817 756 filename: &HgPath,
818 757 in_merge: bool,
819 758 ) -> Result<(), DirstateError> {
820 759 let old_entry_opt = self.get(filename)?;
821 let old_state = match old_entry_opt {
822 Some(e) => e.state,
823 None => EntryState::Unknown,
824 };
760 let old_state = old_entry_opt.map(|e| e.state());
825 761 let mut size = 0;
826 762 if in_merge {
827 763 // XXX we should not be able to have 'm' state and 'FROM_P2' if not
@@ -830,10 +766,10 b" impl<'on_disk> super::dispatch::Dirstate"
830 766 // would be nice.
831 767 if let Some(old_entry) = old_entry_opt {
832 768 // backup the previous state
833 if old_entry.state == EntryState::Merged {
769 if old_entry.state() == EntryState::Merged {
834 770 size = SIZE_NON_NORMAL;
835 } else if old_entry.state == EntryState::Normal
836 && old_entry.size == SIZE_FROM_OTHER_PARENT
771 } else if old_entry.state() == EntryState::Normal
772 && old_entry.size() == SIZE_FROM_OTHER_PARENT
837 773 {
838 774 // other parent
839 775 size = SIZE_FROM_OTHER_PARENT;
@@ -843,20 +779,19 b" impl<'on_disk> super::dispatch::Dirstate"
843 779 if size == 0 {
844 780 self.copy_map_remove(filename)?;
845 781 }
846 let entry = DirstateEntry {
847 state: EntryState::Removed,
848 mode: 0,
849 size,
850 mtime: 0,
851 };
852 Ok(self.add_or_remove_file(filename, old_state, entry)?)
782 let map = self.get_map_mut();
783 let entry = DirstateEntry::new_removed(size);
784 Ok(map.add_or_remove_file(filename, old_state, entry)?)
853 785 }
854 786
855 fn drop_file(&mut self, filename: &HgPath) -> Result<bool, DirstateError> {
856 let old_state = match self.get(filename)? {
857 Some(e) => e.state,
858 None => EntryState::Unknown,
859 };
787 pub fn drop_entry_and_copy_source(
788 &mut self,
789 filename: &HgPath,
790 ) -> Result<(), DirstateError> {
791 let was_tracked = self
792 .get(filename)?
793 .map_or(false, |e| e.state().is_tracked());
794 let map = self.get_map_mut();
860 795 struct Dropped {
861 796 was_tracked: bool,
862 797 had_entry: bool,
@@ -915,13 +850,14 b" impl<'on_disk> super::dispatch::Dirstate"
915 850 node.data = NodeData::None
916 851 }
917 852 if let Some(source) = &node.copy_source {
918 DirstateMap::count_dropped_path(unreachable_bytes, source)
853 DirstateMap::count_dropped_path(unreachable_bytes, source);
854 node.copy_source = None
919 855 }
920 856 dropped = Dropped {
921 857 was_tracked: node
922 858 .data
923 859 .as_entry()
924 .map_or(false, |entry| entry.state.is_tracked()),
860 .map_or(false, |entry| entry.state().is_tracked()),
925 861 had_entry,
926 862 had_copy_source: node.copy_source.take().is_some(),
927 863 };
@@ -943,112 +879,29 b" impl<'on_disk> super::dispatch::Dirstate"
943 879 }
944 880
945 881 if let Some((dropped, _removed)) = recur(
946 self.on_disk,
947 &mut self.unreachable_bytes,
948 &mut self.root,
882 map.on_disk,
883 &mut map.unreachable_bytes,
884 &mut map.root,
949 885 filename,
950 886 )? {
951 887 if dropped.had_entry {
952 self.nodes_with_entry_count -= 1
888 map.nodes_with_entry_count -= 1
953 889 }
954 890 if dropped.had_copy_source {
955 self.nodes_with_copy_source_count -= 1
891 map.nodes_with_copy_source_count -= 1
956 892 }
957 Ok(dropped.had_entry)
958 893 } else {
959 debug_assert!(!old_state.is_tracked());
960 Ok(false)
961 }
962 }
963
964 fn clear_ambiguous_times(
965 &mut self,
966 filenames: Vec<HgPathBuf>,
967 now: i32,
968 ) -> Result<(), DirstateV2ParseError> {
969 for filename in filenames {
970 if let Some(node) = Self::get_node_mut(
971 self.on_disk,
972 &mut self.unreachable_bytes,
973 &mut self.root,
974 &filename,
975 )? {
976 if let NodeData::Entry(entry) = &mut node.data {
977 entry.clear_ambiguous_mtime(now);
978 }
979 }
894 debug_assert!(!was_tracked);
980 895 }
981 896 Ok(())
982 897 }
983 898
984 fn non_normal_entries_contains(
985 &mut self,
986 key: &HgPath,
987 ) -> Result<bool, DirstateV2ParseError> {
988 Ok(if let Some(node) = self.get_node(key)? {
989 node.entry()?.map_or(false, |entry| entry.is_non_normal())
990 } else {
991 false
992 })
993 }
994
995 fn non_normal_entries_remove(&mut self, key: &HgPath) -> bool {
996 // Do nothing, this `DirstateMap` does not have a separate "non normal
997 // entries" set that need to be kept up to date.
998 if let Ok(Some(v)) = self.get(key) {
999 return v.is_non_normal();
1000 }
1001 false
1002 }
1003
1004 fn non_normal_entries_add(&mut self, _key: &HgPath) {
1005 // Do nothing, this `DirstateMap` does not have a separate "non normal
1006 // entries" set that need to be kept up to date
1007 }
1008
1009 fn non_normal_or_other_parent_paths(
1010 &mut self,
1011 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
1012 {
1013 Box::new(self.filter_full_paths(|entry| {
1014 entry.is_non_normal() || entry.is_from_other_parent()
1015 }))
1016 }
1017
1018 fn set_non_normal_other_parent_entries(&mut self, _force: bool) {
1019 // Do nothing, this `DirstateMap` does not have a separate "non normal
1020 // entries" and "from other parent" sets that need to be recomputed
1021 }
1022
1023 fn iter_non_normal_paths(
1024 &mut self,
1025 ) -> Box<
1026 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
1027 > {
1028 self.iter_non_normal_paths_panic()
1029 }
1030
1031 fn iter_non_normal_paths_panic(
1032 &self,
1033 ) -> Box<
1034 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
1035 > {
1036 Box::new(self.filter_full_paths(|entry| entry.is_non_normal()))
1037 }
1038
1039 fn iter_other_parent_paths(
1040 &mut self,
1041 ) -> Box<
1042 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
1043 > {
1044 Box::new(self.filter_full_paths(|entry| entry.is_from_other_parent()))
1045 }
1046
1047 fn has_tracked_dir(
899 pub fn has_tracked_dir(
1048 900 &mut self,
1049 901 directory: &HgPath,
1050 902 ) -> Result<bool, DirstateError> {
1051 if let Some(node) = self.get_node(directory)? {
903 let map = self.get_map_mut();
904 if let Some(node) = map.get_node(directory)? {
1052 905 // A node without a `DirstateEntry` was created to hold child
1053 906 // nodes, and is therefore a directory.
1054 907 let state = node.state()?;
@@ -1058,8 +911,12 b" impl<'on_disk> super::dispatch::Dirstate"
1058 911 }
1059 912 }
1060 913
1061 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
1062 if let Some(node) = self.get_node(directory)? {
914 pub fn has_dir(
915 &mut self,
916 directory: &HgPath,
917 ) -> Result<bool, DirstateError> {
918 let map = self.get_map_mut();
919 if let Some(node) = map.get_node(directory)? {
1063 920 // A node without a `DirstateEntry` was created to hold child
1064 921 // nodes, and is therefore a directory.
1065 922 let state = node.state()?;
@@ -1070,43 +927,43 b" impl<'on_disk> super::dispatch::Dirstate"
1070 927 }
1071 928
1072 929 #[timed]
1073 fn pack_v1(
930 pub fn pack_v1(
1074 931 &mut self,
1075 932 parents: DirstateParents,
1076 now: Timestamp,
933 now: TruncatedTimestamp,
1077 934 ) -> Result<Vec<u8>, DirstateError> {
1078 let now: i32 = now.0.try_into().expect("time overflow");
935 let map = self.get_map_mut();
1079 936 let mut ambiguous_mtimes = Vec::new();
1080 937 // Optizimation (to be measured?): pre-compute size to avoid `Vec`
1081 938 // reallocations
1082 939 let mut size = parents.as_bytes().len();
1083 for node in self.iter_nodes() {
940 for node in map.iter_nodes() {
1084 941 let node = node?;
1085 942 if let Some(entry) = node.entry()? {
1086 943 size += packed_entry_size(
1087 node.full_path(self.on_disk)?,
1088 node.copy_source(self.on_disk)?,
944 node.full_path(map.on_disk)?,
945 node.copy_source(map.on_disk)?,
1089 946 );
1090 if entry.mtime_is_ambiguous(now) {
947 if entry.need_delay(now) {
1091 948 ambiguous_mtimes.push(
1092 node.full_path_borrowed(self.on_disk)?
949 node.full_path_borrowed(map.on_disk)?
1093 950 .detach_from_tree(),
1094 951 )
1095 952 }
1096 953 }
1097 954 }
1098 self.clear_known_ambiguous_mtimes(&ambiguous_mtimes)?;
955 map.clear_known_ambiguous_mtimes(&ambiguous_mtimes)?;
1099 956
1100 957 let mut packed = Vec::with_capacity(size);
1101 958 packed.extend(parents.as_bytes());
1102 959
1103 for node in self.iter_nodes() {
960 for node in map.iter_nodes() {
1104 961 let node = node?;
1105 962 if let Some(entry) = node.entry()? {
1106 963 pack_entry(
1107 node.full_path(self.on_disk)?,
964 node.full_path(map.on_disk)?,
1108 965 &entry,
1109 node.copy_source(self.on_disk)?,
966 node.copy_source(map.on_disk)?,
1110 967 &mut packed,
1111 968 );
1112 969 }
@@ -1116,23 +973,22 b" impl<'on_disk> super::dispatch::Dirstate"
1116 973
1117 974 /// Returns new data and metadata together with whether that data should be
1118 975 /// appended to the existing data file whose content is at
1119 /// `self.on_disk` (true), instead of written to a new data file
976 /// `map.on_disk` (true), instead of written to a new data file
1120 977 /// (false).
1121 978 #[timed]
1122 fn pack_v2(
979 pub fn pack_v2(
1123 980 &mut self,
1124 now: Timestamp,
981 now: TruncatedTimestamp,
1125 982 can_append: bool,
1126 983 ) -> Result<(Vec<u8>, Vec<u8>, bool), DirstateError> {
1127 // TODO:Β how do we want to handle this in 2038?
1128 let now: i32 = now.0.try_into().expect("time overflow");
984 let map = self.get_map_mut();
1129 985 let mut paths = Vec::new();
1130 for node in self.iter_nodes() {
986 for node in map.iter_nodes() {
1131 987 let node = node?;
1132 988 if let Some(entry) = node.entry()? {
1133 if entry.mtime_is_ambiguous(now) {
989 if entry.need_delay(now) {
1134 990 paths.push(
1135 node.full_path_borrowed(self.on_disk)?
991 node.full_path_borrowed(map.on_disk)?
1136 992 .detach_from_tree(),
1137 993 )
1138 994 }
@@ -1140,12 +996,12 b" impl<'on_disk> super::dispatch::Dirstate"
1140 996 }
1141 997 // Borrow of `self` ends here since we collect cloned paths
1142 998
1143 self.clear_known_ambiguous_mtimes(&paths)?;
999 map.clear_known_ambiguous_mtimes(&paths)?;
1144 1000
1145 on_disk::write(self, can_append)
1001 on_disk::write(map, can_append)
1146 1002 }
1147 1003
1148 fn status<'a>(
1004 pub fn status<'a>(
1149 1005 &'a mut self,
1150 1006 matcher: &'a (dyn Matcher + Sync),
1151 1007 root_dir: PathBuf,
@@ -1153,119 +1009,129 b" impl<'on_disk> super::dispatch::Dirstate"
1153 1009 options: StatusOptions,
1154 1010 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
1155 1011 {
1156 super::status::status(self, matcher, root_dir, ignore_files, options)
1012 let map = self.get_map_mut();
1013 super::status::status(map, matcher, root_dir, ignore_files, options)
1157 1014 }
1158 1015
1159 fn copy_map_len(&self) -> usize {
1160 self.nodes_with_copy_source_count as usize
1016 pub fn copy_map_len(&self) -> usize {
1017 let map = self.get_map();
1018 map.nodes_with_copy_source_count as usize
1161 1019 }
1162 1020
1163 fn copy_map_iter(&self) -> CopyMapIter<'_> {
1164 Box::new(filter_map_results(self.iter_nodes(), move |node| {
1165 Ok(if let Some(source) = node.copy_source(self.on_disk)? {
1166 Some((node.full_path(self.on_disk)?, source))
1021 pub fn copy_map_iter(&self) -> CopyMapIter<'_> {
1022 let map = self.get_map();
1023 Box::new(filter_map_results(map.iter_nodes(), move |node| {
1024 Ok(if let Some(source) = node.copy_source(map.on_disk)? {
1025 Some((node.full_path(map.on_disk)?, source))
1167 1026 } else {
1168 1027 None
1169 1028 })
1170 1029 }))
1171 1030 }
1172 1031
1173 fn copy_map_contains_key(
1032 pub fn copy_map_contains_key(
1174 1033 &self,
1175 1034 key: &HgPath,
1176 1035 ) -> Result<bool, DirstateV2ParseError> {
1177 Ok(if let Some(node) = self.get_node(key)? {
1036 let map = self.get_map();
1037 Ok(if let Some(node) = map.get_node(key)? {
1178 1038 node.has_copy_source()
1179 1039 } else {
1180 1040 false
1181 1041 })
1182 1042 }
1183 1043
1184 fn copy_map_get(
1044 pub fn copy_map_get(
1185 1045 &self,
1186 1046 key: &HgPath,
1187 1047 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
1188 if let Some(node) = self.get_node(key)? {
1189 if let Some(source) = node.copy_source(self.on_disk)? {
1048 let map = self.get_map();
1049 if let Some(node) = map.get_node(key)? {
1050 if let Some(source) = node.copy_source(map.on_disk)? {
1190 1051 return Ok(Some(source));
1191 1052 }
1192 1053 }
1193 1054 Ok(None)
1194 1055 }
1195 1056
1196 fn copy_map_remove(
1057 pub fn copy_map_remove(
1197 1058 &mut self,
1198 1059 key: &HgPath,
1199 1060 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
1200 let count = &mut self.nodes_with_copy_source_count;
1201 let unreachable_bytes = &mut self.unreachable_bytes;
1202 Ok(Self::get_node_mut(
1203 self.on_disk,
1061 let map = self.get_map_mut();
1062 let count = &mut map.nodes_with_copy_source_count;
1063 let unreachable_bytes = &mut map.unreachable_bytes;
1064 Ok(DirstateMap::get_node_mut(
1065 map.on_disk,
1204 1066 unreachable_bytes,
1205 &mut self.root,
1067 &mut map.root,
1206 1068 key,
1207 1069 )?
1208 1070 .and_then(|node| {
1209 1071 if let Some(source) = &node.copy_source {
1210 1072 *count -= 1;
1211 Self::count_dropped_path(unreachable_bytes, source);
1073 DirstateMap::count_dropped_path(unreachable_bytes, source);
1212 1074 }
1213 1075 node.copy_source.take().map(Cow::into_owned)
1214 1076 }))
1215 1077 }
1216 1078
1217 fn copy_map_insert(
1079 pub fn copy_map_insert(
1218 1080 &mut self,
1219 1081 key: HgPathBuf,
1220 1082 value: HgPathBuf,
1221 1083 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
1222 let node = Self::get_or_insert_node(
1223 self.on_disk,
1224 &mut self.unreachable_bytes,
1225 &mut self.root,
1084 let map = self.get_map_mut();
1085 let node = DirstateMap::get_or_insert_node(
1086 map.on_disk,
1087 &mut map.unreachable_bytes,
1088 &mut map.root,
1226 1089 &key,
1227 1090 WithBasename::to_cow_owned,
1228 1091 |_ancestor| {},
1229 1092 )?;
1230 1093 if node.copy_source.is_none() {
1231 self.nodes_with_copy_source_count += 1
1094 map.nodes_with_copy_source_count += 1
1232 1095 }
1233 1096 Ok(node.copy_source.replace(value.into()).map(Cow::into_owned))
1234 1097 }
1235 1098
1236 fn len(&self) -> usize {
1237 self.nodes_with_entry_count as usize
1099 pub fn len(&self) -> usize {
1100 let map = self.get_map();
1101 map.nodes_with_entry_count as usize
1238 1102 }
1239 1103
1240 fn contains_key(
1104 pub fn contains_key(
1241 1105 &self,
1242 1106 key: &HgPath,
1243 1107 ) -> Result<bool, DirstateV2ParseError> {
1244 1108 Ok(self.get(key)?.is_some())
1245 1109 }
1246 1110
1247 fn get(
1111 pub fn get(
1248 1112 &self,
1249 1113 key: &HgPath,
1250 1114 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
1251 Ok(if let Some(node) = self.get_node(key)? {
1115 let map = self.get_map();
1116 Ok(if let Some(node) = map.get_node(key)? {
1252 1117 node.entry()?
1253 1118 } else {
1254 1119 None
1255 1120 })
1256 1121 }
1257 1122
1258 fn iter(&self) -> StateMapIter<'_> {
1259 Box::new(filter_map_results(self.iter_nodes(), move |node| {
1123 pub fn iter(&self) -> StateMapIter<'_> {
1124 let map = self.get_map();
1125 Box::new(filter_map_results(map.iter_nodes(), move |node| {
1260 1126 Ok(if let Some(entry) = node.entry()? {
1261 Some((node.full_path(self.on_disk)?, entry))
1127 Some((node.full_path(map.on_disk)?, entry))
1262 1128 } else {
1263 1129 None
1264 1130 })
1265 1131 }))
1266 1132 }
1267 1133
1268 fn iter_tracked_dirs(
1134 pub fn iter_tracked_dirs(
1269 1135 &mut self,
1270 1136 ) -> Result<
1271 1137 Box<
@@ -1275,9 +1141,10 b" impl<'on_disk> super::dispatch::Dirstate"
1275 1141 >,
1276 1142 DirstateError,
1277 1143 > {
1278 let on_disk = self.on_disk;
1144 let map = self.get_map_mut();
1145 let on_disk = map.on_disk;
1279 1146 Ok(Box::new(filter_map_results(
1280 self.iter_nodes(),
1147 map.iter_nodes(),
1281 1148 move |node| {
1282 1149 Ok(if node.tracked_descendants_count() > 0 {
1283 1150 Some(node.full_path(on_disk)?)
@@ -1288,8 +1155,9 b" impl<'on_disk> super::dispatch::Dirstate"
1288 1155 )))
1289 1156 }
1290 1157
1291 fn debug_iter(
1158 pub fn debug_iter(
1292 1159 &self,
1160 all: bool,
1293 1161 ) -> Box<
1294 1162 dyn Iterator<
1295 1163 Item = Result<
@@ -1299,16 +1167,18 b" impl<'on_disk> super::dispatch::Dirstate"
1299 1167 > + Send
1300 1168 + '_,
1301 1169 > {
1302 Box::new(self.iter_nodes().map(move |node| {
1303 let node = node?;
1170 let map = self.get_map();
1171 Box::new(filter_map_results(map.iter_nodes(), move |node| {
1304 1172 let debug_tuple = if let Some(entry) = node.entry()? {
1305 1173 entry.debug_tuple()
1306 } else if let Some(mtime) = node.cached_directory_mtime() {
1307 (b' ', 0, -1, mtime.seconds() as i32)
1174 } else if !all {
1175 return Ok(None);
1176 } else if let Some(mtime) = node.cached_directory_mtime()? {
1177 (b' ', 0, -1, mtime.truncated_seconds() as i32)
1308 1178 } else {
1309 1179 (b' ', 0, -1, -1)
1310 1180 };
1311 Ok((node.full_path(self.on_disk)?, debug_tuple))
1181 Ok(Some((node.full_path(map.on_disk)?, debug_tuple)))
1312 1182 }))
1313 1183 }
1314 1184 }
@@ -1,23 +1,8 b''
1 1 //! The "version 2" disk representation of the dirstate
2 2 //!
3 //! # File format
4 //!
5 //! In dirstate-v2 format, the `.hg/dirstate` file is a "docket that starts
6 //! with a fixed-sized header whose layout is defined by the `DocketHeader`
7 //! struct, followed by the data file identifier.
8 //!
9 //! A separate `.hg/dirstate.{uuid}.d` file contains most of the data. That
10 //! file may be longer than the size given in the docket, but not shorter. Only
11 //! the start of the data file up to the given size is considered. The
12 //! fixed-size "root" of the dirstate tree whose layout is defined by the
13 //! `Root` struct is found at the end of that slice of data.
14 //!
15 //! Its `root_nodes` field contains the slice (offset and length) to
16 //! the nodes representing the files and directories at the root of the
17 //! repository. Each node is also fixed-size, defined by the `Node` struct.
18 //! Nodes in turn contain slices to variable-size paths, and to their own child
19 //! nodes (if any) for nested files and directories.
3 //! See `mercurial/helptext/internals/dirstate-v2.txt`
20 4
5 use crate::dirstate::TruncatedTimestamp;
21 6 use crate::dirstate_tree::dirstate_map::{self, DirstateMap, NodeRef};
22 7 use crate::dirstate_tree::path_with_basename::WithBasename;
23 8 use crate::errors::HgError;
@@ -25,13 +10,12 b' use crate::utils::hg_path::HgPath;'
25 10 use crate::DirstateEntry;
26 11 use crate::DirstateError;
27 12 use crate::DirstateParents;
28 use crate::EntryState;
29 use bytes_cast::unaligned::{I32Be, I64Be, U16Be, U32Be};
13 use bitflags::bitflags;
14 use bytes_cast::unaligned::{U16Be, U32Be};
30 15 use bytes_cast::BytesCast;
31 16 use format_bytes::format_bytes;
32 17 use std::borrow::Cow;
33 18 use std::convert::{TryFrom, TryInto};
34 use std::time::{Duration, SystemTime, UNIX_EPOCH};
35 19
36 20 /// Added at the start of `.hg/dirstate` when the "v2" format is used.
37 21 /// This a redundant sanity check more than an actual "magic number" since
@@ -47,16 +31,16 b' const USED_NODE_ID_BYTES: usize = 20;'
47 31 pub(super) const IGNORE_PATTERNS_HASH_LEN: usize = 20;
48 32 pub(super) type IgnorePatternsHash = [u8; IGNORE_PATTERNS_HASH_LEN];
49 33
50 /// Must match the constant of the same name in
51 /// `mercurial/dirstateutils/docket.py`
34 /// Must match constants of the same names in `mercurial/dirstateutils/v2.py`
52 35 const TREE_METADATA_SIZE: usize = 44;
36 const NODE_SIZE: usize = 44;
53 37
54 38 /// Make sure that size-affecting changes are made knowingly
55 39 #[allow(unused)]
56 40 fn static_assert_size_of() {
57 41 let _ = std::mem::transmute::<TreeMetadata, [u8; TREE_METADATA_SIZE]>;
58 42 let _ = std::mem::transmute::<DocketHeader, [u8; TREE_METADATA_SIZE + 81]>;
59 let _ = std::mem::transmute::<Node, [u8; 43]>;
43 let _ = std::mem::transmute::<Node, [u8; NODE_SIZE]>;
60 44 }
61 45
62 46 // Must match `HEADER` in `mercurial/dirstateutils/docket.py`
@@ -67,11 +51,11 b' struct DocketHeader {'
67 51 parent_1: [u8; STORED_NODE_ID_BYTES],
68 52 parent_2: [u8; STORED_NODE_ID_BYTES],
69 53
54 metadata: TreeMetadata,
55
70 56 /// Counted in bytes
71 57 data_size: Size,
72 58
73 metadata: TreeMetadata,
74
75 59 uuid_size: u8,
76 60 }
77 61
@@ -80,44 +64,24 b" pub struct Docket<'on_disk> {"
80 64 uuid: &'on_disk [u8],
81 65 }
82 66
67 /// Fields are documented in the *Tree metadata in the docket file*
68 /// section of `mercurial/helptext/internals/dirstate-v2.txt`
83 69 #[derive(BytesCast)]
84 70 #[repr(C)]
85 71 struct TreeMetadata {
86 72 root_nodes: ChildNodes,
87 73 nodes_with_entry_count: Size,
88 74 nodes_with_copy_source_count: Size,
89
90 /// How many bytes of this data file are not used anymore
91 75 unreachable_bytes: Size,
92
93 /// Current version always sets these bytes to zero when creating or
94 /// updating a dirstate. Future versions could assign some bits to signal
95 /// for example "the version that last wrote/updated this dirstate did so
96 /// in such and such way that can be relied on by versions that know to."
97 76 unused: [u8; 4],
98 77
99 /// If non-zero, a hash of ignore files that were used for some previous
100 /// run of the `status` algorithm.
101 ///
102 /// We define:
103 ///
104 /// * "Root" ignore files are `.hgignore` at the root of the repository if
105 /// it exists, and files from `ui.ignore.*` config. This set of files is
106 /// then sorted by the string representation of their path.
107 /// * The "expanded contents" of an ignore files is the byte string made
108 /// by concatenating its contents with the "expanded contents" of other
109 /// files included with `include:` or `subinclude:` files, in inclusion
110 /// order. This definition is recursive, as included files can
111 /// themselves include more files.
112 ///
113 /// This hash is defined as the SHA-1 of the concatenation (in sorted
114 /// order) of the "expanded contents" of each "root" ignore file.
115 /// (Note that computing this does not require actually concatenating byte
116 /// strings into contiguous memory, instead SHA-1 hashing can be done
117 /// incrementally.)
78 /// See *Optional hash of ignore patterns* section of
79 /// `mercurial/helptext/internals/dirstate-v2.txt`
118 80 ignore_patterns_hash: IgnorePatternsHash,
119 81 }
120 82
83 /// Fields are documented in the *The data file format*
84 /// section of `mercurial/helptext/internals/dirstate-v2.txt`
121 85 #[derive(BytesCast)]
122 86 #[repr(C)]
123 87 pub(super) struct Node {
@@ -130,59 +94,38 b' pub(super) struct Node {'
130 94 children: ChildNodes,
131 95 pub(super) descendants_with_entry_count: Size,
132 96 pub(super) tracked_descendants_count: Size,
133
134 /// Depending on the value of `state`:
135 ///
136 /// * A null byte: `data` is not used.
137 ///
138 /// * A `n`, `a`, `r`, or `m` ASCII byte: `state` and `data` together
139 /// represent a dirstate entry like in the v1 format.
140 ///
141 /// * A `d` ASCII byte: the bytes of `data` should instead be interpreted
142 /// as the `Timestamp` for the mtime of a cached directory.
143 ///
144 /// The presence of this state means that at some point, this path in
145 /// the working directory was observed:
146 ///
147 /// - To be a directory
148 /// - With the modification time as given by `Timestamp`
149 /// - That timestamp was already strictly in the past when observed,
150 /// meaning that later changes cannot happen in the same clock tick
151 /// and must cause a different modification time (unless the system
152 /// clock jumps back and we get unlucky, which is not impossible but
153 /// but deemed unlikely enough).
154 /// - All direct children of this directory (as returned by
155 /// `std::fs::read_dir`) either have a corresponding dirstate node, or
156 /// are ignored by ignore patterns whose hash is in
157 /// `TreeMetadata::ignore_patterns_hash`.
158 ///
159 /// This means that if `std::fs::symlink_metadata` later reports the
160 /// same modification time and ignored patterns haven’t changed, a run
161 /// of status that is not listing ignored files can skip calling
162 /// `std::fs::read_dir` again for this directory, iterate child
163 /// dirstate nodes instead.
164 state: u8,
165 data: Entry,
97 flags: U16Be,
98 size: U32Be,
99 mtime: PackedTruncatedTimestamp,
166 100 }
167 101
168 #[derive(BytesCast, Copy, Clone)]
169 #[repr(C)]
170 struct Entry {
171 mode: I32Be,
172 mtime: I32Be,
173 size: I32Be,
102 bitflags! {
103 #[repr(C)]
104 struct Flags: u16 {
105 const WDIR_TRACKED = 1 << 0;
106 const P1_TRACKED = 1 << 1;
107 const P2_INFO = 1 << 2;
108 const MODE_EXEC_PERM = 1 << 3;
109 const MODE_IS_SYMLINK = 1 << 4;
110 const HAS_FALLBACK_EXEC = 1 << 5;
111 const FALLBACK_EXEC = 1 << 6;
112 const HAS_FALLBACK_SYMLINK = 1 << 7;
113 const FALLBACK_SYMLINK = 1 << 8;
114 const EXPECTED_STATE_IS_MODIFIED = 1 << 9;
115 const HAS_MODE_AND_SIZE = 1 <<10;
116 const HAS_MTIME = 1 <<11;
117 const MTIME_SECOND_AMBIGUOUS = 1 << 12;
118 const DIRECTORY = 1 <<13;
119 const ALL_UNKNOWN_RECORDED = 1 <<14;
120 const ALL_IGNORED_RECORDED = 1 <<15;
121 }
174 122 }
175 123
176 124 /// Duration since the Unix epoch
177 #[derive(BytesCast, Copy, Clone, PartialEq)]
125 #[derive(BytesCast, Copy, Clone)]
178 126 #[repr(C)]
179 pub(super) struct Timestamp {
180 seconds: I64Be,
181
182 /// In `0 .. 1_000_000_000`.
183 ///
184 /// This timestamp is later or earlier than `(seconds, 0)` by this many
185 /// nanoseconds, if `seconds` is non-negative or negative, respectively.
127 struct PackedTruncatedTimestamp {
128 truncated_seconds: U32Be,
186 129 nanoseconds: U32Be,
187 130 }
188 131
@@ -265,7 +208,7 b" impl<'on_disk> Docket<'on_disk> {"
265 208 }
266 209
267 210 pub fn data_filename(&self) -> String {
268 String::from_utf8(format_bytes!(b"dirstate.{}.d", self.uuid)).unwrap()
211 String::from_utf8(format_bytes!(b"dirstate.{}", self.uuid)).unwrap()
269 212 }
270 213 }
271 214
@@ -361,62 +304,112 b' impl Node {'
361 304 })
362 305 }
363 306
307 fn flags(&self) -> Flags {
308 Flags::from_bits_truncate(self.flags.get())
309 }
310
311 fn has_entry(&self) -> bool {
312 self.flags().intersects(
313 Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO,
314 )
315 }
316
364 317 pub(super) fn node_data(
365 318 &self,
366 319 ) -> Result<dirstate_map::NodeData, DirstateV2ParseError> {
367 let entry = |state| {
368 dirstate_map::NodeData::Entry(self.entry_with_given_state(state))
369 };
320 if self.has_entry() {
321 Ok(dirstate_map::NodeData::Entry(self.assume_entry()?))
322 } else if let Some(mtime) = self.cached_directory_mtime()? {
323 Ok(dirstate_map::NodeData::CachedDirectory { mtime })
324 } else {
325 Ok(dirstate_map::NodeData::None)
326 }
327 }
370 328
371 match self.state {
372 b'\0' => Ok(dirstate_map::NodeData::None),
373 b'd' => Ok(dirstate_map::NodeData::CachedDirectory {
374 mtime: *self.data.as_timestamp(),
375 }),
376 b'n' => Ok(entry(EntryState::Normal)),
377 b'a' => Ok(entry(EntryState::Added)),
378 b'r' => Ok(entry(EntryState::Removed)),
379 b'm' => Ok(entry(EntryState::Merged)),
380 _ => Err(DirstateV2ParseError),
329 pub(super) fn cached_directory_mtime(
330 &self,
331 ) -> Result<Option<TruncatedTimestamp>, DirstateV2ParseError> {
332 // For now we do not have code to handle the absence of
333 // ALL_UNKNOWN_RECORDED, so we ignore the mtime if the flag is
334 // unset.
335 if self.flags().contains(Flags::DIRECTORY)
336 && self.flags().contains(Flags::HAS_MTIME)
337 && self.flags().contains(Flags::ALL_UNKNOWN_RECORDED)
338 {
339 Ok(Some(self.mtime.try_into()?))
340 } else {
341 Ok(None)
381 342 }
382 343 }
383 344
384 pub(super) fn cached_directory_mtime(&self) -> Option<&Timestamp> {
385 if self.state == b'd' {
386 Some(self.data.as_timestamp())
345 fn synthesize_unix_mode(&self) -> u32 {
346 let file_type = if self.flags().contains(Flags::MODE_IS_SYMLINK) {
347 libc::S_IFLNK
387 348 } else {
388 None
389 }
349 libc::S_IFREG
350 };
351 let permisions = if self.flags().contains(Flags::MODE_EXEC_PERM) {
352 0o755
353 } else {
354 0o644
355 };
356 file_type | permisions
390 357 }
391 358
392 pub(super) fn state(
393 &self,
394 ) -> Result<Option<EntryState>, DirstateV2ParseError> {
395 match self.state {
396 b'\0' | b'd' => Ok(None),
397 b'n' => Ok(Some(EntryState::Normal)),
398 b'a' => Ok(Some(EntryState::Added)),
399 b'r' => Ok(Some(EntryState::Removed)),
400 b'm' => Ok(Some(EntryState::Merged)),
401 _ => Err(DirstateV2ParseError),
402 }
403 }
404
405 fn entry_with_given_state(&self, state: EntryState) -> DirstateEntry {
406 DirstateEntry {
407 state,
408 mode: self.data.mode.get(),
409 mtime: self.data.mtime.get(),
410 size: self.data.size.get(),
411 }
359 fn assume_entry(&self) -> Result<DirstateEntry, DirstateV2ParseError> {
360 // TODO: convert through raw bits instead?
361 let wdir_tracked = self.flags().contains(Flags::WDIR_TRACKED);
362 let p1_tracked = self.flags().contains(Flags::P1_TRACKED);
363 let p2_info = self.flags().contains(Flags::P2_INFO);
364 let mode_size = if self.flags().contains(Flags::HAS_MODE_AND_SIZE)
365 && !self.flags().contains(Flags::EXPECTED_STATE_IS_MODIFIED)
366 {
367 Some((self.synthesize_unix_mode(), self.size.into()))
368 } else {
369 None
370 };
371 let mtime = if self.flags().contains(Flags::HAS_MTIME)
372 && !self.flags().contains(Flags::DIRECTORY)
373 && !self.flags().contains(Flags::EXPECTED_STATE_IS_MODIFIED)
374 // The current code is not able to do the more subtle comparison that the
375 // MTIME_SECOND_AMBIGUOUS requires. So we ignore the mtime
376 && !self.flags().contains(Flags::MTIME_SECOND_AMBIGUOUS)
377 {
378 Some(self.mtime.try_into()?)
379 } else {
380 None
381 };
382 let fallback_exec = if self.flags().contains(Flags::HAS_FALLBACK_EXEC)
383 {
384 Some(self.flags().contains(Flags::FALLBACK_EXEC))
385 } else {
386 None
387 };
388 let fallback_symlink =
389 if self.flags().contains(Flags::HAS_FALLBACK_SYMLINK) {
390 Some(self.flags().contains(Flags::FALLBACK_SYMLINK))
391 } else {
392 None
393 };
394 Ok(DirstateEntry::from_v2_data(
395 wdir_tracked,
396 p1_tracked,
397 p2_info,
398 mode_size,
399 mtime,
400 fallback_exec,
401 fallback_symlink,
402 ))
412 403 }
413 404
414 405 pub(super) fn entry(
415 406 &self,
416 407 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
417 Ok(self
418 .state()?
419 .map(|state| self.entry_with_given_state(state)))
408 if self.has_entry() {
409 Ok(Some(self.assume_entry()?))
410 } else {
411 Ok(None)
412 }
420 413 }
421 414
422 415 pub(super) fn children<'on_disk>(
@@ -442,57 +435,53 b' impl Node {'
442 435 tracked_descendants_count: self.tracked_descendants_count.get(),
443 436 })
444 437 }
445 }
446 438
447 impl Entry {
448 fn from_timestamp(timestamp: Timestamp) -> Self {
449 // Safety: both types implement the `ByteCast` trait, so we could
450 // safely use `as_bytes` and `from_bytes` to do this conversion. Using
451 // `transmute` instead makes the compiler check that the two types
452 // have the same size, which eliminates the error case of
453 // `from_bytes`.
454 unsafe { std::mem::transmute::<Timestamp, Entry>(timestamp) }
455 }
456
457 fn as_timestamp(&self) -> &Timestamp {
458 // Safety: same as above in `from_timestamp`
459 unsafe { &*(self as *const Entry as *const Timestamp) }
460 }
461 }
462
463 impl Timestamp {
464 pub fn seconds(&self) -> i64 {
465 self.seconds.get()
466 }
467 }
468
469 impl From<SystemTime> for Timestamp {
470 fn from(system_time: SystemTime) -> Self {
471 let (secs, nanos) = match system_time.duration_since(UNIX_EPOCH) {
472 Ok(duration) => {
473 (duration.as_secs() as i64, duration.subsec_nanos())
439 fn from_dirstate_entry(
440 entry: &DirstateEntry,
441 ) -> (Flags, U32Be, PackedTruncatedTimestamp) {
442 let (
443 wdir_tracked,
444 p1_tracked,
445 p2_info,
446 mode_size_opt,
447 mtime_opt,
448 fallback_exec,
449 fallback_symlink,
450 ) = entry.v2_data();
451 // TODO: convert throug raw flag bits instead?
452 let mut flags = Flags::empty();
453 flags.set(Flags::WDIR_TRACKED, wdir_tracked);
454 flags.set(Flags::P1_TRACKED, p1_tracked);
455 flags.set(Flags::P2_INFO, p2_info);
456 let size = if let Some((m, s)) = mode_size_opt {
457 let exec_perm = m & libc::S_IXUSR != 0;
458 let is_symlink = m & libc::S_IFMT == libc::S_IFLNK;
459 flags.set(Flags::MODE_EXEC_PERM, exec_perm);
460 flags.set(Flags::MODE_IS_SYMLINK, is_symlink);
461 flags.insert(Flags::HAS_MODE_AND_SIZE);
462 s.into()
463 } else {
464 0.into()
465 };
466 let mtime = if let Some(m) = mtime_opt {
467 flags.insert(Flags::HAS_MTIME);
468 m.into()
469 } else {
470 PackedTruncatedTimestamp::null()
471 };
472 if let Some(f_exec) = fallback_exec {
473 flags.insert(Flags::HAS_FALLBACK_EXEC);
474 if f_exec {
475 flags.insert(Flags::FALLBACK_EXEC);
474 476 }
475 Err(error) => {
476 let negative = error.duration();
477 (-(negative.as_secs() as i64), negative.subsec_nanos())
478 }
479 };
480 Timestamp {
481 seconds: secs.into(),
482 nanoseconds: nanos.into(),
483 477 }
484 }
485 }
486
487 impl From<&'_ Timestamp> for SystemTime {
488 fn from(timestamp: &'_ Timestamp) -> Self {
489 let secs = timestamp.seconds.get();
490 let nanos = timestamp.nanoseconds.get();
491 if secs >= 0 {
492 UNIX_EPOCH + Duration::new(secs as u64, nanos)
493 } else {
494 UNIX_EPOCH - Duration::new((-secs) as u64, nanos)
478 if let Some(f_symlink) = fallback_symlink {
479 flags.insert(Flags::HAS_FALLBACK_SYMLINK);
480 if f_symlink {
481 flags.insert(Flags::FALLBACK_SYMLINK);
482 }
495 483 }
484 (flags, size, mtime)
496 485 }
497 486 }
498 487
@@ -543,8 +532,8 b" pub(crate) fn for_each_tracked_path<'on_"
543 532 f: &mut impl FnMut(&'on_disk HgPath),
544 533 ) -> Result<(), DirstateV2ParseError> {
545 534 for node in read_nodes(on_disk, nodes)? {
546 if let Some(state) = node.state()? {
547 if state.is_tracked() {
535 if let Some(entry) = node.entry()? {
536 if entry.state().is_tracked() {
548 537 f(node.full_path(on_disk)?)
549 538 }
550 539 }
@@ -638,25 +627,31 b" impl Writer<'_, '_> {"
638 627 };
639 628 on_disk_nodes.push(match node {
640 629 NodeRef::InMemory(path, node) => {
641 let (state, data) = match &node.data {
642 dirstate_map::NodeData::Entry(entry) => (
643 entry.state.into(),
644 Entry {
645 mode: entry.mode.into(),
646 mtime: entry.mtime.into(),
647 size: entry.size.into(),
648 },
630 let (flags, size, mtime) = match &node.data {
631 dirstate_map::NodeData::Entry(entry) => {
632 Node::from_dirstate_entry(entry)
633 }
634 dirstate_map::NodeData::CachedDirectory { mtime } => (
635 // we currently never set a mtime if unknown file
636 // are present.
637 // So if we have a mtime for a directory, we know
638 // they are no unknown
639 // files and we
640 // blindly set ALL_UNKNOWN_RECORDED.
641 //
642 // We never set ALL_IGNORED_RECORDED since we
643 // don't track that case
644 // currently.
645 Flags::DIRECTORY
646 | Flags::HAS_MTIME
647 | Flags::ALL_UNKNOWN_RECORDED,
648 0.into(),
649 (*mtime).into(),
649 650 ),
650 dirstate_map::NodeData::CachedDirectory { mtime } => {
651 (b'd', Entry::from_timestamp(*mtime))
652 }
653 651 dirstate_map::NodeData::None => (
654 b'\0',
655 Entry {
656 mode: 0.into(),
657 mtime: 0.into(),
658 size: 0.into(),
659 },
652 Flags::DIRECTORY,
653 0.into(),
654 PackedTruncatedTimestamp::null(),
660 655 ),
661 656 };
662 657 Node {
@@ -673,8 +668,9 b" impl Writer<'_, '_> {"
673 668 tracked_descendants_count: node
674 669 .tracked_descendants_count
675 670 .into(),
676 state,
677 data,
671 flags: flags.bits().into(),
672 size,
673 mtime,
678 674 }
679 675 }
680 676 NodeRef::OnDisk(node) => Node {
@@ -758,3 +754,33 b' fn path_len_from_usize(x: usize) -> Path'
758 754 .expect("dirstate-v2 path length overflow")
759 755 .into()
760 756 }
757
758 impl From<TruncatedTimestamp> for PackedTruncatedTimestamp {
759 fn from(timestamp: TruncatedTimestamp) -> Self {
760 Self {
761 truncated_seconds: timestamp.truncated_seconds().into(),
762 nanoseconds: timestamp.nanoseconds().into(),
763 }
764 }
765 }
766
767 impl TryFrom<PackedTruncatedTimestamp> for TruncatedTimestamp {
768 type Error = DirstateV2ParseError;
769
770 fn try_from(
771 timestamp: PackedTruncatedTimestamp,
772 ) -> Result<Self, Self::Error> {
773 Self::from_already_truncated(
774 timestamp.truncated_seconds.get(),
775 timestamp.nanoseconds.get(),
776 )
777 }
778 }
779 impl PackedTruncatedTimestamp {
780 fn null() -> Self {
781 Self {
782 truncated_seconds: 0.into(),
783 nanoseconds: 0.into(),
784 }
785 }
786 }
@@ -1,11 +1,9 b''
1 use cpython::PyBytes;
2 use cpython::Python;
3 use hg::dirstate_tree::dirstate_map::DirstateMap;
4 use hg::DirstateError;
5 use hg::DirstateParents;
1 use super::dirstate_map::DirstateMap;
2 use stable_deref_trait::StableDeref;
3 use std::ops::Deref;
6 4
7 5 /// Keep a `DirstateMap<'on_disk>` next to the `on_disk` buffer that it
8 /// borrows. This is similar to the owning-ref crate.
6 /// borrows.
9 7 ///
10 8 /// This is similar to [`OwningRef`] which is more limited because it
11 9 /// represents exactly one `&T` reference next to the value it borrows, as
@@ -13,11 +11,11 b' use hg::DirstateParents;'
13 11 /// arbitrarily-nested data structures.
14 12 ///
15 13 /// [`OwningRef`]: https://docs.rs/owning_ref/0.4.1/owning_ref/struct.OwningRef.html
16 pub(super) struct OwningDirstateMap {
14 pub struct OwningDirstateMap {
17 15 /// Owned handle to a bytes buffer with a stable address.
18 16 ///
19 17 /// See <https://docs.rs/owning_ref/0.4.1/owning_ref/trait.StableAddress.html>.
20 on_disk: PyBytes,
18 on_disk: Box<dyn Deref<Target = [u8]> + Send>,
21 19
22 20 /// Pointer for `Box<DirstateMap<'on_disk>>`, typed-erased because the
23 21 /// language cannot represent a lifetime referencing a sibling field.
@@ -28,12 +26,13 b' pub(super) struct OwningDirstateMap {'
28 26 }
29 27
30 28 impl OwningDirstateMap {
31 pub fn new_v1(
32 py: Python,
33 on_disk: PyBytes,
34 ) -> Result<(Self, Option<DirstateParents>), DirstateError> {
35 let bytes: &'_ [u8] = on_disk.data(py);
36 let (map, parents) = DirstateMap::new_v1(bytes)?;
29 pub fn new_empty<OnDisk>(on_disk: OnDisk) -> Self
30 where
31 OnDisk: Deref<Target = [u8]> + StableDeref + Send + 'static,
32 {
33 let on_disk = Box::new(on_disk);
34 let bytes: &'_ [u8] = &on_disk;
35 let map = DirstateMap::empty(bytes);
37 36
38 37 // Like in `bytes` above, this `'_` lifetime parameter borrows from
39 38 // the bytes buffer owned by `on_disk`.
@@ -42,30 +41,12 b' impl OwningDirstateMap {'
42 41 // Erase the pointed type entirely in order to erase the lifetime.
43 42 let ptr: *mut () = ptr.cast();
44 43
45 Ok((Self { on_disk, ptr }, parents))
44 Self { on_disk, ptr }
46 45 }
47 46
48 pub fn new_v2(
49 py: Python,
50 on_disk: PyBytes,
51 data_size: usize,
52 tree_metadata: PyBytes,
53 ) -> Result<Self, DirstateError> {
54 let bytes: &'_ [u8] = on_disk.data(py);
55 let map =
56 DirstateMap::new_v2(bytes, data_size, tree_metadata.data(py))?;
57
58 // Like in `bytes` above, this `'_` lifetime parameter borrows from
59 // the bytes buffer owned by `on_disk`.
60 let ptr: *mut DirstateMap<'_> = Box::into_raw(Box::new(map));
61
62 // Erase the pointed type entirely in order to erase the lifetime.
63 let ptr: *mut () = ptr.cast();
64
65 Ok(Self { on_disk, ptr })
66 }
67
68 pub fn get_mut<'a>(&'a mut self) -> &'a mut DirstateMap<'a> {
47 pub fn get_pair_mut<'a>(
48 &'a mut self,
49 ) -> (&'a [u8], &'a mut DirstateMap<'a>) {
69 50 // SAFETY: We cast the type-erased pointer back to the same type it had
70 51 // in `new`, except with a different lifetime parameter. This time we
71 52 // connect the lifetime to that of `self`. This cast is valid because
@@ -76,14 +57,22 b' impl OwningDirstateMap {'
76 57 // SAFETY: we dereference that pointer, connecting the lifetime of the
77 58 // new `&mut` to that of `self`. This is valid because the
78 59 // raw pointer is to a boxed value, and `self` owns that box.
79 unsafe { &mut *ptr }
60 (&self.on_disk, unsafe { &mut *ptr })
80 61 }
81 62
82 pub fn get<'a>(&'a self) -> &'a DirstateMap<'a> {
63 pub fn get_map_mut<'a>(&'a mut self) -> &'a mut DirstateMap<'a> {
64 self.get_pair_mut().1
65 }
66
67 pub fn get_map<'a>(&'a self) -> &'a DirstateMap<'a> {
83 68 // SAFETY: same reasoning as in `get_mut` above.
84 69 let ptr: *mut DirstateMap<'a> = self.ptr.cast();
85 70 unsafe { &*ptr }
86 71 }
72
73 pub fn on_disk<'a>(&'a self) -> &'a [u8] {
74 &self.on_disk
75 }
87 76 }
88 77
89 78 impl Drop for OwningDirstateMap {
@@ -105,13 +94,12 b' impl Drop for OwningDirstateMap {'
105 94 fn _static_assert_is_send<T: Send>() {}
106 95
107 96 fn _static_assert_fields_are_send() {
108 _static_assert_is_send::<PyBytes>();
109 97 _static_assert_is_send::<Box<DirstateMap<'_>>>();
110 98 }
111 99
112 100 // SAFETY: we don’t get this impl implicitly because `*mut (): !Send` because
113 101 // thread-safety of raw pointers is unknown in the general case. However this
114 102 // particular raw pointer represents a `Box<DirstateMap<'on_disk>>` that we
115 // own. Since that `Box` and `PyBytes` are both `Send` as shown in above, it
116 // is sound to mark this struct as `Send` too.
103 // own. Since that `Box` is `Send` as shown in above, it is sound to mark
104 // this struct as `Send` too.
117 105 unsafe impl Send for OwningDirstateMap {}
@@ -1,3 +1,4 b''
1 use crate::dirstate::entry::TruncatedTimestamp;
1 2 use crate::dirstate::status::IgnoreFnType;
2 3 use crate::dirstate_tree::dirstate_map::BorrowedPath;
3 4 use crate::dirstate_tree::dirstate_map::ChildNodesRef;
@@ -5,7 +6,6 b' use crate::dirstate_tree::dirstate_map::'
5 6 use crate::dirstate_tree::dirstate_map::NodeData;
6 7 use crate::dirstate_tree::dirstate_map::NodeRef;
7 8 use crate::dirstate_tree::on_disk::DirstateV2ParseError;
8 use crate::dirstate_tree::on_disk::Timestamp;
9 9 use crate::matchers::get_ignore_function;
10 10 use crate::matchers::Matcher;
11 11 use crate::utils::files::get_bytes_from_os_string;
@@ -126,7 +126,8 b" struct StatusCommon<'a, 'tree, 'on_disk:"
126 126 matcher: &'a (dyn Matcher + Sync),
127 127 ignore_fn: IgnoreFnType<'a>,
128 128 outcome: Mutex<DirstateStatus<'on_disk>>,
129 new_cachable_directories: Mutex<Vec<(Cow<'on_disk, HgPath>, Timestamp)>>,
129 new_cachable_directories:
130 Mutex<Vec<(Cow<'on_disk, HgPath>, TruncatedTimestamp)>>,
130 131 outated_cached_directories: Mutex<Vec<Cow<'on_disk, HgPath>>>,
131 132
132 133 /// Whether ignore files like `.hgignore` have changed since the previous
@@ -165,7 +166,7 b" impl<'a, 'tree, 'on_disk> StatusCommon<'"
165 166 dirstate_node: &NodeRef<'tree, 'on_disk>,
166 167 ) -> Result<(), DirstateV2ParseError> {
167 168 if self.ignore_patterns_have_changed == Some(true)
168 && dirstate_node.cached_directory_mtime().is_some()
169 && dirstate_node.cached_directory_mtime()?.is_some()
169 170 {
170 171 self.outated_cached_directories.lock().unwrap().push(
171 172 dirstate_node
@@ -182,7 +183,7 b" impl<'a, 'tree, 'on_disk> StatusCommon<'"
182 183 fn can_skip_fs_readdir(
183 184 &self,
184 185 directory_metadata: Option<&std::fs::Metadata>,
185 cached_directory_mtime: Option<&Timestamp>,
186 cached_directory_mtime: Option<TruncatedTimestamp>,
186 187 ) -> bool {
187 188 if !self.options.list_unknown && !self.options.list_ignored {
188 189 // All states that we care about listing have corresponding
@@ -198,13 +199,14 b" impl<'a, 'tree, 'on_disk> StatusCommon<'"
198 199 // by a previous run of the `status` algorithm which found this
199 200 // directory eligible for `read_dir` caching.
200 201 if let Some(meta) = directory_metadata {
201 if let Ok(current_mtime) = meta.modified() {
202 if current_mtime == cached_mtime.into() {
203 // The mtime of that directory has not changed
204 // since then, which means that the results of
205 // `read_dir` should also be unchanged.
206 return true;
207 }
202 if cached_mtime
203 .likely_equal_to_mtime_of(meta)
204 .unwrap_or(false)
205 {
206 // The mtime of that directory has not changed
207 // since then, which means that the results of
208 // `read_dir` should also be unchanged.
209 return true;
208 210 }
209 211 }
210 212 }
@@ -221,7 +223,7 b" impl<'a, 'tree, 'on_disk> StatusCommon<'"
221 223 directory_hg_path: &BorrowedPath<'tree, 'on_disk>,
222 224 directory_fs_path: &Path,
223 225 directory_metadata: Option<&std::fs::Metadata>,
224 cached_directory_mtime: Option<&Timestamp>,
226 cached_directory_mtime: Option<TruncatedTimestamp>,
225 227 is_at_repo_root: bool,
226 228 ) -> Result<bool, DirstateV2ParseError> {
227 229 if self.can_skip_fs_readdir(directory_metadata, cached_directory_mtime)
@@ -362,7 +364,7 b" impl<'a, 'tree, 'on_disk> StatusCommon<'"
362 364 hg_path,
363 365 fs_path,
364 366 Some(fs_metadata),
365 dirstate_node.cached_directory_mtime(),
367 dirstate_node.cached_directory_mtime()?,
366 368 is_at_repo_root,
367 369 )?;
368 370 self.maybe_save_directory_mtime(
@@ -394,9 +396,6 b" impl<'a, 'tree, 'on_disk> StatusCommon<'"
394 396 .push(hg_path.detach_from_tree()),
395 397 EntryState::Normal => self
396 398 .handle_normal_file(&dirstate_node, fs_metadata)?,
397 // This variant is not used in DirstateMap
398 // nodes
399 EntryState::Unknown => unreachable!(),
400 399 }
401 400 } else {
402 401 // `node.entry.is_none()` indicates a "directory"
@@ -468,16 +467,22 b" impl<'a, 'tree, 'on_disk> StatusCommon<'"
468 467 //
469 468 // We deem this scenario (unlike the previous one) to be
470 469 // unlikely enough in practice.
471 let timestamp = directory_mtime.into();
472 let cached = dirstate_node.cached_directory_mtime();
473 if cached != Some(&timestamp) {
470 let truncated = TruncatedTimestamp::from(directory_mtime);
471 let is_up_to_date = if let Some(cached) =
472 dirstate_node.cached_directory_mtime()?
473 {
474 cached.likely_equal(truncated)
475 } else {
476 false
477 };
478 if !is_up_to_date {
474 479 let hg_path = dirstate_node
475 480 .full_path_borrowed(self.dmap.on_disk)?
476 481 .detach_from_tree();
477 482 self.new_cachable_directories
478 483 .lock()
479 484 .unwrap()
480 .push((hg_path, timestamp))
485 .push((hg_path, truncated))
481 486 }
482 487 }
483 488 }
@@ -496,9 +501,6 b" impl<'a, 'tree, 'on_disk> StatusCommon<'"
496 501 fn truncate_u64(value: u64) -> i32 {
497 502 (value & 0x7FFF_FFFF) as i32
498 503 }
499 fn truncate_i64(value: i64) -> i32 {
500 (value & 0x7FFF_FFFF) as i32
501 }
502 504
503 505 let entry = dirstate_node
504 506 .entry()?
@@ -506,11 +508,9 b" impl<'a, 'tree, 'on_disk> StatusCommon<'"
506 508 let hg_path = &dirstate_node.full_path_borrowed(self.dmap.on_disk)?;
507 509 let mode_changed =
508 510 || self.options.check_exec && entry.mode_changed(fs_metadata);
509 let size_changed = entry.size != truncate_u64(fs_metadata.len());
510 if entry.size >= 0
511 && size_changed
512 && fs_metadata.file_type().is_symlink()
513 {
511 let size = entry.size();
512 let size_changed = size != truncate_u64(fs_metadata.len());
513 if size >= 0 && size_changed && fs_metadata.file_type().is_symlink() {
514 514 // issue6456: Size returned may be longer due to encryption
515 515 // on EXT-4 fscrypt. TODO maybe only do it on EXT4?
516 516 self.outcome
@@ -520,7 +520,7 b" impl<'a, 'tree, 'on_disk> StatusCommon<'"
520 520 .push(hg_path.detach_from_tree())
521 521 } else if dirstate_node.has_copy_source()
522 522 || entry.is_from_other_parent()
523 || (entry.size >= 0 && (size_changed || mode_changed()))
523 || (size >= 0 && (size_changed || mode_changed()))
524 524 {
525 525 self.outcome
526 526 .lock()
@@ -528,10 +528,17 b" impl<'a, 'tree, 'on_disk> StatusCommon<'"
528 528 .modified
529 529 .push(hg_path.detach_from_tree())
530 530 } else {
531 let mtime = mtime_seconds(fs_metadata);
532 if truncate_i64(mtime) != entry.mtime
533 || mtime == self.options.last_normal_time
534 {
531 let mtime_looks_clean;
532 if let Some(dirstate_mtime) = entry.truncated_mtime() {
533 let fs_mtime = TruncatedTimestamp::for_mtime_of(fs_metadata)
534 .expect("OS/libc does not support mtime?");
535 mtime_looks_clean = fs_mtime.likely_equal(dirstate_mtime)
536 && !fs_mtime.likely_equal(self.options.last_normal_time)
537 } else {
538 // No mtime in the dirstate entry
539 mtime_looks_clean = false
540 };
541 if !mtime_looks_clean {
535 542 self.outcome
536 543 .lock()
537 544 .unwrap()
@@ -687,15 +694,6 b" impl<'a, 'tree, 'on_disk> StatusCommon<'"
687 694 }
688 695 }
689 696
690 #[cfg(unix)] // TODO
691 fn mtime_seconds(metadata: &std::fs::Metadata) -> i64 {
692 // Going through `Metadata::modified()` would be portable, but would take
693 // care to construct a `SystemTime` value with sub-second precision just
694 // for us to throw that away here.
695 use std::os::unix::fs::MetadataExt;
696 metadata.mtime()
697 }
698
699 697 struct DirEntry {
700 698 base_name: HgPathBuf,
701 699 full_path: PathBuf,
@@ -536,7 +536,7 b' impl SubInclude {'
536 536 Ok(Self {
537 537 prefix: path_to_hg_path_buf(prefix).and_then(|mut p| {
538 538 if !p.is_empty() {
539 p.push(b'/');
539 p.push_byte(b'/');
540 540 }
541 541 Ok(p)
542 542 })?,
@@ -16,14 +16,11 b' pub mod requirements;'
16 16 pub mod testing; // unconditionally built, for use from integration tests
17 17 pub use dirstate::{
18 18 dirs_multiset::{DirsMultiset, DirsMultisetIter},
19 dirstate_map::DirstateMap,
20 parsers::{pack_dirstate, parse_dirstate, PARENT_SIZE},
21 19 status::{
22 status, BadMatch, BadType, DirstateStatus, HgPathCow, StatusError,
20 BadMatch, BadType, DirstateStatus, HgPathCow, StatusError,
23 21 StatusOptions,
24 22 },
25 CopyMap, CopyMapIter, DirstateEntry, DirstateParents, EntryState,
26 StateMap, StateMapIter,
23 DirstateEntry, DirstateParents, EntryState,
27 24 };
28 25 pub mod copy_tracing;
29 26 mod filepatterns;
@@ -36,6 +33,7 b' pub mod logging;'
36 33 pub mod operations;
37 34 pub mod revset;
38 35 pub mod utils;
36 pub mod vfs;
39 37
40 38 use crate::utils::hg_path::{HgPathBuf, HgPathError};
41 39 pub use filepatterns::{
@@ -1,5 +1,5 b''
1 1 use crate::errors::{HgError, HgResultExt, IoErrorContext, IoResultExt};
2 use crate::repo::Vfs;
2 use crate::vfs::Vfs;
3 3 use std::io::Write;
4 4
5 5 /// An utility to append to a log file with the given name, and optionally
@@ -391,8 +391,7 b' fn roots_and_dirs('
391 391 } = ignore_pattern;
392 392 match syntax {
393 393 PatternSyntax::RootGlob | PatternSyntax::Glob => {
394 let mut root = vec![];
395
394 let mut root = HgPathBuf::new();
396 395 for p in pattern.split(|c| *c == b'/') {
397 396 if p.iter().any(|c| match *c {
398 397 b'[' | b'{' | b'*' | b'?' => true,
@@ -400,11 +399,9 b' fn roots_and_dirs('
400 399 }) {
401 400 break;
402 401 }
403 root.push(HgPathBuf::from_bytes(p));
402 root.push(HgPathBuf::from_bytes(p).as_ref());
404 403 }
405 let buf =
406 root.iter().fold(HgPathBuf::new(), |acc, r| acc.join(r));
407 roots.push(buf);
404 roots.push(root);
408 405 }
409 406 PatternSyntax::Path | PatternSyntax::RelPath => {
410 407 let pat = HgPath::new(if pattern == b"." {
@@ -5,31 +5,70 b''
5 5 // This software may be used and distributed according to the terms of the
6 6 // GNU General Public License version 2 or any later version.
7 7
8 use std::path::PathBuf;
9
10 8 use crate::repo::Repo;
11 use crate::revlog::changelog::Changelog;
12 use crate::revlog::manifest::Manifest;
13 use crate::revlog::path_encode::path_encode;
14 use crate::revlog::revlog::Revlog;
15 9 use crate::revlog::revlog::RevlogError;
16 10 use crate::revlog::Node;
17 use crate::utils::files::get_path_from_bytes;
18 use crate::utils::hg_path::{HgPath, HgPathBuf};
11
12 use crate::utils::hg_path::HgPath;
19 13
20 pub struct CatOutput {
14 use itertools::put_back;
15 use itertools::PutBack;
16 use std::cmp::Ordering;
17
18 pub struct CatOutput<'a> {
21 19 /// Whether any file in the manifest matched the paths given as CLI
22 20 /// arguments
23 21 pub found_any: bool,
24 22 /// The contents of matching files, in manifest order
25 pub concatenated: Vec<u8>,
23 pub results: Vec<(&'a HgPath, Vec<u8>)>,
26 24 /// Which of the CLI arguments did not match any manifest file
27 pub missing: Vec<HgPathBuf>,
25 pub missing: Vec<&'a HgPath>,
28 26 /// The node ID that the given revset was resolved to
29 27 pub node: Node,
30 28 }
31 29
32 const METADATA_DELIMITER: [u8; 2] = [b'\x01', b'\n'];
30 // Find an item in an iterator over a sorted collection.
31 fn find_item<'a, 'b, 'c, D, I: Iterator<Item = (&'a HgPath, D)>>(
32 i: &mut PutBack<I>,
33 needle: &'b HgPath,
34 ) -> Option<D> {
35 loop {
36 match i.next() {
37 None => return None,
38 Some(val) => match needle.as_bytes().cmp(val.0.as_bytes()) {
39 Ordering::Less => {
40 i.put_back(val);
41 return None;
42 }
43 Ordering::Greater => continue,
44 Ordering::Equal => return Some(val.1),
45 },
46 }
47 }
48 }
49
50 fn find_files_in_manifest<
51 'manifest,
52 'query,
53 Data,
54 Manifest: Iterator<Item = (&'manifest HgPath, Data)>,
55 Query: Iterator<Item = &'query HgPath>,
56 >(
57 manifest: Manifest,
58 query: Query,
59 ) -> (Vec<(&'query HgPath, Data)>, Vec<&'query HgPath>) {
60 let mut manifest = put_back(manifest);
61 let mut res = vec![];
62 let mut missing = vec![];
63
64 for file in query {
65 match find_item(&mut manifest, file) {
66 None => missing.push(file),
67 Some(item) => res.push((file, item)),
68 }
69 }
70 return (res, missing);
71 }
33 72
34 73 /// Output the given revision of files
35 74 ///
@@ -39,67 +78,38 b" const METADATA_DELIMITER: [u8; 2] = [b'\\"
39 78 pub fn cat<'a>(
40 79 repo: &Repo,
41 80 revset: &str,
42 files: &'a [HgPathBuf],
43 ) -> Result<CatOutput, RevlogError> {
81 mut files: Vec<&'a HgPath>,
82 ) -> Result<CatOutput<'a>, RevlogError> {
44 83 let rev = crate::revset::resolve_single(revset, repo)?;
45 let changelog = Changelog::open(repo)?;
46 let manifest = Manifest::open(repo)?;
47 let changelog_entry = changelog.get_rev(rev)?;
48 let node = *changelog
84 let manifest = repo.manifest_for_rev(rev)?;
85 let node = *repo
86 .changelog()?
49 87 .node_from_rev(rev)
50 .expect("should succeed when changelog.get_rev did");
51 let manifest_node =
52 Node::from_hex_for_repo(&changelog_entry.manifest_node()?)?;
53 let manifest_entry = manifest.get_node(manifest_node.into())?;
54 let mut bytes = vec![];
55 let mut matched = vec![false; files.len()];
88 .expect("should succeed when repo.manifest did");
89 let mut results: Vec<(&'a HgPath, Vec<u8>)> = vec![];
56 90 let mut found_any = false;
57 91
58 for (manifest_file, node_bytes) in manifest_entry.files_with_nodes() {
59 for (cat_file, is_matched) in files.iter().zip(&mut matched) {
60 if cat_file.as_bytes() == manifest_file.as_bytes() {
61 *is_matched = true;
62 found_any = true;
63 let index_path = store_path(manifest_file, b".i");
64 let data_path = store_path(manifest_file, b".d");
92 files.sort_unstable();
93
94 let (found, missing) = find_files_in_manifest(
95 manifest.files_with_nodes(),
96 files.into_iter().map(|f| f.as_ref()),
97 );
65 98
66 let file_log =
67 Revlog::open(repo, &index_path, Some(&data_path))?;
68 let file_node = Node::from_hex_for_repo(node_bytes)?;
69 let file_rev = file_log.get_node_rev(file_node.into())?;
70 let data = file_log.get_rev_data(file_rev)?;
71 if data.starts_with(&METADATA_DELIMITER) {
72 let end_delimiter_position = data
73 [METADATA_DELIMITER.len()..]
74 .windows(METADATA_DELIMITER.len())
75 .position(|bytes| bytes == METADATA_DELIMITER);
76 if let Some(position) = end_delimiter_position {
77 let offset = METADATA_DELIMITER.len() * 2;
78 bytes.extend(data[position + offset..].iter());
79 }
80 } else {
81 bytes.extend(data);
82 }
83 }
84 }
99 for (file_path, node_bytes) in found {
100 found_any = true;
101 let file_log = repo.filelog(file_path)?;
102 let file_node = Node::from_hex_for_repo(node_bytes)?;
103 results.push((
104 file_path,
105 file_log.data_for_node(file_node)?.into_data()?,
106 ));
85 107 }
86 108
87 let missing: Vec<_> = files
88 .iter()
89 .zip(&matched)
90 .filter(|pair| !*pair.1)
91 .map(|pair| pair.0.clone())
92 .collect();
93 109 Ok(CatOutput {
94 110 found_any,
95 concatenated: bytes,
111 results,
96 112 missing,
97 113 node,
98 114 })
99 115 }
100
101 fn store_path(hg_path: &HgPath, suffix: &[u8]) -> PathBuf {
102 let encoded_bytes =
103 path_encode(&[b"data/", hg_path.as_bytes(), suffix].concat());
104 get_path_from_bytes(&encoded_bytes).into()
105 }
@@ -9,9 +9,7 b' use crate::dirstate::parsers::parse_dirs'
9 9 use crate::dirstate_tree::on_disk::{for_each_tracked_path, read_docket};
10 10 use crate::errors::HgError;
11 11 use crate::repo::Repo;
12 use crate::revlog::changelog::Changelog;
13 use crate::revlog::manifest::{Manifest, ManifestEntry};
14 use crate::revlog::node::Node;
12 use crate::revlog::manifest::Manifest;
15 13 use crate::revlog::revlog::RevlogError;
16 14 use crate::utils::hg_path::HgPath;
17 15 use crate::DirstateError;
@@ -53,7 +51,7 b' impl Dirstate {'
53 51 let _parents = parse_dirstate_entries(
54 52 &self.content,
55 53 |path, entry, _copy_source| {
56 if entry.state.is_tracked() {
54 if entry.state().is_tracked() {
57 55 files.push(path)
58 56 }
59 57 Ok(())
@@ -72,16 +70,10 b' pub fn list_rev_tracked_files('
72 70 revset: &str,
73 71 ) -> Result<FilesForRev, RevlogError> {
74 72 let rev = crate::revset::resolve_single(revset, repo)?;
75 let changelog = Changelog::open(repo)?;
76 let manifest = Manifest::open(repo)?;
77 let changelog_entry = changelog.get_rev(rev)?;
78 let manifest_node =
79 Node::from_hex_for_repo(&changelog_entry.manifest_node()?)?;
80 let manifest_entry = manifest.get_node(manifest_node.into())?;
81 Ok(FilesForRev(manifest_entry))
73 Ok(FilesForRev(repo.manifest_for_rev(rev)?))
82 74 }
83 75
84 pub struct FilesForRev(ManifestEntry);
76 pub struct FilesForRev(Manifest);
85 77
86 78 impl FilesForRev {
87 79 pub fn iter(&self) -> impl Iterator<Item = &HgPath> {
@@ -4,7 +4,6 b''
4 4
5 5 mod cat;
6 6 mod debugdata;
7 mod dirstate_status;
8 7 mod list_tracked_files;
9 8 pub use cat::{cat, CatOutput};
10 9 pub use debugdata::{debug_data, DebugDataKind};
@@ -1,12 +1,22 b''
1 use crate::changelog::Changelog;
1 2 use crate::config::{Config, ConfigError, ConfigParseError};
2 use crate::errors::{HgError, IoErrorContext, IoResultExt};
3 use crate::dirstate::DirstateParents;
4 use crate::dirstate_tree::dirstate_map::DirstateMap;
5 use crate::dirstate_tree::owning::OwningDirstateMap;
6 use crate::errors::HgError;
7 use crate::errors::HgResultExt;
3 8 use crate::exit_codes;
4 use crate::requirements;
9 use crate::manifest::{Manifest, Manifestlog};
10 use crate::revlog::filelog::Filelog;
11 use crate::revlog::revlog::RevlogError;
5 12 use crate::utils::files::get_path_from_bytes;
13 use crate::utils::hg_path::HgPath;
6 14 use crate::utils::SliceExt;
7 use memmap::{Mmap, MmapOptions};
15 use crate::vfs::{is_dir, is_file, Vfs};
16 use crate::{requirements, NodePrefix};
17 use crate::{DirstateError, Revision};
18 use std::cell::{Cell, Ref, RefCell, RefMut};
8 19 use std::collections::HashSet;
9 use std::io::ErrorKind;
10 20 use std::path::{Path, PathBuf};
11 21
12 22 /// A repository on disk
@@ -16,6 +26,11 b' pub struct Repo {'
16 26 store: PathBuf,
17 27 requirements: HashSet<String>,
18 28 config: Config,
29 // None means not known/initialized yet
30 dirstate_parents: Cell<Option<DirstateParents>>,
31 dirstate_map: LazyCell<OwningDirstateMap, DirstateError>,
32 changelog: LazyCell<Changelog, HgError>,
33 manifestlog: LazyCell<Manifestlog, HgError>,
19 34 }
20 35
21 36 #[derive(Debug, derive_more::From)]
@@ -38,12 +53,6 b' impl From<ConfigError> for RepoError {'
38 53 }
39 54 }
40 55
41 /// Filesystem access abstraction for the contents of a given "base" diretory
42 #[derive(Clone, Copy)]
43 pub struct Vfs<'a> {
44 pub(crate) base: &'a Path,
45 }
46
47 56 impl Repo {
48 57 /// tries to find nearest repository root in current working directory or
49 58 /// its ancestors
@@ -127,7 +136,8 b' impl Repo {'
127 136 } else {
128 137 let bytes = hg_vfs.read("sharedpath")?;
129 138 let mut shared_path =
130 get_path_from_bytes(bytes.trim_end_newlines()).to_owned();
139 get_path_from_bytes(bytes.trim_end_matches(|b| b == b'\n'))
140 .to_owned();
131 141 if relative {
132 142 shared_path = dot_hg.join(shared_path)
133 143 }
@@ -192,6 +202,10 b' impl Repo {'
192 202 store: store_path,
193 203 dot_hg,
194 204 config: repo_config,
205 dirstate_parents: Cell::new(None),
206 dirstate_map: LazyCell::new(Self::new_dirstate_map),
207 changelog: LazyCell::new(Changelog::open),
208 manifestlog: LazyCell::new(Manifestlog::open),
195 209 };
196 210
197 211 requirements::check(&repo)?;
@@ -234,82 +248,162 b' impl Repo {'
234 248 .contains(requirements::DIRSTATE_V2_REQUIREMENT)
235 249 }
236 250
237 pub fn dirstate_parents(
238 &self,
239 ) -> Result<crate::dirstate::DirstateParents, HgError> {
240 let dirstate = self.hg_vfs().mmap_open("dirstate")?;
241 if dirstate.is_empty() {
242 return Ok(crate::dirstate::DirstateParents::NULL);
251 fn dirstate_file_contents(&self) -> Result<Vec<u8>, HgError> {
252 Ok(self
253 .hg_vfs()
254 .read("dirstate")
255 .io_not_found_as_none()?
256 .unwrap_or(Vec::new()))
257 }
258
259 pub fn dirstate_parents(&self) -> Result<DirstateParents, HgError> {
260 if let Some(parents) = self.dirstate_parents.get() {
261 return Ok(parents);
243 262 }
244 let parents = if self.has_dirstate_v2() {
263 let dirstate = self.dirstate_file_contents()?;
264 let parents = if dirstate.is_empty() {
265 DirstateParents::NULL
266 } else if self.has_dirstate_v2() {
245 267 crate::dirstate_tree::on_disk::read_docket(&dirstate)?.parents()
246 268 } else {
247 269 crate::dirstate::parsers::parse_dirstate_parents(&dirstate)?
248 270 .clone()
249 271 };
272 self.dirstate_parents.set(Some(parents));
250 273 Ok(parents)
251 274 }
275
276 fn new_dirstate_map(&self) -> Result<OwningDirstateMap, DirstateError> {
277 let dirstate_file_contents = self.dirstate_file_contents()?;
278 if dirstate_file_contents.is_empty() {
279 self.dirstate_parents.set(Some(DirstateParents::NULL));
280 Ok(OwningDirstateMap::new_empty(Vec::new()))
281 } else if self.has_dirstate_v2() {
282 let docket = crate::dirstate_tree::on_disk::read_docket(
283 &dirstate_file_contents,
284 )?;
285 self.dirstate_parents.set(Some(docket.parents()));
286 let data_size = docket.data_size();
287 let metadata = docket.tree_metadata();
288 let mut map = if let Some(data_mmap) = self
289 .hg_vfs()
290 .mmap_open(docket.data_filename())
291 .io_not_found_as_none()?
292 {
293 OwningDirstateMap::new_empty(data_mmap)
294 } else {
295 OwningDirstateMap::new_empty(Vec::new())
296 };
297 let (on_disk, placeholder) = map.get_pair_mut();
298 *placeholder = DirstateMap::new_v2(on_disk, data_size, metadata)?;
299 Ok(map)
300 } else {
301 let mut map = OwningDirstateMap::new_empty(dirstate_file_contents);
302 let (on_disk, placeholder) = map.get_pair_mut();
303 let (inner, parents) = DirstateMap::new_v1(on_disk)?;
304 self.dirstate_parents
305 .set(Some(parents.unwrap_or(DirstateParents::NULL)));
306 *placeholder = inner;
307 Ok(map)
308 }
309 }
310
311 pub fn dirstate_map(
312 &self,
313 ) -> Result<Ref<OwningDirstateMap>, DirstateError> {
314 self.dirstate_map.get_or_init(self)
315 }
316
317 pub fn dirstate_map_mut(
318 &self,
319 ) -> Result<RefMut<OwningDirstateMap>, DirstateError> {
320 self.dirstate_map.get_mut_or_init(self)
321 }
322
323 pub fn changelog(&self) -> Result<Ref<Changelog>, HgError> {
324 self.changelog.get_or_init(self)
325 }
326
327 pub fn changelog_mut(&self) -> Result<RefMut<Changelog>, HgError> {
328 self.changelog.get_mut_or_init(self)
329 }
330
331 pub fn manifestlog(&self) -> Result<Ref<Manifestlog>, HgError> {
332 self.manifestlog.get_or_init(self)
333 }
334
335 pub fn manifestlog_mut(&self) -> Result<RefMut<Manifestlog>, HgError> {
336 self.manifestlog.get_mut_or_init(self)
337 }
338
339 /// Returns the manifest of the *changeset* with the given node ID
340 pub fn manifest_for_node(
341 &self,
342 node: impl Into<NodePrefix>,
343 ) -> Result<Manifest, RevlogError> {
344 self.manifestlog()?.data_for_node(
345 self.changelog()?
346 .data_for_node(node.into())?
347 .manifest_node()?
348 .into(),
349 )
350 }
351
352 /// Returns the manifest of the *changeset* with the given revision number
353 pub fn manifest_for_rev(
354 &self,
355 revision: Revision,
356 ) -> Result<Manifest, RevlogError> {
357 self.manifestlog()?.data_for_node(
358 self.changelog()?
359 .data_for_rev(revision)?
360 .manifest_node()?
361 .into(),
362 )
363 }
364
365 pub fn filelog(&self, path: &HgPath) -> Result<Filelog, HgError> {
366 Filelog::open(self, path)
367 }
252 368 }
253 369
254 impl Vfs<'_> {
255 pub fn join(&self, relative_path: impl AsRef<Path>) -> PathBuf {
256 self.base.join(relative_path)
257 }
370 /// Lazily-initialized component of `Repo` with interior mutability
371 ///
372 /// This differs from `OnceCell` in that the value can still be "deinitialized"
373 /// later by setting its inner `Option` to `None`.
374 struct LazyCell<T, E> {
375 value: RefCell<Option<T>>,
376 // `Fn`s that don’t capture environment are zero-size, so this box does
377 // not allocate:
378 init: Box<dyn Fn(&Repo) -> Result<T, E>>,
379 }
258 380
259 pub fn read(
260 &self,
261 relative_path: impl AsRef<Path>,
262 ) -> Result<Vec<u8>, HgError> {
263 let path = self.join(relative_path);
264 std::fs::read(&path).when_reading_file(&path)
265 }
266
267 pub fn mmap_open(
268 &self,
269 relative_path: impl AsRef<Path>,
270 ) -> Result<Mmap, HgError> {
271 let path = self.base.join(relative_path);
272 let file = std::fs::File::open(&path).when_reading_file(&path)?;
273 // TODO: what are the safety requirements here?
274 let mmap = unsafe { MmapOptions::new().map(&file) }
275 .when_reading_file(&path)?;
276 Ok(mmap)
381 impl<T, E> LazyCell<T, E> {
382 fn new(init: impl Fn(&Repo) -> Result<T, E> + 'static) -> Self {
383 Self {
384 value: RefCell::new(None),
385 init: Box::new(init),
386 }
277 387 }
278 388
279 pub fn rename(
280 &self,
281 relative_from: impl AsRef<Path>,
282 relative_to: impl AsRef<Path>,
283 ) -> Result<(), HgError> {
284 let from = self.join(relative_from);
285 let to = self.join(relative_to);
286 std::fs::rename(&from, &to)
287 .with_context(|| IoErrorContext::RenamingFile { from, to })
389 fn get_or_init(&self, repo: &Repo) -> Result<Ref<T>, E> {
390 let mut borrowed = self.value.borrow();
391 if borrowed.is_none() {
392 drop(borrowed);
393 // Only use `borrow_mut` if it is really needed to avoid panic in
394 // case there is another outstanding borrow but mutation is not
395 // needed.
396 *self.value.borrow_mut() = Some((self.init)(repo)?);
397 borrowed = self.value.borrow()
398 }
399 Ok(Ref::map(borrowed, |option| option.as_ref().unwrap()))
400 }
401
402 pub fn get_mut_or_init(&self, repo: &Repo) -> Result<RefMut<T>, E> {
403 let mut borrowed = self.value.borrow_mut();
404 if borrowed.is_none() {
405 *borrowed = Some((self.init)(repo)?);
406 }
407 Ok(RefMut::map(borrowed, |option| option.as_mut().unwrap()))
288 408 }
289 409 }
290
291 fn fs_metadata(
292 path: impl AsRef<Path>,
293 ) -> Result<Option<std::fs::Metadata>, HgError> {
294 let path = path.as_ref();
295 match std::fs::metadata(path) {
296 Ok(meta) => Ok(Some(meta)),
297 Err(error) => match error.kind() {
298 // TODO: when we require a Rust version where `NotADirectory` is
299 // stable, invert this logic and return None for it and `NotFound`
300 // and propagate any other error.
301 ErrorKind::PermissionDenied => Err(error).with_context(|| {
302 IoErrorContext::ReadingMetadata(path.to_owned())
303 }),
304 _ => Ok(None),
305 },
306 }
307 }
308
309 fn is_dir(path: impl AsRef<Path>) -> Result<bool, HgError> {
310 Ok(fs_metadata(path)?.map_or(false, |meta| meta.is_dir()))
311 }
312
313 fn is_file(path: impl AsRef<Path>) -> Result<bool, HgError> {
314 Ok(fs_metadata(path)?.map_or(false, |meta| meta.is_file()))
315 }
@@ -1,6 +1,7 b''
1 1 use crate::errors::{HgError, HgResultExt};
2 use crate::repo::{Repo, Vfs};
2 use crate::repo::Repo;
3 3 use crate::utils::join_display;
4 use crate::vfs::Vfs;
4 5 use std::collections::HashSet;
5 6
6 7 fn parse(bytes: &[u8]) -> Result<HashSet<String>, HgError> {
@@ -91,7 +92,7 b' const SUPPORTED: &[&str] = &['
91 92
92 93 // Copied from mercurial/requirements.py:
93 94
94 pub(crate) const DIRSTATE_V2_REQUIREMENT: &str = "exp-dirstate-v2";
95 pub(crate) const DIRSTATE_V2_REQUIREMENT: &str = "dirstate-v2";
95 96
96 97 /// When narrowing is finalized and no longer subject to format changes,
97 98 /// we should move this to just "narrow" or similar.
@@ -11,6 +11,7 b' mod nodemap_docket;'
11 11 pub mod path_encode;
12 12 pub use node::{FromHexError, Node, NodePrefix};
13 13 pub mod changelog;
14 pub mod filelog;
14 15 pub mod index;
15 16 pub mod manifest;
16 17 pub mod patch;
@@ -1,5 +1,6 b''
1 1 use crate::errors::HgError;
2 2 use crate::repo::Repo;
3 use crate::revlog::node::NULL_NODE;
3 4 use crate::revlog::revlog::{Revlog, RevlogError};
4 5 use crate::revlog::Revision;
5 6 use crate::revlog::{Node, NodePrefix};
@@ -12,22 +13,22 b' pub struct Changelog {'
12 13
13 14 impl Changelog {
14 15 /// Open the `changelog` of a repository given by its root.
15 pub fn open(repo: &Repo) -> Result<Self, RevlogError> {
16 pub fn open(repo: &Repo) -> Result<Self, HgError> {
16 17 let revlog = Revlog::open(repo, "00changelog.i", None)?;
17 18 Ok(Self { revlog })
18 19 }
19 20
20 /// Return the `ChangelogEntry` a given node id.
21 pub fn get_node(
21 /// Return the `ChangelogEntry` for the given node ID.
22 pub fn data_for_node(
22 23 &self,
23 24 node: NodePrefix,
24 25 ) -> Result<ChangelogEntry, RevlogError> {
25 let rev = self.revlog.get_node_rev(node)?;
26 self.get_rev(rev)
26 let rev = self.revlog.rev_from_node(node)?;
27 self.data_for_rev(rev)
27 28 }
28 29
29 /// Return the `ChangelogEntry` of a given node revision.
30 pub fn get_rev(
30 /// Return the `ChangelogEntry` of the given revision number.
31 pub fn data_for_rev(
31 32 &self,
32 33 rev: Revision,
33 34 ) -> Result<ChangelogEntry, RevlogError> {
@@ -36,7 +37,7 b' impl Changelog {'
36 37 }
37 38
38 39 pub fn node_from_rev(&self, rev: Revision) -> Option<&Node> {
39 Some(self.revlog.index.get_entry(rev)?.hash())
40 self.revlog.node_from_rev(rev)
40 41 }
41 42 }
42 43
@@ -57,9 +58,10 b' impl ChangelogEntry {'
57 58
58 59 /// Return the node id of the `manifest` referenced by this `changelog`
59 60 /// entry.
60 pub fn manifest_node(&self) -> Result<&[u8], RevlogError> {
61 self.lines()
62 .next()
63 .ok_or_else(|| HgError::corrupted("empty changelog entry").into())
61 pub fn manifest_node(&self) -> Result<Node, HgError> {
62 match self.lines().next() {
63 None => Ok(NULL_NODE),
64 Some(x) => Node::from_hex_for_repo(x),
65 }
64 66 }
65 67 }
@@ -5,7 +5,6 b' use byteorder::{BigEndian, ByteOrder};'
5 5
6 6 use crate::errors::HgError;
7 7 use crate::revlog::node::Node;
8 use crate::revlog::revlog::RevlogError;
9 8 use crate::revlog::{Revision, NULL_REVISION};
10 9
11 10 pub const INDEX_ENTRY_SIZE: usize = 64;
@@ -23,7 +22,7 b' impl Index {'
23 22 /// Calculate the start of each entry when is_inline is true.
24 23 pub fn new(
25 24 bytes: Box<dyn Deref<Target = [u8]> + Send>,
26 ) -> Result<Self, RevlogError> {
25 ) -> Result<Self, HgError> {
27 26 if is_inline(&bytes) {
28 27 let mut offset: usize = 0;
29 28 let mut offsets = Vec::new();
@@ -58,7 +57,7 b' impl Index {'
58 57
59 58 /// Value of the inline flag.
60 59 pub fn is_inline(&self) -> bool {
61 is_inline(&self.bytes)
60 self.offsets.is_some()
62 61 }
63 62
64 63 /// Return a slice of bytes if `revlog` is inline. Panic if not.
@@ -209,6 +208,9 b" impl<'a> IndexEntry<'a> {"
209 208
210 209 /// Value of the inline flag.
211 210 pub fn is_inline(index_bytes: &[u8]) -> bool {
211 if index_bytes.len() < 4 {
212 return true;
213 }
212 214 match &index_bytes[0..=1] {
213 215 [0, 0] | [0, 2] => false,
214 216 _ => true,
@@ -1,48 +1,60 b''
1 use crate::errors::HgError;
1 2 use crate::repo::Repo;
2 3 use crate::revlog::revlog::{Revlog, RevlogError};
3 use crate::revlog::NodePrefix;
4 4 use crate::revlog::Revision;
5 use crate::revlog::{Node, NodePrefix};
5 6 use crate::utils::hg_path::HgPath;
6 7
7 8 /// A specialized `Revlog` to work with `manifest` data format.
8 pub struct Manifest {
9 pub struct Manifestlog {
9 10 /// The generic `revlog` format.
10 11 revlog: Revlog,
11 12 }
12 13
13 impl Manifest {
14 impl Manifestlog {
14 15 /// Open the `manifest` of a repository given by its root.
15 pub fn open(repo: &Repo) -> Result<Self, RevlogError> {
16 pub fn open(repo: &Repo) -> Result<Self, HgError> {
16 17 let revlog = Revlog::open(repo, "00manifest.i", None)?;
17 18 Ok(Self { revlog })
18 19 }
19 20
20 /// Return the `ManifestEntry` of a given node id.
21 pub fn get_node(
21 /// Return the `Manifest` for the given node ID.
22 ///
23 /// Note: this is a node ID in the manifestlog, typically found through
24 /// `ChangelogEntry::manifest_node`. It is *not* the node ID of any
25 /// changeset.
26 ///
27 /// See also `Repo::manifest_for_node`
28 pub fn data_for_node(
22 29 &self,
23 30 node: NodePrefix,
24 ) -> Result<ManifestEntry, RevlogError> {
25 let rev = self.revlog.get_node_rev(node)?;
26 self.get_rev(rev)
31 ) -> Result<Manifest, RevlogError> {
32 let rev = self.revlog.rev_from_node(node)?;
33 self.data_for_rev(rev)
27 34 }
28 35
29 /// Return the `ManifestEntry` of a given node revision.
30 pub fn get_rev(
36 /// Return the `Manifest` of a given revision number.
37 ///
38 /// Note: this is a revision number in the manifestlog, *not* of any
39 /// changeset.
40 ///
41 /// See also `Repo::manifest_for_rev`
42 pub fn data_for_rev(
31 43 &self,
32 44 rev: Revision,
33 ) -> Result<ManifestEntry, RevlogError> {
45 ) -> Result<Manifest, RevlogError> {
34 46 let bytes = self.revlog.get_rev_data(rev)?;
35 Ok(ManifestEntry { bytes })
47 Ok(Manifest { bytes })
36 48 }
37 49 }
38 50
39 /// `Manifest` entry which knows how to interpret the `manifest` data bytes.
51 /// `Manifestlog` entry which knows how to interpret the `manifest` data bytes.
40 52 #[derive(Debug)]
41 pub struct ManifestEntry {
53 pub struct Manifest {
42 54 bytes: Vec<u8>,
43 55 }
44 56
45 impl ManifestEntry {
57 impl Manifest {
46 58 /// Return an iterator over the lines of the entry.
47 59 pub fn lines(&self) -> impl Iterator<Item = &[u8]> {
48 60 self.bytes
@@ -73,4 +85,17 b' impl ManifestEntry {'
73 85 (HgPath::new(&line[..pos]), &line[hash_start..hash_end])
74 86 })
75 87 }
88
89 /// If the given path is in this manifest, return its filelog node ID
90 pub fn find_file(&self, path: &HgPath) -> Result<Option<Node>, HgError> {
91 // TODO: use binary search instead of linear scan. This may involve
92 // building (and caching) an index of the byte indicex of each manifest
93 // line.
94 for (manifest_path, node) in self.files_with_nodes() {
95 if manifest_path == path {
96 return Ok(Some(Node::from_hex_for_repo(node)?));
97 }
98 }
99 Ok(None)
100 }
76 101 }
@@ -1,10 +1,9 b''
1 1 use crate::errors::{HgError, HgResultExt};
2 2 use crate::requirements;
3 3 use bytes_cast::{unaligned, BytesCast};
4 use memmap::Mmap;
4 use memmap2::Mmap;
5 5 use std::path::{Path, PathBuf};
6 6
7 use super::revlog::RevlogError;
8 7 use crate::repo::Repo;
9 8 use crate::utils::strip_suffix;
10 9
@@ -38,7 +37,7 b' impl NodeMapDocket {'
38 37 pub fn read_from_file(
39 38 repo: &Repo,
40 39 index_path: &Path,
41 ) -> Result<Option<(Self, Mmap)>, RevlogError> {
40 ) -> Result<Option<(Self, Mmap)>, HgError> {
42 41 if !repo
43 42 .requirements()
44 43 .contains(requirements::NODEMAP_REQUIREMENT)
@@ -65,10 +64,9 b' impl NodeMapDocket {'
65 64 };
66 65
67 66 /// Treat any error as a parse error
68 fn parse<T, E>(result: Result<T, E>) -> Result<T, RevlogError> {
69 result.map_err(|_| {
70 HgError::corrupted("nodemap docket parse error").into()
71 })
67 fn parse<T, E>(result: Result<T, E>) -> Result<T, HgError> {
68 result
69 .map_err(|_| HgError::corrupted("nodemap docket parse error"))
72 70 }
73 71
74 72 let (header, rest) = parse(DocketHeader::from_bytes(input))?;
@@ -94,7 +92,7 b' impl NodeMapDocket {'
94 92 if mmap.len() >= data_length {
95 93 Ok(Some((docket, mmap)))
96 94 } else {
97 Err(HgError::corrupted("persistent nodemap too short").into())
95 Err(HgError::corrupted("persistent nodemap too short"))
98 96 }
99 97 } else {
100 98 // Even if .hg/requires opted in, some revlogs are deemed small
@@ -18,6 +18,7 b' use super::patch;'
18 18 use crate::errors::HgError;
19 19 use crate::repo::Repo;
20 20 use crate::revlog::Revision;
21 use crate::{Node, NULL_REVISION};
21 22
22 23 #[derive(derive_more::From)]
23 24 pub enum RevlogError {
@@ -50,7 +51,7 b' pub struct Revlog {'
50 51 /// When index and data are not interleaved: bytes of the revlog index.
51 52 /// When index and data are interleaved: bytes of the revlog index and
52 53 /// data.
53 pub(crate) index: Index,
54 index: Index,
54 55 /// When index and data are not interleaved: bytes of the revlog data
55 56 data_bytes: Option<Box<dyn Deref<Target = [u8]> + Send>>,
56 57 /// When present on disk: the persistent nodemap for this revlog
@@ -67,17 +68,24 b' impl Revlog {'
67 68 repo: &Repo,
68 69 index_path: impl AsRef<Path>,
69 70 data_path: Option<&Path>,
70 ) -> Result<Self, RevlogError> {
71 ) -> Result<Self, HgError> {
71 72 let index_path = index_path.as_ref();
72 let index_mmap = repo.store_vfs().mmap_open(&index_path)?;
73 let index = {
74 match repo.store_vfs().mmap_open_opt(&index_path)? {
75 None => Index::new(Box::new(vec![])),
76 Some(index_mmap) => {
77 let version = get_version(&index_mmap)?;
78 if version != 1 {
79 // A proper new version should have had a repo/store
80 // requirement.
81 return Err(HgError::corrupted("corrupted revlog"));
82 }
73 83
74 let version = get_version(&index_mmap);
75 if version != 1 {
76 // A proper new version should have had a repo/store requirement.
77 return Err(RevlogError::corrupted());
78 }
79
80 let index = Index::new(Box::new(index_mmap))?;
84 let index = Index::new(Box::new(index_mmap))?;
85 Ok(index)
86 }
87 }
88 }?;
81 89
82 90 let default_data_path = index_path.with_extension("d");
83 91
@@ -92,14 +100,18 b' impl Revlog {'
92 100 Some(Box::new(data_mmap))
93 101 };
94 102
95 let nodemap = NodeMapDocket::read_from_file(repo, index_path)?.map(
96 |(docket, data)| {
97 nodemap::NodeTree::load_bytes(
98 Box::new(data),
99 docket.data_length,
100 )
101 },
102 );
103 let nodemap = if index.is_inline() {
104 None
105 } else {
106 NodeMapDocket::read_from_file(repo, index_path)?.map(
107 |(docket, data)| {
108 nodemap::NodeTree::load_bytes(
109 Box::new(data),
110 docket.data_length,
111 )
112 },
113 )
114 };
103 115
104 116 Ok(Revlog {
105 117 index,
@@ -118,12 +130,26 b' impl Revlog {'
118 130 self.index.is_empty()
119 131 }
120 132
121 /// Return the full data associated to a node.
133 /// Returns the node ID for the given revision number, if it exists in this
134 /// revlog
135 pub fn node_from_rev(&self, rev: Revision) -> Option<&Node> {
136 if rev == NULL_REVISION {
137 return Some(&NULL_NODE);
138 }
139 Some(self.index.get_entry(rev)?.hash())
140 }
141
142 /// Return the revision number for the given node ID, if it exists in this
143 /// revlog
122 144 #[timed]
123 pub fn get_node_rev(
145 pub fn rev_from_node(
124 146 &self,
125 147 node: NodePrefix,
126 148 ) -> Result<Revision, RevlogError> {
149 if node.is_prefix_of(&NULL_NODE) {
150 return Ok(NULL_REVISION);
151 }
152
127 153 if let Some(nodemap) = &self.nodemap {
128 154 return nodemap
129 155 .find_bin(&self.index, node)?
@@ -167,6 +193,9 b' impl Revlog {'
167 193 /// snapshot to rebuild the final data.
168 194 #[timed]
169 195 pub fn get_rev_data(&self, rev: Revision) -> Result<Vec<u8>, RevlogError> {
196 if rev == NULL_REVISION {
197 return Ok(vec![]);
198 };
170 199 // Todo return -> Cow
171 200 let mut entry = self.get_entry(rev)?;
172 201 let mut delta_chain = vec![];
@@ -292,6 +321,10 b" pub struct RevlogEntry<'a> {"
292 321 }
293 322
294 323 impl<'a> RevlogEntry<'a> {
324 pub fn revision(&self) -> Revision {
325 self.rev
326 }
327
295 328 /// Extract the data contained in the entry.
296 329 pub fn data(&self) -> Result<Cow<'_, [u8]>, RevlogError> {
297 330 if self.bytes.is_empty() {
@@ -355,8 +388,16 b" impl<'a> RevlogEntry<'a> {"
355 388 }
356 389
357 390 /// Format version of the revlog.
358 pub fn get_version(index_bytes: &[u8]) -> u16 {
359 BigEndian::read_u16(&index_bytes[2..=3])
391 pub fn get_version(index_bytes: &[u8]) -> Result<u16, HgError> {
392 if index_bytes.len() == 0 {
393 return Ok(1);
394 };
395 if index_bytes.len() < 4 {
396 return Err(HgError::corrupted(
397 "corrupted revlog: can't read the index format header",
398 ));
399 };
400 Ok(BigEndian::read_u16(&index_bytes[2..=3]))
360 401 }
361 402
362 403 /// Calculate the hash of a revision given its data and its parents.
@@ -391,6 +432,6 b' mod tests {'
391 432 .with_version(1)
392 433 .build();
393 434
394 assert_eq!(get_version(&bytes), 1)
435 assert_eq!(get_version(&bytes).map_err(|_err| ()), Ok(1))
395 436 }
396 437 }
@@ -4,7 +4,6 b''
4 4
5 5 use crate::errors::HgError;
6 6 use crate::repo::Repo;
7 use crate::revlog::changelog::Changelog;
8 7 use crate::revlog::revlog::{Revlog, RevlogError};
9 8 use crate::revlog::NodePrefix;
10 9 use crate::revlog::{Revision, NULL_REVISION, WORKING_DIRECTORY_HEX};
@@ -17,23 +16,25 b' pub fn resolve_single('
17 16 input: &str,
18 17 repo: &Repo,
19 18 ) -> Result<Revision, RevlogError> {
20 let changelog = Changelog::open(repo)?;
19 let changelog = repo.changelog()?;
21 20
22 match resolve_rev_number_or_hex_prefix(input, &changelog.revlog) {
23 Err(RevlogError::InvalidRevision) => {} // Try other syntax
24 result => return result,
21 match input {
22 "." => {
23 let p1 = repo.dirstate_parents()?.p1;
24 return Ok(changelog.revlog.rev_from_node(p1.into())?);
25 }
26 "null" => return Ok(NULL_REVISION),
27 _ => {}
25 28 }
26 29
27 if input == "null" {
28 return Ok(NULL_REVISION);
30 match resolve_rev_number_or_hex_prefix(input, &changelog.revlog) {
31 Err(RevlogError::InvalidRevision) => {
32 // TODO: support for the rest of the language here.
33 let msg = format!("cannot parse revset '{}'", input);
34 Err(HgError::unsupported(msg).into())
35 }
36 result => return result,
29 37 }
30
31 // TODO: support for the rest of the language here.
32
33 Err(
34 HgError::unsupported(format!("cannot parse revset '{}'", input))
35 .into(),
36 )
37 38 }
38 39
39 40 /// Resolve the small subset of the language suitable for revlogs other than
@@ -46,8 +47,14 b' pub fn resolve_rev_number_or_hex_prefix('
46 47 input: &str,
47 48 revlog: &Revlog,
48 49 ) -> Result<Revision, RevlogError> {
50 // The Python equivalent of this is part of `revsymbol` in
51 // `mercurial/scmutil.py`
52
49 53 if let Ok(integer) = input.parse::<i32>() {
50 if integer >= 0 && revlog.has_rev(integer) {
54 if integer.to_string() == input
55 && integer >= 0
56 && revlog.has_rev(integer)
57 {
51 58 return Ok(integer);
52 59 }
53 60 }
@@ -56,7 +63,7 b' pub fn resolve_rev_number_or_hex_prefix('
56 63 {
57 64 return Err(RevlogError::WDirUnsupported);
58 65 }
59 return revlog.get_node_rev(prefix);
66 return revlog.rev_from_node(prefix);
60 67 }
61 68 Err(RevlogError::InvalidRevision)
62 69 }
@@ -67,36 +67,35 b' where'
67 67 }
68 68
69 69 pub trait SliceExt {
70 fn trim_end_newlines(&self) -> &Self;
71 70 fn trim_end(&self) -> &Self;
72 71 fn trim_start(&self) -> &Self;
72 fn trim_end_matches(&self, f: impl FnMut(u8) -> bool) -> &Self;
73 fn trim_start_matches(&self, f: impl FnMut(u8) -> bool) -> &Self;
73 74 fn trim(&self) -> &Self;
74 75 fn drop_prefix(&self, needle: &Self) -> Option<&Self>;
75 76 fn split_2(&self, separator: u8) -> Option<(&[u8], &[u8])>;
76 }
77
78 #[allow(clippy::trivially_copy_pass_by_ref)]
79 fn is_not_whitespace(c: &u8) -> bool {
80 !(*c as char).is_whitespace()
77 fn split_2_by_slice(&self, separator: &[u8]) -> Option<(&[u8], &[u8])>;
81 78 }
82 79
83 80 impl SliceExt for [u8] {
84 fn trim_end_newlines(&self) -> &[u8] {
85 if let Some(last) = self.iter().rposition(|&byte| byte != b'\n') {
81 fn trim_end(&self) -> &[u8] {
82 self.trim_end_matches(|byte| byte.is_ascii_whitespace())
83 }
84
85 fn trim_start(&self) -> &[u8] {
86 self.trim_start_matches(|byte| byte.is_ascii_whitespace())
87 }
88
89 fn trim_end_matches(&self, mut f: impl FnMut(u8) -> bool) -> &Self {
90 if let Some(last) = self.iter().rposition(|&byte| !f(byte)) {
86 91 &self[..=last]
87 92 } else {
88 93 &[]
89 94 }
90 95 }
91 fn trim_end(&self) -> &[u8] {
92 if let Some(last) = self.iter().rposition(is_not_whitespace) {
93 &self[..=last]
94 } else {
95 &[]
96 }
97 }
98 fn trim_start(&self) -> &[u8] {
99 if let Some(first) = self.iter().position(is_not_whitespace) {
96
97 fn trim_start_matches(&self, mut f: impl FnMut(u8) -> bool) -> &Self {
98 if let Some(first) = self.iter().position(|&byte| !f(byte)) {
100 99 &self[first..]
101 100 } else {
102 101 &[]
@@ -136,6 +135,14 b' impl SliceExt for [u8] {'
136 135 let b = iter.next()?;
137 136 Some((a, b))
138 137 }
138
139 fn split_2_by_slice(&self, separator: &[u8]) -> Option<(&[u8], &[u8])> {
140 if let Some(pos) = find_slice_in_slice(self, separator) {
141 Some((&self[..pos], &self[pos + separator.len()..]))
142 } else {
143 None
144 }
145 }
139 146 }
140 147
141 148 pub trait Escaped {
@@ -18,7 +18,6 b' use lazy_static::lazy_static;'
18 18 use same_file::is_same_file;
19 19 use std::borrow::{Cow, ToOwned};
20 20 use std::ffi::{OsStr, OsString};
21 use std::fs::Metadata;
22 21 use std::iter::FusedIterator;
23 22 use std::ops::Deref;
24 23 use std::path::{Path, PathBuf};
@@ -181,38 +180,6 b' pub fn lower_clean(bytes: &[u8]) -> Vec<'
181 180 hfs_ignore_clean(&bytes.to_ascii_lowercase())
182 181 }
183 182
184 #[derive(Eq, PartialEq, Ord, PartialOrd, Copy, Clone)]
185 pub struct HgMetadata {
186 pub st_dev: u64,
187 pub st_mode: u32,
188 pub st_nlink: u64,
189 pub st_size: u64,
190 pub st_mtime: i64,
191 pub st_ctime: i64,
192 }
193
194 // TODO support other plaforms
195 #[cfg(unix)]
196 impl HgMetadata {
197 pub fn from_metadata(metadata: Metadata) -> Self {
198 use std::os::unix::fs::MetadataExt;
199 Self {
200 st_dev: metadata.dev(),
201 st_mode: metadata.mode(),
202 st_nlink: metadata.nlink(),
203 st_size: metadata.size(),
204 st_mtime: metadata.mtime(),
205 st_ctime: metadata.ctime(),
206 }
207 }
208
209 pub fn is_symlink(&self) -> bool {
210 // This is way too manual, but `HgMetadata` will go away in the
211 // near-future dirstate rewrite anyway.
212 self.st_mode & 0170000 == 0120000
213 }
214 }
215
216 183 /// Returns the canonical path of `name`, given `cwd` and `root`
217 184 pub fn canonical_path(
218 185 root: impl AsRef<Path>,
@@ -220,13 +220,11 b' impl HgPath {'
220 220 ),
221 221 }
222 222 }
223 pub fn join<T: ?Sized + AsRef<Self>>(&self, other: &T) -> HgPathBuf {
224 let mut inner = self.inner.to_owned();
225 if !inner.is_empty() && inner.last() != Some(&b'/') {
226 inner.push(b'/');
227 }
228 inner.extend(other.as_ref().bytes());
229 HgPathBuf::from_bytes(&inner)
223
224 pub fn join(&self, path: &HgPath) -> HgPathBuf {
225 let mut buf = self.to_owned();
226 buf.push(path);
227 buf
230 228 }
231 229
232 230 pub fn components(&self) -> impl Iterator<Item = &HgPath> {
@@ -405,7 +403,15 b' impl HgPathBuf {'
405 403 pub fn new() -> Self {
406 404 Default::default()
407 405 }
408 pub fn push(&mut self, byte: u8) {
406
407 pub fn push<T: ?Sized + AsRef<HgPath>>(&mut self, other: &T) -> () {
408 if !self.inner.is_empty() && self.inner.last() != Some(&b'/') {
409 self.inner.push(b'/');
410 }
411 self.inner.extend(other.as_ref().bytes())
412 }
413
414 pub fn push_byte(&mut self, byte: u8) {
409 415 self.inner.push(byte);
410 416 }
411 417 pub fn from_bytes(s: &[u8]) -> HgPathBuf {
@@ -9,7 +9,7 b" name='rusthg'"
9 9 crate-type = ["cdylib"]
10 10
11 11 [features]
12 default = ["python27"]
12 default = ["python3"]
13 13
14 14 # Features to build an extension module:
15 15 python27 = ["cpython/python27-sys", "cpython/extension-module-2-7"]
@@ -21,12 +21,10 b' python27-bin = ["cpython/python27-sys"]'
21 21 python3-bin = ["cpython/python3-sys"]
22 22
23 23 [dependencies]
24 cpython = { version = "0.7.0", default-features = false }
24 25 crossbeam-channel = "0.4"
25 26 hg-core = { path = "../hg-core"}
26 libc = '*'
27 libc = "0.2"
27 28 log = "0.4.8"
28 29 env_logger = "0.7.1"
29
30 [dependencies.cpython]
31 version = "0.6.0"
32 default-features = false
30 stable_deref_trait = "1.2.0"
@@ -13,58 +13,7 b' use hg::copy_tracing::ChangedFiles;'
13 13 use hg::copy_tracing::CombineChangesetCopies;
14 14 use hg::Revision;
15 15
16 use self::pybytes_with_data::PyBytesWithData;
17
18 // Module to encapsulate private fields
19 mod pybytes_with_data {
20 use cpython::{PyBytes, Python};
21
22 /// Safe abstraction over a `PyBytes` together with the `&[u8]` slice
23 /// that borrows it.
24 ///
25 /// Calling `PyBytes::data` requires a GIL marker but we want to access the
26 /// data in a thread that (ideally) does not need to acquire the GIL.
27 /// This type allows separating the call an the use.
28 pub(super) struct PyBytesWithData {
29 #[allow(unused)]
30 keep_alive: PyBytes,
31
32 /// Borrows the buffer inside `self.keep_alive`,
33 /// but the borrow-checker cannot express self-referential structs.
34 data: *const [u8],
35 }
36
37 fn require_send<T: Send>() {}
38
39 #[allow(unused)]
40 fn static_assert_pybytes_is_send() {
41 require_send::<PyBytes>;
42 }
43
44 // Safety: PyBytes is Send. Raw pointers are not by default,
45 // but here sending one to another thread is fine since we ensure it stays
46 // valid.
47 unsafe impl Send for PyBytesWithData {}
48
49 impl PyBytesWithData {
50 pub fn new(py: Python, bytes: PyBytes) -> Self {
51 Self {
52 data: bytes.data(py),
53 keep_alive: bytes,
54 }
55 }
56
57 pub fn data(&self) -> &[u8] {
58 // Safety: the raw pointer is valid as long as the PyBytes is still
59 // alive, and the returned slice borrows `self`.
60 unsafe { &*self.data }
61 }
62
63 pub fn unwrap(self) -> PyBytes {
64 self.keep_alive
65 }
66 }
67 }
16 use crate::pybytes_deref::PyBytesDeref;
68 17
69 18 /// Combines copies information contained into revision `revs` to build a copy
70 19 /// map.
@@ -123,7 +72,7 b' pub fn combine_changeset_copies_wrapper('
123 72 //
124 73 // TODO: tweak the bound?
125 74 let (rev_info_sender, rev_info_receiver) =
126 crossbeam_channel::bounded::<RevInfo<PyBytesWithData>>(1000);
75 crossbeam_channel::bounded::<RevInfo<PyBytesDeref>>(1000);
127 76
128 77 // This channel (going the other way around) however is unbounded.
129 78 // If they were both bounded, there might potentially be deadlocks
@@ -143,7 +92,7 b' pub fn combine_changeset_copies_wrapper('
143 92 CombineChangesetCopies::new(children_count);
144 93 for (rev, p1, p2, opt_bytes) in rev_info_receiver {
145 94 let files = match &opt_bytes {
146 Some(raw) => ChangedFiles::new(raw.data()),
95 Some(raw) => ChangedFiles::new(raw.as_ref()),
147 96 // Python None was extracted to Option::None,
148 97 // meaning there was no copy data.
149 98 None => ChangedFiles::new_empty(),
@@ -169,7 +118,7 b' pub fn combine_changeset_copies_wrapper('
169 118
170 119 for rev_info in revs_info {
171 120 let (rev, p1, p2, opt_bytes) = rev_info?;
172 let opt_bytes = opt_bytes.map(|b| PyBytesWithData::new(py, b));
121 let opt_bytes = opt_bytes.map(|b| PyBytesDeref::new(py, b));
173 122
174 123 // We’d prefer to avoid the child thread calling into Python code,
175 124 // but this avoids a potential deadlock on the GIL if it does:
@@ -12,101 +12,17 b''
12 12 mod copymap;
13 13 mod dirs_multiset;
14 14 mod dirstate_map;
15 mod dispatch;
16 mod non_normal_entries;
17 mod owning;
15 mod item;
18 16 mod status;
17 use self::item::DirstateItem;
19 18 use crate::{
20 19 dirstate::{
21 20 dirs_multiset::Dirs, dirstate_map::DirstateMap, status::status_wrapper,
22 21 },
23 22 exceptions,
24 23 };
25 use cpython::{
26 exc, PyBytes, PyDict, PyErr, PyList, PyModule, PyObject, PyResult,
27 PySequence, Python,
28 };
24 use cpython::{PyBytes, PyDict, PyList, PyModule, PyObject, PyResult, Python};
29 25 use hg::dirstate_tree::on_disk::V2_FORMAT_MARKER;
30 use hg::{utils::hg_path::HgPathBuf, DirstateEntry, EntryState, StateMap};
31 use libc::{c_char, c_int};
32 use std::convert::TryFrom;
33
34 // C code uses a custom `dirstate_tuple` type, checks in multiple instances
35 // for this type, and raises a Python `Exception` if the check does not pass.
36 // Because this type differs only in name from the regular Python tuple, it
37 // would be a good idea in the near future to remove it entirely to allow
38 // for a pure Python tuple of the same effective structure to be used,
39 // rendering this type and the capsule below useless.
40 py_capsule_fn!(
41 from mercurial.cext.parsers import make_dirstate_item_CAPI
42 as make_dirstate_item_capi
43 signature (
44 state: c_char,
45 mode: c_int,
46 size: c_int,
47 mtime: c_int,
48 ) -> *mut RawPyObject
49 );
50
51 pub fn make_dirstate_item(
52 py: Python,
53 entry: &DirstateEntry,
54 ) -> PyResult<PyObject> {
55 let &DirstateEntry {
56 state,
57 mode,
58 size,
59 mtime,
60 } = entry;
61 // Explicitly go through u8 first, then cast to platform-specific `c_char`
62 // because Into<u8> has a specific implementation while `as c_char` would
63 // just do a naive enum cast.
64 let state_code: u8 = state.into();
65 make_dirstate_item_raw(py, state_code, mode, size, mtime)
66 }
67
68 pub fn make_dirstate_item_raw(
69 py: Python,
70 state: u8,
71 mode: i32,
72 size: i32,
73 mtime: i32,
74 ) -> PyResult<PyObject> {
75 let make = make_dirstate_item_capi::retrieve(py)?;
76 let maybe_obj = unsafe {
77 let ptr = make(state as c_char, mode, size, mtime);
78 PyObject::from_owned_ptr_opt(py, ptr)
79 };
80 maybe_obj.ok_or_else(|| PyErr::fetch(py))
81 }
82
83 pub fn extract_dirstate(py: Python, dmap: &PyDict) -> Result<StateMap, PyErr> {
84 dmap.items(py)
85 .iter()
86 .map(|(filename, stats)| {
87 let stats = stats.extract::<PySequence>(py)?;
88 let state = stats.get_item(py, 0)?.extract::<PyBytes>(py)?;
89 let state =
90 EntryState::try_from(state.data(py)[0]).map_err(|e| {
91 PyErr::new::<exc::ValueError, _>(py, e.to_string())
92 })?;
93 let mode = stats.get_item(py, 1)?.extract(py)?;
94 let size = stats.get_item(py, 2)?.extract(py)?;
95 let mtime = stats.get_item(py, 3)?.extract(py)?;
96 let filename = filename.extract::<PyBytes>(py)?;
97 let filename = filename.data(py);
98 Ok((
99 HgPathBuf::from(filename.to_owned()),
100 DirstateEntry {
101 state,
102 mode,
103 size,
104 mtime,
105 },
106 ))
107 })
108 .collect()
109 }
110 26
111 27 /// Create the module, with `__package__` given from parent
112 28 pub fn init_module(py: Python, package: &str) -> PyResult<PyModule> {
@@ -125,6 +41,7 b' pub fn init_module(py: Python, package: '
125 41 )?;
126 42 m.add_class::<Dirs>(py)?;
127 43 m.add_class::<DirstateMap>(py)?;
44 m.add_class::<DirstateItem>(py)?;
128 45 m.add(py, "V2_FORMAT_MARKER", PyBytes::new(py, V2_FORMAT_MARKER))?;
129 46 m.add(
130 47 py,
@@ -137,7 +54,7 b' pub fn init_module(py: Python, package: '
137 54 matcher: PyObject,
138 55 ignorefiles: PyList,
139 56 check_exec: bool,
140 last_normal_time: i64,
57 last_normal_time: (u32, u32),
141 58 list_clean: bool,
142 59 list_ignored: bool,
143 60 list_unknown: bool,
@@ -15,9 +15,9 b' use std::cell::RefCell;'
15 15
16 16 use crate::dirstate::dirstate_map::v2_error;
17 17 use crate::dirstate::dirstate_map::DirstateMap;
18 use hg::dirstate::CopyMapIter;
18 19 use hg::dirstate_tree::on_disk::DirstateV2ParseError;
19 20 use hg::utils::hg_path::HgPath;
20 use hg::CopyMapIter;
21 21
22 22 py_class!(pub class CopyMap |py| {
23 23 data dirstate_map: DirstateMap;
@@ -9,19 +9,15 b''
9 9 //! `hg-core` package.
10 10
11 11 use std::cell::RefCell;
12 use std::convert::TryInto;
13 12
14 13 use cpython::{
15 14 exc, ObjectProtocol, PyBytes, PyClone, PyDict, PyErr, PyObject, PyResult,
16 15 Python, UnsafePyLeaked,
17 16 };
18 17
19 use crate::dirstate::extract_dirstate;
20 18 use hg::{
21 errors::HgError,
22 19 utils::hg_path::{HgPath, HgPathBuf},
23 DirsMultiset, DirsMultisetIter, DirstateError, DirstateMapError,
24 EntryState,
20 DirsMultiset, DirsMultisetIter, DirstateMapError,
25 21 };
26 22
27 23 py_class!(pub class Dirs |py| {
@@ -32,25 +28,11 b' py_class!(pub class Dirs |py| {'
32 28 def __new__(
33 29 _cls,
34 30 map: PyObject,
35 skip: Option<PyObject> = None
36 31 ) -> PyResult<Self> {
37 let mut skip_state: Option<EntryState> = None;
38 if let Some(skip) = skip {
39 skip_state = Some(
40 skip.extract::<PyBytes>(py)?.data(py)[0]
41 .try_into()
42 .map_err(|e: HgError| {
43 PyErr::new::<exc::ValueError, _>(py, e.to_string())
44 })?,
45 );
46 }
47 let inner = if let Ok(map) = map.cast_as::<PyDict>(py) {
48 let dirstate = extract_dirstate(py, &map)?;
49 let dirstate = dirstate.iter().map(|(k, v)| Ok((k, *v)));
50 DirsMultiset::from_dirstate(dirstate, skip_state)
51 .map_err(|e: DirstateError| {
52 PyErr::new::<exc::ValueError, _>(py, e.to_string())
53 })?
32 let inner = if map.cast_as::<PyDict>(py).is_ok() {
33 let err = "pathutil.dirs() with a dict should only be used by the Python dirstatemap \
34 and should not be used when Rust is enabled";
35 return Err(PyErr::new::<exc::TypeError, _>(py, err.to_string()))
54 36 } else {
55 37 let map: Result<Vec<HgPathBuf>, PyErr> = map
56 38 .iter(py)?
@@ -12,32 +12,24 b' use std::cell::{RefCell, RefMut};'
12 12 use std::convert::TryInto;
13 13
14 14 use cpython::{
15 exc, ObjectProtocol, PyBool, PyBytes, PyClone, PyDict, PyErr, PyList,
16 PyObject, PyResult, PySet, PyString, Python, PythonObject, ToPyObject,
17 UnsafePyLeaked,
15 exc, PyBool, PyBytes, PyClone, PyDict, PyErr, PyList, PyNone, PyObject,
16 PyResult, Python, PythonObject, ToPyObject, UnsafePyLeaked,
18 17 };
19 18
20 19 use crate::{
21 20 dirstate::copymap::{CopyMap, CopyMapItemsIterator, CopyMapKeysIterator},
22 dirstate::make_dirstate_item,
23 dirstate::make_dirstate_item_raw,
24 dirstate::non_normal_entries::{
25 NonNormalEntries, NonNormalEntriesIterator,
26 },
27 dirstate::owning::OwningDirstateMap,
28 parsers::dirstate_parents_to_pytuple,
21 dirstate::item::{timestamp, DirstateItem},
22 pybytes_deref::PyBytesDeref,
29 23 };
30 24 use hg::{
31 dirstate::parsers::Timestamp,
32 dirstate::MTIME_UNSET,
33 dirstate::SIZE_NON_NORMAL,
34 dirstate_tree::dispatch::DirstateMapMethods,
25 dirstate::StateMapIter,
26 dirstate_tree::dirstate_map::DirstateMap as TreeDirstateMap,
35 27 dirstate_tree::on_disk::DirstateV2ParseError,
28 dirstate_tree::owning::OwningDirstateMap,
36 29 revlog::Node,
37 30 utils::files::normalize_case,
38 31 utils::hg_path::{HgPath, HgPathBuf},
39 DirstateEntry, DirstateError, DirstateMap as RustDirstateMap,
40 DirstateParents, EntryState, StateMapIter,
32 DirstateEntry, DirstateError, DirstateParents, EntryState,
41 33 };
42 34
43 35 // TODO
@@ -53,26 +45,26 b' use hg::{'
53 45 // All attributes also have to have a separate refcount data attribute for
54 46 // leaks, with all methods that go along for reference sharing.
55 47 py_class!(pub class DirstateMap |py| {
56 @shared data inner: Box<dyn DirstateMapMethods + Send>;
48 @shared data inner: OwningDirstateMap;
57 49
58 50 /// Returns a `(dirstate_map, parents)` tuple
59 51 @staticmethod
60 52 def new_v1(
61 use_dirstate_tree: bool,
62 53 on_disk: PyBytes,
63 54 ) -> PyResult<PyObject> {
64 let (inner, parents) = if use_dirstate_tree {
65 let (map, parents) = OwningDirstateMap::new_v1(py, on_disk)
66 .map_err(|e| dirstate_error(py, e))?;
67 (Box::new(map) as _, parents)
68 } else {
69 let bytes = on_disk.data(py);
70 let mut map = RustDirstateMap::default();
71 let parents = map.read(bytes).map_err(|e| dirstate_error(py, e))?;
72 (Box::new(map) as _, parents)
73 };
74 let map = Self::create_instance(py, inner)?;
75 let parents = parents.map(|p| dirstate_parents_to_pytuple(py, &p));
55 let on_disk = PyBytesDeref::new(py, on_disk);
56 let mut map = OwningDirstateMap::new_empty(on_disk);
57 let (on_disk, map_placeholder) = map.get_pair_mut();
58
59 let (actual_map, parents) = TreeDirstateMap::new_v1(on_disk)
60 .map_err(|e| dirstate_error(py, e))?;
61 *map_placeholder = actual_map;
62 let map = Self::create_instance(py, map)?;
63 let parents = parents.map(|p| {
64 let p1 = PyBytes::new(py, p.p1.as_bytes());
65 let p2 = PyBytes::new(py, p.p2.as_bytes());
66 (p1, p2)
67 });
76 68 Ok((map, parents).to_py_object(py).into_object())
77 69 }
78 70
@@ -86,10 +78,13 b' py_class!(pub class DirstateMap |py| {'
86 78 let dirstate_error = |e: DirstateError| {
87 79 PyErr::new::<exc::OSError, _>(py, format!("Dirstate error: {:?}", e))
88 80 };
89 let inner = OwningDirstateMap::new_v2(
90 py, on_disk, data_size, tree_metadata,
81 let on_disk = PyBytesDeref::new(py, on_disk);
82 let mut map = OwningDirstateMap::new_empty(on_disk);
83 let (on_disk, map_placeholder) = map.get_pair_mut();
84 *map_placeholder = TreeDirstateMap::new_v2(
85 on_disk, data_size, tree_metadata.data(py),
91 86 ).map_err(dirstate_error)?;
92 let map = Self::create_instance(py, Box::new(inner))?;
87 let map = Self::create_instance(py, map)?;
93 88 Ok(map.into_object())
94 89 }
95 90
@@ -111,79 +106,38 b' py_class!(pub class DirstateMap |py| {'
111 106 .map_err(|e| v2_error(py, e))?
112 107 {
113 108 Some(entry) => {
114 Ok(Some(make_dirstate_item(py, &entry)?))
109 Ok(Some(DirstateItem::new_as_pyobject(py, entry)?))
115 110 },
116 111 None => Ok(default)
117 112 }
118 113 }
119 114
120 def set_v1(&self, path: PyObject, item: PyObject) -> PyResult<PyObject> {
115 def set_dirstate_item(
116 &self,
117 path: PyObject,
118 item: DirstateItem
119 ) -> PyResult<PyObject> {
121 120 let f = path.extract::<PyBytes>(py)?;
122 121 let filename = HgPath::new(f.data(py));
123 let state = item.getattr(py, "state")?.extract::<PyBytes>(py)?;
124 let state = state.data(py)[0];
125 let entry = DirstateEntry {
126 state: state.try_into().expect("state is always valid"),
127 mtime: item.getattr(py, "mtime")?.extract(py)?,
128 size: item.getattr(py, "size")?.extract(py)?,
129 mode: item.getattr(py, "mode")?.extract(py)?,
130 };
131 self.inner(py).borrow_mut().set_v1(filename, entry);
122 self.inner(py)
123 .borrow_mut()
124 .set_entry(filename, item.get_entry(py))
125 .map_err(|e| v2_error(py, e))?;
132 126 Ok(py.None())
133 127 }
134 128
135 129 def addfile(
136 130 &self,
137 f: PyObject,
138 mode: PyObject,
139 size: PyObject,
140 mtime: PyObject,
141 added: PyObject,
142 merged: PyObject,
143 from_p2: PyObject,
144 possibly_dirty: PyObject,
145 ) -> PyResult<PyObject> {
146 let f = f.extract::<PyBytes>(py)?;
131 f: PyBytes,
132 item: DirstateItem,
133 ) -> PyResult<PyNone> {
147 134 let filename = HgPath::new(f.data(py));
148 let mode = if mode.is_none(py) {
149 // fallback default value
150 0
151 } else {
152 mode.extract(py)?
153 };
154 let size = if size.is_none(py) {
155 // fallback default value
156 SIZE_NON_NORMAL
157 } else {
158 size.extract(py)?
159 };
160 let mtime = if mtime.is_none(py) {
161 // fallback default value
162 MTIME_UNSET
163 } else {
164 mtime.extract(py)?
165 };
166 let entry = DirstateEntry {
167 // XXX Arbitrary default value since the value is determined later
168 state: EntryState::Normal,
169 mode: mode,
170 size: size,
171 mtime: mtime,
172 };
173 let added = added.extract::<PyBool>(py)?.is_true();
174 let merged = merged.extract::<PyBool>(py)?.is_true();
175 let from_p2 = from_p2.extract::<PyBool>(py)?.is_true();
176 let possibly_dirty = possibly_dirty.extract::<PyBool>(py)?.is_true();
177 self.inner(py).borrow_mut().add_file(
178 filename,
179 entry,
180 added,
181 merged,
182 from_p2,
183 possibly_dirty
184 ).and(Ok(py.None())).or_else(|e: DirstateError| {
185 Err(PyErr::new::<exc::ValueError, _>(py, e.to_string()))
186 })
135 let entry = item.get_entry(py);
136 self.inner(py)
137 .borrow_mut()
138 .add_file(filename, entry)
139 .map_err(|e |dirstate_error(py, e))?;
140 Ok(PyNone)
187 141 }
188 142
189 143 def removefile(
@@ -205,135 +159,15 b' py_class!(pub class DirstateMap |py| {'
205 159 Ok(py.None())
206 160 }
207 161
208 def dropfile(
209 &self,
210 f: PyObject,
211 ) -> PyResult<PyBool> {
212 self.inner(py).borrow_mut()
213 .drop_file(
214 HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
215 )
216 .and_then(|b| Ok(b.to_py_object(py)))
217 .or_else(|e| {
218 Err(PyErr::new::<exc::OSError, _>(
219 py,
220 format!("Dirstate error: {}", e.to_string()),
221 ))
222 })
223 }
224
225 def clearambiguoustimes(
162 def drop_item_and_copy_source(
226 163 &self,
227 files: PyObject,
228 now: PyObject
229 ) -> PyResult<PyObject> {
230 let files: PyResult<Vec<HgPathBuf>> = files
231 .iter(py)?
232 .map(|filename| {
233 Ok(HgPathBuf::from_bytes(
234 filename?.extract::<PyBytes>(py)?.data(py),
235 ))
236 })
237 .collect();
238 self.inner(py)
239 .borrow_mut()
240 .clear_ambiguous_times(files?, now.extract(py)?)
241 .map_err(|e| v2_error(py, e))?;
242 Ok(py.None())
243 }
244
245 def other_parent_entries(&self) -> PyResult<PyObject> {
246 let mut inner_shared = self.inner(py).borrow_mut();
247 let set = PySet::empty(py)?;
248 for path in inner_shared.iter_other_parent_paths() {
249 let path = path.map_err(|e| v2_error(py, e))?;
250 set.add(py, PyBytes::new(py, path.as_bytes()))?;
251 }
252 Ok(set.into_object())
253 }
254
255 def non_normal_entries(&self) -> PyResult<NonNormalEntries> {
256 NonNormalEntries::from_inner(py, self.clone_ref(py))
257 }
258
259 def non_normal_entries_contains(&self, key: PyObject) -> PyResult<bool> {
260 let key = key.extract::<PyBytes>(py)?;
164 f: PyBytes,
165 ) -> PyResult<PyNone> {
261 166 self.inner(py)
262 167 .borrow_mut()
263 .non_normal_entries_contains(HgPath::new(key.data(py)))
264 .map_err(|e| v2_error(py, e))
265 }
266
267 def non_normal_entries_display(&self) -> PyResult<PyString> {
268 let mut inner = self.inner(py).borrow_mut();
269 let paths = inner
270 .iter_non_normal_paths()
271 .collect::<Result<Vec<_>, _>>()
272 .map_err(|e| v2_error(py, e))?;
273 let formatted = format!("NonNormalEntries: {}", hg::utils::join_display(paths, ", "));
274 Ok(PyString::new(py, &formatted))
275 }
276
277 def non_normal_entries_remove(&self, key: PyObject) -> PyResult<PyObject> {
278 let key = key.extract::<PyBytes>(py)?;
279 let key = key.data(py);
280 let was_present = self
281 .inner(py)
282 .borrow_mut()
283 .non_normal_entries_remove(HgPath::new(key));
284 if !was_present {
285 let msg = String::from_utf8_lossy(key);
286 Err(PyErr::new::<exc::KeyError, _>(py, msg))
287 } else {
288 Ok(py.None())
289 }
290 }
291
292 def non_normal_entries_discard(&self, key: PyObject) -> PyResult<PyObject>
293 {
294 let key = key.extract::<PyBytes>(py)?;
295 self
296 .inner(py)
297 .borrow_mut()
298 .non_normal_entries_remove(HgPath::new(key.data(py)));
299 Ok(py.None())
300 }
301
302 def non_normal_entries_add(&self, key: PyObject) -> PyResult<PyObject> {
303 let key = key.extract::<PyBytes>(py)?;
304 self
305 .inner(py)
306 .borrow_mut()
307 .non_normal_entries_add(HgPath::new(key.data(py)));
308 Ok(py.None())
309 }
310
311 def non_normal_or_other_parent_paths(&self) -> PyResult<PyList> {
312 let mut inner = self.inner(py).borrow_mut();
313
314 let ret = PyList::new(py, &[]);
315 for filename in inner.non_normal_or_other_parent_paths() {
316 let filename = filename.map_err(|e| v2_error(py, e))?;
317 let as_pystring = PyBytes::new(py, filename.as_bytes());
318 ret.append(py, as_pystring.into_object());
319 }
320 Ok(ret)
321 }
322
323 def non_normal_entries_iter(&self) -> PyResult<NonNormalEntriesIterator> {
324 // Make sure the sets are defined before we no longer have a mutable
325 // reference to the dmap.
326 self.inner(py)
327 .borrow_mut()
328 .set_non_normal_other_parent_entries(false);
329
330 let leaked_ref = self.inner(py).leak_immutable();
331
332 NonNormalEntriesIterator::from_inner(py, unsafe {
333 leaked_ref.map(py, |o| {
334 o.iter_non_normal_paths_panic()
335 })
336 })
168 .drop_entry_and_copy_source(HgPath::new(f.data(py)))
169 .map_err(|e |dirstate_error(py, e))?;
170 Ok(PyNone)
337 171 }
338 172
339 173 def hastrackeddir(&self, d: PyObject) -> PyResult<PyBool> {
@@ -360,9 +194,9 b' py_class!(pub class DirstateMap |py| {'
360 194 &self,
361 195 p1: PyObject,
362 196 p2: PyObject,
363 now: PyObject
197 now: (u32, u32)
364 198 ) -> PyResult<PyBytes> {
365 let now = Timestamp(now.extract(py)?);
199 let now = timestamp(py, now)?;
366 200
367 201 let mut inner = self.inner(py).borrow_mut();
368 202 let parents = DirstateParents {
@@ -384,10 +218,10 b' py_class!(pub class DirstateMap |py| {'
384 218 /// instead of written to a new data file (False).
385 219 def write_v2(
386 220 &self,
387 now: PyObject,
221 now: (u32, u32),
388 222 can_append: bool,
389 223 ) -> PyResult<PyObject> {
390 let now = Timestamp(now.extract(py)?);
224 let now = timestamp(py, now)?;
391 225
392 226 let mut inner = self.inner(py).borrow_mut();
393 227 let result = inner.pack_v2(now, can_append);
@@ -409,7 +243,7 b' py_class!(pub class DirstateMap |py| {'
409 243 let dict = PyDict::new(py);
410 244 for item in self.inner(py).borrow_mut().iter() {
411 245 let (path, entry) = item.map_err(|e| v2_error(py, e))?;
412 if entry.state != EntryState::Removed {
246 if entry.state() != EntryState::Removed {
413 247 let key = normalize_case(path);
414 248 let value = path;
415 249 dict.set_item(
@@ -444,7 +278,7 b' py_class!(pub class DirstateMap |py| {'
444 278 .map_err(|e| v2_error(py, e))?
445 279 {
446 280 Some(entry) => {
447 Ok(make_dirstate_item(py, &entry)?)
281 Ok(DirstateItem::new_as_pyobject(py, entry)?)
448 282 },
449 283 None => Err(PyErr::new::<exc::KeyError, _>(
450 284 py,
@@ -566,7 +400,9 b' py_class!(pub class DirstateMap |py| {'
566 400 .copy_map_remove(HgPath::new(key.data(py)))
567 401 .map_err(|e| v2_error(py, e))?
568 402 {
569 Some(_) => Ok(None),
403 Some(copy) => Ok(Some(
404 PyBytes::new(py, copy.as_bytes()).into_object(),
405 )),
570 406 None => Ok(default),
571 407 }
572 408 }
@@ -599,14 +435,14 b' py_class!(pub class DirstateMap |py| {'
599 435 Ok(dirs)
600 436 }
601 437
602 def debug_iter(&self) -> PyResult<PyList> {
438 def debug_iter(&self, all: bool) -> PyResult<PyList> {
603 439 let dirs = PyList::new(py, &[]);
604 for item in self.inner(py).borrow().debug_iter() {
440 for item in self.inner(py).borrow().debug_iter(all) {
605 441 let (path, (state, mode, size, mtime)) =
606 442 item.map_err(|e| v2_error(py, e))?;
607 443 let path = PyBytes::new(py, path.as_bytes());
608 let item = make_dirstate_item_raw(py, state, mode, size, mtime)?;
609 dirs.append(py, (path, item).to_py_object(py).into_object())
444 let item = (path, state, mode, size, mtime);
445 dirs.append(py, item.to_py_object(py).into_object())
610 446 }
611 447 Ok(dirs)
612 448 }
@@ -616,7 +452,7 b' impl DirstateMap {'
616 452 pub fn get_inner_mut<'a>(
617 453 &'a self,
618 454 py: Python<'a>,
619 ) -> RefMut<'a, Box<dyn DirstateMapMethods + Send>> {
455 ) -> RefMut<'a, OwningDirstateMap> {
620 456 self.inner(py).borrow_mut()
621 457 }
622 458 fn translate_key(
@@ -633,7 +469,7 b' impl DirstateMap {'
633 469 let (f, entry) = res.map_err(|e| v2_error(py, e))?;
634 470 Ok(Some((
635 471 PyBytes::new(py, f.as_bytes()),
636 make_dirstate_item(py, &entry)?,
472 DirstateItem::new_as_pyobject(py, entry)?,
637 473 )))
638 474 }
639 475 }
@@ -9,6 +9,7 b''
9 9 //! `hg-core` crate. From Python, this will be seen as
10 10 //! `rustext.dirstate.status`.
11 11
12 use crate::dirstate::item::timestamp;
12 13 use crate::{dirstate::DirstateMap, exceptions::FallbackError};
13 14 use cpython::exc::OSError;
14 15 use cpython::{
@@ -102,12 +103,13 b' pub fn status_wrapper('
102 103 root_dir: PyObject,
103 104 ignore_files: PyList,
104 105 check_exec: bool,
105 last_normal_time: i64,
106 last_normal_time: (u32, u32),
106 107 list_clean: bool,
107 108 list_ignored: bool,
108 109 list_unknown: bool,
109 110 collect_traversed_dirs: bool,
110 111 ) -> PyResult<PyTuple> {
112 let last_normal_time = timestamp(py, last_normal_time)?;
111 113 let bytes = root_dir.extract::<PyBytes>(py)?;
112 114 let root_dir = get_path_from_bytes(bytes.data(py));
113 115
@@ -35,7 +35,7 b' pub mod debug;'
35 35 pub mod dirstate;
36 36 pub mod discovery;
37 37 pub mod exceptions;
38 pub mod parsers;
38 mod pybytes_deref;
39 39 pub mod revlog;
40 40 pub mod utils;
41 41
@@ -58,11 +58,6 b' py_module_initializer!(rustext, initrust'
58 58 m.add(py, "discovery", discovery::init_module(py, &dotted_name)?)?;
59 59 m.add(py, "dirstate", dirstate::init_module(py, &dotted_name)?)?;
60 60 m.add(py, "revlog", revlog::init_module(py, &dotted_name)?)?;
61 m.add(
62 py,
63 "parsers",
64 parsers::init_parsers_module(py, &dotted_name)?,
65 )?;
66 61 m.add(py, "GraphError", py.get_type::<exceptions::GraphError>())?;
67 62 Ok(())
68 63 });
@@ -12,23 +12,21 b' functionality.'
12 12
13 13 # Building
14 14
15 This project currently requires an unreleased version of PyOxidizer
16 (0.7.0-pre). For best results, build the exact PyOxidizer commit
17 as defined in the `pyoxidizer.bzl` file:
15 First, acquire and build a copy of PyOxidizer; you probably want to do this in
16 some directory outside of your clone of Mercurial:
18 17
19 18 $ git clone https://github.com/indygreg/PyOxidizer.git
20 19 $ cd PyOxidizer
21 $ git checkout <Git commit from pyoxidizer.bzl>
22 20 $ cargo build --release
23 21
24 Then build this Rust project using the built `pyoxidizer` executable::
22 Then build this Rust project using the built `pyoxidizer` executable:
25 23
26 $ /path/to/pyoxidizer/target/release/pyoxidizer build
24 $ /path/to/pyoxidizer/target/release/pyoxidizer build --release
27 25
28 26 If all goes according to plan, there should be an assembled application
29 under `build/<arch>/debug/app/` with an `hg` executable:
27 under `build/<arch>/release/app/` with an `hg` executable:
30 28
31 $ build/x86_64-unknown-linux-gnu/debug/app/hg version
29 $ build/x86_64-unknown-linux-gnu/release/app/hg version
32 30 Mercurial Distributed SCM (version 5.3.1+433-f99cd77d53dc+20200331)
33 31 (see https://mercurial-scm.org for more information)
34 32
@@ -46,5 +44,5 b" Python interpreter can't access them! To"
46 44 to the Mercurial source directory. e.g.:
47 45
48 46 $ cd /path/to/hg/src/tests
49 $ PYTHONPATH=`pwd`/.. python3.7 run-tests.py \
50 --with-hg `pwd`/../rust/hgcli/build/x86_64-unknown-linux-gnu/debug/app/hg
47 $ PYTHONPATH=`pwd`/.. python3.9 run-tests.py \
48 --with-hg `pwd`/../rust/hgcli/build/x86_64-unknown-linux-gnu/release/app/hg
@@ -24,7 +24,7 b''
24 24
25 25 ROOT = CWD + "/../.."
26 26
27 VERSION = VARS.get("VERSION", "5.8")
27 VERSION = VARS.get("VERSION", "0.0")
28 28 MSI_NAME = VARS.get("MSI_NAME", "mercurial")
29 29 EXTRA_MSI_FEATURES = VARS.get("EXTRA_MSI_FEATURES")
30 30 SIGNING_PFX_PATH = VARS.get("SIGNING_PFX_PATH")
@@ -34,6 +34,11 b' TIME_STAMP_SERVER_URL = VARS.get("TIME_S'
34 34
35 35 IS_WINDOWS = "windows" in BUILD_TARGET_TRIPLE
36 36
37 # Use in-memory resources for all resources. If false, most of the Python
38 # stdlib will be in memory, but other things such as Mercurial itself will not
39 # be. See the comment in resource_callback, below.
40 USE_IN_MEMORY_RESOURCES = not IS_WINDOWS
41
37 42 # Code to run in Python interpreter.
38 43 RUN_CODE = """
39 44 import os
@@ -57,6 +62,20 b" if os.name == 'nt':"
57 62 'site-packages',
58 63 )
59 64 )
65 elif sys.platform == "darwin":
66 vi = sys.version_info
67
68 def joinuser(*args):
69 return os.path.expanduser(os.path.join(*args))
70
71 # Note: site.py uses `sys._framework` instead of hardcoding "Python" as the
72 # 3rd arg, but that is set to an empty string in an oxidized binary. It
73 # has a fallback to ~/.local when `sys._framework` isn't set, but we want
74 # to match what the system python uses, so it sees pip installed stuff.
75 usersite = joinuser("~", "Library", "Python",
76 "%d.%d" % vi[:2], "lib/python/site-packages")
77
78 sys.path.append(usersite)
60 79 import hgdemandimport;
61 80 hgdemandimport.enable();
62 81 from mercurial import dispatch;
@@ -69,7 +88,7 b' def make_distribution():'
69 88 return default_python_distribution(python_version = "3.9")
70 89
71 90 def resource_callback(policy, resource):
72 if not IS_WINDOWS:
91 if USE_IN_MEMORY_RESOURCES:
73 92 resource.add_location = "in-memory"
74 93 return
75 94
@@ -100,7 +119,7 b' def make_exe(dist):'
100 119 # extensions.
101 120 packaging_policy.extension_module_filter = "all"
102 121 packaging_policy.resources_location = "in-memory"
103 if IS_WINDOWS:
122 if not USE_IN_MEMORY_RESOURCES:
104 123 packaging_policy.resources_location_fallback = "filesystem-relative:lib"
105 124 packaging_policy.register_resource_callback(resource_callback)
106 125
@@ -16,7 +16,7 b" pub fn args() -> clap::App<'static, 'sta"
16 16 Arg::with_name("rev")
17 17 .help("search the repository as it is in REV")
18 18 .short("-r")
19 .long("--revision")
19 .long("--rev")
20 20 .value_name("REV")
21 21 .takes_value(true),
22 22 )
@@ -26,13 +26,22 b" pub fn args() -> clap::App<'static, 'sta"
26 26 .multiple(true)
27 27 .empty_values(false)
28 28 .value_name("FILE")
29 .help("Activity to start: activity@category"),
29 .help("Files to output"),
30 30 )
31 31 .about(HELP_TEXT)
32 32 }
33 33
34 34 #[timed]
35 35 pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
36 let cat_enabled_default = true;
37 let cat_enabled = invocation.config.get_option(b"rhg", b"cat")?;
38 if !cat_enabled.unwrap_or(cat_enabled_default) {
39 return Err(CommandError::unsupported(
40 "cat is disabled in rhg (enable it with 'rhg.cat = true' \
41 or enable fallback with 'rhg.on-unsupported = fallback')",
42 ));
43 }
44
36 45 let rev = invocation.subcommand_args.value_of("rev");
37 46 let file_args = match invocation.subcommand_args.values_of("files") {
38 47 Some(files) => files.collect(),
@@ -46,8 +55,18 b' pub fn run(invocation: &crate::CliInvoca'
46 55
47 56 let mut files = vec![];
48 57 for file in file_args.iter() {
58 if file.starts_with("set:") {
59 let message = "fileset";
60 return Err(CommandError::unsupported(message));
61 }
62
63 let normalized = cwd.join(&file);
49 64 // TODO: actually normalize `..` path segments etc?
50 let normalized = cwd.join(&file);
65 let dotted = normalized.components().any(|c| c.as_os_str() == "..");
66 if file == &"." || dotted {
67 let message = "`..` or `.` path segment";
68 return Err(CommandError::unsupported(message));
69 }
51 70 let stripped = normalized
52 71 .strip_prefix(&working_directory)
53 72 // TODO: error message for path arguments outside of the repo
@@ -56,29 +75,31 b' pub fn run(invocation: &crate::CliInvoca'
56 75 .map_err(|e| CommandError::abort(e.to_string()))?;
57 76 files.push(hg_file);
58 77 }
78 let files = files.iter().map(|file| file.as_ref()).collect();
79 // TODO probably move this to a util function like `repo.default_rev` or
80 // something when it's used somewhere else
81 let rev = match rev {
82 Some(r) => r.to_string(),
83 None => format!("{:x}", repo.dirstate_parents()?.p1),
84 };
59 85
60 match rev {
61 Some(rev) => {
62 let output = cat(&repo, rev, &files).map_err(|e| (e, rev))?;
63 invocation.ui.write_stdout(&output.concatenated)?;
64 if !output.missing.is_empty() {
65 let short = format!("{:x}", output.node.short()).into_bytes();
66 for path in &output.missing {
67 invocation.ui.write_stderr(&format_bytes!(
68 b"{}: no such file in rev {}\n",
69 path.as_bytes(),
70 short
71 ))?;
72 }
73 }
74 if output.found_any {
75 Ok(())
76 } else {
77 Err(CommandError::Unsuccessful)
78 }
86 let output = cat(&repo, &rev, files).map_err(|e| (e, rev.as_str()))?;
87 for (_file, contents) in output.results {
88 invocation.ui.write_stdout(&contents)?;
89 }
90 if !output.missing.is_empty() {
91 let short = format!("{:x}", output.node.short()).into_bytes();
92 for path in &output.missing {
93 invocation.ui.write_stderr(&format_bytes!(
94 b"{}: no such file in rev {}\n",
95 path.as_bytes(),
96 short
97 ))?;
79 98 }
80 None => Err(CommandError::unsupported(
81 "`rhg cat` without `--rev` / `-r`",
82 )),
99 }
100 if output.found_any {
101 Ok(())
102 } else {
103 Err(CommandError::Unsuccessful)
83 104 }
84 105 }
@@ -1,12 +1,13 b''
1 1 use crate::error::CommandError;
2 2 use crate::ui::Ui;
3 use crate::ui::UiError;
4 use crate::utils::path_utils::relativize_paths;
3 5 use clap::Arg;
4 6 use hg::operations::list_rev_tracked_files;
5 7 use hg::operations::Dirstate;
6 8 use hg::repo::Repo;
7 use hg::utils::current_dir;
8 use hg::utils::files::{get_bytes_from_path, relativize_path};
9 use hg::utils::hg_path::{HgPath, HgPathBuf};
9 use hg::utils::hg_path::HgPath;
10 use std::borrow::Cow;
10 11
11 12 pub const HELP_TEXT: &str = "
12 13 List tracked files.
@@ -54,34 +55,13 b" fn display_files<'a>("
54 55 files: impl IntoIterator<Item = &'a HgPath>,
55 56 ) -> Result<(), CommandError> {
56 57 let mut stdout = ui.stdout_buffer();
57
58 let cwd = current_dir()?;
59 let working_directory = repo.working_directory_path();
60 let working_directory = cwd.join(working_directory); // Make it absolute
58 let mut any = false;
61 59
62 let mut any = false;
63 if let Ok(cwd_relative_to_repo) = cwd.strip_prefix(&working_directory) {
64 // The current directory is inside the repo, so we can work with
65 // relative paths
66 let cwd = HgPathBuf::from(get_bytes_from_path(cwd_relative_to_repo));
67 for file in files {
68 any = true;
69 stdout.write_all(relativize_path(&file, &cwd).as_ref())?;
70 stdout.write_all(b"\n")?;
71 }
72 } else {
73 let working_directory =
74 HgPathBuf::from(get_bytes_from_path(working_directory));
75 let cwd = HgPathBuf::from(get_bytes_from_path(cwd));
76 for file in files {
77 any = true;
78 // Absolute path in the filesystem
79 let file = working_directory.join(file);
80 stdout.write_all(relativize_path(&file, &cwd).as_ref())?;
81 stdout.write_all(b"\n")?;
82 }
83 }
84
60 relativize_paths(repo, files, |path: Cow<[u8]>| -> Result<(), UiError> {
61 any = true;
62 stdout.write_all(path.as_ref())?;
63 stdout.write_all(b"\n")
64 })?;
85 65 stdout.flush()?;
86 66 if any {
87 67 Ok(())
@@ -6,25 +6,20 b''
6 6 // GNU General Public License version 2 or any later version.
7 7
8 8 use crate::error::CommandError;
9 use crate::ui::Ui;
9 use crate::ui::{Ui, UiError};
10 use crate::utils::path_utils::relativize_paths;
10 11 use clap::{Arg, SubCommand};
11 12 use hg;
12 use hg::dirstate_tree::dirstate_map::DirstateMap;
13 use hg::dirstate_tree::on_disk;
14 use hg::errors::HgResultExt;
15 use hg::errors::IoResultExt;
13 use hg::config::Config;
14 use hg::dirstate::TruncatedTimestamp;
15 use hg::errors::HgError;
16 use hg::manifest::Manifest;
16 17 use hg::matchers::AlwaysMatcher;
17 use hg::operations::cat;
18 18 use hg::repo::Repo;
19 use hg::revlog::node::Node;
20 19 use hg::utils::hg_path::{hg_path_to_os_string, HgPath};
21 use hg::StatusError;
22 20 use hg::{HgPathCow, StatusOptions};
23 21 use log::{info, warn};
24 use std::convert::TryInto;
25 use std::fs;
26 use std::io::BufReader;
27 use std::io::Read;
22 use std::borrow::Cow;
28 23
29 24 pub const HELP_TEXT: &str = "
30 25 Show changed files in the working directory
@@ -142,7 +137,20 b' pub fn run(invocation: &crate::CliInvoca'
142 137 ));
143 138 }
144 139
140 // TODO: lift these limitations
141 if invocation.config.get_bool(b"ui", b"tweakdefaults").ok() == Some(true) {
142 return Err(CommandError::unsupported(
143 "ui.tweakdefaults is not yet supported with rhg status",
144 ));
145 }
146 if invocation.config.get_bool(b"ui", b"statuscopies").ok() == Some(true) {
147 return Err(CommandError::unsupported(
148 "ui.statuscopies is not yet supported with rhg status",
149 ));
150 }
151
145 152 let ui = invocation.ui;
153 let config = invocation.config;
146 154 let args = invocation.subcommand_args;
147 155 let display_states = if args.is_present("all") {
148 156 // TODO when implementing `--quiet`: it excludes clean files
@@ -166,47 +174,14 b' pub fn run(invocation: &crate::CliInvoca'
166 174 };
167 175
168 176 let repo = invocation.repo?;
169 let dirstate_data_mmap;
170 let (mut dmap, parents) = if repo.has_dirstate_v2() {
171 let docket_data =
172 repo.hg_vfs().read("dirstate").io_not_found_as_none()?;
173 let parents;
174 let dirstate_data;
175 let data_size;
176 let docket;
177 let tree_metadata;
178 if let Some(docket_data) = &docket_data {
179 docket = on_disk::read_docket(docket_data)?;
180 tree_metadata = docket.tree_metadata();
181 parents = Some(docket.parents());
182 data_size = docket.data_size();
183 dirstate_data_mmap = repo
184 .hg_vfs()
185 .mmap_open(docket.data_filename())
186 .io_not_found_as_none()?;
187 dirstate_data = dirstate_data_mmap.as_deref().unwrap_or(b"");
188 } else {
189 parents = None;
190 tree_metadata = b"";
191 data_size = 0;
192 dirstate_data = b"";
193 }
194 let dmap =
195 DirstateMap::new_v2(dirstate_data, data_size, tree_metadata)?;
196 (dmap, parents)
197 } else {
198 dirstate_data_mmap =
199 repo.hg_vfs().mmap_open("dirstate").io_not_found_as_none()?;
200 let dirstate_data = dirstate_data_mmap.as_deref().unwrap_or(b"");
201 DirstateMap::new_v1(dirstate_data)?
202 };
177 let mut dmap = repo.dirstate_map_mut()?;
203 178
204 179 let options = StatusOptions {
205 180 // TODO should be provided by the dirstate parsing and
206 181 // hence be stored on dmap. Using a value that assumes we aren't
207 182 // below the time resolution granularity of the FS and the
208 183 // dirstate.
209 last_normal_time: 0,
184 last_normal_time: TruncatedTimestamp::new_truncate(0, 0),
210 185 // we're currently supporting file systems with exec flags only
211 186 // anyway
212 187 check_exec: true,
@@ -216,8 +191,7 b' pub fn run(invocation: &crate::CliInvoca'
216 191 collect_traversed_dirs: false,
217 192 };
218 193 let ignore_file = repo.working_directory_vfs().join(".hgignore"); // TODO hardcoded
219 let (mut ds_status, pattern_warnings) = hg::dirstate_tree::status::status(
220 &mut dmap,
194 let (mut ds_status, pattern_warnings) = dmap.status(
221 195 &AlwaysMatcher,
222 196 repo.working_directory_path().to_owned(),
223 197 vec![ignore_file],
@@ -239,16 +213,12 b' pub fn run(invocation: &crate::CliInvoca'
239 213 if !ds_status.unsure.is_empty()
240 214 && (display_states.modified || display_states.clean)
241 215 {
242 let p1: Node = parents
243 .expect(
244 "Dirstate with no parents should not list any file to
245 be rechecked for modifications",
246 )
247 .p1
248 .into();
249 let p1_hex = format!("{:x}", p1);
216 let p1 = repo.dirstate_parents()?.p1;
217 let manifest = repo.manifest_for_node(p1).map_err(|e| {
218 CommandError::from((e, &*format!("{:x}", p1.short())))
219 })?;
250 220 for to_check in ds_status.unsure {
251 if cat_file_is_modified(repo, &to_check, &p1_hex)? {
221 if cat_file_is_modified(repo, &manifest, &to_check)? {
252 222 if display_states.modified {
253 223 ds_status.modified.push(to_check);
254 224 }
@@ -260,25 +230,25 b' pub fn run(invocation: &crate::CliInvoca'
260 230 }
261 231 }
262 232 if display_states.modified {
263 display_status_paths(ui, &mut ds_status.modified, b"M")?;
233 display_status_paths(ui, repo, config, &mut ds_status.modified, b"M")?;
264 234 }
265 235 if display_states.added {
266 display_status_paths(ui, &mut ds_status.added, b"A")?;
236 display_status_paths(ui, repo, config, &mut ds_status.added, b"A")?;
267 237 }
268 238 if display_states.removed {
269 display_status_paths(ui, &mut ds_status.removed, b"R")?;
239 display_status_paths(ui, repo, config, &mut ds_status.removed, b"R")?;
270 240 }
271 241 if display_states.deleted {
272 display_status_paths(ui, &mut ds_status.deleted, b"!")?;
242 display_status_paths(ui, repo, config, &mut ds_status.deleted, b"!")?;
273 243 }
274 244 if display_states.unknown {
275 display_status_paths(ui, &mut ds_status.unknown, b"?")?;
245 display_status_paths(ui, repo, config, &mut ds_status.unknown, b"?")?;
276 246 }
277 247 if display_states.ignored {
278 display_status_paths(ui, &mut ds_status.ignored, b"I")?;
248 display_status_paths(ui, repo, config, &mut ds_status.ignored, b"I")?;
279 249 }
280 250 if display_states.clean {
281 display_status_paths(ui, &mut ds_status.clean, b"C")?;
251 display_status_paths(ui, repo, config, &mut ds_status.clean, b"C")?;
282 252 }
283 253 Ok(())
284 254 }
@@ -287,16 +257,35 b' pub fn run(invocation: &crate::CliInvoca'
287 257 // harcode HgPathBuf, but probably not really useful at this point
288 258 fn display_status_paths(
289 259 ui: &Ui,
260 repo: &Repo,
261 config: &Config,
290 262 paths: &mut [HgPathCow],
291 263 status_prefix: &[u8],
292 264 ) -> Result<(), CommandError> {
293 265 paths.sort_unstable();
294 for path in paths {
295 // Same TODO as in commands::root
296 let bytes: &[u8] = path.as_bytes();
297 // TODO optim, probably lots of unneeded copies here, especially
298 // if out stream is buffered
299 ui.write_stdout(&[status_prefix, b" ", bytes, b"\n"].concat())?;
266 let mut relative: bool =
267 config.get_bool(b"ui", b"relative-paths").unwrap_or(false);
268 relative = config
269 .get_bool(b"commands", b"status.relative")
270 .unwrap_or(relative);
271 if relative && !ui.plain() {
272 relativize_paths(
273 repo,
274 paths,
275 |path: Cow<[u8]>| -> Result<(), UiError> {
276 ui.write_stdout(
277 &[status_prefix, b" ", path.as_ref(), b"\n"].concat(),
278 )
279 },
280 )?;
281 } else {
282 for path in paths {
283 // Same TODO as in commands::root
284 let bytes: &[u8] = path.as_bytes();
285 // TODO optim, probably lots of unneeded copies here, especially
286 // if out stream is buffered
287 ui.write_stdout(&[status_prefix, b" ", bytes, b"\n"].concat())?;
288 }
300 289 }
301 290 Ok(())
302 291 }
@@ -309,39 +298,19 b' fn display_status_paths('
309 298 /// TODO: detect permission bits and similar metadata modifications
310 299 fn cat_file_is_modified(
311 300 repo: &Repo,
301 manifest: &Manifest,
312 302 hg_path: &HgPath,
313 rev: &str,
314 ) -> Result<bool, CommandError> {
315 // TODO CatRev expects &[HgPathBuf], something like
316 // &[impl Deref<HgPath>] would be nicer and should avoid the copy
317 let path_bufs = [hg_path.into()];
318 // TODO IIUC CatRev returns a simple Vec<u8> for all files
319 // being able to tell them apart as (path, bytes) would be nicer
320 // and OPTIM would allow manifest resolution just once.
321 let output = cat(repo, rev, &path_bufs).map_err(|e| (e, rev))?;
303 ) -> Result<bool, HgError> {
304 let file_node = manifest
305 .find_file(hg_path)?
306 .expect("ambgious file not in p1");
307 let filelog = repo.filelog(hg_path)?;
308 let filelog_entry = filelog.data_for_node(file_node).map_err(|_| {
309 HgError::corrupted("filelog missing node from manifest")
310 })?;
311 let contents_in_p1 = filelog_entry.data()?;
322 312
323 let fs_path = repo
324 .working_directory_vfs()
325 .join(hg_path_to_os_string(hg_path).expect("HgPath conversion"));
326 let hg_data_len: u64 = match output.concatenated.len().try_into() {
327 Ok(v) => v,
328 Err(_) => {
329 // conversion of data length to u64 failed,
330 // good luck for any file to have this content
331 return Ok(true);
332 }
333 };
334 let fobj = fs::File::open(&fs_path).when_reading_file(&fs_path)?;
335 if fobj.metadata().map_err(|e| StatusError::from(e))?.len() != hg_data_len
336 {
337 return Ok(true);
338 }
339 for (fs_byte, hg_byte) in
340 BufReader::new(fobj).bytes().zip(output.concatenated)
341 {
342 if fs_byte.map_err(|e| StatusError::from(e))? != hg_byte {
343 return Ok(true);
344 }
345 }
346 Ok(false)
313 let fs_path = hg_path_to_os_string(hg_path).expect("HgPath conversion");
314 let fs_contents = repo.working_directory_vfs().read(fs_path)?;
315 return Ok(contents_in_p1 != &*fs_contents);
347 316 }
@@ -17,6 +17,9 b' use std::process::Command;'
17 17 mod blackbox;
18 18 mod error;
19 19 mod ui;
20 pub mod utils {
21 pub mod path_utils;
22 }
20 23 use error::CommandError;
21 24
22 25 fn main_with_result(
@@ -68,6 +71,25 b' fn main_with_result('
68 71 let matches = app.clone().get_matches_safe()?;
69 72
70 73 let (subcommand_name, subcommand_matches) = matches.subcommand();
74
75 // Mercurial allows users to define "defaults" for commands, fallback
76 // if a default is detected for the current command
77 let defaults = config.get_str(b"defaults", subcommand_name.as_bytes());
78 if defaults?.is_some() {
79 let msg = "`defaults` config set";
80 return Err(CommandError::unsupported(msg));
81 }
82
83 for prefix in ["pre", "post", "fail"].iter() {
84 // Mercurial allows users to define generic hooks for commands,
85 // fallback if any are detected
86 let item = format!("{}-{}", prefix, subcommand_name);
87 let hook_for_command = config.get_str(b"hooks", item.as_bytes())?;
88 if hook_for_command.is_some() {
89 let msg = format!("{}-{} hook defined", prefix, subcommand_name);
90 return Err(CommandError::unsupported(msg));
91 }
92 }
71 93 let run = subcommand_run_fn(subcommand_name)
72 94 .expect("unknown subcommand name from clap despite AppSettings::SubcommandRequired");
73 95 let subcommand_args = subcommand_matches
@@ -79,6 +101,15 b' fn main_with_result('
79 101 config,
80 102 repo,
81 103 };
104
105 if let Ok(repo) = repo {
106 // We don't support subrepos, fallback if the subrepos file is present
107 if repo.working_directory_vfs().join(".hgsub").exists() {
108 let msg = "subrepos (.hgsub is present)";
109 return Err(CommandError::unsupported(msg));
110 }
111 }
112
82 113 let blackbox = blackbox::Blackbox::new(&invocation, process_start_time)?;
83 114 blackbox.log_command_start();
84 115 let result = run(&invocation);
@@ -567,11 +598,10 b' fn check_extensions(config: &Config) -> '
567 598 unsupported.remove(supported);
568 599 }
569 600
570 if let Some(ignored_list) =
571 config.get_simple_list(b"rhg", b"ignored-extensions")
601 if let Some(ignored_list) = config.get_list(b"rhg", b"ignored-extensions")
572 602 {
573 603 for ignored in ignored_list {
574 unsupported.remove(ignored);
604 unsupported.remove(ignored.as_slice());
575 605 }
576 606 }
577 607
@@ -1,5 +1,6 b''
1 1 use format_bytes::format_bytes;
2 2 use std::borrow::Cow;
3 use std::env;
3 4 use std::io;
4 5 use std::io::{ErrorKind, Write};
5 6
@@ -49,6 +50,25 b' impl Ui {'
49 50
50 51 stderr.flush().or_else(handle_stderr_error)
51 52 }
53
54 /// is plain mode active
55 ///
56 /// Plain mode means that all configuration variables which affect
57 /// the behavior and output of Mercurial should be
58 /// ignored. Additionally, the output should be stable,
59 /// reproducible and suitable for use in scripts or applications.
60 ///
61 /// The only way to trigger plain mode is by setting either the
62 /// `HGPLAIN' or `HGPLAINEXCEPT' environment variables.
63 ///
64 /// The return value can either be
65 /// - False if HGPLAIN is not set, or feature is in HGPLAINEXCEPT
66 /// - False if feature is disabled by default and not included in HGPLAIN
67 /// - True otherwise
68 pub fn plain(&self) -> bool {
69 // TODO: add support for HGPLAINEXCEPT
70 env::var_os("HGPLAIN").is_some()
71 }
52 72 }
53 73
54 74 /// A buffered stdout writer for faster batch printing operations.
@@ -1428,12 +1428,9 b' class RustExtension(Extension):'
1428 1428
1429 1429 rusttargetdir = os.path.join('rust', 'target', 'release')
1430 1430
1431 def __init__(
1432 self, mpath, sources, rustlibname, subcrate, py3_features=None, **kw
1433 ):
1431 def __init__(self, mpath, sources, rustlibname, subcrate, **kw):
1434 1432 Extension.__init__(self, mpath, sources, **kw)
1435 1433 srcdir = self.rustsrcdir = os.path.join('rust', subcrate)
1436 self.py3_features = py3_features
1437 1434
1438 1435 # adding Rust source and control files to depends so that the extension
1439 1436 # gets rebuilt if they've changed
@@ -1481,9 +1478,11 b' class RustExtension(Extension):'
1481 1478
1482 1479 feature_flags = []
1483 1480
1484 if sys.version_info[0] == 3 and self.py3_features is not None:
1485 feature_flags.append(self.py3_features)
1486 cargocmd.append('--no-default-features')
1481 cargocmd.append('--no-default-features')
1482 if sys.version_info[0] == 2:
1483 feature_flags.append('python27')
1484 elif sys.version_info[0] == 3:
1485 feature_flags.append('python3')
1487 1486
1488 1487 rust_features = env.get("HG_RUST_FEATURES")
1489 1488 if rust_features:
@@ -1605,7 +1604,9 b' extmodules = ['
1605 1604 extra_compile_args=common_cflags,
1606 1605 ),
1607 1606 RustStandaloneExtension(
1608 'mercurial.rustext', 'hg-cpython', 'librusthg', py3_features='python3'
1607 'mercurial.rustext',
1608 'hg-cpython',
1609 'librusthg',
1609 1610 ),
1610 1611 ]
1611 1612
@@ -4,6 +4,7 b' from __future__ import absolute_import'
4 4
5 5 from mercurial import (
6 6 error,
7 logcmdutil,
7 8 patch,
8 9 pycompat,
9 10 registrar,
@@ -49,7 +50,7 b' def autodiff(ui, repo, *pats, **opts):'
49 50 else:
50 51 raise error.Abort(b'--git must be yes, no or auto')
51 52
52 ctx1, ctx2 = scmutil.revpair(repo, [])
53 ctx1, ctx2 = logcmdutil.revpair(repo, [])
53 54 m = scmutil.match(ctx2, pats, opts)
54 55 it = patch.diff(
55 56 repo,
@@ -15,6 +15,7 b' from mercurial import ('
15 15 policy,
16 16 registrar,
17 17 )
18 from mercurial.dirstateutils import timestamp
18 19 from mercurial.utils import dateutil
19 20
20 21 try:
@@ -34,15 +35,14 b' configitem('
34 35 )
35 36
36 37 parsers = policy.importmod('parsers')
37 rustmod = policy.importrust('parsers')
38 has_rust_dirstate = policy.importrust('dirstate') is not None
38 39
39 40
40 41 def pack_dirstate(fakenow, orig, dmap, copymap, pl, now):
41 42 # execute what original parsers.pack_dirstate should do actually
42 43 # for consistency
43 actualnow = int(now)
44 44 for f, e in dmap.items():
45 if e.need_delay(actualnow):
45 if e.need_delay(now):
46 46 e.set_possibly_dirty()
47 47
48 48 return orig(dmap, copymap, pl, fakenow)
@@ -62,8 +62,9 b' def fakewrite(ui, func):'
62 62 # parsing 'fakenow' in YYYYmmddHHMM format makes comparison between
63 63 # 'fakenow' value and 'touch -t YYYYmmddHHMM' argument easy
64 64 fakenow = dateutil.parsedate(fakenow, [b'%Y%m%d%H%M'])[0]
65 fakenow = timestamp.timestamp((fakenow, 0))
65 66
66 if rustmod is not None:
67 if has_rust_dirstate:
67 68 # The Rust implementation does not use public parse/pack dirstate
68 69 # to prevent conversion round-trips
69 70 orig_dirstatemap_write = dirstatemapmod.dirstatemap.write
@@ -85,7 +86,7 b' def fakewrite(ui, func):'
85 86 finally:
86 87 orig_module.pack_dirstate = orig_pack_dirstate
87 88 dirstate._getfsnow = orig_dirstate_getfsnow
88 if rustmod is not None:
89 if has_rust_dirstate:
89 90 dirstatemapmod.dirstatemap.write = orig_dirstatemap_write
90 91
91 92
@@ -14,8 +14,6 b' setupcommon() {'
14 14 cat >> $HGRCPATH << EOF
15 15 [extensions]
16 16 infinitepush=
17 [ui]
18 ssh = "$PYTHON" "$TESTDIR/dummyssh"
19 17 [infinitepush]
20 18 branchpattern=re:scratch/.*
21 19 EOF
@@ -1,8 +1,6 b''
1 1 cat >> $HGRCPATH <<EOF
2 2 [extensions]
3 3 narrow=
4 [ui]
5 ssh="$PYTHON" "$RUNTESTDIR/dummyssh"
6 4 [experimental]
7 5 changegroup3 = True
8 6 EOF
@@ -7,8 +7,6 b' debug=True'
7 7 remotefilelog=
8 8 rebase=
9 9 strip=
10 [ui]
11 ssh="$PYTHON" "$TESTDIR/dummyssh"
12 10 [server]
13 11 preferuncompressed=True
14 12 [experimental]
@@ -1554,6 +1554,8 b' class Test(unittest.TestCase):'
1554 1554 hgrc.write(b'merge = internal:merge\n')
1555 1555 hgrc.write(b'mergemarkers = detailed\n')
1556 1556 hgrc.write(b'promptecho = True\n')
1557 dummyssh = os.path.join(self._testdir, b'dummyssh')
1558 hgrc.write(b'ssh = "%s" "%s"\n' % (PYTHON, dummyssh))
1557 1559 hgrc.write(b'timeout.warn=15\n')
1558 1560 hgrc.write(b'[chgserver]\n')
1559 1561 hgrc.write(b'idletimeout=60\n')
@@ -665,20 +665,24 b' def issimplestorefile(f, kind, st):'
665 665
666 666
667 667 class simplestore(store.encodedstore):
668 def datafiles(self):
668 def datafiles(self, undecodable=None):
669 669 for x in super(simplestore, self).datafiles():
670 670 yield x
671 671
672 672 # Supplement with non-revlog files.
673 673 extrafiles = self._walk('data', True, filefilter=issimplestorefile)
674 674
675 for unencoded, encoded, size in extrafiles:
675 for f1, size in extrafiles:
676 676 try:
677 unencoded = store.decodefilename(unencoded)
677 f2 = store.decodefilename(f1)
678 678 except KeyError:
679 unencoded = None
679 if undecodable is None:
680 raise error.StorageError(b'undecodable revlog name %s' % f1)
681 else:
682 undecodable.append(f1)
683 continue
680 684
681 yield unencoded, encoded, size
685 yield f2, size
682 686
683 687
684 688 def reposetup(ui, repo):
@@ -131,13 +131,13 b' should all fail'
131 131
132 132 $ hg addremove -s foo
133 133 abort: similarity must be a number
134 [255]
134 [10]
135 135 $ hg addremove -s -1
136 136 abort: similarity must be between 0 and 100
137 [255]
137 [10]
138 138 $ hg addremove -s 1e6
139 139 abort: similarity must be between 0 and 100
140 [255]
140 [10]
141 141
142 142 $ cd ..
143 143
@@ -455,7 +455,7 b' missing file'
455 455
456 456 $ hg ann nosuchfile
457 457 abort: nosuchfile: no such file in rev e9e6b4fa872f
458 [255]
458 [10]
459 459
460 460 annotate file without '\n' on last line
461 461
@@ -6,7 +6,7 b' Create a repository:'
6 6 devel.all-warnings=true
7 7 devel.default-date=0 0
8 8 extensions.fsmonitor= (fsmonitor !)
9 format.exp-dirstate-v2=1 (dirstate-v2 !)
9 format.exp-rc-dirstate-v2=1 (dirstate-v2 !)
10 10 largefiles.usercache=$TESTTMP/.cache/largefiles
11 11 lfs.usercache=$TESTTMP/.cache/lfs
12 12 ui.slash=True
@@ -15,6 +15,7 b' Create a repository:'
15 15 ui.merge=internal:merge
16 16 ui.mergemarkers=detailed
17 17 ui.promptecho=True
18 ui.ssh=* (glob)
18 19 ui.timeout.warn=15
19 20 web.address=localhost
20 21 web\.ipv6=(?:True|False) (re)
@@ -54,12 +55,13 b' Writes to stdio succeed and fail appropr'
54 55 On Python 3, stdio may be None:
55 56
56 57 $ hg debuguiprompt --config ui.interactive=true 0<&-
57 abort: Bad file descriptor
58 abort: Bad file descriptor (no-rhg !)
59 abort: response expected (rhg !)
58 60 [255]
59 61 $ hg version -q 0<&-
60 62 Mercurial Distributed SCM * (glob)
61 63
62 #if py3
64 #if py3 no-rhg
63 65 $ hg version -q 1>&-
64 66 abort: Bad file descriptor
65 67 [255]
@@ -214,14 +214,11 b' class remotething(thing):'
214 214 mangle(two),
215 215 ),
216 216 ]
217 encoded_res_future = wireprotov1peer.future()
218 yield encoded_args, encoded_res_future
219 yield unmangle(encoded_res_future.value)
217 return encoded_args, unmangle
220 218
221 219 @wireprotov1peer.batchable
222 220 def bar(self, b, a):
223 encresref = wireprotov1peer.future()
224 yield [
221 return [
225 222 (
226 223 b'b',
227 224 mangle(b),
@@ -230,8 +227,7 b' class remotething(thing):'
230 227 b'a',
231 228 mangle(a),
232 229 ),
233 ], encresref
234 yield unmangle(encresref.value)
230 ], unmangle
235 231
236 232 # greet is coded directly. It therefore does not support batching. If it
237 233 # does appear in a batch, the batch is split around greet, and the call to
@@ -12,16 +12,6 b' The data from the bookmark file are filt'
12 12 node known to the changelog. If the cache invalidation between these two bits
13 13 goes wrong, bookmark can be dropped.
14 14
15 global setup
16 ------------
17
18 $ cat >> $HGRCPATH << EOF
19 > [ui]
20 > ssh = "$PYTHON" "$TESTDIR/dummyssh"
21 > [server]
22 > concurrent-push-mode=check-related
23 > EOF
24
25 15 Setup
26 16 -----
27 17
@@ -490,6 +490,65 b' divergent bookmarks'
490 490 Y 0:4e3505fd9583
491 491 Z 1:0d2164f0ce0d
492 492
493 mirroring bookmarks
494
495 $ hg book
496 @ 1:9b140be10808
497 @foo 2:0d2164f0ce0d
498 X 1:9b140be10808
499 X@foo 2:0d2164f0ce0d
500 Y 0:4e3505fd9583
501 Z 2:0d2164f0ce0d
502 foo -1:000000000000
503 * foobar 1:9b140be10808
504 $ cp .hg/bookmarks .hg/bookmarks.bak
505 $ hg book -d X
506 $ hg incoming --bookmark -v ../a
507 comparing with ../a
508 searching for changed bookmarks
509 @ 0d2164f0ce0d diverged
510 X 0d2164f0ce0d added
511 $ hg incoming --bookmark -v ../a --config 'paths.*:bookmarks.mode=babar'
512 (paths.*:bookmarks.mode has unknown value: "babar")
513 comparing with ../a
514 searching for changed bookmarks
515 @ 0d2164f0ce0d diverged
516 X 0d2164f0ce0d added
517 $ hg incoming --bookmark -v ../a --config 'paths.*:bookmarks.mode=mirror'
518 comparing with ../a
519 searching for changed bookmarks
520 @ 0d2164f0ce0d changed
521 @foo 000000000000 removed
522 X 0d2164f0ce0d added
523 X@foo 000000000000 removed
524 foo 000000000000 removed
525 foobar 000000000000 removed
526 $ hg incoming --bookmark -v ../a --config 'paths.*:bookmarks.mode=ignore'
527 comparing with ../a
528 bookmarks exchange disabled with this path
529 $ hg pull ../a --config 'paths.*:bookmarks.mode=ignore'
530 pulling from ../a
531 searching for changes
532 no changes found
533 $ hg book
534 @ 1:9b140be10808
535 @foo 2:0d2164f0ce0d
536 X@foo 2:0d2164f0ce0d
537 Y 0:4e3505fd9583
538 Z 2:0d2164f0ce0d
539 foo -1:000000000000
540 * foobar 1:9b140be10808
541 $ hg pull ../a --config 'paths.*:bookmarks.mode=mirror'
542 pulling from ../a
543 searching for changes
544 no changes found
545 $ hg book
546 @ 2:0d2164f0ce0d
547 X 2:0d2164f0ce0d
548 Y 0:4e3505fd9583
549 Z 2:0d2164f0ce0d
550 $ mv .hg/bookmarks.bak .hg/bookmarks
551
493 552 explicit pull should overwrite the local version (issue4439)
494 553
495 554 $ hg update -r X
@@ -1142,8 +1201,6 b' Check hook preventing push (issue4455)'
1142 1201 > local=../issue4455-dest/
1143 1202 > ssh=ssh://user@dummy/issue4455-dest
1144 1203 > http=http://localhost:$HGPORT/
1145 > [ui]
1146 > ssh="$PYTHON" "$TESTDIR/dummyssh"
1147 1204 > EOF
1148 1205 $ cat >> ../issue4455-dest/.hg/hgrc << EOF
1149 1206 > [hooks]
@@ -1270,7 +1327,6 b' Test that pre-pushkey compat for bookmar'
1270 1327
1271 1328 $ cat << EOF >> $HGRCPATH
1272 1329 > [ui]
1273 > ssh="$PYTHON" "$TESTDIR/dummyssh"
1274 1330 > [server]
1275 1331 > bookmarks-pushkey-compat = yes
1276 1332 > EOF
@@ -185,22 +185,22 b' but "literal:." is not since "." seems n'
185 185
186 186 $ hg log -r 'bookmark("literal:.")'
187 187 abort: bookmark '.' does not exist
188 [255]
188 [10]
189 189
190 190 "." should fail if there's no active bookmark:
191 191
192 192 $ hg bookmark --inactive
193 193 $ hg log -r 'bookmark(.)'
194 194 abort: no active bookmark
195 [255]
195 [10]
196 196 $ hg log -r 'present(bookmark(.))'
197 197
198 198 $ hg log -r 'bookmark(unknown)'
199 199 abort: bookmark 'unknown' does not exist
200 [255]
200 [10]
201 201 $ hg log -r 'bookmark("literal:unknown")'
202 202 abort: bookmark 'unknown' does not exist
203 [255]
203 [10]
204 204 $ hg log -r 'bookmark("re:unknown")'
205 205 $ hg log -r 'present(bookmark("literal:unknown"))'
206 206 $ hg log -r 'present(bookmark("re:unknown"))'
@@ -147,7 +147,7 b' Changing branch of an obsoleted changese'
147 147 $ hg branch -r 4 foobar
148 148 abort: hidden revision '4' was rewritten as: 7c1991464886
149 149 (use --hidden to access hidden revisions)
150 [255]
150 [10]
151 151
152 152 $ hg branch -r 4 --hidden foobar
153 153 abort: cannot change branch of 3938acfb5c0f, as that creates content-divergence with 7c1991464886
@@ -28,8 +28,6 b' enable obsolescence'
28 28 > evolution.createmarkers=True
29 29 > evolution.exchange=True
30 30 > bundle2-output-capture=True
31 > [ui]
32 > ssh="$PYTHON" "$TESTDIR/dummyssh"
33 31 > [command-templates]
34 32 > log={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
35 33 > [web]
@@ -922,10 +920,6 b' Check abort from mandatory pushkey'
922 920
923 921 Test lazily acquiring the lock during unbundle
924 922 $ cp $TESTTMP/hgrc.orig $HGRCPATH
925 $ cat >> $HGRCPATH <<EOF
926 > [ui]
927 > ssh="$PYTHON" "$TESTDIR/dummyssh"
928 > EOF
929 923
930 924 $ cat >> $TESTTMP/locktester.py <<EOF
931 925 > import os
@@ -233,8 +233,6 b' Create an extension to test bundle2 API'
233 233 > bundle2=$TESTTMP/bundle2.py
234 234 > [experimental]
235 235 > evolution.createmarkers=True
236 > [ui]
237 > ssh="$PYTHON" "$TESTDIR/dummyssh"
238 236 > [command-templates]
239 237 > log={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
240 238 > [web]
@@ -37,7 +37,6 b''
37 37
38 38 $ cat >> $HGRCPATH <<EOF
39 39 > [ui]
40 > ssh = "$PYTHON" "$TESTDIR/dummyssh"
41 40 > username = nobody <no.reply@example.com>
42 41 >
43 42 > [alias]
@@ -94,8 +94,6 b' Start a simple HTTP server to serve bund'
94 94 $ cat dumb.pid >> $DAEMON_PIDS
95 95
96 96 $ cat >> $HGRCPATH << EOF
97 > [ui]
98 > ssh="$PYTHON" "$TESTDIR/dummyssh"
99 97 > [command-templates]
100 98 > log={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
101 99 > EOF
@@ -3,7 +3,7 b''
3 3 $ . "$TESTDIR/helpers-testrepo.sh"
4 4
5 5 $ cd "$TESTDIR"/..
6 $ RUSTFMT=$(rustup which --toolchain nightly-2020-10-04 rustfmt)
6 $ RUSTFMT=$(rustup which --toolchain nightly-2021-11-02 rustfmt)
7 7 $ for f in `testrepohg files 'glob:**/*.rs'` ; do
8 8 > $RUSTFMT --check --edition=2018 --unstable-features --color=never $f
9 9 > done
1 NO CONTENT: file renamed from tests/test-clone-uncompressed.t to tests/test-clone-stream.t
@@ -1125,7 +1125,7 b" Test that auto sharing doesn't cause fai"
1125 1125 $ hg id -R remote -r 0
1126 1126 abort: repository remote not found
1127 1127 [255]
1128 $ hg --config share.pool=share -q clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" a ssh://user@dummy/remote
1128 $ hg --config share.pool=share -q clone a ssh://user@dummy/remote
1129 1129 $ hg -R remote id -r 0
1130 1130 acb14030fe0a
1131 1131
@@ -208,7 +208,7 b' by old clients.'
208 208
209 209 Feature works over SSH
210 210
211 $ hg clone -U -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/server ssh-full-clone
211 $ hg clone -U ssh://user@dummy/server ssh-full-clone
212 212 applying clone bundle from http://localhost:$HGPORT1/full.hg
213 213 adding changesets
214 214 adding manifests
@@ -101,7 +101,7 b' typical client does not want echo-back m'
101 101 000000000000 tip
102 102 *** runcommand id -runknown
103 103 abort: unknown revision 'unknown'
104 [255]
104 [10]
105 105
106 106 >>> from hgclient import bprint, check, readchannel
107 107 >>> @check
@@ -218,7 +218,7 b' check that local configs for the cached '
218 218 devel.all-warnings=true
219 219 devel.default-date=0 0
220 220 extensions.fsmonitor= (fsmonitor !)
221 format.exp-dirstate-v2=1 (dirstate-v2 !)
221 format.exp-rc-dirstate-v2=1 (dirstate-v2 !)
222 222 largefiles.usercache=$TESTTMP/.cache/largefiles
223 223 lfs.usercache=$TESTTMP/.cache/lfs
224 224 ui.slash=True
@@ -226,6 +226,7 b' check that local configs for the cached '
226 226 ui.detailed-exit-code=True
227 227 ui.merge=internal:merge
228 228 ui.mergemarkers=detailed
229 ui.ssh=* (glob)
229 230 ui.timeout.warn=15
230 231 ui.foo=bar
231 232 ui.nontty=true
@@ -239,6 +240,7 b' check that local configs for the cached '
239 240 ui.detailed-exit-code=True
240 241 ui.merge=internal:merge
241 242 ui.mergemarkers=detailed
243 ui.ssh=* (glob)
242 244 ui.timeout.warn=15
243 245 ui.nontty=true
244 246 #endif
@@ -316,7 +316,7 b' Show all commands + options'
316 316 debugpushkey:
317 317 debugpvec:
318 318 debugrebuilddirstate: rev, minimal
319 debugrebuildfncache:
319 debugrebuildfncache: only-data
320 320 debugrename: rev
321 321 debugrequires:
322 322 debugrevlog: changelog, manifest, dir, dump
@@ -413,7 +413,7 b' Listing all config options'
413 413
414 414 The feature is experimental and behavior may varies. This test exists to make sure the code is run. We grep it to avoid too much variability in its current experimental state.
415 415
416 $ hg config --exp-all-known | grep commit
416 $ hg config --exp-all-known | grep commit | grep -v ssh
417 417 commands.commit.interactive.git=False
418 418 commands.commit.interactive.ignoreblanklines=False
419 419 commands.commit.interactive.ignorews=False
@@ -50,7 +50,7 b' Remove the directory, then try to replac'
50 50 $ echo a >> a
51 51 $ commit -a -m t4.2
52 52 $ git checkout master >/dev/null 2>/dev/null
53 $ git pull --no-commit . other > /dev/null 2>/dev/null
53 $ git pull --no-commit . other --no-rebase > /dev/null 2>/dev/null
54 54 $ commit -m 'Merge branch other'
55 55 $ cd ..
56 56 $ hg convert --config extensions.progress= --config progress.assume-tty=1 \
@@ -137,7 +137,7 b' Remove the directory, then try to replac'
137 137 $ git add baz
138 138 $ commit -a -m 'add baz'
139 139 $ git checkout master >/dev/null 2>/dev/null
140 $ git pull --no-commit . Bar Baz > /dev/null 2>/dev/null
140 $ git pull --no-commit . Bar Baz --no-rebase > /dev/null 2>/dev/null
141 141 $ commit -m 'Octopus merge'
142 142 $ echo bar >> bar
143 143 $ commit -a -m 'change bar'
@@ -145,7 +145,7 b' Remove the directory, then try to replac'
145 145 $ echo >> foo
146 146 $ commit -a -m 'change foo'
147 147 $ git checkout master >/dev/null 2>/dev/null
148 $ git pull --no-commit -s ours . Foo > /dev/null 2>/dev/null
148 $ git pull --no-commit -s ours . Foo --no-rebase > /dev/null 2>/dev/null
149 149 $ commit -m 'Discard change to foo'
150 150 $ cd ..
151 151 $ glog()
@@ -644,14 +644,13 b' Test debugcapabilities command:'
644 644
645 645 Test debugpeer
646 646
647 $ hg --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" debugpeer ssh://user@dummy/debugrevlog
647 $ hg debugpeer ssh://user@dummy/debugrevlog
648 648 url: ssh://user@dummy/debugrevlog
649 649 local: no
650 650 pushable: yes
651 651
652 $ hg --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" --debug debugpeer ssh://user@dummy/debugrevlog
653 running "*" "*/tests/dummyssh" 'user@dummy' 'hg -R debugrevlog serve --stdio' (glob) (no-windows !)
654 running "*" "*\tests/dummyssh" "user@dummy" "hg -R debugrevlog serve --stdio" (glob) (windows !)
652 $ hg --debug debugpeer ssh://user@dummy/debugrevlog
653 running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R debugrevlog serve --stdio['"] (re)
655 654 devel-peer-request: hello+between
656 655 devel-peer-request: pairs: 81 bytes
657 656 sending hello command
@@ -119,7 +119,7 b' as pairs even if x == y, but not for "f('
119 119 +wdir
120 120 $ hg diff -r "2 and 1"
121 121 abort: empty revision range
122 [255]
122 [10]
123 123
124 124 $ cd ..
125 125
@@ -42,7 +42,7 b' Testing with rev number'
42 42 $ hg exp 2 --config experimental.directaccess.revnums=False
43 43 abort: hidden revision '2' was rewritten as: 2443a0e66469
44 44 (use --hidden to access hidden revisions)
45 [255]
45 [10]
46 46
47 47 $ hg exp 2
48 48 # HG changeset patch
@@ -75,7 +75,7 b' Testing with rev number'
75 75 $ hg status --change 2 --config experimental.directaccess.revnums=False
76 76 abort: hidden revision '2' was rewritten as: 2443a0e66469
77 77 (use --hidden to access hidden revisions)
78 [255]
78 [10]
79 79
80 80 $ hg diff -c 2
81 81 diff -r 29becc82797a -r 28ad74487de9 c
@@ -197,12 +197,12 b' Commands with undefined intent should no'
197 197 $ hg phase -r 28ad74
198 198 abort: hidden revision '28ad74' was rewritten as: 2443a0e66469
199 199 (use --hidden to access hidden revisions)
200 [255]
200 [10]
201 201
202 202 $ hg phase -r 2
203 203 abort: hidden revision '2' was rewritten as: 2443a0e66469
204 204 (use --hidden to access hidden revisions)
205 [255]
205 [10]
206 206
207 207 Setting a bookmark will make that changeset unhidden, so this should come in end
208 208
@@ -13,13 +13,13 b' class dirstests(unittest.TestCase):'
13 13 (b'a/a/a', [b'a', b'a/a', b'']),
14 14 (b'alpha/beta/gamma', [b'', b'alpha', b'alpha/beta']),
15 15 ]:
16 d = pathutil.dirs({})
16 d = pathutil.dirs([])
17 17 d.addpath(case)
18 18 self.assertEqual(sorted(d), sorted(want))
19 19
20 20 def testinvalid(self):
21 21 with self.assertRaises(ValueError):
22 d = pathutil.dirs({})
22 d = pathutil.dirs([])
23 23 d.addpath(b'a//b')
24 24
25 25
@@ -1,15 +1,12 b''
1 #testcases dirstate-v1 dirstate-v1-tree dirstate-v2
2
3 #if dirstate-v1-tree
4 #require rust
5 $ echo '[experimental]' >> $HGRCPATH
6 $ echo 'dirstate-tree.in-memory=1' >> $HGRCPATH
7 #endif
1 #testcases dirstate-v1 dirstate-v2
8 2
9 3 #if dirstate-v2
10 #require rust
11 $ echo '[format]' >> $HGRCPATH
12 $ echo 'exp-dirstate-v2=1' >> $HGRCPATH
4 $ cat >> $HGRCPATH << EOF
5 > [format]
6 > exp-rc-dirstate-v2=1
7 > [storage]
8 > dirstate-v2.slow-path=allow
9 > EOF
13 10 #endif
14 11
15 12 $ hg init repo
@@ -1,15 +1,12 b''
1 #testcases dirstate-v1 dirstate-v1-tree dirstate-v2
2
3 #if dirstate-v1-tree
4 #require rust
5 $ echo '[experimental]' >> $HGRCPATH
6 $ echo 'dirstate-tree.in-memory=1' >> $HGRCPATH
7 #endif
1 #testcases dirstate-v1 dirstate-v2
8 2
9 3 #if dirstate-v2
10 #require rust
11 $ echo '[format]' >> $HGRCPATH
12 $ echo 'exp-dirstate-v2=1' >> $HGRCPATH
4 $ cat >> $HGRCPATH << EOF
5 > [format]
6 > exp-rc-dirstate-v2=1
7 > [storage]
8 > dirstate-v2.slow-path=allow
9 > EOF
13 10 #endif
14 11
15 12 Checking the size/permissions/file-type of files stored in the
@@ -1,15 +1,12 b''
1 #testcases dirstate-v1 dirstate-v1-tree dirstate-v2
2
3 #if dirstate-v1-tree
4 #require rust
5 $ echo '[experimental]' >> $HGRCPATH
6 $ echo 'dirstate-tree.in-memory=1' >> $HGRCPATH
7 #endif
1 #testcases dirstate-v1 dirstate-v2
8 2
9 3 #if dirstate-v2
10 #require rust
11 $ echo '[format]' >> $HGRCPATH
12 $ echo 'exp-dirstate-v2=1' >> $HGRCPATH
4 $ cat >> $HGRCPATH << EOF
5 > [format]
6 > exp-rc-dirstate-v2=1
7 > [storage]
8 > dirstate-v2.slow-path=allow
9 > EOF
13 10 #endif
14 11
15 12 ------ Test dirstate._dirs refcounting
@@ -59,13 +56,13 b' Prepare test repo:'
59 56
60 57 Set mtime of a into the future:
61 58
62 $ touch -t 202101011200 a
59 $ touch -t 203101011200 a
63 60
64 61 Status must not set a's entry to unset (issue1790):
65 62
66 63 $ hg status
67 64 $ hg debugstate
68 n 644 2 2021-01-01 12:00:00 a
65 n 644 2 2031-01-01 12:00:00 a
69 66
70 67 Test modulo storage/comparison of absurd dates:
71 68
@@ -370,7 +370,7 b' Catch exporting unknown revisions (espec'
370 370 [10]
371 371 $ hg export 999
372 372 abort: unknown revision '999'
373 [255]
373 [10]
374 374 $ hg export "not all()"
375 375 abort: export requires at least one changeset
376 376 [10]
@@ -87,7 +87,7 b' Specifying an empty revision should abor'
87 87
88 88 $ hg extdiff -p diff --patch --rev 'ancestor()' --rev 1
89 89 abort: empty revision on one side of range
90 [255]
90 [10]
91 91
92 92 Test diff during merge:
93 93
@@ -1692,6 +1692,26 b' Can load minimum version identical to cu'
1692 1692 $ hg --config extensions.minversion=minversion3.py version 2>&1 | egrep '\(third'
1693 1693 [1]
1694 1694
1695 Don't explode on py3 with a bad version number (both str vs bytes, and not enough
1696 parts)
1697
1698 $ cat > minversion4.py << EOF
1699 > from mercurial import util
1700 > util.version = lambda: b'3.5'
1701 > minimumhgversion = '3'
1702 > EOF
1703 $ hg --config extensions.minversion=minversion4.py version -v
1704 Mercurial Distributed SCM (version 3.5)
1705 (see https://mercurial-scm.org for more information)
1706
1707 Copyright (C) 2005-* Olivia Mackall and others (glob)
1708 This is free software; see the source for copying conditions. There is NO
1709 warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
1710
1711 Enabled extensions:
1712
1713 minversion external
1714
1695 1715 Restore HGRCPATH
1696 1716
1697 1717 $ HGRCPATH=$ORGHGRCPATH
@@ -458,7 +458,7 b' missing file'
458 458
459 459 $ hg ann nosuchfile
460 460 abort: nosuchfile: no such file in rev e9e6b4fa872f
461 [255]
461 [10]
462 462
463 463 annotate file without '\n' on last line
464 464
@@ -1,6 +1,4 b''
1 1 $ cat >> $HGRCPATH << EOF
2 > [ui]
3 > ssh = "$PYTHON" "$TESTDIR/dummyssh"
4 2 > [extensions]
5 3 > fastannotate=
6 4 > [fastannotate]
@@ -1752,3 +1752,101 b' middle of fix.'
1752 1752 r0.whole:
1753 1753 hello
1754 1754
1755
1756 We should execute the fixer tools as few times as possible, because they might
1757 be slow or expensive to execute. The inputs to each execution are effectively
1758 the file path, file content, and line ranges. So, we should be able to re-use
1759 results whenever those inputs are repeated. That saves a lot of work when
1760 fixing chains of commits that all have the same file revision for a path being
1761 fixed.
1762
1763 $ hg init numberofinvocations
1764 $ cd numberofinvocations
1765
1766 $ printf "bar1" > bar.log
1767 $ printf "baz1" > baz.log
1768 $ printf "foo1" > foo.log
1769 $ printf "qux1" > qux.log
1770 $ hg commit -Aqm "commit1"
1771
1772 $ printf "bar2" > bar.log
1773 $ printf "baz2" > baz.log
1774 $ printf "foo2" > foo.log
1775 $ hg commit -Aqm "commit2"
1776
1777 $ printf "bar3" > bar.log
1778 $ printf "baz3" > baz.log
1779 $ hg commit -Aqm "commit3"
1780
1781 $ printf "bar4" > bar.log
1782
1783 $ LOGFILE=$TESTTMP/log
1784 $ LOGGER=$TESTTMP/log.py
1785 $ cat >> $LOGGER <<EOF
1786 > # Appends the input file's name to the log file.
1787 > import sys
1788 > with open(r'$LOGFILE', 'a') as f:
1789 > f.write(sys.argv[1] + '\n')
1790 > sys.stdout.write(sys.stdin.read())
1791 > EOF
1792
1793 $ hg fix --working-dir -r "all()" \
1794 > --config "fix.log:command=\"$PYTHON\" \"$LOGGER\" {rootpath}" \
1795 > --config "fix.log:pattern=glob:**.log"
1796
1797 $ cat $LOGFILE | sort | uniq -c
1798 4 bar.log
1799 4 baz.log
1800 3 foo.log
1801 2 qux.log
1802
1803 $ cd ..
1804
1805 For tools that support line ranges, it's wrong to blindly re-use fixed file
1806 content for the same file revision if it appears twice with different baserevs,
1807 because the line ranges could be different. Since computing line ranges is
1808 ambiguous, this isn't a matter of correctness, but it affects the usability of
1809 this extension. It could maybe be simpler if baserevs were computed on a
1810 per-file basis to make this situation impossible to construct.
1811
1812 In the following example, we construct two subgraphs with the same file
1813 revisions, and fix different sub-subgraphs to get different baserevs and
1814 different changed line ranges. The key precondition is that revisions 1 and 4
1815 have the same file revision, and the key result is that their successors don't
1816 have the same file content, because we want to fix different areas of that same
1817 file revision's content.
1818
1819 $ hg init differentlineranges
1820 $ cd differentlineranges
1821
1822 $ printf "a\nb\n" > file.changed
1823 $ hg commit -Aqm "0 ab"
1824 $ printf "a\nx\n" > file.changed
1825 $ hg commit -Aqm "1 ax"
1826 $ hg remove file.changed
1827 $ hg commit -Aqm "2 removed"
1828 $ hg revert file.changed -r 0
1829 $ hg commit -Aqm "3 ab (reverted)"
1830 $ hg revert file.changed -r 1
1831 $ hg commit -Aqm "4 ax (reverted)"
1832
1833 $ hg manifest --debug --template "{hash}\n" -r 0; \
1834 > hg manifest --debug --template "{hash}\n" -r 3
1835 418f692145676128d2fb518b027ddbac624be76e
1836 418f692145676128d2fb518b027ddbac624be76e
1837 $ hg manifest --debug --template "{hash}\n" -r 1; \
1838 > hg manifest --debug --template "{hash}\n" -r 4
1839 09b8b3ce5a507caaa282f7262679e6d04091426c
1840 09b8b3ce5a507caaa282f7262679e6d04091426c
1841
1842 $ hg fix --working-dir -r 1+3+4
1843 3 new orphan changesets
1844
1845 $ hg cat file.changed -r "successors(1)" --hidden
1846 a
1847 X
1848 $ hg cat file.changed -r "successors(4)" --hidden
1849 A
1850 X
1851
1852 $ cd ..
@@ -1121,6 +1121,7 b' internals topic renders index of availab'
1121 1121 censor Censor
1122 1122 changegroups Changegroups
1123 1123 config Config Registrar
1124 dirstate-v2 dirstate-v2 file format
1124 1125 extensions Extension API
1125 1126 mergestate Mergestate
1126 1127 requirements Repository Requirements
@@ -1899,6 +1900,17 b' Test section lookup'
1899 1900 Revsets specifying bookmarks will not result in the bookmark being
1900 1901 pushed.
1901 1902
1903 "bookmarks.mode"
1904 How bookmark will be dealt during the exchange. It support the following
1905 value
1906
1907 - "default": the default behavior, local and remote bookmarks are
1908 "merged" on push/pull.
1909 - "mirror": when pulling, replace local bookmarks by remote bookmarks.
1910 This is useful to replicate a repository, or as an optimization.
1911 - "ignore": ignore bookmarks during exchange. (This currently only
1912 affect pulling)
1913
1902 1914 The following special named paths exist:
1903 1915
1904 1916 "default"
@@ -3566,6 +3578,13 b' Sub-topic indexes rendered properly'
3566 3578 Config Registrar
3567 3579 </td></tr>
3568 3580 <tr><td>
3581 <a href="/help/internals.dirstate-v2">
3582 dirstate-v2
3583 </a>
3584 </td><td>
3585 dirstate-v2 file format
3586 </td></tr>
3587 <tr><td>
3569 3588 <a href="/help/internals.extensions">
3570 3589 extensions
3571 3590 </a>
@@ -1,15 +1,12 b''
1 #testcases dirstate-v1 dirstate-v1-tree dirstate-v2
2
3 #if dirstate-v1-tree
4 #require rust
5 $ echo '[experimental]' >> $HGRCPATH
6 $ echo 'dirstate-tree.in-memory=1' >> $HGRCPATH
7 #endif
1 #testcases dirstate-v1 dirstate-v2
8 2
9 3 #if dirstate-v2
10 #require rust
11 $ echo '[format]' >> $HGRCPATH
12 $ echo 'exp-dirstate-v2=1' >> $HGRCPATH
4 $ cat >> $HGRCPATH << EOF
5 > [format]
6 > exp-rc-dirstate-v2=1
7 > [storage]
8 > dirstate-v2.slow-path=allow
9 > EOF
13 10 #endif
14 11
15 12 $ hg init ignorerepo
@@ -403,9 +400,10 b' Windows paths are accepted on input'
403 400
404 401 #endif
405 402
406 #if dirstate-v2
403 #if dirstate-v2 rust
407 404
408 405 Check the hash of ignore patterns written in the dirstate
406 This is an optimization that is only relevant when using the Rust extensions
409 407
410 408 $ hg status > /dev/null
411 409 $ cat .hg/testhgignore .hg/testhgignorerel .hgignore dir2/.hgignore dir1/.hgignore dir1/.hgignoretwo | $TESTDIR/f --sha1
@@ -93,7 +93,7 b' Run on a revision not ancestors of the c'
93 93 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
94 94 $ hg histedit -r 4
95 95 abort: 08d98a8350f3 is not an ancestor of working directory
96 [255]
96 [10]
97 97 $ hg up --quiet
98 98
99 99
@@ -290,7 +290,7 b' short hash. This tests issue3893.'
290 290 created new head
291 291 $ hg histedit -r 'heads(all())'
292 292 abort: The specified revisions must have exactly one common root
293 [255]
293 [10]
294 294
295 295 Test that trimming description using multi-byte characters
296 296 --------------------------------------------------------------------
@@ -552,5 +552,5 b' warn the user on editing tagged commits'
552 552 do you want to continue (yN)? n
553 553 abort: histedit cancelled
554 554
555 [255]
555 [250]
556 556 $ cd ..
@@ -160,7 +160,7 b' even prompt the user for rules, sidestep'
160 160 $ hg histedit e860deea161a
161 161 c: untracked file differs
162 162 abort: untracked files in working directory conflict with files in 055a42cdd887
163 [255]
163 [20]
164 164
165 165 We should have detected the collision early enough we're not in a
166 166 histedit state, and p1 is unchanged.
@@ -508,7 +508,7 b' Note that there is a few reordering in t'
508 508 $ hg ci -m 'modify wat'
509 509 $ hg histedit 050280826e04
510 510 abort: cannot edit history that contains merges
511 [255]
511 [20]
512 512 $ cd ..
513 513
514 514 Check abort behavior
@@ -134,7 +134,7 b' test to check number of roots in outgoin'
134 134 $ HGEDITOR=cat hg -q histedit --outgoing '../r'
135 135 abort: there are ambiguous outgoing revisions
136 136 (see 'hg help histedit' for more detail)
137 [255]
137 [20]
138 138
139 139 $ hg -q update -C 2
140 140 $ echo aa >> a
@@ -151,6 +151,6 b' test to check number of roots in outgoin'
151 151 $ HGEDITOR=cat hg -q histedit --outgoing '../r#default'
152 152 abort: there are ambiguous outgoing revisions
153 153 (see 'hg help histedit' for more detail)
154 [255]
154 [20]
155 155
156 156 $ cd ..
@@ -9,8 +9,6 b' Setup'
9 9
10 10 $ . "$TESTDIR/library-infinitepush.sh"
11 11 $ cat >> $HGRCPATH <<EOF
12 > [ui]
13 > ssh = "$PYTHON" "$TESTDIR/dummyssh"
14 12 > [alias]
15 13 > glog = log -GT "{rev}:{node|short} {desc}\n{phase}"
16 14 > EOF
@@ -19,7 +19,7 b" creating 'local'"
19 19 store created
20 20 00changelog.i created
21 21 dotencode
22 exp-dirstate-v2 (dirstate-v2 !)
22 exp-rc-dirstate-v2 (dirstate-v2 !)
23 23 fncache
24 24 generaldelta
25 25 persistent-nodemap (rust !)
@@ -61,7 +61,7 b' creating repo with format.usestore=false'
61 61
62 62 $ hg --config format.usestore=false init old
63 63 $ checknewrepo old
64 exp-dirstate-v2 (dirstate-v2 !)
64 exp-rc-dirstate-v2 (dirstate-v2 !)
65 65 generaldelta
66 66 persistent-nodemap (rust !)
67 67 revlog-compression-zstd (zstd !)
@@ -75,7 +75,7 b' creating repo with format.usefncache=fal'
75 75 $ checknewrepo old2
76 76 store created
77 77 00changelog.i created
78 exp-dirstate-v2 (dirstate-v2 !)
78 exp-rc-dirstate-v2 (dirstate-v2 !)
79 79 generaldelta
80 80 persistent-nodemap (rust !)
81 81 revlog-compression-zstd (zstd !)
@@ -90,7 +90,7 b' creating repo with format.dotencode=fals'
90 90 $ checknewrepo old3
91 91 store created
92 92 00changelog.i created
93 exp-dirstate-v2 (dirstate-v2 !)
93 exp-rc-dirstate-v2 (dirstate-v2 !)
94 94 fncache
95 95 generaldelta
96 96 persistent-nodemap (rust !)
@@ -107,7 +107,7 b' creating repo with format.dotencode=fals'
107 107 store created
108 108 00changelog.i created
109 109 dotencode
110 exp-dirstate-v2 (dirstate-v2 !)
110 exp-rc-dirstate-v2 (dirstate-v2 !)
111 111 fncache
112 112 persistent-nodemap (rust !)
113 113 revlog-compression-zstd (zstd !)
@@ -123,7 +123,7 b' test failure'
123 123
124 124 init+push to remote2
125 125
126 $ hg init -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote2
126 $ hg init ssh://user@dummy/remote2
127 127 $ hg incoming -R remote2 local
128 128 comparing with local
129 129 changeset: 0:08b9e9f63b32
@@ -133,7 +133,7 b' init+push to remote2'
133 133 summary: init
134 134
135 135
136 $ hg push -R local -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote2
136 $ hg push -R local ssh://user@dummy/remote2
137 137 pushing to ssh://user@dummy/remote2
138 138 searching for changes
139 139 remote: adding changesets
@@ -143,7 +143,7 b' init+push to remote2'
143 143
144 144 clone to remote1
145 145
146 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" local ssh://user@dummy/remote1
146 $ hg clone local ssh://user@dummy/remote1
147 147 searching for changes
148 148 remote: adding changesets
149 149 remote: adding manifests
@@ -151,7 +151,7 b' clone to remote1'
151 151 remote: added 1 changesets with 1 changes to 1 files
152 152
153 153 The largefiles extension doesn't crash
154 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" local ssh://user@dummy/remotelf --config extensions.largefiles=
154 $ hg clone local ssh://user@dummy/remotelf --config extensions.largefiles=
155 155 The fsmonitor extension is incompatible with the largefiles extension and has been disabled. (fsmonitor !)
156 156 The fsmonitor extension is incompatible with the largefiles extension and has been disabled. (fsmonitor !)
157 157 searching for changes
@@ -162,14 +162,14 b" The largefiles extension doesn't crash"
162 162
163 163 init to existing repo
164 164
165 $ hg init -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote1
165 $ hg init ssh://user@dummy/remote1
166 166 abort: repository remote1 already exists
167 167 abort: could not create remote repo
168 168 [255]
169 169
170 170 clone to existing repo
171 171
172 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" local ssh://user@dummy/remote1
172 $ hg clone local ssh://user@dummy/remote1
173 173 abort: repository remote1 already exists
174 174 abort: could not create remote repo
175 175 [255]
@@ -226,7 +226,7 b" creating 'local/sub/repo'"
226 226 store created
227 227 00changelog.i created
228 228 dotencode
229 exp-dirstate-v2 (dirstate-v2 !)
229 exp-rc-dirstate-v2 (dirstate-v2 !)
230 230 fncache
231 231 generaldelta
232 232 persistent-nodemap (rust !)
@@ -249,7 +249,7 b' init should (for consistency with clone)'
249 249 store created
250 250 00changelog.i created
251 251 dotencode
252 exp-dirstate-v2 (dirstate-v2 !)
252 exp-rc-dirstate-v2 (dirstate-v2 !)
253 253 fncache
254 254 generaldelta
255 255 persistent-nodemap (rust !)
@@ -268,7 +268,7 b' verify that clone also expand urls'
268 268 store created
269 269 00changelog.i created
270 270 dotencode
271 exp-dirstate-v2 (dirstate-v2 !)
271 exp-rc-dirstate-v2 (dirstate-v2 !)
272 272 fncache
273 273 generaldelta
274 274 persistent-nodemap (rust !)
@@ -283,7 +283,7 b' clone bookmarks'
283 283 $ hg -R local bookmark test
284 284 $ hg -R local bookmarks
285 285 * test 0:08b9e9f63b32
286 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" local ssh://user@dummy/remote-bookmarks
286 $ hg clone local ssh://user@dummy/remote-bookmarks
287 287 searching for changes
288 288 remote: adding changesets
289 289 remote: adding manifests
@@ -185,10 +185,12 b' conditional above.'
185 185
186 186 $ find share_dst/.hg/largefiles/* | sort
187 187 share_dst/.hg/largefiles/dirstate
188 share_dst/.hg/largefiles/undo.backup.dirstate
188 189
189 190 $ find src/.hg/largefiles/* | egrep "(dirstate|$hash)" | sort
190 191 src/.hg/largefiles/dirstate
191 192 src/.hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020
193 src/.hg/largefiles/undo.backup.dirstate
192 194
193 195 Verify that backwards compatibility is maintained for old storage layout
194 196 $ mv src/.hg/largefiles/$hash share_dst/.hg/largefiles
@@ -124,7 +124,7 b' used all HGPORTs, kill all daemons'
124 124 #endif
125 125
126 126 vanilla clients locked out from largefiles ssh repos
127 $ hg --config extensions.largefiles=! clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/r4 r5
127 $ hg --config extensions.largefiles=! clone ssh://user@dummy/r4 r5
128 128 remote:
129 129 remote: This repository uses the largefiles extension.
130 130 remote:
@@ -96,7 +96,7 b' Test link+rename largefile codepath'
96 96 "lfconvert" adds 'largefiles' to .hg/requires.
97 97 $ cat .hg/requires
98 98 dotencode
99 exp-dirstate-v2 (dirstate-v2 !)
99 exp-rc-dirstate-v2 (dirstate-v2 !)
100 100 fncache
101 101 generaldelta
102 102 largefiles
@@ -290,7 +290,7 b' The requirement is added to the destinat'
290 290
291 291 $ cat .hg/requires
292 292 dotencode
293 exp-dirstate-v2 (dirstate-v2 !)
293 exp-rc-dirstate-v2 (dirstate-v2 !)
294 294 fncache
295 295 generaldelta
296 296 lfs
@@ -5,13 +5,13 b' Log on empty repository: checking consis'
5 5 $ hg log
6 6 $ hg log -r 1
7 7 abort: unknown revision '1'
8 [255]
8 [10]
9 9 $ hg log -r -1:0
10 10 abort: unknown revision '-1'
11 [255]
11 [10]
12 12 $ hg log -r 'branch(name)'
13 13 abort: unknown revision 'name'
14 [255]
14 [10]
15 15 $ hg log -r null -q
16 16 -1:000000000000
17 17
@@ -1104,7 +1104,7 b' log -r <some unknown node id>'
1104 1104
1105 1105 $ hg log -r 1000000000000000000000000000000000000000
1106 1106 abort: unknown revision '1000000000000000000000000000000000000000'
1107 [255]
1107 [10]
1108 1108
1109 1109 log -k r1
1110 1110
@@ -2061,7 +2061,7 b' enable obsolete to test hidden feature'
2061 2061 $ hg log -r a
2062 2062 abort: hidden revision 'a' is pruned
2063 2063 (use --hidden to access hidden revisions)
2064 [255]
2064 [10]
2065 2065
2066 2066 test that parent prevent a changeset to be hidden
2067 2067
@@ -2125,7 +2125,7 b' test hidden revision 0 (issue5385)'
2125 2125 $ hg log -T'{rev}:{node}\n' -r:0
2126 2126 abort: hidden revision '0' is pruned
2127 2127 (use --hidden to access hidden revisions)
2128 [255]
2128 [10]
2129 2129 $ hg log -T'{rev}:{node}\n' -f
2130 2130 3:d7d28b288a6b83d5d2cf49f10c5974deed3a1d2e
2131 2131 2:94375ec45bddd2a824535fc04855bd058c926ec0
@@ -2516,10 +2516,9 b' New namespace is registered per repo ins'
2516 2516 is global. So we shouldn't expect the namespace always exists. Using
2517 2517 ssh:// makes sure a bundle repository is created from scratch. (issue6301)
2518 2518
2519 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" \
2520 > -qr0 "ssh://user@dummy/`pwd`/a" a-clone
2519 $ hg clone -qr0 "ssh://user@dummy/`pwd`/a" a-clone
2521 2520 $ hg incoming --config extensions.names=names.py -R a-clone \
2522 > -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" -T '{bars}\n' -l1
2521 > -T '{bars}\n' -l1
2523 2522 comparing with ssh://user@dummy/$TESTTMP/a
2524 2523 searching for changes
2525 2524
@@ -2,8 +2,6 b' Testing the functionality to pull remote'
2 2 =============================================
3 3
4 4 $ cat >> $HGRCPATH << EOF
5 > [ui]
6 > ssh = "$PYTHON" "$TESTDIR/dummyssh"
7 5 > [alias]
8 6 > glog = log -G -T '{rev}:{node|short} {desc}'
9 7 > [extensions]
@@ -482,15 +480,15 b' Testing for a literal name which does no'
482 480
483 481 $ hg log -r 'remotebranches(def)' -GT "{rev}:{node|short} {remotenames}\n"
484 482 abort: remote name 'def' does not exist
485 [255]
483 [10]
486 484
487 485 $ hg log -r 'remotebookmarks("server3")' -GT "{rev}:{node|short} {remotenames}\n"
488 486 abort: remote name 'server3' does not exist
489 [255]
487 [10]
490 488
491 489 $ hg log -r 'remotenames("server3")' -GT "{rev}:{node|short} {remotenames}\n"
492 490 abort: remote name 'server3' does not exist
493 [255]
491 [10]
494 492
495 493 Testing for a pattern which does not match anything, which shouldn't fail.
496 494
@@ -88,7 +88,7 b' The next two calls are expected to abort'
88 88
89 89 $ hg manifest -r 2
90 90 abort: unknown revision '2'
91 [255]
91 [10]
92 92
93 93 $ hg manifest -r tip tip
94 94 abort: please specify just one revision
@@ -55,8 +55,8 b' Re-adding foo1 and bar:'
55 55 adding foo1
56 56
57 57 $ hg debugstate --no-dates
58 n 0 -2 unset bar
59 n 0 -2 unset foo1
58 m 0 -2 unset bar
59 m 0 -2 unset foo1
60 60 copy: foo -> foo1
61 61
62 62 $ hg st -qC
@@ -74,8 +74,8 b' Reverting foo1 and bar:'
74 74 reverting foo1
75 75
76 76 $ hg debugstate --no-dates
77 n 0 -2 unset bar
78 n 0 -2 unset foo1
77 m 0 -2 unset bar
78 m 0 -2 unset foo1
79 79 copy: foo -> foo1
80 80
81 81 $ hg st -qC
@@ -24,10 +24,6 b" some capability (because it's running an"
24 24 > [extensions]
25 25 > disable-lookup = $TESTTMP/disable-lookup.py
26 26 > EOF
27 $ cat >> .hg/hgrc <<EOF
28 > [ui]
29 > ssh = "$PYTHON" "$TESTDIR/dummyssh"
30 > EOF
31 27
32 28 $ hg pull ssh://user@dummy/repo1 -r tip -B a
33 29 pulling from ssh://user@dummy/repo1
@@ -115,7 +115,7 b' Delete the same patch twice in one comma'
115 115
116 116 $ hg qfinish -a pc
117 117 abort: unknown revision 'pc'
118 [255]
118 [10]
119 119
120 120 $ hg qpush
121 121 applying pc
@@ -24,7 +24,7 b' narrow clone a file, f10'
24 24 $ cd narrow
25 25 $ cat .hg/requires | grep -v generaldelta
26 26 dotencode
27 exp-dirstate-v2 (dirstate-v2 !)
27 exp-rc-dirstate-v2 (dirstate-v2 !)
28 28 fncache
29 29 narrowhg-experimental
30 30 persistent-nodemap (rust !)
@@ -64,7 +64,7 b' Making sure we have the correct set of r'
64 64 $ cat .hg/requires
65 65 dotencode (tree !)
66 66 dotencode (flat-fncache !)
67 exp-dirstate-v2 (dirstate-v2 !)
67 exp-rc-dirstate-v2 (dirstate-v2 !)
68 68 fncache (tree !)
69 69 fncache (flat-fncache !)
70 70 generaldelta
@@ -40,7 +40,7 b' narrow clone a file, f10'
40 40 $ cd narrow
41 41 $ cat .hg/requires | grep -v generaldelta
42 42 dotencode
43 exp-dirstate-v2 (dirstate-v2 !)
43 exp-rc-dirstate-v2 (dirstate-v2 !)
44 44 fncache
45 45 narrowhg-experimental
46 46 persistent-nodemap (rust !)
@@ -100,7 +100,7 b' Narrow the share and check that the main'
100 100 $ hg -R main files
101 101 abort: working copy's narrowspec is stale
102 102 (run 'hg tracked --update-working-copy')
103 [255]
103 [20]
104 104 $ hg -R main tracked --update-working-copy
105 105 not deleting possibly dirty file d3/f
106 106 not deleting possibly dirty file d3/g
@@ -138,7 +138,7 b' Widen the share and check that the main '
138 138 $ hg -R main files
139 139 abort: working copy's narrowspec is stale
140 140 (run 'hg tracked --update-working-copy')
141 [255]
141 [20]
142 142 $ hg -R main tracked --update-working-copy
143 143 # d1/f, d3/f should be back
144 144 $ hg -R main files
@@ -189,7 +189,7 b' Make it look like a repo from before nar'
189 189 $ hg ci -Am test
190 190 abort: working copy's narrowspec is stale
191 191 (run 'hg tracked --update-working-copy')
192 [255]
192 [20]
193 193 $ hg tracked --update-working-copy
194 194 $ hg st
195 195 M d1/f
@@ -58,7 +58,7 b' XXX: we should have a flag in `hg debugs'
58 58
59 59 $ cat .hg/requires
60 60 dotencode
61 exp-dirstate-v2 (dirstate-v2 !)
61 exp-rc-dirstate-v2 (dirstate-v2 !)
62 62 fncache
63 63 generaldelta
64 64 narrowhg-experimental
@@ -54,7 +54,7 b' Actual test'
54 54 $ hg update 471f378eab4c
55 55 abort: hidden revision '471f378eab4c' was rewritten as: 4ae3a4151de9
56 56 (use --hidden to access hidden revisions)
57 [255]
57 [10]
58 58 $ hg update --hidden "desc(A0)"
59 59 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
60 60 updated to hidden changeset 471f378eab4c
@@ -118,7 +118,7 b' Actual test'
118 118 $ hg up 0dec01379d3b
119 119 abort: hidden revision '0dec01379d3b' is pruned
120 120 (use --hidden to access hidden revisions)
121 [255]
121 [10]
122 122 $ hg up --hidden -r 'desc(B0)'
123 123 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
124 124 updated to hidden changeset 0dec01379d3b
@@ -196,7 +196,7 b' Actual test'
196 196 $ hg update 471597cad322
197 197 abort: hidden revision '471597cad322' was split as: 337fec4d2edc, f257fde29c7a
198 198 (use --hidden to access hidden revisions)
199 [255]
199 [10]
200 200 $ hg update --hidden 'min(desc(A0))'
201 201 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
202 202 updated to hidden changeset 471597cad322
@@ -296,7 +296,7 b' Actual test'
296 296 $ hg update de7290d8b885
297 297 abort: hidden revision 'de7290d8b885' was split as: 337fec4d2edc, f257fde29c7a and 2 more
298 298 (use --hidden to access hidden revisions)
299 [255]
299 [10]
300 300 $ hg update --hidden 'min(desc(A0))'
301 301 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
302 302 updated to hidden changeset de7290d8b885
@@ -377,7 +377,7 b' Test setup'
377 377 $ hg update 471f378eab4c
378 378 abort: hidden revision '471f378eab4c' was rewritten as: eb5a0daa2192
379 379 (use --hidden to access hidden revisions)
380 [255]
380 [10]
381 381 $ hg update --hidden 'desc(A0)'
382 382 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
383 383 updated to hidden changeset 471f378eab4c
@@ -385,7 +385,7 b' Test setup'
385 385 $ hg update 0dec01379d3b
386 386 abort: hidden revision '0dec01379d3b' was rewritten as: eb5a0daa2192
387 387 (use --hidden to access hidden revisions)
388 [255]
388 [10]
389 389 $ hg update --hidden 'desc(B0)'
390 390 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
391 391 updated to hidden changeset 0dec01379d3b
@@ -460,7 +460,7 b' Actual test'
460 460 $ hg update 471f378eab4c
461 461 abort: hidden revision '471f378eab4c' has diverged
462 462 (use --hidden to access hidden revisions)
463 [255]
463 [10]
464 464 $ hg update --hidden 'desc(A0)'
465 465 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
466 466 updated to hidden changeset 471f378eab4c
@@ -557,7 +557,7 b' Test setup'
557 557 $ hg update 471f378eab4c
558 558 abort: hidden revision '471f378eab4c' was rewritten as: eb5a0daa2192
559 559 (use --hidden to access hidden revisions)
560 [255]
560 [10]
561 561 $ hg update --hidden 'desc(A0)'
562 562 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
563 563 updated to hidden changeset 471f378eab4c
@@ -203,11 +203,11 b' check that various commands work well wi'
203 203 5:5601fb93a350 (draft) [tip ] add new_3_c
204 204 $ hg log -r 6
205 205 abort: unknown revision '6'
206 [255]
206 [10]
207 207 $ hg log -r 4
208 208 abort: hidden revision '4' was rewritten as: 5601fb93a350
209 209 (use --hidden to access hidden revisions)
210 [255]
210 [10]
211 211 $ hg debugrevspec 'rev(6)'
212 212 $ hg debugrevspec 'rev(4)'
213 213 $ hg debugrevspec 'null'
@@ -1544,7 +1544,7 b' bookmarks change'
1544 1544 $ hg log -r 13bedc178fce
1545 1545 abort: hidden revision '13bedc178fce' was rewritten as: a9b1f8652753
1546 1546 (use --hidden to access hidden revisions)
1547 [255]
1547 [10]
1548 1548
1549 1549 Empty out the test extension, as it isn't compatible with later parts
1550 1550 of the test.
@@ -1,17 +1,14 b''
1 1 #require unix-permissions no-root reporevlogstore
2 2
3 #testcases dirstate-v1 dirstate-v1-tree dirstate-v2
4
5 #if dirstate-v1-tree
6 #require rust
7 $ echo '[experimental]' >> $HGRCPATH
8 $ echo 'dirstate-tree.in-memory=1' >> $HGRCPATH
9 #endif
3 #testcases dirstate-v1 dirstate-v2
10 4
11 5 #if dirstate-v2
12 #require rust
13 $ echo '[format]' >> $HGRCPATH
14 $ echo 'exp-dirstate-v2=1' >> $HGRCPATH
6 $ cat >> $HGRCPATH << EOF
7 > [format]
8 > exp-rc-dirstate-v2=1
9 > [storage]
10 > dirstate-v2.slow-path=allow
11 > EOF
15 12 #endif
16 13
17 14 $ hg init t
@@ -800,7 +800,7 b' downgrading'
800 800 requirements
801 801 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-zstd no-dirstate-v2 !)
802 802 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd no-dirstate-v2 !)
803 preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd dirstate-v2 !)
803 preserved: dotencode, exp-rc-dirstate-v2, fncache, generaldelta, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd dirstate-v2 !)
804 804 removed: persistent-nodemap
805 805
806 806 processed revlogs:
@@ -844,7 +844,7 b' upgrading'
844 844 requirements
845 845 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-zstd no-dirstate-v2 !)
846 846 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd no-dirstate-v2 !)
847 preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd dirstate-v2 !)
847 preserved: dotencode, exp-rc-dirstate-v2, fncache, generaldelta, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd dirstate-v2 !)
848 848 added: persistent-nodemap
849 849
850 850 processed revlogs:
@@ -876,7 +876,7 b' Running unrelated upgrade'
876 876 requirements
877 877 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (no-zstd no-dirstate-v2 !)
878 878 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd no-dirstate-v2 !)
879 preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd dirstate-v2 !)
879 preserved: dotencode, exp-rc-dirstate-v2, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd dirstate-v2 !)
880 880
881 881 optimisations: re-delta-all
882 882
@@ -1016,7 +1016,7 b' Simple case'
1016 1016
1017 1017 No race condition
1018 1018
1019 $ hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone --debug | egrep '00(changelog|manifest)'
1019 $ hg clone -U --stream ssh://user@dummy/test-repo stream-clone --debug | egrep '00(changelog|manifest)'
1020 1020 adding [s] 00manifest.n (62 bytes)
1021 1021 adding [s] 00manifest-*.nd (118 KB) (glob)
1022 1022 adding [s] 00changelog.n (62 bytes)
@@ -1081,7 +1081,7 b' Prepare a commit'
1081 1081
1082 1082 Do a mix of clone and commit at the same time so that the file listed on disk differ at actual transfer time.
1083 1083
1084 $ (hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone-race-1 --debug 2>> clone-output | egrep '00(changelog|manifest)' >> clone-output; touch $HG_TEST_STREAM_WALKED_FILE_3) &
1084 $ (hg clone -U --stream ssh://user@dummy/test-repo stream-clone-race-1 --debug 2>> clone-output | egrep '00(changelog|manifest)' >> clone-output; touch $HG_TEST_STREAM_WALKED_FILE_3) &
1085 1085 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1
1086 1086 $ hg -R test-repo/ commit -m foo
1087 1087 $ touch $HG_TEST_STREAM_WALKED_FILE_2
@@ -1178,7 +1178,7 b' Check the initial state'
1178 1178 Performe the mix of clone and full refresh of the nodemap, so that the files
1179 1179 (and filenames) are different between listing time and actual transfer time.
1180 1180
1181 $ (hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone-race-2 --debug 2>> clone-output-2 | egrep '00(changelog|manifest)' >> clone-output-2; touch $HG_TEST_STREAM_WALKED_FILE_3) &
1181 $ (hg clone -U --stream ssh://user@dummy/test-repo stream-clone-race-2 --debug 2>> clone-output-2 | egrep '00(changelog|manifest)' >> clone-output-2; touch $HG_TEST_STREAM_WALKED_FILE_3) &
1182 1182 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1
1183 1183 $ rm test-repo/.hg/store/00changelog.n
1184 1184 $ rm test-repo/.hg/store/00changelog-*.nd
@@ -884,7 +884,7 b' Check we deny its usage on older reposit'
884 884 $ cd no-internal-phase
885 885 $ cat .hg/requires
886 886 dotencode
887 exp-dirstate-v2 (dirstate-v2 !)
887 exp-rc-dirstate-v2 (dirstate-v2 !)
888 888 fncache
889 889 generaldelta
890 890 persistent-nodemap (rust !)
@@ -913,7 +913,7 b' Check it works fine with repository that'
913 913 $ cd internal-phase
914 914 $ cat .hg/requires
915 915 dotencode
916 exp-dirstate-v2 (dirstate-v2 !)
916 exp-rc-dirstate-v2 (dirstate-v2 !)
917 917 fncache
918 918 generaldelta
919 919 internal-phase
@@ -1,15 +1,12 b''
1 #testcases dirstate-v1 dirstate-v1-tree dirstate-v2
2
3 #if dirstate-v1-tree
4 #require rust
5 $ echo '[experimental]' >> $HGRCPATH
6 $ echo 'dirstate-tree.in-memory=1' >> $HGRCPATH
7 #endif
1 #testcases dirstate-v1 dirstate-v2
8 2
9 3 #if dirstate-v2
10 #require rust
11 $ echo '[format]' >> $HGRCPATH
12 $ echo 'exp-dirstate-v2=1' >> $HGRCPATH
4 $ cat >> $HGRCPATH << EOF
5 > [format]
6 > exp-rc-dirstate-v2=1
7 > [storage]
8 > dirstate-v2.slow-path=allow
9 > EOF
13 10 #endif
14 11
15 12 init
@@ -102,7 +102,6 b' A set of extension and shell functions e'
102 102
103 103 $ cat >> $HGRCPATH << EOF
104 104 > [ui]
105 > ssh = "$PYTHON" "$TESTDIR/dummyssh"
106 105 > # simplify output
107 106 > logtemplate = {node|short} {desc} ({branch})
108 107 > [phases]
@@ -162,7 +162,7 b' Multiple destinations cannot be used wit'
162 162 > A D
163 163 > EOS
164 164 abort: unknown revision 'SRC'
165 [255]
165 [10]
166 166
167 167 Rebase to null should work:
168 168
@@ -132,7 +132,7 b' These fail:'
132 132
133 133 $ hg rebase --dest '1 & !1'
134 134 abort: empty revision set
135 [255]
135 [10]
136 136
137 137 These work:
138 138
@@ -17,9 +17,16 b''
17 17 > try:
18 18 > for file in pats:
19 19 > if opts.get('normal_lookup'):
20 > repo.dirstate._normallookup(file)
20 > with repo.dirstate.parentchange():
21 > repo.dirstate.update_file(
22 > file,
23 > p1_tracked=True,
24 > wc_tracked=True,
25 > possibly_dirty=True,
26 > )
21 27 > else:
22 > repo.dirstate._drop(file)
28 > repo.dirstate._map.reset_state(file)
29 > repo.dirstate._dirty = True
23 30 >
24 31 > repo.dirstate.write(repo.currenttransaction())
25 32 > finally:
@@ -27,7 +27,7 b''
27 27 $ cd shallow
28 28 $ cat .hg/requires
29 29 dotencode
30 exp-dirstate-v2 (dirstate-v2 !)
30 exp-rc-dirstate-v2 (dirstate-v2 !)
31 31 exp-remotefilelog-repo-req-1
32 32 fncache
33 33 generaldelta
@@ -71,7 +71,7 b''
71 71 $ cd shallow2
72 72 $ cat .hg/requires
73 73 dotencode
74 exp-dirstate-v2 (dirstate-v2 !)
74 exp-rc-dirstate-v2 (dirstate-v2 !)
75 75 exp-remotefilelog-repo-req-1
76 76 fncache
77 77 generaldelta
@@ -115,7 +115,7 b''
115 115 $ ls shallow3/.hg/store/data
116 116 $ cat shallow3/.hg/requires
117 117 dotencode
118 exp-dirstate-v2 (dirstate-v2 !)
118 exp-rc-dirstate-v2 (dirstate-v2 !)
119 119 exp-remotefilelog-repo-req-1
120 120 fncache
121 121 generaldelta
@@ -24,7 +24,7 b''
24 24 $ cd shallow
25 25 $ cat .hg/requires
26 26 dotencode
27 exp-dirstate-v2 (dirstate-v2 !)
27 exp-rc-dirstate-v2 (dirstate-v2 !)
28 28 exp-remotefilelog-repo-req-1
29 29 fncache
30 30 generaldelta
@@ -61,7 +61,7 b''
61 61 $ cd shallow2
62 62 $ cat .hg/requires
63 63 dotencode
64 exp-dirstate-v2 (dirstate-v2 !)
64 exp-rc-dirstate-v2 (dirstate-v2 !)
65 65 exp-remotefilelog-repo-req-1
66 66 fncache
67 67 generaldelta
@@ -113,7 +113,7 b' check its contents separately.'
113 113 $ ls shallow3/.hg/store/data
114 114 $ cat shallow3/.hg/requires
115 115 dotencode
116 exp-dirstate-v2 (dirstate-v2 !)
116 exp-rc-dirstate-v2 (dirstate-v2 !)
117 117 exp-remotefilelog-repo-req-1
118 118 fncache
119 119 generaldelta
@@ -27,7 +27,7 b' Shallow clone from full'
27 27 $ cd shallow
28 28 $ cat .hg/requires
29 29 dotencode
30 exp-dirstate-v2 (dirstate-v2 !)
30 exp-rc-dirstate-v2 (dirstate-v2 !)
31 31 exp-remotefilelog-repo-req-1
32 32 fncache
33 33 generaldelta
@@ -42,6 +42,17 b' Test single file'
42 42 d1/b
43 43 A d1/d
44 44 d1/b
45 # Should get helpful message if we try to copy or rename after commit
46 $ hg cp --forget --at-rev . d1/d
47 saved backup bundle to $TESTTMP/.hg/strip-backup/3f7c325d3f9e-46f377bb-uncopy.hg
48 $ hg cp d1/b d1/d
49 d1/d: not overwriting - file already committed
50 ('hg copy --at-rev .' to record the copy in the parent of the working copy)
51 [1]
52 $ hg mv d1/b d1/d
53 d1/d: not overwriting - file already committed
54 ('hg rename --at-rev .' to record the rename in the parent of the working copy)
55 [1]
45 56
46 57 Test moved file (not copied) using 'hg cp' command
47 58
@@ -11,7 +11,7 b' A new repository uses zlib storage, whic'
11 11 $ cd default
12 12 $ cat .hg/requires
13 13 dotencode
14 exp-dirstate-v2 (dirstate-v2 !)
14 exp-rc-dirstate-v2 (dirstate-v2 !)
15 15 fncache
16 16 generaldelta
17 17 persistent-nodemap (rust !)
@@ -61,7 +61,7 b' with that engine or a requirement'
61 61
62 62 $ cat .hg/requires
63 63 dotencode
64 exp-dirstate-v2 (dirstate-v2 !)
64 exp-rc-dirstate-v2 (dirstate-v2 !)
65 65 fncache
66 66 generaldelta
67 67 persistent-nodemap (rust !)
@@ -81,7 +81,7 b' with that engine or a requirement'
81 81 $ cd zstd
82 82 $ cat .hg/requires
83 83 dotencode
84 exp-dirstate-v2 (dirstate-v2 !)
84 exp-rc-dirstate-v2 (dirstate-v2 !)
85 85 fncache
86 86 generaldelta
87 87 persistent-nodemap (rust !)
@@ -186,7 +186,7 b' checking details of none compression'
186 186 $ cat none-compression/.hg/requires
187 187 dotencode
188 188 exp-compression-none
189 exp-dirstate-v2 (dirstate-v2 !)
189 exp-rc-dirstate-v2 (dirstate-v2 !)
190 190 fncache
191 191 generaldelta
192 192 persistent-nodemap (rust !)
@@ -50,7 +50,7 b' another repository of push/pull/clone on'
50 50 > EOF
51 51 $ hg -R supported debugrequirements
52 52 dotencode
53 exp-dirstate-v2 (dirstate-v2 !)
53 exp-rc-dirstate-v2 (dirstate-v2 !)
54 54 featuresetup-test
55 55 fncache
56 56 generaldelta
@@ -22,7 +22,7 b' Can create and open repo with revlog v2 '
22 22 $ cd new-repo
23 23 $ cat .hg/requires
24 24 dotencode
25 exp-dirstate-v2 (dirstate-v2 !)
25 exp-rc-dirstate-v2 (dirstate-v2 !)
26 26 exp-revlogv2.2
27 27 fncache
28 28 generaldelta
@@ -96,10 +96,10 b' Test label with quote in them.'
96 96 2:fb616635b18f Added tag rev(0) for changeset 43114e71eddd ["foo"]
97 97 $ hg log -r '("foo")'
98 98 abort: unknown revision 'foo'
99 [255]
99 [10]
100 100 $ hg log -r 'revset("foo")'
101 101 abort: unknown revision 'foo'
102 [255]
102 [10]
103 103 $ hg log -r '("\"foo\"")'
104 104 2:fb616635b18f Added tag rev(0) for changeset 43114e71eddd ["foo"]
105 105 $ hg log -r 'revset("\"foo\"")'
@@ -126,10 +126,10 b' Test label with + in them.'
126 126 4:bbf52b87b370 Added tag foo-bar for changeset a50aae922707 [foo+bar]
127 127 $ hg log -r '(foo+bar)'
128 128 abort: unknown revision 'foo'
129 [255]
129 [10]
130 130 $ hg log -r 'revset(foo+bar)'
131 131 abort: unknown revision 'foo'
132 [255]
132 [10]
133 133 $ hg log -r '"foo+bar"'
134 134 4:bbf52b87b370 Added tag foo-bar for changeset a50aae922707 [foo+bar]
135 135 $ hg log -r '("foo+bar")'
@@ -407,7 +407,7 b' quoting needed'
407 407 [10]
408 408 $ log 'date'
409 409 abort: unknown revision 'date'
410 [255]
410 [10]
411 411 $ log 'date('
412 412 hg: parse error at 5: not a prefix: end
413 413 (date(
@@ -421,10 +421,10 b' quoting needed'
421 421 [10]
422 422 $ log '0:date'
423 423 abort: unknown revision 'date'
424 [255]
424 [10]
425 425 $ log '::"date"'
426 426 abort: unknown revision 'date'
427 [255]
427 [10]
428 428 $ hg book date -r 4
429 429 $ log '0:date'
430 430 0
@@ -3067,7 +3067,7 b" abort if the revset doesn't expect given"
3067 3067 0
3068 3068 $ log 'expectsize(0:1, 1)'
3069 3069 abort: revset size mismatch. expected 1, got 2
3070 [255]
3070 [10]
3071 3071 $ log 'expectsize(0:4, -1)'
3072 3072 hg: parse error: negative size
3073 3073 [10]
@@ -3077,7 +3077,7 b" abort if the revset doesn't expect given"
3077 3077 2
3078 3078 $ log 'expectsize(0:1, 3:5)'
3079 3079 abort: revset size mismatch. expected between 3 and 5, got 2
3080 [255]
3080 [10]
3081 3081 $ log 'expectsize(0:1, -1:2)'
3082 3082 hg: parse error: negative size
3083 3083 [10]
@@ -3104,10 +3104,10 b" abort if the revset doesn't expect given"
3104 3104 2
3105 3105 $ log 'expectsize(0:2, 4:)'
3106 3106 abort: revset size mismatch. expected between 4 and 11, got 3
3107 [255]
3107 [10]
3108 3108 $ log 'expectsize(0:2, :2)'
3109 3109 abort: revset size mismatch. expected between 0 and 2, got 3
3110 [255]
3110 [10]
3111 3111
3112 3112 Test getting list of node from file
3113 3113
@@ -320,7 +320,7 b' test unknown revision in `_list`'
320 320
321 321 $ log '0|unknown'
322 322 abort: unknown revision 'unknown'
323 [255]
323 [10]
324 324
325 325 test integer range in `_list`
326 326
@@ -330,11 +330,11 b' test integer range in `_list`'
330 330
331 331 $ log '-10|-11'
332 332 abort: unknown revision '-11'
333 [255]
333 [10]
334 334
335 335 $ log '9|10'
336 336 abort: unknown revision '10'
337 [255]
337 [10]
338 338
339 339 test '0000' != '0' in `_list`
340 340
@@ -590,7 +590,7 b' we can use patterns when searching for t'
590 590
591 591 $ log 'tag("1..*")'
592 592 abort: tag '1..*' does not exist
593 [255]
593 [10]
594 594 $ log 'tag("re:1..*")'
595 595 6
596 596 $ log 'tag("re:[0-9].[0-9]")'
@@ -601,16 +601,16 b' we can use patterns when searching for t'
601 601
602 602 $ log 'tag(unknown)'
603 603 abort: tag 'unknown' does not exist
604 [255]
604 [10]
605 605 $ log 'tag("re:unknown")'
606 606 $ log 'present(tag("unknown"))'
607 607 $ log 'present(tag("re:unknown"))'
608 608 $ log 'branch(unknown)'
609 609 abort: unknown revision 'unknown'
610 [255]
610 [10]
611 611 $ log 'branch("literal:unknown")'
612 612 abort: branch 'unknown' does not exist
613 [255]
613 [10]
614 614 $ log 'branch("re:unknown")'
615 615 $ log 'present(branch("unknown"))'
616 616 $ log 'present(branch("re:unknown"))'
@@ -666,7 +666,7 b' matching() should preserve the order of '
666 666
667 667 $ log 'named("unknown")'
668 668 abort: namespace 'unknown' does not exist
669 [255]
669 [10]
670 670 $ log 'named("re:unknown")'
671 671 $ log 'present(named("unknown"))'
672 672 $ log 'present(named("re:unknown"))'
@@ -759,7 +759,7 b' parentrevspec'
759 759
760 760 $ log 'branchpoint()~-1'
761 761 abort: revision in set has more than one child
762 [255]
762 [10]
763 763
764 764 Bogus function gets suggestions
765 765 $ log 'add()'
@@ -840,7 +840,7 b' test usage in revpair (with "+")'
840 840
841 841 $ hg diff -r 'author("babar") or author("celeste")'
842 842 abort: empty revision range
843 [255]
843 [10]
844 844
845 845 aliases:
846 846
@@ -121,11 +121,16 b' Specifying revisions by changeset ID'
121 121 file-3
122 122 $ $NO_FALLBACK rhg cat -r cf8b83 file-2
123 123 2
124 $ $NO_FALLBACK rhg cat --rev cf8b83 file-2
125 2
124 126 $ $NO_FALLBACK rhg cat -r c file-2
125 127 abort: ambiguous revision identifier: c
126 128 [255]
127 129 $ $NO_FALLBACK rhg cat -r d file-2
128 130 2
131 $ $NO_FALLBACK rhg cat -r 0000 file-2
132 file-2: no such file in rev 000000000000
133 [1]
129 134
130 135 Cat files
131 136 $ cd $TESTTMP
@@ -135,42 +140,102 b' Cat files'
135 140 $ echo "original content" > original
136 141 $ hg add original
137 142 $ hg commit -m "add original" original
143 Without `--rev`
144 $ $NO_FALLBACK rhg cat original
145 original content
146 With `--rev`
138 147 $ $NO_FALLBACK rhg cat -r 0 original
139 148 original content
140 149 Cat copied file should not display copy metadata
141 150 $ hg copy original copy_of_original
142 151 $ hg commit -m "add copy of original"
152 $ $NO_FALLBACK rhg cat original
153 original content
143 154 $ $NO_FALLBACK rhg cat -r 1 copy_of_original
144 155 original content
145 156
157
146 158 Fallback to Python
147 $ $NO_FALLBACK rhg cat original
148 unsupported feature: `rhg cat` without `--rev` / `-r`
159 $ $NO_FALLBACK rhg cat original --exclude="*.rs"
160 unsupported feature: error: Found argument '--exclude' which wasn't expected, or isn't valid in this context
161
162 USAGE:
163 rhg cat [OPTIONS] <FILE>...
164
165 For more information try --help
166
149 167 [252]
150 $ rhg cat original
168 $ rhg cat original --exclude="*.rs"
151 169 original content
152 170
153 171 $ FALLBACK_EXE="$RHG_FALLBACK_EXECUTABLE"
154 172 $ unset RHG_FALLBACK_EXECUTABLE
155 $ rhg cat original
173 $ rhg cat original --exclude="*.rs"
156 174 abort: 'rhg.on-unsupported=fallback' without 'rhg.fallback-executable' set.
157 175 [255]
158 176 $ RHG_FALLBACK_EXECUTABLE="$FALLBACK_EXE"
159 177 $ export RHG_FALLBACK_EXECUTABLE
160 178
161 $ rhg cat original --config rhg.fallback-executable=false
179 $ rhg cat original --exclude="*.rs" --config rhg.fallback-executable=false
162 180 [1]
163 181
164 $ rhg cat original --config rhg.fallback-executable=hg-non-existent
182 $ rhg cat original --exclude="*.rs" --config rhg.fallback-executable=hg-non-existent
165 183 tried to fall back to a 'hg-non-existent' sub-process but got error $ENOENT$
166 unsupported feature: `rhg cat` without `--rev` / `-r`
184 unsupported feature: error: Found argument '--exclude' which wasn't expected, or isn't valid in this context
185
186 USAGE:
187 rhg cat [OPTIONS] <FILE>...
188
189 For more information try --help
190
191 [252]
192
193 $ rhg cat original --exclude="*.rs" --config rhg.fallback-executable=rhg
194 Blocking recursive fallback. The 'rhg.fallback-executable = rhg' config points to `rhg` itself.
195 unsupported feature: error: Found argument '--exclude' which wasn't expected, or isn't valid in this context
196
197 USAGE:
198 rhg cat [OPTIONS] <FILE>...
199
200 For more information try --help
201
167 202 [252]
168 203
169 $ rhg cat original --config rhg.fallback-executable=rhg
170 Blocking recursive fallback. The 'rhg.fallback-executable = rhg' config points to `rhg` itself.
171 unsupported feature: `rhg cat` without `--rev` / `-r`
204 Fallback with shell path segments
205 $ $NO_FALLBACK rhg cat .
206 unsupported feature: `..` or `.` path segment
207 [252]
208 $ $NO_FALLBACK rhg cat ..
209 unsupported feature: `..` or `.` path segment
210 [252]
211 $ $NO_FALLBACK rhg cat ../..
212 unsupported feature: `..` or `.` path segment
213 [252]
214
215 Fallback with filesets
216 $ $NO_FALLBACK rhg cat "set:c or b"
217 unsupported feature: fileset
172 218 [252]
173 219
220 Fallback with generic hooks
221 $ $NO_FALLBACK rhg cat original --config hooks.pre-cat=something
222 unsupported feature: pre-cat hook defined
223 [252]
224
225 $ $NO_FALLBACK rhg cat original --config hooks.post-cat=something
226 unsupported feature: post-cat hook defined
227 [252]
228
229 $ $NO_FALLBACK rhg cat original --config hooks.fail-cat=something
230 unsupported feature: fail-cat hook defined
231 [252]
232
233 Fallback with [defaults]
234 $ $NO_FALLBACK rhg cat original --config "defaults.cat=-r null"
235 unsupported feature: `defaults` config set
236 [252]
237
238
174 239 Requirements
175 240 $ $NO_FALLBACK rhg debugrequirements
176 241 dotencode
@@ -307,3 +372,12 b' The blackbox extension is supported'
307 372 $ cat .hg/blackbox.log.1
308 373 ????/??/?? ??:??:??.??? * @d3873e73d99ef67873dac33fbcc66268d5d2b6f4 (*)> (rust) files (glob)
309 374
375 Subrepos are not supported
376
377 $ touch .hgsub
378 $ $NO_FALLBACK rhg files
379 unsupported feature: subrepos (.hgsub is present)
380 [252]
381 $ rhg files
382 a
383 $ rm .hgsub
@@ -19,7 +19,7 b' prepare source repo'
19 19 $ hg init source
20 20 $ cd source
21 21 $ cat .hg/requires
22 exp-dirstate-v2 (dirstate-v2 !)
22 exp-rc-dirstate-v2 (dirstate-v2 !)
23 23 share-safe
24 24 $ cat .hg/store/requires
25 25 dotencode
@@ -30,7 +30,7 b' prepare source repo'
30 30 store
31 31 $ hg debugrequirements
32 32 dotencode
33 exp-dirstate-v2 (dirstate-v2 !)
33 exp-rc-dirstate-v2 (dirstate-v2 !)
34 34 fncache
35 35 generaldelta
36 36 revlogv1
@@ -54,13 +54,13 b' Create a shared repo and check the requi'
54 54 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
55 55 $ cd shared1
56 56 $ cat .hg/requires
57 exp-dirstate-v2 (dirstate-v2 !)
57 exp-rc-dirstate-v2 (dirstate-v2 !)
58 58 share-safe
59 59 shared
60 60
61 61 $ hg debugrequirements -R ../source
62 62 dotencode
63 exp-dirstate-v2 (dirstate-v2 !)
63 exp-rc-dirstate-v2 (dirstate-v2 !)
64 64 fncache
65 65 generaldelta
66 66 revlogv1
@@ -70,7 +70,7 b' Create a shared repo and check the requi'
70 70
71 71 $ hg debugrequirements
72 72 dotencode
73 exp-dirstate-v2 (dirstate-v2 !)
73 exp-rc-dirstate-v2 (dirstate-v2 !)
74 74 fncache
75 75 generaldelta
76 76 revlogv1
@@ -225,7 +225,7 b' Disable zstd related tests because its n'
225 225
226 226 requirements
227 227 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-dirstate-v2 !)
228 preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (dirstate-v2 !)
228 preserved: dotencode, exp-rc-dirstate-v2, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (dirstate-v2 !)
229 229 added: revlog-compression-zstd
230 230
231 231 processed revlogs:
@@ -253,8 +253,8 b' Disable zstd related tests because its n'
253 253 requirements
254 254 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-zstd no-dirstate-v2 !)
255 255 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd no-dirstate-v2 !)
256 preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-zstd dirstate-v2 !)
257 preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd dirstate-v2 !)
256 preserved: dotencode, exp-rc-dirstate-v2, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-zstd dirstate-v2 !)
257 preserved: dotencode, exp-rc-dirstate-v2, fncache, generaldelta, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd dirstate-v2 !)
258 258 added: persistent-nodemap
259 259
260 260 processed revlogs:
@@ -327,7 +327,7 b' Test that upgrading using debugupgradere'
327 327 $ cd non-share-safe
328 328 $ hg debugrequirements
329 329 dotencode
330 exp-dirstate-v2 (dirstate-v2 !)
330 exp-rc-dirstate-v2 (dirstate-v2 !)
331 331 fncache
332 332 generaldelta
333 333 revlogv1
@@ -346,7 +346,7 b' Create a share before upgrading'
346 346 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
347 347 $ hg debugrequirements -R nss-share
348 348 dotencode
349 exp-dirstate-v2 (dirstate-v2 !)
349 exp-rc-dirstate-v2 (dirstate-v2 !)
350 350 fncache
351 351 generaldelta
352 352 revlogv1
@@ -360,7 +360,7 b' Upgrade'
360 360 $ hg debugupgraderepo -q
361 361 requirements
362 362 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-dirstate-v2 !)
363 preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, revlogv1, sparserevlog, store (dirstate-v2 !)
363 preserved: dotencode, exp-rc-dirstate-v2, fncache, generaldelta, revlogv1, sparserevlog, store (dirstate-v2 !)
364 364 added: share-safe
365 365
366 366 processed revlogs:
@@ -373,7 +373,7 b' Upgrade'
373 373
374 374 requirements
375 375 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-dirstate-v2 !)
376 preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, revlogv1, sparserevlog, store (dirstate-v2 !)
376 preserved: dotencode, exp-rc-dirstate-v2, fncache, generaldelta, revlogv1, sparserevlog, store (dirstate-v2 !)
377 377 added: share-safe
378 378
379 379 share-safe
@@ -394,7 +394,7 b' Upgrade'
394 394
395 395 $ hg debugrequirements
396 396 dotencode
397 exp-dirstate-v2 (dirstate-v2 !)
397 exp-rc-dirstate-v2 (dirstate-v2 !)
398 398 fncache
399 399 generaldelta
400 400 revlogv1
@@ -403,7 +403,7 b' Upgrade'
403 403 store
404 404
405 405 $ cat .hg/requires
406 exp-dirstate-v2 (dirstate-v2 !)
406 exp-rc-dirstate-v2 (dirstate-v2 !)
407 407 share-safe
408 408
409 409 $ cat .hg/store/requires
@@ -454,7 +454,7 b' Test that downgrading works too'
454 454 $ hg debugupgraderepo -q
455 455 requirements
456 456 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-dirstate-v2 !)
457 preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, revlogv1, sparserevlog, store (dirstate-v2 !)
457 preserved: dotencode, exp-rc-dirstate-v2, fncache, generaldelta, revlogv1, sparserevlog, store (dirstate-v2 !)
458 458 removed: share-safe
459 459
460 460 processed revlogs:
@@ -467,7 +467,7 b' Test that downgrading works too'
467 467
468 468 requirements
469 469 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-dirstate-v2 !)
470 preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, revlogv1, sparserevlog, store (dirstate-v2 !)
470 preserved: dotencode, exp-rc-dirstate-v2, fncache, generaldelta, revlogv1, sparserevlog, store (dirstate-v2 !)
471 471 removed: share-safe
472 472
473 473 processed revlogs:
@@ -485,7 +485,7 b' Test that downgrading works too'
485 485
486 486 $ hg debugrequirements
487 487 dotencode
488 exp-dirstate-v2 (dirstate-v2 !)
488 exp-rc-dirstate-v2 (dirstate-v2 !)
489 489 fncache
490 490 generaldelta
491 491 revlogv1
@@ -494,7 +494,7 b' Test that downgrading works too'
494 494
495 495 $ cat .hg/requires
496 496 dotencode
497 exp-dirstate-v2 (dirstate-v2 !)
497 exp-rc-dirstate-v2 (dirstate-v2 !)
498 498 fncache
499 499 generaldelta
500 500 revlogv1
@@ -553,7 +553,7 b' Testing automatic upgrade of shares when'
553 553
554 554 requirements
555 555 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-dirstate-v2 !)
556 preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, revlogv1, sparserevlog, store (dirstate-v2 !)
556 preserved: dotencode, exp-rc-dirstate-v2, fncache, generaldelta, revlogv1, sparserevlog, store (dirstate-v2 !)
557 557 added: share-safe
558 558
559 559 processed revlogs:
@@ -564,7 +564,7 b' Testing automatic upgrade of shares when'
564 564 repository upgraded to share safe mode, existing shares will still work in old non-safe mode. Re-share existing shares to use them in safe mode New shares will be created in safe mode.
565 565 $ hg debugrequirements
566 566 dotencode
567 exp-dirstate-v2 (dirstate-v2 !)
567 exp-rc-dirstate-v2 (dirstate-v2 !)
568 568 fncache
569 569 generaldelta
570 570 revlogv1
@@ -47,8 +47,8 b" share shouldn't have a full cache dir, o"
47 47 [1]
48 48 $ ls -1 .hg/wcache || true
49 49 checkisexec (execbit !)
50 checklink (symlink !)
51 checklink-target (symlink !)
50 checklink (symlink no-rust !)
51 checklink-target (symlink no-rust !)
52 52 manifestfulltextcache (reporevlogstore !)
53 53 $ ls -1 ../repo1/.hg/cache
54 54 branch2-served
@@ -160,7 +160,7 b' hg serve shared clone'
160 160 Cloning a shared repo via bundle2 results in a non-shared clone
161 161
162 162 $ cd ..
163 $ hg clone -q --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/`pwd`/repo2 cloned-via-bundle2
163 $ hg clone -q --stream ssh://user@dummy/`pwd`/repo2 cloned-via-bundle2
164 164 $ cat ./cloned-via-bundle2/.hg/requires | grep "shared"
165 165 [1]
166 166 $ hg id --cwd cloned-via-bundle2 -r tip
@@ -2,7 +2,6 b' test sparse'
2 2
3 3 $ cat >> $HGRCPATH << EOF
4 4 > [ui]
5 > ssh = "$PYTHON" "$RUNTESTDIR/dummyssh"
6 5 > username = nobody <no.reply@fb.com>
7 6 > [extensions]
8 7 > sparse=
@@ -18,7 +18,7 b' Enable sparse profile'
18 18
19 19 $ cat .hg/requires
20 20 dotencode
21 exp-dirstate-v2 (dirstate-v2 !)
21 exp-rc-dirstate-v2 (dirstate-v2 !)
22 22 fncache
23 23 generaldelta
24 24 persistent-nodemap (rust !)
@@ -38,7 +38,7 b' Requirement for sparse added when sparse'
38 38
39 39 $ cat .hg/requires
40 40 dotencode
41 exp-dirstate-v2 (dirstate-v2 !)
41 exp-rc-dirstate-v2 (dirstate-v2 !)
42 42 exp-sparse
43 43 fncache
44 44 generaldelta
@@ -61,7 +61,7 b' Requirement for sparse is removed when s'
61 61
62 62 $ cat .hg/requires
63 63 dotencode
64 exp-dirstate-v2 (dirstate-v2 !)
64 exp-rc-dirstate-v2 (dirstate-v2 !)
65 65 fncache
66 66 generaldelta
67 67 persistent-nodemap (rust !)
@@ -15,7 +15,7 b' New repo should not use SQLite by defaul'
15 15 $ hg init empty-no-sqlite
16 16 $ cat empty-no-sqlite/.hg/requires
17 17 dotencode
18 exp-dirstate-v2 (dirstate-v2 !)
18 exp-rc-dirstate-v2 (dirstate-v2 !)
19 19 fncache
20 20 generaldelta
21 21 persistent-nodemap (rust !)
@@ -29,7 +29,7 b' storage.new-repo-backend=sqlite is recog'
29 29 $ hg --config storage.new-repo-backend=sqlite init empty-sqlite
30 30 $ cat empty-sqlite/.hg/requires
31 31 dotencode
32 exp-dirstate-v2 (dirstate-v2 !)
32 exp-rc-dirstate-v2 (dirstate-v2 !)
33 33 exp-sqlite-001
34 34 exp-sqlite-comp-001=zstd (zstd !)
35 35 exp-sqlite-comp-001=$BUNDLE2_COMPRESSIONS$ (no-zstd !)
@@ -51,7 +51,7 b' Can force compression to zlib'
51 51 $ hg --config storage.sqlite.compression=zlib init empty-zlib
52 52 $ cat empty-zlib/.hg/requires
53 53 dotencode
54 exp-dirstate-v2 (dirstate-v2 !)
54 exp-rc-dirstate-v2 (dirstate-v2 !)
55 55 exp-sqlite-001
56 56 exp-sqlite-comp-001=$BUNDLE2_COMPRESSIONS$
57 57 fncache
@@ -67,7 +67,7 b' Can force compression to none'
67 67 $ hg --config storage.sqlite.compression=none init empty-none
68 68 $ cat empty-none/.hg/requires
69 69 dotencode
70 exp-dirstate-v2 (dirstate-v2 !)
70 exp-rc-dirstate-v2 (dirstate-v2 !)
71 71 exp-sqlite-001
72 72 exp-sqlite-comp-001=none
73 73 fncache
@@ -9,7 +9,7 b' Checking that when lookup multiple bookm'
9 9 fails (thus causing the sshpeer to be stopped), the errors from the
10 10 further lookups don't result in tracebacks.
11 11
12 $ hg pull -r b0 -r nosuchbookmark $(for i in $($TESTDIR/seq.py 1 20); do echo -r b$i; done) -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/$(pwd)/../a
12 $ hg pull -r b0 -r nosuchbookmark $(for i in $($TESTDIR/seq.py 1 20); do echo -r b$i; done) ssh://user@dummy/$(pwd)/../a
13 13 pulling from ssh://user@dummy/$TESTTMP/b/../a
14 14 abort: unknown revision 'nosuchbookmark'
15 15 [255]
@@ -52,7 +52,7 b' configure for serving'
52 52
53 53 repo not found error
54 54
55 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/nonexistent local
55 $ hg clone ssh://user@dummy/nonexistent local
56 56 remote: abort: repository nonexistent not found
57 57 abort: no suitable response from remote hg
58 58 [255]
@@ -60,7 +60,7 b' repo not found error'
60 60 non-existent absolute path
61 61
62 62 #if no-msys
63 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy//`pwd`/nonexistent local
63 $ hg clone ssh://user@dummy//`pwd`/nonexistent local
64 64 remote: abort: repository /$TESTTMP/nonexistent not found
65 65 abort: no suitable response from remote hg
66 66 [255]
@@ -70,7 +70,7 b' clone remote via stream'
70 70
71 71 #if no-reposimplestore
72 72
73 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --stream ssh://user@dummy/remote local-stream
73 $ hg clone --stream ssh://user@dummy/remote local-stream
74 74 streaming all changes
75 75 4 files to transfer, 602 bytes of data (no-zstd !)
76 76 transferred 602 bytes in * seconds (*) (glob) (no-zstd !)
@@ -94,7 +94,7 b' clone remote via stream'
94 94 clone bookmarks via stream
95 95
96 96 $ hg -R local-stream book mybook
97 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --stream ssh://user@dummy/local-stream stream2
97 $ hg clone --stream ssh://user@dummy/local-stream stream2
98 98 streaming all changes
99 99 4 files to transfer, 602 bytes of data (no-zstd !)
100 100 transferred 602 bytes in * seconds (*) (glob) (no-zstd !)
@@ -114,7 +114,7 b' clone bookmarks via stream'
114 114
115 115 clone remote via pull
116 116
117 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote local
117 $ hg clone ssh://user@dummy/remote local
118 118 requesting all changes
119 119 adding changesets
120 120 adding manifests
@@ -142,14 +142,14 b' empty default pull'
142 142
143 143 $ hg paths
144 144 default = ssh://user@dummy/remote
145 $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\""
145 $ hg pull
146 146 pulling from ssh://user@dummy/remote
147 147 searching for changes
148 148 no changes found
149 149
150 150 pull from wrong ssh URL
151 151
152 $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/doesnotexist
152 $ hg pull ssh://user@dummy/doesnotexist
153 153 pulling from ssh://user@dummy/doesnotexist
154 154 remote: abort: repository doesnotexist not found
155 155 abort: no suitable response from remote hg
@@ -163,8 +163,6 b' local change'
163 163 updating rc
164 164
165 165 $ echo "default-push = ssh://user@dummy/remote" >> .hg/hgrc
166 $ echo "[ui]" >> .hg/hgrc
167 $ echo "ssh = \"$PYTHON\" \"$TESTDIR/dummyssh\"" >> .hg/hgrc
168 166
169 167 find outgoing
170 168
@@ -181,7 +179,7 b' find outgoing'
181 179
182 180 find incoming on the remote side
183 181
184 $ hg incoming -R ../remote -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/local
182 $ hg incoming -R ../remote ssh://user@dummy/local
185 183 comparing with ssh://user@dummy/local
186 184 searching for changes
187 185 changeset: 3:a28a9d1a809c
@@ -194,7 +192,7 b' find incoming on the remote side'
194 192
195 193 find incoming on the remote side (using absolute path)
196 194
197 $ hg incoming -R ../remote -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/`pwd`"
195 $ hg incoming -R ../remote "ssh://user@dummy/`pwd`"
198 196 comparing with ssh://user@dummy/$TESTTMP/local
199 197 searching for changes
200 198 changeset: 3:a28a9d1a809c
@@ -241,7 +239,7 b' check remote tip'
241 239 test pushkeys and bookmarks
242 240
243 241 $ cd $TESTTMP/local
244 $ hg debugpushkey --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote namespaces
242 $ hg debugpushkey ssh://user@dummy/remote namespaces
245 243 bookmarks
246 244 namespaces
247 245 phases
@@ -256,7 +254,7 b' test pushkeys and bookmarks'
256 254 no changes found
257 255 exporting bookmark foo
258 256 [1]
259 $ hg debugpushkey --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote bookmarks
257 $ hg debugpushkey ssh://user@dummy/remote bookmarks
260 258 foo 1160648e36cec0054048a7edc4110c6f84fde594
261 259 $ hg book -f foo
262 260 $ hg push --traceback
@@ -328,7 +326,7 b' clone bookmarks'
328 326 $ hg -R ../remote bookmark test
329 327 $ hg -R ../remote bookmarks
330 328 * test 4:6c0482d977a3
331 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote local-bookmarks
329 $ hg clone ssh://user@dummy/remote local-bookmarks
332 330 requesting all changes
333 331 adding changesets
334 332 adding manifests
@@ -356,21 +354,21 b' hide outer repo'
356 354
357 355 Test remote paths with spaces (issue2983):
358 356
359 $ hg init --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo"
357 $ hg init "ssh://user@dummy/a repo"
360 358 $ touch "$TESTTMP/a repo/test"
361 359 $ hg -R 'a repo' commit -A -m "test"
362 360 adding test
363 361 $ hg -R 'a repo' tag tag
364 $ hg id --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo"
362 $ hg id "ssh://user@dummy/a repo"
365 363 73649e48688a
366 364
367 $ hg id --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo#noNoNO"
365 $ hg id "ssh://user@dummy/a repo#noNoNO"
368 366 abort: unknown revision 'noNoNO'
369 367 [255]
370 368
371 369 Test (non-)escaping of remote paths with spaces when cloning (issue3145):
372 370
373 $ hg clone --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo"
371 $ hg clone "ssh://user@dummy/a repo"
374 372 destination directory: a repo
375 373 abort: destination 'a repo' is not empty
376 374 [10]
@@ -462,8 +460,6 b' stderr from remote commands should be pr'
462 460 $ cat >> .hg/hgrc << EOF
463 461 > [paths]
464 462 > default-push = ssh://user@dummy/remote
465 > [ui]
466 > ssh = "$PYTHON" "$TESTDIR/dummyssh"
467 463 > [extensions]
468 464 > localwrite = localwrite.py
469 465 > EOF
@@ -486,7 +482,7 b' debug output'
486 482
487 483 $ hg pull --debug ssh://user@dummy/remote
488 484 pulling from ssh://user@dummy/remote
489 running .* ".*/dummyssh" ['"]user@dummy['"] ('|")hg -R remote serve --stdio('|") (re)
485 running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R remote serve --stdio['"] (re)
490 486 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
491 487 sending hello command
492 488 sending between command
@@ -583,11 +579,11 b' remote hook failure is attributed to rem'
583 579
584 580 $ echo "pretxnchangegroup.fail = python:$TESTTMP/failhook:hook" >> remote/.hg/hgrc
585 581
586 $ hg -q --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" clone ssh://user@dummy/remote hookout
582 $ hg -q clone ssh://user@dummy/remote hookout
587 583 $ cd hookout
588 584 $ touch hookfailure
589 585 $ hg -q commit -A -m 'remote hook failure'
590 $ hg --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" push
586 $ hg push
591 587 pushing to ssh://user@dummy/remote
592 588 searching for changes
593 589 remote: adding changesets
@@ -607,7 +603,7 b' abort during pull is properly reported a'
607 603 > [extensions]
608 604 > crash = ${TESTDIR}/crashgetbundler.py
609 605 > EOF
610 $ hg --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" pull
606 $ hg pull
611 607 pulling from ssh://user@dummy/remote
612 608 searching for changes
613 609 adding changesets
@@ -28,7 +28,7 b" creating 'remote' repo"
28 28 clone remote via stream
29 29
30 30 $ for i in 0 1 2 3 4 5 6 7 8; do
31 > hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --stream -r "$i" ssh://user@dummy/remote test-"$i"
31 > hg clone --stream -r "$i" ssh://user@dummy/remote test-"$i"
32 32 > if cd test-"$i"; then
33 33 > hg verify
34 34 > cd ..
@@ -160,7 +160,7 b' clone remote via stream'
160 160 checked 9 changesets with 7 changes to 4 files
161 161 $ cd ..
162 162 $ cd test-1
163 $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" -r 4 ssh://user@dummy/remote
163 $ hg pull -r 4 ssh://user@dummy/remote
164 164 pulling from ssh://user@dummy/remote
165 165 searching for changes
166 166 adding changesets
@@ -175,7 +175,7 b' clone remote via stream'
175 175 crosschecking files in changesets and manifests
176 176 checking files
177 177 checked 3 changesets with 2 changes to 1 files
178 $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote
178 $ hg pull ssh://user@dummy/remote
179 179 pulling from ssh://user@dummy/remote
180 180 searching for changes
181 181 adding changesets
@@ -186,7 +186,7 b' clone remote via stream'
186 186 (run 'hg update' to get a working copy)
187 187 $ cd ..
188 188 $ cd test-2
189 $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" -r 5 ssh://user@dummy/remote
189 $ hg pull -r 5 ssh://user@dummy/remote
190 190 pulling from ssh://user@dummy/remote
191 191 searching for changes
192 192 adding changesets
@@ -201,7 +201,7 b' clone remote via stream'
201 201 crosschecking files in changesets and manifests
202 202 checking files
203 203 checked 5 changesets with 3 changes to 1 files
204 $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote
204 $ hg pull ssh://user@dummy/remote
205 205 pulling from ssh://user@dummy/remote
206 206 searching for changes
207 207 adding changesets
@@ -28,8 +28,6 b' protocols with inline conditional output'
28 28 > }
29 29
30 30 $ cat >> $HGRCPATH << EOF
31 > [ui]
32 > ssh = "$PYTHON" "$TESTDIR/dummyssh"
33 31 > [devel]
34 32 > debug.peer-request = true
35 33 > [extensions]
@@ -65,8 +63,7 b' Test a normal behaving server, for sanit'
65 63 $ cd ..
66 64
67 65 $ hg --debug debugpeer ssh://user@dummy/server
68 running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
69 running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
66 running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re)
70 67 devel-peer-request: hello+between
71 68 devel-peer-request: pairs: 81 bytes
72 69 sending hello command
@@ -178,8 +175,7 b' SSH banner is not printed by default, ig'
178 175 --debug will print the banner
179 176
180 177 $ SSHSERVERMODE=banner hg --debug debugpeer ssh://user@dummy/server
181 running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
182 running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
178 running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re)
183 179 devel-peer-request: hello+between
184 180 devel-peer-request: pairs: 81 bytes
185 181 sending hello command
@@ -269,8 +265,7 b' The client should refuse, as we dropped '
269 265 servers.
270 266
271 267 $ SSHSERVERMODE=no-hello hg --debug debugpeer ssh://user@dummy/server
272 running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
273 running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
268 running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re)
274 269 devel-peer-request: hello+between
275 270 devel-peer-request: pairs: 81 bytes
276 271 sending hello command
@@ -315,8 +310,7 b' Sending an unknown command to the server'
315 310 o> 1\n
316 311
317 312 $ hg --config sshpeer.mode=extra-handshake-commands --config sshpeer.handshake-mode=pre-no-args --debug debugpeer ssh://user@dummy/server
318 running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
319 running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
313 running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re)
320 314 sending no-args command
321 315 devel-peer-request: hello+between
322 316 devel-peer-request: pairs: 81 bytes
@@ -385,8 +379,7 b' Send multiple unknown commands before he'
385 379 o> \n
386 380
387 381 $ hg --config sshpeer.mode=extra-handshake-commands --config sshpeer.handshake-mode=pre-multiple-no-args --debug debugpeer ssh://user@dummy/server
388 running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
389 running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
382 running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re)
390 383 sending unknown1 command
391 384 sending unknown2 command
392 385 sending unknown3 command
@@ -961,8 +954,7 b' Send an upgrade request to a server that'
961 954 $ cd ..
962 955
963 956 $ hg --config experimental.sshpeer.advertise-v2=true --debug debugpeer ssh://user@dummy/server
964 running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
965 running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
957 running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re)
966 958 sending upgrade request: * proto=exp-ssh-v2-0003 (glob)
967 959 devel-peer-request: hello+between
968 960 devel-peer-request: pairs: 81 bytes
@@ -1019,8 +1011,7 b' Send an upgrade request to a server that'
1019 1011 $ cd ..
1020 1012
1021 1013 $ hg --config experimental.sshpeer.advertise-v2=true --debug debugpeer ssh://user@dummy/server
1022 running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
1023 running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
1014 running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re)
1024 1015 sending upgrade request: * proto=exp-ssh-v2-0003 (glob)
1025 1016 devel-peer-request: hello+between
1026 1017 devel-peer-request: pairs: 81 bytes
@@ -1038,8 +1029,7 b' Send an upgrade request to a server that'
1038 1029 Verify the peer has capabilities
1039 1030
1040 1031 $ hg --config experimental.sshpeer.advertise-v2=true --debug debugcapabilities ssh://user@dummy/server
1041 running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
1042 running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
1032 running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re)
1043 1033 sending upgrade request: * proto=exp-ssh-v2-0003 (glob)
1044 1034 devel-peer-request: hello+between
1045 1035 devel-peer-request: pairs: 81 bytes
@@ -4,13 +4,6 b' XXX-RHG this test hangs if `hg` is reall'
4 4 `alias hg=rhg` by run-tests.py. With such alias removed, this test is revealed
5 5 buggy. This need to be resolved sooner than later.
6 6
7 initial setup
8
9 $ cat << EOF >> $HGRCPATH
10 > [ui]
11 > ssh="$PYTHON" "$TESTDIR/dummyssh"
12 > EOF
13
14 7 repository itself is non-readable
15 8 ---------------------------------
16 9
@@ -42,18 +42,18 b' configure for serving'
42 42
43 43 repo not found error
44 44
45 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/nonexistent local
45 $ hg clone ssh://user@dummy/nonexistent local
46 46 remote: abort: repository nonexistent not found
47 47 abort: no suitable response from remote hg
48 48 [255]
49 $ hg clone -q -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/nonexistent local
49 $ hg clone -q ssh://user@dummy/nonexistent local
50 50 remote: abort: repository nonexistent not found
51 51 abort: no suitable response from remote hg
52 52 [255]
53 53
54 54 non-existent absolute path
55 55
56 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/`pwd`/nonexistent local
56 $ hg clone ssh://user@dummy/`pwd`/nonexistent local
57 57 remote: abort: repository $TESTTMP/nonexistent not found
58 58 abort: no suitable response from remote hg
59 59 [255]
@@ -62,7 +62,7 b' clone remote via stream'
62 62
63 63 #if no-reposimplestore
64 64
65 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --stream ssh://user@dummy/remote local-stream
65 $ hg clone --stream ssh://user@dummy/remote local-stream
66 66 streaming all changes
67 67 8 files to transfer, 827 bytes of data (no-zstd !)
68 68 transferred 827 bytes in * seconds (*) (glob) (no-zstd !)
@@ -84,7 +84,7 b' clone remote via stream'
84 84 clone bookmarks via stream
85 85
86 86 $ hg -R local-stream book mybook
87 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --stream ssh://user@dummy/local-stream stream2
87 $ hg clone --stream ssh://user@dummy/local-stream stream2
88 88 streaming all changes
89 89 15 files to transfer, * of data (glob)
90 90 transferred * in * seconds (*) (glob)
@@ -100,7 +100,7 b' clone bookmarks via stream'
100 100
101 101 clone remote via pull
102 102
103 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote local
103 $ hg clone ssh://user@dummy/remote local
104 104 requesting all changes
105 105 adding changesets
106 106 adding manifests
@@ -128,14 +128,14 b' empty default pull'
128 128
129 129 $ hg paths
130 130 default = ssh://user@dummy/remote
131 $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\""
131 $ hg pull
132 132 pulling from ssh://user@dummy/remote
133 133 searching for changes
134 134 no changes found
135 135
136 136 pull from wrong ssh URL
137 137
138 $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/doesnotexist
138 $ hg pull ssh://user@dummy/doesnotexist
139 139 pulling from ssh://user@dummy/doesnotexist
140 140 remote: abort: repository doesnotexist not found
141 141 abort: no suitable response from remote hg
@@ -149,8 +149,6 b' local change'
149 149 updating rc
150 150
151 151 $ echo "default-push = ssh://user@dummy/remote" >> .hg/hgrc
152 $ echo "[ui]" >> .hg/hgrc
153 $ echo "ssh = \"$PYTHON\" \"$TESTDIR/dummyssh\"" >> .hg/hgrc
154 152
155 153 find outgoing
156 154
@@ -167,7 +165,7 b' find outgoing'
167 165
168 166 find incoming on the remote side
169 167
170 $ hg incoming -R ../remote -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/local
168 $ hg incoming -R ../remote ssh://user@dummy/local
171 169 comparing with ssh://user@dummy/local
172 170 searching for changes
173 171 changeset: 3:a28a9d1a809c
@@ -180,7 +178,7 b' find incoming on the remote side'
180 178
181 179 find incoming on the remote side (using absolute path)
182 180
183 $ hg incoming -R ../remote -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/`pwd`"
181 $ hg incoming -R ../remote "ssh://user@dummy/`pwd`"
184 182 comparing with ssh://user@dummy/$TESTTMP/local
185 183 searching for changes
186 184 changeset: 3:a28a9d1a809c
@@ -227,7 +225,7 b' check remote tip'
227 225 test pushkeys and bookmarks
228 226
229 227 $ cd $TESTTMP/local
230 $ hg debugpushkey --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote namespaces
228 $ hg debugpushkey ssh://user@dummy/remote namespaces
231 229 bookmarks
232 230 namespaces
233 231 phases
@@ -242,7 +240,7 b' test pushkeys and bookmarks'
242 240 no changes found
243 241 exporting bookmark foo
244 242 [1]
245 $ hg debugpushkey --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote bookmarks
243 $ hg debugpushkey ssh://user@dummy/remote bookmarks
246 244 foo 1160648e36cec0054048a7edc4110c6f84fde594
247 245 $ hg book -f foo
248 246 $ hg push --traceback
@@ -347,7 +345,7 b' clone bookmarks'
347 345 $ hg -R ../remote bookmark test
348 346 $ hg -R ../remote bookmarks
349 347 * test 4:6c0482d977a3
350 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote local-bookmarks
348 $ hg clone ssh://user@dummy/remote local-bookmarks
351 349 requesting all changes
352 350 adding changesets
353 351 adding manifests
@@ -375,21 +373,21 b' hide outer repo'
375 373
376 374 Test remote paths with spaces (issue2983):
377 375
378 $ hg init --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo"
376 $ hg init "ssh://user@dummy/a repo"
379 377 $ touch "$TESTTMP/a repo/test"
380 378 $ hg -R 'a repo' commit -A -m "test"
381 379 adding test
382 380 $ hg -R 'a repo' tag tag
383 $ hg id --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo"
381 $ hg id "ssh://user@dummy/a repo"
384 382 73649e48688a
385 383
386 $ hg id --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo#noNoNO"
384 $ hg id "ssh://user@dummy/a repo#noNoNO"
387 385 abort: unknown revision 'noNoNO'
388 386 [255]
389 387
390 388 Test (non-)escaping of remote paths with spaces when cloning (issue3145):
391 389
392 $ hg clone --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo"
390 $ hg clone "ssh://user@dummy/a repo"
393 391 destination directory: a repo
394 392 abort: destination 'a repo' is not empty
395 393 [10]
@@ -515,8 +513,6 b' stderr from remote commands should be pr'
515 513 $ cat >> .hg/hgrc << EOF
516 514 > [paths]
517 515 > default-push = ssh://user@dummy/remote
518 > [ui]
519 > ssh = "$PYTHON" "$TESTDIR/dummyssh"
520 516 > [extensions]
521 517 > localwrite = localwrite.py
522 518 > EOF
@@ -540,7 +536,7 b' debug output'
540 536
541 537 $ hg pull --debug ssh://user@dummy/remote --config devel.debug.peer-request=yes
542 538 pulling from ssh://user@dummy/remote
543 running .* ".*/dummyssh" ['"]user@dummy['"] ('|")hg -R remote serve --stdio('|") (re)
539 running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R remote serve --stdio['"] (re)
544 540 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
545 541 devel-peer-request: hello+between
546 542 devel-peer-request: pairs: 81 bytes
@@ -670,11 +666,11 b' remote hook failure is attributed to rem'
670 666
671 667 $ echo "pretxnchangegroup.fail = python:$TESTTMP/failhook:hook" >> remote/.hg/hgrc
672 668
673 $ hg -q --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" clone ssh://user@dummy/remote hookout
669 $ hg -q clone ssh://user@dummy/remote hookout
674 670 $ cd hookout
675 671 $ touch hookfailure
676 672 $ hg -q commit -A -m 'remote hook failure'
677 $ hg --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" push
673 $ hg push
678 674 pushing to ssh://user@dummy/remote
679 675 searching for changes
680 676 remote: adding changesets
@@ -695,7 +691,7 b' abort during pull is properly reported a'
695 691 > [extensions]
696 692 > crash = ${TESTDIR}/crashgetbundler.py
697 693 > EOF
698 $ hg --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" pull
694 $ hg pull
699 695 pulling from ssh://user@dummy/remote
700 696 searching for changes
701 697 remote: abort: this is an exercise
@@ -704,14 +700,14 b' abort during pull is properly reported a'
704 700
705 701 abort with no error hint when there is a ssh problem when pulling
706 702
707 $ hg pull ssh://brokenrepository -e "\"$PYTHON\" \"$TESTDIR/dummyssh\""
703 $ hg pull ssh://brokenrepository
708 704 pulling from ssh://brokenrepository/
709 705 abort: no suitable response from remote hg
710 706 [255]
711 707
712 708 abort with configured error hint when there is a ssh problem when pulling
713 709
714 $ hg pull ssh://brokenrepository -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" \
710 $ hg pull ssh://brokenrepository \
715 711 > --config ui.ssherrorhint="Please see http://company/internalwiki/ssh.html"
716 712 pulling from ssh://brokenrepository/
717 713 abort: no suitable response from remote hg
@@ -1,21 +1,12 b''
1 #testcases dirstate-v1 dirstate-v1-tree dirstate-v2
2
3 #if no-rust
4 $ hg init repo0 --config format.exp-dirstate-v2=1
5 abort: dirstate v2 format requested by config but not supported (requires Rust extensions)
6 [255]
7 #endif
8
9 #if dirstate-v1-tree
10 #require rust
11 $ echo '[experimental]' >> $HGRCPATH
12 $ echo 'dirstate-tree.in-memory=1' >> $HGRCPATH
13 #endif
1 #testcases dirstate-v1 dirstate-v2
14 2
15 3 #if dirstate-v2
16 #require rust
17 $ echo '[format]' >> $HGRCPATH
18 $ echo 'exp-dirstate-v2=1' >> $HGRCPATH
4 $ cat >> $HGRCPATH << EOF
5 > [format]
6 > exp-rc-dirstate-v2=1
7 > [storage]
8 > dirstate-v2.slow-path=allow
9 > EOF
19 10 #endif
20 11
21 12 $ hg init repo1
@@ -749,7 +740,7 b' When a directory containing a tracked fi'
749 740 if also listing unknowns.
750 741 The tree-based dirstate and status algorithm fix this:
751 742
752 #if symlink no-dirstate-v1
743 #if symlink no-dirstate-v1 rust
753 744
754 745 $ cd ..
755 746 $ hg init issue6335
@@ -765,11 +756,11 b' The tree-based dirstate and status algor'
765 756 ? bar/a
766 757 ? foo
767 758
768 $ hg status -c # incorrect output with `dirstate-v1`
759 $ hg status -c # incorrect output without the Rust implementation
769 760 $ hg status -cu
770 761 ? bar/a
771 762 ? foo
772 $ hg status -d # incorrect output with `dirstate-v1`
763 $ hg status -d # incorrect output without the Rust implementation
773 764 ! foo/a
774 765 $ hg status -du
775 766 ! foo/a
@@ -916,7 +907,7 b' Check using include flag while listing i'
916 907 I B.hs
917 908 I ignored-folder/ctest.hs
918 909
919 #if dirstate-v2
910 #if rust dirstate-v2
920 911
921 912 Check read_dir caching
922 913
@@ -14,7 +14,6 b' Test creating a consuming stream bundle '
14 14 > evolution.exchange=True
15 15 > bundle2-output-capture=True
16 16 > [ui]
17 > ssh="$PYTHON" "$TESTDIR/dummyssh"
18 17 > logtemplate={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
19 18 > [web]
20 19 > push_ssl = false
@@ -49,12 +48,12 b' The extension requires a repo (currently'
49 48 stream2 -- {bytecount: 1693, filecount: 11, requirements: dotencode%2Cfncache%2Cgeneraldelta%2Crevlogv1%2Csparserevlog%2Cstore} (mandatory: True) (no-zstd !)
50 49 stream2 -- {bytecount: 1693, filecount: 11, requirements: dotencode%2Cfncache%2Cgeneraldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog%2Cstore} (mandatory: True) (zstd no-rust !)
51 50 stream2 -- {bytecount: 1693, filecount: 11, requirements: dotencode%2Cfncache%2Cgeneraldelta%2Cpersistent-nodemap%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog%2Cstore} (mandatory: True) (rust no-dirstate-v2 !)
52 stream2 -- {bytecount: 1693, filecount: 11, requirements: dotencode%2Cexp-dirstate-v2%2Cfncache%2Cgeneraldelta%2Cpersistent-nodemap%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog%2Cstore} (mandatory: True) (dirstate-v2 !)
51 stream2 -- {bytecount: 1693, filecount: 11, requirements: dotencode%2Cexp-rc-dirstate-v2%2Cfncache%2Cgeneraldelta%2Cpersistent-nodemap%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog%2Cstore} (mandatory: True) (dirstate-v2 !)
53 52 $ hg debugbundle --spec bundle.hg
54 53 none-v2;stream=v2;requirements%3Ddotencode%2Cfncache%2Cgeneraldelta%2Crevlogv1%2Csparserevlog%2Cstore (no-zstd !)
55 54 none-v2;stream=v2;requirements%3Ddotencode%2Cfncache%2Cgeneraldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog%2Cstore (zstd no-rust !)
56 55 none-v2;stream=v2;requirements%3Ddotencode%2Cfncache%2Cgeneraldelta%2Cpersistent-nodemap%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog%2Cstore (rust no-dirstate-v2 !)
57 none-v2;stream=v2;requirements%3Ddotencode%2Cexp-dirstate-v2%2Cfncache%2Cgeneraldelta%2Cpersistent-nodemap%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog%2Cstore (dirstate-v2 !)
56 none-v2;stream=v2;requirements%3Ddotencode%2Cexp-rc-dirstate-v2%2Cfncache%2Cgeneraldelta%2Cpersistent-nodemap%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog%2Cstore (dirstate-v2 !)
58 57
59 58 Test that we can apply the bundle as a stream clone bundle
60 59
@@ -709,7 +709,7 b' test hg strip -B bookmark'
709 709 bookmark 'todelete' deleted
710 710 $ hg id -ir dcbb326fdec2
711 711 abort: unknown revision 'dcbb326fdec2'
712 [255]
712 [10]
713 713 $ hg id -ir d62d843c9a01
714 714 d62d843c9a01
715 715 $ hg bookmarks
@@ -725,17 +725,17 b' test hg strip -B bookmark'
725 725 bookmark 'multipledelete2' deleted
726 726 $ hg id -ir e46a4836065c
727 727 abort: unknown revision 'e46a4836065c'
728 [255]
728 [10]
729 729 $ hg id -ir b4594d867745
730 730 abort: unknown revision 'b4594d867745'
731 [255]
731 [10]
732 732 $ hg strip -B singlenode1 -B singlenode2
733 733 saved backup bundle to $TESTTMP/bookmarks/.hg/strip-backup/43227190fef8-8da858f2-backup.hg
734 734 bookmark 'singlenode1' deleted
735 735 bookmark 'singlenode2' deleted
736 736 $ hg id -ir 43227190fef8
737 737 abort: unknown revision '43227190fef8'
738 [255]
738 [10]
739 739 $ hg strip -B unknownbookmark
740 740 abort: bookmark 'unknownbookmark' not found
741 741 [255]
@@ -750,7 +750,7 b' test hg strip -B bookmark'
750 750 bookmark 'delete' deleted
751 751 $ hg id -ir 6:2702dd0c91e7
752 752 abort: unknown revision '2702dd0c91e7'
753 [255]
753 [10]
754 754 $ hg update B
755 755 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
756 756 (activating bookmark B)
@@ -186,7 +186,7 b' subrepo is referenced by absolute path.'
186 186
187 187 subrepo paths with ssh urls
188 188
189 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/cloned sshclone
189 $ hg clone ssh://user@dummy/cloned sshclone
190 190 requesting all changes
191 191 adding changesets
192 192 adding manifests
@@ -203,7 +203,7 b' subrepo paths with ssh urls'
203 203 new changesets 863c1745b441
204 204 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
205 205
206 $ hg -R sshclone push -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/`pwd`/cloned
206 $ hg -R sshclone push ssh://user@dummy/`pwd`/cloned
207 207 pushing to ssh://user@dummy/$TESTTMP/cloned
208 208 pushing subrepo sub to ssh://user@dummy/$TESTTMP/sub
209 209 searching for changes
@@ -1275,8 +1275,8 b' Check that share works with subrepo'
1275 1275 ../shared/subrepo-2/.hg/sharedpath
1276 1276 ../shared/subrepo-2/.hg/wcache
1277 1277 ../shared/subrepo-2/.hg/wcache/checkisexec (execbit !)
1278 ../shared/subrepo-2/.hg/wcache/checklink (symlink !)
1279 ../shared/subrepo-2/.hg/wcache/checklink-target (symlink !)
1278 ../shared/subrepo-2/.hg/wcache/checklink (symlink no-rust !)
1279 ../shared/subrepo-2/.hg/wcache/checklink-target (symlink no-rust !)
1280 1280 ../shared/subrepo-2/.hg/wcache/manifestfulltextcache (reporevlogstore !)
1281 1281 ../shared/subrepo-2/file
1282 1282 $ hg -R ../shared in
@@ -1,17 +1,14 b''
1 1 #require symlink
2 2
3 #testcases dirstate-v1 dirstate-v1-tree dirstate-v2
4
5 #if dirstate-v1-tree
6 #require rust
7 $ echo '[experimental]' >> $HGRCPATH
8 $ echo 'dirstate-tree.in-memory=1' >> $HGRCPATH
9 #endif
3 #testcases dirstate-v1 dirstate-v2
10 4
11 5 #if dirstate-v2
12 #require rust
13 $ echo '[format]' >> $HGRCPATH
14 $ echo 'exp-dirstate-v2=1' >> $HGRCPATH
6 $ cat >> $HGRCPATH << EOF
7 > [format]
8 > exp-rc-dirstate-v2=1
9 > [storage]
10 > dirstate-v2.slow-path=allow
11 > EOF
15 12 #endif
16 13
17 14 == tests added in 0.7 ==
@@ -82,15 +82,14 b' and the second file.i entry should match'
82 82 date: Thu Jan 01 00:00:00 1970 +0000
83 83 summary: _
84 84
85 $ hg verify
86 checking changesets
87 checking manifests
88 crosschecking files in changesets and manifests
89 checking files
85 $ hg verify -q
90 86 warning: revlog 'data/file.d' not in fncache!
91 checked 2 changesets with 2 changes to 1 files
92 87 1 warnings encountered!
93 88 hint: run "hg debugrebuildfncache" to recover from corrupt fncache
89 $ hg debugrebuildfncache --only-data
90 adding data/file.d
91 1 items added, 0 removed from fncache
92 $ hg verify -q
94 93 $ cd ..
95 94
96 95
@@ -133,12 +132,7 b' where the data file is left as garbage.'
133 132 date: Thu Jan 01 00:00:00 1970 +0000
134 133 summary: _
135 134
136 $ hg verify
137 checking changesets
138 checking manifests
139 crosschecking files in changesets and manifests
140 checking files
141 checked 2 changesets with 2 changes to 1 files
135 $ hg verify -q
142 136 $ cd ..
143 137
144 138
@@ -170,13 +164,8 b' Repeat the original test but let hg roll'
170 164 date: Thu Jan 01 00:00:00 1970 +0000
171 165 summary: _
172 166
173 $ hg verify
174 checking changesets
175 checking manifests
176 crosschecking files in changesets and manifests
177 checking files
167 $ hg verify -q
178 168 warning: revlog 'data/file.d' not in fncache!
179 checked 2 changesets with 2 changes to 1 files
180 169 1 warnings encountered!
181 170 hint: run "hg debugrebuildfncache" to recover from corrupt fncache
182 171 $ cd ..
@@ -2,7 +2,7 b' Test that, when an hg push is interrupte'
2 2 the remote hg is able to successfully roll back the transaction.
3 3
4 4 $ hg init -q remote
5 $ hg clone -e "\"$PYTHON\" \"$RUNTESTDIR/dummyssh\"" -q ssh://user@dummy/`pwd`/remote local
5 $ hg clone -q ssh://user@dummy/`pwd`/remote local
6 6 $ SIGPIPE_REMOTE_DEBUG_FILE="$TESTTMP/DEBUGFILE"
7 7 $ SYNCFILE1="$TESTTMP/SYNCFILE1"
8 8 $ SYNCFILE2="$TESTTMP/SYNCFILE2"
@@ -36,7 +36,7 b' disconnecting. Then exit nonzero, to for'
36 36
37 37 (use quiet to avoid flacky output from the server)
38 38
39 $ hg push --quiet -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --remotecmd "$remotecmd"
39 $ hg push --quiet --remotecmd "$remotecmd"
40 40 abort: stream ended unexpectedly (got 0 bytes, expected 4)
41 41 [255]
42 42 $ cat $SIGPIPE_REMOTE_DEBUG_FILE
@@ -1,8 +1,3 b''
1 $ cat << EOF >> $HGRCPATH
2 > [ui]
3 > ssh="$PYTHON" "$TESTDIR/dummyssh"
4 > EOF
5
6 1 Set up repo
7 2
8 3 $ hg --config experimental.treemanifest=True init repo
@@ -1638,7 +1638,7 b' Demonstrate that nothing to perform upgr'
1638 1638
1639 1639 Upgrade to dirstate-v2
1640 1640
1641 $ hg debugformat -v --config format.exp-dirstate-v2=1
1641 $ hg debugformat -v --config format.exp-rc-dirstate-v2=1
1642 1642 format-variant repo config default
1643 1643 fncache: yes yes yes
1644 1644 dirstate-v2: no yes no
@@ -1653,12 +1653,12 b' Upgrade to dirstate-v2'
1653 1653 plain-cl-delta: yes yes yes
1654 1654 compression: zstd zstd zstd
1655 1655 compression-level: default default default
1656 $ hg debugupgraderepo --config format.exp-dirstate-v2=1 --run
1656 $ hg debugupgraderepo --config format.exp-rc-dirstate-v2=1 --run
1657 1657 upgrade will perform the following actions:
1658 1658
1659 1659 requirements
1660 1660 preserved: dotencode, exp-revlogv2.2, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, sparserevlog, store
1661 added: exp-dirstate-v2
1661 added: dirstate-v2
1662 1662
1663 1663 dirstate-v2
1664 1664 "hg status" will be faster
@@ -1703,7 +1703,7 b' Downgrade from dirstate-v2'
1703 1703
1704 1704 requirements
1705 1705 preserved: dotencode, exp-revlogv2.2, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, sparserevlog, store
1706 removed: exp-dirstate-v2
1706 removed: dirstate-v2
1707 1707
1708 1708 processed revlogs:
1709 1709 - all-filelogs
@@ -75,9 +75,7 b' class clientpeer(wireprotov1peer.wirepee'
75 75
76 76 @wireprotov1peer.batchable
77 77 def greet(self, name):
78 f = wireprotov1peer.future()
79 yield {b'name': mangle(name)}, f
80 yield unmangle(f.value)
78 return {b'name': mangle(name)}, unmangle
81 79
82 80
83 81 class serverrepo(object):
@@ -142,13 +142,13 b' HTTP without the httpheader capability:'
142 142
143 143 SSH (try to exercise the ssh functionality with a dummy script):
144 144
145 $ hg debugwireargs --ssh "\"$PYTHON\" $TESTDIR/dummyssh" ssh://user@dummy/repo uno due tre quattro
145 $ hg debugwireargs ssh://user@dummy/repo uno due tre quattro
146 146 uno due tre quattro None
147 $ hg debugwireargs --ssh "\"$PYTHON\" $TESTDIR/dummyssh" ssh://user@dummy/repo eins zwei --four vier
147 $ hg debugwireargs ssh://user@dummy/repo eins zwei --four vier
148 148 eins zwei None vier None
149 $ hg debugwireargs --ssh "\"$PYTHON\" $TESTDIR/dummyssh" ssh://user@dummy/repo eins zwei
149 $ hg debugwireargs ssh://user@dummy/repo eins zwei
150 150 eins zwei None None None
151 $ hg debugwireargs --ssh "\"$PYTHON\" $TESTDIR/dummyssh" ssh://user@dummy/repo eins zwei --five fuenf
151 $ hg debugwireargs ssh://user@dummy/repo eins zwei --five fuenf
152 152 eins zwei None None None
153 153
154 154 Explicitly kill daemons to let the test exit on Windows
@@ -1,69 +0,0 b''
1 # dirstatenonnormalcheck.py - extension to check the consistency of the
2 # dirstate's non-normal map
3 #
4 # For most operations on dirstate, this extensions checks that the nonnormalset
5 # contains the right entries.
6 # It compares the nonnormal file to a nonnormalset built from the map of all
7 # the files in the dirstate to check that they contain the same files.
8
9 from __future__ import absolute_import
10
11 from mercurial import (
12 dirstate,
13 extensions,
14 pycompat,
15 )
16
17
18 def nonnormalentries(dmap):
19 """Compute nonnormal entries from dirstate's dmap"""
20 res = set()
21 for f, e in dmap.iteritems():
22 if e.state != b'n' or e.mtime == -1:
23 res.add(f)
24 return res
25
26
27 def checkconsistency(ui, orig, dmap, _nonnormalset, label):
28 """Compute nonnormalset from dmap, check that it matches _nonnormalset"""
29 nonnormalcomputedmap = nonnormalentries(dmap)
30 if _nonnormalset != nonnormalcomputedmap:
31 b_orig = pycompat.sysbytes(repr(orig))
32 ui.develwarn(b"%s call to %s\n" % (label, b_orig), config=b'dirstate')
33 ui.develwarn(b"inconsistency in nonnormalset\n", config=b'dirstate')
34 b_nonnormal = pycompat.sysbytes(repr(_nonnormalset))
35 ui.develwarn(b"[nonnormalset] %s\n" % b_nonnormal, config=b'dirstate')
36 b_nonnormalcomputed = pycompat.sysbytes(repr(nonnormalcomputedmap))
37 ui.develwarn(b"[map] %s\n" % b_nonnormalcomputed, config=b'dirstate')
38
39
40 def _checkdirstate(orig, self, *args, **kwargs):
41 """Check nonnormal set consistency before and after the call to orig"""
42 checkconsistency(
43 self._ui, orig, self._map, self._map.nonnormalset, b"before"
44 )
45 r = orig(self, *args, **kwargs)
46 checkconsistency(
47 self._ui, orig, self._map, self._map.nonnormalset, b"after"
48 )
49 return r
50
51
52 def extsetup(ui):
53 """Wrap functions modifying dirstate to check nonnormalset consistency"""
54 dirstatecl = dirstate.dirstate
55 devel = ui.configbool(b'devel', b'all-warnings')
56 paranoid = ui.configbool(b'experimental', b'nonnormalparanoidcheck')
57 if devel:
58 extensions.wrapfunction(dirstatecl, '_writedirstate', _checkdirstate)
59 if paranoid:
60 # We don't do all these checks when paranoid is disable as it would
61 # make the extension run very slowly on large repos
62 extensions.wrapfunction(dirstatecl, 'normallookup', _checkdirstate)
63 extensions.wrapfunction(dirstatecl, 'otherparent', _checkdirstate)
64 extensions.wrapfunction(dirstatecl, 'normal', _checkdirstate)
65 extensions.wrapfunction(dirstatecl, 'write', _checkdirstate)
66 extensions.wrapfunction(dirstatecl, 'add', _checkdirstate)
67 extensions.wrapfunction(dirstatecl, 'remove', _checkdirstate)
68 extensions.wrapfunction(dirstatecl, 'merge', _checkdirstate)
69 extensions.wrapfunction(dirstatecl, 'drop', _checkdirstate)
@@ -1,494 +0,0 b''
1 // dirstate_map.rs
2 //
3 // Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
4 //
5 // This software may be used and distributed according to the terms of the
6 // GNU General Public License version 2 or any later version.
7
8 use crate::dirstate::parsers::Timestamp;
9 use crate::{
10 dirstate::EntryState,
11 dirstate::MTIME_UNSET,
12 dirstate::SIZE_FROM_OTHER_PARENT,
13 dirstate::SIZE_NON_NORMAL,
14 dirstate::V1_RANGEMASK,
15 pack_dirstate, parse_dirstate,
16 utils::hg_path::{HgPath, HgPathBuf},
17 CopyMap, DirsMultiset, DirstateEntry, DirstateError, DirstateParents,
18 StateMap,
19 };
20 use micro_timer::timed;
21 use std::collections::HashSet;
22 use std::iter::FromIterator;
23 use std::ops::Deref;
24
25 #[derive(Default)]
26 pub struct DirstateMap {
27 state_map: StateMap,
28 pub copy_map: CopyMap,
29 pub dirs: Option<DirsMultiset>,
30 pub all_dirs: Option<DirsMultiset>,
31 non_normal_set: Option<HashSet<HgPathBuf>>,
32 other_parent_set: Option<HashSet<HgPathBuf>>,
33 }
34
35 /// Should only really be used in python interface code, for clarity
36 impl Deref for DirstateMap {
37 type Target = StateMap;
38
39 fn deref(&self) -> &Self::Target {
40 &self.state_map
41 }
42 }
43
44 impl FromIterator<(HgPathBuf, DirstateEntry)> for DirstateMap {
45 fn from_iter<I: IntoIterator<Item = (HgPathBuf, DirstateEntry)>>(
46 iter: I,
47 ) -> Self {
48 Self {
49 state_map: iter.into_iter().collect(),
50 ..Self::default()
51 }
52 }
53 }
54
55 impl DirstateMap {
56 pub fn new() -> Self {
57 Self::default()
58 }
59
60 pub fn clear(&mut self) {
61 self.state_map = StateMap::default();
62 self.copy_map.clear();
63 self.non_normal_set = None;
64 self.other_parent_set = None;
65 }
66
67 pub fn set_v1_inner(&mut self, filename: &HgPath, entry: DirstateEntry) {
68 self.state_map.insert(filename.to_owned(), entry);
69 }
70
71 /// Add a tracked file to the dirstate
72 pub fn add_file(
73 &mut self,
74 filename: &HgPath,
75 entry: DirstateEntry,
76 // XXX once the dust settle this should probably become an enum
77 added: bool,
78 merged: bool,
79 from_p2: bool,
80 possibly_dirty: bool,
81 ) -> Result<(), DirstateError> {
82 let mut entry = entry;
83 if added {
84 assert!(!merged);
85 assert!(!possibly_dirty);
86 assert!(!from_p2);
87 entry.state = EntryState::Added;
88 entry.size = SIZE_NON_NORMAL;
89 entry.mtime = MTIME_UNSET;
90 } else if merged {
91 assert!(!possibly_dirty);
92 assert!(!from_p2);
93 entry.state = EntryState::Merged;
94 entry.size = SIZE_FROM_OTHER_PARENT;
95 entry.mtime = MTIME_UNSET;
96 } else if from_p2 {
97 assert!(!possibly_dirty);
98 entry.state = EntryState::Normal;
99 entry.size = SIZE_FROM_OTHER_PARENT;
100 entry.mtime = MTIME_UNSET;
101 } else if possibly_dirty {
102 entry.state = EntryState::Normal;
103 entry.size = SIZE_NON_NORMAL;
104 entry.mtime = MTIME_UNSET;
105 } else {
106 entry.state = EntryState::Normal;
107 entry.size = entry.size & V1_RANGEMASK;
108 entry.mtime = entry.mtime & V1_RANGEMASK;
109 }
110 let old_state = match self.get(filename) {
111 Some(e) => e.state,
112 None => EntryState::Unknown,
113 };
114 if old_state == EntryState::Unknown || old_state == EntryState::Removed
115 {
116 if let Some(ref mut dirs) = self.dirs {
117 dirs.add_path(filename)?;
118 }
119 }
120 if old_state == EntryState::Unknown {
121 if let Some(ref mut all_dirs) = self.all_dirs {
122 all_dirs.add_path(filename)?;
123 }
124 }
125 self.state_map.insert(filename.to_owned(), entry.to_owned());
126
127 if entry.is_non_normal() {
128 self.get_non_normal_other_parent_entries()
129 .0
130 .insert(filename.to_owned());
131 }
132
133 if entry.is_from_other_parent() {
134 self.get_non_normal_other_parent_entries()
135 .1
136 .insert(filename.to_owned());
137 }
138 Ok(())
139 }
140
141 /// Mark a file as removed in the dirstate.
142 ///
143 /// The `size` parameter is used to store sentinel values that indicate
144 /// the file's previous state. In the future, we should refactor this
145 /// to be more explicit about what that state is.
146 pub fn remove_file(
147 &mut self,
148 filename: &HgPath,
149 in_merge: bool,
150 ) -> Result<(), DirstateError> {
151 let old_entry_opt = self.get(filename);
152 let old_state = match old_entry_opt {
153 Some(e) => e.state,
154 None => EntryState::Unknown,
155 };
156 let mut size = 0;
157 if in_merge {
158 // XXX we should not be able to have 'm' state and 'FROM_P2' if not
159 // during a merge. So I (marmoute) am not sure we need the
160 // conditionnal at all. Adding double checking this with assert
161 // would be nice.
162 if let Some(old_entry) = old_entry_opt {
163 // backup the previous state
164 if old_entry.state == EntryState::Merged {
165 size = SIZE_NON_NORMAL;
166 } else if old_entry.state == EntryState::Normal
167 && old_entry.size == SIZE_FROM_OTHER_PARENT
168 {
169 // other parent
170 size = SIZE_FROM_OTHER_PARENT;
171 self.get_non_normal_other_parent_entries()
172 .1
173 .insert(filename.to_owned());
174 }
175 }
176 }
177 if old_state != EntryState::Unknown && old_state != EntryState::Removed
178 {
179 if let Some(ref mut dirs) = self.dirs {
180 dirs.delete_path(filename)?;
181 }
182 }
183 if old_state == EntryState::Unknown {
184 if let Some(ref mut all_dirs) = self.all_dirs {
185 all_dirs.add_path(filename)?;
186 }
187 }
188 if size == 0 {
189 self.copy_map.remove(filename);
190 }
191
192 self.state_map.insert(
193 filename.to_owned(),
194 DirstateEntry {
195 state: EntryState::Removed,
196 mode: 0,
197 size,
198 mtime: 0,
199 },
200 );
201 self.get_non_normal_other_parent_entries()
202 .0
203 .insert(filename.to_owned());
204 Ok(())
205 }
206
207 /// Remove a file from the dirstate.
208 /// Returns `true` if the file was previously recorded.
209 pub fn drop_file(
210 &mut self,
211 filename: &HgPath,
212 ) -> Result<bool, DirstateError> {
213 let old_state = match self.get(filename) {
214 Some(e) => e.state,
215 None => EntryState::Unknown,
216 };
217 let exists = self.state_map.remove(filename).is_some();
218
219 if exists {
220 if old_state != EntryState::Removed {
221 if let Some(ref mut dirs) = self.dirs {
222 dirs.delete_path(filename)?;
223 }
224 }
225 if let Some(ref mut all_dirs) = self.all_dirs {
226 all_dirs.delete_path(filename)?;
227 }
228 }
229 self.get_non_normal_other_parent_entries()
230 .0
231 .remove(filename);
232
233 Ok(exists)
234 }
235
236 pub fn clear_ambiguous_times(
237 &mut self,
238 filenames: Vec<HgPathBuf>,
239 now: i32,
240 ) {
241 for filename in filenames {
242 if let Some(entry) = self.state_map.get_mut(&filename) {
243 if entry.clear_ambiguous_mtime(now) {
244 self.get_non_normal_other_parent_entries()
245 .0
246 .insert(filename.to_owned());
247 }
248 }
249 }
250 }
251
252 pub fn non_normal_entries_remove(
253 &mut self,
254 key: impl AsRef<HgPath>,
255 ) -> bool {
256 self.get_non_normal_other_parent_entries()
257 .0
258 .remove(key.as_ref())
259 }
260
261 pub fn non_normal_entries_add(&mut self, key: impl AsRef<HgPath>) {
262 self.get_non_normal_other_parent_entries()
263 .0
264 .insert(key.as_ref().into());
265 }
266
267 pub fn non_normal_entries_union(
268 &mut self,
269 other: HashSet<HgPathBuf>,
270 ) -> Vec<HgPathBuf> {
271 self.get_non_normal_other_parent_entries()
272 .0
273 .union(&other)
274 .map(ToOwned::to_owned)
275 .collect()
276 }
277
278 pub fn get_non_normal_other_parent_entries(
279 &mut self,
280 ) -> (&mut HashSet<HgPathBuf>, &mut HashSet<HgPathBuf>) {
281 self.set_non_normal_other_parent_entries(false);
282 (
283 self.non_normal_set.as_mut().unwrap(),
284 self.other_parent_set.as_mut().unwrap(),
285 )
286 }
287
288 /// Useful to get immutable references to those sets in contexts where
289 /// you only have an immutable reference to the `DirstateMap`, like when
290 /// sharing references with Python.
291 ///
292 /// TODO, get rid of this along with the other "setter/getter" stuff when
293 /// a nice typestate plan is defined.
294 ///
295 /// # Panics
296 ///
297 /// Will panic if either set is `None`.
298 pub fn get_non_normal_other_parent_entries_panic(
299 &self,
300 ) -> (&HashSet<HgPathBuf>, &HashSet<HgPathBuf>) {
301 (
302 self.non_normal_set.as_ref().unwrap(),
303 self.other_parent_set.as_ref().unwrap(),
304 )
305 }
306
307 pub fn set_non_normal_other_parent_entries(&mut self, force: bool) {
308 if !force
309 && self.non_normal_set.is_some()
310 && self.other_parent_set.is_some()
311 {
312 return;
313 }
314 let mut non_normal = HashSet::new();
315 let mut other_parent = HashSet::new();
316
317 for (filename, entry) in self.state_map.iter() {
318 if entry.is_non_normal() {
319 non_normal.insert(filename.to_owned());
320 }
321 if entry.is_from_other_parent() {
322 other_parent.insert(filename.to_owned());
323 }
324 }
325 self.non_normal_set = Some(non_normal);
326 self.other_parent_set = Some(other_parent);
327 }
328
329 /// Both of these setters and their uses appear to be the simplest way to
330 /// emulate a Python lazy property, but it is ugly and unidiomatic.
331 /// TODO One day, rewriting this struct using the typestate might be a
332 /// good idea.
333 pub fn set_all_dirs(&mut self) -> Result<(), DirstateError> {
334 if self.all_dirs.is_none() {
335 self.all_dirs = Some(DirsMultiset::from_dirstate(
336 self.state_map.iter().map(|(k, v)| Ok((k, *v))),
337 None,
338 )?);
339 }
340 Ok(())
341 }
342
343 pub fn set_dirs(&mut self) -> Result<(), DirstateError> {
344 if self.dirs.is_none() {
345 self.dirs = Some(DirsMultiset::from_dirstate(
346 self.state_map.iter().map(|(k, v)| Ok((k, *v))),
347 Some(EntryState::Removed),
348 )?);
349 }
350 Ok(())
351 }
352
353 pub fn has_tracked_dir(
354 &mut self,
355 directory: &HgPath,
356 ) -> Result<bool, DirstateError> {
357 self.set_dirs()?;
358 Ok(self.dirs.as_ref().unwrap().contains(directory))
359 }
360
361 pub fn has_dir(
362 &mut self,
363 directory: &HgPath,
364 ) -> Result<bool, DirstateError> {
365 self.set_all_dirs()?;
366 Ok(self.all_dirs.as_ref().unwrap().contains(directory))
367 }
368
369 #[timed]
370 pub fn read(
371 &mut self,
372 file_contents: &[u8],
373 ) -> Result<Option<DirstateParents>, DirstateError> {
374 if file_contents.is_empty() {
375 return Ok(None);
376 }
377
378 let (parents, entries, copies) = parse_dirstate(file_contents)?;
379 self.state_map.extend(
380 entries
381 .into_iter()
382 .map(|(path, entry)| (path.to_owned(), entry)),
383 );
384 self.copy_map.extend(
385 copies
386 .into_iter()
387 .map(|(path, copy)| (path.to_owned(), copy.to_owned())),
388 );
389 Ok(Some(parents.clone()))
390 }
391
392 pub fn pack(
393 &mut self,
394 parents: DirstateParents,
395 now: Timestamp,
396 ) -> Result<Vec<u8>, DirstateError> {
397 let packed =
398 pack_dirstate(&mut self.state_map, &self.copy_map, parents, now)?;
399
400 self.set_non_normal_other_parent_entries(true);
401 Ok(packed)
402 }
403 }
404
405 #[cfg(test)]
406 mod tests {
407 use super::*;
408
409 #[test]
410 fn test_dirs_multiset() {
411 let mut map = DirstateMap::new();
412 assert!(map.dirs.is_none());
413 assert!(map.all_dirs.is_none());
414
415 assert_eq!(map.has_dir(HgPath::new(b"nope")).unwrap(), false);
416 assert!(map.all_dirs.is_some());
417 assert!(map.dirs.is_none());
418
419 assert_eq!(map.has_tracked_dir(HgPath::new(b"nope")).unwrap(), false);
420 assert!(map.dirs.is_some());
421 }
422
423 #[test]
424 fn test_add_file() {
425 let mut map = DirstateMap::new();
426
427 assert_eq!(0, map.len());
428
429 map.add_file(
430 HgPath::new(b"meh"),
431 DirstateEntry {
432 state: EntryState::Normal,
433 mode: 1337,
434 mtime: 1337,
435 size: 1337,
436 },
437 false,
438 false,
439 false,
440 false,
441 )
442 .unwrap();
443
444 assert_eq!(1, map.len());
445 assert_eq!(0, map.get_non_normal_other_parent_entries().0.len());
446 assert_eq!(0, map.get_non_normal_other_parent_entries().1.len());
447 }
448
449 #[test]
450 fn test_non_normal_other_parent_entries() {
451 let mut map: DirstateMap = [
452 (b"f1", (EntryState::Removed, 1337, 1337, 1337)),
453 (b"f2", (EntryState::Normal, 1337, 1337, -1)),
454 (b"f3", (EntryState::Normal, 1337, 1337, 1337)),
455 (b"f4", (EntryState::Normal, 1337, -2, 1337)),
456 (b"f5", (EntryState::Added, 1337, 1337, 1337)),
457 (b"f6", (EntryState::Added, 1337, 1337, -1)),
458 (b"f7", (EntryState::Merged, 1337, 1337, -1)),
459 (b"f8", (EntryState::Merged, 1337, 1337, 1337)),
460 (b"f9", (EntryState::Merged, 1337, -2, 1337)),
461 (b"fa", (EntryState::Added, 1337, -2, 1337)),
462 (b"fb", (EntryState::Removed, 1337, -2, 1337)),
463 ]
464 .iter()
465 .map(|(fname, (state, mode, size, mtime))| {
466 (
467 HgPathBuf::from_bytes(fname.as_ref()),
468 DirstateEntry {
469 state: *state,
470 mode: *mode,
471 size: *size,
472 mtime: *mtime,
473 },
474 )
475 })
476 .collect();
477
478 let mut non_normal = [
479 b"f1", b"f2", b"f5", b"f6", b"f7", b"f8", b"f9", b"fa", b"fb",
480 ]
481 .iter()
482 .map(|x| HgPathBuf::from_bytes(x.as_ref()))
483 .collect();
484
485 let mut other_parent = HashSet::new();
486 other_parent.insert(HgPathBuf::from_bytes(b"f4"));
487 let entries = map.get_non_normal_other_parent_entries();
488
489 assert_eq!(
490 (&mut non_normal, &mut other_parent),
491 (entries.0, entries.1)
492 );
493 }
494 }
This diff has been collapsed as it changes many lines, (556 lines changed) Show them Hide them
@@ -1,556 +0,0 b''
1 use std::path::PathBuf;
2
3 use crate::dirstate::parsers::Timestamp;
4 use crate::dirstate_tree::on_disk::DirstateV2ParseError;
5 use crate::matchers::Matcher;
6 use crate::utils::hg_path::{HgPath, HgPathBuf};
7 use crate::CopyMapIter;
8 use crate::DirstateEntry;
9 use crate::DirstateError;
10 use crate::DirstateMap;
11 use crate::DirstateParents;
12 use crate::DirstateStatus;
13 use crate::PatternFileWarning;
14 use crate::StateMapIter;
15 use crate::StatusError;
16 use crate::StatusOptions;
17
18 /// `rust/hg-cpython/src/dirstate/dirstate_map.rs` implements in Rust a
19 /// `DirstateMap` Python class that wraps `Box<dyn DirstateMapMethods + Send>`,
20 /// a trait object of this trait. Except for constructors, this trait defines
21 /// all APIs that the class needs to interact with its inner dirstate map.
22 ///
23 /// A trait object is used to support two different concrete types:
24 ///
25 /// * `rust/hg-core/src/dirstate/dirstate_map.rs` defines the "flat dirstate
26 /// map" which is based on a few large `HgPath`-keyed `HashMap` and `HashSet`
27 /// fields.
28 /// * `rust/hg-core/src/dirstate_tree/dirstate_map.rs` defines the "tree
29 /// dirstate map" based on a tree data struture with nodes for directories
30 /// containing child nodes for their files and sub-directories. This tree
31 /// enables a more efficient algorithm for `hg status`, but its details are
32 /// abstracted in this trait.
33 ///
34 /// The dirstate map associates paths of files in the working directory to
35 /// various information about the state of those files.
36 pub trait DirstateMapMethods {
37 /// Remove information about all files in this map
38 fn clear(&mut self);
39
40 fn set_v1(&mut self, filename: &HgPath, entry: DirstateEntry);
41
42 /// Add or change the information associated to a given file.
43 ///
44 /// `old_state` is the state in the entry that `get` would have returned
45 /// before this call, or `EntryState::Unknown` if there was no such entry.
46 ///
47 /// `entry.state` should never be `EntryState::Unknown`.
48 fn add_file(
49 &mut self,
50 filename: &HgPath,
51 entry: DirstateEntry,
52 added: bool,
53 merged: bool,
54 from_p2: bool,
55 possibly_dirty: bool,
56 ) -> Result<(), DirstateError>;
57
58 /// Mark a file as "removed" (as in `hg rm`).
59 ///
60 /// `old_state` is the state in the entry that `get` would have returned
61 /// before this call, or `EntryState::Unknown` if there was no such entry.
62 ///
63 /// `size` is not actually a size but the 0 or -1 or -2 value that would be
64 /// put in the size field in the dirstate-v1Β format.
65 fn remove_file(
66 &mut self,
67 filename: &HgPath,
68 in_merge: bool,
69 ) -> Result<(), DirstateError>;
70
71 /// Drop information about this file from the map if any, and return
72 /// whether there was any.
73 ///
74 /// `get` will now return `None` for this filename.
75 ///
76 /// `old_state` is the state in the entry that `get` would have returned
77 /// before this call, or `EntryState::Unknown` if there was no such entry.
78 fn drop_file(&mut self, filename: &HgPath) -> Result<bool, DirstateError>;
79
80 /// Among given files, mark the stored `mtime` as ambiguous if there is one
81 /// (if `state == EntryState::Normal`) equal to the given current Unix
82 /// timestamp.
83 fn clear_ambiguous_times(
84 &mut self,
85 filenames: Vec<HgPathBuf>,
86 now: i32,
87 ) -> Result<(), DirstateV2ParseError>;
88
89 /// Return whether the map has an "non-normal" entry for the given
90 /// filename. That is, any entry with a `state` other than
91 /// `EntryState::Normal` or with an ambiguous `mtime`.
92 fn non_normal_entries_contains(
93 &mut self,
94 key: &HgPath,
95 ) -> Result<bool, DirstateV2ParseError>;
96
97 /// Mark the given path as "normal" file. This is only relevant in the flat
98 /// dirstate map where there is a separate `HashSet` that needs to be kept
99 /// up to date.
100 /// Returns whether the key was present in the set.
101 fn non_normal_entries_remove(&mut self, key: &HgPath) -> bool;
102
103 /// Mark the given path as "non-normal" file.
104 /// This is only relevant in the flat dirstate map where there is a
105 /// separate `HashSet` that needs to be kept up to date.
106 fn non_normal_entries_add(&mut self, key: &HgPath);
107
108 /// Return an iterator of paths whose respective entry are either
109 /// "non-normal" (see `non_normal_entries_contains`) or "from other
110 /// parent".
111 ///
112 /// If that information is cached, create the cache as needed.
113 ///
114 /// "From other parent" is defined as `state == Normal && size == -2`.
115 ///
116 /// Because parse errors can happen during iteration, the iterated items
117 /// are `Result`s.
118 fn non_normal_or_other_parent_paths(
119 &mut self,
120 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>;
121
122 /// Create the cache for `non_normal_or_other_parent_paths` if needed.
123 ///
124 /// If `force` is true, the cache is re-created even if it already exists.
125 fn set_non_normal_other_parent_entries(&mut self, force: bool);
126
127 /// Return an iterator of paths whose respective entry are "non-normal"
128 /// (see `non_normal_entries_contains`).
129 ///
130 /// If that information is cached, create the cache as needed.
131 ///
132 /// Because parse errors can happen during iteration, the iterated items
133 /// are `Result`s.
134 fn iter_non_normal_paths(
135 &mut self,
136 ) -> Box<
137 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
138 >;
139
140 /// Same as `iter_non_normal_paths`, but takes `&self` instead of `&mut
141 /// self`.
142 ///
143 /// Panics if a cache is necessary but does not exist yet.
144 fn iter_non_normal_paths_panic(
145 &self,
146 ) -> Box<
147 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
148 >;
149
150 /// Return an iterator of paths whose respective entry are "from other
151 /// parent".
152 ///
153 /// If that information is cached, create the cache as needed.
154 ///
155 /// "From other parent" is defined as `state == Normal && size == -2`.
156 ///
157 /// Because parse errors can happen during iteration, the iterated items
158 /// are `Result`s.
159 fn iter_other_parent_paths(
160 &mut self,
161 ) -> Box<
162 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
163 >;
164
165 /// Returns whether the sub-tree rooted at the given directory contains any
166 /// tracked file.
167 ///
168 /// A file is tracked if it has a `state` other than `EntryState::Removed`.
169 fn has_tracked_dir(
170 &mut self,
171 directory: &HgPath,
172 ) -> Result<bool, DirstateError>;
173
174 /// Returns whether the sub-tree rooted at the given directory contains any
175 /// file with a dirstate entry.
176 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError>;
177
178 /// Clear mtimes that are ambigous with `now` (similar to
179 /// `clear_ambiguous_times` but for all files in the dirstate map), and
180 /// serialize bytes to write the `.hg/dirstate` file to disk in dirstate-v1
181 /// format.
182 fn pack_v1(
183 &mut self,
184 parents: DirstateParents,
185 now: Timestamp,
186 ) -> Result<Vec<u8>, DirstateError>;
187
188 /// Clear mtimes that are ambigous with `now` (similar to
189 /// `clear_ambiguous_times` but for all files in the dirstate map), and
190 /// serialize bytes to write a dirstate data file to disk in dirstate-v2
191 /// format.
192 ///
193 /// Returns new data and metadata together with whether that data should be
194 /// appended to the existing data file whose content is at
195 /// `self.on_disk` (true), instead of written to a new data file
196 /// (false).
197 ///
198 /// Note: this is only supported by the tree dirstate map.
199 fn pack_v2(
200 &mut self,
201 now: Timestamp,
202 can_append: bool,
203 ) -> Result<(Vec<u8>, Vec<u8>, bool), DirstateError>;
204
205 /// Run the status algorithm.
206 ///
207 /// This is not sematically a method of the dirstate map, but a different
208 /// algorithm is used for the flat v.s. tree dirstate map so having it in
209 /// this trait enables the same dynamic dispatch as with other methods.
210 fn status<'a>(
211 &'a mut self,
212 matcher: &'a (dyn Matcher + Sync),
213 root_dir: PathBuf,
214 ignore_files: Vec<PathBuf>,
215 options: StatusOptions,
216 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>;
217
218 /// Returns how many files in the dirstate map have a recorded copy source.
219 fn copy_map_len(&self) -> usize;
220
221 /// Returns an iterator of `(path, copy_source)` for all files that have a
222 /// copy source.
223 fn copy_map_iter(&self) -> CopyMapIter<'_>;
224
225 /// Returns whether the givef file has a copy source.
226 fn copy_map_contains_key(
227 &self,
228 key: &HgPath,
229 ) -> Result<bool, DirstateV2ParseError>;
230
231 /// Returns the copy source for the given file.
232 fn copy_map_get(
233 &self,
234 key: &HgPath,
235 ) -> Result<Option<&HgPath>, DirstateV2ParseError>;
236
237 /// Removes the recorded copy source if any for the given file, and returns
238 /// it.
239 fn copy_map_remove(
240 &mut self,
241 key: &HgPath,
242 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError>;
243
244 /// Set the given `value` copy source for the given `key` file.
245 fn copy_map_insert(
246 &mut self,
247 key: HgPathBuf,
248 value: HgPathBuf,
249 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError>;
250
251 /// Returns the number of files that have an entry.
252 fn len(&self) -> usize;
253
254 /// Returns whether the given file has an entry.
255 fn contains_key(&self, key: &HgPath)
256 -> Result<bool, DirstateV2ParseError>;
257
258 /// Returns the entry, if any, for the given file.
259 fn get(
260 &self,
261 key: &HgPath,
262 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError>;
263
264 /// Returns a `(path, entry)` iterator of files that have an entry.
265 ///
266 /// Because parse errors can happen during iteration, the iterated items
267 /// are `Result`s.
268 fn iter(&self) -> StateMapIter<'_>;
269
270 /// Returns an iterator of tracked directories.
271 ///
272 /// This is the paths for which `has_tracked_dir` would return true.
273 /// Or, in other words, the union of ancestor paths of all paths that have
274 /// an associated entry in a "tracked" state in this dirstate map.
275 ///
276 /// Because parse errors can happen during iteration, the iterated items
277 /// are `Result`s.
278 fn iter_tracked_dirs(
279 &mut self,
280 ) -> Result<
281 Box<
282 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>>
283 + Send
284 + '_,
285 >,
286 DirstateError,
287 >;
288
289 /// Return an iterator of `(path, (state, mode, size, mtime))` for every
290 /// node stored in this dirstate map, for the purpose of the `hg
291 /// debugdirstate` command.
292 ///
293 /// For nodes that don’t have an entry, `state` is the ASCII space.
294 /// An `mtime` may still be present. It is used to optimize `status`.
295 ///
296 /// Because parse errors can happen during iteration, the iterated items
297 /// are `Result`s.
298 fn debug_iter(
299 &self,
300 ) -> Box<
301 dyn Iterator<
302 Item = Result<
303 (&HgPath, (u8, i32, i32, i32)),
304 DirstateV2ParseError,
305 >,
306 > + Send
307 + '_,
308 >;
309 }
310
311 impl DirstateMapMethods for DirstateMap {
312 fn clear(&mut self) {
313 self.clear()
314 }
315
316 /// Used to set a value directory.
317 ///
318 /// XXX Is temporary during a refactor of V1 dirstate and will disappear
319 /// shortly.
320 fn set_v1(&mut self, filename: &HgPath, entry: DirstateEntry) {
321 self.set_v1_inner(&filename, entry)
322 }
323
324 fn add_file(
325 &mut self,
326 filename: &HgPath,
327 entry: DirstateEntry,
328 added: bool,
329 merged: bool,
330 from_p2: bool,
331 possibly_dirty: bool,
332 ) -> Result<(), DirstateError> {
333 self.add_file(filename, entry, added, merged, from_p2, possibly_dirty)
334 }
335
336 fn remove_file(
337 &mut self,
338 filename: &HgPath,
339 in_merge: bool,
340 ) -> Result<(), DirstateError> {
341 self.remove_file(filename, in_merge)
342 }
343
344 fn drop_file(&mut self, filename: &HgPath) -> Result<bool, DirstateError> {
345 self.drop_file(filename)
346 }
347
348 fn clear_ambiguous_times(
349 &mut self,
350 filenames: Vec<HgPathBuf>,
351 now: i32,
352 ) -> Result<(), DirstateV2ParseError> {
353 Ok(self.clear_ambiguous_times(filenames, now))
354 }
355
356 fn non_normal_entries_contains(
357 &mut self,
358 key: &HgPath,
359 ) -> Result<bool, DirstateV2ParseError> {
360 let (non_normal, _other_parent) =
361 self.get_non_normal_other_parent_entries();
362 Ok(non_normal.contains(key))
363 }
364
365 fn non_normal_entries_remove(&mut self, key: &HgPath) -> bool {
366 self.non_normal_entries_remove(key)
367 }
368
369 fn non_normal_entries_add(&mut self, key: &HgPath) {
370 self.non_normal_entries_add(key)
371 }
372
373 fn non_normal_or_other_parent_paths(
374 &mut self,
375 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
376 {
377 let (non_normal, other_parent) =
378 self.get_non_normal_other_parent_entries();
379 Box::new(non_normal.union(other_parent).map(|p| Ok(&**p)))
380 }
381
382 fn set_non_normal_other_parent_entries(&mut self, force: bool) {
383 self.set_non_normal_other_parent_entries(force)
384 }
385
386 fn iter_non_normal_paths(
387 &mut self,
388 ) -> Box<
389 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
390 > {
391 let (non_normal, _other_parent) =
392 self.get_non_normal_other_parent_entries();
393 Box::new(non_normal.iter().map(|p| Ok(&**p)))
394 }
395
396 fn iter_non_normal_paths_panic(
397 &self,
398 ) -> Box<
399 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
400 > {
401 let (non_normal, _other_parent) =
402 self.get_non_normal_other_parent_entries_panic();
403 Box::new(non_normal.iter().map(|p| Ok(&**p)))
404 }
405
406 fn iter_other_parent_paths(
407 &mut self,
408 ) -> Box<
409 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
410 > {
411 let (_non_normal, other_parent) =
412 self.get_non_normal_other_parent_entries();
413 Box::new(other_parent.iter().map(|p| Ok(&**p)))
414 }
415
416 fn has_tracked_dir(
417 &mut self,
418 directory: &HgPath,
419 ) -> Result<bool, DirstateError> {
420 self.has_tracked_dir(directory)
421 }
422
423 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
424 self.has_dir(directory)
425 }
426
427 fn pack_v1(
428 &mut self,
429 parents: DirstateParents,
430 now: Timestamp,
431 ) -> Result<Vec<u8>, DirstateError> {
432 self.pack(parents, now)
433 }
434
435 fn pack_v2(
436 &mut self,
437 _now: Timestamp,
438 _can_append: bool,
439 ) -> Result<(Vec<u8>, Vec<u8>, bool), DirstateError> {
440 panic!(
441 "should have used dirstate_tree::DirstateMap to use the v2 format"
442 )
443 }
444
445 fn status<'a>(
446 &'a mut self,
447 matcher: &'a (dyn Matcher + Sync),
448 root_dir: PathBuf,
449 ignore_files: Vec<PathBuf>,
450 options: StatusOptions,
451 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
452 {
453 crate::status(self, matcher, root_dir, ignore_files, options)
454 }
455
456 fn copy_map_len(&self) -> usize {
457 self.copy_map.len()
458 }
459
460 fn copy_map_iter(&self) -> CopyMapIter<'_> {
461 Box::new(
462 self.copy_map
463 .iter()
464 .map(|(key, value)| Ok((&**key, &**value))),
465 )
466 }
467
468 fn copy_map_contains_key(
469 &self,
470 key: &HgPath,
471 ) -> Result<bool, DirstateV2ParseError> {
472 Ok(self.copy_map.contains_key(key))
473 }
474
475 fn copy_map_get(
476 &self,
477 key: &HgPath,
478 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
479 Ok(self.copy_map.get(key).map(|p| &**p))
480 }
481
482 fn copy_map_remove(
483 &mut self,
484 key: &HgPath,
485 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
486 Ok(self.copy_map.remove(key))
487 }
488
489 fn copy_map_insert(
490 &mut self,
491 key: HgPathBuf,
492 value: HgPathBuf,
493 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
494 Ok(self.copy_map.insert(key, value))
495 }
496
497 fn len(&self) -> usize {
498 (&**self).len()
499 }
500
501 fn contains_key(
502 &self,
503 key: &HgPath,
504 ) -> Result<bool, DirstateV2ParseError> {
505 Ok((&**self).contains_key(key))
506 }
507
508 fn get(
509 &self,
510 key: &HgPath,
511 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
512 Ok((&**self).get(key).cloned())
513 }
514
515 fn iter(&self) -> StateMapIter<'_> {
516 Box::new((&**self).iter().map(|(key, value)| Ok((&**key, *value))))
517 }
518
519 fn iter_tracked_dirs(
520 &mut self,
521 ) -> Result<
522 Box<
523 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>>
524 + Send
525 + '_,
526 >,
527 DirstateError,
528 > {
529 self.set_all_dirs()?;
530 Ok(Box::new(
531 self.all_dirs
532 .as_ref()
533 .unwrap()
534 .iter()
535 .map(|path| Ok(&**path)),
536 ))
537 }
538
539 fn debug_iter(
540 &self,
541 ) -> Box<
542 dyn Iterator<
543 Item = Result<
544 (&HgPath, (u8, i32, i32, i32)),
545 DirstateV2ParseError,
546 >,
547 > + Send
548 + '_,
549 > {
550 Box::new(
551 (&**self)
552 .iter()
553 .map(|(path, entry)| Ok((&**path, entry.debug_tuple()))),
554 )
555 }
556 }
@@ -1,71 +0,0 b''
1 // dirstate_status.rs
2 //
3 // Copyright 2019, Raphaël Gomès <rgomes@octobus.net>
4 //
5 // This software may be used and distributed according to the terms of the
6 // GNU General Public License version 2 or any later version.
7
8 use crate::dirstate::status::{build_response, Dispatch, Status};
9 use crate::matchers::Matcher;
10 use crate::{DirstateStatus, StatusError};
11
12 impl<'a, M: ?Sized + Matcher + Sync> Status<'a, M> {
13 pub(crate) fn run(&self) -> Result<DirstateStatus<'a>, StatusError> {
14 let (traversed_sender, traversed_receiver) =
15 crossbeam_channel::unbounded();
16
17 // Step 1: check the files explicitly mentioned by the user
18 let (work, mut results) = self.walk_explicit(traversed_sender.clone());
19
20 if !work.is_empty() {
21 // Hashmaps are quite a bit slower to build than vecs, so only
22 // build it if needed.
23 let old_results = results.iter().cloned().collect();
24
25 // Step 2: recursively check the working directory for changes if
26 // needed
27 for (dir, dispatch) in work {
28 match dispatch {
29 Dispatch::Directory { was_file } => {
30 if was_file {
31 results.push((dir.to_owned(), Dispatch::Removed));
32 }
33 if self.options.list_ignored
34 || self.options.list_unknown
35 && !self.dir_ignore(&dir)
36 {
37 self.traverse(
38 &dir,
39 &old_results,
40 &mut results,
41 traversed_sender.clone(),
42 );
43 }
44 }
45 _ => {
46 unreachable!("There can only be directories in `work`")
47 }
48 }
49 }
50 }
51
52 if !self.matcher.is_exact() {
53 if self.options.list_unknown {
54 self.handle_unknowns(&mut results);
55 } else {
56 // TODO this is incorrect, see issue6335
57 // This requires a fix in both Python and Rust that can happen
58 // with other pending changes to `status`.
59 self.extend_from_dmap(&mut results);
60 }
61 }
62
63 drop(traversed_sender);
64 let traversed = traversed_receiver
65 .into_iter()
66 .map(std::borrow::Cow::Owned)
67 .collect();
68
69 Ok(build_response(results, traversed))
70 }
71 }
@@ -1,240 +0,0 b''
1 use crate::dirstate::owning::OwningDirstateMap;
2 use hg::dirstate::parsers::Timestamp;
3 use hg::dirstate_tree::dispatch::DirstateMapMethods;
4 use hg::dirstate_tree::on_disk::DirstateV2ParseError;
5 use hg::matchers::Matcher;
6 use hg::utils::hg_path::{HgPath, HgPathBuf};
7 use hg::CopyMapIter;
8 use hg::DirstateEntry;
9 use hg::DirstateError;
10 use hg::DirstateParents;
11 use hg::DirstateStatus;
12 use hg::PatternFileWarning;
13 use hg::StateMapIter;
14 use hg::StatusError;
15 use hg::StatusOptions;
16 use std::path::PathBuf;
17
18 impl DirstateMapMethods for OwningDirstateMap {
19 fn clear(&mut self) {
20 self.get_mut().clear()
21 }
22
23 fn set_v1(&mut self, filename: &HgPath, entry: DirstateEntry) {
24 self.get_mut().set_v1(filename, entry)
25 }
26
27 fn add_file(
28 &mut self,
29 filename: &HgPath,
30 entry: DirstateEntry,
31 added: bool,
32 merged: bool,
33 from_p2: bool,
34 possibly_dirty: bool,
35 ) -> Result<(), DirstateError> {
36 self.get_mut().add_file(
37 filename,
38 entry,
39 added,
40 merged,
41 from_p2,
42 possibly_dirty,
43 )
44 }
45
46 fn remove_file(
47 &mut self,
48 filename: &HgPath,
49 in_merge: bool,
50 ) -> Result<(), DirstateError> {
51 self.get_mut().remove_file(filename, in_merge)
52 }
53
54 fn drop_file(&mut self, filename: &HgPath) -> Result<bool, DirstateError> {
55 self.get_mut().drop_file(filename)
56 }
57
58 fn clear_ambiguous_times(
59 &mut self,
60 filenames: Vec<HgPathBuf>,
61 now: i32,
62 ) -> Result<(), DirstateV2ParseError> {
63 self.get_mut().clear_ambiguous_times(filenames, now)
64 }
65
66 fn non_normal_entries_contains(
67 &mut self,
68 key: &HgPath,
69 ) -> Result<bool, DirstateV2ParseError> {
70 self.get_mut().non_normal_entries_contains(key)
71 }
72
73 fn non_normal_entries_remove(&mut self, key: &HgPath) -> bool {
74 self.get_mut().non_normal_entries_remove(key)
75 }
76
77 fn non_normal_entries_add(&mut self, key: &HgPath) {
78 self.get_mut().non_normal_entries_add(key)
79 }
80
81 fn non_normal_or_other_parent_paths(
82 &mut self,
83 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
84 {
85 self.get_mut().non_normal_or_other_parent_paths()
86 }
87
88 fn set_non_normal_other_parent_entries(&mut self, force: bool) {
89 self.get_mut().set_non_normal_other_parent_entries(force)
90 }
91
92 fn iter_non_normal_paths(
93 &mut self,
94 ) -> Box<
95 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
96 > {
97 self.get_mut().iter_non_normal_paths()
98 }
99
100 fn iter_non_normal_paths_panic(
101 &self,
102 ) -> Box<
103 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
104 > {
105 self.get().iter_non_normal_paths_panic()
106 }
107
108 fn iter_other_parent_paths(
109 &mut self,
110 ) -> Box<
111 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
112 > {
113 self.get_mut().iter_other_parent_paths()
114 }
115
116 fn has_tracked_dir(
117 &mut self,
118 directory: &HgPath,
119 ) -> Result<bool, DirstateError> {
120 self.get_mut().has_tracked_dir(directory)
121 }
122
123 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
124 self.get_mut().has_dir(directory)
125 }
126
127 fn pack_v1(
128 &mut self,
129 parents: DirstateParents,
130 now: Timestamp,
131 ) -> Result<Vec<u8>, DirstateError> {
132 self.get_mut().pack_v1(parents, now)
133 }
134
135 fn pack_v2(
136 &mut self,
137 now: Timestamp,
138 can_append: bool,
139 ) -> Result<(Vec<u8>, Vec<u8>, bool), DirstateError> {
140 self.get_mut().pack_v2(now, can_append)
141 }
142
143 fn status<'a>(
144 &'a mut self,
145 matcher: &'a (dyn Matcher + Sync),
146 root_dir: PathBuf,
147 ignore_files: Vec<PathBuf>,
148 options: StatusOptions,
149 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
150 {
151 self.get_mut()
152 .status(matcher, root_dir, ignore_files, options)
153 }
154
155 fn copy_map_len(&self) -> usize {
156 self.get().copy_map_len()
157 }
158
159 fn copy_map_iter(&self) -> CopyMapIter<'_> {
160 self.get().copy_map_iter()
161 }
162
163 fn copy_map_contains_key(
164 &self,
165 key: &HgPath,
166 ) -> Result<bool, DirstateV2ParseError> {
167 self.get().copy_map_contains_key(key)
168 }
169
170 fn copy_map_get(
171 &self,
172 key: &HgPath,
173 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
174 self.get().copy_map_get(key)
175 }
176
177 fn copy_map_remove(
178 &mut self,
179 key: &HgPath,
180 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
181 self.get_mut().copy_map_remove(key)
182 }
183
184 fn copy_map_insert(
185 &mut self,
186 key: HgPathBuf,
187 value: HgPathBuf,
188 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
189 self.get_mut().copy_map_insert(key, value)
190 }
191
192 fn len(&self) -> usize {
193 self.get().len()
194 }
195
196 fn contains_key(
197 &self,
198 key: &HgPath,
199 ) -> Result<bool, DirstateV2ParseError> {
200 self.get().contains_key(key)
201 }
202
203 fn get(
204 &self,
205 key: &HgPath,
206 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
207 self.get().get(key)
208 }
209
210 fn iter(&self) -> StateMapIter<'_> {
211 self.get().iter()
212 }
213
214 fn iter_tracked_dirs(
215 &mut self,
216 ) -> Result<
217 Box<
218 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>>
219 + Send
220 + '_,
221 >,
222 DirstateError,
223 > {
224 self.get_mut().iter_tracked_dirs()
225 }
226
227 fn debug_iter(
228 &self,
229 ) -> Box<
230 dyn Iterator<
231 Item = Result<
232 (&HgPath, (u8, i32, i32, i32)),
233 DirstateV2ParseError,
234 >,
235 > + Send
236 + '_,
237 > {
238 self.get().debug_iter()
239 }
240 }
@@ -1,83 +0,0 b''
1 // non_normal_other_parent_entries.rs
2 //
3 // Copyright 2020 Raphaël Gomès <rgomes@octobus.net>
4 //
5 // This software may be used and distributed according to the terms of the
6 // GNU General Public License version 2 or any later version.
7
8 use cpython::{
9 exc::NotImplementedError, CompareOp, ObjectProtocol, PyBytes, PyClone,
10 PyErr, PyObject, PyResult, PyString, Python, PythonObject, ToPyObject,
11 UnsafePyLeaked,
12 };
13
14 use crate::dirstate::dirstate_map::v2_error;
15 use crate::dirstate::DirstateMap;
16 use hg::dirstate_tree::on_disk::DirstateV2ParseError;
17 use hg::utils::hg_path::HgPath;
18 use std::cell::RefCell;
19
20 py_class!(pub class NonNormalEntries |py| {
21 data dmap: DirstateMap;
22
23 def __contains__(&self, key: PyObject) -> PyResult<bool> {
24 self.dmap(py).non_normal_entries_contains(py, key)
25 }
26 def remove(&self, key: PyObject) -> PyResult<PyObject> {
27 self.dmap(py).non_normal_entries_remove(py, key)
28 }
29 def add(&self, key: PyObject) -> PyResult<PyObject> {
30 self.dmap(py).non_normal_entries_add(py, key)
31 }
32 def discard(&self, key: PyObject) -> PyResult<PyObject> {
33 self.dmap(py).non_normal_entries_discard(py, key)
34 }
35 def __richcmp__(&self, other: PyObject, op: CompareOp) -> PyResult<bool> {
36 match op {
37 CompareOp::Eq => self.is_equal_to(py, other),
38 CompareOp::Ne => Ok(!self.is_equal_to(py, other)?),
39 _ => Err(PyErr::new::<NotImplementedError, _>(py, ""))
40 }
41 }
42 def __repr__(&self) -> PyResult<PyString> {
43 self.dmap(py).non_normal_entries_display(py)
44 }
45
46 def __iter__(&self) -> PyResult<NonNormalEntriesIterator> {
47 self.dmap(py).non_normal_entries_iter(py)
48 }
49 });
50
51 impl NonNormalEntries {
52 pub fn from_inner(py: Python, dm: DirstateMap) -> PyResult<Self> {
53 Self::create_instance(py, dm)
54 }
55
56 fn is_equal_to(&self, py: Python, other: PyObject) -> PyResult<bool> {
57 for item in other.iter(py)? {
58 if !self.dmap(py).non_normal_entries_contains(py, item?)? {
59 return Ok(false);
60 }
61 }
62 Ok(true)
63 }
64
65 fn translate_key(
66 py: Python,
67 key: Result<&HgPath, DirstateV2ParseError>,
68 ) -> PyResult<Option<PyBytes>> {
69 let key = key.map_err(|e| v2_error(py, e))?;
70 Ok(Some(PyBytes::new(py, key.as_bytes())))
71 }
72 }
73
74 type NonNormalEntriesIter<'a> = Box<
75 dyn Iterator<Item = Result<&'a HgPath, DirstateV2ParseError>> + Send + 'a,
76 >;
77
78 py_shared_iterator!(
79 NonNormalEntriesIterator,
80 UnsafePyLeaked<NonNormalEntriesIter<'static>>,
81 NonNormalEntries::translate_key,
82 Option<PyBytes>
83 );
@@ -1,163 +0,0 b''
1 // parsers.rs
2 //
3 // Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
4 //
5 // This software may be used and distributed according to the terms of the
6 // GNU General Public License version 2 or any later version.
7
8 //! Bindings for the `hg::dirstate::parsers` module provided by the
9 //! `hg-core` package.
10 //!
11 //! From Python, this will be seen as `mercurial.rustext.parsers`
12 use cpython::{
13 exc, PyBytes, PyDict, PyErr, PyInt, PyModule, PyResult, PyTuple, Python,
14 PythonObject, ToPyObject,
15 };
16 use hg::{
17 dirstate::parsers::Timestamp, pack_dirstate, parse_dirstate,
18 utils::hg_path::HgPathBuf, DirstateEntry, DirstateParents, FastHashMap,
19 PARENT_SIZE,
20 };
21 use std::convert::TryInto;
22
23 use crate::dirstate::{extract_dirstate, make_dirstate_item};
24
25 fn parse_dirstate_wrapper(
26 py: Python,
27 dmap: PyDict,
28 copymap: PyDict,
29 st: PyBytes,
30 ) -> PyResult<PyTuple> {
31 match parse_dirstate(st.data(py)) {
32 Ok((parents, entries, copies)) => {
33 let dirstate_map: FastHashMap<HgPathBuf, DirstateEntry> = entries
34 .into_iter()
35 .map(|(path, entry)| (path.to_owned(), entry))
36 .collect();
37 let copy_map: FastHashMap<HgPathBuf, HgPathBuf> = copies
38 .into_iter()
39 .map(|(path, copy)| (path.to_owned(), copy.to_owned()))
40 .collect();
41
42 for (filename, entry) in &dirstate_map {
43 dmap.set_item(
44 py,
45 PyBytes::new(py, filename.as_bytes()),
46 make_dirstate_item(py, entry)?,
47 )?;
48 }
49 for (path, copy_path) in copy_map {
50 copymap.set_item(
51 py,
52 PyBytes::new(py, path.as_bytes()),
53 PyBytes::new(py, copy_path.as_bytes()),
54 )?;
55 }
56 Ok(dirstate_parents_to_pytuple(py, parents))
57 }
58 Err(e) => Err(PyErr::new::<exc::ValueError, _>(py, e.to_string())),
59 }
60 }
61
62 fn pack_dirstate_wrapper(
63 py: Python,
64 dmap: PyDict,
65 copymap: PyDict,
66 pl: PyTuple,
67 now: PyInt,
68 ) -> PyResult<PyBytes> {
69 let p1 = pl.get_item(py, 0).extract::<PyBytes>(py)?;
70 let p1: &[u8] = p1.data(py);
71 let p2 = pl.get_item(py, 1).extract::<PyBytes>(py)?;
72 let p2: &[u8] = p2.data(py);
73
74 let mut dirstate_map = extract_dirstate(py, &dmap)?;
75
76 let copies: Result<FastHashMap<HgPathBuf, HgPathBuf>, PyErr> = copymap
77 .items(py)
78 .iter()
79 .map(|(key, value)| {
80 Ok((
81 HgPathBuf::from_bytes(key.extract::<PyBytes>(py)?.data(py)),
82 HgPathBuf::from_bytes(value.extract::<PyBytes>(py)?.data(py)),
83 ))
84 })
85 .collect();
86
87 if p1.len() != PARENT_SIZE || p2.len() != PARENT_SIZE {
88 return Err(PyErr::new::<exc::ValueError, _>(
89 py,
90 "expected a 20-byte hash".to_string(),
91 ));
92 }
93
94 match pack_dirstate(
95 &mut dirstate_map,
96 &copies?,
97 DirstateParents {
98 p1: p1.try_into().unwrap(),
99 p2: p2.try_into().unwrap(),
100 },
101 Timestamp(now.as_object().extract::<i64>(py)?),
102 ) {
103 Ok(packed) => {
104 for (filename, entry) in dirstate_map.iter() {
105 dmap.set_item(
106 py,
107 PyBytes::new(py, filename.as_bytes()),
108 make_dirstate_item(py, &entry)?,
109 )?;
110 }
111 Ok(PyBytes::new(py, &packed))
112 }
113 Err(error) => {
114 Err(PyErr::new::<exc::ValueError, _>(py, error.to_string()))
115 }
116 }
117 }
118
119 /// Create the module, with `__package__` given from parent
120 pub fn init_parsers_module(py: Python, package: &str) -> PyResult<PyModule> {
121 let dotted_name = &format!("{}.parsers", package);
122 let m = PyModule::new(py, dotted_name)?;
123
124 m.add(py, "__package__", package)?;
125 m.add(py, "__doc__", "Parsers - Rust implementation")?;
126
127 m.add(
128 py,
129 "parse_dirstate",
130 py_fn!(
131 py,
132 parse_dirstate_wrapper(dmap: PyDict, copymap: PyDict, st: PyBytes)
133 ),
134 )?;
135 m.add(
136 py,
137 "pack_dirstate",
138 py_fn!(
139 py,
140 pack_dirstate_wrapper(
141 dmap: PyDict,
142 copymap: PyDict,
143 pl: PyTuple,
144 now: PyInt
145 )
146 ),
147 )?;
148
149 let sys = PyModule::import(py, "sys")?;
150 let sys_modules: PyDict = sys.get(py, "modules")?.extract(py)?;
151 sys_modules.set_item(py, dotted_name, &m)?;
152
153 Ok(m)
154 }
155
156 pub(crate) fn dirstate_parents_to_pytuple(
157 py: Python,
158 parents: &DirstateParents,
159 ) -> PyTuple {
160 let p1 = PyBytes::new(py, parents.p1.as_bytes());
161 let p2 = PyBytes::new(py, parents.p2.as_bytes());
162 (p1, p2).to_py_object(py)
163 }
@@ -1,22 +0,0 b''
1 $ cat >> $HGRCPATH << EOF
2 > [command-templates]
3 > log="{rev}:{node|short} ({phase}) [{tags} {bookmarks}] {desc|firstline}\n"
4 > [extensions]
5 > dirstateparanoidcheck = $TESTDIR/../contrib/dirstatenonnormalcheck.py
6 > [experimental]
7 > nonnormalparanoidcheck = True
8 > [devel]
9 > all-warnings=True
10 > EOF
11 $ mkcommit() {
12 > echo "$1" > "$1"
13 > hg add "$1"
14 > hg ci -m "add $1"
15 > }
16
17 $ hg init testrepo
18 $ cd testrepo
19 $ mkcommit a
20 $ mkcommit b
21 $ mkcommit c
22 $ hg status
General Comments 0
You need to be logged in to leave comments. Login now