merge with default
Pulkit Goyal - r49521:a44bb185 merge 6.0rc0 stable
@@ -0,0 +1,87 b''
1 # Copyright Mercurial Contributors
2 #
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
5
6 from __future__ import absolute_import
7
8 import functools
9 import stat
10
11
12 rangemask = 0x7FFFFFFF
13
14
15 @functools.total_ordering
16 class timestamp(tuple):
17 """
18 A Unix timestamp with optional nanoseconds precision,
19 modulo 2**31 seconds.
20
21 A 2-tuple containing:
22
23 `truncated_seconds`: seconds since the Unix epoch,
24 truncated to its lower 31 bits
25
26 `subsecond_nanoseconds`: number of nanoseconds since `truncated_seconds`.
27 When this is zero, the sub-second precision is considered unknown.
28 """
29
30 def __new__(cls, value):
31 truncated_seconds, subsec_nanos = value
32 value = (truncated_seconds & rangemask, subsec_nanos)
33 return super(timestamp, cls).__new__(cls, value)
34
35 def __eq__(self, other):
36 self_secs, self_subsec_nanos = self
37 other_secs, other_subsec_nanos = other
38 return self_secs == other_secs and (
39 self_subsec_nanos == other_subsec_nanos
40 or self_subsec_nanos == 0
41 or other_subsec_nanos == 0
42 )
43
44 def __gt__(self, other):
45 self_secs, self_subsec_nanos = self
46 other_secs, other_subsec_nanos = other
47 if self_secs > other_secs:
48 return True
49 if self_secs < other_secs:
50 return False
51 if self_subsec_nanos == 0 or other_subsec_nanos == 0:
52 # they are considered equal, so not "greater than"
53 return False
54 return self_subsec_nanos > other_subsec_nanos
55
56
57 def zero():
58 """
59 Returns the `timestamp` at the Unix epoch.
60 """
61 return tuple.__new__(timestamp, (0, 0))
62
63
64 def mtime_of(stat_result):
65 """
66 Takes an `os.stat_result`-like object and returns a `timestamp` object
67 for its modification time.
68 """
69 try:
70 # TODO: add this attribute to `osutil.stat` objects,
71 # see `mercurial/cext/osutil.c`.
72 #
73 # This attribute is also not available on Python 2.
74 nanos = stat_result.st_mtime_ns
75 except AttributeError:
76 # https://docs.python.org/2/library/os.html#os.stat_float_times
77 # "For compatibility with older Python versions,
78 # accessing stat_result as a tuple always returns integers."
79 secs = stat_result[stat.ST_MTIME]
80
81 subsec_nanos = 0
82 else:
83 billion = int(1e9)
84 secs = nanos // billion
85 subsec_nanos = nanos % billion
86
87 return timestamp((secs, subsec_nanos))
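# A minimal doctest-style sketch (not part of the original module) of the
# comparison semantics defined above: sub-second precision is ignored when
# either side reports zero nanoseconds.
#
#   >>> timestamp((1, 500)) == timestamp((1, 0))
#   True
#   >>> timestamp((1, 500)) == timestamp((1, 700))
#   False
#   >>> timestamp((2, 0)) > timestamp((1, 999999999))
#   True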
@@ -0,0 +1,414 b''
1 # v2.py - Pure-Python implementation of the dirstate-v2 file format
2 #
3 # Copyright Mercurial Contributors
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
7
8 from __future__ import absolute_import
9
10 import struct
11
12 from ..thirdparty import attr
13 from .. import error, policy
14
15 parsers = policy.importmod('parsers')
16
17
18 # Must match the constant of the same name in
19 # `rust/hg-core/src/dirstate_tree/on_disk.rs`
20 TREE_METADATA_SIZE = 44
21 NODE_SIZE = 44
22
23
24 # Must match the `TreeMetadata` Rust struct in
25 # `rust/hg-core/src/dirstate_tree/on_disk.rs`. See doc-comments there.
26 #
27 # * 4 bytes: start offset of root nodes
28 # * 4 bytes: number of root nodes
29 # * 4 bytes: total number of nodes in the tree that have an entry
30 # * 4 bytes: total number of nodes in the tree that have a copy source
31 # * 4 bytes: number of bytes in the data file that are not used anymore
32 # * 4 bytes: unused
33 # * 20 bytes: SHA-1 hash of ignore patterns
34 TREE_METADATA = struct.Struct('>LLLLL4s20s')
35
36
37 # Must match the `Node` Rust struct in
38 # `rust/hg-core/src/dirstate_tree/on_disk.rs`. See doc-comments there.
39 #
40 # * 4 bytes: start offset of full path
41 # * 2 bytes: length of the full path
42 # * 2 bytes: length within the full path before its "base name"
43 # * 4 bytes: start offset of the copy source if any, or zero for no copy source
44 # * 2 bytes: length of the copy source if any, or unused
45 # * 4 bytes: start offset of child nodes
46 # * 4 bytes: number of child nodes
47 # * 4 bytes: number of descendant nodes that have an entry
48 # * 4 bytes: number of descendant nodes that have a "tracked" state
49 # * 2 bytes: flags
50 # * 4 bytes: expected size
51 # * 4 bytes: mtime seconds
52 # * 4 bytes: mtime nanoseconds
53 NODE = struct.Struct('>LHHLHLLLLHlll')
54
55
56 assert TREE_METADATA_SIZE == TREE_METADATA.size
57 assert NODE_SIZE == NODE.size
58
59 # match constant in mercurial/pure/parsers.py
60 DIRSTATE_V2_DIRECTORY = 1 << 5
61
62
63 def parse_dirstate(map, copy_map, data, tree_metadata):
64 """parse a full v2-dirstate from a binary data into dictionnaries:
65
66 - map: a {path: entry} mapping that will be filled
67 - copy_map: a {path: copy-source} mapping that will be filled
68 - data: a binary blob containing v2 node data
69 - tree_metadata: a binary blob of the top level node (from the docket)
70 """
71 (
72 root_nodes_start,
73 root_nodes_len,
74 _nodes_with_entry_count,
75 _nodes_with_copy_source_count,
76 _unreachable_bytes,
77 _unused,
78 _ignore_patterns_hash,
79 ) = TREE_METADATA.unpack(tree_metadata)
80 parse_nodes(map, copy_map, data, root_nodes_start, root_nodes_len)
81
82
83 def parse_nodes(map, copy_map, data, start, len):
84 """parse <len> nodes from <data> starting at offset <start>
85
86 This is used by parse_dirstate to recursively fill `map` and `copy_map`.
87
88 All directory specific information is ignored and does not need any
89 processing (DIRECTORY, ALL_UNKNOWN_RECORDED, ALL_IGNORED_RECORDED)
90 """
91 for i in range(len):
92 node_start = start + NODE_SIZE * i
93 node_bytes = slice_with_len(data, node_start, NODE_SIZE)
94 (
95 path_start,
96 path_len,
97 _basename_start,
98 copy_source_start,
99 copy_source_len,
100 children_start,
101 children_count,
102 _descendants_with_entry_count,
103 _tracked_descendants_count,
104 flags,
105 size,
106 mtime_s,
107 mtime_ns,
108 ) = NODE.unpack(node_bytes)
109
110 # Parse child nodes of this node recursively
111 parse_nodes(map, copy_map, data, children_start, children_count)
112
113 item = parsers.DirstateItem.from_v2_data(flags, size, mtime_s, mtime_ns)
114 if not item.any_tracked:
115 continue
116 path = slice_with_len(data, path_start, path_len)
117 map[path] = item
118 if copy_source_start:
119 copy_map[path] = slice_with_len(
120 data, copy_source_start, copy_source_len
121 )
122
123
124 def slice_with_len(data, start, len):
125 return data[start : start + len]
126
127
128 @attr.s
129 class Node(object):
130 path = attr.ib()
131 entry = attr.ib()
132 parent = attr.ib(default=None)
133 children_count = attr.ib(default=0)
134 children_offset = attr.ib(default=0)
135 descendants_with_entry = attr.ib(default=0)
136 tracked_descendants = attr.ib(default=0)
137
138 def pack(self, copy_map, paths_offset):
139 path = self.path
140 copy = copy_map.get(path)
141 entry = self.entry
142
143 path_start = paths_offset
144 path_len = len(path)
145 basename_start = path.rfind(b'/') + 1 # 0 if rfind returns -1
146 if copy is not None:
147 copy_source_start = paths_offset + len(path)
148 copy_source_len = len(copy)
149 else:
150 copy_source_start = 0
151 copy_source_len = 0
152 if entry is not None:
153 flags, size, mtime_s, mtime_ns = entry.v2_data()
154 else:
155 # There are no mtime-cached directories in the Python implementation
156 flags = DIRSTATE_V2_DIRECTORY
157 size = 0
158 mtime_s = 0
159 mtime_ns = 0
160 return NODE.pack(
161 path_start,
162 path_len,
163 basename_start,
164 copy_source_start,
165 copy_source_len,
166 self.children_offset,
167 self.children_count,
168 self.descendants_with_entry,
169 self.tracked_descendants,
170 flags,
171 size,
172 mtime_s,
173 mtime_ns,
174 )
175
176
177 def pack_dirstate(map, copy_map, now):
178 """
179 Pack `map` and `copy_map` into the dirstate v2 binary format and return
180 the bytearray.
181 `now` is a timestamp of the current filesystem time used to detect race
182 conditions in writing the dirstate to disk, see inline comment.
183
184 The on-disk format expects a tree-like structure where the leaves are
185 written first (and sorted per-directory), going up levels until the root
186 node and writing that one to the docket. See more details on the on-disk
187 format in `mercurial/helptext/internals/dirstate-v2`.
188
189 Since both `map` and `copy_map` are flat dicts we need to figure out the
190 hierarchy. This algorithm does so without having to build the entire tree
191 in-memory: it only keeps the minimum number of nodes around to satisfy the
192 format.
193
194 # Algorithm explanation
195
196 This explanation does not talk about the different counters for tracked
197 descendants and storing the copies, but that work is pretty simple once this
198 algorithm is in place.
199
200 ## Building a subtree
201
202 First, sort `map`: this makes it so the leaves of the tree are contiguous
203 per directory (i.e. a/b/c and a/b/d will be next to each other in the list),
204 and enables us to use the ordering of folders to have a "cursor" of the
205 current folder we're in without ever going twice in the same branch of the
206 tree. The cursor is a node that remembers its parent and any information
207 relevant to the format (see the `Node` class), building the relevant part
208 of the tree lazily.
209 Then, for each file in `map`, move the cursor into the tree to the
210 corresponding folder of the file: for example, if the very first file
211 is "a/b/c", we start from `Node[""]`, create `Node["a"]` which points to
212 its parent `Node[""]`, then create `Node["a/b"]`, which points to its parent
213 `Node["a"]`. These nodes are kept around in a stack.
214 If the next file in `map` is in the same subtree ("a/b/d" or "a/b/e/f"), we
215 add it to the stack and keep looping with the same logic of creating the
216 tree nodes as needed. If however the next file in `map` is *not* in the same
217 subtree ("a/other", if we're still in the "a/b" folder), then we know that
218 the subtree we're in is complete.
219
220 ## Writing the subtree
221
222 We have the entire subtree in the stack, so we start writing it to disk
223 folder by folder. The way we write a folder is to pop the stack into a
224 list until the folder changes, then reverse this list of direct children
225 (to satisfy the format requirement that children be sorted) and pack them.
226 This process repeats until we hit the "other" subtree.
227
228 An example:
229 a
230 dir1/b
231 dir1/c
232 dir2/dir3/d
233 dir2/dir3/e
234 dir2/f
235
236 Would have us:
237 - add to the stack until "dir2/dir3/e"
238 - realize that "dir2/f" is in a different subtree
239 - pop "dir2/dir3/e", "dir2/dir3/d", reverse them so they're sorted and
240 pack them since the next entry is "dir2/dir3"
241 - go back up to "dir2"
242 - add "dir2/f" to the stack
243 - realize we're done with the map
244 - pop "dir2/f", "dir2/dir3" from the stack, reverse and pack them
245 - go up to the root node, do the same to write "a", "dir1" and "dir2" in
246 that order
247
248 ## Special case for the root node
249
250 The root node is not serialized in the format, but its information is
251 written to the docket. Again, see more details on the on-disk format in
252 `mercurial/helptext/internals/dirstate-v2`.
253 """
254 data = bytearray()
255 root_nodes_start = 0
256 root_nodes_len = 0
257 nodes_with_entry_count = 0
258 nodes_with_copy_source_count = 0
259 # Will always be 0 since this implementation always re-writes everything
260 # to disk
261 unreachable_bytes = 0
262 unused = b'\x00' * 4
263 # This is an optimization that's only useful for the Rust implementation
264 ignore_patterns_hash = b'\x00' * 20
265
266 if len(map) == 0:
267 tree_metadata = TREE_METADATA.pack(
268 root_nodes_start,
269 root_nodes_len,
270 nodes_with_entry_count,
271 nodes_with_copy_source_count,
272 unreachable_bytes,
273 unused,
274 ignore_patterns_hash,
275 )
276 return data, tree_metadata
277
278 sorted_map = sorted(map.items(), key=lambda x: x[0])
279
280 # Use a stack so that we only have to remember the nodes we currently
281 # need, instead of building the entire tree in memory
282 stack = []
283 current_node = Node(b"", None)
284 stack.append(current_node)
285
286 for index, (path, entry) in enumerate(sorted_map, 1):
287 if entry.need_delay(now):
288 # The file was last modified "simultaneously" with the current
289 # write to dirstate (i.e. within the same second for file-
290 # systems with a granularity of 1 sec). This commonly happens
291 # for at least a couple of files on 'update'.
292 # The user could change the file without changing its size
293 # within the same second. Invalidate the file's mtime in
294 # dirstate, forcing future 'status' calls to compare the
295 # contents of the file if the size is the same. This prevents
296 # mistakenly treating such files as clean.
297 entry.set_possibly_dirty()
298 nodes_with_entry_count += 1
299 if path in copy_map:
300 nodes_with_copy_source_count += 1
301 current_folder = get_folder(path)
302 current_node = move_to_correct_node_in_tree(
303 current_folder, current_node, stack
304 )
305
306 current_node.children_count += 1
307 # Entries from `map` are never `None`
308 if entry.tracked:
309 current_node.tracked_descendants += 1
310 current_node.descendants_with_entry += 1
311 stack.append(Node(path, entry, current_node))
312
313 should_pack = True
314 next_path = None
315 if index < len(sorted_map):
316 # Determine if the next entry is in the same sub-tree, if so don't
317 # pack yet
318 next_path = sorted_map[index][0]
319 should_pack = not get_folder(next_path).startswith(current_folder)
320 if should_pack:
321 pack_directory_children(current_node, copy_map, data, stack)
322 while stack and current_node.path != b"":
323 # Go up the tree and write until we reach the folder of the next
324 # entry (if any, otherwise the root)
325 parent = current_node.parent
326 in_parent_folder_of_next_entry = next_path is not None and (
327 get_folder(next_path).startswith(get_folder(stack[-1].path))
328 )
329 if parent is None or in_parent_folder_of_next_entry:
330 break
331 pack_directory_children(parent, copy_map, data, stack)
332 current_node = parent
333
334 # Special case for the root node since we don't write it to disk, only its
335 # children to the docket
336 current_node = stack.pop()
337 assert current_node.path == b"", current_node.path
338 assert len(stack) == 0, len(stack)
339
340 tree_metadata = TREE_METADATA.pack(
341 current_node.children_offset,
342 current_node.children_count,
343 nodes_with_entry_count,
344 nodes_with_copy_source_count,
345 unreachable_bytes,
346 unused,
347 ignore_patterns_hash,
348 )
349
350 return data, tree_metadata
351
352
353 def get_folder(path):
354 """
355 Return the folder of the given path, or an empty string for root paths.
356 """
357 return path.rsplit(b'/', 1)[0] if b'/' in path else b''
358
359
360 def move_to_correct_node_in_tree(target_folder, current_node, stack):
361 """
362 Move inside the dirstate node tree to the node corresponding to
363 `target_folder`, creating the missing nodes along the way if needed.
364 """
365 while target_folder != current_node.path:
366 if target_folder.startswith(current_node.path):
367 # We need to go down a folder
368 prefix = target_folder[len(current_node.path) :].lstrip(b'/')
369 subfolder_name = prefix.split(b'/', 1)[0]
370 if current_node.path:
371 subfolder_path = current_node.path + b'/' + subfolder_name
372 else:
373 subfolder_path = subfolder_name
374 next_node = stack[-1]
375 if next_node.path == target_folder:
376 # This folder is now a file and only contains removed entries;
377 # merge with the last node
378 current_node = next_node
379 else:
380 current_node.children_count += 1
381 current_node = Node(subfolder_path, None, current_node)
382 stack.append(current_node)
383 else:
384 # We need to go up a folder
385 current_node = current_node.parent
386 return current_node
387
388
389 def pack_directory_children(node, copy_map, data, stack):
390 """
391 Write the binary representation of the direct sorted children of `node` to
392 `data`
393 """
394 direct_children = []
395
396 while stack[-1].path != b"" and get_folder(stack[-1].path) == node.path:
397 direct_children.append(stack.pop())
398 if not direct_children:
399 raise error.ProgrammingError(b"no direct children for %r" % node.path)
400
401 # Reverse the popped children to get the correct sorted order
402 direct_children.reverse()
403 packed_children = bytearray()
404 # Write the paths to `data`. Pack child nodes but don't write them yet
405 for child in direct_children:
406 packed = child.pack(copy_map=copy_map, paths_offset=len(data))
407 packed_children.extend(packed)
408 data.extend(child.path)
409 data.extend(copy_map.get(child.path, b""))
410 node.tracked_descendants += child.tracked_descendants
411 node.descendants_with_entry += child.descendants_with_entry
412 # Write the fixed-size child nodes all together
413 node.children_offset = len(data)
414 data.extend(packed_children)
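# An illustrative check of the helpers above (not part of the original
# file), runnable from a Python prompt with this module imported:
#
#   >>> get_folder(b'a/b/c')
#   b'a/b'
#   >>> get_folder(b'top-level-file')
#   b''
#   >>> TREE_METADATA.size, NODE.size   # both fixed at 44 bytes
#   (44, 44)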
@@ -0,0 +1,616 b''
1 The *dirstate* is what Mercurial uses internally to track
2 the state of files in the working directory,
3 such as set by commands like `hg add` and `hg rm`.
4 It also contains some cached data that help make `hg status` faster.
5 The name refers both to `.hg/dirstate` on the filesystem
6 and the corresponding data structure in memory while a Mercurial process
7 is running.
8
9 The original file format, retroactively dubbed `dirstate-v1`,
10 is described at https://www.mercurial-scm.org/wiki/DirState.
11 It is made of a flat sequence of unordered variable-size entries,
12 so accessing any information in it requires parsing all of it.
13 Similarly, saving changes requires rewriting the entire file.
14
15 The newer `dirstate-v2` file format is designed to fix these limitations
16 and make `hg status` faster.
17
18 User guide
19 ==========
20
21 Compatibility
22 -------------
23
24 The file format is experimental and may still change.
25 Different versions of Mercurial may not be compatible with each other
26 when working on a local repository that uses this format.
27 When using an incompatible version with the experimental format,
28 anything can happen including data corruption.
29
30 Since the dirstate is entirely local and not relevant to the wire protocol,
31 `dirstate-v2` does not affect compatibility with remote Mercurial versions.
32
33 When `share-safe` is enabled, different repositories sharing the same store
34 can use different dirstate formats.
35
36 Enabling `dirstate-v2` for new local repositories
37 -------------------------------------------------
38
39 When creating a new local repository such as with `hg init` or `hg clone`,
40 the `exp-dirstate-v2` boolean in the `format` configuration section
41 controls whether to use this file format.
42 This is disabled by default as of this writing.
43 To enable it for a single repository, run for example::
44
45 $ hg init my-project --config format.exp-dirstate-v2=1
46
47 Checking the format of an existing local repository
48 ---------------------------------------------------
49
50 The `debugformat` command prints information about
51 which of multiple optional formats are used in the current repository,
52 including `dirstate-v2`::
53
54 $ hg debugformat
55 format-variant repo
56 fncache: yes
57 dirstate-v2: yes
58 […]
59
60 Upgrading or downgrading an existing local repository
61 -----------------------------------------------------
62
63 The `debugupgrade` command does various upgrades or downgrades
64 on a local repository
65 based on the current Mercurial version and on configuration.
66 The same `format.exp-dirstate-v2` configuration is used again.
67
68 Example to upgrade::
69
70 $ hg debugupgrade --config format.exp-dirstate-v2=1
71
72 Example to downgrade to `dirstate-v1`::
73
74 $ hg debugupgrade --config format.exp-dirstate-v2=0
75
76 Both of these commands do nothing but print a list of proposed changes,
77 which may include changes unrelated to the dirstate.
78 Those other changes are controlled by their own configuration keys.
79 Add `--run` to a command to actually apply the proposed changes.
80
81 Backups of `.hg/requires` and `.hg/dirstate` are created
82 in a `.hg/upgradebackup.*` directory.
83 If something goes wrong, restoring those files should undo the change.
84
85 Note that upgrading affects compatibility with older versions of Mercurial
86 as noted above.
87 This can be relevant when a repository’s files are on a USB drive
88 or some other removable media, or shared over the network, etc.
89
90 Internal filesystem representation
91 ==================================
92
93 Requirements file
94 -----------------
95
96 The `.hg/requires` file indicates which of various optional file formats
97 are used by a given repository.
98 Mercurial aborts when seeing a requirement it does not know about,
99 which avoids an older version accidentally messing up a repository
100 that uses a format that was introduced later.
101 For versions that do support a format, the presence or absence of
102 the corresponding requirement indicates whether to use that format.
103
104 When the file contains an `exp-dirstate-v2` line,
105 the `dirstate-v2` format is used.
106 With no such line, `dirstate-v1` is used.
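For example, the `.hg/requires` of a repository using `dirstate-v2` could
look like this (the other lines vary by repository and are shown only for
illustration)::

  dotencode
  exp-dirstate-v2
  fncache
  generaldelta
  revlogv1
  sparserevlog
  store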
107
108 High level description
109 ----------------------
110
111 Whereas `dirstate-v1` uses a single `.hg/dirstate` file,
112 in `dirstate-v2` that file is a "docket" file
113 that only contains some metadata
114 and points to a separate data file named `.hg/dirstate.{ID}`,
115 where `{ID}` is a random identifier.
116
117 This separation allows making data files append-only
118 and therefore safer to memory-map.
119 Creating a new data file (occasionally to clean up unused data)
120 can be done with a different ID
121 without disrupting another Mercurial process
122 that could still be using the previous data file.
123
124 Both files have a format designed to reduce the need for parsing,
125 by using fixed-size binary components as much as possible.
126 For data that is not fixed-size,
127 references to other parts of a file can be made by storing "pseudo-pointers":
128 integers counted in bytes from the start of a file.
129 For read-only access no data structure is needed,
130 only a bytes buffer (possibly memory-mapped directly from the filesystem)
131 with specific parts read on demand.
132
133 The data file contains "nodes" organized in a tree.
134 Each node represents a file or directory inside the working directory
135 or its parent changeset.
136 This tree has the same structure as the filesystem,
137 so a node representing a directory has child nodes representing
138 the files and subdirectories contained directly in that directory.
139
140 The docket file format
141 ----------------------
142
143 This is implemented in `rust/hg-core/src/dirstate_tree/on_disk.rs`
144 and `mercurial/dirstateutils/docket.py`.
145
146 Components of the docket file are found at fixed offsets,
147 counted in bytes from the start of the file:
148
149 * Offset 0:
150 The 12-byte marker string "dirstate-v2\n" ending with a newline character.
151 This makes it easier to tell a dirstate-v2 file from a dirstate-v1 file,
152 although it is not strictly necessary
153 since `.hg/requires` determines which format to use.
154
155 * Offset 12:
156 The changeset node ID on the first parent of the working directory,
157 as up to 32 binary bytes.
158 If a node ID is shorter (20 bytes for SHA-1),
159 it is start-aligned and the rest of the bytes are set to zero.
160
161 * Offset 44:
162 The changeset node ID on the second parent of the working directory,
163 or all zeros if there isn’t one.
164 Also 32 binary bytes.
165
166 * Offset 76:
167 Tree metadata on 44 bytes, described below.
168 Its separation in this documentation from the rest of the docket
169 reflects a detail of the current implementation.
170 Since tree metadata is also made of fields at fixed offsets, those could
171 be inlined here by adding 76 bytes to each offset.
172
173 * Offset 120:
174 The used size of the data file, as a 32-bit big-endian integer.
175 The actual size of the data file may be larger
176 (if another Mercurial process is appending to it
177 but has not updated the docket yet).
178 That extra data must be ignored.
179
180 * Offset 124:
181 The length of the data file identifier, as an 8-bit integer.
182
183 * Offset 125:
184 The data file identifier.
185
186 * Any additional data is currently ignored, and dropped when updating the file.
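Putting the offsets above together, a minimal sketch of reading the docket
in Python (the function and variable names here are illustrative, not taken
from the actual `mercurial/dirstateutils/docket.py` implementation)::

  import struct

  def parse_docket(docket_bytes):
      assert docket_bytes[:12] == b"dirstate-v2\n"    # format marker
      p1 = docket_bytes[12:44]    # first parent node ID, zero-padded
      p2 = docket_bytes[44:76]    # second parent node ID, or all zeros
      tree_metadata = docket_bytes[76:120]            # 44 bytes, see below
      (data_size,) = struct.unpack('>L', docket_bytes[120:124])
      id_len = docket_bytes[124]
      data_file_id = docket_bytes[125:125 + id_len]
      return p1, p2, tree_metadata, data_size, data_file_id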
187
188 Tree metadata in the docket file
189 --------------------------------
190
191 Tree metadata is similarly made of components at fixed offsets.
192 These offsets are counted in bytes from the start of tree metadata,
193 which is 76 bytes after the start of the docket file.
194
195 This metadata can be thought of as the singular root of the tree
196 formed by nodes in the data file.
197
198 * Offset 0:
199 Pseudo-pointer to the start of root nodes,
200 counted in bytes from the start of the data file,
201 as a 32-bit big-endian integer.
202 These nodes describe files and directories found directly
203 at the root of the working directory.
204
205 * Offset 4:
206 Number of root nodes, as a 32-bit big-endian integer.
207
208 * Offset 8:
209 Total number of nodes in the entire tree that "have a dirstate entry",
210 as a 32-bit big-endian integer.
211 Those nodes represent files that would be present at all in `dirstate-v1`.
212 This is typically less than the total number of nodes.
213 This counter is used to implement `len(dirstatemap)`.
214
215 * Offset 12:
216 Number of nodes in the entire tree that have a copy source,
217 as a 32-bit big-endian integer.
218 At the next commit, these files are recorded
219 as having been copied or moved/renamed from that source.
220 (A move is recorded as a copy and separate removal of the source.)
221 This counter is used to implement `len(dirstatemap.copymap)`.
222
223 * Offset 16:
224 An estimation of how many bytes of the data file
225 (within its used size) are unused, as a 32-bit big-endian integer.
226 When appending to an existing data file,
227 some existing nodes or paths can be unreachable from the new root
228 but they still take up space.
229 This counter is used to decide when to write a new data file from scratch
230 instead of appending to an existing one,
231 in order to get rid of that unreachable data
232 and avoid unbounded file size growth.
233
234 * Offset 20:
235 These four bytes are currently ignored
236 and reset to zero when updating a docket file.
237 This is an attempt at forward compatibility:
238 future Mercurial versions could use this as a bit field
239 to indicate that a dirstate has additional data or constraints.
240 Finding a dirstate file with the relevant bit unset indicates that
241 it was written by a then-older version
242 which is not aware of that future change.
243
244 * Offset 24:
245 Either 20 zero bytes, or a SHA-1 hash as 20 binary bytes.
246 When present, the hash is of ignore patterns
247 that were used for some previous run of the `status` algorithm.
248
249 * (Offset 44: end of tree metadata)
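This layout matches the `TREE_METADATA` struct in
`mercurial/dirstateutils/v2.py`; a sketch of unpacking it from the 44 bytes
read out of the docket (`tree_metadata` being the illustrative variable from
the docket sketch above)::

  import struct

  TREE_METADATA = struct.Struct('>LLLLL4s20s')

  (root_nodes_start, root_nodes_len, nodes_with_entry_count,
   nodes_with_copy_source_count, unreachable_bytes, _unused,
   ignore_patterns_hash) = TREE_METADATA.unpack(tree_metadata)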
250
251 Optional hash of ignore patterns
252 --------------------------------
253
254 The implementation of `status` at `rust/hg-core/src/dirstate_tree/status.rs`
255 has been optimized such that its run time is dominated by calls
256 to `stat` for reading the filesystem metadata of a file or directory,
257 and to `readdir` for listing the contents of a directory.
258 In some cases the algorithm can skip calls to `readdir`
259 (saving significant time)
260 because the dirstate already contains enough of the relevant information
261 to build the correct `status` results.
262
263 The default configuration of `hg status` is to list unknown files
264 but not ignored files.
265 In this case, it matters for the `readdir`-skipping optimization
266 if a given file used to be ignored but became unknown
267 because `.hgignore` changed.
268 To detect the possibility of such a change,
269 the tree metadata contains an optional hash of all ignore patterns.
270
271 We define:
272
273 * "Root" ignore files as:
274
275 - `.hgignore` at the root of the repository if it exists
276 - And all files from `ui.ignore.*` config.
277
278 This set of files is sorted by the string representation of their path.
279
280 * The "expanded contents" of an ignore files is the byte string made
281 by the concatenation of its contents followed by the "expanded contents"
282 of other files included with `include:` or `subinclude:` directives,
283 in inclusion order. This definition is recursive, as included files can
284 themselves include more files.
285
286 This hash is defined as the SHA-1 of the concatenation (in sorted
287 order) of the "expanded contents" of each "root" ignore file.
288 (Note that computing this does not require actually concatenating
289 into a single contiguous byte sequence.
290 Instead a SHA-1 hasher object can be created
291 and fed separate chunks one by one.)
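A sketch of that incremental hashing, assuming a hypothetical helper
`expanded_contents(path)` that yields the chunks defined above in order::

  import hashlib

  def ignore_patterns_hash(root_ignore_files):
      hasher = hashlib.sha1()
      for path in sorted(root_ignore_files):
          for chunk in expanded_contents(path):  # hypothetical helper
              hasher.update(chunk)
      return hasher.digest()  # 20 bytes, stored in tree metadata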
292
293 The data file format
294 --------------------
295
296 This is implemented in `rust/hg-core/src/dirstate_tree/on_disk.rs`
297 and `mercurial/dirstateutils/v2.py`.
298
299 The data file contains two types of data: paths and nodes.
300
301 Paths and nodes can be organized in any order in the file, except that sibling
302 nodes must be next to each other and sorted by their path.
303 Contiguity lets the parent refer to them all
304 by their count and a single pseudo-pointer,
305 instead of storing one pseudo-pointer per child node.
306 Sorting allows using binary search to find a child node with a given name
307 in `O(log(n))` byte sequence comparisons.
308
309 The current implementation writes paths and child nodes before a given node
310 for ease of figuring out the value of pseudo-pointers by the time they are to be
311 written, but this is not an obligation and readers must not rely on it.
312
313 A path is stored as a byte string anywhere in the file, without delimiter.
314 It is referred to by one or more nodes via a pseudo-pointer to its start, and its
315 length in bytes. Since there is no delimiter,
316 when a path is a substring of another the same bytes could be reused,
317 although the implementation does not exploit this as of this writing.
318
319 A node is stored on 44 bytes with components at fixed offsets. Paths and
320 child nodes relevant to a node are stored externally and referenced through
321 pseudo-pointers.
322
323 All integers are stored in big-endian. All pseudo-pointers are 32-bit integers
324 counting bytes from the start of the data file. Path lengths and positions
325 are 16-bit integers, also counted in bytes.
326
327 Node components are:
328
329 * Offset 0:
330 Pseudo-pointer to the full path of this node,
331 from the working directory root.
332
333 * Offset 4:
334 Length of the full path.
335
336 * Offset 6:
337 Position of the last `/` path separator within the full path,
338 in bytes from the start of the full path,
339 or zero if there isn’t one.
340 The part of the full path after this position is the "base name".
341 Since sibling nodes have the same parent, only their base names vary
342 and need to be considered when doing binary search to find a given path.
343
344 * Offset 8:
345 Pseudo-pointer to the "copy source" path for this node,
346 or zero if there is no copy source.
347
348 * Offset 12:
349 Length of the copy source path, or zero if there isn’t one.
350
351 * Offset 14:
352 Pseudo-pointer to the start of child nodes.
353
354 * Offset 18:
355 Number of child nodes, as a 32-bit integer.
356 They occupy 44 times this number of bytes
357 (not counting space for paths, and further descendants).
358
359 * Offset 22:
360 Number as a 32-bit integer of descendant nodes in this subtree,
361 not including this node itself,
362 that "have a dirstate entry".
363 Those nodes represent files that would be present at all in `dirstate-v1`.
364 This is typically less than the total number of descendants.
365 This counter is used to implement `has_dir`.
366
367 * Offset 26:
368 Number as a 32-bit integer of descendant nodes in this subtree,
369 not including this node itself,
370 that represent files tracked in the working directory.
371 (For example, `hg rm` makes a file untracked.)
372 This counter is used to implement `has_tracked_dir`.
373
374 * Offset 30:
375 A `flags` field that packs some boolean values as bits of a 16-bit integer.
376 Starting from least-significant, bit masks are::
377
378 WDIR_TRACKED = 1 << 0
379 P1_TRACKED = 1 << 1
380 P2_INFO = 1 << 2
381 MODE_EXEC_PERM = 1 << 3
382 MODE_IS_SYMLINK = 1 << 4
383 HAS_FALLBACK_EXEC = 1 << 5
384 FALLBACK_EXEC = 1 << 6
385 HAS_FALLBACK_SYMLINK = 1 << 7
386 FALLBACK_SYMLINK = 1 << 8
387 EXPECTED_STATE_IS_MODIFIED = 1 << 9
388 HAS_MODE_AND_SIZE = 1 << 10
389 HAS_MTIME = 1 << 11
390 MTIME_SECOND_AMBIGUOUS = 1 << 12
391 DIRECTORY = 1 << 13
392 ALL_UNKNOWN_RECORDED = 1 << 14
393 ALL_IGNORED_RECORDED = 1 << 15
394
395 The meaning of each bit is described below.
396
397 Other bits are unset.
398 They may be assigned meaning in the future,
399 with the limitation that Mercurial versions that pre-date such meaning
400 will always reset those bits to unset when writing nodes.
401 (A new node is written for any mutation in its subtree,
402 leaving the bytes of the old node unreachable
403 until the data file is rewritten entirely.)
404
405 * Offset 32:
406 A `size` field described below, as a 32-bit integer.
407 Unlike in dirstate-v1, negative values are not used.
408
409 * Offset 36:
410 The seconds component of an `mtime` field described below,
411 as a 32-bit integer.
412 Unlike in dirstate-v1, negative values are not used.
413 When `mtime` is used, this is number of seconds since the Unix epoch
414 truncated to its lower 31 bits.
415
416 * Offset 40:
417 The nanoseconds component of an `mtime` field described below,
418 as a 32-bit integer.
419 When `mtime` is used,
420 this is the number of nanoseconds since `mtime.seconds`,
421 always strictly less than one billion.
422
423 This may be zero if more precision is not available.
424 (This can happen because of limitations in any of Mercurial, Python,
425 libc, the operating system, …)
426
427 When comparing two mtimes and either has this component set to zero,
428 the sub-second precision of both should be ignored.
429 False positives when checking mtime equality due to clock resolution
430 are always possible and the status algorithm needs to deal with them,
431 but having too many false negatives could be harmful too.
432
433 * (Offset 44: end of this node)
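These offsets correspond to the `NODE` struct in
`mercurial/dirstateutils/v2.py`, shown here as a quick cross-check of the
layout above::

  import struct

  NODE = struct.Struct('>LHHLHLLLLHlll')
  assert NODE.size == 44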
434
435 The meaning of the boolean values packed in `flags` is:
436
437 `WDIR_TRACKED`
438 Set if the working directory contains a tracked file at this node’s path.
439 This is typically set and unset by `hg add` and `hg rm`.
440
441 `P1_TRACKED`
442 Set if the working directory’s first parent changeset
443 (whose node identifier is found in tree metadata)
444 contains a tracked file at this node’s path.
445 This is a cache to reduce manifest lookups.
446
447 `P2_INFO`
448 Set if the file has been involved in some merge operation.
449 Either because it was actually merged,
450 or because the version in the second parent (p2) was ahead,
451 or because some rename moved it there.
452 In any of these cases `hg status` will want it displayed as modified.
453
454 Files that would be mentioned at all in the `dirstate-v1` file format
455 have a node with at least one of the above three bits set in `dirstate-v2`.
456 Let’s call these files "tracked anywhere",
457 and "untracked" the nodes with all three of these bits unset.
458 Untracked nodes are typically for directories:
459 they hold child nodes and form the tree structure.
460 Although implementations should strive to clean up
461 nodes that are entirely unused,
462 other untracked nodes may also exist.
463 For example, a future version of Mercurial might in some cases
464 add nodes for untracked files or/and ignored files in the working directory
465 in order to optimize `hg status`
466 by enabling it to skip `readdir` in more cases.
467
468 `HAS_MODE_AND_SIZE`
469 Must be unset for untracked nodes.
470 For files tracked anywhere, if this is set:
471 - The `size` field is the expected file size,
472 in bytes, truncated to its lower 31 bits.
473 - The expected execute permission for the file’s owner
474 is given by `MODE_EXEC_PERM`
475 - The expected file type is given by `MODE_IS_SYMLINK`:
476 a symbolic link if set, or a normal file if unset.
477 If this is unset, the expected size, permission, and file type are unknown.
478 The `size` field is unused (set to zero).
479
480 `HAS_MTIME`
481 The node contains a "valid" last modification time in the `mtime` field.
482
483
484 It means the `mtime` was already strictly in the past when observed,
485 meaning that later changes cannot happen in the same clock tick
486 and must cause a different modification time
487 (unless the system clock jumps back and we get unlucky,
488 which is not impossible but deemed unlikely enough).
489
490 This means that if `std::fs::symlink_metadata` later reports
491 the same modification time
492 and ignored patterns haven’t changed,
493 we can assume the node to be unchanged on disk.
494
495 The `mtime` field can then be used to skip more expensive lookups when
496 checking the status of "tracked" nodes.
497
498 It can also be set for nodes where `DIRECTORY` is set.
499 See `DIRECTORY` documentation for details.
500
501 `DIRECTORY`
502 When set, this entry will match a directory that exists or existed on the
503 file system.
504
505 * When `HAS_MTIME` is set, a directory has been seen on the file system and
506 `mtime` matches its last modification time. However, `HAS_MTIME` not being
507 set does not indicate the absence of the directory on the file system.
508
509 * When not tracked anywhere, this node does not represent an ignored or
510 unknown file on disk.
511
512 If `HAS_MTIME` is set
513 and `mtime` matches the last modification time of the directory on disk,
514 the directory is unchanged
515 and we can skip calling `std::fs::read_dir` again for this directory,
516 and iterate child dirstate nodes instead.
517 (as long as `ALL_UNKNOWN_RECORDED` and `ALL_IGNORED_RECORDED` are taken
518 into account)
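An illustrative sketch of that decision, with hypothetical variable names
(this is not the actual implementation, which lives in
`rust/hg-core/src/dirstate_tree/status.rs`)::

  can_skip_readdir = (
      flags & HAS_MTIME
      and flags & DIRECTORY
      and stored_mtime == directory_mtime_on_disk
      # required only if the corresponding kind of file is listed:
      and flags & ALL_UNKNOWN_RECORDED
      and flags & ALL_IGNORED_RECORDED
  )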
519
520 `MODE_EXEC_PERM`
521 Must be unset if `HAS_MODE_AND_SIZE` is unset.
522 If `HAS_MODE_AND_SIZE` is set,
523 this indicates whether the file’s owner is expected
524 to have execute permission.
525
526 Beware that on systems without filesystem support for this information,
527 the value stored in the dirstate might be wrong and should not be relied on.
528
529 `MODE_IS_SYMLINK`
530 Must be unset if `HAS_MODE_AND_SIZE` is unset.
531 If `HAS_MODE_AND_SIZE` is set,
532 this indicates whether the file is expected to be a symlink
533 as opposed to a normal file.
534
535 Beware that on systems without filesystem support for this information,
536 the value stored in the dirstate might be wrong and should not be relied on.
537
538 `EXPECTED_STATE_IS_MODIFIED`
539 Must be unset for untracked nodes.
540 For:
541 - a file tracked anywhere
542 - that has expected metadata (`HAS_MODE_AND_SIZE` and `HAS_MTIME`)
543 - whose metadata matches
544 the metadata found in the working directory with `stat`,
545 this bit indicates the status of the file.
546 If set, the status is modified. If unset, it is clean.
547
548 In cases where `hg status` needs to read the contents of a file
549 because metadata is ambiguous, this bit lets it record the result
550 if the result is modified so that a future run of `hg status`
551 does not need to do the same again.
552 It is valid to never set this bit,
553 and consider expected metadata ambiguous if it is set.
554
555 `ALL_UNKNOWN_RECORDED`
556 If set, all "unknown" children existing on disk (at the time of the last
557 status) have been recorded and the `mtime` associated with
558 `DIRECTORY` can be used for optimization even when "unknown" files
559 are listed.
560
561 Note that the number of recorded "unknown" children can still be zero
562 if none were present.
563
564 Also note that having this flag unset does not imply that no "unknown"
565 children have been recorded. Some might be present, but there is no
566 guarantee that all of them are.
567
568 `ALL_IGNORED_RECORDED`
569 If set, all "ignored" children existing on disk (at the time of the last
570 status) have been recorded and the `mtime` associated with
571 `DIRECTORY` can be used for optimization even when "ignored" files
572 are listed.
573
574 Note that the number of recorded "ignored" children can still be zero
575 if none were present.
576
577 Also note that having this flag unset does not imply that no "ignored"
578 children have been recorded. Some might be present, but there is no
579 guarantee that all of them are.
580
581 `HAS_FALLBACK_EXEC`
582 If this flag is set, the entry carries "fallback" information for the
583 executable bit in the `FALLBACK_EXEC` flag.
584
585 Fallback information can be stored in the dirstate to keep track of a
586 filesystem attribute tracked by Mercurial when the underlying file
587 system or operating system does not support that property (e.g.
588 Windows).
589
590 `FALLBACK_EXEC`
591 Should be ignored if `HAS_FALLBACK_EXEC` is unset. If set, the file for this
592 entry should be considered executable if that information cannot be
593 extracted from the file system. If unset it should be considered
594 non-executable instead.
595
596 `HAS_FALLBACK_SYMLINK`
597 If this flag is set, the entry carries "fallback" information for symbolic
598 link status in the `FALLBACK_SYMLINK` flag.
599
600 Fallback information can be stored in the dirstate to keep track of a
601 filesystem attribute tracked by Mercurial when the underlying file
602 system or operating system does not support that property (e.g.
603 Windows).
604
605 `FALLBACK_SYMLINK`
606 Should be ignored if `HAS_FALLBACK_SYMLINK` is unset. If set, the file for
607 this entry should be considered a symlink if that information cannot be
608 extracted from the file system. If unset it should be considered a normal
609 file instead.
610
611 `MTIME_SECOND_AMBIGUOUS`
612 This flag is relevant only when `HAS_MTIME` is set. When set, the
613 `mtime` stored in the entry is only valid for comparison with timestamps
614 that have nanosecond information. If the available timestamp does not carry
615 nanosecond information, the `mtime` should be ignored and no optimization
616 can be applied.
@@ -0,0 +1,72 b''
1 == New Features ==
2 * `debugrebuildfncache` now has an option to rebuild only the index files
3 * a new `bookmarks.mode` path option has been introduced to control the
4 bookmark update strategy during exchange with a peer. See `hg help paths` for
5 details.
6 * a new `bookmarks.mirror` option has been introduced. See `hg help bookmarks`
7 for details.
8 * more commands support detailed exit codes when config `ui.detailed-exit-codes` is enabled
9
10 == Default Format Change ==
11
12 == New Experimental Features ==
13
14 * '''Major feature''': version 2 of the dirstate is available (the first version is as old as Mercurial itself). It allows for much faster working copy inspection (status, diff, commit, update, etc.) and richer information (symlink and exec info on Windows, etc.). The format has been frozen with room for some future evolution and the current implementations (Python, Python + C, Python + Rust or pure Rust) should be compatible with any future change or optimization that the format allows. You can get more information [[https://www.mercurial-scm.org/repo/hg/file/tip/mercurial/helptext/internals/dirstate-v2.txt | in the internal documentation]]
15 * Added a new `web.full-garbage-collection-rate` to control performance. See
16 de2e04fe4897a554b9ef433167f11ea4feb2e09c for more information
17 * Added a new `histedit.later-commits-first` option to affect the ordering of commits in `chistedit` to match the order in `hg log -G`. It will affect the text-based version before graduating from experimental.
18
19 == Bug Fixes ==
20
21 * `hg fix --working-dir` now correctly works when in an uncommitted merge state
22 * Unintentional duplicated calls to `hg fix`'s internals were removed, making it potentially much faster
23 * `rhg cat` can be called without a revision
24 * `rhg cat` can be called with the `.` revision
25 * `rhg cat` is more robust than before with regard to edge cases. Some still remain, such as a tag or bookmark that is ambiguous with a nodeid prefix; only nodeids (prefixed or not) are supported as of now.
26 * `rhg cat` is even faster
27 * `rhg` (Rust fast-path for `hg`) now supports the full config list syntax
28 * `rhg` now parses some corner-cases for revsets correctly
29 * Fixed an `fsmonitor` bug on Python 3 during exception handling
30 * Lots of Windows fixes
31 * Lots of miscellaneous other fixes
32 * Removed a CPython-specific compatibility hack to improve support for alternative Python implementations
33
34 == Backwards Compatibility Changes ==
35
36
37 == Internal API Changes ==
38
39 The following functions have been removed:
40
41 * `dirstate.normal`
42 * `dirstate.normallookup`
43 * `dirstate.otherparent`
44 * `dirstate.add`
45 * `dirstate.addfile`
46 * `dirstate.remove`
47 * `dirstate.drop`
48 * `dirstate.dropfile`
49 * `dirstate.__getitem__`
50 * `dirstatemap.nonnormalentries`
51 * `dirstatemap.nonnormalset`
52 * `dirstatemap.otherparentset`
53 * `dirstatemap.non_normal_or_other_parent_paths`
54 * `dirstateitem.dm_nonnormal`
55 * `dirstateitem.dm_otherparent`
56 * `dirstateitem.merged_removed`
57 * `dirstateitem.from_p2`
58 * `dirstateitem.merged`
59 * `dirstateitem.new_merged`
60 * `dirstateitem.new_added`
61 * `dirstateitem.new_from_p2`
62 * `dirstateitem.new_possibly_dirty`
63 * `dirstateitem.new_normal`
64 * `dirstateitem.from_p2_removed`
65
66 Miscellaneous:
67
68 * `wireprotov1peer`'s `batchable` is now a simple function and not a generator
69 anymore
70 * The Rust extensions (and by extension the experimental `rhg status`) only use a tree-based dirstate in-memory, even when using dirstate-v1. See bf8837e3d7cec40fe649c47163a3154dda03fa16 for more details
71 * The Rust minimum supported version is now 1.48.0 in accordance with our policy of keeping up with Debian stable
72 * The test harness plays nicer with the NixOS sandbox
@@ -0,0 +1,643 b''
1 use crate::dirstate_tree::on_disk::DirstateV2ParseError;
2 use crate::errors::HgError;
3 use bitflags::bitflags;
4 use std::convert::{TryFrom, TryInto};
5 use std::fs;
6 use std::io;
7 use std::time::{SystemTime, UNIX_EPOCH};
8
9 #[derive(Copy, Clone, Debug, Eq, PartialEq)]
10 pub enum EntryState {
11 Normal,
12 Added,
13 Removed,
14 Merged,
15 }
16
17 /// `size` and `mtime.seconds` are truncated to 31 bits.
18 ///
19 /// TODO: double-check status algorithm correctness for files
20 /// larger than 2 GiB or modified after 2038.
21 #[derive(Debug, Copy, Clone)]
22 pub struct DirstateEntry {
23 pub(crate) flags: Flags,
24 mode_size: Option<(u32, u32)>,
25 mtime: Option<TruncatedTimestamp>,
26 }
27
28 bitflags! {
29 pub(crate) struct Flags: u8 {
30 const WDIR_TRACKED = 1 << 0;
31 const P1_TRACKED = 1 << 1;
32 const P2_INFO = 1 << 2;
33 const HAS_FALLBACK_EXEC = 1 << 3;
34 const FALLBACK_EXEC = 1 << 4;
35 const HAS_FALLBACK_SYMLINK = 1 << 5;
36 const FALLBACK_SYMLINK = 1 << 6;
37 }
38 }
39
40 /// A Unix timestamp with nanoseconds precision
41 #[derive(Debug, Copy, Clone)]
42 pub struct TruncatedTimestamp {
43 truncated_seconds: u32,
44 /// Always in the `0 .. 1_000_000_000` range.
45 nanoseconds: u32,
46 }
47
48 impl TruncatedTimestamp {
49 /// Constructs from a timestamp potentially outside of the supported range,
50 /// and truncates the seconds component to its lower 31 bits.
51 ///
52 /// Panics if the nanoseconds component is not in the expected range.
53 pub fn new_truncate(seconds: i64, nanoseconds: u32) -> Self {
54 assert!(nanoseconds < NSEC_PER_SEC);
55 Self {
56 truncated_seconds: seconds as u32 & RANGE_MASK_31BIT,
57 nanoseconds,
58 }
59 }
60
61 /// Construct from components. Returns an error if they are not in the
62 /// expected range.
63 pub fn from_already_truncated(
64 truncated_seconds: u32,
65 nanoseconds: u32,
66 ) -> Result<Self, DirstateV2ParseError> {
67 if truncated_seconds & !RANGE_MASK_31BIT == 0
68 && nanoseconds < NSEC_PER_SEC
69 {
70 Ok(Self {
71 truncated_seconds,
72 nanoseconds,
73 })
74 } else {
75 Err(DirstateV2ParseError)
76 }
77 }
78
79 pub fn for_mtime_of(metadata: &fs::Metadata) -> io::Result<Self> {
80 #[cfg(unix)]
81 {
82 use std::os::unix::fs::MetadataExt;
83 let seconds = metadata.mtime();
84 // i64 -> u32 with value always in the `0 .. NSEC_PER_SEC` range
85 let nanoseconds = metadata.mtime_nsec().try_into().unwrap();
86 Ok(Self::new_truncate(seconds, nanoseconds))
87 }
88 #[cfg(not(unix))]
89 {
90 metadata.modified().map(Self::from)
91 }
92 }
93
94 /// The lower 31 bits of the number of seconds since the epoch.
95 pub fn truncated_seconds(&self) -> u32 {
96 self.truncated_seconds
97 }
98
99 /// The sub-second component of this timestamp, in nanoseconds.
100 /// Always in the `0 .. 1_000_000_000` range.
101 ///
102 /// This timestamp is after `(seconds, 0)` by this many nanoseconds.
103 pub fn nanoseconds(&self) -> u32 {
104 self.nanoseconds
105 }
106
107 /// Returns whether two timestamps are equal modulo 2**31 seconds.
108 ///
109 /// If this returns `true`, the original values converted from `SystemTime`
110 /// or given to `new_truncate` were very likely equal. A false positive is
111 /// possible if they were exactly a multiple of 2**31 seconds apart (around
112 /// 68 years). This is deemed very unlikely to happen by chance, especially
113 /// on filesystems that support sub-second precision.
114 ///
115 /// If someone is manipulating the modification times of some files to
116 /// intentionally make `hg status` return incorrect results, not truncating
117 /// wouldn’t help much since they can set exactly the expected timestamp.
118 ///
119 /// Sub-second precision is ignored if it is zero in either value.
120 /// Some APIs simply return zero when more precision is not available.
121 /// When comparing values from different sources, if only one is truncated
122 /// in that way, doing a simple comparison would cause many false
123 /// negatives.
124 pub fn likely_equal(self, other: Self) -> bool {
125 self.truncated_seconds == other.truncated_seconds
126 && (self.nanoseconds == other.nanoseconds
127 || self.nanoseconds == 0
128 || other.nanoseconds == 0)
129 }
130
131 pub fn likely_equal_to_mtime_of(
132 self,
133 metadata: &fs::Metadata,
134 ) -> io::Result<bool> {
135 Ok(self.likely_equal(Self::for_mtime_of(metadata)?))
136 }
137 }
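// Illustrative examples of `likely_equal` (not part of the original file):
// sub-second precision is ignored whenever either side reports zero
// nanoseconds, mirroring the Python `timestamp` class:
//
//   let a = TruncatedTimestamp::new_truncate(1, 500);
//   let b = TruncatedTimestamp::from_already_truncated(1, 0).unwrap();
//   assert!(a.likely_equal(b));
//   let c = TruncatedTimestamp::new_truncate(1, 700);
//   assert!(!a.likely_equal(c));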
138
139 impl From<SystemTime> for TruncatedTimestamp {
140 fn from(system_time: SystemTime) -> Self {
141 // On Unix, `SystemTime` is a wrapper for the `timespec` C struct:
142 // https://www.gnu.org/software/libc/manual/html_node/Time-Types.html#index-struct-timespec
143 // We want to effectively access its fields, but the Rust standard
144 // library does not expose them. The best we can do is:
145 let seconds;
146 let nanoseconds;
147 match system_time.duration_since(UNIX_EPOCH) {
148 Ok(duration) => {
149 seconds = duration.as_secs() as i64;
150 nanoseconds = duration.subsec_nanos();
151 }
152 Err(error) => {
153 // `system_time` is before `UNIX_EPOCH`.
154 // We need to undo this algorithm:
155 // https://github.com/rust-lang/rust/blob/6bed1f0bc3cc50c10aab26d5f94b16a00776b8a5/library/std/src/sys/unix/time.rs#L40-L41
156 let negative = error.duration();
157 let negative_secs = negative.as_secs() as i64;
158 let negative_nanos = negative.subsec_nanos();
159 if negative_nanos == 0 {
160 seconds = -negative_secs;
161 nanoseconds = 0;
162 } else {
163 // For example if `system_time` was 4.3 seconds before
164 // the Unix epoch we get a Duration that represents
165 // `(-4, -0.3)` but we want `(-5, +0.7)`:
166 seconds = -1 - negative_secs;
167 nanoseconds = NSEC_PER_SEC - negative_nanos;
168 }
169 }
170 };
171 Self::new_truncate(seconds, nanoseconds)
172 }
173 }
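// A quick sanity check of the sign handling above (illustrative, not part
// of the original file): a `SystemTime` 4.3 seconds before the epoch makes
// `duration_since(UNIX_EPOCH)` fail with a duration of (4, 300_000_000),
// so the error branch computes seconds = -5 and nanoseconds = 700_000_000,
// i.e. -5 + 0.7 = -4.3 as intended.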
174
175 const NSEC_PER_SEC: u32 = 1_000_000_000;
176 const RANGE_MASK_31BIT: u32 = 0x7FFF_FFFF;
177
178 pub const MTIME_UNSET: i32 = -1;
179
180 /// A `DirstateEntry` with a size of `-2` means that it was merged from the
181 /// other parent. This allows revert to pick the right status back during a
182 /// merge.
183 pub const SIZE_FROM_OTHER_PARENT: i32 = -2;
184 /// A special value used for internal representation of special case in
185 /// dirstate v1 format.
186 pub const SIZE_NON_NORMAL: i32 = -1;
187
188 impl DirstateEntry {
189 pub fn from_v2_data(
190 wdir_tracked: bool,
191 p1_tracked: bool,
192 p2_info: bool,
193 mode_size: Option<(u32, u32)>,
194 mtime: Option<TruncatedTimestamp>,
195 fallback_exec: Option<bool>,
196 fallback_symlink: Option<bool>,
197 ) -> Self {
198 if let Some((mode, size)) = mode_size {
199 // TODO: return an error for out of range values?
200 assert!(mode & !RANGE_MASK_31BIT == 0);
201 assert!(size & !RANGE_MASK_31BIT == 0);
202 }
203 let mut flags = Flags::empty();
204 flags.set(Flags::WDIR_TRACKED, wdir_tracked);
205 flags.set(Flags::P1_TRACKED, p1_tracked);
206 flags.set(Flags::P2_INFO, p2_info);
207 if let Some(exec) = fallback_exec {
208 flags.insert(Flags::HAS_FALLBACK_EXEC);
209 if exec {
210 flags.insert(Flags::FALLBACK_EXEC);
211 }
212 }
213 if let Some(symlink) = fallback_symlink {
214 flags.insert(Flags::HAS_FALLBACK_SYMLINK);
215 if symlink {
216 flags.insert(Flags::FALLBACK_SYMLINK);
217 }
218 }
219 Self {
220 flags,
221 mode_size,
222 mtime,
223 }
224 }
225
226 pub fn from_v1_data(
227 state: EntryState,
228 mode: i32,
229 size: i32,
230 mtime: i32,
231 ) -> Self {
232 match state {
233 EntryState::Normal => {
234 if size == SIZE_FROM_OTHER_PARENT {
235 Self {
236 // might be missing P1_TRACKED
237 flags: Flags::WDIR_TRACKED | Flags::P2_INFO,
238 mode_size: None,
239 mtime: None,
240 }
241 } else if size == SIZE_NON_NORMAL {
242 Self {
243 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
244 mode_size: None,
245 mtime: None,
246 }
247 } else if mtime == MTIME_UNSET {
248 // TODO: return an error for negative values?
249 let mode = u32::try_from(mode).unwrap();
250 let size = u32::try_from(size).unwrap();
251 Self {
252 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
253 mode_size: Some((mode, size)),
254 mtime: None,
255 }
256 } else {
257 // TODO: return an error for negative values?
258 let mode = u32::try_from(mode).unwrap();
259 let size = u32::try_from(size).unwrap();
260 let mtime = u32::try_from(mtime).unwrap();
261 let mtime =
262 TruncatedTimestamp::from_already_truncated(mtime, 0)
263 .unwrap();
264 Self {
265 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
266 mode_size: Some((mode, size)),
267 mtime: Some(mtime),
268 }
269 }
270 }
271 EntryState::Added => Self {
272 flags: Flags::WDIR_TRACKED,
273 mode_size: None,
274 mtime: None,
275 },
276 EntryState::Removed => Self {
277 flags: if size == SIZE_NON_NORMAL {
278 Flags::P1_TRACKED | Flags::P2_INFO
279 } else if size == SIZE_FROM_OTHER_PARENT {
280 // We don’t know if P1_TRACKED should be set (file history)
281 Flags::P2_INFO
282 } else {
283 Flags::P1_TRACKED
284 },
285 mode_size: None,
286 mtime: None,
287 },
288 EntryState::Merged => Self {
289 flags: Flags::WDIR_TRACKED
290 | Flags::P1_TRACKED // might not be true because of rename ?
291 | Flags::P2_INFO, // might not be true because of rename ?
292 mode_size: None,
293 mtime: None,
294 },
295 }
296 }
297
298 /// Creates a new entry in "removed" state.
299 ///
300 /// `size` is expected to be zero, `SIZE_NON_NORMAL`, or
301 /// `SIZE_FROM_OTHER_PARENT`
302 pub fn new_removed(size: i32) -> Self {
303 Self::from_v1_data(EntryState::Removed, 0, size, 0)
304 }
305
306 pub fn tracked(&self) -> bool {
307 self.flags.contains(Flags::WDIR_TRACKED)
308 }
309
310 pub fn p1_tracked(&self) -> bool {
311 self.flags.contains(Flags::P1_TRACKED)
312 }
313
314 fn in_either_parent(&self) -> bool {
315 self.flags.intersects(Flags::P1_TRACKED | Flags::P2_INFO)
316 }
317
318 pub fn removed(&self) -> bool {
319 self.in_either_parent() && !self.flags.contains(Flags::WDIR_TRACKED)
320 }
321
322 pub fn p2_info(&self) -> bool {
323 self.flags.contains(Flags::WDIR_TRACKED | Flags::P2_INFO)
324 }
325
326 pub fn added(&self) -> bool {
327 self.flags.contains(Flags::WDIR_TRACKED) && !self.in_either_parent()
328 }
329
330 pub fn maybe_clean(&self) -> bool {
331 if !self.flags.contains(Flags::WDIR_TRACKED) {
332 false
333 } else if !self.flags.contains(Flags::P1_TRACKED) {
334 false
335 } else if self.flags.contains(Flags::P2_INFO) {
336 false
337 } else {
338 true
339 }
340 }
341
342 pub fn any_tracked(&self) -> bool {
343 self.flags.intersects(
344 Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO,
345 )
346 }
347
348 /// Returns `(wdir_tracked, p1_tracked, p2_info, mode_size, mtime, fallback_exec, fallback_symlink)`
349 pub(crate) fn v2_data(
350 &self,
351 ) -> (
352 bool,
353 bool,
354 bool,
355 Option<(u32, u32)>,
356 Option<TruncatedTimestamp>,
357 Option<bool>,
358 Option<bool>,
359 ) {
360 if !self.any_tracked() {
361 // TODO: return an Option instead?
362 panic!("Accessing v1_state of an untracked DirstateEntry")
363 }
364 let wdir_tracked = self.flags.contains(Flags::WDIR_TRACKED);
365 let p1_tracked = self.flags.contains(Flags::P1_TRACKED);
366 let p2_info = self.flags.contains(Flags::P2_INFO);
367 let mode_size = self.mode_size;
368 let mtime = self.mtime;
369 (
370 wdir_tracked,
371 p1_tracked,
372 p2_info,
373 mode_size,
374 mtime,
375 self.get_fallback_exec(),
376 self.get_fallback_symlink(),
377 )
378 }
379
380 fn v1_state(&self) -> EntryState {
381 if !self.any_tracked() {
382 // TODO: return an Option instead?
383 panic!("Accessing v1_state of an untracked DirstateEntry")
384 }
385 if self.removed() {
386 EntryState::Removed
387 } else if self
388 .flags
389 .contains(Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO)
390 {
391 EntryState::Merged
392 } else if self.added() {
393 EntryState::Added
394 } else {
395 EntryState::Normal
396 }
397 }
398
399 fn v1_mode(&self) -> i32 {
400 if let Some((mode, _size)) = self.mode_size {
401 i32::try_from(mode).unwrap()
402 } else {
403 0
404 }
405 }
406
407 fn v1_size(&self) -> i32 {
408 if !self.any_tracked() {
409 // TODO: return an Option instead?
410 panic!("Accessing v1_size of an untracked DirstateEntry")
411 }
412 if self.removed()
413 && self.flags.contains(Flags::P1_TRACKED | Flags::P2_INFO)
414 {
415 SIZE_NON_NORMAL
416 } else if self.flags.contains(Flags::P2_INFO) {
417 SIZE_FROM_OTHER_PARENT
418 } else if self.removed() {
419 0
420 } else if self.added() {
421 SIZE_NON_NORMAL
422 } else if let Some((_mode, size)) = self.mode_size {
423 i32::try_from(size).unwrap()
424 } else {
425 SIZE_NON_NORMAL
426 }
427 }
428
429 fn v1_mtime(&self) -> i32 {
430 if !self.any_tracked() {
431 // TODO: return an Option instead?
432 panic!("Accessing v1_mtime of an untracked DirstateEntry")
433 }
434 if self.removed() {
435 0
436 } else if self.flags.contains(Flags::P2_INFO) {
437 MTIME_UNSET
438 } else if !self.flags.contains(Flags::P1_TRACKED) {
439 MTIME_UNSET
440 } else if let Some(mtime) = self.mtime {
441 i32::try_from(mtime.truncated_seconds()).unwrap()
442 } else {
443 MTIME_UNSET
444 }
445 }
446
447 // TODO: return `Option<EntryState>`? None when `!self.any_tracked`
448 pub fn state(&self) -> EntryState {
449 self.v1_state()
450 }
451
452 // TODO: return Option?
453 pub fn mode(&self) -> i32 {
454 self.v1_mode()
455 }
456
457 // TODO: return Option?
458 pub fn size(&self) -> i32 {
459 self.v1_size()
460 }
461
462 // TODO: return Option?
463 pub fn mtime(&self) -> i32 {
464 self.v1_mtime()
465 }
466
467 pub fn get_fallback_exec(&self) -> Option<bool> {
468 if self.flags.contains(Flags::HAS_FALLBACK_EXEC) {
469 Some(self.flags.contains(Flags::FALLBACK_EXEC))
470 } else {
471 None
472 }
473 }
474
475 pub fn set_fallback_exec(&mut self, value: Option<bool>) {
476 match value {
477 None => {
478 self.flags.remove(Flags::HAS_FALLBACK_EXEC);
479 self.flags.remove(Flags::FALLBACK_EXEC);
480 }
481 Some(exec) => {
482 self.flags.insert(Flags::HAS_FALLBACK_EXEC);
483 if exec {
484 self.flags.insert(Flags::FALLBACK_EXEC);
485 }
486 }
487 }
488 }
489
490 pub fn get_fallback_symlink(&self) -> Option<bool> {
491 if self.flags.contains(Flags::HAS_FALLBACK_SYMLINK) {
492 Some(self.flags.contains(Flags::FALLBACK_SYMLINK))
493 } else {
494 None
495 }
496 }
497
498 pub fn set_fallback_symlink(&mut self, value: Option<bool>) {
499 match value {
500 None => {
501 self.flags.remove(Flags::HAS_FALLBACK_SYMLINK);
502 self.flags.remove(Flags::FALLBACK_SYMLINK);
503 }
504 Some(symlink) => {
505 self.flags.insert(Flags::HAS_FALLBACK_SYMLINK);
506 if symlink {
507 self.flags.insert(Flags::FALLBACK_SYMLINK);
508 }
509 }
510 }
511 }
512
513 pub fn truncated_mtime(&self) -> Option<TruncatedTimestamp> {
514 self.mtime
515 }
516
517 pub fn drop_merge_data(&mut self) {
518 if self.flags.contains(Flags::P2_INFO) {
519 self.flags.remove(Flags::P2_INFO);
520 self.mode_size = None;
521 self.mtime = None;
522 }
523 }
524
525 pub fn set_possibly_dirty(&mut self) {
526 self.mtime = None
527 }
528
529 pub fn set_clean(
530 &mut self,
531 mode: u32,
532 size: u32,
533 mtime: TruncatedTimestamp,
534 ) {
535 let size = size & RANGE_MASK_31BIT;
536 self.flags.insert(Flags::WDIR_TRACKED | Flags::P1_TRACKED);
537 self.mode_size = Some((mode, size));
538 self.mtime = Some(mtime);
539 }
540
541 pub fn set_tracked(&mut self) {
542 self.flags.insert(Flags::WDIR_TRACKED);
543 // `set_tracked` replaces various `normallookup` calls, so we mark
544 // the files as needing lookup
545 //
546 // Consider dropping this in the future in favor of something less
547 // broad.
548 self.mtime = None;
549 }
550
551 pub fn set_untracked(&mut self) {
552 self.flags.remove(Flags::WDIR_TRACKED);
553 self.mode_size = None;
554 self.mtime = None;
555 }
556
557 /// Returns `(state, mode, size, mtime)` for the purpose of serialization
558 /// in the dirstate-v1 format.
559 ///
560 /// This includes marker values such as `mtime == -1`. In the future we may
561 /// want to not represent these cases that way in memory, but serialization
562 /// will need to keep the same format.
563 pub fn v1_data(&self) -> (u8, i32, i32, i32) {
564 (
565 self.v1_state().into(),
566 self.v1_mode(),
567 self.v1_size(),
568 self.v1_mtime(),
569 )
570 }
571
572 pub(crate) fn is_from_other_parent(&self) -> bool {
573 self.state() == EntryState::Normal
574 && self.size() == SIZE_FROM_OTHER_PARENT
575 }
576
577 // TODO: other platforms
578 #[cfg(unix)]
579 pub fn mode_changed(
580 &self,
581 filesystem_metadata: &std::fs::Metadata,
582 ) -> bool {
583 use std::os::unix::fs::MetadataExt;
584 const EXEC_BIT_MASK: u32 = 0o100;
585 let dirstate_exec_bit = (self.mode() as u32) & EXEC_BIT_MASK;
586 let fs_exec_bit = filesystem_metadata.mode() & EXEC_BIT_MASK;
587 dirstate_exec_bit != fs_exec_bit
588 }
589
590 /// Returns a `(state, mode, size, mtime)` tuple as for
591 /// `DirstateMapMethods::debug_iter`.
592 pub fn debug_tuple(&self) -> (u8, i32, i32, i32) {
593 (self.state().into(), self.mode(), self.size(), self.mtime())
594 }
595
596 /// True if the stored mtime would be ambiguous with the current time
597 pub fn need_delay(&self, now: TruncatedTimestamp) -> bool {
598 if let Some(mtime) = self.mtime {
599 self.state() == EntryState::Normal
600 && mtime.truncated_seconds() == now.truncated_seconds()
601 } else {
602 false
603 }
604 }
605 }
606
607 impl EntryState {
608 pub fn is_tracked(self) -> bool {
609 use EntryState::*;
610 match self {
611 Normal | Added | Merged => true,
612 Removed => false,
613 }
614 }
615 }
616
617 impl TryFrom<u8> for EntryState {
618 type Error = HgError;
619
620 fn try_from(value: u8) -> Result<Self, Self::Error> {
621 match value {
622 b'n' => Ok(EntryState::Normal),
623 b'a' => Ok(EntryState::Added),
624 b'r' => Ok(EntryState::Removed),
625 b'm' => Ok(EntryState::Merged),
626 _ => Err(HgError::CorruptedRepository(format!(
627 "Incorrect dirstate entry state {}",
628 value
629 ))),
630 }
631 }
632 }
633
634 impl Into<u8> for EntryState {
635 fn into(self) -> u8 {
636 match self {
637 EntryState::Normal => b'n',
638 EntryState::Added => b'a',
639 EntryState::Removed => b'r',
640 EntryState::Merged => b'm',
641 }
642 }
643 }
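
For orientation, here is a minimal editorial sketch (not part of the change) of the v1 round-trip implemented above, assuming `DirstateEntry` and `EntryState` from this file are in scope:

    fn v1_round_trip_demo() {
        // A clean "normal" entry: mode 0o644, size 42, mtime 1_000_000.
        let entry =
            DirstateEntry::from_v1_data(EntryState::Normal, 0o644, 42, 1_000_000);
        assert!(entry.tracked());
        assert!(entry.maybe_clean());
        // Serializing back yields the same v1 tuple, with the state as a byte.
        assert_eq!(entry.v1_data(), (b'n', 0o644, 42, 1_000_000));
    }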
@@ -0,0 +1,88 b''
1 use crate::errors::HgError;
2 use crate::repo::Repo;
3 use crate::revlog::path_encode::path_encode;
4 use crate::revlog::revlog::{Revlog, RevlogError};
5 use crate::revlog::NodePrefix;
6 use crate::revlog::Revision;
7 use crate::utils::files::get_path_from_bytes;
8 use crate::utils::hg_path::HgPath;
9 use crate::utils::SliceExt;
10 use std::path::PathBuf;
11
12 /// A specialized `Revlog` to work with file data logs.
13 pub struct Filelog {
14 /// The generic `revlog` format.
15 revlog: Revlog,
16 }
17
18 impl Filelog {
19 pub fn open(repo: &Repo, file_path: &HgPath) -> Result<Self, HgError> {
20 let index_path = store_path(file_path, b".i");
21 let data_path = store_path(file_path, b".d");
22 let revlog = Revlog::open(repo, index_path, Some(&data_path))?;
23 Ok(Self { revlog })
24 }
25
26 /// The given node ID is that of the file as found in a manifest, not of a
27 /// changeset.
28 pub fn data_for_node(
29 &self,
30 file_node: impl Into<NodePrefix>,
31 ) -> Result<FilelogEntry, RevlogError> {
32 let file_rev = self.revlog.rev_from_node(file_node.into())?;
33 self.data_for_rev(file_rev)
34 }
35
36 /// The given revision is that of the file as found in a manifest, not of a
37 /// changeset.
38 pub fn data_for_rev(
39 &self,
40 file_rev: Revision,
41 ) -> Result<FilelogEntry, RevlogError> {
42 let data: Vec<u8> = self.revlog.get_rev_data(file_rev)?;
43 Ok(FilelogEntry(data.into()))
44 }
45 }
46
47 fn store_path(hg_path: &HgPath, suffix: &[u8]) -> PathBuf {
48 let encoded_bytes =
49 path_encode(&[b"data/", hg_path.as_bytes(), suffix].concat());
50 get_path_from_bytes(&encoded_bytes).into()
51 }
52
53 pub struct FilelogEntry(Vec<u8>);
54
55 impl FilelogEntry {
56 /// Split into metadata and data
57 pub fn split(&self) -> Result<(Option<&[u8]>, &[u8]), HgError> {
58 const DELIMITER: &[u8; 2] = &[b'\x01', b'\n'];
59
60 if let Some(rest) = self.0.drop_prefix(DELIMITER) {
61 if let Some((metadata, data)) = rest.split_2_by_slice(DELIMITER) {
62 Ok((Some(metadata), data))
63 } else {
64 Err(HgError::corrupted(
65 "Missing metadata end delimiter in filelog entry",
66 ))
67 }
68 } else {
69 Ok((None, &self.0))
70 }
71 }
72
73 /// Returns the file contents at this revision, stripped of any metadata
74 pub fn data(&self) -> Result<&[u8], HgError> {
75 let (_metadata, data) = self.split()?;
76 Ok(data)
77 }
78
79 /// Consume the entry and convert it into data, discarding any metadata
80 /// if present.
81 pub fn into_data(self) -> Result<Vec<u8>, HgError> {
82 if let (Some(_metadata), data) = self.split()? {
83 Ok(data.to_owned())
84 } else {
85 Ok(self.0)
86 }
87 }
88 }
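
A hedged usage sketch of the API above (editorial; assumes the imports at the top of this file, an available repo handle, a manifest-provided node, and that `RevlogError` converts from `HgError` as elsewhere in hg-core; the path is illustrative):

    fn read_file_revision(
        repo: &Repo,
        node: NodePrefix,
    ) -> Result<Vec<u8>, RevlogError> {
        // The path is the tracked path; `store_path` derives the store path.
        let filelog = Filelog::open(repo, HgPath::new(b"src/main.rs"))?;
        let entry = filelog.data_for_node(node)?;
        // `into_data` strips the optional \x01\n-delimited metadata block.
        Ok(entry.into_data()?)
    }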
@@ -0,0 +1,100 b''
1 use crate::errors::{HgError, IoErrorContext, IoResultExt};
2 use memmap2::{Mmap, MmapOptions};
3 use std::io::ErrorKind;
4 use std::path::{Path, PathBuf};
5
6 /// Filesystem access abstraction for the contents of a given "base" directory
7 #[derive(Clone, Copy)]
8 pub struct Vfs<'a> {
9 pub(crate) base: &'a Path,
10 }
11
12 struct FileNotFound(std::io::Error, PathBuf);
13
14 impl Vfs<'_> {
15 pub fn join(&self, relative_path: impl AsRef<Path>) -> PathBuf {
16 self.base.join(relative_path)
17 }
18
19 pub fn read(
20 &self,
21 relative_path: impl AsRef<Path>,
22 ) -> Result<Vec<u8>, HgError> {
23 let path = self.join(relative_path);
24 std::fs::read(&path).when_reading_file(&path)
25 }
26
27 fn mmap_open_gen(
28 &self,
29 relative_path: impl AsRef<Path>,
30 ) -> Result<Result<Mmap, FileNotFound>, HgError> {
31 let path = self.join(relative_path);
32 let file = match std::fs::File::open(&path) {
33 Err(err) => {
34 if let ErrorKind::NotFound = err.kind() {
35 return Ok(Err(FileNotFound(err, path)));
36 };
37 return (Err(err)).when_reading_file(&path);
38 }
39 Ok(file) => file,
40 };
41 // TODO: what are the safety requirements here?
42 let mmap = unsafe { MmapOptions::new().map(&file) }
43 .when_reading_file(&path)?;
44 Ok(Ok(mmap))
45 }
46
47 pub fn mmap_open_opt(
48 &self,
49 relative_path: impl AsRef<Path>,
50 ) -> Result<Option<Mmap>, HgError> {
51 self.mmap_open_gen(relative_path).map(|res| res.ok())
52 }
53
54 pub fn mmap_open(
55 &self,
56 relative_path: impl AsRef<Path>,
57 ) -> Result<Mmap, HgError> {
58 match self.mmap_open_gen(relative_path)? {
59 Err(FileNotFound(err, path)) => Err(err).when_reading_file(&path),
60 Ok(res) => Ok(res),
61 }
62 }
63
64 pub fn rename(
65 &self,
66 relative_from: impl AsRef<Path>,
67 relative_to: impl AsRef<Path>,
68 ) -> Result<(), HgError> {
69 let from = self.join(relative_from);
70 let to = self.join(relative_to);
71 std::fs::rename(&from, &to)
72 .with_context(|| IoErrorContext::RenamingFile { from, to })
73 }
74 }
75
76 fn fs_metadata(
77 path: impl AsRef<Path>,
78 ) -> Result<Option<std::fs::Metadata>, HgError> {
79 let path = path.as_ref();
80 match std::fs::metadata(path) {
81 Ok(meta) => Ok(Some(meta)),
82 Err(error) => match error.kind() {
83 // TODO: when we require a Rust version where `NotADirectory` is
84 // stable, invert this logic and return None for it and `NotFound`
85 // and propagate any other error.
86 ErrorKind::PermissionDenied => Err(error).with_context(|| {
87 IoErrorContext::ReadingMetadata(path.to_owned())
88 }),
89 _ => Ok(None),
90 },
91 }
92 }
93
94 pub(crate) fn is_dir(path: impl AsRef<Path>) -> Result<bool, HgError> {
95 Ok(fs_metadata(path)?.map_or(false, |meta| meta.is_dir()))
96 }
97
98 pub(crate) fn is_file(path: impl AsRef<Path>) -> Result<bool, HgError> {
99 Ok(fs_metadata(path)?.map_or(false, |meta| meta.is_file()))
100 }
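
A short, hedged sketch of how `Vfs` is meant to be used (editorial; since `base` is `pub(crate)`, constructing one like this only works inside hg-core, and the file name is illustrative):

    fn read_requires(repo_root: &Path) -> Result<Option<Mmap>, HgError> {
        let base = repo_root.join(".hg");
        let vfs = Vfs { base: &base };
        // `mmap_open_opt` yields Ok(None) when the file is missing, while
        // `mmap_open` turns "not found" into an error instead.
        vfs.mmap_open_opt("requires")
    }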
@@ -0,0 +1,286 b''
1 use cpython::exc;
2 use cpython::ObjectProtocol;
3 use cpython::PyBytes;
4 use cpython::PyErr;
5 use cpython::PyNone;
6 use cpython::PyObject;
7 use cpython::PyResult;
8 use cpython::Python;
9 use cpython::PythonObject;
10 use hg::dirstate::DirstateEntry;
11 use hg::dirstate::EntryState;
12 use hg::dirstate::TruncatedTimestamp;
13 use std::cell::Cell;
14 use std::convert::TryFrom;
15
16 py_class!(pub class DirstateItem |py| {
17 data entry: Cell<DirstateEntry>;
18
19 def __new__(
20 _cls,
21 wc_tracked: bool = false,
22 p1_tracked: bool = false,
23 p2_info: bool = false,
24 has_meaningful_data: bool = true,
25 has_meaningful_mtime: bool = true,
26 parentfiledata: Option<(u32, u32, (u32, u32))> = None,
27 fallback_exec: Option<bool> = None,
28 fallback_symlink: Option<bool> = None,
29
30 ) -> PyResult<DirstateItem> {
31 let mut mode_size_opt = None;
32 let mut mtime_opt = None;
33 if let Some((mode, size, mtime)) = parentfiledata {
34 if has_meaningful_data {
35 mode_size_opt = Some((mode, size))
36 }
37 if has_meaningful_mtime {
38 mtime_opt = Some(timestamp(py, mtime)?)
39 }
40 }
41 let entry = DirstateEntry::from_v2_data(
42 wc_tracked,
43 p1_tracked,
44 p2_info,
45 mode_size_opt,
46 mtime_opt,
47 fallback_exec,
48 fallback_symlink,
49 );
50 DirstateItem::create_instance(py, Cell::new(entry))
51 }
52
53 @property
54 def state(&self) -> PyResult<PyBytes> {
55 let state_byte: u8 = self.entry(py).get().state().into();
56 Ok(PyBytes::new(py, &[state_byte]))
57 }
58
59 @property
60 def mode(&self) -> PyResult<i32> {
61 Ok(self.entry(py).get().mode())
62 }
63
64 @property
65 def size(&self) -> PyResult<i32> {
66 Ok(self.entry(py).get().size())
67 }
68
69 @property
70 def mtime(&self) -> PyResult<i32> {
71 Ok(self.entry(py).get().mtime())
72 }
73
74 @property
75 def has_fallback_exec(&self) -> PyResult<bool> {
76 match self.entry(py).get().get_fallback_exec() {
77 Some(_) => Ok(true),
78 None => Ok(false),
79 }
80 }
81
82 @property
83 def fallback_exec(&self) -> PyResult<Option<bool>> {
84 match self.entry(py).get().get_fallback_exec() {
85 Some(exec) => Ok(Some(exec)),
86 None => Ok(None),
87 }
88 }
89
90 @fallback_exec.setter
91 def set_fallback_exec(&self, value: Option<PyObject>) -> PyResult<()> {
92 match value {
93 None => self.update(py, |entry| entry.set_fallback_exec(None)),
94 Some(value) => {
95 if value.is_none(py) {
96 self.update(py, |entry| entry.set_fallback_exec(None));
97 } else {
98 let exec = value.is_true(py)?;
99 self.update(py, |entry| entry.set_fallback_exec(Some(exec)));
100 }
101 },
102 }
103 Ok(())
104 }
105
106 @property
107 def has_fallback_symlink(&self) -> PyResult<bool> {
108 match self.entry(py).get().get_fallback_symlink() {
109 Some(_) => Ok(true),
110 None => Ok(false),
111 }
112 }
113
114 @property
115 def fallback_symlink(&self) -> PyResult<Option<bool>> {
116 match self.entry(py).get().get_fallback_symlink() {
117 Some(symlink) => Ok(Some(symlink)),
118 None => Ok(None),
119 }
120 }
121
122 @fallback_symlink.setter
123 def set_fallback_symlink(&self, value: Option<PyObject>) -> PyResult<()> {
124 match value {
125 None => self.update(py, |entry| entry.set_fallback_symlink(None)),
126 Some(value) => {
127 if value.is_none(py) {
128 self.update(py, |entry| entry.set_fallback_symlink(None));
129 } else {
130 let symlink = value.is_true(py)?;
131 self.update(py, |entry| entry.set_fallback_symlink(Some(symlink)));
132 }
133 },
134 }
135 Ok(())
136 }
137
138 @property
139 def tracked(&self) -> PyResult<bool> {
140 Ok(self.entry(py).get().tracked())
141 }
142
143 @property
144 def p1_tracked(&self) -> PyResult<bool> {
145 Ok(self.entry(py).get().p1_tracked())
146 }
147
148 @property
149 def added(&self) -> PyResult<bool> {
150 Ok(self.entry(py).get().added())
151 }
152
153
154 @property
155 def p2_info(&self) -> PyResult<bool> {
156 Ok(self.entry(py).get().p2_info())
157 }
158
159 @property
160 def removed(&self) -> PyResult<bool> {
161 Ok(self.entry(py).get().removed())
162 }
163
164 @property
165 def maybe_clean(&self) -> PyResult<bool> {
166 Ok(self.entry(py).get().maybe_clean())
167 }
168
169 @property
170 def any_tracked(&self) -> PyResult<bool> {
171 Ok(self.entry(py).get().any_tracked())
172 }
173
174 def v1_state(&self) -> PyResult<PyBytes> {
175 let (state, _mode, _size, _mtime) = self.entry(py).get().v1_data();
176 let state_byte: u8 = state.into();
177 Ok(PyBytes::new(py, &[state_byte]))
178 }
179
180 def v1_mode(&self) -> PyResult<i32> {
181 let (_state, mode, _size, _mtime) = self.entry(py).get().v1_data();
182 Ok(mode)
183 }
184
185 def v1_size(&self) -> PyResult<i32> {
186 let (_state, _mode, size, _mtime) = self.entry(py).get().v1_data();
187 Ok(size)
188 }
189
190 def v1_mtime(&self) -> PyResult<i32> {
191 let (_state, _mode, _size, mtime) = self.entry(py).get().v1_data();
192 Ok(mtime)
193 }
194
195 def need_delay(&self, now: (u32, u32)) -> PyResult<bool> {
196 let now = timestamp(py, now)?;
197 Ok(self.entry(py).get().need_delay(now))
198 }
199
200 def mtime_likely_equal_to(&self, other: (u32, u32)) -> PyResult<bool> {
201 if let Some(mtime) = self.entry(py).get().truncated_mtime() {
202 Ok(mtime.likely_equal(timestamp(py, other)?))
203 } else {
204 Ok(false)
205 }
206 }
207
208 @classmethod
209 def from_v1_data(
210 _cls,
211 state: PyBytes,
212 mode: i32,
213 size: i32,
214 mtime: i32,
215 ) -> PyResult<Self> {
216 let state = <[u8; 1]>::try_from(state.data(py))
217 .ok()
218 .and_then(|state| EntryState::try_from(state[0]).ok())
219 .ok_or_else(|| PyErr::new::<exc::ValueError, _>(py, "invalid state"))?;
220 let entry = DirstateEntry::from_v1_data(state, mode, size, mtime);
221 DirstateItem::create_instance(py, Cell::new(entry))
222 }
223
224 def drop_merge_data(&self) -> PyResult<PyNone> {
225 self.update(py, |entry| entry.drop_merge_data());
226 Ok(PyNone)
227 }
228
229 def set_clean(
230 &self,
231 mode: u32,
232 size: u32,
233 mtime: (u32, u32),
234 ) -> PyResult<PyNone> {
235 let mtime = timestamp(py, mtime)?;
236 self.update(py, |entry| entry.set_clean(mode, size, mtime));
237 Ok(PyNone)
238 }
239
240 def set_possibly_dirty(&self) -> PyResult<PyNone> {
241 self.update(py, |entry| entry.set_possibly_dirty());
242 Ok(PyNone)
243 }
244
245 def set_tracked(&self) -> PyResult<PyNone> {
246 self.update(py, |entry| entry.set_tracked());
247 Ok(PyNone)
248 }
249
250 def set_untracked(&self) -> PyResult<PyNone> {
251 self.update(py, |entry| entry.set_untracked());
252 Ok(PyNone)
253 }
254 });
255
256 impl DirstateItem {
257 pub fn new_as_pyobject(
258 py: Python<'_>,
259 entry: DirstateEntry,
260 ) -> PyResult<PyObject> {
261 Ok(DirstateItem::create_instance(py, Cell::new(entry))?.into_object())
262 }
263
264 pub fn get_entry(&self, py: Python<'_>) -> DirstateEntry {
265 self.entry(py).get()
266 }
267
268 // TODO: Use https://doc.rust-lang.org/std/cell/struct.Cell.html#method.update instead when it’s stable
269 pub fn update(&self, py: Python<'_>, f: impl FnOnce(&mut DirstateEntry)) {
270 let mut entry = self.entry(py).get();
271 f(&mut entry);
272 self.entry(py).set(entry)
273 }
274 }
275
276 pub(crate) fn timestamp(
277 py: Python<'_>,
278 (s, ns): (u32, u32),
279 ) -> PyResult<TruncatedTimestamp> {
280 TruncatedTimestamp::from_already_truncated(s, ns).map_err(|_| {
281 PyErr::new::<exc::ValueError, _>(
282 py,
283 "expected mtime truncated to 31 bits",
284 )
285 })
286 }
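
The `update` helper above stands in for the still-unstable `Cell::update`; an editorial, self-contained sketch of the same get/modify/set pattern:

    use std::cell::Cell;

    // `Cell<T: Copy>` only hands out copies, so mutations must be written
    // back explicitly; mutating the result of `get()` alone is a no-op.
    fn update_copy<T: Copy>(cell: &Cell<T>, f: impl FnOnce(&mut T)) {
        let mut value = cell.get();
        f(&mut value);
        cell.set(value);
    }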
@@ -0,0 +1,56 b''
1 use cpython::{PyBytes, Python};
2 use stable_deref_trait::StableDeref;
3
4 /// Safe abstraction over a `PyBytes` together with the `&[u8]` slice
5 /// that borrows it. Implements `Deref<Target = [u8]>`.
6 ///
7 /// Calling `PyBytes::data` requires a GIL marker but we want to access the
8 /// data in a thread that (ideally) does not need to acquire the GIL.
9 /// This type allows separating the call from the use.
10 ///
11 /// It also enables using a (wrapped) `PyBytes` in GIL-unaware generic code.
12 pub struct PyBytesDeref {
13 #[allow(unused)]
14 keep_alive: PyBytes,
15
16 /// Borrows the buffer inside `self.keep_alive`,
17 /// but the borrow-checker cannot express self-referential structs.
18 data: *const [u8],
19 }
20
21 impl PyBytesDeref {
22 pub fn new(py: Python, bytes: PyBytes) -> Self {
23 Self {
24 data: bytes.data(py),
25 keep_alive: bytes,
26 }
27 }
28
29 pub fn unwrap(self) -> PyBytes {
30 self.keep_alive
31 }
32 }
33
34 impl std::ops::Deref for PyBytesDeref {
35 type Target = [u8];
36
37 fn deref(&self) -> &[u8] {
38 // Safety: the raw pointer is valid as long as the PyBytes is still
39 // alive, and the returned slice borrows `self`.
40 unsafe { &*self.data }
41 }
42 }
43
44 unsafe impl StableDeref for PyBytesDeref {}
45
46 fn require_send<T: Send>() {}
47
48 #[allow(unused)]
49 fn static_assert_pybytes_is_send() {
50 require_send::<PyBytes>();
51 }
52
53 // Safety: PyBytes is Send. Raw pointers are not by default,
54 // but here sending one to another thread is fine since we ensure it stays
55 // valid.
56 unsafe impl Send for PyBytesDeref {}
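
A hedged sketch of the intended use (editorial): the buffer crosses a thread boundary without the GIL, and the `PyBytes` is handed back so it can be released on the GIL-holding side:

    fn sum_in_thread(py: Python<'_>, bytes: PyBytes) -> (usize, PyBytes) {
        let data = PyBytesDeref::new(py, bytes);
        let (sum, data) = std::thread::spawn(move || {
            // No GIL needed here: Deref gives a plain &[u8].
            let sum: usize = data.iter().map(|&b| b as usize).sum();
            (sum, data)
        })
        .join()
        .unwrap();
        (sum, data.unwrap())
    }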
@@ -0,0 +1,48 b''
1 // path utils module
2 //
3 // This software may be used and distributed according to the terms of the
4 // GNU General Public License version 2 or any later version.
5
6 use crate::error::CommandError;
7 use crate::ui::UiError;
8 use hg::repo::Repo;
9 use hg::utils::current_dir;
10 use hg::utils::files::{get_bytes_from_path, relativize_path};
11 use hg::utils::hg_path::HgPath;
12 use hg::utils::hg_path::HgPathBuf;
13 use std::borrow::Cow;
14
15 pub fn relativize_paths(
16 repo: &Repo,
17 paths: impl IntoIterator<Item = impl AsRef<HgPath>>,
18 mut callback: impl FnMut(Cow<[u8]>) -> Result<(), UiError>,
19 ) -> Result<(), CommandError> {
20 let cwd = current_dir()?;
21 let repo_root = repo.working_directory_path();
22 let repo_root = cwd.join(repo_root); // Make it absolute
23 let repo_root_hgpath =
24 HgPathBuf::from(get_bytes_from_path(repo_root.to_owned()));
25 let outside_repo: bool;
26 let cwd_hgpath: HgPathBuf;
27
28 if let Ok(cwd_relative_to_repo) = cwd.strip_prefix(&repo_root) {
29 // The current directory is inside the repo, so we can work with
30 // relative paths
31 outside_repo = false;
32 cwd_hgpath =
33 HgPathBuf::from(get_bytes_from_path(cwd_relative_to_repo));
34 } else {
35 outside_repo = true;
36 cwd_hgpath = HgPathBuf::from(get_bytes_from_path(cwd));
37 }
38
39 for file in paths {
40 if outside_repo {
41 let file = repo_root_hgpath.join(file.as_ref());
42 callback(relativize_path(&file, &cwd_hgpath))?;
43 } else {
44 callback(relativize_path(file.as_ref(), &cwd_hgpath))?;
45 }
46 }
47 Ok(())
48 }
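
A hedged usage sketch (editorial; assumes `&HgPathBuf` satisfies the `AsRef<HgPath>` bound, and writes the repo-relative paths to stdout):

    fn print_relative(
        repo: &Repo,
        files: &[HgPathBuf],
    ) -> Result<(), CommandError> {
        relativize_paths(repo, files.iter(), |path| {
            // `path` is a Cow<[u8]> relative to the current directory.
            println!("{}", String::from_utf8_lossy(&path));
            Ok(())
        })
    }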
@@ -0,0 +1,52 b''
1 """
2 List-valued configuration keys have an ad-hoc microsyntax. From `hg help config`:
3
4 > List values are separated by whitespace or comma, except when values are
5 > placed in double quotation marks:
6 >
7 > allow_read = "John Doe, PhD", brian, betty
8 >
9 > Quotation marks can be escaped by prefixing them with a backslash. Only
10 > quotation marks at the beginning of a word is counted as a quotation
11 > (e.g., ``foo"bar baz`` is the list of ``foo"bar`` and ``baz``).
12
13 That help documentation is fairly light on details; the actual parser has many
14 other edge cases. This test tries to cover them.
15 """
16
17 from mercurial.utils import stringutil
18
19
20 def assert_parselist(input, expected):
21 result = stringutil.parselist(input)
22 if result != expected:
23 raise AssertionError(
24 "parse_input(%r)\n got %r\nexpected %r"
25 % (input, result, expected)
26 )
27
28
29 # Keep these Python tests in sync with the Rust ones in `rust/hg-core/src/config/values.rs`
30
31 assert_parselist(b'', [])
32 assert_parselist(b',', [])
33 assert_parselist(b'A', [b'A'])
34 assert_parselist(b'B,B', [b'B', b'B'])
35 assert_parselist(b', C, ,C,', [b'C', b'C'])
36 assert_parselist(b'"', [b'"'])
37 assert_parselist(b'""', [b'', b''])
38 assert_parselist(b'D,"', [b'D', b'"'])
39 assert_parselist(b'E,""', [b'E', b'', b''])
40 assert_parselist(b'"F,F"', [b'F,F'])
41 assert_parselist(b'"G,G', [b'"G', b'G'])
42 assert_parselist(b'"H \\",\\"H', [b'"H', b',', b'H'])
43 assert_parselist(b'I,I"', [b'I', b'I"'])
44 assert_parselist(b'J,"J', [b'J', b'"J'])
45 assert_parselist(b'K K', [b'K', b'K'])
46 assert_parselist(b'"K" K', [b'K', b'K'])
47 assert_parselist(b'L\tL', [b'L', b'L'])
48 assert_parselist(b'"L"\tL', [b'L', b'', b'L'])
49 assert_parselist(b'M\x0bM', [b'M', b'M'])
50 assert_parselist(b'"M"\x0bM', [b'M', b'', b'M'])
51 assert_parselist(b'"N" , ,"', [b'N"'])
52 assert_parselist(b'" ,O, ', [b'"', b'O'])
@@ -0,0 +1,27 b''
1 Test null revisions (node 0000000000000000000000000000000000000000, aka rev -1)
2 in various circumstances.
3
4 Make an empty repo:
5
6 $ hg init a
7 $ cd a
8
9 $ hg files -r 0000000000000000000000000000000000000000
10 [1]
11 $ hg files -r .
12 [1]
13
14 Add an empty commit (this makes the changelog refer to a null manifest node):
15
16
17 $ hg commit -m "init" --config ui.allowemptycommit=true
18
19 $ hg files -r .
20 [1]
21
22 Strip that empty commit (this makes the changelog file empty, as opposed to missing):
23
24 $ hg --config 'extensions.strip=' strip . > /dev/null
25
26 $ hg files -r .
27 [1]
@@ -0,0 +1,49 b''
1 from __future__ import absolute_import
2
3 import os
4 from mercurial.hgweb import hgwebdir_mod
5
6 hgwebdir = hgwebdir_mod.hgwebdir
7
8 os.mkdir(b'webdir')
9 os.chdir(b'webdir')
10
11 webdir = os.path.realpath(b'.')
12
13
14 def trivial_response(req, res):
15 return []
16
17
18 def make_hgwebdir(gc_rate=None):
19 config = os.path.join(webdir, b'hgwebdir.conf')
20 with open(config, 'wb') as configfile:
21 configfile.write(b'[experimental]\n')
22 if gc_rate is not None:
23 configfile.write(b'web.full-garbage-collection-rate=%d\n' % gc_rate)
24 hg_wd = hgwebdir(config)
25 hg_wd._runwsgi = trivial_response
26 return hg_wd
27
28
29 def process_requests(webdir_instance, number):
30 # we don't care for now about passing realistic arguments
31 for _ in range(number):
32 for chunk in webdir_instance.run_wsgi(None, None):
33 pass
34
35
36 without_gc = make_hgwebdir(gc_rate=0)
37 process_requests(without_gc, 5)
38 assert without_gc.requests_count == 5
39 assert without_gc.gc_full_collections_done == 0
40
41 with_gc = make_hgwebdir(gc_rate=2)
42 process_requests(with_gc, 5)
43 assert with_gc.requests_count == 5
44 assert with_gc.gc_full_collections_done == 2
45
46 with_systematic_gc = make_hgwebdir() # default value of the setting
47 process_requests(with_systematic_gc, 3)
48 assert with_systematic_gc.requests_count == 3
49 assert with_systematic_gc.gc_full_collections_done == 3
@@ -37,9 +37,9 b' botocore==1.12.243 \\'
37 --hash=sha256:397585a7881230274afb8d1877ef69a661b0a311745cd324f14a052fb2a2863a \
37 --hash=sha256:397585a7881230274afb8d1877ef69a661b0a311745cd324f14a052fb2a2863a \
38 --hash=sha256:4496f8da89cb496462a831897ad248e13e431d9fa7e41e06d426fd6658ab6e59 \
38 --hash=sha256:4496f8da89cb496462a831897ad248e13e431d9fa7e41e06d426fd6658ab6e59 \
39 # via boto3, s3transfer
39 # via boto3, s3transfer
40 certifi==2019.9.11 \
40 certifi==2021.5.30 \
41 --hash=sha256:e4f3620cfea4f83eedc95b24abd9cd56f3c4b146dd0177e83a21b4eb49e21e50 \
41 --hash=sha256:2bbf76fd432960138b3ef6dda3dde0544f27cbf8546c458e60baf371917ba9ee \
42 --hash=sha256:fd7c7c74727ddcf00e9acd26bba8da604ffec95bf1c2144e67aff7a8b50e6cef \
42 --hash=sha256:50b1e4f8446b06f41be7dd6338db18e0990601dce795c2b1686458aa7e8fa7d8 \
43 # via requests
43 # via requests
44 cffi==1.12.3 \
44 cffi==1.12.3 \
45 --hash=sha256:041c81822e9f84b1d9c401182e174996f0bae9991f33725d059b771744290774 \
45 --hash=sha256:041c81822e9f84b1d9c401182e174996f0bae9991f33725d059b771744290774 \
@@ -4,9 +4,9 b''
4 #
4 #
5 # pip-compile --generate-hashes --output-file=contrib/packaging/requirements-windows-py2.txt contrib/packaging/requirements-windows.txt.in
5 # pip-compile --generate-hashes --output-file=contrib/packaging/requirements-windows-py2.txt contrib/packaging/requirements-windows.txt.in
6 #
6 #
7 certifi==2020.6.20 \
7 certifi==2021.5.30 \
8 --hash=sha256:5930595817496dd21bb8dc35dad090f1c2cd0adfaf21204bf6732ca5d8ee34d3 \
8 --hash=sha256:2bbf76fd432960138b3ef6dda3dde0544f27cbf8546c458e60baf371917ba9ee \
9 --hash=sha256:8fc0819f1f30ba15bdb34cceffb9ef04d99f420f68eb75d901e9560b8749fc41 \
9 --hash=sha256:50b1e4f8446b06f41be7dd6338db18e0990601dce795c2b1686458aa7e8fa7d8 \
10 # via dulwich
10 # via dulwich
11 configparser==4.0.2 \
11 configparser==4.0.2 \
12 --hash=sha256:254c1d9c79f60c45dfde850850883d5aaa7f19a23f13561243a050d5a7c3fe4c \
12 --hash=sha256:254c1d9c79f60c45dfde850850883d5aaa7f19a23f13561243a050d5a7c3fe4c \
@@ -16,9 +16,9 b' cached-property==1.5.2 \\'
16 --hash=sha256:9fa5755838eecbb2d234c3aa390bd80fbd3ac6b6869109bfc1b499f7bd89a130 \
16 --hash=sha256:9fa5755838eecbb2d234c3aa390bd80fbd3ac6b6869109bfc1b499f7bd89a130 \
17 --hash=sha256:df4f613cf7ad9a588cc381aaf4a512d26265ecebd5eb9e1ba12f1319eb85a6a0 \
17 --hash=sha256:df4f613cf7ad9a588cc381aaf4a512d26265ecebd5eb9e1ba12f1319eb85a6a0 \
18 # via pygit2
18 # via pygit2
19 certifi==2020.6.20 \
19 certifi==2021.5.30 \
20 --hash=sha256:5930595817496dd21bb8dc35dad090f1c2cd0adfaf21204bf6732ca5d8ee34d3 \
20 --hash=sha256:2bbf76fd432960138b3ef6dda3dde0544f27cbf8546c458e60baf371917ba9ee \
21 --hash=sha256:8fc0819f1f30ba15bdb34cceffb9ef04d99f420f68eb75d901e9560b8749fc41 \
21 --hash=sha256:50b1e4f8446b06f41be7dd6338db18e0990601dce795c2b1686458aa7e8fa7d8 \
22 # via dulwich
22 # via dulwich
23 cffi==1.14.4 \
23 cffi==1.14.4 \
24 --hash=sha256:00a1ba5e2e95684448de9b89888ccd02c98d512064b4cb987d48f4b40aa0421e \
24 --hash=sha256:00a1ba5e2e95684448de9b89888ccd02c98d512064b4cb987d48f4b40aa0421e \
@@ -57,10 +57,10 b' from mercurial import ('
57 diffutil,
57 diffutil,
58 error,
58 error,
59 hg,
59 hg,
60 logcmdutil,
60 patch,
61 patch,
61 pycompat,
62 pycompat,
62 registrar,
63 registrar,
63 scmutil,
64 )
64 )
65 from mercurial.utils import dateutil
65 from mercurial.utils import dateutil
66
66
@@ -180,7 +180,7 b' def analyze(ui, repo, *revs, **opts):'
180
180
181 # If a mercurial repo is available, also model the commit history.
181 # If a mercurial repo is available, also model the commit history.
182 if repo:
182 if repo:
183 revs = scmutil.revrange(repo, revs)
183 revs = logcmdutil.revrange(repo, revs)
184 revs.sort()
184 revs.sort()
185
185
186 progress = ui.makeprogress(
186 progress = ui.makeprogress(
@@ -35,6 +35,7 b' from mercurial.node import short'
35
35
36 from mercurial import (
36 from mercurial import (
37 error,
37 error,
38 logcmdutil,
38 registrar,
39 registrar,
39 scmutil,
40 scmutil,
40 )
41 )
@@ -84,7 +85,7 b" def _docensor(ui, repo, path, rev=b'', t"
84 if not len(flog):
85 if not len(flog):
85 raise error.Abort(_(b'cannot censor file with no history'))
86 raise error.Abort(_(b'cannot censor file with no history'))
86
87
87 rev = scmutil.revsingle(repo, rev, rev).rev()
88 rev = logcmdutil.revsingle(repo, rev, rev).rev()
88 try:
89 try:
89 ctx = repo[rev]
90 ctx = repo[rev]
90 except KeyError:
91 except KeyError:
@@ -22,7 +22,6 b' from mercurial import ('
22 logcmdutil,
22 logcmdutil,
23 pycompat,
23 pycompat,
24 registrar,
24 registrar,
25 scmutil,
26 )
25 )
27
26
28 templateopts = cmdutil.templateopts
27 templateopts = cmdutil.templateopts
@@ -71,7 +70,7 b' def children(ui, repo, file_=None, **opt'
71 """
70 """
72 opts = pycompat.byteskwargs(opts)
71 opts = pycompat.byteskwargs(opts)
73 rev = opts.get(b'rev')
72 rev = opts.get(b'rev')
74 ctx = scmutil.revsingle(repo, rev)
73 ctx = logcmdutil.revsingle(repo, rev)
75 if file_:
74 if file_:
76 fctx = repo.filectx(file_, changeid=ctx.rev())
75 fctx = repo.filectx(file_, changeid=ctx.rev())
77 childctxs = [fcctx.changectx() for fcctx in fctx.children()]
76 childctxs = [fcctx.changectx() for fcctx in fctx.children()]
@@ -13,9 +13,9 b' from mercurial import ('
13 cmdutil,
13 cmdutil,
14 context,
14 context,
15 error,
15 error,
16 logcmdutil,
16 pycompat,
17 pycompat,
17 registrar,
18 registrar,
18 scmutil,
19 )
19 )
20
20
21 cmdtable = {}
21 cmdtable = {}
@@ -68,7 +68,7 b' def close_branch(ui, repo, *revs, **opts'
68 opts = pycompat.byteskwargs(opts)
68 opts = pycompat.byteskwargs(opts)
69
69
70 revs += tuple(opts.get(b'rev', []))
70 revs += tuple(opts.get(b'rev', []))
71 revs = scmutil.revrange(repo, revs)
71 revs = logcmdutil.revrange(repo, revs)
72
72
73 if not revs:
73 if not revs:
74 raise error.Abort(_(b'no revisions specified'))
74 raise error.Abort(_(b'no revisions specified'))
@@ -36,10 +36,10 b' from mercurial import ('
36 exchange,
36 exchange,
37 hg,
37 hg,
38 lock as lockmod,
38 lock as lockmod,
39 logcmdutil,
39 merge as mergemod,
40 merge as mergemod,
40 phases,
41 phases,
41 pycompat,
42 pycompat,
42 scmutil,
43 util,
43 util,
44 )
44 )
45 from mercurial.utils import dateutil
45 from mercurial.utils import dateutil
@@ -145,7 +145,7 b' class mercurial_sink(common.converter_si'
145 _(b'pulling from %s into %s\n') % (pbranch, branch)
145 _(b'pulling from %s into %s\n') % (pbranch, branch)
146 )
146 )
147 exchange.pull(
147 exchange.pull(
148 self.repo, prepo, [prepo.lookup(h) for h in heads]
148 self.repo, prepo, heads=[prepo.lookup(h) for h in heads]
149 )
149 )
150 self.before()
150 self.before()
151
151
@@ -564,7 +564,7 b' class mercurial_source(common.converter_'
564 )
564 )
565 nodes = set()
565 nodes = set()
566 parents = set()
566 parents = set()
567 for r in scmutil.revrange(self.repo, [hgrevs]):
567 for r in logcmdutil.revrange(self.repo, [hgrevs]):
568 ctx = self.repo[r]
568 ctx = self.repo[r]
569 nodes.add(ctx.node())
569 nodes.add(ctx.node())
570 parents.update(p.node() for p in ctx.parents())
570 parents.update(p.node() for p in ctx.parents())
@@ -423,7 +423,7 b' def reposetup(ui, repo):'
423 try:
423 try:
424 wlock = self.wlock()
424 wlock = self.wlock()
425 for f in self.dirstate:
425 for f in self.dirstate:
426 if self.dirstate[f] != b'n':
426 if not self.dirstate.get_entry(f).maybe_clean:
427 continue
427 continue
428 if oldeol is not None:
428 if oldeol is not None:
429 if not oldeol.match(f) and not neweol.match(f):
429 if not oldeol.match(f) and not neweol.match(f):
@@ -101,6 +101,7 b' from mercurial import ('
101 error,
101 error,
102 filemerge,
102 filemerge,
103 formatter,
103 formatter,
104 logcmdutil,
104 pycompat,
105 pycompat,
105 registrar,
106 registrar,
106 scmutil,
107 scmutil,
@@ -558,17 +559,17 b' def dodiff(ui, repo, cmdline, pats, opts'
558 do3way = b'$parent2' in cmdline
559 do3way = b'$parent2' in cmdline
559
560
560 if change:
561 if change:
561 ctx2 = scmutil.revsingle(repo, change, None)
562 ctx2 = logcmdutil.revsingle(repo, change, None)
562 ctx1a, ctx1b = ctx2.p1(), ctx2.p2()
563 ctx1a, ctx1b = ctx2.p1(), ctx2.p2()
563 elif from_rev or to_rev:
564 elif from_rev or to_rev:
564 repo = scmutil.unhidehashlikerevs(
565 repo = scmutil.unhidehashlikerevs(
565 repo, [from_rev] + [to_rev], b'nowarn'
566 repo, [from_rev] + [to_rev], b'nowarn'
566 )
567 )
567 ctx1a = scmutil.revsingle(repo, from_rev, None)
568 ctx1a = logcmdutil.revsingle(repo, from_rev, None)
568 ctx1b = repo[nullrev]
569 ctx1b = repo[nullrev]
569 ctx2 = scmutil.revsingle(repo, to_rev, None)
570 ctx2 = logcmdutil.revsingle(repo, to_rev, None)
570 else:
571 else:
571 ctx1a, ctx2 = scmutil.revpair(repo, revs)
572 ctx1a, ctx2 = logcmdutil.revpair(repo, revs)
572 if not revs:
573 if not revs:
573 ctx1b = repo[None].p2()
574 ctx1b = repo[None].p2()
574 else:
575 else:
@@ -15,6 +15,7 b' from mercurial import ('
15 encoding,
15 encoding,
16 error,
16 error,
17 extensions,
17 extensions,
18 logcmdutil,
18 patch,
19 patch,
19 pycompat,
20 pycompat,
20 registrar,
21 registrar,
@@ -75,7 +76,7 b' def _matchpaths(repo, rev, pats, opts, a'
75 def bad(x, y):
76 def bad(x, y):
76 raise error.Abort(b"%s: %s" % (x, y))
77 raise error.Abort(b"%s: %s" % (x, y))
77
78
78 ctx = scmutil.revsingle(repo, rev)
79 ctx = logcmdutil.revsingle(repo, rev)
79 m = scmutil.match(ctx, pats, opts, badfn=bad)
80 m = scmutil.match(ctx, pats, opts, badfn=bad)
80 for p in ctx.walk(m):
81 for p in ctx.walk(m):
81 yield p
82 yield p
@@ -317,7 +318,7 b' def debugbuildannotatecache(ui, repo, *p'
317 )
318 )
318 if ui.configbool(b'fastannotate', b'unfilteredrepo'):
319 if ui.configbool(b'fastannotate', b'unfilteredrepo'):
319 repo = repo.unfiltered()
320 repo = repo.unfiltered()
320 ctx = scmutil.revsingle(repo, rev)
321 ctx = logcmdutil.revsingle(repo, rev)
321 m = scmutil.match(ctx, pats, opts)
322 m = scmutil.match(ctx, pats, opts)
322 paths = list(ctx.walk(m))
323 paths = list(ctx.walk(m))
323 if util.safehasattr(repo, 'prefetchfastannotate'):
324 if util.safehasattr(repo, 'prefetchfastannotate'):
@@ -140,12 +140,10 b' def peersetup(ui, peer):'
140 def getannotate(self, path, lastnode=None):
140 def getannotate(self, path, lastnode=None):
141 if not self.capable(b'getannotate'):
141 if not self.capable(b'getannotate'):
142 ui.warn(_(b'remote peer cannot provide annotate cache\n'))
142 ui.warn(_(b'remote peer cannot provide annotate cache\n'))
143 yield None, None
143 return None, None
144 else:
144 else:
145 args = {b'path': path, b'lastnode': lastnode or b''}
145 args = {b'path': path, b'lastnode': lastnode or b''}
146 f = wireprotov1peer.future()
146 return args, _parseresponse
147 yield args, f
148 yield _parseresponse(f.value)
149
147
150 peer.__class__ = fastannotatepeer
148 peer.__class__ = fastannotatepeer
151
149
@@ -15,6 +15,7 b' from mercurial.node import hex, nullrev'
15 from mercurial.utils import stringutil
15 from mercurial.utils import stringutil
16 from mercurial import (
16 from mercurial import (
17 error,
17 error,
18 logcmdutil,
18 pycompat,
19 pycompat,
19 registrar,
20 registrar,
20 scmutil,
21 scmutil,
@@ -182,7 +183,7 b' def fastexport(ui, repo, *revs, **opts):'
182 if not revs:
183 if not revs:
183 revs = scmutil.revrange(repo, [b":"])
184 revs = scmutil.revrange(repo, [b":"])
184 else:
185 else:
185 revs = scmutil.revrange(repo, revs)
186 revs = logcmdutil.revrange(repo, revs)
186 if not revs:
187 if not revs:
187 raise error.Abort(_(b"no revisions matched"))
188 raise error.Abort(_(b"no revisions matched"))
188 authorfile = opts.get(b"authormap")
189 authorfile = opts.get(b"authormap")
@@ -144,6 +144,7 b' from mercurial import ('
144 context,
144 context,
145 copies,
145 copies,
146 error,
146 error,
147 logcmdutil,
147 match as matchmod,
148 match as matchmod,
148 mdiff,
149 mdiff,
149 merge,
150 merge,
@@ -283,20 +284,29 b' def fix(ui, repo, *pats, **opts):'
283 # There are no data dependencies between the workers fixing each file
284 # There are no data dependencies between the workers fixing each file
284 # revision, so we can use all available parallelism.
285 # revision, so we can use all available parallelism.
285 def getfixes(items):
286 def getfixes(items):
286 for rev, path in items:
287 for srcrev, path, dstrevs in items:
287 ctx = repo[rev]
288 ctx = repo[srcrev]
288 olddata = ctx[path].data()
289 olddata = ctx[path].data()
289 metadata, newdata = fixfile(
290 metadata, newdata = fixfile(
290 ui, repo, opts, fixers, ctx, path, basepaths, basectxs[rev]
291 ui,
292 repo,
293 opts,
294 fixers,
295 ctx,
296 path,
297 basepaths,
298 basectxs[srcrev],
291 )
299 )
292 # Don't waste memory/time passing unchanged content back, but
300 # We ungroup the work items now, because the code that consumes
293 # produce one result per item either way.
301 # these results has to handle each dstrev separately, and in
294 yield (
302 # topological order. Because these are handled in topological
295 rev,
303 # order, it's important that we pass around references to
296 path,
304 # "newdata" instead of copying it. Otherwise, we would be
297 metadata,
305 # keeping more copies of file content in memory at a time than
298 newdata if newdata != olddata else None,
306 # if we hadn't bothered to group/deduplicate the work items.
299 )
307 data = newdata if newdata != olddata else None
308 for dstrev in dstrevs:
309 yield (dstrev, path, metadata, data)
300
310
301 results = worker.worker(
311 results = worker.worker(
302 ui, 1.0, getfixes, tuple(), workqueue, threadsafe=False
312 ui, 1.0, getfixes, tuple(), workqueue, threadsafe=False
@@ -376,23 +386,32 b' def cleanup(repo, replacements, wdirwrit'
376
386
377
387
378 def getworkqueue(ui, repo, pats, opts, revstofix, basectxs):
388 def getworkqueue(ui, repo, pats, opts, revstofix, basectxs):
379 """Constructs the list of files to be fixed at specific revisions
389 """Constructs a list of files to fix and which revisions each fix applies to
380
390
381 It is up to the caller how to consume the work items, and the only
391 To avoid duplicating work, there is usually only one work item for each file
382 dependence between them is that replacement revisions must be committed in
392 revision that might need to be fixed. There can be multiple work items per
383 topological order. Each work item represents a file in the working copy or
393 file revision if the same file needs to be fixed in multiple changesets with
384 in some revision that should be fixed and written back to the working copy
394 different baserevs. Each work item also contains a list of changesets where
385 or into a replacement revision.
395 the file's data should be replaced with the fixed data. The work items for
396 earlier changesets come earlier in the work queue, to improve pipelining by
397 allowing the first changeset to be replaced while fixes are still being
398 computed for later changesets.
386
399
387 Work items for the same revision are grouped together, so that a worker
400 Also returned is a map from changesets to the count of work items that might
388 pool starting with the first N items in parallel is likely to finish the
401 affect each changeset. This is used later to count when all of a changeset's
389 first revision's work before other revisions. This can allow us to write
402 work items have been finished, without having to inspect the remaining work
390 the result to disk and reduce memory footprint. At time of writing, the
403 queue in each worker subprocess.
391 partition strategy in worker.py seems favorable to this. We also sort the
404
392 items by ascending revision number to match the order in which we commit
405 The example work item (1, "foo/bar.txt", (1, 2, 3)) means that the data of
393 the fixes later.
406 bar.txt should be read from revision 1, then fixed, and written back to
407 revisions 1, 2 and 3. Revision 1 is called the "srcrev" and the list of
408 revisions is called the "dstrevs". In practice the srcrev is always one of
409 the dstrevs, and we make that choice when constructing the work item so that
410 the choice can't be made inconsistently later on. The dstrevs should all
411 have the same file revision for the given path, so the choice of srcrev is
412 arbitrary. The wdirrev can be a dstrev and a srcrev.
394 """
413 """
395 workqueue = []
414 dstrevmap = collections.defaultdict(list)
396 numitems = collections.defaultdict(int)
415 numitems = collections.defaultdict(int)
397 maxfilesize = ui.configbytes(b'fix', b'maxfilesize')
416 maxfilesize = ui.configbytes(b'fix', b'maxfilesize')
398 for rev in sorted(revstofix):
417 for rev in sorted(revstofix):
@@ -410,8 +429,21 b' def getworkqueue(ui, repo, pats, opts, r'
410 % (util.bytecount(maxfilesize), path)
429 % (util.bytecount(maxfilesize), path)
411 )
430 )
412 continue
431 continue
413 workqueue.append((rev, path))
432 baserevs = tuple(ctx.rev() for ctx in basectxs[rev])
433 dstrevmap[(fctx.filerev(), baserevs, path)].append(rev)
414 numitems[rev] += 1
434 numitems[rev] += 1
435 workqueue = [
436 (min(dstrevs), path, dstrevs)
437 for (_filerev, _baserevs, path), dstrevs in dstrevmap.items()
438 ]
439 # Move work items for earlier changesets to the front of the queue, so we
440 # might be able to replace those changesets (in topological order) while
441 # we're still processing later work items. Note the min() in the previous
442 # expression, which means we don't need a custom comparator here. The path
443 # is also important in the sort order to make the output order stable. There
444 # are some situations where this doesn't help much, but some situations
445 # where it lets us buffer O(1) files instead of O(n) files.
446 workqueue.sort()
415 return workqueue, numitems
447 return workqueue, numitems
416
448
417
449
@@ -420,7 +452,7 b' def getrevstofix(ui, repo, opts):'
420 if opts[b'all']:
452 if opts[b'all']:
421 revs = repo.revs(b'(not public() and not obsolete()) or wdir()')
453 revs = repo.revs(b'(not public() and not obsolete()) or wdir()')
422 elif opts[b'source']:
454 elif opts[b'source']:
423 source_revs = scmutil.revrange(repo, opts[b'source'])
455 source_revs = logcmdutil.revrange(repo, opts[b'source'])
424 revs = set(repo.revs(b'(%ld::) - obsolete()', source_revs))
456 revs = set(repo.revs(b'(%ld::) - obsolete()', source_revs))
425 if wdirrev in source_revs:
457 if wdirrev in source_revs:
426 # `wdir()::` is currently empty, so manually add wdir
458 # `wdir()::` is currently empty, so manually add wdir
@@ -428,7 +460,7 b' def getrevstofix(ui, repo, opts):'
428 if repo[b'.'].rev() in revs:
460 if repo[b'.'].rev() in revs:
429 revs.add(wdirrev)
461 revs.add(wdirrev)
430 else:
462 else:
431 revs = set(scmutil.revrange(repo, opts[b'rev']))
463 revs = set(logcmdutil.revrange(repo, opts[b'rev']))
432 if opts.get(b'working_dir'):
464 if opts.get(b'working_dir'):
433 revs.add(wdirrev)
465 revs.add(wdirrev)
434 for rev in revs:
466 for rev in revs:
@@ -516,9 +548,9 b' def getbasepaths(repo, opts, workqueue, '
516 return {}
548 return {}
517
549
518 basepaths = {}
550 basepaths = {}
519 for rev, path in workqueue:
551 for srcrev, path, _dstrevs in workqueue:
520 fixctx = repo[rev]
552 fixctx = repo[srcrev]
521 for basectx in basectxs[rev]:
553 for basectx in basectxs[srcrev]:
522 basepath = copies.pathcopies(basectx, fixctx).get(path, path)
554 basepath = copies.pathcopies(basectx, fixctx).get(path, path)
523 if basepath in basectx:
555 if basepath in basectx:
524 basepaths[(basectx.rev(), fixctx.rev(), path)] = basepath
556 basepaths[(basectx.rev(), fixctx.rev(), path)] = basepath
@@ -618,7 +650,7 b' def getbasectxs(repo, opts, revstofix):'
618 # The --base flag overrides the usual logic, and we give every revision
650 # The --base flag overrides the usual logic, and we give every revision
619 # exactly the set of baserevs that the user specified.
651 # exactly the set of baserevs that the user specified.
620 if opts.get(b'base'):
652 if opts.get(b'base'):
621 baserevs = set(scmutil.revrange(repo, opts.get(b'base')))
653 baserevs = set(logcmdutil.revrange(repo, opts.get(b'base')))
622 if not baserevs:
654 if not baserevs:
623 baserevs = {nullrev}
655 baserevs = {nullrev}
624 basectxs = {repo[rev] for rev in baserevs}
656 basectxs = {repo[rev] for rev in baserevs}
@@ -641,10 +673,10 b' def _prefetchfiles(repo, workqueue, base'
641 toprefetch = set()
673 toprefetch = set()
642
674
643 # Prefetch the files that will be fixed.
675 # Prefetch the files that will be fixed.
644 for rev, path in workqueue:
676 for srcrev, path, _dstrevs in workqueue:
645 if rev == wdirrev:
677 if srcrev == wdirrev:
646 continue
678 continue
647 toprefetch.add((rev, path))
679 toprefetch.add((srcrev, path))
648
680
649 # Prefetch the base contents for lineranges().
681 # Prefetch the base contents for lineranges().
650 for (baserev, fixrev, path), basepath in basepaths.items():
682 for (baserev, fixrev, path), basepath in basepaths.items():
@@ -333,7 +333,11 b' def overridewalk(orig, self, match, subr'
333 # for better performance, directly access the inner dirstate map if the
333 # for better performance, directly access the inner dirstate map if the
334 # standard dirstate implementation is in use.
334 # standard dirstate implementation is in use.
335 dmap = dmap._map
335 dmap = dmap._map
336 nonnormalset = self._map.nonnormalset
336 nonnormalset = {
337 f
338 for f, e in self._map.items()
339 if e.v1_state() != b"n" or e.v1_mtime() == -1
340 }
337
341
338 copymap = self._map.copymap
342 copymap = self._map.copymap
339 getkind = stat.S_IFMT
343 getkind = stat.S_IFMT
@@ -560,8 +564,8 b' def overridestatus('
560 for i, (s1, s2) in enumerate(zip(l1, l2)):
564 for i, (s1, s2) in enumerate(zip(l1, l2)):
561 if set(s1) != set(s2):
565 if set(s1) != set(s2):
562 f.write(b'sets at position %d are unequal\n' % i)
566 f.write(b'sets at position %d are unequal\n' % i)
563 f.write(b'watchman returned: %s\n' % s1)
567 f.write(b'watchman returned: %r\n' % s1)
564 f.write(b'stat returned: %s\n' % s2)
568 f.write(b'stat returned: %r\n' % s2)
565 finally:
569 finally:
566 f.close()
570 f.close()
567
571
This diff has been collapsed as it changes many lines (651 lines changed).
@@ -282,6 +282,11 b' configitem('
282 default=None,
282 default=None,
283 )
283 )
284 configitem(b'histedit', b'summary-template', default=b'{rev} {desc|firstline}')
284 configitem(b'histedit', b'summary-template', default=b'{rev} {desc|firstline}')
285 # TODO: Teach the text-based histedit interface to respect this config option
286 # before we make it non-experimental.
287 configitem(
288 b'histedit', b'later-commits-first', default=False, experimental=True
289 )
285
290
286 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
291 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
287 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
292 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
@@ -749,7 +754,7 b' def _isdirtywc(repo):'
749
754
750
755
751 def abortdirty():
756 def abortdirty():
752 raise error.Abort(
757 raise error.StateError(
753 _(b'working copy has pending changes'),
758 _(b'working copy has pending changes'),
754 hint=_(
759 hint=_(
755 b'amend, commit, or revert them and run histedit '
760 b'amend, commit, or revert them and run histedit '
@@ -1052,12 +1057,12 b' def findoutgoing(ui, repo, remote=None, '
1052
1057
1053 outgoing = discovery.findcommonoutgoing(repo, other, revs, force=force)
1058 outgoing = discovery.findcommonoutgoing(repo, other, revs, force=force)
1054 if not outgoing.missing:
1059 if not outgoing.missing:
1055 raise error.Abort(_(b'no outgoing ancestors'))
1060 raise error.StateError(_(b'no outgoing ancestors'))
1056 roots = list(repo.revs(b"roots(%ln)", outgoing.missing))
1061 roots = list(repo.revs(b"roots(%ln)", outgoing.missing))
1057 if len(roots) > 1:
1062 if len(roots) > 1:
1058 msg = _(b'there are ambiguous outgoing revisions')
1063 msg = _(b'there are ambiguous outgoing revisions')
1059 hint = _(b"see 'hg help histedit' for more detail")
1064 hint = _(b"see 'hg help histedit' for more detail")
1060 raise error.Abort(msg, hint=hint)
1065 raise error.StateError(msg, hint=hint)
1061 return repo[roots[0]].node()
1066 return repo[roots[0]].node()
1062
1067
1063
1068
@@ -1193,166 +1198,6 b' class histeditrule(object):'
1193 return self.conflicts
1198 return self.conflicts
1194
1199
1195
1200
1196 # ============ EVENTS ===============
1197 def movecursor(state, oldpos, newpos):
1198 """Change the rule/changeset that the cursor is pointing to, regardless of
1199 current mode (you can switch between patches from the view patch window)."""
1200 state[b'pos'] = newpos
1201
1202 mode, _ = state[b'mode']
1203 if mode == MODE_RULES:
1204 # Scroll through the list by updating the view for MODE_RULES, so that
1205 # even if we are not currently viewing the rules, switching back will
1206 # result in the cursor's rule being visible.
1207 modestate = state[b'modes'][MODE_RULES]
1208 if newpos < modestate[b'line_offset']:
1209 modestate[b'line_offset'] = newpos
1210 elif newpos > modestate[b'line_offset'] + state[b'page_height'] - 1:
1211 modestate[b'line_offset'] = newpos - state[b'page_height'] + 1
1212
1213 # Reset the patch view region to the top of the new patch.
1214 state[b'modes'][MODE_PATCH][b'line_offset'] = 0
1215
1216
1217 def changemode(state, mode):
1218 curmode, _ = state[b'mode']
1219 state[b'mode'] = (mode, curmode)
1220 if mode == MODE_PATCH:
1221 state[b'modes'][MODE_PATCH][b'patchcontents'] = patchcontents(state)
1222
1223
-def makeselection(state, pos):
-    state[b'selected'] = pos
-
-
-def swap(state, oldpos, newpos):
-    """Swap two positions and calculate necessary conflicts in
-    O(|newpos-oldpos|) time"""
-
-    rules = state[b'rules']
-    assert 0 <= oldpos < len(rules) and 0 <= newpos < len(rules)
-
-    rules[oldpos], rules[newpos] = rules[newpos], rules[oldpos]
-
-    # TODO: swap should not know about histeditrule's internals
-    rules[newpos].pos = newpos
-    rules[oldpos].pos = oldpos
-
-    start = min(oldpos, newpos)
-    end = max(oldpos, newpos)
-    for r in pycompat.xrange(start, end + 1):
-        rules[newpos].checkconflicts(rules[r])
-        rules[oldpos].checkconflicts(rules[r])
-
-    if state[b'selected']:
-        makeselection(state, newpos)
-
-
-def changeaction(state, pos, action):
-    """Change the action state on the given position to the new action"""
-    rules = state[b'rules']
-    assert 0 <= pos < len(rules)
-    rules[pos].action = action
-
-
-def cycleaction(state, pos, next=False):
-    """Changes the action state the next or the previous action from
-    the action list"""
-    rules = state[b'rules']
-    assert 0 <= pos < len(rules)
-    current = rules[pos].action
-
-    assert current in KEY_LIST
-
-    index = KEY_LIST.index(current)
-    if next:
-        index += 1
-    else:
-        index -= 1
-    changeaction(state, pos, KEY_LIST[index % len(KEY_LIST)])
-
-
-def changeview(state, delta, unit):
-    """Change the region of whatever is being viewed (a patch or the list of
-    changesets). 'delta' is an amount (+/- 1) and 'unit' is 'page' or 'line'."""
-    mode, _ = state[b'mode']
-    if mode != MODE_PATCH:
-        return
-    mode_state = state[b'modes'][mode]
-    num_lines = len(mode_state[b'patchcontents'])
-    page_height = state[b'page_height']
-    unit = page_height if unit == b'page' else 1
-    num_pages = 1 + (num_lines - 1) // page_height
-    max_offset = (num_pages - 1) * page_height
-    newline = mode_state[b'line_offset'] + delta * unit
-    mode_state[b'line_offset'] = max(0, min(max_offset, newline))
-
-
-def event(state, ch):
-    """Change state based on the current character input
-
-    This takes the current state and based on the current character input from
-    the user we change the state.
-    """
-    selected = state[b'selected']
-    oldpos = state[b'pos']
-    rules = state[b'rules']
-
-    if ch in (curses.KEY_RESIZE, b"KEY_RESIZE"):
-        return E_RESIZE
-
-    lookup_ch = ch
-    if ch is not None and b'0' <= ch <= b'9':
-        lookup_ch = b'0'
-
-    curmode, prevmode = state[b'mode']
-    action = KEYTABLE[curmode].get(
-        lookup_ch, KEYTABLE[b'global'].get(lookup_ch)
-    )
-    if action is None:
-        return
-    if action in (b'down', b'move-down'):
-        newpos = min(oldpos + 1, len(rules) - 1)
-        movecursor(state, oldpos, newpos)
-        if selected is not None or action == b'move-down':
-            swap(state, oldpos, newpos)
-    elif action in (b'up', b'move-up'):
-        newpos = max(0, oldpos - 1)
-        movecursor(state, oldpos, newpos)
-        if selected is not None or action == b'move-up':
-            swap(state, oldpos, newpos)
-    elif action == b'next-action':
-        cycleaction(state, oldpos, next=True)
-    elif action == b'prev-action':
-        cycleaction(state, oldpos, next=False)
-    elif action == b'select':
-        selected = oldpos if selected is None else None
-        makeselection(state, selected)
-    elif action == b'goto' and int(ch) < len(rules) and len(rules) <= 10:
-        newrule = next((r for r in rules if r.origpos == int(ch)))
-        movecursor(state, oldpos, newrule.pos)
-        if selected is not None:
-            swap(state, oldpos, newrule.pos)
-    elif action.startswith(b'action-'):
-        changeaction(state, oldpos, action[7:])
-    elif action == b'showpatch':
-        changemode(state, MODE_PATCH if curmode != MODE_PATCH else prevmode)
-    elif action == b'help':
-        changemode(state, MODE_HELP if curmode != MODE_HELP else prevmode)
-    elif action == b'quit':
-        return E_QUIT
-    elif action == b'histedit':
-        return E_HISTEDIT
-    elif action == b'page-down':
-        return E_PAGEDOWN
-    elif action == b'page-up':
-        return E_PAGEUP
-    elif action == b'line-down':
-        return E_LINEDOWN
-    elif action == b'line-up':
-        return E_LINEUP
-
-
 def makecommands(rules):
     """Returns a list of commands consumable by histedit --commands based on
     our list of rules"""
@@ -1390,52 +1235,38 b' def _trunc_tail(line, n):'
1390 return line[: n - 2] + b' >'
1235 return line[: n - 2] + b' >'
1391
1236
1392
1237
1393 def patchcontents(state):
1238 class _chistedit_state(object):
1394 repo = state[b'repo']
1239 def __init__(
1395 rule = state[b'rules'][state[b'pos']]
1240 self,
1396 displayer = logcmdutil.changesetdisplayer(
1241 repo,
1397 repo.ui, repo, {b"patch": True, b"template": b"status"}, buffered=True
1242 rules,
1398 )
1243 stdscr,
1399 overrides = {(b'ui', b'verbose'): True}
1244 ):
1400 with repo.ui.configoverride(overrides, source=b'histedit'):
1245 self.repo = repo
1401 displayer.show(rule.ctx)
1246 self.rules = rules
1402 displayer.close()
1247 self.stdscr = stdscr
1403 return displayer.hunk[rule.ctx.rev()].splitlines()
1248 self.later_on_top = repo.ui.configbool(
1404
1249 b'histedit', b'later-commits-first'
1405
1250 )
1406 def _chisteditmain(repo, rules, stdscr):
1251 # The current item in display order, initialized to point to the top
1407 try:
1252 # of the screen.
1408 curses.use_default_colors()
1253 self.pos = 0
1409 except curses.error:
1254 self.selected = None
1410 pass
1255 self.mode = (MODE_INIT, MODE_INIT)
1411
1256 self.page_height = None
1412 # initialize color pattern
1257 self.modes = {
1413 curses.init_pair(COLOR_HELP, curses.COLOR_WHITE, curses.COLOR_BLUE)
1258 MODE_RULES: {
1414 curses.init_pair(COLOR_SELECTED, curses.COLOR_BLACK, curses.COLOR_WHITE)
1259 b'line_offset': 0,
1415 curses.init_pair(COLOR_WARN, curses.COLOR_BLACK, curses.COLOR_YELLOW)
1260 },
1416 curses.init_pair(COLOR_OK, curses.COLOR_BLACK, curses.COLOR_GREEN)
1261 MODE_PATCH: {
1417 curses.init_pair(COLOR_CURRENT, curses.COLOR_WHITE, curses.COLOR_MAGENTA)
1262 b'line_offset': 0,
1418 curses.init_pair(COLOR_DIFF_ADD_LINE, curses.COLOR_GREEN, -1)
1263 },
1419 curses.init_pair(COLOR_DIFF_DEL_LINE, curses.COLOR_RED, -1)
1264 }
1420 curses.init_pair(COLOR_DIFF_OFFSET, curses.COLOR_MAGENTA, -1)
1265
1421 curses.init_pair(COLOR_ROLL, curses.COLOR_RED, -1)
1266 def render_commit(self, win):
1422 curses.init_pair(
1423 COLOR_ROLL_CURRENT, curses.COLOR_BLACK, curses.COLOR_MAGENTA
1424 )
1425 curses.init_pair(COLOR_ROLL_SELECTED, curses.COLOR_RED, curses.COLOR_WHITE)
1426
1427 # don't display the cursor
1428 try:
1429 curses.curs_set(0)
1430 except curses.error:
1431 pass
1432
1433 def rendercommit(win, state):
1434 """Renders the commit window that shows the log of the current selected
1267 """Renders the commit window that shows the log of the current selected
1435 commit"""
1268 commit"""
1436 pos = state[b'pos']
1269 rule = self.rules[self.display_pos_to_rule_pos(self.pos)]
1437 rules = state[b'rules']
1438 rule = rules[pos]
1439
1270
1440 ctx = rule.ctx
1271 ctx = rule.ctx
1441 win.box()
1272 win.box()
@@ -1449,7 +1280,7 @@ def _chisteditmain(repo, rules, stdscr):
         line = b"user: %s" % ctx.user()
         win.addstr(2, 1, line[:length])
 
-        bms = repo.nodebookmarks(ctx.node())
+        bms = self.repo.nodebookmarks(ctx.node())
         line = b"bookmark: %s" % b' '.join(bms)
         win.addstr(3, 1, line[:length])
 
@@ -1481,8 +1312,8 @@ def _chisteditmain(repo, rules, stdscr):
         win.addstr(y, 1, conflictstr[:length])
         win.noutrefresh()
 
-    def helplines(mode):
-        if mode == MODE_PATCH:
+    def helplines(self):
+        if self.mode[0] == MODE_PATCH:
             help = b"""\
 ?: help, k/up: line up, j/down: line down, v: stop viewing patch
 pgup: prev page, space/pgdn: next page, c: commit, q: abort
@@ -1495,40 +1326,70 @@ pgup/K: move patch up, pgdn/J: move patc
 """
         return help.splitlines()
 
-    def renderhelp(win, state):
+    def render_help(self, win):
         maxy, maxx = win.getmaxyx()
-        mode, _ = state[b'mode']
-        for y, line in enumerate(helplines(mode)):
+        for y, line in enumerate(self.helplines()):
             if y >= maxy:
                 break
             addln(win, y, 0, line, curses.color_pair(COLOR_HELP))
         win.noutrefresh()
 
-    def renderrules(rulesscr, state):
-        rules = state[b'rules']
-        pos = state[b'pos']
-        selected = state[b'selected']
-        start = state[b'modes'][MODE_RULES][b'line_offset']
-
-        conflicts = [r.ctx for r in rules if r.conflicts]
+    def layout(self):
+        maxy, maxx = self.stdscr.getmaxyx()
+        helplen = len(self.helplines())
+        mainlen = maxy - helplen - 12
+        if mainlen < 1:
+            raise error.Abort(
+                _(b"terminal dimensions %d by %d too small for curses histedit")
+                % (maxy, maxx),
+                hint=_(
+                    b"enlarge your terminal or use --config ui.interface=text"
+                ),
+            )
+        return {
+            b'commit': (12, maxx),
+            b'help': (helplen, maxx),
+            b'main': (mainlen, maxx),
+        }
+
+    def display_pos_to_rule_pos(self, display_pos):
+        """Converts a position in display order to rule order.
+
+        The `display_pos` is the order from the top in display order, not
+        considering which items are currently visible on the screen. Thus,
+        `display_pos=0` is the item at the top (possibly after scrolling to
+        the top)
+        """
+        if self.later_on_top:
+            return len(self.rules) - 1 - display_pos
+        else:
+            return display_pos
+
+    def render_rules(self, rulesscr):
+        start = self.modes[MODE_RULES][b'line_offset']
+
+        conflicts = [r.ctx for r in self.rules if r.conflicts]
         if len(conflicts) > 0:
             line = b"potential conflict in %s" % b','.join(
                 map(pycompat.bytestr, conflicts)
             )
             addln(rulesscr, -1, 0, line, curses.color_pair(COLOR_WARN))
 
-        for y, rule in enumerate(rules[start:]):
-            if y >= state[b'page_height']:
-                break
+        for display_pos in range(start, len(self.rules)):
+            y = display_pos - start
+            if y < 0 or y >= self.page_height:
+                continue
+            rule_pos = self.display_pos_to_rule_pos(display_pos)
+            rule = self.rules[rule_pos]
             if len(rule.conflicts) > 0:
                 rulesscr.addstr(y, 0, b" ", curses.color_pair(COLOR_WARN))
             else:
                 rulesscr.addstr(y, 0, b" ", curses.COLOR_BLACK)
 
-            if y + start == selected:
+            if display_pos == self.selected:
                 rollcolor = COLOR_ROLL_SELECTED
                 addln(rulesscr, y, 2, rule, curses.color_pair(COLOR_SELECTED))
-            elif y + start == pos:
+            elif display_pos == self.pos:
                 rollcolor = COLOR_ROLL_CURRENT
                 addln(
                     rulesscr,
@@ -1551,7 +1412,7 @@ pgup/K: move patch up, pgdn/J: move patc
 
         rulesscr.noutrefresh()
 
-    def renderstring(win, state, output, diffcolors=False):
+    def render_string(self, win, output, diffcolors=False):
         maxy, maxx = win.getmaxyx()
         length = min(maxy - 1, len(output))
         for y in range(0, length):
@@ -1573,77 +1434,239 @@ pgup/K: move patch up, pgdn/J: move patc
             win.addstr(y, 0, line)
         win.noutrefresh()
 
-    def renderpatch(win, state):
-        start = state[b'modes'][MODE_PATCH][b'line_offset']
-        content = state[b'modes'][MODE_PATCH][b'patchcontents']
-        renderstring(win, state, content[start:], diffcolors=True)
+    def render_patch(self, win):
+        start = self.modes[MODE_PATCH][b'line_offset']
+        content = self.modes[MODE_PATCH][b'patchcontents']
+        self.render_string(win, content[start:], diffcolors=True)
 
-    def layout(mode):
-        maxy, maxx = stdscr.getmaxyx()
-        helplen = len(helplines(mode))
-        mainlen = maxy - helplen - 12
-        if mainlen < 1:
-            raise error.Abort(
-                _(b"terminal dimensions %d by %d too small for curses histedit")
-                % (maxy, maxx),
-                hint=_(
-                    b"enlarge your terminal or use --config ui.interface=text"
-                ),
-            )
-        return {
-            b'commit': (12, maxx),
-            b'help': (helplen, maxx),
-            b'main': (mainlen, maxx),
-        }
+    def event(self, ch):
+        """Change state based on the current character input
+
+        This takes the current state and based on the current character input from
+        the user we change the state.
+        """
+        oldpos = self.pos
+
+        if ch in (curses.KEY_RESIZE, b"KEY_RESIZE"):
+            return E_RESIZE
+
+        lookup_ch = ch
+        if ch is not None and b'0' <= ch <= b'9':
+            lookup_ch = b'0'
+
+        curmode, prevmode = self.mode
+        action = KEYTABLE[curmode].get(
+            lookup_ch, KEYTABLE[b'global'].get(lookup_ch)
+        )
+        if action is None:
+            return
+        if action in (b'down', b'move-down'):
+            newpos = min(oldpos + 1, len(self.rules) - 1)
+            self.move_cursor(oldpos, newpos)
+            if self.selected is not None or action == b'move-down':
+                self.swap(oldpos, newpos)
+        elif action in (b'up', b'move-up'):
+            newpos = max(0, oldpos - 1)
+            self.move_cursor(oldpos, newpos)
+            if self.selected is not None or action == b'move-up':
+                self.swap(oldpos, newpos)
+        elif action == b'next-action':
+            self.cycle_action(oldpos, next=True)
+        elif action == b'prev-action':
+            self.cycle_action(oldpos, next=False)
+        elif action == b'select':
+            self.selected = oldpos if self.selected is None else None
+            self.make_selection(self.selected)
+        elif action == b'goto' and int(ch) < len(self.rules) <= 10:
+            newrule = next((r for r in self.rules if r.origpos == int(ch)))
+            self.move_cursor(oldpos, newrule.pos)
+            if self.selected is not None:
+                self.swap(oldpos, newrule.pos)
+        elif action.startswith(b'action-'):
+            self.change_action(oldpos, action[7:])
+        elif action == b'showpatch':
+            self.change_mode(MODE_PATCH if curmode != MODE_PATCH else prevmode)
+        elif action == b'help':
+            self.change_mode(MODE_HELP if curmode != MODE_HELP else prevmode)
+        elif action == b'quit':
+            return E_QUIT
+        elif action == b'histedit':
+            return E_HISTEDIT
+        elif action == b'page-down':
+            return E_PAGEDOWN
+        elif action == b'page-up':
+            return E_PAGEUP
+        elif action == b'line-down':
+            return E_LINEDOWN
+        elif action == b'line-up':
+            return E_LINEUP
+
+    def patch_contents(self):
+        repo = self.repo
+        rule = self.rules[self.display_pos_to_rule_pos(self.pos)]
+        displayer = logcmdutil.changesetdisplayer(
+            repo.ui,
+            repo,
+            {b"patch": True, b"template": b"status"},
+            buffered=True,
+        )
+        overrides = {(b'ui', b'verbose'): True}
+        with repo.ui.configoverride(overrides, source=b'histedit'):
+            displayer.show(rule.ctx)
+            displayer.close()
+        return displayer.hunk[rule.ctx.rev()].splitlines()
+
+    def move_cursor(self, oldpos, newpos):
+        """Change the rule/changeset that the cursor is pointing to, regardless of
+        current mode (you can switch between patches from the view patch window)."""
+        self.pos = newpos
+
+        mode, _ = self.mode
+        if mode == MODE_RULES:
+            # Scroll through the list by updating the view for MODE_RULES, so that
+            # even if we are not currently viewing the rules, switching back will
+            # result in the cursor's rule being visible.
+            modestate = self.modes[MODE_RULES]
+            if newpos < modestate[b'line_offset']:
+                modestate[b'line_offset'] = newpos
+            elif newpos > modestate[b'line_offset'] + self.page_height - 1:
+                modestate[b'line_offset'] = newpos - self.page_height + 1
+
+        # Reset the patch view region to the top of the new patch.
+        self.modes[MODE_PATCH][b'line_offset'] = 0
+
+    def change_mode(self, mode):
+        curmode, _ = self.mode
+        self.mode = (mode, curmode)
+        if mode == MODE_PATCH:
+            self.modes[MODE_PATCH][b'patchcontents'] = self.patch_contents()
+
+    def make_selection(self, pos):
+        self.selected = pos
+
+    def swap(self, oldpos, newpos):
+        """Swap two positions and calculate necessary conflicts in
+        O(|newpos-oldpos|) time"""
+        old_rule_pos = self.display_pos_to_rule_pos(oldpos)
+        new_rule_pos = self.display_pos_to_rule_pos(newpos)
+
+        rules = self.rules
+        assert 0 <= old_rule_pos < len(rules) and 0 <= new_rule_pos < len(rules)
+
+        rules[old_rule_pos], rules[new_rule_pos] = (
+            rules[new_rule_pos],
+            rules[old_rule_pos],
+        )
+
+        # TODO: swap should not know about histeditrule's internals
+        rules[new_rule_pos].pos = new_rule_pos
+        rules[old_rule_pos].pos = old_rule_pos
+
+        start = min(old_rule_pos, new_rule_pos)
+        end = max(old_rule_pos, new_rule_pos)
+        for r in pycompat.xrange(start, end + 1):
+            rules[new_rule_pos].checkconflicts(rules[r])
+            rules[old_rule_pos].checkconflicts(rules[r])
+
+        if self.selected:
+            self.make_selection(newpos)
+
+    def change_action(self, pos, action):
+        """Change the action state on the given position to the new action"""
+        assert 0 <= pos < len(self.rules)
+        self.rules[pos].action = action
+
+    def cycle_action(self, pos, next=False):
+        """Changes the action state the next or the previous action from
+        the action list"""
+        assert 0 <= pos < len(self.rules)
+        current = self.rules[pos].action
+
+        assert current in KEY_LIST
+
+        index = KEY_LIST.index(current)
+        if next:
+            index += 1
+        else:
+            index -= 1
+        self.change_action(pos, KEY_LIST[index % len(KEY_LIST)])
+
+    def change_view(self, delta, unit):
+        """Change the region of whatever is being viewed (a patch or the list of
+        changesets). 'delta' is an amount (+/- 1) and 'unit' is 'page' or 'line'."""
+        mode, _ = self.mode
+        if mode != MODE_PATCH:
+            return
+        mode_state = self.modes[mode]
+        num_lines = len(mode_state[b'patchcontents'])
+        page_height = self.page_height
+        unit = page_height if unit == b'page' else 1
+        num_pages = 1 + (num_lines - 1) // page_height
+        max_offset = (num_pages - 1) * page_height
+        newline = mode_state[b'line_offset'] + delta * unit
+        mode_state[b'line_offset'] = max(0, min(max_offset, newline))
+
+
+def _chisteditmain(repo, rules, stdscr):
+    try:
+        curses.use_default_colors()
+    except curses.error:
+        pass
+
+    # initialize color pattern
+    curses.init_pair(COLOR_HELP, curses.COLOR_WHITE, curses.COLOR_BLUE)
+    curses.init_pair(COLOR_SELECTED, curses.COLOR_BLACK, curses.COLOR_WHITE)
+    curses.init_pair(COLOR_WARN, curses.COLOR_BLACK, curses.COLOR_YELLOW)
+    curses.init_pair(COLOR_OK, curses.COLOR_BLACK, curses.COLOR_GREEN)
+    curses.init_pair(COLOR_CURRENT, curses.COLOR_WHITE, curses.COLOR_MAGENTA)
+    curses.init_pair(COLOR_DIFF_ADD_LINE, curses.COLOR_GREEN, -1)
+    curses.init_pair(COLOR_DIFF_DEL_LINE, curses.COLOR_RED, -1)
+    curses.init_pair(COLOR_DIFF_OFFSET, curses.COLOR_MAGENTA, -1)
+    curses.init_pair(COLOR_ROLL, curses.COLOR_RED, -1)
+    curses.init_pair(
+        COLOR_ROLL_CURRENT, curses.COLOR_BLACK, curses.COLOR_MAGENTA
+    )
+    curses.init_pair(COLOR_ROLL_SELECTED, curses.COLOR_RED, curses.COLOR_WHITE)
+
+    # don't display the cursor
+    try:
+        curses.curs_set(0)
+    except curses.error:
+        pass
 
     def drawvertwin(size, y, x):
         win = curses.newwin(size[0], size[1], y, x)
         y += size[0]
         return win, y, x
 
-    state = {
-        b'pos': 0,
-        b'rules': rules,
-        b'selected': None,
-        b'mode': (MODE_INIT, MODE_INIT),
-        b'page_height': None,
-        b'modes': {
-            MODE_RULES: {
-                b'line_offset': 0,
-            },
-            MODE_PATCH: {
-                b'line_offset': 0,
-            },
-        },
-        b'repo': repo,
-    }
+    state = _chistedit_state(repo, rules, stdscr)
 
     # eventloop
     ch = None
     stdscr.clear()
     stdscr.refresh()
     while True:
-        oldmode, unused = state[b'mode']
+        oldmode, unused = state.mode
         if oldmode == MODE_INIT:
-            changemode(state, MODE_RULES)
-        e = event(state, ch)
+            state.change_mode(MODE_RULES)
+        e = state.event(ch)
 
         if e == E_QUIT:
            return False
         if e == E_HISTEDIT:
-            return state[b'rules']
+            return state.rules
         else:
             if e == E_RESIZE:
                 size = screen_size()
                 if size != stdscr.getmaxyx():
                     curses.resizeterm(*size)
 
-            curmode, unused = state[b'mode']
-            sizes = layout(curmode)
+            sizes = state.layout()
+            curmode, unused = state.mode
             if curmode != oldmode:
-                state[b'page_height'] = sizes[b'main'][0]
+                state.page_height = sizes[b'main'][0]
                 # Adjust the view to fit the current screen size.
-                movecursor(state, state[b'pos'], state[b'pos'])
+                state.move_cursor(state.pos, state.pos)
 
             # Pack the windows against the top, each pane spread across the
             # full width of the screen.
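[Note on the hunk above] The new later-commits-first handling is easiest to see in isolation: display order only reverses the rule order when the option is set, and the mapping is its own inverse, which is why swap() can translate both cursor positions through it before touching the rules list. A minimal standalone sketch of the same mapping (plain Python; the free-standing function and argument names here stand in for the _chistedit_state attributes):

    def display_pos_to_rule_pos(display_pos, n_rules, later_on_top):
        # Row 0 of the display maps to the last rule when later commits
        # are shown on top; otherwise display and rule order coincide.
        if later_on_top:
            return n_rules - 1 - display_pos
        return display_pos

    rules = [b'pick a', b'pick b', b'pick c']
    assert display_pos_to_rule_pos(0, len(rules), later_on_top=True) == 2
    assert display_pos_to_rule_pos(2, len(rules), later_on_top=True) == 0
    assert display_pos_to_rule_pos(0, len(rules), later_on_top=False) == 0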
@@ -1654,26 +1677,26 @@ pgup/K: move patch up, pgdn/J: move patc
 
             if e in (E_PAGEDOWN, E_PAGEUP, E_LINEDOWN, E_LINEUP):
                 if e == E_PAGEDOWN:
-                    changeview(state, +1, b'page')
+                    state.change_view(+1, b'page')
                 elif e == E_PAGEUP:
-                    changeview(state, -1, b'page')
+                    state.change_view(-1, b'page')
                 elif e == E_LINEDOWN:
-                    changeview(state, +1, b'line')
+                    state.change_view(+1, b'line')
                 elif e == E_LINEUP:
-                    changeview(state, -1, b'line')
+                    state.change_view(-1, b'line')
 
             # start rendering
             commitwin.erase()
             helpwin.erase()
             mainwin.erase()
             if curmode == MODE_PATCH:
-                renderpatch(mainwin, state)
+                state.render_patch(mainwin)
             elif curmode == MODE_HELP:
-                renderstring(mainwin, state, __doc__.strip().splitlines())
+                state.render_string(mainwin, __doc__.strip().splitlines())
             else:
-                renderrules(mainwin, state)
-            rendercommit(commitwin, state)
-            renderhelp(helpwin, state)
+                state.render_rules(mainwin)
+            state.render_commit(commitwin)
+            state.render_help(helpwin)
             curses.doupdate()
             # done rendering
             ch = encoding.strtolocal(stdscr.getkey())
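[Note] change_view's clamping arithmetic, carried over unchanged into the class, is worth a second look: the offset is capped at the top of the last page, so a trailing partial page stays reachable. A standalone sketch of the same arithmetic (names invented for illustration):

    def clamp_offset(line_offset, delta, unit, num_lines, page_height):
        step = page_height if unit == b'page' else 1
        num_pages = 1 + (num_lines - 1) // page_height  # ceiling division
        max_offset = (num_pages - 1) * page_height
        return max(0, min(max_offset, line_offset + delta * step))

    # 25 patch lines at 10 lines per page -> 3 pages, offsets 0/10/20.
    assert clamp_offset(0, +1, b'page', 25, 10) == 10
    assert clamp_offset(20, +1, b'page', 25, 10) == 20  # already on last page
    assert clamp_offset(0, -1, b'line', 25, 10) == 0    # clamped at the top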
@@ -1697,26 +1720,19 @@ def _chistedit(ui, repo, freeargs, opts)
     cmdutil.checkunfinished(repo)
     cmdutil.bailifchanged(repo)
 
-    if os.path.exists(os.path.join(repo.path, b'histedit-state')):
-        raise error.Abort(
-            _(
-                b'history edit already in progress, try '
-                b'--continue or --abort'
-            )
-        )
     revs.extend(freeargs)
     if not revs:
         defaultrev = destutil.desthistedit(ui, repo)
         if defaultrev is not None:
             revs.append(defaultrev)
     if len(revs) != 1:
-        raise error.Abort(
+        raise error.InputError(
             _(b'histedit requires exactly one ancestor revision')
         )
 
-    rr = list(repo.set(b'roots(%ld)', scmutil.revrange(repo, revs)))
+    rr = list(repo.set(b'roots(%ld)', logcmdutil.revrange(repo, revs)))
     if len(rr) != 1:
-        raise error.Abort(
+        raise error.InputError(
             _(
                 b'The specified revisions must have '
                 b'exactly one common root'
@@ -1727,15 +1743,15 @@ def _chistedit(ui, repo, freeargs, opts)
     topmost = repo.dirstate.p1()
     revs = between(repo, root, topmost, keep)
     if not revs:
-        raise error.Abort(
+        raise error.InputError(
             _(b'%s is not an ancestor of working directory') % short(root)
         )
 
-    ctxs = []
+    rules = []
     for i, r in enumerate(revs):
-        ctxs.append(histeditrule(ui, repo[r], i))
+        rules.append(histeditrule(ui, repo[r], i))
     with util.with_lc_ctype():
-        rc = curses.wrapper(functools.partial(_chisteditmain, repo, ctxs))
+        rc = curses.wrapper(functools.partial(_chisteditmain, repo, rules))
     curses.echo()
     curses.endwin()
     if rc is False:
@@ -1928,12 +1944,12 @@ def _readfile(ui, path):
         return f.read()
 
 
-def _validateargs(ui, repo, state, freeargs, opts, goal, rules, revs):
+def _validateargs(ui, repo, freeargs, opts, goal, rules, revs):
     # TODO only abort if we try to histedit mq patches, not just
     # blanket if mq patches are applied somewhere
     mq = getattr(repo, 'mq', None)
     if mq and mq.applied:
-        raise error.Abort(_(b'source has mq patches applied'))
+        raise error.StateError(_(b'source has mq patches applied'))
 
     # basic argument incompatibility processing
     outg = opts.get(b'outgoing')
@@ -1941,31 +1957,26 @@ def _validateargs(ui, repo, state, freea
     abort = opts.get(b'abort')
     force = opts.get(b'force')
     if force and not outg:
-        raise error.Abort(_(b'--force only allowed with --outgoing'))
+        raise error.InputError(_(b'--force only allowed with --outgoing'))
     if goal == b'continue':
         if any((outg, abort, revs, freeargs, rules, editplan)):
-            raise error.Abort(_(b'no arguments allowed with --continue'))
+            raise error.InputError(_(b'no arguments allowed with --continue'))
     elif goal == b'abort':
         if any((outg, revs, freeargs, rules, editplan)):
-            raise error.Abort(_(b'no arguments allowed with --abort'))
+            raise error.InputError(_(b'no arguments allowed with --abort'))
     elif goal == b'edit-plan':
         if any((outg, revs, freeargs)):
-            raise error.Abort(
+            raise error.InputError(
                 _(b'only --commands argument allowed with --edit-plan')
             )
     else:
-        if state.inprogress():
-            raise error.Abort(
-                _(
-                    b'history edit already in progress, try '
-                    b'--continue or --abort'
-                )
-            )
         if outg:
             if revs:
-                raise error.Abort(_(b'no revisions allowed with --outgoing'))
+                raise error.InputError(
+                    _(b'no revisions allowed with --outgoing')
+                )
             if len(freeargs) > 1:
-                raise error.Abort(
+                raise error.InputError(
                     _(b'only one repo argument allowed with --outgoing')
                 )
         else:
@@ -1976,7 +1987,7 @@ def _validateargs(ui, repo, state, freea
                     revs.append(defaultrev)
 
             if len(revs) != 1:
-                raise error.Abort(
+                raise error.InputError(
                     _(b'histedit requires exactly one ancestor revision')
                 )
 
@@ -1990,11 +2001,11 @@ def _histedit(ui, repo, state, freeargs,
     rules = opts.get(b'commands', b'')
     state.keep = opts.get(b'keep', False)
 
-    _validateargs(ui, repo, state, freeargs, opts, goal, rules, revs)
+    _validateargs(ui, repo, freeargs, opts, goal, rules, revs)
 
     hastags = False
     if revs:
-        revs = scmutil.revrange(repo, revs)
+        revs = logcmdutil.revrange(repo, revs)
         ctxs = [repo[rev] for rev in revs]
         for ctx in ctxs:
             tags = [tag for tag in ctx.tags() if tag != b'tip']
@@ -2009,7 +2020,7 @@ def _histedit(ui, repo, state, freeargs,
             ),
             default=1,
         ):
-            raise error.Abort(_(b'histedit cancelled\n'))
+            raise error.CanceledError(_(b'histedit cancelled\n'))
     # rebuild state
     if goal == goalcontinue:
         state.read()
@@ -2217,9 +2228,9 @@ def _newhistedit(ui, repo, state, revs, 
             remote = None
         root = findoutgoing(ui, repo, remote, force, opts)
     else:
-        rr = list(repo.set(b'roots(%ld)', scmutil.revrange(repo, revs)))
+        rr = list(repo.set(b'roots(%ld)', logcmdutil.revrange(repo, revs)))
         if len(rr) != 1:
-            raise error.Abort(
+            raise error.InputError(
                 _(
                     b'The specified revisions must have '
                     b'exactly one common root'
@@ -2229,7 +2240,7 @@ def _newhistedit(ui, repo, state, revs, 
 
     revs = between(repo, root, topmost, state.keep)
     if not revs:
-        raise error.Abort(
+        raise error.InputError(
             _(b'%s is not an ancestor of working directory') % short(root)
         )
 
@@ -2259,7 +2270,7 @@ def _newhistedit(ui, repo, state, revs, 
             followcopies=False,
         )
     except error.Abort:
-        raise error.Abort(
+        raise error.StateError(
             _(
                 b"untracked files in working directory conflict with files in %s"
             )
@@ -2337,7 +2348,9 @@ def between(repo, old, new, keep):
     if revs and not keep:
         rewriteutil.precheck(repo, revs, b'edit')
         if repo.revs(b'(%ld) and merge()', revs):
-            raise error.Abort(_(b'cannot edit history that contains merges'))
+            raise error.StateError(
+                _(b'cannot edit history that contains merges')
+            )
     return pycompat.maplist(repo.changelog.node, revs)
 
 
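[Note] The error.Abort -> InputError/StateError/CanceledError substitutions running through these hunks are behavior-preserving for existing callers: the narrower classes subclass Abort in mercurial/error.py, and they exist so the detailed-exit-code machinery can distinguish user mistakes from bad repository state. A minimal sketch of why broad handlers keep working (toy classes mirroring that hierarchy, not the real module):

    class Abort(Exception):
        pass

    class InputError(Abort):     # bad command-line input
        pass

    class StateError(Abort):     # repository in the wrong state
        pass

    class CanceledError(Abort):  # user declined an interactive prompt
        pass

    try:
        raise InputError('histedit requires exactly one ancestor revision')
    except Abort as e:  # a pre-existing catch-all still catches the subclass
        print('abort:', e)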
@@ -431,18 +431,19 @@ def localrepolistkeys(orig, self, namesp
     @wireprotov1peer.batchable
     def listkeyspatterns(self, namespace, patterns):
         if not self.capable(b'pushkey'):
-            yield {}, None
-        f = wireprotov1peer.future()
+            return {}, None
         self.ui.debug(b'preparing listkeys for "%s"\n' % namespace)
-        yield {
+
+        def decode(d):
+            self.ui.debug(
+                b'received listkey for "%s": %i bytes\n' % (namespace, len(d))
+            )
+            return pushkey.decodekeys(d)
+
+        return {
             b'namespace': encoding.fromlocal(namespace),
             b'patterns': wireprototypes.encodelist(patterns),
-        }, f
-        d = f.value
-        self.ui.debug(
-            b'received listkey for "%s": %i bytes\n' % (namespace, len(d))
-        )
-        yield pushkey.decodekeys(d)
+        }, decode
 
 
 def _readbundlerevs(bundlerepo):
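[Note] This hunk is the template for all the wire-protocol changes below: a @wireprotov1peer.batchable used to be a generator that yielded (args, future) and later yielded the value decoded from future.value; it now simply returns (args, decode), and the batcher applies decode to the raw response itself. A standalone toy of the new contract (all names invented for illustration):

    def run_batch(calls, send):
        requests, decoders = [], []
        for make_call in calls:
            req, decode = make_call()
            requests.append(req)
            decoders.append(decode)
        raw_replies = send(requests)  # one round-trip for the whole batch
        return [
            decode(raw) if decode else None
            for decode, raw in zip(decoders, raw_replies)
        ]

    def heads_call():
        return {b'cmd': b'heads'}, lambda d: d.split(b'\n')

    fake_send = lambda reqs: [b'abc\ndef' for _ in reqs]
    print(run_batch([heads_call], fake_send))  # [[b'abc', b'def']]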
@@ -26,6 +26,7 @@ from mercurial import (
     exthelper,
     hg,
     lock,
+    logcmdutil,
     match as matchmod,
     pycompat,
     scmutil,
@@ -540,7 +541,7 @@ def updatelfiles(
             expecthash = lfutil.readasstandin(wctx[standin])
             if expecthash != b'':
                 if lfile not in wctx:  # not switched to normal file
-                    if repo.dirstate[standin] != b'?':
+                    if repo.dirstate.get_entry(standin).any_tracked:
                         wvfs.unlinkpath(lfile, ignoremissing=True)
                     else:
                         dropped.add(lfile)
@@ -568,7 +569,7 @@ def updatelfiles(
                 removed += 1
 
     # largefile processing might be slow and be interrupted - be prepared
-    lfdirstate.write()
+    lfdirstate.write(repo.currenttransaction())
 
     if lfiles:
         lfiles = [f for f in lfiles if f not in dropped]
@@ -577,7 +578,7 @@ def updatelfiles(
             repo.wvfs.unlinkpath(lfutil.standin(f))
             # This needs to happen for dropped files, otherwise they stay in
             # the M state.
-            lfdirstate._drop(f)
+            lfdirstate._map.reset_state(f)
 
     statuswriter(_(b'getting changed largefiles\n'))
     cachelfiles(ui, repo, None, lfiles)
@@ -618,7 +619,7 @@ def updatelfiles(
 
             lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
 
-        lfdirstate.write()
+        lfdirstate.write(repo.currenttransaction())
     if lfiles:
         statuswriter(
             _(b'%d largefiles updated, %d removed\n') % (updated, removed)
@@ -657,7 +658,7 @@ def lfpull(ui, repo, source=b"default", 
     revs = opts.get('rev', [])
    if not revs:
         raise error.Abort(_(b'no revisions specified'))
-    revs = scmutil.revrange(repo, revs)
+    revs = logcmdutil.revrange(repo, revs)
 
     numcached = 0
     for rev in revs:
@@ -191,10 +191,12 @@ class largefilesdirstate(dirstate.dirsta
     def _ignore(self, f):
         return False
 
-    def write(self, tr=False):
+    def write(self, tr):
         # (1) disable PENDING mode always
         #     (lfdirstate isn't yet managed as a part of the transaction)
         # (2) avoid develwarn 'use dirstate.write with ....'
+        if tr:
+            tr.addbackup(b'largefiles/dirstate', location=b'plain')
         super(largefilesdirstate, self).write(None)
 
 
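[Note] Callers of the largefiles dirstate now thread the active transaction through, which is what makes the addbackup above fire; outside a transaction the argument is simply None. A sketch of the calling convention, assuming repo is an open localrepository with the extension enabled:

    tr = repo.currenttransaction()  # None when no transaction is open
    lfdirstate.write(tr)            # backs up largefiles/dirstate when tr is set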
@@ -269,7 +271,7 @@ def listlfiles(repo, rev=None, matcher=N
     return [
         splitstandin(f)
         for f in repo[rev].walk(matcher)
-        if rev is not None or repo.dirstate[f] != b'?'
+        if rev is not None or repo.dirstate.get_entry(f).any_tracked
     ]
 
 
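[Note] The one-letter dirstate states consulted all over these extensions map onto DirstateItem properties, and the substitutions in this series follow a fixed table: b'?' checks become any_tracked (negated), b'a' becomes added, b'r' becomes removed. A best-effort shim showing the correspondence (illustrative only, not Mercurial API; the old b'm' merge state has no single-property equivalent and is collapsed here):

    def old_state_char(entry):
        """Recover the legacy one-letter state from a DirstateItem-like
        object exposing any_tracked/added/removed."""
        if not entry.any_tracked:
            return b'?'
        if entry.added:
            return b'a'
        if entry.removed:
            return b'r'
        return b'n'  # normal; merge info needs the extra p2 properties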
@@ -558,24 +560,14 @@ def synclfdirstate(repo, lfdirstate, lfi
     if lfstandin not in repo.dirstate:
         lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=False)
     else:
-        stat = repo.dirstate._map[lfstandin]
-        state, mtime = stat.state, stat.mtime
-        if state == b'n':
-            if normallookup or mtime < 0 or not repo.wvfs.exists(lfile):
-                # state 'n' doesn't ensure 'clean' in this case
-                lfdirstate.update_file(
-                    lfile, p1_tracked=True, wc_tracked=True, possibly_dirty=True
-                )
-            else:
-                lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=True)
-        elif state == b'm':
-            lfdirstate.update_file(
-                lfile, p1_tracked=True, wc_tracked=True, merged=True
-            )
-        elif state == b'r':
-            lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=False)
-        elif state == b'a':
-            lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=True)
+        entry = repo.dirstate.get_entry(lfstandin)
+        lfdirstate.update_file(
+            lfile,
+            wc_tracked=entry.tracked,
+            p1_tracked=entry.p1_tracked,
+            p2_info=entry.p2_info,
+            possibly_dirty=True,
+        )
 
 
 def markcommitted(orig, ctx, node):
@@ -598,7 +590,7 @@ def markcommitted(orig, ctx, node):
         lfile = splitstandin(f)
         if lfile is not None:
             synclfdirstate(repo, lfdirstate, lfile, False)
-    lfdirstate.write()
+    lfdirstate.write(repo.currenttransaction())
 
     # As part of committing, copy all of the largefiles into the cache.
     #
@@ -713,7 +705,7 @@ def updatestandinsbymatch(repo, match):
     lfdirstate = openlfdirstate(ui, repo)
     for fstandin in standins:
         lfile = splitstandin(fstandin)
-        if lfdirstate[lfile] != b'r':
+        if lfdirstate.get_entry(lfile).tracked:
             updatestandin(repo, lfile, fstandin)
 
     # Cook up a new matcher that only matches regular files or
@@ -737,10 +729,10 @@ def updatestandinsbymatch(repo, match):
     # standin removal, drop the normal file if it is unknown to dirstate.
     # Thus, skip plain largefile names but keep the standin.
     if f in lfiles or fstandin in standins:
-        if repo.dirstate[fstandin] != b'r':
-            if repo.dirstate[f] != b'r':
+        if not repo.dirstate.get_entry(fstandin).removed:
+            if not repo.dirstate.get_entry(f).removed:
                 continue
-        elif repo.dirstate[f] == b'?':
+        elif not repo.dirstate.get_entry(f).any_tracked:
             continue
 
     actualfiles.append(f)
@@ -151,7 +151,7 @@ def addlargefiles(ui, repo, isaddremove,
                 )
             standins.append(standinname)
             lfdirstate.set_tracked(f)
-    lfdirstate.write()
+    lfdirstate.write(repo.currenttransaction())
     bad += [
         lfutil.splitstandin(f)
         for f in repo[None].add(standins)
@@ -229,7 +229,7 @@ def removelargefiles(ui, repo, isaddremo
         for f in remove:
             lfdirstate.set_untracked(lfutil.splitstandin(f))
 
-        lfdirstate.write()
+        lfdirstate.write(repo.currenttransaction())
 
     return result
 
@@ -659,7 +659,7 @@ def mergerecordupdates(orig, repo, actio
                 )
                 # make sure lfile doesn't get synclfdirstate'd as normal
                 lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=True)
-        lfdirstate.write()
+        lfdirstate.write(repo.currenttransaction())
 
     return orig(repo, actions, branchmerge, getfiledata)
 
@@ -864,7 +864,7 @@ def overridecopy(orig, ui, repo, pats, o
                     util.copyfile(repo.wjoin(srclfile), repo.wjoin(destlfile))
 
                 lfdirstate.set_tracked(destlfile)
-            lfdirstate.write()
+            lfdirstate.write(repo.currenttransaction())
         except error.Abort as e:
             if e.message != _(b'no files to copy'):
                 raise e
@@ -896,7 +896,7 @@ def overriderevert(orig, ui, repo, ctx, 
     with repo.wlock():
         lfdirstate = lfutil.openlfdirstate(ui, repo)
         s = lfutil.lfdirstatestatus(lfdirstate, repo)
-        lfdirstate.write()
+        lfdirstate.write(repo.currenttransaction())
         for lfile in s.modified:
             lfutil.updatestandin(repo, lfile, lfutil.standin(lfile))
         for lfile in s.deleted:
@@ -934,7 +934,7 @@ def overriderevert(orig, ui, repo, ctx, 
             standin = lfutil.standin(f)
             if standin in ctx or standin in mctx:
                 matchfiles.append(standin)
-            elif standin in wctx or lfdirstate[f] == b'r':
+            elif standin in wctx or lfdirstate.get_entry(f).removed:
                 continue
             else:
                 matchfiles.append(f)
@@ -1000,7 +1000,7 @@ def overridepull(orig, ui, repo, source=
         numcached = 0
         repo.firstpulled = revsprepull  # for pulled() revset expression
         try:
-            for rev in scmutil.revrange(repo, lfrevs):
+            for rev in logcmdutil.revrange(repo, lfrevs):
                 ui.note(_(b'pulling largefiles for revision %d\n') % rev)
                 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
                 numcached += len(cached)
@@ -1027,7 +1027,7 @@ def overridepush(orig, ui, repo, *args, 
     lfrevs = kwargs.pop('lfrev', None)
     if lfrevs:
         opargs = kwargs.setdefault('opargs', {})
-        opargs[b'lfrevs'] = scmutil.revrange(repo, lfrevs)
+        opargs[b'lfrevs'] = logcmdutil.revrange(repo, lfrevs)
     return orig(ui, repo, *args, **kwargs)
 
 
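[Note] The scmutil -> logcmdutil renames in these hunks are mechanical: revrange and revsingle moved modules without changing their signatures, so each call site only swaps the prefix (sketch, assuming repo is an open localrepository):

    from mercurial import logcmdutil

    revs = logcmdutil.revrange(repo, [b'draft()'])  # was scmutil.revrange
    ctx = logcmdutil.revsingle(repo, b'.')          # was scmutil.revsingle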
@@ -1383,7 +1383,7 @@ def cmdutilforget(
     lfdirstate = lfutil.openlfdirstate(ui, repo)
     for f in forget:
         lfdirstate.set_untracked(f)
-    lfdirstate.write()
+    lfdirstate.write(repo.currenttransaction())
     standins = [lfutil.standin(f) for f in forget]
     for f in standins:
         repo.wvfs.unlinkpath(f, ignoremissing=True)
@@ -1591,8 +1591,12 @@ def overridepurge(orig, ui, repo, *dirs,
             node1, node2, match, ignored, clean, unknown, listsubrepos
         )
         lfdirstate = lfutil.openlfdirstate(ui, repo)
-        unknown = [f for f in r.unknown if lfdirstate[f] == b'?']
-        ignored = [f for f in r.ignored if lfdirstate[f] == b'?']
+        unknown = [
+            f for f in r.unknown if not lfdirstate.get_entry(f).any_tracked
+        ]
+        ignored = [
+            f for f in r.ignored if not lfdirstate.get_entry(f).any_tracked
+        ]
         return scmutil.status(
             r.modified, r.added, r.removed, r.deleted, unknown, ignored, r.clean
         )
@@ -1609,7 +1613,7 @@ def overriderollback(orig, ui, repo, **o
         orphans = {
             f
             for f in repo.dirstate
-            if lfutil.isstandin(f) and repo.dirstate[f] != b'r'
+            if lfutil.isstandin(f) and not repo.dirstate.get_entry(f).removed
         }
         result = orig(ui, repo, **opts)
         after = repo.dirstate.parents()
@@ -1620,7 +1624,7 @@ def overriderollback(orig, ui, repo, **o
             for f in repo.dirstate:
                 if lfutil.isstandin(f):
                     orphans.discard(f)
-                    if repo.dirstate[f] == b'r':
+                    if repo.dirstate.get_entry(f).removed:
                         repo.wvfs.unlinkpath(f, ignoremissing=True)
                     elif f in pctx:
                         fctx = pctx[f]
@@ -1632,18 +1636,6 @@ def overriderollback(orig, ui, repo, **o
             for standin in orphans:
                 repo.wvfs.unlinkpath(standin, ignoremissing=True)
 
-        lfdirstate = lfutil.openlfdirstate(ui, repo)
-        with lfdirstate.parentchange():
-            orphans = set(lfdirstate)
-            lfiles = lfutil.listlfiles(repo)
-            for file in lfiles:
-                lfutil.synclfdirstate(repo, lfdirstate, file, True)
-                orphans.discard(file)
-            for lfile in orphans:
-                lfdirstate.update_file(
-                    lfile, p1_tracked=False, wc_tracked=False
-                )
-            lfdirstate.write()
     return result
 
 
@@ -1663,7 +1655,7 @@ def overridetransplant(orig, ui, repo, *
 @eh.wrapcommand(b'cat')
 def overridecat(orig, ui, repo, file1, *pats, **opts):
     opts = pycompat.byteskwargs(opts)
-    ctx = scmutil.revsingle(repo, opts.get(b'rev'))
+    ctx = logcmdutil.revsingle(repo, opts.get(b'rev'))
     err = 1
     notbad = set()
     m = scmutil.match(ctx, (file1,) + pats, opts)
@@ -1787,10 +1779,8 @@ def mergeupdate(orig, repo, node, branch
         # mark all clean largefiles as dirty, just in case the update gets
         # interrupted before largefiles and lfdirstate are synchronized
         for lfile in oldclean:
-            entry = lfdirstate._map.get(lfile)
-            assert not (entry.merged_removed or entry.from_p2_removed)
             lfdirstate.set_possibly_dirty(lfile)
-        lfdirstate.write()
+        lfdirstate.write(repo.currenttransaction())
 
         oldstandins = lfutil.getstandinsstate(repo)
         wc = kwargs.get('wc')
@@ -1810,7 +1800,7 @@ def mergeupdate(orig, repo, node, branch
             # all the ones that didn't change as clean
             for lfile in oldclean.difference(filelist):
                 lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=True)
-            lfdirstate.write()
+            lfdirstate.write(repo.currenttransaction())
 
         if branchmerge or force or partial:
             filelist.extend(s.deleted + s.removed)
@@ -184,17 +184,18 @@ def wirereposetup(ui, repo):
 
         @wireprotov1peer.batchable
         def statlfile(self, sha):
-            f = wireprotov1peer.future()
+            def decode(d):
+                try:
+                    return int(d)
+                except (ValueError, urlerr.httperror):
+                    # If the server returns anything but an integer followed by a
+                    # newline, newline, it's not speaking our language; if we get
+                    # an HTTP error, we can't be sure the largefile is present;
+                    # either way, consider it missing.
+                    return 2
+
             result = {b'sha': sha}
-            yield result, f
-            try:
-                yield int(f.value)
-            except (ValueError, urlerr.httperror):
-                # If the server returns anything but an integer followed by a
-                # newline, newline, it's not speaking our language; if we get
-                # an HTTP error, we can't be sure the largefile is present;
-                # either way, consider it missing.
-                yield 2
+            return result, decode
 
     repo.__class__ = lfileswirerepository
 
@@ -310,7 +310,7 @@ def reposetup(ui, repo):
                 ]
 
                 if gotlock:
-                    lfdirstate.write()
+                    lfdirstate.write(self.currenttransaction())
 
                 self.lfstatus = True
                 return scmutil.status(*result)
@@ -137,6 +137,7 @@ from mercurial import (
     filelog,
     filesetlang,
     localrepo,
+    logcmdutil,
     minifileset,
     pycompat,
     revlog,
@@ -417,7 +418,7 @@ def lfsfiles(context, mapping):
 def debuglfsupload(ui, repo, **opts):
     """upload lfs blobs added by the working copy parent or given revisions"""
     revs = opts.get('rev', [])
-    pointers = wrapper.extractpointers(repo, scmutil.revrange(repo, revs))
+    pointers = wrapper.extractpointers(repo, logcmdutil.revrange(repo, revs))
     wrapper.uploadblobs(repo, pointers)
 
 
@@ -1241,7 +1241,7 @@ class queue(object):
         if opts.get(b'rev'):
             if not self.applied:
                 raise error.Abort(_(b'no patches applied'))
-            revs = scmutil.revrange(repo, opts.get(b'rev'))
+            revs = logcmdutil.revrange(repo, opts.get(b'rev'))
             revs.sort()
             revpatches = self._revpatches(repo, revs)
             realpatches += revpatches
@@ -1267,9 +1267,9 @@ class queue(object):
         if any((b'.hgsubstate' in files for files in mar)):
             return  # already listed up
         # not yet listed up
-        if substatestate in b'a?':
+        if substatestate.added or not substatestate.any_tracked:
             mar[1].append(b'.hgsubstate')
-        elif substatestate in b'r':
+        elif substatestate.removed:
             mar[2].append(b'.hgsubstate')
         else:  # modified
             mar[0].append(b'.hgsubstate')
@@ -1377,7 +1377,7 @@ class queue(object):
         self.checkpatchname(patchfn)
         inclsubs = checksubstate(repo)
         if inclsubs:
-            substatestate = repo.dirstate[b'.hgsubstate']
+            substatestate = repo.dirstate.get_entry(b'.hgsubstate')
         if opts.get(b'include') or opts.get(b'exclude') or pats:
             # detect missing files in pats
             def badfn(f, msg):
@@ -1908,7 +1908,7 @@ class queue(object):
 
         inclsubs = checksubstate(repo, patchparent)
         if inclsubs:
-            substatestate = repo.dirstate[b'.hgsubstate']
+            substatestate = repo.dirstate.get_entry(b'.hgsubstate')
 
         ph = patchheader(self.join(patchfn), self.plainmode)
         diffopts = self.diffopts(
@@ -2417,7 +2417,7 @@ class queue(object):
                 raise error.Abort(
                     _(b'option "-r" not valid when importing files')
                 )
-            rev = scmutil.revrange(repo, rev)
+            rev = logcmdutil.revrange(repo, rev)
             rev.sort(reverse=True)
         elif not files:
             raise error.Abort(_(b'no files or revisions specified'))
@@ -3638,7 +3638,7 @@ def rename(ui, repo, patch, name=None, *
     if r and patch in r.dirstate:
         wctx = r[None]
         with r.wlock():
-            if r.dirstate[patch] == b'a':
+            if r.dirstate.get_entry(patch).added:
                 r.dirstate.set_untracked(patch)
                 r.dirstate.set_tracked(name)
             else:
@@ -3878,7 +3878,7 @@ def finish(ui, repo, *revrange, **opts):
         ui.status(_(b'no patches applied\n'))
         return 0
 
-    revs = scmutil.revrange(repo, revrange)
+    revs = logcmdutil.revrange(repo, revrange)
     if repo[b'.'].rev() in revs and repo[None].files():
         ui.warn(_(b'warning: uncommitted changes in the working directory\n'))
     # queue.finish may changes phases but leave the responsibility to lock the
@@ -289,7 +289,7 @@ def _narrow(
         repair.strip(ui, unfi, tostrip, topic=b'narrow', backup=backup)
 
     todelete = []
-    for t, f, f2, size in repo.store.datafiles():
+    for t, f, size in repo.store.datafiles():
         if f.startswith(b'data/'):
             file = f[5:-2]
             if not newmatch(file):
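[Note] store.datafiles() entries shrank from four fields to three (the separately encoded name slot is gone), so every consumer unpacks one variable less. Illustrative sketch of the shape change with a stand-in tuple:

    # before: (file_type, encoded_name, decoded_name, size)
    # after:  (file_type, name, size)
    entries = [(b'd', b'data/foo.i', 1024)]
    for file_type, name, size in entries:
        print(file_type, name, size)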
@@ -91,6 +91,7 @@ from mercurial import (
     error,
     formatter,
     hg,
+    logcmdutil,
     mail,
     patch,
     pycompat,
@@ -812,7 +813,7 @@ def email(ui, repo, *revs, **opts):
             raise error.Abort(_(b"bookmark '%s' not found") % bookmark)
         revs = scmutil.bookmarkrevs(repo, bookmark)
 
-    revs = scmutil.revrange(repo, revs)
+    revs = logcmdutil.revrange(repo, revs)
     if outgoing:
         revs = _getoutgoing(repo, dest, revs)
     if bundle:
@@ -1354,7 +1354,7 @@ def phabsend(ui, repo, *revs, **opts):
     """
     opts = pycompat.byteskwargs(opts)
     revs = list(revs) + opts.get(b'rev', [])
-    revs = scmutil.revrange(repo, revs)
+    revs = logcmdutil.revrange(repo, revs)
     revs.sort()  # ascending order to preserve topological parent/child in phab
 
     if not revs:
@@ -2276,7 +2276,7 @@ def phabupdate(ui, repo, *specs, **opts)
         if specs:
             raise error.InputError(_(b'cannot specify both DREVSPEC and --rev'))
 
-        drevmap = getdrevmap(repo, scmutil.revrange(repo, [revs]))
+        drevmap = getdrevmap(repo, logcmdutil.revrange(repo, [revs]))
         specs = []
         unknown = []
         for r, d in pycompat.iteritems(drevmap):
@@ -35,6 +35,7 @@ from mercurial import (
     dirstateguard,
     error,
     extensions,
+    logcmdutil,
     merge as mergemod,
     mergestate as mergestatemod,
     mergeutil,
@@ -1302,19 +1303,19 @@ def _definedestmap(ui, repo, inmemory, d
         dest = None
 
     if revf:
-        rebaseset = scmutil.revrange(repo, revf)
+        rebaseset = logcmdutil.revrange(repo, revf)
         if not rebaseset:
             ui.status(_(b'empty "rev" revision set - nothing to rebase\n'))
             return None
     elif srcf:
-        src = scmutil.revrange(repo, srcf)
+        src = logcmdutil.revrange(repo, srcf)
         if not src:
             ui.status(_(b'empty "source" revision set - nothing to rebase\n'))
             return None
         # `+ (%ld)` to work around `wdir()::` being empty
         rebaseset = repo.revs(b'(%ld):: + (%ld)', src, src)
     else:
-        base = scmutil.revrange(repo, basef or [b'.'])
+        base = logcmdutil.revrange(repo, basef or [b'.'])
         if not base:
             ui.status(
                 _(b'empty "base" revision set - ' b"can't compute rebase set\n")
@@ -1322,7 +1323,7 @@ def _definedestmap(ui, repo, inmemory, d
             return None
         if destf:
             # --base does not support multiple destinations
-            dest = scmutil.revsingle(repo, destf)
+            dest = logcmdutil.revsingle(repo, destf)
         else:
             dest = repo[_destrebase(repo, base, destspace=destspace)]
             destf = bytes(dest)
@@ -24,10 +24,10 b' from mercurial import ('
24 cmdutil,
24 cmdutil,
25 config,
25 config,
26 error,
26 error,
27 logcmdutil,
27 minirst,
28 minirst,
28 pycompat,
29 pycompat,
29 registrar,
30 registrar,
30 scmutil,
31 util,
31 util,
32 )
32 )
33 from mercurial.utils import (
33 from mercurial.utils import (
@@ -676,7 +676,7 b' def releasenotes(ui, repo, file_=None, *'
676 return _getadmonitionlist(ui, sections)
676 return _getadmonitionlist(ui, sections)
677
677
678 rev = opts.get(b'rev')
678 rev = opts.get(b'rev')
679 revs = scmutil.revrange(repo, [rev or b'not public()'])
679 revs = logcmdutil.revrange(repo, [rev or b'not public()'])
680 if opts.get(b'check'):
680 if opts.get(b'check'):
681 return checkadmonitions(ui, repo, sections.names(), revs)
681 return checkadmonitions(ui, repo, sections.names(), revs)
682
682
@@ -378,7 +378,7 b' class manifestrevlogstore(object):'
378 ledger.markdataentry(self, treename, node)
378 ledger.markdataentry(self, treename, node)
379 ledger.markhistoryentry(self, treename, node)
379 ledger.markhistoryentry(self, treename, node)
380
380
381 for t, path, encoded, size in self._store.datafiles():
381 for t, path, size in self._store.datafiles():
382 if path[:5] != b'meta/' or path[-2:] != b'.i':
382 if path[:5] != b'meta/' or path[-2:] != b'.i':
383 continue
383 continue
384
384
@@ -63,12 +63,14 b' def peersetup(ui, peer):'
63 raise error.Abort(
63 raise error.Abort(
64 b'configured remotefile server does not support getfile'
64 b'configured remotefile server does not support getfile'
65 )
65 )
66 f = wireprotov1peer.future()
66
67 yield {b'file': file, b'node': node}, f
67 def decode(d):
68 code, data = f.value.split(b'\0', 1)
68 code, data = d.split(b'\0', 1)
69 if int(code):
69 if int(code):
70 raise error.LookupError(file, node, data)
70 raise error.LookupError(file, node, data)
71 yield data
71 return data
72
73 return {b'file': file, b'node': node}, decode
72
74
73 @wireprotov1peer.batchable
75 @wireprotov1peer.batchable
74 def x_rfl_getflogheads(self, path):
76 def x_rfl_getflogheads(self, path):
@@ -77,10 +79,11 b' def peersetup(ui, peer):'
77 b'configured remotefile server does not '
79 b'configured remotefile server does not '
78 b'support getflogheads'
80 b'support getflogheads'
79 )
81 )
80 f = wireprotov1peer.future()
82
81 yield {b'path': path}, f
83 def decode(d):
82 heads = f.value.split(b'\n') if f.value else []
84 return d.split(b'\n') if d else []
83 yield heads
85
86 return {b'path': path}, decode
84
87
85 def _updatecallstreamopts(self, command, opts):
88 def _updatecallstreamopts(self, command, opts):
86 if command != b'getbundle':
89 if command != b'getbundle':
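The two peersetup hunks above show the reworked @wireprotov1peer.batchable contract: rather than yielding a future and later reading f.value, a batchable method now returns its wire arguments together with a decode callback, and the batching machinery applies the callback to the raw reply. A minimal sketch of the new shape (the command name and argument are illustrative):

    @wireprotov1peer.batchable
    def x_rfl_example(self, path):
        def decode(d):
            # d is the raw bytes reply for this command within the batch
            return d.split(b'\n') if d else []

        # (arguments to send, callback applied to the server reply)
        return {b'path': path}, decode
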
@@ -166,24 +166,24 b' def onetimesetup(ui):'
166 n = util.pconvert(fp[striplen:])
166 n = util.pconvert(fp[striplen:])
167 d = store.decodedir(n)
167 d = store.decodedir(n)
168 t = store.FILETYPE_OTHER
168 t = store.FILETYPE_OTHER
169 yield (t, d, n, st.st_size)
169 yield (t, d, st.st_size)
170 if kind == stat.S_IFDIR:
170 if kind == stat.S_IFDIR:
171 visit.append(fp)
171 visit.append(fp)
172
172
173 if scmutil.istreemanifest(repo):
173 if scmutil.istreemanifest(repo):
174 for (t, u, e, s) in repo.store.datafiles():
174 for (t, u, s) in repo.store.datafiles():
175 if u.startswith(b'meta/') and (
175 if u.startswith(b'meta/') and (
176 u.endswith(b'.i') or u.endswith(b'.d')
176 u.endswith(b'.i') or u.endswith(b'.d')
177 ):
177 ):
178 yield (t, u, e, s)
178 yield (t, u, s)
179
179
180 # Return .d and .i files that do not match the shallow pattern
180 # Return .d and .i files that do not match the shallow pattern
181 match = state.match
181 match = state.match
182 if match and not match.always():
182 if match and not match.always():
183 for (t, u, e, s) in repo.store.datafiles():
183 for (t, u, s) in repo.store.datafiles():
184 f = u[5:-2] # trim data/... and .i/.d
184 f = u[5:-2] # trim data/... and .i/.d
185 if not state.match(f):
185 if not state.match(f):
186 yield (t, u, e, s)
186 yield (t, u, s)
187
187
188 for x in repo.store.topfiles():
188 for x in repo.store.topfiles():
189 if state.noflatmf and x[1][:11] == b'00manifest.':
189 if state.noflatmf and x[1][:11] == b'00manifest.':
@@ -255,14 +255,9 b' def _setupdirstate(ui):'
255
255
256 # Prevent adding files that are outside the sparse checkout
256 # Prevent adding files that are outside the sparse checkout
257 editfuncs = [
257 editfuncs = [
258 b'normal',
259 b'set_tracked',
258 b'set_tracked',
260 b'set_untracked',
259 b'set_untracked',
261 b'add',
262 b'normallookup',
263 b'copy',
260 b'copy',
264 b'remove',
265 b'merge',
266 ]
261 ]
267 hint = _(
262 hint = _(
268 b'include file with `hg debugsparse --include <pattern>` or use '
263 b'include file with `hg debugsparse --include <pattern>` or use '
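With the legacy mutators gone, the sparse guard only needs to wrap the remaining dirstate entry points (set_tracked, set_untracked, copy). The guard idea in a pure-Python sketch (names are illustrative; the real hook goes through Mercurial's function-wrapping machinery):

    def guard_outside_sparse(mutator, sparse_match):
        """Wrap a dirstate mutator so that paths outside the sparse
        checkout are rejected before the dirstate is touched."""

        def wrapped(path, *args, **kwargs):
            if path is not None and not sparse_match(path):
                raise ValueError(
                    b'cannot add file outside sparse checkout: %s' % path
                )
            return mutator(path, *args, **kwargs)

        return wrapped
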
@@ -22,6 +22,7 b' from mercurial import ('
22 commands,
22 commands,
23 error,
23 error,
24 hg,
24 hg,
25 logcmdutil,
25 pycompat,
26 pycompat,
26 registrar,
27 registrar,
27 revsetlang,
28 revsetlang,
@@ -75,7 +76,7 b' def split(ui, repo, *revs, **opts):'
75 # If the rebase somehow runs into conflicts, make sure
76 # If the rebase somehow runs into conflicts, make sure
76 # we close the transaction so the user can continue it.
77 # we close the transaction so the user can continue it.
77 with util.acceptintervention(tr):
78 with util.acceptintervention(tr):
78 revs = scmutil.revrange(repo, revlist or [b'.'])
79 revs = logcmdutil.revrange(repo, revlist or [b'.'])
79 if len(revs) > 1:
80 if len(revs) > 1:
80 raise error.InputError(_(b'cannot split multiple revisions'))
81 raise error.InputError(_(b'cannot split multiple revisions'))
81
82
@@ -37,7 +37,6 b' from mercurial import ('
37 pycompat,
37 pycompat,
38 registrar,
38 registrar,
39 revset,
39 revset,
40 scmutil,
41 smartset,
40 smartset,
42 state as statemod,
41 state as statemod,
43 util,
42 util,
@@ -845,7 +844,7 b' def _dotransplant(ui, repo, *revs, **opt'
845 if opts.get(b'prune'):
844 if opts.get(b'prune'):
846 prune = {
845 prune = {
847 source[r].node()
846 source[r].node()
848 for r in scmutil.revrange(source, opts.get(b'prune'))
847 for r in logcmdutil.revrange(source, opts.get(b'prune'))
849 }
848 }
850 matchfn = lambda x: tf(x) and x not in prune
849 matchfn = lambda x: tf(x) and x not in prune
851 else:
850 else:
@@ -853,7 +852,7 b' def _dotransplant(ui, repo, *revs, **opt'
853 merges = pycompat.maplist(source.lookup, opts.get(b'merge', ()))
852 merges = pycompat.maplist(source.lookup, opts.get(b'merge', ()))
854 revmap = {}
853 revmap = {}
855 if revs:
854 if revs:
856 for r in scmutil.revrange(source, revs):
855 for r in logcmdutil.revrange(source, revs):
857 revmap[int(r)] = source[r].node()
856 revmap[int(r)] = source[r].node()
858 elif opts.get(b'all') or not merges:
857 elif opts.get(b'all') or not merges:
859 if source != repo:
858 if source != repo:
@@ -29,6 +29,8 b' from . import ('
29 vfs as vfsmod,
29 vfs as vfsmod,
30 )
30 )
31
31
32 from .utils import stringutil
33
32 stringio = util.stringio
34 stringio = util.stringio
33
35
34 # from unzip source code:
36 # from unzip source code:
@@ -196,7 +198,7 b' class tarit(object):'
196 name, pycompat.sysstr(mode + kind), fileobj
198 name, pycompat.sysstr(mode + kind), fileobj
197 )
199 )
198 except tarfile.CompressionError as e:
200 except tarfile.CompressionError as e:
199 raise error.Abort(pycompat.bytestr(e))
201 raise error.Abort(stringutil.forcebytestr(e))
200
202
201 if isinstance(dest, bytes):
203 if isinstance(dest, bytes):
202 self.z = taropen(b'w:', name=dest)
204 self.z = taropen(b'w:', name=dest)
@@ -1,5 +1,5 b''
1 #ifndef _HG_BDIFF_H_
1 #ifndef HG_BDIFF_H
2 #define _HG_BDIFF_H_
2 #define HG_BDIFF_H
3
3
4 #include "compat.h"
4 #include "compat.h"
5
5
@@ -1,5 +1,5 b''
1 #ifndef _HG_BITMANIPULATION_H_
1 #ifndef HG_BITMANIPULATION_H
2 #define _HG_BITMANIPULATION_H_
2 #define HG_BITMANIPULATION_H
3
3
4 #include <string.h>
4 #include <string.h>
5
5
@@ -680,8 +680,25 b' def binarydecode(repo, stream):'
680 return books
680 return books
681
681
682
682
683 def updatefromremote(ui, repo, remotemarks, path, trfunc, explicit=()):
683 def mirroring_remote(ui, repo, remotemarks):
684 ui.debug(b"checking for updated bookmarks\n")
684 """computes the bookmark changes that set the local bookmarks to
685 remotemarks"""
686 changed = []
687 localmarks = repo._bookmarks
688 for (b, id) in pycompat.iteritems(remotemarks):
689 if id != localmarks.get(b, None) and id in repo:
690 changed.append((b, id, ui.debug, _(b"updating bookmark %s\n") % b))
691 for b in localmarks:
692 if b not in remotemarks:
693 changed.append(
694 (b, None, ui.debug, _(b"removing bookmark %s\n") % b)
695 )
696 return changed
697
698
699 def merging_from_remote(ui, repo, remotemarks, path, explicit=()):
700 """computes the bookmark changes that merge remote bookmarks into the
701 local bookmarks, based on comparebookmarks"""
685 localmarks = repo._bookmarks
702 localmarks = repo._bookmarks
686 (
703 (
687 addsrc,
704 addsrc,
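mirroring_remote introduced above reduces to a small pure computation; restated with plain dicts (bookmark name -> node, with known_nodes standing in for the `id in repo` containment check):

    def mirror_changes(localmarks, remotemarks, known_nodes):
        """Sketch of the mirroring computation: adopt every remote value
        whose node exists locally, drop everything the remote lacks."""
        changed = []
        for name, node in remotemarks.items():
            if node != localmarks.get(name) and node in known_nodes:
                changed.append((name, node))  # create or update
        for name in localmarks:
            if name not in remotemarks:
                changed.append((name, None))  # delete
        return changed

    # e.g. local {b'x': n1, b'y': n2} and remote {b'x': n3} (n3 known
    # locally) yields [(b'x', n3), (b'y', None)].
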
@@ -752,6 +769,20 b' def updatefromremote(ui, repo, remotemar'
752 _(b"remote bookmark %s points to locally missing %s\n")
769 _(b"remote bookmark %s points to locally missing %s\n")
753 % (b, hex(scid)[:12])
770 % (b, hex(scid)[:12])
754 )
771 )
772 return changed
773
774
775 def updatefromremote(
776 ui, repo, remotemarks, path, trfunc, explicit=(), mode=None
777 ):
778 if mode == b'ignore':
779 # This should move to a higher level to avoid fetching bookmarks at all
780 return
781 ui.debug(b"checking for updated bookmarks\n")
782 if mode == b'mirror':
783 changed = mirroring_remote(ui, repo, remotemarks)
784 else:
785 changed = merging_from_remote(ui, repo, remotemarks, path, explicit)
755
786
756 if changed:
787 if changed:
757 tr = trfunc()
788 tr = trfunc()
@@ -760,11 +791,14 b' def updatefromremote(ui, repo, remotemar'
760 for b, node, writer, msg in sorted(changed, key=key):
791 for b, node, writer, msg in sorted(changed, key=key):
761 changes.append((b, node))
792 changes.append((b, node))
762 writer(msg)
793 writer(msg)
763 localmarks.applychanges(repo, tr, changes)
794 repo._bookmarks.applychanges(repo, tr, changes)
764
795
765
796
766 def incoming(ui, repo, peer):
797 def incoming(ui, repo, peer, mode=None):
767 """Show bookmarks incoming from other to repo"""
798 """Show bookmarks incoming from other to repo"""
799 if mode == b'ignore':
800 ui.status(_(b"bookmarks exchange disabled with this path\n"))
801 return 0
768 ui.status(_(b"searching for changed bookmarks\n"))
802 ui.status(_(b"searching for changed bookmarks\n"))
769
803
770 with peer.commandexecutor() as e:
804 with peer.commandexecutor() as e:
@@ -777,9 +811,6 b' def incoming(ui, repo, peer):'
777 ).result()
811 ).result()
778 )
812 )
779
813
780 r = comparebookmarks(repo, remotemarks, repo._bookmarks)
781 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r
782
783 incomings = []
814 incomings = []
784 if ui.debugflag:
815 if ui.debugflag:
785 getid = lambda id: id
816 getid = lambda id: id
@@ -795,18 +826,36 b' def incoming(ui, repo, peer):'
795 def add(b, id, st):
826 def add(b, id, st):
796 incomings.append(b" %-25s %s\n" % (b, getid(id)))
827 incomings.append(b" %-25s %s\n" % (b, getid(id)))
797
828
798 for b, scid, dcid in addsrc:
829 if mode == b'mirror':
799 # i18n: "added" refers to a bookmark
830 localmarks = repo._bookmarks
800 add(b, hex(scid), _(b'added'))
831 allmarks = set(remotemarks.keys()) | set(localmarks.keys())
801 for b, scid, dcid in advsrc:
832 for b in sorted(allmarks):
802 # i18n: "advanced" refers to a bookmark
833 loc = localmarks.get(b)
803 add(b, hex(scid), _(b'advanced'))
834 rem = remotemarks.get(b)
804 for b, scid, dcid in diverge:
835 if loc == rem:
805 # i18n: "diverged" refers to a bookmark
836 continue
806 add(b, hex(scid), _(b'diverged'))
837 elif loc is None:
807 for b, scid, dcid in differ:
838 add(b, hex(rem), _(b'added'))
808 # i18n: "changed" refers to a bookmark
839 elif rem is None:
809 add(b, hex(scid), _(b'changed'))
840 add(b, hex(repo.nullid), _(b'removed'))
841 else:
842 add(b, hex(rem), _(b'changed'))
843 else:
844 r = comparebookmarks(repo, remotemarks, repo._bookmarks)
845 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r
846
847 for b, scid, dcid in addsrc:
848 # i18n: "added" refers to a bookmark
849 add(b, hex(scid), _(b'added'))
850 for b, scid, dcid in advsrc:
851 # i18n: "advanced" refers to a bookmark
852 add(b, hex(scid), _(b'advanced'))
853 for b, scid, dcid in diverge:
854 # i18n: "diverged" refers to a bookmark
855 add(b, hex(scid), _(b'diverged'))
856 for b, scid, dcid in differ:
857 # i18n: "changed" refers to a bookmark
858 add(b, hex(scid), _(b'changed'))
810
859
811 if not incomings:
860 if not incomings:
812 ui.status(_(b"no changed bookmarks found\n"))
861 ui.status(_(b"no changed bookmarks found\n"))
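In mirror mode, incoming() no longer routes through comparebookmarks; the union loop above amounts to this pure classification (plain dicts, name -> node):

    def mirror_incoming(localmarks, remotemarks):
        out = []
        for name in sorted(set(localmarks) | set(remotemarks)):
            loc = localmarks.get(name)
            rem = remotemarks.get(name)
            if loc == rem:
                continue  # identical on both sides, not listed
            if loc is None:
                out.append((name, rem, b'added'))
            elif rem is None:
                out.append((name, None, b'removed'))  # shown as nullid
            else:
                out.append((name, rem, b'changed'))
        return out
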
@@ -699,7 +699,9 b' def getremotechanges('
699 },
699 },
700 ).result()
700 ).result()
701
701
702 pullop = exchange.pulloperation(bundlerepo, peer, heads=reponodes)
702 pullop = exchange.pulloperation(
703 bundlerepo, peer, path=None, heads=reponodes
704 )
703 pullop.trmanager = bundletransactionmanager()
705 pullop.trmanager = bundletransactionmanager()
704 exchange._pullapplyphases(pullop, remotephases)
706 exchange._pullapplyphases(pullop, remotephases)
705
707
@@ -264,7 +264,7 b' PyObject *make_file_foldmap(PyObject *se'
264 }
264 }
265
265
266 tuple = (dirstateItemObject *)v;
266 tuple = (dirstateItemObject *)v;
267 if (tuple->state != 'r') {
267 if (tuple->flags & dirstate_flag_wc_tracked) {
268 PyObject *normed;
268 PyObject *normed;
269 if (table != NULL) {
269 if (table != NULL) {
270 normed = _asciitransform(k, table,
270 normed = _asciitransform(k, table,
@@ -161,7 +161,7 b' bail:'
161 return ret;
161 return ret;
162 }
162 }
163
163
164 static int dirs_fromdict(PyObject *dirs, PyObject *source, char skipchar)
164 static int dirs_fromdict(PyObject *dirs, PyObject *source, bool only_tracked)
165 {
165 {
166 PyObject *key, *value;
166 PyObject *key, *value;
167 Py_ssize_t pos = 0;
167 Py_ssize_t pos = 0;
@@ -171,13 +171,14 b' static int dirs_fromdict(PyObject *dirs,'
171 PyErr_SetString(PyExc_TypeError, "expected string key");
171 PyErr_SetString(PyExc_TypeError, "expected string key");
172 return -1;
172 return -1;
173 }
173 }
174 if (skipchar) {
174 if (only_tracked) {
175 if (!dirstate_tuple_check(value)) {
175 if (!dirstate_tuple_check(value)) {
176 PyErr_SetString(PyExc_TypeError,
176 PyErr_SetString(PyExc_TypeError,
177 "expected a dirstate tuple");
177 "expected a dirstate tuple");
178 return -1;
178 return -1;
179 }
179 }
180 if (((dirstateItemObject *)value)->state == skipchar)
180 if (!(((dirstateItemObject *)value)->flags &
181 dirstate_flag_wc_tracked))
181 continue;
182 continue;
182 }
183 }
183
184
@@ -218,15 +219,17 b' static int dirs_fromiter(PyObject *dirs,'
218 * Calculate a refcounted set of directory names for the files in a
219 * Calculate a refcounted set of directory names for the files in a
219 * dirstate.
220 * dirstate.
220 */
221 */
221 static int dirs_init(dirsObject *self, PyObject *args)
222 static int dirs_init(dirsObject *self, PyObject *args, PyObject *kwargs)
222 {
223 {
223 PyObject *dirs = NULL, *source = NULL;
224 PyObject *dirs = NULL, *source = NULL;
224 char skipchar = 0;
225 int only_tracked = 0;
225 int ret = -1;
226 int ret = -1;
227 static char *keywords_name[] = {"map", "only_tracked", NULL};
226
228
227 self->dict = NULL;
229 self->dict = NULL;
228
230
229 if (!PyArg_ParseTuple(args, "|Oc:__init__", &source, &skipchar))
231 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|Oi:__init__",
232 keywords_name, &source, &only_tracked))
230 return -1;
233 return -1;
231
234
232 dirs = PyDict_New();
235 dirs = PyDict_New();
@@ -237,10 +240,10 b' static int dirs_init(dirsObject *self, P'
237 if (source == NULL)
240 if (source == NULL)
238 ret = 0;
241 ret = 0;
239 else if (PyDict_Check(source))
242 else if (PyDict_Check(source))
240 ret = dirs_fromdict(dirs, source, skipchar);
243 ret = dirs_fromdict(dirs, source, (bool)only_tracked);
241 else if (skipchar)
244 else if (only_tracked)
242 PyErr_SetString(PyExc_ValueError,
245 PyErr_SetString(PyExc_ValueError,
243 "skip character is only supported "
246 "`only_tracked` is only supported "
244 "with a dict source");
247 "with a dict source");
245 else
248 else
246 ret = dirs_fromiter(dirs, source);
249 ret = dirs_fromiter(dirs, source);
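The dirs constructor now takes an only_tracked keyword instead of a v1 skip character. A sketch of the new call shape, assuming dmap maps paths to dirstate items (the policy import mirrors how Mercurial selects the C or pure implementation):

    from mercurial import policy

    parsers = policy.importmod('parsers')

    # old: parsers.dirs(dmap, b'r')  -- skip entries whose v1 state was 'r'
    # new: count only entries tracked in the working copy
    tracked_dirs = parsers.dirs(dmap, only_tracked=True)
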
@@ -44,42 +44,98 b' static PyObject *dict_new_presized(PyObj'
44 return _dict_new_presized(expected_size);
44 return _dict_new_presized(expected_size);
45 }
45 }
46
46
47 static inline dirstateItemObject *make_dirstate_item(char state, int mode,
48 int size, int mtime)
49 {
50 dirstateItemObject *t =
51 PyObject_New(dirstateItemObject, &dirstateItemType);
52 if (!t) {
53 return NULL;
54 }
55 t->state = state;
56 t->mode = mode;
57 t->size = size;
58 t->mtime = mtime;
59 return t;
60 }
61
62 static PyObject *dirstate_item_new(PyTypeObject *subtype, PyObject *args,
47 static PyObject *dirstate_item_new(PyTypeObject *subtype, PyObject *args,
63 PyObject *kwds)
48 PyObject *kwds)
64 {
49 {
65 /* We do all the initialization here and not a tp_init function because
50 /* We do all the initialization here and not a tp_init function because
66 * dirstate_item is immutable. */
51 * dirstate_item is immutable. */
67 dirstateItemObject *t;
52 dirstateItemObject *t;
68 char state;
53 int wc_tracked;
69 int size, mode, mtime;
54 int p1_tracked;
70 if (!PyArg_ParseTuple(args, "ciii", &state, &mode, &size, &mtime)) {
55 int p2_info;
56 int has_meaningful_data;
57 int has_meaningful_mtime;
58 int mode;
59 int size;
60 int mtime_s;
61 int mtime_ns;
62 PyObject *parentfiledata;
63 PyObject *fallback_exec;
64 PyObject *fallback_symlink;
65 static char *keywords_name[] = {
66 "wc_tracked", "p1_tracked", "p2_info",
67 "has_meaningful_data", "has_meaningful_mtime", "parentfiledata",
68 "fallback_exec", "fallback_symlink", NULL,
69 };
70 wc_tracked = 0;
71 p1_tracked = 0;
72 p2_info = 0;
73 has_meaningful_mtime = 1;
74 has_meaningful_data = 1;
75 parentfiledata = Py_None;
76 fallback_exec = Py_None;
77 fallback_symlink = Py_None;
78 if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iiiiiOOO", keywords_name,
79 &wc_tracked, &p1_tracked, &p2_info,
80 &has_meaningful_data,
81 &has_meaningful_mtime, &parentfiledata,
82 &fallback_exec, &fallback_symlink)) {
71 return NULL;
83 return NULL;
72 }
84 }
73
74 t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
85 t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
75 if (!t) {
86 if (!t) {
76 return NULL;
87 return NULL;
77 }
88 }
78 t->state = state;
89
79 t->mode = mode;
90 t->flags = 0;
80 t->size = size;
91 if (wc_tracked) {
81 t->mtime = mtime;
92 t->flags |= dirstate_flag_wc_tracked;
93 }
94 if (p1_tracked) {
95 t->flags |= dirstate_flag_p1_tracked;
96 }
97 if (p2_info) {
98 t->flags |= dirstate_flag_p2_info;
99 }
100
101 if (fallback_exec != Py_None) {
102 t->flags |= dirstate_flag_has_fallback_exec;
103 if (PyObject_IsTrue(fallback_exec)) {
104 t->flags |= dirstate_flag_fallback_exec;
105 }
106 }
107 if (fallback_symlink != Py_None) {
108 t->flags |= dirstate_flag_has_fallback_symlink;
109 if (PyObject_IsTrue(fallback_symlink)) {
110 t->flags |= dirstate_flag_fallback_symlink;
111 }
112 }
82
113
114 if (parentfiledata != Py_None) {
115 if (!PyArg_ParseTuple(parentfiledata, "ii(ii)", &mode, &size,
116 &mtime_s, &mtime_ns)) {
117 return NULL;
118 }
119 } else {
120 has_meaningful_data = 0;
121 has_meaningful_mtime = 0;
122 }
123 if (has_meaningful_data) {
124 t->flags |= dirstate_flag_has_meaningful_data;
125 t->mode = mode;
126 t->size = size;
127 } else {
128 t->mode = 0;
129 t->size = 0;
130 }
131 if (has_meaningful_mtime) {
132 t->flags |= dirstate_flag_has_mtime;
133 t->mtime_s = mtime_s;
134 t->mtime_ns = mtime_ns;
135 } else {
136 t->mtime_s = 0;
137 t->mtime_ns = 0;
138 }
83 return (PyObject *)t;
139 return (PyObject *)t;
84 }
140 }
85
141
@@ -88,92 +144,201 b' static void dirstate_item_dealloc(PyObje'
88 PyObject_Del(o);
144 PyObject_Del(o);
89 }
145 }
90
146
91 static Py_ssize_t dirstate_item_length(PyObject *o)
147 static inline bool dirstate_item_c_tracked(dirstateItemObject *self)
148 {
149 return (self->flags & dirstate_flag_wc_tracked);
150 }
151
152 static inline bool dirstate_item_c_any_tracked(dirstateItemObject *self)
92 {
153 {
93 return 4;
154 const int mask = dirstate_flag_wc_tracked | dirstate_flag_p1_tracked |
155 dirstate_flag_p2_info;
156 return (self->flags & mask);
157 }
158
159 static inline bool dirstate_item_c_added(dirstateItemObject *self)
160 {
161 const int mask = (dirstate_flag_wc_tracked | dirstate_flag_p1_tracked |
162 dirstate_flag_p2_info);
163 const int target = dirstate_flag_wc_tracked;
164 return (self->flags & mask) == target;
94 }
165 }
95
166
96 static PyObject *dirstate_item_item(PyObject *o, Py_ssize_t i)
167 static inline bool dirstate_item_c_removed(dirstateItemObject *self)
168 {
169 if (self->flags & dirstate_flag_wc_tracked) {
170 return false;
171 }
172 return (self->flags &
173 (dirstate_flag_p1_tracked | dirstate_flag_p2_info));
174 }
175
176 static inline bool dirstate_item_c_merged(dirstateItemObject *self)
97 {
177 {
98 dirstateItemObject *t = (dirstateItemObject *)o;
178 return ((self->flags & dirstate_flag_wc_tracked) &&
99 switch (i) {
179 (self->flags & dirstate_flag_p1_tracked) &&
100 case 0:
180 (self->flags & dirstate_flag_p2_info));
101 return PyBytes_FromStringAndSize(&t->state, 1);
181 }
102 case 1:
182
103 return PyInt_FromLong(t->mode);
183 static inline bool dirstate_item_c_from_p2(dirstateItemObject *self)
104 case 2:
184 {
105 return PyInt_FromLong(t->size);
185 return ((self->flags & dirstate_flag_wc_tracked) &&
106 case 3:
186 !(self->flags & dirstate_flag_p1_tracked) &&
107 return PyInt_FromLong(t->mtime);
187 (self->flags & dirstate_flag_p2_info));
108 default:
188 }
109 PyErr_SetString(PyExc_IndexError, "index out of range");
189
110 return NULL;
190 static inline char dirstate_item_c_v1_state(dirstateItemObject *self)
191 {
192 if (dirstate_item_c_removed(self)) {
193 return 'r';
194 } else if (dirstate_item_c_merged(self)) {
195 return 'm';
196 } else if (dirstate_item_c_added(self)) {
197 return 'a';
198 } else {
199 return 'n';
111 }
200 }
112 }
201 }
113
202
114 static PySequenceMethods dirstate_item_sq = {
203 static inline bool dirstate_item_c_has_fallback_exec(dirstateItemObject *self)
115 dirstate_item_length, /* sq_length */
204 {
116 0, /* sq_concat */
205 return (bool)(self->flags & dirstate_flag_has_fallback_exec);
117 0, /* sq_repeat */
206 }
118 dirstate_item_item, /* sq_item */
207
119 0, /* sq_ass_item */
208 static inline bool
120 0, /* sq_contains */
209 dirstate_item_c_has_fallback_symlink(dirstateItemObject *self)
121 0, /* sq_inplace_concat */
210 {
122 0 /* sq_inplace_repeat */
211 return (bool)(self->flags & dirstate_flag_has_fallback_symlink);
212 }
213
214 static inline int dirstate_item_c_v1_mode(dirstateItemObject *self)
215 {
216 if (self->flags & dirstate_flag_has_meaningful_data) {
217 return self->mode;
218 } else {
219 return 0;
220 }
221 }
222
223 static inline int dirstate_item_c_v1_size(dirstateItemObject *self)
224 {
225 if (!(self->flags & dirstate_flag_wc_tracked) &&
226 (self->flags & dirstate_flag_p2_info)) {
227 if (self->flags & dirstate_flag_p1_tracked) {
228 return dirstate_v1_nonnormal;
229 } else {
230 return dirstate_v1_from_p2;
231 }
232 } else if (dirstate_item_c_removed(self)) {
233 return 0;
234 } else if (self->flags & dirstate_flag_p2_info) {
235 return dirstate_v1_from_p2;
236 } else if (dirstate_item_c_added(self)) {
237 return dirstate_v1_nonnormal;
238 } else if (self->flags & dirstate_flag_has_meaningful_data) {
239 return self->size;
240 } else {
241 return dirstate_v1_nonnormal;
242 }
243 }
244
245 static inline int dirstate_item_c_v1_mtime(dirstateItemObject *self)
246 {
247 if (dirstate_item_c_removed(self)) {
248 return 0;
249 } else if (!(self->flags & dirstate_flag_has_mtime) ||
250 !(self->flags & dirstate_flag_p1_tracked) ||
251 !(self->flags & dirstate_flag_wc_tracked) ||
252 (self->flags & dirstate_flag_p2_info)) {
253 return ambiguous_time;
254 } else {
255 return self->mtime_s;
256 }
257 }
258
259 static PyObject *dirstate_item_v2_data(dirstateItemObject *self)
260 {
261 int flags = self->flags;
262 int mode = dirstate_item_c_v1_mode(self);
263 #ifdef S_IXUSR
264 /* This is for platforms with an exec bit */
265 if ((mode & S_IXUSR) != 0) {
266 flags |= dirstate_flag_mode_exec_perm;
267 } else {
268 flags &= ~dirstate_flag_mode_exec_perm;
269 }
270 #else
271 flags &= ~dirstate_flag_mode_exec_perm;
272 #endif
273 #ifdef S_ISLNK
274 /* This is for platforms with support for symlinks */
275 if (S_ISLNK(mode)) {
276 flags |= dirstate_flag_mode_is_symlink;
277 } else {
278 flags &= ~dirstate_flag_mode_is_symlink;
279 }
280 #else
281 flags &= ~dirstate_flag_mode_is_symlink;
282 #endif
283 return Py_BuildValue("iiii", flags, self->size, self->mtime_s,
284 self->mtime_ns);
123 };
285 };
124
286
125 static PyObject *dirstate_item_v1_state(dirstateItemObject *self)
287 static PyObject *dirstate_item_v1_state(dirstateItemObject *self)
126 {
288 {
127 return PyBytes_FromStringAndSize(&self->state, 1);
289 char state = dirstate_item_c_v1_state(self);
290 return PyBytes_FromStringAndSize(&state, 1);
128 };
291 };
129
292
130 static PyObject *dirstate_item_v1_mode(dirstateItemObject *self)
293 static PyObject *dirstate_item_v1_mode(dirstateItemObject *self)
131 {
294 {
132 return PyInt_FromLong(self->mode);
295 return PyInt_FromLong(dirstate_item_c_v1_mode(self));
133 };
296 };
134
297
135 static PyObject *dirstate_item_v1_size(dirstateItemObject *self)
298 static PyObject *dirstate_item_v1_size(dirstateItemObject *self)
136 {
299 {
137 return PyInt_FromLong(self->size);
300 return PyInt_FromLong(dirstate_item_c_v1_size(self));
138 };
301 };
139
302
140 static PyObject *dirstate_item_v1_mtime(dirstateItemObject *self)
303 static PyObject *dirstate_item_v1_mtime(dirstateItemObject *self)
141 {
304 {
142 return PyInt_FromLong(self->mtime);
305 return PyInt_FromLong(dirstate_item_c_v1_mtime(self));
143 };
306 };
144
307
145 static PyObject *dm_nonnormal(dirstateItemObject *self)
308 static PyObject *dirstate_item_need_delay(dirstateItemObject *self,
309 PyObject *now)
146 {
310 {
147 if (self->state != 'n' || self->mtime == ambiguous_time) {
311 int now_s;
148 Py_RETURN_TRUE;
312 int now_ns;
149 } else {
313 if (!PyArg_ParseTuple(now, "ii", &now_s, &now_ns)) {
150 Py_RETURN_FALSE;
314 return NULL;
151 }
315 }
152 };
316 if (dirstate_item_c_v1_state(self) == 'n' && self->mtime_s == now_s) {
153 static PyObject *dm_otherparent(dirstateItemObject *self)
154 {
155 if (self->size == dirstate_v1_from_p2) {
156 Py_RETURN_TRUE;
317 Py_RETURN_TRUE;
157 } else {
318 } else {
158 Py_RETURN_FALSE;
319 Py_RETURN_FALSE;
159 }
320 }
160 };
321 };
161
322
162 static PyObject *dirstate_item_need_delay(dirstateItemObject *self,
323 static PyObject *dirstate_item_mtime_likely_equal_to(dirstateItemObject *self,
163 PyObject *value)
324 PyObject *other)
164 {
325 {
165 long now;
326 int other_s;
166 if (!pylong_to_long(value, &now)) {
327 int other_ns;
328 if (!PyArg_ParseTuple(other, "ii", &other_s, &other_ns)) {
167 return NULL;
329 return NULL;
168 }
330 }
169 if (self->state == 'n' && self->mtime == now) {
331 if ((self->flags & dirstate_flag_has_mtime) &&
332 self->mtime_s == other_s &&
333 (self->mtime_ns == other_ns || self->mtime_ns == 0 ||
334 other_ns == 0)) {
170 Py_RETURN_TRUE;
335 Py_RETURN_TRUE;
171 } else {
336 } else {
172 Py_RETURN_FALSE;
337 Py_RETURN_FALSE;
173 }
338 }
174 };
339 };
175
340
176 /* This will never change since it's bound to V1, unlike `make_dirstate_item`
341 /* This will never change since it's bound to V1
177 */
342 */
178 static inline dirstateItemObject *
343 static inline dirstateItemObject *
179 dirstate_item_from_v1_data(char state, int mode, int size, int mtime)
344 dirstate_item_from_v1_data(char state, int mode, int size, int mtime)
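dirstate_item_c_v1_state above derives the legacy one-letter state from the three tracking flags; the same decision table in pure Python:

    def v1_state(wc_tracked, p1_tracked, p2_info):
        if not wc_tracked and (p1_tracked or p2_info):
            return b'r'  # removed
        if wc_tracked and p1_tracked and p2_info:
            return b'm'  # merged
        if wc_tracked and not p1_tracked and not p2_info:
            return b'a'  # added
        return b'n'      # normal is the fallback, as in the C code
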
@@ -183,10 +348,56 b' dirstate_item_from_v1_data(char state, i'
183 if (!t) {
348 if (!t) {
184 return NULL;
349 return NULL;
185 }
350 }
186 t->state = state;
351 t->flags = 0;
187 t->mode = mode;
352 t->mode = 0;
188 t->size = size;
353 t->size = 0;
189 t->mtime = mtime;
354 t->mtime_s = 0;
355 t->mtime_ns = 0;
356
357 if (state == 'm') {
358 t->flags = (dirstate_flag_wc_tracked |
359 dirstate_flag_p1_tracked | dirstate_flag_p2_info);
360 } else if (state == 'a') {
361 t->flags = dirstate_flag_wc_tracked;
362 } else if (state == 'r') {
363 if (size == dirstate_v1_nonnormal) {
364 t->flags =
365 dirstate_flag_p1_tracked | dirstate_flag_p2_info;
366 } else if (size == dirstate_v1_from_p2) {
367 t->flags = dirstate_flag_p2_info;
368 } else {
369 t->flags = dirstate_flag_p1_tracked;
370 }
371 } else if (state == 'n') {
372 if (size == dirstate_v1_from_p2) {
373 t->flags =
374 dirstate_flag_wc_tracked | dirstate_flag_p2_info;
375 } else if (size == dirstate_v1_nonnormal) {
376 t->flags =
377 dirstate_flag_wc_tracked | dirstate_flag_p1_tracked;
378 } else if (mtime == ambiguous_time) {
379 t->flags = (dirstate_flag_wc_tracked |
380 dirstate_flag_p1_tracked |
381 dirstate_flag_has_meaningful_data);
382 t->mode = mode;
383 t->size = size;
384 } else {
385 t->flags = (dirstate_flag_wc_tracked |
386 dirstate_flag_p1_tracked |
387 dirstate_flag_has_meaningful_data |
388 dirstate_flag_has_mtime);
389 t->mode = mode;
390 t->size = size;
391 t->mtime_s = mtime;
392 }
393 } else {
394 PyErr_Format(PyExc_RuntimeError,
395 "unknown state: `%c` (%d, %d, %d)", state, mode,
396 size, mtime, NULL);
397 Py_DECREF(t);
398 return NULL;
399 }
400
190 return t;
401 return t;
191 }
402 }
192
403
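The v1 -> flags translation above, restated as a pure-Python table for reference (a sketch: the bit values are copied from the header hunk near the end of this changeset, and the -1/-2 sentinels are assumptions based on the dirstate_v1_nonnormal, dirstate_v1_from_p2 and ambiguous_time names):

    WC_TRACKED, P1_TRACKED, P2_INFO = 1 << 0, 1 << 1, 1 << 2
    HAS_MEANINGFUL_DATA, HAS_MTIME = 1 << 10, 1 << 11
    NONNORMAL, FROM_P2, AMBIGUOUS_TIME = -1, -2, -1

    def v2_flags_from_v1(state, size, mtime):
        if state == b'm':
            return WC_TRACKED | P1_TRACKED | P2_INFO
        if state == b'a':
            return WC_TRACKED
        if state == b'r':
            if size == NONNORMAL:
                return P1_TRACKED | P2_INFO
            if size == FROM_P2:
                return P2_INFO
            return P1_TRACKED
        if state == b'n':
            if size == FROM_P2:
                return WC_TRACKED | P2_INFO
            if size == NONNORMAL:
                return WC_TRACKED | P1_TRACKED
            if mtime == AMBIGUOUS_TIME:
                return WC_TRACKED | P1_TRACKED | HAS_MEANINGFUL_DATA
            return (WC_TRACKED | P1_TRACKED
                    | HAS_MEANINGFUL_DATA | HAS_MTIME)
        raise ValueError(state)
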
@@ -196,22 +407,52 b' static PyObject *dirstate_item_from_v1_m'
196 {
407 {
197 /* We do all the initialization here and not a tp_init function because
408 /* We do all the initialization here and not a tp_init function because
198 * dirstate_item is immutable. */
409 * dirstate_item is immutable. */
199 dirstateItemObject *t;
200 char state;
410 char state;
201 int size, mode, mtime;
411 int size, mode, mtime;
202 if (!PyArg_ParseTuple(args, "ciii", &state, &mode, &size, &mtime)) {
412 if (!PyArg_ParseTuple(args, "ciii", &state, &mode, &size, &mtime)) {
203 return NULL;
413 return NULL;
204 }
414 }
415 return (PyObject *)dirstate_item_from_v1_data(state, mode, size, mtime);
416 };
205
417
206 t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
418 static PyObject *dirstate_item_from_v2_meth(PyTypeObject *subtype,
419 PyObject *args)
420 {
421 dirstateItemObject *t =
422 PyObject_New(dirstateItemObject, &dirstateItemType);
207 if (!t) {
423 if (!t) {
208 return NULL;
424 return NULL;
209 }
425 }
210 t->state = state;
426 if (!PyArg_ParseTuple(args, "iiii", &t->flags, &t->size, &t->mtime_s,
211 t->mode = mode;
427 &t->mtime_ns)) {
212 t->size = size;
428 return NULL;
213 t->mtime = mtime;
429 }
214
430 if (t->flags & dirstate_flag_expected_state_is_modified) {
431 t->flags &= ~(dirstate_flag_expected_state_is_modified |
432 dirstate_flag_has_meaningful_data |
433 dirstate_flag_has_mtime);
434 }
435 if (t->flags & dirstate_flag_mtime_second_ambiguous) {
436 /* The current code is not able to do the more subtle comparison
437 * that the MTIME_SECOND_AMBIGUOUS flag requires. So we ignore the
438 * mtime */
439 t->flags &= ~(dirstate_flag_mtime_second_ambiguous |
440 dirstate_flag_has_meaningful_data |
441 dirstate_flag_has_mtime);
442 }
443 t->mode = 0;
444 if (t->flags & dirstate_flag_has_meaningful_data) {
445 if (t->flags & dirstate_flag_mode_exec_perm) {
446 t->mode = 0755;
447 } else {
448 t->mode = 0644;
449 }
450 if (t->flags & dirstate_flag_mode_is_symlink) {
451 t->mode |= S_IFLNK;
452 } else {
453 t->mode |= S_IFREG;
454 }
455 }
215 return (PyObject *)t;
456 return (PyObject *)t;
216 };
457 };
217
458
@@ -219,11 +460,62 b' static PyObject *dirstate_item_from_v1_m'
219 to make sure it is correct. */
460 to make sure it is correct. */
220 static PyObject *dirstate_item_set_possibly_dirty(dirstateItemObject *self)
461 static PyObject *dirstate_item_set_possibly_dirty(dirstateItemObject *self)
221 {
462 {
222 self->mtime = ambiguous_time;
463 self->flags &= ~dirstate_flag_has_mtime;
464 Py_RETURN_NONE;
465 }
466
467 /* See docstring of the python implementation for details */
468 static PyObject *dirstate_item_set_clean(dirstateItemObject *self,
469 PyObject *args)
470 {
471 int size, mode, mtime_s, mtime_ns;
472 if (!PyArg_ParseTuple(args, "ii(ii)", &mode, &size, &mtime_s,
473 &mtime_ns)) {
474 return NULL;
475 }
476 self->flags = dirstate_flag_wc_tracked | dirstate_flag_p1_tracked |
477 dirstate_flag_has_meaningful_data |
478 dirstate_flag_has_mtime;
479 self->mode = mode;
480 self->size = size;
481 self->mtime_s = mtime_s;
482 self->mtime_ns = mtime_ns;
223 Py_RETURN_NONE;
483 Py_RETURN_NONE;
224 }
484 }
225
485
486 static PyObject *dirstate_item_set_tracked(dirstateItemObject *self)
487 {
488 self->flags |= dirstate_flag_wc_tracked;
489 self->flags &= ~dirstate_flag_has_mtime;
490 Py_RETURN_NONE;
491 }
492
493 static PyObject *dirstate_item_set_untracked(dirstateItemObject *self)
494 {
495 self->flags &= ~dirstate_flag_wc_tracked;
496 self->mode = 0;
497 self->size = 0;
498 self->mtime_s = 0;
499 self->mtime_ns = 0;
500 Py_RETURN_NONE;
501 }
502
503 static PyObject *dirstate_item_drop_merge_data(dirstateItemObject *self)
504 {
505 if (self->flags & dirstate_flag_p2_info) {
506 self->flags &= ~(dirstate_flag_p2_info |
507 dirstate_flag_has_meaningful_data |
508 dirstate_flag_has_mtime);
509 self->mode = 0;
510 self->size = 0;
511 self->mtime_s = 0;
512 self->mtime_ns = 0;
513 }
514 Py_RETURN_NONE;
515 }
226 static PyMethodDef dirstate_item_methods[] = {
516 static PyMethodDef dirstate_item_methods[] = {
517 {"v2_data", (PyCFunction)dirstate_item_v2_data, METH_NOARGS,
518 "return data suitable for v2 serialization"},
227 {"v1_state", (PyCFunction)dirstate_item_v1_state, METH_NOARGS,
519 {"v1_state", (PyCFunction)dirstate_item_v1_state, METH_NOARGS,
228 "return a \"state\" suitable for v1 serialization"},
520 "return a \"state\" suitable for v1 serialization"},
229 {"v1_mode", (PyCFunction)dirstate_item_v1_mode, METH_NOARGS,
521 {"v1_mode", (PyCFunction)dirstate_item_v1_mode, METH_NOARGS,
@@ -234,40 +526,134 b' static PyMethodDef dirstate_item_methods'
234 "return a \"mtime\" suitable for v1 serialization"},
526 "return a \"mtime\" suitable for v1 serialization"},
235 {"need_delay", (PyCFunction)dirstate_item_need_delay, METH_O,
527 {"need_delay", (PyCFunction)dirstate_item_need_delay, METH_O,
236 "True if the stored mtime would be ambiguous with the current time"},
528 "True if the stored mtime would be ambiguous with the current time"},
237 {"from_v1_data", (PyCFunction)dirstate_item_from_v1_meth, METH_O,
529 {"mtime_likely_equal_to", (PyCFunction)dirstate_item_mtime_likely_equal_to,
238 "build a new DirstateItem object from V1 data"},
530 METH_O, "True if the stored mtime is likely equal to the given mtime"},
531 {"from_v1_data", (PyCFunction)dirstate_item_from_v1_meth,
532 METH_VARARGS | METH_CLASS, "build a new DirstateItem object from V1 data"},
533 {"from_v2_data", (PyCFunction)dirstate_item_from_v2_meth,
534 METH_VARARGS | METH_CLASS, "build a new DirstateItem object from V2 data"},
239 {"set_possibly_dirty", (PyCFunction)dirstate_item_set_possibly_dirty,
535 {"set_possibly_dirty", (PyCFunction)dirstate_item_set_possibly_dirty,
240 METH_NOARGS, "mark a file as \"possibly dirty\""},
536 METH_NOARGS, "mark a file as \"possibly dirty\""},
241 {"dm_nonnormal", (PyCFunction)dm_nonnormal, METH_NOARGS,
537 {"set_clean", (PyCFunction)dirstate_item_set_clean, METH_VARARGS,
242 "True is the entry is non-normal in the dirstatemap sense"},
538 "mark a file as \"clean\""},
243 {"dm_otherparent", (PyCFunction)dm_otherparent, METH_NOARGS,
539 {"set_tracked", (PyCFunction)dirstate_item_set_tracked, METH_NOARGS,
244 "True is the entry is `otherparent` in the dirstatemap sense"},
540 "mark a file as \"tracked\""},
541 {"set_untracked", (PyCFunction)dirstate_item_set_untracked, METH_NOARGS,
542 "mark a file as \"untracked\""},
543 {"drop_merge_data", (PyCFunction)dirstate_item_drop_merge_data, METH_NOARGS,
544 "remove all \"merge-only\" from a DirstateItem"},
245 {NULL} /* Sentinel */
545 {NULL} /* Sentinel */
246 };
546 };
247
547
248 static PyObject *dirstate_item_get_mode(dirstateItemObject *self)
548 static PyObject *dirstate_item_get_mode(dirstateItemObject *self)
249 {
549 {
250 return PyInt_FromLong(self->mode);
550 return PyInt_FromLong(dirstate_item_c_v1_mode(self));
251 };
551 };
252
552
253 static PyObject *dirstate_item_get_size(dirstateItemObject *self)
553 static PyObject *dirstate_item_get_size(dirstateItemObject *self)
254 {
554 {
255 return PyInt_FromLong(self->size);
555 return PyInt_FromLong(dirstate_item_c_v1_size(self));
256 };
556 };
257
557
258 static PyObject *dirstate_item_get_mtime(dirstateItemObject *self)
558 static PyObject *dirstate_item_get_mtime(dirstateItemObject *self)
259 {
559 {
260 return PyInt_FromLong(self->mtime);
560 return PyInt_FromLong(dirstate_item_c_v1_mtime(self));
261 };
561 };
262
562
263 static PyObject *dirstate_item_get_state(dirstateItemObject *self)
563 static PyObject *dirstate_item_get_state(dirstateItemObject *self)
264 {
564 {
265 return PyBytes_FromStringAndSize(&self->state, 1);
565 char state = dirstate_item_c_v1_state(self);
566 return PyBytes_FromStringAndSize(&state, 1);
567 };
568
569 static PyObject *dirstate_item_get_has_fallback_exec(dirstateItemObject *self)
570 {
571 if (dirstate_item_c_has_fallback_exec(self)) {
572 Py_RETURN_TRUE;
573 } else {
574 Py_RETURN_FALSE;
575 }
576 };
577
578 static PyObject *dirstate_item_get_fallback_exec(dirstateItemObject *self)
579 {
580 if (dirstate_item_c_has_fallback_exec(self)) {
581 if (self->flags & dirstate_flag_fallback_exec) {
582 Py_RETURN_TRUE;
583 } else {
584 Py_RETURN_FALSE;
585 }
586 } else {
587 Py_RETURN_NONE;
588 }
589 };
590
591 static int dirstate_item_set_fallback_exec(dirstateItemObject *self,
592 PyObject *value)
593 {
594 if ((value == Py_None) || (value == NULL)) {
595 self->flags &= ~dirstate_flag_has_fallback_exec;
596 } else {
597 self->flags |= dirstate_flag_has_fallback_exec;
598 if (PyObject_IsTrue(value)) {
599 self->flags |= dirstate_flag_fallback_exec;
600 } else {
601 self->flags &= ~dirstate_flag_fallback_exec;
602 }
603 }
604 return 0;
605 };
606
607 static PyObject *
608 dirstate_item_get_has_fallback_symlink(dirstateItemObject *self)
609 {
610 if (dirstate_item_c_has_fallback_symlink(self)) {
611 Py_RETURN_TRUE;
612 } else {
613 Py_RETURN_FALSE;
614 }
615 };
616
617 static PyObject *dirstate_item_get_fallback_symlink(dirstateItemObject *self)
618 {
619 if (dirstate_item_c_has_fallback_symlink(self)) {
620 if (self->flags & dirstate_flag_fallback_symlink) {
621 Py_RETURN_TRUE;
622 } else {
623 Py_RETURN_FALSE;
624 }
625 } else {
626 Py_RETURN_NONE;
627 }
628 };
629
630 static int dirstate_item_set_fallback_symlink(dirstateItemObject *self,
631 PyObject *value)
632 {
633 if ((value == Py_None) || (value == NULL)) {
634 self->flags &= ~dirstate_flag_has_fallback_symlink;
635 } else {
636 self->flags |= dirstate_flag_has_fallback_symlink;
637 if (PyObject_IsTrue(value)) {
638 self->flags |= dirstate_flag_fallback_symlink;
639 } else {
640 self->flags &= ~dirstate_flag_fallback_symlink;
641 }
642 }
643 return 0;
266 };
644 };
267
645
268 static PyObject *dirstate_item_get_tracked(dirstateItemObject *self)
646 static PyObject *dirstate_item_get_tracked(dirstateItemObject *self)
269 {
647 {
270 if (self->state == 'a' || self->state == 'm' || self->state == 'n') {
648 if (dirstate_item_c_tracked(self)) {
649 Py_RETURN_TRUE;
650 } else {
651 Py_RETURN_FALSE;
652 }
653 };
654 static PyObject *dirstate_item_get_p1_tracked(dirstateItemObject *self)
655 {
656 if (self->flags & dirstate_flag_p1_tracked) {
271 Py_RETURN_TRUE;
657 Py_RETURN_TRUE;
272 } else {
658 } else {
273 Py_RETURN_FALSE;
659 Py_RETURN_FALSE;
@@ -276,7 +662,17 b' static PyObject *dirstate_item_get_track'
276
662
277 static PyObject *dirstate_item_get_added(dirstateItemObject *self)
663 static PyObject *dirstate_item_get_added(dirstateItemObject *self)
278 {
664 {
279 if (self->state == 'a') {
665 if (dirstate_item_c_added(self)) {
666 Py_RETURN_TRUE;
667 } else {
668 Py_RETURN_FALSE;
669 }
670 };
671
672 static PyObject *dirstate_item_get_p2_info(dirstateItemObject *self)
673 {
674 if (self->flags & dirstate_flag_wc_tracked &&
675 self->flags & dirstate_flag_p2_info) {
280 Py_RETURN_TRUE;
676 Py_RETURN_TRUE;
281 } else {
677 } else {
282 Py_RETURN_FALSE;
678 Py_RETURN_FALSE;
@@ -285,16 +681,7 b' static PyObject *dirstate_item_get_added'
285
681
286 static PyObject *dirstate_item_get_merged(dirstateItemObject *self)
682 static PyObject *dirstate_item_get_merged(dirstateItemObject *self)
287 {
683 {
288 if (self->state == 'm') {
684 if (dirstate_item_c_merged(self)) {
289 Py_RETURN_TRUE;
290 } else {
291 Py_RETURN_FALSE;
292 }
293 };
294
295 static PyObject *dirstate_item_get_merged_removed(dirstateItemObject *self)
296 {
297 if (self->state == 'r' && self->size == dirstate_v1_nonnormal) {
298 Py_RETURN_TRUE;
685 Py_RETURN_TRUE;
299 } else {
686 } else {
300 Py_RETURN_FALSE;
687 Py_RETURN_FALSE;
@@ -303,16 +690,29 b' static PyObject *dirstate_item_get_merge'
303
690
304 static PyObject *dirstate_item_get_from_p2(dirstateItemObject *self)
691 static PyObject *dirstate_item_get_from_p2(dirstateItemObject *self)
305 {
692 {
306 if (self->state == 'n' && self->size == dirstate_v1_from_p2) {
693 if (dirstate_item_c_from_p2(self)) {
307 Py_RETURN_TRUE;
694 Py_RETURN_TRUE;
308 } else {
695 } else {
309 Py_RETURN_FALSE;
696 Py_RETURN_FALSE;
310 }
697 }
311 };
698 };
312
699
313 static PyObject *dirstate_item_get_from_p2_removed(dirstateItemObject *self)
700 static PyObject *dirstate_item_get_maybe_clean(dirstateItemObject *self)
314 {
701 {
315 if (self->state == 'r' && self->size == dirstate_v1_from_p2) {
702 if (!(self->flags & dirstate_flag_wc_tracked)) {
703 Py_RETURN_FALSE;
704 } else if (!(self->flags & dirstate_flag_p1_tracked)) {
705 Py_RETURN_FALSE;
706 } else if (self->flags & dirstate_flag_p2_info) {
707 Py_RETURN_FALSE;
708 } else {
709 Py_RETURN_TRUE;
710 }
711 };
712
713 static PyObject *dirstate_item_get_any_tracked(dirstateItemObject *self)
714 {
715 if (dirstate_item_c_any_tracked(self)) {
316 Py_RETURN_TRUE;
716 Py_RETURN_TRUE;
317 } else {
717 } else {
318 Py_RETURN_FALSE;
718 Py_RETURN_FALSE;
@@ -321,7 +721,7 b' static PyObject *dirstate_item_get_from_'
321
721
322 static PyObject *dirstate_item_get_removed(dirstateItemObject *self)
722 static PyObject *dirstate_item_get_removed(dirstateItemObject *self)
323 {
723 {
324 if (self->state == 'r') {
724 if (dirstate_item_c_removed(self)) {
325 Py_RETURN_TRUE;
725 Py_RETURN_TRUE;
326 } else {
726 } else {
327 Py_RETURN_FALSE;
727 Py_RETURN_FALSE;
@@ -333,14 +733,25 b' static PyGetSetDef dirstate_item_getset['
333 {"size", (getter)dirstate_item_get_size, NULL, "size", NULL},
733 {"size", (getter)dirstate_item_get_size, NULL, "size", NULL},
334 {"mtime", (getter)dirstate_item_get_mtime, NULL, "mtime", NULL},
734 {"mtime", (getter)dirstate_item_get_mtime, NULL, "mtime", NULL},
335 {"state", (getter)dirstate_item_get_state, NULL, "state", NULL},
735 {"state", (getter)dirstate_item_get_state, NULL, "state", NULL},
736 {"has_fallback_exec", (getter)dirstate_item_get_has_fallback_exec, NULL,
737 "has_fallback_exec", NULL},
738 {"fallback_exec", (getter)dirstate_item_get_fallback_exec,
739 (setter)dirstate_item_set_fallback_exec, "fallback_exec", NULL},
740 {"has_fallback_symlink", (getter)dirstate_item_get_has_fallback_symlink,
741 NULL, "has_fallback_symlink", NULL},
742 {"fallback_symlink", (getter)dirstate_item_get_fallback_symlink,
743 (setter)dirstate_item_set_fallback_symlink, "fallback_symlink", NULL},
336 {"tracked", (getter)dirstate_item_get_tracked, NULL, "tracked", NULL},
744 {"tracked", (getter)dirstate_item_get_tracked, NULL, "tracked", NULL},
745 {"p1_tracked", (getter)dirstate_item_get_p1_tracked, NULL, "p1_tracked",
746 NULL},
337 {"added", (getter)dirstate_item_get_added, NULL, "added", NULL},
747 {"added", (getter)dirstate_item_get_added, NULL, "added", NULL},
338 {"merged_removed", (getter)dirstate_item_get_merged_removed, NULL,
748 {"p2_info", (getter)dirstate_item_get_p2_info, NULL, "p2_info", NULL},
339 "merged_removed", NULL},
340 {"merged", (getter)dirstate_item_get_merged, NULL, "merged", NULL},
749 {"merged", (getter)dirstate_item_get_merged, NULL, "merged", NULL},
341 {"from_p2_removed", (getter)dirstate_item_get_from_p2_removed, NULL,
342 "from_p2_removed", NULL},
343 {"from_p2", (getter)dirstate_item_get_from_p2, NULL, "from_p2", NULL},
750 {"from_p2", (getter)dirstate_item_get_from_p2, NULL, "from_p2", NULL},
751 {"maybe_clean", (getter)dirstate_item_get_maybe_clean, NULL, "maybe_clean",
752 NULL},
753 {"any_tracked", (getter)dirstate_item_get_any_tracked, NULL, "any_tracked",
754 NULL},
344 {"removed", (getter)dirstate_item_get_removed, NULL, "removed", NULL},
755 {"removed", (getter)dirstate_item_get_removed, NULL, "removed", NULL},
345 {NULL} /* Sentinel */
756 {NULL} /* Sentinel */
346 };
757 };
@@ -357,7 +768,7 b' PyTypeObject dirstateItemType = {'
357 0, /* tp_compare */
768 0, /* tp_compare */
358 0, /* tp_repr */
769 0, /* tp_repr */
359 0, /* tp_as_number */
770 0, /* tp_as_number */
360 &dirstate_item_sq, /* tp_as_sequence */
771 0, /* tp_as_sequence */
361 0, /* tp_as_mapping */
772 0, /* tp_as_mapping */
362 0, /* tp_hash */
773 0, /* tp_hash */
363 0, /* tp_call */
774 0, /* tp_call */
@@ -441,6 +852,8 b' static PyObject *parse_dirstate(PyObject'
441
852
442 entry = (PyObject *)dirstate_item_from_v1_data(state, mode,
853 entry = (PyObject *)dirstate_item_from_v1_data(state, mode,
443 size, mtime);
854 size, mtime);
855 if (!entry)
856 goto quit;
444 cpos = memchr(cur, 0, flen);
857 cpos = memchr(cur, 0, flen);
445 if (cpos) {
858 if (cpos) {
446 fname = PyBytes_FromStringAndSize(cur, cpos - cur);
859 fname = PyBytes_FromStringAndSize(cur, cpos - cur);
@@ -476,68 +889,6 b' quit:'
476 }
889 }
477
890
478 /*
891 /*
479 * Build a set of non-normal and other parent entries from the dirstate dmap
480 */
481 static PyObject *nonnormalotherparententries(PyObject *self, PyObject *args)
482 {
483 PyObject *dmap, *fname, *v;
484 PyObject *nonnset = NULL, *otherpset = NULL, *result = NULL;
485 Py_ssize_t pos;
486
487 if (!PyArg_ParseTuple(args, "O!:nonnormalentries", &PyDict_Type,
488 &dmap)) {
489 goto bail;
490 }
491
492 nonnset = PySet_New(NULL);
493 if (nonnset == NULL) {
494 goto bail;
495 }
496
497 otherpset = PySet_New(NULL);
498 if (otherpset == NULL) {
499 goto bail;
500 }
501
502 pos = 0;
503 while (PyDict_Next(dmap, &pos, &fname, &v)) {
504 dirstateItemObject *t;
505 if (!dirstate_tuple_check(v)) {
506 PyErr_SetString(PyExc_TypeError,
507 "expected a dirstate tuple");
508 goto bail;
509 }
510 t = (dirstateItemObject *)v;
511
512 if (t->state == 'n' && t->size == -2) {
513 if (PySet_Add(otherpset, fname) == -1) {
514 goto bail;
515 }
516 }
517
518 if (t->state == 'n' && t->mtime != -1) {
519 continue;
520 }
521 if (PySet_Add(nonnset, fname) == -1) {
522 goto bail;
523 }
524 }
525
526 result = Py_BuildValue("(OO)", nonnset, otherpset);
527 if (result == NULL) {
528 goto bail;
529 }
530 Py_DECREF(nonnset);
531 Py_DECREF(otherpset);
532 return result;
533 bail:
534 Py_XDECREF(nonnset);
535 Py_XDECREF(otherpset);
536 Py_XDECREF(result);
537 return NULL;
538 }
539
540 /*
541 * Efficiently pack a dirstate object into its on-disk format.
892 * Efficiently pack a dirstate object into its on-disk format.
542 */
893 */
543 static PyObject *pack_dirstate(PyObject *self, PyObject *args)
894 static PyObject *pack_dirstate(PyObject *self, PyObject *args)
@@ -547,11 +898,12 b' static PyObject *pack_dirstate(PyObject '
547 Py_ssize_t nbytes, pos, l;
898 Py_ssize_t nbytes, pos, l;
548 PyObject *k, *v = NULL, *pn;
899 PyObject *k, *v = NULL, *pn;
549 char *p, *s;
900 char *p, *s;
550 int now;
901 int now_s;
902 int now_ns;
551
903
552 if (!PyArg_ParseTuple(args, "O!O!O!i:pack_dirstate", &PyDict_Type, &map,
904 if (!PyArg_ParseTuple(args, "O!O!O!(ii):pack_dirstate", &PyDict_Type,
553 &PyDict_Type, &copymap, &PyTuple_Type, &pl,
905 &map, &PyDict_Type, &copymap, &PyTuple_Type, &pl,
554 &now)) {
906 &now_s, &now_ns)) {
555 return NULL;
907 return NULL;
556 }
908 }
557
909
@@ -616,15 +968,15 b' static PyObject *pack_dirstate(PyObject '
616 }
968 }
617 tuple = (dirstateItemObject *)v;
969 tuple = (dirstateItemObject *)v;
618
970
619 state = tuple->state;
971 state = dirstate_item_c_v1_state(tuple);
620 mode = tuple->mode;
972 mode = dirstate_item_c_v1_mode(tuple);
621 size = tuple->size;
973 size = dirstate_item_c_v1_size(tuple);
622 mtime = tuple->mtime;
974 mtime = dirstate_item_c_v1_mtime(tuple);
623 if (state == 'n' && mtime == now) {
975 if (state == 'n' && tuple->mtime_s == now_s) {
624 /* See pure/parsers.py:pack_dirstate for why we do
976 /* See pure/parsers.py:pack_dirstate for why we do
625 * this. */
977 * this. */
626 mtime = -1;
978 mtime = -1;
627 mtime_unset = (PyObject *)make_dirstate_item(
979 mtime_unset = (PyObject *)dirstate_item_from_v1_data(
628 state, mode, size, mtime);
980 state, mode, size, mtime);
629 if (!mtime_unset) {
981 if (!mtime_unset) {
630 goto bail;
982 goto bail;
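pack_dirstate still neutralizes any normal entry whose recorded second equals the second in which the dirstate is written: the file could be modified again within that same second without changing its stat data, so the entry goes to disk with an ambiguous mtime and is re-checked by the next status. The idea in a pure sketch (-1 as the assumed on-disk ambiguous marker, matching the mtime = -1 above):

    def mtime_for_disk(state, mtime_s, now_s):
        # an mtime recorded in the very second we are writing cannot be
        # trusted later; force a re-stat by storing the ambiguous value
        if state == b'n' and mtime_s == now_s:
            return -1
        return mtime_s
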
@@ -869,9 +1221,6 b' PyObject *parse_index2(PyObject *self, P'
869
1221
870 static PyMethodDef methods[] = {
1222 static PyMethodDef methods[] = {
871 {"pack_dirstate", pack_dirstate, METH_VARARGS, "pack a dirstate\n"},
1223 {"pack_dirstate", pack_dirstate, METH_VARARGS, "pack a dirstate\n"},
872 {"nonnormalotherparententries", nonnormalotherparententries, METH_VARARGS,
873 "create a set containing non-normal and other parent entries of given "
874 "dirstate\n"},
875 {"parse_dirstate", parse_dirstate, METH_VARARGS, "parse a dirstate\n"},
1224 {"parse_dirstate", parse_dirstate, METH_VARARGS, "parse a dirstate\n"},
876 {"parse_index2", (PyCFunction)parse_index2, METH_VARARGS | METH_KEYWORDS,
1225 {"parse_index2", (PyCFunction)parse_index2, METH_VARARGS | METH_KEYWORDS,
877 "parse a revlog index\n"},
1226 "parse a revlog index\n"},
@@ -899,7 +1248,6 b' static const int version = 20;'
899
1248
900 static void module_init(PyObject *mod)
1249 static void module_init(PyObject *mod)
901 {
1250 {
902 PyObject *capsule = NULL;
903 PyModule_AddIntConstant(mod, "version", version);
1251 PyModule_AddIntConstant(mod, "version", version);
904
1252
905 /* This module constant has two purposes. First, it lets us unit test
1253 /* This module constant has two purposes. First, it lets us unit test
@@ -916,12 +1264,6 b' static void module_init(PyObject *mod)'
916 manifest_module_init(mod);
1264 manifest_module_init(mod);
917 revlog_module_init(mod);
1265 revlog_module_init(mod);
918
1266
919 capsule = PyCapsule_New(
920 make_dirstate_item,
921 "mercurial.cext.parsers.make_dirstate_item_CAPI", NULL);
922 if (capsule != NULL)
923 PyModule_AddObject(mod, "make_dirstate_item_CAPI", capsule);
924
925 if (PyType_Ready(&dirstateItemType) < 0) {
1267 if (PyType_Ready(&dirstateItemType) < 0) {
926 return;
1268 return;
927 }
1269 }
@@ -24,13 +24,31 b''
24 /* clang-format off */
24 /* clang-format off */
25 typedef struct {
25 typedef struct {
26 PyObject_HEAD
26 PyObject_HEAD
27 char state;
27 int flags;
28 int mode;
28 int mode;
29 int size;
29 int size;
30 int mtime;
30 int mtime_s;
31 int mtime_ns;
31 } dirstateItemObject;
32 } dirstateItemObject;
32 /* clang-format on */
33 /* clang-format on */
33
34
35 static const int dirstate_flag_wc_tracked = 1 << 0;
36 static const int dirstate_flag_p1_tracked = 1 << 1;
37 static const int dirstate_flag_p2_info = 1 << 2;
38 static const int dirstate_flag_mode_exec_perm = 1 << 3;
39 static const int dirstate_flag_mode_is_symlink = 1 << 4;
40 static const int dirstate_flag_has_fallback_exec = 1 << 5;
41 static const int dirstate_flag_fallback_exec = 1 << 6;
42 static const int dirstate_flag_has_fallback_symlink = 1 << 7;
43 static const int dirstate_flag_fallback_symlink = 1 << 8;
44 static const int dirstate_flag_expected_state_is_modified = 1 << 9;
45 static const int dirstate_flag_has_meaningful_data = 1 << 10;
46 static const int dirstate_flag_has_mtime = 1 << 11;
47 static const int dirstate_flag_mtime_second_ambiguous = 1 << 12;
48 static const int dirstate_flag_directory = 1 << 13;
49 static const int dirstate_flag_all_unknown_recorded = 1 << 14;
50 static const int dirstate_flag_all_ignored_recorded = 1 << 15;
51
34 extern PyTypeObject dirstateItemType;
52 extern PyTypeObject dirstateItemType;
35 #define dirstate_tuple_check(op) (Py_TYPE(op) == &dirstateItemType)
53 #define dirstate_tuple_check(op) (Py_TYPE(op) == &dirstateItemType)
36
54
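The dirstate item is now a bit field; the same layout in Python for reference (values copied from the constants above):

    import enum

    class DirstateFlag(enum.IntFlag):
        WC_TRACKED = 1 << 0
        P1_TRACKED = 1 << 1
        P2_INFO = 1 << 2
        MODE_EXEC_PERM = 1 << 3
        MODE_IS_SYMLINK = 1 << 4
        HAS_FALLBACK_EXEC = 1 << 5
        FALLBACK_EXEC = 1 << 6
        HAS_FALLBACK_SYMLINK = 1 << 7
        FALLBACK_SYMLINK = 1 << 8
        EXPECTED_STATE_IS_MODIFIED = 1 << 9
        HAS_MEANINGFUL_DATA = 1 << 10
        HAS_MTIME = 1 << 11
        MTIME_SECOND_AMBIGUOUS = 1 << 12
        DIRECTORY = 1 << 13
        ALL_UNKNOWN_RECORDED = 1 << 14
        ALL_IGNORED_RECORDED = 1 << 15

    # e.g. a clean, tracked regular file:
    clean = (DirstateFlag.WC_TRACKED | DirstateFlag.P1_TRACKED
             | DirstateFlag.HAS_MEANINGFUL_DATA | DirstateFlag.HAS_MTIME)
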
@@ -626,7 +626,7 b' def dorecord('
626 for realname, tmpname in pycompat.iteritems(backups):
626 for realname, tmpname in pycompat.iteritems(backups):
627 ui.debug(b'restoring %r to %r\n' % (tmpname, realname))
627 ui.debug(b'restoring %r to %r\n' % (tmpname, realname))
628
628
629 if dirstate[realname] == b'n':
629 if dirstate.get_entry(realname).maybe_clean:
630 # without normallookup, restoring timestamp
630 # without normallookup, restoring timestamp
631 # may cause partially committed files
631 # may cause partially committed files
632 # to be treated as unmodified
632 # to be treated as unmodified
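The remaining Python hunks replace one-letter state lookups with DirstateItem attribute queries. The equivalences used throughout this changeset, as a sketch (assuming an existing repo; attribute names per the getters defined in the C hunks above):

    entry = repo.dirstate.get_entry(path)  # was: repo.dirstate[path]

    entry.maybe_clean                      # was: state == b'n'
    entry.added                            # was: state == b'a'
    entry.removed                          # was: state == b'r'
    entry.tracked and not entry.added      # was: state in b'mn'
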
@@ -987,7 +987,7 b' def changebranch(ui, repo, revs, label, '
987 with repo.wlock(), repo.lock(), repo.transaction(b'branches'):
987 with repo.wlock(), repo.lock(), repo.transaction(b'branches'):
988 # abort in case of uncommitted merge or dirty wdir
988 # abort in case of uncommitted merge or dirty wdir
989 bailifchanged(repo)
989 bailifchanged(repo)
990 revs = scmutil.revrange(repo, revs)
990 revs = logcmdutil.revrange(repo, revs)
991 if not revs:
991 if not revs:
992 raise error.InputError(b"empty revision set")
992 raise error.InputError(b"empty revision set")
993 roots = repo.revs(b'roots(%ld)', revs)
993 roots = repo.revs(b'roots(%ld)', revs)
@@ -1480,7 +1480,7 b' def copy(ui, repo, pats, opts, rename=Fa'
1480 # TODO: Remove this restriction and make it also create the copy
1480 # TODO: Remove this restriction and make it also create the copy
1481 # targets (and remove the rename source if rename==True).
1481 # targets (and remove the rename source if rename==True).
1482 raise error.InputError(_(b'--at-rev requires --after'))
1482 raise error.InputError(_(b'--at-rev requires --after'))
1483 ctx = scmutil.revsingle(repo, rev)
1483 ctx = logcmdutil.revsingle(repo, rev)
1484 if len(ctx.parents()) > 1:
1484 if len(ctx.parents()) > 1:
1485 raise error.InputError(
1485 raise error.InputError(
1486 _(b'cannot mark/unmark copy in merge commit')
1486 _(b'cannot mark/unmark copy in merge commit')
@@ -1642,7 +1642,9 b' def copy(ui, repo, pats, opts, rename=Fa'
1642 reltarget = repo.pathto(abstarget, cwd)
1642 reltarget = repo.pathto(abstarget, cwd)
1643 target = repo.wjoin(abstarget)
1643 target = repo.wjoin(abstarget)
1644 src = repo.wjoin(abssrc)
1644 src = repo.wjoin(abssrc)
1645 state = repo.dirstate[abstarget]
1645 entry = repo.dirstate.get_entry(abstarget)
1646
1647 already_committed = entry.tracked and not entry.added
1646
1648
1647 scmutil.checkportable(ui, abstarget)
1649 scmutil.checkportable(ui, abstarget)
1648
1650
@@ -1672,30 +1674,48 b' def copy(ui, repo, pats, opts, rename=Fa'
1672 exists = False
1674 exists = False
1673 samefile = True
1675 samefile = True
1674
1676
1675 if not after and exists or after and state in b'mn':
1677 if not after and exists or after and already_committed:
1676 if not opts[b'force']:
1678 if not opts[b'force']:
1677 if state in b'mn':
1679 if already_committed:
1678 msg = _(b'%s: not overwriting - file already committed\n')
1680 msg = _(b'%s: not overwriting - file already committed\n')
1679 if after:
1681 # Check if the target was added in the parent and the
1680 flags = b'--after --force'
1682 # source already existed in the grandparent.
1683 looks_like_copy_in_pctx = abstarget in pctx and any(
1684 abssrc in gpctx and abstarget not in gpctx
1685 for gpctx in pctx.parents()
1686 )
1687 if looks_like_copy_in_pctx:
1688 if rename:
1689 hint = _(
1690 b"('hg rename --at-rev .' to record the rename "
1691 b"in the parent of the working copy)\n"
1692 )
1693 else:
1694 hint = _(
1695 b"('hg copy --at-rev .' to record the copy in "
1696 b"the parent of the working copy)\n"
1697 )
1681 else:
1698 else:
1682 flags = b'--force'
1699 if after:
1683 if rename:
1700 flags = b'--after --force'
1684 hint = (
1701 else:
1685 _(
1702 flags = b'--force'
1686 b"('hg rename %s' to replace the file by "
1703 if rename:
1687 b'recording a rename)\n'
1704 hint = (
1705 _(
1706 b"('hg rename %s' to replace the file by "
1707 b'recording a rename)\n'
1708 )
1709 % flags
1688 )
1710 )
1689 % flags
1711 else:
1690 )
1712 hint = (
1691 else:
1713 _(
1692 hint = (
1714 b"('hg copy %s' to replace the file by "
1693 _(
1715 b'recording a copy)\n'
1694 b"('hg copy %s' to replace the file by "
1716 )
1695 b'recording a copy)\n'
1717 % flags
1696 )
1718 )
1697 % flags
1698 )
1699 else:
1719 else:
1700 msg = _(b'%s: not overwriting - file exists\n')
1720 msg = _(b'%s: not overwriting - file exists\n')
1701 if rename:
1721 if rename:
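The new hint only fires when the situation looks like a copy already recorded in the working copy's parent: the target exists in pctx but is missing from some grandparent that still had the source. Restated as a standalone predicate (a sketch; pctx, abssrc and abstarget as in the hunk above):

    def looks_like_copy_in_parent(pctx, abssrc, abstarget):
        # target appeared in the parent while the source predates it
        return abstarget in pctx and any(
            abssrc in gpctx and abstarget not in gpctx
            for gpctx in pctx.parents()
        )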
@@ -3350,7 +3370,11 b' def revert(ui, repo, ctx, *pats, **opts)'
3350 for f in localchanges:
3370 for f in localchanges:
3351 src = repo.dirstate.copied(f)
3371 src = repo.dirstate.copied(f)
3352 # XXX should we check for rename down to target node?
3372 # XXX should we check for rename down to target node?
3353 if src and src not in names and repo.dirstate[src] == b'r':
3373 if (
3374 src
3375 and src not in names
3376 and repo.dirstate.get_entry(src).removed
3377 ):
3354 dsremoved.add(src)
3378 dsremoved.add(src)
3355 names[src] = True
3379 names[src] = True
3356
3380
@@ -3364,12 +3388,12 b' def revert(ui, repo, ctx, *pats, **opts)'
3364 # distinguish between file to forget and the other
3388 # distinguish between file to forget and the other
3365 added = set()
3389 added = set()
3366 for abs in dsadded:
3390 for abs in dsadded:
3367 if repo.dirstate[abs] != b'a':
3391 if not repo.dirstate.get_entry(abs).added:
3368 added.add(abs)
3392 added.add(abs)
3369 dsadded -= added
3393 dsadded -= added
3370
3394
3371 for abs in deladded:
3395 for abs in deladded:
3372 if repo.dirstate[abs] == b'a':
3396 if repo.dirstate.get_entry(abs).added:
3373 dsadded.add(abs)
3397 dsadded.add(abs)
3374 deladded -= dsadded
3398 deladded -= dsadded
3375
3399
@@ -445,7 +445,7 b' def annotate(ui, repo, *pats, **opts):'
445 rev = opts.get(b'rev')
445 rev = opts.get(b'rev')
446 if rev:
446 if rev:
447 repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
447 repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
448 ctx = scmutil.revsingle(repo, rev)
448 ctx = logcmdutil.revsingle(repo, rev)
449
449
450 ui.pager(b'annotate')
450 ui.pager(b'annotate')
451 rootfm = ui.formatter(b'annotate', opts)
451 rootfm = ui.formatter(b'annotate', opts)
@@ -526,7 +526,7 b' def annotate(ui, repo, *pats, **opts):'
526 )
526 )
527
527
528 def bad(x, y):
528 def bad(x, y):
529 raise error.Abort(b"%s: %s" % (x, y))
529 raise error.InputError(b"%s: %s" % (x, y))
530
530
531 m = scmutil.match(ctx, pats, opts, badfn=bad)
531 m = scmutil.match(ctx, pats, opts, badfn=bad)
532
532
@@ -536,7 +536,7 b' def annotate(ui, repo, *pats, **opts):'
536 )
536 )
537 skiprevs = opts.get(b'skip')
537 skiprevs = opts.get(b'skip')
538 if skiprevs:
538 if skiprevs:
539 skiprevs = scmutil.revrange(repo, skiprevs)
539 skiprevs = logcmdutil.revrange(repo, skiprevs)
540
540
541 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
541 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
542 for abs in ctx.walk(m):
542 for abs in ctx.walk(m):
@@ -649,7 +649,7 b' def archive(ui, repo, dest, **opts):'
649 rev = opts.get(b'rev')
649 rev = opts.get(b'rev')
650 if rev:
650 if rev:
651 repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
651 repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
652 ctx = scmutil.revsingle(repo, rev)
652 ctx = logcmdutil.revsingle(repo, rev)
653 if not ctx:
653 if not ctx:
654 raise error.InputError(
654 raise error.InputError(
655 _(b'no working directory: please specify a revision')
655 _(b'no working directory: please specify a revision')
@@ -791,7 +791,7 b' def _dobackout(ui, repo, node=None, rev='
791
791
792 cmdutil.checkunfinished(repo)
792 cmdutil.checkunfinished(repo)
793 cmdutil.bailifchanged(repo)
793 cmdutil.bailifchanged(repo)
794 ctx = scmutil.revsingle(repo, rev)
794 ctx = logcmdutil.revsingle(repo, rev)
795 node = ctx.node()
795 node = ctx.node()
796
796
797 op1, op2 = repo.dirstate.parents()
797 op1, op2 = repo.dirstate.parents()
@@ -1037,7 +1037,7 b' def bisect('
1037 state = hbisect.load_state(repo)
1037 state = hbisect.load_state(repo)
1038
1038
1039 if rev:
1039 if rev:
1040 nodes = [repo[i].node() for i in scmutil.revrange(repo, rev)]
1040 nodes = [repo[i].node() for i in logcmdutil.revrange(repo, rev)]
1041 else:
1041 else:
1042 nodes = [repo.lookup(b'.')]
1042 nodes = [repo.lookup(b'.')]
1043
1043
@@ -1081,7 +1081,7 b' def bisect('
1081 raise error.StateError(_(b'current bisect revision is a merge'))
1081 raise error.StateError(_(b'current bisect revision is a merge'))
1082 if rev:
1082 if rev:
1083 if not nodes:
1083 if not nodes:
1084 raise error.Abort(_(b'empty revision set'))
1084 raise error.InputError(_(b'empty revision set'))
1085 node = repo[nodes[-1]].node()
1085 node = repo[nodes[-1]].node()
1086 with hbisect.restore_state(repo, state, node):
1086 with hbisect.restore_state(repo, state, node):
1087 while changesets:
1087 while changesets:
@@ -1424,7 +1424,7 b' def branches(ui, repo, active=False, clo'
1424 revs = opts.get(b'rev')
1424 revs = opts.get(b'rev')
1425 selectedbranches = None
1425 selectedbranches = None
1426 if revs:
1426 if revs:
1427 revs = scmutil.revrange(repo, revs)
1427 revs = logcmdutil.revrange(repo, revs)
1428 getbi = repo.revbranchcache().branchinfo
1428 getbi = repo.revbranchcache().branchinfo
1429 selectedbranches = {getbi(r)[0] for r in revs}
1429 selectedbranches = {getbi(r)[0] for r in revs}
1430
1430
@@ -1558,7 +1558,7 b' def bundle(ui, repo, fname, *dests, **op'
1558 revs = None
1558 revs = None
1559 if b'rev' in opts:
1559 if b'rev' in opts:
1560 revstrings = opts[b'rev']
1560 revstrings = opts[b'rev']
1561 revs = scmutil.revrange(repo, revstrings)
1561 revs = logcmdutil.revrange(repo, revstrings)
1562 if revstrings and not revs:
1562 if revstrings and not revs:
1563 raise error.InputError(_(b'no commits to bundle'))
1563 raise error.InputError(_(b'no commits to bundle'))
1564
1564
@@ -1590,7 +1590,7 b' def bundle(ui, repo, fname, *dests, **op'
1590 ui.warn(_(b"ignoring --base because --all was specified\n"))
1590 ui.warn(_(b"ignoring --base because --all was specified\n"))
1591 base = [nullrev]
1591 base = [nullrev]
1592 else:
1592 else:
1593 base = scmutil.revrange(repo, opts.get(b'base'))
1593 base = logcmdutil.revrange(repo, opts.get(b'base'))
1594 if cgversion not in changegroup.supportedoutgoingversions(repo):
1594 if cgversion not in changegroup.supportedoutgoingversions(repo):
1595 raise error.Abort(
1595 raise error.Abort(
1596 _(b"repository does not support bundle version %s") % cgversion
1596 _(b"repository does not support bundle version %s") % cgversion
@@ -1761,7 +1761,7 b' def cat(ui, repo, file1, *pats, **opts):'
1761 rev = opts.get(b'rev')
1761 rev = opts.get(b'rev')
1762 if rev:
1762 if rev:
1763 repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
1763 repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
1764 ctx = scmutil.revsingle(repo, rev)
1764 ctx = logcmdutil.revsingle(repo, rev)
1765 m = scmutil.match(ctx, (file1,) + pats, opts)
1765 m = scmutil.match(ctx, (file1,) + pats, opts)
1766 fntemplate = opts.pop(b'output', b'')
1766 fntemplate = opts.pop(b'output', b'')
1767 if cmdutil.isstdiofilename(fntemplate):
1767 if cmdutil.isstdiofilename(fntemplate):
@@ -2600,17 +2600,17 b' def diff(ui, repo, *pats, **opts):'
2600 cmdutil.check_incompatible_arguments(opts, b'to', [b'rev', b'change'])
2600 cmdutil.check_incompatible_arguments(opts, b'to', [b'rev', b'change'])
2601 if change:
2601 if change:
2602 repo = scmutil.unhidehashlikerevs(repo, [change], b'nowarn')
2602 repo = scmutil.unhidehashlikerevs(repo, [change], b'nowarn')
2603 ctx2 = scmutil.revsingle(repo, change, None)
2603 ctx2 = logcmdutil.revsingle(repo, change, None)
2604 ctx1 = logcmdutil.diff_parent(ctx2)
2604 ctx1 = logcmdutil.diff_parent(ctx2)
2605 elif from_rev or to_rev:
2605 elif from_rev or to_rev:
2606 repo = scmutil.unhidehashlikerevs(
2606 repo = scmutil.unhidehashlikerevs(
2607 repo, [from_rev] + [to_rev], b'nowarn'
2607 repo, [from_rev] + [to_rev], b'nowarn'
2608 )
2608 )
2609 ctx1 = scmutil.revsingle(repo, from_rev, None)
2609 ctx1 = logcmdutil.revsingle(repo, from_rev, None)
2610 ctx2 = scmutil.revsingle(repo, to_rev, None)
2610 ctx2 = logcmdutil.revsingle(repo, to_rev, None)
2611 else:
2611 else:
2612 repo = scmutil.unhidehashlikerevs(repo, revs, b'nowarn')
2612 repo = scmutil.unhidehashlikerevs(repo, revs, b'nowarn')
2613 ctx1, ctx2 = scmutil.revpair(repo, revs)
2613 ctx1, ctx2 = logcmdutil.revpair(repo, revs)
2614
2614
2615 if reverse:
2615 if reverse:
2616 ctxleft = ctx2
2616 ctxleft = ctx2
@@ -2753,7 +2753,7 b' def export(ui, repo, *changesets, **opts'
2753 changesets = [b'.']
2753 changesets = [b'.']
2754
2754
2755 repo = scmutil.unhidehashlikerevs(repo, changesets, b'nowarn')
2755 repo = scmutil.unhidehashlikerevs(repo, changesets, b'nowarn')
2756 revs = scmutil.revrange(repo, changesets)
2756 revs = logcmdutil.revrange(repo, changesets)
2757
2757
2758 if not revs:
2758 if not revs:
2759 raise error.InputError(_(b"export requires at least one changeset"))
2759 raise error.InputError(_(b"export requires at least one changeset"))
@@ -2864,7 +2864,7 b' def files(ui, repo, *pats, **opts):'
2864 rev = opts.get(b'rev')
2864 rev = opts.get(b'rev')
2865 if rev:
2865 if rev:
2866 repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
2866 repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
2867 ctx = scmutil.revsingle(repo, rev, None)
2867 ctx = logcmdutil.revsingle(repo, rev, None)
2868
2868
2869 end = b'\n'
2869 end = b'\n'
2870 if opts.get(b'print0'):
2870 if opts.get(b'print0'):
@@ -3170,12 +3170,12 b' def _dograft(ui, repo, *revs, **opts):'
3170 raise error.InputError(_(b'no revisions specified'))
3170 raise error.InputError(_(b'no revisions specified'))
3171 cmdutil.checkunfinished(repo)
3171 cmdutil.checkunfinished(repo)
3172 cmdutil.bailifchanged(repo)
3172 cmdutil.bailifchanged(repo)
3173 revs = scmutil.revrange(repo, revs)
3173 revs = logcmdutil.revrange(repo, revs)
3174
3174
3175 skipped = set()
3175 skipped = set()
3176 basectx = None
3176 basectx = None
3177 if opts.get('base'):
3177 if opts.get('base'):
3178 basectx = scmutil.revsingle(repo, opts['base'], None)
3178 basectx = logcmdutil.revsingle(repo, opts['base'], None)
3179 if basectx is None:
3179 if basectx is None:
3180 # check for merges
3180 # check for merges
3181 for rev in repo.revs(b'%ld and merge()', revs):
3181 for rev in repo.revs(b'%ld and merge()', revs):
@@ -3696,7 +3696,7 b' def heads(ui, repo, *branchrevs, **opts)'
3696 rev = opts.get(b'rev')
3696 rev = opts.get(b'rev')
3697 if rev:
3697 if rev:
3698 repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
3698 repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
3699 start = scmutil.revsingle(repo, rev, None).node()
3699 start = logcmdutil.revsingle(repo, rev, None).node()
3700
3700
3701 if opts.get(b'topo'):
3701 if opts.get(b'topo'):
3702 heads = [repo[h] for h in repo.heads(start)]
3702 heads = [repo[h] for h in repo.heads(start)]
@@ -3708,7 +3708,7 b' def heads(ui, repo, *branchrevs, **opts)'
3708
3708
3709 if branchrevs:
3709 if branchrevs:
3710 branches = {
3710 branches = {
3711 repo[r].branch() for r in scmutil.revrange(repo, branchrevs)
3711 repo[r].branch() for r in logcmdutil.revrange(repo, branchrevs)
3712 }
3712 }
3713 heads = [h for h in heads if h.branch() in branches]
3713 heads = [h for h in heads if h.branch() in branches]
3714
3714
@@ -3932,7 +3932,7 b' def identify('
3932 else:
3932 else:
3933 if rev:
3933 if rev:
3934 repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
3934 repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
3935 ctx = scmutil.revsingle(repo, rev, None)
3935 ctx = logcmdutil.revsingle(repo, rev, None)
3936
3936
3937 if ctx.rev() is None:
3937 if ctx.rev() is None:
3938 ctx = repo[None]
3938 ctx = repo[None]
@@ -4346,8 +4346,11 b' def incoming(ui, repo, source=b"default"'
4346 cmdutil.check_incompatible_arguments(opts, b'subrepos', [b'bundle'])
4346 cmdutil.check_incompatible_arguments(opts, b'subrepos', [b'bundle'])
4347
4347
4348 if opts.get(b'bookmarks'):
4348 if opts.get(b'bookmarks'):
4349 srcs = urlutil.get_pull_paths(repo, ui, [source], opts.get(b'branch'))
4349 srcs = urlutil.get_pull_paths(repo, ui, [source])
4350 for source, branches in srcs:
4350 for path in srcs:
4351 source, branches = urlutil.parseurl(
4352 path.rawloc, opts.get(b'branch')
4353 )
4351 other = hg.peer(repo, opts, source)
4354 other = hg.peer(repo, opts, source)
4352 try:
4355 try:
4353 if b'bookmarks' not in other.listkeys(b'namespaces'):
4356 if b'bookmarks' not in other.listkeys(b'namespaces'):
@@ -4357,7 +4360,9 b' def incoming(ui, repo, source=b"default"'
4357 ui.status(
4360 ui.status(
4358 _(b'comparing with %s\n') % urlutil.hidepassword(source)
4361 _(b'comparing with %s\n') % urlutil.hidepassword(source)
4359 )
4362 )
4360 return bookmarks.incoming(ui, repo, other)
4363 return bookmarks.incoming(
4364 ui, repo, other, mode=path.bookmarks_mode
4365 )
4361 finally:
4366 finally:
4362 other.close()
4367 other.close()
4363
4368
@@ -4445,7 +4450,7 b' def locate(ui, repo, *pats, **opts):'
4445 end = b'\0'
4450 end = b'\0'
4446 else:
4451 else:
4447 end = b'\n'
4452 end = b'\n'
4448 ctx = scmutil.revsingle(repo, opts.get(b'rev'), None)
4453 ctx = logcmdutil.revsingle(repo, opts.get(b'rev'), None)
4449
4454
4450 ret = 1
4455 ret = 1
4451 m = scmutil.match(
4456 m = scmutil.match(
@@ -4790,7 +4795,7 b' def manifest(ui, repo, node=None, rev=No'
4790 mode = {b'l': b'644', b'x': b'755', b'': b'644', b't': b'755'}
4795 mode = {b'l': b'644', b'x': b'755', b'': b'644', b't': b'755'}
4791 if node:
4796 if node:
4792 repo = scmutil.unhidehashlikerevs(repo, [node], b'nowarn')
4797 repo = scmutil.unhidehashlikerevs(repo, [node], b'nowarn')
4793 ctx = scmutil.revsingle(repo, node)
4798 ctx = logcmdutil.revsingle(repo, node)
4794 mf = ctx.manifest()
4799 mf = ctx.manifest()
4795 ui.pager(b'manifest')
4800 ui.pager(b'manifest')
4796 for f in ctx:
4801 for f in ctx:
@@ -4877,7 +4882,7 b' def merge(ui, repo, node=None, **opts):'
4877 node = opts.get(b'rev')
4882 node = opts.get(b'rev')
4878
4883
4879 if node:
4884 if node:
4880 ctx = scmutil.revsingle(repo, node)
4885 ctx = logcmdutil.revsingle(repo, node)
4881 else:
4886 else:
4882 if ui.configbool(b'commands', b'merge.require-rev'):
4887 if ui.configbool(b'commands', b'merge.require-rev'):
4883 raise error.InputError(
4888 raise error.InputError(
@@ -5056,7 +5061,7 b' def parents(ui, repo, file_=None, **opts'
5056 rev = opts.get(b'rev')
5061 rev = opts.get(b'rev')
5057 if rev:
5062 if rev:
5058 repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
5063 repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
5059 ctx = scmutil.revsingle(repo, rev, None)
5064 ctx = logcmdutil.revsingle(repo, rev, None)
5060
5065
5061 if file_:
5066 if file_:
5062 m = scmutil.match(ctx, (file_,), opts)
5067 m = scmutil.match(ctx, (file_,), opts)
@@ -5219,13 +5224,13 b' def phase(ui, repo, *revs, **opts):'
5219 # look for specified revision
5224 # look for specified revision
5220 revs = list(revs)
5225 revs = list(revs)
5221 revs.extend(opts[b'rev'])
5226 revs.extend(opts[b'rev'])
5222 if not revs:
5227 if revs:
5228 revs = logcmdutil.revrange(repo, revs)
5229 else:
5223 # display both parents as the second parent phase can influence
5230 # display both parents as the second parent phase can influence
5224 # the phase of a merge commit
5231 # the phase of a merge commit
5225 revs = [c.rev() for c in repo[None].parents()]
5232 revs = [c.rev() for c in repo[None].parents()]
5226
5233
5227 revs = scmutil.revrange(repo, revs)
5228
5229 ret = 0
5234 ret = 0
5230 if targetphase is None:
5235 if targetphase is None:
5231 # display
5236 # display
@@ -5393,8 +5398,8 b' def pull(ui, repo, *sources, **opts):'
5393 hint = _(b'use hg pull followed by hg update DEST')
5398 hint = _(b'use hg pull followed by hg update DEST')
5394 raise error.InputError(msg, hint=hint)
5399 raise error.InputError(msg, hint=hint)
5395
5400
5396 sources = urlutil.get_pull_paths(repo, ui, sources, opts.get(b'branch'))
5401 for path in urlutil.get_pull_paths(repo, ui, sources):
5397 for source, branches in sources:
5402 source, branches = urlutil.parseurl(path.rawloc, opts.get(b'branch'))
5398 ui.status(_(b'pulling from %s\n') % urlutil.hidepassword(source))
5403 ui.status(_(b'pulling from %s\n') % urlutil.hidepassword(source))
5399 ui.flush()
5404 ui.flush()
5400 other = hg.peer(repo, opts, source)
5405 other = hg.peer(repo, opts, source)
@@ -5451,6 +5456,7 b' def pull(ui, repo, *sources, **opts):'
5451 modheads = exchange.pull(
5456 modheads = exchange.pull(
5452 repo,
5457 repo,
5453 other,
5458 other,
5459 path=path,
5454 heads=nodes,
5460 heads=nodes,
5455 force=opts.get(b'force'),
5461 force=opts.get(b'force'),
5456 bookmarks=opts.get(b'bookmark', ()),
5462 bookmarks=opts.get(b'bookmark', ()),
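Both the incoming and pull hunks switch to the same iteration pattern: urlutil.get_pull_paths() now yields path objects rather than (source, branches) pairs, so branch parsing moves into the caller. A sketch of the new shape (opts as in the surrounding code):

    for path in urlutil.get_pull_paths(repo, ui, sources):
        # branches are now resolved from the raw location by the caller
        source, branches = urlutil.parseurl(path.rawloc, opts.get(b'branch'))
        other = hg.peer(repo, opts, source)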
@@ -5735,7 +5741,7 b' def push(ui, repo, *dests, **opts):'
5735
5741
5736 try:
5742 try:
5737 if revs:
5743 if revs:
5738 revs = [repo[r].node() for r in scmutil.revrange(repo, revs)]
5744 revs = [repo[r].node() for r in logcmdutil.revrange(repo, revs)]
5739 if not revs:
5745 if not revs:
5740 raise error.InputError(
5746 raise error.InputError(
5741 _(b"specified revisions evaluate to an empty set"),
5747 _(b"specified revisions evaluate to an empty set"),
@@ -6347,7 +6353,7 b' def revert(ui, repo, *pats, **opts):'
6347 rev = opts.get(b'rev')
6353 rev = opts.get(b'rev')
6348 if rev:
6354 if rev:
6349 repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
6355 repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
6350 ctx = scmutil.revsingle(repo, rev)
6356 ctx = logcmdutil.revsingle(repo, rev)
6351
6357
6352 if not (
6358 if not (
6353 pats
6359 pats
@@ -6905,11 +6911,11 b' def status(ui, repo, *pats, **opts):'
6905 raise error.InputError(msg)
6911 raise error.InputError(msg)
6906 elif change:
6912 elif change:
6907 repo = scmutil.unhidehashlikerevs(repo, [change], b'nowarn')
6913 repo = scmutil.unhidehashlikerevs(repo, [change], b'nowarn')
6908 ctx2 = scmutil.revsingle(repo, change, None)
6914 ctx2 = logcmdutil.revsingle(repo, change, None)
6909 ctx1 = ctx2.p1()
6915 ctx1 = ctx2.p1()
6910 else:
6916 else:
6911 repo = scmutil.unhidehashlikerevs(repo, revs, b'nowarn')
6917 repo = scmutil.unhidehashlikerevs(repo, revs, b'nowarn')
6912 ctx1, ctx2 = scmutil.revpair(repo, revs)
6918 ctx1, ctx2 = logcmdutil.revpair(repo, revs)
6913
6919
6914 forcerelativevalue = None
6920 forcerelativevalue = None
6915 if ui.hasconfig(b'commands', b'status.relative'):
6921 if ui.hasconfig(b'commands', b'status.relative'):
@@ -7453,7 +7459,7 b' def tag(ui, repo, name1, *names, **opts)'
7453 b'(use -f to force)'
7459 b'(use -f to force)'
7454 )
7460 )
7455 )
7461 )
7456 node = scmutil.revsingle(repo, rev_).node()
7462 node = logcmdutil.revsingle(repo, rev_).node()
7457
7463
7458 if not message:
7464 if not message:
7459 # we don't translate commit messages
7465 # we don't translate commit messages
@@ -7477,7 +7483,7 b' def tag(ui, repo, name1, *names, **opts)'
7477 # don't allow tagging the null rev
7483 # don't allow tagging the null rev
7478 if (
7484 if (
7479 not opts.get(b'remove')
7485 not opts.get(b'remove')
7480 and scmutil.revsingle(repo, rev_).rev() == nullrev
7486 and logcmdutil.revsingle(repo, rev_).rev() == nullrev
7481 ):
7487 ):
7482 raise error.InputError(_(b"cannot tag null revision"))
7488 raise error.InputError(_(b"cannot tag null revision"))
7483
7489
@@ -7840,7 +7846,7 b' def update(ui, repo, node=None, **opts):'
7840 brev = rev
7846 brev = rev
7841 if rev:
7847 if rev:
7842 repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
7848 repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
7843 ctx = scmutil.revsingle(repo, rev, default=None)
7849 ctx = logcmdutil.revsingle(repo, rev, default=None)
7844 rev = ctx.rev()
7850 rev = ctx.rev()
7845 hidden = ctx.hidden()
7851 hidden = ctx.hidden()
7846 overrides = {(b'ui', b'forcemerge'): opts.get('tool', b'')}
7852 overrides = {(b'ui', b'forcemerge'): opts.get('tool', b'')}
@@ -1,5 +1,5 b''
1 #ifndef _HG_COMPAT_H_
1 #ifndef HG_COMPAT_H
2 #define _HG_COMPAT_H_
2 #define HG_COMPAT_H
3
3
4 #ifdef _WIN32
4 #ifdef _WIN32
5 #ifdef _MSC_VER
5 #ifdef _MSC_VER
@@ -959,11 +959,6 b' coreconfigitem('
959 )
959 )
960 coreconfigitem(
960 coreconfigitem(
961 b'experimental',
961 b'experimental',
962 b'dirstate-tree.in-memory',
963 default=False,
964 )
965 coreconfigitem(
966 b'experimental',
967 b'editortmpinhg',
962 b'editortmpinhg',
968 default=False,
963 default=False,
969 )
964 )
@@ -1266,6 +1261,11 b' coreconfigitem('
1266 )
1261 )
1267 coreconfigitem(
1262 coreconfigitem(
1268 b'experimental',
1263 b'experimental',
1264 b'web.full-garbage-collection-rate',
1265 default=1, # still forcing a full collection on each request
1266 )
1267 coreconfigitem(
1268 b'experimental',
1269 b'worker.wdir-get-thread-safe',
1269 b'worker.wdir-get-thread-safe',
1270 default=False,
1270 default=False,
1271 )
1271 )
@@ -1306,7 +1306,7 b' coreconfigitem('
1306 # Enable this dirstate format *when creating a new repository*.
1306 # Enable this dirstate format *when creating a new repository*.
1307 # Which format to use for existing repos is controlled by .hg/requires
1307 # Which format to use for existing repos is controlled by .hg/requires
1308 b'format',
1308 b'format',
1309 b'exp-dirstate-v2',
1309 b'exp-rc-dirstate-v2',
1310 default=False,
1310 default=False,
1311 experimental=True,
1311 experimental=True,
1312 )
1312 )
@@ -1880,6 +1880,13 b' coreconfigitem('
1880 default=b'skip',
1880 default=b'skip',
1881 experimental=True,
1881 experimental=True,
1882 )
1882 )
1883 # experimental as long as format.exp-rc-dirstate-v2 is.
1884 coreconfigitem(
1885 b'storage',
1886 b'dirstate-v2.slow-path',
1887 default=b"abort",
1888 experimental=True,
1889 )
1883 coreconfigitem(
1890 coreconfigitem(
1884 b'storage',
1891 b'storage',
1885 b'new-repo-backend',
1892 b'new-repo-backend',
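Two knobs change here: the dirstate-v2 creation flag is renamed to format.exp-rc-dirstate-v2, and the new storage.dirstate-v2.slow-path (default b"abort") appears, going by its name and the HAS_FAST_DIRSTATE_V2 rename later in this series, to control what happens when only a slow pure-Python implementation is available. A sketch of reading them (standard ui config accessors; any value other than the registered default is an assumption):

    use_v2 = repo.ui.configbool(b'format', b'exp-rc-dirstate-v2')
    slow_path = repo.ui.config(b'storage', b'dirstate-v2.slow-path')
    if slow_path == b'abort':
        pass  # refuse to fall back to the slow implementation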
@@ -1551,11 +1551,11 b' class workingctx(committablectx):'
1551 def __iter__(self):
1551 def __iter__(self):
1552 d = self._repo.dirstate
1552 d = self._repo.dirstate
1553 for f in d:
1553 for f in d:
1554 if d[f] != b'r':
1554 if d.get_entry(f).tracked:
1555 yield f
1555 yield f
1556
1556
1557 def __contains__(self, key):
1557 def __contains__(self, key):
1558 return self._repo.dirstate[key] not in b"?r"
1558 return self._repo.dirstate.get_entry(key).tracked
1559
1559
1560 def hex(self):
1560 def hex(self):
1561 return self._repo.nodeconstants.wdirhex
1561 return self._repo.nodeconstants.wdirhex
@@ -2017,7 +2017,7 b' class workingctx(committablectx):'
2017 def matches(self, match):
2017 def matches(self, match):
2018 match = self._repo.narrowmatch(match)
2018 match = self._repo.narrowmatch(match)
2019 ds = self._repo.dirstate
2019 ds = self._repo.dirstate
2020 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
2020 return sorted(f for f in ds.matches(match) if ds.get_entry(f).tracked)
2021
2021
2022 def markcommitted(self, node):
2022 def markcommitted(self, node):
2023 with self._repo.dirstate.parentchange():
2023 with self._repo.dirstate.parentchange():
@@ -94,7 +94,7 b' def _dirstatecopies(repo, match=None):'
94 ds = repo.dirstate
94 ds = repo.dirstate
95 c = ds.copies().copy()
95 c = ds.copies().copy()
96 for k in list(c):
96 for k in list(c):
97 if ds[k] not in b'anm' or (match and not match(k)):
97 if not ds.get_entry(k).tracked or (match and not match(k)):
98 del c[k]
98 del c[k]
99 return c
99 return c
100
100
@@ -506,7 +506,7 b' def debugcapabilities(ui, path, **opts):'
506 )
506 )
507 def debugchangedfiles(ui, repo, rev, **opts):
507 def debugchangedfiles(ui, repo, rev, **opts):
508 """list the stored files changes for a revision"""
508 """list the stored files changes for a revision"""
509 ctx = scmutil.revsingle(repo, rev, None)
509 ctx = logcmdutil.revsingle(repo, rev, None)
510 files = None
510 files = None
511
511
512 if opts['compute']:
512 if opts['compute']:
@@ -550,24 +550,9 b' def debugcheckstate(ui, repo):'
550 m1 = repo[parent1].manifest()
550 m1 = repo[parent1].manifest()
551 m2 = repo[parent2].manifest()
551 m2 = repo[parent2].manifest()
552 errors = 0
552 errors = 0
553 for f in repo.dirstate:
553 for err in repo.dirstate.verify(m1, m2):
554 state = repo.dirstate[f]
554 ui.warn(err[0] % err[1:])
555 if state in b"nr" and f not in m1:
555 errors += 1
556 ui.warn(_(b"%s in state %s, but not in manifest1\n") % (f, state))
557 errors += 1
558 if state in b"a" and f in m1:
559 ui.warn(_(b"%s in state %s, but also in manifest1\n") % (f, state))
560 errors += 1
561 if state in b"m" and f not in m1 and f not in m2:
562 ui.warn(
563 _(b"%s in state %s, but not in either manifest\n") % (f, state)
564 )
565 errors += 1
566 for f in m1:
567 state = repo.dirstate[f]
568 if state not in b"nrm":
569 ui.warn(_(b"%s in manifest1, but listed as state %s") % (f, state))
570 errors += 1
571 if errors:
556 if errors:
572 errstr = _(b".hg/dirstate inconsistent with current parent's manifest")
557 errstr = _(b".hg/dirstate inconsistent with current parent's manifest")
573 raise error.Abort(errstr)
558 raise error.Abort(errstr)
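debugcheckstate's hand-rolled manifest cross-checks are folded into dirstate.verify(m1, m2), a generator of printf-style tuples: the format string first, its arguments after. Consumption sketch, mirroring the new code in this hunk:

    errors = 0
    for err in repo.dirstate.verify(m1, m2):
        ui.warn(err[0] % err[1:])   # err = (format, arg1, arg2, ...)
        errors += 1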
@@ -962,35 +947,29 b' def debugstate(ui, repo, **opts):'
962 datesort = opts.get('datesort')
947 datesort = opts.get('datesort')
963
948
964 if datesort:
949 if datesort:
965 keyfunc = lambda x: (
950
966 x[1].v1_mtime(),
951 def keyfunc(entry):
967 x[0],
952 filename, _state, _mode, _size, mtime = entry
968 ) # sort by mtime, then by filename
953 return (mtime, filename)
954
969 else:
955 else:
970 keyfunc = None # sort by filename
956 keyfunc = None # sort by filename
971 if opts['all']:
957 entries = list(repo.dirstate._map.debug_iter(all=opts['all']))
972 entries = list(repo.dirstate._map.debug_iter())
973 else:
974 entries = list(pycompat.iteritems(repo.dirstate))
975 entries.sort(key=keyfunc)
958 entries.sort(key=keyfunc)
976 for file_, ent in entries:
959 for entry in entries:
977 if ent.v1_mtime() == -1:
960 filename, state, mode, size, mtime = entry
961 if mtime == -1:
978 timestr = b'unset '
962 timestr = b'unset '
979 elif nodates:
963 elif nodates:
980 timestr = b'set '
964 timestr = b'set '
981 else:
965 else:
982 timestr = time.strftime(
966 timestr = time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime(mtime))
983 "%Y-%m-%d %H:%M:%S ", time.localtime(ent.v1_mtime())
984 )
985 timestr = encoding.strtolocal(timestr)
967 timestr = encoding.strtolocal(timestr)
986 if ent.mode & 0o20000:
968 if mode & 0o20000:
987 mode = b'lnk'
969 mode = b'lnk'
988 else:
970 else:
989 mode = b'%3o' % (ent.v1_mode() & 0o777 & ~util.umask)
971 mode = b'%3o' % (mode & 0o777 & ~util.umask)
990 ui.write(
972 ui.write(b"%c %s %10d %s%s\n" % (state, mode, size, timestr, filename))
991 b"%c %s %10d %s%s\n"
992 % (ent.v1_state(), mode, ent.v1_size(), timestr, file_)
993 )
994 for f in repo.dirstate.copies():
973 for f in repo.dirstate.copies():
995 ui.write(_(b"copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
974 ui.write(_(b"copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
996
975
@@ -1103,7 +1082,7 b' def debugdiscovery(ui, repo, remoteurl=b'
1103 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(remoteurl))
1082 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(remoteurl))
1104 else:
1083 else:
1105 branches = (None, [])
1084 branches = (None, [])
1106 remote_filtered_revs = scmutil.revrange(
1085 remote_filtered_revs = logcmdutil.revrange(
1107 unfi, [b"not (::(%s))" % remote_revs]
1086 unfi, [b"not (::(%s))" % remote_revs]
1108 )
1087 )
1109 remote_filtered_revs = frozenset(remote_filtered_revs)
1088 remote_filtered_revs = frozenset(remote_filtered_revs)
@@ -1117,7 +1096,7 b' def debugdiscovery(ui, repo, remoteurl=b'
1117 remote._repo = remote._repo.filtered(b'debug-discovery-remote-filter')
1096 remote._repo = remote._repo.filtered(b'debug-discovery-remote-filter')
1118
1097
1119 if local_revs:
1098 if local_revs:
1120 local_filtered_revs = scmutil.revrange(
1099 local_filtered_revs = logcmdutil.revrange(
1121 unfi, [b"not (::(%s))" % local_revs]
1100 unfi, [b"not (::(%s))" % local_revs]
1122 )
1101 )
1123 local_filtered_revs = frozenset(local_filtered_revs)
1102 local_filtered_revs = frozenset(local_filtered_revs)
@@ -1155,7 +1134,7 b' def debugdiscovery(ui, repo, remoteurl=b'
1155 def doit(pushedrevs, remoteheads, remote=remote):
1134 def doit(pushedrevs, remoteheads, remote=remote):
1156 nodes = None
1135 nodes = None
1157 if pushedrevs:
1136 if pushedrevs:
1158 revs = scmutil.revrange(repo, pushedrevs)
1137 revs = logcmdutil.revrange(repo, pushedrevs)
1159 nodes = [repo[r].node() for r in revs]
1138 nodes = [repo[r].node() for r in revs]
1160 common, any, hds = setdiscovery.findcommonheads(
1139 common, any, hds = setdiscovery.findcommonheads(
1161 ui, repo, remote, ancestorsof=nodes, audit=data
1140 ui, repo, remote, ancestorsof=nodes, audit=data
@@ -1394,7 +1373,7 b' def debugfileset(ui, repo, expr, **opts)'
1394
1373
1395 fileset.symbols # force import of fileset so we have predicates to optimize
1374 fileset.symbols # force import of fileset so we have predicates to optimize
1396 opts = pycompat.byteskwargs(opts)
1375 opts = pycompat.byteskwargs(opts)
1397 ctx = scmutil.revsingle(repo, opts.get(b'rev'), None)
1376 ctx = logcmdutil.revsingle(repo, opts.get(b'rev'), None)
1398
1377
1399 stages = [
1378 stages = [
1400 (b'parsed', pycompat.identity),
1379 (b'parsed', pycompat.identity),
@@ -1495,8 +1474,8 b' def debug_repair_issue6528(ui, repo, **o'
1495 filename.
1474 filename.
1496
1475
1497 Note that this does *not* mean that this repairs future affected revisions,
1476 Note that this does *not* mean that this repairs future affected revisions,
1498 that needs a separate fix at the exchange level that hasn't been written yet
1477 that needs a separate fix at the exchange level that was introduced in
1499 (as of 5.9rc0).
1478 Mercurial 5.9.1.
1500
1479
1501 There is a `--paranoid` flag to test that the fast implementation is correct
1480 There is a `--paranoid` flag to test that the fast implementation is correct
1502 by checking it against the slow implementation. Since this matter is quite
1481 by checking it against the slow implementation. Since this matter is quite
@@ -2614,7 +2593,7 b' def debugobsolete(ui, repo, precursor=No'
2614 l.release()
2593 l.release()
2615 else:
2594 else:
2616 if opts[b'rev']:
2595 if opts[b'rev']:
2617 revs = scmutil.revrange(repo, opts[b'rev'])
2596 revs = logcmdutil.revrange(repo, opts[b'rev'])
2618 nodes = [repo[r].node() for r in revs]
2597 nodes = [repo[r].node() for r in revs]
2619 markers = list(
2598 markers = list(
2620 obsutil.getmarkers(
2599 obsutil.getmarkers(
@@ -2981,16 +2960,28 b' def debugrebuilddirstate(ui, repo, rev, '
2981 dirstatefiles = set(dirstate)
2960 dirstatefiles = set(dirstate)
2982 manifestonly = manifestfiles - dirstatefiles
2961 manifestonly = manifestfiles - dirstatefiles
2983 dsonly = dirstatefiles - manifestfiles
2962 dsonly = dirstatefiles - manifestfiles
2984 dsnotadded = {f for f in dsonly if dirstate[f] != b'a'}
2963 dsnotadded = {f for f in dsonly if not dirstate.get_entry(f).added}
2985 changedfiles = manifestonly | dsnotadded
2964 changedfiles = manifestonly | dsnotadded
2986
2965
2987 dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
2966 dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
2988
2967
2989
2968
2990 @command(b'debugrebuildfncache', [], b'')
2969 @command(
2991 def debugrebuildfncache(ui, repo):
2970 b'debugrebuildfncache',
2971 [
2972 (
2973 b'',
2974 b'only-data',
2975 False,
2976 _(b'only look for wrong .d files (much faster)'),
2977 )
2978 ],
2979 b'',
2980 )
2981 def debugrebuildfncache(ui, repo, **opts):
2992 """rebuild the fncache file"""
2982 """rebuild the fncache file"""
2993 repair.rebuildfncache(ui, repo)
2983 opts = pycompat.byteskwargs(opts)
2984 repair.rebuildfncache(ui, repo, opts.get(b"only_data"))
2994
2985
2995
2986
2996 @command(
2987 @command(
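The new flag is handed straight to repair.rebuildfncache as its third argument, so `hg debugrebuildfncache --only-data` only scans for wrong .d files. A programmatic sketch (the parameter name on the repair side is not shown here, so it is passed positionally, as the hunk does):

    from mercurial import repair

    repair.rebuildfncache(ui, repo, True)   # True = only-data, the faster scan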
@@ -4018,7 +4009,7 b' def debugsuccessorssets(ui, repo, *revs,'
4018 cache = {}
4009 cache = {}
4019 ctx2str = bytes
4010 ctx2str = bytes
4020 node2str = short
4011 node2str = short
4021 for rev in scmutil.revrange(repo, revs):
4012 for rev in logcmdutil.revrange(repo, revs):
4022 ctx = repo[rev]
4013 ctx = repo[rev]
4023 ui.write(b'%s\n' % ctx2str(ctx))
4014 ui.write(b'%s\n' % ctx2str(ctx))
4024 for succsset in obsutil.successorssets(
4015 for succsset in obsutil.successorssets(
@@ -4077,7 +4068,7 b' def debugtemplate(ui, repo, tmpl, **opts'
4077 raise error.RepoError(
4068 raise error.RepoError(
4078 _(b'there is no Mercurial repository here (.hg not found)')
4069 _(b'there is no Mercurial repository here (.hg not found)')
4079 )
4070 )
4080 revs = scmutil.revrange(repo, opts['rev'])
4071 revs = logcmdutil.revrange(repo, opts['rev'])
4081
4072
4082 props = {}
4073 props = {}
4083 for d in opts['define']:
4074 for d in opts['define']:
@@ -31,6 +31,10 b' from . import ('
31 util,
31 util,
32 )
32 )
33
33
34 from .dirstateutils import (
35 timestamp,
36 )
37
34 from .interfaces import (
38 from .interfaces import (
35 dirstate as intdirstate,
39 dirstate as intdirstate,
36 util as interfaceutil,
40 util as interfaceutil,
@@ -39,13 +43,13 b' from .interfaces import ('
39 parsers = policy.importmod('parsers')
43 parsers = policy.importmod('parsers')
40 rustmod = policy.importrust('dirstate')
44 rustmod = policy.importrust('dirstate')
41
45
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
46 HAS_FAST_DIRSTATE_V2 = rustmod is not None
43
47
44 propertycache = util.propertycache
48 propertycache = util.propertycache
45 filecache = scmutil.filecache
49 filecache = scmutil.filecache
46 _rangemask = dirstatemap.rangemask
50 _rangemask = dirstatemap.rangemask
47
51
48 DirstateItem = parsers.DirstateItem
52 DirstateItem = dirstatemap.DirstateItem
49
53
50
54
51 class repocache(filecache):
55 class repocache(filecache):
@@ -66,7 +70,7 b' def _getfsnow(vfs):'
66 '''Get "now" timestamp on filesystem'''
70 '''Get "now" timestamp on filesystem'''
67 tmpfd, tmpname = vfs.mkstemp()
71 tmpfd, tmpname = vfs.mkstemp()
68 try:
72 try:
69 return os.fstat(tmpfd)[stat.ST_MTIME]
73 return timestamp.mtime_of(os.fstat(tmpfd))
70 finally:
74 finally:
71 os.close(tmpfd)
75 os.close(tmpfd)
72 vfs.unlink(tmpname)
76 vfs.unlink(tmpname)
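_getfsnow() now returns a timestamp object rather than a bare integer: timestamp.mtime_of() wraps any os.stat result, and the objects order like the integers they replace, which is what the `mtime > self._lastnormaltime` comparisons later in this file rely on. Minimal usage sketch (hypothetical path):

    import os
    from mercurial.dirstateutils import timestamp

    mtime = timestamp.mtime_of(os.lstat(b'some-file'))  # timestamp, not an int
    if mtime > timestamp.zero():  # zero() is what _lastnormaltime resets to
        pass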
@@ -122,7 +126,7 b' class dirstate(object):'
122 # UNC path pointing to root share (issue4557)
126 # UNC path pointing to root share (issue4557)
123 self._rootdir = pathutil.normasprefix(root)
127 self._rootdir = pathutil.normasprefix(root)
124 self._dirty = False
128 self._dirty = False
125 self._lastnormaltime = 0
129 self._lastnormaltime = timestamp.zero()
126 self._ui = ui
130 self._ui = ui
127 self._filecache = {}
131 self._filecache = {}
128 self._parentwriters = 0
132 self._parentwriters = 0
@@ -130,7 +134,6 b' class dirstate(object):'
130 self._pendingfilename = b'%s.pending' % self._filename
134 self._pendingfilename = b'%s.pending' % self._filename
131 self._plchangecallbacks = {}
135 self._plchangecallbacks = {}
132 self._origpl = None
136 self._origpl = None
133 self._updatedfiles = set()
134 self._mapcls = dirstatemap.dirstatemap
137 self._mapcls = dirstatemap.dirstatemap
135 # Access and cache cwd early, so we don't access it for the first time
138 # Access and cache cwd early, so we don't access it for the first time
136 # after a working-copy update caused it to not exist (accessing it then
139 # after a working-copy update caused it to not exist (accessing it then
@@ -239,44 +242,59 b' class dirstate(object):'
239 return self._rootdir + f
242 return self._rootdir + f
240
243
241 def flagfunc(self, buildfallback):
244 def flagfunc(self, buildfallback):
242 if self._checklink and self._checkexec:
245 """build a callable that returns flags associated with a filename
246
247 The information is extracted from three possible layers:
248 1. the file system if it supports the information
249 2. the "fallback" information stored in the dirstate if any
250 3. a more expensive mechanism inferring the flags from the parents.
251 """
243
252
244 def f(x):
253 # small hack to cache the result of buildfallback()
245 try:
254 fallback_func = []
246 st = os.lstat(self._join(x))
255
247 if util.statislink(st):
256 def get_flags(x):
248 return b'l'
257 entry = None
249 if util.statisexec(st):
258 fallback_value = None
250 return b'x'
259 try:
251 except OSError:
260 st = os.lstat(self._join(x))
252 pass
261 except OSError:
253 return b''
262 return b''
254
263
255 return f
264 if self._checklink:
256
265 if util.statislink(st):
257 fallback = buildfallback()
258 if self._checklink:
259
260 def f(x):
261 if os.path.islink(self._join(x)):
262 return b'l'
266 return b'l'
263 if b'x' in fallback(x):
267 else:
264 return b'x'
268 entry = self.get_entry(x)
265 return b''
269 if entry.has_fallback_symlink:
270 if entry.fallback_symlink:
271 return b'l'
272 else:
273 if not fallback_func:
274 fallback_func.append(buildfallback())
275 fallback_value = fallback_func[0](x)
276 if b'l' in fallback_value:
277 return b'l'
266
278
267 return f
279 if self._checkexec:
268 if self._checkexec:
280 if util.statisexec(st):
281 return b'x'
282 else:
283 if entry is None:
284 entry = self.get_entry(x)
285 if entry.has_fallback_exec:
286 if entry.fallback_exec:
287 return b'x'
288 else:
289 if fallback_value is None:
290 if not fallback_func:
291 fallback_func.append(buildfallback())
292 fallback_value = fallback_func[0](x)
293 if b'x' in fallback_value:
294 return b'x'
295 return b''
269
296
270 def f(x):
297 return get_flags
271 if b'l' in fallback(x):
272 return b'l'
273 if util.isexec(self._join(x)):
274 return b'x'
275 return b''
276
277 return f
278 else:
279 return fallback
280
298
281 @propertycache
299 @propertycache
282 def _cwd(self):
300 def _cwd(self):
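The rewritten flagfunc resolves flags layer by layer, as its docstring says: the lstat result first, then any fallback recorded on the DirstateItem (has_fallback_symlink / has_fallback_exec), and only as a last resort the expensive buildfallback() closure, built lazily at most once via the one-element fallback_func list. Caller-side sketch (buildfallback is whatever the caller already supplies):

    get_flags = repo.dirstate.flagfunc(buildfallback)
    flags = get_flags(b'bin/tool')   # returns b'l', b'x', or b''
    is_exec = flags == b'x'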
@@ -328,11 +346,20 b' class dirstate(object):'
328 consider migrating all users of this to going through the dirstate entry
346 consider migrating all users of this to going through the dirstate entry
329 instead.
347 instead.
330 """
348 """
349 msg = b"don't use dirstate[file], use dirstate.get_entry(file)"
350 util.nouideprecwarn(msg, b'6.1', stacklevel=2)
331 entry = self._map.get(key)
351 entry = self._map.get(key)
332 if entry is not None:
352 if entry is not None:
333 return entry.state
353 return entry.state
334 return b'?'
354 return b'?'
335
355
356 def get_entry(self, path):
357 """return a DirstateItem for the associated path"""
358 entry = self._map.get(path)
359 if entry is None:
360 return DirstateItem()
361 return entry
362
336 def __contains__(self, key):
363 def __contains__(self, key):
337 return key in self._map
364 return key in self._map
338
365
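Unlike the now-deprecated dirstate[key], get_entry() never raises and never returns None: a path absent from the map comes back as a blank DirstateItem, playing the role of the old b'?' state, which is presumably why workingctx.__contains__ above can simply test .tracked. Sketch:

    entry = repo.dirstate.get_entry(b'no/such/file')
    # no KeyError and no None: a blank item stands in for the old b'?'
    if not entry.tracked:
        pass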
@@ -344,9 +371,6 b' class dirstate(object):'
344
371
345 iteritems = items
372 iteritems = items
346
373
347 def directories(self):
348 return self._map.directories()
349
350 def parents(self):
374 def parents(self):
351 return [self._validate(p) for p in self._pl]
375 return [self._validate(p) for p in self._pl]
352
376
@@ -385,32 +409,10 b' class dirstate(object):'
385 oldp2 = self._pl[1]
409 oldp2 = self._pl[1]
386 if self._origpl is None:
410 if self._origpl is None:
387 self._origpl = self._pl
411 self._origpl = self._pl
388 self._map.setparents(p1, p2)
412 nullid = self._nodeconstants.nullid
389 copies = {}
413 # True if we need to fold p2 related state back to a linear case
390 if (
414 fold_p2 = oldp2 != nullid and p2 == nullid
391 oldp2 != self._nodeconstants.nullid
415 return self._map.setparents(p1, p2, fold_p2=fold_p2)
392 and p2 == self._nodeconstants.nullid
393 ):
394 candidatefiles = self._map.non_normal_or_other_parent_paths()
395
396 for f in candidatefiles:
397 s = self._map.get(f)
398 if s is None:
399 continue
400
401 # Discard "merged" markers when moving away from a merge state
402 if s.merged:
403 source = self._map.copymap.get(f)
404 if source:
405 copies[f] = source
406 self._normallookup(f)
407 # Also fix up otherparent markers
408 elif s.from_p2:
409 source = self._map.copymap.get(f)
410 if source:
411 copies[f] = source
412 self._add(f)
413 return copies
414
416
415 def setbranch(self, branch):
417 def setbranch(self, branch):
416 self.__class__._branch.set(self, encoding.fromlocal(branch))
418 self.__class__._branch.set(self, encoding.fromlocal(branch))
@@ -438,9 +440,8 b' class dirstate(object):'
438 for a in ("_map", "_branch", "_ignore"):
440 for a in ("_map", "_branch", "_ignore"):
439 if a in self.__dict__:
441 if a in self.__dict__:
440 delattr(self, a)
442 delattr(self, a)
441 self._lastnormaltime = 0
443 self._lastnormaltime = timestamp.zero()
442 self._dirty = False
444 self._dirty = False
443 self._updatedfiles.clear()
444 self._parentwriters = 0
445 self._parentwriters = 0
445 self._origpl = None
446 self._origpl = None
446
447
@@ -451,10 +452,8 b' class dirstate(object):'
451 self._dirty = True
452 self._dirty = True
452 if source is not None:
453 if source is not None:
453 self._map.copymap[dest] = source
454 self._map.copymap[dest] = source
454 self._updatedfiles.add(source)
455 else:
455 self._updatedfiles.add(dest)
456 self._map.copymap.pop(dest, None)
456 elif self._map.copymap.pop(dest, None):
457 self._updatedfiles.add(dest)
458
457
459 def copied(self, file):
458 def copied(self, file):
460 return self._map.copymap.get(file, None)
459 return self._map.copymap.get(file, None)
@@ -471,18 +470,11 b' class dirstate(object):'
471
470
472 return True if the file was previously untracked, False otherwise.
471 return True the file was previously untracked, False otherwise.
473 """
472 """
473 self._dirty = True
474 entry = self._map.get(filename)
474 entry = self._map.get(filename)
475 if entry is None:
475 if entry is None or not entry.tracked:
476 self._add(filename)
476 self._check_new_tracked_filename(filename)
477 return True
477 return self._map.set_tracked(filename)
478 elif not entry.tracked:
479 self._normallookup(filename)
480 return True
481 # XXX This is probably overkill for more case, but we need this to
482 # fully replace the `normallookup` call with `set_tracked` one.
483 # Consider smoothing this in the future.
484 self.set_possibly_dirty(filename)
485 return False
486
478
487 @requires_no_parents_change
479 @requires_no_parents_change
488 def set_untracked(self, filename):
480 def set_untracked(self, filename):
@@ -493,28 +485,32 b' class dirstate(object):'
493
485
494 return True if the file was previously tracked, False otherwise.
486 return True if the file was previously tracked, False otherwise.
495 """
487 """
496 entry = self._map.get(filename)
488 ret = self._map.set_untracked(filename)
497 if entry is None:
489 if ret:
498 return False
490 self._dirty = True
499 elif entry.added:
491 return ret
500 self._drop(filename)
501 return True
502 else:
503 self._remove(filename)
504 return True
505
492
506 @requires_no_parents_change
493 @requires_no_parents_change
507 def set_clean(self, filename, parentfiledata=None):
494 def set_clean(self, filename, parentfiledata=None):
508 """record that the current state of the file on disk is known to be clean"""
495 """record that the current state of the file on disk is known to be clean"""
509 self._dirty = True
496 self._dirty = True
510 self._updatedfiles.add(filename)
497 if parentfiledata:
511 self._normal(filename, parentfiledata=parentfiledata)
498 (mode, size, mtime) = parentfiledata
499 else:
500 (mode, size, mtime) = self._get_filedata(filename)
501 if not self._map[filename].tracked:
502 self._check_new_tracked_filename(filename)
503 self._map.set_clean(filename, mode, size, mtime)
504 if mtime > self._lastnormaltime:
505 # Remember the most recent modification timeslot for status(),
506 # to make sure we won't miss future size-preserving file content
507 # modifications that happen within the same timeslot.
508 self._lastnormaltime = mtime
512
509
513 @requires_no_parents_change
510 @requires_no_parents_change
514 def set_possibly_dirty(self, filename):
511 def set_possibly_dirty(self, filename):
515 """record that the current state of the file on disk is unknown"""
512 """record that the current state of the file on disk is unknown"""
516 self._dirty = True
513 self._dirty = True
517 self._updatedfiles.add(filename)
518 self._map.set_possibly_dirty(filename)
514 self._map.set_possibly_dirty(filename)
519
515
520 @requires_parents_change
516 @requires_parents_change
@@ -539,35 +535,26 b' class dirstate(object):'
539 wc_tracked = False
535 wc_tracked = False
540 else:
536 else:
541 wc_tracked = entry.tracked
537 wc_tracked = entry.tracked
542 possibly_dirty = False
538 if not (p1_tracked or wc_tracked):
543 if p1_tracked and wc_tracked:
544 # the underlying reference might have changed, we will have to
545 # check it.
546 possibly_dirty = True
547 elif not (p1_tracked or wc_tracked):
548 # the file is no longer relevant to anyone
539 # the file is no longer relevant to anyone
549 self._drop(filename)
540 if self._map.get(filename) is not None:
541 self._map.reset_state(filename)
542 self._dirty = True
550 elif (not p1_tracked) and wc_tracked:
543 elif (not p1_tracked) and wc_tracked:
551 if entry is not None and entry.added:
544 if entry is not None and entry.added:
552 return # avoid dropping copy information (maybe?)
545 return # avoid dropping copy information (maybe?)
553 elif p1_tracked and not wc_tracked:
554 pass
555 else:
556 assert False, 'unreachable'
557
546
558 # this means the call is for a file whose data we do not really care
559 # about (eg: added or removed); however this should be a minor overhead
560 # compared to the overall update process calling this.
561 parentfiledata = None
547 parentfiledata = None
562 if wc_tracked:
548 if wc_tracked and p1_tracked:
563 parentfiledata = self._get_filedata(filename)
549 parentfiledata = self._get_filedata(filename)
564
550
565 self._updatedfiles.add(filename)
566 self._map.reset_state(
551 self._map.reset_state(
567 filename,
552 filename,
568 wc_tracked,
553 wc_tracked,
569 p1_tracked,
554 p1_tracked,
570 possibly_dirty=possibly_dirty,
555 # the underlying reference might have changed, we will have to
556 # check it.
557 has_meaningful_mtime=False,
571 parentfiledata=parentfiledata,
558 parentfiledata=parentfiledata,
572 )
559 )
573 if (
560 if (
@@ -585,10 +572,7 b' class dirstate(object):'
585 filename,
572 filename,
586 wc_tracked,
573 wc_tracked,
587 p1_tracked,
574 p1_tracked,
588 p2_tracked=False,
575 p2_info=False,
589 merged=False,
590 clean_p1=False,
591 clean_p2=False,
592 possibly_dirty=False,
576 possibly_dirty=False,
593 parentfiledata=None,
577 parentfiledata=None,
594 ):
578 ):
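update_file's five overlapping booleans collapse to two on the map side: p2_info summarizes the old p2_tracked/merged/clean_p2 cases, and has_meaningful_mtime replaces possibly_dirty with its sense inverted. Migration sketch for a reset_state call, following the hunks below:

    self._map.reset_state(
        filename,
        wc_tracked,
        p1_tracked,
        p2_info=p2_info,                          # was p2_tracked/merged/clean_p2
        has_meaningful_mtime=not possibly_dirty,  # was possibly_dirty=...
        parentfiledata=parentfiledata,
    )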
@@ -603,47 +587,26 b' class dirstate(object):'
603 depending of what information ends up being relevant and useful to
587 depending of what information ends up being relevant and useful to
604 other processing.
588 other processing.
605 """
589 """
606 if merged and (clean_p1 or clean_p2):
607 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
608 raise error.ProgrammingError(msg)
609
590
610 # note: I do not think we need to double check name clash here since we
591 # note: I do not think we need to double check name clash here since we
611 # are in a update/merge case that should already have taken care of
592 # are in a update/merge case that should already have taken care of
612 # this. The test agrees
593 # this. The test agrees
613
594
614 self._dirty = True
595 self._dirty = True
615 self._updatedfiles.add(filename)
616
596
617 need_parent_file_data = (
597 need_parent_file_data = (
618 not (possibly_dirty or clean_p2 or merged)
598 not possibly_dirty and not p2_info and wc_tracked and p1_tracked
619 and wc_tracked
620 and p1_tracked
621 )
599 )
622
600
623 # this means the call is for a file whose data we do not really care
601 if need_parent_file_data and parentfiledata is None:
624 # about (eg: added or removed); however this should be a minor overhead
602 parentfiledata = self._get_filedata(filename)
625 # compared to the overall update process calling this.
626 if need_parent_file_data:
627 if parentfiledata is None:
628 parentfiledata = self._get_filedata(filename)
629 mtime = parentfiledata[2]
630
631 if mtime > self._lastnormaltime:
632 # Remember the most recent modification timeslot for
633 # status(), to make sure we won't miss future
634 # size-preserving file content modifications that happen
635 # within the same timeslot.
636 self._lastnormaltime = mtime
637
603
638 self._map.reset_state(
604 self._map.reset_state(
639 filename,
605 filename,
640 wc_tracked,
606 wc_tracked,
641 p1_tracked,
607 p1_tracked,
642 p2_tracked=p2_tracked,
608 p2_info=p2_info,
643 merged=merged,
609 has_meaningful_mtime=not possibly_dirty,
644 clean_p1=clean_p1,
645 clean_p2=clean_p2,
646 possibly_dirty=possibly_dirty,
647 parentfiledata=parentfiledata,
610 parentfiledata=parentfiledata,
648 )
611 )
649 if (
612 if (
@@ -655,263 +618,30 b' class dirstate(object):'
655 # modifications that happen within the same timeslot.
618 # modifications that happen within the same timeslot.
656 self._lastnormaltime = parentfiledata[2]
619 self._lastnormaltime = parentfiledata[2]
657
620
658 def _addpath(
621 def _check_new_tracked_filename(self, filename):
659 self,
622 scmutil.checkfilename(filename)
660 f,
623 if self._map.hastrackeddir(filename):
661 mode=0,
624 msg = _(b'directory %r already in dirstate')
662 size=None,
625 msg %= pycompat.bytestr(filename)
663 mtime=None,
626 raise error.Abort(msg)
664 added=False,
627 # shadows
665 merged=False,
628 for d in pathutil.finddirs(filename):
666 from_p2=False,
629 if self._map.hastrackeddir(d):
667 possibly_dirty=False,
630 break
668 ):
631 entry = self._map.get(d)
669 entry = self._map.get(f)
632 if entry is not None and not entry.removed:
670 if added or entry is not None and entry.removed:
633 msg = _(b'file %r in dirstate clashes with %r')
671 scmutil.checkfilename(f)
634 msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
672 if self._map.hastrackeddir(f):
673 msg = _(b'directory %r already in dirstate')
674 msg %= pycompat.bytestr(f)
675 raise error.Abort(msg)
635 raise error.Abort(msg)
676 # shadows
677 for d in pathutil.finddirs(f):
678 if self._map.hastrackeddir(d):
679 break
680 entry = self._map.get(d)
681 if entry is not None and not entry.removed:
682 msg = _(b'file %r in dirstate clashes with %r')
683 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
684 raise error.Abort(msg)
685 self._dirty = True
686 self._updatedfiles.add(f)
687 self._map.addfile(
688 f,
689 mode=mode,
690 size=size,
691 mtime=mtime,
692 added=added,
693 merged=merged,
694 from_p2=from_p2,
695 possibly_dirty=possibly_dirty,
696 )
697
636
698 def _get_filedata(self, filename):
637 def _get_filedata(self, filename):
699 """returns"""
638 """returns"""
700 s = os.lstat(self._join(filename))
639 s = os.lstat(self._join(filename))
701 mode = s.st_mode
640 mode = s.st_mode
702 size = s.st_size
641 size = s.st_size
703 mtime = s[stat.ST_MTIME]
642 mtime = timestamp.mtime_of(s)
704 return (mode, size, mtime)
643 return (mode, size, mtime)
705
644
706 def normal(self, f, parentfiledata=None):
707 """Mark a file normal and clean.
708
709 parentfiledata: (mode, size, mtime) of the clean file
710
711 parentfiledata should be computed from memory (for mode,
712 size), as or close as possible from the point where we
713 determined the file was clean, to limit the risk of the
714 file having been changed by an external process between the
715 moment where the file was determined to be clean and now."""
716 if self.pendingparentchange():
717 util.nouideprecwarn(
718 b"do not use `normal` inside of update/merge context."
719 b" Use `update_file` or `update_file_p1`",
720 b'6.0',
721 stacklevel=2,
722 )
723 else:
724 util.nouideprecwarn(
725 b"do not use `normal` outside of update/merge context."
726 b" Use `set_tracked`",
727 b'6.0',
728 stacklevel=2,
729 )
730 self._normal(f, parentfiledata=parentfiledata)
731
732 def _normal(self, f, parentfiledata=None):
733 if parentfiledata:
734 (mode, size, mtime) = parentfiledata
735 else:
736 (mode, size, mtime) = self._get_filedata(f)
737 self._addpath(f, mode=mode, size=size, mtime=mtime)
738 self._map.copymap.pop(f, None)
739 if f in self._map.nonnormalset:
740 self._map.nonnormalset.remove(f)
741 if mtime > self._lastnormaltime:
742 # Remember the most recent modification timeslot for status(),
743 # to make sure we won't miss future size-preserving file content
744 # modifications that happen within the same timeslot.
745 self._lastnormaltime = mtime
746
747 def normallookup(self, f):
748 '''Mark a file normal, but possibly dirty.'''
749 if self.pendingparentchange():
750 util.nouideprecwarn(
751 b"do not use `normallookup` inside of update/merge context."
752 b" Use `update_file` or `update_file_p1`",
753 b'6.0',
754 stacklevel=2,
755 )
756 else:
757 util.nouideprecwarn(
758 b"do not use `normallookup` outside of update/merge context."
759 b" Use `set_possibly_dirty` or `set_tracked`",
760 b'6.0',
761 stacklevel=2,
762 )
763 self._normallookup(f)
764
765 def _normallookup(self, f):
766 '''Mark a file normal, but possibly dirty.'''
767 if self.in_merge:
768 # if there is a merge going on and the file was either
769 # "merged" or coming from other parent (-2) before
770 # being removed, restore that state.
771 entry = self._map.get(f)
772 if entry is not None:
773 # XXX this should probably be dealt with at a lower level
774 # (see `merged_removed` and `from_p2_removed`)
775 if entry.merged_removed or entry.from_p2_removed:
776 source = self._map.copymap.get(f)
777 if entry.merged_removed:
778 self._merge(f)
779 elif entry.from_p2_removed:
780 self._otherparent(f)
781 if source is not None:
782 self.copy(source, f)
783 return
784 elif entry.merged or entry.from_p2:
785 return
786 self._addpath(f, possibly_dirty=True)
787 self._map.copymap.pop(f, None)
788
789 def otherparent(self, f):
790 '''Mark as coming from the other parent, always dirty.'''
791 if self.pendingparentchange():
792 util.nouideprecwarn(
793 b"do not use `otherparent` inside of update/merge context."
794 b" Use `update_file` or `update_file_p1`",
795 b'6.0',
796 stacklevel=2,
797 )
798 else:
799 util.nouideprecwarn(
800 b"do not use `otherparent` outside of update/merge context."
801 b"It should have been set by the update/merge code",
802 b'6.0',
803 stacklevel=2,
804 )
805 self._otherparent(f)
806
807 def _otherparent(self, f):
808 if not self.in_merge:
809 msg = _(b"setting %r to other parent only allowed in merges") % f
810 raise error.Abort(msg)
811 entry = self._map.get(f)
812 if entry is not None and entry.tracked:
813 # merge-like
814 self._addpath(f, merged=True)
815 else:
816 # add-like
817 self._addpath(f, from_p2=True)
818 self._map.copymap.pop(f, None)
819
820 def add(self, f):
821 '''Mark a file added.'''
822 if self.pendingparentchange():
823 util.nouideprecwarn(
824 b"do not use `add` inside of update/merge context."
825 b" Use `update_file`",
826 b'6.0',
827 stacklevel=2,
828 )
829 else:
830 util.nouideprecwarn(
831 b"do not use `add` outside of update/merge context."
832 b" Use `set_tracked`",
833 b'6.0',
834 stacklevel=2,
835 )
836 self._add(f)
837
838 def _add(self, filename):
839 """internal function to mark a file as added"""
840 self._addpath(filename, added=True)
841 self._map.copymap.pop(filename, None)
842
843 def remove(self, f):
844 '''Mark a file removed'''
845 if self.pendingparentchange():
846 util.nouideprecwarn(
847 b"do not use `remove` insde of update/merge context."
848 b" Use `update_file` or `update_file_p1`",
849 b'6.0',
850 stacklevel=2,
851 )
852 else:
853 util.nouideprecwarn(
854 b"do not use `remove` outside of update/merge context."
855 b" Use `set_untracked`",
856 b'6.0',
857 stacklevel=2,
858 )
859 self._remove(f)
860
861 def _remove(self, filename):
862 """internal function to mark a file removed"""
863 self._dirty = True
864 self._updatedfiles.add(filename)
865 self._map.removefile(filename, in_merge=self.in_merge)
866
867 def merge(self, f):
868 '''Mark a file merged.'''
869 if self.pendingparentchange():
870 util.nouideprecwarn(
871 b"do not use `merge` inside of update/merge context."
872 b" Use `update_file`",
873 b'6.0',
874 stacklevel=2,
875 )
876 else:
877 util.nouideprecwarn(
878 b"do not use `merge` outside of update/merge context."
879 b"It should have been set by the update/merge code",
880 b'6.0',
881 stacklevel=2,
882 )
883 self._merge(f)
884
885 def _merge(self, f):
886 if not self.in_merge:
887 return self._normallookup(f)
888 return self._otherparent(f)
889
890 def drop(self, f):
891 '''Drop a file from the dirstate'''
892 if self.pendingparentchange():
893 util.nouideprecwarn(
894 b"do not use `drop` inside of update/merge context."
895 b" Use `update_file`",
896 b'6.0',
897 stacklevel=2,
898 )
899 else:
900 util.nouideprecwarn(
901 b"do not use `drop` outside of update/merge context."
902 b" Use `set_untracked`",
903 b'6.0',
904 stacklevel=2,
905 )
906 self._drop(f)
907
908 def _drop(self, filename):
909 """internal function to drop a file from the dirstate"""
910 if self._map.dropfile(filename):
911 self._dirty = True
912 self._updatedfiles.add(filename)
913 self._map.copymap.pop(filename, None)
914
915 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
645 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
916 if exists is None:
646 if exists is None:
917 exists = os.path.lexists(os.path.join(self._root, path))
647 exists = os.path.lexists(os.path.join(self._root, path))
@@ -990,8 +720,7 b' class dirstate(object):'
990
720
991 def clear(self):
721 def clear(self):
992 self._map.clear()
722 self._map.clear()
993 self._lastnormaltime = 0
723 self._lastnormaltime = timestamp.zero()
994 self._updatedfiles.clear()
995 self._dirty = True
724 self._dirty = True
996
725
997 def rebuild(self, parent, allfiles, changedfiles=None):
726 def rebuild(self, parent, allfiles, changedfiles=None):
@@ -1022,9 +751,17 b' class dirstate(object):'
1022 self._map.setparents(parent, self._nodeconstants.nullid)
751 self._map.setparents(parent, self._nodeconstants.nullid)
1023
752
1024 for f in to_lookup:
753 for f in to_lookup:
1025 self._normallookup(f)
754
755 if self.in_merge:
756 self.set_tracked(f)
757 else:
758 self._map.reset_state(
759 f,
760 wc_tracked=True,
761 p1_tracked=True,
762 )
1026 for f in to_drop:
763 for f in to_drop:
1027 self._drop(f)
764 self._map.reset_state(f)
1028
765
1029 self._dirty = True
766 self._dirty = True
1030
767
@@ -1048,19 +785,14 b' class dirstate(object):'
1048 # See also the wiki page below for detail:
785 # See also the wiki page below for detail:
1049 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
786 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
1050
787
1051 # emulate dropping timestamp in 'parsers.pack_dirstate'
788 # record when mtime starts to be ambiguous
1052 now = _getfsnow(self._opener)
789 now = _getfsnow(self._opener)
1053 self._map.clearambiguoustimes(self._updatedfiles, now)
1054
1055 # emulate that all 'dirstate.normal' results are written out
1056 self._lastnormaltime = 0
1057 self._updatedfiles.clear()
1058
790
1059 # delay writing in-memory changes out
791 # delay writing in-memory changes out
1060 tr.addfilegenerator(
792 tr.addfilegenerator(
1061 b'dirstate',
793 b'dirstate',
1062 (self._filename,),
794 (self._filename,),
1063 lambda f: self._writedirstate(tr, f),
795 lambda f: self._writedirstate(tr, f, now=now),
1064 location=b'plain',
796 location=b'plain',
1065 )
797 )
1066 return
798 return
@@ -1079,7 +811,7 b' class dirstate(object):'
1079 """
811 """
1080 self._plchangecallbacks[category] = callback
812 self._plchangecallbacks[category] = callback
1081
813
1082 def _writedirstate(self, tr, st):
814 def _writedirstate(self, tr, st, now=None):
1083 # notify callbacks about parents change
815 # notify callbacks about parents change
1084 if self._origpl is not None and self._origpl != self._pl:
816 if self._origpl is not None and self._origpl != self._pl:
1085 for c, callback in sorted(
817 for c, callback in sorted(
@@ -1087,9 +819,11 b' class dirstate(object):'
1087 ):
819 ):
1088 callback(self, self._origpl, self._pl)
820 callback(self, self._origpl, self._pl)
1089 self._origpl = None
821 self._origpl = None
1090 # use the modification time of the newly created temporary file as the
822
1091 # filesystem's notion of 'now'
823 if now is None:
1092 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
824 # use the modification time of the newly created temporary file as the
825 # filesystem's notion of 'now'
826 now = timestamp.mtime_of(util.fstat(st))
1093
827
1094 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
828 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
1095 # timestamp of each entries in dirstate, because of 'now > mtime'
829 # timestamp of each entries in dirstate, because of 'now > mtime'
@@ -1106,11 +840,12 b' class dirstate(object):'
1106 start = int(clock) - (int(clock) % delaywrite)
840 start = int(clock) - (int(clock) % delaywrite)
1107 end = start + delaywrite
841 end = start + delaywrite
1108 time.sleep(end - clock)
842 time.sleep(end - clock)
1109 now = end # trust our estimate that the end is near now
843 # trust our estimate that the end is near now
844 now = timestamp.timestamp((end, 0))
1110 break
845 break
1111
846
1112 self._map.write(tr, st, now)
847 self._map.write(tr, st, now)
1113 self._lastnormaltime = 0
848 self._lastnormaltime = timestamp.zero()
1114 self._dirty = False
849 self._dirty = False
1115
850
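The `delaywrite` rounding above snaps `now` to the end of the current timeslot; with illustrative numbers:

    delaywrite = 2                                  # seconds, from config
    clock = 5.3                                     # current filesystem time
    start = int(clock) - (int(clock) % delaywrite)  # 5 - 1 = 4
    end = start + delaywrite                        # 6
    # sleep(end - clock) waits 0.7s; then now = timestamp.timestamp((6, 0)),
    # a whole-second stamp at or past every mtime written so far.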
1116 def _dirignore(self, f):
851 def _dirignore(self, f):
@@ -1503,7 +1238,7 b' class dirstate(object):'
1503 traversed,
1238 traversed,
1504 dirty,
1239 dirty,
1505 ) = rustmod.status(
1240 ) = rustmod.status(
1506 self._map._rustmap,
1241 self._map._map,
1507 matcher,
1242 matcher,
1508 self._rootdir,
1243 self._rootdir,
1509 self._ignorefiles(),
1244 self._ignorefiles(),
@@ -1624,6 +1359,7 b' class dirstate(object):'
1624 mexact = match.exact
1359 mexact = match.exact
1625 dirignore = self._dirignore
1360 dirignore = self._dirignore
1626 checkexec = self._checkexec
1361 checkexec = self._checkexec
1362 checklink = self._checklink
1627 copymap = self._map.copymap
1363 copymap = self._map.copymap
1628 lastnormaltime = self._lastnormaltime
1364 lastnormaltime = self._lastnormaltime
1629
1365
@@ -1643,34 +1379,35 b' class dirstate(object):'
1643 uadd(fn)
1379 uadd(fn)
1644 continue
1380 continue
1645
1381
1646 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1647 # written like that for performance reasons. dmap[fn] is not a
1648 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1649 # opcode has fast paths when the value to be unpacked is a tuple or
1650 # a list, but falls back to creating a full-fledged iterator in
1651 # general. That is much slower than simply accessing and storing the
1652 # tuple members one by one.
1653 t = dget(fn)
1382 t = dget(fn)
1654 mode = t.mode
1383 mode = t.mode
1655 size = t.size
1384 size = t.size
1656 time = t.mtime
1657
1385
1658 if not st and t.tracked:
1386 if not st and t.tracked:
1659 dadd(fn)
1387 dadd(fn)
1660 elif t.merged:
1388 elif t.p2_info:
1661 madd(fn)
1389 madd(fn)
1662 elif t.added:
1390 elif t.added:
1663 aadd(fn)
1391 aadd(fn)
1664 elif t.removed:
1392 elif t.removed:
1665 radd(fn)
1393 radd(fn)
1666 elif t.tracked:
1394 elif t.tracked:
1667 if (
1395 if not checklink and t.has_fallback_symlink:
1396 # If the file system does not support symlink, the mode
1397 # might not be correctly stored in the dirstate, so do not
1398 # trust it.
1399 ladd(fn)
1400 elif not checkexec and t.has_fallback_exec:
1401 # If the file system does not support exec bits, the mode
1402 # might not be correctly stored in the dirstate, so do not
1403 # trust it.
1404 ladd(fn)
1405 elif (
1668 size >= 0
1406 size >= 0
1669 and (
1407 and (
1670 (size != st.st_size and size != st.st_size & _rangemask)
1408 (size != st.st_size and size != st.st_size & _rangemask)
1671 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1409 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1672 )
1410 )
1673 or t.from_p2
1674 or fn in copymap
1411 or fn in copymap
1675 ):
1412 ):
1676 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1413 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
@@ -1679,12 +1416,9 b' class dirstate(object):'
1679 ladd(fn)
1416 ladd(fn)
1680 else:
1417 else:
1681 madd(fn)
1418 madd(fn)
1682 elif (
1419 elif not t.mtime_likely_equal_to(timestamp.mtime_of(st)):
1683 time != st[stat.ST_MTIME]
1684 and time != st[stat.ST_MTIME] & _rangemask
1685 ):
1686 ladd(fn)
1420 ladd(fn)
1687 elif st[stat.ST_MTIME] == lastnormaltime:
1421 elif timestamp.mtime_of(st) == lastnormaltime:
1688 # fn may have just been marked as normal and it may have
1422 # fn may have just been marked as normal and it may have
1689 # changed in the same second without changing its size.
1423 # changed in the same second without changing its size.
1690 # This can happen if we quickly do multiple commits.
1424 # This can happen if we quickly do multiple commits.
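The two branches above replace the old integer mtime test with the `timestamp` helpers used throughout this change; schematically (`t` is the `DirstateItem`, `st` the `os.stat_result`):

    file_mtime = timestamp.mtime_of(st)   # (seconds & rangemask, subsec nanos)
    if not t.mtime_likely_equal_to(file_mtime):
        ladd(fn)                          # mtime changed: needs a content check
    elif file_mtime == lastnormaltime:
        ladd(fn)                          # written within the ambiguous timeslot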
@@ -1703,7 +1437,7 b' class dirstate(object):'
1703 """
1437 """
1704 dmap = self._map
1438 dmap = self._map
1705 if rustmod is not None:
1439 if rustmod is not None:
1706 dmap = self._map._rustmap
1440 dmap = self._map._map
1707
1441
1708 if match.always():
1442 if match.always():
1709 return dmap.keys()
1443 return dmap.keys()
@@ -1778,3 +1512,22 b' class dirstate(object):'
1778 def clearbackup(self, tr, backupname):
1512 def clearbackup(self, tr, backupname):
1779 '''Clear backup file'''
1513 '''Clear backup file'''
1780 self._opener.unlink(backupname)
1514 self._opener.unlink(backupname)
1515
1516 def verify(self, m1, m2):
1517 """check the dirstate content again the parent manifest and yield errors"""
1518 missing_from_p1 = b"%s in state %s, but not in manifest1\n"
1519 unexpected_in_p1 = b"%s in state %s, but also in manifest1\n"
1520 missing_from_ps = b"%s in state %s, but not in either manifest\n"
1521 missing_from_ds = b"%s in manifest1, but listed as state %s\n"
1522 for f, entry in self.items():
1523 state = entry.state
1524 if state in b"nr" and f not in m1:
1525 yield (missing_from_p1, f, state)
1526 if state in b"a" and f in m1:
1527 yield (unexpected_in_p1, f, state)
1528 if state in b"m" and f not in m1 and f not in m2:
1529 yield (missing_from_ps, f, state)
1530 for f in m1:
1531 state = self.get_entry(f).state
1532 if state not in b"nrm":
1533 yield (missing_from_ds, f, state)
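A hedged usage sketch for the new `verify` helper (the `repo`/`ui` plumbing is assumed here, e.g. inside a debug command, and is not part of this change):

    # m1/m2 are the manifests of the working directory's parents.
    m1 = repo[repo.dirstate.p1()].manifest()
    m2 = repo[repo.dirstate.p2()].manifest()
    for fmt, f, state in repo.dirstate.verify(m1, m2):
        ui.warn(fmt % (f, state))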
@@ -20,6 +20,7 b' from . import ('
20
20
21 from .dirstateutils import (
21 from .dirstateutils import (
22 docket as docketmod,
22 docket as docketmod,
23 v2,
23 )
24 )
24
25
25 parsers = policy.importmod('parsers')
26 parsers = policy.importmod('parsers')
@@ -27,22 +28,276 b" rustmod = policy.importrust('dirstate')"
27
28
28 propertycache = util.propertycache
29 propertycache = util.propertycache
29
30
30 DirstateItem = parsers.DirstateItem
31 if rustmod is None:
31
32 DirstateItem = parsers.DirstateItem
32
33 else:
33 # a special value used internally for `size` if the file comes from the other parent
34 DirstateItem = rustmod.DirstateItem
34 FROM_P2 = -2
35
36 # a special value used internally for `size` if the file is modified/merged/added
37 NONNORMAL = -1
38
39 # a special value used internally for `time` if the time is ambiguous
40 AMBIGUOUS_TIME = -1
41
35
42 rangemask = 0x7FFFFFFF
36 rangemask = 0x7FFFFFFF
43
37
44
38
45 class dirstatemap(object):
39 class _dirstatemapcommon(object):
40 """
41 Methods that are identical for both implementations of the dirstatemap
42 class, with and without Rust extensions enabled.
43 """
44
45 # please pytype
46
47 _map = None
48 copymap = None
49
50 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
51 self._use_dirstate_v2 = use_dirstate_v2
52 self._nodeconstants = nodeconstants
53 self._ui = ui
54 self._opener = opener
55 self._root = root
56 self._filename = b'dirstate'
57 self._nodelen = 20 # Also update Rust code when changing this!
58 self._parents = None
59 self._dirtyparents = False
60 self._docket = None
61
62 # for consistent view between _pl() and _read() invocations
63 self._pendingmode = None
64
65 def preload(self):
66 """Loads the underlying data, if it's not already loaded"""
67 self._map
68
69 def get(self, key, default=None):
70 return self._map.get(key, default)
71
72 def __len__(self):
73 return len(self._map)
74
75 def __iter__(self):
76 return iter(self._map)
77
78 def __contains__(self, key):
79 return key in self._map
80
81 def __getitem__(self, item):
82 return self._map[item]
83
84 ### sub-class utility method
85 #
86 # Used to allow a generic implementation of some methods while still
87 # coping with minor differences between the implementations.
88
89 def _dirs_incr(self, filename, old_entry=None):
90 """incremente the dirstate counter if applicable
91
92 This might be a no-op for some subclass who deal with directory
93 tracking in a different way.
94 """
95
96 def _dirs_decr(self, filename, old_entry=None, remove_variant=False):
97 """decremente the dirstate counter if applicable
98
99 This might be a no-op for some subclass who deal with directory
100 tracking in a different way.
101 """
102
103 def _refresh_entry(self, f, entry):
104 """record updated state of an entry"""
105
106 def _insert_entry(self, f, entry):
107 """add a new dirstate entry (or replace an unrelated one)
108
109 The fact that it is actually new is the responsibility of the caller
110 """
111
112 def _drop_entry(self, f):
113 """remove any entry for file f
114
115 This should also drop associated copy information
116
117 The fact that we actually need to drop it is the responsibility of the caller"""
118
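Each backend overrides only these storage primitives while inheriting the logic above; a toy illustration of the expected shape (hypothetical dict-backed subclass, mirroring the pure-Python overrides further down):

    class _toymap(_dirstatemapcommon):
        def __init__(self, *args):
            super(_toymap, self).__init__(*args)
            self._map = {}
            self.copymap = {}

        def _refresh_entry(self, f, entry):
            if not entry.any_tracked:
                self._map.pop(f, None)

        def _insert_entry(self, f, entry):
            self._map[f] = entry

        def _drop_entry(self, f):
            self._map.pop(f, None)
            self.copymap.pop(f, None)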
119 ### method to manipulate the entries
120
121 def set_possibly_dirty(self, filename):
122 """record that the current state of the file on disk is unknown"""
123 entry = self[filename]
124 entry.set_possibly_dirty()
125 self._refresh_entry(filename, entry)
126
127 def set_clean(self, filename, mode, size, mtime):
128 """mark a file as back to a clean state"""
129 entry = self[filename]
130 size = size & rangemask
131 entry.set_clean(mode, size, mtime)
132 self._refresh_entry(filename, entry)
133 self.copymap.pop(filename, None)
134
135 def set_tracked(self, filename):
136 new = False
137 entry = self.get(filename)
138 if entry is None:
139 self._dirs_incr(filename)
140 entry = DirstateItem(
141 wc_tracked=True,
142 )
143
144 self._insert_entry(filename, entry)
145 new = True
146 elif not entry.tracked:
147 self._dirs_incr(filename, entry)
148 entry.set_tracked()
149 self._refresh_entry(filename, entry)
150 new = True
151 else:
152 # XXX This is probably overkill for most cases, but we need this to
153 # fully replace the `normallookup` call with the `set_tracked` one.
154 # Consider smoothing this in the future.
155 entry.set_possibly_dirty()
156 self._refresh_entry(filename, entry)
157 return new
158
159 def set_untracked(self, f):
160 """Mark a file as no longer tracked in the dirstate map"""
161 entry = self.get(f)
162 if entry is None:
163 return False
164 else:
165 self._dirs_decr(f, old_entry=entry, remove_variant=not entry.added)
166 if not entry.p2_info:
167 self.copymap.pop(f, None)
168 entry.set_untracked()
169 self._refresh_entry(f, entry)
170 return True
171
172 def reset_state(
173 self,
174 filename,
175 wc_tracked=False,
176 p1_tracked=False,
177 p2_info=False,
178 has_meaningful_mtime=True,
179 has_meaningful_data=True,
180 parentfiledata=None,
181 ):
182 """Set a entry to a given state, diregarding all previous state
183
184 This is to be used by the part of the dirstate API dedicated to
185 adjusting the dirstate after a update/merge.
186
187 note: calling this might result to no entry existing at all if the
188 dirstate map does not see any point at having one for this file
189 anymore.
190 """
191 # copy information is now outdated
192 # (maybe new information should be passed directly to this function)
193 self.copymap.pop(filename, None)
194
195 if not (p1_tracked or p2_info or wc_tracked):
196 old_entry = self._map.get(filename)
197 self._drop_entry(filename)
198 self._dirs_decr(filename, old_entry=old_entry)
199 return
200
201 old_entry = self._map.get(filename)
202 self._dirs_incr(filename, old_entry)
203 entry = DirstateItem(
204 wc_tracked=wc_tracked,
205 p1_tracked=p1_tracked,
206 p2_info=p2_info,
207 has_meaningful_mtime=has_meaningful_mtime,
208 parentfiledata=parentfiledata,
209 )
210 self._insert_entry(filename, entry)
211
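A sketch of the calling convention for `reset_state` (values illustrative; `parentfiledata` is the `(mode, size, mtime)` triple when known):

    # clean in the working copy and in p1:
    dmap.reset_state(f, wc_tracked=True, p1_tracked=True,
                     parentfiledata=(mode, size, mtime))
    # only in the working copy (previously the 'added' state):
    dmap.reset_state(f, wc_tracked=True)
    # forget everything about the file:
    dmap.reset_state(f)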
212 ### disk interaction
213
214 def _opendirstatefile(self):
215 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
216 if self._pendingmode is not None and self._pendingmode != mode:
217 fp.close()
218 raise error.Abort(
219 _(b'working directory state may be changed parallelly')
220 )
221 self._pendingmode = mode
222 return fp
223
224 def _readdirstatefile(self, size=-1):
225 try:
226 with self._opendirstatefile() as fp:
227 return fp.read(size)
228 except IOError as err:
229 if err.errno != errno.ENOENT:
230 raise
231 # File doesn't exist, so the current state is empty
232 return b''
233
234 @property
235 def docket(self):
236 if not self._docket:
237 if not self._use_dirstate_v2:
238 raise error.ProgrammingError(
239 b'dirstate only has a docket in v2 format'
240 )
241 self._docket = docketmod.DirstateDocket.parse(
242 self._readdirstatefile(), self._nodeconstants
243 )
244 return self._docket
245
246 def write_v2_no_append(self, tr, st, meta, packed):
247 old_docket = self.docket
248 new_docket = docketmod.DirstateDocket.with_new_uuid(
249 self.parents(), len(packed), meta
250 )
251 data_filename = new_docket.data_filename()
252 if tr:
253 tr.add(data_filename, 0)
254 self._opener.write(data_filename, packed)
255 # Write the new docket after the new data file has been
256 # written. Because `st` was opened with `atomictemp=True`,
257 # the actual `.hg/dirstate` file is only affected on close.
258 st.write(new_docket.serialize())
259 st.close()
260 # Remove the old data file after the new docket pointing to
261 # the new data file was written.
262 if old_docket.uuid:
263 data_filename = old_docket.data_filename()
264 unlink = lambda _tr=None: self._opener.unlink(data_filename)
265 if tr:
266 category = b"dirstate-v2-clean-" + old_docket.uuid
267 tr.addpostclose(category, unlink)
268 else:
269 unlink()
270 self._docket = new_docket
271
272 ### reading/setting parents
273
274 def parents(self):
275 if not self._parents:
276 if self._use_dirstate_v2:
277 self._parents = self.docket.parents
278 else:
279 read_len = self._nodelen * 2
280 st = self._readdirstatefile(read_len)
281 l = len(st)
282 if l == read_len:
283 self._parents = (
284 st[: self._nodelen],
285 st[self._nodelen : 2 * self._nodelen],
286 )
287 elif l == 0:
288 self._parents = (
289 self._nodeconstants.nullid,
290 self._nodeconstants.nullid,
291 )
292 else:
293 raise error.Abort(
294 _(b'working directory state appears damaged!')
295 )
296
297 return self._parents
298
299
300 class dirstatemap(_dirstatemapcommon):
46 """Map encapsulating the dirstate's contents.
301 """Map encapsulating the dirstate's contents.
47
302
48 The dirstate contains the following state:
303 The dirstate contains the following state:
@@ -56,19 +311,19 b' class dirstatemap(object):'
56 - the state map maps filenames to tuples of (state, mode, size, mtime),
311 - the state map maps filenames to tuples of (state, mode, size, mtime),
57 where state is a single character representing 'normal', 'added',
312 where state is a single character representing 'normal', 'added',
58 'removed', or 'merged'. It is read by treating the dirstate as a
313 'removed', or 'merged'. It is read by treating the dirstate as a
59 dict. File state is updated by calling the `addfile`, `removefile` and
314 dict. File state is updated by calling various methods (see each
60 `dropfile` methods.
315 documentation for details):
316
317 - `reset_state`,
318 - `set_tracked`
319 - `set_untracked`
320 - `set_clean`
321 - `set_possibly_dirty`
61
322
62 - `copymap` maps destination filenames to their source filename.
323 - `copymap` maps destination filenames to their source filename.
63
324
64 The dirstate also provides the following views onto the state:
325 The dirstate also provides the following views onto the state:
65
326
66 - `nonnormalset` is a set of the filenames that have state other
67 than 'normal', or are normal but have an mtime of -1 ('normallookup').
68
69 - `otherparentset` is a set of the filenames that are marked as coming
70 from the second parent when the dirstate is currently being merged.
71
72 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
327 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
73 form that they appear as in the dirstate.
328 form that they appear as in the dirstate.
74
329
@@ -76,22 +331,7 b' class dirstatemap(object):'
76 denormalized form that they appear as in the dirstate.
331 denormalized form that they appear as in the dirstate.
77 """
332 """
78
333
79 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
334 ### Core data storage and access
80 self._ui = ui
81 self._opener = opener
82 self._root = root
83 self._filename = b'dirstate'
84 self._nodelen = 20
85 self._nodeconstants = nodeconstants
86 assert (
87 not use_dirstate_v2
88 ), "should have detected unsupported requirement"
89
90 self._parents = None
91 self._dirtyparents = False
92
93 # for consistent view between _pl() and _read() invocations
94 self._pendingmode = None
95
335
96 @propertycache
336 @propertycache
97 def _map(self):
337 def _map(self):
@@ -113,8 +353,6 b' class dirstatemap(object):'
113 util.clearcachedproperty(self, b"_alldirs")
353 util.clearcachedproperty(self, b"_alldirs")
114 util.clearcachedproperty(self, b"filefoldmap")
354 util.clearcachedproperty(self, b"filefoldmap")
115 util.clearcachedproperty(self, b"dirfoldmap")
355 util.clearcachedproperty(self, b"dirfoldmap")
116 util.clearcachedproperty(self, b"nonnormalset")
117 util.clearcachedproperty(self, b"otherparentset")
118
356
119 def items(self):
357 def items(self):
120 return pycompat.iteritems(self._map)
358 return pycompat.iteritems(self._map)
@@ -122,29 +360,109 b' class dirstatemap(object):'
122 # forward for python2,3 compat
360 # forward for python2,3 compat
123 iteritems = items
361 iteritems = items
124
362
125 debug_iter = items
363 def debug_iter(self, all):
126
364 """
127 def __len__(self):
365 Return an iterator of (filename, state, mode, size, mtime) tuples
128 return len(self._map)
129
130 def __iter__(self):
131 return iter(self._map)
132
366
133 def get(self, key, default=None):
367 `all` is unused when Rust is not enabled
134 return self._map.get(key, default)
368 """
135
369 for (filename, item) in self.items():
136 def __contains__(self, key):
370 yield (filename, item.state, item.mode, item.size, item.mtime)
137 return key in self._map
138
139 def __getitem__(self, key):
140 return self._map[key]
141
371
142 def keys(self):
372 def keys(self):
143 return self._map.keys()
373 return self._map.keys()
144
374
145 def preload(self):
375 ### reading/setting parents
146 """Loads the underlying data, if it's not already loaded"""
376
377 def setparents(self, p1, p2, fold_p2=False):
378 self._parents = (p1, p2)
379 self._dirtyparents = True
380 copies = {}
381 if fold_p2:
382 for f, s in pycompat.iteritems(self._map):
383 # Discard "merged" markers when moving away from a merge state
384 if s.p2_info:
385 source = self.copymap.pop(f, None)
386 if source:
387 copies[f] = source
388 s.drop_merge_data()
389 return copies
390
391 ### disk interaction
392
393 def read(self):
394 # ignore HG_PENDING because identity is used only for writing
395 self.identity = util.filestat.frompath(
396 self._opener.join(self._filename)
397 )
398
399 if self._use_dirstate_v2:
400 if not self.docket.uuid:
401 return
402 st = self._opener.read(self.docket.data_filename())
403 else:
404 st = self._readdirstatefile()
405
406 if not st:
407 return
408
409 # TODO: adjust this estimate for dirstate-v2
410 if util.safehasattr(parsers, b'dict_new_presized'):
411 # Make an estimate of the number of files in the dirstate based on
412 # its size. This trades wasting some memory for avoiding costly
413 # resizes. Each entry has a prefix of 17 bytes followed by one or
414 # two path names. Studies on various large-scale real-world repositories
415 # found 54 bytes to be a reasonable upper limit for the average path name.
416 # Copy entries are ignored for the sake of this estimate.
417 self._map = parsers.dict_new_presized(len(st) // 71)
418
419 # Python's garbage collector triggers a GC each time a certain number
420 # of container objects (the number being defined by
421 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
422 # for each file in the dirstate. The C version then immediately marks
423 # them as not to be tracked by the collector. However, this has no
424 # effect on when GCs are triggered, only on what objects the GC looks
425 # into. This means that O(number of files) GCs are unavoidable.
426 # Depending on when in the process's lifetime the dirstate is parsed,
427 # this can get very expensive. As a workaround, disable GC while
428 # parsing the dirstate.
429 #
430 # (we cannot decorate the function directly since it is in a C module)
431 if self._use_dirstate_v2:
432 p = self.docket.parents
433 meta = self.docket.tree_metadata
434 parse_dirstate = util.nogc(v2.parse_dirstate)
435 parse_dirstate(self._map, self.copymap, st, meta)
436 else:
437 parse_dirstate = util.nogc(parsers.parse_dirstate)
438 p = parse_dirstate(self._map, self.copymap, st)
439 if not self._dirtyparents:
440 self.setparents(*p)
441
442 # Avoid excess attribute lookups by fast pathing certain checks
443 self.__contains__ = self._map.__contains__
444 self.__getitem__ = self._map.__getitem__
445 self.get = self._map.get
446
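The magic divisor in the presizing above is just the sum of the two estimates quoted in the comment (assuming those measured averages hold):

    prefix_bytes = 17                  # fixed per-entry header in v1
    avg_path_bytes = 54                # average path-name upper bound
    assert prefix_bytes + avg_path_bytes == 71
    estimated_entries = len(st) // 71  # as used by dict_new_presized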
447 def write(self, tr, st, now):
448 if self._use_dirstate_v2:
449 packed, meta = v2.pack_dirstate(self._map, self.copymap, now)
450 self.write_v2_no_append(tr, st, meta, packed)
451 else:
452 packed = parsers.pack_dirstate(
453 self._map, self.copymap, self.parents(), now
454 )
455 st.write(packed)
456 st.close()
457 self._dirtyparents = False
458
459 @propertycache
460 def identity(self):
147 self._map
461 self._map
462 return self.identity
463
464 ### code related to maintaining and accessing "extra" property
465 # (e.g. "has_dir")
148
466
149 def _dirs_incr(self, filename, old_entry=None):
467 def _dirs_incr(self, filename, old_entry=None):
150 """incremente the dirstate counter if applicable"""
468 """incremente the dirstate counter if applicable"""
@@ -168,200 +486,6 b' class dirstatemap(object):'
168 normed = util.normcase(filename)
486 normed = util.normcase(filename)
169 self.filefoldmap.pop(normed, None)
487 self.filefoldmap.pop(normed, None)
170
488
171 def set_possibly_dirty(self, filename):
172 """record that the current state of the file on disk is unknown"""
173 self[filename].set_possibly_dirty()
174
175 def addfile(
176 self,
177 f,
178 mode=0,
179 size=None,
180 mtime=None,
181 added=False,
182 merged=False,
183 from_p2=False,
184 possibly_dirty=False,
185 ):
186 """Add a tracked file to the dirstate."""
187 if added:
188 assert not merged
189 assert not possibly_dirty
190 assert not from_p2
191 state = b'a'
192 size = NONNORMAL
193 mtime = AMBIGUOUS_TIME
194 elif merged:
195 assert not possibly_dirty
196 assert not from_p2
197 state = b'm'
198 size = FROM_P2
199 mtime = AMBIGUOUS_TIME
200 elif from_p2:
201 assert not possibly_dirty
202 state = b'n'
203 size = FROM_P2
204 mtime = AMBIGUOUS_TIME
205 elif possibly_dirty:
206 state = b'n'
207 size = NONNORMAL
208 mtime = AMBIGUOUS_TIME
209 else:
210 assert size != FROM_P2
211 assert size != NONNORMAL
212 assert size is not None
213 assert mtime is not None
214
215 state = b'n'
216 size = size & rangemask
217 mtime = mtime & rangemask
218 assert state is not None
219 assert size is not None
220 assert mtime is not None
221 old_entry = self.get(f)
222 self._dirs_incr(f, old_entry)
223 e = self._map[f] = DirstateItem(state, mode, size, mtime)
224 if e.dm_nonnormal:
225 self.nonnormalset.add(f)
226 if e.dm_otherparent:
227 self.otherparentset.add(f)
228
229 def reset_state(
230 self,
231 filename,
232 wc_tracked,
233 p1_tracked,
234 p2_tracked=False,
235 merged=False,
236 clean_p1=False,
237 clean_p2=False,
238 possibly_dirty=False,
239 parentfiledata=None,
240 ):
241 """Set a entry to a given state, diregarding all previous state
242
243 This is to be used by the part of the dirstate API dedicated to
244 adjusting the dirstate after a update/merge.
245
246 note: calling this might result to no entry existing at all if the
247 dirstate map does not see any point at having one for this file
248 anymore.
249 """
250 if merged and (clean_p1 or clean_p2):
251 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
252 raise error.ProgrammingError(msg)
253 # copy information is now outdated
254 # (maybe new information should be passed directly to this function)
255 self.copymap.pop(filename, None)
256
257 if not (p1_tracked or p2_tracked or wc_tracked):
258 self.dropfile(filename)
259 elif merged:
260 # XXX might be merged and removed ?
261 entry = self.get(filename)
262 if entry is not None and entry.tracked:
263 # XXX mostly replicate dirstate.other parent. We should get
264 # the higher layer to pass us more reliable data where `merged`
265 # actually mean merged. Dropping the else clause will show
266 # failure in `test-graft.t`
267 self.addfile(filename, merged=True)
268 else:
269 self.addfile(filename, from_p2=True)
270 elif not (p1_tracked or p2_tracked) and wc_tracked:
271 self.addfile(filename, added=True, possibly_dirty=possibly_dirty)
272 elif (p1_tracked or p2_tracked) and not wc_tracked:
273 # XXX might be merged and removed ?
274 old_entry = self._map.get(filename)
275 self._dirs_decr(filename, old_entry=old_entry, remove_variant=True)
276 self._map[filename] = DirstateItem(b'r', 0, 0, 0)
277 self.nonnormalset.add(filename)
278 elif clean_p2 and wc_tracked:
279 if p1_tracked or self.get(filename) is not None:
280 # XXX the `self.get` call is catching some case in
281 # `test-merge-remove.t` where the file is tracked in p1, the
282 # p1_tracked argument is False.
283 #
284 # In addition, this seems to be a case where the file is marked
285 # as merged without actually being the result of a merge
286 # action. So things are not ideal here.
287 self.addfile(filename, merged=True)
288 else:
289 self.addfile(filename, from_p2=True)
290 elif not p1_tracked and p2_tracked and wc_tracked:
291 self.addfile(filename, from_p2=True, possibly_dirty=possibly_dirty)
292 elif possibly_dirty:
293 self.addfile(filename, possibly_dirty=possibly_dirty)
294 elif wc_tracked:
295 # this is a "normal" file
296 if parentfiledata is None:
297 msg = b'failed to pass parentfiledata for a normal file: %s'
298 msg %= filename
299 raise error.ProgrammingError(msg)
300 mode, size, mtime = parentfiledata
301 self.addfile(filename, mode=mode, size=size, mtime=mtime)
302 self.nonnormalset.discard(filename)
303 else:
304 assert False, 'unreachable'
305
306 def removefile(self, f, in_merge=False):
307 """
308 Mark a file as removed in the dirstate.
309
310 The `size` parameter is used to store sentinel values that indicate
311 the file's previous state. In the future, we should refactor this
312 to be more explicit about what that state is.
313 """
314 entry = self.get(f)
315 size = 0
316 if in_merge:
317 # XXX we should not be able to have 'm' state and 'FROM_P2' if not
318 # during a merge. So I (marmoute) am not sure we need the
319 # conditional at all. Adding a double check with an assert
320 # would be nice.
321 if entry is not None:
322 # backup the previous state
323 if entry.merged: # merge
324 size = NONNORMAL
325 elif entry.from_p2:
326 size = FROM_P2
327 self.otherparentset.add(f)
328 if entry is not None and not (entry.merged or entry.from_p2):
329 self.copymap.pop(f, None)
330 self._dirs_decr(f, old_entry=entry, remove_variant=True)
331 self._map[f] = DirstateItem(b'r', 0, size, 0)
332 self.nonnormalset.add(f)
333
334 def dropfile(self, f):
335 """
336 Remove a file from the dirstate. Returns True if the file was
337 previously recorded.
338 """
339 old_entry = self._map.pop(f, None)
340 self._dirs_decr(f, old_entry=old_entry)
341 self.nonnormalset.discard(f)
342 return old_entry is not None
343
344 def clearambiguoustimes(self, files, now):
345 for f in files:
346 e = self.get(f)
347 if e is not None and e.need_delay(now):
348 e.set_possibly_dirty()
349 self.nonnormalset.add(f)
350
351 def nonnormalentries(self):
352 '''Compute the nonnormal dirstate entries from the dmap'''
353 try:
354 return parsers.nonnormalotherparententries(self._map)
355 except AttributeError:
356 nonnorm = set()
357 otherparent = set()
358 for fname, e in pycompat.iteritems(self._map):
359 if e.dm_nonnormal:
360 nonnorm.add(fname)
361 if e.from_p2:
362 otherparent.add(fname)
363 return nonnorm, otherparent
364
365 @propertycache
489 @propertycache
366 def filefoldmap(self):
490 def filefoldmap(self):
367 """Returns a dictionary mapping normalized case paths to their
491 """Returns a dictionary mapping normalized case paths to their
@@ -384,6 +508,14 b' class dirstatemap(object):'
384 f[b'.'] = b'.' # prevents useless util.fspath() invocation
508 f[b'.'] = b'.' # prevents useless util.fspath() invocation
385 return f
509 return f
386
510
511 @propertycache
512 def dirfoldmap(self):
513 f = {}
514 normcase = util.normcase
515 for name in self._dirs:
516 f[normcase(name)] = name
517 return f
518
387 def hastrackeddir(self, d):
519 def hastrackeddir(self, d):
388 """
520 """
389 Returns True if the dirstate contains a tracked (not removed) file
521 Returns True if the dirstate contains a tracked (not removed) file
@@ -400,393 +532,34 b' class dirstatemap(object):'
400
532
401 @propertycache
533 @propertycache
402 def _dirs(self):
534 def _dirs(self):
403 return pathutil.dirs(self._map, b'r')
535 return pathutil.dirs(self._map, only_tracked=True)
404
536
405 @propertycache
537 @propertycache
406 def _alldirs(self):
538 def _alldirs(self):
407 return pathutil.dirs(self._map)
539 return pathutil.dirs(self._map)
408
540
409 def _opendirstatefile(self):
541 ### code related to manipulation of entries and copy-sources
410 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
411 if self._pendingmode is not None and self._pendingmode != mode:
412 fp.close()
413 raise error.Abort(
414 _(b'working directory state may be changed parallelly')
415 )
416 self._pendingmode = mode
417 return fp
418
419 def parents(self):
420 if not self._parents:
421 try:
422 fp = self._opendirstatefile()
423 st = fp.read(2 * self._nodelen)
424 fp.close()
425 except IOError as err:
426 if err.errno != errno.ENOENT:
427 raise
428 # File doesn't exist, so the current state is empty
429 st = b''
430
542
431 l = len(st)
543 def _refresh_entry(self, f, entry):
432 if l == self._nodelen * 2:
544 if not entry.any_tracked:
433 self._parents = (
545 self._map.pop(f, None)
434 st[: self._nodelen],
435 st[self._nodelen : 2 * self._nodelen],
436 )
437 elif l == 0:
438 self._parents = (
439 self._nodeconstants.nullid,
440 self._nodeconstants.nullid,
441 )
442 else:
443 raise error.Abort(
444 _(b'working directory state appears damaged!')
445 )
446
447 return self._parents
448
449 def setparents(self, p1, p2):
450 self._parents = (p1, p2)
451 self._dirtyparents = True
452
453 def read(self):
454 # ignore HG_PENDING because identity is used only for writing
455 self.identity = util.filestat.frompath(
456 self._opener.join(self._filename)
457 )
458
459 try:
460 fp = self._opendirstatefile()
461 try:
462 st = fp.read()
463 finally:
464 fp.close()
465 except IOError as err:
466 if err.errno != errno.ENOENT:
467 raise
468 return
469 if not st:
470 return
471
546
472 if util.safehasattr(parsers, b'dict_new_presized'):
547 def _insert_entry(self, f, entry):
473 # Make an estimate of the number of files in the dirstate based on
548 self._map[f] = entry
474 # its size. This trades wasting some memory for avoiding costly
475 # resizes. Each entry have a prefix of 17 bytes followed by one or
476 # two path names. Studies on various large-scale real-world repositories
477 # found 54 bytes a reasonable upper limit for the average path names.
478 # Copy entries are ignored for the sake of this estimate.
479 self._map = parsers.dict_new_presized(len(st) // 71)
480
481 # Python's garbage collector triggers a GC each time a certain number
482 # of container objects (the number being defined by
483 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
484 # for each file in the dirstate. The C version then immediately marks
485 # them as not to be tracked by the collector. However, this has no
486 # effect on when GCs are triggered, only on what objects the GC looks
487 # into. This means that O(number of files) GCs are unavoidable.
488 # Depending on when in the process's lifetime the dirstate is parsed,
489 # this can get very expensive. As a workaround, disable GC while
490 # parsing the dirstate.
491 #
492 # (we cannot decorate the function directly since it is in a C module)
493 parse_dirstate = util.nogc(parsers.parse_dirstate)
494 p = parse_dirstate(self._map, self.copymap, st)
495 if not self._dirtyparents:
496 self.setparents(*p)
497
498 # Avoid excess attribute lookups by fast pathing certain checks
499 self.__contains__ = self._map.__contains__
500 self.__getitem__ = self._map.__getitem__
501 self.get = self._map.get
502
549
503 def write(self, _tr, st, now):
550 def _drop_entry(self, f):
504 st.write(
551 self._map.pop(f, None)
505 parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
552 self.copymap.pop(f, None)
506 )
507 st.close()
508 self._dirtyparents = False
509 self.nonnormalset, self.otherparentset = self.nonnormalentries()
510
511 @propertycache
512 def nonnormalset(self):
513 nonnorm, otherparents = self.nonnormalentries()
514 self.otherparentset = otherparents
515 return nonnorm
516
517 @propertycache
518 def otherparentset(self):
519 nonnorm, otherparents = self.nonnormalentries()
520 self.nonnormalset = nonnorm
521 return otherparents
522
523 def non_normal_or_other_parent_paths(self):
524 return self.nonnormalset.union(self.otherparentset)
525
526 @propertycache
527 def identity(self):
528 self._map
529 return self.identity
530
531 @propertycache
532 def dirfoldmap(self):
533 f = {}
534 normcase = util.normcase
535 for name in self._dirs:
536 f[normcase(name)] = name
537 return f
538
553
539
554
540 if rustmod is not None:
555 if rustmod is not None:
541
556
542 class dirstatemap(object):
557 class dirstatemap(_dirstatemapcommon):
543 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
544 self._use_dirstate_v2 = use_dirstate_v2
545 self._nodeconstants = nodeconstants
546 self._ui = ui
547 self._opener = opener
548 self._root = root
549 self._filename = b'dirstate'
550 self._nodelen = 20 # Also update Rust code when changing this!
551 self._parents = None
552 self._dirtyparents = False
553 self._docket = None
554
555 # for consistent view between _pl() and _read() invocations
556 self._pendingmode = None
557
558 self._use_dirstate_tree = self._ui.configbool(
559 b"experimental",
560 b"dirstate-tree.in-memory",
561 False,
562 )
563
564 def addfile(
565 self,
566 f,
567 mode=0,
568 size=None,
569 mtime=None,
570 added=False,
571 merged=False,
572 from_p2=False,
573 possibly_dirty=False,
574 ):
575 return self._rustmap.addfile(
576 f,
577 mode,
578 size,
579 mtime,
580 added,
581 merged,
582 from_p2,
583 possibly_dirty,
584 )
585
586 def reset_state(
587 self,
588 filename,
589 wc_tracked,
590 p1_tracked,
591 p2_tracked=False,
592 merged=False,
593 clean_p1=False,
594 clean_p2=False,
595 possibly_dirty=False,
596 parentfiledata=None,
597 ):
598 """Set a entry to a given state, disregarding all previous state
599
600 This is to be used by the part of the dirstate API dedicated to
601 adjusting the dirstate after a update/merge.
602
603 note: calling this might result to no entry existing at all if the
604 dirstate map does not see any point at having one for this file
605 anymore.
606 """
607 if merged and (clean_p1 or clean_p2):
608 msg = (
609 b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
610 )
611 raise error.ProgrammingError(msg)
612 # copy information is now outdated
613 # (maybe new information should be passed directly to this function)
614 self.copymap.pop(filename, None)
615
558
616 if not (p1_tracked or p2_tracked or wc_tracked):
559 ### Core data storage and access
617 self.dropfile(filename)
618 elif merged:
619 # XXX might be merged and removed ?
620 entry = self.get(filename)
621 if entry is not None and entry.tracked:
622 # XXX mostly replicate dirstate.other parent. We should get
623 # the higher layer to pass us more reliable data where `merged`
624 # actually mean merged. Dropping the else clause will show
625 # failure in `test-graft.t`
626 self.addfile(filename, merged=True)
627 else:
628 self.addfile(filename, from_p2=True)
629 elif not (p1_tracked or p2_tracked) and wc_tracked:
630 self.addfile(
631 filename, added=True, possibly_dirty=possibly_dirty
632 )
633 elif (p1_tracked or p2_tracked) and not wc_tracked:
634 # XXX might be merged and removed ?
635 self[filename] = DirstateItem(b'r', 0, 0, 0)
636 self.nonnormalset.add(filename)
637 elif clean_p2 and wc_tracked:
638 if p1_tracked or self.get(filename) is not None:
639 # XXX the `self.get` call is catching some case in
640 # `test-merge-remove.t` where the file is tracked in p1, the
641 # p1_tracked argument is False.
642 #
643 # In addition, this seems to be a case where the file is marked
644 # as merged without actually being the result of a merge
645 # action. So things are not ideal here.
646 self.addfile(filename, merged=True)
647 else:
648 self.addfile(filename, from_p2=True)
649 elif not p1_tracked and p2_tracked and wc_tracked:
650 self.addfile(
651 filename, from_p2=True, possibly_dirty=possibly_dirty
652 )
653 elif possibly_dirty:
654 self.addfile(filename, possibly_dirty=possibly_dirty)
655 elif wc_tracked:
656 # this is a "normal" file
657 if parentfiledata is None:
658 msg = b'failed to pass parentfiledata for a normal file: %s'
659 msg %= filename
660 raise error.ProgrammingError(msg)
661 mode, size, mtime = parentfiledata
662 self.addfile(filename, mode=mode, size=size, mtime=mtime)
663 self.nonnormalset.discard(filename)
664 else:
665 assert False, 'unreachable'
666
667 def removefile(self, *args, **kwargs):
668 return self._rustmap.removefile(*args, **kwargs)
669
670 def dropfile(self, *args, **kwargs):
671 return self._rustmap.dropfile(*args, **kwargs)
672
673 def clearambiguoustimes(self, *args, **kwargs):
674 return self._rustmap.clearambiguoustimes(*args, **kwargs)
675
676 def nonnormalentries(self):
677 return self._rustmap.nonnormalentries()
678
679 def get(self, *args, **kwargs):
680 return self._rustmap.get(*args, **kwargs)
681
682 @property
683 def copymap(self):
684 return self._rustmap.copymap()
685
686 def directories(self):
687 return self._rustmap.directories()
688
689 def debug_iter(self):
690 return self._rustmap.debug_iter()
691
692 def preload(self):
693 self._rustmap
694
695 def clear(self):
696 self._rustmap.clear()
697 self.setparents(
698 self._nodeconstants.nullid, self._nodeconstants.nullid
699 )
700 util.clearcachedproperty(self, b"_dirs")
701 util.clearcachedproperty(self, b"_alldirs")
702 util.clearcachedproperty(self, b"dirfoldmap")
703
704 def items(self):
705 return self._rustmap.items()
706
707 def keys(self):
708 return iter(self._rustmap)
709
710 def __contains__(self, key):
711 return key in self._rustmap
712
713 def __getitem__(self, item):
714 return self._rustmap[item]
715
716 def __len__(self):
717 return len(self._rustmap)
718
719 def __iter__(self):
720 return iter(self._rustmap)
721
722 # forward for python2,3 compat
723 iteritems = items
724
725 def _opendirstatefile(self):
726 fp, mode = txnutil.trypending(
727 self._root, self._opener, self._filename
728 )
729 if self._pendingmode is not None and self._pendingmode != mode:
730 fp.close()
731 raise error.Abort(
732 _(b'working directory state may be changed parallelly')
733 )
734 self._pendingmode = mode
735 return fp
736
737 def _readdirstatefile(self, size=-1):
738 try:
739 with self._opendirstatefile() as fp:
740 return fp.read(size)
741 except IOError as err:
742 if err.errno != errno.ENOENT:
743 raise
744 # File doesn't exist, so the current state is empty
745 return b''
746
747 def setparents(self, p1, p2):
748 self._parents = (p1, p2)
749 self._dirtyparents = True
750
751 def parents(self):
752 if not self._parents:
753 if self._use_dirstate_v2:
754 self._parents = self.docket.parents
755 else:
756 read_len = self._nodelen * 2
757 st = self._readdirstatefile(read_len)
758 l = len(st)
759 if l == read_len:
760 self._parents = (
761 st[: self._nodelen],
762 st[self._nodelen : 2 * self._nodelen],
763 )
764 elif l == 0:
765 self._parents = (
766 self._nodeconstants.nullid,
767 self._nodeconstants.nullid,
768 )
769 else:
770 raise error.Abort(
771 _(b'working directory state appears damaged!')
772 )
773
774 return self._parents
775
776 @property
777 def docket(self):
778 if not self._docket:
779 if not self._use_dirstate_v2:
780 raise error.ProgrammingError(
781 b'dirstate only has a docket in v2 format'
782 )
783 self._docket = docketmod.DirstateDocket.parse(
784 self._readdirstatefile(), self._nodeconstants
785 )
786 return self._docket
787
560
788 @propertycache
561 @propertycache
789 def _rustmap(self):
562 def _map(self):
790 """
563 """
791 Fills the Dirstatemap when called.
564 Fills the Dirstatemap when called.
792 """
565 """
@@ -801,27 +574,91 b' if rustmod is not None:'
801 data = self._opener.read(self.docket.data_filename())
574 data = self._opener.read(self.docket.data_filename())
802 else:
575 else:
803 data = b''
576 data = b''
804 self._rustmap = rustmod.DirstateMap.new_v2(
577 self._map = rustmod.DirstateMap.new_v2(
805 data, self.docket.data_size, self.docket.tree_metadata
578 data, self.docket.data_size, self.docket.tree_metadata
806 )
579 )
807 parents = self.docket.parents
580 parents = self.docket.parents
808 else:
581 else:
809 self._rustmap, parents = rustmod.DirstateMap.new_v1(
582 self._map, parents = rustmod.DirstateMap.new_v1(
810 self._use_dirstate_tree, self._readdirstatefile()
583 self._readdirstatefile()
811 )
584 )
812
585
813 if parents and not self._dirtyparents:
586 if parents and not self._dirtyparents:
814 self.setparents(*parents)
587 self.setparents(*parents)
815
588
816 self.__contains__ = self._rustmap.__contains__
589 self.__contains__ = self._map.__contains__
817 self.__getitem__ = self._rustmap.__getitem__
590 self.__getitem__ = self._map.__getitem__
818 self.get = self._rustmap.get
591 self.get = self._map.get
819 return self._rustmap
592 return self._map
593
594 @property
595 def copymap(self):
596 return self._map.copymap()
597
598 def debug_iter(self, all):
599 """
600 Return an iterator of (filename, state, mode, size, mtime) tuples
601
602 `all`: also include dirstate tree nodes that don't have an associated
603 `DirstateItem`, reported with `state == b' '`.
604
605 """
606 return self._map.debug_iter(all)
607
608 def clear(self):
609 self._map.clear()
610 self.setparents(
611 self._nodeconstants.nullid, self._nodeconstants.nullid
612 )
613 util.clearcachedproperty(self, b"_dirs")
614 util.clearcachedproperty(self, b"_alldirs")
615 util.clearcachedproperty(self, b"dirfoldmap")
616
617 def items(self):
618 return self._map.items()
619
620 # forward for python2,3 compat
621 iteritems = items
622
623 def keys(self):
624 return iter(self._map)
625
626 ### reading/setting parents
627
628 def setparents(self, p1, p2, fold_p2=False):
629 self._parents = (p1, p2)
630 self._dirtyparents = True
631 copies = {}
632 if fold_p2:
633 # Collect into an intermediate list to avoid a `RuntimeError`
634 # exception due to mutation during iteration.
635 # TODO: move the whole loop to Rust, where `iter_mut`
636 # enables in-place mutation of elements of a collection while
637 # iterating it, without mutating the collection itself.
638 files_with_p2_info = [
639 f for f, s in self._map.items() if s.p2_info
640 ]
641 rust_map = self._map
642 for f in files_with_p2_info:
643 e = rust_map.get(f)
644 source = self.copymap.pop(f, None)
645 if source:
646 copies[f] = source
647 e.drop_merge_data()
648 rust_map.set_dirstate_item(f, e)
649 return copies
650
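The intermediate list sidesteps the classic mutation-during-iteration trap; a generic illustration (plain dict, not the Rust map):

    d = {b'a': 1, b'b': 2}
    # for k in d: d.pop(k)   # RuntimeError: dict changed size during iteration
    for k in list(d):        # snapshot first, as the code above does
        d.pop(k)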
651 ### disk interaction
652
653 @propertycache
654 def identity(self):
655 self._map
656 return self.identity
820
657
821 def write(self, tr, st, now):
658 def write(self, tr, st, now):
822 if not self._use_dirstate_v2:
659 if not self._use_dirstate_v2:
823 p1, p2 = self.parents()
660 p1, p2 = self.parents()
824 packed = self._rustmap.write_v1(p1, p2, now)
661 packed = self._map.write_v1(p1, p2, now)
825 st.write(packed)
662 st.write(packed)
826 st.close()
663 st.close()
827 self._dirtyparents = False
664 self._dirtyparents = False
@@ -829,7 +666,7 b' if rustmod is not None:'
829
666
830 # We can only append to an existing data file if there is one
667 # We can only append to an existing data file if there is one
831 can_append = self.docket.uuid is not None
668 can_append = self.docket.uuid is not None
832 packed, meta, append = self._rustmap.write_v2(now, can_append)
669 packed, meta, append = self._map.write_v2(now, can_append)
833 if append:
670 if append:
834 docket = self.docket
671 docket = self.docket
835 data_filename = docket.data_filename()
672 data_filename = docket.data_filename()
@@ -847,79 +684,49 b' if rustmod is not None:'
847 st.write(docket.serialize())
684 st.write(docket.serialize())
848 st.close()
685 st.close()
849 else:
686 else:
850 old_docket = self.docket
687 self.write_v2_no_append(tr, st, meta, packed)
851 new_docket = docketmod.DirstateDocket.with_new_uuid(
852 self.parents(), len(packed), meta
853 )
854 data_filename = new_docket.data_filename()
855 if tr:
856 tr.add(data_filename, 0)
857 self._opener.write(data_filename, packed)
858 # Write the new docket after the new data file has been
859 # written. Because `st` was opened with `atomictemp=True`,
860 # the actual `.hg/dirstate` file is only affected on close.
861 st.write(new_docket.serialize())
862 st.close()
863 # Remove the old data file after the new docket pointing to
864 # the new data file was written.
865 if old_docket.uuid:
866 data_filename = old_docket.data_filename()
867 unlink = lambda _tr=None: self._opener.unlink(data_filename)
868 if tr:
869 category = b"dirstate-v2-clean-" + old_docket.uuid
870 tr.addpostclose(category, unlink)
871 else:
872 unlink()
873 self._docket = new_docket
874 # Reload from the newly-written file
688 # Reload from the newly-written file
875 util.clearcachedproperty(self, b"_rustmap")
689 util.clearcachedproperty(self, b"_map")
876 self._dirtyparents = False
690 self._dirtyparents = False
877
691
692 ### code related to maintaining and accessing "extra" property
693 # (e.g. "has_dir")
694
878 @propertycache
695 @propertycache
879 def filefoldmap(self):
696 def filefoldmap(self):
880 """Returns a dictionary mapping normalized case paths to their
697 """Returns a dictionary mapping normalized case paths to their
881 non-normalized versions.
698 non-normalized versions.
882 """
699 """
883 return self._rustmap.filefoldmapasdict()
700 return self._map.filefoldmapasdict()
884
701
885 def hastrackeddir(self, d):
702 def hastrackeddir(self, d):
886 return self._rustmap.hastrackeddir(d)
703 return self._map.hastrackeddir(d)
887
704
888 def hasdir(self, d):
705 def hasdir(self, d):
889 return self._rustmap.hasdir(d)
706 return self._map.hasdir(d)
890
891 @propertycache
892 def identity(self):
893 self._rustmap
894 return self.identity
895
896 @property
897 def nonnormalset(self):
898 nonnorm = self._rustmap.non_normal_entries()
899 return nonnorm
900
901 @propertycache
902 def otherparentset(self):
903 otherparents = self._rustmap.other_parent_entries()
904 return otherparents
905
906 def non_normal_or_other_parent_paths(self):
907 return self._rustmap.non_normal_or_other_parent_paths()
908
707
909 @propertycache
708 @propertycache
910 def dirfoldmap(self):
709 def dirfoldmap(self):
911 f = {}
710 f = {}
912 normcase = util.normcase
711 normcase = util.normcase
913 for name in self._rustmap.tracked_dirs():
712 for name in self._map.tracked_dirs():
914 f[normcase(name)] = name
713 f[normcase(name)] = name
915 return f
714 return f
916
715
917 def set_possibly_dirty(self, filename):
716 ### code related to manipulation of entries and copy-sources
918 """record that the current state of the file on disk is unknown"""
717
919 entry = self[filename]
718 def _refresh_entry(self, f, entry):
920 entry.set_possibly_dirty()
719 if not entry.any_tracked:
921 self._rustmap.set_v1(filename, entry)
720 self._map.drop_item_and_copy_source(f)
721 else:
722 self._map.addfile(f, entry)
723
724 def _insert_entry(self, f, entry):
725 self._map.addfile(f, entry)
726
727 def _drop_entry(self, f):
728 self._map.drop_item_and_copy_source(f)
922
729
923 def __setitem__(self, key, value):
730 def __setitem__(self, key, value):
924 assert isinstance(value, DirstateItem)
731 assert isinstance(value, DirstateItem)
925 self._rustmap.set_v1(key, value)
732 self._map.set_dirstate_item(key, value)
@@ -10,31 +10,27 b' from __future__ import absolute_import'
10 import struct
10 import struct
11
11
12 from ..revlogutils import docket as docket_mod
12 from ..revlogutils import docket as docket_mod
13
13 from . import v2
14
14
15 V2_FORMAT_MARKER = b"dirstate-v2\n"
15 V2_FORMAT_MARKER = b"dirstate-v2\n"
16
16
17 # Must match the constant of the same name in
18 # `rust/hg-core/src/dirstate_tree/on_disk.rs`
19 TREE_METADATA_SIZE = 44
20
21 # * 12 bytes: format marker
17 # * 12 bytes: format marker
22 # * 32 bytes: node ID of the working directory's first parent
18 # * 32 bytes: node ID of the working directory's first parent
23 # * 32 bytes: node ID of the working directory's second parent
19 # * 32 bytes: node ID of the working directory's second parent
20 # * {TREE_METADATA_SIZE} bytes: tree metadata, parsed separately
24 # * 4 bytes: big-endian used size of the data file
21 # * 4 bytes: big-endian used size of the data file
25 # * {TREE_METADATA_SIZE} bytes: tree metadata, parsed separately
26 # * 1 byte: length of the data file's UUID
22 # * 1 byte: length of the data file's UUID
27 # * variable: data file's UUID
23 # * variable: data file's UUID
28 #
24 #
29 # Node IDs are null-padded if shorter than 32 bytes.
25 # Node IDs are null-padded if shorter than 32 bytes.
30 # A data file shorter than the specified used size is corrupted (truncated)
26 # A data file shorter than the specified used size is corrupted (truncated)
31 HEADER = struct.Struct(
27 HEADER = struct.Struct(
32 ">{}s32s32sL{}sB".format(len(V2_FORMAT_MARKER), TREE_METADATA_SIZE)
28 ">{}s32s32s{}sLB".format(len(V2_FORMAT_MARKER), v2.TREE_METADATA_SIZE)
33 )
29 )
34
30
35
31
36 class DirstateDocket(object):
32 class DirstateDocket(object):
37 data_filename_pattern = b'dirstate.%s.d'
33 data_filename_pattern = b'dirstate.%s'
38
34
39 def __init__(self, parents, data_size, tree_metadata, uuid):
35 def __init__(self, parents, data_size, tree_metadata, uuid):
40 self.parents = parents
36 self.parents = parents
@@ -51,7 +47,7 b' class DirstateDocket(object):'
51 if not data:
47 if not data:
52 parents = (nodeconstants.nullid, nodeconstants.nullid)
48 parents = (nodeconstants.nullid, nodeconstants.nullid)
53 return cls(parents, 0, b'', None)
49 return cls(parents, 0, b'', None)
54 marker, p1, p2, data_size, meta, uuid_size = HEADER.unpack_from(data)
50 marker, p1, p2, meta, data_size, uuid_size = HEADER.unpack_from(data)
55 if marker != V2_FORMAT_MARKER:
51 if marker != V2_FORMAT_MARKER:
56 raise ValueError("expected dirstate-v2 marker")
52 raise ValueError("expected dirstate-v2 marker")
57 uuid = data[HEADER.size : HEADER.size + uuid_size]
53 uuid = data[HEADER.size : HEADER.size + uuid_size]
@@ -65,8 +61,8 b' class DirstateDocket(object):'
65 V2_FORMAT_MARKER,
61 V2_FORMAT_MARKER,
66 p1,
62 p1,
67 p2,
63 p2,
64 self.tree_metadata,
68 self.data_size,
65 self.data_size,
69 self.tree_metadata,
70 len(self.uuid),
66 len(self.uuid),
71 )
67 )
72 return header + self.uuid
68 return header + self.uuid
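With the reordered fields, the fixed part of the docket works out as follows (a sanity check, assuming the 12-byte marker and 44-byte tree metadata noted above):

    # 12 (marker) + 32 + 32 (parents) + 44 (tree metadata)
    # + 4 (data size) + 1 (uuid length) = 125 bytes, followed by the UUID.
    assert HEADER.size == len(V2_FORMAT_MARKER) + 32 + 32 + 44 + 4 + 1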
@@ -253,7 +253,7 b' def dispatch(req):'
253 status = -1
253 status = -1
254
254
255 ret = _flushstdio(req.ui, err)
255 ret = _flushstdio(req.ui, err)
256 if ret:
256 if ret and not status:
257 status = ret
257 status = ret
258 return status
258 return status
259
259
@@ -240,7 +240,9 b' def fromlocal(s):'
240 b"decoding near '%s': %s!" % (sub, pycompat.bytestr(inst))
240 b"decoding near '%s': %s!" % (sub, pycompat.bytestr(inst))
241 )
241 )
242 except LookupError as k:
242 except LookupError as k:
243 raise error.Abort(k, hint=b"please check your locale settings")
243 raise error.Abort(
244 pycompat.bytestr(k), hint=b"please check your locale settings"
245 )
244
246
245
247
246 def unitolocal(u):
248 def unitolocal(u):
@@ -306,7 +308,9 b' def lower(s):'
306 except UnicodeError:
308 except UnicodeError:
307 return s.lower() # we don't know how to fold this except in ASCII
309 return s.lower() # we don't know how to fold this except in ASCII
308 except LookupError as k:
310 except LookupError as k:
309 raise error.Abort(k, hint=b"please check your locale settings")
311 raise error.Abort(
312 pycompat.bytestr(k), hint=b"please check your locale settings"
313 )
310
314
311
315
312 def upper(s):
316 def upper(s):
@@ -333,7 +337,9 b' def upperfallback(s):'
333 except UnicodeError:
337 except UnicodeError:
334 return s.upper() # we don't know how to fold this except in ASCII
338 return s.upper() # we don't know how to fold this except in ASCII
335 except LookupError as k:
339 except LookupError as k:
336 raise error.Abort(k, hint=b"please check your locale settings")
340 raise error.Abort(
341 pycompat.bytestr(k), hint=b"please check your locale settings"
342 )
337
343
338
344
339 if not _nativeenviron:
345 if not _nativeenviron:
@@ -31,6 +31,7 b' if pycompat.TYPE_CHECKING:'
31
31
32
32
33 def _tobytes(exc):
33 def _tobytes(exc):
34 # type: (...) -> bytes
34 """Byte-stringify exception in the same way as BaseException_str()"""
35 """Byte-stringify exception in the same way as BaseException_str()"""
35 if not exc.args:
36 if not exc.args:
36 return b''
37 return b''
@@ -47,7 +48,7 b' class Hint(object):'
47 """
48 """
48
49
49 def __init__(self, *args, **kw):
50 def __init__(self, *args, **kw):
50 self.hint = kw.pop('hint', None)
51 self.hint = kw.pop('hint', None) # type: Optional[bytes]
51 super(Hint, self).__init__(*args, **kw)
52 super(Hint, self).__init__(*args, **kw)
52
53
53
54
@@ -71,6 +72,7 b' class Error(Hint, Exception):'
71 if pycompat.ispy3:
72 if pycompat.ispy3:
72
73
73 def __str__(self):
74 def __str__(self):
75 # type: () -> str
74 # the output would be unreadable if the message was translated,
76 # the output would be unreadable if the message was translated,
75 # but do not replace it with encoding.strfromlocal(), which
77 # but do not replace it with encoding.strfromlocal(), which
76 # may raise another exception.
78 # may raise another exception.
@@ -105,6 +107,7 b' class RevlogError(StorageError):'
105
107
106 class SidedataHashError(RevlogError):
108 class SidedataHashError(RevlogError):
107 def __init__(self, key, expected, got):
109 def __init__(self, key, expected, got):
110 # type: (int, bytes, bytes) -> None
108 self.hint = None
111 self.hint = None
109 self.sidedatakey = key
112 self.sidedatakey = key
110 self.expecteddigest = expected
113 self.expecteddigest = expected
@@ -117,6 +120,7 b' class FilteredIndexError(IndexError):'
117
120
118 class LookupError(RevlogError, KeyError):
121 class LookupError(RevlogError, KeyError):
119 def __init__(self, name, index, message):
122 def __init__(self, name, index, message):
123 # type: (bytes, bytes, bytes) -> None
120 self.name = name
124 self.name = name
121 self.index = index
125 self.index = index
122 # this can't be called 'message' because at least some installs of
126 # this can't be called 'message' because at least some installs of
@@ -343,6 +347,7 b' class OutOfBandError(RemoteError):'
343 """Exception raised when a remote repo reports failure"""
347 """Exception raised when a remote repo reports failure"""
344
348
345 def __init__(self, message=None, hint=None):
349 def __init__(self, message=None, hint=None):
350 # type: (Optional[bytes], Optional[bytes]) -> None
346 from .i18n import _
351 from .i18n import _
347
352
348 if message:
353 if message:
@@ -1386,11 +1386,16 b' class pulloperation(object):'
1386 includepats=None,
1386 includepats=None,
1387 excludepats=None,
1387 excludepats=None,
1388 depth=None,
1388 depth=None,
1389 path=None,
1389 ):
1390 ):
1390 # repo we pull into
1391 # repo we pull into
1391 self.repo = repo
1392 self.repo = repo
1392 # repo we pull from
1393 # repo we pull from
1393 self.remote = remote
1394 self.remote = remote
1395 # path object used to build this remote
1396 #
1397 # Ideally, the remote peer would carry that directly.
1398 self.remote_path = path
1394 # revision we try to pull (None is "all")
1399 # revision we try to pull (None is "all")
1395 self.heads = heads
1400 self.heads = heads
1396 # bookmark pulled explicitly
1401 # bookmark pulled explicitly
@@ -1556,6 +1561,7 b' def add_confirm_callback(repo, pullop):'
1556 def pull(
1561 def pull(
1557 repo,
1562 repo,
1558 remote,
1563 remote,
1564 path=None,
1559 heads=None,
1565 heads=None,
1560 force=False,
1566 force=False,
1561 bookmarks=(),
1567 bookmarks=(),
@@ -1611,8 +1617,9 b' def pull('
1611 pullop = pulloperation(
1617 pullop = pulloperation(
1612 repo,
1618 repo,
1613 remote,
1619 remote,
1614 heads,
1620 path=path,
1615 force,
1621 heads=heads,
1622 force=force,
1616 bookmarks=bookmarks,
1623 bookmarks=bookmarks,
1617 streamclonerequested=streamclonerequested,
1624 streamclonerequested=streamclonerequested,
1618 includepats=includepats,
1625 includepats=includepats,
@@ -2021,6 +2028,9 b' def _pullbookmarks(pullop):'
2021 pullop.stepsdone.add(b'bookmarks')
2028 pullop.stepsdone.add(b'bookmarks')
2022 repo = pullop.repo
2029 repo = pullop.repo
2023 remotebookmarks = pullop.remotebookmarks
2030 remotebookmarks = pullop.remotebookmarks
2031 bookmarks_mode = None
2032 if pullop.remote_path is not None:
2033 bookmarks_mode = pullop.remote_path.bookmarks_mode
2024 bookmod.updatefromremote(
2034 bookmod.updatefromremote(
2025 repo.ui,
2035 repo.ui,
2026 repo,
2036 repo,
@@ -2028,6 +2038,7 b' def _pullbookmarks(pullop):'
2028 pullop.remote.url(),
2038 pullop.remote.url(),
2029 pullop.gettransaction,
2039 pullop.gettransaction,
2030 explicit=pullop.explicitbookmarks,
2040 explicit=pullop.explicitbookmarks,
2041 mode=bookmarks_mode,
2031 )
2042 )
2032
2043
2033
2044
@@ -224,8 +224,12 b' def load(ui, name, path, loadingtime=Non'
224 minver = getattr(mod, 'minimumhgversion', None)
224 minver = getattr(mod, 'minimumhgversion', None)
225 if minver:
225 if minver:
226 curver = util.versiontuple(n=2)
226 curver = util.versiontuple(n=2)
227 extmin = util.versiontuple(stringutil.forcebytestr(minver), 2)
227
228
228 if None in curver or util.versiontuple(minver, 2) > curver:
229 if None in extmin:
230 extmin = (extmin[0] or 0, extmin[1] or 0)
231
232 if None in curver or extmin > curver:
229 msg = _(
233 msg = _(
230 b'(third party extension %s requires version %s or newer '
234 b'(third party extension %s requires version %s or newer '
231 b'of Mercurial (current: %s); disabling)\n'
235 b'of Mercurial (current: %s); disabling)\n'
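The `None` normalization above is what keeps the comparison total: `versiontuple` leaves `None` in place of missing components (e.g. a `minimumhgversion` of just `b'5'`), and on Python 3 ordering a tuple containing `None` against integers raises `TypeError`. A minimal sketch of the failure mode, using a hand-rolled stand-in for `util.versiontuple`:

    def versiontuple2(v):
        # stand-in: returns (major, minor), with None for missing parts
        parts = v.split(b'.')
        major = int(parts[0]) if parts[0].isdigit() else None
        minor = int(parts[1]) if len(parts) > 1 and parts[1].isdigit() else None
        return (major, minor)

    extmin = versiontuple2(b'5')               # (5, None)
    # comparing (5, None) > (5, 9) would raise TypeError on Python 3
    extmin = (extmin[0] or 0, extmin[1] or 0)  # (5, 0), safe to order
    assert not (extmin > (5, 9))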
@@ -365,6 +365,11 b' internalstable = sorted('
365 loaddoc(b'config', subdir=b'internals'),
365 loaddoc(b'config', subdir=b'internals'),
366 ),
366 ),
367 (
367 (
368 [b'dirstate-v2'],
369 _(b'dirstate-v2 file format'),
370 loaddoc(b'dirstate-v2', subdir=b'internals'),
371 ),
372 (
368 [b'extensions', b'extension'],
373 [b'extensions', b'extension'],
369 _(b'Extension API'),
374 _(b'Extension API'),
370 loaddoc(b'extensions', subdir=b'internals'),
375 loaddoc(b'extensions', subdir=b'internals'),
@@ -1748,6 +1748,18 b' The following sub-options can be defined'
1748 Revsets specifying bookmarks will not result in the bookmark being
1748 Revsets specifying bookmarks will not result in the bookmark being
1749 pushed.
1749 pushed.
1750
1750
1751 ``bookmarks.mode``
1752 How bookmarks are handled during an exchange. It supports the following values (see the example after this list):
1753
1754 - ``default``: the default behavior, local and remote bookmarks are "merged"
1755 on push/pull.
1756
1757 - ``mirror``: when pulling, replace local bookmarks with the remote ones. This
1758 is useful for replicating a repository, or as an optimization.
1759
1760 - ``ignore``: ignore bookmarks during exchange.
1761 (This currently only affects pulling.)
1762
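For example, a local replica that should track its upstream's bookmarks exactly can combine this sub-option with a path (hypothetical URL):

    [paths]
    default = https://example.org/repo
    default:bookmarks.mode = mirror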
1751 The following special named paths exist:
1763 The following special named paths exist:
1752
1764
1753 ``default``
1765 ``default``
@@ -942,7 +942,7 b' def clone('
942 exchange.pull(
942 exchange.pull(
943 local,
943 local,
944 srcpeer,
944 srcpeer,
945 revs,
945 heads=revs,
946 streamclonerequested=stream,
946 streamclonerequested=stream,
947 includepats=storeincludepats,
947 includepats=storeincludepats,
948 excludepats=storeexcludepats,
948 excludepats=storeexcludepats,
@@ -1261,13 +1261,14 b' def _incoming('
1261 (remoterepo, incomingchangesetlist, displayer) parameters,
1261 (remoterepo, incomingchangesetlist, displayer) parameters,
1262 and is supposed to contain only code that can't be unified.
1262 and is supposed to contain only code that can't be unified.
1263 """
1263 """
1264 srcs = urlutil.get_pull_paths(repo, ui, [source], opts.get(b'branch'))
1264 srcs = urlutil.get_pull_paths(repo, ui, [source])
1265 srcs = list(srcs)
1265 srcs = list(srcs)
1266 if len(srcs) != 1:
1266 if len(srcs) != 1:
1267 msg = _(b'for now, incoming supports only a single source, %d provided')
1267 msg = _(b'for now, incoming supports only a single source, %d provided')
1268 msg %= len(srcs)
1268 msg %= len(srcs)
1269 raise error.Abort(msg)
1269 raise error.Abort(msg)
1270 source, branches = srcs[0]
1270 path = srcs[0]
1271 source, branches = urlutil.parseurl(path.rawloc, opts.get(b'branch'))
1271 if subpath is not None:
1272 if subpath is not None:
1272 subpath = urlutil.url(subpath)
1273 subpath = urlutil.url(subpath)
1273 if subpath.isabs():
1274 if subpath.isabs():
@@ -1285,7 +1286,7 b' def _incoming('
1285 if revs:
1286 if revs:
1286 revs = [other.lookup(rev) for rev in revs]
1287 revs = [other.lookup(rev) for rev in revs]
1287 other, chlist, cleanupfn = bundlerepo.getremotechanges(
1288 other, chlist, cleanupfn = bundlerepo.getremotechanges(
1288 ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
1289 ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
1289 )
1290 )
1290
1291
1291 if not chlist:
1292 if not chlist:
@@ -1352,7 +1353,7 b' def _outgoing(ui, repo, dests, opts, sub'
1352 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
1353 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
1353 revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
1354 revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
1354 if revs:
1355 if revs:
1355 revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]
1356 revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]
1356
1357
1357 other = peer(repo, opts, dest)
1358 other = peer(repo, opts, dest)
1358 try:
1359 try:
@@ -285,6 +285,7 b' class hgwebdir(object):'
285 self.lastrefresh = 0
285 self.lastrefresh = 0
286 self.motd = None
286 self.motd = None
287 self.refresh()
287 self.refresh()
288 self.requests_count = 0
288 if not baseui:
289 if not baseui:
289 # set up environment for new ui
290 # set up environment for new ui
290 extensions.loadall(self.ui)
291 extensions.loadall(self.ui)
@@ -341,6 +342,10 b' class hgwebdir(object):'
341
342
342 self.repos = repos
343 self.repos = repos
343 self.ui = u
344 self.ui = u
345 self.gc_full_collect_rate = self.ui.configint(
346 b'experimental', b'web.full-garbage-collection-rate'
347 )
348 self.gc_full_collections_done = 0
344 encoding.encoding = self.ui.config(b'web', b'encoding')
349 encoding.encoding = self.ui.config(b'web', b'encoding')
345 self.style = self.ui.config(b'web', b'style')
350 self.style = self.ui.config(b'web', b'style')
346 self.templatepath = self.ui.config(
351 self.templatepath = self.ui.config(
@@ -383,12 +388,27 b' class hgwebdir(object):'
383 finally:
388 finally:
384 # There are known cycles in localrepository that prevent
389 # There are known cycles in localrepository that prevent
385 # those objects (and tons of held references) from being
390 # those objects (and tons of held references) from being
386 # collected through normal refcounting. We mitigate those
391 # collected through normal refcounting.
387 # leaks by performing an explicit GC on every request.
392 # In some cases, the resulting memory consumption can
388 # TODO remove this once leaks are fixed.
393 # be tamed by performing explicit garbage collections.
389 # TODO only run this on requests that create localrepository
394 # In the presence of actual leaks or big long-lived caches, the
390 # instances instead of every request.
395 # impact on performance of such collections can become a
391 gc.collect()
396 # problem, hence the rate shouldn't be set too low.
397 # See "Collecting the oldest generation" in
398 # https://devguide.python.org/garbage_collector
399 # for more about such trade-offs.
400 rate = self.gc_full_collect_rate
401
402 # this is not thread safe, but the consequence (skipping
403 # a garbage collection) is arguably better than risking
404 # to have several threads perform a collection in parallel
405 # (long useless wait on all threads).
406 self.requests_count += 1
407 if rate > 0 and self.requests_count % rate == 0:
408 gc.collect()
409 self.gc_full_collections_done += 1
410 else:
411 gc.collect(generation=1)
392
412
393 def _runwsgi(self, req, res):
413 def _runwsgi(self, req, res):
394 try:
414 try:
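The rate is read by the ``configint`` call added above; a deployment that only wants the expensive full collection on every 100th request (with the cheaper generation-1 pass otherwise) would set, for instance:

    [experimental]
    web.full-garbage-collection-rate = 100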
@@ -132,36 +132,6 b' class idirstate(interfaceutil.Interface)'
132 def copies():
132 def copies():
133 pass
133 pass
134
134
135 def normal(f, parentfiledata=None):
136 """Mark a file normal and clean.
137
138 parentfiledata: (mode, size, mtime) of the clean file
139
140 parentfiledata should be computed from memory (for mode,
141 size), as or close as possible from the point where we
142 determined the file was clean, to limit the risk of the
143 file having been changed by an external process between the
144 moment where the file was determined to be clean and now."""
145 pass
146
147 def normallookup(f):
148 '''Mark a file normal, but possibly dirty.'''
149
150 def otherparent(f):
151 '''Mark as coming from the other parent, always dirty.'''
152
153 def add(f):
154 '''Mark a file added.'''
155
156 def remove(f):
157 '''Mark a file removed.'''
158
159 def merge(f):
160 '''Mark a file merged.'''
161
162 def drop(f):
163 '''Drop a file from the dirstate'''
164
165 def normalize(path, isknown=False, ignoremissing=False):
135 def normalize(path, isknown=False, ignoremissing=False):
166 """
136 """
167 normalize the case of a pathname when on a casefolding filesystem
137 normalize the case of a pathname when on a casefolding filesystem
@@ -917,9 +917,6 b' def gathersupportedrequirements(ui):'
917 # Start with all requirements supported by this file.
917 # Start with all requirements supported by this file.
918 supported = set(localrepository._basesupported)
918 supported = set(localrepository._basesupported)
919
919
920 if dirstate.SUPPORTS_DIRSTATE_V2:
921 supported.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
922
923 # Execute ``featuresetupfuncs`` entries if they belong to an extension
920 # Execute ``featuresetupfuncs`` entries if they belong to an extension
924 # relevant to this ui instance.
921 # relevant to this ui instance.
925 modules = {m.__name__ for n, m in extensions.extensions(ui)}
922 modules = {m.__name__ for n, m in extensions.extensions(ui)}
@@ -1177,6 +1174,32 b' def resolverevlogstorevfsoptions(ui, req'
1177 if slow_path == b'abort':
1174 if slow_path == b'abort':
1178 raise error.Abort(msg, hint=hint)
1175 raise error.Abort(msg, hint=hint)
1179 options[b'persistent-nodemap'] = True
1176 options[b'persistent-nodemap'] = True
1177 if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
1178 slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
1179 if slow_path not in (b'allow', b'warn', b'abort'):
1180 default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
1181 msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
1182 ui.warn(msg % slow_path)
1183 if not ui.quiet:
1184 ui.warn(_(b'falling back to default value: %s\n') % default)
1185 slow_path = default
1186
1187 msg = _(
1188 b"accessing `dirstate-v2` repository without associated "
1189 b"fast implementation."
1190 )
1191 hint = _(
1192 b"check `hg help config.format.exp-rc-dirstate-v2` " b"for details"
1193 )
1194 if not dirstate.HAS_FAST_DIRSTATE_V2:
1195 if slow_path == b'warn':
1196 msg = b"warning: " + msg + b'\n'
1197 ui.warn(msg)
1198 if not ui.quiet:
1199 hint = b'(' + hint + b')\n'
1200 ui.warn(hint)
1201 if slow_path == b'abort':
1202 raise error.Abort(msg, hint=hint)
1180 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1203 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1181 options[b'persistent-nodemap.mmap'] = True
1204 options[b'persistent-nodemap.mmap'] = True
1182 if ui.configbool(b'devel', b'persistent-nodemap'):
1205 if ui.configbool(b'devel', b'persistent-nodemap'):
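The accepted values mirror the tuple checked above. For instance, an installation that knowingly runs `dirstate-v2` repositories without a fast implementation can silence the warning with:

    [storage]
    dirstate-v2.slow-path = allow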
@@ -1266,6 +1289,7 b' class localrepository(object):'
1266 requirementsmod.NODEMAP_REQUIREMENT,
1289 requirementsmod.NODEMAP_REQUIREMENT,
1267 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1290 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1268 requirementsmod.SHARESAFE_REQUIREMENT,
1291 requirementsmod.SHARESAFE_REQUIREMENT,
1292 requirementsmod.DIRSTATE_V2_REQUIREMENT,
1269 }
1293 }
1270 _basesupported = supportedformats | {
1294 _basesupported = supportedformats | {
1271 requirementsmod.STORE_REQUIREMENT,
1295 requirementsmod.STORE_REQUIREMENT,
@@ -3606,18 +3630,10 b' def newreporequirements(ui, createopts):'
3606 if ui.configbool(b'format', b'sparse-revlog'):
3630 if ui.configbool(b'format', b'sparse-revlog'):
3607 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3631 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3608
3632
3609 # experimental config: format.exp-dirstate-v2
3633 # experimental config: format.exp-rc-dirstate-v2
3610 # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
3634 # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
3611 if ui.configbool(b'format', b'exp-dirstate-v2'):
3635 if ui.configbool(b'format', b'exp-rc-dirstate-v2'):
3612 if dirstate.SUPPORTS_DIRSTATE_V2:
3636 requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
3613 requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
3614 else:
3615 raise error.Abort(
3616 _(
3617 b"dirstate v2 format requested by config "
3618 b"but not supported (requires Rust extensions)"
3619 )
3620 )
3621
3637
3622 # experimental config: format.exp-use-copies-side-data-changeset
3638 # experimental config: format.exp-use-copies-side-data-changeset
3623 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3639 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
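With the abort branch removed, opting into the format is a plain config toggle even without the Rust extensions; an illustrative transcript:

    $ hg --config format.exp-rc-dirstate-v2=1 init repo
    $ grep dirstate-v2 repo/.hg/requires
    dirstate-v2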
@@ -46,13 +46,12 b' if pycompat.TYPE_CHECKING:'
46 Any,
46 Any,
47 Callable,
47 Callable,
48 Dict,
48 Dict,
49 List,
50 Optional,
49 Optional,
51 Sequence,
50 Sequence,
52 Tuple,
51 Tuple,
53 )
52 )
54
53
55 for t in (Any, Callable, Dict, List, Optional, Tuple):
54 for t in (Any, Callable, Dict, Optional, Tuple):
56 assert t
55 assert t
57
56
58
57
@@ -714,43 +713,43 b' class walkopts(object):'
714 """
713 """
715
714
716 # raw command-line parameters, which a matcher will be built from
715 # raw command-line parameters, which a matcher will be built from
717 pats = attr.ib() # type: List[bytes]
716 pats = attr.ib()
718 opts = attr.ib() # type: Dict[bytes, Any]
717 opts = attr.ib()
719
718
720 # a list of revset expressions to be traversed; if follow, it specifies
719 # a list of revset expressions to be traversed; if follow, it specifies
721 # the start revisions
720 # the start revisions
722 revspec = attr.ib() # type: List[bytes]
721 revspec = attr.ib()
723
722
724 # miscellaneous queries to filter revisions (see "hg help log" for details)
723 # miscellaneous queries to filter revisions (see "hg help log" for details)
725 bookmarks = attr.ib(default=attr.Factory(list)) # type: List[bytes]
724 bookmarks = attr.ib(default=attr.Factory(list))
726 branches = attr.ib(default=attr.Factory(list)) # type: List[bytes]
725 branches = attr.ib(default=attr.Factory(list))
727 date = attr.ib(default=None) # type: Optional[bytes]
726 date = attr.ib(default=None)
728 keywords = attr.ib(default=attr.Factory(list)) # type: List[bytes]
727 keywords = attr.ib(default=attr.Factory(list))
729 no_merges = attr.ib(default=False) # type: bool
728 no_merges = attr.ib(default=False)
730 only_merges = attr.ib(default=False) # type: bool
729 only_merges = attr.ib(default=False)
731 prune_ancestors = attr.ib(default=attr.Factory(list)) # type: List[bytes]
730 prune_ancestors = attr.ib(default=attr.Factory(list))
732 users = attr.ib(default=attr.Factory(list)) # type: List[bytes]
731 users = attr.ib(default=attr.Factory(list))
733
732
734 # miscellaneous matcher arguments
733 # miscellaneous matcher arguments
735 include_pats = attr.ib(default=attr.Factory(list)) # type: List[bytes]
734 include_pats = attr.ib(default=attr.Factory(list))
736 exclude_pats = attr.ib(default=attr.Factory(list)) # type: List[bytes]
735 exclude_pats = attr.ib(default=attr.Factory(list))
737
736
738 # 0: no follow, 1: follow first, 2: follow both parents
737 # 0: no follow, 1: follow first, 2: follow both parents
739 follow = attr.ib(default=0) # type: int
738 follow = attr.ib(default=0)
740
739
741 # do not attempt filelog-based traversal, which may be fast but cannot
740 # do not attempt filelog-based traversal, which may be fast but cannot
742 # include revisions where files were removed
741 # include revisions where files were removed
743 force_changelog_traversal = attr.ib(default=False) # type: bool
742 force_changelog_traversal = attr.ib(default=False)
744
743
745 # filter revisions by file patterns, which should be disabled only if
744 # filter revisions by file patterns, which should be disabled only if
746 # you want to include revisions where files were unmodified
745 # you want to include revisions where files were unmodified
747 filter_revisions_by_pats = attr.ib(default=True) # type: bool
746 filter_revisions_by_pats = attr.ib(default=True)
748
747
749 # sort revisions prior to traversal: 'desc', 'topo', or None
748 # sort revisions prior to traversal: 'desc', 'topo', or None
750 sort_revisions = attr.ib(default=None) # type: Optional[bytes]
749 sort_revisions = attr.ib(default=None)
751
750
752 # limit number of changes displayed; None means unlimited
751 # limit number of changes displayed; None means unlimited
753 limit = attr.ib(default=None) # type: Optional[int]
752 limit = attr.ib(default=None)
754
753
755
754
756 def parseopts(ui, pats, opts):
755 def parseopts(ui, pats, opts):
@@ -913,6 +912,42 b' def _makenofollowfilematcher(repo, pats,'
913 return None
912 return None
914
913
915
914
915 def revsingle(repo, revspec, default=b'.', localalias=None):
916 """Resolves user-provided revset(s) into a single revision.
917
918 This just wraps the lower-level scmutil.revsingle() in order to raise an
919 exception indicating user error.
920 """
921 try:
922 return scmutil.revsingle(repo, revspec, default, localalias)
923 except error.RepoLookupError as e:
924 raise error.InputError(e.args[0], hint=e.hint)
925
926
927 def revpair(repo, revs):
928 """Resolves user-provided revset(s) into two revisions.
929
930 This just wraps the lower-level scmutil.revpair() in order to raise an
931 exception indicating user error.
932 """
933 try:
934 return scmutil.revpair(repo, revs)
935 except error.RepoLookupError as e:
936 raise error.InputError(e.args[0], hint=e.hint)
937
938
939 def revrange(repo, specs, localalias=None):
940 """Resolves user-provided revset(s).
941
942 This just wraps the lower-level scmutil.revrange() in order to raise an
943 exception indicating user error.
944 """
945 try:
946 return scmutil.revrange(repo, specs, localalias)
947 except error.RepoLookupError as e:
948 raise error.InputError(e.args[0], hint=e.hint)
949
950
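All three wrappers follow one pattern: an internal `RepoLookupError` becomes an `InputError`, which dispatch reports as a user mistake (exit code 10 when detailed exit codes are enabled) rather than an internal failure. A condensed sketch of the intended call-site behavior (hypothetical revset name):

    try:
        ctx = revsingle(repo, b'no-such-bookmark')
    except error.InputError:
        # surfaced to the user as bad input, with the hint preserved
        # from the underlying RepoLookupError
        raise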
916 _opt2logrevset = {
951 _opt2logrevset = {
917 b'no_merges': (b'not merge()', None),
952 b'no_merges': (b'not merge()', None),
918 b'only_merges': (b'merge()', None),
953 b'only_merges': (b'merge()', None),
@@ -988,7 +1023,7 b' def _makerevset(repo, wopts, slowpath):'
988 def _initialrevs(repo, wopts):
1023 def _initialrevs(repo, wopts):
989 """Return the initial set of revisions to be filtered or followed"""
1024 """Return the initial set of revisions to be filtered or followed"""
990 if wopts.revspec:
1025 if wopts.revspec:
991 revs = scmutil.revrange(repo, wopts.revspec)
1026 revs = revrange(repo, wopts.revspec)
992 elif wopts.follow and repo.dirstate.p1() == repo.nullid:
1027 elif wopts.follow and repo.dirstate.p1() == repo.nullid:
993 revs = smartset.baseset()
1028 revs = smartset.baseset()
994 elif wopts.follow:
1029 elif wopts.follow:
@@ -9,13 +9,13 b' from __future__ import absolute_import'
9
9
10 import collections
10 import collections
11 import errno
11 import errno
12 import stat
13 import struct
12 import struct
14
13
15 from .i18n import _
14 from .i18n import _
16 from .node import nullrev
15 from .node import nullrev
17 from .thirdparty import attr
16 from .thirdparty import attr
18 from .utils import stringutil
17 from .utils import stringutil
18 from .dirstateutils import timestamp
19 from . import (
19 from . import (
20 copies,
20 copies,
21 encoding,
21 encoding,
@@ -1406,8 +1406,9 b' def batchget(repo, mctx, wctx, wantfiled'
1406 if wantfiledata:
1406 if wantfiledata:
1407 s = wfctx.lstat()
1407 s = wfctx.lstat()
1408 mode = s.st_mode
1408 mode = s.st_mode
1409 mtime = s[stat.ST_MTIME]
1409 mtime = timestamp.mtime_of(s)
1410 filedata[f] = (mode, size, mtime) # for dirstate.normal
1410 # for dirstate.update_file's parentfiledata argument:
1411 filedata[f] = (mode, size, mtime)
1411 if i == 100:
1412 if i == 100:
1412 yield False, (i, f)
1413 yield False, (i, f)
1413 i = 0
1414 i = 0
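`timestamp.mtime_of` packs the stat result into the `(truncated_seconds, subsecond_nanoseconds)` pair used throughout the dirstate code, so `filedata` now records sub-second precision instead of whole seconds. Illustrative usage (absolute module path assumed from the relative import at the top of this file):

    import os
    from mercurial.dirstateutils import timestamp

    st = os.stat('some_file')
    mtime = timestamp.mtime_of(st)
    # mtime is e.g. (seconds & 0x7FFFFFFF, 123456789); a zero nanosecond
    # field means the sub-second precision is unknown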
@@ -796,12 +796,13 b' def recordupdates(repo, actions, branchm'
796 for f, args, msg in actions.get(ACTION_GET, []):
796 for f, args, msg in actions.get(ACTION_GET, []):
797 if branchmerge:
797 if branchmerge:
798 # tracked in p1 can be True also but update_file should not care
798 # tracked in p1 can be True also but update_file should not care
799 old_entry = repo.dirstate.get_entry(f)
800 p1_tracked = old_entry.any_tracked and not old_entry.added
799 repo.dirstate.update_file(
801 repo.dirstate.update_file(
800 f,
802 f,
801 p1_tracked=False,
803 p1_tracked=p1_tracked,
802 p2_tracked=True,
803 wc_tracked=True,
804 wc_tracked=True,
804 clean_p2=True,
805 p2_info=True,
805 )
806 )
806 else:
807 else:
807 parentfiledata = getfiledata[f] if getfiledata else None
808 parentfiledata = getfiledata[f] if getfiledata else None
@@ -818,8 +819,12 b' def recordupdates(repo, actions, branchm'
818 if branchmerge:
819 if branchmerge:
819 # We've done a branch merge, mark this file as merged
820 # We've done a branch merge, mark this file as merged
820 # so that we properly record the merger later
821 # so that we properly record the merger later
822 p1_tracked = f1 == f
821 repo.dirstate.update_file(
823 repo.dirstate.update_file(
822 f, p1_tracked=True, wc_tracked=True, merged=True
824 f,
825 p1_tracked=p1_tracked,
826 wc_tracked=True,
827 p2_info=True,
823 )
828 )
824 if f1 != f2: # copy/rename
829 if f1 != f2: # copy/rename
825 if move:
830 if move:
@@ -1,5 +1,5 b''
1 #ifndef _HG_MPATCH_H_
1 #ifndef HG_MPATCH_H
2 #define _HG_MPATCH_H_
2 #define HG_MPATCH_H
3
3
4 #define MPATCH_ERR_NO_MEM -3
4 #define MPATCH_ERR_NO_MEM -3
5 #define MPATCH_ERR_CANNOT_BE_DECODED -2
5 #define MPATCH_ERR_CANNOT_BE_DECODED -2
@@ -299,7 +299,7 b' def checkworkingcopynarrowspec(repo):'
299 storespec = repo.svfs.tryread(FILENAME)
299 storespec = repo.svfs.tryread(FILENAME)
300 wcspec = repo.vfs.tryread(DIRSTATE_FILENAME)
300 wcspec = repo.vfs.tryread(DIRSTATE_FILENAME)
301 if wcspec != storespec:
301 if wcspec != storespec:
302 raise error.Abort(
302 raise error.StateError(
303 _(b"working copy's narrowspec is stale"),
303 _(b"working copy's narrowspec is stale"),
304 hint=_(b"run 'hg tracked --update-working-copy'"),
304 hint=_(b"run 'hg tracked --update-working-copy'"),
305 )
305 )
@@ -21,7 +21,6 b' from __future__ import absolute_import, '
21 from .i18n import _
21 from .i18n import _
22 from . import (
22 from . import (
23 error,
23 error,
24 pycompat,
25 util,
24 util,
26 )
25 )
27 from .utils import stringutil
26 from .utils import stringutil
@@ -216,7 +215,11 b' def unescapestr(s):'
216 return stringutil.unescapestr(s)
215 return stringutil.unescapestr(s)
217 except ValueError as e:
216 except ValueError as e:
218 # mangle Python's exception into our format
217 # mangle Python's exception into our format
219 raise error.ParseError(pycompat.bytestr(e).lower())
218 # TODO: remove this suppression. For some reason, pytype 2021.09.09
219 # thinks .lower() is being called on Union[ValueError, bytes].
220 # pytype: disable=attribute-error
221 raise error.ParseError(stringutil.forcebytestr(e).lower())
222 # pytype: enable=attribute-error
220
223
221
224
222 def _prettyformat(tree, leafnodes, level, lines):
225 def _prettyformat(tree, leafnodes, level, lines):
@@ -550,7 +550,9 b' class workingbackend(fsbackend):'
550 self.copied = []
550 self.copied = []
551
551
552 def _checkknown(self, fname):
552 def _checkknown(self, fname):
553 if self.repo.dirstate[fname] == b'?' and self.exists(fname):
553 if not self.repo.dirstate.get_entry(fname).any_tracked and self.exists(
554 fname
555 ):
554 raise PatchError(_(b'cannot patch %s: file is not tracked') % fname)
556 raise PatchError(_(b'cannot patch %s: file is not tracked') % fname)
555
557
556 def setfile(self, fname, data, mode, copysource):
558 def setfile(self, fname, data, mode, copysource):
@@ -315,20 +315,19 b' def finddirs(path):'
315 class dirs(object):
315 class dirs(object):
316 '''a multiset of directory names from a set of file paths'''
316 '''a multiset of directory names from a set of file paths'''
317
317
318 def __init__(self, map, skip=None):
318 def __init__(self, map, only_tracked=False):
319 """
319 """
320 a dict map indicates a dirstate while a list indicates a manifest
320 a dict map indicates a dirstate while a list indicates a manifest
321 """
321 """
322 self._dirs = {}
322 self._dirs = {}
323 addpath = self.addpath
323 addpath = self.addpath
324 if isinstance(map, dict) and skip is not None:
324 if isinstance(map, dict) and only_tracked:
325 for f, s in pycompat.iteritems(map):
325 for f, s in pycompat.iteritems(map):
326 if s.state != skip:
326 if s.state != b'r':
327 addpath(f)
327 addpath(f)
328 elif skip is not None:
328 elif only_tracked:
329 raise error.ProgrammingError(
329 msg = b"`only_tracked` is only supported with a dict source"
330 b"skip character is only supported with a dict source"
330 raise error.ProgrammingError(msg)
331 )
332 else:
331 else:
333 for f in map:
332 for f in map:
334 addpath(f)
333 addpath(f)
@@ -7,6 +7,7 b''
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import stat
10 import struct
11 import struct
11 import zlib
12 import zlib
12
13
@@ -43,29 +44,143 b' NONNORMAL = -1'
43 # a special value used internally for `time` if the time is ambiguous
44 # a special value used internally for `time` if the time is ambiguous
44 AMBIGUOUS_TIME = -1
45 AMBIGUOUS_TIME = -1
45
46
47 # Bits of the `flags` byte inside a node in the file format
48 DIRSTATE_V2_WDIR_TRACKED = 1 << 0
49 DIRSTATE_V2_P1_TRACKED = 1 << 1
50 DIRSTATE_V2_P2_INFO = 1 << 2
51 DIRSTATE_V2_MODE_EXEC_PERM = 1 << 3
52 DIRSTATE_V2_MODE_IS_SYMLINK = 1 << 4
53 DIRSTATE_V2_HAS_FALLBACK_EXEC = 1 << 5
54 DIRSTATE_V2_FALLBACK_EXEC = 1 << 6
55 DIRSTATE_V2_HAS_FALLBACK_SYMLINK = 1 << 7
56 DIRSTATE_V2_FALLBACK_SYMLINK = 1 << 8
57 DIRSTATE_V2_EXPECTED_STATE_IS_MODIFIED = 1 << 9
58 DIRSTATE_V2_HAS_MODE_AND_SIZE = 1 << 10
59 DIRSTATE_V2_HAS_MTIME = 1 << 11
60 DIRSTATE_V2_MTIME_SECOND_AMBIGUOUS = 1 << 12
61 DIRSTATE_V2_DIRECTORY = 1 << 13
62 DIRSTATE_V2_ALL_UNKNOWN_RECORDED = 1 << 14
63 DIRSTATE_V2_ALL_IGNORED_RECORDED = 1 << 15
64
46
65
47 @attr.s(slots=True, init=False)
66 @attr.s(slots=True, init=False)
48 class DirstateItem(object):
67 class DirstateItem(object):
49 """represent a dirstate entry
68 """represent a dirstate entry
50
69
51 It contains:
70 It holds multiple attributes:
71
72 # about file tracking
73 - wc_tracked: is the file tracked by the working copy
74 - p1_tracked: is the file tracked in working copy first parent
75 - p2_info: the file has been involved in some merge operation. Either
76 because it was actually merged, or because the p2 version was
77 ahead, or because some rename moved it there. In either case
78 `hg status` will want it displayed as modified.
52
79
53 - state (one of 'n', 'a', 'r', 'm')
80 # about the file state expected from p1 manifest:
54 - mode,
81 - mode: the file mode in p1
55 - size,
82 - size: the file size in p1
56 - mtime,
83
84 These values can be set to None, which means we don't have a meaningful value
85 to compare with. Either because we don't really care about them, as their
86 `status` is known without having to look at the disk, or because we don't
87 know them right now and a full comparison will be needed to find out if
88 the file is clean.
89
90 # about the file state on disk last time we saw it:
91 - mtime: the last known clean mtime for the file.
92
93 This value can be set to None if no cacheable state exists. Either because we
94 do not care (see previous section) or because we could not cache something
95 yet.
57 """
96 """
58
97
59 _state = attr.ib()
98 _wc_tracked = attr.ib()
99 _p1_tracked = attr.ib()
100 _p2_info = attr.ib()
60 _mode = attr.ib()
101 _mode = attr.ib()
61 _size = attr.ib()
102 _size = attr.ib()
62 _mtime = attr.ib()
103 _mtime_s = attr.ib()
104 _mtime_ns = attr.ib()
105 _fallback_exec = attr.ib()
106 _fallback_symlink = attr.ib()
107
108 def __init__(
109 self,
110 wc_tracked=False,
111 p1_tracked=False,
112 p2_info=False,
113 has_meaningful_data=True,
114 has_meaningful_mtime=True,
115 parentfiledata=None,
116 fallback_exec=None,
117 fallback_symlink=None,
118 ):
119 self._wc_tracked = wc_tracked
120 self._p1_tracked = p1_tracked
121 self._p2_info = p2_info
122
123 self._fallback_exec = fallback_exec
124 self._fallback_symlink = fallback_symlink
125
126 self._mode = None
127 self._size = None
128 self._mtime_s = None
129 self._mtime_ns = None
130 if parentfiledata is None:
131 has_meaningful_mtime = False
132 has_meaningful_data = False
133 if has_meaningful_data:
134 self._mode = parentfiledata[0]
135 self._size = parentfiledata[1]
136 if has_meaningful_mtime:
137 self._mtime_s, self._mtime_ns = parentfiledata[2]
63
138
64 def __init__(self, state, mode, size, mtime):
139 @classmethod
65 self._state = state
140 def from_v2_data(cls, flags, size, mtime_s, mtime_ns):
66 self._mode = mode
141 """Build a new DirstateItem object from V2 data"""
67 self._size = size
142 has_mode_size = bool(flags & DIRSTATE_V2_HAS_MODE_AND_SIZE)
68 self._mtime = mtime
143 has_meaningful_mtime = bool(flags & DIRSTATE_V2_HAS_MTIME)
144 if flags & DIRSTATE_V2_MTIME_SECOND_AMBIGUOUS:
145 # The current code is not able to do the more subtle comparison that
146 # MTIME_SECOND_AMBIGUOUS requires, so we ignore the mtime.
147 has_meaningful_mtime = False
148 mode = None
149
150 if flags & DIRSTATE_V2_EXPECTED_STATE_IS_MODIFIED:
151 # we do not have support for this flag in the code yet,
152 # force a lookup for this file.
153 has_mode_size = False
154 has_meaningful_mtime = False
155
156 fallback_exec = None
157 if flags & DIRSTATE_V2_HAS_FALLBACK_EXEC:
158 fallback_exec = flags & DIRSTATE_V2_FALLBACK_EXEC
159
160 fallback_symlink = None
161 if flags & DIRSTATE_V2_HAS_FALLBACK_SYMLINK:
162 fallback_symlink = flags & DIRSTATE_V2_FALLBACK_SYMLINK
163
164 if has_mode_size:
165 assert stat.S_IXUSR == 0o100
166 if flags & DIRSTATE_V2_MODE_EXEC_PERM:
167 mode = 0o755
168 else:
169 mode = 0o644
170 if flags & DIRSTATE_V2_MODE_IS_SYMLINK:
171 mode |= stat.S_IFLNK
172 else:
173 mode |= stat.S_IFREG
174 return cls(
175 wc_tracked=bool(flags & DIRSTATE_V2_WDIR_TRACKED),
176 p1_tracked=bool(flags & DIRSTATE_V2_P1_TRACKED),
177 p2_info=bool(flags & DIRSTATE_V2_P2_INFO),
178 has_meaningful_data=has_mode_size,
179 has_meaningful_mtime=has_meaningful_mtime,
180 parentfiledata=(mode, size, (mtime_s, mtime_ns)),
181 fallback_exec=fallback_exec,
182 fallback_symlink=fallback_symlink,
183 )
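Given the bit definitions above, decoding is plain flag testing; a small self-check that uses only names defined in this module:

    # a file tracked in the working copy and in p1, with known mode/size/mtime
    flags = (
        DIRSTATE_V2_WDIR_TRACKED
        | DIRSTATE_V2_P1_TRACKED
        | DIRSTATE_V2_HAS_MODE_AND_SIZE
        | DIRSTATE_V2_HAS_MTIME
    )
    item = DirstateItem.from_v2_data(flags, 1024, 1633024800, 0)
    assert item.tracked and not item.p2_info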
69
184
70 @classmethod
185 @classmethod
71 def from_v1_data(cls, state, mode, size, mtime):
186 def from_v1_data(cls, state, mode, size, mtime):
@@ -74,12 +189,41 b' class DirstateItem(object):'
74 Since the dirstate-v1 format is frozen, the signature of this function
189 Since the dirstate-v1 format is frozen, the signature of this function
75 is not expected to change, unlike the __init__ one.
190 is not expected to change, unlike the __init__ one.
76 """
191 """
77 return cls(
192 if state == b'm':
78 state=state,
193 return cls(wc_tracked=True, p1_tracked=True, p2_info=True)
79 mode=mode,
194 elif state == b'a':
80 size=size,
195 return cls(wc_tracked=True)
81 mtime=mtime,
196 elif state == b'r':
82 )
197 if size == NONNORMAL:
198 p1_tracked = True
199 p2_info = True
200 elif size == FROM_P2:
201 p1_tracked = False
202 p2_info = True
203 else:
204 p1_tracked = True
205 p2_info = False
206 return cls(p1_tracked=p1_tracked, p2_info=p2_info)
207 elif state == b'n':
208 if size == FROM_P2:
209 return cls(wc_tracked=True, p2_info=True)
210 elif size == NONNORMAL:
211 return cls(wc_tracked=True, p1_tracked=True)
212 elif mtime == AMBIGUOUS_TIME:
213 return cls(
214 wc_tracked=True,
215 p1_tracked=True,
216 has_meaningful_mtime=False,
217 parentfiledata=(mode, size, (42, 0)),
218 )
219 else:
220 return cls(
221 wc_tracked=True,
222 p1_tracked=True,
223 parentfiledata=(mode, size, (mtime, 0)),
224 )
225 else:
226 raise RuntimeError(b'unknown state: %s' % state)
83
227
84 def set_possibly_dirty(self):
228 def set_possibly_dirty(self):
85 """Mark a file as "possibly dirty"
229 """Mark a file as "possibly dirty"
@@ -87,39 +231,80 b' class DirstateItem(object):'
87 This means the next status call will have to actually check its content
231 This means the next status call will have to actually check its content
88 to make sure it is correct.
232 to make sure it is correct.
89 """
233 """
90 self._mtime = AMBIGUOUS_TIME
234 self._mtime_s = None
235 self._mtime_ns = None
236
237 def set_clean(self, mode, size, mtime):
238 """mark a file as "clean", cancelling a potential "possibly dirty" call
239
240 Note: this function is a descendant of `dirstate.normal` and is
241 currently expected to be called on "normal" entries only. There is no
242 reason for this not to change in the future as long as the code is
243 updated to preserve the proper state of non-normal files.
244 """
245 self._wc_tracked = True
246 self._p1_tracked = True
247 self._mode = mode
248 self._size = size
249 self._mtime_s, self._mtime_ns = mtime
250
251 def set_tracked(self):
252 """mark a file as tracked in the working copy
91
253
92 def __getitem__(self, idx):
254 This will ultimately be called by command like `hg add`.
93 if idx == 0 or idx == -4:
255 """
94 msg = b"do not use item[x], use item.state"
256 self._wc_tracked = True
95 util.nouideprecwarn(msg, b'6.0', stacklevel=2)
257 # `set_tracked` is replacing various `normallookup` call. So we mark
96 return self._state
258 # the files as needing lookup
97 elif idx == 1 or idx == -3:
259 #
98 msg = b"do not use item[x], use item.mode"
260 # Consider dropping this in the future in favor of something less broad.
99 util.nouideprecwarn(msg, b'6.0', stacklevel=2)
261 self._mtime_s = None
100 return self._mode
262 self._mtime_ns = None
101 elif idx == 2 or idx == -2:
263
102 msg = b"do not use item[x], use item.size"
264 def set_untracked(self):
103 util.nouideprecwarn(msg, b'6.0', stacklevel=2)
265 """mark a file as untracked in the working copy
104 return self._size
266
105 elif idx == 3 or idx == -1:
267 This will ultimately be called by command like `hg remove`.
106 msg = b"do not use item[x], use item.mtime"
268 """
107 util.nouideprecwarn(msg, b'6.0', stacklevel=2)
269 self._wc_tracked = False
108 return self._mtime
270 self._mode = None
109 else:
271 self._size = None
110 raise IndexError(idx)
272 self._mtime_s = None
273 self._mtime_ns = None
274
275 """remove all "merge-only" data from a DirstateItem
276
277 This is to be called by the dirstatemap code when the second parent is dropped.
278 This is to be call by the dirstatemap code when the second parent is dropped
279 """
280 if self._p2_info:
281 self._p2_info = False
282 self._mode = None
283 self._size = None
284 self._mtime_s = None
285 self._mtime_ns = None
111
286
112 @property
287 @property
113 def mode(self):
288 def mode(self):
114 return self._mode
289 return self.v1_mode()
115
290
116 @property
291 @property
117 def size(self):
292 def size(self):
118 return self._size
293 return self.v1_size()
119
294
120 @property
295 @property
121 def mtime(self):
296 def mtime(self):
122 return self._mtime
297 return self.v1_mtime()
298
299 def mtime_likely_equal_to(self, other_mtime):
300 self_sec = self._mtime_s
301 if self_sec is None:
302 return False
303 self_ns = self._mtime_ns
304 other_sec, other_ns = other_mtime
305 return self_sec == other_sec and (
306 self_ns == other_ns or self_ns == 0 or other_ns == 0
307 )
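Because a zero nanosecond field means the sub-second precision is unknown, the comparison is deliberately forgiving on that side. A short self-check:

    item = DirstateItem(
        wc_tracked=True, p1_tracked=True, parentfiledata=(0o644, 12, (100, 0))
    )
    assert item.mtime_likely_equal_to((100, 999))    # unknown ns matches any ns
    assert not item.mtime_likely_equal_to((101, 0))  # different seconds never match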
123
308
124 @property
309 @property
125 def state(self):
310 def state(self):
@@ -134,94 +319,224 b' class DirstateItem(object):'
134 dirstatev1 format. It would make sense to ultimately deprecate it in
319 dirstatev1 format. It would make sense to ultimately deprecate it in
135 favor of the more "semantic" attributes.
320 favor of the more "semantic" attributes.
136 """
321 """
137 return self._state
322 if not self.any_tracked:
323 return b'?'
324 return self.v1_state()
325
326 @property
327 def has_fallback_exec(self):
328 """True if "fallback" information is available for the "exec" bit
329
330 Fallback information can be stored in the dirstate to keep track of
331 filesystem attributes tracked by Mercurial when the underlying file
332 system or operating system does not support that property, (e.g.
333 Windows).
334
335 Not all versions of the dirstate on-disk storage support preserving this
336 information.
337 """
338 return self._fallback_exec is not None
339
340 @property
341 def fallback_exec(self):
342 """ "fallback" information for the executable bit
343
344 True if the file should be considered executable when we cannot get
345 this information from the file system. False if it should be
346 considered non-executable.
347
348 See has_fallback_exec for details."""
349 return self._fallback_exec
350
351 @fallback_exec.setter
352 def set_fallback_exec(self, value):
353 """control "fallback" executable bit
354
355 Set to:
356 - True if the file should be considered executable,
357 - False if the file should be considered non-executable,
358 - None if we do not have valid fallback data.
359
360 See has_fallback_exec for details."""
361 if value is None:
362 self._fallback_exec = None
363 else:
364 self._fallback_exec = bool(value)
365
366 @property
367 def has_fallback_symlink(self):
368 """True if "fallback" information is available for symlink status
369
370 Fallback information can be stored in the dirstate to keep track of
371 filesystem attributes tracked by Mercurial when the underlying file
372 system or operating system does not support that property, (e.g.
373 Windows).
374
375 Not all versions of the dirstate on-disk storage support preserving this
376 information."""
377 return self._fallback_symlink is not None
378
379 @property
380 def fallback_symlink(self):
381 """ "fallback" information for symlink status
382
383 True if the file should be considered a symlink when we cannot get
384 this information from the file system. False if it should be
385 considered a regular file.
386
387 See has_fallback_symlink for details."""
388 return self._fallback_symlink
389
390 @fallback_symlink.setter
391 def set_fallback_symlink(self, value):
392 """control "fallback" symlink status
393
394 Set to:
395 - True if the file should be considered a symlink,
396 - False if the file should be considered not a symlink,
397 - None if we do not have valid fallback data.
398
399 See has_fallback_symlink for details."""
400 if value is None:
401 self._fallback_symlink = None
402 else:
403 self._fallback_symlink = bool(value)
138
404
139 @property
405 @property
140 def tracked(self):
406 def tracked(self):
141 """True if the file is tracked in the working copy"""
407 """True if the file is tracked in the working copy"""
142 return self._state in b"nma"
408 return self._wc_tracked
409
410 @property
411 def any_tracked(self):
412 """True if the file is tracked anywhere (wc or parents)"""
413 return self._wc_tracked or self._p1_tracked or self._p2_info
143
414
144 @property
415 @property
145 def added(self):
416 def added(self):
146 """True if the file has been added"""
417 """True if the file has been added"""
147 return self._state == b'a'
418 return self._wc_tracked and not (self._p1_tracked or self._p2_info)
148
149 @property
150 def merged(self):
151 """True if the file has been merged
152
153 Should only be set if a merge is in progress in the dirstate
154 """
155 return self._state == b'm'
156
419
157 @property
420 @property
158 def from_p2(self):
421 def maybe_clean(self):
159 """True if the file have been fetched from p2 during the current merge
422 """True if the file has a chance to be in the "clean" state"""
160
423 if not self._wc_tracked:
161 This is only True is the file is currently tracked.
424 return False
162
425 elif not self._p1_tracked:
163 Should only be set if a merge is in progress in the dirstate
426 return False
164 """
427 elif self._p2_info:
165 return self._state == b'n' and self._size == FROM_P2
428 return False
429 return True
166
430
167 @property
431 @property
168 def from_p2_removed(self):
432 def p1_tracked(self):
169 """True if the file has been removed, but was "from_p2" initially
433 """True if the file is tracked in the first parent manifest"""
434 return self._p1_tracked
170
435
171 This property seems like an abstraction leakage and should probably be
436 @property
172 dealt in this class (or maybe the dirstatemap) directly.
437 def p2_info(self):
438 """True if the file needed to merge or apply any input from p2
439
440 See the class documentation for details.
173 """
441 """
174 return self._state == b'r' and self._size == FROM_P2
442 return self._wc_tracked and self._p2_info
175
443
176 @property
444 @property
177 def removed(self):
445 def removed(self):
178 """True if the file has been removed"""
446 """True if the file has been removed"""
179 return self._state == b'r'
447 return not self._wc_tracked and (self._p1_tracked or self._p2_info)
180
181 @property
182 def merged_removed(self):
183 """True if the file has been removed, but was "merged" initially
184
185 This property seems like an abstraction leakage and should probably be
186 dealt in this class (or maybe the dirstatemap) directly.
187 """
188 return self._state == b'r' and self._size == NONNORMAL
189
448
190 @property
449 def v2_data(self):
191 def dm_nonnormal(self):
450 """Returns (flags, mode, size, mtime) for v2 serialization"""
192 """True is the entry is non-normal in the dirstatemap sense
451 flags = 0
193
452 if self._wc_tracked:
194 There is no reason for any code, but the dirstatemap one to use this.
453 flags |= DIRSTATE_V2_WDIR_TRACKED
195 """
454 if self._p1_tracked:
196 return self.state != b'n' or self.mtime == AMBIGUOUS_TIME
455 flags |= DIRSTATE_V2_P1_TRACKED
456 if self._p2_info:
457 flags |= DIRSTATE_V2_P2_INFO
458 if self._mode is not None and self._size is not None:
459 flags |= DIRSTATE_V2_HAS_MODE_AND_SIZE
460 if self.mode & stat.S_IXUSR:
461 flags |= DIRSTATE_V2_MODE_EXEC_PERM
462 if stat.S_ISLNK(self.mode):
463 flags |= DIRSTATE_V2_MODE_IS_SYMLINK
464 if self._mtime_s is not None:
465 flags |= DIRSTATE_V2_HAS_MTIME
197
466
198 @property
467 if self._fallback_exec is not None:
199 def dm_otherparent(self):
468 flags |= DIRSTATE_V2_HAS_FALLBACK_EXEC
200 """True is the entry is `otherparent` in the dirstatemap sense
469 if self._fallback_exec:
470 flags |= DIRSTATE_V2_FALLBACK_EXEC
201
471
202 There is no reason for any code, but the dirstatemap one to use this.
472 if self._fallback_symlink is not None:
203 """
473 flags |= DIRSTATE_V2_HAS_FALLBACK_SYMLINK
204 return self._size == FROM_P2
474 if self._fallback_symlink:
475 flags |= DIRSTATE_V2_FALLBACK_SYMLINK
476
477 # Note: we do not need to do anything regarding
478 # DIRSTATE_V2_ALL_UNKNOWN_RECORDED and DIRSTATE_V2_ALL_IGNORED_RECORDED
479 # since we never set _DIRSTATE_V2_HAS_DIRECTORY_MTIME
480 return (flags, self._size or 0, self._mtime_s or 0, self._mtime_ns or 0)
205
481
206 def v1_state(self):
482 def v1_state(self):
207 """return a "state" suitable for v1 serialization"""
483 """return a "state" suitable for v1 serialization"""
208 return self._state
484 if not self.any_tracked:
485 # the object has no state to record, this is -currently-
486 # unsupported
487 raise RuntimeError('untracked item')
488 elif self.removed:
489 return b'r'
490 elif self._p1_tracked and self._p2_info:
491 return b'm'
492 elif self.added:
493 return b'a'
494 else:
495 return b'n'
209
496
210 def v1_mode(self):
497 def v1_mode(self):
211 """return a "mode" suitable for v1 serialization"""
498 """return a "mode" suitable for v1 serialization"""
212 return self._mode
499 return self._mode if self._mode is not None else 0
213
500
214 def v1_size(self):
501 def v1_size(self):
215 """return a "size" suitable for v1 serialization"""
502 """return a "size" suitable for v1 serialization"""
216 return self._size
503 if not self.any_tracked:
504 # the object has no state to record, this is -currently-
505 # unsupported
506 raise RuntimeError('untracked item')
507 elif self.removed and self._p1_tracked and self._p2_info:
508 return NONNORMAL
509 elif self._p2_info:
510 return FROM_P2
511 elif self.removed:
512 return 0
513 elif self.added:
514 return NONNORMAL
515 elif self._size is None:
516 return NONNORMAL
517 else:
518 return self._size
217
519
218 def v1_mtime(self):
520 def v1_mtime(self):
219 """return a "mtime" suitable for v1 serialization"""
521 """return a "mtime" suitable for v1 serialization"""
220 return self._mtime
522 if not self.any_tracked:
523 # the object has no state to record, this is -currently-
524 # unsupported
525 raise RuntimeError('untracked item')
526 elif self.removed:
527 return 0
528 elif self._mtime_s is None:
529 return AMBIGUOUS_TIME
530 elif self._p2_info:
531 return AMBIGUOUS_TIME
532 elif not self._p1_tracked:
533 return AMBIGUOUS_TIME
534 else:
535 return self._mtime_s
221
536
222 def need_delay(self, now):
537 def need_delay(self, now):
223 """True if the stored mtime would be ambiguous with the current time"""
538 """True if the stored mtime would be ambiguous with the current time"""
224 return self._state == b'n' and self._mtime == now
539 return self.v1_state() == b'n' and self._mtime_s == now[0]
225
540
226
541
227 def gettype(q):
542 def gettype(q):
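The v1 mapping above round-trips: parsing a v1 entry and re-serializing it preserves the state byte and sentinel values. A brief self-check using only names from this module:

    merged = DirstateItem.from_v1_data(b'm', 0, 0, 0)
    assert merged.v1_state() == b'm'
    assert merged.v1_mtime() == AMBIGUOUS_TIME  # merge data carries no cached mtime

    removed = DirstateItem.from_v1_data(b'r', 0, 0, 0)
    assert removed.v1_state() == b'r' and removed.v1_size() == 0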
@@ -589,7 +904,6 b' def parse_dirstate(dmap, copymap, st):'
589
904
590
905
591 def pack_dirstate(dmap, copymap, pl, now):
906 def pack_dirstate(dmap, copymap, pl, now):
592 now = int(now)
593 cs = stringio()
907 cs = stringio()
594 write = cs.write
908 write = cs.write
595 write(b"".join(pl))
909 write(b"".join(pl))
@@ -44,6 +44,7 b' if not ispy3:'
44 FileNotFoundError = OSError
44 FileNotFoundError = OSError
45
45
46 else:
46 else:
47 import builtins
47 import concurrent.futures as futures
48 import concurrent.futures as futures
48 import http.cookiejar as cookielib
49 import http.cookiejar as cookielib
49 import http.client as httplib
50 import http.client as httplib
@@ -55,7 +56,7 b' else:'
55 def future_set_exception_info(f, exc_info):
56 def future_set_exception_info(f, exc_info):
56 f.set_exception(exc_info[0])
57 f.set_exception(exc_info[0])
57
58
58 FileNotFoundError = __builtins__['FileNotFoundError']
59 FileNotFoundError = builtins.FileNotFoundError
59
60
60
61
61 def identity(a):
62 def identity(a):
@@ -222,6 +223,15 b' if ispy3:'
222 >>> assert type(t) is bytes
223 >>> assert type(t) is bytes
223 """
224 """
224
225
226 # Trick pytype into not demanding Iterable[int] be passed to __new__(),
227 # since the appropriate bytes format is done internally.
228 #
229 # https://github.com/google/pytype/issues/500
230 if TYPE_CHECKING:
231
232 def __init__(self, s=b''):
233 pass
234
225 def __new__(cls, s=b''):
235 def __new__(cls, s=b''):
226 if isinstance(s, bytestr):
236 if isinstance(s, bytestr):
227 return s
237 return s
@@ -433,7 +433,7 b' def manifestrevlogs(repo):'
433 if scmutil.istreemanifest(repo):
433 if scmutil.istreemanifest(repo):
434 # This logic is safe if treemanifest isn't enabled, but also
434 # This logic is safe if treemanifest isn't enabled, but also
435 # pointless, so we skip it if treemanifest isn't enabled.
435 # pointless, so we skip it if treemanifest isn't enabled.
436 for t, unencoded, encoded, size in repo.store.datafiles():
436 for t, unencoded, size in repo.store.datafiles():
437 if unencoded.startswith(b'meta/') and unencoded.endswith(
437 if unencoded.startswith(b'meta/') and unencoded.endswith(
438 b'00manifest.i'
438 b'00manifest.i'
439 ):
439 ):
@@ -441,7 +441,7 b' def manifestrevlogs(repo):'
441 yield repo.manifestlog.getstorage(dir)
441 yield repo.manifestlog.getstorage(dir)
442
442
443
443
444 def rebuildfncache(ui, repo):
444 def rebuildfncache(ui, repo, only_data=False):
445 """Rebuilds the fncache file from repo history.
445 """Rebuilds the fncache file from repo history.
446
446
447 Missing entries will be added. Extra entries will be removed.
447 Missing entries will be added. Extra entries will be removed.
@@ -465,28 +465,40 b' def rebuildfncache(ui, repo):'
465 newentries = set()
465 newentries = set()
466 seenfiles = set()
466 seenfiles = set()
467
467
468 progress = ui.makeprogress(
468 if only_data:
469 _(b'rebuilding'), unit=_(b'changesets'), total=len(repo)
469 # Trust the listing of .i from the fncache, but not the .d. This is
470 )
470 # much faster, because we only need to stat every possible .d files,
471 for rev in repo:
471 # instead of reading the full changelog
472 progress.update(rev)
472 for f in fnc:
473 if f[:5] == b'data/' and f[-2:] == b'.i':
474 seenfiles.add(f[5:-2])
475 newentries.add(f)
476 dataf = f[:-2] + b'.d'
477 if repo.store._exists(dataf):
478 newentries.add(dataf)
479 else:
480 progress = ui.makeprogress(
481 _(b'rebuilding'), unit=_(b'changesets'), total=len(repo)
482 )
483 for rev in repo:
484 progress.update(rev)
473
485
474 ctx = repo[rev]
486 ctx = repo[rev]
475 for f in ctx.files():
487 for f in ctx.files():
476 # This is to minimize I/O.
488 # This is to minimize I/O.
477 if f in seenfiles:
489 if f in seenfiles:
478 continue
490 continue
479 seenfiles.add(f)
491 seenfiles.add(f)
480
492
481 i = b'data/%s.i' % f
493 i = b'data/%s.i' % f
482 d = b'data/%s.d' % f
494 d = b'data/%s.d' % f
483
495
484 if repo.store._exists(i):
496 if repo.store._exists(i):
485 newentries.add(i)
497 newentries.add(i)
486 if repo.store._exists(d):
498 if repo.store._exists(d):
487 newentries.add(d)
499 newentries.add(d)
488
500
489 progress.complete()
501 progress.complete()
490
502
491 if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements:
503 if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements:
492 # This logic is safe if treemanifest isn't enabled, but also
504 # This logic is safe if treemanifest isn't enabled, but also
@@ -12,7 +12,7 b" DOTENCODE_REQUIREMENT = b'dotencode'"
12 STORE_REQUIREMENT = b'store'
12 STORE_REQUIREMENT = b'store'
13 FNCACHE_REQUIREMENT = b'fncache'
13 FNCACHE_REQUIREMENT = b'fncache'
14
14
15 DIRSTATE_V2_REQUIREMENT = b'exp-dirstate-v2'
15 DIRSTATE_V2_REQUIREMENT = b'dirstate-v2'
16
16
17 # When narrowing is finalized and no longer subject to format changes,
17 # When narrowing is finalized and no longer subject to format changes,
18 # we should move this to just "narrow" or similar.
18 # we should move this to just "narrow" or similar.
@@ -2581,10 +2581,15 b' class revlog(object):'
2581 self._enforceinlinesize(transaction)
2581 self._enforceinlinesize(transaction)
2582 if self._docket is not None:
2582 if self._docket is not None:
2583 # revlog-v2 always has 3 writing handles, help Pytype
2583 # revlog-v2 always has 3 writing handles, help Pytype
2584 assert self._writinghandles[2] is not None
2584 wh1 = self._writinghandles[0]
2585 self._docket.index_end = self._writinghandles[0].tell()
2585 wh2 = self._writinghandles[1]
2586 self._docket.data_end = self._writinghandles[1].tell()
2586 wh3 = self._writinghandles[2]
2587 self._docket.sidedata_end = self._writinghandles[2].tell()
2587 assert wh1 is not None
2588 assert wh2 is not None
2589 assert wh3 is not None
2590 self._docket.index_end = wh1.tell()
2591 self._docket.data_end = wh2.tell()
2592 self._docket.sidedata_end = wh3.tell()
2588
2593
2589 nodemaputil.setup_persistent_nodemap(transaction, self)
2594 nodemaputil.setup_persistent_nodemap(transaction, self)
2590
2595
@@ -826,7 +826,7 b' def repair_issue6528('
826 with context():
826 with context():
827 files = list(
827 files = list(
828 (file_type, path)
828 (file_type, path)
829 for (file_type, path, _e, _s) in repo.store.datafiles()
829 for (file_type, path, _s) in repo.store.datafiles()
830 if path.endswith(b'.i') and file_type & store.FILEFLAGS_FILELOG
830 if path.endswith(b'.i') and file_type & store.FILEFLAGS_FILELOG
831 )
831 )
832
832
@@ -689,7 +689,7 b" def revsingle(repo, revspec, default=b'."
689
689
690 l = revrange(repo, [revspec], localalias=localalias)
690 l = revrange(repo, [revspec], localalias=localalias)
691 if not l:
691 if not l:
692 raise error.Abort(_(b'empty revision set'))
692 raise error.InputError(_(b'empty revision set'))
693 return repo[l.last()]
693 return repo[l.last()]
694
694
695
695
@@ -710,7 +710,7 b' def revpair(repo, revs):'
710 l = revrange(repo, revs)
710 l = revrange(repo, revs)
711
711
712 if not l:
712 if not l:
713 raise error.Abort(_(b'empty revision range'))
713 raise error.InputError(_(b'empty revision range'))
714
714
715 first = l.first()
715 first = l.first()
716 second = l.last()
716 second = l.last()
@@ -720,7 +720,7 b' def revpair(repo, revs):'
720 and len(revs) >= 2
720 and len(revs) >= 2
721 and not all(revrange(repo, [r]) for r in revs)
721 and not all(revrange(repo, [r]) for r in revs)
722 ):
722 ):
723 raise error.Abort(_(b'empty revision on one side of range'))
723 raise error.InputError(_(b'empty revision on one side of range'))
724
724
725 # if top-level is range expression, the result must always be a pair
725 # if top-level is range expression, the result must always be a pair
726 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
726 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
@@ -1211,9 +1211,9 b' def addremove(repo, matcher, prefix, uip'
1211 try:
1211 try:
1212 similarity = float(opts.get(b'similarity') or 0)
1212 similarity = float(opts.get(b'similarity') or 0)
1213 except ValueError:
1213 except ValueError:
1214 raise error.Abort(_(b'similarity must be a number'))
1214 raise error.InputError(_(b'similarity must be a number'))
1215 if similarity < 0 or similarity > 100:
1215 if similarity < 0 or similarity > 100:
1216 raise error.Abort(_(b'similarity must be between 0 and 100'))
1216 raise error.InputError(_(b'similarity must be between 0 and 100'))
1217 similarity /= 100.0
1217 similarity /= 100.0
1218
1218
1219 ret = 0
1219 ret = 0
@@ -1327,17 +1327,17 b' def _interestingfiles(repo, matcher):'
1327 full=False,
1327 full=False,
1328 )
1328 )
1329 for abs, st in pycompat.iteritems(walkresults):
1329 for abs, st in pycompat.iteritems(walkresults):
1330 dstate = dirstate[abs]
1330 entry = dirstate.get_entry(abs)
1331 if dstate == b'?' and audit_path.check(abs):
1331 if (not entry.any_tracked) and audit_path.check(abs):
1332 unknown.append(abs)
1332 unknown.append(abs)
1333 elif dstate != b'r' and not st:
1333 elif (not entry.removed) and not st:
1334 deleted.append(abs)
1334 deleted.append(abs)
1335 elif dstate == b'r' and st:
1335 elif entry.removed and st:
1336 forgotten.append(abs)
1336 forgotten.append(abs)
1337 # for finding renames
1337 # for finding renames
1338 elif dstate == b'r' and not st:
1338 elif entry.removed and not st:
1339 removed.append(abs)
1339 removed.append(abs)
1340 elif dstate == b'a':
1340 elif entry.added:
1341 added.append(abs)
1341 added.append(abs)
1342
1342
1343 return added, unknown, deleted, removed, forgotten
1343 return added, unknown, deleted, removed, forgotten
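The conversions above replace lookups of the old one-letter dirstate states (b'?', b'a', b'r', b'n') with predicate attributes on the object returned by `dirstate.get_entry()`. A rough back-mapping, as a sketch using only the predicates exercised in this patch (the old b'm' state has no single counterpart here):

    def legacy_state(entry):
        # Sketch only: assumes an entry object like the ones above.
        if not entry.any_tracked:
            return b'?'  # unknown to the dirstate
        if entry.added:
            return b'a'
        if entry.removed:
            return b'r'
        return b'n'      # tracked, possibly clean (entry.maybe_clean)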
@@ -1455,10 +1455,11 b' def dirstatecopy(ui, repo, wctx, src, ds'
1455 """
1455 """
1456 origsrc = repo.dirstate.copied(src) or src
1456 origsrc = repo.dirstate.copied(src) or src
1457 if dst == origsrc: # copying back a copy?
1457 if dst == origsrc: # copying back a copy?
1458 if repo.dirstate[dst] not in b'mn' and not dryrun:
1458 entry = repo.dirstate.get_entry(dst)
1459 if (entry.added or not entry.tracked) and not dryrun:
1459 repo.dirstate.set_tracked(dst)
1460 repo.dirstate.set_tracked(dst)
1460 else:
1461 else:
1461 if repo.dirstate[origsrc] == b'a' and origsrc == src:
1462 if repo.dirstate.get_entry(origsrc).added and origsrc == src:
1462 if not ui.quiet:
1463 if not ui.quiet:
1463 ui.warn(
1464 ui.warn(
1464 _(
1465 _(
@@ -1467,7 +1468,7 b' def dirstatecopy(ui, repo, wctx, src, ds'
1467 )
1468 )
1468 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))
1469 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))
1469 )
1470 )
1470 if repo.dirstate[dst] in b'?r' and not dryrun:
1471 if not repo.dirstate.get_entry(dst).tracked and not dryrun:
1471 wctx.add([dst])
1472 wctx.add([dst])
1472 elif not dryrun:
1473 elif not dryrun:
1473 wctx.copy(origsrc, dst)
1474 wctx.copy(origsrc, dst)
@@ -1504,7 +1505,7 b' def movedirstate(repo, newctx, match=Non'
1504 }
1505 }
1505 # Adjust the dirstate copies
1506 # Adjust the dirstate copies
1506 for dst, src in pycompat.iteritems(copies):
1507 for dst, src in pycompat.iteritems(copies):
1507 if src not in newctx or dst in newctx or ds[dst] != b'a':
1508 if src not in newctx or dst in newctx or not ds.get_entry(dst).added:
1508 src = None
1509 src = None
1509 ds.copy(src, dst)
1510 ds.copy(src, dst)
1510 repo._quick_access_changeid_invalidate()
1511 repo._quick_access_changeid_invalidate()
@@ -472,7 +472,7 b' class basicstore(object):'
472 return self.path + b'/' + encodedir(f)
472 return self.path + b'/' + encodedir(f)
473
473
474 def _walk(self, relpath, recurse):
474 def _walk(self, relpath, recurse):
475 '''yields (unencoded, encoded, size)'''
475 '''yields (revlog_type, unencoded, size)'''
476 path = self.path
476 path = self.path
477 if relpath:
477 if relpath:
478 path += b'/' + relpath
478 path += b'/' + relpath
@@ -488,7 +488,7 b' class basicstore(object):'
488 rl_type = is_revlog(f, kind, st)
488 rl_type = is_revlog(f, kind, st)
489 if rl_type is not None:
489 if rl_type is not None:
490 n = util.pconvert(fp[striplen:])
490 n = util.pconvert(fp[striplen:])
491 l.append((rl_type, decodedir(n), n, st.st_size))
491 l.append((rl_type, decodedir(n), st.st_size))
492 elif kind == stat.S_IFDIR and recurse:
492 elif kind == stat.S_IFDIR and recurse:
493 visit.append(fp)
493 visit.append(fp)
494 l.sort()
494 l.sort()
@@ -505,26 +505,32 b' class basicstore(object):'
505 rootstore = manifest.manifestrevlog(repo.nodeconstants, self.vfs)
505 rootstore = manifest.manifestrevlog(repo.nodeconstants, self.vfs)
506 return manifest.manifestlog(self.vfs, repo, rootstore, storenarrowmatch)
506 return manifest.manifestlog(self.vfs, repo, rootstore, storenarrowmatch)
507
507
508 def datafiles(self, matcher=None):
508 def datafiles(self, matcher=None, undecodable=None):
509 """Like walk, but excluding the changelog and root manifest.
510
511 When [undecodable] is None, revlog names that can't be
512 decoded cause an exception. When it is provided, it should
513 be a list and the filenames that can't be decoded are added
514 to it instead. This is very rarely needed."""
509 files = self._walk(b'data', True) + self._walk(b'meta', True)
515 files = self._walk(b'data', True) + self._walk(b'meta', True)
510 for (t, u, e, s) in files:
516 for (t, u, s) in files:
511 yield (FILEFLAGS_FILELOG | t, u, e, s)
517 yield (FILEFLAGS_FILELOG | t, u, s)
512
518
513 def topfiles(self):
519 def topfiles(self):
514 # yield manifest before changelog
520 # yield manifest before changelog
515 files = reversed(self._walk(b'', False))
521 files = reversed(self._walk(b'', False))
516 for (t, u, e, s) in files:
522 for (t, u, s) in files:
517 if u.startswith(b'00changelog'):
523 if u.startswith(b'00changelog'):
518 yield (FILEFLAGS_CHANGELOG | t, u, e, s)
524 yield (FILEFLAGS_CHANGELOG | t, u, s)
519 elif u.startswith(b'00manifest'):
525 elif u.startswith(b'00manifest'):
520 yield (FILEFLAGS_MANIFESTLOG | t, u, e, s)
526 yield (FILEFLAGS_MANIFESTLOG | t, u, s)
521 else:
527 else:
522 yield (FILETYPE_OTHER | t, u, e, s)
528 yield (FILETYPE_OTHER | t, u, s)
523
529
524 def walk(self, matcher=None):
530 def walk(self, matcher=None):
525 """return file related to data storage (ie: revlogs)
531 """return file related to data storage (ie: revlogs)
526
532
527 yields (file_type, unencoded, encoded, size)
533 yields (file_type, unencoded, size)
528
534
529 if a matcher is passed, only storage files of tracked paths
535 if a matcher is passed, only storage files of tracked paths
530 matching the matcher are yielded
536 matching the matcher are yielded
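The two calling conventions the new `undecodable` argument allows, as a hypothetical caller (`repo`, `ui` and `process` are stand-ins):

    # Default: an undecodable revlog name raises error.StorageError.
    for t, path, size in repo.store.datafiles():
        process(path)

    # Collecting: undecodable names are appended to the given list instead.
    undecodable = []
    for t, path, size in repo.store.datafiles(undecodable=undecodable):
        process(path)
    for path in undecodable:
        ui.warn(b"cannot decode filename '%s'\n" % path)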
@@ -574,15 +580,20 b' class encodedstore(basicstore):'
574 # However that might change so we should probably add a test and encoding
580 # However that might change so we should probably add a test and encoding
575 # decoding for it too. see issue6548
581 # decoding for it too. see issue6548
576
582
577 def datafiles(self, matcher=None):
583 def datafiles(self, matcher=None, undecodable=None):
578 for t, a, b, size in super(encodedstore, self).datafiles():
584 for t, f1, size in super(encodedstore, self).datafiles():
579 try:
585 try:
580 a = decodefilename(a)
586 f2 = decodefilename(f1)
581 except KeyError:
587 except KeyError:
582 a = None
588 if undecodable is None:
583 if a is not None and not _matchtrackedpath(a, matcher):
589 msg = _(b'undecodable revlog name %s') % f1
590 raise error.StorageError(msg)
591 else:
592 undecodable.append(f1)
593 continue
594 if not _matchtrackedpath(f2, matcher):
584 continue
595 continue
585 yield t, a, b, size
596 yield t, f2, size
586
597
587 def join(self, f):
598 def join(self, f):
588 return self.path + b'/' + encodefilename(f)
599 return self.path + b'/' + encodefilename(f)
@@ -770,7 +781,7 b' class fncachestore(basicstore):'
770 def getsize(self, path):
781 def getsize(self, path):
771 return self.rawvfs.stat(path).st_size
782 return self.rawvfs.stat(path).st_size
772
783
773 def datafiles(self, matcher=None):
784 def datafiles(self, matcher=None, undecodable=None):
774 for f in sorted(self.fncache):
785 for f in sorted(self.fncache):
775 if not _matchtrackedpath(f, matcher):
786 if not _matchtrackedpath(f, matcher):
776 continue
787 continue
@@ -779,7 +790,7 b' class fncachestore(basicstore):'
779 t = revlog_type(f)
790 t = revlog_type(f)
780 assert t is not None, f
791 assert t is not None, f
781 t |= FILEFLAGS_FILELOG
792 t |= FILEFLAGS_FILELOG
782 yield t, f, ef, self.getsize(ef)
793 yield t, f, self.getsize(ef)
783 except OSError as err:
794 except OSError as err:
784 if err.errno != errno.ENOENT:
795 if err.errno != errno.ENOENT:
785 raise
796 raise
@@ -248,7 +248,7 b' def generatev1(repo):'
248 # Get consistent snapshot of repo, lock during scan.
248 # Get consistent snapshot of repo, lock during scan.
249 with repo.lock():
249 with repo.lock():
250 repo.ui.debug(b'scanning\n')
250 repo.ui.debug(b'scanning\n')
251 for file_type, name, ename, size in _walkstreamfiles(repo):
251 for file_type, name, size in _walkstreamfiles(repo):
252 if size:
252 if size:
253 entries.append((name, size))
253 entries.append((name, size))
254 total_bytes += size
254 total_bytes += size
@@ -650,7 +650,7 b' def _v2_walk(repo, includes, excludes, i'
650 if includes or excludes:
650 if includes or excludes:
651 matcher = narrowspec.match(repo.root, includes, excludes)
651 matcher = narrowspec.match(repo.root, includes, excludes)
652
652
653 for rl_type, name, ename, size in _walkstreamfiles(repo, matcher):
653 for rl_type, name, size in _walkstreamfiles(repo, matcher):
654 if size:
654 if size:
655 ft = _fileappend
655 ft = _fileappend
656 if rl_type & store.FILEFLAGS_VOLATILE:
656 if rl_type & store.FILEFLAGS_VOLATILE:
@@ -8,6 +8,7 b' from . import ('
8 error,
8 error,
9 hg,
9 hg,
10 lock as lockmod,
10 lock as lockmod,
11 logcmdutil,
11 mergestate as mergestatemod,
12 mergestate as mergestatemod,
12 pycompat,
13 pycompat,
13 registrar,
14 registrar,
@@ -178,7 +179,7 b' def debugstrip(ui, repo, *revs, **opts):'
178
179
179 cl = repo.changelog
180 cl = repo.changelog
180 revs = list(revs) + opts.get(b'rev')
181 revs = list(revs) + opts.get(b'rev')
181 revs = set(scmutil.revrange(repo, revs))
182 revs = set(logcmdutil.revrange(repo, revs))
182
183
183 with repo.wlock():
184 with repo.wlock():
184 bookmarks = set(opts.get(b'bookmark'))
185 bookmarks = set(opts.get(b'bookmark'))
@@ -255,7 +256,9 b' def debugstrip(ui, repo, *revs, **opts):'
255
256
256 # reset files that only changed in the dirstate too
257 # reset files that only changed in the dirstate too
257 dirstate = repo.dirstate
258 dirstate = repo.dirstate
258 dirchanges = [f for f in dirstate if dirstate[f] != b'n']
259 dirchanges = [
260 f for f in dirstate if not dirstate.get_entry(f).maybe_clean
261 ]
259 changedfiles.extend(dirchanges)
262 changedfiles.extend(dirchanges)
260
263
261 repo.dirstate.rebuild(urev, uctx.manifest(), changedfiles)
264 repo.dirstate.rebuild(urev, uctx.manifest(), changedfiles)
@@ -178,7 +178,9 b' class dirstatev2(requirementformatvarian'
178
178
179 description = _(
179 description = _(
180 b'version 1 of the dirstate file format requires '
180 b'version 1 of the dirstate file format requires '
181 b'reading and parsing it all at once.'
181 b'reading and parsing it all at once.\n'
182 b'Version 2 has a better structure, '
183 b'better information and a lighter update mechanism.'
182 )
184 )
183
185
184 upgrademessage = _(b'"hg status" will be faster')
186 upgrademessage = _(b'"hg status" will be faster')
@@ -201,7 +201,7 b' def _clonerevlogs('
201
201
202 # Perform a pass to collect metadata. This validates we can open all
202 # Perform a pass to collect metadata. This validates we can open all
203 # source files and allows a unified progress bar to be displayed.
203 # source files and allows a unified progress bar to be displayed.
204 for rl_type, unencoded, encoded, size in alldatafiles:
204 for rl_type, unencoded, size in alldatafiles:
205 if not rl_type & store.FILEFLAGS_REVLOG_MAIN:
205 if not rl_type & store.FILEFLAGS_REVLOG_MAIN:
206 continue
206 continue
207
207
@@ -638,7 +638,6 b' def upgrade_dirstate(ui, srcrepo, upgrad'
638 )
638 )
639
639
640 assert srcrepo.dirstate._use_dirstate_v2 == (old == b'v2')
640 assert srcrepo.dirstate._use_dirstate_v2 == (old == b'v2')
641 srcrepo.dirstate._map._use_dirstate_tree = True
642 srcrepo.dirstate._map.preload()
641 srcrepo.dirstate._map.preload()
643 srcrepo.dirstate._use_dirstate_v2 = new == b'v2'
642 srcrepo.dirstate._use_dirstate_v2 = new == b'v2'
644 srcrepo.dirstate._map._use_dirstate_v2 = srcrepo.dirstate._use_dirstate_v2
643 srcrepo.dirstate._map._use_dirstate_v2 = srcrepo.dirstate._use_dirstate_v2
@@ -449,8 +449,8 b' def mmapread(fp, size=None):'
449 return b''
449 return b''
450 elif size is None:
450 elif size is None:
451 size = 0
451 size = 0
452 fd = getattr(fp, 'fileno', lambda: fp)()
452 try:
453 try:
453 fd = getattr(fp, 'fileno', lambda: fp)()
454 return mmap.mmap(fd, size, access=mmap.ACCESS_READ)
454 return mmap.mmap(fd, size, access=mmap.ACCESS_READ)
455 except ValueError:
455 except ValueError:
456 # Empty files cannot be mmapped, but mmapread should still work. Check
456 # Empty files cannot be mmapped, but mmapread should still work. Check
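With the `fileno()` call moved inside the `try` block, `mmapread` still accepts either a file object or a raw descriptor, since `getattr(fp, 'fileno', lambda: fp)()` falls back to treating `fp` itself as the descriptor. A usage sketch (`path` is a stand-in):

    import os

    with open(path, 'rb') as fp:
        data = mmapread(fp)       # maps the whole file

    fd = os.open(path, os.O_RDONLY)
    data = mmapread(fd, 1024)     # an fd plus an explicit size also works,
    os.close(fd)                  # assuming the file is at least that long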
@@ -1225,6 +1225,8 b' def versiontuple(v=None, n=4):'
1225 if n == 4:
1225 if n == 4:
1226 return (vints[0], vints[1], vints[2], extra)
1226 return (vints[0], vints[1], vints[2], extra)
1227
1227
1228 raise error.ProgrammingError(b"invalid version part request: %d" % n)
1229
1228
1230
1229 def cachefunc(func):
1231 def cachefunc(func):
1230 '''cache the result of function calls'''
1232 '''cache the result of function calls'''
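With the new guard, an unsupported `n` in `versiontuple` now fails loudly instead of silently returning None. Illustrative calls (output shapes assumed from the surrounding code, which this hunk only shows in part):

    versiontuple(b'6.0rc0', n=2)  # (6, 0)
    versiontuple(b'6.0rc0', n=4)  # (6, 0, None, b'rc0')
    versiontuple(b'6.0rc0', n=5)  # raises error.ProgrammingError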
@@ -57,30 +57,11 b' else:'
57 try:
57 try:
58 # importlib.resources exists from Python 3.7; see fallback in except clause
58 # importlib.resources exists from Python 3.7; see fallback in except clause
59 # further down
59 # further down
60 from importlib import resources
60 from importlib import resources # pytype: disable=import-error
61
62 from .. import encoding
63
61
64 # Force loading of the resources module
62 # Force loading of the resources module
65 resources.open_binary # pytype: disable=module-attr
63 resources.open_binary # pytype: disable=module-attr
66
64
67 def open_resource(package, name):
68 return resources.open_binary( # pytype: disable=module-attr
69 pycompat.sysstr(package), pycompat.sysstr(name)
70 )
71
72 def is_resource(package, name):
73 return resources.is_resource( # pytype: disable=module-attr
74 pycompat.sysstr(package), encoding.strfromlocal(name)
75 )
76
77 def contents(package):
78 # pytype: disable=module-attr
79 for r in resources.contents(pycompat.sysstr(package)):
80 # pytype: enable=module-attr
81 yield encoding.strtolocal(r)
82
83
84 except (ImportError, AttributeError):
65 except (ImportError, AttributeError):
85 # importlib.resources was not found (almost definitely because we're on a
66 # importlib.resources was not found (almost definitely because we're on a
86 # Python version before 3.7)
67 # Python version before 3.7)
@@ -102,3 +83,23 b' except (ImportError, AttributeError):'
102
83
103 for p in os.listdir(path):
84 for p in os.listdir(path):
104 yield pycompat.fsencode(p)
85 yield pycompat.fsencode(p)
86
87
88 else:
89 from .. import encoding
90
91 def open_resource(package, name):
92 return resources.open_binary( # pytype: disable=module-attr
93 pycompat.sysstr(package), pycompat.sysstr(name)
94 )
95
96 def is_resource(package, name):
97 return resources.is_resource( # pytype: disable=module-attr
98 pycompat.sysstr(package), encoding.strfromlocal(name)
99 )
100
101 def contents(package):
102 # pytype: disable=module-attr
103 for r in resources.contents(pycompat.sysstr(package)):
104 # pytype: enable=module-attr
105 yield encoding.strtolocal(r)
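The reorganization above keeps the probe in the `try` block and moves the `resources`-based helpers into an `else` clause, so they are only defined when both the import and the attribute check succeed. The shape of the guard, reduced to a sketch:

    try:
        from importlib import resources  # Python >= 3.7
        resources.open_binary  # AttributeError on a too-old importlib
    except (ImportError, AttributeError):
        resources = None  # fall back to os.listdir()-based access
    else:
        pass  # define open_resource/is_resource/contents here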
@@ -503,22 +503,17 b' def get_push_paths(repo, ui, dests):'
503 yield path
503 yield path
504
504
505
505
506 def get_pull_paths(repo, ui, sources, default_branches=()):
506 def get_pull_paths(repo, ui, sources):
507 """yields all the `(path, branch)` selected as pull source by `sources`"""
507 """yields all the `(path, branch)` selected as pull source by `sources`"""
508 if not sources:
508 if not sources:
509 sources = [b'default']
509 sources = [b'default']
510 for source in sources:
510 for source in sources:
511 if source in ui.paths:
511 if source in ui.paths:
512 for p in ui.paths[source]:
512 for p in ui.paths[source]:
513 yield parseurl(p.rawloc, default_branches)
513 yield p
514 else:
514 else:
515 # Try to resolve as a local path or URI.
515 p = path(ui, None, source, validate_path=False)
516 path = try_path(ui, source)
516 yield p
517 if path is not None:
518 url = path.rawloc
519 else:
520 url = source
521 yield parseurl(url, default_branches)
522
517
523
518
524 def get_unique_push_path(action, repo, ui, dest=None):
519 def get_unique_push_path(action, repo, ui, dest=None):
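`get_pull_paths` now yields `path` objects directly (built with `validate_path=False` for sources that are not configured paths), leaving URL and branch parsing to the caller. A hypothetical caller under the new contract (`default_branches`, `opts` and the peer setup are stand-ins):

    for path in get_pull_paths(repo, ui, sources):
        url, branch = parseurl(path.rawloc, default_branches)
        other = hg.peer(repo, opts, url)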
@@ -771,6 +766,28 b' def pushrevpathoption(ui, path, value):'
771 return value
766 return value
772
767
773
768
769 SUPPORTED_BOOKMARKS_MODES = {
770 b'default',
771 b'mirror',
772 b'ignore',
773 }
774
775
776 @pathsuboption(b'bookmarks.mode', b'bookmarks_mode')
777 def bookmarks_mode_option(ui, path, value):
778 if value not in SUPPORTED_BOOKMARKS_MODES:
779 path_name = path.name
780 if path_name is None:
781 # this is an "anonymous" path, config comes from the global one
782 path_name = b'*'
783 msg = _(b'(paths.%s:bookmarks.mode has unknown value: "%s")\n')
784 msg %= (path_name, value)
785 ui.warn(msg)
786 if value == b'default':
787 value = None
788 return value
789
790
774 @pathsuboption(b'multi-urls', b'multi_urls')
791 @pathsuboption(b'multi-urls', b'multi_urls')
775 def multiurls_pathoption(ui, path, value):
792 def multiurls_pathoption(ui, path, value):
776 res = stringutil.parsebool(value)
793 res = stringutil.parsebool(value)
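The new `bookmarks.mode` sub-option accepts `default`, `mirror` and `ignore`; anything else triggers a warning. Path sub-options use the usual `name:sub-option` hgrc syntax, so a configuration could look like this (illustrative path name and URL):

    [paths]
    upstream = https://example.com/repo
    upstream:bookmarks.mode = mirror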
@@ -818,7 +835,14 b' def _chain_path(base_path, ui, paths):'
818 class path(object):
835 class path(object):
819 """Represents an individual path and its configuration."""
836 """Represents an individual path and its configuration."""
820
837
821 def __init__(self, ui=None, name=None, rawloc=None, suboptions=None):
838 def __init__(
839 self,
840 ui=None,
841 name=None,
842 rawloc=None,
843 suboptions=None,
844 validate_path=True,
845 ):
822 """Construct a path from its config options.
846 """Construct a path from its config options.
823
847
824 ``ui`` is the ``ui`` instance the path is coming from.
848 ``ui`` is the ``ui`` instance the path is coming from.
@@ -856,7 +880,8 b' class path(object):'
856 self.rawloc = rawloc
880 self.rawloc = rawloc
857 self.loc = b'%s' % u
881 self.loc = b'%s' % u
858
882
859 self._validate_path()
883 if validate_path:
884 self._validate_path()
860
885
861 _path, sub_opts = ui.configsuboptions(b'paths', b'*')
886 _path, sub_opts = ui.configsuboptions(b'paths', b'*')
862 self._own_sub_opts = {}
887 self._own_sub_opts = {}
@@ -395,12 +395,13 b' class verifier(object):'
395 storefiles = set()
395 storefiles = set()
396 subdirs = set()
396 subdirs = set()
397 revlogv1 = self.revlogv1
397 revlogv1 = self.revlogv1
398 for t, f, f2, size in repo.store.datafiles():
398 undecodable = []
399 if not f:
399 for t, f, size in repo.store.datafiles(undecodable=undecodable):
400 self._err(None, _(b"cannot decode filename '%s'") % f2)
400 if (size > 0 or not revlogv1) and f.startswith(b'meta/'):
401 elif (size > 0 or not revlogv1) and f.startswith(b'meta/'):
402 storefiles.add(_normpath(f))
401 storefiles.add(_normpath(f))
403 subdirs.add(os.path.dirname(f))
402 subdirs.add(os.path.dirname(f))
403 for f in undecodable:
404 self._err(None, _(b"cannot decode filename '%s'") % f)
404 subdirprogress = ui.makeprogress(
405 subdirprogress = ui.makeprogress(
405 _(b'checking'), unit=_(b'manifests'), total=len(subdirs)
406 _(b'checking'), unit=_(b'manifests'), total=len(subdirs)
406 )
407 )
@@ -459,11 +460,12 b' class verifier(object):'
459 ui.status(_(b"checking files\n"))
460 ui.status(_(b"checking files\n"))
460
461
461 storefiles = set()
462 storefiles = set()
462 for rl_type, f, f2, size in repo.store.datafiles():
463 undecodable = []
463 if not f:
464 for t, f, size in repo.store.datafiles(undecodable=undecodable):
464 self._err(None, _(b"cannot decode filename '%s'") % f2)
465 if (size > 0 or not revlogv1) and f.startswith(b'data/'):
465 elif (size > 0 or not revlogv1) and f.startswith(b'data/'):
466 storefiles.add(_normpath(f))
466 storefiles.add(_normpath(f))
467 for f in undecodable:
468 self._err(None, _(b"cannot decode filename '%s'") % f)
467
469
468 state = {
470 state = {
469 # TODO this assumes revlog storage for changelog.
471 # TODO this assumes revlog storage for changelog.
@@ -175,7 +175,7 b" def posixfile(name, mode=b'r', buffering"
175 return mixedfilemodewrapper(fp)
175 return mixedfilemodewrapper(fp)
176
176
177 return fp
177 return fp
178 except WindowsError as err:
178 except WindowsError as err: # pytype: disable=name-error
179 # convert to a friendlier exception
179 # convert to a friendlier exception
180 raise IOError(
180 raise IOError(
181 err.errno, '%s: %s' % (encoding.strfromlocal(name), err.strerror)
181 err.errno, '%s: %s' % (encoding.strfromlocal(name), err.strerror)
@@ -44,13 +44,9 b' def batchable(f):'
44 def sample(self, one, two=None):
44 def sample(self, one, two=None):
45 # Build list of encoded arguments suitable for your wire protocol:
45 # Build list of encoded arguments suitable for your wire protocol:
46 encoded_args = [('one', encode(one),), ('two', encode(two),)]
46 encoded_args = [('one', encode(one),), ('two', encode(two),)]
47 # Create future for injection of encoded result:
47 # Return it, along with a function that will receive the result
48 encoded_res_future = future()
48 # from the batched request.
49 # Return encoded arguments and future:
49 return encoded_args, decode
50 yield encoded_args, encoded_res_future
51 # Assuming the future to be filled with the result from the batched
52 # request now. Decode it:
53 yield decode(encoded_res_future.value)
54
50
55 The decorator returns a function which wraps this method as a plain
51 The decorator returns a function which wraps this method as a plain
56 method, but adds the original method as an attribute called "batchable",
52 method, but adds the original method as an attribute called "batchable",
@@ -59,29 +55,19 b' def batchable(f):'
59 """
55 """
60
56
61 def plain(*args, **opts):
57 def plain(*args, **opts):
62 batchable = f(*args, **opts)
58 encoded_args_or_res, decode = f(*args, **opts)
63 encoded_args_or_res, encoded_res_future = next(batchable)
59 if not decode:
64 if not encoded_res_future:
65 return encoded_args_or_res # a local result in this case
60 return encoded_args_or_res # a local result in this case
66 self = args[0]
61 self = args[0]
67 cmd = pycompat.bytesurl(f.__name__) # ensure cmd is ascii bytestr
62 cmd = pycompat.bytesurl(f.__name__) # ensure cmd is ascii bytestr
68 encoded_res_future.set(self._submitone(cmd, encoded_args_or_res))
63 encoded_res = self._submitone(cmd, encoded_args_or_res)
69 return next(batchable)
64 return decode(encoded_res)
70
65
71 setattr(plain, 'batchable', f)
66 setattr(plain, 'batchable', f)
72 setattr(plain, '__name__', f.__name__)
67 setattr(plain, '__name__', f.__name__)
73 return plain
68 return plain
74
69
75
70
76 class future(object):
77 '''placeholder for a value to be set later'''
78
79 def set(self, value):
80 if util.safehasattr(self, b'value'):
81 raise error.RepoError(b"future is already set")
82 self.value = value
83
84
85 def encodebatchcmds(req):
71 def encodebatchcmds(req):
86 """Return a ``cmds`` argument value for the ``batch`` command."""
72 """Return a ``cmds`` argument value for the ``batch`` command."""
87 escapearg = wireprototypes.escapebatcharg
73 escapearg = wireprototypes.escapebatcharg
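Under the new contract a `@batchable` method returns `(encoded_args, decode)` for a wire call, or `(result, None)` for a purely local answer, instead of yielding around a `future`. A minimal sketch of both branches (the method and its caching attribute are hypothetical):

    @batchable
    def caps(self):
        if self._cached_caps is not None:
            return self._cached_caps, None  # local result, no wire call

        def decode(d):
            return set(d.split())

        return {}, decode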
@@ -248,25 +234,18 b' class peerexecutor(object):'
248 continue
234 continue
249
235
250 try:
236 try:
251 batchable = fn.batchable(
237 encoded_args_or_res, decode = fn.batchable(
252 fn.__self__, **pycompat.strkwargs(args)
238 fn.__self__, **pycompat.strkwargs(args)
253 )
239 )
254 except Exception:
240 except Exception:
255 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
241 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
256 return
242 return
257
243
258 # Encoded arguments and future holding remote result.
244 if not decode:
259 try:
260 encoded_args_or_res, fremote = next(batchable)
261 except Exception:
262 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
263 return
264
265 if not fremote:
266 f.set_result(encoded_args_or_res)
245 f.set_result(encoded_args_or_res)
267 else:
246 else:
268 requests.append((command, encoded_args_or_res))
247 requests.append((command, encoded_args_or_res))
269 states.append((command, f, batchable, fremote))
248 states.append((command, f, batchable, decode))
270
249
271 if not requests:
250 if not requests:
272 return
251 return
@@ -319,7 +298,7 b' class peerexecutor(object):'
319 def _readbatchresponse(self, states, wireresults):
298 def _readbatchresponse(self, states, wireresults):
320 # Executes in a thread to read data off the wire.
299 # Executes in a thread to read data off the wire.
321
300
322 for command, f, batchable, fremote in states:
301 for command, f, batchable, decode in states:
323 # Grab raw result off the wire and teach the internal future
302 # Grab raw result off the wire and teach the internal future
324 # about it.
303 # about it.
325 try:
304 try:
@@ -334,11 +313,8 b' class peerexecutor(object):'
334 )
313 )
335 )
314 )
336 else:
315 else:
337 fremote.set(remoteresult)
338
339 # And ask the coroutine to decode that value.
340 try:
316 try:
341 result = next(batchable)
317 result = decode(remoteresult)
342 except Exception:
318 except Exception:
343 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
319 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
344 else:
320 else:
@@ -369,87 +345,90 b' class wirepeer(repository.peer):'
369 @batchable
345 @batchable
370 def lookup(self, key):
346 def lookup(self, key):
371 self.requirecap(b'lookup', _(b'look up remote revision'))
347 self.requirecap(b'lookup', _(b'look up remote revision'))
372 f = future()
348
373 yield {b'key': encoding.fromlocal(key)}, f
349 def decode(d):
374 d = f.value
350 success, data = d[:-1].split(b" ", 1)
375 success, data = d[:-1].split(b" ", 1)
351 if int(success):
376 if int(success):
352 return bin(data)
377 yield bin(data)
353 else:
378 else:
354 self._abort(error.RepoError(data))
379 self._abort(error.RepoError(data))
355
356 return {b'key': encoding.fromlocal(key)}, decode
380
357
381 @batchable
358 @batchable
382 def heads(self):
359 def heads(self):
383 f = future()
360 def decode(d):
384 yield {}, f
361 try:
385 d = f.value
362 return wireprototypes.decodelist(d[:-1])
386 try:
363 except ValueError:
387 yield wireprototypes.decodelist(d[:-1])
364 self._abort(error.ResponseError(_(b"unexpected response:"), d))
388 except ValueError:
365
389 self._abort(error.ResponseError(_(b"unexpected response:"), d))
366 return {}, decode
390
367
391 @batchable
368 @batchable
392 def known(self, nodes):
369 def known(self, nodes):
393 f = future()
370 def decode(d):
394 yield {b'nodes': wireprototypes.encodelist(nodes)}, f
371 try:
395 d = f.value
372 return [bool(int(b)) for b in pycompat.iterbytestr(d)]
396 try:
373 except ValueError:
397 yield [bool(int(b)) for b in pycompat.iterbytestr(d)]
374 self._abort(error.ResponseError(_(b"unexpected response:"), d))
398 except ValueError:
375
399 self._abort(error.ResponseError(_(b"unexpected response:"), d))
376 return {b'nodes': wireprototypes.encodelist(nodes)}, decode
400
377
401 @batchable
378 @batchable
402 def branchmap(self):
379 def branchmap(self):
403 f = future()
380 def decode(d):
404 yield {}, f
381 try:
405 d = f.value
382 branchmap = {}
406 try:
383 for branchpart in d.splitlines():
407 branchmap = {}
384 branchname, branchheads = branchpart.split(b' ', 1)
408 for branchpart in d.splitlines():
385 branchname = encoding.tolocal(urlreq.unquote(branchname))
409 branchname, branchheads = branchpart.split(b' ', 1)
386 branchheads = wireprototypes.decodelist(branchheads)
410 branchname = encoding.tolocal(urlreq.unquote(branchname))
387 branchmap[branchname] = branchheads
411 branchheads = wireprototypes.decodelist(branchheads)
388 return branchmap
412 branchmap[branchname] = branchheads
389 except TypeError:
413 yield branchmap
390 self._abort(error.ResponseError(_(b"unexpected response:"), d))
414 except TypeError:
391
415 self._abort(error.ResponseError(_(b"unexpected response:"), d))
392 return {}, decode
416
393
417 @batchable
394 @batchable
418 def listkeys(self, namespace):
395 def listkeys(self, namespace):
419 if not self.capable(b'pushkey'):
396 if not self.capable(b'pushkey'):
420 yield {}, None
397 return {}, None
421 f = future()
422 self.ui.debug(b'preparing listkeys for "%s"\n' % namespace)
398 self.ui.debug(b'preparing listkeys for "%s"\n' % namespace)
423 yield {b'namespace': encoding.fromlocal(namespace)}, f
399
424 d = f.value
400 def decode(d):
425 self.ui.debug(
401 self.ui.debug(
426 b'received listkey for "%s": %i bytes\n' % (namespace, len(d))
402 b'received listkey for "%s": %i bytes\n' % (namespace, len(d))
427 )
403 )
428 yield pushkeymod.decodekeys(d)
404 return pushkeymod.decodekeys(d)
405
406 return {b'namespace': encoding.fromlocal(namespace)}, decode
429
407
430 @batchable
408 @batchable
431 def pushkey(self, namespace, key, old, new):
409 def pushkey(self, namespace, key, old, new):
432 if not self.capable(b'pushkey'):
410 if not self.capable(b'pushkey'):
433 yield False, None
411 return False, None
434 f = future()
435 self.ui.debug(b'preparing pushkey for "%s:%s"\n' % (namespace, key))
412 self.ui.debug(b'preparing pushkey for "%s:%s"\n' % (namespace, key))
436 yield {
413
414 def decode(d):
415 d, output = d.split(b'\n', 1)
416 try:
417 d = bool(int(d))
418 except ValueError:
419 raise error.ResponseError(
420 _(b'push failed (unexpected response):'), d
421 )
422 for l in output.splitlines(True):
423 self.ui.status(_(b'remote: '), l)
424 return d
425
426 return {
437 b'namespace': encoding.fromlocal(namespace),
427 b'namespace': encoding.fromlocal(namespace),
438 b'key': encoding.fromlocal(key),
428 b'key': encoding.fromlocal(key),
439 b'old': encoding.fromlocal(old),
429 b'old': encoding.fromlocal(old),
440 b'new': encoding.fromlocal(new),
430 b'new': encoding.fromlocal(new),
441 }, f
431 }, decode
442 d = f.value
443 d, output = d.split(b'\n', 1)
444 try:
445 d = bool(int(d))
446 except ValueError:
447 raise error.ResponseError(
448 _(b'push failed (unexpected response):'), d
449 )
450 for l in output.splitlines(True):
451 self.ui.status(_(b'remote: '), l)
452 yield d
453
432
454 def stream_out(self):
433 def stream_out(self):
455 return self._callstream(b'stream_out')
434 return self._callstream(b'stream_out')
@@ -1579,7 +1579,7 b' def rawstorefiledata(repo, proto, files,'
1579
1579
1580 # TODO this is a bunch of storage layer interface abstractions because
1580 # TODO this is a bunch of storage layer interface abstractions because
1581 # it assumes revlogs.
1581 # it assumes revlogs.
1582 for rl_type, name, encodedname, size in topfiles:
1582 for rl_type, name, size in topfiles:
1583 # XXX use the `rl_type` for that
1583 # XXX use the `rl_type` for that
1584 if b'changelog' in files and name.startswith(b'00changelog'):
1584 if b'changelog' in files and name.startswith(b'00changelog'):
1585 pass
1585 pass
@@ -1,26 +1,16 b''
1 == New Features ==
1 == New Features ==
2 * `debugrebuildfncache` now has an option to rebuild only the index files
3
2
4
3
5 == Default Format Change ==
4 == Default Format Change ==
6
5
7 These changes affect newly created repositories (or new clones) made with
6 These changes affect newly created repositories (or new clones) made with
8 Mercurial 6.0.
7 Mercurial XXX.
9
8
10
9
11 == New Experimental Features ==
10 == New Experimental Features ==
12
11
13 * Added a new `web.full-garbage-collection-rate` to control performance. See
14 de2e04fe4897a554b9ef433167f11ea4feb2e09c for more information
15
16 == Bug Fixes ==
12 == Bug Fixes ==
17
13
18 * `hg fix --working-dir` now correctly works when in an uncommitted merge state
19 * `rhg` (Rust fast-path for `hg`) now supports the full config list syntax
20 * `rhg` now parses some corner-cases for revsets correctly
21 * `hg email -o` now works again when not mentioning a revision
22 * Lots of Windows fixes
23 * Lots of miscellaneous other fixes
24
14
25 == Backwards Compatibility Changes ==
15 == Backwards Compatibility Changes ==
26
16
@@ -29,15 +19,4 b' Mercurial 6.0.'
29
19
30 The following functions have been removed:
20 The following functions have been removed:
31
21
32 * `dirstate.normal`
33 * `dirstate.normallookup`
34 * `dirstate.otherparent`
35 * `dirstate.add`
36 * `dirstate.remove`
37 * `dirstate.drop`
38 * `dirstate.__getitem__`
39
40 Miscellaneous:
22 Miscellaneous:
41
42 * `wireprotov1peer`'s `batchable` is now a simple function and not a generator
43 anymore
\ No newline at end of file
@@ -157,9 +157,9 b' dependencies = ['
157
157
158 [[package]]
158 [[package]]
159 name = "cpython"
159 name = "cpython"
160 version = "0.6.0"
160 version = "0.7.0"
161 source = "registry+https://github.com/rust-lang/crates.io-index"
161 source = "registry+https://github.com/rust-lang/crates.io-index"
162 checksum = "8094679a4e9bfc8035572162624bc800eda35b5f9eff2537b9cd9aacc3d9782e"
162 checksum = "b7d46ba8ace7f3a1d204ac5060a706d0a68de6b42eafb6a586cc08bebcffe664"
163 dependencies = [
163 dependencies = [
164 "libc",
164 "libc",
165 "num-traits",
165 "num-traits",
@@ -374,6 +374,7 b' dependencies = ['
374 name = "hg-core"
374 name = "hg-core"
375 version = "0.1.0"
375 version = "0.1.0"
376 dependencies = [
376 dependencies = [
377 "bitflags",
377 "byteorder",
378 "byteorder",
378 "bytes-cast",
379 "bytes-cast",
379 "clap",
380 "clap",
@@ -385,8 +386,9 b' dependencies = ['
385 "im-rc",
386 "im-rc",
386 "itertools",
387 "itertools",
387 "lazy_static",
388 "lazy_static",
389 "libc",
388 "log",
390 "log",
389 "memmap",
391 "memmap2",
390 "micro-timer",
392 "micro-timer",
391 "pretty_assertions",
393 "pretty_assertions",
392 "rand",
394 "rand",
@@ -396,6 +398,7 b' dependencies = ['
396 "regex",
398 "regex",
397 "same-file",
399 "same-file",
398 "sha-1",
400 "sha-1",
401 "stable_deref_trait",
399 "tempfile",
402 "tempfile",
400 "twox-hash",
403 "twox-hash",
401 "zstd",
404 "zstd",
@@ -411,6 +414,7 b' dependencies = ['
411 "hg-core",
414 "hg-core",
412 "libc",
415 "libc",
413 "log",
416 "log",
417 "stable_deref_trait",
414 ]
418 ]
415
419
416 [[package]]
420 [[package]]
@@ -508,13 +512,13 b' source = "registry+https://github.com/ru'
508 checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525"
512 checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525"
509
513
510 [[package]]
514 [[package]]
511 name = "memmap"
515 name = "memmap2"
512 version = "0.7.0"
516 version = "0.4.0"
513 source = "registry+https://github.com/rust-lang/crates.io-index"
517 source = "registry+https://github.com/rust-lang/crates.io-index"
514 checksum = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b"
518 checksum = "de5d3112c080d58ce560081baeaab7e1e864ca21795ddbf533d5b1842bb1ecf8"
515 dependencies = [
519 dependencies = [
516 "libc",
520 "libc",
517 "winapi",
521 "stable_deref_trait",
518 ]
522 ]
519
523
520 [[package]]
524 [[package]]
@@ -649,9 +653,9 b' dependencies = ['
649
653
650 [[package]]
654 [[package]]
651 name = "python27-sys"
655 name = "python27-sys"
652 version = "0.6.0"
656 version = "0.7.0"
653 source = "registry+https://github.com/rust-lang/crates.io-index"
657 source = "registry+https://github.com/rust-lang/crates.io-index"
654 checksum = "5826ddbc5366eb0b0492040fdc25bf50bb49092c192bd45e80fb7a24dc6832ab"
658 checksum = "94670354e264300dde81a5864cbb6bfc9d56ac3dcf3a278c32cb52f816f4dfd1"
655 dependencies = [
659 dependencies = [
656 "libc",
660 "libc",
657 "regex",
661 "regex",
@@ -659,9 +663,9 b' dependencies = ['
659
663
660 [[package]]
664 [[package]]
661 name = "python3-sys"
665 name = "python3-sys"
662 version = "0.6.0"
666 version = "0.7.0"
663 source = "registry+https://github.com/rust-lang/crates.io-index"
667 source = "registry+https://github.com/rust-lang/crates.io-index"
664 checksum = "b78af21b29594951a47fc3dac9b9eff0a3f077dec2f780ee943ae16a668f3b6a"
668 checksum = "b18b32e64c103d5045f44644d7ddddd65336f7a0521f6fde673240a9ecceb77e"
665 dependencies = [
669 dependencies = [
666 "libc",
670 "libc",
667 "regex",
671 "regex",
@@ -865,6 +869,12 b' dependencies = ['
865 ]
869 ]
866
870
867 [[package]]
871 [[package]]
872 name = "stable_deref_trait"
873 version = "1.2.0"
874 source = "registry+https://github.com/rust-lang/crates.io-index"
875 checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3"
876
877 [[package]]
868 name = "static_assertions"
878 name = "static_assertions"
869 version = "1.1.0"
879 version = "1.1.0"
870 source = "registry+https://github.com/rust-lang/crates.io-index"
880 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -74,8 +74,8 b' Example usage:'
74 Developing Rust
74 Developing Rust
75 ===============
75 ===============
76
76
77 The current version of Rust in use is ``1.41.1``, because it's what Debian
77 The current version of Rust in use is ``1.48.0``, because it's what Debian
78 stable has. You can use ``rustup override set 1.41.1`` at the root of the repo
78 stable has. You can use ``rustup override set 1.48.0`` at the root of the repo
79 to make it easier on you.
79 to make it easier on you.
80
80
81 Go to the ``hg-cpython`` folder::
81 Go to the ``hg-cpython`` folder::
@@ -9,6 +9,7 b' edition = "2018"'
9 name = "hg"
9 name = "hg"
10
10
11 [dependencies]
11 [dependencies]
12 bitflags = "1.2"
12 bytes-cast = "0.2"
13 bytes-cast = "0.2"
13 byteorder = "1.3.4"
14 byteorder = "1.3.4"
14 derive_more = "0.99"
15 derive_more = "0.99"
@@ -16,6 +17,7 b' home = "0.5"'
16 im-rc = "15.0.*"
17 im-rc = "15.0.*"
17 itertools = "0.9"
18 itertools = "0.9"
18 lazy_static = "1.4.0"
19 lazy_static = "1.4.0"
20 libc = "0.2"
19 rand = "0.7.3"
21 rand = "0.7.3"
20 rand_pcg = "0.2.1"
22 rand_pcg = "0.2.1"
21 rand_distr = "0.2.2"
23 rand_distr = "0.2.2"
@@ -24,11 +26,12 b' regex = "1.3.9"'
24 sha-1 = "0.9.6"
26 sha-1 = "0.9.6"
25 twox-hash = "1.5.0"
27 twox-hash = "1.5.0"
26 same-file = "1.0.6"
28 same-file = "1.0.6"
29 stable_deref_trait = "1.2.0"
27 tempfile = "3.1.0"
30 tempfile = "3.1.0"
28 crossbeam-channel = "0.4"
31 crossbeam-channel = "0.4"
29 micro-timer = "0.3.0"
32 micro-timer = "0.3.0"
30 log = "0.4.8"
33 log = "0.4.8"
31 memmap = "0.7.0"
34 memmap2 = {version = "0.4", features = ["stable_deref_trait"]}
32 zstd = "0.5.3"
35 zstd = "0.5.3"
33 format-bytes = "0.2.2"
36 format-bytes = "0.2.2"
34
37
@@ -5,7 +5,7 b''
5
5
6 //! Minimal `RevlogIndex`, readable from standard Mercurial file format
6 //! Minimal `RevlogIndex`, readable from standard Mercurial file format
7 use hg::*;
7 use hg::*;
8 use memmap::*;
8 use memmap2::*;
9 use std::fs::File;
9 use std::fs::File;
10 use std::ops::Deref;
10 use std::ops::Deref;
11 use std::path::Path;
11 use std::path::Path;
@@ -7,7 +7,7 b' use clap::*;'
7 use hg::revlog::node::*;
7 use hg::revlog::node::*;
8 use hg::revlog::nodemap::*;
8 use hg::revlog::nodemap::*;
9 use hg::revlog::*;
9 use hg::revlog::*;
10 use memmap::MmapOptions;
10 use memmap2::MmapOptions;
11 use rand::Rng;
11 use rand::Rng;
12 use std::fs::File;
12 use std::fs::File;
13 use std::io;
13 use std::io;
@@ -13,7 +13,6 b' use crate::config::layer::{'
13 ConfigError, ConfigLayer, ConfigOrigin, ConfigValue,
13 ConfigError, ConfigLayer, ConfigOrigin, ConfigValue,
14 };
14 };
15 use crate::utils::files::get_bytes_from_os_str;
15 use crate::utils::files::get_bytes_from_os_str;
16 use crate::utils::SliceExt;
17 use format_bytes::{write_bytes, DisplayBytes};
16 use format_bytes::{write_bytes, DisplayBytes};
18 use std::collections::HashSet;
17 use std::collections::HashSet;
19 use std::env;
18 use std::env;
@@ -362,30 +361,14 b' impl Config {'
362 Ok(self.get_option(section, item)?.unwrap_or(false))
361 Ok(self.get_option(section, item)?.unwrap_or(false))
363 }
362 }
364
363
365 /// Returns the corresponding list-value in the config if found, or `None`.
364 /// If there is an `item` value in `section`, parse and return a list of
366 ///
365 /// byte strings.
367 /// This is appropriate for new configuration keys. The value syntax is
366 pub fn get_list(
368 /// **not** the same as most existing list-valued config, which has Python
369 /// parsing implemented in `parselist()` in
370 /// `mercurial/utils/stringutil.py`. Faithfully porting that parsing
371 /// algorithm to Rust (including behaviors that are arguably bugs)
372 /// turned out to be non-trivial and hasn’t been completed as of this
373 /// writing.
374 ///
375 /// Instead, the "simple" syntax is: split on comma, then trim leading and
376 /// trailing whitespace of each component. Quotes or backslashes are not
377 /// interpreted in any way. Commas are mandatory between values. Values
378 /// that contain a comma are not supported.
379 pub fn get_simple_list(
380 &self,
367 &self,
381 section: &[u8],
368 section: &[u8],
382 item: &[u8],
369 item: &[u8],
383 ) -> Option<impl Iterator<Item = &[u8]>> {
370 ) -> Option<Vec<Vec<u8>>> {
384 self.get(section, item).map(|value| {
371 self.get(section, item).map(values::parse_list)
385 value
386 .split(|&byte| byte == b',')
387 .map(|component| component.trim())
388 })
389 }
372 }
390
373
391 /// Returns the raw value bytes of the first one found, or `None`.
374 /// Returns the raw value bytes of the first one found, or `None`.
@@ -8,6 +8,8 b''
8 //! details about where the value came from (but omits details of what’s
8 //! details about where the value came from (but omits details of what’s
9 //! invalid inside the value).
9 //! invalid inside the value).
10
10
11 use crate::utils::SliceExt;
12
11 pub(super) fn parse_bool(v: &[u8]) -> Option<bool> {
13 pub(super) fn parse_bool(v: &[u8]) -> Option<bool> {
12 match v.to_ascii_lowercase().as_slice() {
14 match v.to_ascii_lowercase().as_slice() {
13 b"1" | b"yes" | b"true" | b"on" | b"always" => Some(true),
15 b"1" | b"yes" | b"true" | b"on" | b"always" => Some(true),
@@ -42,6 +44,216 b' pub(super) fn parse_byte_size(value: &[u'
42 value.parse().ok()
44 value.parse().ok()
43 }
45 }
44
46
47 /// Parse a config value as a list of sub-values.
48 ///
49 /// Ported from `parselist` in `mercurial/utils/stringutil.py`
50
51 // Note: keep behavior in sync with the Python one.
52
53 // Note: this could return `Vec<Cow<[u8]>>` instead and borrow `input` when
54 // possible (when there’s no backslash-escapes) but this is probably not worth
55 // the complexity as config is presumably not accessed inside
56 // preformance-sensitive loops.
57 pub(super) fn parse_list(input: &[u8]) -> Vec<Vec<u8>> {
58 // Port of Python’s `value.lstrip(b' ,\n')`
59 // TODO: is this really what we want?
60 let input =
61 input.trim_start_matches(|b| b == b' ' || b == b',' || b == b'\n');
62 parse_list_without_trim_start(input)
63 }
64
65 fn parse_list_without_trim_start(input: &[u8]) -> Vec<Vec<u8>> {
66 // Start of port of Python’s `_configlist`
67 let input = input.trim_end_matches(|b| b == b' ' || b == b',');
68 if input.is_empty() {
69 return Vec::new();
70 }
71
72 // Just to make “a string” less confusable with “a list of strings”.
73 type ByteString = Vec<u8>;
74
75 // These correspond to Python’s…
76 let mut mode = ParserMode::Plain; // `parser`
77 let mut values = Vec::new(); // `parts[:-1]`
78 let mut next_value = ByteString::new(); // `parts[-1]`
79 let mut offset = 0; // `offset`
80
81 // Setting `parser` to `None` is instead handled by returning immediately
82 enum ParserMode {
83 Plain,
84 Quoted,
85 }
86
87 loop {
88 match mode {
89 ParserMode::Plain => {
90 // Start of port of Python’s `_parse_plain`
91 let mut whitespace = false;
92 while let Some(&byte) = input.get(offset) {
93 if is_space(byte) || byte == b',' {
94 whitespace = true;
95 offset += 1;
96 } else {
97 break;
98 }
99 }
100 if let Some(&byte) = input.get(offset) {
101 if whitespace {
102 values.push(std::mem::take(&mut next_value))
103 }
104 if byte == b'"' && next_value.is_empty() {
105 mode = ParserMode::Quoted;
106 } else {
107 if byte == b'"' && next_value.ends_with(b"\\") {
108 next_value.pop();
109 }
110 next_value.push(byte);
111 }
112 offset += 1;
113 } else {
114 values.push(next_value);
115 return values;
116 }
117 }
118 ParserMode::Quoted => {
119 // Start of port of Python’s `_parse_quote`
120 if let Some(&byte) = input.get(offset) {
121 if byte == b'"' {
122 // The input contains a quoted zero-length value `""`
123 debug_assert_eq!(next_value, b"");
124 values.push(std::mem::take(&mut next_value));
125 offset += 1;
126 while let Some(&byte) = input.get(offset) {
127 if is_space(byte) || byte == b',' {
128 offset += 1;
129 } else {
130 break;
131 }
132 }
133 mode = ParserMode::Plain;
134 continue;
135 }
136 }
137
138 while let Some(&byte) = input.get(offset) {
139 if byte == b'"' {
140 break;
141 }
142 if byte == b'\\' && input.get(offset + 1) == Some(&b'"') {
143 next_value.push(b'"');
144 offset += 2;
145 } else {
146 next_value.push(byte);
147 offset += 1;
148 }
149 }
150
151 if offset >= input.len() {
152 // We didn’t find a closing double-quote,
153 // so treat the opening one as part of an unquoted value
154 // instead of delimiting the start of a quoted value.
155
156 // `next_value` may have had some backslash-escapes
157 // unescaped. TODO: shouldn’t we use a slice of `input`
158 // instead?
159 let mut real_values =
160 parse_list_without_trim_start(&next_value);
161
162 if let Some(first) = real_values.first_mut() {
163 first.insert(0, b'"');
164 // Drop `next_value`
165 values.extend(real_values)
166 } else {
167 next_value.push(b'"');
168 values.push(next_value);
169 }
170 return values;
171 }
172
173 // We’re not at the end of the input, which means the `while`
174 // loop above ended at a double quote. Skip
175 // over that.
176 offset += 1;
177
178 while let Some(&byte) = input.get(offset) {
179 if byte == b' ' || byte == b',' {
180 offset += 1;
181 } else {
182 break;
183 }
184 }
185
186 if offset >= input.len() {
187 values.push(next_value);
188 return values;
189 }
190
191 if offset + 1 == input.len() && input[offset] == b'"' {
192 next_value.push(b'"');
193 offset += 1;
194 } else {
195 values.push(std::mem::take(&mut next_value));
196 }
197
198 mode = ParserMode::Plain;
199 }
200 }
201 }
202
203 // https://docs.python.org/3/library/stdtypes.html?#bytes.isspace
204 fn is_space(byte: u8) -> bool {
205 if let b' ' | b'\t' | b'\n' | b'\r' | b'\x0b' | b'\x0c' = byte {
206 true
207 } else {
208 false
209 }
210 }
211 }
212
213 #[test]
214 fn test_parse_list() {
215 // Make `assert_eq` error messages nicer
216 fn as_strings(values: &[Vec<u8>]) -> Vec<String> {
217 values
218 .iter()
219 .map(|v| std::str::from_utf8(v.as_ref()).unwrap().to_owned())
220 .collect()
221 }
222 macro_rules! assert_parse_list {
223 ( $input: expr => [ $( $output: expr ),* ] ) => {
224 assert_eq!(
225 as_strings(&parse_list($input)),
226 as_strings(&[ $( Vec::from(&$output[..]) ),* ]),
227 );
228 }
229 }
230
231 // Keep these Rust tests in sync with the Python ones in
232 // `tests/test-config-parselist.py`
233 assert_parse_list!(b"" => []);
234 assert_parse_list!(b"," => []);
235 assert_parse_list!(b"A" => [b"A"]);
236 assert_parse_list!(b"B,B" => [b"B", b"B"]);
237 assert_parse_list!(b", C, ,C," => [b"C", b"C"]);
238 assert_parse_list!(b"\"" => [b"\""]);
239 assert_parse_list!(b"\"\"" => [b"", b""]);
240 assert_parse_list!(b"D,\"" => [b"D", b"\""]);
241 assert_parse_list!(b"E,\"\"" => [b"E", b"", b""]);
242 assert_parse_list!(b"\"F,F\"" => [b"F,F"]);
243 assert_parse_list!(b"\"G,G" => [b"\"G", b"G"]);
244 assert_parse_list!(b"\"H \\\",\\\"H" => [b"\"H", b",", b"H"]);
245 assert_parse_list!(b"I,I\"" => [b"I", b"I\""]);
246 assert_parse_list!(b"J,\"J" => [b"J", b"\"J"]);
247 assert_parse_list!(b"K K" => [b"K", b"K"]);
248 assert_parse_list!(b"\"K\" K" => [b"K", b"K"]);
249 assert_parse_list!(b"L\tL" => [b"L", b"L"]);
250 assert_parse_list!(b"\"L\"\tL" => [b"L", b"", b"L"]);
251 assert_parse_list!(b"M\x0bM" => [b"M", b"M"]);
252 assert_parse_list!(b"\"M\"\x0bM" => [b"M", b"", b"M"]);
253 assert_parse_list!(b"\"N\" , ,\"" => [b"N\""]);
254 assert_parse_list!(b"\" ,O, " => [b"\"", b"O"]);
255 }
256
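These cases are meant to stay in sync with the Python implementation; the same expectations can be checked against `stringutil.parselist` directly, e.g. (a sketch to run inside a Mercurial checkout):

    from mercurial.utils import stringutil

    assert stringutil.parselist(b'') == []
    assert stringutil.parselist(b', C, ,C,') == [b'C', b'C']
    assert stringutil.parselist(b'"F,F"') == [b'F,F']
    assert stringutil.parselist(b'"G,G') == [b'"G', b'G']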
45 #[test]
257 #[test]
46 fn test_parse_byte_size() {
258 fn test_parse_byte_size() {
47 assert_eq!(parse_byte_size(b""), None);
259 assert_eq!(parse_byte_size(b""), None);
@@ -6,20 +6,19 b''
6 // GNU General Public License version 2 or any later version.
6 // GNU General Public License version 2 or any later version.
7
7
8 use crate::dirstate_tree::on_disk::DirstateV2ParseError;
8 use crate::dirstate_tree::on_disk::DirstateV2ParseError;
9 use crate::errors::HgError;
10 use crate::revlog::node::NULL_NODE;
9 use crate::revlog::node::NULL_NODE;
11 use crate::revlog::Node;
10 use crate::revlog::Node;
12 use crate::utils::hg_path::{HgPath, HgPathBuf};
11 use crate::utils::hg_path::HgPath;
13 use crate::FastHashMap;
12 use bytes_cast::BytesCast;
14 use bytes_cast::{unaligned, BytesCast};
15 use std::convert::TryFrom;
16
13
17 pub mod dirs_multiset;
14 pub mod dirs_multiset;
18 pub mod dirstate_map;
15 pub mod entry;
19 pub mod parsers;
16 pub mod parsers;
20 pub mod status;
17 pub mod status;
21
18
22 #[derive(Debug, PartialEq, Clone, BytesCast)]
19 pub use self::entry::*;
20
21 #[derive(Debug, PartialEq, Copy, Clone, BytesCast)]
23 #[repr(C)]
22 #[repr(C)]
24 pub struct DirstateParents {
23 pub struct DirstateParents {
25 pub p1: Node,
24 pub p1: Node,
@@ -33,69 +32,6 b' impl DirstateParents {'
33 };
32 };
34 }
33 }
35
34
36 /// The C implementation uses all signed types. This will be an issue
37 /// either when 4GB+ source files are commonplace or in 2038, whichever
38 /// comes first.
39 #[derive(Debug, PartialEq, Copy, Clone)]
40 pub struct DirstateEntry {
41 pub state: EntryState,
42 pub mode: i32,
43 pub mtime: i32,
44 pub size: i32,
45 }
46
47 impl DirstateEntry {
48 pub fn is_non_normal(&self) -> bool {
49 self.state != EntryState::Normal || self.mtime == MTIME_UNSET
50 }
51
52 pub fn is_from_other_parent(&self) -> bool {
53 self.state == EntryState::Normal && self.size == SIZE_FROM_OTHER_PARENT
54 }
55
56 // TODO: other platforms
57 #[cfg(unix)]
58 pub fn mode_changed(
59 &self,
60 filesystem_metadata: &std::fs::Metadata,
61 ) -> bool {
62 use std::os::unix::fs::MetadataExt;
63 const EXEC_BIT_MASK: u32 = 0o100;
64 let dirstate_exec_bit = (self.mode as u32) & EXEC_BIT_MASK;
65 let fs_exec_bit = filesystem_metadata.mode() & EXEC_BIT_MASK;
66 dirstate_exec_bit != fs_exec_bit
67 }
68
69 /// Returns a `(state, mode, size, mtime)` tuple as for
70 /// `DirstateMapMethods::debug_iter`.
71 pub fn debug_tuple(&self) -> (u8, i32, i32, i32) {
72 (self.state.into(), self.mode, self.size, self.mtime)
73 }
74 }
75
76 #[derive(BytesCast)]
77 #[repr(C)]
78 struct RawEntry {
79 state: u8,
80 mode: unaligned::I32Be,
81 size: unaligned::I32Be,
82 mtime: unaligned::I32Be,
83 length: unaligned::I32Be,
84 }
85
86 pub const V1_RANGEMASK: i32 = 0x7FFFFFFF;
87
88 pub const MTIME_UNSET: i32 = -1;
89
90 /// A `DirstateEntry` with a size of `-2` means that it was merged from the
91 /// other parent. This allows revert to pick the right status back during a
92 /// merge.
93 pub const SIZE_FROM_OTHER_PARENT: i32 = -2;
94 /// A special value used for internal representation of special case in
95 /// dirstate v1 format.
96 pub const SIZE_NON_NORMAL: i32 = -1;
97
98 pub type StateMap = FastHashMap<HgPathBuf, DirstateEntry>;
99 pub type StateMapIter<'a> = Box<
35 pub type StateMapIter<'a> = Box<
100 dyn Iterator<
36 dyn Iterator<
101 Item = Result<(&'a HgPath, DirstateEntry), DirstateV2ParseError>,
37 Item = Result<(&'a HgPath, DirstateEntry), DirstateV2ParseError>,
@@ -103,58 +39,8 b" pub type StateMapIter<'a> = Box<"
103 + 'a,
39 + 'a,
104 >;
40 >;
105
41
106 pub type CopyMap = FastHashMap<HgPathBuf, HgPathBuf>;
107 pub type CopyMapIter<'a> = Box<
42 pub type CopyMapIter<'a> = Box<
108 dyn Iterator<Item = Result<(&'a HgPath, &'a HgPath), DirstateV2ParseError>>
43 dyn Iterator<Item = Result<(&'a HgPath, &'a HgPath), DirstateV2ParseError>>
109 + Send
44 + Send
110 + 'a,
45 + 'a,
111 >;
46 >;
112
113 #[derive(Copy, Clone, Debug, Eq, PartialEq)]
114 pub enum EntryState {
115 Normal,
116 Added,
117 Removed,
118 Merged,
119 Unknown,
120 }
121
122 impl EntryState {
123 pub fn is_tracked(self) -> bool {
124 use EntryState::*;
125 match self {
126 Normal | Added | Merged => true,
127 Removed | Unknown => false,
128 }
129 }
130 }
131
132 impl TryFrom<u8> for EntryState {
133 type Error = HgError;
134
135 fn try_from(value: u8) -> Result<Self, Self::Error> {
136 match value {
137 b'n' => Ok(EntryState::Normal),
138 b'a' => Ok(EntryState::Added),
139 b'r' => Ok(EntryState::Removed),
140 b'm' => Ok(EntryState::Merged),
141 b'?' => Ok(EntryState::Unknown),
142 _ => Err(HgError::CorruptedRepository(format!(
143 "Incorrect dirstate entry state {}",
144 value
145 ))),
146 }
147 }
148 }
149
150 impl Into<u8> for EntryState {
151 fn into(self) -> u8 {
152 match self {
153 EntryState::Normal => b'n',
154 EntryState::Added => b'a',
155 EntryState::Removed => b'r',
156 EntryState::Merged => b'm',
157 EntryState::Unknown => b'?',
158 }
159 }
160 }
@@ -33,7 +33,7 b' impl DirsMultiset {'
33 /// If `skip_state` is provided, skips dirstate entries with equal state.
34 pub fn from_dirstate<I, P>(
35 dirstate: I,
36 skip_state: Option<EntryState>,
36 only_tracked: bool,
37 ) -> Result<Self, DirstateError>
38 where
39 I: IntoIterator<
@@ -48,8 +48,8 b' impl DirsMultiset {'
48 let (filename, entry) = item?;
49 let filename = filename.as_ref();
50 // This `if` is optimized out of the loop
51 if let Some(skip) = skip_state {
51 if only_tracked {
52 if skip != entry.state {
52 if entry.state() != EntryState::Removed {
53 multiset.add_path(filename)?;
54 }
55 } else {
@@ -216,7 +216,6 b" impl<'a> DirsChildrenMultiset<'a> {"
216 #[cfg(test)]
217 mod tests {
218 use super::*;
219 use crate::StateMap;
220
221 #[test]
222 fn test_delete_path_path_not_found() {
@@ -341,9 +340,9 b' mod tests {'
341 };
342 assert_eq!(expected, new);
343
344 let new = DirsMultiset::from_dirstate(
343 let new = DirsMultiset::from_dirstate::<_, HgPathBuf>(
345 StateMap::default().into_iter().map(Ok),
344 std::iter::empty(),
346 None,
345 false,
347 )
348 .unwrap();
349 let expected = DirsMultiset {
@@ -372,12 +371,7 b' mod tests {'
372 let input_map = ["b/x", "a/c", "a/d/x"].iter().map(|f| {
373 Ok((
374 HgPathBuf::from_bytes(f.as_bytes()),
375 DirstateEntry {
374 DirstateEntry::from_v1_data(EntryState::Normal, 0, 0, 0),
376 state: EntryState::Normal,
377 mode: 0,
378 mtime: 0,
379 size: 0,
380 },
381 ))
382 });
383 let expected_inner = [("", 2), ("a", 2), ("b", 1), ("a/d", 1)]
@@ -385,7 +379,7 b' mod tests {'
385 .map(|(k, v)| (HgPathBuf::from_bytes(k.as_bytes()), *v))
386 .collect();
387
388 let new = DirsMultiset::from_dirstate(input_map, None).unwrap();
382 let new = DirsMultiset::from_dirstate(input_map, false).unwrap();
389 let expected = DirsMultiset {
390 inner: expected_inner,
391 };
@@ -404,24 +398,17 b' mod tests {'
404 .map(|(f, state)| {
405 Ok((
406 HgPathBuf::from_bytes(f.as_bytes()),
407 DirstateEntry {
401 DirstateEntry::from_v1_data(*state, 0, 0, 0),
408 state: *state,
409 mode: 0,
410 mtime: 0,
411 size: 0,
412 },
413 ))
414 });
415
416 // "a" incremented with "a/c" and "a/d/"
417 let expected_inner = [("", 1), ("a", 2)]
406 let expected_inner = [("", 1), ("a", 3)]
418 .iter()
419 .map(|(k, v)| (HgPathBuf::from_bytes(k.as_bytes()), *v))
420 .collect();
421
422 let new =
411 let new = DirsMultiset::from_dirstate(input_map, true).unwrap();
423 DirsMultiset::from_dirstate(input_map, Some(EntryState::Normal))
424 .unwrap();
425 let expected = DirsMultiset {
426 inner: expected_inner,
427 };
@@ -5,14 +5,11 b''
5
6 use crate::errors::HgError;
7 use crate::utils::hg_path::HgPath;
8 use crate::{
8 use crate::{dirstate::EntryState, DirstateEntry, DirstateParents};
9 dirstate::{CopyMap, EntryState, RawEntry, StateMap},
10 DirstateEntry, DirstateParents,
11 };
12 use byteorder::{BigEndian, WriteBytesExt};
13 use bytes_cast::BytesCast;
10 use bytes_cast::{unaligned, BytesCast};
14 use micro_timer::timed;
15 use std::convert::{TryFrom, TryInto};
12 use std::convert::TryFrom;
16
17 /// Parents are stored in the dirstate as byte hashes.
18 pub const PARENT_SIZE: usize = 20;
@@ -48,6 +45,16 b' pub fn parse_dirstate(contents: &[u8]) -'
48 Ok((parents, entries, copies))
49 }
50
48 #[derive(BytesCast)]
49 #[repr(C)]
50 struct RawEntry {
51 state: u8,
52 mode: unaligned::I32Be,
53 size: unaligned::I32Be,
54 mtime: unaligned::I32Be,
55 length: unaligned::I32Be,
56 }
57
51 pub fn parse_dirstate_entries<'a>(
52 mut contents: &'a [u8],
53 mut each_entry: impl FnMut(
@@ -63,12 +70,12 b" pub fn parse_dirstate_entries<'a>("
63 let (raw_entry, rest) = RawEntry::from_bytes(contents)
64 .map_err(|_| HgError::corrupted("Overflow in dirstate."))?;
65
66 let entry = DirstateEntry {
73 let entry = DirstateEntry::from_v1_data(
67 state: EntryState::try_from(raw_entry.state)?,
74 EntryState::try_from(raw_entry.state)?,
68 mode: raw_entry.mode.get(),
75 raw_entry.mode.get(),
69 mtime: raw_entry.mtime.get(),
76 raw_entry.size.get(),
70 size: raw_entry.size.get(),
77 raw_entry.mtime.get(),
71 };
78 );
72 let (paths, rest) =
73 u8::slice_from_bytes(rest, raw_entry.length.get() as usize)
74 .map_err(|_| HgError::corrupted("Overflow in dirstate."))?;
@@ -114,12 +121,13 b' pub fn pack_entry('
114 packed: &mut Vec<u8>,
115 ) {
116 let length = packed_filename_and_copy_source_size(filename, copy_source);
124 let (state, mode, size, mtime) = entry.v1_data();
117
118 // Unwrapping because `impl std::io::Write for Vec<u8>` never errors
119 packed.write_u8(entry.state.into()).unwrap();
127 packed.write_u8(state).unwrap();
120 packed.write_i32::<BigEndian>(entry.mode).unwrap();
128 packed.write_i32::<BigEndian>(mode).unwrap();
121 packed.write_i32::<BigEndian>(entry.size).unwrap();
129 packed.write_i32::<BigEndian>(size).unwrap();
122 packed.write_i32::<BigEndian>(entry.mtime).unwrap();
130 packed.write_i32::<BigEndian>(mtime).unwrap();
123 packed.write_i32::<BigEndian>(length as i32).unwrap();
124 packed.extend(filename.as_bytes());
125 if let Some(source) = copy_source {
@@ -127,363 +135,3 b' pub fn pack_entry('
127 packed.extend(source.as_bytes());
128 }
129 }
130
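`pack_entry` above emits one v1 record: the 17-byte header, the filename bytes, and, when a copy source exists, a `\0` separator plus the source path (the separator is written in surrounding code not shown in this hunk). A hedged standalone restatement using only the standard library:

fn pack_v1_record(
    state: u8,
    mode: i32,
    size: i32,
    mtime: i32,
    filename: &[u8],
    copy_source: Option<&[u8]>,
) -> Vec<u8> {
    // `length` covers the filename plus "\0" + copy source when present.
    let length = filename.len() + copy_source.map_or(0, |s| s.len() + 1);
    let mut packed = Vec::with_capacity(17 + length);
    packed.push(state);
    packed.extend_from_slice(&mode.to_be_bytes());
    packed.extend_from_slice(&size.to_be_bytes());
    packed.extend_from_slice(&mtime.to_be_bytes());
    packed.extend_from_slice(&(length as i32).to_be_bytes());
    packed.extend_from_slice(filename);
    if let Some(source) = copy_source {
        packed.push(0);
        packed.extend_from_slice(source);
    }
    packed
}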
131 /// Seconds since the Unix epoch
132 pub struct Timestamp(pub i64);
133
134 impl DirstateEntry {
135 pub fn mtime_is_ambiguous(&self, now: i32) -> bool {
136 self.state == EntryState::Normal && self.mtime == now
137 }
138
139 pub fn clear_ambiguous_mtime(&mut self, now: i32) -> bool {
140 let ambiguous = self.mtime_is_ambiguous(now);
141 if ambiguous {
142 // The file was last modified "simultaneously" with the current
143 // write to dirstate (i.e. within the same second for file-
144 // systems with a granularity of 1 sec). This commonly happens
145 // for at least a couple of files on 'update'.
146 // The user could change the file without changing its size
147 // within the same second. Invalidate the file's mtime in
148 // dirstate, forcing future 'status' calls to compare the
149 // contents of the file if the size is the same. This prevents
150 // mistakenly treating such files as clean.
151 self.clear_mtime()
152 }
153 ambiguous
154 }
155
156 pub fn clear_mtime(&mut self) {
157 self.mtime = -1;
158 }
159 }
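The guard removed here exists because a file modified in the same second as the dirstate write is indistinguishable, by timestamp alone, from one modified just after it; unsetting the mtime forces the next status to compare file contents. A condensed sketch of the rule (the real check also requires the entry to be in `Normal` state, omitted here):

struct V1Entry { mtime: i32 }

impl V1Entry {
    fn clear_ambiguous_mtime(&mut self, now: i32) -> bool {
        let ambiguous = self.mtime == now;
        if ambiguous {
            self.mtime = -1; // MTIME_UNSET: force a content comparison later
        }
        ambiguous
    }
}

fn main() {
    let now = 15_000_000;
    let mut racy = V1Entry { mtime: now };
    assert!(racy.clear_ambiguous_mtime(now));
    assert_eq!(racy.mtime, -1);

    let mut settled = V1Entry { mtime: now - 10 };
    assert!(!settled.clear_ambiguous_mtime(now));
    assert_eq!(settled.mtime, now - 10);
}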
160
161 pub fn pack_dirstate(
162 state_map: &mut StateMap,
163 copy_map: &CopyMap,
164 parents: DirstateParents,
165 now: Timestamp,
166 ) -> Result<Vec<u8>, HgError> {
167 // TODO move away from i32 before 2038.
168 let now: i32 = now.0.try_into().expect("time overflow");
169
170 let expected_size: usize = state_map
171 .iter()
172 .map(|(filename, _)| {
173 packed_entry_size(filename, copy_map.get(filename).map(|p| &**p))
174 })
175 .sum();
176 let expected_size = expected_size + PARENT_SIZE * 2;
177
178 let mut packed = Vec::with_capacity(expected_size);
179
180 packed.extend(parents.p1.as_bytes());
181 packed.extend(parents.p2.as_bytes());
182
183 for (filename, entry) in state_map.iter_mut() {
184 entry.clear_ambiguous_mtime(now);
185 pack_entry(
186 filename,
187 entry,
188 copy_map.get(filename).map(|p| &**p),
189 &mut packed,
190 )
191 }
192
193 if packed.len() != expected_size {
194 return Err(HgError::CorruptedRepository(format!(
195 "bad dirstate size: {} != {}",
196 expected_size,
197 packed.len()
198 )));
199 }
200
201 Ok(packed)
202 }
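`pack_dirstate` sizes the output buffer exactly once up front, then reuses the same number as a cheap corruption check after serialization. The pattern in isolation (illustrative, not the Mercurial API):

fn pack_all(items: &[&[u8]]) -> Result<Vec<u8>, String> {
    let expected_size: usize = items.iter().map(|item| item.len()).sum();
    let mut packed = Vec::with_capacity(expected_size); // never reallocates
    for item in items {
        packed.extend_from_slice(item);
    }
    if packed.len() != expected_size {
        // mirrors the "bad dirstate size" consistency error above
        return Err(format!("bad size: {} != {}", expected_size, packed.len()));
    }
    Ok(packed)
}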
203
204 #[cfg(test)]
205 mod tests {
206 use super::*;
207 use crate::{utils::hg_path::HgPathBuf, FastHashMap};
208 use pretty_assertions::assert_eq;
209
210 #[test]
211 fn test_pack_dirstate_empty() {
212 let mut state_map = StateMap::default();
213 let copymap = FastHashMap::default();
214 let parents = DirstateParents {
215 p1: b"12345678910111213141".into(),
216 p2: b"00000000000000000000".into(),
217 };
218 let now = Timestamp(15000000);
219 let expected = b"1234567891011121314100000000000000000000".to_vec();
220
221 assert_eq!(
222 expected,
223 pack_dirstate(&mut state_map, &copymap, parents, now).unwrap()
224 );
225
226 assert!(state_map.is_empty())
227 }
228 #[test]
229 fn test_pack_dirstate_one_entry() {
230 let expected_state_map: StateMap = [(
231 HgPathBuf::from_bytes(b"f1"),
232 DirstateEntry {
233 state: EntryState::Normal,
234 mode: 0o644,
235 size: 0,
236 mtime: 791231220,
237 },
238 )]
239 .iter()
240 .cloned()
241 .collect();
242 let mut state_map = expected_state_map.clone();
243
244 let copymap = FastHashMap::default();
245 let parents = DirstateParents {
246 p1: b"12345678910111213141".into(),
247 p2: b"00000000000000000000".into(),
248 };
249 let now = Timestamp(15000000);
250 let expected = [
251 49, 50, 51, 52, 53, 54, 55, 56, 57, 49, 48, 49, 49, 49, 50, 49,
252 51, 49, 52, 49, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
253 48, 48, 48, 48, 48, 48, 48, 48, 110, 0, 0, 1, 164, 0, 0, 0, 0, 47,
254 41, 58, 244, 0, 0, 0, 2, 102, 49,
255 ]
256 .to_vec();
257
258 assert_eq!(
259 expected,
260 pack_dirstate(&mut state_map, &copymap, parents, now).unwrap()
261 );
262
263 assert_eq!(expected_state_map, state_map);
264 }
265 #[test]
266 fn test_pack_dirstate_one_entry_with_copy() {
267 let expected_state_map: StateMap = [(
268 HgPathBuf::from_bytes(b"f1"),
269 DirstateEntry {
270 state: EntryState::Normal,
271 mode: 0o644,
272 size: 0,
273 mtime: 791231220,
274 },
275 )]
276 .iter()
277 .cloned()
278 .collect();
279 let mut state_map = expected_state_map.clone();
280 let mut copymap = FastHashMap::default();
281 copymap.insert(
282 HgPathBuf::from_bytes(b"f1"),
283 HgPathBuf::from_bytes(b"copyname"),
284 );
285 let parents = DirstateParents {
286 p1: b"12345678910111213141".into(),
287 p2: b"00000000000000000000".into(),
288 };
289 let now = Timestamp(15000000);
290 let expected = [
291 49, 50, 51, 52, 53, 54, 55, 56, 57, 49, 48, 49, 49, 49, 50, 49,
292 51, 49, 52, 49, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
293 48, 48, 48, 48, 48, 48, 48, 48, 110, 0, 0, 1, 164, 0, 0, 0, 0, 47,
294 41, 58, 244, 0, 0, 0, 11, 102, 49, 0, 99, 111, 112, 121, 110, 97,
295 109, 101,
296 ]
297 .to_vec();
298
299 assert_eq!(
300 expected,
301 pack_dirstate(&mut state_map, &copymap, parents, now).unwrap()
302 );
303 assert_eq!(expected_state_map, state_map);
304 }
305
306 #[test]
307 fn test_parse_pack_one_entry_with_copy() {
308 let mut state_map: StateMap = [(
309 HgPathBuf::from_bytes(b"f1"),
310 DirstateEntry {
311 state: EntryState::Normal,
312 mode: 0o644,
313 size: 0,
314 mtime: 791231220,
315 },
316 )]
317 .iter()
318 .cloned()
319 .collect();
320 let mut copymap = FastHashMap::default();
321 copymap.insert(
322 HgPathBuf::from_bytes(b"f1"),
323 HgPathBuf::from_bytes(b"copyname"),
324 );
325 let parents = DirstateParents {
326 p1: b"12345678910111213141".into(),
327 p2: b"00000000000000000000".into(),
328 };
329 let now = Timestamp(15000000);
330 let result =
331 pack_dirstate(&mut state_map, &copymap, parents.clone(), now)
332 .unwrap();
333
334 let (new_parents, entries, copies) =
335 parse_dirstate(result.as_slice()).unwrap();
336 let new_state_map: StateMap = entries
337 .into_iter()
338 .map(|(path, entry)| (path.to_owned(), entry))
339 .collect();
340 let new_copy_map: CopyMap = copies
341 .into_iter()
342 .map(|(path, copy)| (path.to_owned(), copy.to_owned()))
343 .collect();
344
345 assert_eq!(
346 (&parents, state_map, copymap),
347 (new_parents, new_state_map, new_copy_map)
348 )
349 }
350
351 #[test]
352 fn test_parse_pack_multiple_entries_with_copy() {
353 let mut state_map: StateMap = [
354 (
355 HgPathBuf::from_bytes(b"f1"),
356 DirstateEntry {
357 state: EntryState::Normal,
358 mode: 0o644,
359 size: 0,
360 mtime: 791231220,
361 },
362 ),
363 (
364 HgPathBuf::from_bytes(b"f2"),
365 DirstateEntry {
366 state: EntryState::Merged,
367 mode: 0o777,
368 size: 1000,
369 mtime: 791231220,
370 },
371 ),
372 (
373 HgPathBuf::from_bytes(b"f3"),
374 DirstateEntry {
375 state: EntryState::Removed,
376 mode: 0o644,
377 size: 234553,
378 mtime: 791231220,
379 },
380 ),
381 (
382 HgPathBuf::from_bytes(b"f4\xF6"),
383 DirstateEntry {
384 state: EntryState::Added,
385 mode: 0o644,
386 size: -1,
387 mtime: -1,
388 },
389 ),
390 ]
391 .iter()
392 .cloned()
393 .collect();
394 let mut copymap = FastHashMap::default();
395 copymap.insert(
396 HgPathBuf::from_bytes(b"f1"),
397 HgPathBuf::from_bytes(b"copyname"),
398 );
399 copymap.insert(
400 HgPathBuf::from_bytes(b"f4\xF6"),
401 HgPathBuf::from_bytes(b"copyname2"),
402 );
403 let parents = DirstateParents {
404 p1: b"12345678910111213141".into(),
405 p2: b"00000000000000000000".into(),
406 };
407 let now = Timestamp(15000000);
408 let result =
409 pack_dirstate(&mut state_map, &copymap, parents.clone(), now)
410 .unwrap();
411
412 let (new_parents, entries, copies) =
413 parse_dirstate(result.as_slice()).unwrap();
414 let new_state_map: StateMap = entries
415 .into_iter()
416 .map(|(path, entry)| (path.to_owned(), entry))
417 .collect();
418 let new_copy_map: CopyMap = copies
419 .into_iter()
420 .map(|(path, copy)| (path.to_owned(), copy.to_owned()))
421 .collect();
422
423 assert_eq!(
424 (&parents, state_map, copymap),
425 (new_parents, new_state_map, new_copy_map)
426 )
427 }
428
429 #[test]
430 /// https://www.mercurial-scm.org/repo/hg/rev/af3f26b6bba4
431 fn test_parse_pack_one_entry_with_copy_and_time_conflict() {
432 let mut state_map: StateMap = [(
433 HgPathBuf::from_bytes(b"f1"),
434 DirstateEntry {
435 state: EntryState::Normal,
436 mode: 0o644,
437 size: 0,
438 mtime: 15000000,
439 },
440 )]
441 .iter()
442 .cloned()
443 .collect();
444 let mut copymap = FastHashMap::default();
445 copymap.insert(
446 HgPathBuf::from_bytes(b"f1"),
447 HgPathBuf::from_bytes(b"copyname"),
448 );
449 let parents = DirstateParents {
450 p1: b"12345678910111213141".into(),
451 p2: b"00000000000000000000".into(),
452 };
453 let now = Timestamp(15000000);
454 let result =
455 pack_dirstate(&mut state_map, &copymap, parents.clone(), now)
456 .unwrap();
457
458 let (new_parents, entries, copies) =
459 parse_dirstate(result.as_slice()).unwrap();
460 let new_state_map: StateMap = entries
461 .into_iter()
462 .map(|(path, entry)| (path.to_owned(), entry))
463 .collect();
464 let new_copy_map: CopyMap = copies
465 .into_iter()
466 .map(|(path, copy)| (path.to_owned(), copy.to_owned()))
467 .collect();
468
469 assert_eq!(
470 (
471 &parents,
472 [(
473 HgPathBuf::from_bytes(b"f1"),
474 DirstateEntry {
475 state: EntryState::Normal,
476 mode: 0o644,
477 size: 0,
478 mtime: -1
479 }
480 )]
481 .iter()
482 .cloned()
483 .collect::<StateMap>(),
484 copymap,
485 ),
486 (new_parents, new_state_map, new_copy_map)
487 )
488 }
489 }
This diff has been collapsed as it changes many lines (812 lines changed).
@@ -10,33 +10,14 b''
10 //! and will only be triggered in narrow cases.
11
12 use crate::dirstate_tree::on_disk::DirstateV2ParseError;
13 use crate::utils::path_auditor::PathAuditor;
13
14 use crate::{
15 dirstate::SIZE_FROM_OTHER_PARENT,
15 dirstate::TruncatedTimestamp,
16 filepatterns::PatternFileWarning,
16 utils::hg_path::{HgPath, HgPathError},
17 matchers::{get_ignore_function, Matcher, VisitChildrenSet},
18 utils::{
19 files::{find_dirs, HgMetadata},
20 hg_path::{
21 hg_path_to_path_buf, os_string_to_hg_path_buf, HgPath, HgPathBuf,
22 HgPathError,
23 },
24 },
25 CopyMap, DirstateEntry, DirstateMap, EntryState, FastHashMap,
26 PatternError,
27 };
28 use lazy_static::lazy_static;
19
29 use micro_timer::timed;
20 use std::{borrow::Cow, fmt};
30 use rayon::prelude::*;
31 use std::{
32 borrow::Cow,
33 collections::HashSet,
34 fmt,
35 fs::{read_dir, DirEntry},
36 io::ErrorKind,
37 ops::Deref,
38 path::{Path, PathBuf},
39 };
40
41 /// Wrong type of file from a `BadMatch`
42 /// Note: a lot of those don't exist on all platforms.
@@ -70,32 +51,6 b' pub enum BadMatch {'
70 BadType(BadType),
71 }
72
73 /// Enum used to dispatch new status entries into the right collections.
74 /// Is similar to `crate::EntryState`, but represents the transient state of
75 /// entries during the lifetime of a command.
76 #[derive(Debug, Copy, Clone)]
77 pub enum Dispatch {
78 Unsure,
79 Modified,
80 Added,
81 Removed,
82 Deleted,
83 Clean,
84 Unknown,
85 Ignored,
86 /// Empty dispatch, the file is not worth listing
87 None,
88 /// Was explicitly matched but cannot be found/accessed
89 Bad(BadMatch),
90 Directory {
91 /// True if the directory used to be a file in the dmap so we can say
92 /// that it's been removed.
93 was_file: bool,
94 },
95 }
96
97 type IoResult<T> = std::io::Result<T>;
98
99 /// `Box<dyn Trait>` is syntactic sugar for `Box<dyn Trait + 'static>`, so add
100 /// an explicit lifetime here to not fight `'static` bounds "out of nowhere".
101 pub type IgnoreFnType<'a> =
@@ -105,147 +60,12 b" pub type IgnoreFnType<'a> ="
105 /// the dirstate/explicit) paths, this comes up a lot.
106 pub type HgPathCow<'a> = Cow<'a, HgPath>;
107
108 /// A path with its computed ``Dispatch`` information
109 type DispatchedPath<'a> = (HgPathCow<'a>, Dispatch);
110
111 /// The conversion from `HgPath` to a real fs path failed.
112 /// `22` is the error code for "Invalid argument"
113 const INVALID_PATH_DISPATCH: Dispatch = Dispatch::Bad(BadMatch::OsError(22));
114
115 /// Dates and times that are outside the 31-bit signed range are compared
116 /// modulo 2^31. This should prevent hg from behaving badly with very large
117 /// files or corrupt dates while still having a high probability of detecting
118 /// changes. (issue2608)
119 /// TODO I haven't found a way of having `b` be `Into<i32>`, since `From<u64>`
120 /// is not defined for `i32`, and there is no `As` trait. This forces the
121 /// caller to cast `b` as `i32`.
122 fn mod_compare(a: i32, b: i32) -> bool {
123 a & i32::max_value() != b & i32::max_value()
124 }
125
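Masking with `i32::max_value()` keeps only the low 31 bits, so two values that differ by an exact multiple of 2^31 compare as equal, which is the point of the modulo comparison described above. A quick demonstration:

fn mod_compare(a: i32, b: i32) -> bool {
    a & i32::max_value() != b & i32::max_value()
}

fn main() {
    // 2^31 + 5 has the same low 31 bits as 5, so no change is reported...
    let big: i64 = (1_i64 << 31) + 5;
    assert!(!mod_compare(5, big as i32));
    // ...while genuinely different values still differ.
    assert!(mod_compare(5, 6));
}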
126 /// Return a sorted list containing information about the entries
127 /// in the directory.
128 ///
129 /// * `skip_dot_hg` - Return an empty vec if `path` contains a `.hg` directory
130 fn list_directory(
131 path: impl AsRef<Path>,
132 skip_dot_hg: bool,
133 ) -> std::io::Result<Vec<(HgPathBuf, DirEntry)>> {
134 let mut results = vec![];
135 let entries = read_dir(path.as_ref())?;
136
137 for entry in entries {
138 let entry = entry?;
139 let filename = os_string_to_hg_path_buf(entry.file_name())?;
140 let file_type = entry.file_type()?;
141 if skip_dot_hg && filename.as_bytes() == b".hg" && file_type.is_dir() {
142 return Ok(vec![]);
143 } else {
144 results.push((filename, entry))
145 }
146 }
147
148 results.sort_unstable_by_key(|e| e.0.clone());
149 Ok(results)
150 }
151
152 /// The file corresponding to the dirstate entry was found on the filesystem.
153 fn dispatch_found(
154 filename: impl AsRef<HgPath>,
155 entry: DirstateEntry,
156 metadata: HgMetadata,
157 copy_map: &CopyMap,
158 options: StatusOptions,
159 ) -> Dispatch {
160 let DirstateEntry {
161 state,
162 mode,
163 mtime,
164 size,
165 } = entry;
166
167 let HgMetadata {
168 st_mode,
169 st_size,
170 st_mtime,
171 ..
172 } = metadata;
173
174 match state {
175 EntryState::Normal => {
176 let size_changed = mod_compare(size, st_size as i32);
177 let mode_changed =
178 (mode ^ st_mode as i32) & 0o100 != 0o000 && options.check_exec;
179 let metadata_changed = size >= 0 && (size_changed || mode_changed);
180 let other_parent = size == SIZE_FROM_OTHER_PARENT;
181
182 if metadata_changed
183 || other_parent
184 || copy_map.contains_key(filename.as_ref())
185 {
186 if metadata.is_symlink() && size_changed {
187 // issue6456: Size returned may be longer due to encryption
188 // on EXT-4 fscrypt. TODO maybe only do it on EXT4?
189 Dispatch::Unsure
190 } else {
191 Dispatch::Modified
192 }
193 } else if mod_compare(mtime, st_mtime as i32)
194 || st_mtime == options.last_normal_time
195 {
196 // the file may have just been marked as normal and
197 // it may have changed in the same second without
198 // changing its size. This can happen if we quickly
199 // do multiple commits. Force lookup, so we don't
200 // miss such a racy file change.
201 Dispatch::Unsure
202 } else if options.list_clean {
203 Dispatch::Clean
204 } else {
205 Dispatch::None
206 }
207 }
208 EntryState::Merged => Dispatch::Modified,
209 EntryState::Added => Dispatch::Added,
210 EntryState::Removed => Dispatch::Removed,
211 EntryState::Unknown => Dispatch::Unknown,
212 }
213 }
214
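For `Normal` entries, `dispatch_found` above asks three questions in order: did the size or exec bit change (Modified), is the mtime untrustworthy (Unsure), otherwise Clean. A condensed hedged restatement that omits the copy-source, negative-size, and symlink special cases:

#[derive(Debug, PartialEq)]
enum Dispatch { Modified, Unsure, Clean }

fn dispatch_normal(
    size_changed: bool,
    mode_changed: bool,
    mtime_changed: bool,
    mtime_in_last_normal_timeslot: bool,
) -> Dispatch {
    if size_changed || mode_changed {
        Dispatch::Modified
    } else if mtime_changed || mtime_in_last_normal_timeslot {
        // A same-second write may hide a content change: re-check later.
        Dispatch::Unsure
    } else {
        Dispatch::Clean
    }
}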
215 /// The file corresponding to this Dirstate entry is missing.
216 fn dispatch_missing(state: EntryState) -> Dispatch {
217 match state {
218 // File was removed from the filesystem during commands
219 EntryState::Normal | EntryState::Merged | EntryState::Added => {
220 Dispatch::Deleted
221 }
222 // File was removed, everything is normal
223 EntryState::Removed => Dispatch::Removed,
224 // File is unknown to Mercurial, everything is normal
225 EntryState::Unknown => Dispatch::Unknown,
226 }
227 }
228
229 fn dispatch_os_error(e: &std::io::Error) -> Dispatch {
230 Dispatch::Bad(BadMatch::OsError(
231 e.raw_os_error().expect("expected real OS error"),
232 ))
233 }
234
235 lazy_static! {
236 static ref DEFAULT_WORK: HashSet<&'static HgPath> = {
237 let mut h = HashSet::new();
238 h.insert(HgPath::new(b""));
239 h
240 };
241 }
242
243 #[derive(Debug, Copy, Clone)]
244 pub struct StatusOptions {
245 /// Remember the most recent modification timeslot for status, to make
246 /// sure we won't miss future size-preserving file content modifications
247 /// that happen within the same timeslot.
248 pub last_normal_time: i64,
68 pub last_normal_time: TruncatedTimestamp,
249 /// Whether we are on a filesystem with UNIX-like exec flags
250 pub check_exec: bool,
251 pub list_clean: bool,
@@ -325,623 +145,3 b' impl fmt::Display for StatusError {'
325 }
326 }
327 }
328
329 /// Gives information about which files are changed in the working directory
330 /// and how, compared to the revision we're based on
331 pub struct Status<'a, M: ?Sized + Matcher + Sync> {
332 dmap: &'a DirstateMap,
333 pub(crate) matcher: &'a M,
334 root_dir: PathBuf,
335 pub(crate) options: StatusOptions,
336 ignore_fn: IgnoreFnType<'a>,
337 }
338
339 impl<'a, M> Status<'a, M>
340 where
341 M: ?Sized + Matcher + Sync,
342 {
343 pub fn new(
344 dmap: &'a DirstateMap,
345 matcher: &'a M,
346 root_dir: PathBuf,
347 ignore_files: Vec<PathBuf>,
348 options: StatusOptions,
349 ) -> StatusResult<(Self, Vec<PatternFileWarning>)> {
350 // Needs to outlive `dir_ignore_fn` since it's captured.
351
352 let (ignore_fn, warnings): (IgnoreFnType, _) =
353 if options.list_ignored || options.list_unknown {
354 get_ignore_function(ignore_files, &root_dir, &mut |_| {})?
355 } else {
356 (Box::new(|&_| true), vec![])
357 };
358
359 Ok((
360 Self {
361 dmap,
362 matcher,
363 root_dir,
364 options,
365 ignore_fn,
366 },
367 warnings,
368 ))
369 }
370
371 /// Is the path ignored?
372 pub fn is_ignored(&self, path: impl AsRef<HgPath>) -> bool {
373 (self.ignore_fn)(path.as_ref())
374 }
375
376 /// Is the path or one of its ancestors ignored?
377 pub fn dir_ignore(&self, dir: impl AsRef<HgPath>) -> bool {
378 // Only involve ignore mechanism if we're listing unknowns or ignored.
379 if self.options.list_ignored || self.options.list_unknown {
380 if self.is_ignored(&dir) {
381 true
382 } else {
383 for p in find_dirs(dir.as_ref()) {
384 if self.is_ignored(p) {
385 return true;
386 }
387 }
388 false
389 }
390 } else {
391 true
392 }
393 }
394
395 /// Get stat data about the files explicitly specified by the matcher.
396 /// Returns a tuple of the directories that need to be traversed and the
397 /// files with their corresponding `Dispatch`.
398 /// TODO subrepos
399 #[timed]
400 pub fn walk_explicit(
401 &self,
402 traversed_sender: crossbeam_channel::Sender<HgPathBuf>,
403 ) -> (Vec<DispatchedPath<'a>>, Vec<DispatchedPath<'a>>) {
404 self.matcher
405 .file_set()
406 .unwrap_or(&DEFAULT_WORK)
407 .par_iter()
408 .flat_map(|&filename| -> Option<_> {
409 // TODO normalization
410 let normalized = filename;
411
412 let buf = match hg_path_to_path_buf(normalized) {
413 Ok(x) => x,
414 Err(_) => {
415 return Some((
416 Cow::Borrowed(normalized),
417 INVALID_PATH_DISPATCH,
418 ))
419 }
420 };
421 let target = self.root_dir.join(buf);
422 let st = target.symlink_metadata();
423 let in_dmap = self.dmap.get(normalized);
424 match st {
425 Ok(meta) => {
426 let file_type = meta.file_type();
427 return if file_type.is_file() || file_type.is_symlink()
428 {
429 if let Some(entry) = in_dmap {
430 return Some((
431 Cow::Borrowed(normalized),
432 dispatch_found(
433 &normalized,
434 *entry,
435 HgMetadata::from_metadata(meta),
436 &self.dmap.copy_map,
437 self.options,
438 ),
439 ));
440 }
441 Some((
442 Cow::Borrowed(normalized),
443 Dispatch::Unknown,
444 ))
445 } else if file_type.is_dir() {
446 if self.options.collect_traversed_dirs {
447 traversed_sender
448 .send(normalized.to_owned())
449 .expect("receiver should outlive sender");
450 }
451 Some((
452 Cow::Borrowed(normalized),
453 Dispatch::Directory {
454 was_file: in_dmap.is_some(),
455 },
456 ))
457 } else {
458 Some((
459 Cow::Borrowed(normalized),
460 Dispatch::Bad(BadMatch::BadType(
461 // TODO do more than unknown
462 // Support for all `BadType` variant
463 // varies greatly between platforms.
464 // So far, no tests check the type and
465 // this should be good enough for most
466 // users.
467 BadType::Unknown,
468 )),
469 ))
470 };
471 }
472 Err(_) => {
473 if let Some(entry) = in_dmap {
474 return Some((
475 Cow::Borrowed(normalized),
476 dispatch_missing(entry.state),
477 ));
478 }
479 }
480 };
481 None
482 })
483 .partition(|(_, dispatch)| match dispatch {
484 Dispatch::Directory { .. } => true,
485 _ => false,
486 })
487 }
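`walk_explicit` finishes with a `partition` that routes `Dispatch::Directory` results into the to-traverse list and everything else into the final results. The same split in miniature (types reduced to the minimum):

#[derive(Debug)]
enum Dispatch { Unknown, Directory { was_file: bool } }

fn main() {
    let results = vec![
        ("a.rs", Dispatch::Unknown),
        ("src", Dispatch::Directory { was_file: false }),
    ];
    let (dirs, files): (Vec<_>, Vec<_>) = results
        .into_iter()
        .partition(|(_, d)| matches!(d, Dispatch::Directory { .. }));
    assert_eq!(dirs.len(), 1);
    assert_eq!(files.len(), 1);
}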
488
489 /// Walk the working directory recursively to look for changes compared to
490 /// the current `DirstateMap`.
491 ///
492 /// This takes a mutable reference to the results to account for the
493 /// `extend` in timings
494 #[timed]
495 pub fn traverse(
496 &self,
497 path: impl AsRef<HgPath>,
498 old_results: &FastHashMap<HgPathCow<'a>, Dispatch>,
499 results: &mut Vec<DispatchedPath<'a>>,
500 traversed_sender: crossbeam_channel::Sender<HgPathBuf>,
501 ) {
502 // The traversal is done in parallel, so use a channel to gather
503 // entries. `crossbeam_channel::Sender` is `Sync`, while `mpsc::Sender`
504 // is not.
505 let (files_transmitter, files_receiver) =
506 crossbeam_channel::unbounded();
507
508 self.traverse_dir(
509 &files_transmitter,
510 path,
511 &old_results,
512 traversed_sender,
513 );
514
515 // Disconnect the channel so the receiver stops waiting
516 drop(files_transmitter);
517
518 let new_results = files_receiver
519 .into_iter()
520 .par_bridge()
521 .map(|(f, d)| (Cow::Owned(f), d));
522
523 results.par_extend(new_results);
524 }
525
526 /// Dispatch a single entry (file, folder, symlink...) found during
527 /// `traverse`. If the entry is a folder that needs to be traversed, it
528 /// will be handled in a separate thread.
529 fn handle_traversed_entry<'b>(
530 &'a self,
531 scope: &rayon::Scope<'b>,
532 files_sender: &'b crossbeam_channel::Sender<(HgPathBuf, Dispatch)>,
533 old_results: &'a FastHashMap<Cow<HgPath>, Dispatch>,
534 filename: HgPathBuf,
535 dir_entry: DirEntry,
536 traversed_sender: crossbeam_channel::Sender<HgPathBuf>,
537 ) -> IoResult<()>
538 where
539 'a: 'b,
540 {
541 let file_type = dir_entry.file_type()?;
542 let entry_option = self.dmap.get(&filename);
543
544 if filename.as_bytes() == b".hg" {
545 // Could be a directory or a symlink
546 return Ok(());
547 }
548
549 if file_type.is_dir() {
550 self.handle_traversed_dir(
551 scope,
552 files_sender,
553 old_results,
554 entry_option,
555 filename,
556 traversed_sender,
557 );
558 } else if file_type.is_file() || file_type.is_symlink() {
559 if let Some(entry) = entry_option {
560 if self.matcher.matches_everything()
561 || self.matcher.matches(&filename)
562 {
563 let metadata = dir_entry.metadata()?;
564 files_sender
565 .send((
566 filename.to_owned(),
567 dispatch_found(
568 &filename,
569 *entry,
570 HgMetadata::from_metadata(metadata),
571 &self.dmap.copy_map,
572 self.options,
573 ),
574 ))
575 .unwrap();
576 }
577 } else if (self.matcher.matches_everything()
578 || self.matcher.matches(&filename))
579 && !self.is_ignored(&filename)
580 {
581 if (self.options.list_ignored
582 || self.matcher.exact_match(&filename))
583 && self.dir_ignore(&filename)
584 {
585 if self.options.list_ignored {
586 files_sender
587 .send((filename.to_owned(), Dispatch::Ignored))
588 .unwrap();
589 }
590 } else if self.options.list_unknown {
591 files_sender
592 .send((filename.to_owned(), Dispatch::Unknown))
593 .unwrap();
594 }
595 } else if self.is_ignored(&filename) && self.options.list_ignored {
596 if self.matcher.matches(&filename) {
597 files_sender
598 .send((filename.to_owned(), Dispatch::Ignored))
599 .unwrap();
600 }
601 }
602 } else if let Some(entry) = entry_option {
603 // Used to be a file or a folder, now something else.
604 if self.matcher.matches_everything()
605 || self.matcher.matches(&filename)
606 {
607 files_sender
608 .send((filename.to_owned(), dispatch_missing(entry.state)))
609 .unwrap();
610 }
611 }
612
613 Ok(())
614 }
615
616 /// A directory was found in the filesystem and needs to be traversed
617 fn handle_traversed_dir<'b>(
618 &'a self,
619 scope: &rayon::Scope<'b>,
620 files_sender: &'b crossbeam_channel::Sender<(HgPathBuf, Dispatch)>,
621 old_results: &'a FastHashMap<Cow<HgPath>, Dispatch>,
622 entry_option: Option<&'a DirstateEntry>,
623 directory: HgPathBuf,
624 traversed_sender: crossbeam_channel::Sender<HgPathBuf>,
625 ) where
626 'a: 'b,
627 {
628 scope.spawn(move |_| {
629 // Nested `if` until `rust-lang/rust#53668` is stable
630 if let Some(entry) = entry_option {
631 // Used to be a file, is now a folder
632 if self.matcher.matches_everything()
633 || self.matcher.matches(&directory)
634 {
635 files_sender
636 .send((
637 directory.to_owned(),
638 dispatch_missing(entry.state),
639 ))
640 .unwrap();
641 }
642 }
643 // Do we need to traverse it?
644 if !self.is_ignored(&directory) || self.options.list_ignored {
645 self.traverse_dir(
646 files_sender,
647 directory,
648 &old_results,
649 traversed_sender,
650 )
651 }
652 });
653 }
654
655 /// Decides whether the directory needs to be listed, and if so handles the
656 /// entries in a separate thread.
657 fn traverse_dir(
658 &self,
659 files_sender: &crossbeam_channel::Sender<(HgPathBuf, Dispatch)>,
660 directory: impl AsRef<HgPath>,
661 old_results: &FastHashMap<Cow<HgPath>, Dispatch>,
662 traversed_sender: crossbeam_channel::Sender<HgPathBuf>,
663 ) {
664 let directory = directory.as_ref();
665
666 if self.options.collect_traversed_dirs {
667 traversed_sender
668 .send(directory.to_owned())
669 .expect("receiver should outlive sender");
670 }
671
672 let visit_entries = match self.matcher.visit_children_set(directory) {
673 VisitChildrenSet::Empty => return,
674 VisitChildrenSet::This | VisitChildrenSet::Recursive => None,
675 VisitChildrenSet::Set(set) => Some(set),
676 };
677 let buf = match hg_path_to_path_buf(directory) {
678 Ok(b) => b,
679 Err(_) => {
680 files_sender
681 .send((directory.to_owned(), INVALID_PATH_DISPATCH))
682 .expect("receiver should outlive sender");
683 return;
684 }
685 };
686 let dir_path = self.root_dir.join(buf);
687
688 let skip_dot_hg = !directory.as_bytes().is_empty();
689 let entries = match list_directory(dir_path, skip_dot_hg) {
690 Err(e) => {
691 files_sender
692 .send((directory.to_owned(), dispatch_os_error(&e)))
693 .expect("receiver should outlive sender");
694 return;
695 }
696 Ok(entries) => entries,
697 };
698
699 rayon::scope(|scope| {
700 for (filename, dir_entry) in entries {
701 if let Some(ref set) = visit_entries {
702 if !set.contains(filename.deref()) {
703 continue;
704 }
705 }
706 // TODO normalize
707 let filename = if directory.is_empty() {
708 filename.to_owned()
709 } else {
710 directory.join(&filename)
711 };
712
713 if !old_results.contains_key(filename.deref()) {
714 match self.handle_traversed_entry(
715 scope,
716 files_sender,
717 old_results,
718 filename,
719 dir_entry,
720 traversed_sender.clone(),
721 ) {
722 Err(e) => {
723 files_sender
724 .send((
725 directory.to_owned(),
726 dispatch_os_error(&e),
727 ))
728 .expect("receiver should outlive sender");
729 }
730 Ok(_) => {}
731 }
732 }
733 }
734 })
735 }
736
737 /// Add the files in the dirstate to the results.
738 ///
739 /// This takes a mutable reference to the results to account for the
740 /// `extend` in timings
741 #[timed]
742 pub fn extend_from_dmap(&self, results: &mut Vec<DispatchedPath<'a>>) {
743 results.par_extend(
744 self.dmap
745 .par_iter()
746 .filter(|(path, _)| self.matcher.matches(path))
747 .map(move |(filename, entry)| {
748 let filename: &HgPath = filename;
749 let filename_as_path = match hg_path_to_path_buf(filename)
750 {
751 Ok(f) => f,
752 Err(_) => {
753 return (
754 Cow::Borrowed(filename),
755 INVALID_PATH_DISPATCH,
756 )
757 }
758 };
759 let meta = self
760 .root_dir
761 .join(filename_as_path)
762 .symlink_metadata();
763 match meta {
764 Ok(m)
765 if !(m.file_type().is_file()
766 || m.file_type().is_symlink()) =>
767 {
768 (
769 Cow::Borrowed(filename),
770 dispatch_missing(entry.state),
771 )
772 }
773 Ok(m) => (
774 Cow::Borrowed(filename),
775 dispatch_found(
776 filename,
777 *entry,
778 HgMetadata::from_metadata(m),
779 &self.dmap.copy_map,
780 self.options,
781 ),
782 ),
783 Err(e)
784 if e.kind() == ErrorKind::NotFound
785 || e.raw_os_error() == Some(20) =>
786 {
787 // Rust does not yet have an `ErrorKind` for
788 // `NotADirectory` (errno 20)
789 // It happens if the dirstate contains `foo/bar`
790 // and foo is not a
791 // directory
792 (
793 Cow::Borrowed(filename),
794 dispatch_missing(entry.state),
795 )
796 }
797 Err(e) => {
798 (Cow::Borrowed(filename), dispatch_os_error(&e))
799 }
800 }
801 }),
802 );
803 }
804
805 /// Checks all files that are in the dirstate but were not found during the
806 /// working directory traversal. This means that the rest must
807 /// be either ignored, under a symlink or under a new nested repo.
808 ///
809 /// This takes a mutable reference to the results to account for the
810 /// `extend` in timings
811 #[timed]
812 pub fn handle_unknowns(&self, results: &mut Vec<DispatchedPath<'a>>) {
813 let to_visit: Vec<(&HgPath, &DirstateEntry)> =
814 if results.is_empty() && self.matcher.matches_everything() {
815 self.dmap.iter().map(|(f, e)| (f.deref(), e)).collect()
816 } else {
817 // Only convert to a hashmap if needed.
818 let old_results: FastHashMap<_, _> =
819 results.iter().cloned().collect();
820 self.dmap
821 .iter()
822 .filter_map(move |(f, e)| {
823 if !old_results.contains_key(f.deref())
824 && self.matcher.matches(f)
825 {
826 Some((f.deref(), e))
827 } else {
828 None
829 }
830 })
831 .collect()
832 };
833
834 let path_auditor = PathAuditor::new(&self.root_dir);
835
836 let new_results = to_visit.into_par_iter().filter_map(
837 |(filename, entry)| -> Option<_> {
838 // Report ignored items in the dmap as long as they are not
839 // under a symlink directory.
840 if path_auditor.check(filename) {
841 // TODO normalize for case-insensitive filesystems
842 let buf = match hg_path_to_path_buf(filename) {
843 Ok(x) => x,
844 Err(_) => {
845 return Some((
846 Cow::Owned(filename.to_owned()),
847 INVALID_PATH_DISPATCH,
848 ));
849 }
850 };
851 Some((
852 Cow::Owned(filename.to_owned()),
853 match self.root_dir.join(&buf).symlink_metadata() {
854 // File was just ignored, no links, and exists
855 Ok(meta) => {
856 let metadata = HgMetadata::from_metadata(meta);
857 dispatch_found(
858 filename,
859 *entry,
860 metadata,
861 &self.dmap.copy_map,
862 self.options,
863 )
864 }
865 // File doesn't exist
866 Err(_) => dispatch_missing(entry.state),
867 },
868 ))
869 } else {
870 // It's either missing or under a symlink directory which
871 // we, in this case, report as missing.
872 Some((
873 Cow::Owned(filename.to_owned()),
874 dispatch_missing(entry.state),
875 ))
876 }
877 },
878 );
879
880 results.par_extend(new_results);
881 }
882 }
883
884 #[timed]
885 pub fn build_response<'a>(
886 results: impl IntoIterator<Item = DispatchedPath<'a>>,
887 traversed: Vec<HgPathCow<'a>>,
888 ) -> DirstateStatus<'a> {
889 let mut unsure = vec![];
890 let mut modified = vec![];
891 let mut added = vec![];
892 let mut removed = vec![];
893 let mut deleted = vec![];
894 let mut clean = vec![];
895 let mut ignored = vec![];
896 let mut unknown = vec![];
897 let mut bad = vec![];
898
899 for (filename, dispatch) in results.into_iter() {
900 match dispatch {
901 Dispatch::Unknown => unknown.push(filename),
902 Dispatch::Unsure => unsure.push(filename),
903 Dispatch::Modified => modified.push(filename),
904 Dispatch::Added => added.push(filename),
905 Dispatch::Removed => removed.push(filename),
906 Dispatch::Deleted => deleted.push(filename),
907 Dispatch::Clean => clean.push(filename),
908 Dispatch::Ignored => ignored.push(filename),
909 Dispatch::None => {}
910 Dispatch::Bad(reason) => bad.push((filename, reason)),
911 Dispatch::Directory { .. } => {}
912 }
913 }
914
915 DirstateStatus {
916 modified,
917 added,
918 removed,
919 deleted,
920 clean,
921 ignored,
922 unknown,
923 bad,
924 unsure,
925 traversed,
926 dirty: false,
927 }
928 }
929
930 /// Get the status of files in the working directory.
931 ///
932 /// This is the current entry-point for `hg-core` and is realistically unusable
933 /// outside of a Python context because its arguments need to provide a lot of
934 /// information that will not be necessary in the future.
935 #[timed]
936 pub fn status<'a>(
937 dmap: &'a DirstateMap,
938 matcher: &'a (dyn Matcher + Sync),
939 root_dir: PathBuf,
940 ignore_files: Vec<PathBuf>,
941 options: StatusOptions,
942 ) -> StatusResult<(DirstateStatus<'a>, Vec<PatternFileWarning>)> {
943 let (status, warnings) =
944 Status::new(dmap, matcher, root_dir, ignore_files, options)?;
945
946 Ok((status.run()?, warnings))
947 }
@@ -1,5 +1,5 b''
1 pub mod dirstate_map;
2 pub mod dispatch;
3 pub mod on_disk;
3 pub mod owning;
4 pub mod path_with_basename;
5 pub mod status;
@@ -1,23 +1,22 b''
1 use bytes_cast::BytesCast;
2 use micro_timer::timed;
3 use std::borrow::Cow;
4 use std::convert::TryInto;
5 use std::path::PathBuf;
6
7 use super::on_disk;
8 use super::on_disk::DirstateV2ParseError;
8 use super::owning::OwningDirstateMap;
9 use super::path_with_basename::WithBasename;
10 use crate::dirstate::parsers::pack_entry;
11 use crate::dirstate::parsers::packed_entry_size;
12 use crate::dirstate::parsers::parse_dirstate_entries;
13 use crate::dirstate::parsers::Timestamp;
13 use crate::dirstate::CopyMapIter;
14 use crate::dirstate::MTIME_UNSET;
14 use crate::dirstate::StateMapIter;
15 use crate::dirstate::TruncatedTimestamp;
15 use crate::dirstate::SIZE_FROM_OTHER_PARENT;
16 use crate::dirstate::SIZE_NON_NORMAL;
17 use crate::dirstate::V1_RANGEMASK;
18 use crate::matchers::Matcher;
19 use crate::utils::hg_path::{HgPath, HgPathBuf};
20 use crate::CopyMapIter;
21 use crate::DirstateEntry;
22 use crate::DirstateError;
23 use crate::DirstateParents;
@@ -25,7 +24,6 b' use crate::DirstateStatus;'
25 use crate::EntryState;
26 use crate::FastHashMap;
27 use crate::PatternFileWarning;
28 use crate::StateMapIter;
29 use crate::StatusError;
30 use crate::StatusOptions;
31
@@ -326,22 +324,17 b" impl<'tree, 'on_disk> NodeRef<'tree, 'on"
326 pub(super) fn state(
327 &self,
328 ) -> Result<Option<EntryState>, DirstateV2ParseError> {
329 match self {
327 Ok(self.entry()?.map(|e| e.state()))
330 NodeRef::InMemory(_path, node) => {
331 Ok(node.data.as_entry().map(|entry| entry.state))
332 }
333 NodeRef::OnDisk(node) => node.state(),
334 }
335 }
336
337 pub(super) fn cached_directory_mtime(
338 &self,
339 ) -> Option<&'tree on_disk::Timestamp> {
332 ) -> Result<Option<TruncatedTimestamp>, DirstateV2ParseError> {
340 match self {
341 NodeRef::InMemory(_path, node) => match &node.data {
334 NodeRef::InMemory(_path, node) => Ok(match node.data {
342 NodeData::CachedDirectory { mtime } => Some(mtime),
343 _ => None,
344 },
337 }),
345 NodeRef::OnDisk(node) => node.cached_directory_mtime(),
346 }
347 }
@@ -382,7 +375,7 b" pub(super) struct Node<'on_disk> {"
382
383 pub(super) enum NodeData {
384 Entry(DirstateEntry),
385 CachedDirectory { mtime: on_disk::Timestamp },
378 CachedDirectory { mtime: TruncatedTimestamp },
386 None,
387 }
388
@@ -445,7 +438,7 b" impl<'on_disk> DirstateMap<'on_disk> {"
445 let parents = parse_dirstate_entries(
446 map.on_disk,
447 |path, entry, copy_source| {
448 let tracked = entry.state.is_tracked();
441 let tracked = entry.state().is_tracked();
449 let node = Self::get_or_insert_node(
450 map.on_disk,
451 &mut map.unreachable_bytes,
@@ -593,12 +586,13 b" impl<'on_disk> DirstateMap<'on_disk> {"
593 fn add_or_remove_file(
594 &mut self,
595 path: &HgPath,
596 old_state: EntryState,
589 old_state: Option<EntryState>,
597 new_entry: DirstateEntry,
598 ) -> Result<(), DirstateV2ParseError> {
599 let had_entry = old_state != EntryState::Unknown;
592 let had_entry = old_state.is_some();
593 let was_tracked = old_state.map_or(false, |s| s.is_tracked());
600 let tracked_count_increment =
601 match (old_state.is_tracked(), new_entry.state.is_tracked()) {
595 match (was_tracked, new_entry.state().is_tracked()) {
602 (false, true) => 1,
603 (true, false) => -1,
604 _ => 0,
@@ -695,34 +689,13 b" impl<'on_disk> DirstateMap<'on_disk> {"
695 path.as_ref(),
696 )? {
697 if let NodeData::Entry(entry) = &mut node.data {
698 entry.clear_mtime();
692 entry.set_possibly_dirty();
699 }
700 }
701 }
702 Ok(())
703 }
704
705 /// Return a fallible iterator of full paths of nodes that have an
706 /// `entry` for which the given `predicate` returns true.
707 ///
708 /// Fallibility means that each iterator item is a `Result`, which may
709 /// indicate a parse error of the on-disk dirstate-v2 format. Such errors
710 /// should only happen if Mercurial is buggy or a repository is corrupted.
711 fn filter_full_paths<'tree>(
712 &'tree self,
713 predicate: impl Fn(&DirstateEntry) -> bool + 'tree,
714 ) -> impl Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + 'tree
715 {
716 filter_map_results(self.iter_nodes(), move |node| {
717 if let Some(entry) = node.entry()? {
718 if predicate(&entry) {
719 return Ok(Some(node.full_path(self.on_disk)?));
720 }
721 }
722 Ok(None)
723 })
724 }
725
726 fn count_dropped_path(unreachable_bytes: &mut u32, path: &Cow<HgPath>) {
727 if let Cow::Borrowed(path) = path {
728 *unreachable_bytes += path.len() as u32
@@ -750,78 +723,41 b' where'
750 })
751 }
752
753 impl<'on_disk> super::dispatch::DirstateMapMethods for DirstateMap<'on_disk> {
726 impl OwningDirstateMap {
754 fn clear(&mut self) {
727 pub fn clear(&mut self) {
755 self.root = Default::default();
728 let map = self.get_map_mut();
756 self.nodes_with_entry_count = 0;
729 map.root = Default::default();
757 self.nodes_with_copy_source_count = 0;
730 map.nodes_with_entry_count = 0;
731 map.nodes_with_copy_source_count = 0;
758 }
759
760 fn set_v1(&mut self, filename: &HgPath, entry: DirstateEntry) {
734 pub fn set_entry(
761 let node =
762 self.get_or_insert(&filename).expect("no parse error in v1");
763 node.data = NodeData::Entry(entry);
764 node.children = ChildNodes::default();
765 node.copy_source = None;
766 node.descendants_with_entry_count = 0;
767 node.tracked_descendants_count = 0;
768 }
769
770 fn add_file(
771 &mut self,
772 filename: &HgPath,
773 entry: DirstateEntry,
774 added: bool,
738 ) -> Result<(), DirstateV2ParseError> {
775 merged: bool,
739 let map = self.get_map_mut();
776 from_p2: bool,
740 map.get_or_insert(&filename)?.data = NodeData::Entry(entry);
777 possibly_dirty: bool,
741 Ok(())
778 ) -> Result<(), DirstateError> {
779 let mut entry = entry;
780 if added {
781 assert!(!possibly_dirty);
782 assert!(!from_p2);
783 entry.state = EntryState::Added;
784 entry.size = SIZE_NON_NORMAL;
785 entry.mtime = MTIME_UNSET;
786 } else if merged {
787 assert!(!possibly_dirty);
788 assert!(!from_p2);
789 entry.state = EntryState::Merged;
790 entry.size = SIZE_FROM_OTHER_PARENT;
791 entry.mtime = MTIME_UNSET;
792 } else if from_p2 {
793 assert!(!possibly_dirty);
794 entry.state = EntryState::Normal;
795 entry.size = SIZE_FROM_OTHER_PARENT;
796 entry.mtime = MTIME_UNSET;
797 } else if possibly_dirty {
798 entry.state = EntryState::Normal;
799 entry.size = SIZE_NON_NORMAL;
800 entry.mtime = MTIME_UNSET;
801 } else {
802 entry.state = EntryState::Normal;
803 entry.size = entry.size & V1_RANGEMASK;
804 entry.mtime = entry.mtime & V1_RANGEMASK;
805 }
806
807 let old_state = match self.get(filename)? {
808 Some(e) => e.state,
809 None => EntryState::Unknown,
810 };
811
812 Ok(self.add_or_remove_file(filename, old_state, entry)?)
813 }
814
815 fn remove_file(
744 pub fn add_file(
745 &mut self,
746 filename: &HgPath,
747 entry: DirstateEntry,
748 ) -> Result<(), DirstateError> {
749 let old_state = self.get(filename)?.map(|e| e.state());
750 let map = self.get_map_mut();
751 Ok(map.add_or_remove_file(filename, old_state, entry)?)
752 }
753
754 pub fn remove_file(
816 &mut self,
817 filename: &HgPath,
818 in_merge: bool,
819 ) -> Result<(), DirstateError> {
820 let old_entry_opt = self.get(filename)?;
821 let old_state = match old_entry_opt {
760 let old_state = old_entry_opt.map(|e| e.state());
822 Some(e) => e.state,
823 None => EntryState::Unknown,
824 };
825 let mut size = 0;
826 if in_merge {
827 // XXX we should not be able to have 'm' state and 'FROM_P2' if not
@@ -830,10 +766,10 b" impl<'on_disk> super::dispatch::Dirstate"
830 // would be nice.
831 if let Some(old_entry) = old_entry_opt {
832 // backup the previous state
833 if old_entry.state == EntryState::Merged {
769 if old_entry.state() == EntryState::Merged {
834 size = SIZE_NON_NORMAL;
835 } else if old_entry.state == EntryState::Normal
771 } else if old_entry.state() == EntryState::Normal
836 && old_entry.size == SIZE_FROM_OTHER_PARENT
772 && old_entry.size() == SIZE_FROM_OTHER_PARENT
837 {
838 // other parent
839 size = SIZE_FROM_OTHER_PARENT;
@@ -843,20 +779,19 b" impl<'on_disk> super::dispatch::Dirstate"
843 if size == 0 {
844 self.copy_map_remove(filename)?;
845 }
846 let entry = DirstateEntry {
782 let map = self.get_map_mut();
847 state: EntryState::Removed,
783 let entry = DirstateEntry::new_removed(size);
848 mode: 0,
784 Ok(map.add_or_remove_file(filename, old_state, entry)?)
849 size,
850 mtime: 0,
851 };
852 Ok(self.add_or_remove_file(filename, old_state, entry)?)
853 }
854
855 fn drop_file(&mut self, filename: &HgPath) -> Result<bool, DirstateError> {
787 pub fn drop_entry_and_copy_source(
856 let old_state = match self.get(filename)? {
788 &mut self,
857 Some(e) => e.state,
789 filename: &HgPath,
858 None => EntryState::Unknown,
790 ) -> Result<(), DirstateError> {
859 };
791 let was_tracked = self
792 .get(filename)?
793 .map_or(false, |e| e.state().is_tracked());
794 let map = self.get_map_mut();
860 struct Dropped {
861 was_tracked: bool,
862 had_entry: bool,
@@ -915,13 +850,14 b" impl<'on_disk> super::dispatch::Dirstate"
915 node.data = NodeData::None
916 }
917 if let Some(source) = &node.copy_source {
918 DirstateMap::count_dropped_path(unreachable_bytes, source)
853 DirstateMap::count_dropped_path(unreachable_bytes, source);
854 node.copy_source = None
919 }
920 dropped = Dropped {
921 was_tracked: node
922 .data
923 .as_entry()
924 .map_or(false, |entry| entry.state.is_tracked()),
860 .map_or(false, |entry| entry.state().is_tracked()),
925 had_entry,
926 had_copy_source: node.copy_source.take().is_some(),
927 };
@@ -943,112 +879,29 b" impl<'on_disk> super::dispatch::Dirstate"
943 }
944
945 if let Some((dropped, _removed)) = recur(
946 self.on_disk,
882 map.on_disk,
947 &mut self.unreachable_bytes,
883 &mut map.unreachable_bytes,
948 &mut self.root,
884 &mut map.root,
949 filename,
950 )? {
951 if dropped.had_entry {
952 self.nodes_with_entry_count -= 1
888 map.nodes_with_entry_count -= 1
953 }
954 if dropped.had_copy_source {
955 self.nodes_with_copy_source_count -= 1
891 map.nodes_with_copy_source_count -= 1
956 }
957 Ok(dropped.had_entry)
958 } else {
959 debug_assert!(!old_state.is_tracked());
894 debug_assert!(!was_tracked);
960 Ok(false)
961 }
962 }
963
964 fn clear_ambiguous_times(
965 &mut self,
966 filenames: Vec<HgPathBuf>,
967 now: i32,
968 ) -> Result<(), DirstateV2ParseError> {
969 for filename in filenames {
970 if let Some(node) = Self::get_node_mut(
971 self.on_disk,
972 &mut self.unreachable_bytes,
973 &mut self.root,
974 &filename,
975 )? {
976 if let NodeData::Entry(entry) = &mut node.data {
977 entry.clear_ambiguous_mtime(now);
978 }
979 }
980 }
981 Ok(())
982 }
983
984 fn non_normal_entries_contains(
899 pub fn has_tracked_dir(
985 &mut self,
986 key: &HgPath,
987 ) -> Result<bool, DirstateV2ParseError> {
988 Ok(if let Some(node) = self.get_node(key)? {
989 node.entry()?.map_or(false, |entry| entry.is_non_normal())
990 } else {
991 false
992 })
993 }
994
995 fn non_normal_entries_remove(&mut self, key: &HgPath) -> bool {
996 // Do nothing, this `DirstateMap` does not have a separate "non normal
997 // entries" set that need to be kept up to date.
998 if let Ok(Some(v)) = self.get(key) {
999 return v.is_non_normal();
1000 }
1001 false
1002 }
1003
1004 fn non_normal_entries_add(&mut self, _key: &HgPath) {
1005 // Do nothing, this `DirstateMap` does not have a separate "non normal
1006 // entries" set that need to be kept up to date
1007 }
1008
1009 fn non_normal_or_other_parent_paths(
1010 &mut self,
1011 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
1012 {
1013 Box::new(self.filter_full_paths(|entry| {
1014 entry.is_non_normal() || entry.is_from_other_parent()
1015 }))
1016 }
1017
1018 fn set_non_normal_other_parent_entries(&mut self, _force: bool) {
1019 // Do nothing, this `DirstateMap` does not have a separate "non normal
1020 // entries" and "from other parent" sets that need to be recomputed
1021 }
1022
1023 fn iter_non_normal_paths(
1024 &mut self,
1025 ) -> Box<
1026 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
1027 > {
1028 self.iter_non_normal_paths_panic()
1029 }
1030
1031 fn iter_non_normal_paths_panic(
1032 &self,
1033 ) -> Box<
1034 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
1035 > {
1036 Box::new(self.filter_full_paths(|entry| entry.is_non_normal()))
1037 }
1038
1039 fn iter_other_parent_paths(
1040 &mut self,
1041 ) -> Box<
1042 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
1043 > {
1044 Box::new(self.filter_full_paths(|entry| entry.is_from_other_parent()))
1045 }
1046
1047 fn has_tracked_dir(
1048 &mut self,
1049 directory: &HgPath,
1050 ) -> Result<bool, DirstateError> {
1051 if let Some(node) = self.get_node(directory)? {
903 let map = self.get_map_mut();
904 if let Some(node) = map.get_node(directory)? {
1052 // A node without a `DirstateEntry` was created to hold child
1053 // nodes, and is therefore a directory.
1054 let state = node.state()?;
@@ -1058,8 +911,12 b" impl<'on_disk> super::dispatch::Dirstate"
1058 }
1059 }
1060
1061 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
914 pub fn has_dir(
1062 if let Some(node) = self.get_node(directory)? {
915 &mut self,
916 directory: &HgPath,
917 ) -> Result<bool, DirstateError> {
918 let map = self.get_map_mut();
919 if let Some(node) = map.get_node(directory)? {
1063 // A node without a `DirstateEntry` was created to hold child
1064 // nodes, and is therefore a directory.
1065 let state = node.state()?;
@@ -1070,43 +927,43 b" impl<'on_disk> super::dispatch::Dirstate"
1070 }
927 }
1071
928
1072 #[timed]
929 #[timed]
1073 fn pack_v1(
930 pub fn pack_v1(
1074 &mut self,
931 &mut self,
1075 parents: DirstateParents,
932 parents: DirstateParents,
1076 now: Timestamp,
933 now: TruncatedTimestamp,
1077 ) -> Result<Vec<u8>, DirstateError> {
934 ) -> Result<Vec<u8>, DirstateError> {
1078 let now: i32 = now.0.try_into().expect("time overflow");
935 let map = self.get_map_mut();
1079 let mut ambiguous_mtimes = Vec::new();
936 let mut ambiguous_mtimes = Vec::new();
1080 // Optimization (to be measured?): pre-compute size to avoid `Vec`
937 // Optimization (to be measured?): pre-compute size to avoid `Vec`
1081 // reallocations
938 // reallocations
1082 let mut size = parents.as_bytes().len();
939 let mut size = parents.as_bytes().len();
1083 for node in self.iter_nodes() {
940 for node in map.iter_nodes() {
1084 let node = node?;
941 let node = node?;
1085 if let Some(entry) = node.entry()? {
942 if let Some(entry) = node.entry()? {
1086 size += packed_entry_size(
943 size += packed_entry_size(
1087 node.full_path(self.on_disk)?,
944 node.full_path(map.on_disk)?,
1088 node.copy_source(self.on_disk)?,
945 node.copy_source(map.on_disk)?,
1089 );
946 );
1090 if entry.mtime_is_ambiguous(now) {
947 if entry.need_delay(now) {
1091 ambiguous_mtimes.push(
948 ambiguous_mtimes.push(
1092 node.full_path_borrowed(self.on_disk)?
949 node.full_path_borrowed(map.on_disk)?
1093 .detach_from_tree(),
950 .detach_from_tree(),
1094 )
951 )
1095 }
952 }
1096 }
953 }
1097 }
954 }
1098 self.clear_known_ambiguous_mtimes(&ambiguous_mtimes)?;
955 map.clear_known_ambiguous_mtimes(&ambiguous_mtimes)?;
1099
956
1100 let mut packed = Vec::with_capacity(size);
957 let mut packed = Vec::with_capacity(size);
1101 packed.extend(parents.as_bytes());
958 packed.extend(parents.as_bytes());
1102
959
1103 for node in self.iter_nodes() {
960 for node in map.iter_nodes() {
1104 let node = node?;
961 let node = node?;
1105 if let Some(entry) = node.entry()? {
962 if let Some(entry) = node.entry()? {
1106 pack_entry(
963 pack_entry(
1107 node.full_path(self.on_disk)?,
964 node.full_path(map.on_disk)?,
1108 &entry,
965 &entry,
1109 node.copy_source(self.on_disk)?,
966 node.copy_source(map.on_disk)?,
1110 &mut packed,
967 &mut packed,
1111 );
968 );
1112 }
969 }
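Note on the ambiguous-mtime handling in the two pack functions above (`mtime_is_ambiguous` on the old side, `need_delay` on the new side): an entry whose recorded mtime falls in the same truncated second as the dirstate write cannot be trusted, because a later modification in that same second would leave the mtime unchanged. A minimal std-only sketch of the rule, with illustrative names:

// Hypothetical helper mirroring the check above: an mtime equal to the
// current time, truncated to seconds, is "ambiguous" and is cleared from
// the serialized dirstate, forcing a content comparison on the next run.
fn mtime_is_ambiguous(entry_mtime_secs: i64, now_secs: i64) -> bool {
    entry_mtime_secs == now_secs
}

fn main() {
    let now = 1_700_000_000;
    assert!(mtime_is_ambiguous(now, now)); // written this second: drop it
    assert!(!mtime_is_ambiguous(now - 5, now)); // older: safe to keep
}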
@@ -1116,23 +973,22 b" impl<'on_disk> super::dispatch::Dirstate"
1116
973
1117 /// Returns new data and metadata together with whether that data should be
974 /// Returns new data and metadata together with whether that data should be
1118 /// appended to the existing data file whose content is at
975 /// appended to the existing data file whose content is at
1119 /// `self.on_disk` (true), instead of written to a new data file
976 /// `map.on_disk` (true), instead of written to a new data file
1120 /// (false).
977 /// (false).
1121 #[timed]
978 #[timed]
1122 fn pack_v2(
979 pub fn pack_v2(
1123 &mut self,
980 &mut self,
1124 now: Timestamp,
981 now: TruncatedTimestamp,
1125 can_append: bool,
982 can_append: bool,
1126 ) -> Result<(Vec<u8>, Vec<u8>, bool), DirstateError> {
983 ) -> Result<(Vec<u8>, Vec<u8>, bool), DirstateError> {
1127 // TODO: how do we want to handle this in 2038?
984 let map = self.get_map_mut();
1128 let now: i32 = now.0.try_into().expect("time overflow");
1129 let mut paths = Vec::new();
985 let mut paths = Vec::new();
1130 for node in self.iter_nodes() {
986 for node in map.iter_nodes() {
1131 let node = node?;
987 let node = node?;
1132 if let Some(entry) = node.entry()? {
988 if let Some(entry) = node.entry()? {
1133 if entry.mtime_is_ambiguous(now) {
989 if entry.need_delay(now) {
1134 paths.push(
990 paths.push(
1135 node.full_path_borrowed(self.on_disk)?
991 node.full_path_borrowed(map.on_disk)?
1136 .detach_from_tree(),
992 .detach_from_tree(),
1137 )
993 )
1138 }
994 }
@@ -1140,12 +996,12 b" impl<'on_disk> super::dispatch::Dirstate"
1140 }
996 }
1141 // Borrow of `self` ends here since we collect cloned paths
997 // Borrow of `self` ends here since we collect cloned paths
1142
998
1143 self.clear_known_ambiguous_mtimes(&paths)?;
999 map.clear_known_ambiguous_mtimes(&paths)?;
1144
1000
1145 on_disk::write(self, can_append)
1001 on_disk::write(map, can_append)
1146 }
1002 }
1147
1003
1148 fn status<'a>(
1004 pub fn status<'a>(
1149 &'a mut self,
1005 &'a mut self,
1150 matcher: &'a (dyn Matcher + Sync),
1006 matcher: &'a (dyn Matcher + Sync),
1151 root_dir: PathBuf,
1007 root_dir: PathBuf,
@@ -1153,119 +1009,129 b" impl<'on_disk> super::dispatch::Dirstate"
1153 options: StatusOptions,
1009 options: StatusOptions,
1154 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
1010 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
1155 {
1011 {
1156 super::status::status(self, matcher, root_dir, ignore_files, options)
1012 let map = self.get_map_mut();
1013 super::status::status(map, matcher, root_dir, ignore_files, options)
1157 }
1014 }
1158
1015
1159 fn copy_map_len(&self) -> usize {
1016 pub fn copy_map_len(&self) -> usize {
1160 self.nodes_with_copy_source_count as usize
1017 let map = self.get_map();
1018 map.nodes_with_copy_source_count as usize
1161 }
1019 }
1162
1020
1163 fn copy_map_iter(&self) -> CopyMapIter<'_> {
1021 pub fn copy_map_iter(&self) -> CopyMapIter<'_> {
1164 Box::new(filter_map_results(self.iter_nodes(), move |node| {
1022 let map = self.get_map();
1165 Ok(if let Some(source) = node.copy_source(self.on_disk)? {
1023 Box::new(filter_map_results(map.iter_nodes(), move |node| {
1166 Some((node.full_path(self.on_disk)?, source))
1024 Ok(if let Some(source) = node.copy_source(map.on_disk)? {
1025 Some((node.full_path(map.on_disk)?, source))
1167 } else {
1026 } else {
1168 None
1027 None
1169 })
1028 })
1170 }))
1029 }))
1171 }
1030 }
1172
1031
1173 fn copy_map_contains_key(
1032 pub fn copy_map_contains_key(
1174 &self,
1033 &self,
1175 key: &HgPath,
1034 key: &HgPath,
1176 ) -> Result<bool, DirstateV2ParseError> {
1035 ) -> Result<bool, DirstateV2ParseError> {
1177 Ok(if let Some(node) = self.get_node(key)? {
1036 let map = self.get_map();
1037 Ok(if let Some(node) = map.get_node(key)? {
1178 node.has_copy_source()
1038 node.has_copy_source()
1179 } else {
1039 } else {
1180 false
1040 false
1181 })
1041 })
1182 }
1042 }
1183
1043
1184 fn copy_map_get(
1044 pub fn copy_map_get(
1185 &self,
1045 &self,
1186 key: &HgPath,
1046 key: &HgPath,
1187 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
1047 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
1188 if let Some(node) = self.get_node(key)? {
1048 let map = self.get_map();
1189 if let Some(source) = node.copy_source(self.on_disk)? {
1049 if let Some(node) = map.get_node(key)? {
1050 if let Some(source) = node.copy_source(map.on_disk)? {
1190 return Ok(Some(source));
1051 return Ok(Some(source));
1191 }
1052 }
1192 }
1053 }
1193 Ok(None)
1054 Ok(None)
1194 }
1055 }
1195
1056
1196 fn copy_map_remove(
1057 pub fn copy_map_remove(
1197 &mut self,
1058 &mut self,
1198 key: &HgPath,
1059 key: &HgPath,
1199 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
1060 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
1200 let count = &mut self.nodes_with_copy_source_count;
1061 let map = self.get_map_mut();
1201 let unreachable_bytes = &mut self.unreachable_bytes;
1062 let count = &mut map.nodes_with_copy_source_count;
1202 Ok(Self::get_node_mut(
1063 let unreachable_bytes = &mut map.unreachable_bytes;
1203 self.on_disk,
1064 Ok(DirstateMap::get_node_mut(
1065 map.on_disk,
1204 unreachable_bytes,
1066 unreachable_bytes,
1205 &mut self.root,
1067 &mut map.root,
1206 key,
1068 key,
1207 )?
1069 )?
1208 .and_then(|node| {
1070 .and_then(|node| {
1209 if let Some(source) = &node.copy_source {
1071 if let Some(source) = &node.copy_source {
1210 *count -= 1;
1072 *count -= 1;
1211 Self::count_dropped_path(unreachable_bytes, source);
1073 DirstateMap::count_dropped_path(unreachable_bytes, source);
1212 }
1074 }
1213 node.copy_source.take().map(Cow::into_owned)
1075 node.copy_source.take().map(Cow::into_owned)
1214 }))
1076 }))
1215 }
1077 }
1216
1078
1217 fn copy_map_insert(
1079 pub fn copy_map_insert(
1218 &mut self,
1080 &mut self,
1219 key: HgPathBuf,
1081 key: HgPathBuf,
1220 value: HgPathBuf,
1082 value: HgPathBuf,
1221 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
1083 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
1222 let node = Self::get_or_insert_node(
1084 let map = self.get_map_mut();
1223 self.on_disk,
1085 let node = DirstateMap::get_or_insert_node(
1224 &mut self.unreachable_bytes,
1086 map.on_disk,
1225 &mut self.root,
1087 &mut map.unreachable_bytes,
1088 &mut map.root,
1226 &key,
1089 &key,
1227 WithBasename::to_cow_owned,
1090 WithBasename::to_cow_owned,
1228 |_ancestor| {},
1091 |_ancestor| {},
1229 )?;
1092 )?;
1230 if node.copy_source.is_none() {
1093 if node.copy_source.is_none() {
1231 self.nodes_with_copy_source_count += 1
1094 map.nodes_with_copy_source_count += 1
1232 }
1095 }
1233 Ok(node.copy_source.replace(value.into()).map(Cow::into_owned))
1096 Ok(node.copy_source.replace(value.into()).map(Cow::into_owned))
1234 }
1097 }
1235
1098
1236 fn len(&self) -> usize {
1099 pub fn len(&self) -> usize {
1237 self.nodes_with_entry_count as usize
1100 let map = self.get_map();
1101 map.nodes_with_entry_count as usize
1238 }
1102 }
1239
1103
1240 fn contains_key(
1104 pub fn contains_key(
1241 &self,
1105 &self,
1242 key: &HgPath,
1106 key: &HgPath,
1243 ) -> Result<bool, DirstateV2ParseError> {
1107 ) -> Result<bool, DirstateV2ParseError> {
1244 Ok(self.get(key)?.is_some())
1108 Ok(self.get(key)?.is_some())
1245 }
1109 }
1246
1110
1247 fn get(
1111 pub fn get(
1248 &self,
1112 &self,
1249 key: &HgPath,
1113 key: &HgPath,
1250 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
1114 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
1251 Ok(if let Some(node) = self.get_node(key)? {
1115 let map = self.get_map();
1116 Ok(if let Some(node) = map.get_node(key)? {
1252 node.entry()?
1117 node.entry()?
1253 } else {
1118 } else {
1254 None
1119 None
1255 })
1120 })
1256 }
1121 }
1257
1122
1258 fn iter(&self) -> StateMapIter<'_> {
1123 pub fn iter(&self) -> StateMapIter<'_> {
1259 Box::new(filter_map_results(self.iter_nodes(), move |node| {
1124 let map = self.get_map();
1125 Box::new(filter_map_results(map.iter_nodes(), move |node| {
1260 Ok(if let Some(entry) = node.entry()? {
1126 Ok(if let Some(entry) = node.entry()? {
1261 Some((node.full_path(self.on_disk)?, entry))
1127 Some((node.full_path(map.on_disk)?, entry))
1262 } else {
1128 } else {
1263 None
1129 None
1264 })
1130 })
1265 }))
1131 }))
1266 }
1132 }
1267
1133
1268 fn iter_tracked_dirs(
1134 pub fn iter_tracked_dirs(
1269 &mut self,
1135 &mut self,
1270 ) -> Result<
1136 ) -> Result<
1271 Box<
1137 Box<
@@ -1275,9 +1141,10 b" impl<'on_disk> super::dispatch::Dirstate"
1275 >,
1141 >,
1276 DirstateError,
1142 DirstateError,
1277 > {
1143 > {
1278 let on_disk = self.on_disk;
1144 let map = self.get_map_mut();
1145 let on_disk = map.on_disk;
1279 Ok(Box::new(filter_map_results(
1146 Ok(Box::new(filter_map_results(
1280 self.iter_nodes(),
1147 map.iter_nodes(),
1281 move |node| {
1148 move |node| {
1282 Ok(if node.tracked_descendants_count() > 0 {
1149 Ok(if node.tracked_descendants_count() > 0 {
1283 Some(node.full_path(on_disk)?)
1150 Some(node.full_path(on_disk)?)
@@ -1288,8 +1155,9 b" impl<'on_disk> super::dispatch::Dirstate"
1288 )))
1155 )))
1289 }
1156 }
1290
1157
1291 fn debug_iter(
1158 pub fn debug_iter(
1292 &self,
1159 &self,
1160 all: bool,
1293 ) -> Box<
1161 ) -> Box<
1294 dyn Iterator<
1162 dyn Iterator<
1295 Item = Result<
1163 Item = Result<
@@ -1299,16 +1167,18 b" impl<'on_disk> super::dispatch::Dirstate"
1299 > + Send
1167 > + Send
1300 + '_,
1168 + '_,
1301 > {
1169 > {
1302 Box::new(self.iter_nodes().map(move |node| {
1170 let map = self.get_map();
1303 let node = node?;
1171 Box::new(filter_map_results(map.iter_nodes(), move |node| {
1304 let debug_tuple = if let Some(entry) = node.entry()? {
1172 let debug_tuple = if let Some(entry) = node.entry()? {
1305 entry.debug_tuple()
1173 entry.debug_tuple()
1306 } else if let Some(mtime) = node.cached_directory_mtime() {
1174 } else if !all {
1307 (b' ', 0, -1, mtime.seconds() as i32)
1175 return Ok(None);
1176 } else if let Some(mtime) = node.cached_directory_mtime()? {
1177 (b' ', 0, -1, mtime.truncated_seconds() as i32)
1308 } else {
1178 } else {
1309 (b' ', 0, -1, -1)
1179 (b' ', 0, -1, -1)
1310 };
1180 };
1311 Ok((node.full_path(self.on_disk)?, debug_tuple))
1181 Ok(Some((node.full_path(map.on_disk)?, debug_tuple)))
1312 }))
1182 }))
1313 }
1183 }
1314 }
1184 }
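The iterators above (`copy_map_iter`, `iter`, `debug_iter`) all funnel through `filter_map_results`. The following is a sketch of what such a combinator plausibly looks like; the exact hg-core signature may differ, so treat the shape as an assumption: it applies a fallible filter-map to an iterator of `Result`s and passes errors through untouched.

// Illustrative combinator: keep `Ok(Some(_))` items, drop `Ok(None)`,
// and forward errors unchanged.
fn filter_map_results<T, U, E>(
    iter: impl Iterator<Item = Result<T, E>>,
    mut f: impl FnMut(T) -> Result<Option<U>, E>,
) -> impl Iterator<Item = Result<U, E>> {
    iter.filter_map(move |result| match result {
        Ok(item) => f(item).transpose(),
        Err(e) => Some(Err(e)),
    })
}

fn main() {
    let input: Vec<Result<i32, String>> = vec![Ok(1), Ok(2), Ok(3)];
    let odd_doubled: Vec<_> = filter_map_results(input.into_iter(), |n| {
        Ok(if n % 2 == 1 { Some(n * 2) } else { None })
    })
    .collect();
    assert_eq!(odd_doubled, vec![Ok(2), Ok(6)]);
}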
@@ -1,23 +1,8 b''
1 //! The "version 2" disk representation of the dirstate
1 //! The "version 2" disk representation of the dirstate
2 //!
2 //!
3 //! # File format
3 //! See `mercurial/helptext/internals/dirstate-v2.txt`
4 //!
5 //! In dirstate-v2 format, the `.hg/dirstate` file is a "docket" that starts
6 //! with a fixed-sized header whose layout is defined by the `DocketHeader`
7 //! struct, followed by the data file identifier.
8 //!
9 //! A separate `.hg/dirstate.{uuid}.d` file contains most of the data. That
10 //! file may be longer than the size given in the docket, but not shorter. Only
11 //! the start of the data file up to the given size is considered. The
12 //! fixed-size "root" of the dirstate tree whose layout is defined by the
13 //! `Root` struct is found at the end of that slice of data.
14 //!
15 //! Its `root_nodes` field contains the slice (offset and length) to
16 //! the nodes representing the files and directories at the root of the
17 //! repository. Each node is also fixed-size, defined by the `Node` struct.
18 //! Nodes in turn contain slices to variable-size paths, and to their own child
19 //! nodes (if any) for nested files and directories.
20
4
5 use crate::dirstate::TruncatedTimestamp;
21 use crate::dirstate_tree::dirstate_map::{self, DirstateMap, NodeRef};
6 use crate::dirstate_tree::dirstate_map::{self, DirstateMap, NodeRef};
22 use crate::dirstate_tree::path_with_basename::WithBasename;
7 use crate::dirstate_tree::path_with_basename::WithBasename;
23 use crate::errors::HgError;
8 use crate::errors::HgError;
@@ -25,13 +10,12 b' use crate::utils::hg_path::HgPath;'
25 use crate::DirstateEntry;
10 use crate::DirstateEntry;
26 use crate::DirstateError;
11 use crate::DirstateError;
27 use crate::DirstateParents;
12 use crate::DirstateParents;
28 use crate::EntryState;
13 use bitflags::bitflags;
29 use bytes_cast::unaligned::{I32Be, I64Be, U16Be, U32Be};
14 use bytes_cast::unaligned::{U16Be, U32Be};
30 use bytes_cast::BytesCast;
15 use bytes_cast::BytesCast;
31 use format_bytes::format_bytes;
16 use format_bytes::format_bytes;
32 use std::borrow::Cow;
17 use std::borrow::Cow;
33 use std::convert::{TryFrom, TryInto};
18 use std::convert::{TryFrom, TryInto};
34 use std::time::{Duration, SystemTime, UNIX_EPOCH};
35
19
36 /// Added at the start of `.hg/dirstate` when the "v2" format is used.
20 /// Added at the start of `.hg/dirstate` when the "v2" format is used.
37 /// This a redundant sanity check more than an actual "magic number" since
21 /// This a redundant sanity check more than an actual "magic number" since
@@ -47,16 +31,16 b' const USED_NODE_ID_BYTES: usize = 20;'
47 pub(super) const IGNORE_PATTERNS_HASH_LEN: usize = 20;
31 pub(super) const IGNORE_PATTERNS_HASH_LEN: usize = 20;
48 pub(super) type IgnorePatternsHash = [u8; IGNORE_PATTERNS_HASH_LEN];
32 pub(super) type IgnorePatternsHash = [u8; IGNORE_PATTERNS_HASH_LEN];
49
33
50 /// Must match the constant of the same name in
34 /// Must match constants of the same names in `mercurial/dirstateutils/v2.py`
51 /// `mercurial/dirstateutils/docket.py`
52 const TREE_METADATA_SIZE: usize = 44;
35 const TREE_METADATA_SIZE: usize = 44;
36 const NODE_SIZE: usize = 44;
53
37
54 /// Make sure that size-affecting changes are made knowingly
38 /// Make sure that size-affecting changes are made knowingly
55 #[allow(unused)]
39 #[allow(unused)]
56 fn static_assert_size_of() {
40 fn static_assert_size_of() {
57 let _ = std::mem::transmute::<TreeMetadata, [u8; TREE_METADATA_SIZE]>;
41 let _ = std::mem::transmute::<TreeMetadata, [u8; TREE_METADATA_SIZE]>;
58 let _ = std::mem::transmute::<DocketHeader, [u8; TREE_METADATA_SIZE + 81]>;
42 let _ = std::mem::transmute::<DocketHeader, [u8; TREE_METADATA_SIZE + 81]>;
59 let _ = std::mem::transmute::<Node, [u8; 43]>;
43 let _ = std::mem::transmute::<Node, [u8; NODE_SIZE]>;
60 }
44 }
61
45
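The `static_assert_size_of` function above uses a compile-time trick worth spelling out: merely naming `std::mem::transmute::<T, [u8; N]>` (without calling it) forces the compiler to verify that `size_of::<T>() == N`, so any change that resizes an on-disk struct becomes a build error. A self-contained illustration:

#[repr(C)]
#[allow(dead_code)] // fields exist only to give the struct its size
struct Header {
    magic: [u8; 4],
    version: u32,
}

#[allow(unused)]
fn static_assert_size_of() {
    // Compiles only while `Header` is exactly 8 bytes; resizing the
    // struct without updating this line becomes a build error.
    let _ = std::mem::transmute::<Header, [u8; 8]>;
}

fn main() {
    assert_eq!(std::mem::size_of::<Header>(), 8);
}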
62 // Must match `HEADER` in `mercurial/dirstateutils/docket.py`
46 // Must match `HEADER` in `mercurial/dirstateutils/docket.py`
@@ -67,11 +51,11 b' struct DocketHeader {'
67 parent_1: [u8; STORED_NODE_ID_BYTES],
51 parent_1: [u8; STORED_NODE_ID_BYTES],
68 parent_2: [u8; STORED_NODE_ID_BYTES],
52 parent_2: [u8; STORED_NODE_ID_BYTES],
69
53
54 metadata: TreeMetadata,
55
70 /// Counted in bytes
56 /// Counted in bytes
71 data_size: Size,
57 data_size: Size,
72
58
73 metadata: TreeMetadata,
74
75 uuid_size: u8,
59 uuid_size: u8,
76 }
60 }
77
61
@@ -80,44 +64,24 b" pub struct Docket<'on_disk> {"
80 uuid: &'on_disk [u8],
64 uuid: &'on_disk [u8],
81 }
65 }
82
66
67 /// Fields are documented in the *Tree metadata in the docket file*
68 /// section of `mercurial/helptext/internals/dirstate-v2.txt`
83 #[derive(BytesCast)]
69 #[derive(BytesCast)]
84 #[repr(C)]
70 #[repr(C)]
85 struct TreeMetadata {
71 struct TreeMetadata {
86 root_nodes: ChildNodes,
72 root_nodes: ChildNodes,
87 nodes_with_entry_count: Size,
73 nodes_with_entry_count: Size,
88 nodes_with_copy_source_count: Size,
74 nodes_with_copy_source_count: Size,
89
90 /// How many bytes of this data file are not used anymore
91 unreachable_bytes: Size,
75 unreachable_bytes: Size,
92
93 /// Current version always sets these bytes to zero when creating or
94 /// updating a dirstate. Future versions could assign some bits to signal
95 /// for example "the version that last wrote/updated this dirstate did so
96 /// in such and such way that can be relied on by versions that know to."
97 unused: [u8; 4],
76 unused: [u8; 4],
98
77
99 /// If non-zero, a hash of ignore files that were used for some previous
78 /// See *Optional hash of ignore patterns* section of
100 /// run of the `status` algorithm.
79 /// `mercurial/helptext/internals/dirstate-v2.txt`
101 ///
102 /// We define:
103 ///
104 /// * "Root" ignore files are `.hgignore` at the root of the repository if
105 /// it exists, and files from `ui.ignore.*` config. This set of files is
106 /// then sorted by the string representation of their path.
107 /// * The "expanded contents" of an ignore files is the byte string made
108 /// by concatenating its contents with the "expanded contents" of other
109 /// files included with `include:` or `subinclude:` files, in inclusion
110 /// order. This definition is recursive, as included files can
111 /// themselves include more files.
112 ///
113 /// This hash is defined as the SHA-1 of the concatenation (in sorted
114 /// order) of the "expanded contents" of each "root" ignore file.
115 /// (Note that computing this does not require actually concatenating byte
116 /// strings into contiguous memory, instead SHA-1 hashing can be done
117 /// incrementally.)
118 ignore_patterns_hash: IgnorePatternsHash,
80 ignore_patterns_hash: IgnorePatternsHash,
119 }
81 }
120
82
83 /// Fields are documented in the *The data file format*
84 /// section of `mercurial/helptext/internals/dirstate-v2.txt`
121 #[derive(BytesCast)]
85 #[derive(BytesCast)]
122 #[repr(C)]
86 #[repr(C)]
123 pub(super) struct Node {
87 pub(super) struct Node {
@@ -130,59 +94,38 b' pub(super) struct Node {'
130 children: ChildNodes,
94 children: ChildNodes,
131 pub(super) descendants_with_entry_count: Size,
95 pub(super) descendants_with_entry_count: Size,
132 pub(super) tracked_descendants_count: Size,
96 pub(super) tracked_descendants_count: Size,
133
97 flags: U16Be,
134 /// Depending on the value of `state`:
98 size: U32Be,
135 ///
99 mtime: PackedTruncatedTimestamp,
136 /// * A null byte: `data` is not used.
137 ///
138 /// * A `n`, `a`, `r`, or `m` ASCII byte: `state` and `data` together
139 /// represent a dirstate entry like in the v1 format.
140 ///
141 /// * A `d` ASCII byte: the bytes of `data` should instead be interpreted
142 /// as the `Timestamp` for the mtime of a cached directory.
143 ///
144 /// The presence of this state means that at some point, this path in
145 /// the working directory was observed:
146 ///
147 /// - To be a directory
148 /// - With the modification time as given by `Timestamp`
149 /// - That timestamp was already strictly in the past when observed,
150 /// meaning that later changes cannot happen in the same clock tick
151 /// and must cause a different modification time (unless the system
152 /// clock jumps back and we get unlucky, which is not impossible but
153 /// but deemed unlikely enough).
154 /// - All direct children of this directory (as returned by
155 /// `std::fs::read_dir`) either have a corresponding dirstate node, or
156 /// are ignored by ignore patterns whose hash is in
157 /// `TreeMetadata::ignore_patterns_hash`.
158 ///
159 /// This means that if `std::fs::symlink_metadata` later reports the
160 /// same modification time and ignored patterns haven’t changed, a run
161 /// of status that is not listing ignored files can skip calling
162 /// `std::fs::read_dir` again for this directory, iterate child
163 /// dirstate nodes instead.
164 state: u8,
165 data: Entry,
166 }
100 }
167
101
168 #[derive(BytesCast, Copy, Clone)]
102 bitflags! {
169 #[repr(C)]
103 #[repr(C)]
170 struct Entry {
104 struct Flags: u16 {
171 mode: I32Be,
105 const WDIR_TRACKED = 1 << 0;
172 mtime: I32Be,
106 const P1_TRACKED = 1 << 1;
173 size: I32Be,
107 const P2_INFO = 1 << 2;
108 const MODE_EXEC_PERM = 1 << 3;
109 const MODE_IS_SYMLINK = 1 << 4;
110 const HAS_FALLBACK_EXEC = 1 << 5;
111 const FALLBACK_EXEC = 1 << 6;
112 const HAS_FALLBACK_SYMLINK = 1 << 7;
113 const FALLBACK_SYMLINK = 1 << 8;
114 const EXPECTED_STATE_IS_MODIFIED = 1 << 9;
115 const HAS_MODE_AND_SIZE = 1 << 10;
116 const HAS_MTIME = 1 << 11;
117 const MTIME_SECOND_AMBIGUOUS = 1 << 12;
118 const DIRECTORY = 1 << 13;
119 const ALL_UNKNOWN_RECORDED = 1 << 14;
120 const ALL_IGNORED_RECORDED = 1 << 15;
121 }
174 }
122 }
175
123
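The `Flags` definition above relies on the `bitflags` crate. The sketch below shows the same 16-bit packing with plain constants, including the `has_entry` test used later (`WDIR_TRACKED | P1_TRACKED | P2_INFO`) and why truncating unknown future bits, as `Flags::from_bits_truncate` does, is safe:

// Std-only mirror of a subset of the bitfield above; constants match
// the bit positions shown in the diff.
const WDIR_TRACKED: u16 = 1 << 0;
const P1_TRACKED: u16 = 1 << 1;
const P2_INFO: u16 = 1 << 2;
const HAS_MTIME: u16 = 1 << 11;

fn has_entry(flags: u16) -> bool {
    // Mirrors `Node::has_entry`: any of these three bits marks a file
    // entry (as opposed to a directory-only node).
    flags & (WDIR_TRACKED | P1_TRACKED | P2_INFO) != 0
}

fn main() {
    let flags = WDIR_TRACKED | HAS_MTIME;
    assert!(has_entry(flags));
    assert!(!has_entry(0)); // directory-only node
    // Bits this reader does not know about are simply ignored, which is
    // what `Flags::from_bits_truncate` achieves in the real code.
    assert!(has_entry(flags | 1 << 15));
}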
176 /// Duration since the Unix epoch
124 /// Duration since the Unix epoch
177 #[derive(BytesCast, Copy, Clone, PartialEq)]
125 #[derive(BytesCast, Copy, Clone)]
178 #[repr(C)]
126 #[repr(C)]
179 pub(super) struct Timestamp {
127 struct PackedTruncatedTimestamp {
180 seconds: I64Be,
128 truncated_seconds: U32Be,
181
182 /// In `0 .. 1_000_000_000`.
183 ///
184 /// This timestamp is later or earlier than `(seconds, 0)` by this many
185 /// nanoseconds, if `seconds` is non-negative or negative, respectively.
186 nanoseconds: U32Be,
129 nanoseconds: U32Be,
187 }
130 }
188
131
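For reference, here is a std-only sketch of how such a packed timestamp could serialize to its 8 big-endian bytes: 4 bytes of seconds truncated to the lower 31 bits, then 4 bytes of sub-second nanoseconds. The layout matches the struct above; the helper names are illustrative:

use std::convert::TryInto;

const RANGE_MASK_31B: u64 = 0x7FFF_FFFF;

fn pack(seconds: u64, nanoseconds: u32) -> [u8; 8] {
    // Keep only the lower 31 bits of the seconds, as in the rangemask
    // used elsewhere in this series.
    let truncated = (seconds & RANGE_MASK_31B) as u32;
    let mut out = [0u8; 8];
    out[..4].copy_from_slice(&truncated.to_be_bytes());
    out[4..].copy_from_slice(&nanoseconds.to_be_bytes());
    out
}

fn unpack(bytes: [u8; 8]) -> (u32, u32) {
    let secs = u32::from_be_bytes(bytes[..4].try_into().unwrap());
    let nanos = u32::from_be_bytes(bytes[4..].try_into().unwrap());
    (secs, nanos)
}

fn main() {
    let packed = pack(1_700_000_000, 123_456_789);
    assert_eq!(unpack(packed), (1_700_000_000, 123_456_789));
}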
@@ -265,7 +208,7 b" impl<'on_disk> Docket<'on_disk> {"
265 }
208 }
266
209
267 pub fn data_filename(&self) -> String {
210 pub fn data_filename(&self) -> String {
268 String::from_utf8(format_bytes!(b"dirstate.{}.d", self.uuid)).unwrap()
211 String::from_utf8(format_bytes!(b"dirstate.{}", self.uuid)).unwrap()
269 }
212 }
270 }
213 }
271
214
@@ -361,62 +304,112 b' impl Node {'
361 })
304 })
362 }
305 }
363
306
307 fn flags(&self) -> Flags {
308 Flags::from_bits_truncate(self.flags.get())
309 }
310
311 fn has_entry(&self) -> bool {
312 self.flags().intersects(
313 Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO,
314 )
315 }
316
364 pub(super) fn node_data(
317 pub(super) fn node_data(
365 &self,
318 &self,
366 ) -> Result<dirstate_map::NodeData, DirstateV2ParseError> {
319 ) -> Result<dirstate_map::NodeData, DirstateV2ParseError> {
367 let entry = |state| {
320 if self.has_entry() {
368 dirstate_map::NodeData::Entry(self.entry_with_given_state(state))
321 Ok(dirstate_map::NodeData::Entry(self.assume_entry()?))
369 };
322 } else if let Some(mtime) = self.cached_directory_mtime()? {
323 Ok(dirstate_map::NodeData::CachedDirectory { mtime })
324 } else {
325 Ok(dirstate_map::NodeData::None)
326 }
327 }
370
328
371 match self.state {
329 pub(super) fn cached_directory_mtime(
372 b'\0' => Ok(dirstate_map::NodeData::None),
330 &self,
373 b'd' => Ok(dirstate_map::NodeData::CachedDirectory {
331 ) -> Result<Option<TruncatedTimestamp>, DirstateV2ParseError> {
374 mtime: *self.data.as_timestamp(),
332 // For now we do not have code to handle the absence of
375 }),
333 // ALL_UNKNOWN_RECORDED, so we ignore the mtime if the flag is
376 b'n' => Ok(entry(EntryState::Normal)),
334 // unset.
377 b'a' => Ok(entry(EntryState::Added)),
335 if self.flags().contains(Flags::DIRECTORY)
378 b'r' => Ok(entry(EntryState::Removed)),
336 && self.flags().contains(Flags::HAS_MTIME)
379 b'm' => Ok(entry(EntryState::Merged)),
337 && self.flags().contains(Flags::ALL_UNKNOWN_RECORDED)
380 _ => Err(DirstateV2ParseError),
338 {
339 Ok(Some(self.mtime.try_into()?))
340 } else {
341 Ok(None)
381 }
342 }
382 }
343 }
383
344
384 pub(super) fn cached_directory_mtime(&self) -> Option<&Timestamp> {
345 fn synthesize_unix_mode(&self) -> u32 {
385 if self.state == b'd' {
346 let file_type = if self.flags().contains(Flags::MODE_IS_SYMLINK) {
386 Some(self.data.as_timestamp())
347 libc::S_IFLNK
387 } else {
348 } else {
388 None
349 libc::S_IFREG
389 }
350 };
351 let permissions = if self.flags().contains(Flags::MODE_EXEC_PERM) {
352 0o755
353 } else {
354 0o644
355 };
356 file_type | permissions
390 }
357 }
391
358
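Since dirstate-v2 stores only the `MODE_IS_SYMLINK` and `MODE_EXEC_PERM` bits, `synthesize_unix_mode` above reconstitutes a plausible full Unix mode from them. A std-only sketch with the libc constants written out numerically:

const S_IFLNK: u32 = 0o120000; // symbolic link
const S_IFREG: u32 = 0o100000; // regular file

fn synthesize_unix_mode(is_symlink: bool, exec_perm: bool) -> u32 {
    let file_type = if is_symlink { S_IFLNK } else { S_IFREG };
    let permissions = if exec_perm { 0o755 } else { 0o644 };
    file_type | permissions
}

fn main() {
    assert_eq!(synthesize_unix_mode(false, false), 0o100644);
    assert_eq!(synthesize_unix_mode(false, true), 0o100755);
    assert_eq!(synthesize_unix_mode(true, false), 0o120644);
}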
392 pub(super) fn state(
359 fn assume_entry(&self) -> Result<DirstateEntry, DirstateV2ParseError> {
393 &self,
360 // TODO: convert through raw bits instead?
394 ) -> Result<Option<EntryState>, DirstateV2ParseError> {
361 let wdir_tracked = self.flags().contains(Flags::WDIR_TRACKED);
395 match self.state {
362 let p1_tracked = self.flags().contains(Flags::P1_TRACKED);
396 b'\0' | b'd' => Ok(None),
363 let p2_info = self.flags().contains(Flags::P2_INFO);
397 b'n' => Ok(Some(EntryState::Normal)),
364 let mode_size = if self.flags().contains(Flags::HAS_MODE_AND_SIZE)
398 b'a' => Ok(Some(EntryState::Added)),
365 && !self.flags().contains(Flags::EXPECTED_STATE_IS_MODIFIED)
399 b'r' => Ok(Some(EntryState::Removed)),
366 {
400 b'm' => Ok(Some(EntryState::Merged)),
367 Some((self.synthesize_unix_mode(), self.size.into()))
401 _ => Err(DirstateV2ParseError),
368 } else {
402 }
369 None
403 }
370 };
404
371 let mtime = if self.flags().contains(Flags::HAS_MTIME)
405 fn entry_with_given_state(&self, state: EntryState) -> DirstateEntry {
372 && !self.flags().contains(Flags::DIRECTORY)
406 DirstateEntry {
373 && !self.flags().contains(Flags::EXPECTED_STATE_IS_MODIFIED)
407 state,
374 // The current code is not able to do the more subtle comparison that the
408 mode: self.data.mode.get(),
375 // MTIME_SECOND_AMBIGUOUS requires. So we ignore the mtime
409 mtime: self.data.mtime.get(),
376 && !self.flags().contains(Flags::MTIME_SECOND_AMBIGUOUS)
410 size: self.data.size.get(),
377 {
411 }
378 Some(self.mtime.try_into()?)
379 } else {
380 None
381 };
382 let fallback_exec = if self.flags().contains(Flags::HAS_FALLBACK_EXEC)
383 {
384 Some(self.flags().contains(Flags::FALLBACK_EXEC))
385 } else {
386 None
387 };
388 let fallback_symlink =
389 if self.flags().contains(Flags::HAS_FALLBACK_SYMLINK) {
390 Some(self.flags().contains(Flags::FALLBACK_SYMLINK))
391 } else {
392 None
393 };
394 Ok(DirstateEntry::from_v2_data(
395 wdir_tracked,
396 p1_tracked,
397 p2_info,
398 mode_size,
399 mtime,
400 fallback_exec,
401 fallback_symlink,
402 ))
412 }
403 }
413
404
414 pub(super) fn entry(
405 pub(super) fn entry(
415 &self,
406 &self,
416 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
407 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
417 Ok(self
408 if self.has_entry() {
418 .state()?
409 Ok(Some(self.assume_entry()?))
419 .map(|state| self.entry_with_given_state(state)))
410 } else {
411 Ok(None)
412 }
420 }
413 }
421
414
422 pub(super) fn children<'on_disk>(
415 pub(super) fn children<'on_disk>(
@@ -442,57 +435,53 b' impl Node {'
442 tracked_descendants_count: self.tracked_descendants_count.get(),
435 tracked_descendants_count: self.tracked_descendants_count.get(),
443 })
436 })
444 }
437 }
445 }
446
438
447 impl Entry {
439 fn from_dirstate_entry(
448 fn from_timestamp(timestamp: Timestamp) -> Self {
440 entry: &DirstateEntry,
449 // Safety: both types implement the `ByteCast` trait, so we could
441 ) -> (Flags, U32Be, PackedTruncatedTimestamp) {
450 // safely use `as_bytes` and `from_bytes` to do this conversion. Using
442 let (
451 // `transmute` instead makes the compiler check that the two types
443 wdir_tracked,
452 // have the same size, which eliminates the error case of
444 p1_tracked,
453 // `from_bytes`.
445 p2_info,
454 unsafe { std::mem::transmute::<Timestamp, Entry>(timestamp) }
446 mode_size_opt,
455 }
447 mtime_opt,
456
448 fallback_exec,
457 fn as_timestamp(&self) -> &Timestamp {
449 fallback_symlink,
458 // Safety: same as above in `from_timestamp`
450 ) = entry.v2_data();
459 unsafe { &*(self as *const Entry as *const Timestamp) }
451 // TODO: convert through raw flag bits instead?
460 }
452 let mut flags = Flags::empty();
461 }
453 flags.set(Flags::WDIR_TRACKED, wdir_tracked);
462
454 flags.set(Flags::P1_TRACKED, p1_tracked);
463 impl Timestamp {
455 flags.set(Flags::P2_INFO, p2_info);
464 pub fn seconds(&self) -> i64 {
456 let size = if let Some((m, s)) = mode_size_opt {
465 self.seconds.get()
457 let exec_perm = m & libc::S_IXUSR != 0;
466 }
458 let is_symlink = m & libc::S_IFMT == libc::S_IFLNK;
467 }
459 flags.set(Flags::MODE_EXEC_PERM, exec_perm);
468
460 flags.set(Flags::MODE_IS_SYMLINK, is_symlink);
469 impl From<SystemTime> for Timestamp {
461 flags.insert(Flags::HAS_MODE_AND_SIZE);
470 fn from(system_time: SystemTime) -> Self {
462 s.into()
471 let (secs, nanos) = match system_time.duration_since(UNIX_EPOCH) {
463 } else {
472 Ok(duration) => {
464 0.into()
473 (duration.as_secs() as i64, duration.subsec_nanos())
465 };
466 let mtime = if let Some(m) = mtime_opt {
467 flags.insert(Flags::HAS_MTIME);
468 m.into()
469 } else {
470 PackedTruncatedTimestamp::null()
471 };
472 if let Some(f_exec) = fallback_exec {
473 flags.insert(Flags::HAS_FALLBACK_EXEC);
474 if f_exec {
475 flags.insert(Flags::FALLBACK_EXEC);
474 }
476 }
475 Err(error) => {
476 let negative = error.duration();
477 (-(negative.as_secs() as i64), negative.subsec_nanos())
478 }
479 };
480 Timestamp {
481 seconds: secs.into(),
482 nanoseconds: nanos.into(),
483 }
477 }
484 }
478 if let Some(f_symlink) = fallback_symlink {
485 }
479 flags.insert(Flags::HAS_FALLBACK_SYMLINK);
486
480 if f_symlink {
487 impl From<&'_ Timestamp> for SystemTime {
481 flags.insert(Flags::FALLBACK_SYMLINK);
488 fn from(timestamp: &'_ Timestamp) -> Self {
482 }
489 let secs = timestamp.seconds.get();
490 let nanos = timestamp.nanoseconds.get();
491 if secs >= 0 {
492 UNIX_EPOCH + Duration::new(secs as u64, nanos)
493 } else {
494 UNIX_EPOCH - Duration::new((-secs) as u64, nanos)
495 }
483 }
484 (flags, size, mtime)
496 }
485 }
497 }
486 }
498
487
@@ -543,8 +532,8 b" pub(crate) fn for_each_tracked_path<'on_"
543 f: &mut impl FnMut(&'on_disk HgPath),
532 f: &mut impl FnMut(&'on_disk HgPath),
544 ) -> Result<(), DirstateV2ParseError> {
533 ) -> Result<(), DirstateV2ParseError> {
545 for node in read_nodes(on_disk, nodes)? {
534 for node in read_nodes(on_disk, nodes)? {
546 if let Some(state) = node.state()? {
535 if let Some(entry) = node.entry()? {
547 if state.is_tracked() {
536 if entry.state().is_tracked() {
548 f(node.full_path(on_disk)?)
537 f(node.full_path(on_disk)?)
549 }
538 }
550 }
539 }
@@ -638,25 +627,31 b" impl Writer<'_, '_> {"
638 };
627 };
639 on_disk_nodes.push(match node {
628 on_disk_nodes.push(match node {
640 NodeRef::InMemory(path, node) => {
629 NodeRef::InMemory(path, node) => {
641 let (state, data) = match &node.data {
630 let (flags, size, mtime) = match &node.data {
642 dirstate_map::NodeData::Entry(entry) => (
631 dirstate_map::NodeData::Entry(entry) => {
643 entry.state.into(),
632 Node::from_dirstate_entry(entry)
644 Entry {
633 }
645 mode: entry.mode.into(),
634 dirstate_map::NodeData::CachedDirectory { mtime } => (
646 mtime: entry.mtime.into(),
635 // we currently never set a mtime if unknown files
647 size: entry.size.into(),
636 // are present.
648 },
637 // So if we have a mtime for a directory, we know
638 // there are no unknown
639 // files and we
640 // blindly set ALL_UNKNOWN_RECORDED.
641 //
642 // We never set ALL_IGNORED_RECORDED since we
643 // don't track that case
644 // currently.
645 Flags::DIRECTORY
646 | Flags::HAS_MTIME
647 | Flags::ALL_UNKNOWN_RECORDED,
648 0.into(),
649 (*mtime).into(),
649 ),
650 ),
650 dirstate_map::NodeData::CachedDirectory { mtime } => {
651 (b'd', Entry::from_timestamp(*mtime))
652 }
653 dirstate_map::NodeData::None => (
651 dirstate_map::NodeData::None => (
654 b'\0',
652 Flags::DIRECTORY,
655 Entry {
653 0.into(),
656 mode: 0.into(),
654 PackedTruncatedTimestamp::null(),
657 mtime: 0.into(),
658 size: 0.into(),
659 },
660 ),
655 ),
661 };
656 };
662 Node {
657 Node {
@@ -673,8 +668,9 b" impl Writer<'_, '_> {"
673 tracked_descendants_count: node
668 tracked_descendants_count: node
674 .tracked_descendants_count
669 .tracked_descendants_count
675 .into(),
670 .into(),
676 state,
671 flags: flags.bits().into(),
677 data,
672 size,
673 mtime,
678 }
674 }
679 }
675 }
680 NodeRef::OnDisk(node) => Node {
676 NodeRef::OnDisk(node) => Node {
@@ -758,3 +754,33 b' fn path_len_from_usize(x: usize) -> Path'
758 .expect("dirstate-v2 path length overflow")
754 .expect("dirstate-v2 path length overflow")
759 .into()
755 .into()
760 }
756 }
757
758 impl From<TruncatedTimestamp> for PackedTruncatedTimestamp {
759 fn from(timestamp: TruncatedTimestamp) -> Self {
760 Self {
761 truncated_seconds: timestamp.truncated_seconds().into(),
762 nanoseconds: timestamp.nanoseconds().into(),
763 }
764 }
765 }
766
767 impl TryFrom<PackedTruncatedTimestamp> for TruncatedTimestamp {
768 type Error = DirstateV2ParseError;
769
770 fn try_from(
771 timestamp: PackedTruncatedTimestamp,
772 ) -> Result<Self, Self::Error> {
773 Self::from_already_truncated(
774 timestamp.truncated_seconds.get(),
775 timestamp.nanoseconds.get(),
776 )
777 }
778 }
779 impl PackedTruncatedTimestamp {
780 fn null() -> Self {
781 Self {
782 truncated_seconds: 0.into(),
783 nanoseconds: 0.into(),
784 }
785 }
786 }
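The asymmetry between the two conversions above is deliberate: packing an in-memory timestamp cannot fail, but the packed form comes from an untrusted data file, so unpacking returns a `Result`. A sketch of the parse side, assuming the validation is a nanosecond range check (the actual check lives in `TruncatedTimestamp::from_already_truncated`):

#[derive(Debug)]
struct ParseError;

struct Truncated {
    secs: u32,
    nanos: u32,
}

fn from_already_truncated(secs: u32, nanos: u32) -> Result<Truncated, ParseError> {
    // Assumed validation: the sub-second part must be a real nanosecond
    // count; anything else means a corrupt or hostile data file.
    if nanos < 1_000_000_000 {
        Ok(Truncated { secs, nanos })
    } else {
        Err(ParseError)
    }
}

fn main() {
    let ok = from_already_truncated(42, 1_000).unwrap();
    assert_eq!((ok.secs, ok.nanos), (42, 1_000));
    assert!(from_already_truncated(42, 2_000_000_000).is_err());
}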
@@ -1,11 +1,9 b''
1 use cpython::PyBytes;
1 use super::dirstate_map::DirstateMap;
2 use cpython::Python;
2 use stable_deref_trait::StableDeref;
3 use hg::dirstate_tree::dirstate_map::DirstateMap;
3 use std::ops::Deref;
4 use hg::DirstateError;
5 use hg::DirstateParents;
6
4
7 /// Keep a `DirstateMap<'on_disk>` next to the `on_disk` buffer that it
5 /// Keep a `DirstateMap<'on_disk>` next to the `on_disk` buffer that it
8 /// borrows. This is similar to the owning-ref crate.
6 /// borrows.
9 ///
7 ///
10 /// This is similar to [`OwningRef`] which is more limited because it
8 /// This is similar to [`OwningRef`] which is more limited because it
11 /// represents exactly one `&T` reference next to the value it borrows, as
9 /// represents exactly one `&T` reference next to the value it borrows, as
@@ -13,11 +11,11 b' use hg::DirstateParents;'
13 /// arbitrarily-nested data structures.
11 /// arbitrarily-nested data structures.
14 ///
12 ///
15 /// [`OwningRef`]: https://docs.rs/owning_ref/0.4.1/owning_ref/struct.OwningRef.html
13 /// [`OwningRef`]: https://docs.rs/owning_ref/0.4.1/owning_ref/struct.OwningRef.html
16 pub(super) struct OwningDirstateMap {
14 pub struct OwningDirstateMap {
17 /// Owned handle to a bytes buffer with a stable address.
15 /// Owned handle to a bytes buffer with a stable address.
18 ///
16 ///
19 /// See <https://docs.rs/owning_ref/0.4.1/owning_ref/trait.StableAddress.html>.
17 /// See <https://docs.rs/owning_ref/0.4.1/owning_ref/trait.StableAddress.html>.
20 on_disk: PyBytes,
18 on_disk: Box<dyn Deref<Target = [u8]> + Send>,
21
19
22 /// Pointer for `Box<DirstateMap<'on_disk>>`, typed-erased because the
20 /// Pointer for `Box<DirstateMap<'on_disk>>`, typed-erased because the
23 /// language cannot represent a lifetime referencing a sibling field.
21 /// language cannot represent a lifetime referencing a sibling field.
@@ -28,12 +26,13 b' pub(super) struct OwningDirstateMap {'
28 }
26 }
29
27
30 impl OwningDirstateMap {
28 impl OwningDirstateMap {
31 pub fn new_v1(
29 pub fn new_empty<OnDisk>(on_disk: OnDisk) -> Self
32 py: Python,
30 where
33 on_disk: PyBytes,
31 OnDisk: Deref<Target = [u8]> + StableDeref + Send + 'static,
34 ) -> Result<(Self, Option<DirstateParents>), DirstateError> {
32 {
35 let bytes: &'_ [u8] = on_disk.data(py);
33 let on_disk = Box::new(on_disk);
36 let (map, parents) = DirstateMap::new_v1(bytes)?;
34 let bytes: &'_ [u8] = &on_disk;
35 let map = DirstateMap::empty(bytes);
37
36
38 // Like in `bytes` above, this `'_` lifetime parameter borrows from
37 // Like in `bytes` above, this `'_` lifetime parameter borrows from
39 // the bytes buffer owned by `on_disk`.
38 // the bytes buffer owned by `on_disk`.
@@ -42,30 +41,12 b' impl OwningDirstateMap {'
42 // Erase the pointed type entirely in order to erase the lifetime.
41 // Erase the pointed type entirely in order to erase the lifetime.
43 let ptr: *mut () = ptr.cast();
42 let ptr: *mut () = ptr.cast();
44
43
45 Ok((Self { on_disk, ptr }, parents))
44 Self { on_disk, ptr }
46 }
45 }
47
46
48 pub fn new_v2(
47 pub fn get_pair_mut<'a>(
49 py: Python,
48 &'a mut self,
50 on_disk: PyBytes,
49 ) -> (&'a [u8], &'a mut DirstateMap<'a>) {
51 data_size: usize,
52 tree_metadata: PyBytes,
53 ) -> Result<Self, DirstateError> {
54 let bytes: &'_ [u8] = on_disk.data(py);
55 let map =
56 DirstateMap::new_v2(bytes, data_size, tree_metadata.data(py))?;
57
58 // Like in `bytes` above, this `'_` lifetime parameter borrows from
59 // the bytes buffer owned by `on_disk`.
60 let ptr: *mut DirstateMap<'_> = Box::into_raw(Box::new(map));
61
62 // Erase the pointed type entirely in order to erase the lifetime.
63 let ptr: *mut () = ptr.cast();
64
65 Ok(Self { on_disk, ptr })
66 }
67
68 pub fn get_mut<'a>(&'a mut self) -> &'a mut DirstateMap<'a> {
69 // SAFETY: We cast the type-erased pointer back to the same type it had
50 // SAFETY: We cast the type-erased pointer back to the same type it had
70 // in `new`, except with a different lifetime parameter. This time we
51 // in `new`, except with a different lifetime parameter. This time we
71 // connect the lifetime to that of `self`. This cast is valid because
52 // connect the lifetime to that of `self`. This cast is valid because
@@ -76,14 +57,22 b' impl OwningDirstateMap {'
76 // SAFETY: we dereference that pointer, connecting the lifetime of the
57 // SAFETY: we dereference that pointer, connecting the lifetime of the
77 // new `&mut` to that of `self`. This is valid because the
58 // new `&mut` to that of `self`. This is valid because the
78 // raw pointer is to a boxed value, and `self` owns that box.
59 // raw pointer is to a boxed value, and `self` owns that box.
79 unsafe { &mut *ptr }
60 (&self.on_disk, unsafe { &mut *ptr })
80 }
61 }
81
62
82 pub fn get<'a>(&'a self) -> &'a DirstateMap<'a> {
63 pub fn get_map_mut<'a>(&'a mut self) -> &'a mut DirstateMap<'a> {
64 self.get_pair_mut().1
65 }
66
67 pub fn get_map<'a>(&'a self) -> &'a DirstateMap<'a> {
83 // SAFETY: same reasoning as in `get_mut` above.
68 // SAFETY: same reasoning as in `get_mut` above.
84 let ptr: *mut DirstateMap<'a> = self.ptr.cast();
69 let ptr: *mut DirstateMap<'a> = self.ptr.cast();
85 unsafe { &*ptr }
70 unsafe { &*ptr }
86 }
71 }
72
73 pub fn on_disk<'a>(&'a self) -> &'a [u8] {
74 &self.on_disk
75 }
87 }
76 }
88
77
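For readers unfamiliar with the pattern in this file: `OwningDirstateMap` keeps a buffer and a parsed structure that borrows from it inside the same struct, which safe Rust cannot express directly. A deliberately simplified, self-contained sketch of the same idea (the real code adds the `StableDeref` bound and a `Send` impl):

struct Parsed<'a> {
    first_byte: Option<&'a u8>,
}

struct Owning {
    on_disk: Box<[u8]>, // stable heap address, like the StableDeref bound
    // Erased `*mut Parsed<'on_disk>`; the lifetime cannot be named
    // because it refers to a sibling field.
    ptr: *mut (),
}

impl Owning {
    fn new(on_disk: Box<[u8]>) -> Self {
        let bytes: &[u8] = &on_disk;
        // SAFETY: the heap allocation behind `on_disk` has a stable
        // address and outlives `ptr`; `Drop` below enforces the order.
        let bytes: &'static [u8] = unsafe { std::mem::transmute(bytes) };
        let parsed = Box::new(Parsed { first_byte: bytes.first() });
        Self { on_disk, ptr: Box::into_raw(parsed).cast() }
    }

    fn get(&self) -> &Parsed<'_> {
        // SAFETY: `ptr` came from `Box::into_raw` in `new`, and the
        // buffer it borrows is still owned by `self`.
        unsafe { &*self.ptr.cast::<Parsed<'_>>() }
    }
}

impl Drop for Owning {
    fn drop(&mut self) {
        // Drop the parsed view before the buffer it borrows.
        unsafe { drop(Box::from_raw(self.ptr.cast::<Parsed<'_>>())) };
    }
}

fn main() {
    let owning = Owning::new(vec![7u8, 8, 9].into_boxed_slice());
    assert_eq!(owning.get().first_byte.copied(), Some(7));
}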
89 impl Drop for OwningDirstateMap {
78 impl Drop for OwningDirstateMap {
@@ -105,13 +94,12 b' impl Drop for OwningDirstateMap {'
105 fn _static_assert_is_send<T: Send>() {}
94 fn _static_assert_is_send<T: Send>() {}
106
95
107 fn _static_assert_fields_are_send() {
96 fn _static_assert_fields_are_send() {
108 _static_assert_is_send::<PyBytes>();
109 _static_assert_is_send::<Box<DirstateMap<'_>>>();
97 _static_assert_is_send::<Box<DirstateMap<'_>>>();
110 }
98 }
111
99
112 // SAFETY: we don’t get this impl implicitly because `*mut (): !Send` because
100 // SAFETY: we don’t get this impl implicitly because `*mut (): !Send` because
113 // thread-safety of raw pointers is unknown in the general case. However this
101 // thread-safety of raw pointers is unknown in the general case. However this
114 // particular raw pointer represents a `Box<DirstateMap<'on_disk>>` that we
102 // particular raw pointer represents a `Box<DirstateMap<'on_disk>>` that we
115 // own. Since that `Box` and `PyBytes` are both `Send` as shown in above, it
103 // own. Since that `Box` is `Send` as shown in above, it is sound to mark
116 // is sound to mark this struct as `Send` too.
104 // this struct as `Send` too.
117 unsafe impl Send for OwningDirstateMap {}
105 unsafe impl Send for OwningDirstateMap {}
@@ -1,3 +1,4 b''
1 use crate::dirstate::entry::TruncatedTimestamp;
1 use crate::dirstate::status::IgnoreFnType;
2 use crate::dirstate::status::IgnoreFnType;
2 use crate::dirstate_tree::dirstate_map::BorrowedPath;
3 use crate::dirstate_tree::dirstate_map::BorrowedPath;
3 use crate::dirstate_tree::dirstate_map::ChildNodesRef;
4 use crate::dirstate_tree::dirstate_map::ChildNodesRef;
@@ -5,7 +6,6 b' use crate::dirstate_tree::dirstate_map::'
5 use crate::dirstate_tree::dirstate_map::NodeData;
6 use crate::dirstate_tree::dirstate_map::NodeData;
6 use crate::dirstate_tree::dirstate_map::NodeRef;
7 use crate::dirstate_tree::dirstate_map::NodeRef;
7 use crate::dirstate_tree::on_disk::DirstateV2ParseError;
8 use crate::dirstate_tree::on_disk::DirstateV2ParseError;
8 use crate::dirstate_tree::on_disk::Timestamp;
9 use crate::matchers::get_ignore_function;
9 use crate::matchers::get_ignore_function;
10 use crate::matchers::Matcher;
10 use crate::matchers::Matcher;
11 use crate::utils::files::get_bytes_from_os_string;
11 use crate::utils::files::get_bytes_from_os_string;
@@ -126,7 +126,8 b" struct StatusCommon<'a, 'tree, 'on_disk:"
126 matcher: &'a (dyn Matcher + Sync),
126 matcher: &'a (dyn Matcher + Sync),
127 ignore_fn: IgnoreFnType<'a>,
127 ignore_fn: IgnoreFnType<'a>,
128 outcome: Mutex<DirstateStatus<'on_disk>>,
128 outcome: Mutex<DirstateStatus<'on_disk>>,
129 new_cachable_directories: Mutex<Vec<(Cow<'on_disk, HgPath>, Timestamp)>>,
129 new_cachable_directories:
130 Mutex<Vec<(Cow<'on_disk, HgPath>, TruncatedTimestamp)>>,
130 outdated_cached_directories: Mutex<Vec<Cow<'on_disk, HgPath>>>,
131 outdated_cached_directories: Mutex<Vec<Cow<'on_disk, HgPath>>>,
131
132
132 /// Whether ignore files like `.hgignore` have changed since the previous
133 /// Whether ignore files like `.hgignore` have changed since the previous
@@ -165,7 +166,7 b" impl<'a, 'tree, 'on_disk> StatusCommon<'"
165 dirstate_node: &NodeRef<'tree, 'on_disk>,
166 dirstate_node: &NodeRef<'tree, 'on_disk>,
166 ) -> Result<(), DirstateV2ParseError> {
167 ) -> Result<(), DirstateV2ParseError> {
167 if self.ignore_patterns_have_changed == Some(true)
168 if self.ignore_patterns_have_changed == Some(true)
168 && dirstate_node.cached_directory_mtime().is_some()
169 && dirstate_node.cached_directory_mtime()?.is_some()
169 {
170 {
170 self.outdated_cached_directories.lock().unwrap().push(
171 self.outdated_cached_directories.lock().unwrap().push(
171 dirstate_node
172 dirstate_node
@@ -182,7 +183,7 b" impl<'a, 'tree, 'on_disk> StatusCommon<'"
182 fn can_skip_fs_readdir(
183 fn can_skip_fs_readdir(
183 &self,
184 &self,
184 directory_metadata: Option<&std::fs::Metadata>,
185 directory_metadata: Option<&std::fs::Metadata>,
185 cached_directory_mtime: Option<&Timestamp>,
186 cached_directory_mtime: Option<TruncatedTimestamp>,
186 ) -> bool {
187 ) -> bool {
187 if !self.options.list_unknown && !self.options.list_ignored {
188 if !self.options.list_unknown && !self.options.list_ignored {
188 // All states that we care about listing have corresponding
189 // All states that we care about listing have corresponding
@@ -198,13 +199,14 b" impl<'a, 'tree, 'on_disk> StatusCommon<'"
198 // by a previous run of the `status` algorithm which found this
199 // by a previous run of the `status` algorithm which found this
199 // directory eligible for `read_dir` caching.
200 // directory eligible for `read_dir` caching.
200 if let Some(meta) = directory_metadata {
201 if let Some(meta) = directory_metadata {
201 if let Ok(current_mtime) = meta.modified() {
202 if cached_mtime
202 if current_mtime == cached_mtime.into() {
203 .likely_equal_to_mtime_of(meta)
203 // The mtime of that directory has not changed
204 .unwrap_or(false)
204 // since then, which means that the results of
205 {
205 // `read_dir` should also be unchanged.
206 // The mtime of that directory has not changed
206 return true;
207 // since then, which means that the results of
207 }
208 // `read_dir` should also be unchanged.
209 return true;
208 }
210 }
209 }
211 }
210 }
212 }
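The `read_dir`-skipping logic above can be summarized with a small std-only sketch: if the directory's current mtime still matches the mtime cached by a previous `status` run, its listing cannot have changed, so the walk is skipped. The real code compares truncated timestamps via `likely_equal_to_mtime_of`; this sketch uses `SystemTime` equality instead:

use std::fs;
use std::time::SystemTime;

fn can_skip_readdir(cached: Option<SystemTime>, dir: &fs::Metadata) -> bool {
    match (cached, dir.modified()) {
        (Some(cached_mtime), Ok(current)) => cached_mtime == current,
        _ => false, // no cache, or the platform reports no mtime: walk it
    }
}

fn main() -> std::io::Result<()> {
    let meta = fs::metadata(".")?;
    // Simulate a cache hit: the cached value is the current mtime.
    if let Ok(mtime) = meta.modified() {
        assert!(can_skip_readdir(Some(mtime), &meta));
    }
    assert!(!can_skip_readdir(None, &meta)); // no cache: must read_dir
    Ok(())
}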
@@ -221,7 +223,7 b" impl<'a, 'tree, 'on_disk> StatusCommon<'"
221 directory_hg_path: &BorrowedPath<'tree, 'on_disk>,
223 directory_hg_path: &BorrowedPath<'tree, 'on_disk>,
222 directory_fs_path: &Path,
224 directory_fs_path: &Path,
223 directory_metadata: Option<&std::fs::Metadata>,
225 directory_metadata: Option<&std::fs::Metadata>,
224 cached_directory_mtime: Option<&Timestamp>,
226 cached_directory_mtime: Option<TruncatedTimestamp>,
225 is_at_repo_root: bool,
227 is_at_repo_root: bool,
226 ) -> Result<bool, DirstateV2ParseError> {
228 ) -> Result<bool, DirstateV2ParseError> {
227 if self.can_skip_fs_readdir(directory_metadata, cached_directory_mtime)
229 if self.can_skip_fs_readdir(directory_metadata, cached_directory_mtime)
@@ -362,7 +364,7 b" impl<'a, 'tree, 'on_disk> StatusCommon<'"
362 hg_path,
364 hg_path,
363 fs_path,
365 fs_path,
364 Some(fs_metadata),
366 Some(fs_metadata),
365 dirstate_node.cached_directory_mtime(),
367 dirstate_node.cached_directory_mtime()?,
366 is_at_repo_root,
368 is_at_repo_root,
367 )?;
369 )?;
368 self.maybe_save_directory_mtime(
370 self.maybe_save_directory_mtime(
@@ -394,9 +396,6 b" impl<'a, 'tree, 'on_disk> StatusCommon<'"
394 .push(hg_path.detach_from_tree()),
396 .push(hg_path.detach_from_tree()),
395 EntryState::Normal => self
397 EntryState::Normal => self
396 .handle_normal_file(&dirstate_node, fs_metadata)?,
398 .handle_normal_file(&dirstate_node, fs_metadata)?,
397 // This variant is not used in DirstateMap
398 // nodes
399 EntryState::Unknown => unreachable!(),
400 }
399 }
401 } else {
400 } else {
402 // `node.entry.is_none()` indicates a "directory"
401 // `node.entry.is_none()` indicates a "directory"
@@ -468,16 +467,22 b" impl<'a, 'tree, 'on_disk> StatusCommon<'"
468 //
467 //
469 // We deem this scenario (unlike the previous one) to be
468 // We deem this scenario (unlike the previous one) to be
470 // unlikely enough in practice.
469 // unlikely enough in practice.
471 let timestamp = directory_mtime.into();
470 let truncated = TruncatedTimestamp::from(directory_mtime);
472 let cached = dirstate_node.cached_directory_mtime();
471 let is_up_to_date = if let Some(cached) =
473 if cached != Some(&timestamp) {
472 dirstate_node.cached_directory_mtime()?
473 {
474 cached.likely_equal(truncated)
475 } else {
476 false
477 };
478 if !is_up_to_date {
474 let hg_path = dirstate_node
479 let hg_path = dirstate_node
475 .full_path_borrowed(self.dmap.on_disk)?
480 .full_path_borrowed(self.dmap.on_disk)?
476 .detach_from_tree();
481 .detach_from_tree();
477 self.new_cachable_directories
482 self.new_cachable_directories
478 .lock()
483 .lock()
479 .unwrap()
484 .unwrap()
480 .push((hg_path, timestamp))
485 .push((hg_path, truncated))
481 }
486 }
482 }
487 }
483 }
488 }
@@ -496,9 +501,6 b" impl<'a, 'tree, 'on_disk> StatusCommon<'"
496 fn truncate_u64(value: u64) -> i32 {
501 fn truncate_u64(value: u64) -> i32 {
497 (value & 0x7FFF_FFFF) as i32
502 (value & 0x7FFF_FFFF) as i32
498 }
503 }
499 fn truncate_i64(value: i64) -> i32 {
500 (value & 0x7FFF_FFFF) as i32
501 }
502
504
503 let entry = dirstate_node
505 let entry = dirstate_node
504 .entry()?
506 .entry()?
@@ -506,11 +508,9 b" impl<'a, 'tree, 'on_disk> StatusCommon<'"
506 let hg_path = &dirstate_node.full_path_borrowed(self.dmap.on_disk)?;
508 let hg_path = &dirstate_node.full_path_borrowed(self.dmap.on_disk)?;
507 let mode_changed =
509 let mode_changed =
508 || self.options.check_exec && entry.mode_changed(fs_metadata);
510 || self.options.check_exec && entry.mode_changed(fs_metadata);
509 let size_changed = entry.size != truncate_u64(fs_metadata.len());
511 let size = entry.size();
510 if entry.size >= 0
512 let size_changed = size != truncate_u64(fs_metadata.len());
511 && size_changed
513 if size >= 0 && size_changed && fs_metadata.file_type().is_symlink() {
512 && fs_metadata.file_type().is_symlink()
513 {
514 // issue6456: Size returned may be longer due to encryption
514 // issue6456: Size returned may be longer due to encryption
515 // on EXT-4 fscrypt. TODO maybe only do it on EXT4?
515 // on EXT-4 fscrypt. TODO maybe only do it on EXT4?
516 self.outcome
516 self.outcome
@@ -520,7 +520,7 b" impl<'a, 'tree, 'on_disk> StatusCommon<'"
520 .push(hg_path.detach_from_tree())
520 .push(hg_path.detach_from_tree())
521 } else if dirstate_node.has_copy_source()
521 } else if dirstate_node.has_copy_source()
522 || entry.is_from_other_parent()
522 || entry.is_from_other_parent()
523 || (entry.size >= 0 && (size_changed || mode_changed()))
523 || (size >= 0 && (size_changed || mode_changed()))
524 {
524 {
525 self.outcome
525 self.outcome
526 .lock()
526 .lock()
@@ -528,10 +528,17 b" impl<'a, 'tree, 'on_disk> StatusCommon<'"
528 .modified
528 .modified
529 .push(hg_path.detach_from_tree())
529 .push(hg_path.detach_from_tree())
530 } else {
530 } else {
531 let mtime = mtime_seconds(fs_metadata);
531 let mtime_looks_clean;
532 if truncate_i64(mtime) != entry.mtime
532 if let Some(dirstate_mtime) = entry.truncated_mtime() {
533 || mtime == self.options.last_normal_time
533 let fs_mtime = TruncatedTimestamp::for_mtime_of(fs_metadata)
534 {
534 .expect("OS/libc does not support mtime?");
535 mtime_looks_clean = fs_mtime.likely_equal(dirstate_mtime)
536 && !fs_mtime.likely_equal(self.options.last_normal_time)
537 } else {
538 // No mtime in the dirstate entry
539 mtime_looks_clean = false
540 };
541 if !mtime_looks_clean {
535 self.outcome
542 self.outcome
536 .lock()
543 .lock()
537 .unwrap()
544 .unwrap()
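The `mtime_looks_clean` decision above combines two guards: the filesystem mtime must equal the mtime recorded in the dirstate entry, and it must not equal `last_normal_time` (the moment the dirstate itself was written, where a same-second write makes the comparison ambiguous). A std-only sketch with illustrative `(seconds, nanoseconds)` pairs:

fn mtime_looks_clean(
    fs_mtime: Option<(u32, u32)>,       // observed on the filesystem
    dirstate_mtime: Option<(u32, u32)>, // recorded in the dirstate entry
    last_normal_time: (u32, u32),       // when the dirstate was written
) -> bool {
    match (fs_mtime, dirstate_mtime) {
        (Some(fs), Some(recorded)) => fs == recorded && fs != last_normal_time,
        _ => false, // no recorded mtime: cannot conclude cleanliness
    }
}

fn main() {
    let write_time = (1_000, 0);
    assert!(mtime_looks_clean(Some((900, 0)), Some((900, 0)), write_time));
    // Same second as the dirstate write: treated as unsure, not clean.
    assert!(!mtime_looks_clean(Some((1_000, 0)), Some((1_000, 0)), write_time));
}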
@@ -687,15 +694,6 b" impl<'a, 'tree, 'on_disk> StatusCommon<'"
687 }
694 }
688 }
695 }
689
696
690 #[cfg(unix)] // TODO
691 fn mtime_seconds(metadata: &std::fs::Metadata) -> i64 {
692 // Going through `Metadata::modified()` would be portable, but would take
693 // care to construct a `SystemTime` value with sub-second precision just
694 // for us to throw that away here.
695 use std::os::unix::fs::MetadataExt;
696 metadata.mtime()
697 }
698
699 struct DirEntry {
697 struct DirEntry {
700 base_name: HgPathBuf,
698 base_name: HgPathBuf,
701 full_path: PathBuf,
699 full_path: PathBuf,
@@ -536,7 +536,7 b' impl SubInclude {'
536 Ok(Self {
536 Ok(Self {
537 prefix: path_to_hg_path_buf(prefix).and_then(|mut p| {
537 prefix: path_to_hg_path_buf(prefix).and_then(|mut p| {
538 if !p.is_empty() {
538 if !p.is_empty() {
539 p.push(b'/');
539 p.push_byte(b'/');
540 }
540 }
541 Ok(p)
541 Ok(p)
542 })?,
542 })?,
@@ -16,14 +16,11 b' pub mod requirements;'
16 pub mod testing; // unconditionally built, for use from integration tests
16 pub mod testing; // unconditionally built, for use from integration tests
17 pub use dirstate::{
17 pub use dirstate::{
18 dirs_multiset::{DirsMultiset, DirsMultisetIter},
18 dirs_multiset::{DirsMultiset, DirsMultisetIter},
19 dirstate_map::DirstateMap,
20 parsers::{pack_dirstate, parse_dirstate, PARENT_SIZE},
21 status::{
19 status::{
22 status, BadMatch, BadType, DirstateStatus, HgPathCow, StatusError,
20 BadMatch, BadType, DirstateStatus, HgPathCow, StatusError,
23 StatusOptions,
21 StatusOptions,
24 },
22 },
25 CopyMap, CopyMapIter, DirstateEntry, DirstateParents, EntryState,
23 DirstateEntry, DirstateParents, EntryState,
26 StateMap, StateMapIter,
27 };
24 };
28 pub mod copy_tracing;
25 pub mod copy_tracing;
29 mod filepatterns;
26 mod filepatterns;
@@ -36,6 +33,7 b' pub mod logging;'
36 pub mod operations;
33 pub mod operations;
37 pub mod revset;
34 pub mod revset;
38 pub mod utils;
35 pub mod utils;
36 pub mod vfs;
39
37
40 use crate::utils::hg_path::{HgPathBuf, HgPathError};
38 use crate::utils::hg_path::{HgPathBuf, HgPathError};
41 pub use filepatterns::{
39 pub use filepatterns::{
@@ -1,5 +1,5 b''
1 use crate::errors::{HgError, HgResultExt, IoErrorContext, IoResultExt};
1 use crate::errors::{HgError, HgResultExt, IoErrorContext, IoResultExt};
2 use crate::repo::Vfs;
2 use crate::vfs::Vfs;
3 use std::io::Write;
3 use std::io::Write;
4
4
5 /// A utility to append to a log file with the given name, and optionally
5 /// A utility to append to a log file with the given name, and optionally
@@ -391,8 +391,7 b' fn roots_and_dirs('
391 } = ignore_pattern;
391 } = ignore_pattern;
392 match syntax {
392 match syntax {
393 PatternSyntax::RootGlob | PatternSyntax::Glob => {
393 PatternSyntax::RootGlob | PatternSyntax::Glob => {
394 let mut root = vec![];
394 let mut root = HgPathBuf::new();
395
396 for p in pattern.split(|c| *c == b'/') {
395 for p in pattern.split(|c| *c == b'/') {
397 if p.iter().any(|c| match *c {
396 if p.iter().any(|c| match *c {
398 b'[' | b'{' | b'*' | b'?' => true,
397 b'[' | b'{' | b'*' | b'?' => true,
@@ -400,11 +399,9 b' fn roots_and_dirs('
400 }) {
399 }) {
401 break;
400 break;
402 }
401 }
403 root.push(HgPathBuf::from_bytes(p));
402 root.push(HgPathBuf::from_bytes(p).as_ref());
404 }
403 }
405 let buf =
404 roots.push(root);
406 root.iter().fold(HgPathBuf::new(), |acc, r| acc.join(r));
407 roots.push(buf);
408 }
405 }
409 PatternSyntax::Path | PatternSyntax::RelPath => {
406 PatternSyntax::Path | PatternSyntax::RelPath => {
410 let pat = HgPath::new(if pattern == b"." {
407 let pat = HgPath::new(if pattern == b"." {
@@ -5,31 +5,70 b''
5 // This software may be used and distributed according to the terms of the
5 // This software may be used and distributed according to the terms of the
6 // GNU General Public License version 2 or any later version.
6 // GNU General Public License version 2 or any later version.
7
7
8 use std::path::PathBuf;
9
10 use crate::repo::Repo;
8 use crate::repo::Repo;
11 use crate::revlog::changelog::Changelog;
12 use crate::revlog::manifest::Manifest;
13 use crate::revlog::path_encode::path_encode;
14 use crate::revlog::revlog::Revlog;
15 use crate::revlog::revlog::RevlogError;
9 use crate::revlog::revlog::RevlogError;
16 use crate::revlog::Node;
10 use crate::revlog::Node;
17 use crate::utils::files::get_path_from_bytes;
11
18 use crate::utils::hg_path::{HgPath, HgPathBuf};
12 use crate::utils::hg_path::HgPath;
19
13
20 pub struct CatOutput {
14 use itertools::put_back;
15 use itertools::PutBack;
16 use std::cmp::Ordering;
17
18 pub struct CatOutput<'a> {
21 /// Whether any file in the manifest matched the paths given as CLI
19 /// Whether any file in the manifest matched the paths given as CLI
22 /// arguments
20 /// arguments
23 pub found_any: bool,
21 pub found_any: bool,
24 /// The contents of matching files, in manifest order
22 /// The contents of matching files, in manifest order
25 pub concatenated: Vec<u8>,
23 pub results: Vec<(&'a HgPath, Vec<u8>)>,
26 /// Which of the CLI arguments did not match any manifest file
24 /// Which of the CLI arguments did not match any manifest file
27 pub missing: Vec<HgPathBuf>,
25 pub missing: Vec<&'a HgPath>,
28 /// The node ID that the given revset was resolved to
26 /// The node ID that the given revset was resolved to
29 pub node: Node,
27 pub node: Node,
30 }
28 }
31
29
32 const METADATA_DELIMITER: [u8; 2] = [b'\x01', b'\n'];
30 // Find an item in an iterator over a sorted collection.
31 fn find_item<'a, 'b, 'c, D, I: Iterator<Item = (&'a HgPath, D)>>(
32 i: &mut PutBack<I>,
33 needle: &'b HgPath,
34 ) -> Option<D> {
35 loop {
36 match i.next() {
37 None => return None,
38 Some(val) => match needle.as_bytes().cmp(val.0.as_bytes()) {
39 Ordering::Less => {
40 i.put_back(val);
41 return None;
42 }
43 Ordering::Greater => continue,
44 Ordering::Equal => return Some(val.1),
45 },
46 }
47 }
48 }
49
50 fn find_files_in_manifest<
51 'manifest,
52 'query,
53 Data,
54 Manifest: Iterator<Item = (&'manifest HgPath, Data)>,
55 Query: Iterator<Item = &'query HgPath>,
56 >(
57 manifest: Manifest,
58 query: Query,
59 ) -> (Vec<(&'query HgPath, Data)>, Vec<&'query HgPath>) {
60 let mut manifest = put_back(manifest);
61 let mut res = vec![];
62 let mut missing = vec![];
63
64 for file in query {
65 match find_item(&mut manifest, file) {
66 None => missing.push(file),
67 Some(item) => res.push((file, item)),
68 }
69 }
70 return (res, missing);
71 }
33
72
34 /// Output the given revision of files
73 /// Output the given revision of files
35 ///
74 ///
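
The two helpers added above implement a single-pass merge join: both the manifest iterator and the query list are sorted, so each lookup only ever advances the manifest iterator, and `put_back` re-stashes an overshot entry so the next (larger) needle can still see it. A std-only sketch of the same idea using `Peekable` instead of `itertools::put_back` (function and variable names are ours):

    use std::iter::Peekable;

    /// Advance a sorted iterator until `needle` is found or passed.
    /// Returns the payload on an exact match; leaves the iterator positioned
    /// so that later (larger) needles can still be found.
    fn find_sorted<'a, I, D>(iter: &mut Peekable<I>, needle: &[u8]) -> Option<D>
    where
        I: Iterator<Item = (&'a [u8], D)>,
    {
        while let Some(&(key, _)) = iter.peek() {
            match needle.cmp(key) {
                std::cmp::Ordering::Less => return None, // overshot: not present
                std::cmp::Ordering::Greater => {
                    iter.next(); // skip entries smaller than the needle
                }
                std::cmp::Ordering::Equal => return iter.next().map(|(_, d)| d),
            }
        }
        None
    }

    fn main() {
        let manifest: Vec<(&[u8], u32)> = vec![(b"a", 0), (b"c", 1), (b"e", 2)];
        let mut it = manifest.into_iter().peekable();
        // Queries must come in sorted order, like `files.sort_unstable()` above.
        assert_eq!(find_sorted(&mut it, b"a"), Some(0));
        assert_eq!(find_sorted(&mut it, b"b"), None);
        assert_eq!(find_sorted(&mut it, b"e"), Some(2));
    }
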
@@ -39,67 +78,38 b" const METADATA_DELIMITER: [u8; 2] = [b'\\"
39 pub fn cat<'a>(
78 pub fn cat<'a>(
40 repo: &Repo,
79 repo: &Repo,
41 revset: &str,
80 revset: &str,
42 files: &'a [HgPathBuf],
81 mut files: Vec<&'a HgPath>,
43 ) -> Result<CatOutput, RevlogError> {
82 ) -> Result<CatOutput<'a>, RevlogError> {
44 let rev = crate::revset::resolve_single(revset, repo)?;
83 let rev = crate::revset::resolve_single(revset, repo)?;
45 let changelog = Changelog::open(repo)?;
84 let manifest = repo.manifest_for_rev(rev)?;
46 let manifest = Manifest::open(repo)?;
85 let node = *repo
47 let changelog_entry = changelog.get_rev(rev)?;
86 .changelog()?
48 let node = *changelog
49 .node_from_rev(rev)
87 .node_from_rev(rev)
50 .expect("should succeed when changelog.get_rev did");
88 .expect("should succeed when repo.manifest did");
51 let manifest_node =
89 let mut results: Vec<(&'a HgPath, Vec<u8>)> = vec![];
52 Node::from_hex_for_repo(&changelog_entry.manifest_node()?)?;
53 let manifest_entry = manifest.get_node(manifest_node.into())?;
54 let mut bytes = vec![];
55 let mut matched = vec![false; files.len()];
56 let mut found_any = false;
90 let mut found_any = false;
57
91
58 for (manifest_file, node_bytes) in manifest_entry.files_with_nodes() {
92 files.sort_unstable();
59 for (cat_file, is_matched) in files.iter().zip(&mut matched) {
93
60 if cat_file.as_bytes() == manifest_file.as_bytes() {
94 let (found, missing) = find_files_in_manifest(
61 *is_matched = true;
95 manifest.files_with_nodes(),
62 found_any = true;
96 files.into_iter().map(|f| f.as_ref()),
63 let index_path = store_path(manifest_file, b".i");
97 );
64 let data_path = store_path(manifest_file, b".d");
65
98
66 let file_log =
99 for (file_path, node_bytes) in found {
67 Revlog::open(repo, &index_path, Some(&data_path))?;
100 found_any = true;
68 let file_node = Node::from_hex_for_repo(node_bytes)?;
101 let file_log = repo.filelog(file_path)?;
69 let file_rev = file_log.get_node_rev(file_node.into())?;
102 let file_node = Node::from_hex_for_repo(node_bytes)?;
70 let data = file_log.get_rev_data(file_rev)?;
103 results.push((
71 if data.starts_with(&METADATA_DELIMITER) {
104 file_path,
72 let end_delimiter_position = data
105 file_log.data_for_node(file_node)?.into_data()?,
73 [METADATA_DELIMITER.len()..]
106 ));
74 .windows(METADATA_DELIMITER.len())
75 .position(|bytes| bytes == METADATA_DELIMITER);
76 if let Some(position) = end_delimiter_position {
77 let offset = METADATA_DELIMITER.len() * 2;
78 bytes.extend(data[position + offset..].iter());
79 }
80 } else {
81 bytes.extend(data);
82 }
83 }
84 }
85 }
107 }
86
108
87 let missing: Vec<_> = files
88 .iter()
89 .zip(&matched)
90 .filter(|pair| !*pair.1)
91 .map(|pair| pair.0.clone())
92 .collect();
93 Ok(CatOutput {
109 Ok(CatOutput {
94 found_any,
110 found_any,
95 concatenated: bytes,
111 results,
96 missing,
112 missing,
97 node,
113 node,
98 })
114 })
99 }
115 }
100
101 fn store_path(hg_path: &HgPath, suffix: &[u8]) -> PathBuf {
102 let encoded_bytes =
103 path_encode(&[b"data/", hg_path.as_bytes(), suffix].concat());
104 get_path_from_bytes(&encoded_bytes).into()
105 }
@@ -9,9 +9,7 b' use crate::dirstate::parsers::parse_dirs'
9 use crate::dirstate_tree::on_disk::{for_each_tracked_path, read_docket};
9 use crate::dirstate_tree::on_disk::{for_each_tracked_path, read_docket};
10 use crate::errors::HgError;
10 use crate::errors::HgError;
11 use crate::repo::Repo;
11 use crate::repo::Repo;
12 use crate::revlog::changelog::Changelog;
12 use crate::revlog::manifest::Manifest;
13 use crate::revlog::manifest::{Manifest, ManifestEntry};
14 use crate::revlog::node::Node;
15 use crate::revlog::revlog::RevlogError;
13 use crate::revlog::revlog::RevlogError;
16 use crate::utils::hg_path::HgPath;
14 use crate::utils::hg_path::HgPath;
17 use crate::DirstateError;
15 use crate::DirstateError;
@@ -53,7 +51,7 b' impl Dirstate {'
53 let _parents = parse_dirstate_entries(
51 let _parents = parse_dirstate_entries(
54 &self.content,
52 &self.content,
55 |path, entry, _copy_source| {
53 |path, entry, _copy_source| {
56 if entry.state.is_tracked() {
54 if entry.state().is_tracked() {
57 files.push(path)
55 files.push(path)
58 }
56 }
59 Ok(())
57 Ok(())
@@ -72,16 +70,10 b' pub fn list_rev_tracked_files('
72 revset: &str,
70 revset: &str,
73 ) -> Result<FilesForRev, RevlogError> {
71 ) -> Result<FilesForRev, RevlogError> {
74 let rev = crate::revset::resolve_single(revset, repo)?;
72 let rev = crate::revset::resolve_single(revset, repo)?;
75 let changelog = Changelog::open(repo)?;
73 Ok(FilesForRev(repo.manifest_for_rev(rev)?))
76 let manifest = Manifest::open(repo)?;
77 let changelog_entry = changelog.get_rev(rev)?;
78 let manifest_node =
79 Node::from_hex_for_repo(&changelog_entry.manifest_node()?)?;
80 let manifest_entry = manifest.get_node(manifest_node.into())?;
81 Ok(FilesForRev(manifest_entry))
82 }
74 }
83
75
84 pub struct FilesForRev(ManifestEntry);
76 pub struct FilesForRev(Manifest);
85
77
86 impl FilesForRev {
78 impl FilesForRev {
87 pub fn iter(&self) -> impl Iterator<Item = &HgPath> {
79 pub fn iter(&self) -> impl Iterator<Item = &HgPath> {
@@ -4,7 +4,6 b''
4
4
5 mod cat;
5 mod cat;
6 mod debugdata;
6 mod debugdata;
7 mod dirstate_status;
8 mod list_tracked_files;
7 mod list_tracked_files;
9 pub use cat::{cat, CatOutput};
8 pub use cat::{cat, CatOutput};
10 pub use debugdata::{debug_data, DebugDataKind};
9 pub use debugdata::{debug_data, DebugDataKind};
@@ -1,12 +1,22 b''
1 use crate::changelog::Changelog;
1 use crate::config::{Config, ConfigError, ConfigParseError};
2 use crate::config::{Config, ConfigError, ConfigParseError};
2 use crate::errors::{HgError, IoErrorContext, IoResultExt};
3 use crate::dirstate::DirstateParents;
4 use crate::dirstate_tree::dirstate_map::DirstateMap;
5 use crate::dirstate_tree::owning::OwningDirstateMap;
6 use crate::errors::HgError;
7 use crate::errors::HgResultExt;
3 use crate::exit_codes;
8 use crate::exit_codes;
4 use crate::requirements;
9 use crate::manifest::{Manifest, Manifestlog};
10 use crate::revlog::filelog::Filelog;
11 use crate::revlog::revlog::RevlogError;
5 use crate::utils::files::get_path_from_bytes;
12 use crate::utils::files::get_path_from_bytes;
13 use crate::utils::hg_path::HgPath;
6 use crate::utils::SliceExt;
14 use crate::utils::SliceExt;
7 use memmap::{Mmap, MmapOptions};
15 use crate::vfs::{is_dir, is_file, Vfs};
16 use crate::{requirements, NodePrefix};
17 use crate::{DirstateError, Revision};
18 use std::cell::{Cell, Ref, RefCell, RefMut};
8 use std::collections::HashSet;
19 use std::collections::HashSet;
9 use std::io::ErrorKind;
10 use std::path::{Path, PathBuf};
20 use std::path::{Path, PathBuf};
11
21
12 /// A repository on disk
22 /// A repository on disk
@@ -16,6 +26,11 b' pub struct Repo {'
16 store: PathBuf,
26 store: PathBuf,
17 requirements: HashSet<String>,
27 requirements: HashSet<String>,
18 config: Config,
28 config: Config,
29 // None means not known/initialized yet
30 dirstate_parents: Cell<Option<DirstateParents>>,
31 dirstate_map: LazyCell<OwningDirstateMap, DirstateError>,
32 changelog: LazyCell<Changelog, HgError>,
33 manifestlog: LazyCell<Manifestlog, HgError>,
19 }
34 }
20
35
21 #[derive(Debug, derive_more::From)]
36 #[derive(Debug, derive_more::From)]
@@ -38,12 +53,6 b' impl From<ConfigError> for RepoError {'
38 }
53 }
39 }
54 }
40
55
41 /// Filesystem access abstraction for the contents of a given "base" directory
42 #[derive(Clone, Copy)]
43 pub struct Vfs<'a> {
44 pub(crate) base: &'a Path,
45 }
46
47 impl Repo {
56 impl Repo {
48 /// tries to find nearest repository root in current working directory or
57 /// tries to find nearest repository root in current working directory or
49 /// its ancestors
58 /// its ancestors
@@ -127,7 +136,8 b' impl Repo {'
127 } else {
136 } else {
128 let bytes = hg_vfs.read("sharedpath")?;
137 let bytes = hg_vfs.read("sharedpath")?;
129 let mut shared_path =
138 let mut shared_path =
130 get_path_from_bytes(bytes.trim_end_newlines()).to_owned();
139 get_path_from_bytes(bytes.trim_end_matches(|b| b == b'\n'))
140 .to_owned();
131 if relative {
141 if relative {
132 shared_path = dot_hg.join(shared_path)
142 shared_path = dot_hg.join(shared_path)
133 }
143 }
@@ -192,6 +202,10 b' impl Repo {'
192 store: store_path,
202 store: store_path,
193 dot_hg,
203 dot_hg,
194 config: repo_config,
204 config: repo_config,
205 dirstate_parents: Cell::new(None),
206 dirstate_map: LazyCell::new(Self::new_dirstate_map),
207 changelog: LazyCell::new(Changelog::open),
208 manifestlog: LazyCell::new(Manifestlog::open),
195 };
209 };
196
210
197 requirements::check(&repo)?;
211 requirements::check(&repo)?;
@@ -234,82 +248,162 b' impl Repo {'
234 .contains(requirements::DIRSTATE_V2_REQUIREMENT)
248 .contains(requirements::DIRSTATE_V2_REQUIREMENT)
235 }
249 }
236
250
237 pub fn dirstate_parents(
251 fn dirstate_file_contents(&self) -> Result<Vec<u8>, HgError> {
238 &self,
252 Ok(self
239 ) -> Result<crate::dirstate::DirstateParents, HgError> {
253 .hg_vfs()
240 let dirstate = self.hg_vfs().mmap_open("dirstate")?;
254 .read("dirstate")
241 if dirstate.is_empty() {
255 .io_not_found_as_none()?
242 return Ok(crate::dirstate::DirstateParents::NULL);
256 .unwrap_or(Vec::new()))
257 }
258
259 pub fn dirstate_parents(&self) -> Result<DirstateParents, HgError> {
260 if let Some(parents) = self.dirstate_parents.get() {
261 return Ok(parents);
243 }
262 }
244 let parents = if self.has_dirstate_v2() {
263 let dirstate = self.dirstate_file_contents()?;
264 let parents = if dirstate.is_empty() {
265 DirstateParents::NULL
266 } else if self.has_dirstate_v2() {
245 crate::dirstate_tree::on_disk::read_docket(&dirstate)?.parents()
267 crate::dirstate_tree::on_disk::read_docket(&dirstate)?.parents()
246 } else {
268 } else {
247 crate::dirstate::parsers::parse_dirstate_parents(&dirstate)?
269 crate::dirstate::parsers::parse_dirstate_parents(&dirstate)?
248 .clone()
270 .clone()
249 };
271 };
272 self.dirstate_parents.set(Some(parents));
250 Ok(parents)
273 Ok(parents)
251 }
274 }
275
276 fn new_dirstate_map(&self) -> Result<OwningDirstateMap, DirstateError> {
277 let dirstate_file_contents = self.dirstate_file_contents()?;
278 if dirstate_file_contents.is_empty() {
279 self.dirstate_parents.set(Some(DirstateParents::NULL));
280 Ok(OwningDirstateMap::new_empty(Vec::new()))
281 } else if self.has_dirstate_v2() {
282 let docket = crate::dirstate_tree::on_disk::read_docket(
283 &dirstate_file_contents,
284 )?;
285 self.dirstate_parents.set(Some(docket.parents()));
286 let data_size = docket.data_size();
287 let metadata = docket.tree_metadata();
288 let mut map = if let Some(data_mmap) = self
289 .hg_vfs()
290 .mmap_open(docket.data_filename())
291 .io_not_found_as_none()?
292 {
293 OwningDirstateMap::new_empty(data_mmap)
294 } else {
295 OwningDirstateMap::new_empty(Vec::new())
296 };
297 let (on_disk, placeholder) = map.get_pair_mut();
298 *placeholder = DirstateMap::new_v2(on_disk, data_size, metadata)?;
299 Ok(map)
300 } else {
301 let mut map = OwningDirstateMap::new_empty(dirstate_file_contents);
302 let (on_disk, placeholder) = map.get_pair_mut();
303 let (inner, parents) = DirstateMap::new_v1(on_disk)?;
304 self.dirstate_parents
305 .set(Some(parents.unwrap_or(DirstateParents::NULL)));
306 *placeholder = inner;
307 Ok(map)
308 }
309 }
310
311 pub fn dirstate_map(
312 &self,
313 ) -> Result<Ref<OwningDirstateMap>, DirstateError> {
314 self.dirstate_map.get_or_init(self)
315 }
316
317 pub fn dirstate_map_mut(
318 &self,
319 ) -> Result<RefMut<OwningDirstateMap>, DirstateError> {
320 self.dirstate_map.get_mut_or_init(self)
321 }
322
323 pub fn changelog(&self) -> Result<Ref<Changelog>, HgError> {
324 self.changelog.get_or_init(self)
325 }
326
327 pub fn changelog_mut(&self) -> Result<RefMut<Changelog>, HgError> {
328 self.changelog.get_mut_or_init(self)
329 }
330
331 pub fn manifestlog(&self) -> Result<Ref<Manifestlog>, HgError> {
332 self.manifestlog.get_or_init(self)
333 }
334
335 pub fn manifestlog_mut(&self) -> Result<RefMut<Manifestlog>, HgError> {
336 self.manifestlog.get_mut_or_init(self)
337 }
338
339 /// Returns the manifest of the *changeset* with the given node ID
340 pub fn manifest_for_node(
341 &self,
342 node: impl Into<NodePrefix>,
343 ) -> Result<Manifest, RevlogError> {
344 self.manifestlog()?.data_for_node(
345 self.changelog()?
346 .data_for_node(node.into())?
347 .manifest_node()?
348 .into(),
349 )
350 }
351
352 /// Returns the manifest of the *changeset* with the given revision number
353 pub fn manifest_for_rev(
354 &self,
355 revision: Revision,
356 ) -> Result<Manifest, RevlogError> {
357 self.manifestlog()?.data_for_node(
358 self.changelog()?
359 .data_for_rev(revision)?
360 .manifest_node()?
361 .into(),
362 )
363 }
364
365 pub fn filelog(&self, path: &HgPath) -> Result<Filelog, HgError> {
366 Filelog::open(self, path)
367 }
252 }
368 }
253
369
254 impl Vfs<'_> {
370 /// Lazily-initialized component of `Repo` with interior mutability
255 pub fn join(&self, relative_path: impl AsRef<Path>) -> PathBuf {
371 ///
256 self.base.join(relative_path)
372 /// This differs from `OnceCell` in that the value can still be "deinitialized"
257 }
373 /// later by setting its inner `Option` to `None`.
374 struct LazyCell<T, E> {
375 value: RefCell<Option<T>>,
376 // `Fn`s that don’t capture environment are zero-size, so this box does
377 // not allocate:
378 init: Box<dyn Fn(&Repo) -> Result<T, E>>,
379 }
258
380
259 pub fn read(
381 impl<T, E> LazyCell<T, E> {
260 &self,
382 fn new(init: impl Fn(&Repo) -> Result<T, E> + 'static) -> Self {
261 relative_path: impl AsRef<Path>,
383 Self {
262 ) -> Result<Vec<u8>, HgError> {
384 value: RefCell::new(None),
263 let path = self.join(relative_path);
385 init: Box::new(init),
264 std::fs::read(&path).when_reading_file(&path)
386 }
265 }
266
267 pub fn mmap_open(
268 &self,
269 relative_path: impl AsRef<Path>,
270 ) -> Result<Mmap, HgError> {
271 let path = self.base.join(relative_path);
272 let file = std::fs::File::open(&path).when_reading_file(&path)?;
273 // TODO: what are the safety requirements here?
274 let mmap = unsafe { MmapOptions::new().map(&file) }
275 .when_reading_file(&path)?;
276 Ok(mmap)
277 }
387 }
278
388
279 pub fn rename(
389 fn get_or_init(&self, repo: &Repo) -> Result<Ref<T>, E> {
280 &self,
390 let mut borrowed = self.value.borrow();
281 relative_from: impl AsRef<Path>,
391 if borrowed.is_none() {
282 relative_to: impl AsRef<Path>,
392 drop(borrowed);
283 ) -> Result<(), HgError> {
393 // Only use `borrow_mut` if it is really needed to avoid a panic in
284 let from = self.join(relative_from);
394 // case there is another outstanding borrow but mutation is not
285 let to = self.join(relative_to);
395 // needed.
286 std::fs::rename(&from, &to)
396 *self.value.borrow_mut() = Some((self.init)(repo)?);
287 .with_context(|| IoErrorContext::RenamingFile { from, to })
397 borrowed = self.value.borrow()
398 }
399 Ok(Ref::map(borrowed, |option| option.as_ref().unwrap()))
400 }
401
402 pub fn get_mut_or_init(&self, repo: &Repo) -> Result<RefMut<T>, E> {
403 let mut borrowed = self.value.borrow_mut();
404 if borrowed.is_none() {
405 *borrowed = Some((self.init)(repo)?);
406 }
407 Ok(RefMut::map(borrowed, |option| option.as_mut().unwrap()))
288 }
408 }
289 }
409 }
290
291 fn fs_metadata(
292 path: impl AsRef<Path>,
293 ) -> Result<Option<std::fs::Metadata>, HgError> {
294 let path = path.as_ref();
295 match std::fs::metadata(path) {
296 Ok(meta) => Ok(Some(meta)),
297 Err(error) => match error.kind() {
298 // TODO: when we require a Rust version where `NotADirectory` is
299 // stable, invert this logic and return None for it and `NotFound`
300 // and propagate any other error.
301 ErrorKind::PermissionDenied => Err(error).with_context(|| {
302 IoErrorContext::ReadingMetadata(path.to_owned())
303 }),
304 _ => Ok(None),
305 },
306 }
307 }
308
309 fn is_dir(path: impl AsRef<Path>) -> Result<bool, HgError> {
310 Ok(fs_metadata(path)?.map_or(false, |meta| meta.is_dir()))
311 }
312
313 fn is_file(path: impl AsRef<Path>) -> Result<bool, HgError> {
314 Ok(fs_metadata(path)?.map_or(false, |meta| meta.is_file()))
315 }
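
`LazyCell` above is a `RefCell<Option<T>>` plus a boxed init function; `get_or_init` deliberately drops its shared borrow before taking the mutable one, so that when the value is already initialized no `borrow_mut` happens at all and outstanding `Ref`s stay valid. A reduced, repo-independent sketch of the same pattern (type and function names are ours; a plain `fn` pointer stands in for the boxed closure):

    use std::cell::{Ref, RefCell};

    struct Lazy<T> {
        value: RefCell<Option<T>>,
        init: fn() -> T,
    }

    impl<T> Lazy<T> {
        fn new(init: fn() -> T) -> Self {
            Lazy { value: RefCell::new(None), init }
        }

        fn get_or_init(&self) -> Ref<'_, T> {
            let mut borrowed = self.value.borrow();
            if borrowed.is_none() {
                // Release the shared borrow before borrowing mutably,
                // exactly as in `LazyCell::get_or_init` above.
                drop(borrowed);
                *self.value.borrow_mut() = Some((self.init)());
                borrowed = self.value.borrow();
            }
            Ref::map(borrowed, |opt| opt.as_ref().unwrap())
        }
    }

    fn main() {
        let lazy = Lazy::new(|| "expensive".to_string());
        let a = lazy.get_or_init(); // runs init
        let b = lazy.get_or_init(); // cached; shared borrows coexist
        assert_eq!(&*a, "expensive");
        assert_eq!(&*a, &*b);
    }
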
@@ -1,6 +1,7 b''
1 use crate::errors::{HgError, HgResultExt};
1 use crate::errors::{HgError, HgResultExt};
2 use crate::repo::{Repo, Vfs};
2 use crate::repo::Repo;
3 use crate::utils::join_display;
3 use crate::utils::join_display;
4 use crate::vfs::Vfs;
4 use std::collections::HashSet;
5 use std::collections::HashSet;
5
6
6 fn parse(bytes: &[u8]) -> Result<HashSet<String>, HgError> {
7 fn parse(bytes: &[u8]) -> Result<HashSet<String>, HgError> {
@@ -91,7 +92,7 b' const SUPPORTED: &[&str] = &['
91
92
92 // Copied from mercurial/requirements.py:
93 // Copied from mercurial/requirements.py:
93
94
94 pub(crate) const DIRSTATE_V2_REQUIREMENT: &str = "exp-dirstate-v2";
95 pub(crate) const DIRSTATE_V2_REQUIREMENT: &str = "dirstate-v2";
95
96
96 /// When narrowing is finalized and no longer subject to format changes,
97 /// When narrowing is finalized and no longer subject to format changes,
97 /// we should move this to just "narrow" or similar.
98 /// we should move this to just "narrow" or similar.
@@ -11,6 +11,7 b' mod nodemap_docket;'
11 pub mod path_encode;
11 pub mod path_encode;
12 pub use node::{FromHexError, Node, NodePrefix};
12 pub use node::{FromHexError, Node, NodePrefix};
13 pub mod changelog;
13 pub mod changelog;
14 pub mod filelog;
14 pub mod index;
15 pub mod index;
15 pub mod manifest;
16 pub mod manifest;
16 pub mod patch;
17 pub mod patch;
@@ -1,5 +1,6 b''
1 use crate::errors::HgError;
1 use crate::errors::HgError;
2 use crate::repo::Repo;
2 use crate::repo::Repo;
3 use crate::revlog::node::NULL_NODE;
3 use crate::revlog::revlog::{Revlog, RevlogError};
4 use crate::revlog::revlog::{Revlog, RevlogError};
4 use crate::revlog::Revision;
5 use crate::revlog::Revision;
5 use crate::revlog::{Node, NodePrefix};
6 use crate::revlog::{Node, NodePrefix};
@@ -12,22 +13,22 b' pub struct Changelog {'
12
13
13 impl Changelog {
14 impl Changelog {
14 /// Open the `changelog` of a repository given by its root.
15 /// Open the `changelog` of a repository given by its root.
15 pub fn open(repo: &Repo) -> Result<Self, RevlogError> {
16 pub fn open(repo: &Repo) -> Result<Self, HgError> {
16 let revlog = Revlog::open(repo, "00changelog.i", None)?;
17 let revlog = Revlog::open(repo, "00changelog.i", None)?;
17 Ok(Self { revlog })
18 Ok(Self { revlog })
18 }
19 }
19
20
20 /// Return the `ChangelogEntry` a given node id.
21 /// Return the `ChangelogEntry` for the given node ID.
21 pub fn get_node(
22 pub fn data_for_node(
22 &self,
23 &self,
23 node: NodePrefix,
24 node: NodePrefix,
24 ) -> Result<ChangelogEntry, RevlogError> {
25 ) -> Result<ChangelogEntry, RevlogError> {
25 let rev = self.revlog.get_node_rev(node)?;
26 let rev = self.revlog.rev_from_node(node)?;
26 self.get_rev(rev)
27 self.data_for_rev(rev)
27 }
28 }
28
29
29 /// Return the `ChangelogEntry` of a given node revision.
30 /// Return the `ChangelogEntry` of the given revision number.
30 pub fn get_rev(
31 pub fn data_for_rev(
31 &self,
32 &self,
32 rev: Revision,
33 rev: Revision,
33 ) -> Result<ChangelogEntry, RevlogError> {
34 ) -> Result<ChangelogEntry, RevlogError> {
@@ -36,7 +37,7 b' impl Changelog {'
36 }
37 }
37
38
38 pub fn node_from_rev(&self, rev: Revision) -> Option<&Node> {
39 pub fn node_from_rev(&self, rev: Revision) -> Option<&Node> {
39 Some(self.revlog.index.get_entry(rev)?.hash())
40 self.revlog.node_from_rev(rev)
40 }
41 }
41 }
42 }
42
43
@@ -57,9 +58,10 b' impl ChangelogEntry {'
57
58
58 /// Return the node id of the `manifest` referenced by this `changelog`
59 /// Return the node id of the `manifest` referenced by this `changelog`
59 /// entry.
60 /// entry.
60 pub fn manifest_node(&self) -> Result<&[u8], RevlogError> {
61 pub fn manifest_node(&self) -> Result<Node, HgError> {
61 self.lines()
62 match self.lines().next() {
62 .next()
63 None => Ok(NULL_NODE),
63 .ok_or_else(|| HgError::corrupted("empty changelog entry").into())
64 Some(x) => Node::from_hex_for_repo(x),
65 }
64 }
66 }
65 }
67 }
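
The new `manifest_node` reads the manifest node from the first line of the changelog entry and maps an empty entry to `NULL_NODE` instead of treating it as corruption. A simplified sketch of that first-line extraction over raw bytes (hex decoding omitted; the all-zeros string stands in for the null node):

    /// First line of a changelog entry: the hex node ID of its manifest.
    /// An empty entry maps to the null node, as in the hunk above.
    fn manifest_node_hex(entry: &[u8]) -> Vec<u8> {
        match entry.split(|&b| b == b'\n').next() {
            Some(line) if !line.is_empty() => line.to_vec(),
            _ => vec![b'0'; 40], // null node, hex-encoded
        }
    }

    fn main() {
        let entry = b"0123456789abcdef0123456789abcdef01234567\nuser <user@example.com>\n";
        assert_eq!(
            manifest_node_hex(entry),
            b"0123456789abcdef0123456789abcdef01234567".to_vec()
        );
        assert_eq!(manifest_node_hex(b""), vec![b'0'; 40]);
    }
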
@@ -5,7 +5,6 b' use byteorder::{BigEndian, ByteOrder};'
5
5
6 use crate::errors::HgError;
6 use crate::errors::HgError;
7 use crate::revlog::node::Node;
7 use crate::revlog::node::Node;
8 use crate::revlog::revlog::RevlogError;
9 use crate::revlog::{Revision, NULL_REVISION};
8 use crate::revlog::{Revision, NULL_REVISION};
10
9
11 pub const INDEX_ENTRY_SIZE: usize = 64;
10 pub const INDEX_ENTRY_SIZE: usize = 64;
@@ -23,7 +22,7 b' impl Index {'
23 /// Calculate the start of each entry when is_inline is true.
22 /// Calculate the start of each entry when is_inline is true.
24 pub fn new(
23 pub fn new(
25 bytes: Box<dyn Deref<Target = [u8]> + Send>,
24 bytes: Box<dyn Deref<Target = [u8]> + Send>,
26 ) -> Result<Self, RevlogError> {
25 ) -> Result<Self, HgError> {
27 if is_inline(&bytes) {
26 if is_inline(&bytes) {
28 let mut offset: usize = 0;
27 let mut offset: usize = 0;
29 let mut offsets = Vec::new();
28 let mut offsets = Vec::new();
@@ -58,7 +57,7 b' impl Index {'
58
57
59 /// Value of the inline flag.
58 /// Value of the inline flag.
60 pub fn is_inline(&self) -> bool {
59 pub fn is_inline(&self) -> bool {
61 is_inline(&self.bytes)
60 self.offsets.is_some()
62 }
61 }
63
62
64 /// Return a slice of bytes if `revlog` is inline. Panic if not.
63 /// Return a slice of bytes if `revlog` is inline. Panic if not.
@@ -209,6 +208,9 b" impl<'a> IndexEntry<'a> {"
209
208
210 /// Value of the inline flag.
209 /// Value of the inline flag.
211 pub fn is_inline(index_bytes: &[u8]) -> bool {
210 pub fn is_inline(index_bytes: &[u8]) -> bool {
211 if index_bytes.len() < 4 {
212 return true;
213 }
212 match &index_bytes[0..=1] {
214 match &index_bytes[0..=1] {
213 [0, 0] | [0, 2] => false,
215 [0, 0] | [0, 2] => false,
214 _ => true,
216 _ => true,
@@ -1,48 +1,60 b''
1 use crate::errors::HgError;
1 use crate::repo::Repo;
2 use crate::repo::Repo;
2 use crate::revlog::revlog::{Revlog, RevlogError};
3 use crate::revlog::revlog::{Revlog, RevlogError};
3 use crate::revlog::NodePrefix;
4 use crate::revlog::Revision;
4 use crate::revlog::Revision;
5 use crate::revlog::{Node, NodePrefix};
5 use crate::utils::hg_path::HgPath;
6 use crate::utils::hg_path::HgPath;
6
7
7 /// A specialized `Revlog` to work with `manifest` data format.
8 /// A specialized `Revlog` to work with `manifest` data format.
8 pub struct Manifest {
9 pub struct Manifestlog {
9 /// The generic `revlog` format.
10 /// The generic `revlog` format.
10 revlog: Revlog,
11 revlog: Revlog,
11 }
12 }
12
13
13 impl Manifest {
14 impl Manifestlog {
14 /// Open the `manifest` of a repository given by its root.
15 /// Open the `manifest` of a repository given by its root.
15 pub fn open(repo: &Repo) -> Result<Self, RevlogError> {
16 pub fn open(repo: &Repo) -> Result<Self, HgError> {
16 let revlog = Revlog::open(repo, "00manifest.i", None)?;
17 let revlog = Revlog::open(repo, "00manifest.i", None)?;
17 Ok(Self { revlog })
18 Ok(Self { revlog })
18 }
19 }
19
20
20 /// Return the `ManifestEntry` of a given node id.
21 /// Return the `Manifest` for the given node ID.
21 pub fn get_node(
22 ///
23 /// Note: this is a node ID in the manifestlog, typically found through
24 /// `ChangelogEntry::manifest_node`. It is *not* the node ID of any
25 /// changeset.
26 ///
27 /// See also `Repo::manifest_for_node`
28 pub fn data_for_node(
22 &self,
29 &self,
23 node: NodePrefix,
30 node: NodePrefix,
24 ) -> Result<ManifestEntry, RevlogError> {
31 ) -> Result<Manifest, RevlogError> {
25 let rev = self.revlog.get_node_rev(node)?;
32 let rev = self.revlog.rev_from_node(node)?;
26 self.get_rev(rev)
33 self.data_for_rev(rev)
27 }
34 }
28
35
29 /// Return the `ManifestEntry` of a given node revision.
36 /// Return the `Manifest` of a given revision number.
30 pub fn get_rev(
37 ///
38 /// Note: this is a revision number in the manifestlog, *not* of any
39 /// changeset.
40 ///
41 /// See also `Repo::manifest_for_rev`
42 pub fn data_for_rev(
31 &self,
43 &self,
32 rev: Revision,
44 rev: Revision,
33 ) -> Result<ManifestEntry, RevlogError> {
45 ) -> Result<Manifest, RevlogError> {
34 let bytes = self.revlog.get_rev_data(rev)?;
46 let bytes = self.revlog.get_rev_data(rev)?;
35 Ok(ManifestEntry { bytes })
47 Ok(Manifest { bytes })
36 }
48 }
37 }
49 }
38
50
39 /// `Manifest` entry which knows how to interpret the `manifest` data bytes.
51 /// `Manifestlog` entry which knows how to interpret the `manifest` data bytes.
40 #[derive(Debug)]
52 #[derive(Debug)]
41 pub struct ManifestEntry {
53 pub struct Manifest {
42 bytes: Vec<u8>,
54 bytes: Vec<u8>,
43 }
55 }
44
56
45 impl ManifestEntry {
57 impl Manifest {
46 /// Return an iterator over the lines of the entry.
58 /// Return an iterator over the lines of the entry.
47 pub fn lines(&self) -> impl Iterator<Item = &[u8]> {
59 pub fn lines(&self) -> impl Iterator<Item = &[u8]> {
48 self.bytes
60 self.bytes
@@ -73,4 +85,17 b' impl ManifestEntry {'
73 (HgPath::new(&line[..pos]), &line[hash_start..hash_end])
85 (HgPath::new(&line[..pos]), &line[hash_start..hash_end])
74 })
86 })
75 }
87 }
88
89 /// If the given path is in this manifest, return its filelog node ID
90 pub fn find_file(&self, path: &HgPath) -> Result<Option<Node>, HgError> {
91 // TODO: use binary search instead of linear scan. This may involve
92 // building (and caching) an index of the byte indices of each manifest
93 // line.
94 for (manifest_path, node) in self.files_with_nodes() {
95 if manifest_path == path {
96 return Ok(Some(Node::from_hex_for_repo(node)?));
97 }
98 }
99 Ok(None)
100 }
76 }
101 }
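
`find_file` relies on the line layout that `files_with_nodes` (shown in context above) decodes: each manifest line is the path, a NUL byte, then the 40-character hex node ID plus optional flags; the TODO notes that the linear scan could become a binary search over cached line offsets. A sketch of the per-line split this iteration depends on (layout assumed from the surrounding code; the helper name is ours):

    /// Split one manifest line into (path, hex node ID), assuming the
    /// `path NUL 40-char-hex-node [flags]` layout used above.
    fn parse_manifest_line(line: &[u8]) -> Option<(&[u8], &[u8])> {
        let pos = line.iter().position(|&b| b == b'\0')?;
        let hash_start = pos + 1;
        let hash_end = hash_start + 40;
        if line.len() < hash_end {
            return None; // truncated line
        }
        Some((&line[..pos], &line[hash_start..hash_end]))
    }

    fn main() {
        let line = b"dir/file.rs\x001234567890123456789012345678901234567890x";
        let (path, node) = parse_manifest_line(line).unwrap();
        assert_eq!(path, &b"dir/file.rs"[..]);
        assert_eq!(node.len(), 40);
    }
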
@@ -1,10 +1,9 b''
1 use crate::errors::{HgError, HgResultExt};
1 use crate::errors::{HgError, HgResultExt};
2 use crate::requirements;
2 use crate::requirements;
3 use bytes_cast::{unaligned, BytesCast};
3 use bytes_cast::{unaligned, BytesCast};
4 use memmap::Mmap;
4 use memmap2::Mmap;
5 use std::path::{Path, PathBuf};
5 use std::path::{Path, PathBuf};
6
6
7 use super::revlog::RevlogError;
8 use crate::repo::Repo;
7 use crate::repo::Repo;
9 use crate::utils::strip_suffix;
8 use crate::utils::strip_suffix;
10
9
@@ -38,7 +37,7 b' impl NodeMapDocket {'
38 pub fn read_from_file(
37 pub fn read_from_file(
39 repo: &Repo,
38 repo: &Repo,
40 index_path: &Path,
39 index_path: &Path,
41 ) -> Result<Option<(Self, Mmap)>, RevlogError> {
40 ) -> Result<Option<(Self, Mmap)>, HgError> {
42 if !repo
41 if !repo
43 .requirements()
42 .requirements()
44 .contains(requirements::NODEMAP_REQUIREMENT)
43 .contains(requirements::NODEMAP_REQUIREMENT)
@@ -65,10 +64,9 b' impl NodeMapDocket {'
65 };
64 };
66
65
67 /// Treat any error as a parse error
66 /// Treat any error as a parse error
68 fn parse<T, E>(result: Result<T, E>) -> Result<T, RevlogError> {
67 fn parse<T, E>(result: Result<T, E>) -> Result<T, HgError> {
69 result.map_err(|_| {
68 result
70 HgError::corrupted("nodemap docket parse error").into()
69 .map_err(|_| HgError::corrupted("nodemap docket parse error"))
71 })
72 }
70 }
73
71
74 let (header, rest) = parse(DocketHeader::from_bytes(input))?;
72 let (header, rest) = parse(DocketHeader::from_bytes(input))?;
@@ -94,7 +92,7 b' impl NodeMapDocket {'
94 if mmap.len() >= data_length {
92 if mmap.len() >= data_length {
95 Ok(Some((docket, mmap)))
93 Ok(Some((docket, mmap)))
96 } else {
94 } else {
97 Err(HgError::corrupted("persistent nodemap too short").into())
95 Err(HgError::corrupted("persistent nodemap too short"))
98 }
96 }
99 } else {
97 } else {
100 // Even if .hg/requires opted in, some revlogs are deemed small
98 // Even if .hg/requires opted in, some revlogs are deemed small
@@ -18,6 +18,7 b' use super::patch;'
18 use crate::errors::HgError;
18 use crate::errors::HgError;
19 use crate::repo::Repo;
19 use crate::repo::Repo;
20 use crate::revlog::Revision;
20 use crate::revlog::Revision;
21 use crate::{Node, NULL_REVISION};
21
22
22 #[derive(derive_more::From)]
23 #[derive(derive_more::From)]
23 pub enum RevlogError {
24 pub enum RevlogError {
@@ -50,7 +51,7 b' pub struct Revlog {'
50 /// When index and data are not interleaved: bytes of the revlog index.
51 /// When index and data are not interleaved: bytes of the revlog index.
51 /// When index and data are interleaved: bytes of the revlog index and
52 /// When index and data are interleaved: bytes of the revlog index and
52 /// data.
53 /// data.
53 pub(crate) index: Index,
54 index: Index,
54 /// When index and data are not interleaved: bytes of the revlog data
55 /// When index and data are not interleaved: bytes of the revlog data
55 data_bytes: Option<Box<dyn Deref<Target = [u8]> + Send>>,
56 data_bytes: Option<Box<dyn Deref<Target = [u8]> + Send>>,
56 /// When present on disk: the persistent nodemap for this revlog
57 /// When present on disk: the persistent nodemap for this revlog
@@ -67,17 +68,24 b' impl Revlog {'
67 repo: &Repo,
68 repo: &Repo,
68 index_path: impl AsRef<Path>,
69 index_path: impl AsRef<Path>,
69 data_path: Option<&Path>,
70 data_path: Option<&Path>,
70 ) -> Result<Self, RevlogError> {
71 ) -> Result<Self, HgError> {
71 let index_path = index_path.as_ref();
72 let index_path = index_path.as_ref();
72 let index_mmap = repo.store_vfs().mmap_open(&index_path)?;
73 let index = {
74 match repo.store_vfs().mmap_open_opt(&index_path)? {
75 None => Index::new(Box::new(vec![])),
76 Some(index_mmap) => {
77 let version = get_version(&index_mmap)?;
78 if version != 1 {
79 // A proper new version should have had a repo/store
80 // requirement.
81 return Err(HgError::corrupted("corrupted revlog"));
82 }
73
83
74 let version = get_version(&index_mmap);
84 let index = Index::new(Box::new(index_mmap))?;
75 if version != 1 {
85 Ok(index)
76 // A proper new version should have had a repo/store requirement.
86 }
77 return Err(RevlogError::corrupted());
87 }
78 }
88 }?;
79
80 let index = Index::new(Box::new(index_mmap))?;
81
89
82 let default_data_path = index_path.with_extension("d");
90 let default_data_path = index_path.with_extension("d");
83
91
@@ -92,14 +100,18 b' impl Revlog {'
92 Some(Box::new(data_mmap))
100 Some(Box::new(data_mmap))
93 };
101 };
94
102
95 let nodemap = NodeMapDocket::read_from_file(repo, index_path)?.map(
103 let nodemap = if index.is_inline() {
96 |(docket, data)| {
104 None
97 nodemap::NodeTree::load_bytes(
105 } else {
98 Box::new(data),
106 NodeMapDocket::read_from_file(repo, index_path)?.map(
99 docket.data_length,
107 |(docket, data)| {
100 )
108 nodemap::NodeTree::load_bytes(
101 },
109 Box::new(data),
102 );
110 docket.data_length,
111 )
112 },
113 )
114 };
103
115
104 Ok(Revlog {
116 Ok(Revlog {
105 index,
117 index,
@@ -118,12 +130,26 b' impl Revlog {'
118 self.index.is_empty()
130 self.index.is_empty()
119 }
131 }
120
132
121 /// Return the full data associated to a node.
133 /// Returns the node ID for the given revision number, if it exists in this
134 /// revlog
135 pub fn node_from_rev(&self, rev: Revision) -> Option<&Node> {
136 if rev == NULL_REVISION {
137 return Some(&NULL_NODE);
138 }
139 Some(self.index.get_entry(rev)?.hash())
140 }
141
142 /// Return the revision number for the given node ID, if it exists in this
143 /// revlog
122 #[timed]
144 #[timed]
123 pub fn get_node_rev(
145 pub fn rev_from_node(
124 &self,
146 &self,
125 node: NodePrefix,
147 node: NodePrefix,
126 ) -> Result<Revision, RevlogError> {
148 ) -> Result<Revision, RevlogError> {
149 if node.is_prefix_of(&NULL_NODE) {
150 return Ok(NULL_REVISION);
151 }
152
127 if let Some(nodemap) = &self.nodemap {
153 if let Some(nodemap) = &self.nodemap {
128 return nodemap
154 return nodemap
129 .find_bin(&self.index, node)?
155 .find_bin(&self.index, node)?
@@ -167,6 +193,9 b' impl Revlog {'
167 /// snapshot to rebuild the final data.
193 /// snapshot to rebuild the final data.
168 #[timed]
194 #[timed]
169 pub fn get_rev_data(&self, rev: Revision) -> Result<Vec<u8>, RevlogError> {
195 pub fn get_rev_data(&self, rev: Revision) -> Result<Vec<u8>, RevlogError> {
196 if rev == NULL_REVISION {
197 return Ok(vec![]);
198 };
170 // Todo return -> Cow
199 // Todo return -> Cow
171 let mut entry = self.get_entry(rev)?;
200 let mut entry = self.get_entry(rev)?;
172 let mut delta_chain = vec![];
201 let mut delta_chain = vec![];
@@ -292,6 +321,10 b" pub struct RevlogEntry<'a> {"
292 }
321 }
293
322
294 impl<'a> RevlogEntry<'a> {
323 impl<'a> RevlogEntry<'a> {
324 pub fn revision(&self) -> Revision {
325 self.rev
326 }
327
295 /// Extract the data contained in the entry.
328 /// Extract the data contained in the entry.
296 pub fn data(&self) -> Result<Cow<'_, [u8]>, RevlogError> {
329 pub fn data(&self) -> Result<Cow<'_, [u8]>, RevlogError> {
297 if self.bytes.is_empty() {
330 if self.bytes.is_empty() {
@@ -355,8 +388,16 b" impl<'a> RevlogEntry<'a> {"
355 }
388 }
356
389
357 /// Format version of the revlog.
390 /// Format version of the revlog.
358 pub fn get_version(index_bytes: &[u8]) -> u16 {
391 pub fn get_version(index_bytes: &[u8]) -> Result<u16, HgError> {
359 BigEndian::read_u16(&index_bytes[2..=3])
392 if index_bytes.len() == 0 {
393 return Ok(1);
394 };
395 if index_bytes.len() < 4 {
396 return Err(HgError::corrupted(
397 "corrupted revlog: can't read the index format header",
398 ));
399 };
400 Ok(BigEndian::read_u16(&index_bytes[2..=3]))
360 }
401 }
361
402
362 /// Calculate the hash of a revision given its data and its parents.
403 /// Calculate the hash of a revision given its data and its parents.
@@ -391,6 +432,6 b' mod tests {'
391 .with_version(1)
432 .with_version(1)
392 .build();
433 .build();
393
434
394 assert_eq!(get_version(&bytes), 1)
435 assert_eq!(get_version(&bytes).map_err(|_err| ()), Ok(1))
395 }
436 }
396 }
437 }
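
`get_version` now tolerates an empty index (a freshly created revlog is treated as format v1) and rejects a header shorter than the 4-byte prefix of big-endian `u16` flags followed by a big-endian `u16` version; the `is_inline` hunk earlier applies the matching short-input guard. A condensed sketch of both checks, with the error type simplified to `String`:

    use std::convert::TryInto;

    /// Parse the 4-byte revlog index header: big-endian u16 flags, then
    /// big-endian u16 format version. Empty input means a new, empty revlog.
    fn get_version(index_bytes: &[u8]) -> Result<u16, String> {
        if index_bytes.is_empty() {
            return Ok(1); // empty revlog: treat as format v1
        }
        if index_bytes.len() < 4 {
            return Err("corrupted revlog: can't read the index format header".into());
        }
        Ok(u16::from_be_bytes(index_bytes[2..4].try_into().unwrap()))
    }

    fn main() {
        assert_eq!(get_version(b""), Ok(1));
        assert!(get_version(b"\x00\x01").is_err());
        assert_eq!(get_version(b"\x00\x01\x00\x01rest-of-index"), Ok(1));
    }
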
@@ -4,7 +4,6 b''
4
4
5 use crate::errors::HgError;
5 use crate::errors::HgError;
6 use crate::repo::Repo;
6 use crate::repo::Repo;
7 use crate::revlog::changelog::Changelog;
8 use crate::revlog::revlog::{Revlog, RevlogError};
7 use crate::revlog::revlog::{Revlog, RevlogError};
9 use crate::revlog::NodePrefix;
8 use crate::revlog::NodePrefix;
10 use crate::revlog::{Revision, NULL_REVISION, WORKING_DIRECTORY_HEX};
9 use crate::revlog::{Revision, NULL_REVISION, WORKING_DIRECTORY_HEX};
@@ -17,23 +16,25 b' pub fn resolve_single('
17 input: &str,
16 input: &str,
18 repo: &Repo,
17 repo: &Repo,
19 ) -> Result<Revision, RevlogError> {
18 ) -> Result<Revision, RevlogError> {
20 let changelog = Changelog::open(repo)?;
19 let changelog = repo.changelog()?;
21
20
22 match resolve_rev_number_or_hex_prefix(input, &changelog.revlog) {
21 match input {
23 Err(RevlogError::InvalidRevision) => {} // Try other syntax
22 "." => {
24 result => return result,
23 let p1 = repo.dirstate_parents()?.p1;
24 return Ok(changelog.revlog.rev_from_node(p1.into())?);
25 }
26 "null" => return Ok(NULL_REVISION),
27 _ => {}
25 }
28 }
26
29
27 if input == "null" {
30 match resolve_rev_number_or_hex_prefix(input, &changelog.revlog) {
28 return Ok(NULL_REVISION);
31 Err(RevlogError::InvalidRevision) => {
32 // TODO: support for the rest of the language here.
33 let msg = format!("cannot parse revset '{}'", input);
34 Err(HgError::unsupported(msg).into())
35 }
36 result => return result,
29 }
37 }
30
31 // TODO: support for the rest of the language here.
32
33 Err(
34 HgError::unsupported(format!("cannot parse revset '{}'", input))
35 .into(),
36 )
37 }
38 }
38
39
39 /// Resolve the small subset of the language suitable for revlogs other than
40 /// Resolve the small subset of the language suitable for revlogs other than
@@ -46,8 +47,14 b' pub fn resolve_rev_number_or_hex_prefix('
46 input: &str,
47 input: &str,
47 revlog: &Revlog,
48 revlog: &Revlog,
48 ) -> Result<Revision, RevlogError> {
49 ) -> Result<Revision, RevlogError> {
50 // The Python equivalent of this is part of `revsymbol` in
51 // `mercurial/scmutil.py`
52
49 if let Ok(integer) = input.parse::<i32>() {
53 if let Ok(integer) = input.parse::<i32>() {
50 if integer >= 0 && revlog.has_rev(integer) {
54 if integer.to_string() == input
55 && integer >= 0
56 && revlog.has_rev(integer)
57 {
51 return Ok(integer);
58 return Ok(integer);
52 }
59 }
53 }
60 }
@@ -56,7 +63,7 b' pub fn resolve_rev_number_or_hex_prefix('
56 {
63 {
57 return Err(RevlogError::WDirUnsupported);
64 return Err(RevlogError::WDirUnsupported);
58 }
65 }
59 return revlog.get_node_rev(prefix);
66 return revlog.rev_from_node(prefix);
60 }
67 }
61 Err(RevlogError::InvalidRevision)
68 Err(RevlogError::InvalidRevision)
62 }
69 }
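
The added `integer.to_string() == input` round-trip check means only the canonical decimal spelling of a revision number is accepted, matching Python's `revsymbol`: inputs like `010` or `+5`, which `i32::parse` happily accepts, now fall through to node-prefix resolution instead of being misread as revision numbers. A small demonstration of what the guard filters (the helper name is ours):

    fn looks_like_rev_number(input: &str) -> bool {
        match input.parse::<i32>() {
            // Round-trip check: only the canonical decimal form counts.
            Ok(integer) => integer.to_string() == input && integer >= 0,
            Err(_) => false,
        }
    }

    fn main() {
        assert!(looks_like_rev_number("42"));
        assert!(!looks_like_rev_number("042")); // leading zero: try as node prefix
        assert!(!looks_like_rev_number("+42")); // parses as 42, but not canonical
        assert!(!looks_like_rev_number("-1")); // negative revisions rejected here
    }
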
@@ -67,36 +67,35 b' where'
67 }
67 }
68
68
69 pub trait SliceExt {
69 pub trait SliceExt {
70 fn trim_end_newlines(&self) -> &Self;
71 fn trim_end(&self) -> &Self;
70 fn trim_end(&self) -> &Self;
72 fn trim_start(&self) -> &Self;
71 fn trim_start(&self) -> &Self;
72 fn trim_end_matches(&self, f: impl FnMut(u8) -> bool) -> &Self;
73 fn trim_start_matches(&self, f: impl FnMut(u8) -> bool) -> &Self;
73 fn trim(&self) -> &Self;
74 fn trim(&self) -> &Self;
74 fn drop_prefix(&self, needle: &Self) -> Option<&Self>;
75 fn drop_prefix(&self, needle: &Self) -> Option<&Self>;
75 fn split_2(&self, separator: u8) -> Option<(&[u8], &[u8])>;
76 fn split_2(&self, separator: u8) -> Option<(&[u8], &[u8])>;
76 }
77 fn split_2_by_slice(&self, separator: &[u8]) -> Option<(&[u8], &[u8])>;
77
78 #[allow(clippy::trivially_copy_pass_by_ref)]
79 fn is_not_whitespace(c: &u8) -> bool {
80 !(*c as char).is_whitespace()
81 }
78 }
82
79
83 impl SliceExt for [u8] {
80 impl SliceExt for [u8] {
84 fn trim_end_newlines(&self) -> &[u8] {
81 fn trim_end(&self) -> &[u8] {
85 if let Some(last) = self.iter().rposition(|&byte| byte != b'\n') {
82 self.trim_end_matches(|byte| byte.is_ascii_whitespace())
83 }
84
85 fn trim_start(&self) -> &[u8] {
86 self.trim_start_matches(|byte| byte.is_ascii_whitespace())
87 }
88
89 fn trim_end_matches(&self, mut f: impl FnMut(u8) -> bool) -> &Self {
90 if let Some(last) = self.iter().rposition(|&byte| !f(byte)) {
86 &self[..=last]
91 &self[..=last]
87 } else {
92 } else {
88 &[]
93 &[]
89 }
94 }
90 }
95 }
91 fn trim_end(&self) -> &[u8] {
96
92 if let Some(last) = self.iter().rposition(is_not_whitespace) {
97 fn trim_start_matches(&self, mut f: impl FnMut(u8) -> bool) -> &Self {
93 &self[..=last]
98 if let Some(first) = self.iter().position(|&byte| !f(byte)) {
94 } else {
95 &[]
96 }
97 }
98 fn trim_start(&self) -> &[u8] {
99 if let Some(first) = self.iter().position(is_not_whitespace) {
100 &self[first..]
99 &self[first..]
101 } else {
100 } else {
102 &[]
101 &[]
@@ -136,6 +135,14 b' impl SliceExt for [u8] {'
136 let b = iter.next()?;
135 let b = iter.next()?;
137 Some((a, b))
136 Some((a, b))
138 }
137 }
138
139 fn split_2_by_slice(&self, separator: &[u8]) -> Option<(&[u8], &[u8])> {
140 if let Some(pos) = find_slice_in_slice(self, separator) {
141 Some((&self[..pos], &self[pos + separator.len()..]))
142 } else {
143 None
144 }
145 }
139 }
146 }
140
147
141 pub trait Escaped {
148 pub trait Escaped {
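
`trim_end`/`trim_start` are now expressed through the new predicate-taking `trim_end_matches`/`trim_start_matches` (replacing the old `is_not_whitespace` helper that round-tripped through `char`), and `split_2_by_slice` splits a byte slice around the first occurrence of a multi-byte separator. A free-function sketch of the two additions, with a naive window scan standing in for `find_slice_in_slice`:

    /// Trim trailing bytes matching `f`, like the new `SliceExt::trim_end_matches`.
    fn trim_end_matches(s: &[u8], f: impl Fn(u8) -> bool) -> &[u8] {
        match s.iter().rposition(|&b| !f(b)) {
            Some(last) => &s[..=last],
            None => &[],
        }
    }

    /// Split around the first occurrence of `sep`, like `split_2_by_slice`.
    fn split_2_by_slice<'a>(s: &'a [u8], sep: &[u8]) -> Option<(&'a [u8], &'a [u8])> {
        // Naive stand-in for `find_slice_in_slice`.
        let pos = s.windows(sep.len()).position(|w| w == sep)?;
        Some((&s[..pos], &s[pos + sep.len()..]))
    }

    fn main() {
        assert_eq!(trim_end_matches(b"path//\n\n", |b| b == b'\n'), b"path//");
        assert_eq!(
            split_2_by_slice(b"key: value", b": "),
            Some((&b"key"[..], &b"value"[..]))
        );
    }
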
@@ -18,7 +18,6 b' use lazy_static::lazy_static;'
18 use same_file::is_same_file;
18 use same_file::is_same_file;
19 use std::borrow::{Cow, ToOwned};
19 use std::borrow::{Cow, ToOwned};
20 use std::ffi::{OsStr, OsString};
20 use std::ffi::{OsStr, OsString};
21 use std::fs::Metadata;
22 use std::iter::FusedIterator;
21 use std::iter::FusedIterator;
23 use std::ops::Deref;
22 use std::ops::Deref;
24 use std::path::{Path, PathBuf};
23 use std::path::{Path, PathBuf};
@@ -181,38 +180,6 b' pub fn lower_clean(bytes: &[u8]) -> Vec<'
181 hfs_ignore_clean(&bytes.to_ascii_lowercase())
180 hfs_ignore_clean(&bytes.to_ascii_lowercase())
182 }
181 }
183
182
184 #[derive(Eq, PartialEq, Ord, PartialOrd, Copy, Clone)]
185 pub struct HgMetadata {
186 pub st_dev: u64,
187 pub st_mode: u32,
188 pub st_nlink: u64,
189 pub st_size: u64,
190 pub st_mtime: i64,
191 pub st_ctime: i64,
192 }
193
194 // TODO support other plaforms
195 #[cfg(unix)]
196 impl HgMetadata {
197 pub fn from_metadata(metadata: Metadata) -> Self {
198 use std::os::unix::fs::MetadataExt;
199 Self {
200 st_dev: metadata.dev(),
201 st_mode: metadata.mode(),
202 st_nlink: metadata.nlink(),
203 st_size: metadata.size(),
204 st_mtime: metadata.mtime(),
205 st_ctime: metadata.ctime(),
206 }
207 }
208
209 pub fn is_symlink(&self) -> bool {
210 // This is way too manual, but `HgMetadata` will go away in the
211 // near-future dirstate rewrite anyway.
212 self.st_mode & 0170000 == 0120000
213 }
214 }
215
216 /// Returns the canonical path of `name`, given `cwd` and `root`
183 /// Returns the canonical path of `name`, given `cwd` and `root`
217 pub fn canonical_path(
184 pub fn canonical_path(
218 root: impl AsRef<Path>,
185 root: impl AsRef<Path>,
@@ -220,13 +220,11 b' impl HgPath {'
220 ),
220 ),
221 }
221 }
222 }
222 }
223 pub fn join<T: ?Sized + AsRef<Self>>(&self, other: &T) -> HgPathBuf {
223
224 let mut inner = self.inner.to_owned();
224 pub fn join(&self, path: &HgPath) -> HgPathBuf {
225 if !inner.is_empty() && inner.last() != Some(&b'/') {
225 let mut buf = self.to_owned();
226 inner.push(b'/');
226 buf.push(path);
227 }
227 buf
228 inner.extend(other.as_ref().bytes());
229 HgPathBuf::from_bytes(&inner)
230 }
228 }
231
229
232 pub fn components(&self) -> impl Iterator<Item = &HgPath> {
230 pub fn components(&self) -> impl Iterator<Item = &HgPath> {
@@ -405,7 +403,15 b' impl HgPathBuf {'
405 pub fn new() -> Self {
403 pub fn new() -> Self {
406 Default::default()
404 Default::default()
407 }
405 }
408 pub fn push(&mut self, byte: u8) {
406
407 pub fn push<T: ?Sized + AsRef<HgPath>>(&mut self, other: &T) -> () {
408 if !self.inner.is_empty() && self.inner.last() != Some(&b'/') {
409 self.inner.push(b'/');
410 }
411 self.inner.extend(other.as_ref().bytes())
412 }
413
414 pub fn push_byte(&mut self, byte: u8) {
409 self.inner.push(byte);
415 self.inner.push(byte);
410 }
416 }
411 pub fn from_bytes(s: &[u8]) -> HgPathBuf {
417 pub fn from_bytes(s: &[u8]) -> HgPathBuf {
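
`HgPath::join` is now a thin wrapper over the new `HgPathBuf::push`, which inserts a `/` separator only when the buffer is non-empty and does not already end in one; the old byte-appending `push` was renamed `push_byte`. The separator rule, sketched over a raw `Vec<u8>` (the helper name is ours):

    /// Append `other` to `base`, inserting a '/' separator when needed,
    /// mirroring the new `HgPathBuf::push` above.
    fn push_path(base: &mut Vec<u8>, other: &[u8]) {
        if !base.is_empty() && base.last() != Some(&b'/') {
            base.push(b'/');
        }
        base.extend_from_slice(other);
    }

    fn main() {
        let mut p = b"dir".to_vec();
        push_path(&mut p, b"file"); // separator inserted
        assert_eq!(p, b"dir/file");

        let mut q = Vec::new();
        push_path(&mut q, b"file"); // empty base: no leading '/'
        assert_eq!(q, b"file");

        let mut r = b"dir/".to_vec();
        push_path(&mut r, b"file"); // existing trailing '/': not doubled
        assert_eq!(r, b"dir/file");
    }
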
@@ -9,7 +9,7 b" name='rusthg'"
9 crate-type = ["cdylib"]
9 crate-type = ["cdylib"]
10
10
11 [features]
11 [features]
12 default = ["python27"]
12 default = ["python3"]
13
13
14 # Features to build an extension module:
14 # Features to build an extension module:
15 python27 = ["cpython/python27-sys", "cpython/extension-module-2-7"]
15 python27 = ["cpython/python27-sys", "cpython/extension-module-2-7"]
@@ -21,12 +21,10 b' python27-bin = ["cpython/python27-sys"]'
21 python3-bin = ["cpython/python3-sys"]
21 python3-bin = ["cpython/python3-sys"]
22
22
23 [dependencies]
23 [dependencies]
24 cpython = { version = "0.7.0", default-features = false }
24 crossbeam-channel = "0.4"
25 crossbeam-channel = "0.4"
25 hg-core = { path = "../hg-core"}
26 hg-core = { path = "../hg-core"}
26 libc = '*'
27 libc = "0.2"
27 log = "0.4.8"
28 log = "0.4.8"
28 env_logger = "0.7.1"
29 env_logger = "0.7.1"
29
30 stable_deref_trait = "1.2.0"
30 [dependencies.cpython]
31 version = "0.6.0"
32 default-features = false
@@ -13,58 +13,7 b' use hg::copy_tracing::ChangedFiles;'
13 use hg::copy_tracing::CombineChangesetCopies;
13 use hg::copy_tracing::CombineChangesetCopies;
14 use hg::Revision;
14 use hg::Revision;
15
15
16 use self::pybytes_with_data::PyBytesWithData;
16 use crate::pybytes_deref::PyBytesDeref;
17
18 // Module to encapsulate private fields
19 mod pybytes_with_data {
20 use cpython::{PyBytes, Python};
21
22 /// Safe abstraction over a `PyBytes` together with the `&[u8]` slice
23 /// that borrows it.
24 ///
25 /// Calling `PyBytes::data` requires a GIL marker but we want to access the
26 /// data in a thread that (ideally) does not need to acquire the GIL.
27 /// This type allows separating the call and the use.
28 pub(super) struct PyBytesWithData {
29 #[allow(unused)]
30 keep_alive: PyBytes,
31
32 /// Borrows the buffer inside `self.keep_alive`,
33 /// but the borrow-checker cannot express self-referential structs.
34 data: *const [u8],
35 }
36
37 fn require_send<T: Send>() {}
38
39 #[allow(unused)]
40 fn static_assert_pybytes_is_send() {
41 require_send::<PyBytes>;
42 }
43
44 // Safety: PyBytes is Send. Raw pointers are not by default,
45 // but here sending one to another thread is fine since we ensure it stays
46 // valid.
47 unsafe impl Send for PyBytesWithData {}
48
49 impl PyBytesWithData {
50 pub fn new(py: Python, bytes: PyBytes) -> Self {
51 Self {
52 data: bytes.data(py),
53 keep_alive: bytes,
54 }
55 }
56
57 pub fn data(&self) -> &[u8] {
58 // Safety: the raw pointer is valid as long as the PyBytes is still
59 // alive, and the returned slice borrows `self`.
60 unsafe { &*self.data }
61 }
62
63 pub fn unwrap(self) -> PyBytes {
64 self.keep_alive
65 }
66 }
67 }
68
17
69 /// Combines copies information contained into revision `revs` to build a copy
18 /// Combines copies information contained into revision `revs` to build a copy
70 /// map.
19 /// map.
@@ -123,7 +72,7 b' pub fn combine_changeset_copies_wrapper('
123 //
72 //
124 // TODO: tweak the bound?
73 // TODO: tweak the bound?
125 let (rev_info_sender, rev_info_receiver) =
74 let (rev_info_sender, rev_info_receiver) =
126 crossbeam_channel::bounded::<RevInfo<PyBytesWithData>>(1000);
75 crossbeam_channel::bounded::<RevInfo<PyBytesDeref>>(1000);
127
76
128 // This channel (going the other way around) however is unbounded.
77 // This channel (going the other way around) however is unbounded.
129 // If they were both bounded, there might potentially be deadlocks
78 // If they were both bounded, there might potentially be deadlocks
@@ -143,7 +92,7 b' pub fn combine_changeset_copies_wrapper('
143 CombineChangesetCopies::new(children_count);
92 CombineChangesetCopies::new(children_count);
144 for (rev, p1, p2, opt_bytes) in rev_info_receiver {
93 for (rev, p1, p2, opt_bytes) in rev_info_receiver {
145 let files = match &opt_bytes {
94 let files = match &opt_bytes {
146 Some(raw) => ChangedFiles::new(raw.data()),
95 Some(raw) => ChangedFiles::new(raw.as_ref()),
147 // Python None was extracted to Option::None,
96 // Python None was extracted to Option::None,
148 // meaning there was no copy data.
97 // meaning there was no copy data.
149 None => ChangedFiles::new_empty(),
98 None => ChangedFiles::new_empty(),
@@ -169,7 +118,7 b' pub fn combine_changeset_copies_wrapper('
169
118
170 for rev_info in revs_info {
119 for rev_info in revs_info {
171 let (rev, p1, p2, opt_bytes) = rev_info?;
120 let (rev, p1, p2, opt_bytes) = rev_info?;
172 let opt_bytes = opt_bytes.map(|b| PyBytesWithData::new(py, b));
121 let opt_bytes = opt_bytes.map(|b| PyBytesDeref::new(py, b));
173
122
174 // We’d prefer to avoid the child thread calling into Python code,
123 // We’d prefer to avoid the child thread calling into Python code,
175 // but this avoids a potential deadlock on the GIL if it does:
124 // but this avoids a potential deadlock on the GIL if it does:
@@ -12,101 +12,17 b''
12 mod copymap;
12 mod copymap;
13 mod dirs_multiset;
13 mod dirs_multiset;
14 mod dirstate_map;
14 mod dirstate_map;
15 mod dispatch;
15 mod item;
16 mod non_normal_entries;
17 mod owning;
18 mod status;
16 mod status;
17 use self::item::DirstateItem;
19 use crate::{
18 use crate::{
20 dirstate::{
19 dirstate::{
21 dirs_multiset::Dirs, dirstate_map::DirstateMap, status::status_wrapper,
20 dirs_multiset::Dirs, dirstate_map::DirstateMap, status::status_wrapper,
22 },
21 },
23 exceptions,
22 exceptions,
24 };
23 };
25 use cpython::{
24 use cpython::{PyBytes, PyDict, PyList, PyModule, PyObject, PyResult, Python};
26 exc, PyBytes, PyDict, PyErr, PyList, PyModule, PyObject, PyResult,
27 PySequence, Python,
28 };
29 use hg::dirstate_tree::on_disk::V2_FORMAT_MARKER;
25 use hg::dirstate_tree::on_disk::V2_FORMAT_MARKER;
30 use hg::{utils::hg_path::HgPathBuf, DirstateEntry, EntryState, StateMap};
31 use libc::{c_char, c_int};
32 use std::convert::TryFrom;
33
34 // C code uses a custom `dirstate_tuple` type, checks in multiple instances
35 // for this type, and raises a Python `Exception` if the check does not pass.
36 // Because this type differs only in name from the regular Python tuple, it
37 // would be a good idea in the near future to remove it entirely to allow
38 // for a pure Python tuple of the same effective structure to be used,
39 // rendering this type and the capsule below useless.
40 py_capsule_fn!(
41 from mercurial.cext.parsers import make_dirstate_item_CAPI
42 as make_dirstate_item_capi
43 signature (
44 state: c_char,
45 mode: c_int,
46 size: c_int,
47 mtime: c_int,
48 ) -> *mut RawPyObject
49 );
50
51 pub fn make_dirstate_item(
52 py: Python,
53 entry: &DirstateEntry,
54 ) -> PyResult<PyObject> {
55 let &DirstateEntry {
56 state,
57 mode,
58 size,
59 mtime,
60 } = entry;
61 // Explicitly go through u8 first, then cast to platform-specific `c_char`
62 // because Into<u8> has a specific implementation while `as c_char` would
63 // just do a naive enum cast.
64 let state_code: u8 = state.into();
65 make_dirstate_item_raw(py, state_code, mode, size, mtime)
66 }
67
68 pub fn make_dirstate_item_raw(
69 py: Python,
70 state: u8,
71 mode: i32,
72 size: i32,
73 mtime: i32,
74 ) -> PyResult<PyObject> {
75 let make = make_dirstate_item_capi::retrieve(py)?;
76 let maybe_obj = unsafe {
77 let ptr = make(state as c_char, mode, size, mtime);
78 PyObject::from_owned_ptr_opt(py, ptr)
79 };
80 maybe_obj.ok_or_else(|| PyErr::fetch(py))
81 }
82
83 pub fn extract_dirstate(py: Python, dmap: &PyDict) -> Result<StateMap, PyErr> {
84 dmap.items(py)
85 .iter()
86 .map(|(filename, stats)| {
87 let stats = stats.extract::<PySequence>(py)?;
88 let state = stats.get_item(py, 0)?.extract::<PyBytes>(py)?;
89 let state =
90 EntryState::try_from(state.data(py)[0]).map_err(|e| {
91 PyErr::new::<exc::ValueError, _>(py, e.to_string())
92 })?;
93 let mode = stats.get_item(py, 1)?.extract(py)?;
94 let size = stats.get_item(py, 2)?.extract(py)?;
95 let mtime = stats.get_item(py, 3)?.extract(py)?;
96 let filename = filename.extract::<PyBytes>(py)?;
97 let filename = filename.data(py);
98 Ok((
99 HgPathBuf::from(filename.to_owned()),
100 DirstateEntry {
101 state,
102 mode,
103 size,
104 mtime,
105 },
106 ))
107 })
108 .collect()
109 }
110
26
111 /// Create the module, with `__package__` given from parent
27 /// Create the module, with `__package__` given from parent
112 pub fn init_module(py: Python, package: &str) -> PyResult<PyModule> {
28 pub fn init_module(py: Python, package: &str) -> PyResult<PyModule> {
@@ -125,6 +41,7 b' pub fn init_module(py: Python, package: '
125 )?;
41 )?;
126 m.add_class::<Dirs>(py)?;
42 m.add_class::<Dirs>(py)?;
127 m.add_class::<DirstateMap>(py)?;
43 m.add_class::<DirstateMap>(py)?;
44 m.add_class::<DirstateItem>(py)?;
128 m.add(py, "V2_FORMAT_MARKER", PyBytes::new(py, V2_FORMAT_MARKER))?;
45 m.add(py, "V2_FORMAT_MARKER", PyBytes::new(py, V2_FORMAT_MARKER))?;
129 m.add(
46 m.add(
130 py,
47 py,
@@ -137,7 +54,7 b' pub fn init_module(py: Python, package: '
137 matcher: PyObject,
54 matcher: PyObject,
138 ignorefiles: PyList,
55 ignorefiles: PyList,
139 check_exec: bool,
56 check_exec: bool,
140 last_normal_time: i64,
57 last_normal_time: (u32, u32),
141 list_clean: bool,
58 list_clean: bool,
142 list_ignored: bool,
59 list_ignored: bool,
143 list_unknown: bool,
60 list_unknown: bool,
@@ -15,9 +15,9 b' use std::cell::RefCell;'
15
15
16 use crate::dirstate::dirstate_map::v2_error;
16 use crate::dirstate::dirstate_map::v2_error;
17 use crate::dirstate::dirstate_map::DirstateMap;
17 use crate::dirstate::dirstate_map::DirstateMap;
18 use hg::dirstate::CopyMapIter;
18 use hg::dirstate_tree::on_disk::DirstateV2ParseError;
19 use hg::dirstate_tree::on_disk::DirstateV2ParseError;
19 use hg::utils::hg_path::HgPath;
20 use hg::utils::hg_path::HgPath;
20 use hg::CopyMapIter;
21
21
22 py_class!(pub class CopyMap |py| {
22 py_class!(pub class CopyMap |py| {
23 data dirstate_map: DirstateMap;
23 data dirstate_map: DirstateMap;
@@ -9,19 +9,15 b''
9 //! `hg-core` package.
9 //! `hg-core` package.
10
10
11 use std::cell::RefCell;
11 use std::cell::RefCell;
12 use std::convert::TryInto;
13
12
14 use cpython::{
13 use cpython::{
15 exc, ObjectProtocol, PyBytes, PyClone, PyDict, PyErr, PyObject, PyResult,
14 exc, ObjectProtocol, PyBytes, PyClone, PyDict, PyErr, PyObject, PyResult,
16 Python, UnsafePyLeaked,
15 Python, UnsafePyLeaked,
17 };
16 };
18
17
19 use crate::dirstate::extract_dirstate;
20 use hg::{
18 use hg::{
21 errors::HgError,
22 utils::hg_path::{HgPath, HgPathBuf},
19 utils::hg_path::{HgPath, HgPathBuf},
23 DirsMultiset, DirsMultisetIter, DirstateError, DirstateMapError,
20 DirsMultiset, DirsMultisetIter, DirstateMapError,
24 EntryState,
25 };
21 };
26
22
27 py_class!(pub class Dirs |py| {
23 py_class!(pub class Dirs |py| {
@@ -32,25 +28,11 b' py_class!(pub class Dirs |py| {'
32 def __new__(
28 def __new__(
33 _cls,
29 _cls,
34 map: PyObject,
30 map: PyObject,
35 skip: Option<PyObject> = None
36 ) -> PyResult<Self> {
31 ) -> PyResult<Self> {
37 let mut skip_state: Option<EntryState> = None;
32 let inner = if map.cast_as::<PyDict>(py).is_ok() {
38 if let Some(skip) = skip {
33 let err = "pathutil.dirs() with a dict should only be used by the Python dirstatemap \
39 skip_state = Some(
34 and should not be used when Rust is enabled";
40 skip.extract::<PyBytes>(py)?.data(py)[0]
35 return Err(PyErr::new::<exc::TypeError, _>(py, err.to_string()))
41 .try_into()
42 .map_err(|e: HgError| {
43 PyErr::new::<exc::ValueError, _>(py, e.to_string())
44 })?,
45 );
46 }
47 let inner = if let Ok(map) = map.cast_as::<PyDict>(py) {
48 let dirstate = extract_dirstate(py, &map)?;
49 let dirstate = dirstate.iter().map(|(k, v)| Ok((k, *v)));
50 DirsMultiset::from_dirstate(dirstate, skip_state)
51 .map_err(|e: DirstateError| {
52 PyErr::new::<exc::ValueError, _>(py, e.to_string())
53 })?
54 } else {
36 } else {
55 let map: Result<Vec<HgPathBuf>, PyErr> = map
37 let map: Result<Vec<HgPathBuf>, PyErr> = map
56 .iter(py)?
38 .iter(py)?
@@ -12,32 +12,24 b' use std::cell::{RefCell, RefMut};'
12 use std::convert::TryInto;
12 use std::convert::TryInto;
13
13
14 use cpython::{
14 use cpython::{
15 exc, ObjectProtocol, PyBool, PyBytes, PyClone, PyDict, PyErr, PyList,
15 exc, PyBool, PyBytes, PyClone, PyDict, PyErr, PyList, PyNone, PyObject,
16 PyObject, PyResult, PySet, PyString, Python, PythonObject, ToPyObject,
16 PyResult, Python, PythonObject, ToPyObject, UnsafePyLeaked,
17 UnsafePyLeaked,
18 };
17 };
19
18
20 use crate::{
19 use crate::{
21 dirstate::copymap::{CopyMap, CopyMapItemsIterator, CopyMapKeysIterator},
20 dirstate::copymap::{CopyMap, CopyMapItemsIterator, CopyMapKeysIterator},
22 dirstate::make_dirstate_item,
21 dirstate::item::{timestamp, DirstateItem},
23 dirstate::make_dirstate_item_raw,
22 pybytes_deref::PyBytesDeref,
24 dirstate::non_normal_entries::{
25 NonNormalEntries, NonNormalEntriesIterator,
26 },
27 dirstate::owning::OwningDirstateMap,
28 parsers::dirstate_parents_to_pytuple,
29 };
23 };
30 use hg::{
24 use hg::{
31 dirstate::parsers::Timestamp,
25 dirstate::StateMapIter,
32 dirstate::MTIME_UNSET,
26 dirstate_tree::dirstate_map::DirstateMap as TreeDirstateMap,
33 dirstate::SIZE_NON_NORMAL,
34 dirstate_tree::dispatch::DirstateMapMethods,
35 dirstate_tree::on_disk::DirstateV2ParseError,
27 dirstate_tree::on_disk::DirstateV2ParseError,
28 dirstate_tree::owning::OwningDirstateMap,
36 revlog::Node,
29 revlog::Node,
37 utils::files::normalize_case,
30 utils::files::normalize_case,
38 utils::hg_path::{HgPath, HgPathBuf},
31 utils::hg_path::{HgPath, HgPathBuf},
39 DirstateEntry, DirstateError, DirstateMap as RustDirstateMap,
32 DirstateEntry, DirstateError, DirstateParents, EntryState,
40 DirstateParents, EntryState, StateMapIter,
41 };
33 };
42
34
43 // TODO
35 // TODO
@@ -53,26 +45,26 b' use hg::{'
53 // All attributes also have to have a separate refcount data attribute for
45 // All attributes also have to have a separate refcount data attribute for
54 // leaks, with all methods that go along for reference sharing.
46 // leaks, with all methods that go along for reference sharing.
55 py_class!(pub class DirstateMap |py| {
47 py_class!(pub class DirstateMap |py| {
56 @shared data inner: Box<dyn DirstateMapMethods + Send>;
48 @shared data inner: OwningDirstateMap;
57
49
58 /// Returns a `(dirstate_map, parents)` tuple
50 /// Returns a `(dirstate_map, parents)` tuple
59 @staticmethod
51 @staticmethod
60 def new_v1(
52 def new_v1(
61 use_dirstate_tree: bool,
62 on_disk: PyBytes,
53 on_disk: PyBytes,
63 ) -> PyResult<PyObject> {
54 ) -> PyResult<PyObject> {
64 let (inner, parents) = if use_dirstate_tree {
55 let on_disk = PyBytesDeref::new(py, on_disk);
65 let (map, parents) = OwningDirstateMap::new_v1(py, on_disk)
56 let mut map = OwningDirstateMap::new_empty(on_disk);
66 .map_err(|e| dirstate_error(py, e))?;
57 let (on_disk, map_placeholder) = map.get_pair_mut();
67 (Box::new(map) as _, parents)
58
68 } else {
59 let (actual_map, parents) = TreeDirstateMap::new_v1(on_disk)
69 let bytes = on_disk.data(py);
60 .map_err(|e| dirstate_error(py, e))?;
70 let mut map = RustDirstateMap::default();
61 *map_placeholder = actual_map;
71 let parents = map.read(bytes).map_err(|e| dirstate_error(py, e))?;
62 let map = Self::create_instance(py, map)?;
72 (Box::new(map) as _, parents)
63 let parents = parents.map(|p| {
73 };
64 let p1 = PyBytes::new(py, p.p1.as_bytes());
74 let map = Self::create_instance(py, inner)?;
65 let p2 = PyBytes::new(py, p.p2.as_bytes());
75 let parents = parents.map(|p| dirstate_parents_to_pytuple(py, &p));
66 (p1, p2)
67 });
76 Ok((map, parents).to_py_object(py).into_object())
68 Ok((map, parents).to_py_object(py).into_object())
77 }
69 }
78
70
@@ -86,10 +78,13 b' py_class!(pub class DirstateMap |py| {'
86 let dirstate_error = |e: DirstateError| {
78 let dirstate_error = |e: DirstateError| {
87 PyErr::new::<exc::OSError, _>(py, format!("Dirstate error: {:?}", e))
79 PyErr::new::<exc::OSError, _>(py, format!("Dirstate error: {:?}", e))
88 };
80 };
89 let inner = OwningDirstateMap::new_v2(
81 let on_disk = PyBytesDeref::new(py, on_disk);
90 py, on_disk, data_size, tree_metadata,
82 let mut map = OwningDirstateMap::new_empty(on_disk);
83 let (on_disk, map_placeholder) = map.get_pair_mut();
84 *map_placeholder = TreeDirstateMap::new_v2(
85 on_disk, data_size, tree_metadata.data(py),
91 ).map_err(dirstate_error)?;
86 ).map_err(dirstate_error)?;
92 let map = Self::create_instance(py, Box::new(inner))?;
87 let map = Self::create_instance(py, map)?;
93 Ok(map.into_object())
88 Ok(map.into_object())
94 }
89 }
95
90
@@ -111,79 +106,38 b' py_class!(pub class DirstateMap |py| {'
111 .map_err(|e| v2_error(py, e))?
106 .map_err(|e| v2_error(py, e))?
112 {
107 {
113 Some(entry) => {
108 Some(entry) => {
114 Ok(Some(make_dirstate_item(py, &entry)?))
109 Ok(Some(DirstateItem::new_as_pyobject(py, entry)?))
115 },
110 },
116 None => Ok(default)
111 None => Ok(default)
117 }
112 }
118 }
113 }
119
114
120 def set_v1(&self, path: PyObject, item: PyObject) -> PyResult<PyObject> {
115 def set_dirstate_item(
116 &self,
117 path: PyObject,
118 item: DirstateItem
119 ) -> PyResult<PyObject> {
121 let f = path.extract::<PyBytes>(py)?;
120 let f = path.extract::<PyBytes>(py)?;
122 let filename = HgPath::new(f.data(py));
121 let filename = HgPath::new(f.data(py));
123 let state = item.getattr(py, "state")?.extract::<PyBytes>(py)?;
122 self.inner(py)
124 let state = state.data(py)[0];
123 .borrow_mut()
125 let entry = DirstateEntry {
124 .set_entry(filename, item.get_entry(py))
126 state: state.try_into().expect("state is always valid"),
125 .map_err(|e| v2_error(py, e))?;
127 mtime: item.getattr(py, "mtime")?.extract(py)?,
128 size: item.getattr(py, "size")?.extract(py)?,
129 mode: item.getattr(py, "mode")?.extract(py)?,
130 };
131 self.inner(py).borrow_mut().set_v1(filename, entry);
132 Ok(py.None())
126 Ok(py.None())
133 }
127 }
134
128
135 def addfile(
129 def addfile(
136 &self,
130 &self,
137 f: PyObject,
131 f: PyBytes,
138 mode: PyObject,
132 item: DirstateItem,
139 size: PyObject,
133 ) -> PyResult<PyNone> {
140 mtime: PyObject,
141 added: PyObject,
142 merged: PyObject,
143 from_p2: PyObject,
144 possibly_dirty: PyObject,
145 ) -> PyResult<PyObject> {
146 let f = f.extract::<PyBytes>(py)?;
147 let filename = HgPath::new(f.data(py));
134 let filename = HgPath::new(f.data(py));
148 let mode = if mode.is_none(py) {
135 let entry = item.get_entry(py);
149 // fallback default value
136 self.inner(py)
150 0
137 .borrow_mut()
151 } else {
138 .add_file(filename, entry)
152 mode.extract(py)?
139 .map_err(|e| dirstate_error(py, e))?;
153 };
140 Ok(PyNone)
154 let size = if size.is_none(py) {
155 // fallback default value
156 SIZE_NON_NORMAL
157 } else {
158 size.extract(py)?
159 };
160 let mtime = if mtime.is_none(py) {
161 // fallback default value
162 MTIME_UNSET
163 } else {
164 mtime.extract(py)?
165 };
166 let entry = DirstateEntry {
167 // XXX Arbitrary default value since the value is determined later
168 state: EntryState::Normal,
169 mode: mode,
170 size: size,
171 mtime: mtime,
172 };
173 let added = added.extract::<PyBool>(py)?.is_true();
174 let merged = merged.extract::<PyBool>(py)?.is_true();
175 let from_p2 = from_p2.extract::<PyBool>(py)?.is_true();
176 let possibly_dirty = possibly_dirty.extract::<PyBool>(py)?.is_true();
177 self.inner(py).borrow_mut().add_file(
178 filename,
179 entry,
180 added,
181 merged,
182 from_p2,
183 possibly_dirty
184 ).and(Ok(py.None())).or_else(|e: DirstateError| {
185 Err(PyErr::new::<exc::ValueError, _>(py, e.to_string()))
186 })
187 }
141 }
188
142
189 def removefile(
143 def removefile(
@@ -205,135 +159,15 b' py_class!(pub class DirstateMap |py| {'
205 Ok(py.None())
159 Ok(py.None())
206 }
160 }
207
161
208 def dropfile(
162 def drop_item_and_copy_source(
209 &self,
210 f: PyObject,
211 ) -> PyResult<PyBool> {
212 self.inner(py).borrow_mut()
213 .drop_file(
214 HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
215 )
216 .and_then(|b| Ok(b.to_py_object(py)))
217 .or_else(|e| {
218 Err(PyErr::new::<exc::OSError, _>(
219 py,
220 format!("Dirstate error: {}", e.to_string()),
221 ))
222 })
223 }
224
225 def clearambiguoustimes(
226 &self,
163 &self,
227 files: PyObject,
164 f: PyBytes,
228 now: PyObject
165 ) -> PyResult<PyNone> {
229 ) -> PyResult<PyObject> {
230 let files: PyResult<Vec<HgPathBuf>> = files
231 .iter(py)?
232 .map(|filename| {
233 Ok(HgPathBuf::from_bytes(
234 filename?.extract::<PyBytes>(py)?.data(py),
235 ))
236 })
237 .collect();
238 self.inner(py)
239 .borrow_mut()
240 .clear_ambiguous_times(files?, now.extract(py)?)
241 .map_err(|e| v2_error(py, e))?;
242 Ok(py.None())
243 }
244
245 def other_parent_entries(&self) -> PyResult<PyObject> {
246 let mut inner_shared = self.inner(py).borrow_mut();
247 let set = PySet::empty(py)?;
248 for path in inner_shared.iter_other_parent_paths() {
249 let path = path.map_err(|e| v2_error(py, e))?;
250 set.add(py, PyBytes::new(py, path.as_bytes()))?;
251 }
252 Ok(set.into_object())
253 }
254
255 def non_normal_entries(&self) -> PyResult<NonNormalEntries> {
256 NonNormalEntries::from_inner(py, self.clone_ref(py))
257 }
258
259 def non_normal_entries_contains(&self, key: PyObject) -> PyResult<bool> {
260 let key = key.extract::<PyBytes>(py)?;
261 self.inner(py)
166 self.inner(py)
262 .borrow_mut()
167 .borrow_mut()
263 .non_normal_entries_contains(HgPath::new(key.data(py)))
168 .drop_entry_and_copy_source(HgPath::new(f.data(py)))
264 .map_err(|e| v2_error(py, e))
169 .map_err(|e| dirstate_error(py, e))?;
265 }
170 Ok(PyNone)
266
267 def non_normal_entries_display(&self) -> PyResult<PyString> {
268 let mut inner = self.inner(py).borrow_mut();
269 let paths = inner
270 .iter_non_normal_paths()
271 .collect::<Result<Vec<_>, _>>()
272 .map_err(|e| v2_error(py, e))?;
273 let formatted = format!("NonNormalEntries: {}", hg::utils::join_display(paths, ", "));
274 Ok(PyString::new(py, &formatted))
275 }
276
277 def non_normal_entries_remove(&self, key: PyObject) -> PyResult<PyObject> {
278 let key = key.extract::<PyBytes>(py)?;
279 let key = key.data(py);
280 let was_present = self
281 .inner(py)
282 .borrow_mut()
283 .non_normal_entries_remove(HgPath::new(key));
284 if !was_present {
285 let msg = String::from_utf8_lossy(key);
286 Err(PyErr::new::<exc::KeyError, _>(py, msg))
287 } else {
288 Ok(py.None())
289 }
290 }
291
292 def non_normal_entries_discard(&self, key: PyObject) -> PyResult<PyObject>
293 {
294 let key = key.extract::<PyBytes>(py)?;
295 self
296 .inner(py)
297 .borrow_mut()
298 .non_normal_entries_remove(HgPath::new(key.data(py)));
299 Ok(py.None())
300 }
301
302 def non_normal_entries_add(&self, key: PyObject) -> PyResult<PyObject> {
303 let key = key.extract::<PyBytes>(py)?;
304 self
305 .inner(py)
306 .borrow_mut()
307 .non_normal_entries_add(HgPath::new(key.data(py)));
308 Ok(py.None())
309 }
310
311 def non_normal_or_other_parent_paths(&self) -> PyResult<PyList> {
312 let mut inner = self.inner(py).borrow_mut();
313
314 let ret = PyList::new(py, &[]);
315 for filename in inner.non_normal_or_other_parent_paths() {
316 let filename = filename.map_err(|e| v2_error(py, e))?;
317 let as_pystring = PyBytes::new(py, filename.as_bytes());
318 ret.append(py, as_pystring.into_object());
319 }
320 Ok(ret)
321 }
322
323 def non_normal_entries_iter(&self) -> PyResult<NonNormalEntriesIterator> {
324 // Make sure the sets are defined before we no longer have a mutable
325 // reference to the dmap.
326 self.inner(py)
327 .borrow_mut()
328 .set_non_normal_other_parent_entries(false);
329
330 let leaked_ref = self.inner(py).leak_immutable();
331
332 NonNormalEntriesIterator::from_inner(py, unsafe {
333 leaked_ref.map(py, |o| {
334 o.iter_non_normal_paths_panic()
335 })
336 })
337 }
171 }
338
172
339 def hastrackeddir(&self, d: PyObject) -> PyResult<PyBool> {
173 def hastrackeddir(&self, d: PyObject) -> PyResult<PyBool> {
@@ -360,9 +194,9 b' py_class!(pub class DirstateMap |py| {'
360 &self,
194 &self,
361 p1: PyObject,
195 p1: PyObject,
362 p2: PyObject,
196 p2: PyObject,
363 now: PyObject
197 now: (u32, u32)
364 ) -> PyResult<PyBytes> {
198 ) -> PyResult<PyBytes> {
365 let now = Timestamp(now.extract(py)?);
199 let now = timestamp(py, now)?;
366
200
367 let mut inner = self.inner(py).borrow_mut();
201 let mut inner = self.inner(py).borrow_mut();
368 let parents = DirstateParents {
202 let parents = DirstateParents {
@@ -384,10 +218,10 b' py_class!(pub class DirstateMap |py| {'
384 /// instead of written to a new data file (False).
218 /// instead of written to a new data file (False).
385 def write_v2(
219 def write_v2(
386 &self,
220 &self,
387 now: PyObject,
221 now: (u32, u32),
388 can_append: bool,
222 can_append: bool,
389 ) -> PyResult<PyObject> {
223 ) -> PyResult<PyObject> {
390 let now = Timestamp(now.extract(py)?);
224 let now = timestamp(py, now)?;
391
225
392 let mut inner = self.inner(py).borrow_mut();
226 let mut inner = self.inner(py).borrow_mut();
393 let result = inner.pack_v2(now, can_append);
227 let result = inner.pack_v2(now, can_append);
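Both `set_parents` and `write_v2` now take `now` as a `(u32, u32)` pair of integer seconds and sub-second nanoseconds rather than an opaque `PyObject`. A minimal sketch, assuming a caller builds the pair from a nanosecond clock (the helper name is illustrative, not Mercurial's actual call site):

    import time

    def now_pair():
        # Split a nanosecond clock reading into
        # (seconds, sub-second nanoseconds).
        ns = time.time_ns()
        return (ns // 1_000_000_000, ns % 1_000_000_000)

    print(now_pair())  # e.g. (1633024800, 123456789)
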
@@ -409,7 +243,7 b' py_class!(pub class DirstateMap |py| {'
409 let dict = PyDict::new(py);
243 let dict = PyDict::new(py);
410 for item in self.inner(py).borrow_mut().iter() {
244 for item in self.inner(py).borrow_mut().iter() {
411 let (path, entry) = item.map_err(|e| v2_error(py, e))?;
245 let (path, entry) = item.map_err(|e| v2_error(py, e))?;
412 if entry.state != EntryState::Removed {
246 if entry.state() != EntryState::Removed {
413 let key = normalize_case(path);
247 let key = normalize_case(path);
414 let value = path;
248 let value = path;
415 dict.set_item(
249 dict.set_item(
@@ -444,7 +278,7 b' py_class!(pub class DirstateMap |py| {'
444 .map_err(|e| v2_error(py, e))?
278 .map_err(|e| v2_error(py, e))?
445 {
279 {
446 Some(entry) => {
280 Some(entry) => {
447 Ok(make_dirstate_item(py, &entry)?)
281 Ok(DirstateItem::new_as_pyobject(py, entry)?)
448 },
282 },
449 None => Err(PyErr::new::<exc::KeyError, _>(
283 None => Err(PyErr::new::<exc::KeyError, _>(
450 py,
284 py,
@@ -566,7 +400,9 b' py_class!(pub class DirstateMap |py| {'
566 .copy_map_remove(HgPath::new(key.data(py)))
400 .copy_map_remove(HgPath::new(key.data(py)))
567 .map_err(|e| v2_error(py, e))?
401 .map_err(|e| v2_error(py, e))?
568 {
402 {
569 Some(_) => Ok(None),
403 Some(copy) => Ok(Some(
404 PyBytes::new(py, copy.as_bytes()).into_object(),
405 )),
570 None => Ok(default),
406 None => Ok(default),
571 }
407 }
572 }
408 }
@@ -599,14 +435,14 b' py_class!(pub class DirstateMap |py| {'
599 Ok(dirs)
435 Ok(dirs)
600 }
436 }
601
437
602 def debug_iter(&self) -> PyResult<PyList> {
438 def debug_iter(&self, all: bool) -> PyResult<PyList> {
603 let dirs = PyList::new(py, &[]);
439 let dirs = PyList::new(py, &[]);
604 for item in self.inner(py).borrow().debug_iter() {
440 for item in self.inner(py).borrow().debug_iter(all) {
605 let (path, (state, mode, size, mtime)) =
441 let (path, (state, mode, size, mtime)) =
606 item.map_err(|e| v2_error(py, e))?;
442 item.map_err(|e| v2_error(py, e))?;
607 let path = PyBytes::new(py, path.as_bytes());
443 let path = PyBytes::new(py, path.as_bytes());
608 let item = make_dirstate_item_raw(py, state, mode, size, mtime)?;
444 let item = (path, state, mode, size, mtime);
609 dirs.append(py, (path, item).to_py_object(py).into_object())
445 dirs.append(py, item.to_py_object(py).into_object())
610 }
446 }
611 Ok(dirs)
447 Ok(dirs)
612 }
448 }
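`debug_iter` now takes an `all` flag and yields plain `(path, state, mode, size, mtime)` tuples instead of pairing each path with a constructed dirstate item. A short consumption sketch, assuming `dmap` is bound to such a `DirstateMap`:

    def dump_entries(dmap):
        # dmap: assumed rustext DirstateMap; debug_iter(True) yields
        # plain (path, state, mode, size, mtime) tuples.
        for path, state, mode, size, mtime in dmap.debug_iter(True):
            print(path, state, mode, size, mtime)
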
@@ -616,7 +452,7 b' impl DirstateMap {'
616 pub fn get_inner_mut<'a>(
452 pub fn get_inner_mut<'a>(
617 &'a self,
453 &'a self,
618 py: Python<'a>,
454 py: Python<'a>,
619 ) -> RefMut<'a, Box<dyn DirstateMapMethods + Send>> {
455 ) -> RefMut<'a, OwningDirstateMap> {
620 self.inner(py).borrow_mut()
456 self.inner(py).borrow_mut()
621 }
457 }
622 fn translate_key(
458 fn translate_key(
@@ -633,7 +469,7 b' impl DirstateMap {'
633 let (f, entry) = res.map_err(|e| v2_error(py, e))?;
469 let (f, entry) = res.map_err(|e| v2_error(py, e))?;
634 Ok(Some((
470 Ok(Some((
635 PyBytes::new(py, f.as_bytes()),
471 PyBytes::new(py, f.as_bytes()),
636 make_dirstate_item(py, &entry)?,
472 DirstateItem::new_as_pyobject(py, entry)?,
637 )))
473 )))
638 }
474 }
639 }
475 }
@@ -9,6 +9,7 b''
9 //! `hg-core` crate. From Python, this will be seen as
9 //! `hg-core` crate. From Python, this will be seen as
10 //! `rustext.dirstate.status`.
10 //! `rustext.dirstate.status`.
11
11
12 use crate::dirstate::item::timestamp;
12 use crate::{dirstate::DirstateMap, exceptions::FallbackError};
13 use crate::{dirstate::DirstateMap, exceptions::FallbackError};
13 use cpython::exc::OSError;
14 use cpython::exc::OSError;
14 use cpython::{
15 use cpython::{
@@ -102,12 +103,13 b' pub fn status_wrapper('
102 root_dir: PyObject,
103 root_dir: PyObject,
103 ignore_files: PyList,
104 ignore_files: PyList,
104 check_exec: bool,
105 check_exec: bool,
105 last_normal_time: i64,
106 last_normal_time: (u32, u32),
106 list_clean: bool,
107 list_clean: bool,
107 list_ignored: bool,
108 list_ignored: bool,
108 list_unknown: bool,
109 list_unknown: bool,
109 collect_traversed_dirs: bool,
110 collect_traversed_dirs: bool,
110 ) -> PyResult<PyTuple> {
111 ) -> PyResult<PyTuple> {
112 let last_normal_time = timestamp(py, last_normal_time)?;
111 let bytes = root_dir.extract::<PyBytes>(py)?;
113 let bytes = root_dir.extract::<PyBytes>(py)?;
112 let root_dir = get_path_from_bytes(bytes.data(py));
114 let root_dir = get_path_from_bytes(bytes.data(py));
113
115
@@ -35,7 +35,7 b' pub mod debug;'
35 pub mod dirstate;
35 pub mod dirstate;
36 pub mod discovery;
36 pub mod discovery;
37 pub mod exceptions;
37 pub mod exceptions;
38 pub mod parsers;
38 mod pybytes_deref;
39 pub mod revlog;
39 pub mod revlog;
40 pub mod utils;
40 pub mod utils;
41
41
@@ -58,11 +58,6 b' py_module_initializer!(rustext, initrust'
58 m.add(py, "discovery", discovery::init_module(py, &dotted_name)?)?;
58 m.add(py, "discovery", discovery::init_module(py, &dotted_name)?)?;
59 m.add(py, "dirstate", dirstate::init_module(py, &dotted_name)?)?;
59 m.add(py, "dirstate", dirstate::init_module(py, &dotted_name)?)?;
60 m.add(py, "revlog", revlog::init_module(py, &dotted_name)?)?;
60 m.add(py, "revlog", revlog::init_module(py, &dotted_name)?)?;
61 m.add(
62 py,
63 "parsers",
64 parsers::init_parsers_module(py, &dotted_name)?,
65 )?;
66 m.add(py, "GraphError", py.get_type::<exceptions::GraphError>())?;
61 m.add(py, "GraphError", py.get_type::<exceptions::GraphError>())?;
67 Ok(())
62 Ok(())
68 });
63 });
@@ -12,23 +12,21 b' functionality.'
12
12
13 # Building
13 # Building
14
14
15 This project currently requires an unreleased version of PyOxidizer
15 First, acquire and build a copy of PyOxidizer; you probably want to do this in
16 (0.7.0-pre). For best results, build the exact PyOxidizer commit
16 some directory outside of your clone of Mercurial:
17 as defined in the `pyoxidizer.bzl` file:
18
17
19 $ git clone https://github.com/indygreg/PyOxidizer.git
18 $ git clone https://github.com/indygreg/PyOxidizer.git
20 $ cd PyOxidizer
19 $ cd PyOxidizer
21 $ git checkout <Git commit from pyoxidizer.bzl>
22 $ cargo build --release
20 $ cargo build --release
23
21
24 Then build this Rust project using the built `pyoxidizer` executable::
22 Then build this Rust project using the built `pyoxidizer` executable:
25
23
26 $ /path/to/pyoxidizer/target/release/pyoxidizer build
24 $ /path/to/pyoxidizer/target/release/pyoxidizer build --release
27
25
28 If all goes according to plan, there should be an assembled application
26 If all goes according to plan, there should be an assembled application
29 under `build/<arch>/debug/app/` with an `hg` executable:
27 under `build/<arch>/release/app/` with an `hg` executable:
30
28
31 $ build/x86_64-unknown-linux-gnu/debug/app/hg version
29 $ build/x86_64-unknown-linux-gnu/release/app/hg version
32 Mercurial Distributed SCM (version 5.3.1+433-f99cd77d53dc+20200331)
30 Mercurial Distributed SCM (version 5.3.1+433-f99cd77d53dc+20200331)
33 (see https://mercurial-scm.org for more information)
31 (see https://mercurial-scm.org for more information)
34
32
@@ -46,5 +44,5 b" Python interpreter can't access them! To"
46 to the Mercurial source directory. e.g.:
44 to the Mercurial source directory. e.g.:
47
45
48 $ cd /path/to/hg/src/tests
46 $ cd /path/to/hg/src/tests
49 $ PYTHONPATH=`pwd`/.. python3.7 run-tests.py \
47 $ PYTHONPATH=`pwd`/.. python3.9 run-tests.py \
50 --with-hg `pwd`/../rust/hgcli/build/x86_64-unknown-linux-gnu/debug/app/hg
48 --with-hg `pwd`/../rust/hgcli/build/x86_64-unknown-linux-gnu/release/app/hg
@@ -24,7 +24,7 b''
24
24
25 ROOT = CWD + "/../.."
25 ROOT = CWD + "/../.."
26
26
27 VERSION = VARS.get("VERSION", "5.8")
27 VERSION = VARS.get("VERSION", "0.0")
28 MSI_NAME = VARS.get("MSI_NAME", "mercurial")
28 MSI_NAME = VARS.get("MSI_NAME", "mercurial")
29 EXTRA_MSI_FEATURES = VARS.get("EXTRA_MSI_FEATURES")
29 EXTRA_MSI_FEATURES = VARS.get("EXTRA_MSI_FEATURES")
30 SIGNING_PFX_PATH = VARS.get("SIGNING_PFX_PATH")
30 SIGNING_PFX_PATH = VARS.get("SIGNING_PFX_PATH")
@@ -34,6 +34,11 b' TIME_STAMP_SERVER_URL = VARS.get("TIME_S'
34
34
35 IS_WINDOWS = "windows" in BUILD_TARGET_TRIPLE
35 IS_WINDOWS = "windows" in BUILD_TARGET_TRIPLE
36
36
37 # Whether to load all resources from memory. If false, most of the Python
38 # stdlib will be in memory, but other things such as Mercurial itself will not
39 # be. See the comment in resource_callback, below.
40 USE_IN_MEMORY_RESOURCES = not IS_WINDOWS
41
37 # Code to run in Python interpreter.
42 # Code to run in Python interpreter.
38 RUN_CODE = """
43 RUN_CODE = """
39 import os
44 import os
@@ -57,6 +62,20 b" if os.name == 'nt':"
57 'site-packages',
62 'site-packages',
58 )
63 )
59 )
64 )
65 elif sys.platform == "darwin":
66 vi = sys.version_info
67
68 def joinuser(*args):
69 return os.path.expanduser(os.path.join(*args))
70
71 # Note: site.py uses `sys._framework` instead of hardcoding "Python" as the
72 # 3rd arg, but that is set to an empty string in an oxidized binary. It
73 # has a fallback to ~/.local when `sys._framework` isn't set, but we want
74 # to match what the system python uses, so it sees pip-installed stuff.
75 usersite = joinuser("~", "Library", "Python",
76 "%d.%d" % vi[:2], "lib/python/site-packages")
77
78 sys.path.append(usersite)
60 import hgdemandimport;
79 import hgdemandimport;
61 hgdemandimport.enable();
80 hgdemandimport.enable();
62 from mercurial import dispatch;
81 from mercurial import dispatch;
@@ -69,7 +88,7 b' def make_distribution():'
69 return default_python_distribution(python_version = "3.9")
88 return default_python_distribution(python_version = "3.9")
70
89
71 def resource_callback(policy, resource):
90 def resource_callback(policy, resource):
72 if not IS_WINDOWS:
91 if USE_IN_MEMORY_RESOURCES:
73 resource.add_location = "in-memory"
92 resource.add_location = "in-memory"
74 return
93 return
75
94
@@ -100,7 +119,7 b' def make_exe(dist):'
100 # extensions.
119 # extensions.
101 packaging_policy.extension_module_filter = "all"
120 packaging_policy.extension_module_filter = "all"
102 packaging_policy.resources_location = "in-memory"
121 packaging_policy.resources_location = "in-memory"
103 if IS_WINDOWS:
122 if not USE_IN_MEMORY_RESOURCES:
104 packaging_policy.resources_location_fallback = "filesystem-relative:lib"
123 packaging_policy.resources_location_fallback = "filesystem-relative:lib"
105 packaging_policy.register_resource_callback(resource_callback)
124 packaging_policy.register_resource_callback(resource_callback)
106
125
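The net effect of `USE_IN_MEMORY_RESOURCES` is a placement rule: load everything from memory, except that on Windows resources outside the stdlib (Mercurial's own packages, for instance) fall back to a `lib` directory next to the executable. A rough Python rendering of that rule (an illustration, not code from `pyoxidizer.bzl`):

    def resource_location(is_windows, is_stdlib):
        # Mirrors USE_IN_MEMORY_RESOURCES = not IS_WINDOWS above.
        use_in_memory = not is_windows
        if use_in_memory or is_stdlib:
            return "in-memory"
        return "filesystem-relative:lib"

    assert resource_location(True, False) == "filesystem-relative:lib"
    assert resource_location(False, False) == "in-memory"
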
@@ -16,7 +16,7 b" pub fn args() -> clap::App<'static, 'sta"
16 Arg::with_name("rev")
16 Arg::with_name("rev")
17 .help("search the repository as it is in REV")
17 .help("search the repository as it is in REV")
18 .short("-r")
18 .short("-r")
19 .long("--revision")
19 .long("--rev")
20 .value_name("REV")
20 .value_name("REV")
21 .takes_value(true),
21 .takes_value(true),
22 )
22 )
@@ -26,13 +26,22 b" pub fn args() -> clap::App<'static, 'sta"
26 .multiple(true)
26 .multiple(true)
27 .empty_values(false)
27 .empty_values(false)
28 .value_name("FILE")
28 .value_name("FILE")
29 .help("Activity to start: activity@category"),
29 .help("Files to output"),
30 )
30 )
31 .about(HELP_TEXT)
31 .about(HELP_TEXT)
32 }
32 }
33
33
34 #[timed]
34 #[timed]
35 pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
35 pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
36 let cat_enabled_default = true;
37 let cat_enabled = invocation.config.get_option(b"rhg", b"cat")?;
38 if !cat_enabled.unwrap_or(cat_enabled_default) {
39 return Err(CommandError::unsupported(
40 "cat is disabled in rhg (enable it with 'rhg.cat = true' \
41 or enable fallback with 'rhg.on-unsupported = fallback')",
42 ));
43 }
44
36 let rev = invocation.subcommand_args.value_of("rev");
45 let rev = invocation.subcommand_args.value_of("rev");
37 let file_args = match invocation.subcommand_args.values_of("files") {
46 let file_args = match invocation.subcommand_args.values_of("files") {
38 Some(files) => files.collect(),
47 Some(files) => files.collect(),
@@ -46,8 +55,18 b' pub fn run(invocation: &crate::CliInvoca'
46
55
47 let mut files = vec![];
56 let mut files = vec![];
48 for file in file_args.iter() {
57 for file in file_args.iter() {
58 if file.starts_with("set:") {
59 let message = "fileset";
60 return Err(CommandError::unsupported(message));
61 }
62
63 let normalized = cwd.join(&file);
49 // TODO: actually normalize `..` path segments etc?
64 // TODO: actually normalize `..` path segments etc?
50 let normalized = cwd.join(&file);
65 let dotted = normalized.components().any(|c| c.as_os_str() == "..");
66 if file == &"." || dotted {
67 let message = "`..` or `.` path segment";
68 return Err(CommandError::unsupported(message));
69 }
51 let stripped = normalized
70 let stripped = normalized
52 .strip_prefix(&working_directory)
71 .strip_prefix(&working_directory)
53 // TODO: error message for path arguments outside of the repo
72 // TODO: error message for path arguments outside of the repo
@@ -56,29 +75,31 b' pub fn run(invocation: &crate::CliInvoca'
56 .map_err(|e| CommandError::abort(e.to_string()))?;
75 .map_err(|e| CommandError::abort(e.to_string()))?;
57 files.push(hg_file);
76 files.push(hg_file);
58 }
77 }
78 let files = files.iter().map(|file| file.as_ref()).collect();
79 // TODO probably move this to a util function like `repo.default_rev` or
80 // something when it's used somewhere else
81 let rev = match rev {
82 Some(r) => r.to_string(),
83 None => format!("{:x}", repo.dirstate_parents()?.p1),
84 };
59
85
60 match rev {
86 let output = cat(&repo, &rev, files).map_err(|e| (e, rev.as_str()))?;
61 Some(rev) => {
87 for (_file, contents) in output.results {
62 let output = cat(&repo, rev, &files).map_err(|e| (e, rev))?;
88 invocation.ui.write_stdout(&contents)?;
63 invocation.ui.write_stdout(&output.concatenated)?;
89 }
64 if !output.missing.is_empty() {
90 if !output.missing.is_empty() {
65 let short = format!("{:x}", output.node.short()).into_bytes();
91 let short = format!("{:x}", output.node.short()).into_bytes();
66 for path in &output.missing {
92 for path in &output.missing {
67 invocation.ui.write_stderr(&format_bytes!(
93 invocation.ui.write_stderr(&format_bytes!(
68 b"{}: no such file in rev {}\n",
94 b"{}: no such file in rev {}\n",
69 path.as_bytes(),
95 path.as_bytes(),
70 short
96 short
71 ))?;
97 ))?;
72 }
73 }
74 if output.found_any {
75 Ok(())
76 } else {
77 Err(CommandError::Unsuccessful)
78 }
79 }
98 }
80 None => Err(CommandError::unsupported(
99 }
81 "`rhg cat` without `--rev` / `-r`",
100 if output.found_any {
82 )),
101 Ok(())
102 } else {
103 Err(CommandError::Unsuccessful)
83 }
104 }
84 }
105 }
@@ -1,12 +1,13 b''
1 use crate::error::CommandError;
1 use crate::error::CommandError;
2 use crate::ui::Ui;
2 use crate::ui::Ui;
3 use crate::ui::UiError;
4 use crate::utils::path_utils::relativize_paths;
3 use clap::Arg;
5 use clap::Arg;
4 use hg::operations::list_rev_tracked_files;
6 use hg::operations::list_rev_tracked_files;
5 use hg::operations::Dirstate;
7 use hg::operations::Dirstate;
6 use hg::repo::Repo;
8 use hg::repo::Repo;
7 use hg::utils::current_dir;
9 use hg::utils::hg_path::HgPath;
8 use hg::utils::files::{get_bytes_from_path, relativize_path};
10 use std::borrow::Cow;
9 use hg::utils::hg_path::{HgPath, HgPathBuf};
10
11
11 pub const HELP_TEXT: &str = "
12 pub const HELP_TEXT: &str = "
12 List tracked files.
13 List tracked files.
@@ -54,34 +55,13 b" fn display_files<'a>("
54 files: impl IntoIterator<Item = &'a HgPath>,
55 files: impl IntoIterator<Item = &'a HgPath>,
55 ) -> Result<(), CommandError> {
56 ) -> Result<(), CommandError> {
56 let mut stdout = ui.stdout_buffer();
57 let mut stdout = ui.stdout_buffer();
57
58 let mut any = false;
58 let cwd = current_dir()?;
59 let working_directory = repo.working_directory_path();
60 let working_directory = cwd.join(working_directory); // Make it absolute
61
59
62 let mut any = false;
60 relativize_paths(repo, files, |path: Cow<[u8]>| -> Result<(), UiError> {
63 if let Ok(cwd_relative_to_repo) = cwd.strip_prefix(&working_directory) {
61 any = true;
64 // The current directory is inside the repo, so we can work with
62 stdout.write_all(path.as_ref())?;
65 // relative paths
63 stdout.write_all(b"\n")
66 let cwd = HgPathBuf::from(get_bytes_from_path(cwd_relative_to_repo));
64 })?;
67 for file in files {
68 any = true;
69 stdout.write_all(relativize_path(&file, &cwd).as_ref())?;
70 stdout.write_all(b"\n")?;
71 }
72 } else {
73 let working_directory =
74 HgPathBuf::from(get_bytes_from_path(working_directory));
75 let cwd = HgPathBuf::from(get_bytes_from_path(cwd));
76 for file in files {
77 any = true;
78 // Absolute path in the filesystem
79 let file = working_directory.join(file);
80 stdout.write_all(relativize_path(&file, &cwd).as_ref())?;
81 stdout.write_all(b"\n")?;
82 }
83 }
84
85 stdout.flush()?;
65 stdout.flush()?;
86 if any {
66 if any {
87 Ok(())
67 Ok(())
@@ -6,25 +6,20 b''
6 // GNU General Public License version 2 or any later version.
6 // GNU General Public License version 2 or any later version.
7
7
8 use crate::error::CommandError;
8 use crate::error::CommandError;
9 use crate::ui::Ui;
9 use crate::ui::{Ui, UiError};
10 use crate::utils::path_utils::relativize_paths;
10 use clap::{Arg, SubCommand};
11 use clap::{Arg, SubCommand};
11 use hg;
12 use hg;
12 use hg::dirstate_tree::dirstate_map::DirstateMap;
13 use hg::config::Config;
13 use hg::dirstate_tree::on_disk;
14 use hg::dirstate::TruncatedTimestamp;
14 use hg::errors::HgResultExt;
15 use hg::errors::HgError;
15 use hg::errors::IoResultExt;
16 use hg::manifest::Manifest;
16 use hg::matchers::AlwaysMatcher;
17 use hg::matchers::AlwaysMatcher;
17 use hg::operations::cat;
18 use hg::repo::Repo;
18 use hg::repo::Repo;
19 use hg::revlog::node::Node;
20 use hg::utils::hg_path::{hg_path_to_os_string, HgPath};
19 use hg::utils::hg_path::{hg_path_to_os_string, HgPath};
21 use hg::StatusError;
22 use hg::{HgPathCow, StatusOptions};
20 use hg::{HgPathCow, StatusOptions};
23 use log::{info, warn};
21 use log::{info, warn};
24 use std::convert::TryInto;
22 use std::borrow::Cow;
25 use std::fs;
26 use std::io::BufReader;
27 use std::io::Read;
28
23
29 pub const HELP_TEXT: &str = "
24 pub const HELP_TEXT: &str = "
30 Show changed files in the working directory
25 Show changed files in the working directory
@@ -142,7 +137,20 b' pub fn run(invocation: &crate::CliInvoca'
142 ));
137 ));
143 }
138 }
144
139
140 // TODO: lift these limitations
141 if invocation.config.get_bool(b"ui", b"tweakdefaults").ok() == Some(true) {
142 return Err(CommandError::unsupported(
143 "ui.tweakdefaults is not yet supported with rhg status",
144 ));
145 }
146 if invocation.config.get_bool(b"ui", b"statuscopies").ok() == Some(true) {
147 return Err(CommandError::unsupported(
148 "ui.statuscopies is not yet supported with rhg status",
149 ));
150 }
151
145 let ui = invocation.ui;
152 let ui = invocation.ui;
153 let config = invocation.config;
146 let args = invocation.subcommand_args;
154 let args = invocation.subcommand_args;
147 let display_states = if args.is_present("all") {
155 let display_states = if args.is_present("all") {
148 // TODO when implementing `--quiet`: it excludes clean files
156 // TODO when implementing `--quiet`: it excludes clean files
@@ -166,47 +174,14 b' pub fn run(invocation: &crate::CliInvoca'
166 };
174 };
167
175
168 let repo = invocation.repo?;
176 let repo = invocation.repo?;
169 let dirstate_data_mmap;
177 let mut dmap = repo.dirstate_map_mut()?;
170 let (mut dmap, parents) = if repo.has_dirstate_v2() {
171 let docket_data =
172 repo.hg_vfs().read("dirstate").io_not_found_as_none()?;
173 let parents;
174 let dirstate_data;
175 let data_size;
176 let docket;
177 let tree_metadata;
178 if let Some(docket_data) = &docket_data {
179 docket = on_disk::read_docket(docket_data)?;
180 tree_metadata = docket.tree_metadata();
181 parents = Some(docket.parents());
182 data_size = docket.data_size();
183 dirstate_data_mmap = repo
184 .hg_vfs()
185 .mmap_open(docket.data_filename())
186 .io_not_found_as_none()?;
187 dirstate_data = dirstate_data_mmap.as_deref().unwrap_or(b"");
188 } else {
189 parents = None;
190 tree_metadata = b"";
191 data_size = 0;
192 dirstate_data = b"";
193 }
194 let dmap =
195 DirstateMap::new_v2(dirstate_data, data_size, tree_metadata)?;
196 (dmap, parents)
197 } else {
198 dirstate_data_mmap =
199 repo.hg_vfs().mmap_open("dirstate").io_not_found_as_none()?;
200 let dirstate_data = dirstate_data_mmap.as_deref().unwrap_or(b"");
201 DirstateMap::new_v1(dirstate_data)?
202 };
203
178
204 let options = StatusOptions {
179 let options = StatusOptions {
205 // TODO should be provided by the dirstate parsing and
180 // TODO should be provided by the dirstate parsing and
206 // hence be stored on dmap. Using a value that assumes we aren't
181 // hence be stored on dmap. Using a value that assumes we aren't
207 // below the time resolution granularity of the FS and the
182 // below the time resolution granularity of the FS and the
208 // dirstate.
183 // dirstate.
209 last_normal_time: 0,
184 last_normal_time: TruncatedTimestamp::new_truncate(0, 0),
210 // we're currently supporting file systems with exec flags only
185 // we're currently supporting file systems with exec flags only
211 // anyway
186 // anyway
212 check_exec: true,
187 check_exec: true,
@@ -216,8 +191,7 b' pub fn run(invocation: &crate::CliInvoca'
216 collect_traversed_dirs: false,
191 collect_traversed_dirs: false,
217 };
192 };
218 let ignore_file = repo.working_directory_vfs().join(".hgignore"); // TODO hardcoded
193 let ignore_file = repo.working_directory_vfs().join(".hgignore"); // TODO hardcoded
219 let (mut ds_status, pattern_warnings) = hg::dirstate_tree::status::status(
194 let (mut ds_status, pattern_warnings) = dmap.status(
220 &mut dmap,
221 &AlwaysMatcher,
195 &AlwaysMatcher,
222 repo.working_directory_path().to_owned(),
196 repo.working_directory_path().to_owned(),
223 vec![ignore_file],
197 vec![ignore_file],
@@ -239,16 +213,12 b' pub fn run(invocation: &crate::CliInvoca'
239 if !ds_status.unsure.is_empty()
213 if !ds_status.unsure.is_empty()
240 && (display_states.modified || display_states.clean)
214 && (display_states.modified || display_states.clean)
241 {
215 {
242 let p1: Node = parents
216 let p1 = repo.dirstate_parents()?.p1;
243 .expect(
217 let manifest = repo.manifest_for_node(p1).map_err(|e| {
244 "Dirstate with no parents should not list any file to
218 CommandError::from((e, &*format!("{:x}", p1.short())))
245 be rechecked for modifications",
219 })?;
246 )
247 .p1
248 .into();
249 let p1_hex = format!("{:x}", p1);
250 for to_check in ds_status.unsure {
220 for to_check in ds_status.unsure {
251 if cat_file_is_modified(repo, &to_check, &p1_hex)? {
221 if cat_file_is_modified(repo, &manifest, &to_check)? {
252 if display_states.modified {
222 if display_states.modified {
253 ds_status.modified.push(to_check);
223 ds_status.modified.push(to_check);
254 }
224 }
@@ -260,25 +230,25 b' pub fn run(invocation: &crate::CliInvoca'
260 }
230 }
261 }
231 }
262 if display_states.modified {
232 if display_states.modified {
263 display_status_paths(ui, &mut ds_status.modified, b"M")?;
233 display_status_paths(ui, repo, config, &mut ds_status.modified, b"M")?;
264 }
234 }
265 if display_states.added {
235 if display_states.added {
266 display_status_paths(ui, &mut ds_status.added, b"A")?;
236 display_status_paths(ui, repo, config, &mut ds_status.added, b"A")?;
267 }
237 }
268 if display_states.removed {
238 if display_states.removed {
269 display_status_paths(ui, &mut ds_status.removed, b"R")?;
239 display_status_paths(ui, repo, config, &mut ds_status.removed, b"R")?;
270 }
240 }
271 if display_states.deleted {
241 if display_states.deleted {
272 display_status_paths(ui, &mut ds_status.deleted, b"!")?;
242 display_status_paths(ui, repo, config, &mut ds_status.deleted, b"!")?;
273 }
243 }
274 if display_states.unknown {
244 if display_states.unknown {
275 display_status_paths(ui, &mut ds_status.unknown, b"?")?;
245 display_status_paths(ui, repo, config, &mut ds_status.unknown, b"?")?;
276 }
246 }
277 if display_states.ignored {
247 if display_states.ignored {
278 display_status_paths(ui, &mut ds_status.ignored, b"I")?;
248 display_status_paths(ui, repo, config, &mut ds_status.ignored, b"I")?;
279 }
249 }
280 if display_states.clean {
250 if display_states.clean {
281 display_status_paths(ui, &mut ds_status.clean, b"C")?;
251 display_status_paths(ui, repo, config, &mut ds_status.clean, b"C")?;
282 }
252 }
283 Ok(())
253 Ok(())
284 }
254 }
@@ -287,16 +257,35 b' pub fn run(invocation: &crate::CliInvoca'
287 // hardcode HgPathBuf, but probably not really useful at this point
257 // hardcode HgPathBuf, but probably not really useful at this point
288 fn display_status_paths(
258 fn display_status_paths(
289 ui: &Ui,
259 ui: &Ui,
260 repo: &Repo,
261 config: &Config,
290 paths: &mut [HgPathCow],
262 paths: &mut [HgPathCow],
291 status_prefix: &[u8],
263 status_prefix: &[u8],
292 ) -> Result<(), CommandError> {
264 ) -> Result<(), CommandError> {
293 paths.sort_unstable();
265 paths.sort_unstable();
294 for path in paths {
266 let mut relative: bool =
295 // Same TODO as in commands::root
267 config.get_bool(b"ui", b"relative-paths").unwrap_or(false);
296 let bytes: &[u8] = path.as_bytes();
268 relative = config
297 // TODO optim, probably lots of unneeded copies here, especially
269 .get_bool(b"commands", b"status.relative")
298 // if out stream is buffered
270 .unwrap_or(relative);
299 ui.write_stdout(&[status_prefix, b" ", bytes, b"\n"].concat())?;
271 if relative && !ui.plain() {
272 relativize_paths(
273 repo,
274 paths,
275 |path: Cow<[u8]>| -> Result<(), UiError> {
276 ui.write_stdout(
277 &[status_prefix, b" ", path.as_ref(), b"\n"].concat(),
278 )
279 },
280 )?;
281 } else {
282 for path in paths {
283 // Same TODO as in commands::root
284 let bytes: &[u8] = path.as_bytes();
285 // TODO optim, probably lots of unneeded copies here, especially
286 // if out stream is buffered
287 ui.write_stdout(&[status_prefix, b" ", bytes, b"\n"].concat())?;
288 }
300 }
289 }
301 Ok(())
290 Ok(())
302 }
291 }
@@ -309,39 +298,19 b' fn display_status_paths('
309 /// TODO: detect permission bits and similar metadata modifications
298 /// TODO: detect permission bits and similar metadata modifications
310 fn cat_file_is_modified(
299 fn cat_file_is_modified(
311 repo: &Repo,
300 repo: &Repo,
301 manifest: &Manifest,
312 hg_path: &HgPath,
302 hg_path: &HgPath,
313 rev: &str,
303 ) -> Result<bool, HgError> {
314 ) -> Result<bool, CommandError> {
304 let file_node = manifest
315 // TODO CatRev expects &[HgPathBuf], something like
305 .find_file(hg_path)?
316 // &[impl Deref<HgPath>] would be nicer and should avoid the copy
306 .expect("ambiguous file not in p1");
317 let path_bufs = [hg_path.into()];
307 let filelog = repo.filelog(hg_path)?;
318 // TODO IIUC CatRev returns a simple Vec<u8> for all files
308 let filelog_entry = filelog.data_for_node(file_node).map_err(|_| {
319 // being able to tell them apart as (path, bytes) would be nicer
309 HgError::corrupted("filelog missing node from manifest")
320 // and OPTIM would allow manifest resolution just once.
310 })?;
321 let output = cat(repo, rev, &path_bufs).map_err(|e| (e, rev))?;
311 let contents_in_p1 = filelog_entry.data()?;
322
312
323 let fs_path = repo
313 let fs_path = hg_path_to_os_string(hg_path).expect("HgPath conversion");
324 .working_directory_vfs()
314 let fs_contents = repo.working_directory_vfs().read(fs_path)?;
325 .join(hg_path_to_os_string(hg_path).expect("HgPath conversion"));
315 return Ok(contents_in_p1 != &*fs_contents);
326 let hg_data_len: u64 = match output.concatenated.len().try_into() {
327 Ok(v) => v,
328 Err(_) => {
329 // conversion of data length to u64 failed,
330 // good luck for any file to have this content
331 return Ok(true);
332 }
333 };
334 let fobj = fs::File::open(&fs_path).when_reading_file(&fs_path)?;
335 if fobj.metadata().map_err(|e| StatusError::from(e))?.len() != hg_data_len
336 {
337 return Ok(true);
338 }
339 for (fs_byte, hg_byte) in
340 BufReader::new(fobj).bytes().zip(output.concatenated)
341 {
342 if fs_byte.map_err(|e| StatusError::from(e))? != hg_byte {
343 return Ok(true);
344 }
345 }
346 Ok(false)
347 }
316 }
@@ -17,6 +17,9 b' use std::process::Command;'
17 mod blackbox;
17 mod blackbox;
18 mod error;
18 mod error;
19 mod ui;
19 mod ui;
20 pub mod utils {
21 pub mod path_utils;
22 }
20 use error::CommandError;
23 use error::CommandError;
21
24
22 fn main_with_result(
25 fn main_with_result(
@@ -68,6 +71,25 b' fn main_with_result('
68 let matches = app.clone().get_matches_safe()?;
71 let matches = app.clone().get_matches_safe()?;
69
72
70 let (subcommand_name, subcommand_matches) = matches.subcommand();
73 let (subcommand_name, subcommand_matches) = matches.subcommand();
74
75 // Mercurial allows users to define "defaults" for commands; fall back
76 // if a default is detected for the current command.
77 let defaults = config.get_str(b"defaults", subcommand_name.as_bytes());
78 if defaults?.is_some() {
79 let msg = "`defaults` config set";
80 return Err(CommandError::unsupported(msg));
81 }
82
83 for prefix in ["pre", "post", "fail"].iter() {
84 // Mercurial allows users to define generic hooks for commands;
85 // fall back if any are detected.
86 let item = format!("{}-{}", prefix, subcommand_name);
87 let hook_for_command = config.get_str(b"hooks", item.as_bytes())?;
88 if hook_for_command.is_some() {
89 let msg = format!("{}-{} hook defined", prefix, subcommand_name);
90 return Err(CommandError::unsupported(msg));
91 }
92 }
71 let run = subcommand_run_fn(subcommand_name)
93 let run = subcommand_run_fn(subcommand_name)
72 .expect("unknown subcommand name from clap despite AppSettings::SubcommandRequired");
94 .expect("unknown subcommand name from clap despite AppSettings::SubcommandRequired");
73 let subcommand_args = subcommand_matches
95 let subcommand_args = subcommand_matches
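Taken together, these checks make rhg fall back whenever `defaults.<command>` or any `pre-`, `post-`, or `fail-<command>` hook is configured. A small illustration of the config keys probed for a given subcommand (mirroring the Rust above, not part of rhg itself):

    def fallback_config_keys(subcommand):
        # Keys whose presence makes rhg defer to the Python implementation.
        yield ("defaults", subcommand)
        for prefix in ("pre", "post", "fail"):
            yield ("hooks", "%s-%s" % (prefix, subcommand))

    print(list(fallback_config_keys("status")))
    # [('defaults', 'status'), ('hooks', 'pre-status'),
    #  ('hooks', 'post-status'), ('hooks', 'fail-status')]
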
@@ -79,6 +101,15 b' fn main_with_result('
79 config,
101 config,
80 repo,
102 repo,
81 };
103 };
104
105 if let Ok(repo) = repo {
106 // We don't support subrepos; fall back if the subrepos file is present
107 if repo.working_directory_vfs().join(".hgsub").exists() {
108 let msg = "subrepos (.hgsub is present)";
109 return Err(CommandError::unsupported(msg));
110 }
111 }
112
82 let blackbox = blackbox::Blackbox::new(&invocation, process_start_time)?;
113 let blackbox = blackbox::Blackbox::new(&invocation, process_start_time)?;
83 blackbox.log_command_start();
114 blackbox.log_command_start();
84 let result = run(&invocation);
115 let result = run(&invocation);
@@ -567,11 +598,10 b' fn check_extensions(config: &Config) -> '
567 unsupported.remove(supported);
598 unsupported.remove(supported);
568 }
599 }
569
600
570 if let Some(ignored_list) =
601 if let Some(ignored_list) = config.get_list(b"rhg", b"ignored-extensions")
571 config.get_simple_list(b"rhg", b"ignored-extensions")
572 {
602 {
573 for ignored in ignored_list {
603 for ignored in ignored_list {
574 unsupported.remove(ignored);
604 unsupported.remove(ignored.as_slice());
575 }
605 }
576 }
606 }
577
607
@@ -1,5 +1,6 b''
1 use format_bytes::format_bytes;
1 use format_bytes::format_bytes;
2 use std::borrow::Cow;
2 use std::borrow::Cow;
3 use std::env;
3 use std::io;
4 use std::io;
4 use std::io::{ErrorKind, Write};
5 use std::io::{ErrorKind, Write};
5
6
@@ -49,6 +50,25 b' impl Ui {'
49
50
50 stderr.flush().or_else(handle_stderr_error)
51 stderr.flush().or_else(handle_stderr_error)
51 }
52 }
53
54 /// Is plain mode active?
55 ///
56 /// Plain mode means that all configuration variables which affect
57 /// the behavior and output of Mercurial should be
58 /// ignored. Additionally, the output should be stable,
59 /// reproducible and suitable for use in scripts or applications.
60 ///
61 /// The only way to trigger plain mode is by setting either the
62 /// `HGPLAIN` or `HGPLAINEXCEPT` environment variables.
63 ///
64 /// The return value can be:
65 /// - False if HGPLAIN is not set, or feature is in HGPLAINEXCEPT
66 /// - False if feature is disabled by default and not included in HGPLAIN
67 /// - True otherwise
68 pub fn plain(&self) -> bool {
69 // TODO: add support for HGPLAINEXCEPT
70 env::var_os("HGPLAIN").is_some()
71 }
52 }
72 }
53
73
54 /// A buffered stdout writer for faster batch printing operations.
74 /// A buffered stdout writer for faster batch printing operations.
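The TODO above notes that `HGPLAINEXCEPT` is not handled yet. A sketch of the fuller rule the doc comment describes, written in Python for brevity and assuming semantics matching Mercurial's `ui.plain()` (the `feature` parameter is hypothetical; rhg's `plain()` currently takes no argument):

    import os

    def plain(feature=None):
        exceptions = os.environ.get("HGPLAINEXCEPT")
        if exceptions is not None:
            # Features listed in HGPLAINEXCEPT are exempt from plain mode.
            return feature is None or feature not in exceptions.split(",")
        return "HGPLAIN" in os.environ
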
@@ -1428,12 +1428,9 b' class RustExtension(Extension):'
1428
1428
1429 rusttargetdir = os.path.join('rust', 'target', 'release')
1429 rusttargetdir = os.path.join('rust', 'target', 'release')
1430
1430
1431 def __init__(
1431 def __init__(self, mpath, sources, rustlibname, subcrate, **kw):
1432 self, mpath, sources, rustlibname, subcrate, py3_features=None, **kw
1433 ):
1434 Extension.__init__(self, mpath, sources, **kw)
1432 Extension.__init__(self, mpath, sources, **kw)
1435 srcdir = self.rustsrcdir = os.path.join('rust', subcrate)
1433 srcdir = self.rustsrcdir = os.path.join('rust', subcrate)
1436 self.py3_features = py3_features
1437
1434
1438 # adding Rust source and control files to depends so that the extension
1435 # adding Rust source and control files to depends so that the extension
1439 # gets rebuilt if they've changed
1436 # gets rebuilt if they've changed
@@ -1481,9 +1478,11 b' class RustExtension(Extension):'
1481
1478
1482 feature_flags = []
1479 feature_flags = []
1483
1480
1484 if sys.version_info[0] == 3 and self.py3_features is not None:
1481 cargocmd.append('--no-default-features')
1485 feature_flags.append(self.py3_features)
1482 if sys.version_info[0] == 2:
1486 cargocmd.append('--no-default-features')
1483 feature_flags.append('python27')
1484 elif sys.version_info[0] == 3:
1485 feature_flags.append('python3')
1487
1486
1488 rust_features = env.get("HG_RUST_FEATURES")
1487 rust_features = env.get("HG_RUST_FEATURES")
1489 if rust_features:
1488 if rust_features:
@@ -1605,7 +1604,9 b' extmodules = ['
1605 extra_compile_args=common_cflags,
1604 extra_compile_args=common_cflags,
1606 ),
1605 ),
1607 RustStandaloneExtension(
1606 RustStandaloneExtension(
1608 'mercurial.rustext', 'hg-cpython', 'librusthg', py3_features='python3'
1607 'mercurial.rustext',
1608 'hg-cpython',
1609 'librusthg',
1609 ),
1610 ),
1610 ]
1611 ]
1611
1612
@@ -4,6 +4,7 b' from __future__ import absolute_import'
4
4
5 from mercurial import (
5 from mercurial import (
6 error,
6 error,
7 logcmdutil,
7 patch,
8 patch,
8 pycompat,
9 pycompat,
9 registrar,
10 registrar,
@@ -49,7 +50,7 b' def autodiff(ui, repo, *pats, **opts):'
49 else:
50 else:
50 raise error.Abort(b'--git must be yes, no or auto')
51 raise error.Abort(b'--git must be yes, no or auto')
51
52
52 ctx1, ctx2 = scmutil.revpair(repo, [])
53 ctx1, ctx2 = logcmdutil.revpair(repo, [])
53 m = scmutil.match(ctx2, pats, opts)
54 m = scmutil.match(ctx2, pats, opts)
54 it = patch.diff(
55 it = patch.diff(
55 repo,
56 repo,
@@ -15,6 +15,7 b' from mercurial import ('
15 policy,
15 policy,
16 registrar,
16 registrar,
17 )
17 )
18 from mercurial.dirstateutils import timestamp
18 from mercurial.utils import dateutil
19 from mercurial.utils import dateutil
19
20
20 try:
21 try:
@@ -34,15 +35,14 b' configitem('
34 )
35 )
35
36
36 parsers = policy.importmod('parsers')
37 parsers = policy.importmod('parsers')
37 rustmod = policy.importrust('parsers')
38 has_rust_dirstate = policy.importrust('dirstate') is not None
38
39
39
40
40 def pack_dirstate(fakenow, orig, dmap, copymap, pl, now):
41 def pack_dirstate(fakenow, orig, dmap, copymap, pl, now):
41 # execute what original parsers.pack_dirstate should do actually
42 # execute what original parsers.pack_dirstate should do actually
42 # for consistency
43 # for consistency
43 actualnow = int(now)
44 for f, e in dmap.items():
44 for f, e in dmap.items():
45 if e.need_delay(actualnow):
45 if e.need_delay(now):
46 e.set_possibly_dirty()
46 e.set_possibly_dirty()
47
47
48 return orig(dmap, copymap, pl, fakenow)
48 return orig(dmap, copymap, pl, fakenow)
@@ -62,8 +62,9 b' def fakewrite(ui, func):'
62 # parsing 'fakenow' in YYYYmmddHHMM format makes comparison between
62 # parsing 'fakenow' in YYYYmmddHHMM format makes comparison between
63 # 'fakenow' value and 'touch -t YYYYmmddHHMM' argument easy
63 # 'fakenow' value and 'touch -t YYYYmmddHHMM' argument easy
64 fakenow = dateutil.parsedate(fakenow, [b'%Y%m%d%H%M'])[0]
64 fakenow = dateutil.parsedate(fakenow, [b'%Y%m%d%H%M'])[0]
65 fakenow = timestamp.timestamp((fakenow, 0))
65
66
66 if rustmod is not None:
67 if has_rust_dirstate:
67 # The Rust implementation does not use public parse/pack dirstate
68 # The Rust implementation does not use public parse/pack dirstate
68 # to prevent conversion round-trips
69 # to prevent conversion round-trips
69 orig_dirstatemap_write = dirstatemapmod.dirstatemap.write
70 orig_dirstatemap_write = dirstatemapmod.dirstatemap.write
@@ -85,7 +86,7 b' def fakewrite(ui, func):'
85 finally:
86 finally:
86 orig_module.pack_dirstate = orig_pack_dirstate
87 orig_module.pack_dirstate = orig_pack_dirstate
87 dirstate._getfsnow = orig_dirstate_getfsnow
88 dirstate._getfsnow = orig_dirstate_getfsnow
88 if rustmod is not None:
89 if has_rust_dirstate:
89 dirstatemapmod.dirstatemap.write = orig_dirstatemap_write
90 dirstatemapmod.dirstatemap.write = orig_dirstatemap_write
90
91
91
92
@@ -14,8 +14,6 b' setupcommon() {'
14 cat >> $HGRCPATH << EOF
14 cat >> $HGRCPATH << EOF
15 [extensions]
15 [extensions]
16 infinitepush=
16 infinitepush=
17 [ui]
18 ssh = "$PYTHON" "$TESTDIR/dummyssh"
19 [infinitepush]
17 [infinitepush]
20 branchpattern=re:scratch/.*
18 branchpattern=re:scratch/.*
21 EOF
19 EOF
@@ -1,8 +1,6 b''
1 cat >> $HGRCPATH <<EOF
1 cat >> $HGRCPATH <<EOF
2 [extensions]
2 [extensions]
3 narrow=
3 narrow=
4 [ui]
5 ssh="$PYTHON" "$RUNTESTDIR/dummyssh"
6 [experimental]
4 [experimental]
7 changegroup3 = True
5 changegroup3 = True
8 EOF
6 EOF
@@ -7,8 +7,6 b' debug=True'
7 remotefilelog=
7 remotefilelog=
8 rebase=
8 rebase=
9 strip=
9 strip=
10 [ui]
11 ssh="$PYTHON" "$TESTDIR/dummyssh"
12 [server]
10 [server]
13 preferuncompressed=True
11 preferuncompressed=True
14 [experimental]
12 [experimental]
@@ -1554,6 +1554,8 b' class Test(unittest.TestCase):'
1554 hgrc.write(b'merge = internal:merge\n')
1554 hgrc.write(b'merge = internal:merge\n')
1555 hgrc.write(b'mergemarkers = detailed\n')
1555 hgrc.write(b'mergemarkers = detailed\n')
1556 hgrc.write(b'promptecho = True\n')
1556 hgrc.write(b'promptecho = True\n')
1557 dummyssh = os.path.join(self._testdir, b'dummyssh')
1558 hgrc.write(b'ssh = "%s" "%s"\n' % (PYTHON, dummyssh))
1557 hgrc.write(b'timeout.warn=15\n')
1559 hgrc.write(b'timeout.warn=15\n')
1558 hgrc.write(b'[chgserver]\n')
1560 hgrc.write(b'[chgserver]\n')
1559 hgrc.write(b'idletimeout=60\n')
1561 hgrc.write(b'idletimeout=60\n')
@@ -665,20 +665,24 b' def issimplestorefile(f, kind, st):'
665
665
666
666
667 class simplestore(store.encodedstore):
667 class simplestore(store.encodedstore):
668 def datafiles(self):
668 def datafiles(self, undecodable=None):
669 for x in super(simplestore, self).datafiles():
669 for x in super(simplestore, self).datafiles():
670 yield x
670 yield x
671
671
672 # Supplement with non-revlog files.
672 # Supplement with non-revlog files.
673 extrafiles = self._walk('data', True, filefilter=issimplestorefile)
673 extrafiles = self._walk('data', True, filefilter=issimplestorefile)
674
674
675 for unencoded, encoded, size in extrafiles:
675 for f1, size in extrafiles:
676 try:
676 try:
677 unencoded = store.decodefilename(unencoded)
677 f2 = store.decodefilename(f1)
678 except KeyError:
678 except KeyError:
679 unencoded = None
679 if undecodable is None:
680 raise error.StorageError(b'undecodable revlog name %s' % f1)
681 else:
682 undecodable.append(f1)
683 continue
680
684
681 yield unencoded, encoded, size
685 yield f2, size
682
686
683
687
684 def reposetup(ui, repo):
688 def reposetup(ui, repo):
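The `datafiles` contract changes in two ways: entries become `(name, size)` pairs instead of `(unencoded, encoded, size)` triples, and undecodable names are either collected into a caller-provided list or raised as an error. A toy illustration of that control flow (not the real store API):

    def datafiles(entries, decode, undecodable=None):
        for name, size in entries:
            try:
                decoded = decode(name)
            except KeyError:
                if undecodable is None:
                    raise ValueError("undecodable revlog name %s" % name)
                undecodable.append(name)
                continue
            yield decoded, size

    bad = []
    out = list(datafiles([("a", 1), ("b", 2)],
                         {"a": "A"}.__getitem__, undecodable=bad))
    print(out, bad)  # [('A', 1)] ['b']
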
@@ -131,13 +131,13 b' should all fail'
131
131
132 $ hg addremove -s foo
132 $ hg addremove -s foo
133 abort: similarity must be a number
133 abort: similarity must be a number
134 [255]
134 [10]
135 $ hg addremove -s -1
135 $ hg addremove -s -1
136 abort: similarity must be between 0 and 100
136 abort: similarity must be between 0 and 100
137 [255]
137 [10]
138 $ hg addremove -s 1e6
138 $ hg addremove -s 1e6
139 abort: similarity must be between 0 and 100
139 abort: similarity must be between 0 and 100
140 [255]
140 [10]
141
141
142 $ cd ..
142 $ cd ..
143
143
@@ -455,7 +455,7 b' missing file'
455
455
456 $ hg ann nosuchfile
456 $ hg ann nosuchfile
457 abort: nosuchfile: no such file in rev e9e6b4fa872f
457 abort: nosuchfile: no such file in rev e9e6b4fa872f
458 [255]
458 [10]
459
459
460 annotate file without '\n' on last line
460 annotate file without '\n' on last line
461
461
@@ -6,7 +6,7 b' Create a repository:'
6 devel.all-warnings=true
6 devel.all-warnings=true
7 devel.default-date=0 0
7 devel.default-date=0 0
8 extensions.fsmonitor= (fsmonitor !)
8 extensions.fsmonitor= (fsmonitor !)
9 format.exp-dirstate-v2=1 (dirstate-v2 !)
9 format.exp-rc-dirstate-v2=1 (dirstate-v2 !)
10 largefiles.usercache=$TESTTMP/.cache/largefiles
10 largefiles.usercache=$TESTTMP/.cache/largefiles
11 lfs.usercache=$TESTTMP/.cache/lfs
11 lfs.usercache=$TESTTMP/.cache/lfs
12 ui.slash=True
12 ui.slash=True
@@ -15,6 +15,7 b' Create a repository:'
15 ui.merge=internal:merge
15 ui.merge=internal:merge
16 ui.mergemarkers=detailed
16 ui.mergemarkers=detailed
17 ui.promptecho=True
17 ui.promptecho=True
18 ui.ssh=* (glob)
18 ui.timeout.warn=15
19 ui.timeout.warn=15
19 web.address=localhost
20 web.address=localhost
20 web\.ipv6=(?:True|False) (re)
21 web\.ipv6=(?:True|False) (re)
@@ -54,12 +55,13 b' Writes to stdio succeed and fail appropr'
54 On Python 3, stdio may be None:
55 On Python 3, stdio may be None:
55
56
56 $ hg debuguiprompt --config ui.interactive=true 0<&-
57 $ hg debuguiprompt --config ui.interactive=true 0<&-
57 abort: Bad file descriptor
58 abort: Bad file descriptor (no-rhg !)
59 abort: response expected (rhg !)
58 [255]
60 [255]
59 $ hg version -q 0<&-
61 $ hg version -q 0<&-
60 Mercurial Distributed SCM * (glob)
62 Mercurial Distributed SCM * (glob)
61
63
62 #if py3
64 #if py3 no-rhg
63 $ hg version -q 1>&-
65 $ hg version -q 1>&-
64 abort: Bad file descriptor
66 abort: Bad file descriptor
65 [255]
67 [255]
@@ -214,14 +214,11 b' class remotething(thing):'
214 mangle(two),
214 mangle(two),
215 ),
215 ),
216 ]
216 ]
217 encoded_res_future = wireprotov1peer.future()
217 return encoded_args, unmangle
218 yield encoded_args, encoded_res_future
219 yield unmangle(encoded_res_future.value)
220
218
221 @wireprotov1peer.batchable
219 @wireprotov1peer.batchable
222 def bar(self, b, a):
220 def bar(self, b, a):
223 encresref = wireprotov1peer.future()
221 return [
224 yield [
225 (
222 (
226 b'b',
223 b'b',
227 mangle(b),
224 mangle(b),
@@ -230,8 +227,7 b' class remotething(thing):'
230 b'a',
227 b'a',
231 mangle(a),
228 mangle(a),
232 ),
229 ),
233 ], encresref
230 ], unmangle
234 yield unmangle(encresref.value)
235
231
236 # greet is coded directly. It therefore does not support batching. If it
232 # greet is coded directly. It therefore does not support batching. If it
237 # does appear in a batch, the batch is split around greet, and the call to
233 # does appear in a batch, the batch is split around greet, and the call to
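The `@batchable` protocol changes shape here: instead of a generator that yields `(encoded_args, future)` and then yields the decoded value, a batchable method now returns `(encoded_args, decode_function)`. A hedged sketch of a method written against the new contract (the command name, argument, and decoding are made up for illustration):

    from mercurial import wireprotov1peer

    class examplepeer(object):
        @wireprotov1peer.batchable
        def fakecommand(self, arg):
            def decode(raw):
                # Turn the raw wire response into a list, one item per line.
                return raw.splitlines()
            return [(b'arg', arg)], decode
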
@@ -12,16 +12,6 b' The data from the bookmark file are filt'
12 node known to the changelog. If the cache invalidation between these two bits
12 node known to the changelog. If the cache invalidation between these two bits
13 goes wrong, bookmarks can be dropped.
13 goes wrong, bookmarks can be dropped.
14
14
15 global setup
16 ------------
17
18 $ cat >> $HGRCPATH << EOF
19 > [ui]
20 > ssh = "$PYTHON" "$TESTDIR/dummyssh"
21 > [server]
22 > concurrent-push-mode=check-related
23 > EOF
24
25 Setup
15 Setup
26 -----
16 -----
27
17
@@ -490,6 +490,65 b' divergent bookmarks'
490 Y 0:4e3505fd9583
490 Y 0:4e3505fd9583
491 Z 1:0d2164f0ce0d
491 Z 1:0d2164f0ce0d
492
492
493 mirroring bookmarks
494
495 $ hg book
496 @ 1:9b140be10808
497 @foo 2:0d2164f0ce0d
498 X 1:9b140be10808
499 X@foo 2:0d2164f0ce0d
500 Y 0:4e3505fd9583
501 Z 2:0d2164f0ce0d
502 foo -1:000000000000
503 * foobar 1:9b140be10808
504 $ cp .hg/bookmarks .hg/bookmarks.bak
505 $ hg book -d X
506 $ hg incoming --bookmark -v ../a
507 comparing with ../a
508 searching for changed bookmarks
509 @ 0d2164f0ce0d diverged
510 X 0d2164f0ce0d added
511 $ hg incoming --bookmark -v ../a --config 'paths.*:bookmarks.mode=babar'
512 (paths.*:bookmarks.mode has unknown value: "babar")
513 comparing with ../a
514 searching for changed bookmarks
515 @ 0d2164f0ce0d diverged
516 X 0d2164f0ce0d added
517 $ hg incoming --bookmark -v ../a --config 'paths.*:bookmarks.mode=mirror'
518 comparing with ../a
519 searching for changed bookmarks
520 @ 0d2164f0ce0d changed
521 @foo 000000000000 removed
522 X 0d2164f0ce0d added
523 X@foo 000000000000 removed
524 foo 000000000000 removed
525 foobar 000000000000 removed
526 $ hg incoming --bookmark -v ../a --config 'paths.*:bookmarks.mode=ignore'
527 comparing with ../a
528 bookmarks exchange disabled with this path
529 $ hg pull ../a --config 'paths.*:bookmarks.mode=ignore'
530 pulling from ../a
531 searching for changes
532 no changes found
533 $ hg book
534 @ 1:9b140be10808
535 @foo 2:0d2164f0ce0d
536 X@foo 2:0d2164f0ce0d
537 Y 0:4e3505fd9583
538 Z 2:0d2164f0ce0d
539 foo -1:000000000000
540 * foobar 1:9b140be10808
541 $ hg pull ../a --config 'paths.*:bookmarks.mode=mirror'
542 pulling from ../a
543 searching for changes
544 no changes found
545 $ hg book
546 @ 2:0d2164f0ce0d
547 X 2:0d2164f0ce0d
548 Y 0:4e3505fd9583
549 Z 2:0d2164f0ce0d
550 $ mv .hg/bookmarks.bak .hg/bookmarks
551
493 explicit pull should overwrite the local version (issue4439)
552 explicit pull should overwrite the local version (issue4439)
494
553
495 $ hg update -r X
554 $ hg update -r X
@@ -1142,8 +1201,6 b' Check hook preventing push (issue4455)'
1142 > local=../issue4455-dest/
1201 > local=../issue4455-dest/
1143 > ssh=ssh://user@dummy/issue4455-dest
1202 > ssh=ssh://user@dummy/issue4455-dest
1144 > http=http://localhost:$HGPORT/
1203 > http=http://localhost:$HGPORT/
1145 > [ui]
1146 > ssh="$PYTHON" "$TESTDIR/dummyssh"
1147 > EOF
1204 > EOF
1148 $ cat >> ../issue4455-dest/.hg/hgrc << EOF
1205 $ cat >> ../issue4455-dest/.hg/hgrc << EOF
1149 > [hooks]
1206 > [hooks]
@@ -1270,7 +1327,6 b' Test that pre-pushkey compat for bookmar'
1270
1327
1271 $ cat << EOF >> $HGRCPATH
1328 $ cat << EOF >> $HGRCPATH
1272 > [ui]
1329 > [ui]
1273 > ssh="$PYTHON" "$TESTDIR/dummyssh"
1274 > [server]
1330 > [server]
1275 > bookmarks-pushkey-compat = yes
1331 > bookmarks-pushkey-compat = yes
1276 > EOF
1332 > EOF
@@ -185,22 +185,22 b' but "literal:." is not since "." seems n'
185
185
186 $ hg log -r 'bookmark("literal:.")'
186 $ hg log -r 'bookmark("literal:.")'
187 abort: bookmark '.' does not exist
187 abort: bookmark '.' does not exist
188 [255]
188 [10]
189
189
190 "." should fail if there's no active bookmark:
190 "." should fail if there's no active bookmark:
191
191
192 $ hg bookmark --inactive
192 $ hg bookmark --inactive
193 $ hg log -r 'bookmark(.)'
193 $ hg log -r 'bookmark(.)'
194 abort: no active bookmark
194 abort: no active bookmark
195 [255]
195 [10]
196 $ hg log -r 'present(bookmark(.))'
196 $ hg log -r 'present(bookmark(.))'
197
197
198 $ hg log -r 'bookmark(unknown)'
198 $ hg log -r 'bookmark(unknown)'
199 abort: bookmark 'unknown' does not exist
199 abort: bookmark 'unknown' does not exist
200 [255]
200 [10]
201 $ hg log -r 'bookmark("literal:unknown")'
201 $ hg log -r 'bookmark("literal:unknown")'
202 abort: bookmark 'unknown' does not exist
202 abort: bookmark 'unknown' does not exist
203 [255]
203 [10]
204 $ hg log -r 'bookmark("re:unknown")'
204 $ hg log -r 'bookmark("re:unknown")'
205 $ hg log -r 'present(bookmark("literal:unknown"))'
205 $ hg log -r 'present(bookmark("literal:unknown"))'
206 $ hg log -r 'present(bookmark("re:unknown"))'
206 $ hg log -r 'present(bookmark("re:unknown"))'
@@ -147,7 +147,7 b' Changing branch of an obsoleted changese'
147 $ hg branch -r 4 foobar
147 $ hg branch -r 4 foobar
148 abort: hidden revision '4' was rewritten as: 7c1991464886
148 abort: hidden revision '4' was rewritten as: 7c1991464886
149 (use --hidden to access hidden revisions)
149 (use --hidden to access hidden revisions)
150 [255]
150 [10]
151
151
152 $ hg branch -r 4 --hidden foobar
152 $ hg branch -r 4 --hidden foobar
153 abort: cannot change branch of 3938acfb5c0f, as that creates content-divergence with 7c1991464886
153 abort: cannot change branch of 3938acfb5c0f, as that creates content-divergence with 7c1991464886
@@ -28,8 +28,6 b' enable obsolescence'
28 > evolution.createmarkers=True
28 > evolution.createmarkers=True
29 > evolution.exchange=True
29 > evolution.exchange=True
30 > bundle2-output-capture=True
30 > bundle2-output-capture=True
31 > [ui]
32 > ssh="$PYTHON" "$TESTDIR/dummyssh"
33 > [command-templates]
31 > [command-templates]
34 > log={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
32 > log={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
35 > [web]
33 > [web]
@@ -922,10 +920,6 b' Check abort from mandatory pushkey'
922
920
923 Test lazily acquiring the lock during unbundle
921 Test lazily acquiring the lock during unbundle
924 $ cp $TESTTMP/hgrc.orig $HGRCPATH
922 $ cp $TESTTMP/hgrc.orig $HGRCPATH
925 $ cat >> $HGRCPATH <<EOF
926 > [ui]
927 > ssh="$PYTHON" "$TESTDIR/dummyssh"
928 > EOF
929
923
930 $ cat >> $TESTTMP/locktester.py <<EOF
924 $ cat >> $TESTTMP/locktester.py <<EOF
931 > import os
925 > import os
@@ -233,8 +233,6 b' Create an extension to test bundle2 API'
233 > bundle2=$TESTTMP/bundle2.py
233 > bundle2=$TESTTMP/bundle2.py
234 > [experimental]
234 > [experimental]
235 > evolution.createmarkers=True
235 > evolution.createmarkers=True
236 > [ui]
237 > ssh="$PYTHON" "$TESTDIR/dummyssh"
238 > [command-templates]
236 > [command-templates]
239 > log={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
237 > log={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
240 > [web]
238 > [web]
@@ -37,7 +37,6 b''
37
37
38 $ cat >> $HGRCPATH <<EOF
38 $ cat >> $HGRCPATH <<EOF
39 > [ui]
39 > [ui]
40 > ssh = "$PYTHON" "$TESTDIR/dummyssh"
41 > username = nobody <no.reply@example.com>
40 > username = nobody <no.reply@example.com>
42 >
41 >
43 > [alias]
42 > [alias]
@@ -94,8 +94,6 b' Start a simple HTTP server to serve bund'
94 $ cat dumb.pid >> $DAEMON_PIDS
94 $ cat dumb.pid >> $DAEMON_PIDS
95
95
96 $ cat >> $HGRCPATH << EOF
96 $ cat >> $HGRCPATH << EOF
97 > [ui]
98 > ssh="$PYTHON" "$TESTDIR/dummyssh"
99 > [command-templates]
97 > [command-templates]
100 > log={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
98 > log={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
101 > EOF
99 > EOF
@@ -3,7 +3,7 b''
3 $ . "$TESTDIR/helpers-testrepo.sh"
3 $ . "$TESTDIR/helpers-testrepo.sh"
4
4
5 $ cd "$TESTDIR"/..
5 $ cd "$TESTDIR"/..
6 $ RUSTFMT=$(rustup which --toolchain nightly-2020-10-04 rustfmt)
6 $ RUSTFMT=$(rustup which --toolchain nightly-2021-11-02 rustfmt)
7 $ for f in `testrepohg files 'glob:**/*.rs'` ; do
7 $ for f in `testrepohg files 'glob:**/*.rs'` ; do
8 > $RUSTFMT --check --edition=2018 --unstable-features --color=never $f
8 > $RUSTFMT --check --edition=2018 --unstable-features --color=never $f
9 > done
9 > done
1 NO CONTENT: file renamed from tests/test-clone-uncompressed.t to tests/test-clone-stream.t
NO CONTENT: file renamed from tests/test-clone-uncompressed.t to tests/test-clone-stream.t
@@ -1125,7 +1125,7 b" Test that auto sharing doesn't cause fai"
1125 $ hg id -R remote -r 0
1125 $ hg id -R remote -r 0
1126 abort: repository remote not found
1126 abort: repository remote not found
1127 [255]
1127 [255]
1128 $ hg --config share.pool=share -q clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" a ssh://user@dummy/remote
1128 $ hg --config share.pool=share -q clone a ssh://user@dummy/remote
1129 $ hg -R remote id -r 0
1129 $ hg -R remote id -r 0
1130 acb14030fe0a
1130 acb14030fe0a
1131
1131
@@ -208,7 +208,7 b' by old clients.'
208
208
209 Feature works over SSH
209 Feature works over SSH
210
210
211 $ hg clone -U -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/server ssh-full-clone
211 $ hg clone -U ssh://user@dummy/server ssh-full-clone
212 applying clone bundle from http://localhost:$HGPORT1/full.hg
212 applying clone bundle from http://localhost:$HGPORT1/full.hg
213 adding changesets
213 adding changesets
214 adding manifests
214 adding manifests
@@ -101,7 +101,7 b' typical client does not want echo-back m'
101 000000000000 tip
101 000000000000 tip
102 *** runcommand id -runknown
102 *** runcommand id -runknown
103 abort: unknown revision 'unknown'
103 abort: unknown revision 'unknown'
104 [255]
104 [10]
105
105
106 >>> from hgclient import bprint, check, readchannel
106 >>> from hgclient import bprint, check, readchannel
107 >>> @check
107 >>> @check
@@ -218,7 +218,7 b' check that local configs for the cached '
218 devel.all-warnings=true
218 devel.all-warnings=true
219 devel.default-date=0 0
219 devel.default-date=0 0
220 extensions.fsmonitor= (fsmonitor !)
220 extensions.fsmonitor= (fsmonitor !)
221 format.exp-dirstate-v2=1 (dirstate-v2 !)
221 format.exp-rc-dirstate-v2=1 (dirstate-v2 !)
222 largefiles.usercache=$TESTTMP/.cache/largefiles
222 largefiles.usercache=$TESTTMP/.cache/largefiles
223 lfs.usercache=$TESTTMP/.cache/lfs
223 lfs.usercache=$TESTTMP/.cache/lfs
224 ui.slash=True
224 ui.slash=True
@@ -226,6 +226,7 b' check that local configs for the cached '
226 ui.detailed-exit-code=True
226 ui.detailed-exit-code=True
227 ui.merge=internal:merge
227 ui.merge=internal:merge
228 ui.mergemarkers=detailed
228 ui.mergemarkers=detailed
229 ui.ssh=* (glob)
229 ui.timeout.warn=15
230 ui.timeout.warn=15
230 ui.foo=bar
231 ui.foo=bar
231 ui.nontty=true
232 ui.nontty=true
@@ -239,6 +240,7 b' check that local configs for the cached '
239 ui.detailed-exit-code=True
240 ui.detailed-exit-code=True
240 ui.merge=internal:merge
241 ui.merge=internal:merge
241 ui.mergemarkers=detailed
242 ui.mergemarkers=detailed
243 ui.ssh=* (glob)
242 ui.timeout.warn=15
244 ui.timeout.warn=15
243 ui.nontty=true
245 ui.nontty=true
244 #endif
246 #endif
@@ -316,7 +316,7 b' Show all commands + options'
316 debugpushkey:
316 debugpushkey:
317 debugpvec:
317 debugpvec:
318 debugrebuilddirstate: rev, minimal
318 debugrebuilddirstate: rev, minimal
319 debugrebuildfncache:
319 debugrebuildfncache: only-data
320 debugrename: rev
320 debugrename: rev
321 debugrequires:
321 debugrequires:
322 debugrevlog: changelog, manifest, dir, dump
322 debugrevlog: changelog, manifest, dir, dump
@@ -413,7 +413,7 b' Listing all config options'
413
413
414 The feature is experimental and behavior may vary. This test exists to make sure the code is run. We grep it to avoid too much variability in its current experimental state.
414 The feature is experimental and behavior may vary. This test exists to make sure the code is run. We grep it to avoid too much variability in its current experimental state.
415
415
416 $ hg config --exp-all-known | grep commit
416 $ hg config --exp-all-known | grep commit | grep -v ssh
417 commands.commit.interactive.git=False
417 commands.commit.interactive.git=False
418 commands.commit.interactive.ignoreblanklines=False
418 commands.commit.interactive.ignoreblanklines=False
419 commands.commit.interactive.ignorews=False
419 commands.commit.interactive.ignorews=False
@@ -50,7 +50,7 b' Remove the directory, then try to replac'
50 $ echo a >> a
50 $ echo a >> a
51 $ commit -a -m t4.2
51 $ commit -a -m t4.2
52 $ git checkout master >/dev/null 2>/dev/null
52 $ git checkout master >/dev/null 2>/dev/null
53 $ git pull --no-commit . other > /dev/null 2>/dev/null
53 $ git pull --no-commit . other --no-rebase > /dev/null 2>/dev/null
54 $ commit -m 'Merge branch other'
54 $ commit -m 'Merge branch other'
55 $ cd ..
55 $ cd ..
56 $ hg convert --config extensions.progress= --config progress.assume-tty=1 \
56 $ hg convert --config extensions.progress= --config progress.assume-tty=1 \
@@ -137,7 +137,7 b' Remove the directory, then try to replac'
137 $ git add baz
137 $ git add baz
138 $ commit -a -m 'add baz'
138 $ commit -a -m 'add baz'
139 $ git checkout master >/dev/null 2>/dev/null
139 $ git checkout master >/dev/null 2>/dev/null
140 $ git pull --no-commit . Bar Baz > /dev/null 2>/dev/null
140 $ git pull --no-commit . Bar Baz --no-rebase > /dev/null 2>/dev/null
141 $ commit -m 'Octopus merge'
141 $ commit -m 'Octopus merge'
142 $ echo bar >> bar
142 $ echo bar >> bar
143 $ commit -a -m 'change bar'
143 $ commit -a -m 'change bar'
@@ -145,7 +145,7 b' Remove the directory, then try to replac'
145 $ echo >> foo
145 $ echo >> foo
146 $ commit -a -m 'change foo'
146 $ commit -a -m 'change foo'
147 $ git checkout master >/dev/null 2>/dev/null
147 $ git checkout master >/dev/null 2>/dev/null
148 $ git pull --no-commit -s ours . Foo > /dev/null 2>/dev/null
148 $ git pull --no-commit -s ours . Foo --no-rebase > /dev/null 2>/dev/null
149 $ commit -m 'Discard change to foo'
149 $ commit -m 'Discard change to foo'
150 $ cd ..
150 $ cd ..
151 $ glog()
151 $ glog()
@@ -644,14 +644,13 b' Test debugcapabilities command:'
644
644
645 Test debugpeer
645 Test debugpeer
646
646
647 $ hg --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" debugpeer ssh://user@dummy/debugrevlog
647 $ hg debugpeer ssh://user@dummy/debugrevlog
648 url: ssh://user@dummy/debugrevlog
648 url: ssh://user@dummy/debugrevlog
649 local: no
649 local: no
650 pushable: yes
650 pushable: yes
651
651
652 $ hg --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" --debug debugpeer ssh://user@dummy/debugrevlog
652 $ hg --debug debugpeer ssh://user@dummy/debugrevlog
653 running "*" "*/tests/dummyssh" 'user@dummy' 'hg -R debugrevlog serve --stdio' (glob) (no-windows !)
653 running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R debugrevlog serve --stdio['"] (re)
654 running "*" "*\tests/dummyssh" "user@dummy" "hg -R debugrevlog serve --stdio" (glob) (windows !)
655 devel-peer-request: hello+between
654 devel-peer-request: hello+between
656 devel-peer-request: pairs: 81 bytes
655 devel-peer-request: pairs: 81 bytes
657 sending hello command
656 sending hello command
@@ -119,7 +119,7 b' as pairs even if x == y, but not for "f('
119 +wdir
119 +wdir
120 $ hg diff -r "2 and 1"
120 $ hg diff -r "2 and 1"
121 abort: empty revision range
121 abort: empty revision range
122 [255]
122 [10]
123
123
124 $ cd ..
124 $ cd ..
125
125
@@ -42,7 +42,7 b' Testing with rev number'
42 $ hg exp 2 --config experimental.directaccess.revnums=False
42 $ hg exp 2 --config experimental.directaccess.revnums=False
43 abort: hidden revision '2' was rewritten as: 2443a0e66469
43 abort: hidden revision '2' was rewritten as: 2443a0e66469
44 (use --hidden to access hidden revisions)
44 (use --hidden to access hidden revisions)
45 [255]
45 [10]
46
46
47 $ hg exp 2
47 $ hg exp 2
48 # HG changeset patch
48 # HG changeset patch
@@ -75,7 +75,7 b' Testing with rev number'
75 $ hg status --change 2 --config experimental.directaccess.revnums=False
75 $ hg status --change 2 --config experimental.directaccess.revnums=False
76 abort: hidden revision '2' was rewritten as: 2443a0e66469
76 abort: hidden revision '2' was rewritten as: 2443a0e66469
77 (use --hidden to access hidden revisions)
77 (use --hidden to access hidden revisions)
78 [255]
78 [10]
79
79
80 $ hg diff -c 2
80 $ hg diff -c 2
81 diff -r 29becc82797a -r 28ad74487de9 c
81 diff -r 29becc82797a -r 28ad74487de9 c
@@ -197,12 +197,12 b' Commands with undefined intent should no'
197 $ hg phase -r 28ad74
197 $ hg phase -r 28ad74
198 abort: hidden revision '28ad74' was rewritten as: 2443a0e66469
198 abort: hidden revision '28ad74' was rewritten as: 2443a0e66469
199 (use --hidden to access hidden revisions)
199 (use --hidden to access hidden revisions)
200 [255]
200 [10]
201
201
202 $ hg phase -r 2
202 $ hg phase -r 2
203 abort: hidden revision '2' was rewritten as: 2443a0e66469
203 abort: hidden revision '2' was rewritten as: 2443a0e66469
204 (use --hidden to access hidden revisions)
204 (use --hidden to access hidden revisions)
205 [255]
205 [10]
206
206
207 Setting a bookmark will make that changeset unhidden, so this should come at the end
207 Setting a bookmark will make that changeset unhidden, so this should come at the end
208
208
@@ -13,13 +13,13 b' class dirstests(unittest.TestCase):'
13 (b'a/a/a', [b'a', b'a/a', b'']),
13 (b'a/a/a', [b'a', b'a/a', b'']),
14 (b'alpha/beta/gamma', [b'', b'alpha', b'alpha/beta']),
14 (b'alpha/beta/gamma', [b'', b'alpha', b'alpha/beta']),
15 ]:
15 ]:
16 d = pathutil.dirs({})
16 d = pathutil.dirs([])
17 d.addpath(case)
17 d.addpath(case)
18 self.assertEqual(sorted(d), sorted(want))
18 self.assertEqual(sorted(d), sorted(want))
19
19
20 def testinvalid(self):
20 def testinvalid(self):
21 with self.assertRaises(ValueError):
21 with self.assertRaises(ValueError):
22 d = pathutil.dirs({})
22 d = pathutil.dirs([])
23 d.addpath(b'a//b')
23 d.addpath(b'a//b')
24
24
25
25
@@ -1,15 +1,12 b''
1 #testcases dirstate-v1 dirstate-v1-tree dirstate-v2
1 #testcases dirstate-v1 dirstate-v2
2
3 #if dirstate-v1-tree
4 #require rust
5 $ echo '[experimental]' >> $HGRCPATH
6 $ echo 'dirstate-tree.in-memory=1' >> $HGRCPATH
7 #endif
8
2
9 #if dirstate-v2
3 #if dirstate-v2
10 #require rust
4 $ cat >> $HGRCPATH << EOF
11 $ echo '[format]' >> $HGRCPATH
5 > [format]
12 $ echo 'exp-dirstate-v2=1' >> $HGRCPATH
6 > exp-rc-dirstate-v2=1
7 > [storage]
8 > dirstate-v2.slow-path=allow
9 > EOF
13 #endif
10 #endif
14
11
15 $ hg init repo
12 $ hg init repo
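The same test-case rewrite recurs in several files below: the dirstate-v1-tree case is dropped, and the dirstate-v2 case now opts in through the renamed format option and explicitly allows the slow path. The consolidated hgrc fragment these tests append, exactly as in the hunks:

    [format]
    exp-rc-dirstate-v2=1
    [storage]
    dirstate-v2.slow-path=allow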
@@ -1,15 +1,12 b''
1 #testcases dirstate-v1 dirstate-v1-tree dirstate-v2
1 #testcases dirstate-v1 dirstate-v2
2
3 #if dirstate-v1-tree
4 #require rust
5 $ echo '[experimental]' >> $HGRCPATH
6 $ echo 'dirstate-tree.in-memory=1' >> $HGRCPATH
7 #endif
8
2
9 #if dirstate-v2
3 #if dirstate-v2
10 #require rust
4 $ cat >> $HGRCPATH << EOF
11 $ echo '[format]' >> $HGRCPATH
5 > [format]
12 $ echo 'exp-dirstate-v2=1' >> $HGRCPATH
6 > exp-rc-dirstate-v2=1
7 > [storage]
8 > dirstate-v2.slow-path=allow
9 > EOF
13 #endif
10 #endif
14
11
15 Checking the size/permissions/file-type of files stored in the
12 Checking the size/permissions/file-type of files stored in the
@@ -1,15 +1,12 b''
1 #testcases dirstate-v1 dirstate-v1-tree dirstate-v2
1 #testcases dirstate-v1 dirstate-v2
2
3 #if dirstate-v1-tree
4 #require rust
5 $ echo '[experimental]' >> $HGRCPATH
6 $ echo 'dirstate-tree.in-memory=1' >> $HGRCPATH
7 #endif
8
2
9 #if dirstate-v2
3 #if dirstate-v2
10 #require rust
4 $ cat >> $HGRCPATH << EOF
11 $ echo '[format]' >> $HGRCPATH
5 > [format]
12 $ echo 'exp-dirstate-v2=1' >> $HGRCPATH
6 > exp-rc-dirstate-v2=1
7 > [storage]
8 > dirstate-v2.slow-path=allow
9 > EOF
13 #endif
10 #endif
14
11
15 ------ Test dirstate._dirs refcounting
12 ------ Test dirstate._dirs refcounting
@@ -59,13 +56,13 b' Prepare test repo:'
59
56
60 Set mtime of a into the future:
57 Set mtime of a into the future:
61
58
62 $ touch -t 202101011200 a
59 $ touch -t 203101011200 a
63
60
64 Status must not set a's entry to unset (issue1790):
61 Status must not set a's entry to unset (issue1790):
65
62
66 $ hg status
63 $ hg status
67 $ hg debugstate
64 $ hg debugstate
68 n 644 2 2021-01-01 12:00:00 a
65 n 644 2 2031-01-01 12:00:00 a
69
66
70 Test modulo storage/comparison of absurd dates:
67 Test modulo storage/comparison of absurd dates:
71
68
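Moving the date from 2021 to 2031 keeps the mtime in the future relative to the test's clock for years to come. The modulo storage exercised next truncates the stored seconds, so absurdly large dates wrap instead of overflowing the on-disk field; a hedged sketch of that truncation (the 31-bit mask is an assumption based on the signed 32-bit field):

    def truncate_mtime(seconds):
        # Keep only the low 31 bits; comparisons are done modulo 2**31.
        return seconds & 0x7FFFFFFF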
@@ -370,7 +370,7 b' Catch exporting unknown revisions (espec'
370 [10]
370 [10]
371 $ hg export 999
371 $ hg export 999
372 abort: unknown revision '999'
372 abort: unknown revision '999'
373 [255]
373 [10]
374 $ hg export "not all()"
374 $ hg export "not all()"
375 abort: export requires at least one changeset
375 abort: export requires at least one changeset
376 [10]
376 [10]
@@ -87,7 +87,7 b' Specifying an empty revision should abor'
87
87
88 $ hg extdiff -p diff --patch --rev 'ancestor()' --rev 1
88 $ hg extdiff -p diff --patch --rev 'ancestor()' --rev 1
89 abort: empty revision on one side of range
89 abort: empty revision on one side of range
90 [255]
90 [10]
91
91
92 Test diff during merge:
92 Test diff during merge:
93
93
@@ -1692,6 +1692,26 b' Can load minimum version identical to cu'
1692 $ hg --config extensions.minversion=minversion3.py version 2>&1 | egrep '\(third'
1692 $ hg --config extensions.minversion=minversion3.py version 2>&1 | egrep '\(third'
1693 [1]
1693 [1]
1694
1694
1695 Don't explode on py3 with a bad version number (both str vs bytes, and not enough
1696 parts)
1697
1698 $ cat > minversion4.py << EOF
1699 > from mercurial import util
1700 > util.version = lambda: b'3.5'
1701 > minimumhgversion = '3'
1702 > EOF
1703 $ hg --config extensions.minversion=minversion4.py version -v
1704 Mercurial Distributed SCM (version 3.5)
1705 (see https://mercurial-scm.org for more information)
1706
1707 Copyright (C) 2005-* Olivia Mackall and others (glob)
1708 This is free software; see the source for copying conditions. There is NO
1709 warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
1710
1711 Enabled extensions:
1712
1713 minversion external
1714
1695 Restore HGRCPATH
1715 Restore HGRCPATH
1696
1716
1697 $ HGRCPATH=$ORGHGRCPATH
1717 $ HGRCPATH=$ORGHGRCPATH
@@ -458,7 +458,7 b' missing file'
458
458
459 $ hg ann nosuchfile
459 $ hg ann nosuchfile
460 abort: nosuchfile: no such file in rev e9e6b4fa872f
460 abort: nosuchfile: no such file in rev e9e6b4fa872f
461 [255]
461 [10]
462
462
463 annotate file without '\n' on last line
463 annotate file without '\n' on last line
464
464
@@ -1,6 +1,4 b''
1 $ cat >> $HGRCPATH << EOF
1 $ cat >> $HGRCPATH << EOF
2 > [ui]
3 > ssh = "$PYTHON" "$TESTDIR/dummyssh"
4 > [extensions]
2 > [extensions]
5 > fastannotate=
3 > fastannotate=
6 > [fastannotate]
4 > [fastannotate]
@@ -1752,3 +1752,101 b' middle of fix.'
1752 r0.whole:
1752 r0.whole:
1753 hello
1753 hello
1754
1754
1755
1756 We should execute the fixer tools as few times as possible, because they might
1757 be slow or expensive to execute. The inputs to each execution are effectively
1758 the file path, file content, and line ranges. So, we should be able to re-use
1759 results whenever those inputs are repeated. That saves a lot of work when
1760 fixing chains of commits that all have the same file revision for a path being
1761 fixed.
1762
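A minimal sketch of that re-use, with hypothetical fixer.name and fixer.run helpers standing in for the extension's real internals: results are memoized on everything that influences a tool's output, so a chain of commits sharing a file revision runs each tool once per distinct input.

    import hashlib

    _results = {}

    def run_fixer_cached(fixer, path, content, ranges):
        # Key on the effective inputs: the tool, the file path, the
        # exact file content, and the changed line ranges.
        key = (fixer.name, path, hashlib.sha1(content).digest(), tuple(ranges))
        if key not in _results:
            _results[key] = fixer.run(path, content, ranges)
        return _results[key]

The invocation counts collected below verify the effect.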
1763 $ hg init numberofinvocations
1764 $ cd numberofinvocations
1765
1766 $ printf "bar1" > bar.log
1767 $ printf "baz1" > baz.log
1768 $ printf "foo1" > foo.log
1769 $ printf "qux1" > qux.log
1770 $ hg commit -Aqm "commit1"
1771
1772 $ printf "bar2" > bar.log
1773 $ printf "baz2" > baz.log
1774 $ printf "foo2" > foo.log
1775 $ hg commit -Aqm "commit2"
1776
1777 $ printf "bar3" > bar.log
1778 $ printf "baz3" > baz.log
1779 $ hg commit -Aqm "commit3"
1780
1781 $ printf "bar4" > bar.log
1782
1783 $ LOGFILE=$TESTTMP/log
1784 $ LOGGER=$TESTTMP/log.py
1785 $ cat >> $LOGGER <<EOF
1786 > # Appends the input file's name to the log file.
1787 > import sys
1788 > with open(r'$LOGFILE', 'a') as f:
1789 > f.write(sys.argv[1] + '\n')
1790 > sys.stdout.write(sys.stdin.read())
1791 > EOF
1792
1793 $ hg fix --working-dir -r "all()" \
1794 > --config "fix.log:command=\"$PYTHON\" \"$LOGGER\" {rootpath}" \
1795 > --config "fix.log:pattern=glob:**.log"
1796
1797 $ cat $LOGFILE | sort | uniq -c
1798 4 bar.log
1799 4 baz.log
1800 3 foo.log
1801 2 qux.log
1802
1803 $ cd ..
1804
1805 For tools that support line ranges, it's wrong to blindly re-use fixed file
1806 content for the same file revision if it appears twice with different baserevs,
1807 because the line ranges could be different. Since computing line ranges is
1808 ambiguous, this isn't a matter of correctness, but it affects the usability of
1809 this extension. It might be simpler if baserevs were computed on a
1810 per-file basis, making this situation impossible to construct.
1811
1812 In the following example, we construct two subgraphs with the same file
1813 revisions, and fix different sub-subgraphs to get different baserevs and
1814 different changed line ranges. The key precondition is that revisions 1 and 4
1815 have the same file revision, and the key result is that their successors don't
1816 have the same file content, because we want to fix different areas of that same
1817 file revision's content.
1818
1819 $ hg init differentlineranges
1820 $ cd differentlineranges
1821
1822 $ printf "a\nb\n" > file.changed
1823 $ hg commit -Aqm "0 ab"
1824 $ printf "a\nx\n" > file.changed
1825 $ hg commit -Aqm "1 ax"
1826 $ hg remove file.changed
1827 $ hg commit -Aqm "2 removed"
1828 $ hg revert file.changed -r 0
1829 $ hg commit -Aqm "3 ab (reverted)"
1830 $ hg revert file.changed -r 1
1831 $ hg commit -Aqm "4 ax (reverted)"
1832
1833 $ hg manifest --debug --template "{hash}\n" -r 0; \
1834 > hg manifest --debug --template "{hash}\n" -r 3
1835 418f692145676128d2fb518b027ddbac624be76e
1836 418f692145676128d2fb518b027ddbac624be76e
1837 $ hg manifest --debug --template "{hash}\n" -r 1; \
1838 > hg manifest --debug --template "{hash}\n" -r 4
1839 09b8b3ce5a507caaa282f7262679e6d04091426c
1840 09b8b3ce5a507caaa282f7262679e6d04091426c
1841
1842 $ hg fix --working-dir -r 1+3+4
1843 3 new orphan changesets
1844
1845 $ hg cat file.changed -r "successors(1)" --hidden
1846 a
1847 X
1848 $ hg cat file.changed -r "successors(4)" --hidden
1849 A
1850 X
1851
1852 $ cd ..
@@ -1121,6 +1121,7 b' internals topic renders index of availab'
1121 censor Censor
1121 censor Censor
1122 changegroups Changegroups
1122 changegroups Changegroups
1123 config Config Registrar
1123 config Config Registrar
1124 dirstate-v2 dirstate-v2 file format
1124 extensions Extension API
1125 extensions Extension API
1125 mergestate Mergestate
1126 mergestate Mergestate
1126 requirements Repository Requirements
1127 requirements Repository Requirements
@@ -1899,6 +1900,17 b' Test section lookup'
1899 Revsets specifying bookmarks will not result in the bookmark being
1900 Revsets specifying bookmarks will not result in the bookmark being
1900 pushed.
1901 pushed.
1901
1902
1903 "bookmarks.mode"
1904 How bookmarks will be dealt with during the exchange. It supports the
1905 following values:
1906
1907 - "default": the default behavior, local and remote bookmarks are
1908 "merged" on push/pull.
1909 - "mirror": when pulling, replace local bookmarks by remote bookmarks.
1910 This is useful to replicate a repository, or as an optimization.
1911 - "ignore": ignore bookmarks during exchange. (This currently only
1912 affect pulling)
1913
1902 The following special named paths exist:
1914 The following special named paths exist:
1903
1915
1904 "default"
1916 "default"
@@ -3566,6 +3578,13 b' Sub-topic indexes rendered properly'
3566 Config Registrar
3578 Config Registrar
3567 </td></tr>
3579 </td></tr>
3568 <tr><td>
3580 <tr><td>
3581 <a href="/help/internals.dirstate-v2">
3582 dirstate-v2
3583 </a>
3584 </td><td>
3585 dirstate-v2 file format
3586 </td></tr>
3587 <tr><td>
3569 <a href="/help/internals.extensions">
3588 <a href="/help/internals.extensions">
3570 extensions
3589 extensions
3571 </a>
3590 </a>
@@ -1,15 +1,12 b''
1 #testcases dirstate-v1 dirstate-v1-tree dirstate-v2
1 #testcases dirstate-v1 dirstate-v2
2
3 #if dirstate-v1-tree
4 #require rust
5 $ echo '[experimental]' >> $HGRCPATH
6 $ echo 'dirstate-tree.in-memory=1' >> $HGRCPATH
7 #endif
8
2
9 #if dirstate-v2
3 #if dirstate-v2
10 #require rust
4 $ cat >> $HGRCPATH << EOF
11 $ echo '[format]' >> $HGRCPATH
5 > [format]
12 $ echo 'exp-dirstate-v2=1' >> $HGRCPATH
6 > exp-rc-dirstate-v2=1
7 > [storage]
8 > dirstate-v2.slow-path=allow
9 > EOF
13 #endif
10 #endif
14
11
15 $ hg init ignorerepo
12 $ hg init ignorerepo
@@ -403,9 +400,10 b' Windows paths are accepted on input'
403
400
404 #endif
401 #endif
405
402
406 #if dirstate-v2
403 #if dirstate-v2 rust
407
404
408 Check the hash of ignore patterns written in the dirstate
405 Check the hash of ignore patterns written in the dirstate
406 This is an optimization that is only relevant when using the Rust extensions
409
407
410 $ hg status > /dev/null
408 $ hg status > /dev/null
411 $ cat .hg/testhgignore .hg/testhgignorerel .hgignore dir2/.hgignore dir1/.hgignore dir1/.hgignoretwo | $TESTDIR/f --sha1
409 $ cat .hg/testhgignore .hg/testhgignorerel .hgignore dir2/.hgignore dir1/.hgignore dir1/.hgignoretwo | $TESTDIR/f --sha1
@@ -93,7 +93,7 b' Run on a revision not ancestors of the c'
93 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
93 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
94 $ hg histedit -r 4
94 $ hg histedit -r 4
95 abort: 08d98a8350f3 is not an ancestor of working directory
95 abort: 08d98a8350f3 is not an ancestor of working directory
96 [255]
96 [10]
97 $ hg up --quiet
97 $ hg up --quiet
98
98
99
99
@@ -290,7 +290,7 b' short hash. This tests issue3893.'
290 created new head
290 created new head
291 $ hg histedit -r 'heads(all())'
291 $ hg histedit -r 'heads(all())'
292 abort: The specified revisions must have exactly one common root
292 abort: The specified revisions must have exactly one common root
293 [255]
293 [10]
294
294
295 Test that trimming description using multi-byte characters
295 Test that trimming description using multi-byte characters
296 --------------------------------------------------------------------
296 --------------------------------------------------------------------
@@ -552,5 +552,5 b' warn the user on editing tagged commits'
552 do you want to continue (yN)? n
552 do you want to continue (yN)? n
553 abort: histedit cancelled
553 abort: histedit cancelled
554
554
555 [255]
555 [250]
556 $ cd ..
556 $ cd ..
@@ -160,7 +160,7 b' even prompt the user for rules, sidestep'
160 $ hg histedit e860deea161a
160 $ hg histedit e860deea161a
161 c: untracked file differs
161 c: untracked file differs
162 abort: untracked files in working directory conflict with files in 055a42cdd887
162 abort: untracked files in working directory conflict with files in 055a42cdd887
163 [255]
163 [20]
164
164
165 We should have detected the collision early enough we're not in a
165 We should have detected the collision early enough we're not in a
166 histedit state, and p1 is unchanged.
166 histedit state, and p1 is unchanged.
@@ -508,7 +508,7 b' Note that there is a few reordering in t'
508 $ hg ci -m 'modify wat'
508 $ hg ci -m 'modify wat'
509 $ hg histedit 050280826e04
509 $ hg histedit 050280826e04
510 abort: cannot edit history that contains merges
510 abort: cannot edit history that contains merges
511 [255]
511 [20]
512 $ cd ..
512 $ cd ..
513
513
514 Check abort behavior
514 Check abort behavior
@@ -134,7 +134,7 b' test to check number of roots in outgoin'
134 $ HGEDITOR=cat hg -q histedit --outgoing '../r'
134 $ HGEDITOR=cat hg -q histedit --outgoing '../r'
135 abort: there are ambiguous outgoing revisions
135 abort: there are ambiguous outgoing revisions
136 (see 'hg help histedit' for more detail)
136 (see 'hg help histedit' for more detail)
137 [255]
137 [20]
138
138
139 $ hg -q update -C 2
139 $ hg -q update -C 2
140 $ echo aa >> a
140 $ echo aa >> a
@@ -151,6 +151,6 b' test to check number of roots in outgoin'
151 $ HGEDITOR=cat hg -q histedit --outgoing '../r#default'
151 $ HGEDITOR=cat hg -q histedit --outgoing '../r#default'
152 abort: there are ambiguous outgoing revisions
152 abort: there are ambiguous outgoing revisions
153 (see 'hg help histedit' for more detail)
153 (see 'hg help histedit' for more detail)
154 [255]
154 [20]
155
155
156 $ cd ..
156 $ cd ..
@@ -9,8 +9,6 b' Setup'
9
9
10 $ . "$TESTDIR/library-infinitepush.sh"
10 $ . "$TESTDIR/library-infinitepush.sh"
11 $ cat >> $HGRCPATH <<EOF
11 $ cat >> $HGRCPATH <<EOF
12 > [ui]
13 > ssh = "$PYTHON" "$TESTDIR/dummyssh"
14 > [alias]
12 > [alias]
15 > glog = log -GT "{rev}:{node|short} {desc}\n{phase}"
13 > glog = log -GT "{rev}:{node|short} {desc}\n{phase}"
16 > EOF
14 > EOF
@@ -19,7 +19,7 b" creating 'local'"
19 store created
19 store created
20 00changelog.i created
20 00changelog.i created
21 dotencode
21 dotencode
22 exp-dirstate-v2 (dirstate-v2 !)
22 exp-rc-dirstate-v2 (dirstate-v2 !)
23 fncache
23 fncache
24 generaldelta
24 generaldelta
25 persistent-nodemap (rust !)
25 persistent-nodemap (rust !)
@@ -61,7 +61,7 b' creating repo with format.usestore=false'
61
61
62 $ hg --config format.usestore=false init old
62 $ hg --config format.usestore=false init old
63 $ checknewrepo old
63 $ checknewrepo old
64 exp-dirstate-v2 (dirstate-v2 !)
64 exp-rc-dirstate-v2 (dirstate-v2 !)
65 generaldelta
65 generaldelta
66 persistent-nodemap (rust !)
66 persistent-nodemap (rust !)
67 revlog-compression-zstd (zstd !)
67 revlog-compression-zstd (zstd !)
@@ -75,7 +75,7 b' creating repo with format.usefncache=fal'
75 $ checknewrepo old2
75 $ checknewrepo old2
76 store created
76 store created
77 00changelog.i created
77 00changelog.i created
78 exp-dirstate-v2 (dirstate-v2 !)
78 exp-rc-dirstate-v2 (dirstate-v2 !)
79 generaldelta
79 generaldelta
80 persistent-nodemap (rust !)
80 persistent-nodemap (rust !)
81 revlog-compression-zstd (zstd !)
81 revlog-compression-zstd (zstd !)
@@ -90,7 +90,7 b' creating repo with format.dotencode=fals'
90 $ checknewrepo old3
90 $ checknewrepo old3
91 store created
91 store created
92 00changelog.i created
92 00changelog.i created
93 exp-dirstate-v2 (dirstate-v2 !)
93 exp-rc-dirstate-v2 (dirstate-v2 !)
94 fncache
94 fncache
95 generaldelta
95 generaldelta
96 persistent-nodemap (rust !)
96 persistent-nodemap (rust !)
@@ -107,7 +107,7 b' creating repo with format.dotencode=fals'
107 store created
107 store created
108 00changelog.i created
108 00changelog.i created
109 dotencode
109 dotencode
110 exp-dirstate-v2 (dirstate-v2 !)
110 exp-rc-dirstate-v2 (dirstate-v2 !)
111 fncache
111 fncache
112 persistent-nodemap (rust !)
112 persistent-nodemap (rust !)
113 revlog-compression-zstd (zstd !)
113 revlog-compression-zstd (zstd !)
@@ -123,7 +123,7 b' test failure'
123
123
124 init+push to remote2
124 init+push to remote2
125
125
126 $ hg init -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote2
126 $ hg init ssh://user@dummy/remote2
127 $ hg incoming -R remote2 local
127 $ hg incoming -R remote2 local
128 comparing with local
128 comparing with local
129 changeset: 0:08b9e9f63b32
129 changeset: 0:08b9e9f63b32
@@ -133,7 +133,7 b' init+push to remote2'
133 summary: init
133 summary: init
134
134
135
135
136 $ hg push -R local -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote2
136 $ hg push -R local ssh://user@dummy/remote2
137 pushing to ssh://user@dummy/remote2
137 pushing to ssh://user@dummy/remote2
138 searching for changes
138 searching for changes
139 remote: adding changesets
139 remote: adding changesets
@@ -143,7 +143,7 b' init+push to remote2'
143
143
144 clone to remote1
144 clone to remote1
145
145
146 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" local ssh://user@dummy/remote1
146 $ hg clone local ssh://user@dummy/remote1
147 searching for changes
147 searching for changes
148 remote: adding changesets
148 remote: adding changesets
149 remote: adding manifests
149 remote: adding manifests
@@ -151,7 +151,7 b' clone to remote1'
151 remote: added 1 changesets with 1 changes to 1 files
151 remote: added 1 changesets with 1 changes to 1 files
152
152
153 The largefiles extension doesn't crash
153 The largefiles extension doesn't crash
154 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" local ssh://user@dummy/remotelf --config extensions.largefiles=
154 $ hg clone local ssh://user@dummy/remotelf --config extensions.largefiles=
155 The fsmonitor extension is incompatible with the largefiles extension and has been disabled. (fsmonitor !)
155 The fsmonitor extension is incompatible with the largefiles extension and has been disabled. (fsmonitor !)
156 The fsmonitor extension is incompatible with the largefiles extension and has been disabled. (fsmonitor !)
156 The fsmonitor extension is incompatible with the largefiles extension and has been disabled. (fsmonitor !)
157 searching for changes
157 searching for changes
@@ -162,14 +162,14 b" The largefiles extension doesn't crash"
162
162
163 init to existing repo
163 init to existing repo
164
164
165 $ hg init -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote1
165 $ hg init ssh://user@dummy/remote1
166 abort: repository remote1 already exists
166 abort: repository remote1 already exists
167 abort: could not create remote repo
167 abort: could not create remote repo
168 [255]
168 [255]
169
169
170 clone to existing repo
170 clone to existing repo
171
171
172 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" local ssh://user@dummy/remote1
172 $ hg clone local ssh://user@dummy/remote1
173 abort: repository remote1 already exists
173 abort: repository remote1 already exists
174 abort: could not create remote repo
174 abort: could not create remote repo
175 [255]
175 [255]
@@ -226,7 +226,7 b" creating 'local/sub/repo'"
226 store created
226 store created
227 00changelog.i created
227 00changelog.i created
228 dotencode
228 dotencode
229 exp-dirstate-v2 (dirstate-v2 !)
229 exp-rc-dirstate-v2 (dirstate-v2 !)
230 fncache
230 fncache
231 generaldelta
231 generaldelta
232 persistent-nodemap (rust !)
232 persistent-nodemap (rust !)
@@ -249,7 +249,7 b' init should (for consistency with clone)'
249 store created
249 store created
250 00changelog.i created
250 00changelog.i created
251 dotencode
251 dotencode
252 exp-dirstate-v2 (dirstate-v2 !)
252 exp-rc-dirstate-v2 (dirstate-v2 !)
253 fncache
253 fncache
254 generaldelta
254 generaldelta
255 persistent-nodemap (rust !)
255 persistent-nodemap (rust !)
@@ -268,7 +268,7 b' verify that clone also expand urls'
268 store created
268 store created
269 00changelog.i created
269 00changelog.i created
270 dotencode
270 dotencode
271 exp-dirstate-v2 (dirstate-v2 !)
271 exp-rc-dirstate-v2 (dirstate-v2 !)
272 fncache
272 fncache
273 generaldelta
273 generaldelta
274 persistent-nodemap (rust !)
274 persistent-nodemap (rust !)
@@ -283,7 +283,7 b' clone bookmarks'
283 $ hg -R local bookmark test
283 $ hg -R local bookmark test
284 $ hg -R local bookmarks
284 $ hg -R local bookmarks
285 * test 0:08b9e9f63b32
285 * test 0:08b9e9f63b32
286 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" local ssh://user@dummy/remote-bookmarks
286 $ hg clone local ssh://user@dummy/remote-bookmarks
287 searching for changes
287 searching for changes
288 remote: adding changesets
288 remote: adding changesets
289 remote: adding manifests
289 remote: adding manifests
@@ -185,10 +185,12 b' conditional above.'
185
185
186 $ find share_dst/.hg/largefiles/* | sort
186 $ find share_dst/.hg/largefiles/* | sort
187 share_dst/.hg/largefiles/dirstate
187 share_dst/.hg/largefiles/dirstate
188 share_dst/.hg/largefiles/undo.backup.dirstate
188
189
189 $ find src/.hg/largefiles/* | egrep "(dirstate|$hash)" | sort
190 $ find src/.hg/largefiles/* | egrep "(dirstate|$hash)" | sort
190 src/.hg/largefiles/dirstate
191 src/.hg/largefiles/dirstate
191 src/.hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020
192 src/.hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020
193 src/.hg/largefiles/undo.backup.dirstate
192
194
193 Verify that backwards compatibility is maintained for old storage layout
195 Verify that backwards compatibility is maintained for old storage layout
194 $ mv src/.hg/largefiles/$hash share_dst/.hg/largefiles
196 $ mv src/.hg/largefiles/$hash share_dst/.hg/largefiles
@@ -124,7 +124,7 b' used all HGPORTs, kill all daemons'
124 #endif
124 #endif
125
125
126 vanilla clients locked out from largefiles ssh repos
126 vanilla clients locked out from largefiles ssh repos
127 $ hg --config extensions.largefiles=! clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/r4 r5
127 $ hg --config extensions.largefiles=! clone ssh://user@dummy/r4 r5
128 remote:
128 remote:
129 remote: This repository uses the largefiles extension.
129 remote: This repository uses the largefiles extension.
130 remote:
130 remote:
@@ -96,7 +96,7 b' Test link+rename largefile codepath'
96 "lfconvert" adds 'largefiles' to .hg/requires.
96 "lfconvert" adds 'largefiles' to .hg/requires.
97 $ cat .hg/requires
97 $ cat .hg/requires
98 dotencode
98 dotencode
99 exp-dirstate-v2 (dirstate-v2 !)
99 exp-rc-dirstate-v2 (dirstate-v2 !)
100 fncache
100 fncache
101 generaldelta
101 generaldelta
102 largefiles
102 largefiles
@@ -290,7 +290,7 b' The requirement is added to the destinat'
290
290
291 $ cat .hg/requires
291 $ cat .hg/requires
292 dotencode
292 dotencode
293 exp-dirstate-v2 (dirstate-v2 !)
293 exp-rc-dirstate-v2 (dirstate-v2 !)
294 fncache
294 fncache
295 generaldelta
295 generaldelta
296 lfs
296 lfs
@@ -5,13 +5,13 b' Log on empty repository: checking consis'
5 $ hg log
5 $ hg log
6 $ hg log -r 1
6 $ hg log -r 1
7 abort: unknown revision '1'
7 abort: unknown revision '1'
8 [255]
8 [10]
9 $ hg log -r -1:0
9 $ hg log -r -1:0
10 abort: unknown revision '-1'
10 abort: unknown revision '-1'
11 [255]
11 [10]
12 $ hg log -r 'branch(name)'
12 $ hg log -r 'branch(name)'
13 abort: unknown revision 'name'
13 abort: unknown revision 'name'
14 [255]
14 [10]
15 $ hg log -r null -q
15 $ hg log -r null -q
16 -1:000000000000
16 -1:000000000000
17
17
@@ -1104,7 +1104,7 b' log -r <some unknown node id>'
1104
1104
1105 $ hg log -r 1000000000000000000000000000000000000000
1105 $ hg log -r 1000000000000000000000000000000000000000
1106 abort: unknown revision '1000000000000000000000000000000000000000'
1106 abort: unknown revision '1000000000000000000000000000000000000000'
1107 [255]
1107 [10]
1108
1108
1109 log -k r1
1109 log -k r1
1110
1110
@@ -2061,7 +2061,7 b' enable obsolete to test hidden feature'
2061 $ hg log -r a
2061 $ hg log -r a
2062 abort: hidden revision 'a' is pruned
2062 abort: hidden revision 'a' is pruned
2063 (use --hidden to access hidden revisions)
2063 (use --hidden to access hidden revisions)
2064 [255]
2064 [10]
2065
2065
2066 test that parent prevent a changeset to be hidden
2066 test that parent prevent a changeset to be hidden
2067
2067
@@ -2125,7 +2125,7 b' test hidden revision 0 (issue5385)'
2125 $ hg log -T'{rev}:{node}\n' -r:0
2125 $ hg log -T'{rev}:{node}\n' -r:0
2126 abort: hidden revision '0' is pruned
2126 abort: hidden revision '0' is pruned
2127 (use --hidden to access hidden revisions)
2127 (use --hidden to access hidden revisions)
2128 [255]
2128 [10]
2129 $ hg log -T'{rev}:{node}\n' -f
2129 $ hg log -T'{rev}:{node}\n' -f
2130 3:d7d28b288a6b83d5d2cf49f10c5974deed3a1d2e
2130 3:d7d28b288a6b83d5d2cf49f10c5974deed3a1d2e
2131 2:94375ec45bddd2a824535fc04855bd058c926ec0
2131 2:94375ec45bddd2a824535fc04855bd058c926ec0
@@ -2516,10 +2516,9 b' New namespace is registered per repo ins'
2516 is global. So we shouldn't expect the namespace to always exist. Using
2516 is global. So we shouldn't expect the namespace to always exist. Using
2517 ssh:// makes sure a bundle repository is created from scratch. (issue6301)
2517 ssh:// makes sure a bundle repository is created from scratch. (issue6301)
2518
2518
2519 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" \
2519 $ hg clone -qr0 "ssh://user@dummy/`pwd`/a" a-clone
2520 > -qr0 "ssh://user@dummy/`pwd`/a" a-clone
2521 $ hg incoming --config extensions.names=names.py -R a-clone \
2520 $ hg incoming --config extensions.names=names.py -R a-clone \
2522 > -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" -T '{bars}\n' -l1
2521 > -T '{bars}\n' -l1
2523 comparing with ssh://user@dummy/$TESTTMP/a
2522 comparing with ssh://user@dummy/$TESTTMP/a
2524 searching for changes
2523 searching for changes
2525
2524
@@ -2,8 +2,6 b' Testing the functionality to pull remote'
2 =============================================
2 =============================================
3
3
4 $ cat >> $HGRCPATH << EOF
4 $ cat >> $HGRCPATH << EOF
5 > [ui]
6 > ssh = "$PYTHON" "$TESTDIR/dummyssh"
7 > [alias]
5 > [alias]
8 > glog = log -G -T '{rev}:{node|short} {desc}'
6 > glog = log -G -T '{rev}:{node|short} {desc}'
9 > [extensions]
7 > [extensions]
@@ -482,15 +480,15 b' Testing for a literal name which does no'
482
480
483 $ hg log -r 'remotebranches(def)' -GT "{rev}:{node|short} {remotenames}\n"
481 $ hg log -r 'remotebranches(def)' -GT "{rev}:{node|short} {remotenames}\n"
484 abort: remote name 'def' does not exist
482 abort: remote name 'def' does not exist
485 [255]
483 [10]
486
484
487 $ hg log -r 'remotebookmarks("server3")' -GT "{rev}:{node|short} {remotenames}\n"
485 $ hg log -r 'remotebookmarks("server3")' -GT "{rev}:{node|short} {remotenames}\n"
488 abort: remote name 'server3' does not exist
486 abort: remote name 'server3' does not exist
489 [255]
487 [10]
490
488
491 $ hg log -r 'remotenames("server3")' -GT "{rev}:{node|short} {remotenames}\n"
489 $ hg log -r 'remotenames("server3")' -GT "{rev}:{node|short} {remotenames}\n"
492 abort: remote name 'server3' does not exist
490 abort: remote name 'server3' does not exist
493 [255]
491 [10]
494
492
495 Testing for a pattern which does not match anything, which shouldn't fail.
493 Testing for a pattern which does not match anything, which shouldn't fail.
496
494
@@ -88,7 +88,7 b' The next two calls are expected to abort'
88
88
89 $ hg manifest -r 2
89 $ hg manifest -r 2
90 abort: unknown revision '2'
90 abort: unknown revision '2'
91 [255]
91 [10]
92
92
93 $ hg manifest -r tip tip
93 $ hg manifest -r tip tip
94 abort: please specify just one revision
94 abort: please specify just one revision
@@ -55,8 +55,8 b' Re-adding foo1 and bar:'
55 adding foo1
55 adding foo1
56
56
57 $ hg debugstate --no-dates
57 $ hg debugstate --no-dates
58 n 0 -2 unset bar
58 m 0 -2 unset bar
59 n 0 -2 unset foo1
59 m 0 -2 unset foo1
60 copy: foo -> foo1
60 copy: foo -> foo1
61
61
62 $ hg st -qC
62 $ hg st -qC
@@ -74,8 +74,8 b' Reverting foo1 and bar:'
74 reverting foo1
74 reverting foo1
75
75
76 $ hg debugstate --no-dates
76 $ hg debugstate --no-dates
77 n 0 -2 unset bar
77 m 0 -2 unset bar
78 n 0 -2 unset foo1
78 m 0 -2 unset foo1
79 copy: foo -> foo1
79 copy: foo -> foo1
80
80
81 $ hg st -qC
81 $ hg st -qC
@@ -24,10 +24,6 b" some capability (because it's running an"
24 > [extensions]
24 > [extensions]
25 > disable-lookup = $TESTTMP/disable-lookup.py
25 > disable-lookup = $TESTTMP/disable-lookup.py
26 > EOF
26 > EOF
27 $ cat >> .hg/hgrc <<EOF
28 > [ui]
29 > ssh = "$PYTHON" "$TESTDIR/dummyssh"
30 > EOF
31
27
32 $ hg pull ssh://user@dummy/repo1 -r tip -B a
28 $ hg pull ssh://user@dummy/repo1 -r tip -B a
33 pulling from ssh://user@dummy/repo1
29 pulling from ssh://user@dummy/repo1
@@ -115,7 +115,7 b' Delete the same patch twice in one comma'
115
115
116 $ hg qfinish -a pc
116 $ hg qfinish -a pc
117 abort: unknown revision 'pc'
117 abort: unknown revision 'pc'
118 [255]
118 [10]
119
119
120 $ hg qpush
120 $ hg qpush
121 applying pc
121 applying pc
@@ -24,7 +24,7 b' narrow clone a file, f10'
24 $ cd narrow
24 $ cd narrow
25 $ cat .hg/requires | grep -v generaldelta
25 $ cat .hg/requires | grep -v generaldelta
26 dotencode
26 dotencode
27 exp-dirstate-v2 (dirstate-v2 !)
27 exp-rc-dirstate-v2 (dirstate-v2 !)
28 fncache
28 fncache
29 narrowhg-experimental
29 narrowhg-experimental
30 persistent-nodemap (rust !)
30 persistent-nodemap (rust !)
@@ -64,7 +64,7 b' Making sure we have the correct set of r'
64 $ cat .hg/requires
64 $ cat .hg/requires
65 dotencode (tree !)
65 dotencode (tree !)
66 dotencode (flat-fncache !)
66 dotencode (flat-fncache !)
67 exp-dirstate-v2 (dirstate-v2 !)
67 exp-rc-dirstate-v2 (dirstate-v2 !)
68 fncache (tree !)
68 fncache (tree !)
69 fncache (flat-fncache !)
69 fncache (flat-fncache !)
70 generaldelta
70 generaldelta
@@ -40,7 +40,7 b' narrow clone a file, f10'
40 $ cd narrow
40 $ cd narrow
41 $ cat .hg/requires | grep -v generaldelta
41 $ cat .hg/requires | grep -v generaldelta
42 dotencode
42 dotencode
43 exp-dirstate-v2 (dirstate-v2 !)
43 exp-rc-dirstate-v2 (dirstate-v2 !)
44 fncache
44 fncache
45 narrowhg-experimental
45 narrowhg-experimental
46 persistent-nodemap (rust !)
46 persistent-nodemap (rust !)
@@ -100,7 +100,7 b' Narrow the share and check that the main'
100 $ hg -R main files
100 $ hg -R main files
101 abort: working copy's narrowspec is stale
101 abort: working copy's narrowspec is stale
102 (run 'hg tracked --update-working-copy')
102 (run 'hg tracked --update-working-copy')
103 [255]
103 [20]
104 $ hg -R main tracked --update-working-copy
104 $ hg -R main tracked --update-working-copy
105 not deleting possibly dirty file d3/f
105 not deleting possibly dirty file d3/f
106 not deleting possibly dirty file d3/g
106 not deleting possibly dirty file d3/g
@@ -138,7 +138,7 b' Widen the share and check that the main '
138 $ hg -R main files
138 $ hg -R main files
139 abort: working copy's narrowspec is stale
139 abort: working copy's narrowspec is stale
140 (run 'hg tracked --update-working-copy')
140 (run 'hg tracked --update-working-copy')
141 [255]
141 [20]
142 $ hg -R main tracked --update-working-copy
142 $ hg -R main tracked --update-working-copy
143 # d1/f, d3/f should be back
143 # d1/f, d3/f should be back
144 $ hg -R main files
144 $ hg -R main files
@@ -189,7 +189,7 b' Make it look like a repo from before nar'
189 $ hg ci -Am test
189 $ hg ci -Am test
190 abort: working copy's narrowspec is stale
190 abort: working copy's narrowspec is stale
191 (run 'hg tracked --update-working-copy')
191 (run 'hg tracked --update-working-copy')
192 [255]
192 [20]
193 $ hg tracked --update-working-copy
193 $ hg tracked --update-working-copy
194 $ hg st
194 $ hg st
195 M d1/f
195 M d1/f
@@ -58,7 +58,7 b' XXX: we should have a flag in `hg debugs'
58
58
59 $ cat .hg/requires
59 $ cat .hg/requires
60 dotencode
60 dotencode
61 exp-dirstate-v2 (dirstate-v2 !)
61 exp-rc-dirstate-v2 (dirstate-v2 !)
62 fncache
62 fncache
63 generaldelta
63 generaldelta
64 narrowhg-experimental
64 narrowhg-experimental
@@ -54,7 +54,7 b' Actual test'
54 $ hg update 471f378eab4c
54 $ hg update 471f378eab4c
55 abort: hidden revision '471f378eab4c' was rewritten as: 4ae3a4151de9
55 abort: hidden revision '471f378eab4c' was rewritten as: 4ae3a4151de9
56 (use --hidden to access hidden revisions)
56 (use --hidden to access hidden revisions)
57 [255]
57 [10]
58 $ hg update --hidden "desc(A0)"
58 $ hg update --hidden "desc(A0)"
59 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
59 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
60 updated to hidden changeset 471f378eab4c
60 updated to hidden changeset 471f378eab4c
@@ -118,7 +118,7 b' Actual test'
118 $ hg up 0dec01379d3b
118 $ hg up 0dec01379d3b
119 abort: hidden revision '0dec01379d3b' is pruned
119 abort: hidden revision '0dec01379d3b' is pruned
120 (use --hidden to access hidden revisions)
120 (use --hidden to access hidden revisions)
121 [255]
121 [10]
122 $ hg up --hidden -r 'desc(B0)'
122 $ hg up --hidden -r 'desc(B0)'
123 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
123 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
124 updated to hidden changeset 0dec01379d3b
124 updated to hidden changeset 0dec01379d3b
@@ -196,7 +196,7 b' Actual test'
196 $ hg update 471597cad322
196 $ hg update 471597cad322
197 abort: hidden revision '471597cad322' was split as: 337fec4d2edc, f257fde29c7a
197 abort: hidden revision '471597cad322' was split as: 337fec4d2edc, f257fde29c7a
198 (use --hidden to access hidden revisions)
198 (use --hidden to access hidden revisions)
199 [255]
199 [10]
200 $ hg update --hidden 'min(desc(A0))'
200 $ hg update --hidden 'min(desc(A0))'
201 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
201 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
202 updated to hidden changeset 471597cad322
202 updated to hidden changeset 471597cad322
@@ -296,7 +296,7 b' Actual test'
296 $ hg update de7290d8b885
296 $ hg update de7290d8b885
297 abort: hidden revision 'de7290d8b885' was split as: 337fec4d2edc, f257fde29c7a and 2 more
297 abort: hidden revision 'de7290d8b885' was split as: 337fec4d2edc, f257fde29c7a and 2 more
298 (use --hidden to access hidden revisions)
298 (use --hidden to access hidden revisions)
299 [255]
299 [10]
300 $ hg update --hidden 'min(desc(A0))'
300 $ hg update --hidden 'min(desc(A0))'
301 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
301 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
302 updated to hidden changeset de7290d8b885
302 updated to hidden changeset de7290d8b885
@@ -377,7 +377,7 b' Test setup'
377 $ hg update 471f378eab4c
377 $ hg update 471f378eab4c
378 abort: hidden revision '471f378eab4c' was rewritten as: eb5a0daa2192
378 abort: hidden revision '471f378eab4c' was rewritten as: eb5a0daa2192
379 (use --hidden to access hidden revisions)
379 (use --hidden to access hidden revisions)
380 [255]
380 [10]
381 $ hg update --hidden 'desc(A0)'
381 $ hg update --hidden 'desc(A0)'
382 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
382 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
383 updated to hidden changeset 471f378eab4c
383 updated to hidden changeset 471f378eab4c
@@ -385,7 +385,7 b' Test setup'
385 $ hg update 0dec01379d3b
385 $ hg update 0dec01379d3b
386 abort: hidden revision '0dec01379d3b' was rewritten as: eb5a0daa2192
386 abort: hidden revision '0dec01379d3b' was rewritten as: eb5a0daa2192
387 (use --hidden to access hidden revisions)
387 (use --hidden to access hidden revisions)
388 [255]
388 [10]
389 $ hg update --hidden 'desc(B0)'
389 $ hg update --hidden 'desc(B0)'
390 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
390 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
391 updated to hidden changeset 0dec01379d3b
391 updated to hidden changeset 0dec01379d3b
@@ -460,7 +460,7 b' Actual test'
460 $ hg update 471f378eab4c
460 $ hg update 471f378eab4c
461 abort: hidden revision '471f378eab4c' has diverged
461 abort: hidden revision '471f378eab4c' has diverged
462 (use --hidden to access hidden revisions)
462 (use --hidden to access hidden revisions)
463 [255]
463 [10]
464 $ hg update --hidden 'desc(A0)'
464 $ hg update --hidden 'desc(A0)'
465 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
465 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
466 updated to hidden changeset 471f378eab4c
466 updated to hidden changeset 471f378eab4c
@@ -557,7 +557,7 b' Test setup'
557 $ hg update 471f378eab4c
557 $ hg update 471f378eab4c
558 abort: hidden revision '471f378eab4c' was rewritten as: eb5a0daa2192
558 abort: hidden revision '471f378eab4c' was rewritten as: eb5a0daa2192
559 (use --hidden to access hidden revisions)
559 (use --hidden to access hidden revisions)
560 [255]
560 [10]
561 $ hg update --hidden 'desc(A0)'
561 $ hg update --hidden 'desc(A0)'
562 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
562 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
563 updated to hidden changeset 471f378eab4c
563 updated to hidden changeset 471f378eab4c
@@ -203,11 +203,11 b' check that various commands work well wi'
203 5:5601fb93a350 (draft) [tip ] add new_3_c
203 5:5601fb93a350 (draft) [tip ] add new_3_c
204 $ hg log -r 6
204 $ hg log -r 6
205 abort: unknown revision '6'
205 abort: unknown revision '6'
206 [255]
206 [10]
207 $ hg log -r 4
207 $ hg log -r 4
208 abort: hidden revision '4' was rewritten as: 5601fb93a350
208 abort: hidden revision '4' was rewritten as: 5601fb93a350
209 (use --hidden to access hidden revisions)
209 (use --hidden to access hidden revisions)
210 [255]
210 [10]
211 $ hg debugrevspec 'rev(6)'
211 $ hg debugrevspec 'rev(6)'
212 $ hg debugrevspec 'rev(4)'
212 $ hg debugrevspec 'rev(4)'
213 $ hg debugrevspec 'null'
213 $ hg debugrevspec 'null'
@@ -1544,7 +1544,7 b' bookmarks change'
1544 $ hg log -r 13bedc178fce
1544 $ hg log -r 13bedc178fce
1545 abort: hidden revision '13bedc178fce' was rewritten as: a9b1f8652753
1545 abort: hidden revision '13bedc178fce' was rewritten as: a9b1f8652753
1546 (use --hidden to access hidden revisions)
1546 (use --hidden to access hidden revisions)
1547 [255]
1547 [10]
1548
1548
1549 Empty out the test extension, as it isn't compatible with later parts
1549 Empty out the test extension, as it isn't compatible with later parts
1550 of the test.
1550 of the test.
@@ -1,17 +1,14 b''
1 #require unix-permissions no-root reporevlogstore
1 #require unix-permissions no-root reporevlogstore
2
2
3 #testcases dirstate-v1 dirstate-v1-tree dirstate-v2
3 #testcases dirstate-v1 dirstate-v2
4
5 #if dirstate-v1-tree
6 #require rust
7 $ echo '[experimental]' >> $HGRCPATH
8 $ echo 'dirstate-tree.in-memory=1' >> $HGRCPATH
9 #endif
10
4
11 #if dirstate-v2
5 #if dirstate-v2
12 #require rust
6 $ cat >> $HGRCPATH << EOF
13 $ echo '[format]' >> $HGRCPATH
7 > [format]
14 $ echo 'exp-dirstate-v2=1' >> $HGRCPATH
8 > exp-rc-dirstate-v2=1
9 > [storage]
10 > dirstate-v2.slow-path=allow
11 > EOF
15 #endif
12 #endif
16
13
17 $ hg init t
14 $ hg init t
@@ -800,7 +800,7 b' downgrading'
800 requirements
800 requirements
801 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-zstd no-dirstate-v2 !)
801 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-zstd no-dirstate-v2 !)
802 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd no-dirstate-v2 !)
802 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd no-dirstate-v2 !)
803 preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd dirstate-v2 !)
803 preserved: dotencode, exp-rc-dirstate-v2, fncache, generaldelta, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd dirstate-v2 !)
804 removed: persistent-nodemap
804 removed: persistent-nodemap
805
805
806 processed revlogs:
806 processed revlogs:
@@ -844,7 +844,7 b' upgrading'
844 requirements
844 requirements
845 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-zstd no-dirstate-v2 !)
845 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-zstd no-dirstate-v2 !)
846 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd no-dirstate-v2 !)
846 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd no-dirstate-v2 !)
847 preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd dirstate-v2 !)
847 preserved: dotencode, exp-rc-dirstate-v2, fncache, generaldelta, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd dirstate-v2 !)
848 added: persistent-nodemap
848 added: persistent-nodemap
849
849
850 processed revlogs:
850 processed revlogs:
@@ -876,7 +876,7 b' Running unrelated upgrade'
876 requirements
876 requirements
877 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (no-zstd no-dirstate-v2 !)
877 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (no-zstd no-dirstate-v2 !)
878 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd no-dirstate-v2 !)
878 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd no-dirstate-v2 !)
879 preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd dirstate-v2 !)
879 preserved: dotencode, exp-rc-dirstate-v2, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd dirstate-v2 !)
880
880
881 optimisations: re-delta-all
881 optimisations: re-delta-all
882
882
@@ -1016,7 +1016,7 b' Simple case'
1016
1016
1017 No race condition
1017 No race condition
1018
1018
1019 $ hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone --debug | egrep '00(changelog|manifest)'
1019 $ hg clone -U --stream ssh://user@dummy/test-repo stream-clone --debug | egrep '00(changelog|manifest)'
1020 adding [s] 00manifest.n (62 bytes)
1020 adding [s] 00manifest.n (62 bytes)
1021 adding [s] 00manifest-*.nd (118 KB) (glob)
1021 adding [s] 00manifest-*.nd (118 KB) (glob)
1022 adding [s] 00changelog.n (62 bytes)
1022 adding [s] 00changelog.n (62 bytes)
@@ -1081,7 +1081,7 b' Prepare a commit'
1081
1081
1082 Do a mix of clone and commit at the same time so that the files listed on disk differ at actual transfer time.
1082 Do a mix of clone and commit at the same time so that the files listed on disk differ at actual transfer time.
1083
1083
1084 $ (hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone-race-1 --debug 2>> clone-output | egrep '00(changelog|manifest)' >> clone-output; touch $HG_TEST_STREAM_WALKED_FILE_3) &
1084 $ (hg clone -U --stream ssh://user@dummy/test-repo stream-clone-race-1 --debug 2>> clone-output | egrep '00(changelog|manifest)' >> clone-output; touch $HG_TEST_STREAM_WALKED_FILE_3) &
1085 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1
1085 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1
1086 $ hg -R test-repo/ commit -m foo
1086 $ hg -R test-repo/ commit -m foo
1087 $ touch $HG_TEST_STREAM_WALKED_FILE_2
1087 $ touch $HG_TEST_STREAM_WALKED_FILE_2
@@ -1178,7 +1178,7 b' Check the initial state'
1178 Perform the mix of clone and full refresh of the nodemap, so that the files
1178 Perform the mix of clone and full refresh of the nodemap, so that the files
1179 (and filenames) are different between listing time and actual transfer time.
1179 (and filenames) are different between listing time and actual transfer time.
1180
1180
1181 $ (hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone-race-2 --debug 2>> clone-output-2 | egrep '00(changelog|manifest)' >> clone-output-2; touch $HG_TEST_STREAM_WALKED_FILE_3) &
1181 $ (hg clone -U --stream ssh://user@dummy/test-repo stream-clone-race-2 --debug 2>> clone-output-2 | egrep '00(changelog|manifest)' >> clone-output-2; touch $HG_TEST_STREAM_WALKED_FILE_3) &
1182 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1
1182 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1
1183 $ rm test-repo/.hg/store/00changelog.n
1183 $ rm test-repo/.hg/store/00changelog.n
1184 $ rm test-repo/.hg/store/00changelog-*.nd
1184 $ rm test-repo/.hg/store/00changelog-*.nd
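Both race tests coordinate a background streaming clone with a concurrent mutation of the source repository through sentinel files: the clone runs in a backgrounded subshell that touches a completion marker, while `wait-on-file` blocks the test until the server has walked the file list, so the store can be changed between listing time and transfer time. The synchronization skeleton, with illustrative marker names rather than the test's own:

  $ (slow-clone-command; touch DONE) &   # background clone, signals when finished
  $ wait-on-file 10 WALKED               # wait until the server has listed files
  $ mutate-the-repository                # change what is on disk mid-clone
  $ touch CONTINUE                       # let the server proceed with the transfer
  $ wait-on-file 10 DONE                 # join the background clone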
@@ -884,7 +884,7 b' Check we deny its usage on older reposit'
884 $ cd no-internal-phase
884 $ cd no-internal-phase
885 $ cat .hg/requires
885 $ cat .hg/requires
886 dotencode
886 dotencode
887 exp-dirstate-v2 (dirstate-v2 !)
887 exp-rc-dirstate-v2 (dirstate-v2 !)
888 fncache
888 fncache
889 generaldelta
889 generaldelta
890 persistent-nodemap (rust !)
890 persistent-nodemap (rust !)
@@ -913,7 +913,7 b' Check it works fine with repository that'
913 $ cd internal-phase
913 $ cd internal-phase
914 $ cat .hg/requires
914 $ cat .hg/requires
915 dotencode
915 dotencode
916 exp-dirstate-v2 (dirstate-v2 !)
916 exp-rc-dirstate-v2 (dirstate-v2 !)
917 fncache
917 fncache
918 generaldelta
918 generaldelta
919 internal-phase
919 internal-phase
@@ -1,15 +1,12 b''
1 #testcases dirstate-v1 dirstate-v1-tree dirstate-v2
1 #testcases dirstate-v1 dirstate-v2
2
3 #if dirstate-v1-tree
4 #require rust
5 $ echo '[experimental]' >> $HGRCPATH
6 $ echo 'dirstate-tree.in-memory=1' >> $HGRCPATH
7 #endif
8
2
9 #if dirstate-v2
3 #if dirstate-v2
10 #require rust
4 $ cat >> $HGRCPATH << EOF
11 $ echo '[format]' >> $HGRCPATH
5 > [format]
12 $ echo 'exp-dirstate-v2=1' >> $HGRCPATH
6 > exp-rc-dirstate-v2=1
7 > [storage]
8 > dirstate-v2.slow-path=allow
9 > EOF
13 #endif
10 #endif
14
11
15 init
12 init
@@ -102,7 +102,6 b' A set of extension and shell functions e'
102
102
103 $ cat >> $HGRCPATH << EOF
103 $ cat >> $HGRCPATH << EOF
104 > [ui]
104 > [ui]
105 > ssh = "$PYTHON" "$TESTDIR/dummyssh"
106 > # simplify output
105 > # simplify output
107 > logtemplate = {node|short} {desc} ({branch})
106 > logtemplate = {node|short} {desc} ({branch})
108 > [phases]
107 > [phases]
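This is the first of many hunks below that delete per-test `dummyssh` plumbing: `[ui] ssh` lines in generated hgrc files and explicit `-e`/`--ssh` arguments on `hg clone`, `hg pull`, and friends all go away, presumably because the test harness now supplies the same default globally. In every case the removed setting was some spelling of:

  [ui]
  ssh = "$PYTHON" "$TESTDIR/dummyssh"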
@@ -162,7 +162,7 b' Multiple destinations cannot be used wit'
162 > A D
162 > A D
163 > EOS
163 > EOS
164 abort: unknown revision 'SRC'
164 abort: unknown revision 'SRC'
165 [255]
165 [10]
166
166
167 Rebase to null should work:
167 Rebase to null should work:
168
168
@@ -132,7 +132,7 b' These fail:'
132
132
133 $ hg rebase --dest '1 & !1'
133 $ hg rebase --dest '1 & !1'
134 abort: empty revision set
134 abort: empty revision set
135 [255]
135 [10]
136
136
137 These work:
137 These work:
138
138
@@ -17,9 +17,16 b''
17 > try:
17 > try:
18 > for file in pats:
18 > for file in pats:
19 > if opts.get('normal_lookup'):
19 > if opts.get('normal_lookup'):
20 > repo.dirstate._normallookup(file)
20 > with repo.dirstate.parentchange():
21 > repo.dirstate.update_file(
22 > file,
23 > p1_tracked=True,
24 > wc_tracked=True,
25 > possibly_dirty=True,
26 > )
21 > else:
27 > else:
22 > repo.dirstate._drop(file)
28 > repo.dirstate._map.reset_state(file)
29 > repo.dirstate._dirty = True
23 >
30 >
24 > repo.dirstate.write(repo.currenttransaction())
31 > repo.dirstate.write(repo.currenttransaction())
25 > finally:
32 > finally:
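The test extension above is ported off two removed dirstate internals. `_normallookup(file)` becomes an `update_file()` call inside a `parentchange()` context: `p1_tracked`/`wc_tracked` say the file is tracked in the first parent and in the working copy, and `possibly_dirty` asks status to re-check the file's content rather than trust cached stat data. `_drop(file)` becomes `_map.reset_state(file)` plus explicitly flagging the dirstate dirty so the change is written back. Condensed, with `repo` and `file` in scope as in the extension:

  with repo.dirstate.parentchange():
      repo.dirstate.update_file(
          file,
          p1_tracked=True,      # tracked in the working copy's first parent
          wc_tracked=True,      # tracked in the working copy
          possibly_dirty=True,  # force a content check instead of trusting stat
      )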
@@ -27,7 +27,7 b''
27 $ cd shallow
27 $ cd shallow
28 $ cat .hg/requires
28 $ cat .hg/requires
29 dotencode
29 dotencode
30 exp-dirstate-v2 (dirstate-v2 !)
30 exp-rc-dirstate-v2 (dirstate-v2 !)
31 exp-remotefilelog-repo-req-1
31 exp-remotefilelog-repo-req-1
32 fncache
32 fncache
33 generaldelta
33 generaldelta
@@ -71,7 +71,7 b''
71 $ cd shallow2
71 $ cd shallow2
72 $ cat .hg/requires
72 $ cat .hg/requires
73 dotencode
73 dotencode
74 exp-dirstate-v2 (dirstate-v2 !)
74 exp-rc-dirstate-v2 (dirstate-v2 !)
75 exp-remotefilelog-repo-req-1
75 exp-remotefilelog-repo-req-1
76 fncache
76 fncache
77 generaldelta
77 generaldelta
@@ -115,7 +115,7 b''
115 $ ls shallow3/.hg/store/data
115 $ ls shallow3/.hg/store/data
116 $ cat shallow3/.hg/requires
116 $ cat shallow3/.hg/requires
117 dotencode
117 dotencode
118 exp-dirstate-v2 (dirstate-v2 !)
118 exp-rc-dirstate-v2 (dirstate-v2 !)
119 exp-remotefilelog-repo-req-1
119 exp-remotefilelog-repo-req-1
120 fncache
120 fncache
121 generaldelta
121 generaldelta
@@ -24,7 +24,7 b''
24 $ cd shallow
24 $ cd shallow
25 $ cat .hg/requires
25 $ cat .hg/requires
26 dotencode
26 dotencode
27 exp-dirstate-v2 (dirstate-v2 !)
27 exp-rc-dirstate-v2 (dirstate-v2 !)
28 exp-remotefilelog-repo-req-1
28 exp-remotefilelog-repo-req-1
29 fncache
29 fncache
30 generaldelta
30 generaldelta
@@ -61,7 +61,7 b''
61 $ cd shallow2
61 $ cd shallow2
62 $ cat .hg/requires
62 $ cat .hg/requires
63 dotencode
63 dotencode
64 exp-dirstate-v2 (dirstate-v2 !)
64 exp-rc-dirstate-v2 (dirstate-v2 !)
65 exp-remotefilelog-repo-req-1
65 exp-remotefilelog-repo-req-1
66 fncache
66 fncache
67 generaldelta
67 generaldelta
@@ -113,7 +113,7 b' check its contents separately.'
113 $ ls shallow3/.hg/store/data
113 $ ls shallow3/.hg/store/data
114 $ cat shallow3/.hg/requires
114 $ cat shallow3/.hg/requires
115 dotencode
115 dotencode
116 exp-dirstate-v2 (dirstate-v2 !)
116 exp-rc-dirstate-v2 (dirstate-v2 !)
117 exp-remotefilelog-repo-req-1
117 exp-remotefilelog-repo-req-1
118 fncache
118 fncache
119 generaldelta
119 generaldelta
@@ -27,7 +27,7 b' Shallow clone from full'
27 $ cd shallow
27 $ cd shallow
28 $ cat .hg/requires
28 $ cat .hg/requires
29 dotencode
29 dotencode
30 exp-dirstate-v2 (dirstate-v2 !)
30 exp-rc-dirstate-v2 (dirstate-v2 !)
31 exp-remotefilelog-repo-req-1
31 exp-remotefilelog-repo-req-1
32 fncache
32 fncache
33 generaldelta
33 generaldelta
@@ -42,6 +42,17 b' Test single file'
42 d1/b
42 d1/b
43 A d1/d
43 A d1/d
44 d1/b
44 d1/b
45 # Should get helpful message if we try to copy or rename after commit
46 $ hg cp --forget --at-rev . d1/d
47 saved backup bundle to $TESTTMP/.hg/strip-backup/3f7c325d3f9e-46f377bb-uncopy.hg
48 $ hg cp d1/b d1/d
49 d1/d: not overwriting - file already committed
50 ('hg copy --at-rev .' to record the copy in the parent of the working copy)
51 [1]
52 $ hg mv d1/b d1/d
53 d1/d: not overwriting - file already committed
54 ('hg rename --at-rev .' to record the rename in the parent of the working copy)
55 [1]
45
56
46 Test moved file (not copied) using 'hg cp' command
57 Test moved file (not copied) using 'hg cp' command
47
58
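The new block covers the post-commit copy workflow: once `d1/d` exists in the parent commit, plain `hg cp` and `hg mv` refuse to overwrite it and hint at the `--at-rev` variant, which records the copy in the working copy's parent by rewriting that commit (the `--forget --at-rev .` form above undoes such a record the same way, which is why it leaves a strip backup bundle). Spelled out, the hinted command is presumably invoked as:

  $ hg copy --at-rev . d1/b d1/d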
@@ -11,7 +11,7 b' A new repository uses zlib storage, whic'
11 $ cd default
11 $ cd default
12 $ cat .hg/requires
12 $ cat .hg/requires
13 dotencode
13 dotencode
14 exp-dirstate-v2 (dirstate-v2 !)
14 exp-rc-dirstate-v2 (dirstate-v2 !)
15 fncache
15 fncache
16 generaldelta
16 generaldelta
17 persistent-nodemap (rust !)
17 persistent-nodemap (rust !)
@@ -61,7 +61,7 b' with that engine or a requirement'
61
61
62 $ cat .hg/requires
62 $ cat .hg/requires
63 dotencode
63 dotencode
64 exp-dirstate-v2 (dirstate-v2 !)
64 exp-rc-dirstate-v2 (dirstate-v2 !)
65 fncache
65 fncache
66 generaldelta
66 generaldelta
67 persistent-nodemap (rust !)
67 persistent-nodemap (rust !)
@@ -81,7 +81,7 b' with that engine or a requirement'
81 $ cd zstd
81 $ cd zstd
82 $ cat .hg/requires
82 $ cat .hg/requires
83 dotencode
83 dotencode
84 exp-dirstate-v2 (dirstate-v2 !)
84 exp-rc-dirstate-v2 (dirstate-v2 !)
85 fncache
85 fncache
86 generaldelta
86 generaldelta
87 persistent-nodemap (rust !)
87 persistent-nodemap (rust !)
@@ -186,7 +186,7 b' checking details of none compression'
186 $ cat none-compression/.hg/requires
186 $ cat none-compression/.hg/requires
187 dotencode
187 dotencode
188 exp-compression-none
188 exp-compression-none
189 exp-dirstate-v2 (dirstate-v2 !)
189 exp-rc-dirstate-v2 (dirstate-v2 !)
190 fncache
190 fncache
191 generaldelta
191 generaldelta
192 persistent-nodemap (rust !)
192 persistent-nodemap (rust !)
@@ -50,7 +50,7 b' another repository of push/pull/clone on'
50 > EOF
50 > EOF
51 $ hg -R supported debugrequirements
51 $ hg -R supported debugrequirements
52 dotencode
52 dotencode
53 exp-dirstate-v2 (dirstate-v2 !)
53 exp-rc-dirstate-v2 (dirstate-v2 !)
54 featuresetup-test
54 featuresetup-test
55 fncache
55 fncache
56 generaldelta
56 generaldelta
@@ -22,7 +22,7 b' Can create and open repo with revlog v2 '
22 $ cd new-repo
22 $ cd new-repo
23 $ cat .hg/requires
23 $ cat .hg/requires
24 dotencode
24 dotencode
25 exp-dirstate-v2 (dirstate-v2 !)
25 exp-rc-dirstate-v2 (dirstate-v2 !)
26 exp-revlogv2.2
26 exp-revlogv2.2
27 fncache
27 fncache
28 generaldelta
28 generaldelta
@@ -96,10 +96,10 b' Test label with quote in them.'
96 2:fb616635b18f Added tag rev(0) for changeset 43114e71eddd ["foo"]
96 2:fb616635b18f Added tag rev(0) for changeset 43114e71eddd ["foo"]
97 $ hg log -r '("foo")'
97 $ hg log -r '("foo")'
98 abort: unknown revision 'foo'
98 abort: unknown revision 'foo'
99 [255]
99 [10]
100 $ hg log -r 'revset("foo")'
100 $ hg log -r 'revset("foo")'
101 abort: unknown revision 'foo'
101 abort: unknown revision 'foo'
102 [255]
102 [10]
103 $ hg log -r '("\"foo\"")'
103 $ hg log -r '("\"foo\"")'
104 2:fb616635b18f Added tag rev(0) for changeset 43114e71eddd ["foo"]
104 2:fb616635b18f Added tag rev(0) for changeset 43114e71eddd ["foo"]
105 $ hg log -r 'revset("\"foo\"")'
105 $ hg log -r 'revset("\"foo\"")'
@@ -126,10 +126,10 b' Test label with + in them.'
126 4:bbf52b87b370 Added tag foo-bar for changeset a50aae922707 [foo+bar]
126 4:bbf52b87b370 Added tag foo-bar for changeset a50aae922707 [foo+bar]
127 $ hg log -r '(foo+bar)'
127 $ hg log -r '(foo+bar)'
128 abort: unknown revision 'foo'
128 abort: unknown revision 'foo'
129 [255]
129 [10]
130 $ hg log -r 'revset(foo+bar)'
130 $ hg log -r 'revset(foo+bar)'
131 abort: unknown revision 'foo'
131 abort: unknown revision 'foo'
132 [255]
132 [10]
133 $ hg log -r '"foo+bar"'
133 $ hg log -r '"foo+bar"'
134 4:bbf52b87b370 Added tag foo-bar for changeset a50aae922707 [foo+bar]
134 4:bbf52b87b370 Added tag foo-bar for changeset a50aae922707 [foo+bar]
135 $ hg log -r '("foo+bar")'
135 $ hg log -r '("foo+bar")'
@@ -407,7 +407,7 b' quoting needed'
407 [10]
407 [10]
408 $ log 'date'
408 $ log 'date'
409 abort: unknown revision 'date'
409 abort: unknown revision 'date'
410 [255]
410 [10]
411 $ log 'date('
411 $ log 'date('
412 hg: parse error at 5: not a prefix: end
412 hg: parse error at 5: not a prefix: end
413 (date(
413 (date(
@@ -421,10 +421,10 b' quoting needed'
421 [10]
421 [10]
422 $ log '0:date'
422 $ log '0:date'
423 abort: unknown revision 'date'
423 abort: unknown revision 'date'
424 [255]
424 [10]
425 $ log '::"date"'
425 $ log '::"date"'
426 abort: unknown revision 'date'
426 abort: unknown revision 'date'
427 [255]
427 [10]
428 $ hg book date -r 4
428 $ hg book date -r 4
429 $ log '0:date'
429 $ log '0:date'
430 0
430 0
@@ -3067,7 +3067,7 b" abort if the revset doesn't expect given"
3067 0
3067 0
3068 $ log 'expectsize(0:1, 1)'
3068 $ log 'expectsize(0:1, 1)'
3069 abort: revset size mismatch. expected 1, got 2
3069 abort: revset size mismatch. expected 1, got 2
3070 [255]
3070 [10]
3071 $ log 'expectsize(0:4, -1)'
3071 $ log 'expectsize(0:4, -1)'
3072 hg: parse error: negative size
3072 hg: parse error: negative size
3073 [10]
3073 [10]
@@ -3077,7 +3077,7 b" abort if the revset doesn't expect given"
3077 2
3077 2
3078 $ log 'expectsize(0:1, 3:5)'
3078 $ log 'expectsize(0:1, 3:5)'
3079 abort: revset size mismatch. expected between 3 and 5, got 2
3079 abort: revset size mismatch. expected between 3 and 5, got 2
3080 [255]
3080 [10]
3081 $ log 'expectsize(0:1, -1:2)'
3081 $ log 'expectsize(0:1, -1:2)'
3082 hg: parse error: negative size
3082 hg: parse error: negative size
3083 [10]
3083 [10]
@@ -3104,10 +3104,10 b" abort if the revset doesn't expect given"
3104 2
3104 2
3105 $ log 'expectsize(0:2, 4:)'
3105 $ log 'expectsize(0:2, 4:)'
3106 abort: revset size mismatch. expected between 4 and 11, got 3
3106 abort: revset size mismatch. expected between 4 and 11, got 3
3107 [255]
3107 [10]
3108 $ log 'expectsize(0:2, :2)'
3108 $ log 'expectsize(0:2, :2)'
3109 abort: revset size mismatch. expected between 0 and 2, got 3
3109 abort: revset size mismatch. expected between 0 and 2, got 3
3110 [255]
3110 [10]
3111
3111
3112 Test getting list of node from file
3112 Test getting list of node from file
3113
3113
@@ -320,7 +320,7 b' test unknown revision in `_list`'
320
320
321 $ log '0|unknown'
321 $ log '0|unknown'
322 abort: unknown revision 'unknown'
322 abort: unknown revision 'unknown'
323 [255]
323 [10]
324
324
325 test integer range in `_list`
325 test integer range in `_list`
326
326
@@ -330,11 +330,11 b' test integer range in `_list`'
330
330
331 $ log '-10|-11'
331 $ log '-10|-11'
332 abort: unknown revision '-11'
332 abort: unknown revision '-11'
333 [255]
333 [10]
334
334
335 $ log '9|10'
335 $ log '9|10'
336 abort: unknown revision '10'
336 abort: unknown revision '10'
337 [255]
337 [10]
338
338
339 test '0000' != '0' in `_list`
339 test '0000' != '0' in `_list`
340
340
@@ -590,7 +590,7 b' we can use patterns when searching for t'
590
590
591 $ log 'tag("1..*")'
591 $ log 'tag("1..*")'
592 abort: tag '1..*' does not exist
592 abort: tag '1..*' does not exist
593 [255]
593 [10]
594 $ log 'tag("re:1..*")'
594 $ log 'tag("re:1..*")'
595 6
595 6
596 $ log 'tag("re:[0-9].[0-9]")'
596 $ log 'tag("re:[0-9].[0-9]")'
@@ -601,16 +601,16 b' we can use patterns when searching for t'
601
601
602 $ log 'tag(unknown)'
602 $ log 'tag(unknown)'
603 abort: tag 'unknown' does not exist
603 abort: tag 'unknown' does not exist
604 [255]
604 [10]
605 $ log 'tag("re:unknown")'
605 $ log 'tag("re:unknown")'
606 $ log 'present(tag("unknown"))'
606 $ log 'present(tag("unknown"))'
607 $ log 'present(tag("re:unknown"))'
607 $ log 'present(tag("re:unknown"))'
608 $ log 'branch(unknown)'
608 $ log 'branch(unknown)'
609 abort: unknown revision 'unknown'
609 abort: unknown revision 'unknown'
610 [255]
610 [10]
611 $ log 'branch("literal:unknown")'
611 $ log 'branch("literal:unknown")'
612 abort: branch 'unknown' does not exist
612 abort: branch 'unknown' does not exist
613 [255]
613 [10]
614 $ log 'branch("re:unknown")'
614 $ log 'branch("re:unknown")'
615 $ log 'present(branch("unknown"))'
615 $ log 'present(branch("unknown"))'
616 $ log 'present(branch("re:unknown"))'
616 $ log 'present(branch("re:unknown"))'
@@ -666,7 +666,7 b' matching() should preserve the order of '
666
666
667 $ log 'named("unknown")'
667 $ log 'named("unknown")'
668 abort: namespace 'unknown' does not exist
668 abort: namespace 'unknown' does not exist
669 [255]
669 [10]
670 $ log 'named("re:unknown")'
670 $ log 'named("re:unknown")'
671 $ log 'present(named("unknown"))'
671 $ log 'present(named("unknown"))'
672 $ log 'present(named("re:unknown"))'
672 $ log 'present(named("re:unknown"))'
@@ -759,7 +759,7 b' parentrevspec'
759
759
760 $ log 'branchpoint()~-1'
760 $ log 'branchpoint()~-1'
761 abort: revision in set has more than one child
761 abort: revision in set has more than one child
762 [255]
762 [10]
763
763
764 Bogus function gets suggestions
764 Bogus function gets suggestions
765 $ log 'add()'
765 $ log 'add()'
@@ -840,7 +840,7 b' test usage in revpair (with "+")'
840
840
841 $ hg diff -r 'author("babar") or author("celeste")'
841 $ hg diff -r 'author("babar") or author("celeste")'
842 abort: empty revision range
842 abort: empty revision range
843 [255]
843 [10]
844
844
845 aliases:
845 aliases:
846
846
@@ -121,11 +121,16 b' Specifying revisions by changeset ID'
121 file-3
121 file-3
122 $ $NO_FALLBACK rhg cat -r cf8b83 file-2
122 $ $NO_FALLBACK rhg cat -r cf8b83 file-2
123 2
123 2
124 $ $NO_FALLBACK rhg cat --rev cf8b83 file-2
125 2
124 $ $NO_FALLBACK rhg cat -r c file-2
126 $ $NO_FALLBACK rhg cat -r c file-2
125 abort: ambiguous revision identifier: c
127 abort: ambiguous revision identifier: c
126 [255]
128 [255]
127 $ $NO_FALLBACK rhg cat -r d file-2
129 $ $NO_FALLBACK rhg cat -r d file-2
128 2
130 2
131 $ $NO_FALLBACK rhg cat -r 0000 file-2
132 file-2: no such file in rev 000000000000
133 [1]
129
134
130 Cat files
135 Cat files
131 $ cd $TESTTMP
136 $ cd $TESTTMP
@@ -135,42 +140,102 b' Cat files'
135 $ echo "original content" > original
140 $ echo "original content" > original
136 $ hg add original
141 $ hg add original
137 $ hg commit -m "add original" original
142 $ hg commit -m "add original" original
143 Without `--rev`
144 $ $NO_FALLBACK rhg cat original
145 original content
146 With `--rev`
138 $ $NO_FALLBACK rhg cat -r 0 original
147 $ $NO_FALLBACK rhg cat -r 0 original
139 original content
148 original content
140 Cat copied file should not display copy metadata
149 Cat copied file should not display copy metadata
141 $ hg copy original copy_of_original
150 $ hg copy original copy_of_original
142 $ hg commit -m "add copy of original"
151 $ hg commit -m "add copy of original"
152 $ $NO_FALLBACK rhg cat original
153 original content
143 $ $NO_FALLBACK rhg cat -r 1 copy_of_original
154 $ $NO_FALLBACK rhg cat -r 1 copy_of_original
144 original content
155 original content
145
156
157
146 Fallback to Python
158 Fallback to Python
147 $ $NO_FALLBACK rhg cat original
159 $ $NO_FALLBACK rhg cat original --exclude="*.rs"
148 unsupported feature: `rhg cat` without `--rev` / `-r`
160 unsupported feature: error: Found argument '--exclude' which wasn't expected, or isn't valid in this context
161
162 USAGE:
163 rhg cat [OPTIONS] <FILE>...
164
165 For more information try --help
166
149 [252]
167 [252]
150 $ rhg cat original
168 $ rhg cat original --exclude="*.rs"
151 original content
169 original content
152
170
153 $ FALLBACK_EXE="$RHG_FALLBACK_EXECUTABLE"
171 $ FALLBACK_EXE="$RHG_FALLBACK_EXECUTABLE"
154 $ unset RHG_FALLBACK_EXECUTABLE
172 $ unset RHG_FALLBACK_EXECUTABLE
155 $ rhg cat original
173 $ rhg cat original --exclude="*.rs"
156 abort: 'rhg.on-unsupported=fallback' without 'rhg.fallback-executable' set.
174 abort: 'rhg.on-unsupported=fallback' without 'rhg.fallback-executable' set.
157 [255]
175 [255]
158 $ RHG_FALLBACK_EXECUTABLE="$FALLBACK_EXE"
176 $ RHG_FALLBACK_EXECUTABLE="$FALLBACK_EXE"
159 $ export RHG_FALLBACK_EXECUTABLE
177 $ export RHG_FALLBACK_EXECUTABLE
160
178
161 $ rhg cat original --config rhg.fallback-executable=false
179 $ rhg cat original --exclude="*.rs" --config rhg.fallback-executable=false
162 [1]
180 [1]
163
181
164 $ rhg cat original --config rhg.fallback-executable=hg-non-existent
182 $ rhg cat original --exclude="*.rs" --config rhg.fallback-executable=hg-non-existent
165 tried to fall back to a 'hg-non-existent' sub-process but got error $ENOENT$
183 tried to fall back to a 'hg-non-existent' sub-process but got error $ENOENT$
166 unsupported feature: `rhg cat` without `--rev` / `-r`
184 unsupported feature: error: Found argument '--exclude' which wasn't expected, or isn't valid in this context
185
186 USAGE:
187 rhg cat [OPTIONS] <FILE>...
188
189 For more information try --help
190
191 [252]
192
193 $ rhg cat original --exclude="*.rs" --config rhg.fallback-executable=rhg
194 Blocking recursive fallback. The 'rhg.fallback-executable = rhg' config points to `rhg` itself.
195 unsupported feature: error: Found argument '--exclude' which wasn't expected, or isn't valid in this context
196
197 USAGE:
198 rhg cat [OPTIONS] <FILE>...
199
200 For more information try --help
201
167 [252]
202 [252]
168
203
169 $ rhg cat original --config rhg.fallback-executable=rhg
204 Fallback with shell path segments
170 Blocking recursive fallback. The 'rhg.fallback-executable = rhg' config points to `rhg` itself.
205 $ $NO_FALLBACK rhg cat .
171 unsupported feature: `rhg cat` without `--rev` / `-r`
206 unsupported feature: `..` or `.` path segment
207 [252]
208 $ $NO_FALLBACK rhg cat ..
209 unsupported feature: `..` or `.` path segment
210 [252]
211 $ $NO_FALLBACK rhg cat ../..
212 unsupported feature: `..` or `.` path segment
213 [252]
214
215 Fallback with filesets
216 $ $NO_FALLBACK rhg cat "set:c or b"
217 unsupported feature: fileset
172 [252]
218 [252]
173
219
220 Fallback with generic hooks
221 $ $NO_FALLBACK rhg cat original --config hooks.pre-cat=something
222 unsupported feature: pre-cat hook defined
223 [252]
224
225 $ $NO_FALLBACK rhg cat original --config hooks.post-cat=something
226 unsupported feature: post-cat hook defined
227 [252]
228
229 $ $NO_FALLBACK rhg cat original --config hooks.fail-cat=something
230 unsupported feature: fail-cat hook defined
231 [252]
232
233 Fallback with [defaults]
234 $ $NO_FALLBACK rhg cat original --config "defaults.cat=-r null"
235 unsupported feature: `defaults` config set
236 [252]
237
238
174 Requirements
239 Requirements
175 $ $NO_FALLBACK rhg debugrequirements
240 $ $NO_FALLBACK rhg debugrequirements
176 dotencode
241 dotencode
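This section is reorganized around rhg's newly native `cat`: `rhg cat FILE` without `--rev` now works, so the fallback tests switch to `--exclude` — an argument rhg's argument parser does not define — as their unsupported trigger. The surrounding additions map out the fallback rules: unknown arguments, `.`/`..` path segments, filesets, any `pre-`/`post-`/`fail-` hook on the command, or a non-empty `[defaults]` section make rhg either re-execute the configured `rhg.fallback-executable` or, when `$NO_FALLBACK` disables that, print the unsupported feature and exit 252. A sketch with an illustrative file name:

  $ rhg cat -r . some-file               # handled natively by rhg
  $ rhg cat some-file --exclude='*.rs'   # transparently re-runs via Python hg
  $ $NO_FALLBACK rhg cat 'set:added()'   # fallback disabled: report and bail
  unsupported feature: fileset
  [252]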
@@ -307,3 +372,12 b' The blackbox extension is supported'
307 $ cat .hg/blackbox.log.1
372 $ cat .hg/blackbox.log.1
308 ????/??/?? ??:??:??.??? * @d3873e73d99ef67873dac33fbcc66268d5d2b6f4 (*)> (rust) files (glob)
373 ????/??/?? ??:??:??.??? * @d3873e73d99ef67873dac33fbcc66268d5d2b6f4 (*)> (rust) files (glob)
309
374
375 Subrepos are not supported
376
377 $ touch .hgsub
378 $ $NO_FALLBACK rhg files
379 unsupported feature: subrepos (.hgsub is present)
380 [252]
381 $ rhg files
382 a
383 $ rm .hgsub
@@ -19,7 +19,7 b' prepare source repo'
19 $ hg init source
19 $ hg init source
20 $ cd source
20 $ cd source
21 $ cat .hg/requires
21 $ cat .hg/requires
22 exp-dirstate-v2 (dirstate-v2 !)
22 exp-rc-dirstate-v2 (dirstate-v2 !)
23 share-safe
23 share-safe
24 $ cat .hg/store/requires
24 $ cat .hg/store/requires
25 dotencode
25 dotencode
@@ -30,7 +30,7 b' prepare source repo'
30 store
30 store
31 $ hg debugrequirements
31 $ hg debugrequirements
32 dotencode
32 dotencode
33 exp-dirstate-v2 (dirstate-v2 !)
33 exp-rc-dirstate-v2 (dirstate-v2 !)
34 fncache
34 fncache
35 generaldelta
35 generaldelta
36 revlogv1
36 revlogv1
@@ -54,13 +54,13 b' Create a shared repo and check the requi'
54 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
54 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
55 $ cd shared1
55 $ cd shared1
56 $ cat .hg/requires
56 $ cat .hg/requires
57 exp-dirstate-v2 (dirstate-v2 !)
57 exp-rc-dirstate-v2 (dirstate-v2 !)
58 share-safe
58 share-safe
59 shared
59 shared
60
60
61 $ hg debugrequirements -R ../source
61 $ hg debugrequirements -R ../source
62 dotencode
62 dotencode
63 exp-dirstate-v2 (dirstate-v2 !)
63 exp-rc-dirstate-v2 (dirstate-v2 !)
64 fncache
64 fncache
65 generaldelta
65 generaldelta
66 revlogv1
66 revlogv1
@@ -70,7 +70,7 b' Create a shared repo and check the requi'
70
70
71 $ hg debugrequirements
71 $ hg debugrequirements
72 dotencode
72 dotencode
73 exp-dirstate-v2 (dirstate-v2 !)
73 exp-rc-dirstate-v2 (dirstate-v2 !)
74 fncache
74 fncache
75 generaldelta
75 generaldelta
76 revlogv1
76 revlogv1
@@ -225,7 +225,7 b' Disable zstd related tests because its n'
225
225
226 requirements
226 requirements
227 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-dirstate-v2 !)
227 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-dirstate-v2 !)
228 preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (dirstate-v2 !)
228 preserved: dotencode, exp-rc-dirstate-v2, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (dirstate-v2 !)
229 added: revlog-compression-zstd
229 added: revlog-compression-zstd
230
230
231 processed revlogs:
231 processed revlogs:
@@ -253,8 +253,8 b' Disable zstd related tests because its n'
253 requirements
253 requirements
254 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-zstd no-dirstate-v2 !)
254 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-zstd no-dirstate-v2 !)
255 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd no-dirstate-v2 !)
255 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd no-dirstate-v2 !)
256 preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-zstd dirstate-v2 !)
256 preserved: dotencode, exp-rc-dirstate-v2, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-zstd dirstate-v2 !)
257 preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd dirstate-v2 !)
257 preserved: dotencode, exp-rc-dirstate-v2, fncache, generaldelta, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd dirstate-v2 !)
258 added: persistent-nodemap
258 added: persistent-nodemap
259
259
260 processed revlogs:
260 processed revlogs:
@@ -327,7 +327,7 b' Test that upgrading using debugupgradere'
327 $ cd non-share-safe
327 $ cd non-share-safe
328 $ hg debugrequirements
328 $ hg debugrequirements
329 dotencode
329 dotencode
330 exp-dirstate-v2 (dirstate-v2 !)
330 exp-rc-dirstate-v2 (dirstate-v2 !)
331 fncache
331 fncache
332 generaldelta
332 generaldelta
333 revlogv1
333 revlogv1
@@ -346,7 +346,7 b' Create a share before upgrading'
346 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
346 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
347 $ hg debugrequirements -R nss-share
347 $ hg debugrequirements -R nss-share
348 dotencode
348 dotencode
349 exp-dirstate-v2 (dirstate-v2 !)
349 exp-rc-dirstate-v2 (dirstate-v2 !)
350 fncache
350 fncache
351 generaldelta
351 generaldelta
352 revlogv1
352 revlogv1
@@ -360,7 +360,7 b' Upgrade'
360 $ hg debugupgraderepo -q
360 $ hg debugupgraderepo -q
361 requirements
361 requirements
362 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-dirstate-v2 !)
362 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-dirstate-v2 !)
363 preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, revlogv1, sparserevlog, store (dirstate-v2 !)
363 preserved: dotencode, exp-rc-dirstate-v2, fncache, generaldelta, revlogv1, sparserevlog, store (dirstate-v2 !)
364 added: share-safe
364 added: share-safe
365
365
366 processed revlogs:
366 processed revlogs:
@@ -373,7 +373,7 b' Upgrade'
373
373
374 requirements
374 requirements
375 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-dirstate-v2 !)
375 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-dirstate-v2 !)
376 preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, revlogv1, sparserevlog, store (dirstate-v2 !)
376 preserved: dotencode, exp-rc-dirstate-v2, fncache, generaldelta, revlogv1, sparserevlog, store (dirstate-v2 !)
377 added: share-safe
377 added: share-safe
378
378
379 share-safe
379 share-safe
@@ -394,7 +394,7 b' Upgrade'
394
394
395 $ hg debugrequirements
395 $ hg debugrequirements
396 dotencode
396 dotencode
397 exp-dirstate-v2 (dirstate-v2 !)
397 exp-rc-dirstate-v2 (dirstate-v2 !)
398 fncache
398 fncache
399 generaldelta
399 generaldelta
400 revlogv1
400 revlogv1
@@ -403,7 +403,7 b' Upgrade'
403 store
403 store
404
404
405 $ cat .hg/requires
405 $ cat .hg/requires
406 exp-dirstate-v2 (dirstate-v2 !)
406 exp-rc-dirstate-v2 (dirstate-v2 !)
407 share-safe
407 share-safe
408
408
409 $ cat .hg/store/requires
409 $ cat .hg/store/requires
@@ -454,7 +454,7 b' Test that downgrading works too'
454 $ hg debugupgraderepo -q
454 $ hg debugupgraderepo -q
455 requirements
455 requirements
456 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-dirstate-v2 !)
456 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-dirstate-v2 !)
457 preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, revlogv1, sparserevlog, store (dirstate-v2 !)
457 preserved: dotencode, exp-rc-dirstate-v2, fncache, generaldelta, revlogv1, sparserevlog, store (dirstate-v2 !)
458 removed: share-safe
458 removed: share-safe
459
459
460 processed revlogs:
460 processed revlogs:
@@ -467,7 +467,7 b' Test that downgrading works too'
467
467
468 requirements
468 requirements
469 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-dirstate-v2 !)
469 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-dirstate-v2 !)
470 preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, revlogv1, sparserevlog, store (dirstate-v2 !)
470 preserved: dotencode, exp-rc-dirstate-v2, fncache, generaldelta, revlogv1, sparserevlog, store (dirstate-v2 !)
471 removed: share-safe
471 removed: share-safe
472
472
473 processed revlogs:
473 processed revlogs:
@@ -485,7 +485,7 b' Test that downgrading works too'
485
485
486 $ hg debugrequirements
486 $ hg debugrequirements
487 dotencode
487 dotencode
488 exp-dirstate-v2 (dirstate-v2 !)
488 exp-rc-dirstate-v2 (dirstate-v2 !)
489 fncache
489 fncache
490 generaldelta
490 generaldelta
491 revlogv1
491 revlogv1
@@ -494,7 +494,7 b' Test that downgrading works too'
494
494
495 $ cat .hg/requires
495 $ cat .hg/requires
496 dotencode
496 dotencode
497 exp-dirstate-v2 (dirstate-v2 !)
497 exp-rc-dirstate-v2 (dirstate-v2 !)
498 fncache
498 fncache
499 generaldelta
499 generaldelta
500 revlogv1
500 revlogv1
@@ -553,7 +553,7 b' Testing automatic upgrade of shares when'
553
553
554 requirements
554 requirements
555 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-dirstate-v2 !)
555 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-dirstate-v2 !)
556 preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, revlogv1, sparserevlog, store (dirstate-v2 !)
556 preserved: dotencode, exp-rc-dirstate-v2, fncache, generaldelta, revlogv1, sparserevlog, store (dirstate-v2 !)
557 added: share-safe
557 added: share-safe
558
558
559 processed revlogs:
559 processed revlogs:
@@ -564,7 +564,7 b' Testing automatic upgrade of shares when'
564 repository upgraded to share safe mode, existing shares will still work in old non-safe mode. Re-share existing shares to use them in safe mode New shares will be created in safe mode.
564 repository upgraded to share safe mode, existing shares will still work in old non-safe mode. Re-share existing shares to use them in safe mode New shares will be created in safe mode.
565 $ hg debugrequirements
565 $ hg debugrequirements
566 dotencode
566 dotencode
567 exp-dirstate-v2 (dirstate-v2 !)
567 exp-rc-dirstate-v2 (dirstate-v2 !)
568 fncache
568 fncache
569 generaldelta
569 generaldelta
570 revlogv1
570 revlogv1
@@ -47,8 +47,8 b" share shouldn't have a full cache dir, o"
47 [1]
47 [1]
48 $ ls -1 .hg/wcache || true
48 $ ls -1 .hg/wcache || true
49 checkisexec (execbit !)
49 checkisexec (execbit !)
50 checklink (symlink !)
50 checklink (symlink no-rust !)
51 checklink-target (symlink !)
51 checklink-target (symlink no-rust !)
52 manifestfulltextcache (reporevlogstore !)
52 manifestfulltextcache (reporevlogstore !)
53 $ ls -1 ../repo1/.hg/cache
53 $ ls -1 ../repo1/.hg/cache
54 branch2-served
54 branch2-served
@@ -160,7 +160,7 b' hg serve shared clone'
160 Cloning a shared repo via bundle2 results in a non-shared clone
160 Cloning a shared repo via bundle2 results in a non-shared clone
161
161
162 $ cd ..
162 $ cd ..
163 $ hg clone -q --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/`pwd`/repo2 cloned-via-bundle2
163 $ hg clone -q --stream ssh://user@dummy/`pwd`/repo2 cloned-via-bundle2
164 $ cat ./cloned-via-bundle2/.hg/requires | grep "shared"
164 $ cat ./cloned-via-bundle2/.hg/requires | grep "shared"
165 [1]
165 [1]
166 $ hg id --cwd cloned-via-bundle2 -r tip
166 $ hg id --cwd cloned-via-bundle2 -r tip
@@ -2,7 +2,6 b' test sparse'
2
2
3 $ cat >> $HGRCPATH << EOF
3 $ cat >> $HGRCPATH << EOF
4 > [ui]
4 > [ui]
5 > ssh = "$PYTHON" "$RUNTESTDIR/dummyssh"
6 > username = nobody <no.reply@fb.com>
5 > username = nobody <no.reply@fb.com>
7 > [extensions]
6 > [extensions]
8 > sparse=
7 > sparse=
@@ -18,7 +18,7 b' Enable sparse profile'
18
18
19 $ cat .hg/requires
19 $ cat .hg/requires
20 dotencode
20 dotencode
21 exp-dirstate-v2 (dirstate-v2 !)
21 exp-rc-dirstate-v2 (dirstate-v2 !)
22 fncache
22 fncache
23 generaldelta
23 generaldelta
24 persistent-nodemap (rust !)
24 persistent-nodemap (rust !)
@@ -38,7 +38,7 b' Requirement for sparse added when sparse'
38
38
39 $ cat .hg/requires
39 $ cat .hg/requires
40 dotencode
40 dotencode
41 exp-dirstate-v2 (dirstate-v2 !)
41 exp-rc-dirstate-v2 (dirstate-v2 !)
42 exp-sparse
42 exp-sparse
43 fncache
43 fncache
44 generaldelta
44 generaldelta
@@ -61,7 +61,7 b' Requirement for sparse is removed when s'
61
61
62 $ cat .hg/requires
62 $ cat .hg/requires
63 dotencode
63 dotencode
64 exp-dirstate-v2 (dirstate-v2 !)
64 exp-rc-dirstate-v2 (dirstate-v2 !)
65 fncache
65 fncache
66 generaldelta
66 generaldelta
67 persistent-nodemap (rust !)
67 persistent-nodemap (rust !)
@@ -15,7 +15,7 b' New repo should not use SQLite by defaul'
15 $ hg init empty-no-sqlite
15 $ hg init empty-no-sqlite
16 $ cat empty-no-sqlite/.hg/requires
16 $ cat empty-no-sqlite/.hg/requires
17 dotencode
17 dotencode
18 exp-dirstate-v2 (dirstate-v2 !)
18 exp-rc-dirstate-v2 (dirstate-v2 !)
19 fncache
19 fncache
20 generaldelta
20 generaldelta
21 persistent-nodemap (rust !)
21 persistent-nodemap (rust !)
@@ -29,7 +29,7 b' storage.new-repo-backend=sqlite is recog'
29 $ hg --config storage.new-repo-backend=sqlite init empty-sqlite
29 $ hg --config storage.new-repo-backend=sqlite init empty-sqlite
30 $ cat empty-sqlite/.hg/requires
30 $ cat empty-sqlite/.hg/requires
31 dotencode
31 dotencode
32 exp-dirstate-v2 (dirstate-v2 !)
32 exp-rc-dirstate-v2 (dirstate-v2 !)
33 exp-sqlite-001
33 exp-sqlite-001
34 exp-sqlite-comp-001=zstd (zstd !)
34 exp-sqlite-comp-001=zstd (zstd !)
35 exp-sqlite-comp-001=$BUNDLE2_COMPRESSIONS$ (no-zstd !)
35 exp-sqlite-comp-001=$BUNDLE2_COMPRESSIONS$ (no-zstd !)
@@ -51,7 +51,7 b' Can force compression to zlib'
51 $ hg --config storage.sqlite.compression=zlib init empty-zlib
51 $ hg --config storage.sqlite.compression=zlib init empty-zlib
52 $ cat empty-zlib/.hg/requires
52 $ cat empty-zlib/.hg/requires
53 dotencode
53 dotencode
54 exp-dirstate-v2 (dirstate-v2 !)
54 exp-rc-dirstate-v2 (dirstate-v2 !)
55 exp-sqlite-001
55 exp-sqlite-001
56 exp-sqlite-comp-001=$BUNDLE2_COMPRESSIONS$
56 exp-sqlite-comp-001=$BUNDLE2_COMPRESSIONS$
57 fncache
57 fncache
@@ -67,7 +67,7 b' Can force compression to none'
67 $ hg --config storage.sqlite.compression=none init empty-none
67 $ hg --config storage.sqlite.compression=none init empty-none
68 $ cat empty-none/.hg/requires
68 $ cat empty-none/.hg/requires
69 dotencode
69 dotencode
70 exp-dirstate-v2 (dirstate-v2 !)
70 exp-rc-dirstate-v2 (dirstate-v2 !)
71 exp-sqlite-001
71 exp-sqlite-001
72 exp-sqlite-comp-001=none
72 exp-sqlite-comp-001=none
73 fncache
73 fncache
@@ -9,7 +9,7 b' Checking that when lookup multiple bookm'
9 fails (thus causing the sshpeer to be stopped), the errors from the
9 fails (thus causing the sshpeer to be stopped), the errors from the
10 further lookups don't result in tracebacks.
10 further lookups don't result in tracebacks.
11
11
12 $ hg pull -r b0 -r nosuchbookmark $(for i in $($TESTDIR/seq.py 1 20); do echo -r b$i; done) -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/$(pwd)/../a
12 $ hg pull -r b0 -r nosuchbookmark $(for i in $($TESTDIR/seq.py 1 20); do echo -r b$i; done) ssh://user@dummy/$(pwd)/../a
13 pulling from ssh://user@dummy/$TESTTMP/b/../a
13 pulling from ssh://user@dummy/$TESTTMP/b/../a
14 abort: unknown revision 'nosuchbookmark'
14 abort: unknown revision 'nosuchbookmark'
15 [255]
15 [255]
@@ -52,7 +52,7 b' configure for serving'
52
52
53 repo not found error
53 repo not found error
54
54
55 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/nonexistent local
55 $ hg clone ssh://user@dummy/nonexistent local
56 remote: abort: repository nonexistent not found
56 remote: abort: repository nonexistent not found
57 abort: no suitable response from remote hg
57 abort: no suitable response from remote hg
58 [255]
58 [255]
@@ -60,7 +60,7 b' repo not found error'
60 non-existent absolute path
60 non-existent absolute path
61
61
62 #if no-msys
62 #if no-msys
63 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy//`pwd`/nonexistent local
63 $ hg clone ssh://user@dummy//`pwd`/nonexistent local
64 remote: abort: repository /$TESTTMP/nonexistent not found
64 remote: abort: repository /$TESTTMP/nonexistent not found
65 abort: no suitable response from remote hg
65 abort: no suitable response from remote hg
66 [255]
66 [255]
@@ -70,7 +70,7 b' clone remote via stream'
70
70
71 #if no-reposimplestore
71 #if no-reposimplestore
72
72
73 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --stream ssh://user@dummy/remote local-stream
73 $ hg clone --stream ssh://user@dummy/remote local-stream
74 streaming all changes
74 streaming all changes
75 4 files to transfer, 602 bytes of data (no-zstd !)
75 4 files to transfer, 602 bytes of data (no-zstd !)
76 transferred 602 bytes in * seconds (*) (glob) (no-zstd !)
76 transferred 602 bytes in * seconds (*) (glob) (no-zstd !)
@@ -94,7 +94,7 b' clone remote via stream'
94 clone bookmarks via stream
94 clone bookmarks via stream
95
95
96 $ hg -R local-stream book mybook
96 $ hg -R local-stream book mybook
97 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --stream ssh://user@dummy/local-stream stream2
97 $ hg clone --stream ssh://user@dummy/local-stream stream2
98 streaming all changes
98 streaming all changes
99 4 files to transfer, 602 bytes of data (no-zstd !)
99 4 files to transfer, 602 bytes of data (no-zstd !)
100 transferred 602 bytes in * seconds (*) (glob) (no-zstd !)
100 transferred 602 bytes in * seconds (*) (glob) (no-zstd !)
@@ -114,7 +114,7 b' clone bookmarks via stream'
114
114
115 clone remote via pull
115 clone remote via pull
116
116
117 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote local
117 $ hg clone ssh://user@dummy/remote local
118 requesting all changes
118 requesting all changes
119 adding changesets
119 adding changesets
120 adding manifests
120 adding manifests
@@ -142,14 +142,14 b' empty default pull'
142
142
143 $ hg paths
143 $ hg paths
144 default = ssh://user@dummy/remote
144 default = ssh://user@dummy/remote
145 $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\""
145 $ hg pull
146 pulling from ssh://user@dummy/remote
146 pulling from ssh://user@dummy/remote
147 searching for changes
147 searching for changes
148 no changes found
148 no changes found
149
149
150 pull from wrong ssh URL
150 pull from wrong ssh URL
151
151
152 $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/doesnotexist
152 $ hg pull ssh://user@dummy/doesnotexist
153 pulling from ssh://user@dummy/doesnotexist
153 pulling from ssh://user@dummy/doesnotexist
154 remote: abort: repository doesnotexist not found
154 remote: abort: repository doesnotexist not found
155 abort: no suitable response from remote hg
155 abort: no suitable response from remote hg
@@ -163,8 +163,6 b' local change'
163 updating rc
163 updating rc
164
164
165 $ echo "default-push = ssh://user@dummy/remote" >> .hg/hgrc
165 $ echo "default-push = ssh://user@dummy/remote" >> .hg/hgrc
166 $ echo "[ui]" >> .hg/hgrc
167 $ echo "ssh = \"$PYTHON\" \"$TESTDIR/dummyssh\"" >> .hg/hgrc
168
166
169 find outgoing
167 find outgoing
170
168
@@ -181,7 +179,7 b' find outgoing'
181
179
182 find incoming on the remote side
180 find incoming on the remote side
183
181
184 $ hg incoming -R ../remote -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/local
182 $ hg incoming -R ../remote ssh://user@dummy/local
185 comparing with ssh://user@dummy/local
183 comparing with ssh://user@dummy/local
186 searching for changes
184 searching for changes
187 changeset: 3:a28a9d1a809c
185 changeset: 3:a28a9d1a809c
@@ -194,7 +192,7 b' find incoming on the remote side'
194
192
195 find incoming on the remote side (using absolute path)
193 find incoming on the remote side (using absolute path)
196
194
197 $ hg incoming -R ../remote -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/`pwd`"
195 $ hg incoming -R ../remote "ssh://user@dummy/`pwd`"
198 comparing with ssh://user@dummy/$TESTTMP/local
196 comparing with ssh://user@dummy/$TESTTMP/local
199 searching for changes
197 searching for changes
200 changeset: 3:a28a9d1a809c
198 changeset: 3:a28a9d1a809c
@@ -241,7 +239,7 b' check remote tip'
241 test pushkeys and bookmarks
239 test pushkeys and bookmarks
242
240
243 $ cd $TESTTMP/local
241 $ cd $TESTTMP/local
244 $ hg debugpushkey --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote namespaces
242 $ hg debugpushkey ssh://user@dummy/remote namespaces
245 bookmarks
243 bookmarks
246 namespaces
244 namespaces
247 phases
245 phases
@@ -256,7 +254,7 b' test pushkeys and bookmarks'
256 no changes found
254 no changes found
257 exporting bookmark foo
255 exporting bookmark foo
258 [1]
256 [1]
259 $ hg debugpushkey --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote bookmarks
257 $ hg debugpushkey ssh://user@dummy/remote bookmarks
260 foo 1160648e36cec0054048a7edc4110c6f84fde594
258 foo 1160648e36cec0054048a7edc4110c6f84fde594
261 $ hg book -f foo
259 $ hg book -f foo
262 $ hg push --traceback
260 $ hg push --traceback
@@ -328,7 +326,7 b' clone bookmarks'
328 $ hg -R ../remote bookmark test
326 $ hg -R ../remote bookmark test
329 $ hg -R ../remote bookmarks
327 $ hg -R ../remote bookmarks
330 * test 4:6c0482d977a3
328 * test 4:6c0482d977a3
331 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote local-bookmarks
329 $ hg clone ssh://user@dummy/remote local-bookmarks
332 requesting all changes
330 requesting all changes
333 adding changesets
331 adding changesets
334 adding manifests
332 adding manifests
@@ -356,21 +354,21 b' hide outer repo'
356
354
357 Test remote paths with spaces (issue2983):
355 Test remote paths with spaces (issue2983):
358
356
359 $ hg init --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo"
357 $ hg init "ssh://user@dummy/a repo"
360 $ touch "$TESTTMP/a repo/test"
358 $ touch "$TESTTMP/a repo/test"
361 $ hg -R 'a repo' commit -A -m "test"
359 $ hg -R 'a repo' commit -A -m "test"
362 adding test
360 adding test
363 $ hg -R 'a repo' tag tag
361 $ hg -R 'a repo' tag tag
364 $ hg id --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo"
362 $ hg id "ssh://user@dummy/a repo"
365 73649e48688a
363 73649e48688a
366
364
367 $ hg id --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo#noNoNO"
365 $ hg id "ssh://user@dummy/a repo#noNoNO"
368 abort: unknown revision 'noNoNO'
366 abort: unknown revision 'noNoNO'
369 [255]
367 [255]
370
368
371 Test (non-)escaping of remote paths with spaces when cloning (issue3145):
369 Test (non-)escaping of remote paths with spaces when cloning (issue3145):
372
370
373 $ hg clone --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo"
371 $ hg clone "ssh://user@dummy/a repo"
374 destination directory: a repo
372 destination directory: a repo
375 abort: destination 'a repo' is not empty
373 abort: destination 'a repo' is not empty
376 [10]
374 [10]
@@ -462,8 +460,6 b' stderr from remote commands should be pr'
462 $ cat >> .hg/hgrc << EOF
460 $ cat >> .hg/hgrc << EOF
463 > [paths]
461 > [paths]
464 > default-push = ssh://user@dummy/remote
462 > default-push = ssh://user@dummy/remote
465 > [ui]
466 > ssh = "$PYTHON" "$TESTDIR/dummyssh"
467 > [extensions]
463 > [extensions]
468 > localwrite = localwrite.py
464 > localwrite = localwrite.py
469 > EOF
465 > EOF
@@ -486,7 +482,7 b' debug output'
486
482
487 $ hg pull --debug ssh://user@dummy/remote
483 $ hg pull --debug ssh://user@dummy/remote
488 pulling from ssh://user@dummy/remote
484 pulling from ssh://user@dummy/remote
489 running .* ".*/dummyssh" ['"]user@dummy['"] ('|")hg -R remote serve --stdio('|") (re)
485 running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R remote serve --stdio['"] (re)
490 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
486 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
491 sending hello command
487 sending hello command
492 sending between command
488 sending between command
@@ -583,11 +579,11 b' remote hook failure is attributed to rem'
583
579
584 $ echo "pretxnchangegroup.fail = python:$TESTTMP/failhook:hook" >> remote/.hg/hgrc
580 $ echo "pretxnchangegroup.fail = python:$TESTTMP/failhook:hook" >> remote/.hg/hgrc
585
581
586 $ hg -q --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" clone ssh://user@dummy/remote hookout
582 $ hg -q clone ssh://user@dummy/remote hookout
587 $ cd hookout
583 $ cd hookout
588 $ touch hookfailure
584 $ touch hookfailure
589 $ hg -q commit -A -m 'remote hook failure'
585 $ hg -q commit -A -m 'remote hook failure'
590 $ hg --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" push
586 $ hg push
591 pushing to ssh://user@dummy/remote
587 pushing to ssh://user@dummy/remote
592 searching for changes
588 searching for changes
593 remote: adding changesets
589 remote: adding changesets
@@ -607,7 +603,7 b' abort during pull is properly reported a'
607 > [extensions]
603 > [extensions]
608 > crash = ${TESTDIR}/crashgetbundler.py
604 > crash = ${TESTDIR}/crashgetbundler.py
609 > EOF
605 > EOF
610 $ hg --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" pull
606 $ hg pull
611 pulling from ssh://user@dummy/remote
607 pulling from ssh://user@dummy/remote
612 searching for changes
608 searching for changes
613 adding changesets
609 adding changesets
@@ -28,7 +28,7 b" creating 'remote' repo"
28 clone remote via stream
28 clone remote via stream
29
29
30 $ for i in 0 1 2 3 4 5 6 7 8; do
30 $ for i in 0 1 2 3 4 5 6 7 8; do
31 > hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --stream -r "$i" ssh://user@dummy/remote test-"$i"
31 > hg clone --stream -r "$i" ssh://user@dummy/remote test-"$i"
32 > if cd test-"$i"; then
32 > if cd test-"$i"; then
33 > hg verify
33 > hg verify
34 > cd ..
34 > cd ..
@@ -160,7 +160,7 b' clone remote via stream'
160 checked 9 changesets with 7 changes to 4 files
160 checked 9 changesets with 7 changes to 4 files
161 $ cd ..
161 $ cd ..
162 $ cd test-1
162 $ cd test-1
163 $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" -r 4 ssh://user@dummy/remote
163 $ hg pull -r 4 ssh://user@dummy/remote
164 pulling from ssh://user@dummy/remote
164 pulling from ssh://user@dummy/remote
165 searching for changes
165 searching for changes
166 adding changesets
166 adding changesets
@@ -175,7 +175,7 b' clone remote via stream'
175 crosschecking files in changesets and manifests
175 crosschecking files in changesets and manifests
176 checking files
176 checking files
177 checked 3 changesets with 2 changes to 1 files
177 checked 3 changesets with 2 changes to 1 files
178 $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote
178 $ hg pull ssh://user@dummy/remote
179 pulling from ssh://user@dummy/remote
179 pulling from ssh://user@dummy/remote
180 searching for changes
180 searching for changes
181 adding changesets
181 adding changesets
@@ -186,7 +186,7 b' clone remote via stream'
186 (run 'hg update' to get a working copy)
186 (run 'hg update' to get a working copy)
187 $ cd ..
187 $ cd ..
188 $ cd test-2
188 $ cd test-2
189 $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" -r 5 ssh://user@dummy/remote
189 $ hg pull -r 5 ssh://user@dummy/remote
190 pulling from ssh://user@dummy/remote
190 pulling from ssh://user@dummy/remote
191 searching for changes
191 searching for changes
192 adding changesets
192 adding changesets
@@ -201,7 +201,7 b' clone remote via stream'
201 crosschecking files in changesets and manifests
201 crosschecking files in changesets and manifests
202 checking files
202 checking files
203 checked 5 changesets with 3 changes to 1 files
203 checked 5 changesets with 3 changes to 1 files
204 $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote
204 $ hg pull ssh://user@dummy/remote
205 pulling from ssh://user@dummy/remote
205 pulling from ssh://user@dummy/remote
206 searching for changes
206 searching for changes
207 adding changesets
207 adding changesets
@@ -28,8 +28,6 b' protocols with inline conditional output'
28 > }
28 > }
29
29
30 $ cat >> $HGRCPATH << EOF
30 $ cat >> $HGRCPATH << EOF
31 > [ui]
32 > ssh = "$PYTHON" "$TESTDIR/dummyssh"
33 > [devel]
31 > [devel]
34 > debug.peer-request = true
32 > debug.peer-request = true
35 > [extensions]
33 > [extensions]
@@ -65,8 +63,7 b' Test a normal behaving server, for sanit'
65 $ cd ..
63 $ cd ..
66
64
67 $ hg --debug debugpeer ssh://user@dummy/server
65 $ hg --debug debugpeer ssh://user@dummy/server
68 running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
66 running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re)
69 running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
70 devel-peer-request: hello+between
67 devel-peer-request: hello+between
71 devel-peer-request: pairs: 81 bytes
68 devel-peer-request: pairs: 81 bytes
72 sending hello command
69 sending hello command
@@ -178,8 +175,7 b' SSH banner is not printed by default, ig'
178 --debug will print the banner
175 --debug will print the banner
179
176
180 $ SSHSERVERMODE=banner hg --debug debugpeer ssh://user@dummy/server
177 $ SSHSERVERMODE=banner hg --debug debugpeer ssh://user@dummy/server
181 running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
178 running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re)
182 running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
183 devel-peer-request: hello+between
179 devel-peer-request: hello+between
184 devel-peer-request: pairs: 81 bytes
180 devel-peer-request: pairs: 81 bytes
185 sending hello command
181 sending hello command
@@ -269,8 +265,7 b' The client should refuse, as we dropped '
269 servers.
265 servers.
270
266
271 $ SSHSERVERMODE=no-hello hg --debug debugpeer ssh://user@dummy/server
267 $ SSHSERVERMODE=no-hello hg --debug debugpeer ssh://user@dummy/server
272 running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
268 running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re)
273 running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
274 devel-peer-request: hello+between
269 devel-peer-request: hello+between
275 devel-peer-request: pairs: 81 bytes
270 devel-peer-request: pairs: 81 bytes
276 sending hello command
271 sending hello command
@@ -315,8 +310,7 b' Sending an unknown command to the server'
315 o> 1\n
310 o> 1\n
316
311
317 $ hg --config sshpeer.mode=extra-handshake-commands --config sshpeer.handshake-mode=pre-no-args --debug debugpeer ssh://user@dummy/server
312 $ hg --config sshpeer.mode=extra-handshake-commands --config sshpeer.handshake-mode=pre-no-args --debug debugpeer ssh://user@dummy/server
318 running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
313 running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re)
319 running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
320 sending no-args command
314 sending no-args command
321 devel-peer-request: hello+between
315 devel-peer-request: hello+between
322 devel-peer-request: pairs: 81 bytes
316 devel-peer-request: pairs: 81 bytes
@@ -385,8 +379,7 b' Send multiple unknown commands before he'
385 o> \n
379 o> \n
386
380
387 $ hg --config sshpeer.mode=extra-handshake-commands --config sshpeer.handshake-mode=pre-multiple-no-args --debug debugpeer ssh://user@dummy/server
381 $ hg --config sshpeer.mode=extra-handshake-commands --config sshpeer.handshake-mode=pre-multiple-no-args --debug debugpeer ssh://user@dummy/server
388 running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
382 running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re)
389 running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
390 sending unknown1 command
383 sending unknown1 command
391 sending unknown2 command
384 sending unknown2 command
392 sending unknown3 command
385 sending unknown3 command
@@ -961,8 +954,7 b' Send an upgrade request to a server that'
961 $ cd ..
954 $ cd ..
962
955
963 $ hg --config experimental.sshpeer.advertise-v2=true --debug debugpeer ssh://user@dummy/server
956 $ hg --config experimental.sshpeer.advertise-v2=true --debug debugpeer ssh://user@dummy/server
964 running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
957 running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re)
965 running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
966 sending upgrade request: * proto=exp-ssh-v2-0003 (glob)
958 sending upgrade request: * proto=exp-ssh-v2-0003 (glob)
967 devel-peer-request: hello+between
959 devel-peer-request: hello+between
968 devel-peer-request: pairs: 81 bytes
960 devel-peer-request: pairs: 81 bytes
@@ -1019,8 +1011,7 b' Send an upgrade request to a server that'
1019 $ cd ..
1011 $ cd ..
1020
1012
1021 $ hg --config experimental.sshpeer.advertise-v2=true --debug debugpeer ssh://user@dummy/server
1013 $ hg --config experimental.sshpeer.advertise-v2=true --debug debugpeer ssh://user@dummy/server
1022 running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
1014 running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re)
1023 running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
1024 sending upgrade request: * proto=exp-ssh-v2-0003 (glob)
1015 sending upgrade request: * proto=exp-ssh-v2-0003 (glob)
1025 devel-peer-request: hello+between
1016 devel-peer-request: hello+between
1026 devel-peer-request: pairs: 81 bytes
1017 devel-peer-request: pairs: 81 bytes
@@ -1038,8 +1029,7 b' Send an upgrade request to a server that'
1038 Verify the peer has capabilities
1029 Verify the peer has capabilities
1039
1030
1040 $ hg --config experimental.sshpeer.advertise-v2=true --debug debugcapabilities ssh://user@dummy/server
1031 $ hg --config experimental.sshpeer.advertise-v2=true --debug debugcapabilities ssh://user@dummy/server
1041 running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
1032 running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re)
1042 running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
1043 sending upgrade request: * proto=exp-ssh-v2-0003 (glob)
1033 sending upgrade request: * proto=exp-ssh-v2-0003 (glob)
1044 devel-peer-request: hello+between
1034 devel-peer-request: hello+between
1045 devel-peer-request: pairs: 81 bytes
1035 devel-peer-request: pairs: 81 bytes
@@ -4,13 +4,6 b' XXX-RHG this test hangs if `hg` is reall'
4 `alias hg=rhg` by run-tests.py. With that alias removed, this test is revealed to be
4 `alias hg=rhg` by run-tests.py. With that alias removed, this test is revealed to be
5 buggy. This needs to be resolved sooner rather than later.
5 buggy. This needs to be resolved sooner rather than later.
6
6
7 initial setup
8
9 $ cat << EOF >> $HGRCPATH
10 > [ui]
11 > ssh="$PYTHON" "$TESTDIR/dummyssh"
12 > EOF
13
14 repository itself is non-readable
7 repository itself is non-readable
15 ---------------------------------
8 ---------------------------------
16
9
@@ -42,18 +42,18 b' configure for serving'
42
42
43 repo not found error
43 repo not found error
44
44
45 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/nonexistent local
45 $ hg clone ssh://user@dummy/nonexistent local
46 remote: abort: repository nonexistent not found
46 remote: abort: repository nonexistent not found
47 abort: no suitable response from remote hg
47 abort: no suitable response from remote hg
48 [255]
48 [255]
49 $ hg clone -q -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/nonexistent local
49 $ hg clone -q ssh://user@dummy/nonexistent local
50 remote: abort: repository nonexistent not found
50 remote: abort: repository nonexistent not found
51 abort: no suitable response from remote hg
51 abort: no suitable response from remote hg
52 [255]
52 [255]
53
53
54 non-existent absolute path
54 non-existent absolute path
55
55
56 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/`pwd`/nonexistent local
56 $ hg clone ssh://user@dummy/`pwd`/nonexistent local
57 remote: abort: repository $TESTTMP/nonexistent not found
57 remote: abort: repository $TESTTMP/nonexistent not found
58 abort: no suitable response from remote hg
58 abort: no suitable response from remote hg
59 [255]
59 [255]
@@ -62,7 +62,7 b' clone remote via stream'
62
62
63 #if no-reposimplestore
63 #if no-reposimplestore
64
64
65 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --stream ssh://user@dummy/remote local-stream
65 $ hg clone --stream ssh://user@dummy/remote local-stream
66 streaming all changes
66 streaming all changes
67 8 files to transfer, 827 bytes of data (no-zstd !)
67 8 files to transfer, 827 bytes of data (no-zstd !)
68 transferred 827 bytes in * seconds (*) (glob) (no-zstd !)
68 transferred 827 bytes in * seconds (*) (glob) (no-zstd !)
@@ -84,7 +84,7 b' clone remote via stream'
84 clone bookmarks via stream
84 clone bookmarks via stream
85
85
86 $ hg -R local-stream book mybook
86 $ hg -R local-stream book mybook
87 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --stream ssh://user@dummy/local-stream stream2
87 $ hg clone --stream ssh://user@dummy/local-stream stream2
88 streaming all changes
88 streaming all changes
89 15 files to transfer, * of data (glob)
89 15 files to transfer, * of data (glob)
90 transferred * in * seconds (*) (glob)
90 transferred * in * seconds (*) (glob)
@@ -100,7 +100,7 b' clone bookmarks via stream'
100
100
101 clone remote via pull
101 clone remote via pull
102
102
103 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote local
103 $ hg clone ssh://user@dummy/remote local
104 requesting all changes
104 requesting all changes
105 adding changesets
105 adding changesets
106 adding manifests
106 adding manifests
@@ -128,14 +128,14 b' empty default pull'
128
128
129 $ hg paths
129 $ hg paths
130 default = ssh://user@dummy/remote
130 default = ssh://user@dummy/remote
131 $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\""
131 $ hg pull
132 pulling from ssh://user@dummy/remote
132 pulling from ssh://user@dummy/remote
133 searching for changes
133 searching for changes
134 no changes found
134 no changes found
135
135
136 pull from wrong ssh URL
136 pull from wrong ssh URL
137
137
138 $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/doesnotexist
138 $ hg pull ssh://user@dummy/doesnotexist
139 pulling from ssh://user@dummy/doesnotexist
139 pulling from ssh://user@dummy/doesnotexist
140 remote: abort: repository doesnotexist not found
140 remote: abort: repository doesnotexist not found
141 abort: no suitable response from remote hg
141 abort: no suitable response from remote hg
@@ -149,8 +149,6 b' local change'
149 updating rc
149 updating rc
150
150
151 $ echo "default-push = ssh://user@dummy/remote" >> .hg/hgrc
151 $ echo "default-push = ssh://user@dummy/remote" >> .hg/hgrc
152 $ echo "[ui]" >> .hg/hgrc
153 $ echo "ssh = \"$PYTHON\" \"$TESTDIR/dummyssh\"" >> .hg/hgrc
154
152
155 find outgoing
153 find outgoing
156
154
@@ -167,7 +165,7 b' find outgoing'
167
165
168 find incoming on the remote side
166 find incoming on the remote side
169
167
170 $ hg incoming -R ../remote -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/local
168 $ hg incoming -R ../remote ssh://user@dummy/local
171 comparing with ssh://user@dummy/local
169 comparing with ssh://user@dummy/local
172 searching for changes
170 searching for changes
173 changeset: 3:a28a9d1a809c
171 changeset: 3:a28a9d1a809c
@@ -180,7 +178,7 b' find incoming on the remote side'
180
178
181 find incoming on the remote side (using absolute path)
179 find incoming on the remote side (using absolute path)
182
180
183 $ hg incoming -R ../remote -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/`pwd`"
181 $ hg incoming -R ../remote "ssh://user@dummy/`pwd`"
184 comparing with ssh://user@dummy/$TESTTMP/local
182 comparing with ssh://user@dummy/$TESTTMP/local
185 searching for changes
183 searching for changes
186 changeset: 3:a28a9d1a809c
184 changeset: 3:a28a9d1a809c
@@ -227,7 +225,7 b' check remote tip'
227 test pushkeys and bookmarks
225 test pushkeys and bookmarks
228
226
229 $ cd $TESTTMP/local
227 $ cd $TESTTMP/local
230 $ hg debugpushkey --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote namespaces
228 $ hg debugpushkey ssh://user@dummy/remote namespaces
231 bookmarks
229 bookmarks
232 namespaces
230 namespaces
233 phases
231 phases
@@ -242,7 +240,7 b' test pushkeys and bookmarks'
242 no changes found
240 no changes found
243 exporting bookmark foo
241 exporting bookmark foo
244 [1]
242 [1]
245 $ hg debugpushkey --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote bookmarks
243 $ hg debugpushkey ssh://user@dummy/remote bookmarks
246 foo 1160648e36cec0054048a7edc4110c6f84fde594
244 foo 1160648e36cec0054048a7edc4110c6f84fde594
247 $ hg book -f foo
245 $ hg book -f foo
248 $ hg push --traceback
246 $ hg push --traceback
@@ -347,7 +345,7 b' clone bookmarks'
347 $ hg -R ../remote bookmark test
345 $ hg -R ../remote bookmark test
348 $ hg -R ../remote bookmarks
346 $ hg -R ../remote bookmarks
349 * test 4:6c0482d977a3
347 * test 4:6c0482d977a3
350 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote local-bookmarks
348 $ hg clone ssh://user@dummy/remote local-bookmarks
351 requesting all changes
349 requesting all changes
352 adding changesets
350 adding changesets
353 adding manifests
351 adding manifests
@@ -375,21 +373,21 b' hide outer repo'
375
373
376 Test remote paths with spaces (issue2983):
374 Test remote paths with spaces (issue2983):
377
375
378 $ hg init --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo"
376 $ hg init "ssh://user@dummy/a repo"
379 $ touch "$TESTTMP/a repo/test"
377 $ touch "$TESTTMP/a repo/test"
380 $ hg -R 'a repo' commit -A -m "test"
378 $ hg -R 'a repo' commit -A -m "test"
381 adding test
379 adding test
382 $ hg -R 'a repo' tag tag
380 $ hg -R 'a repo' tag tag
383 $ hg id --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo"
381 $ hg id "ssh://user@dummy/a repo"
384 73649e48688a
382 73649e48688a
385
383
386 $ hg id --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo#noNoNO"
384 $ hg id "ssh://user@dummy/a repo#noNoNO"
387 abort: unknown revision 'noNoNO'
385 abort: unknown revision 'noNoNO'
388 [255]
386 [255]
389
387
390 Test (non-)escaping of remote paths with spaces when cloning (issue3145):
388 Test (non-)escaping of remote paths with spaces when cloning (issue3145):
391
389
392 $ hg clone --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo"
390 $ hg clone "ssh://user@dummy/a repo"
393 destination directory: a repo
391 destination directory: a repo
394 abort: destination 'a repo' is not empty
392 abort: destination 'a repo' is not empty
395 [10]
393 [10]
@@ -515,8 +513,6 b' stderr from remote commands should be pr'
515 $ cat >> .hg/hgrc << EOF
513 $ cat >> .hg/hgrc << EOF
516 > [paths]
514 > [paths]
517 > default-push = ssh://user@dummy/remote
515 > default-push = ssh://user@dummy/remote
518 > [ui]
519 > ssh = "$PYTHON" "$TESTDIR/dummyssh"
520 > [extensions]
516 > [extensions]
521 > localwrite = localwrite.py
517 > localwrite = localwrite.py
522 > EOF
518 > EOF
@@ -540,7 +536,7 b' debug output'
540
536
541 $ hg pull --debug ssh://user@dummy/remote --config devel.debug.peer-request=yes
537 $ hg pull --debug ssh://user@dummy/remote --config devel.debug.peer-request=yes
542 pulling from ssh://user@dummy/remote
538 pulling from ssh://user@dummy/remote
543 running .* ".*/dummyssh" ['"]user@dummy['"] ('|")hg -R remote serve --stdio('|") (re)
539 running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R remote serve --stdio['"] (re)
544 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
540 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
545 devel-peer-request: hello+between
541 devel-peer-request: hello+between
546 devel-peer-request: pairs: 81 bytes
542 devel-peer-request: pairs: 81 bytes
@@ -670,11 +666,11 b' remote hook failure is attributed to rem'
670
666
671 $ echo "pretxnchangegroup.fail = python:$TESTTMP/failhook:hook" >> remote/.hg/hgrc
667 $ echo "pretxnchangegroup.fail = python:$TESTTMP/failhook:hook" >> remote/.hg/hgrc
672
668
673 $ hg -q --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" clone ssh://user@dummy/remote hookout
669 $ hg -q clone ssh://user@dummy/remote hookout
674 $ cd hookout
670 $ cd hookout
675 $ touch hookfailure
671 $ touch hookfailure
676 $ hg -q commit -A -m 'remote hook failure'
672 $ hg -q commit -A -m 'remote hook failure'
677 $ hg --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" push
673 $ hg push
678 pushing to ssh://user@dummy/remote
674 pushing to ssh://user@dummy/remote
679 searching for changes
675 searching for changes
680 remote: adding changesets
676 remote: adding changesets
@@ -695,7 +691,7 b' abort during pull is properly reported a'
695 > [extensions]
691 > [extensions]
696 > crash = ${TESTDIR}/crashgetbundler.py
692 > crash = ${TESTDIR}/crashgetbundler.py
697 > EOF
693 > EOF
698 $ hg --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" pull
694 $ hg pull
699 pulling from ssh://user@dummy/remote
695 pulling from ssh://user@dummy/remote
700 searching for changes
696 searching for changes
701 remote: abort: this is an exercise
697 remote: abort: this is an exercise
@@ -704,14 +700,14 b' abort during pull is properly reported a'
704
700
705 abort with no error hint when there is an ssh problem when pulling
701 abort with no error hint when there is an ssh problem when pulling
706
702
707 $ hg pull ssh://brokenrepository -e "\"$PYTHON\" \"$TESTDIR/dummyssh\""
703 $ hg pull ssh://brokenrepository
708 pulling from ssh://brokenrepository/
704 pulling from ssh://brokenrepository/
709 abort: no suitable response from remote hg
705 abort: no suitable response from remote hg
710 [255]
706 [255]
711
707
712 abort with configured error hint when there is an ssh problem when pulling
708 abort with configured error hint when there is an ssh problem when pulling
713
709
714 $ hg pull ssh://brokenrepository -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" \
710 $ hg pull ssh://brokenrepository \
715 > --config ui.ssherrorhint="Please see http://company/internalwiki/ssh.html"
711 > --config ui.ssherrorhint="Please see http://company/internalwiki/ssh.html"
716 pulling from ssh://brokenrepository/
712 pulling from ssh://brokenrepository/
717 abort: no suitable response from remote hg
713 abort: no suitable response from remote hg
@@ -1,21 +1,12 b''
1 #testcases dirstate-v1 dirstate-v1-tree dirstate-v2
1 #testcases dirstate-v1 dirstate-v2
2
3 #if no-rust
4 $ hg init repo0 --config format.exp-dirstate-v2=1
5 abort: dirstate v2 format requested by config but not supported (requires Rust extensions)
6 [255]
7 #endif
8
9 #if dirstate-v1-tree
10 #require rust
11 $ echo '[experimental]' >> $HGRCPATH
12 $ echo 'dirstate-tree.in-memory=1' >> $HGRCPATH
13 #endif
14
2
15 #if dirstate-v2
3 #if dirstate-v2
16 #require rust
4 $ cat >> $HGRCPATH << EOF
17 $ echo '[format]' >> $HGRCPATH
5 > [format]
18 $ echo 'exp-dirstate-v2=1' >> $HGRCPATH
6 > exp-rc-dirstate-v2=1
7 > [storage]
8 > dirstate-v2.slow-path=allow
9 > EOF
19 #endif
10 #endif
20
11
21 $ hg init repo1
12 $ hg init repo1
@@ -749,7 +740,7 b' When a directory containing a tracked fi'
749 if also listing unknowns.
740 if also listing unknowns.
750 The tree-based dirstate and status algorithm fix this:
741 The tree-based dirstate and status algorithm fix this:
751
742
752 #if symlink no-dirstate-v1
743 #if symlink no-dirstate-v1 rust
753
744
754 $ cd ..
745 $ cd ..
755 $ hg init issue6335
746 $ hg init issue6335
@@ -765,11 +756,11 b' The tree-based dirstate and status algor'
765 ? bar/a
756 ? bar/a
766 ? foo
757 ? foo
767
758
768 $ hg status -c # incorrect output with `dirstate-v1`
759 $ hg status -c # incorrect output without the Rust implementation
769 $ hg status -cu
760 $ hg status -cu
770 ? bar/a
761 ? bar/a
771 ? foo
762 ? foo
772 $ hg status -d # incorrect output with `dirstate-v1`
763 $ hg status -d # incorrect output without the Rust implementation
773 ! foo/a
764 ! foo/a
774 $ hg status -du
765 $ hg status -du
775 ! foo/a
766 ! foo/a
@@ -916,7 +907,7 b' Check using include flag while listing i'
916 I B.hs
907 I B.hs
917 I ignored-folder/ctest.hs
908 I ignored-folder/ctest.hs
918
909
919 #if dirstate-v2
910 #if rust dirstate-v2
920
911
921 Check read_dir caching
912 Check read_dir caching
922
913
@@ -14,7 +14,6 b' Test creating a consuming stream bundle '
14 > evolution.exchange=True
14 > evolution.exchange=True
15 > bundle2-output-capture=True
15 > bundle2-output-capture=True
16 > [ui]
16 > [ui]
17 > ssh="$PYTHON" "$TESTDIR/dummyssh"
18 > logtemplate={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
17 > logtemplate={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
19 > [web]
18 > [web]
20 > push_ssl = false
19 > push_ssl = false
@@ -49,12 +48,12 b' The extension requires a repo (currently'
49 stream2 -- {bytecount: 1693, filecount: 11, requirements: dotencode%2Cfncache%2Cgeneraldelta%2Crevlogv1%2Csparserevlog%2Cstore} (mandatory: True) (no-zstd !)
48 stream2 -- {bytecount: 1693, filecount: 11, requirements: dotencode%2Cfncache%2Cgeneraldelta%2Crevlogv1%2Csparserevlog%2Cstore} (mandatory: True) (no-zstd !)
50 stream2 -- {bytecount: 1693, filecount: 11, requirements: dotencode%2Cfncache%2Cgeneraldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog%2Cstore} (mandatory: True) (zstd no-rust !)
49 stream2 -- {bytecount: 1693, filecount: 11, requirements: dotencode%2Cfncache%2Cgeneraldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog%2Cstore} (mandatory: True) (zstd no-rust !)
51 stream2 -- {bytecount: 1693, filecount: 11, requirements: dotencode%2Cfncache%2Cgeneraldelta%2Cpersistent-nodemap%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog%2Cstore} (mandatory: True) (rust no-dirstate-v2 !)
50 stream2 -- {bytecount: 1693, filecount: 11, requirements: dotencode%2Cfncache%2Cgeneraldelta%2Cpersistent-nodemap%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog%2Cstore} (mandatory: True) (rust no-dirstate-v2 !)
52 stream2 -- {bytecount: 1693, filecount: 11, requirements: dotencode%2Cexp-dirstate-v2%2Cfncache%2Cgeneraldelta%2Cpersistent-nodemap%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog%2Cstore} (mandatory: True) (dirstate-v2 !)
51 stream2 -- {bytecount: 1693, filecount: 11, requirements: dotencode%2Cexp-rc-dirstate-v2%2Cfncache%2Cgeneraldelta%2Cpersistent-nodemap%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog%2Cstore} (mandatory: True) (dirstate-v2 !)
53 $ hg debugbundle --spec bundle.hg
52 $ hg debugbundle --spec bundle.hg
54 none-v2;stream=v2;requirements%3Ddotencode%2Cfncache%2Cgeneraldelta%2Crevlogv1%2Csparserevlog%2Cstore (no-zstd !)
53 none-v2;stream=v2;requirements%3Ddotencode%2Cfncache%2Cgeneraldelta%2Crevlogv1%2Csparserevlog%2Cstore (no-zstd !)
55 none-v2;stream=v2;requirements%3Ddotencode%2Cfncache%2Cgeneraldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog%2Cstore (zstd no-rust !)
54 none-v2;stream=v2;requirements%3Ddotencode%2Cfncache%2Cgeneraldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog%2Cstore (zstd no-rust !)
56 none-v2;stream=v2;requirements%3Ddotencode%2Cfncache%2Cgeneraldelta%2Cpersistent-nodemap%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog%2Cstore (rust no-dirstate-v2 !)
55 none-v2;stream=v2;requirements%3Ddotencode%2Cfncache%2Cgeneraldelta%2Cpersistent-nodemap%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog%2Cstore (rust no-dirstate-v2 !)
57 none-v2;stream=v2;requirements%3Ddotencode%2Cexp-dirstate-v2%2Cfncache%2Cgeneraldelta%2Cpersistent-nodemap%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog%2Cstore (dirstate-v2 !)
56 none-v2;stream=v2;requirements%3Ddotencode%2Cexp-rc-dirstate-v2%2Cfncache%2Cgeneraldelta%2Cpersistent-nodemap%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog%2Cstore (dirstate-v2 !)
58
57
59 Test that we can apply the bundle as a stream clone bundle
58 Test that we can apply the bundle as a stream clone bundle
60
59
@@ -709,7 +709,7 b' test hg strip -B bookmark'
709 bookmark 'todelete' deleted
709 bookmark 'todelete' deleted
710 $ hg id -ir dcbb326fdec2
710 $ hg id -ir dcbb326fdec2
711 abort: unknown revision 'dcbb326fdec2'
711 abort: unknown revision 'dcbb326fdec2'
712 [255]
712 [10]
713 $ hg id -ir d62d843c9a01
713 $ hg id -ir d62d843c9a01
714 d62d843c9a01
714 d62d843c9a01
715 $ hg bookmarks
715 $ hg bookmarks
@@ -725,17 +725,17 b' test hg strip -B bookmark'
725 bookmark 'multipledelete2' deleted
725 bookmark 'multipledelete2' deleted
726 $ hg id -ir e46a4836065c
726 $ hg id -ir e46a4836065c
727 abort: unknown revision 'e46a4836065c'
727 abort: unknown revision 'e46a4836065c'
728 [255]
728 [10]
729 $ hg id -ir b4594d867745
729 $ hg id -ir b4594d867745
730 abort: unknown revision 'b4594d867745'
730 abort: unknown revision 'b4594d867745'
731 [255]
731 [10]
732 $ hg strip -B singlenode1 -B singlenode2
732 $ hg strip -B singlenode1 -B singlenode2
733 saved backup bundle to $TESTTMP/bookmarks/.hg/strip-backup/43227190fef8-8da858f2-backup.hg
733 saved backup bundle to $TESTTMP/bookmarks/.hg/strip-backup/43227190fef8-8da858f2-backup.hg
734 bookmark 'singlenode1' deleted
734 bookmark 'singlenode1' deleted
735 bookmark 'singlenode2' deleted
735 bookmark 'singlenode2' deleted
736 $ hg id -ir 43227190fef8
736 $ hg id -ir 43227190fef8
737 abort: unknown revision '43227190fef8'
737 abort: unknown revision '43227190fef8'
738 [255]
738 [10]
739 $ hg strip -B unknownbookmark
739 $ hg strip -B unknownbookmark
740 abort: bookmark 'unknownbookmark' not found
740 abort: bookmark 'unknownbookmark' not found
741 [255]
741 [255]
@@ -750,7 +750,7 b' test hg strip -B bookmark'
750 bookmark 'delete' deleted
750 bookmark 'delete' deleted
751 $ hg id -ir 6:2702dd0c91e7
751 $ hg id -ir 6:2702dd0c91e7
752 abort: unknown revision '2702dd0c91e7'
752 abort: unknown revision '2702dd0c91e7'
753 [255]
753 [10]
754 $ hg update B
754 $ hg update B
755 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
755 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
756 (activating bookmark B)
756 (activating bookmark B)
@@ -186,7 +186,7 b' subrepo is referenced by absolute path.'
186
186
187 subrepo paths with ssh urls
187 subrepo paths with ssh urls
188
188
189 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/cloned sshclone
189 $ hg clone ssh://user@dummy/cloned sshclone
190 requesting all changes
190 requesting all changes
191 adding changesets
191 adding changesets
192 adding manifests
192 adding manifests
@@ -203,7 +203,7 b' subrepo paths with ssh urls'
203 new changesets 863c1745b441
203 new changesets 863c1745b441
204 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
204 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
205
205
206 $ hg -R sshclone push -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/`pwd`/cloned
206 $ hg -R sshclone push ssh://user@dummy/`pwd`/cloned
207 pushing to ssh://user@dummy/$TESTTMP/cloned
207 pushing to ssh://user@dummy/$TESTTMP/cloned
208 pushing subrepo sub to ssh://user@dummy/$TESTTMP/sub
208 pushing subrepo sub to ssh://user@dummy/$TESTTMP/sub
209 searching for changes
209 searching for changes
@@ -1275,8 +1275,8 b' Check that share works with subrepo'
1275 ../shared/subrepo-2/.hg/sharedpath
1275 ../shared/subrepo-2/.hg/sharedpath
1276 ../shared/subrepo-2/.hg/wcache
1276 ../shared/subrepo-2/.hg/wcache
1277 ../shared/subrepo-2/.hg/wcache/checkisexec (execbit !)
1277 ../shared/subrepo-2/.hg/wcache/checkisexec (execbit !)
1278 ../shared/subrepo-2/.hg/wcache/checklink (symlink !)
1278 ../shared/subrepo-2/.hg/wcache/checklink (symlink no-rust !)
1279 ../shared/subrepo-2/.hg/wcache/checklink-target (symlink !)
1279 ../shared/subrepo-2/.hg/wcache/checklink-target (symlink no-rust !)
1280 ../shared/subrepo-2/.hg/wcache/manifestfulltextcache (reporevlogstore !)
1280 ../shared/subrepo-2/.hg/wcache/manifestfulltextcache (reporevlogstore !)
1281 ../shared/subrepo-2/file
1281 ../shared/subrepo-2/file
1282 $ hg -R ../shared in
1282 $ hg -R ../shared in
@@ -1,17 +1,14 b''
1 #require symlink
1 #require symlink
2
2
3 #testcases dirstate-v1 dirstate-v1-tree dirstate-v2
3 #testcases dirstate-v1 dirstate-v2
4
5 #if dirstate-v1-tree
6 #require rust
7 $ echo '[experimental]' >> $HGRCPATH
8 $ echo 'dirstate-tree.in-memory=1' >> $HGRCPATH
9 #endif
10
4
11 #if dirstate-v2
5 #if dirstate-v2
12 #require rust
6 $ cat >> $HGRCPATH << EOF
13 $ echo '[format]' >> $HGRCPATH
7 > [format]
14 $ echo 'exp-dirstate-v2=1' >> $HGRCPATH
8 > exp-rc-dirstate-v2=1
9 > [storage]
10 > dirstate-v2.slow-path=allow
11 > EOF
15 #endif
12 #endif
16
13
17 == tests added in 0.7 ==
14 == tests added in 0.7 ==
@@ -82,15 +82,14 b' and the second file.i entry should match'
82 date: Thu Jan 01 00:00:00 1970 +0000
82 date: Thu Jan 01 00:00:00 1970 +0000
83 summary: _
83 summary: _
84
84
85 $ hg verify
85 $ hg verify -q
86 checking changesets
87 checking manifests
88 crosschecking files in changesets and manifests
89 checking files
90 warning: revlog 'data/file.d' not in fncache!
86 warning: revlog 'data/file.d' not in fncache!
91 checked 2 changesets with 2 changes to 1 files
92 1 warnings encountered!
87 1 warnings encountered!
93 hint: run "hg debugrebuildfncache" to recover from corrupt fncache
88 hint: run "hg debugrebuildfncache" to recover from corrupt fncache
89 $ hg debugrebuildfncache --only-data
90 adding data/file.d
91 1 items added, 0 removed from fncache
92 $ hg verify -q
94 $ cd ..
93 $ cd ..
95
94
96
95
@@ -133,12 +132,7 b' where the data file is left as garbage.'
133 date: Thu Jan 01 00:00:00 1970 +0000
132 date: Thu Jan 01 00:00:00 1970 +0000
134 summary: _
133 summary: _
135
134
136 $ hg verify
135 $ hg verify -q
137 checking changesets
138 checking manifests
139 crosschecking files in changesets and manifests
140 checking files
141 checked 2 changesets with 2 changes to 1 files
142 $ cd ..
136 $ cd ..
143
137
144
138
@@ -170,13 +164,8 b' Repeat the original test but let hg roll'
170 date: Thu Jan 01 00:00:00 1970 +0000
164 date: Thu Jan 01 00:00:00 1970 +0000
171 summary: _
165 summary: _
172
166
173 $ hg verify
167 $ hg verify -q
174 checking changesets
175 checking manifests
176 crosschecking files in changesets and manifests
177 checking files
178 warning: revlog 'data/file.d' not in fncache!
168 warning: revlog 'data/file.d' not in fncache!
179 checked 2 changesets with 2 changes to 1 files
180 1 warnings encountered!
169 1 warnings encountered!
181 hint: run "hg debugrebuildfncache" to recover from corrupt fncache
170 hint: run "hg debugrebuildfncache" to recover from corrupt fncache
182 $ cd ..
171 $ cd ..
@@ -2,7 +2,7 b' Test that, when an hg push is interrupte'
2 the remote hg is able to successfully roll back the transaction.
2 the remote hg is able to successfully roll back the transaction.
3
3
4 $ hg init -q remote
4 $ hg init -q remote
5 $ hg clone -e "\"$PYTHON\" \"$RUNTESTDIR/dummyssh\"" -q ssh://user@dummy/`pwd`/remote local
5 $ hg clone -q ssh://user@dummy/`pwd`/remote local
6 $ SIGPIPE_REMOTE_DEBUG_FILE="$TESTTMP/DEBUGFILE"
6 $ SIGPIPE_REMOTE_DEBUG_FILE="$TESTTMP/DEBUGFILE"
7 $ SYNCFILE1="$TESTTMP/SYNCFILE1"
7 $ SYNCFILE1="$TESTTMP/SYNCFILE1"
8 $ SYNCFILE2="$TESTTMP/SYNCFILE2"
8 $ SYNCFILE2="$TESTTMP/SYNCFILE2"
@@ -36,7 +36,7 b' disconnecting. Then exit nonzero, to for'
36
36
37 (use quiet to avoid flaky output from the server)
37 (use quiet to avoid flaky output from the server)
38
38
39 $ hg push --quiet -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --remotecmd "$remotecmd"
39 $ hg push --quiet --remotecmd "$remotecmd"
40 abort: stream ended unexpectedly (got 0 bytes, expected 4)
40 abort: stream ended unexpectedly (got 0 bytes, expected 4)
41 [255]
41 [255]
42 $ cat $SIGPIPE_REMOTE_DEBUG_FILE
42 $ cat $SIGPIPE_REMOTE_DEBUG_FILE
@@ -1,8 +1,3 b''
1 $ cat << EOF >> $HGRCPATH
2 > [ui]
3 > ssh="$PYTHON" "$TESTDIR/dummyssh"
4 > EOF
5
6 Set up repo
1 Set up repo
7
2
8 $ hg --config experimental.treemanifest=True init repo
3 $ hg --config experimental.treemanifest=True init repo
@@ -1638,7 +1638,7 b' Demonstrate that nothing to perform upgr'
1638
1638
1639 Upgrade to dirstate-v2
1639 Upgrade to dirstate-v2
1640
1640
1641 $ hg debugformat -v --config format.exp-dirstate-v2=1
1641 $ hg debugformat -v --config format.exp-rc-dirstate-v2=1
1642 format-variant repo config default
1642 format-variant repo config default
1643 fncache: yes yes yes
1643 fncache: yes yes yes
1644 dirstate-v2: no yes no
1644 dirstate-v2: no yes no
@@ -1653,12 +1653,12 b' Upgrade to dirstate-v2'
1653 plain-cl-delta: yes yes yes
1653 plain-cl-delta: yes yes yes
1654 compression: zstd zstd zstd
1654 compression: zstd zstd zstd
1655 compression-level: default default default
1655 compression-level: default default default
1656 $ hg debugupgraderepo --config format.exp-dirstate-v2=1 --run
1656 $ hg debugupgraderepo --config format.exp-rc-dirstate-v2=1 --run
1657 upgrade will perform the following actions:
1657 upgrade will perform the following actions:
1658
1658
1659 requirements
1659 requirements
1660 preserved: dotencode, exp-revlogv2.2, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, sparserevlog, store
1660 preserved: dotencode, exp-revlogv2.2, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, sparserevlog, store
1661 added: exp-dirstate-v2
1661 added: dirstate-v2
1662
1662
1663 dirstate-v2
1663 dirstate-v2
1664 "hg status" will be faster
1664 "hg status" will be faster
@@ -1703,7 +1703,7 b' Downgrade from dirstate-v2'
1703
1703
1704 requirements
1704 requirements
1705 preserved: dotencode, exp-revlogv2.2, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, sparserevlog, store
1705 preserved: dotencode, exp-revlogv2.2, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, sparserevlog, store
1706 removed: exp-dirstate-v2
1706 removed: dirstate-v2
1707
1707
1708 processed revlogs:
1708 processed revlogs:
1709 - all-filelogs
1709 - all-filelogs
@@ -75,9 +75,7 b' class clientpeer(wireprotov1peer.wirepee'
75
75
76 @wireprotov1peer.batchable
76 @wireprotov1peer.batchable
77 def greet(self, name):
77 def greet(self, name):
78 f = wireprotov1peer.future()
78 return {b'name': mangle(name)}, unmangle
79 yield {b'name': mangle(name)}, f
80 yield unmangle(f.value)
81
79
82
80
83 class serverrepo(object):
81 class serverrepo(object):
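The hunk above captures the new `wireprotov1peer.batchable` calling convention: instead of yielding a request and then reading a `future`'s value, a batchable method now simply returns its wire arguments together with a decoder callable. A minimal sketch of that shape in plain Python, with toy `mangle`/`unmangle` helpers standing in for the test extension's (not the real Mercurial API):

# A batchable method now returns (request_args, decoder) rather than
# driving a two-step generator with a future.
def mangle(s):
    return s[::-1]          # toy reversible transform, stand-in only

def unmangle(s):
    return s[::-1]

def greet(name):
    # the decorated peer method returns the wire arguments plus a
    # callable that decodes the server's raw reply into the result
    return {b'name': mangle(name)}, unmangle

args, decoder = greet(b'Fred')
assert decoder(mangle(b'Fred')) == b'Fred'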
@@ -142,13 +142,13 b' HTTP without the httpheader capability:'
142
142
143 SSH (try to exercise the ssh functionality with a dummy script):
143 SSH (try to exercise the ssh functionality with a dummy script):
144
144
145 $ hg debugwireargs --ssh "\"$PYTHON\" $TESTDIR/dummyssh" ssh://user@dummy/repo uno due tre quattro
145 $ hg debugwireargs ssh://user@dummy/repo uno due tre quattro
146 uno due tre quattro None
146 uno due tre quattro None
147 $ hg debugwireargs --ssh "\"$PYTHON\" $TESTDIR/dummyssh" ssh://user@dummy/repo eins zwei --four vier
147 $ hg debugwireargs ssh://user@dummy/repo eins zwei --four vier
148 eins zwei None vier None
148 eins zwei None vier None
149 $ hg debugwireargs --ssh "\"$PYTHON\" $TESTDIR/dummyssh" ssh://user@dummy/repo eins zwei
149 $ hg debugwireargs ssh://user@dummy/repo eins zwei
150 eins zwei None None None
150 eins zwei None None None
151 $ hg debugwireargs --ssh "\"$PYTHON\" $TESTDIR/dummyssh" ssh://user@dummy/repo eins zwei --five fuenf
151 $ hg debugwireargs ssh://user@dummy/repo eins zwei --five fuenf
152 eins zwei None None None
152 eins zwei None None None
153
153
154 Explicitly kill daemons to let the test exit on Windows
154 Explicitly kill daemons to let the test exit on Windows
@@ -1,69 +0,0 b''
1 # dirstatenonnormalcheck.py - extension to check the consistency of the
2 # dirstate's non-normal map
3 #
4 # For most operations on dirstate, this extension checks that the nonnormalset
5 # contains the right entries.
6 # It compares the cached nonnormal set to a nonnormalset built from the map of all
7 # the files in the dirstate to check that they contain the same files.
8
9 from __future__ import absolute_import
10
11 from mercurial import (
12 dirstate,
13 extensions,
14 pycompat,
15 )
16
17
18 def nonnormalentries(dmap):
19 """Compute nonnormal entries from dirstate's dmap"""
20 res = set()
21 for f, e in dmap.iteritems():
22 if e.state != b'n' or e.mtime == -1:
23 res.add(f)
24 return res
25
26
27 def checkconsistency(ui, orig, dmap, _nonnormalset, label):
28 """Compute nonnormalset from dmap, check that it matches _nonnormalset"""
29 nonnormalcomputedmap = nonnormalentries(dmap)
30 if _nonnormalset != nonnormalcomputedmap:
31 b_orig = pycompat.sysbytes(repr(orig))
32 ui.develwarn(b"%s call to %s\n" % (label, b_orig), config=b'dirstate')
33 ui.develwarn(b"inconsistency in nonnormalset\n", config=b'dirstate')
34 b_nonnormal = pycompat.sysbytes(repr(_nonnormalset))
35 ui.develwarn(b"[nonnormalset] %s\n" % b_nonnormal, config=b'dirstate')
36 b_nonnormalcomputed = pycompat.sysbytes(repr(nonnormalcomputedmap))
37 ui.develwarn(b"[map] %s\n" % b_nonnormalcomputed, config=b'dirstate')
38
39
40 def _checkdirstate(orig, self, *args, **kwargs):
41 """Check nonnormal set consistency before and after the call to orig"""
42 checkconsistency(
43 self._ui, orig, self._map, self._map.nonnormalset, b"before"
44 )
45 r = orig(self, *args, **kwargs)
46 checkconsistency(
47 self._ui, orig, self._map, self._map.nonnormalset, b"after"
48 )
49 return r
50
51
52 def extsetup(ui):
53 """Wrap functions modifying dirstate to check nonnormalset consistency"""
54 dirstatecl = dirstate.dirstate
55 devel = ui.configbool(b'devel', b'all-warnings')
56 paranoid = ui.configbool(b'experimental', b'nonnormalparanoidcheck')
57 if devel:
58 extensions.wrapfunction(dirstatecl, '_writedirstate', _checkdirstate)
59 if paranoid:
60 # We don't do all these checks when paranoid is disabled, as it would
61 # make the extension run very slowly on large repos
62 extensions.wrapfunction(dirstatecl, 'normallookup', _checkdirstate)
63 extensions.wrapfunction(dirstatecl, 'otherparent', _checkdirstate)
64 extensions.wrapfunction(dirstatecl, 'normal', _checkdirstate)
65 extensions.wrapfunction(dirstatecl, 'write', _checkdirstate)
66 extensions.wrapfunction(dirstatecl, 'add', _checkdirstate)
67 extensions.wrapfunction(dirstatecl, 'remove', _checkdirstate)
68 extensions.wrapfunction(dirstatecl, 'merge', _checkdirstate)
69 extensions.wrapfunction(dirstatecl, 'drop', _checkdirstate)
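The extension deleted above was built on a single wrap-and-check idea: around every dirstate-mutating call, recompute the non-normal set from scratch and compare it against the cached copy, before and after the call. A stripped-down sketch of that pattern with hypothetical names, plain Python rather than Mercurial's `extensions.wrapfunction` API:

# Generic before/after consistency check around a mutating method.
# `derive` recomputes the derived state from scratch; any mismatch with
# the cached copy indicates a stale cache.
def checked(method, derive):
    def wrapper(self, *args, **kwargs):
        assert self.cached == derive(self), "stale before %s" % method.__name__
        result = method(self, *args, **kwargs)
        assert self.cached == derive(self), "stale after %s" % method.__name__
        return result
    return wrapper

class Store(object):
    def __init__(self):
        self.items = {}
        self.cached = set()          # derived: keys with falsy values

    def put(self, key, value):
        self.items[key] = value
        if not value:
            self.cached.add(key)
        else:
            self.cached.discard(key)

Store.put = checked(Store.put, lambda s: {k for k, v in s.items.items() if not v})

s = Store()
s.put('a', 0)
s.put('b', 1)
assert s.cached == {'a'}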
@@ -1,494 +0,0 b''
1 // dirstate_map.rs
2 //
3 // Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
4 //
5 // This software may be used and distributed according to the terms of the
6 // GNU General Public License version 2 or any later version.
7
8 use crate::dirstate::parsers::Timestamp;
9 use crate::{
10 dirstate::EntryState,
11 dirstate::MTIME_UNSET,
12 dirstate::SIZE_FROM_OTHER_PARENT,
13 dirstate::SIZE_NON_NORMAL,
14 dirstate::V1_RANGEMASK,
15 pack_dirstate, parse_dirstate,
16 utils::hg_path::{HgPath, HgPathBuf},
17 CopyMap, DirsMultiset, DirstateEntry, DirstateError, DirstateParents,
18 StateMap,
19 };
20 use micro_timer::timed;
21 use std::collections::HashSet;
22 use std::iter::FromIterator;
23 use std::ops::Deref;
24
25 #[derive(Default)]
26 pub struct DirstateMap {
27 state_map: StateMap,
28 pub copy_map: CopyMap,
29 pub dirs: Option<DirsMultiset>,
30 pub all_dirs: Option<DirsMultiset>,
31 non_normal_set: Option<HashSet<HgPathBuf>>,
32 other_parent_set: Option<HashSet<HgPathBuf>>,
33 }
34
35 /// Should only really be used in python interface code, for clarity
36 impl Deref for DirstateMap {
37 type Target = StateMap;
38
39 fn deref(&self) -> &Self::Target {
40 &self.state_map
41 }
42 }
43
44 impl FromIterator<(HgPathBuf, DirstateEntry)> for DirstateMap {
45 fn from_iter<I: IntoIterator<Item = (HgPathBuf, DirstateEntry)>>(
46 iter: I,
47 ) -> Self {
48 Self {
49 state_map: iter.into_iter().collect(),
50 ..Self::default()
51 }
52 }
53 }
54
55 impl DirstateMap {
56 pub fn new() -> Self {
57 Self::default()
58 }
59
60 pub fn clear(&mut self) {
61 self.state_map = StateMap::default();
62 self.copy_map.clear();
63 self.non_normal_set = None;
64 self.other_parent_set = None;
65 }
66
67 pub fn set_v1_inner(&mut self, filename: &HgPath, entry: DirstateEntry) {
68 self.state_map.insert(filename.to_owned(), entry);
69 }
70
71 /// Add a tracked file to the dirstate
72 pub fn add_file(
73 &mut self,
74 filename: &HgPath,
75 entry: DirstateEntry,
76 // XXX once the dust settle this should probably become an enum
77 added: bool,
78 merged: bool,
79 from_p2: bool,
80 possibly_dirty: bool,
81 ) -> Result<(), DirstateError> {
82 let mut entry = entry;
83 if added {
84 assert!(!merged);
85 assert!(!possibly_dirty);
86 assert!(!from_p2);
87 entry.state = EntryState::Added;
88 entry.size = SIZE_NON_NORMAL;
89 entry.mtime = MTIME_UNSET;
90 } else if merged {
91 assert!(!possibly_dirty);
92 assert!(!from_p2);
93 entry.state = EntryState::Merged;
94 entry.size = SIZE_FROM_OTHER_PARENT;
95 entry.mtime = MTIME_UNSET;
96 } else if from_p2 {
97 assert!(!possibly_dirty);
98 entry.state = EntryState::Normal;
99 entry.size = SIZE_FROM_OTHER_PARENT;
100 entry.mtime = MTIME_UNSET;
101 } else if possibly_dirty {
102 entry.state = EntryState::Normal;
103 entry.size = SIZE_NON_NORMAL;
104 entry.mtime = MTIME_UNSET;
105 } else {
106 entry.state = EntryState::Normal;
107 entry.size = entry.size & V1_RANGEMASK;
108 entry.mtime = entry.mtime & V1_RANGEMASK;
109 }
110 let old_state = match self.get(filename) {
111 Some(e) => e.state,
112 None => EntryState::Unknown,
113 };
114 if old_state == EntryState::Unknown || old_state == EntryState::Removed
115 {
116 if let Some(ref mut dirs) = self.dirs {
117 dirs.add_path(filename)?;
118 }
119 }
120 if old_state == EntryState::Unknown {
121 if let Some(ref mut all_dirs) = self.all_dirs {
122 all_dirs.add_path(filename)?;
123 }
124 }
125 self.state_map.insert(filename.to_owned(), entry.to_owned());
126
127 if entry.is_non_normal() {
128 self.get_non_normal_other_parent_entries()
129 .0
130 .insert(filename.to_owned());
131 }
132
133 if entry.is_from_other_parent() {
134 self.get_non_normal_other_parent_entries()
135 .1
136 .insert(filename.to_owned());
137 }
138 Ok(())
139 }
140
141 /// Mark a file as removed in the dirstate.
142 ///
143 /// The `size` parameter is used to store sentinel values that indicate
144 /// the file's previous state. In the future, we should refactor this
145 /// to be more explicit about what that state is.
146 pub fn remove_file(
147 &mut self,
148 filename: &HgPath,
149 in_merge: bool,
150 ) -> Result<(), DirstateError> {
151 let old_entry_opt = self.get(filename);
152 let old_state = match old_entry_opt {
153 Some(e) => e.state,
154 None => EntryState::Unknown,
155 };
156 let mut size = 0;
157 if in_merge {
158 // XXX we should not be able to have 'm' state and 'FROM_P2' if not
159 // during a merge. So I (marmoute) am not sure we need the
160 // conditional at all. Double-checking this with an assert
161 // would be nice.
162 if let Some(old_entry) = old_entry_opt {
163 // backup the previous state
164 if old_entry.state == EntryState::Merged {
165 size = SIZE_NON_NORMAL;
166 } else if old_entry.state == EntryState::Normal
167 && old_entry.size == SIZE_FROM_OTHER_PARENT
168 {
169 // other parent
170 size = SIZE_FROM_OTHER_PARENT;
171 self.get_non_normal_other_parent_entries()
172 .1
173 .insert(filename.to_owned());
174 }
175 }
176 }
177 if old_state != EntryState::Unknown && old_state != EntryState::Removed
178 {
179 if let Some(ref mut dirs) = self.dirs {
180 dirs.delete_path(filename)?;
181 }
182 }
183 if old_state == EntryState::Unknown {
184 if let Some(ref mut all_dirs) = self.all_dirs {
185 all_dirs.add_path(filename)?;
186 }
187 }
188 if size == 0 {
189 self.copy_map.remove(filename);
190 }
191
192 self.state_map.insert(
193 filename.to_owned(),
194 DirstateEntry {
195 state: EntryState::Removed,
196 mode: 0,
197 size,
198 mtime: 0,
199 },
200 );
201 self.get_non_normal_other_parent_entries()
202 .0
203 .insert(filename.to_owned());
204 Ok(())
205 }
206
207 /// Remove a file from the dirstate.
208 /// Returns `true` if the file was previously recorded.
209 pub fn drop_file(
210 &mut self,
211 filename: &HgPath,
212 ) -> Result<bool, DirstateError> {
213 let old_state = match self.get(filename) {
214 Some(e) => e.state,
215 None => EntryState::Unknown,
216 };
217 let exists = self.state_map.remove(filename).is_some();
218
219 if exists {
220 if old_state != EntryState::Removed {
221 if let Some(ref mut dirs) = self.dirs {
222 dirs.delete_path(filename)?;
223 }
224 }
225 if let Some(ref mut all_dirs) = self.all_dirs {
226 all_dirs.delete_path(filename)?;
227 }
228 }
229 self.get_non_normal_other_parent_entries()
230 .0
231 .remove(filename);
232
233 Ok(exists)
234 }
235
236 pub fn clear_ambiguous_times(
237 &mut self,
238 filenames: Vec<HgPathBuf>,
239 now: i32,
240 ) {
241 for filename in filenames {
242 if let Some(entry) = self.state_map.get_mut(&filename) {
243 if entry.clear_ambiguous_mtime(now) {
244 self.get_non_normal_other_parent_entries()
245 .0
246 .insert(filename.to_owned());
247 }
248 }
249 }
250 }
251
252 pub fn non_normal_entries_remove(
253 &mut self,
254 key: impl AsRef<HgPath>,
255 ) -> bool {
256 self.get_non_normal_other_parent_entries()
257 .0
258 .remove(key.as_ref())
259 }
260
261 pub fn non_normal_entries_add(&mut self, key: impl AsRef<HgPath>) {
262 self.get_non_normal_other_parent_entries()
263 .0
264 .insert(key.as_ref().into());
265 }
266
267 pub fn non_normal_entries_union(
268 &mut self,
269 other: HashSet<HgPathBuf>,
270 ) -> Vec<HgPathBuf> {
271 self.get_non_normal_other_parent_entries()
272 .0
273 .union(&other)
274 .map(ToOwned::to_owned)
275 .collect()
276 }
277
278 pub fn get_non_normal_other_parent_entries(
279 &mut self,
280 ) -> (&mut HashSet<HgPathBuf>, &mut HashSet<HgPathBuf>) {
281 self.set_non_normal_other_parent_entries(false);
282 (
283 self.non_normal_set.as_mut().unwrap(),
284 self.other_parent_set.as_mut().unwrap(),
285 )
286 }
287
288 /// Useful to get immutable references to those sets in contexts where
289 /// you only have an immutable reference to the `DirstateMap`, like when
290 /// sharing references with Python.
291 ///
292 /// TODO, get rid of this along with the other "setter/getter" stuff when
293 /// a nice typestate plan is defined.
294 ///
295 /// # Panics
296 ///
297 /// Will panic if either set is `None`.
298 pub fn get_non_normal_other_parent_entries_panic(
299 &self,
300 ) -> (&HashSet<HgPathBuf>, &HashSet<HgPathBuf>) {
301 (
302 self.non_normal_set.as_ref().unwrap(),
303 self.other_parent_set.as_ref().unwrap(),
304 )
305 }
306
307 pub fn set_non_normal_other_parent_entries(&mut self, force: bool) {
308 if !force
309 && self.non_normal_set.is_some()
310 && self.other_parent_set.is_some()
311 {
312 return;
313 }
314 let mut non_normal = HashSet::new();
315 let mut other_parent = HashSet::new();
316
317 for (filename, entry) in self.state_map.iter() {
318 if entry.is_non_normal() {
319 non_normal.insert(filename.to_owned());
320 }
321 if entry.is_from_other_parent() {
322 other_parent.insert(filename.to_owned());
323 }
324 }
325 self.non_normal_set = Some(non_normal);
326 self.other_parent_set = Some(other_parent);
327 }
328
329 /// Both of these setters and their uses appear to be the simplest way to
330 /// emulate a Python lazy property, but they are ugly and unidiomatic.
331 /// TODO One day, rewriting this struct using the typestate might be a
332 /// good idea.
333 pub fn set_all_dirs(&mut self) -> Result<(), DirstateError> {
334 if self.all_dirs.is_none() {
335 self.all_dirs = Some(DirsMultiset::from_dirstate(
336 self.state_map.iter().map(|(k, v)| Ok((k, *v))),
337 None,
338 )?);
339 }
340 Ok(())
341 }
342
343 pub fn set_dirs(&mut self) -> Result<(), DirstateError> {
344 if self.dirs.is_none() {
345 self.dirs = Some(DirsMultiset::from_dirstate(
346 self.state_map.iter().map(|(k, v)| Ok((k, *v))),
347 Some(EntryState::Removed),
348 )?);
349 }
350 Ok(())
351 }
352
353 pub fn has_tracked_dir(
354 &mut self,
355 directory: &HgPath,
356 ) -> Result<bool, DirstateError> {
357 self.set_dirs()?;
358 Ok(self.dirs.as_ref().unwrap().contains(directory))
359 }
360
361 pub fn has_dir(
362 &mut self,
363 directory: &HgPath,
364 ) -> Result<bool, DirstateError> {
365 self.set_all_dirs()?;
366 Ok(self.all_dirs.as_ref().unwrap().contains(directory))
367 }
368
369 #[timed]
370 pub fn read(
371 &mut self,
372 file_contents: &[u8],
373 ) -> Result<Option<DirstateParents>, DirstateError> {
374 if file_contents.is_empty() {
375 return Ok(None);
376 }
377
378 let (parents, entries, copies) = parse_dirstate(file_contents)?;
379 self.state_map.extend(
380 entries
381 .into_iter()
382 .map(|(path, entry)| (path.to_owned(), entry)),
383 );
384 self.copy_map.extend(
385 copies
386 .into_iter()
387 .map(|(path, copy)| (path.to_owned(), copy.to_owned())),
388 );
389 Ok(Some(parents.clone()))
390 }
391
392 pub fn pack(
393 &mut self,
394 parents: DirstateParents,
395 now: Timestamp,
396 ) -> Result<Vec<u8>, DirstateError> {
397 let packed =
398 pack_dirstate(&mut self.state_map, &self.copy_map, parents, now)?;
399
400 self.set_non_normal_other_parent_entries(true);
401 Ok(packed)
402 }
403 }
404
405 #[cfg(test)]
406 mod tests {
407 use super::*;
408
409 #[test]
410 fn test_dirs_multiset() {
411 let mut map = DirstateMap::new();
412 assert!(map.dirs.is_none());
413 assert!(map.all_dirs.is_none());
414
415 assert_eq!(map.has_dir(HgPath::new(b"nope")).unwrap(), false);
416 assert!(map.all_dirs.is_some());
417 assert!(map.dirs.is_none());
418
419 assert_eq!(map.has_tracked_dir(HgPath::new(b"nope")).unwrap(), false);
420 assert!(map.dirs.is_some());
421 }
422
423 #[test]
424 fn test_add_file() {
425 let mut map = DirstateMap::new();
426
427 assert_eq!(0, map.len());
428
429 map.add_file(
430 HgPath::new(b"meh"),
431 DirstateEntry {
432 state: EntryState::Normal,
433 mode: 1337,
434 mtime: 1337,
435 size: 1337,
436 },
437 false,
438 false,
439 false,
440 false,
441 )
442 .unwrap();
443
444 assert_eq!(1, map.len());
445 assert_eq!(0, map.get_non_normal_other_parent_entries().0.len());
446 assert_eq!(0, map.get_non_normal_other_parent_entries().1.len());
447 }
448
449 #[test]
450 fn test_non_normal_other_parent_entries() {
451 let mut map: DirstateMap = [
452 (b"f1", (EntryState::Removed, 1337, 1337, 1337)),
453 (b"f2", (EntryState::Normal, 1337, 1337, -1)),
454 (b"f3", (EntryState::Normal, 1337, 1337, 1337)),
455 (b"f4", (EntryState::Normal, 1337, -2, 1337)),
456 (b"f5", (EntryState::Added, 1337, 1337, 1337)),
457 (b"f6", (EntryState::Added, 1337, 1337, -1)),
458 (b"f7", (EntryState::Merged, 1337, 1337, -1)),
459 (b"f8", (EntryState::Merged, 1337, 1337, 1337)),
460 (b"f9", (EntryState::Merged, 1337, -2, 1337)),
461 (b"fa", (EntryState::Added, 1337, -2, 1337)),
462 (b"fb", (EntryState::Removed, 1337, -2, 1337)),
463 ]
464 .iter()
465 .map(|(fname, (state, mode, size, mtime))| {
466 (
467 HgPathBuf::from_bytes(fname.as_ref()),
468 DirstateEntry {
469 state: *state,
470 mode: *mode,
471 size: *size,
472 mtime: *mtime,
473 },
474 )
475 })
476 .collect();
477
478 let mut non_normal = [
479 b"f1", b"f2", b"f5", b"f6", b"f7", b"f8", b"f9", b"fa", b"fb",
480 ]
481 .iter()
482 .map(|x| HgPathBuf::from_bytes(x.as_ref()))
483 .collect();
484
485 let mut other_parent = HashSet::new();
486 other_parent.insert(HgPathBuf::from_bytes(b"f4"));
487 let entries = map.get_non_normal_other_parent_entries();
488
489 assert_eq!(
490 (&mut non_normal, &mut other_parent),
491 (entries.0, entries.1)
492 );
493 }
494 }
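A pattern worth noting in the deleted map above is the pair of lazily derived caches (`non_normal_set`, `other_parent_set`): `None` until first requested, updated in place on mutation, and rebuilt on demand. A minimal Python sketch of that lazy-cache idea, with illustrative names only:

# Lazily derived set, mirroring non_normal_set in the deleted Rust map:
# None means "not computed yet"; mutations either update it in place or
# leave it to be rebuilt from the authoritative map on the next read.
class DirstateLike(object):
    def __init__(self):
        self.state_map = {}          # path -> state byte
        self._non_normal = None      # cached {path} where state != b'n'

    def non_normal(self):
        if self._non_normal is None:
            self._non_normal = {
                path for path, state in self.state_map.items()
                if state != b'n'
            }
        return self._non_normal

    def set_state(self, path, state):
        self.state_map[path] = state
        if self._non_normal is not None:     # keep the cache coherent
            if state != b'n':
                self._non_normal.add(path)
            else:
                self._non_normal.discard(path)

d = DirstateLike()
d.set_state(b'f1', b'a')
d.set_state(b'f2', b'n')
assert d.non_normal() == {b'f1'}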
@@ -1,556 +0,0 b''
1 use std::path::PathBuf;
2
3 use crate::dirstate::parsers::Timestamp;
4 use crate::dirstate_tree::on_disk::DirstateV2ParseError;
5 use crate::matchers::Matcher;
6 use crate::utils::hg_path::{HgPath, HgPathBuf};
7 use crate::CopyMapIter;
8 use crate::DirstateEntry;
9 use crate::DirstateError;
10 use crate::DirstateMap;
11 use crate::DirstateParents;
12 use crate::DirstateStatus;
13 use crate::PatternFileWarning;
14 use crate::StateMapIter;
15 use crate::StatusError;
16 use crate::StatusOptions;
17
18 /// `rust/hg-cpython/src/dirstate/dirstate_map.rs` implements in Rust a
19 /// `DirstateMap` Python class that wraps `Box<dyn DirstateMapMethods + Send>`,
20 /// a trait object of this trait. Except for constructors, this trait defines
21 /// all APIs that the class needs to interact with its inner dirstate map.
22 ///
23 /// A trait object is used to support two different concrete types:
24 ///
25 /// * `rust/hg-core/src/dirstate/dirstate_map.rs` defines the "flat dirstate
26 /// map" which is based on a few large `HgPath`-keyed `HashMap` and `HashSet`
27 /// fields.
28 /// * `rust/hg-core/src/dirstate_tree/dirstate_map.rs` defines the "tree
29 /// dirstate map" based on a tree data struture with nodes for directories
30 /// containing child nodes for their files and sub-directories. This tree
31 /// enables a more efficient algorithm for `hg status`, but its details are
32 /// abstracted in this trait.
33 ///
34 /// The dirstate map associates paths of files in the working directory to
35 /// various information about the state of those files.
36 pub trait DirstateMapMethods {
37 /// Remove information about all files in this map
38 fn clear(&mut self);
39
40 fn set_v1(&mut self, filename: &HgPath, entry: DirstateEntry);
41
42 /// Add or change the information associated to a given file.
43 ///
44 /// `old_state` is the state in the entry that `get` would have returned
45 /// before this call, or `EntryState::Unknown` if there was no such entry.
46 ///
47 /// `entry.state` should never be `EntryState::Unknown`.
48 fn add_file(
49 &mut self,
50 filename: &HgPath,
51 entry: DirstateEntry,
52 added: bool,
53 merged: bool,
54 from_p2: bool,
55 possibly_dirty: bool,
56 ) -> Result<(), DirstateError>;
57
58 /// Mark a file as "removed" (as in `hg rm`).
59 ///
60 /// `old_state` is the state in the entry that `get` would have returned
61 /// before this call, or `EntryState::Unknown` if there was no such entry.
62 ///
63 /// `size` is not actually a size but the 0 or -1 or -2 value that would be
64 /// put in the size field in the dirstate-v1 format.
65 fn remove_file(
66 &mut self,
67 filename: &HgPath,
68 in_merge: bool,
69 ) -> Result<(), DirstateError>;
70
71 /// Drop information about this file from the map if any, and return
72 /// whether there was any.
73 ///
74 /// `get` will now return `None` for this filename.
75 ///
76 /// `old_state` is the state in the entry that `get` would have returned
77 /// before this call, or `EntryState::Unknown` if there was no such entry.
78 fn drop_file(&mut self, filename: &HgPath) -> Result<bool, DirstateError>;
79
80 /// Among given files, mark the stored `mtime` as ambiguous if there is one
81 /// (if `state == EntryState::Normal`) equal to the given current Unix
82 /// timestamp.
83 fn clear_ambiguous_times(
84 &mut self,
85 filenames: Vec<HgPathBuf>,
86 now: i32,
87 ) -> Result<(), DirstateV2ParseError>;
88
89 /// Return whether the map has a "non-normal" entry for the given
90 /// filename. That is, any entry with a `state` other than
91 /// `EntryState::Normal` or with an ambiguous `mtime`.
92 fn non_normal_entries_contains(
93 &mut self,
94 key: &HgPath,
95 ) -> Result<bool, DirstateV2ParseError>;
96
97 /// Mark the given path as "normal" file. This is only relevant in the flat
98 /// dirstate map where there is a separate `HashSet` that needs to be kept
99 /// up to date.
100 /// Returns whether the key was present in the set.
101 fn non_normal_entries_remove(&mut self, key: &HgPath) -> bool;
102
103 /// Mark the given path as "non-normal" file.
104 /// This is only relevant in the flat dirstate map where there is a
105 /// separate `HashSet` that needs to be kept up to date.
106 fn non_normal_entries_add(&mut self, key: &HgPath);
107
108 /// Return an iterator of paths whose respective entry are either
109 /// "non-normal" (see `non_normal_entries_contains`) or "from other
110 /// parent".
111 ///
112 /// If that information is cached, create the cache as needed.
113 ///
114 /// "From other parent" is defined as `state == Normal && size == -2`.
115 ///
116 /// Because parse errors can happen during iteration, the iterated items
117 /// are `Result`s.
118 fn non_normal_or_other_parent_paths(
119 &mut self,
120 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>;
121
122 /// Create the cache for `non_normal_or_other_parent_paths` if needed.
123 ///
124 /// If `force` is true, the cache is re-created even if it already exists.
125 fn set_non_normal_other_parent_entries(&mut self, force: bool);
126
127 /// Return an iterator of paths whose respective entry are "non-normal"
128 /// (see `non_normal_entries_contains`).
129 ///
130 /// If that information is cached, create the cache as needed.
131 ///
132 /// Because parse errors can happen during iteration, the iterated items
133 /// are `Result`s.
134 fn iter_non_normal_paths(
135 &mut self,
136 ) -> Box<
137 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
138 >;
139
140 /// Same as `iter_non_normal_paths`, but takes `&self` instead of `&mut
141 /// self`.
142 ///
143 /// Panics if a cache is necessary but does not exist yet.
144 fn iter_non_normal_paths_panic(
145 &self,
146 ) -> Box<
147 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
148 >;
149
150 /// Return an iterator of paths whose respective entry are "from other
151 /// parent".
152 ///
153 /// If that information is cached, create the cache as needed.
154 ///
155 /// "From other parent" is defined as `state == Normal && size == -2`.
156 ///
157 /// Because parse errors can happen during iteration, the iterated items
158 /// are `Result`s.
159 fn iter_other_parent_paths(
160 &mut self,
161 ) -> Box<
162 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
163 >;
164
165 /// Returns whether the sub-tree rooted at the given directory contains any
166 /// tracked file.
167 ///
168 /// A file is tracked if it has a `state` other than `EntryState::Removed`.
169 fn has_tracked_dir(
170 &mut self,
171 directory: &HgPath,
172 ) -> Result<bool, DirstateError>;
173
174 /// Returns whether the sub-tree rooted at the given directory contains any
175 /// file with a dirstate entry.
176 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError>;
177
178 /// Clear mtimes that are ambiguous with `now` (similar to
179 /// `clear_ambiguous_times` but for all files in the dirstate map), and
180 /// serialize bytes to write the `.hg/dirstate` file to disk in dirstate-v1
181 /// format.
182 fn pack_v1(
183 &mut self,
184 parents: DirstateParents,
185 now: Timestamp,
186 ) -> Result<Vec<u8>, DirstateError>;
187
188 /// Clear mtimes that are ambiguous with `now` (similar to
189 /// `clear_ambiguous_times` but for all files in the dirstate map), and
190 /// serialize bytes to write a dirstate data file to disk in dirstate-v2
191 /// format.
192 ///
193 /// Returns new data and metadata together with whether that data should be
194 /// appended to the existing data file whose content is at
195 /// `self.on_disk` (true), instead of written to a new data file
196 /// (false).
197 ///
198 /// Note: this is only supported by the tree dirstate map.
199 fn pack_v2(
200 &mut self,
201 now: Timestamp,
202 can_append: bool,
203 ) -> Result<(Vec<u8>, Vec<u8>, bool), DirstateError>;
204
205 /// Run the status algorithm.
206 ///
207 /// This is not semantically a method of the dirstate map, but a different
208 /// algorithm is used for the flat vs. tree dirstate map, so having it in
209 /// this trait enables the same dynamic dispatch as with other methods.
210 fn status<'a>(
211 &'a mut self,
212 matcher: &'a (dyn Matcher + Sync),
213 root_dir: PathBuf,
214 ignore_files: Vec<PathBuf>,
215 options: StatusOptions,
216 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>;
217
218 /// Returns how many files in the dirstate map have a recorded copy source.
219 fn copy_map_len(&self) -> usize;
220
221 /// Returns an iterator of `(path, copy_source)` for all files that have a
222 /// copy source.
223 fn copy_map_iter(&self) -> CopyMapIter<'_>;
224
225 /// Returns whether the given file has a copy source.
226 fn copy_map_contains_key(
227 &self,
228 key: &HgPath,
229 ) -> Result<bool, DirstateV2ParseError>;
230
231 /// Returns the copy source for the given file.
232 fn copy_map_get(
233 &self,
234 key: &HgPath,
235 ) -> Result<Option<&HgPath>, DirstateV2ParseError>;
236
237 /// Removes the recorded copy source if any for the given file, and returns
238 /// it.
239 fn copy_map_remove(
240 &mut self,
241 key: &HgPath,
242 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError>;
243
244 /// Set the given `value` copy source for the given `key` file.
245 fn copy_map_insert(
246 &mut self,
247 key: HgPathBuf,
248 value: HgPathBuf,
249 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError>;
250
251 /// Returns the number of files that have an entry.
252 fn len(&self) -> usize;
253
254 /// Returns whether the given file has an entry.
255 fn contains_key(&self, key: &HgPath)
256 -> Result<bool, DirstateV2ParseError>;
257
258 /// Returns the entry, if any, for the given file.
259 fn get(
260 &self,
261 key: &HgPath,
262 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError>;
263
264 /// Returns a `(path, entry)` iterator of files that have an entry.
265 ///
266 /// Because parse errors can happen during iteration, the iterated items
267 /// are `Result`s.
268 fn iter(&self) -> StateMapIter<'_>;
269
270 /// Returns an iterator of tracked directories.
271 ///
272 /// These are the paths for which `has_tracked_dir` would return true;
273 /// in other words, the union of the ancestor paths of all paths that
274 /// have an associated entry in a "tracked" state in this dirstate map.
275 ///
276 /// Because parse errors can happen during iteration, the iterated items
277 /// are `Result`s.
278 fn iter_tracked_dirs(
279 &mut self,
280 ) -> Result<
281 Box<
282 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>>
283 + Send
284 + '_,
285 >,
286 DirstateError,
287 >;
288
289 /// Return an iterator of `(path, (state, mode, size, mtime))` for every
290 /// node stored in this dirstate map, for the purpose of the `hg
291 /// debugdirstate` command.
292 ///
293 /// For nodes that don’t have an entry, `state` is the ASCII space.
294 /// An `mtime` may still be present. It is used to optimize `status`.
295 ///
296 /// Because parse errors can happen during iteration, the iterated items
297 /// are `Result`s.
298 fn debug_iter(
299 &self,
300 ) -> Box<
301 dyn Iterator<
302 Item = Result<
303 (&HgPath, (u8, i32, i32, i32)),
304 DirstateV2ParseError,
305 >,
306 > + Send
307 + '_,
308 >;
309 }
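The trait above exists so the flat and tree dirstate maps can be driven interchangeably through a trait object. As a hedged illustration, the helper below is hypothetical (not part of this change) and relies only on methods declared in the trait:

fn count_copies(
    map: &dyn DirstateMapMethods,
) -> Result<usize, DirstateV2ParseError> {
    // Walk all recorded copy sources through dynamic dispatch.
    let mut copies = 0;
    for item in map.copy_map_iter() {
        let (_path, _source) = item?;
        copies += 1;
    }
    // `copy_map_len` is documented above to report the same count.
    assert_eq!(copies, map.copy_map_len());
    Ok(copies)
}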
310
311 impl DirstateMapMethods for DirstateMap {
312 fn clear(&mut self) {
313 self.clear()
314 }
315
316 /// Used to set a value directly.
317 ///
318 /// XXX This is temporary during a refactor of the V1 dirstate and will
319 /// disappear shortly.
320 fn set_v1(&mut self, filename: &HgPath, entry: DirstateEntry) {
321 self.set_v1_inner(filename, entry)
322 }
323
324 fn add_file(
325 &mut self,
326 filename: &HgPath,
327 entry: DirstateEntry,
328 added: bool,
329 merged: bool,
330 from_p2: bool,
331 possibly_dirty: bool,
332 ) -> Result<(), DirstateError> {
333 self.add_file(filename, entry, added, merged, from_p2, possibly_dirty)
334 }
335
336 fn remove_file(
337 &mut self,
338 filename: &HgPath,
339 in_merge: bool,
340 ) -> Result<(), DirstateError> {
341 self.remove_file(filename, in_merge)
342 }
343
344 fn drop_file(&mut self, filename: &HgPath) -> Result<bool, DirstateError> {
345 self.drop_file(filename)
346 }
347
348 fn clear_ambiguous_times(
349 &mut self,
350 filenames: Vec<HgPathBuf>,
351 now: i32,
352 ) -> Result<(), DirstateV2ParseError> {
353 Ok(self.clear_ambiguous_times(filenames, now))
354 }
355
356 fn non_normal_entries_contains(
357 &mut self,
358 key: &HgPath,
359 ) -> Result<bool, DirstateV2ParseError> {
360 let (non_normal, _other_parent) =
361 self.get_non_normal_other_parent_entries();
362 Ok(non_normal.contains(key))
363 }
364
365 fn non_normal_entries_remove(&mut self, key: &HgPath) -> bool {
366 self.non_normal_entries_remove(key)
367 }
368
369 fn non_normal_entries_add(&mut self, key: &HgPath) {
370 self.non_normal_entries_add(key)
371 }
372
373 fn non_normal_or_other_parent_paths(
374 &mut self,
375 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
376 {
377 let (non_normal, other_parent) =
378 self.get_non_normal_other_parent_entries();
379 Box::new(non_normal.union(other_parent).map(|p| Ok(&**p)))
380 }
381
382 fn set_non_normal_other_parent_entries(&mut self, force: bool) {
383 self.set_non_normal_other_parent_entries(force)
384 }
385
386 fn iter_non_normal_paths(
387 &mut self,
388 ) -> Box<
389 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
390 > {
391 let (non_normal, _other_parent) =
392 self.get_non_normal_other_parent_entries();
393 Box::new(non_normal.iter().map(|p| Ok(&**p)))
394 }
395
396 fn iter_non_normal_paths_panic(
397 &self,
398 ) -> Box<
399 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
400 > {
401 let (non_normal, _other_parent) =
402 self.get_non_normal_other_parent_entries_panic();
403 Box::new(non_normal.iter().map(|p| Ok(&**p)))
404 }
405
406 fn iter_other_parent_paths(
407 &mut self,
408 ) -> Box<
409 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
410 > {
411 let (_non_normal, other_parent) =
412 self.get_non_normal_other_parent_entries();
413 Box::new(other_parent.iter().map(|p| Ok(&**p)))
414 }
415
416 fn has_tracked_dir(
417 &mut self,
418 directory: &HgPath,
419 ) -> Result<bool, DirstateError> {
420 self.has_tracked_dir(directory)
421 }
422
423 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
424 self.has_dir(directory)
425 }
426
427 fn pack_v1(
428 &mut self,
429 parents: DirstateParents,
430 now: Timestamp,
431 ) -> Result<Vec<u8>, DirstateError> {
432 self.pack(parents, now)
433 }
434
435 fn pack_v2(
436 &mut self,
437 _now: Timestamp,
438 _can_append: bool,
439 ) -> Result<(Vec<u8>, Vec<u8>, bool), DirstateError> {
440 panic!(
441 "should have used dirstate_tree::DirstateMap to use the v2 format"
442 )
443 }
444
445 fn status<'a>(
446 &'a mut self,
447 matcher: &'a (dyn Matcher + Sync),
448 root_dir: PathBuf,
449 ignore_files: Vec<PathBuf>,
450 options: StatusOptions,
451 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
452 {
453 crate::status(self, matcher, root_dir, ignore_files, options)
454 }
455
456 fn copy_map_len(&self) -> usize {
457 self.copy_map.len()
458 }
459
460 fn copy_map_iter(&self) -> CopyMapIter<'_> {
461 Box::new(
462 self.copy_map
463 .iter()
464 .map(|(key, value)| Ok((&**key, &**value))),
465 )
466 }
467
468 fn copy_map_contains_key(
469 &self,
470 key: &HgPath,
471 ) -> Result<bool, DirstateV2ParseError> {
472 Ok(self.copy_map.contains_key(key))
473 }
474
475 fn copy_map_get(
476 &self,
477 key: &HgPath,
478 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
479 Ok(self.copy_map.get(key).map(|p| &**p))
480 }
481
482 fn copy_map_remove(
483 &mut self,
484 key: &HgPath,
485 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
486 Ok(self.copy_map.remove(key))
487 }
488
489 fn copy_map_insert(
490 &mut self,
491 key: HgPathBuf,
492 value: HgPathBuf,
493 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
494 Ok(self.copy_map.insert(key, value))
495 }
496
497 fn len(&self) -> usize {
498 (&**self).len()
499 }
500
501 fn contains_key(
502 &self,
503 key: &HgPath,
504 ) -> Result<bool, DirstateV2ParseError> {
505 Ok((&**self).contains_key(key))
506 }
507
508 fn get(
509 &self,
510 key: &HgPath,
511 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
512 Ok((&**self).get(key).cloned())
513 }
514
515 fn iter(&self) -> StateMapIter<'_> {
516 Box::new((&**self).iter().map(|(key, value)| Ok((&**key, *value))))
517 }
518
519 fn iter_tracked_dirs(
520 &mut self,
521 ) -> Result<
522 Box<
523 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>>
524 + Send
525 + '_,
526 >,
527 DirstateError,
528 > {
529 self.set_all_dirs()?;
530 Ok(Box::new(
531 self.all_dirs
532 .as_ref()
533 .unwrap()
534 .iter()
535 .map(|path| Ok(&**path)),
536 ))
537 }
538
539 fn debug_iter(
540 &self,
541 ) -> Box<
542 dyn Iterator<
543 Item = Result<
544 (&HgPath, (u8, i32, i32, i32)),
545 DirstateV2ParseError,
546 >,
547 > + Send
548 + '_,
549 > {
550 Box::new(
551 (&**self)
552 .iter()
553 .map(|(path, entry)| Ok((&**path, entry.debug_tuple()))),
554 )
555 }
556 }
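Note that the flat `DirstateMap` deliberately panics in `pack_v2`: only the tree dirstate map supports the v2 format. A sketch of the guard a caller would need (the `use_dirstate_v2` flag and the `pack` helper are hypothetical, for illustration only):

fn pack(
    map: &mut dyn DirstateMapMethods,
    parents: DirstateParents,
    now: Timestamp,
    use_dirstate_v2: bool,
) -> Result<Vec<u8>, DirstateError> {
    if use_dirstate_v2 {
        // Valid only when `map` is a tree dirstate map; the flat
        // implementation above panics here.
        let (data, _tree_metadata, _append) = map.pack_v2(now, false)?;
        Ok(data)
    } else {
        map.pack_v1(parents, now)
    }
}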
@@ -1,71 +0,0 b''
1 // dirstate_status.rs
2 //
3 // Copyright 2019, Raphaël Gomès <rgomes@octobus.net>
4 //
5 // This software may be used and distributed according to the terms of the
6 // GNU General Public License version 2 or any later version.
7
8 use crate::dirstate::status::{build_response, Dispatch, Status};
9 use crate::matchers::Matcher;
10 use crate::{DirstateStatus, StatusError};
11
12 impl<'a, M: ?Sized + Matcher + Sync> Status<'a, M> {
13 pub(crate) fn run(&self) -> Result<DirstateStatus<'a>, StatusError> {
14 let (traversed_sender, traversed_receiver) =
15 crossbeam_channel::unbounded();
16
17 // Step 1: check the files explicitly mentioned by the user
18 let (work, mut results) = self.walk_explicit(traversed_sender.clone());
19
20 if !work.is_empty() {
21 // Hashmaps are quite a bit slower to build than vecs, so only
22 // build it if needed.
23 let old_results = results.iter().cloned().collect();
24
25 // Step 2: recursively check the working directory for changes if
26 // needed
27 for (dir, dispatch) in work {
28 match dispatch {
29 Dispatch::Directory { was_file } => {
30 if was_file {
31 results.push((dir.to_owned(), Dispatch::Removed));
32 }
33 if self.options.list_ignored
34 || self.options.list_unknown
35 && !self.dir_ignore(&dir)
36 {
37 self.traverse(
38 &dir,
39 &old_results,
40 &mut results,
41 traversed_sender.clone(),
42 );
43 }
44 }
45 _ => {
46 unreachable!("There can only be directories in `work`")
47 }
48 }
49 }
50 }
51
52 if !self.matcher.is_exact() {
53 if self.options.list_unknown {
54 self.handle_unknowns(&mut results);
55 } else {
56 // TODO this is incorrect, see issue6335
57 // This requires a fix in both Python and Rust that can happen
58 // with other pending changes to `status`.
59 self.extend_from_dmap(&mut results);
60 }
61 }
62
63 drop(traversed_sender);
64 let traversed = traversed_receiver
65 .into_iter()
66 .map(std::borrow::Cow::Owned)
67 .collect();
68
69 Ok(build_response(results, traversed))
70 }
71 }
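The removed `run` method above funnels every traversed path back through a crossbeam channel; dropping the original sender once all clones are gone is what lets the receiver's iterator terminate. A standalone sketch of that pattern, assuming only the `crossbeam_channel` crate already used above:

fn channel_pattern() {
    let (traversed_sender, traversed_receiver) =
        crossbeam_channel::unbounded();
    for dir in ["a", "b"] {
        // Each traversal gets its own clone of the sender.
        let sender = traversed_sender.clone();
        sender.send(dir.to_owned()).unwrap();
    }
    // Drop the last sender so `into_iter` below can finish.
    drop(traversed_sender);
    let traversed: Vec<String> = traversed_receiver.into_iter().collect();
    assert_eq!(traversed.len(), 2);
}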
@@ -1,240 +0,0 b''
1 use crate::dirstate::owning::OwningDirstateMap;
2 use hg::dirstate::parsers::Timestamp;
3 use hg::dirstate_tree::dispatch::DirstateMapMethods;
4 use hg::dirstate_tree::on_disk::DirstateV2ParseError;
5 use hg::matchers::Matcher;
6 use hg::utils::hg_path::{HgPath, HgPathBuf};
7 use hg::CopyMapIter;
8 use hg::DirstateEntry;
9 use hg::DirstateError;
10 use hg::DirstateParents;
11 use hg::DirstateStatus;
12 use hg::PatternFileWarning;
13 use hg::StateMapIter;
14 use hg::StatusError;
15 use hg::StatusOptions;
16 use std::path::PathBuf;
17
18 impl DirstateMapMethods for OwningDirstateMap {
19 fn clear(&mut self) {
20 self.get_mut().clear()
21 }
22
23 fn set_v1(&mut self, filename: &HgPath, entry: DirstateEntry) {
24 self.get_mut().set_v1(filename, entry)
25 }
26
27 fn add_file(
28 &mut self,
29 filename: &HgPath,
30 entry: DirstateEntry,
31 added: bool,
32 merged: bool,
33 from_p2: bool,
34 possibly_dirty: bool,
35 ) -> Result<(), DirstateError> {
36 self.get_mut().add_file(
37 filename,
38 entry,
39 added,
40 merged,
41 from_p2,
42 possibly_dirty,
43 )
44 }
45
46 fn remove_file(
47 &mut self,
48 filename: &HgPath,
49 in_merge: bool,
50 ) -> Result<(), DirstateError> {
51 self.get_mut().remove_file(filename, in_merge)
52 }
53
54 fn drop_file(&mut self, filename: &HgPath) -> Result<bool, DirstateError> {
55 self.get_mut().drop_file(filename)
56 }
57
58 fn clear_ambiguous_times(
59 &mut self,
60 filenames: Vec<HgPathBuf>,
61 now: i32,
62 ) -> Result<(), DirstateV2ParseError> {
63 self.get_mut().clear_ambiguous_times(filenames, now)
64 }
65
66 fn non_normal_entries_contains(
67 &mut self,
68 key: &HgPath,
69 ) -> Result<bool, DirstateV2ParseError> {
70 self.get_mut().non_normal_entries_contains(key)
71 }
72
73 fn non_normal_entries_remove(&mut self, key: &HgPath) -> bool {
74 self.get_mut().non_normal_entries_remove(key)
75 }
76
77 fn non_normal_entries_add(&mut self, key: &HgPath) {
78 self.get_mut().non_normal_entries_add(key)
79 }
80
81 fn non_normal_or_other_parent_paths(
82 &mut self,
83 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
84 {
85 self.get_mut().non_normal_or_other_parent_paths()
86 }
87
88 fn set_non_normal_other_parent_entries(&mut self, force: bool) {
89 self.get_mut().set_non_normal_other_parent_entries(force)
90 }
91
92 fn iter_non_normal_paths(
93 &mut self,
94 ) -> Box<
95 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
96 > {
97 self.get_mut().iter_non_normal_paths()
98 }
99
100 fn iter_non_normal_paths_panic(
101 &self,
102 ) -> Box<
103 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
104 > {
105 self.get().iter_non_normal_paths_panic()
106 }
107
108 fn iter_other_parent_paths(
109 &mut self,
110 ) -> Box<
111 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
112 > {
113 self.get_mut().iter_other_parent_paths()
114 }
115
116 fn has_tracked_dir(
117 &mut self,
118 directory: &HgPath,
119 ) -> Result<bool, DirstateError> {
120 self.get_mut().has_tracked_dir(directory)
121 }
122
123 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
124 self.get_mut().has_dir(directory)
125 }
126
127 fn pack_v1(
128 &mut self,
129 parents: DirstateParents,
130 now: Timestamp,
131 ) -> Result<Vec<u8>, DirstateError> {
132 self.get_mut().pack_v1(parents, now)
133 }
134
135 fn pack_v2(
136 &mut self,
137 now: Timestamp,
138 can_append: bool,
139 ) -> Result<(Vec<u8>, Vec<u8>, bool), DirstateError> {
140 self.get_mut().pack_v2(now, can_append)
141 }
142
143 fn status<'a>(
144 &'a mut self,
145 matcher: &'a (dyn Matcher + Sync),
146 root_dir: PathBuf,
147 ignore_files: Vec<PathBuf>,
148 options: StatusOptions,
149 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
150 {
151 self.get_mut()
152 .status(matcher, root_dir, ignore_files, options)
153 }
154
155 fn copy_map_len(&self) -> usize {
156 self.get().copy_map_len()
157 }
158
159 fn copy_map_iter(&self) -> CopyMapIter<'_> {
160 self.get().copy_map_iter()
161 }
162
163 fn copy_map_contains_key(
164 &self,
165 key: &HgPath,
166 ) -> Result<bool, DirstateV2ParseError> {
167 self.get().copy_map_contains_key(key)
168 }
169
170 fn copy_map_get(
171 &self,
172 key: &HgPath,
173 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
174 self.get().copy_map_get(key)
175 }
176
177 fn copy_map_remove(
178 &mut self,
179 key: &HgPath,
180 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
181 self.get_mut().copy_map_remove(key)
182 }
183
184 fn copy_map_insert(
185 &mut self,
186 key: HgPathBuf,
187 value: HgPathBuf,
188 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
189 self.get_mut().copy_map_insert(key, value)
190 }
191
192 fn len(&self) -> usize {
193 self.get().len()
194 }
195
196 fn contains_key(
197 &self,
198 key: &HgPath,
199 ) -> Result<bool, DirstateV2ParseError> {
200 self.get().contains_key(key)
201 }
202
203 fn get(
204 &self,
205 key: &HgPath,
206 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
207 self.get().get(key)
208 }
209
210 fn iter(&self) -> StateMapIter<'_> {
211 self.get().iter()
212 }
213
214 fn iter_tracked_dirs(
215 &mut self,
216 ) -> Result<
217 Box<
218 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>>
219 + Send
220 + '_,
221 >,
222 DirstateError,
223 > {
224 self.get_mut().iter_tracked_dirs()
225 }
226
227 fn debug_iter(
228 &self,
229 ) -> Box<
230 dyn Iterator<
231 Item = Result<
232 (&HgPath, (u8, i32, i32, i32)),
233 DirstateV2ParseError,
234 >,
235 > + Send
236 + '_,
237 > {
238 self.get().debug_iter()
239 }
240 }
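Every method of the removed impl above is pure delegation: `OwningDirstateMap` owns the bytes backing the map and forwards each call through `get()` or `get_mut()`. A self-contained miniature of that forwarding pattern (all types below are illustrative, not from hg-core):

trait Counter {
    fn len(&self) -> usize;
    fn clear(&mut self);
}

struct Inner(Vec<u8>);

impl Counter for Inner {
    fn len(&self) -> usize {
        self.0.len()
    }
    fn clear(&mut self) {
        self.0.clear()
    }
}

// The wrapper owns `Inner` and re-exposes the same trait by delegating,
// mirroring how `OwningDirstateMap` uses `get`/`get_mut` above.
struct Owning(Inner);

impl Owning {
    fn get(&self) -> &Inner {
        &self.0
    }
    fn get_mut(&mut self) -> &mut Inner {
        &mut self.0
    }
}

impl Counter for Owning {
    fn len(&self) -> usize {
        self.get().len()
    }
    fn clear(&mut self) {
        self.get_mut().clear()
    }
}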
@@ -1,83 +0,0 b''
1 // non_normal_other_parent_entries.rs
2 //
3 // Copyright 2020 Raphaël Gomès <rgomes@octobus.net>
4 //
5 // This software may be used and distributed according to the terms of the
6 // GNU General Public License version 2 or any later version.
7
8 use cpython::{
9 exc::NotImplementedError, CompareOp, ObjectProtocol, PyBytes, PyClone,
10 PyErr, PyObject, PyResult, PyString, Python, PythonObject, ToPyObject,
11 UnsafePyLeaked,
12 };
13
14 use crate::dirstate::dirstate_map::v2_error;
15 use crate::dirstate::DirstateMap;
16 use hg::dirstate_tree::on_disk::DirstateV2ParseError;
17 use hg::utils::hg_path::HgPath;
18 use std::cell::RefCell;
19
20 py_class!(pub class NonNormalEntries |py| {
21 data dmap: DirstateMap;
22
23 def __contains__(&self, key: PyObject) -> PyResult<bool> {
24 self.dmap(py).non_normal_entries_contains(py, key)
25 }
26 def remove(&self, key: PyObject) -> PyResult<PyObject> {
27 self.dmap(py).non_normal_entries_remove(py, key)
28 }
29 def add(&self, key: PyObject) -> PyResult<PyObject> {
30 self.dmap(py).non_normal_entries_add(py, key)
31 }
32 def discard(&self, key: PyObject) -> PyResult<PyObject> {
33 self.dmap(py).non_normal_entries_discard(py, key)
34 }
35 def __richcmp__(&self, other: PyObject, op: CompareOp) -> PyResult<bool> {
36 match op {
37 CompareOp::Eq => self.is_equal_to(py, other),
38 CompareOp::Ne => Ok(!self.is_equal_to(py, other)?),
39 _ => Err(PyErr::new::<NotImplementedError, _>(py, ""))
40 }
41 }
42 def __repr__(&self) -> PyResult<PyString> {
43 self.dmap(py).non_normal_entries_display(py)
44 }
45
46 def __iter__(&self) -> PyResult<NonNormalEntriesIterator> {
47 self.dmap(py).non_normal_entries_iter(py)
48 }
49 });
50
51 impl NonNormalEntries {
52 pub fn from_inner(py: Python, dm: DirstateMap) -> PyResult<Self> {
53 Self::create_instance(py, dm)
54 }
55
56 fn is_equal_to(&self, py: Python, other: PyObject) -> PyResult<bool> {
57 for item in other.iter(py)? {
58 if !self.dmap(py).non_normal_entries_contains(py, item?)? {
59 return Ok(false);
60 }
61 }
62 Ok(true)
63 }
64
65 fn translate_key(
66 py: Python,
67 key: Result<&HgPath, DirstateV2ParseError>,
68 ) -> PyResult<Option<PyBytes>> {
69 let key = key.map_err(|e| v2_error(py, e))?;
70 Ok(Some(PyBytes::new(py, key.as_bytes())))
71 }
72 }
73
74 type NonNormalEntriesIter<'a> = Box<
75 dyn Iterator<Item = Result<&'a HgPath, DirstateV2ParseError>> + Send + 'a,
76 >;
77
78 py_shared_iterator!(
79 NonNormalEntriesIterator,
80 UnsafePyLeaked<NonNormalEntriesIter<'static>>,
81 NonNormalEntries::translate_key,
82 Option<PyBytes>
83 );
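One subtlety in the removed `is_equal_to` above: it only verifies that every item of `other` is contained in the set, i.e. containment in one direction, not full set equality. A plain-Rust sketch of the distinction, using standard-library sets:

use std::collections::HashSet;

fn one_sided(ours: &HashSet<String>, other: &HashSet<String>) -> bool {
    // What `is_equal_to` checks: other is a subset of ours.
    other.iter().all(|item| ours.contains(item))
}

fn full_equality(ours: &HashSet<String>, other: &HashSet<String>) -> bool {
    // True set equality also requires ours to be a subset of other.
    ours == other
}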
@@ -1,163 +0,0 b''
1 // parsers.rs
2 //
3 // Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
4 //
5 // This software may be used and distributed according to the terms of the
6 // GNU General Public License version 2 or any later version.
7
8 //! Bindings for the `hg::dirstate::parsers` module provided by the
9 //! `hg-core` package.
10 //!
11 //! From Python, this will be seen as `mercurial.rustext.parsers`
12 use cpython::{
13 exc, PyBytes, PyDict, PyErr, PyInt, PyModule, PyResult, PyTuple, Python,
14 PythonObject, ToPyObject,
15 };
16 use hg::{
17 dirstate::parsers::Timestamp, pack_dirstate, parse_dirstate,
18 utils::hg_path::HgPathBuf, DirstateEntry, DirstateParents, FastHashMap,
19 PARENT_SIZE,
20 };
21 use std::convert::TryInto;
22
23 use crate::dirstate::{extract_dirstate, make_dirstate_item};
24
25 fn parse_dirstate_wrapper(
26 py: Python,
27 dmap: PyDict,
28 copymap: PyDict,
29 st: PyBytes,
30 ) -> PyResult<PyTuple> {
31 match parse_dirstate(st.data(py)) {
32 Ok((parents, entries, copies)) => {
33 let dirstate_map: FastHashMap<HgPathBuf, DirstateEntry> = entries
34 .into_iter()
35 .map(|(path, entry)| (path.to_owned(), entry))
36 .collect();
37 let copy_map: FastHashMap<HgPathBuf, HgPathBuf> = copies
38 .into_iter()
39 .map(|(path, copy)| (path.to_owned(), copy.to_owned()))
40 .collect();
41
42 for (filename, entry) in &dirstate_map {
43 dmap.set_item(
44 py,
45 PyBytes::new(py, filename.as_bytes()),
46 make_dirstate_item(py, entry)?,
47 )?;
48 }
49 for (path, copy_path) in copy_map {
50 copymap.set_item(
51 py,
52 PyBytes::new(py, path.as_bytes()),
53 PyBytes::new(py, copy_path.as_bytes()),
54 )?;
55 }
56 Ok(dirstate_parents_to_pytuple(py, parents))
57 }
58 Err(e) => Err(PyErr::new::<exc::ValueError, _>(py, e.to_string())),
59 }
60 }
61
62 fn pack_dirstate_wrapper(
63 py: Python,
64 dmap: PyDict,
65 copymap: PyDict,
66 pl: PyTuple,
67 now: PyInt,
68 ) -> PyResult<PyBytes> {
69 let p1 = pl.get_item(py, 0).extract::<PyBytes>(py)?;
70 let p1: &[u8] = p1.data(py);
71 let p2 = pl.get_item(py, 1).extract::<PyBytes>(py)?;
72 let p2: &[u8] = p2.data(py);
73
74 let mut dirstate_map = extract_dirstate(py, &dmap)?;
75
76 let copies: Result<FastHashMap<HgPathBuf, HgPathBuf>, PyErr> = copymap
77 .items(py)
78 .iter()
79 .map(|(key, value)| {
80 Ok((
81 HgPathBuf::from_bytes(key.extract::<PyBytes>(py)?.data(py)),
82 HgPathBuf::from_bytes(value.extract::<PyBytes>(py)?.data(py)),
83 ))
84 })
85 .collect();
86
87 if p1.len() != PARENT_SIZE || p2.len() != PARENT_SIZE {
88 return Err(PyErr::new::<exc::ValueError, _>(
89 py,
90 "expected a 20-byte hash".to_string(),
91 ));
92 }
93
94 match pack_dirstate(
95 &mut dirstate_map,
96 &copies?,
97 DirstateParents {
98 p1: p1.try_into().unwrap(),
99 p2: p2.try_into().unwrap(),
100 },
101 Timestamp(now.as_object().extract::<i64>(py)?),
102 ) {
103 Ok(packed) => {
104 for (filename, entry) in dirstate_map.iter() {
105 dmap.set_item(
106 py,
107 PyBytes::new(py, filename.as_bytes()),
108 make_dirstate_item(py, &entry)?,
109 )?;
110 }
111 Ok(PyBytes::new(py, &packed))
112 }
113 Err(error) => {
114 Err(PyErr::new::<exc::ValueError, _>(py, error.to_string()))
115 }
116 }
117 }
118
119 /// Create the module, with `__package__` given from parent
120 pub fn init_parsers_module(py: Python, package: &str) -> PyResult<PyModule> {
121 let dotted_name = &format!("{}.parsers", package);
122 let m = PyModule::new(py, dotted_name)?;
123
124 m.add(py, "__package__", package)?;
125 m.add(py, "__doc__", "Parsers - Rust implementation")?;
126
127 m.add(
128 py,
129 "parse_dirstate",
130 py_fn!(
131 py,
132 parse_dirstate_wrapper(dmap: PyDict, copymap: PyDict, st: PyBytes)
133 ),
134 )?;
135 m.add(
136 py,
137 "pack_dirstate",
138 py_fn!(
139 py,
140 pack_dirstate_wrapper(
141 dmap: PyDict,
142 copymap: PyDict,
143 pl: PyTuple,
144 now: PyInt
145 )
146 ),
147 )?;
148
149 let sys = PyModule::import(py, "sys")?;
150 let sys_modules: PyDict = sys.get(py, "modules")?.extract(py)?;
151 sys_modules.set_item(py, dotted_name, &m)?;
152
153 Ok(m)
154 }
155
156 pub(crate) fn dirstate_parents_to_pytuple(
157 py: Python,
158 parents: &DirstateParents,
159 ) -> PyTuple {
160 let p1 = PyBytes::new(py, parents.p1.as_bytes());
161 let p2 = PyBytes::new(py, parents.p2.as_bytes());
162 (p1, p2).to_py_object(py)
163 }
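The two removed wrappers above are thin translations around `parse_dirstate` and `pack_dirstate` from hg-core. A hedged sketch of the same round trip in pure Rust, using only the calls and types visible in this file, simplifying errors to strings as the wrappers do, and assuming `DirstateParents` implements `Clone`:

fn roundtrip(st: &[u8], now: i64) -> Result<Vec<u8>, String> {
    let (parents, entries, copies) =
        parse_dirstate(st).map_err(|e| e.to_string())?;
    // Own the borrowed entries and copy sources, as the wrapper does.
    let mut dirstate_map: FastHashMap<HgPathBuf, DirstateEntry> = entries
        .into_iter()
        .map(|(path, entry)| (path.to_owned(), entry))
        .collect();
    let copies: FastHashMap<HgPathBuf, HgPathBuf> = copies
        .into_iter()
        .map(|(path, copy)| (path.to_owned(), copy.to_owned()))
        .collect();
    // `parents` is borrowed from `st`, so clone it back to a value.
    pack_dirstate(&mut dirstate_map, &copies, parents.clone(), Timestamp(now))
        .map_err(|e| e.to_string())
}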
@@ -1,22 +0,0 b''
1 $ cat >> $HGRCPATH << EOF
2 > [command-templates]
3 > log="{rev}:{node|short} ({phase}) [{tags} {bookmarks}] {desc|firstline}\n"
4 > [extensions]
5 > dirstateparanoidcheck = $TESTDIR/../contrib/dirstatenonnormalcheck.py
6 > [experimental]
7 > nonnormalparanoidcheck = True
8 > [devel]
9 > all-warnings=True
10 > EOF
11 $ mkcommit() {
12 > echo "$1" > "$1"
13 > hg add "$1"
14 > hg ci -m "add $1"
15 > }
16
17 $ hg init testrepo
18 $ cd testrepo
19 $ mkcommit a
20 $ mkcommit b
21 $ mkcommit c
22 $ hg status