branching: merge stable into default
Raphaël Gomès
r48855:37a41267 merge default
@@ -0,0 +1,190 @@
use crate::errors::HgError;
use std::convert::TryFrom;

#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum EntryState {
    Normal,
    Added,
    Removed,
    Merged,
}

/// The C implementation uses all signed types. This will be an issue
/// either when 4GB+ source files are commonplace or in 2038, whichever
/// comes first.
#[derive(Debug, PartialEq, Copy, Clone)]
pub struct DirstateEntry {
    state: EntryState,
    mode: i32,
    size: i32,
    mtime: i32,
}

pub const V1_RANGEMASK: i32 = 0x7FFFFFFF;

pub const MTIME_UNSET: i32 = -1;

/// A `DirstateEntry` with a size of `-2` means that it was merged from the
/// other parent. This allows revert to pick the right status back during a
/// merge.
pub const SIZE_FROM_OTHER_PARENT: i32 = -2;
/// A special value used for internal representation of a special case in
/// the dirstate v1 format.
pub const SIZE_NON_NORMAL: i32 = -1;

impl DirstateEntry {
    pub fn from_v1_data(
        state: EntryState,
        mode: i32,
        size: i32,
        mtime: i32,
    ) -> Self {
        Self {
            state,
            mode,
            size,
            mtime,
        }
    }

    /// Creates a new entry in "removed" state.
    ///
    /// `size` is expected to be zero, `SIZE_NON_NORMAL`, or
    /// `SIZE_FROM_OTHER_PARENT`
    pub fn new_removed(size: i32) -> Self {
        Self {
            state: EntryState::Removed,
            mode: 0,
            size,
            mtime: 0,
        }
    }

    /// TODO: refactor `DirstateMap::add_file` to not take a `DirstateEntry`
    /// parameter and remove this constructor
    pub fn new_for_add_file(mode: i32, size: i32, mtime: i32) -> Self {
        Self {
            // XXX Arbitrary default value since the value is determined later
            state: EntryState::Normal,
            mode,
            size,
            mtime,
        }
    }

    pub fn state(&self) -> EntryState {
        self.state
    }

    pub fn mode(&self) -> i32 {
        self.mode
    }

    pub fn size(&self) -> i32 {
        self.size
    }

    pub fn mtime(&self) -> i32 {
        self.mtime
    }

    /// Returns `(state, mode, size, mtime)` for the purpose of serialization
    /// in the dirstate-v1 format.
    ///
    /// This includes marker values such as `mtime == -1`. In the future we may
    /// want to not represent these cases that way in memory, but serialization
    /// will need to keep the same format.
    pub fn v1_data(&self) -> (u8, i32, i32, i32) {
        (self.state.into(), self.mode, self.size, self.mtime)
    }

    pub fn is_non_normal(&self) -> bool {
        self.state != EntryState::Normal || self.mtime == MTIME_UNSET
    }

    pub fn is_from_other_parent(&self) -> bool {
        self.state == EntryState::Normal && self.size == SIZE_FROM_OTHER_PARENT
    }

    // TODO: other platforms
    #[cfg(unix)]
    pub fn mode_changed(
        &self,
        filesystem_metadata: &std::fs::Metadata,
    ) -> bool {
        use std::os::unix::fs::MetadataExt;
        const EXEC_BIT_MASK: u32 = 0o100;
        let dirstate_exec_bit = (self.mode as u32) & EXEC_BIT_MASK;
        let fs_exec_bit = filesystem_metadata.mode() & EXEC_BIT_MASK;
        dirstate_exec_bit != fs_exec_bit
    }

    /// Returns a `(state, mode, size, mtime)` tuple as for
    /// `DirstateMapMethods::debug_iter`.
    pub fn debug_tuple(&self) -> (u8, i32, i32, i32) {
        (self.state.into(), self.mode, self.size, self.mtime)
    }

    pub fn mtime_is_ambiguous(&self, now: i32) -> bool {
        self.state == EntryState::Normal && self.mtime == now
    }

    pub fn clear_ambiguous_mtime(&mut self, now: i32) -> bool {
        let ambiguous = self.mtime_is_ambiguous(now);
        if ambiguous {
            // The file was last modified "simultaneously" with the current
            // write to dirstate (i.e. within the same second for file-
            // systems with a granularity of 1 sec). This commonly happens
            // for at least a couple of files on 'update'.
            // The user could change the file without changing its size
            // within the same second. Invalidate the file's mtime in
            // dirstate, forcing future 'status' calls to compare the
            // contents of the file if the size is the same. This prevents
            // mistakenly treating such files as clean.
            self.clear_mtime()
        }
        ambiguous
    }

    pub fn clear_mtime(&mut self) {
        self.mtime = -1;
    }
}

impl EntryState {
    pub fn is_tracked(self) -> bool {
        use EntryState::*;
        match self {
            Normal | Added | Merged => true,
            Removed => false,
        }
    }
}

impl TryFrom<u8> for EntryState {
    type Error = HgError;

    fn try_from(value: u8) -> Result<Self, Self::Error> {
        match value {
            b'n' => Ok(EntryState::Normal),
            b'a' => Ok(EntryState::Added),
            b'r' => Ok(EntryState::Removed),
            b'm' => Ok(EntryState::Merged),
            _ => Err(HgError::CorruptedRepository(format!(
                "Incorrect dirstate entry state {}",
                value
            ))),
        }
    }
}

impl Into<u8> for EntryState {
    fn into(self) -> u8 {
        match self {
            EntryState::Normal => b'n',
            EntryState::Added => b'a',
            EntryState::Removed => b'r',
            EntryState::Merged => b'm',
        }
    }
}
@@ -0,0 +1,79 @@
use crate::errors::HgError;
use crate::repo::Repo;
use crate::revlog::path_encode::path_encode;
use crate::revlog::revlog::{Revlog, RevlogError};
use crate::revlog::NodePrefix;
use crate::revlog::Revision;
use crate::utils::files::get_path_from_bytes;
use crate::utils::hg_path::HgPath;
use crate::utils::SliceExt;
use std::borrow::Cow;
use std::path::PathBuf;

/// A specialized `Revlog` to work with file data logs.
pub struct Filelog {
    /// The generic `revlog` format.
    revlog: Revlog,
}

impl Filelog {
    pub fn open(repo: &Repo, file_path: &HgPath) -> Result<Self, HgError> {
        let index_path = store_path(file_path, b".i");
        let data_path = store_path(file_path, b".d");
        let revlog = Revlog::open(repo, index_path, Some(&data_path))?;
        Ok(Self { revlog })
    }

    /// The given node ID is that of the file as found in a manifest, not of a
    /// changeset.
    pub fn data_for_node(
        &self,
        file_node: impl Into<NodePrefix>,
    ) -> Result<FilelogEntry, RevlogError> {
        let file_rev = self.revlog.rev_from_node(file_node.into())?;
        self.data_for_rev(file_rev)
    }

    /// The given revision is that of the file as found in a manifest, not of a
    /// changeset.
    pub fn data_for_rev(
        &self,
        file_rev: Revision,
    ) -> Result<FilelogEntry, RevlogError> {
        let data = self.revlog.get_rev_data(file_rev)?;
        Ok(FilelogEntry(data.into()))
    }
}

fn store_path(hg_path: &HgPath, suffix: &[u8]) -> PathBuf {
    let encoded_bytes =
        path_encode(&[b"data/", hg_path.as_bytes(), suffix].concat());
    get_path_from_bytes(&encoded_bytes).into()
}

pub struct FilelogEntry<'filelog>(Cow<'filelog, [u8]>);

impl<'filelog> FilelogEntry<'filelog> {
    /// Split into metadata and data
    pub fn split(&self) -> Result<(Option<&[u8]>, &[u8]), HgError> {
        const DELIMITER: &[u8; 2] = &[b'\x01', b'\n'];

        if let Some(rest) = self.0.drop_prefix(DELIMITER) {
            if let Some((metadata, data)) = rest.split_2_by_slice(DELIMITER) {
                Ok((Some(metadata), data))
            } else {
                Err(HgError::corrupted(
                    "Missing metadata end delimiter in filelog entry",
                ))
            }
        } else {
            Ok((None, &self.0))
        }
    }

    /// Returns the file contents at this revision, stripped of any metadata
    pub fn data(&self) -> Result<&[u8], HgError> {
        let (_metadata, data) = self.split()?;
        Ok(data)
    }
}
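For reference, a short sketch (plain Python, made-up values) of the envelope that `split` parses: optional metadata sits between two `\x01\n` delimiters at the very start of the revision text, with keys such as `copy`/`copyrev` used for rename tracking.

# the 40-hex copyrev below is a placeholder, not a real node
raw = (
    b"\x01\n"
    b"copy: old-name.txt\n"
    b"copyrev: 0123456789abcdef0123456789abcdef01234567\n"
    b"\x01\n"
    b"file contents"
)
metadata, data = raw[2:].split(b"\x01\n", 1)
assert data == b"file contents"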
@@ -0,0 +1,73 @@
use crate::errors::{HgError, IoErrorContext, IoResultExt};
use memmap2::{Mmap, MmapOptions};
use std::io::ErrorKind;
use std::path::{Path, PathBuf};

/// Filesystem access abstraction for the contents of a given "base" directory
#[derive(Clone, Copy)]
pub struct Vfs<'a> {
    pub(crate) base: &'a Path,
}

impl Vfs<'_> {
    pub fn join(&self, relative_path: impl AsRef<Path>) -> PathBuf {
        self.base.join(relative_path)
    }

    pub fn read(
        &self,
        relative_path: impl AsRef<Path>,
    ) -> Result<Vec<u8>, HgError> {
        let path = self.join(relative_path);
        std::fs::read(&path).when_reading_file(&path)
    }

    pub fn mmap_open(
        &self,
        relative_path: impl AsRef<Path>,
    ) -> Result<Mmap, HgError> {
        let path = self.base.join(relative_path);
        let file = std::fs::File::open(&path).when_reading_file(&path)?;
        // TODO: what are the safety requirements here?
        let mmap = unsafe { MmapOptions::new().map(&file) }
            .when_reading_file(&path)?;
        Ok(mmap)
    }

    pub fn rename(
        &self,
        relative_from: impl AsRef<Path>,
        relative_to: impl AsRef<Path>,
    ) -> Result<(), HgError> {
        let from = self.join(relative_from);
        let to = self.join(relative_to);
        std::fs::rename(&from, &to)
            .with_context(|| IoErrorContext::RenamingFile { from, to })
    }
}

fn fs_metadata(
    path: impl AsRef<Path>,
) -> Result<Option<std::fs::Metadata>, HgError> {
    let path = path.as_ref();
    match std::fs::metadata(path) {
        Ok(meta) => Ok(Some(meta)),
        Err(error) => match error.kind() {
            // TODO: when we require a Rust version where `NotADirectory` is
            // stable, invert this logic and return None for it and `NotFound`
            // and propagate any other error.
            ErrorKind::PermissionDenied => Err(error).with_context(|| {
                IoErrorContext::ReadingMetadata(path.to_owned())
            }),
            _ => Ok(None),
        },
    }
}

pub(crate) fn is_dir(path: impl AsRef<Path>) -> Result<bool, HgError> {
    Ok(fs_metadata(path)?.map_or(false, |meta| meta.is_dir()))
}

pub(crate) fn is_file(path: impl AsRef<Path>) -> Result<bool, HgError> {
    Ok(fs_metadata(path)?.map_or(false, |meta| meta.is_file()))
}
@@ -0,0 +1,56 @@
use cpython::{PyBytes, Python};
use stable_deref_trait::StableDeref;

/// Safe abstraction over a `PyBytes` together with the `&[u8]` slice
/// that borrows it. Implements `Deref<Target = [u8]>`.
///
/// Calling `PyBytes::data` requires a GIL marker but we want to access the
/// data in a thread that (ideally) does not need to acquire the GIL.
/// This type allows separating the call from the use.
///
/// It also enables using a (wrapped) `PyBytes` in GIL-unaware generic code.
pub struct PyBytesDeref {
    #[allow(unused)]
    keep_alive: PyBytes,

    /// Borrows the buffer inside `self.keep_alive`,
    /// but the borrow-checker cannot express self-referential structs.
    data: *const [u8],
}

impl PyBytesDeref {
    pub fn new(py: Python, bytes: PyBytes) -> Self {
        Self {
            data: bytes.data(py),
            keep_alive: bytes,
        }
    }

    pub fn unwrap(self) -> PyBytes {
        self.keep_alive
    }
}

impl std::ops::Deref for PyBytesDeref {
    type Target = [u8];

    fn deref(&self) -> &[u8] {
        // Safety: the raw pointer is valid as long as the PyBytes is still
        // alive, and the returned slice borrows `self`.
        unsafe { &*self.data }
    }
}

unsafe impl StableDeref for PyBytesDeref {}

fn require_send<T: Send>() {}

#[allow(unused)]
fn static_assert_pybytes_is_send() {
    require_send::<PyBytes>;
}

// Safety: PyBytes is Send. Raw pointers are not by default,
// but here sending one to another thread is fine since we ensure it stays
// valid.
unsafe impl Send for PyBytesDeref {}
@@ -0,0 +1,52 @@
"""
List-valued configuration keys have an ad-hoc microsyntax. From `hg help config`:

> List values are separated by whitespace or comma, except when values are
> placed in double quotation marks:
>
>     allow_read = "John Doe, PhD", brian, betty
>
> Quotation marks can be escaped by prefixing them with a backslash. Only
> quotation marks at the beginning of a word is counted as a quotation
> (e.g., ``foo"bar baz`` is the list of ``foo"bar`` and ``baz``).

That help documentation is fairly light on details; the actual parser has
many other edge cases. This test tries to cover them.
"""

from mercurial.utils import stringutil


def assert_parselist(input, expected):
    result = stringutil.parselist(input)
    if result != expected:
        raise AssertionError(
            "parse_input(%r)\n got %r\nexpected %r"
            % (input, result, expected)
        )


# Keep these Python tests in sync with the Rust ones in `rust/hg-core/src/config/values.rs`

assert_parselist(b'', [])
assert_parselist(b',', [])
assert_parselist(b'A', [b'A'])
assert_parselist(b'B,B', [b'B', b'B'])
assert_parselist(b', C, ,C,', [b'C', b'C'])
assert_parselist(b'"', [b'"'])
assert_parselist(b'""', [b'', b''])
assert_parselist(b'D,"', [b'D', b'"'])
assert_parselist(b'E,""', [b'E', b'', b''])
assert_parselist(b'"F,F"', [b'F,F'])
assert_parselist(b'"G,G', [b'"G', b'G'])
assert_parselist(b'"H \\",\\"H', [b'"H', b',', b'H'])
assert_parselist(b'I,I"', [b'I', b'I"'])
assert_parselist(b'J,"J', [b'J', b'"J'])
assert_parselist(b'K K', [b'K', b'K'])
assert_parselist(b'"K" K', [b'K', b'K'])
assert_parselist(b'L\tL', [b'L', b'L'])
assert_parselist(b'"L"\tL', [b'L', b'', b'L'])
assert_parselist(b'M\x0bM', [b'M', b'M'])
assert_parselist(b'"M"\x0bM', [b'M', b'', b'M'])
assert_parselist(b'"N" , ,"', [b'N"'])
assert_parselist(b'" ,O, ', [b'"', b'O'])
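As a cross-check against the help text quoted in the docstring, the documented example parses the same way (a sketch in the same style as the assertions above, not part of the original test file):

assert_parselist(
    b'"John Doe, PhD", brian, betty',
    [b'John Doe, PhD', b'brian', b'betty'],
)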
@@ -0,0 +1,49 @@
from __future__ import absolute_import

import os
from mercurial.hgweb import hgwebdir_mod

hgwebdir = hgwebdir_mod.hgwebdir

os.mkdir(b'webdir')
os.chdir(b'webdir')

webdir = os.path.realpath(b'.')


def trivial_response(req, res):
    return []


def make_hgwebdir(gc_rate=None):
    config = os.path.join(webdir, b'hgwebdir.conf')
    with open(config, 'wb') as configfile:
        configfile.write(b'[experimental]\n')
        if gc_rate is not None:
            configfile.write(b'web.full-garbage-collection-rate=%d\n' % gc_rate)
    hg_wd = hgwebdir(config)
    hg_wd._runwsgi = trivial_response
    return hg_wd


def process_requests(webdir_instance, number):
    # we don't care for now about passing realistic arguments
    for _ in range(number):
        for chunk in webdir_instance.run_wsgi(None, None):
            pass


without_gc = make_hgwebdir(gc_rate=0)
process_requests(without_gc, 5)
assert without_gc.requests_count == 5
assert without_gc.gc_full_collections_done == 0

with_gc = make_hgwebdir(gc_rate=2)
process_requests(with_gc, 5)
assert with_gc.requests_count == 5
assert with_gc.gc_full_collections_done == 2

with_systematic_gc = make_hgwebdir()  # default value of the setting
process_requests(with_systematic_gc, 3)
assert with_systematic_gc.requests_count == 3
assert with_systematic_gc.gc_full_collections_done == 3
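The arithmetic those assertions rely on, as a standalone sketch (the helper name is made up; `None` stands for leaving `web.full-garbage-collection-rate` unset):

def expected_full_collections(requests, gc_rate):
    # rate 0 disables full collections, rate N collects once every N
    # requests, and the default (unset) collects on every request
    if gc_rate is None:
        return requests
    if gc_rate == 0:
        return 0
    return requests // gc_rate

assert expected_full_collections(5, 0) == 0
assert expected_full_collections(5, 2) == 2
assert expected_full_collections(3, None) == 3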
@@ -24,17 +24,27 @@ def nonnormalentries(dmap):
     return res
 
 
+INCONSISTENCY_MESSAGE = b"""%s call to %s
+inconsistency in nonnormalset
+result from dirstatemap: %s
+expected nonnormalset: %s
+"""
+
+
 def checkconsistency(ui, orig, dmap, _nonnormalset, label):
     """Compute nonnormalset from dmap, check that it matches _nonnormalset"""
     nonnormalcomputedmap = nonnormalentries(dmap)
     if _nonnormalset != nonnormalcomputedmap:
         b_orig = pycompat.sysbytes(repr(orig))
-        ui.develwarn(b"%s call to %s\n" % (label, b_orig), config=b'dirstate')
-        ui.develwarn(b"inconsistency in nonnormalset\n", config=b'dirstate')
         b_nonnormal = pycompat.sysbytes(repr(_nonnormalset))
-        ui.develwarn(b"[nonnormalset] %s\n" % b_nonnormal, config=b'dirstate')
         b_nonnormalcomputed = pycompat.sysbytes(repr(nonnormalcomputedmap))
-        ui.develwarn(b"[map] %s\n" % b_nonnormalcomputed, config=b'dirstate')
+        msg = INCONSISTENCY_MESSAGE % (
+            label,
+            b_orig,
+            b_nonnormal,
+            b_nonnormalcomputed,
+        )
+        ui.develwarn(msg, config=b'dirstate')
 
 
 def _checkdirstate(orig, self, *args, **kwargs):
@@ -59,11 +69,13 @@ def extsetup(ui):
     if paranoid:
         # We don't do all these checks when paranoid is disabled as it would
        # make the extension run very slowly on large repos
-        extensions.wrapfunction(dirstatecl, 'normallookup', _checkdirstate)
-        extensions.wrapfunction(dirstatecl, 'otherparent', _checkdirstate)
-        extensions.wrapfunction(dirstatecl, 'normal', _checkdirstate)
         extensions.wrapfunction(dirstatecl, 'write', _checkdirstate)
-        extensions.wrapfunction(dirstatecl, 'add', _checkdirstate)
-        extensions.wrapfunction(dirstatecl, 'remove', _checkdirstate)
-        extensions.wrapfunction(dirstatecl, 'merge', _checkdirstate)
-        extensions.wrapfunction(dirstatecl, 'drop', _checkdirstate)
+        extensions.wrapfunction(dirstatecl, 'set_tracked', _checkdirstate)
+        extensions.wrapfunction(dirstatecl, 'set_untracked', _checkdirstate)
+        extensions.wrapfunction(
+            dirstatecl, 'set_possibly_dirty', _checkdirstate
+        )
+        extensions.wrapfunction(
+            dirstatecl, 'update_file_p1', _checkdirstate
+        )
+        extensions.wrapfunction(dirstatecl, 'update_file', _checkdirstate)
@@ -140,12 +140,10 @@ def peersetup(ui, peer):
         def getannotate(self, path, lastnode=None):
             if not self.capable(b'getannotate'):
                 ui.warn(_(b'remote peer cannot provide annotate cache\n'))
-                yield None, None
+                return None, None
             else:
                 args = {b'path': path, b'lastnode': lastnode or b''}
-                f = wireprotov1peer.future()
-                yield args, f
-                yield _parseresponse(f.value)
+                return args, _parseresponse
 
     peer.__class__ = fastannotatepeer
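All the wireproto hunks in this merge apply the same mechanical rewrite: a `@wireprotov1peer.batchable` function used to be a generator that yielded `(encoded_args, future)` and then yielded the decoded `future.value`; it now simply returns an `(encoded_args, decode)` pair, and the batching machinery calls `decode` on the raw response. A minimal sketch of the new shape (the command name and payload here are made up):

@wireprotov1peer.batchable
def example_command(self, arg):
    def decode(d):
        # invoked later, once the batched wire response arrives
        return d.split(b'\n')

    return {b'arg': arg}, decode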
@@ -431,18 +431,19 @@ def localrepolistkeys(orig, self, namesp
 @wireprotov1peer.batchable
 def listkeyspatterns(self, namespace, patterns):
     if not self.capable(b'pushkey'):
-        yield {}, None
-    f = wireprotov1peer.future()
+        return {}, None
     self.ui.debug(b'preparing listkeys for "%s"\n' % namespace)
-    yield {
+
+    def decode(d):
+        self.ui.debug(
+            b'received listkey for "%s": %i bytes\n' % (namespace, len(d))
+        )
+        return pushkey.decodekeys(d)
+
+    return {
         b'namespace': encoding.fromlocal(namespace),
         b'patterns': wireprototypes.encodelist(patterns),
-    }, f
-    d = f.value
-    self.ui.debug(
-        b'received listkey for "%s": %i bytes\n' % (namespace, len(d))
-    )
-    yield pushkey.decodekeys(d)
+    }, decode
 
 
 def _readbundlerevs(bundlerepo):
@@ -577,7 +577,7 @@ def updatelfiles(
                 repo.wvfs.unlinkpath(lfutil.standin(f))
                 # This needs to happen for dropped files, otherwise they stay in
                 # the M state.
-                lfdirstate._drop(f)
+                lfdirstate._map.reset_state(f)
 
         statuswriter(_(b'getting changed largefiles\n'))
         cachelfiles(ui, repo, None, lfiles)
@@ -184,17 +184,18 @@ def wirereposetup(ui, repo):
 
         @wireprotov1peer.batchable
         def statlfile(self, sha):
-            f = wireprotov1peer.future()
+            def decode(d):
+                try:
+                    return int(d)
+                except (ValueError, urlerr.httperror):
+                    # If the server returns anything but an integer followed by a
+                    # newline, newline, it's not speaking our language; if we get
+                    # an HTTP error, we can't be sure the largefile is present;
+                    # either way, consider it missing.
+                    return 2
+
             result = {b'sha': sha}
-            yield result, f
-            try:
-                yield int(f.value)
-            except (ValueError, urlerr.httperror):
-                # If the server returns anything but an integer followed by a
-                # newline, newline, it's not speaking our language; if we get
-                # an HTTP error, we can't be sure the largefile is present;
-                # either way, consider it missing.
-                yield 2
+            return result, decode
 
     repo.__class__ = lfileswirerepository
@@ -289,7 +289,7 @@ def _narrow(
     repair.strip(ui, unfi, tostrip, topic=b'narrow', backup=backup)
 
     todelete = []
-    for t, f, f2, size in repo.store.datafiles():
+    for t, f, size in repo.store.datafiles():
         if f.startswith(b'data/'):
             file = f[5:-2]
             if not newmatch(file):
@@ -378,7 +378,7 @@ class manifestrevlogstore(object):
             ledger.markdataentry(self, treename, node)
             ledger.markhistoryentry(self, treename, node)
 
-        for t, path, encoded, size in self._store.datafiles():
+        for t, path, size in self._store.datafiles():
             if path[:5] != b'meta/' or path[-2:] != b'.i':
                 continue
 
@@ -63,12 +63,14 @@ def peersetup(ui, peer):
                 raise error.Abort(
                     b'configured remotefile server does not support getfile'
                 )
-            f = wireprotov1peer.future()
-            yield {b'file': file, b'node': node}, f
-            code, data = f.value.split(b'\0', 1)
-            if int(code):
-                raise error.LookupError(file, node, data)
-            yield data
+
+            def decode(d):
+                code, data = d.split(b'\0', 1)
+                if int(code):
+                    raise error.LookupError(file, node, data)
+                return data
+
+            return {b'file': file, b'node': node}, decode
 
         @wireprotov1peer.batchable
         def x_rfl_getflogheads(self, path):
@@ -77,10 +79,11 @@ def peersetup(ui, peer):
                     b'configured remotefile server does not '
                     b'support getflogheads'
                 )
-            f = wireprotov1peer.future()
-            yield {b'path': path}, f
-            heads = f.value.split(b'\n') if f.value else []
-            yield heads
+
+            def decode(d):
+                return d.split(b'\n') if d else []
+
+            return {b'path': path}, decode
 
         def _updatecallstreamopts(self, command, opts):
             if command != b'getbundle':
@@ -166,24 +166,24 @@ def onetimesetup(ui):
                     n = util.pconvert(fp[striplen:])
                     d = store.decodedir(n)
                     t = store.FILETYPE_OTHER
-                    yield (t, d, n, st.st_size)
+                    yield (t, d, st.st_size)
                 if kind == stat.S_IFDIR:
                     visit.append(fp)
 
         if scmutil.istreemanifest(repo):
-            for (t, u, e, s) in repo.store.datafiles():
+            for (t, u, s) in repo.store.datafiles():
                 if u.startswith(b'meta/') and (
                     u.endswith(b'.i') or u.endswith(b'.d')
                 ):
-                    yield (t, u, e, s)
+                    yield (t, u, s)
 
         # Return .d and .i files that do not match the shallow pattern
         match = state.match
         if match and not match.always():
-            for (t, u, e, s) in repo.store.datafiles():
+            for (t, u, s) in repo.store.datafiles():
                 f = u[5:-2]  # trim data/... and .i/.d
                 if not state.match(f):
-                    yield (t, u, e, s)
+                    yield (t, u, s)
 
         for x in repo.store.topfiles():
             if state.noflatmf and x[1][:11] == b'00manifest.':
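All three `datafiles()` hunks above are the same signature change: the old 4-tuple `(filetype, unencoded_name, encoded_name, size)` lost its encoded-name element. A caller sketch of the new 3-tuple shape (given a `repo` object; the loop body is illustrative only):

for file_type, name, size in repo.store.datafiles():
    # `name` is the store-relative path, e.g. b'data/foo.txt.i'
    if name.endswith(b'.i'):
        print(name, size)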
@@ -255,14 +255,9 @@ def _setupdirstate(ui):
 
     # Prevent adding files that are outside the sparse checkout
     editfuncs = [
-        b'normal',
         b'set_tracked',
         b'set_untracked',
-        b'add',
-        b'normallookup',
         b'copy',
-        b'remove',
-        b'merge',
     ]
     hint = _(
         b'include file with `hg debugsparse --include <pattern>` or use '
@@ -29,6 +29,8 @@ from . import (
     vfs as vfsmod,
 )
 
+from .utils import stringutil
+
 stringio = util.stringio
 
 # from unzip source code:
@@ -196,7 +198,7 @@ class tarit(object):
                     name, pycompat.sysstr(mode + kind), fileobj
                 )
             except tarfile.CompressionError as e:
-                raise error.Abort(pycompat.bytestr(e))
+                raise error.Abort(stringutil.forcebytestr(e))
 
         if isinstance(dest, bytes):
             self.z = taropen(b'w:', name=dest)
@@ -680,8 +680,25 @@ def binarydecode(repo, stream):
     return books
 
 
-def updatefromremote(ui, repo, remotemarks, path, trfunc, explicit=()):
-    ui.debug(b"checking for updated bookmarks\n")
+def mirroring_remote(ui, repo, remotemarks):
+    """computes the bookmark changes that set the local bookmarks to
+    remotemarks"""
+    changed = []
+    localmarks = repo._bookmarks
+    for (b, id) in pycompat.iteritems(remotemarks):
+        if id != localmarks.get(b, None) and id in repo:
+            changed.append((b, id, ui.debug, _(b"updating bookmark %s\n") % b))
+    for b in localmarks:
+        if b not in remotemarks:
+            changed.append(
+                (b, None, ui.debug, _(b"removing bookmark %s\n") % b)
+            )
+    return changed
+
+
+def merging_from_remote(ui, repo, remotemarks, path, explicit=()):
+    """computes the bookmark changes that merge remote bookmarks into the
+    local bookmarks, based on comparebookmarks"""
     localmarks = repo._bookmarks
     (
         addsrc,
@@ -752,6 +769,15 @@ def updatefromremote(ui, repo, remotemar
                 _(b"remote bookmark %s points to locally missing %s\n")
                 % (b, hex(scid)[:12])
             )
+    return changed
+
+
+def updatefromremote(ui, repo, remotemarks, path, trfunc, explicit=()):
+    ui.debug(b"checking for updated bookmarks\n")
+    if ui.configbool(b'bookmarks', b'mirror'):
+        changed = mirroring_remote(ui, repo, remotemarks)
+    else:
+        changed = merging_from_remote(ui, repo, remotemarks, path, explicit)
 
     if changed:
         tr = trfunc()
@@ -760,7 +786,7 @@ def updatefromremote(ui, repo, remotemar
         for b, node, writer, msg in sorted(changed, key=key):
             changes.append((b, node))
             writer(msg)
-        localmarks.applychanges(repo, tr, changes)
+        repo._bookmarks.applychanges(repo, tr, changes)
 
 
 def incoming(ui, repo, peer):
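A pure-Python model of what `mirroring_remote` computes (a sketch, not the extension code; `known_nodes` stands in for the `id in repo` containment check):

def mirror_changes(localmarks, remotemarks, known_nodes):
    changed = []
    for name, node in remotemarks.items():
        # update when the remote value differs and the node exists locally
        if node != localmarks.get(name) and node in known_nodes:
            changed.append((name, node))
    for name in localmarks:
        # drop bookmarks the remote does not have
        if name not in remotemarks:
            changed.append((name, None))
    return changed

assert mirror_changes({b'stale': b'n1'}, {b'book': b'n2'}, {b'n2'}) == [
    (b'book', b'n2'),
    (b'stale', None),
]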
@@ -264,7 +264,7 @@ PyObject *make_file_foldmap(PyObject *se
         }
 
         tuple = (dirstateItemObject *)v;
-        if (tuple->state != 'r') {
+        if (tuple->flags & dirstate_flag_wc_tracked) {
             PyObject *normed;
             if (table != NULL) {
                 normed = _asciitransform(k, table,
@@ -161,7 +161,7 @@ bail:
     return ret;
 }
 
-static int dirs_fromdict(PyObject *dirs, PyObject *source, char skipchar)
+static int dirs_fromdict(PyObject *dirs, PyObject *source, bool only_tracked)
 {
     PyObject *key, *value;
     Py_ssize_t pos = 0;
@@ -171,13 +171,14 @@ static int dirs_fromdict(PyObject *dirs,
             PyErr_SetString(PyExc_TypeError, "expected string key");
             return -1;
         }
-        if (skipchar) {
+        if (only_tracked) {
             if (!dirstate_tuple_check(value)) {
                 PyErr_SetString(PyExc_TypeError,
                                 "expected a dirstate tuple");
                 return -1;
             }
-            if (((dirstateItemObject *)value)->state == skipchar)
+            if (!(((dirstateItemObject *)value)->flags &
+                  dirstate_flag_wc_tracked))
                 continue;
         }
 
@@ -218,15 +219,17 @@ static int dirs_fromiter(PyObject *dirs,
  * Calculate a refcounted set of directory names for the files in a
  * dirstate.
  */
-static int dirs_init(dirsObject *self, PyObject *args)
+static int dirs_init(dirsObject *self, PyObject *args, PyObject *kwargs)
 {
     PyObject *dirs = NULL, *source = NULL;
-    char skipchar = 0;
+    int only_tracked = 0;
     int ret = -1;
+    static char *keywords_name[] = {"map", "only_tracked", NULL};
 
     self->dict = NULL;
 
-    if (!PyArg_ParseTuple(args, "|Oc:__init__", &source, &skipchar))
+    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|Oi:__init__",
+                                     keywords_name, &source, &only_tracked))
        return -1;
 
     dirs = PyDict_New();
@@ -237,10 +240,10 @@ static int dirs_init(dirsObject *self, P
     if (source == NULL)
         ret = 0;
     else if (PyDict_Check(source))
-        ret = dirs_fromdict(dirs, source, skipchar);
-    else if (skipchar)
+        ret = dirs_fromdict(dirs, source, (bool)only_tracked);
+    else if (only_tracked)
         PyErr_SetString(PyExc_ValueError,
-                        "skip character is only supported "
+                        "`only_tracked` is only supported "
                         "with a dict source");
     else
         ret = dirs_fromiter(dirs, source);
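Seen from Python, the constructor change looks like this (a sketch; `dmap` stands in for a real `{path: DirstateItem}` dirstate map):

from mercurial.cext import parsers

dmap = {}  # placeholder for a dirstate map
# old form: parsers.dirs(dmap, b'r')  -- skip entries whose v1 state is 'r'
# new form: keyed on the tracked bit instead of a state letter
d = parsers.dirs(dmap, only_tracked=True)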
@@ -44,42 +44,91 @@ static PyObject *dict_new_presized(PyObj
     return _dict_new_presized(expected_size);
 }
 
-static inline dirstateItemObject *make_dirstate_item(char state, int mode,
-                                                     int size, int mtime)
-{
-    dirstateItemObject *t =
-        PyObject_New(dirstateItemObject, &dirstateItemType);
-    if (!t) {
-        return NULL;
-    }
-    t->state = state;
-    t->mode = mode;
-    t->size = size;
-    t->mtime = mtime;
-    return t;
-}
-
 static PyObject *dirstate_item_new(PyTypeObject *subtype, PyObject *args,
                                    PyObject *kwds)
 {
     /* We do all the initialization here and not a tp_init function because
      * dirstate_item is immutable. */
     dirstateItemObject *t;
-    char state;
-    int size, mode, mtime;
-    if (!PyArg_ParseTuple(args, "ciii", &state, &mode, &size, &mtime)) {
+    int wc_tracked;
+    int p1_tracked;
+    int p2_tracked;
+    int merged;
+    int clean_p1;
+    int clean_p2;
+    int possibly_dirty;
+    PyObject *parentfiledata;
+    static char *keywords_name[] = {
+        "wc_tracked", "p1_tracked", "p2_tracked",
+        "merged",     "clean_p1",   "clean_p2",
+        "possibly_dirty", "parentfiledata", NULL,
+    };
+    wc_tracked = 0;
+    p1_tracked = 0;
+    p2_tracked = 0;
+    merged = 0;
+    clean_p1 = 0;
+    clean_p2 = 0;
+    possibly_dirty = 0;
+    parentfiledata = Py_None;
+    if (!PyArg_ParseTupleAndKeywords(args, kwds, "iiiiiiiO", keywords_name,
+                                     &wc_tracked, &p1_tracked, &p2_tracked,
+                                     &merged, &clean_p1, &clean_p2,
+                                     &possibly_dirty, &parentfiledata)) {
         return NULL;
     }
+    if (merged && (clean_p1 || clean_p2)) {
+        PyErr_SetString(PyExc_RuntimeError,
+                        "`merged` argument incompatible with "
+                        "`clean_p1`/`clean_p2`");
+        return NULL;
+    }
     t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
     if (!t) {
         return NULL;
     }
-    t->state = state;
-    t->mode = mode;
-    t->size = size;
-    t->mtime = mtime;
 
+    t->flags = 0;
+    if (wc_tracked) {
+        t->flags |= dirstate_flag_wc_tracked;
+    }
+    if (p1_tracked) {
+        t->flags |= dirstate_flag_p1_tracked;
+    }
+    if (p2_tracked) {
+        t->flags |= dirstate_flag_p2_tracked;
+    }
+    if (possibly_dirty) {
+        t->flags |= dirstate_flag_possibly_dirty;
+    }
+    if (merged) {
+        t->flags |= dirstate_flag_merged;
+    }
+    if (clean_p1) {
+        t->flags |= dirstate_flag_clean_p1;
+    }
+    if (clean_p2) {
+        t->flags |= dirstate_flag_clean_p2;
+    }
+    t->mode = 0;
+    t->size = dirstate_v1_nonnormal;
+    t->mtime = ambiguous_time;
+    if (parentfiledata != Py_None) {
+        if (!PyTuple_CheckExact(parentfiledata)) {
+            PyErr_SetString(
+                PyExc_TypeError,
+                "parentfiledata should be a Tuple or None");
+            return NULL;
+        }
+        t->mode =
+            (int)PyLong_AsLong(PyTuple_GetItem(parentfiledata, 0));
+        t->size =
+            (int)PyLong_AsLong(PyTuple_GetItem(parentfiledata, 1));
+        t->mtime =
+            (int)PyLong_AsLong(PyTuple_GetItem(parentfiledata, 2));
+    }
     return (PyObject *)t;
 }
 
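The keyword-based construction that `dirstate_item_new` now parses, seen from Python (a sketch; the flag combination and stat values are made up, and every keyword is spelled out since the format string above does not mark any as optional):

from mercurial.cext.parsers import DirstateItem

item = DirstateItem(
    wc_tracked=True,
    p1_tracked=True,
    p2_tracked=False,
    merged=False,
    clean_p1=False,
    clean_p2=False,
    possibly_dirty=False,
    parentfiledata=(0o644, 12, 1630000000),  # (mode, size, mtime)
)
assert item.tracked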
@@ -88,75 +137,134 @@ static void dirstate_item_dealloc(PyObje
     PyObject_Del(o);
 }
 
-static Py_ssize_t dirstate_item_length(PyObject *o)
-{
-    return 4;
-}
-
-static PyObject *dirstate_item_item(PyObject *o, Py_ssize_t i)
-{
-    dirstateItemObject *t = (dirstateItemObject *)o;
-    switch (i) {
-    case 0:
-        return PyBytes_FromStringAndSize(&t->state, 1);
-    case 1:
-        return PyInt_FromLong(t->mode);
-    case 2:
-        return PyInt_FromLong(t->size);
-    case 3:
-        return PyInt_FromLong(t->mtime);
-    default:
-        PyErr_SetString(PyExc_IndexError, "index out of range");
-        return NULL;
-    }
-}
-
-static PySequenceMethods dirstate_item_sq = {
-    dirstate_item_length, /* sq_length */
-    0,                    /* sq_concat */
-    0,                    /* sq_repeat */
-    dirstate_item_item,   /* sq_item */
-    0,                    /* sq_ass_item */
-    0,                    /* sq_contains */
-    0,                    /* sq_inplace_concat */
-    0                     /* sq_inplace_repeat */
-};
+static inline bool dirstate_item_c_tracked(dirstateItemObject *self)
+{
+    return (self->flags & dirstate_flag_wc_tracked);
+}
+
+static inline bool dirstate_item_c_added(dirstateItemObject *self)
+{
+    unsigned char mask =
+        (dirstate_flag_wc_tracked | dirstate_flag_p1_tracked |
+         dirstate_flag_p2_tracked);
+    unsigned char target = dirstate_flag_wc_tracked;
+    return (self->flags & mask) == target;
+}
+
+static inline bool dirstate_item_c_removed(dirstateItemObject *self)
+{
+    if (self->flags & dirstate_flag_wc_tracked) {
+        return false;
+    }
+    return (self->flags &
+            (dirstate_flag_p1_tracked | dirstate_flag_p2_tracked));
+}
+
+static inline bool dirstate_item_c_merged(dirstateItemObject *self)
+{
+    return ((self->flags & dirstate_flag_wc_tracked) &&
+            (self->flags & dirstate_flag_merged));
+}
+
+static inline bool dirstate_item_c_merged_removed(dirstateItemObject *self)
+{
+    if (!dirstate_item_c_removed(self)) {
+        return false;
+    }
+    return (self->flags & dirstate_flag_merged);
+}
+
+static inline bool dirstate_item_c_from_p2(dirstateItemObject *self)
+{
+    if (!dirstate_item_c_tracked(self)) {
+        return false;
+    }
+    return (self->flags & dirstate_flag_clean_p2);
+}
+
+static inline bool dirstate_item_c_from_p2_removed(dirstateItemObject *self)
+{
+    if (!dirstate_item_c_removed(self)) {
+        return false;
+    }
+    return (self->flags & dirstate_flag_clean_p2);
+}
+
+static inline char dirstate_item_c_v1_state(dirstateItemObject *self)
+{
+    if (dirstate_item_c_removed(self)) {
+        return 'r';
+    } else if (dirstate_item_c_merged(self)) {
+        return 'm';
+    } else if (dirstate_item_c_added(self)) {
+        return 'a';
+    } else {
+        return 'n';
+    }
+}
+
+static inline int dirstate_item_c_v1_mode(dirstateItemObject *self)
+{
+    return self->mode;
+}
+
+static inline int dirstate_item_c_v1_size(dirstateItemObject *self)
+{
+    if (dirstate_item_c_merged_removed(self)) {
+        return dirstate_v1_nonnormal;
+    } else if (dirstate_item_c_from_p2_removed(self)) {
+        return dirstate_v1_from_p2;
+    } else if (dirstate_item_c_removed(self)) {
+        return 0;
+    } else if (dirstate_item_c_merged(self)) {
+        return dirstate_v1_from_p2;
+    } else if (dirstate_item_c_added(self)) {
+        return dirstate_v1_nonnormal;
+    } else if (dirstate_item_c_from_p2(self)) {
+        return dirstate_v1_from_p2;
+    } else if (self->flags & dirstate_flag_possibly_dirty) {
+        return self->size; /* NON NORMAL ? */
+    } else {
+        return self->size;
+    }
+}
+
+static inline int dirstate_item_c_v1_mtime(dirstateItemObject *self)
+{
+    if (dirstate_item_c_removed(self)) {
+        return 0;
+    } else if (self->flags & dirstate_flag_possibly_dirty) {
+        return ambiguous_time;
+    } else if (dirstate_item_c_merged(self)) {
+        return ambiguous_time;
+    } else if (dirstate_item_c_added(self)) {
+        return ambiguous_time;
+    } else if (dirstate_item_c_from_p2(self)) {
+        return ambiguous_time;
+    } else {
+        return self->mtime;
+    }
+}
 
 static PyObject *dirstate_item_v1_state(dirstateItemObject *self)
 {
-    return PyBytes_FromStringAndSize(&self->state, 1);
+    char state = dirstate_item_c_v1_state(self);
+    return PyBytes_FromStringAndSize(&state, 1);
 };
 
 static PyObject *dirstate_item_v1_mode(dirstateItemObject *self)
 {
-    return PyInt_FromLong(self->mode);
+    return PyInt_FromLong(dirstate_item_c_v1_mode(self));
 };
 
 static PyObject *dirstate_item_v1_size(dirstateItemObject *self)
 {
-    return PyInt_FromLong(self->size);
+    return PyInt_FromLong(dirstate_item_c_v1_size(self));
 };
 
 static PyObject *dirstate_item_v1_mtime(dirstateItemObject *self)
 {
-    return PyInt_FromLong(self->mtime);
-};
-
-static PyObject *dm_nonnormal(dirstateItemObject *self)
-{
-    if (self->state != 'n' || self->mtime == ambiguous_time) {
-        Py_RETURN_TRUE;
-    } else {
-        Py_RETURN_FALSE;
-    }
-};
-static PyObject *dm_otherparent(dirstateItemObject *self)
-{
-    if (self->size == dirstate_v1_from_p2) {
-        Py_RETURN_TRUE;
-    } else {
-        Py_RETURN_FALSE;
-    }
+    return PyInt_FromLong(dirstate_item_c_v1_mtime(self));
 };
 
 static PyObject *dirstate_item_need_delay(dirstateItemObject *self,
@@ -166,14 +274,15 @@ static PyObject *dirstate_item_need_dela
     if (!pylong_to_long(value, &now)) {
        return NULL;
     }
-    if (self->state == 'n' && self->mtime == now) {
+    if (dirstate_item_c_v1_state(self) == 'n' &&
+        dirstate_item_c_v1_mtime(self) == now) {
        Py_RETURN_TRUE;
     } else {
        Py_RETURN_FALSE;
     }
 };
 
-/* This will never change since it's bound to V1, unlike `make_dirstate_item`
- */
+/* This will never change since it's bound to V1
+ */
 static inline dirstateItemObject *
 dirstate_item_from_v1_data(char state, int mode, int size, int mtime)
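The helpers above derive every v1 field from the flag bits; the state letter, for instance, follows this precedence (a Python sketch using the properties exposed by the getters further down):

def v1_state(item):
    # same order of checks as dirstate_item_c_v1_state()
    if item.removed:
        return b'r'
    elif item.merged:
        return b'm'
    elif item.added:
        return b'a'
    else:
        return b'n'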
@@ -183,10 +292,70 @@ dirstate_item_from_v1_data(char state, i
     if (!t) {
         return NULL;
     }
-    t->state = state;
-    t->mode = mode;
-    t->size = size;
-    t->mtime = mtime;
+
+    if (state == 'm') {
+        t->flags =
+            (dirstate_flag_wc_tracked | dirstate_flag_p1_tracked |
+             dirstate_flag_p2_tracked | dirstate_flag_merged);
+        t->mode = 0;
+        t->size = dirstate_v1_from_p2;
+        t->mtime = ambiguous_time;
+    } else if (state == 'a') {
+        t->flags = dirstate_flag_wc_tracked;
+        t->mode = 0;
+        t->size = dirstate_v1_nonnormal;
+        t->mtime = ambiguous_time;
+    } else if (state == 'r') {
+        t->mode = 0;
+        t->size = 0;
+        t->mtime = 0;
+        if (size == dirstate_v1_nonnormal) {
+            t->flags =
+                (dirstate_flag_p1_tracked |
+                 dirstate_flag_p2_tracked | dirstate_flag_merged);
+        } else if (size == dirstate_v1_from_p2) {
+            t->flags =
+                (dirstate_flag_p2_tracked | dirstate_flag_clean_p2);
+        } else {
+            t->flags = dirstate_flag_p1_tracked;
+        }
+    } else if (state == 'n') {
+        if (size == dirstate_v1_from_p2) {
+            t->flags =
+                (dirstate_flag_wc_tracked |
+                 dirstate_flag_p2_tracked | dirstate_flag_clean_p2);
+            t->mode = 0;
+            t->size = dirstate_v1_from_p2;
+            t->mtime = ambiguous_time;
+        } else if (size == dirstate_v1_nonnormal) {
+            t->flags = (dirstate_flag_wc_tracked |
+                        dirstate_flag_p1_tracked |
+                        dirstate_flag_possibly_dirty);
+            t->mode = 0;
+            t->size = dirstate_v1_nonnormal;
+            t->mtime = ambiguous_time;
+        } else if (mtime == ambiguous_time) {
+            t->flags = (dirstate_flag_wc_tracked |
+                        dirstate_flag_p1_tracked |
+                        dirstate_flag_possibly_dirty);
+            t->mode = mode;
+            t->size = size;
+            t->mtime = 0;
+        } else {
+            t->flags = (dirstate_flag_wc_tracked |
+                        dirstate_flag_p1_tracked);
+            t->mode = mode;
+            t->size = size;
+            t->mtime = mtime;
+        }
+    } else {
+        PyErr_Format(PyExc_RuntimeError,
+                     "unknown state: `%c` (%d, %d, %d)", state, mode,
+                     size, mtime, NULL);
+        Py_DECREF(t);
+        return NULL;
+    }
+
     return t;
 }
 
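Read together with `dirstate_item_c_v1_size`, the `state == 'r'` branch shows that for removed files the v1 `size` field is a three-way marker. A sketch with stand-in bit values (only the mapping matters):

NONNORMAL = -1  # dirstate_v1_nonnormal
FROM_P2 = -2    # dirstate_v1_from_p2
P1_TRACKED, P2_TRACKED, MERGED, CLEAN_P2 = 1, 2, 4, 8  # placeholder bits

def removed_entry_flags(size):
    if size == NONNORMAL:  # removed after a merge
        return P1_TRACKED | P2_TRACKED | MERGED
    elif size == FROM_P2:  # removed, file came from the other parent
        return P2_TRACKED | CLEAN_P2
    else:                  # plain removal
        return P1_TRACKED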
@@ -196,22 +365,110 @@ static PyObject *dirstate_item_from_v1_m
 {
     /* We do all the initialization here and not a tp_init function because
      * dirstate_item is immutable. */
-    dirstateItemObject *t;
     char state;
     int size, mode, mtime;
     if (!PyArg_ParseTuple(args, "ciii", &state, &mode, &size, &mtime)) {
         return NULL;
     }
+    return (PyObject *)dirstate_item_from_v1_data(state, mode, size, mtime);
+};
+
+/* constructor to help legacy API to build a new "added" item
+
+   Should eventually be removed */
+static PyObject *dirstate_item_new_added(PyTypeObject *subtype)
+{
+    dirstateItemObject *t;
+    t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
+    if (!t) {
+        return NULL;
+    }
+    t->flags = dirstate_flag_wc_tracked;
+    t->mode = 0;
+    t->size = dirstate_v1_nonnormal;
+    t->mtime = ambiguous_time;
+    return (PyObject *)t;
+};
+
+/* constructor to help legacy API to build a new "merged" item
+
+   Should eventually be removed */
+static PyObject *dirstate_item_new_merged(PyTypeObject *subtype)
+{
+    dirstateItemObject *t;
+    t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
+    if (!t) {
+        return NULL;
+    }
+    t->flags = (dirstate_flag_wc_tracked | dirstate_flag_p1_tracked |
+                dirstate_flag_p2_tracked | dirstate_flag_merged);
+    t->mode = 0;
+    t->size = dirstate_v1_from_p2;
+    t->mtime = ambiguous_time;
+    return (PyObject *)t;
+};
+
+/* constructor to help legacy API to build a new "from_p2" item
+
+   Should eventually be removed */
+static PyObject *dirstate_item_new_from_p2(PyTypeObject *subtype)
+{
+    /* We do all the initialization here and not a tp_init function because
+     * dirstate_item is immutable. */
+    dirstateItemObject *t;
+    t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
+    if (!t) {
+        return NULL;
+    }
+    t->flags = (dirstate_flag_wc_tracked | dirstate_flag_p2_tracked |
+                dirstate_flag_clean_p2);
+    t->mode = 0;
+    t->size = dirstate_v1_from_p2;
+    t->mtime = ambiguous_time;
+    return (PyObject *)t;
+};
+
+/* constructor to help legacy API to build a new "possibly_dirty" item
+
+   Should eventually be removed */
+static PyObject *dirstate_item_new_possibly_dirty(PyTypeObject *subtype)
+{
+    /* We do all the initialization here and not a tp_init function because
+     * dirstate_item is immutable. */
+    dirstateItemObject *t;
+    t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
+    if (!t) {
+        return NULL;
+    }
+    t->flags = (dirstate_flag_wc_tracked | dirstate_flag_p1_tracked |
+                dirstate_flag_possibly_dirty);
+    t->mode = 0;
+    t->size = dirstate_v1_nonnormal;
+    t->mtime = ambiguous_time;
+    return (PyObject *)t;
+};
+
+/* constructor to help legacy API to build a new "normal" item
+
+   Should eventually be removed */
+static PyObject *dirstate_item_new_normal(PyTypeObject *subtype, PyObject *args)
+{
+    /* We do all the initialization here and not a tp_init function because
+     * dirstate_item is immutable. */
+    dirstateItemObject *t;
+    int size, mode, mtime;
+    if (!PyArg_ParseTuple(args, "iii", &mode, &size, &mtime)) {
+        return NULL;
+    }
 
     t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
     if (!t) {
         return NULL;
     }
-    t->state = state;
+    t->flags = (dirstate_flag_wc_tracked | dirstate_flag_p1_tracked);
     t->mode = mode;
     t->size = size;
     t->mtime = mtime;
-
     return (PyObject *)t;
 };
 
@@ -219,7 +476,42 @@ static PyObject *dirstate_item_from_v1_m
    to make sure it is correct. */
 static PyObject *dirstate_item_set_possibly_dirty(dirstateItemObject *self)
 {
-    self->mtime = ambiguous_time;
+    self->flags |= dirstate_flag_possibly_dirty;
+    Py_RETURN_NONE;
+}
+
+/* See docstring of the python implementation for details */
+static PyObject *dirstate_item_set_clean(dirstateItemObject *self,
+                                         PyObject *args)
+{
+    int size, mode, mtime;
+    if (!PyArg_ParseTuple(args, "iii", &mode, &size, &mtime)) {
+        return NULL;
+    }
+    self->flags = dirstate_flag_wc_tracked | dirstate_flag_p1_tracked;
+    self->mode = mode;
+    self->size = size;
+    self->mtime = mtime;
+    Py_RETURN_NONE;
+}
+
+static PyObject *dirstate_item_set_tracked(dirstateItemObject *self)
+{
+    self->flags |= dirstate_flag_wc_tracked;
+    self->flags |= dirstate_flag_possibly_dirty;
+    /* size = None on the python side turns into size = NON_NORMAL when
+     * accessed. So the next line is currently required, but some future
+     * clean up would be welcome. */
+    self->size = dirstate_v1_nonnormal;
+    Py_RETURN_NONE;
+}
+
+static PyObject *dirstate_item_set_untracked(dirstateItemObject *self)
+{
+    self->flags &= ~dirstate_flag_wc_tracked;
+    self->mode = 0;
+    self->mtime = 0;
+    self->size = 0;
     Py_RETURN_NONE;
 }
 
@@ -234,40 +526,58 @@ static PyMethodDef dirstate_item_methods
      "return a \"mtime\" suitable for v1 serialization"},
     {"need_delay", (PyCFunction)dirstate_item_need_delay, METH_O,
      "True if the stored mtime would be ambiguous with the current time"},
-    {"from_v1_data", (PyCFunction)dirstate_item_from_v1_meth, METH_O,
-     "build a new DirstateItem object from V1 data"},
+    {"from_v1_data", (PyCFunction)dirstate_item_from_v1_meth,
+     METH_VARARGS | METH_CLASS, "build a new DirstateItem object from V1 data"},
+    {"new_added", (PyCFunction)dirstate_item_new_added,
+     METH_NOARGS | METH_CLASS,
+     "constructor to help legacy API to build a new \"added\" item"},
+    {"new_merged", (PyCFunction)dirstate_item_new_merged,
+     METH_NOARGS | METH_CLASS,
+     "constructor to help legacy API to build a new \"merged\" item"},
+    {"new_from_p2", (PyCFunction)dirstate_item_new_from_p2,
+     METH_NOARGS | METH_CLASS,
+     "constructor to help legacy API to build a new \"from_p2\" item"},
+    {"new_possibly_dirty", (PyCFunction)dirstate_item_new_possibly_dirty,
+     METH_NOARGS | METH_CLASS,
+     "constructor to help legacy API to build a new \"possibly_dirty\" item"},
+    {"new_normal", (PyCFunction)dirstate_item_new_normal,
+     METH_VARARGS | METH_CLASS,
+     "constructor to help legacy API to build a new \"normal\" item"},
     {"set_possibly_dirty", (PyCFunction)dirstate_item_set_possibly_dirty,
      METH_NOARGS, "mark a file as \"possibly dirty\""},
-    {"dm_nonnormal", (PyCFunction)dm_nonnormal, METH_NOARGS,
-     "True is the entry is non-normal in the dirstatemap sense"},
-    {"dm_otherparent", (PyCFunction)dm_otherparent, METH_NOARGS,
-     "True is the entry is `otherparent` in the dirstatemap sense"},
+    {"set_clean", (PyCFunction)dirstate_item_set_clean, METH_VARARGS,
+     "mark a file as \"clean\""},
+    {"set_tracked", (PyCFunction)dirstate_item_set_tracked, METH_NOARGS,
+     "mark a file as \"tracked\""},
+    {"set_untracked", (PyCFunction)dirstate_item_set_untracked, METH_NOARGS,
+     "mark a file as \"untracked\""},
     {NULL} /* Sentinel */
 };
 
 static PyObject *dirstate_item_get_mode(dirstateItemObject *self)
 {
-    return PyInt_FromLong(self->mode);
+    return PyInt_FromLong(dirstate_item_c_v1_mode(self));
 };
 
 static PyObject *dirstate_item_get_size(dirstateItemObject *self)
 {
-    return PyInt_FromLong(self->size);
+    return PyInt_FromLong(dirstate_item_c_v1_size(self));
 };
 
 static PyObject *dirstate_item_get_mtime(dirstateItemObject *self)
 {
-    return PyInt_FromLong(self->mtime);
+    return PyInt_FromLong(dirstate_item_c_v1_mtime(self));
 };
 
 static PyObject *dirstate_item_get_state(dirstateItemObject *self)
 {
-    return PyBytes_FromStringAndSize(&self->state, 1);
+    char state = dirstate_item_c_v1_state(self);
+    return PyBytes_FromStringAndSize(&state, 1);
 };
 
 static PyObject *dirstate_item_get_tracked(dirstateItemObject *self)
 {
-    if (self->state == 'a' || self->state == 'm' || self->state == 'n') {
+    if (dirstate_item_c_tracked(self)) {
         Py_RETURN_TRUE;
     } else {
         Py_RETURN_FALSE;
@@ -276,7 +586,7 @@ static PyObject *dirstate_item_get_track
 
 static PyObject *dirstate_item_get_added(dirstateItemObject *self)
 {
-    if (self->state == 'a') {
+    if (dirstate_item_c_added(self)) {
         Py_RETURN_TRUE;
     } else {
         Py_RETURN_FALSE;
@@ -285,7 +595,7 @@ static PyObject *dirstate_item_get_added
 
 static PyObject *dirstate_item_get_merged(dirstateItemObject *self)
 {
-    if (self->state == 'm') {
+    if (dirstate_item_c_merged(self)) {
         Py_RETURN_TRUE;
     } else {
         Py_RETURN_FALSE;
@@ -294,7 +604,7 @@ static PyObject *dirstate_item_get_merge
 
 static PyObject *dirstate_item_get_merged_removed(dirstateItemObject *self)
 {
-    if (self->state == 'r' && self->size == dirstate_v1_nonnormal) {
+    if (dirstate_item_c_merged_removed(self)) {
         Py_RETURN_TRUE;
     } else {
         Py_RETURN_FALSE;
@@ -303,7 +613,7 @@ static PyObject *dirstate_item_get_merge
 
 static PyObject *dirstate_item_get_from_p2(dirstateItemObject *self)
 {
-    if (self->state == 'n' && self->size == dirstate_v1_from_p2) {
+    if (dirstate_item_c_from_p2(self)) {
         Py_RETURN_TRUE;
     } else {
         Py_RETURN_FALSE;
@@ -312,7 +622,7 @@ static PyObject *dirstate_item_get_from_
 
 static PyObject *dirstate_item_get_from_p2_removed(dirstateItemObject *self)
 {
-    if (self->state == 'r' && self->size == dirstate_v1_from_p2) {
+    if (dirstate_item_c_from_p2_removed(self)) {
         Py_RETURN_TRUE;
     } else {
         Py_RETURN_FALSE;
@@ -321,7 +631,25 @@ static PyObject *dirstate_item_get_from_
 
 static PyObject *dirstate_item_get_removed(dirstateItemObject *self)
 {
-    if (self->state == 'r') {
+    if (dirstate_item_c_removed(self)) {
+        Py_RETURN_TRUE;
+    } else {
+        Py_RETURN_FALSE;
+    }
+};
+
+static PyObject *dm_nonnormal(dirstateItemObject *self)
+{
+    if ((dirstate_item_c_v1_state(self) != 'n') ||
+        (dirstate_item_c_v1_mtime(self) == ambiguous_time)) {
+        Py_RETURN_TRUE;
+    } else {
+        Py_RETURN_FALSE;
+    }
+};
+static PyObject *dm_otherparent(dirstateItemObject *self)
+{
+    if (dirstate_item_c_v1_size(self) == dirstate_v1_from_p2) {
         Py_RETURN_TRUE;
     } else {
         Py_RETURN_FALSE;
@@ -342,6 +670,8 @@ static PyGetSetDef dirstate_item_getset[
      "from_p2_removed", NULL},
     {"from_p2", (getter)dirstate_item_get_from_p2, NULL, "from_p2", NULL},
     {"removed", (getter)dirstate_item_get_removed, NULL, "removed", NULL},
+    {"dm_nonnormal", (getter)dm_nonnormal, NULL, "dm_nonnormal", NULL},
+    {"dm_otherparent", (getter)dm_otherparent, NULL, "dm_otherparent", NULL},
     {NULL} /* Sentinel */
 };
 
@@ -357,7 +687,7 @@ PyTypeObject dirstateItemType = {
     0,                 /* tp_compare */
     0,                 /* tp_repr */
     0,                 /* tp_as_number */
-    &dirstate_item_sq, /* tp_as_sequence */
+    0,                 /* tp_as_sequence */
     0,                 /* tp_as_mapping */
     0,                 /* tp_hash */
     0,                 /* tp_call */
@@ -441,6 +771,8 b' static PyObject *parse_dirstate(PyObject'
441
771
442 entry = (PyObject *)dirstate_item_from_v1_data(state, mode,
772 entry = (PyObject *)dirstate_item_from_v1_data(state, mode,
443 size, mtime);
773 size, mtime);
774 if (!entry)
775 goto quit;
444 cpos = memchr(cur, 0, flen);
776 cpos = memchr(cur, 0, flen);
445 if (cpos) {
777 if (cpos) {
446 fname = PyBytes_FromStringAndSize(cur, cpos - cur);
778 fname = PyBytes_FromStringAndSize(cur, cpos - cur);
@@ -509,17 +841,19 b' static PyObject *nonnormalotherparentent'
509 }
841 }
510 t = (dirstateItemObject *)v;
842 t = (dirstateItemObject *)v;
511
843
512 if (t->state == 'n' && t->size == -2) {
844 if (dirstate_item_c_from_p2(t)) {
513 if (PySet_Add(otherpset, fname) == -1) {
845 if (PySet_Add(otherpset, fname) == -1) {
514 goto bail;
846 goto bail;
515 }
847 }
516 }
848 }
517
849 if (!(t->flags & dirstate_flag_wc_tracked) ||
518 if (t->state == 'n' && t->mtime != -1) {
850 !(t->flags &
519 continue;
851 (dirstate_flag_p1_tracked | dirstate_flag_p2_tracked)) ||
520 }
852 (t->flags &
521 if (PySet_Add(nonnset, fname) == -1) {
853 (dirstate_flag_possibly_dirty | dirstate_flag_merged))) {
522 goto bail;
854 if (PySet_Add(nonnset, fname) == -1) {
855 goto bail;
856 }
523 }
857 }
524 }
858 }
525
859
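
The rewritten loop tests the new `flags` bitfield instead of the `state` character: an entry is "non-normal" when it is not tracked in the working copy, tracked in neither parent, or flagged possibly-dirty/merged. A minimal Python model of the same predicate, using the bit values from the header change below (illustrative, not the extension module):

    WC_TRACKED = 1
    P1_TRACKED = 1 << 1
    P2_TRACKED = 1 << 2
    POSSIBLY_DIRTY = 1 << 3
    MERGED = 1 << 4

    def is_nonnormal(flags):
        # mirrors the three-clause condition in nonnormalotherparententries()
        return (
            not (flags & WC_TRACKED)
            or not (flags & (P1_TRACKED | P2_TRACKED))
            or bool(flags & (POSSIBLY_DIRTY | MERGED))
        )

    assert is_nonnormal(WC_TRACKED)                   # added file
    assert not is_nonnormal(WC_TRACKED | P1_TRACKED)  # clean tracked file
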
@@ -616,15 +950,15 b' static PyObject *pack_dirstate(PyObject '
616 }
950 }
617 tuple = (dirstateItemObject *)v;
951 tuple = (dirstateItemObject *)v;
618
952
619 state = tuple->state;
953 state = dirstate_item_c_v1_state(tuple);
620 mode = tuple->mode;
954 mode = dirstate_item_c_v1_mode(tuple);
621 size = tuple->size;
955 size = dirstate_item_c_v1_size(tuple);
622 mtime = tuple->mtime;
956 mtime = dirstate_item_c_v1_mtime(tuple);
623 if (state == 'n' && mtime == now) {
957 if (state == 'n' && mtime == now) {
624 /* See pure/parsers.py:pack_dirstate for why we do
958 /* See pure/parsers.py:pack_dirstate for why we do
625 * this. */
959 * this. */
626 mtime = -1;
960 mtime = -1;
627 mtime_unset = (PyObject *)make_dirstate_item(
961 mtime_unset = (PyObject *)dirstate_item_from_v1_data(
628 state, mode, size, mtime);
962 state, mode, size, mtime);
629 if (!mtime_unset) {
963 if (!mtime_unset) {
630 goto bail;
964 goto bail;
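
The `mtime == now` special case is a race guard: a file written in the same second the dirstate is packed could be modified again within that second without its mtime changing, so such entries are serialized with `mtime = -1` to force a content check on the next status. A minimal sketch of the rule (illustrative names, not the C API):

    def pack_entry(state, mode, size, mtime, now):
        """return the (state, mode, size, mtime) quadruple to serialize"""
        if state == b'n' and mtime == now:
            # mtime can no longer distinguish "unchanged" from "changed
            # again in the same timeslot": mark it unset
            mtime = -1
        return (state, mode, size, mtime)

    assert pack_entry(b'n', 0o644, 12, 1000, 1000)[3] == -1
    assert pack_entry(b'n', 0o644, 12, 999, 1000)[3] == 999
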
@@ -917,7 +1251,7 b' static void module_init(PyObject *mod)'
917 revlog_module_init(mod);
1251 revlog_module_init(mod);
918
1252
919 capsule = PyCapsule_New(
1253 capsule = PyCapsule_New(
920 make_dirstate_item,
1254 dirstate_item_from_v1_data,
921 "mercurial.cext.parsers.make_dirstate_item_CAPI", NULL);
1255 "mercurial.cext.parsers.make_dirstate_item_CAPI", NULL);
922 if (capsule != NULL)
1256 if (capsule != NULL)
923 PyModule_AddObject(mod, "make_dirstate_item_CAPI", capsule);
1257 PyModule_AddObject(mod, "make_dirstate_item_CAPI", capsule);
@@ -24,13 +24,21 b''
24 /* clang-format off */
24 /* clang-format off */
25 typedef struct {
25 typedef struct {
26 PyObject_HEAD
26 PyObject_HEAD
27 char state;
27 unsigned char flags;
28 int mode;
28 int mode;
29 int size;
29 int size;
30 int mtime;
30 int mtime;
31 } dirstateItemObject;
31 } dirstateItemObject;
32 /* clang-format on */
32 /* clang-format on */
33
33
34 static const unsigned char dirstate_flag_wc_tracked = 1;
35 static const unsigned char dirstate_flag_p1_tracked = 1 << 1;
36 static const unsigned char dirstate_flag_p2_tracked = 1 << 2;
37 static const unsigned char dirstate_flag_possibly_dirty = 1 << 3;
38 static const unsigned char dirstate_flag_merged = 1 << 4;
39 static const unsigned char dirstate_flag_clean_p1 = 1 << 5;
40 static const unsigned char dirstate_flag_clean_p2 = 1 << 6;
41
34 extern PyTypeObject dirstateItemType;
42 extern PyTypeObject dirstateItemType;
35 #define dirstate_tuple_check(op) (Py_TYPE(op) == &dirstateItemType)
43 #define dirstate_tuple_check(op) (Py_TYPE(op) == &dirstateItemType)
36
44
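
With the single `state` character replaced by a bitfield, one entry can now describe working-copy and parent tracking independently. Roughly how the old one-letter states decompose into these bits, sketched in Python; the mapping is inferred from `from_v1_data` later in this patch and is illustrative only:

    WC_TRACKED, P1_TRACKED, P2_TRACKED = 1, 1 << 1, 1 << 2
    POSSIBLY_DIRTY, MERGED = 1 << 3, 1 << 4

    V1_STATES = {
        b'n': WC_TRACKED | P1_TRACKED,  # normal: tracked here and in p1
        b'a': WC_TRACKED,               # added: no parent tracks it yet
        b'r': P1_TRACKED,               # removed: gone from the working copy
        b'm': WC_TRACKED | P1_TRACKED | P2_TRACKED | MERGED,
    }

    for state, flags in V1_STATES.items():
        print(state, format(flags, '07b'))
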
@@ -526,7 +526,7 b' def annotate(ui, repo, *pats, **opts):'
526 )
526 )
527
527
528 def bad(x, y):
528 def bad(x, y):
529 raise error.Abort(b"%s: %s" % (x, y))
529 raise error.InputError(b"%s: %s" % (x, y))
530
530
531 m = scmutil.match(ctx, pats, opts, badfn=bad)
531 m = scmutil.match(ctx, pats, opts, badfn=bad)
532
532
@@ -1081,7 +1081,7 b' def bisect('
1081 raise error.StateError(_(b'current bisect revision is a merge'))
1081 raise error.StateError(_(b'current bisect revision is a merge'))
1082 if rev:
1082 if rev:
1083 if not nodes:
1083 if not nodes:
1084 raise error.Abort(_(b'empty revision set'))
1084 raise error.InputError(_(b'empty revision set'))
1085 node = repo[nodes[-1]].node()
1085 node = repo[nodes[-1]].node()
1086 with hbisect.restore_state(repo, state, node):
1086 with hbisect.restore_state(repo, state, node):
1087 while changesets:
1087 while changesets:
@@ -207,6 +207,11 b' coreconfigitem('
207 b'pushing',
207 b'pushing',
208 default=list,
208 default=list,
209 )
209 )
210 coreconfigitem(
211 b'bookmarks',
212 b'mirror',
213 default=False,
214 )
210 # bundle.mainreporoot: internal hack for bundlerepo
215 # bundle.mainreporoot: internal hack for bundlerepo
211 coreconfigitem(
216 coreconfigitem(
212 b'bundle',
217 b'bundle',
@@ -1266,6 +1271,11 b' coreconfigitem('
1266 )
1271 )
1267 coreconfigitem(
1272 coreconfigitem(
1268 b'experimental',
1273 b'experimental',
1274 b'web.full-garbage-collection-rate',
1275 default=1, # still forcing a full collection on each request
1276 )
1277 coreconfigitem(
1278 b'experimental',
1269 b'worker.wdir-get-thread-safe',
1279 b'worker.wdir-get-thread-safe',
1270 default=False,
1280 default=False,
1271 )
1281 )
@@ -962,35 +962,29 b' def debugstate(ui, repo, **opts):'
962 datesort = opts.get('datesort')
962 datesort = opts.get('datesort')
963
963
964 if datesort:
964 if datesort:
965 keyfunc = lambda x: (
965
966 x[1].v1_mtime(),
966 def keyfunc(entry):
967 x[0],
967 filename, _state, _mode, _size, mtime = entry
968 ) # sort by mtime, then by filename
968 return (mtime, filename)
969
969 else:
970 else:
970 keyfunc = None # sort by filename
971 keyfunc = None # sort by filename
971 if opts['all']:
972 entries = list(repo.dirstate._map.debug_iter(all=opts['all']))
972 entries = list(repo.dirstate._map.debug_iter())
973 else:
974 entries = list(pycompat.iteritems(repo.dirstate))
975 entries.sort(key=keyfunc)
973 entries.sort(key=keyfunc)
976 for file_, ent in entries:
974 for entry in entries:
977 if ent.v1_mtime() == -1:
975 filename, state, mode, size, mtime = entry
976 if mtime == -1:
978 timestr = b'unset '
977 timestr = b'unset '
979 elif nodates:
978 elif nodates:
980 timestr = b'set '
979 timestr = b'set '
981 else:
980 else:
982 timestr = time.strftime(
981 timestr = time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime(mtime))
983 "%Y-%m-%d %H:%M:%S ", time.localtime(ent.v1_mtime())
984 )
985 timestr = encoding.strtolocal(timestr)
982 timestr = encoding.strtolocal(timestr)
986 if ent.mode & 0o20000:
983 if mode & 0o20000:
987 mode = b'lnk'
984 mode = b'lnk'
988 else:
985 else:
989 mode = b'%3o' % (ent.v1_mode() & 0o777 & ~util.umask)
986 mode = b'%3o' % (mode & 0o777 & ~util.umask)
990 ui.write(
987 ui.write(b"%c %s %10d %s%s\n" % (state, mode, size, timestr, filename))
991 b"%c %s %10d %s%s\n"
992 % (ent.v1_state(), mode, ent.v1_size(), timestr, file_)
993 )
994 for f in repo.dirstate.copies():
988 for f in repo.dirstate.copies():
995 ui.write(_(b"copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
989 ui.write(_(b"copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
996
990
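
Since `debug_iter` now yields plain `(filename, state, mode, size, mtime)` tuples, the date sort becomes an ordinary tuple key, for example:

    entries = [
        (b'b.txt', b'n', 0o644, 5, 1000),
        (b'a.txt', b'n', 0o644, 7, 1000),
        (b'c.txt', b'n', 0o644, 2, 500),
    ]

    def keyfunc(entry):
        filename, _state, _mode, _size, mtime = entry
        return (mtime, filename)  # sort by mtime, then by filename

    entries.sort(key=keyfunc)
    assert [e[0] for e in entries] == [b'c.txt', b'a.txt', b'b.txt']
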
@@ -2987,10 +2981,22 b' def debugrebuilddirstate(ui, repo, rev, '
2987 dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
2981 dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
2988
2982
2989
2983
2990 @command(b'debugrebuildfncache', [], b'')
2984 @command(
2991 def debugrebuildfncache(ui, repo):
2985 b'debugrebuildfncache',
2986 [
2987 (
2988 b'',
2989 b'only-data',
2990 False,
2991 _(b'only look for wrong .d files (much faster)'),
2992 )
2993 ],
2994 b'',
2995 )
2996 def debugrebuildfncache(ui, repo, **opts):
2992 """rebuild the fncache file"""
2997 """rebuild the fncache file"""
2993 repair.rebuildfncache(ui, repo)
2998 opts = pycompat.byteskwargs(opts)
2999 repair.rebuildfncache(ui, repo, opts.get(b"only_data"))
2994
3000
2995
3001
2996 @command(
3002 @command(
@@ -344,9 +344,6 b' class dirstate(object):'
344
344
345 iteritems = items
345 iteritems = items
346
346
347 def directories(self):
348 return self._map.directories()
349
350 def parents(self):
347 def parents(self):
351 return [self._validate(p) for p in self._pl]
348 return [self._validate(p) for p in self._pl]
352
349
@@ -387,10 +384,8 b' class dirstate(object):'
387 self._origpl = self._pl
384 self._origpl = self._pl
388 self._map.setparents(p1, p2)
385 self._map.setparents(p1, p2)
389 copies = {}
386 copies = {}
390 if (
387 nullid = self._nodeconstants.nullid
391 oldp2 != self._nodeconstants.nullid
388 if oldp2 != nullid and p2 == nullid:
392 and p2 == self._nodeconstants.nullid
393 ):
394 candidatefiles = self._map.non_normal_or_other_parent_paths()
389 candidatefiles = self._map.non_normal_or_other_parent_paths()
395
390
396 for f in candidatefiles:
391 for f in candidatefiles:
@@ -403,13 +398,24 b' class dirstate(object):'
403 source = self._map.copymap.get(f)
398 source = self._map.copymap.get(f)
404 if source:
399 if source:
405 copies[f] = source
400 copies[f] = source
406 self._normallookup(f)
401 self._map.reset_state(
402 f,
403 wc_tracked=True,
404 p1_tracked=True,
405 possibly_dirty=True,
406 )
407 # Also fix up otherparent markers
407 # Also fix up otherparent markers
408 elif s.from_p2:
408 elif s.from_p2:
409 source = self._map.copymap.get(f)
409 source = self._map.copymap.get(f)
410 if source:
410 if source:
411 copies[f] = source
411 copies[f] = source
412 self._add(f)
412 self._check_new_tracked_filename(f)
413 self._updatedfiles.add(f)
414 self._map.reset_state(
415 f,
416 p1_tracked=False,
417 wc_tracked=True,
418 )
413 return copies
419 return copies
414
420
415 def setbranch(self, branch):
421 def setbranch(self, branch):
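
Both fix-up branches now declare the target state explicitly instead of routing through `_normallookup`/`_add`. The two call shapes, shown against a toy stand-in for the map (the real `reset_state` lives in dirstatemap.py, further down in this patch):

    class ToyMap(dict):
        def reset_state(self, f, wc_tracked=False, p1_tracked=False,
                        p2_tracked=False, possibly_dirty=False):
            self[f] = dict(wc=wc_tracked, p1=p1_tracked,
                           p2=p2_tracked, dirty=possibly_dirty)

    dmap = ToyMap()
    # merged file kept by p1: still tracked, but content must be re-checked
    dmap.reset_state(b'f', wc_tracked=True, p1_tracked=True,
                     possibly_dirty=True)
    # file that existed only in the dropped p2: now a plain new tracked file
    dmap.reset_state(b'g', wc_tracked=True)
    assert dmap[b'f']['dirty'] and not dmap[b'g']['p1']
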
@@ -471,18 +477,12 b' class dirstate(object):'
471
477
472 return True the file was previously untracked, False otherwise.
478 return True the file was previously untracked, False otherwise.
473 """
479 """
480 self._dirty = True
481 self._updatedfiles.add(filename)
474 entry = self._map.get(filename)
482 entry = self._map.get(filename)
475 if entry is None:
483 if entry is None or not entry.tracked:
476 self._add(filename)
484 self._check_new_tracked_filename(filename)
477 return True
485 return self._map.set_tracked(filename)
478 elif not entry.tracked:
479 self._normallookup(filename)
480 return True
481 # XXX This is probably overkill for most cases, but we need this to
482 # fully replace the `normallookup` call with the `set_tracked` one.
483 # Consider smoothing this in the future.
484 self.set_possibly_dirty(filename)
485 return False
486
486
487 @requires_no_parents_change
487 @requires_no_parents_change
488 def set_untracked(self, filename):
488 def set_untracked(self, filename):
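
The simplified method reports whether the file was previously untracked and leaves the bookkeeping to `self._map.set_tracked`. The contract in toy form (a stand-in class, not the real dirstate):

    class Toy:
        def __init__(self):
            self.tracked = set()

        def set_tracked(self, f):
            """return True if the file was previously untracked"""
            new = f not in self.tracked
            self.tracked.add(f)
            return new

    d = Toy()
    assert d.set_tracked(b'x') is True   # newly tracked
    assert d.set_tracked(b'x') is False  # already tracked: effectively a no-op
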
@@ -493,22 +493,29 b' class dirstate(object):'
493
493
494 return True the file was previously tracked, False otherwise.
494 return True the file was previously tracked, False otherwise.
495 """
495 """
496 entry = self._map.get(filename)
496 ret = self._map.set_untracked(filename)
497 if entry is None:
497 if ret:
498 return False
498 self._dirty = True
499 elif entry.added:
499 self._updatedfiles.add(filename)
500 self._drop(filename)
500 return ret
501 return True
502 else:
503 self._remove(filename)
504 return True
505
501
506 @requires_no_parents_change
502 @requires_no_parents_change
507 def set_clean(self, filename, parentfiledata=None):
503 def set_clean(self, filename, parentfiledata=None):
508 """record that the current state of the file on disk is known to be clean"""
504 """record that the current state of the file on disk is known to be clean"""
509 self._dirty = True
505 self._dirty = True
510 self._updatedfiles.add(filename)
506 self._updatedfiles.add(filename)
511 self._normal(filename, parentfiledata=parentfiledata)
507 if parentfiledata:
508 (mode, size, mtime) = parentfiledata
509 else:
510 (mode, size, mtime) = self._get_filedata(filename)
511 if not self._map[filename].tracked:
512 self._check_new_tracked_filename(filename)
513 self._map.set_clean(filename, mode, size, mtime)
514 if mtime > self._lastnormaltime:
515 # Remember the most recent modification timeslot for status(),
516 # to make sure we won't miss future size-preserving file content
517 # modifications that happen within the same timeslot.
518 self._lastnormaltime = mtime
512
519
513 @requires_no_parents_change
520 @requires_no_parents_change
514 def set_possibly_dirty(self, filename):
521 def set_possibly_dirty(self, filename):
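
`set_clean` keeps the `_lastnormaltime` high-water mark: files recorded clean in the most recent timeslot stay suspect, because a same-second write would be invisible to an mtime comparison. A compact, self-contained sketch of the guard:

    import os
    import stat

    last_normal_time = 0

    def record_clean(path):
        """remember a path as clean, tracking the newest mtime seen"""
        global last_normal_time
        st = os.lstat(path)
        mtime = st[stat.ST_MTIME]
        if mtime > last_normal_time:
            # status() must re-check anything stamped with this newest
            # timeslot, since it may still change within the same second
            last_normal_time = mtime
        return (st.st_mode, st.st_size, mtime)

    record_clean('.')
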
@@ -546,7 +553,10 b' class dirstate(object):'
546 possibly_dirty = True
553 possibly_dirty = True
547 elif not (p1_tracked or wc_tracked):
554 elif not (p1_tracked or wc_tracked):
548 # the file is no longer relevant to anyone
555 # the file is no longer relevant to anyone
549 self._drop(filename)
556 if self._map.get(filename) is not None:
557 self._map.reset_state(filename)
558 self._dirty = True
559 self._updatedfiles.add(filename)
550 elif (not p1_tracked) and wc_tracked:
560 elif (not p1_tracked) and wc_tracked:
551 if entry is not None and entry.added:
561 if entry is not None and entry.added:
552 return # avoid dropping copy information (maybe?)
562 return # avoid dropping copy information (maybe?)
@@ -655,45 +665,21 b' class dirstate(object):'
655 # modifications that happen within the same timeslot.
665 # modifications that happen within the same timeslot.
656 self._lastnormaltime = parentfiledata[2]
666 self._lastnormaltime = parentfiledata[2]
657
667
658 def _addpath(
668 def _check_new_tracked_filename(self, filename):
659 self,
669 scmutil.checkfilename(filename)
660 f,
670 if self._map.hastrackeddir(filename):
661 mode=0,
671 msg = _(b'directory %r already in dirstate')
662 size=None,
672 msg %= pycompat.bytestr(filename)
663 mtime=None,
673 raise error.Abort(msg)
664 added=False,
674 # shadows
665 merged=False,
675 for d in pathutil.finddirs(filename):
666 from_p2=False,
676 if self._map.hastrackeddir(d):
667 possibly_dirty=False,
677 break
668 ):
678 entry = self._map.get(d)
669 entry = self._map.get(f)
679 if entry is not None and not entry.removed:
670 if added or entry is not None and entry.removed:
680 msg = _(b'file %r in dirstate clashes with %r')
671 scmutil.checkfilename(f)
681 msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
672 if self._map.hastrackeddir(f):
673 msg = _(b'directory %r already in dirstate')
674 msg %= pycompat.bytestr(f)
675 raise error.Abort(msg)
682 raise error.Abort(msg)
676 # shadows
677 for d in pathutil.finddirs(f):
678 if self._map.hastrackeddir(d):
679 break
680 entry = self._map.get(d)
681 if entry is not None and not entry.removed:
682 msg = _(b'file %r in dirstate clashes with %r')
683 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
684 raise error.Abort(msg)
685 self._dirty = True
686 self._updatedfiles.add(f)
687 self._map.addfile(
688 f,
689 mode=mode,
690 size=size,
691 mtime=mtime,
692 added=added,
693 merged=merged,
694 from_p2=from_p2,
695 possibly_dirty=possibly_dirty,
696 )
697
683
698 def _get_filedata(self, filename):
684 def _get_filedata(self, filename):
699 """return a (mode, size, mtime) tuple for `filename` from the filesystem"""
685 """return a (mode, size, mtime) tuple for `filename` from the filesystem"""
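
The extracted `_check_new_tracked_filename` performs two structural checks before a name may become tracked: the name must not already exist as a tracked directory, and no ancestor directory of the name may exist as a tracked file. The same walk in standalone form, with plain sets instead of the dirstate map:

    def finddirs(path):
        """yield ancestor directories of path, deepest first"""
        pos = path.rfind(b'/')
        while pos != -1:
            yield path[:pos]
            pos = path.rfind(b'/', 0, pos)

    def check_new_tracked(filename, tracked_files, tracked_dirs):
        if filename in tracked_dirs:
            raise ValueError(b'directory %s already in dirstate' % filename)
        for d in finddirs(filename):
            if d in tracked_dirs:
                break  # everything above is a directory: no clash possible
            if d in tracked_files:
                raise ValueError(b'file %s clashes with %s' % (d, filename))

    check_new_tracked(b'a/b/c', {b'x'}, {b'a', b'a/b'})  # passes
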
@@ -703,215 +689,6 b' class dirstate(object):'
703 mtime = s[stat.ST_MTIME]
689 mtime = s[stat.ST_MTIME]
704 return (mode, size, mtime)
690 return (mode, size, mtime)
705
691
706 def normal(self, f, parentfiledata=None):
707 """Mark a file normal and clean.
708
709 parentfiledata: (mode, size, mtime) of the clean file
710
711 parentfiledata should be computed from memory (for mode,
712 size), as or close as possible from the point where we
713 determined the file was clean, to limit the risk of the
714 file having been changed by an external process between the
715 moment where the file was determined to be clean and now."""
716 if self.pendingparentchange():
717 util.nouideprecwarn(
718 b"do not use `normal` inside of update/merge context."
719 b" Use `update_file` or `update_file_p1`",
720 b'6.0',
721 stacklevel=2,
722 )
723 else:
724 util.nouideprecwarn(
725 b"do not use `normal` outside of update/merge context."
726 b" Use `set_tracked`",
727 b'6.0',
728 stacklevel=2,
729 )
730 self._normal(f, parentfiledata=parentfiledata)
731
732 def _normal(self, f, parentfiledata=None):
733 if parentfiledata:
734 (mode, size, mtime) = parentfiledata
735 else:
736 (mode, size, mtime) = self._get_filedata(f)
737 self._addpath(f, mode=mode, size=size, mtime=mtime)
738 self._map.copymap.pop(f, None)
739 if f in self._map.nonnormalset:
740 self._map.nonnormalset.remove(f)
741 if mtime > self._lastnormaltime:
742 # Remember the most recent modification timeslot for status(),
743 # to make sure we won't miss future size-preserving file content
744 # modifications that happen within the same timeslot.
745 self._lastnormaltime = mtime
746
747 def normallookup(self, f):
748 '''Mark a file normal, but possibly dirty.'''
749 if self.pendingparentchange():
750 util.nouideprecwarn(
751 b"do not use `normallookup` inside of update/merge context."
752 b" Use `update_file` or `update_file_p1`",
753 b'6.0',
754 stacklevel=2,
755 )
756 else:
757 util.nouideprecwarn(
758 b"do not use `normallookup` outside of update/merge context."
759 b" Use `set_possibly_dirty` or `set_tracked`",
760 b'6.0',
761 stacklevel=2,
762 )
763 self._normallookup(f)
764
765 def _normallookup(self, f):
766 '''Mark a file normal, but possibly dirty.'''
767 if self.in_merge:
768 # if there is a merge going on and the file was either
769 # "merged" or coming from other parent (-2) before
770 # being removed, restore that state.
771 entry = self._map.get(f)
772 if entry is not None:
773 # XXX this should probably be dealt with at a lower level
774 # (see `merged_removed` and `from_p2_removed`)
775 if entry.merged_removed or entry.from_p2_removed:
776 source = self._map.copymap.get(f)
777 if entry.merged_removed:
778 self._merge(f)
779 elif entry.from_p2_removed:
780 self._otherparent(f)
781 if source is not None:
782 self.copy(source, f)
783 return
784 elif entry.merged or entry.from_p2:
785 return
786 self._addpath(f, possibly_dirty=True)
787 self._map.copymap.pop(f, None)
788
789 def otherparent(self, f):
790 '''Mark as coming from the other parent, always dirty.'''
791 if self.pendingparentchange():
792 util.nouideprecwarn(
793 b"do not use `otherparent` inside of update/merge context."
794 b" Use `update_file` or `update_file_p1`",
795 b'6.0',
796 stacklevel=2,
797 )
798 else:
799 util.nouideprecwarn(
800 b"do not use `otherparent` outside of update/merge context."
801 b"It should have been set by the update/merge code",
802 b'6.0',
803 stacklevel=2,
804 )
805 self._otherparent(f)
806
807 def _otherparent(self, f):
808 if not self.in_merge:
809 msg = _(b"setting %r to other parent only allowed in merges") % f
810 raise error.Abort(msg)
811 entry = self._map.get(f)
812 if entry is not None and entry.tracked:
813 # merge-like
814 self._addpath(f, merged=True)
815 else:
816 # add-like
817 self._addpath(f, from_p2=True)
818 self._map.copymap.pop(f, None)
819
820 def add(self, f):
821 '''Mark a file added.'''
822 if self.pendingparentchange():
823 util.nouideprecwarn(
824 b"do not use `add` inside of update/merge context."
825 b" Use `update_file`",
826 b'6.0',
827 stacklevel=2,
828 )
829 else:
830 util.nouideprecwarn(
831 b"do not use `add` outside of update/merge context."
832 b" Use `set_tracked`",
833 b'6.0',
834 stacklevel=2,
835 )
836 self._add(f)
837
838 def _add(self, filename):
839 """internal function to mark a file as added"""
840 self._addpath(filename, added=True)
841 self._map.copymap.pop(filename, None)
842
843 def remove(self, f):
844 '''Mark a file removed'''
845 if self.pendingparentchange():
846 util.nouideprecwarn(
847 b"do not use `remove` inside of update/merge context."
848 b" Use `update_file` or `update_file_p1`",
849 b'6.0',
850 stacklevel=2,
851 )
852 else:
853 util.nouideprecwarn(
854 b"do not use `remove` outside of update/merge context."
855 b" Use `set_untracked`",
856 b'6.0',
857 stacklevel=2,
858 )
859 self._remove(f)
860
861 def _remove(self, filename):
862 """internal function to mark a file removed"""
863 self._dirty = True
864 self._updatedfiles.add(filename)
865 self._map.removefile(filename, in_merge=self.in_merge)
866
867 def merge(self, f):
868 '''Mark a file merged.'''
869 if self.pendingparentchange():
870 util.nouideprecwarn(
871 b"do not use `merge` inside of update/merge context."
872 b" Use `update_file`",
873 b'6.0',
874 stacklevel=2,
875 )
876 else:
877 util.nouideprecwarn(
878 b"do not use `merge` outside of update/merge context."
879 b"It should have been set by the update/merge code",
880 b'6.0',
881 stacklevel=2,
882 )
883 self._merge(f)
884
885 def _merge(self, f):
886 if not self.in_merge:
887 return self._normallookup(f)
888 return self._otherparent(f)
889
890 def drop(self, f):
891 '''Drop a file from the dirstate'''
892 if self.pendingparentchange():
893 util.nouideprecwarn(
894 b"do not use `drop` inside of update/merge context."
895 b" Use `update_file`",
896 b'6.0',
897 stacklevel=2,
898 )
899 else:
900 util.nouideprecwarn(
901 b"do not use `drop` outside of update/merge context."
902 b" Use `set_untracked`",
903 b'6.0',
904 stacklevel=2,
905 )
906 self._drop(f)
907
908 def _drop(self, filename):
909 """internal function to drop a file from the dirstate"""
910 if self._map.dropfile(filename):
911 self._dirty = True
912 self._updatedfiles.add(filename)
913 self._map.copymap.pop(filename, None)
914
915 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
692 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
916 if exists is None:
693 if exists is None:
917 exists = os.path.lexists(os.path.join(self._root, path))
694 exists = os.path.lexists(os.path.join(self._root, path))
@@ -1022,9 +799,20 b' class dirstate(object):'
1022 self._map.setparents(parent, self._nodeconstants.nullid)
799 self._map.setparents(parent, self._nodeconstants.nullid)
1023
800
1024 for f in to_lookup:
801 for f in to_lookup:
1025 self._normallookup(f)
802
803 if self.in_merge:
804 self.set_tracked(f)
805 else:
806 self._map.reset_state(
807 f,
808 wc_tracked=True,
809 p1_tracked=True,
810 possibly_dirty=True,
811 )
812 self._updatedfiles.add(f)
1026 for f in to_drop:
813 for f in to_drop:
1027 self._drop(f)
814 self._map.reset_state(f)
815 self._updatedfiles.add(f)
1028
816
1029 self._dirty = True
817 self._dirty = True
1030
818
@@ -29,16 +29,6 b' propertycache = util.propertycache'
29
29
30 DirstateItem = parsers.DirstateItem
30 DirstateItem = parsers.DirstateItem
31
31
32
33 # a special value used internally for `size` if the file comes from the other parent
34 FROM_P2 = -2
35
36 # a special value used internally for `size` if the file is modified/merged/added
37 NONNORMAL = -1
38
39 # a special value used internally for `time` if the time is ambiguous
40 AMBIGUOUS_TIME = -1
41
42 rangemask = 0x7FFFFFFF
32 rangemask = 0x7FFFFFFF
43
33
44
34
@@ -56,8 +46,14 b' class dirstatemap(object):'
56 - the state map maps filenames to tuples of (state, mode, size, mtime),
46 - the state map maps filenames to tuples of (state, mode, size, mtime),
57 where state is a single character representing 'normal', 'added',
47 where state is a single character representing 'normal', 'added',
58 'removed', or 'merged'. It is read by treating the dirstate as a
48 'removed', or 'merged'. It is read by treating the dirstate as a
59 dict. File state is updated by calling the `addfile`, `removefile` and
49 dict. File state is updated by calling various methods (see each
60 `dropfile` methods.
50 method's documentation for details):
51
52 - `reset_state`
53 - `set_tracked`
54 - `set_untracked`
55 - `set_clean`
56 - `set_possibly_dirty`
61
57
62 - `copymap` maps destination filenames to their source filename.
58 - `copymap` maps destination filenames to their source filename.
63
59
@@ -122,7 +118,14 b' class dirstatemap(object):'
122 # forward for python2,3 compat
118 # forward for python2,3 compat
123 iteritems = items
119 iteritems = items
124
120
125 debug_iter = items
121 def debug_iter(self, all):
122 """
123 Return an iterator of (filename, state, mode, size, mtime) tuples
124
125 `all` is unused when Rust is not enabled
126 """
127 for (filename, item) in self.items():
128 yield (filename, item.state, item.mode, item.size, item.mtime)
126
129
127 def __len__(self):
130 def __len__(self):
128 return len(self._map)
131 return len(self._map)
@@ -172,65 +175,20 b' class dirstatemap(object):'
172 """record that the current state of the file on disk is unknown"""
175 """record that the current state of the file on disk is unknown"""
173 self[filename].set_possibly_dirty()
176 self[filename].set_possibly_dirty()
174
177
175 def addfile(
178 def set_clean(self, filename, mode, size, mtime):
176 self,
179 """mark a file as back to a clean state"""
177 f,
180 entry = self[filename]
178 mode=0,
181 mtime = mtime & rangemask
179 size=None,
182 size = size & rangemask
180 mtime=None,
183 entry.set_clean(mode, size, mtime)
181 added=False,
184 self.copymap.pop(filename, None)
182 merged=False,
185 self.nonnormalset.discard(filename)
183 from_p2=False,
184 possibly_dirty=False,
185 ):
186 """Add a tracked file to the dirstate."""
187 if added:
188 assert not merged
189 assert not possibly_dirty
190 assert not from_p2
191 state = b'a'
192 size = NONNORMAL
193 mtime = AMBIGUOUS_TIME
194 elif merged:
195 assert not possibly_dirty
196 assert not from_p2
197 state = b'm'
198 size = FROM_P2
199 mtime = AMBIGUOUS_TIME
200 elif from_p2:
201 assert not possibly_dirty
202 state = b'n'
203 size = FROM_P2
204 mtime = AMBIGUOUS_TIME
205 elif possibly_dirty:
206 state = b'n'
207 size = NONNORMAL
208 mtime = AMBIGUOUS_TIME
209 else:
210 assert size != FROM_P2
211 assert size != NONNORMAL
212 assert size is not None
213 assert mtime is not None
214
215 state = b'n'
216 size = size & rangemask
217 mtime = mtime & rangemask
218 assert state is not None
219 assert size is not None
220 assert mtime is not None
221 old_entry = self.get(f)
222 self._dirs_incr(f, old_entry)
223 e = self._map[f] = DirstateItem(state, mode, size, mtime)
224 if e.dm_nonnormal:
225 self.nonnormalset.add(f)
226 if e.dm_otherparent:
227 self.otherparentset.add(f)
228
186
229 def reset_state(
187 def reset_state(
230 self,
188 self,
231 filename,
189 filename,
232 wc_tracked,
190 wc_tracked=False,
233 p1_tracked,
191 p1_tracked=False,
234 p2_tracked=False,
192 p2_tracked=False,
235 merged=False,
193 merged=False,
236 clean_p1=False,
194 clean_p1=False,
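
Note the `rangemask` (0x7FFFFFFF) applied to both `size` and `mtime` in `set_clean` above: the v1 format keeps these as signed 31-bit-safe values, with the negative range reserved for sentinels. Two quick examples of the fold:

    rangemask = 0x7FFFFFFF  # 2**31 - 1

    mtime = 2**33 + 5  # e.g. a far-future timestamp
    size = 2**32 + 7   # e.g. a 4 GiB+ file
    assert mtime & rangemask == 5
    assert size & rangemask == 7
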
@@ -255,26 +213,25 b' class dirstatemap(object):'
255 self.copymap.pop(filename, None)
213 self.copymap.pop(filename, None)
256
214
257 if not (p1_tracked or p2_tracked or wc_tracked):
215 if not (p1_tracked or p2_tracked or wc_tracked):
258 self.dropfile(filename)
216 old_entry = self._map.pop(filename, None)
217 self._dirs_decr(filename, old_entry=old_entry)
218 self.nonnormalset.discard(filename)
219 self.copymap.pop(filename, None)
220 return
259 elif merged:
221 elif merged:
260 # XXX might be merged and removed ?
222 # XXX might be merged and removed ?
261 entry = self.get(filename)
223 entry = self.get(filename)
262 if entry is not None and entry.tracked:
224 if entry is None or not entry.tracked:
263 # XXX mostly replicate dirstate.other parent. We should get
225 # XXX mostly replicate dirstate.other parent. We should get
264 # the higher layer to pass us more reliable data where `merged`
226 # the higher layer to pass us more reliable data where `merged`
265 # actually means merged. Dropping the else clause will show
227 # actually means merged. Dropping this clause will show failure
266 # failure in `test-graft.t`
228 # in `test-graft.t`
267 self.addfile(filename, merged=True)
229 merged = False
268 else:
230 clean_p2 = True
269 self.addfile(filename, from_p2=True)
270 elif not (p1_tracked or p2_tracked) and wc_tracked:
231 elif not (p1_tracked or p2_tracked) and wc_tracked:
271 self.addfile(filename, added=True, possibly_dirty=possibly_dirty)
232 pass # file is added, nothing special to adjust
272 elif (p1_tracked or p2_tracked) and not wc_tracked:
233 elif (p1_tracked or p2_tracked) and not wc_tracked:
273 # XXX might be merged and removed ?
234 pass
274 old_entry = self._map.get(filename)
275 self._dirs_decr(filename, old_entry=old_entry, remove_variant=True)
276 self._map[filename] = DirstateItem(b'r', 0, 0, 0)
277 self.nonnormalset.add(filename)
278 elif clean_p2 and wc_tracked:
235 elif clean_p2 and wc_tracked:
279 if p1_tracked or self.get(filename) is not None:
236 if p1_tracked or self.get(filename) is not None:
280 # XXX the `self.get` call is catching some case in
237 # XXX the `self.get` call is catching some case in
@@ -284,62 +241,91 b' class dirstatemap(object):'
284 # In addition, this seems to be a case where the file is marked
241 # In addition, this seems to be a case where the file is marked
285 # as merged without actually being the result of a merge
242 # as merged without actually being the result of a merge
286 # action. So things are not ideal here.
243 # action. So things are not ideal here.
287 self.addfile(filename, merged=True)
244 merged = True
288 else:
245 clean_p2 = False
289 self.addfile(filename, from_p2=True)
290 elif not p1_tracked and p2_tracked and wc_tracked:
246 elif not p1_tracked and p2_tracked and wc_tracked:
291 self.addfile(filename, from_p2=True, possibly_dirty=possibly_dirty)
247 clean_p2 = True
292 elif possibly_dirty:
248 elif possibly_dirty:
293 self.addfile(filename, possibly_dirty=possibly_dirty)
249 pass
294 elif wc_tracked:
250 elif wc_tracked:
295 # this is a "normal" file
251 # this is a "normal" file
296 if parentfiledata is None:
252 if parentfiledata is None:
297 msg = b'failed to pass parentfiledata for a normal file: %s'
253 msg = b'failed to pass parentfiledata for a normal file: %s'
298 msg %= filename
254 msg %= filename
299 raise error.ProgrammingError(msg)
255 raise error.ProgrammingError(msg)
300 mode, size, mtime = parentfiledata
301 self.addfile(filename, mode=mode, size=size, mtime=mtime)
302 self.nonnormalset.discard(filename)
303 else:
256 else:
304 assert False, 'unreachable'
257 assert False, 'unreachable'
305
258
306 def removefile(self, f, in_merge=False):
259 old_entry = self._map.get(filename)
307 """
260 self._dirs_incr(filename, old_entry)
308 Mark a file as removed in the dirstate.
261 entry = DirstateItem(
262 wc_tracked=wc_tracked,
263 p1_tracked=p1_tracked,
264 p2_tracked=p2_tracked,
265 merged=merged,
266 clean_p1=clean_p1,
267 clean_p2=clean_p2,
268 possibly_dirty=possibly_dirty,
269 parentfiledata=parentfiledata,
270 )
271 if entry.dm_nonnormal:
272 self.nonnormalset.add(filename)
273 else:
274 self.nonnormalset.discard(filename)
275 if entry.dm_otherparent:
276 self.otherparentset.add(filename)
277 else:
278 self.otherparentset.discard(filename)
279 self._map[filename] = entry
309
280
310 The `size` parameter is used to store sentinel values that indicate
281 def set_tracked(self, filename):
311 the file's previous state. In the future, we should refactor this
282 new = False
312 to be more explicit about what that state is.
283 entry = self.get(filename)
313 """
284 if entry is None:
285 self._dirs_incr(filename)
286 entry = DirstateItem(
287 p1_tracked=False,
288 p2_tracked=False,
289 wc_tracked=True,
290 merged=False,
291 clean_p1=False,
292 clean_p2=False,
293 possibly_dirty=False,
294 parentfiledata=None,
295 )
296 self._map[filename] = entry
297 if entry.dm_nonnormal:
298 self.nonnormalset.add(filename)
299 new = True
300 elif not entry.tracked:
301 self._dirs_incr(filename, entry)
302 entry.set_tracked()
303 new = True
304 else:
305 # XXX This is probably overkill for most cases, but we need this to
306 # fully replace the `normallookup` call with the `set_tracked` one.
307 # Consider smoothing this in the future.
308 self.set_possibly_dirty(filename)
309 return new
310
311 def set_untracked(self, f):
312 """Mark a file as no longer tracked in the dirstate map"""
314 entry = self.get(f)
313 entry = self.get(f)
315 size = 0
314 if entry is None:
316 if in_merge:
315 return False
317 # XXX we should not be able to have 'm' state and 'FROM_P2' if not
316 else:
318 # during a merge. So I (marmoute) am not sure we need the
317 self._dirs_decr(f, old_entry=entry, remove_variant=not entry.added)
319 # conditional at all. Adding an assert to double-check this
318 if not entry.merged:
320 # would be nice.
319 self.copymap.pop(f, None)
321 if entry is not None:
320 if entry.added:
322 # backup the previous state
321 self.nonnormalset.discard(f)
323 if entry.merged: # merge
322 self._map.pop(f, None)
324 size = NONNORMAL
323 else:
325 elif entry.from_p2:
324 self.nonnormalset.add(f)
326 size = FROM_P2
325 if entry.from_p2:
327 self.otherparentset.add(f)
326 self.otherparentset.add(f)
328 if entry is not None and not (entry.merged or entry.from_p2):
327 entry.set_untracked()
329 self.copymap.pop(f, None)
328 return True
330 self._dirs_decr(f, old_entry=entry, remove_variant=True)
331 self._map[f] = DirstateItem(b'r', 0, size, 0)
332 self.nonnormalset.add(f)
333
334 def dropfile(self, f):
335 """
336 Remove a file from the dirstate. Returns True if the file was
337 previously recorded.
338 """
339 old_entry = self._map.pop(f, None)
340 self._dirs_decr(f, old_entry=old_entry)
341 self.nonnormalset.discard(f)
342 return old_entry is not None
343
329
344 def clearambiguoustimes(self, files, now):
330 def clearambiguoustimes(self, files, now):
345 for f in files:
331 for f in files:
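
Every path that builds a fresh entry now re-derives `nonnormalset`/`otherparentset` membership from the `dm_*` properties rather than adding names ad hoc. The invariant being maintained, in miniature (a toy entry type standing in for `DirstateItem`):

    class Entry:
        def __init__(self, state, size, mtime):
            self.state, self.size, self.mtime = state, size, mtime

        @property
        def dm_nonnormal(self):
            return self.state != b'n' or self.mtime == -1

        @property
        def dm_otherparent(self):
            return self.size == -2

    def store(the_map, nonnormal, otherparent, f, entry):
        # membership always mirrors the freshly computed predicates
        (nonnormal.add if entry.dm_nonnormal else nonnormal.discard)(f)
        (otherparent.add if entry.dm_otherparent else otherparent.discard)(f)
        the_map[f] = entry

    m, nn, op = {}, set(), set()
    store(m, nn, op, b'f', Entry(b'n', -2, 10))
    assert b'f' in op and b'f' not in nn
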
@@ -400,7 +386,7 b' class dirstatemap(object):'
400
386
401 @propertycache
387 @propertycache
402 def _dirs(self):
388 def _dirs(self):
403 return pathutil.dirs(self._map, b'r')
389 return pathutil.dirs(self._map, only_tracked=True)
404
390
405 @propertycache
391 @propertycache
406 def _alldirs(self):
392 def _alldirs(self):
@@ -572,7 +558,7 b' if rustmod is not None:'
572 from_p2=False,
558 from_p2=False,
573 possibly_dirty=False,
559 possibly_dirty=False,
574 ):
560 ):
575 return self._rustmap.addfile(
561 ret = self._rustmap.addfile(
576 f,
562 f,
577 mode,
563 mode,
578 size,
564 size,
@@ -582,12 +568,15 b' if rustmod is not None:'
582 from_p2,
568 from_p2,
583 possibly_dirty,
569 possibly_dirty,
584 )
570 )
571 if added:
572 self.copymap.pop(f, None)
573 return ret
585
574
586 def reset_state(
575 def reset_state(
587 self,
576 self,
588 filename,
577 filename,
589 wc_tracked,
578 wc_tracked=False,
590 p1_tracked,
579 p1_tracked=False,
591 p2_tracked=False,
580 p2_tracked=False,
592 merged=False,
581 merged=False,
593 clean_p1=False,
582 clean_p1=False,
@@ -632,7 +621,7 b' if rustmod is not None:'
632 )
621 )
633 elif (p1_tracked or p2_tracked) and not wc_tracked:
622 elif (p1_tracked or p2_tracked) and not wc_tracked:
634 # XXX might be merged and removed ?
623 # XXX might be merged and removed ?
635 self[filename] = DirstateItem(b'r', 0, 0, 0)
624 self[filename] = DirstateItem.from_v1_data(b'r', 0, 0, 0)
636 self.nonnormalset.add(filename)
625 self.nonnormalset.add(filename)
637 elif clean_p2 and wc_tracked:
626 elif clean_p2 and wc_tracked:
638 if p1_tracked or self.get(filename) is not None:
627 if p1_tracked or self.get(filename) is not None:
@@ -664,11 +653,46 b' if rustmod is not None:'
664 else:
653 else:
665 assert False, 'unreachable'
654 assert False, 'unreachable'
666
655
656 def set_tracked(self, filename):
657 new = False
658 entry = self.get(filename)
659 if entry is None:
660 self.addfile(filename, added=True)
661 new = True
662 elif not entry.tracked:
663 entry.set_tracked()
664 self._rustmap.set_v1(filename, entry)
665 new = True
666 else:
667 # XXX This is probably overkill for most cases, but we need this to
668 # fully replace the `normallookup` call with the `set_tracked` one.
669 # Consider smoothing this in the future.
670 self.set_possibly_dirty(filename)
671 return new
672
673 def set_untracked(self, f):
674 """Mark a file as no longer tracked in the dirstate map"""
675 # `in_merge` only triggers more logic, so it is fine to always pass it.
676 #
677 # The inner Rust dirstate map code needs to be adjusted once the API
678 # for dirstate/dirstatemap/DirstateItem is a bit more settled.
679 entry = self.get(f)
680 if entry is None:
681 return False
682 else:
683 if entry.added:
684 self._rustmap.copymap().pop(f, None)
685 self._rustmap.dropfile(f)
686 else:
687 self._rustmap.removefile(f, in_merge=True)
688 return True
689
667 def removefile(self, *args, **kwargs):
690 def removefile(self, *args, **kwargs):
668 return self._rustmap.removefile(*args, **kwargs)
691 return self._rustmap.removefile(*args, **kwargs)
669
692
670 def dropfile(self, *args, **kwargs):
693 def dropfile(self, f, *args, **kwargs):
671 return self._rustmap.dropfile(*args, **kwargs)
694 self._rustmap.copymap().pop(f, None)
695 return self._rustmap.dropfile(f, *args, **kwargs)
672
696
673 def clearambiguoustimes(self, *args, **kwargs):
697 def clearambiguoustimes(self, *args, **kwargs):
674 return self._rustmap.clearambiguoustimes(*args, **kwargs)
698 return self._rustmap.clearambiguoustimes(*args, **kwargs)
@@ -683,11 +707,15 b' if rustmod is not None:'
683 def copymap(self):
707 def copymap(self):
684 return self._rustmap.copymap()
708 return self._rustmap.copymap()
685
709
686 def directories(self):
710 def debug_iter(self, all):
687 return self._rustmap.directories()
711 """
712 Return an iterator of (filename, state, mode, size, mtime) tuples
688
713
689 def debug_iter(self):
714 `all`: also include with `state == b' '` dirstate tree nodes that
690 return self._rustmap.debug_iter()
715 don't have an associated `DirstateItem`.
716
717 """
718 return self._rustmap.debug_iter(all)
691
719
692 def preload(self):
720 def preload(self):
693 self._rustmap
721 self._rustmap
@@ -920,6 +948,15 b' if rustmod is not None:'
920 entry.set_possibly_dirty()
948 entry.set_possibly_dirty()
921 self._rustmap.set_v1(filename, entry)
949 self._rustmap.set_v1(filename, entry)
922
950
951 def set_clean(self, filename, mode, size, mtime):
952 """mark a file as back to a clean state"""
953 entry = self[filename]
954 mtime = mtime & rangemask
955 size = size & rangemask
956 entry.set_clean(mode, size, mtime)
957 self._rustmap.set_v1(filename, entry)
958 self._rustmap.copymap().pop(filename, None)
959
923 def __setitem__(self, key, value):
960 def __setitem__(self, key, value):
924 assert isinstance(value, DirstateItem)
961 assert isinstance(value, DirstateItem)
925 self._rustmap.set_v1(key, value)
962 self._rustmap.set_v1(key, value)
@@ -34,7 +34,7 b' HEADER = struct.Struct('
34
34
35
35
36 class DirstateDocket(object):
36 class DirstateDocket(object):
37 data_filename_pattern = b'dirstate.%s.d'
37 data_filename_pattern = b'dirstate.%s'
38
38
39 def __init__(self, parents, data_size, tree_metadata, uuid):
39 def __init__(self, parents, data_size, tree_metadata, uuid):
40 self.parents = parents
40 self.parents = parents
@@ -240,7 +240,9 b' def fromlocal(s):'
240 b"decoding near '%s': %s!" % (sub, pycompat.bytestr(inst))
240 b"decoding near '%s': %s!" % (sub, pycompat.bytestr(inst))
241 )
241 )
242 except LookupError as k:
242 except LookupError as k:
243 raise error.Abort(k, hint=b"please check your locale settings")
243 raise error.Abort(
244 pycompat.bytestr(k), hint=b"please check your locale settings"
245 )
244
246
245
247
246 def unitolocal(u):
248 def unitolocal(u):
@@ -306,7 +308,9 b' def lower(s):'
306 except UnicodeError:
308 except UnicodeError:
307 return s.lower() # we don't know how to fold this except in ASCII
309 return s.lower() # we don't know how to fold this except in ASCII
308 except LookupError as k:
310 except LookupError as k:
309 raise error.Abort(k, hint=b"please check your locale settings")
311 raise error.Abort(
312 pycompat.bytestr(k), hint=b"please check your locale settings"
313 )
310
314
311
315
312 def upper(s):
316 def upper(s):
@@ -333,7 +337,9 b' def upperfallback(s):'
333 except UnicodeError:
337 except UnicodeError:
334 return s.upper() # we don't know how to fold this except in ASCII
338 return s.upper() # we don't know how to fold this except in ASCII
335 except LookupError as k:
339 except LookupError as k:
336 raise error.Abort(k, hint=b"please check your locale settings")
340 raise error.Abort(
341 pycompat.bytestr(k), hint=b"please check your locale settings"
342 )
337
343
338
344
339 if not _nativeenviron:
345 if not _nativeenviron:
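
All three call sites fix the same mismatch: `LookupError` carries a `str` message while `error.Abort` wants bytes, so the exception is coerced first. The pattern in isolation, using `str.encode` where Mercurial uses `pycompat.bytestr`:

    def bytes_message(exc):
        """coerce an exception's message to bytes for a bytes-only error"""
        return str(exc).encode('utf-8', 'replace')

    try:
        u'x'.encode('no-such-codec')
    except LookupError as k:
        assert isinstance(bytes_message(k), bytes)
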
@@ -224,8 +224,12 b' def load(ui, name, path, loadingtime=Non'
224 minver = getattr(mod, 'minimumhgversion', None)
224 minver = getattr(mod, 'minimumhgversion', None)
225 if minver:
225 if minver:
226 curver = util.versiontuple(n=2)
226 curver = util.versiontuple(n=2)
227 extmin = util.versiontuple(stringutil.forcebytestr(minver), 2)
227
228
228 if None in curver or util.versiontuple(minver, 2) > curver:
229 if None in extmin:
230 extmin = (extmin[0] or 0, extmin[1] or 0)
231
232 if None in curver or extmin > curver:
229 msg = _(
233 msg = _(
230 b'(third party extension %s requires version %s or newer '
234 b'(third party extension %s requires version %s or newer '
231 b'of Mercurial (current: %s); disabling)\n'
235 b'of Mercurial (current: %s); disabling)\n'
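
`util.versiontuple(..., 2)` may return `None` components for unparsable versions, and on Python 3 a tuple containing `None` cannot be compared with integers, hence the normalization before the comparison. A sketch of the guard:

    def normalize2(vt):
        """replace None components of a 2-tuple version with 0"""
        if None in vt:
            vt = (vt[0] or 0, vt[1] or 0)
        return vt

    assert normalize2((4, None)) == (4, 0)
    assert normalize2((4, None)) < (5, 9)  # would raise TypeError unnormalized
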
@@ -418,6 +418,16 b' Supported arguments:'
418 If no suitable authentication entry is found, the user is prompted
418 If no suitable authentication entry is found, the user is prompted
419 for credentials as usual if required by the remote.
419 for credentials as usual if required by the remote.
420
420
421 ``bookmarks``
422 -------------
423
424 Controls some aspects of bookmarks.
425
426 ``mirror``
427 When pulling, instead of merging local bookmarks and remote bookmarks,
428 replace local bookmarks with remote bookmarks. This is useful to replicate
429 a repository, or as an optimization. (default: False)
430
421 ``cmdserver``
431 ``cmdserver``
422 -------------
432 -------------
423
433
@@ -285,6 +285,7 b' class hgwebdir(object):'
285 self.lastrefresh = 0
285 self.lastrefresh = 0
286 self.motd = None
286 self.motd = None
287 self.refresh()
287 self.refresh()
288 self.requests_count = 0
288 if not baseui:
289 if not baseui:
289 # set up environment for new ui
290 # set up environment for new ui
290 extensions.loadall(self.ui)
291 extensions.loadall(self.ui)
@@ -341,6 +342,10 b' class hgwebdir(object):'
341
342
342 self.repos = repos
343 self.repos = repos
343 self.ui = u
344 self.ui = u
345 self.gc_full_collect_rate = self.ui.configint(
346 b'experimental', b'web.full-garbage-collection-rate'
347 )
348 self.gc_full_collections_done = 0
344 encoding.encoding = self.ui.config(b'web', b'encoding')
349 encoding.encoding = self.ui.config(b'web', b'encoding')
345 self.style = self.ui.config(b'web', b'style')
350 self.style = self.ui.config(b'web', b'style')
346 self.templatepath = self.ui.config(
351 self.templatepath = self.ui.config(
@@ -383,12 +388,27 b' class hgwebdir(object):'
383 finally:
388 finally:
384 # There are known cycles in localrepository that prevent
389 # There are known cycles in localrepository that prevent
385 # those objects (and tons of held references) from being
390 # those objects (and tons of held references) from being
386 # collected through normal refcounting. We mitigate those
391 # collected through normal refcounting.
387 # leaks by performing an explicit GC on every request.
392 # In some cases, the resulting memory consumption can
388 # TODO remove this once leaks are fixed.
393 # be tamed by performing explicit garbage collections.
389 # TODO only run this on requests that create localrepository
394 # In presence of actual leaks or big long-lived caches, the
390 # instances instead of every request.
395 # impact on performance of such collections can become a
391 gc.collect()
396 # problem, hence the rate shouldn't be set too low.
397 # See "Collecting the oldest generation" in
398 # https://devguide.python.org/garbage_collector
399 # for more about such trade-offs.
400 rate = self.gc_full_collect_rate
401
402 # this is not thread safe, but the consequence (skipping
403 # a garbage collection) is arguably better than risking
404 # having several threads perform a collection in parallel
405 # (a long, useless wait on all threads).
406 self.requests_count += 1
407 if rate > 0 and self.requests_count % rate == 0:
408 gc.collect()
409 self.gc_full_collections_done += 1
410 else:
411 gc.collect(generation=1)
392
412
393 def _runwsgi(self, req, res):
413 def _runwsgi(self, req, res):
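
The per-request full `gc.collect()` becomes tunable: every request still runs a cheap generation-1 collection, and the expensive full pass fires only every `rate` requests (rate 1 preserves the old behavior, rate 0 disables full collections). The same scheme in isolation:

    import gc

    class Collector:
        def __init__(self, rate):
            self.rate = rate  # one full collection per `rate` requests
            self.count = 0

        def after_request(self):
            self.count += 1  # not thread safe; worst case one pass is skipped
            if self.rate > 0 and self.count % self.rate == 0:
                gc.collect()  # full collection, oldest generation included
            else:
                gc.collect(generation=1)

    c = Collector(rate=10)
    for _ in range(25):
        c.after_request()  # 2 full collections, 23 generation-1 ones
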
394 try:
414 try:
@@ -132,36 +132,6 b' class idirstate(interfaceutil.Interface)'
132 def copies():
132 def copies():
133 pass
133 pass
134
134
135 def normal(f, parentfiledata=None):
136 """Mark a file normal and clean.
137
138 parentfiledata: (mode, size, mtime) of the clean file
139
140 parentfiledata should be computed from memory (for mode,
141 size), as or close as possible from the point where we
142 determined the file was clean, to limit the risk of the
143 file having been changed by an external process between the
144 moment where the file was determined to be clean and now."""
145 pass
146
147 def normallookup(f):
148 '''Mark a file normal, but possibly dirty.'''
149
150 def otherparent(f):
151 '''Mark as coming from the other parent, always dirty.'''
152
153 def add(f):
154 '''Mark a file added.'''
155
156 def remove(f):
157 '''Mark a file removed.'''
158
159 def merge(f):
160 '''Mark a file merged.'''
161
162 def drop(f):
163 '''Drop a file from the dirstate'''
164
165 def normalize(path, isknown=False, ignoremissing=False):
135 def normalize(path, isknown=False, ignoremissing=False):
166 """
136 """
167 normalize the case of a pathname when on a casefolding filesystem
137 normalize the case of a pathname when on a casefolding filesystem
@@ -46,13 +46,12 b' if pycompat.TYPE_CHECKING:'
46 Any,
46 Any,
47 Callable,
47 Callable,
48 Dict,
48 Dict,
49 List,
50 Optional,
49 Optional,
51 Sequence,
50 Sequence,
52 Tuple,
51 Tuple,
53 )
52 )
54
53
55 for t in (Any, Callable, Dict, List, Optional, Tuple):
54 for t in (Any, Callable, Dict, Optional, Tuple):
56 assert t
55 assert t
57
56
58
57
@@ -714,43 +713,43 b' class walkopts(object):'
714 """
713 """
715
714
716 # raw command-line parameters, which a matcher will be built from
715 # raw command-line parameters, which a matcher will be built from
717 pats = attr.ib() # type: List[bytes]
716 pats = attr.ib()
718 opts = attr.ib() # type: Dict[bytes, Any]
717 opts = attr.ib()
719
718
720 # a list of revset expressions to be traversed; if follow, it specifies
719 # a list of revset expressions to be traversed; if follow, it specifies
721 # the start revisions
720 # the start revisions
722 revspec = attr.ib() # type: List[bytes]
721 revspec = attr.ib()
723
722
724 # miscellaneous queries to filter revisions (see "hg help log" for details)
723 # miscellaneous queries to filter revisions (see "hg help log" for details)
725 bookmarks = attr.ib(default=attr.Factory(list)) # type: List[bytes]
724 bookmarks = attr.ib(default=attr.Factory(list))
726 branches = attr.ib(default=attr.Factory(list)) # type: List[bytes]
725 branches = attr.ib(default=attr.Factory(list))
727 date = attr.ib(default=None) # type: Optional[bytes]
726 date = attr.ib(default=None)
728 keywords = attr.ib(default=attr.Factory(list)) # type: List[bytes]
727 keywords = attr.ib(default=attr.Factory(list))
729 no_merges = attr.ib(default=False) # type: bool
728 no_merges = attr.ib(default=False)
730 only_merges = attr.ib(default=False) # type: bool
729 only_merges = attr.ib(default=False)
731 prune_ancestors = attr.ib(default=attr.Factory(list)) # type: List[bytes]
730 prune_ancestors = attr.ib(default=attr.Factory(list))
732 users = attr.ib(default=attr.Factory(list)) # type: List[bytes]
731 users = attr.ib(default=attr.Factory(list))
733
732
734 # miscellaneous matcher arguments
733 # miscellaneous matcher arguments
735 include_pats = attr.ib(default=attr.Factory(list)) # type: List[bytes]
734 include_pats = attr.ib(default=attr.Factory(list))
736 exclude_pats = attr.ib(default=attr.Factory(list)) # type: List[bytes]
735 exclude_pats = attr.ib(default=attr.Factory(list))
737
736
738 # 0: no follow, 1: follow first, 2: follow both parents
737 # 0: no follow, 1: follow first, 2: follow both parents
739 follow = attr.ib(default=0) # type: int
738 follow = attr.ib(default=0)
740
739
741 # do not attempt filelog-based traversal, which may be fast but cannot
740 # do not attempt filelog-based traversal, which may be fast but cannot
742 # include revisions where files were removed
741 # include revisions where files were removed
743 force_changelog_traversal = attr.ib(default=False) # type: bool
742 force_changelog_traversal = attr.ib(default=False)
744
743
745 # filter revisions by file patterns, which should be disabled only if
744 # filter revisions by file patterns, which should be disabled only if
746 # you want to include revisions where files were unmodified
745 # you want to include revisions where files were unmodified
747 filter_revisions_by_pats = attr.ib(default=True) # type: bool
746 filter_revisions_by_pats = attr.ib(default=True)
748
747
749 # sort revisions prior to traversal: 'desc', 'topo', or None
748 # sort revisions prior to traversal: 'desc', 'topo', or None
750 sort_revisions = attr.ib(default=None) # type: Optional[bytes]
749 sort_revisions = attr.ib(default=None)
751
750
752 # limit number of changes displayed; None means unlimited
751 # limit number of changes displayed; None means unlimited
753 limit = attr.ib(default=None) # type: Optional[int]
752 limit = attr.ib(default=None)
754
753
755
754
756 def parseopts(ui, pats, opts):
755 def parseopts(ui, pats, opts):
@@ -21,7 +21,6 b' from __future__ import absolute_import, '
21 from .i18n import _
21 from .i18n import _
22 from . import (
22 from . import (
23 error,
23 error,
24 pycompat,
25 util,
24 util,
26 )
25 )
27 from .utils import stringutil
26 from .utils import stringutil
@@ -216,7 +215,11 b' def unescapestr(s):'
216 return stringutil.unescapestr(s)
215 return stringutil.unescapestr(s)
217 except ValueError as e:
216 except ValueError as e:
218 # mangle Python's exception into our format
217 # mangle Python's exception into our format
219 raise error.ParseError(pycompat.bytestr(e).lower())
218 # TODO: remove this suppression. For some reason, pytype 2021.09.09
219 # thinks .lower() is being called on Union[ValueError, bytes].
220 # pytype: disable=attribute-error
221 raise error.ParseError(stringutil.forcebytestr(e).lower())
222 # pytype: enable=attribute-error
220
223
221
224
222 def _prettyformat(tree, leafnodes, level, lines):
225 def _prettyformat(tree, leafnodes, level, lines):
@@ -315,20 +315,19 b' def finddirs(path):'
315 class dirs(object):
315 class dirs(object):
316 '''a multiset of directory names from a set of file paths'''
316 '''a multiset of directory names from a set of file paths'''
317
317
318 def __init__(self, map, skip=None):
318 def __init__(self, map, only_tracked=False):
319 """
319 """
320 a dict map indicates a dirstate while a list indicates a manifest
320 a dict map indicates a dirstate while a list indicates a manifest
321 """
321 """
322 self._dirs = {}
322 self._dirs = {}
323 addpath = self.addpath
323 addpath = self.addpath
324 if isinstance(map, dict) and skip is not None:
324 if isinstance(map, dict) and only_tracked:
325 for f, s in pycompat.iteritems(map):
325 for f, s in pycompat.iteritems(map):
326 if s.state != skip:
326 if s.state != b'r':
327 addpath(f)
327 addpath(f)
328 elif skip is not None:
328 elif only_tracked:
329 raise error.ProgrammingError(
329 msg = b"`only_tracked` is only supported with a dict source"
330 b"skip character is only supported with a dict source"
330 raise error.ProgrammingError(msg)
331 )
332 else:
331 else:
333 for f in map:
332 for f in map:
334 addpath(f)
333 addpath(f)
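
`dirs` is a multiset so that a directory shared by several tracked files survives until its last file is gone; `only_tracked` merely skips `b'r'` (removed) entries while counting. A reduced model with `collections.Counter`:

    from collections import Counter

    def ancestor_dirs(path):
        parts = path.split(b'/')[:-1]
        for i in range(len(parts), 0, -1):
            yield b'/'.join(parts[:i])

    counts = Counter()
    for f in (b'a/b/x', b'a/b/y', b'a/z'):
        counts.update(ancestor_dirs(f))

    assert counts[b'a'] == 3 and counts[b'a/b'] == 2
    counts.subtract(ancestor_dirs(b'a/b/y'))  # one file removed
    assert counts[b'a/b'] == 1                # directory still present once
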
@@ -56,16 +56,117 b' class DirstateItem(object):'
56 - mtime,
56 - mtime,
57 """
57 """
58
58
59 _state = attr.ib()
59 _wc_tracked = attr.ib()
60 _p1_tracked = attr.ib()
61 _p2_tracked = attr.ib()
62 # the three items above should probably be combined
63 #
64 # However it is unclear if they properly cover some of the most advanced
65 # merge cases. So we should probably wait on this to be settled.
66 _merged = attr.ib()
67 _clean_p1 = attr.ib()
68 _clean_p2 = attr.ib()
69 _possibly_dirty = attr.ib()
60 _mode = attr.ib()
70 _mode = attr.ib()
61 _size = attr.ib()
71 _size = attr.ib()
62 _mtime = attr.ib()
72 _mtime = attr.ib()
63
73
64 def __init__(self, state, mode, size, mtime):
74 def __init__(
65 self._state = state
75 self,
66 self._mode = mode
76 wc_tracked=False,
67 self._size = size
77 p1_tracked=False,
68 self._mtime = mtime
78 p2_tracked=False,
79 merged=False,
80 clean_p1=False,
81 clean_p2=False,
82 possibly_dirty=False,
83 parentfiledata=None,
84 ):
85 if merged and (clean_p1 or clean_p2):
86 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
87 raise error.ProgrammingError(msg)
88
89 self._wc_tracked = wc_tracked
90 self._p1_tracked = p1_tracked
91 self._p2_tracked = p2_tracked
92 self._merged = merged
93 self._clean_p1 = clean_p1
94 self._clean_p2 = clean_p2
95 self._possibly_dirty = possibly_dirty
96 if parentfiledata is None:
97 self._mode = None
98 self._size = None
99 self._mtime = None
100 else:
101 self._mode = parentfiledata[0]
102 self._size = parentfiledata[1]
103 self._mtime = parentfiledata[2]
104
105 @classmethod
106 def new_added(cls):
107 """constructor to help legacy API to build a new "added" item
108
109 Should eventually be removed
110 """
111 instance = cls()
112 instance._wc_tracked = True
113 instance._p1_tracked = False
114 instance._p2_tracked = False
115 return instance
116
117 @classmethod
118 def new_merged(cls):
119 """constructor to help legacy API to build a new "merged" item
120
121 Should eventually be removed
122 """
123 instance = cls()
124 instance._wc_tracked = True
125 instance._p1_tracked = True # might not be True because of rename ?
126 instance._p2_tracked = True # might not be True because of rename ?
127 instance._merged = True
128 return instance
129
130 @classmethod
131 def new_from_p2(cls):
132 """constructor to help legacy API to build a new "from_p2" item
133
134 Should eventually be removed
135 """
136 instance = cls()
137 instance._wc_tracked = True
138 instance._p1_tracked = False # might actually be True
139 instance._p2_tracked = True
140 instance._clean_p2 = True
141 return instance
142
143 @classmethod
144 def new_possibly_dirty(cls):
145 """constructor to help legacy API to build a new "possibly_dirty" item
146
147 Should eventually be removed
148 """
149 instance = cls()
150 instance._wc_tracked = True
151 instance._p1_tracked = True
152 instance._possibly_dirty = True
153 return instance
154
155 @classmethod
156 def new_normal(cls, mode, size, mtime):
157 """constructor to help legacy API to build a new "normal" item
158
159 Should eventually be removed
160 """
161 assert size != FROM_P2
162 assert size != NONNORMAL
163 instance = cls()
164 instance._wc_tracked = True
165 instance._p1_tracked = True
166 instance._mode = mode
167 instance._size = size
168 instance._mtime = mtime
169 return instance
170
171 @classmethod
172 def from_v1_data(cls, state, mode, size, mtime):
@@ -74,12 +175,44 b' class DirstateItem(object):'
175 Since the dirstate-v1 format is frozen, the signature of this function
176 is not expected to change, unlike the __init__ one.
177 """
- 77 return cls(
- 78 state=state,
- 79 mode=mode,
- 80 size=size,
- 81 mtime=mtime,
- 82 )
+ 178 if state == b'm':
+ 179 return cls.new_merged()
+ 180 elif state == b'a':
+ 181 return cls.new_added()
+ 182 elif state == b'r':
+ 183 instance = cls()
+ 184 instance._wc_tracked = False
+ 185 if size == NONNORMAL:
+ 186 instance._merged = True
+ 187 instance._p1_tracked = (
+ 188 True  # might not be True because of rename?
+ 189 )
+ 190 instance._p2_tracked = (
+ 191 True  # might not be True because of rename?
+ 192 )
+ 193 elif size == FROM_P2:
+ 194 instance._clean_p2 = True
+ 195 instance._p1_tracked = (
+ 196 False  # We actually don't know (file history)
+ 197 )
+ 198 instance._p2_tracked = True
+ 199 else:
+ 200 instance._p1_tracked = True
+ 201 return instance
+ 202 elif state == b'n':
+ 203 if size == FROM_P2:
+ 204 return cls.new_from_p2()
+ 205 elif size == NONNORMAL:
+ 206 return cls.new_possibly_dirty()
+ 207 elif mtime == AMBIGUOUS_TIME:
+ 208 instance = cls.new_normal(mode, size, 42)
+ 209 instance._mtime = None
+ 210 instance._possibly_dirty = True
+ 211 return instance
+ 212 else:
+ 213 return cls.new_normal(mode, size, mtime)
+ 214 else:
+ 215 raise RuntimeError(b'unknown state: %s' % state)
216
217 def set_possibly_dirty(self):
218 """Mark a file as "possibly dirty"
@@ -87,39 +220,60 b' class DirstateItem(object):'
220 This means the next status call will have to actually check its content
221 to make sure it is correct.
222 """
- 90 self._mtime = AMBIGUOUS_TIME
+ 223 self._possibly_dirty = True
+ 224
+ 225 def set_clean(self, mode, size, mtime):
+ 226 """mark a file as "clean", cancelling a potential "possibly dirty" call
+ 227
+ 228 Note: this function is a descendant of `dirstate.normal` and is
+ 229 currently expected to be called on "normal" entries only. There is no
+ 230 reason for this not to change in the future as long as the code is
+ 231 updated to preserve the proper state of the non-normal files.
+ 232 """
+ 233 self._wc_tracked = True
+ 234 self._p1_tracked = True
+ 235 self._p2_tracked = False # this might be wrong
+ 236 self._merged = False
+ 237 self._clean_p2 = False
+ 238 self._possibly_dirty = False
+ 239 self._mode = mode
+ 240 self._size = size
+ 241 self._mtime = mtime
242
- 92 def __getitem__(self, idx):
- 93 if idx == 0 or idx == -4:
- 94 msg = b"do not use item[x], use item.state"
- 95 util.nouideprecwarn(msg, b'6.0', stacklevel=2)
- 96 return self._state
- 97 elif idx == 1 or idx == -3:
- 98 msg = b"do not use item[x], use item.mode"
- 99 util.nouideprecwarn(msg, b'6.0', stacklevel=2)
- 100 return self._mode
- 101 elif idx == 2 or idx == -2:
- 102 msg = b"do not use item[x], use item.size"
- 103 util.nouideprecwarn(msg, b'6.0', stacklevel=2)
- 104 return self._size
- 105 elif idx == 3 or idx == -1:
- 106 msg = b"do not use item[x], use item.mtime"
- 107 util.nouideprecwarn(msg, b'6.0', stacklevel=2)
- 108 return self._mtime
- 109 else:
- 110 raise IndexError(idx)
+ 243 def set_tracked(self):
+ 244 """mark a file as tracked in the working copy
+ 245
+ 246 This will ultimately be called by commands like `hg add`.
+ 247 """
+ 248 self._wc_tracked = True
+ 249 # `set_tracked` is replacing various `normallookup` calls. So we set
+ 250 # "possibly dirty" to stay on the safe side.
+ 251 #
+ 252 # Consider dropping this in the future in favor of something less broad.
+ 253 self._possibly_dirty = True
+ 254
+ 255 def set_untracked(self):
+ 256 """mark a file as untracked in the working copy
+ 257
+ 258 This will ultimately be called by commands like `hg remove`.
+ 259 """
+ 260 # backup the previous state (useful for merge)
+ 261 self._wc_tracked = False
+ 262 self._mode = None
+ 263 self._size = None
+ 264 self._mtime = None
265
266 @property
267 def mode(self):
- 114 return self._mode
+ 268 return self.v1_mode()
269
270 @property
271 def size(self):
- 118 return self._size
+ 272 return self.v1_size()
273
274 @property
275 def mtime(self):
- 122 return self._mtime
+ 276 return self.v1_mtime()
277
278 @property
279 def state(self):
@@ -134,17 +288,17 b' class DirstateItem(object):'
288 dirstatev1 format. It would make sense to ultimately deprecate it in
289 favor of the more "semantic" attributes.
290 """
- 137 return self._state
+ 291 return self.v1_state()
292
293 @property
294 def tracked(self):
295 """True if the file is tracked in the working copy"""
- 142 return self._state in b"nma"
+ 296 return self._wc_tracked
297
298 @property
299 def added(self):
300 """True if the file has been added"""
- 147 return self._state == b'a'
+ 301 return self._wc_tracked and not (self._p1_tracked or self._p2_tracked)
302
303 @property
304 def merged(self):
@@ -152,7 +306,7 b' class DirstateItem(object):'
306
307 Should only be set if a merge is in progress in the dirstate
308 """
- 155 return self._state == b'm'
+ 309 return self._wc_tracked and self._merged
310
311 @property
312 def from_p2(self):
@@ -162,7 +316,9 b' class DirstateItem(object):'
316
317 Should only be set if a merge is in progress in the dirstate
318 """
- 165 return self._state == b'n' and self._size == FROM_P2
+ 319 if not self._wc_tracked:
+ 320 return False
+ 321 return self._clean_p2 or (not self._p1_tracked and self._p2_tracked)
322
323 @property
324 def from_p2_removed(self):
@@ -171,12 +327,12 b' class DirstateItem(object):'
327 This property seems like an abstraction leakage and should probably be
328 dealt with in this class (or maybe the dirstatemap) directly.
329 """
- 174 return self._state == b'r' and self._size == FROM_P2
+ 330 return self.removed and self._clean_p2
331
332 @property
333 def removed(self):
334 """True if the file has been removed"""
- 179 return self._state == b'r'
+ 335 return not self._wc_tracked and (self._p1_tracked or self._p2_tracked)
336
337 @property
338 def merged_removed(self):
@@ -185,7 +341,7 b' class DirstateItem(object):'
341 This property seems like an abstraction leakage and should probably be
342 dealt with in this class (or maybe the dirstatemap) directly.
343 """
- 188 return self._state == b'r' and self._size == NONNORMAL
+ 344 return self.removed and self._merged
345
346 @property
347 def dm_nonnormal(self):
@@ -193,7 +349,7 b' class DirstateItem(object):'
349
350 There is no reason for any code but the dirstatemap one to use this.
351 """
- 196 return self.state != b'n' or self.mtime == AMBIGUOUS_TIME
+ 352 return self.v1_state() != b'n' or self.v1_mtime() == AMBIGUOUS_TIME
353
354 @property
355 def dm_otherparent(self):
@@ -201,27 +357,72 b' class DirstateItem(object):'
357
358 There is no reason for any code but the dirstatemap one to use this.
359 """
- 204 return self._size == FROM_P2
+ 360 return self.v1_size() == FROM_P2
361
362 def v1_state(self):
363 """return a "state" suitable for v1 serialization"""
- 208 return self._state
+ 364 if not (self._p1_tracked or self._p2_tracked or self._wc_tracked):
+ 365 # the object has no state to record, this is -currently-
+ 366 # unsupported
+ 367 raise RuntimeError('untracked item')
+ 368 elif self.removed:
+ 369 return b'r'
+ 370 elif self.merged:
+ 371 return b'm'
+ 372 elif self.added:
+ 373 return b'a'
+ 374 else:
+ 375 return b'n'
376
377 def v1_mode(self):
378 """return a "mode" suitable for v1 serialization"""
- 212 return self._mode
+ 379 return self._mode if self._mode is not None else 0
380
381 def v1_size(self):
382 """return a "size" suitable for v1 serialization"""
- 216 return self._size
+ 383 if not (self._p1_tracked or self._p2_tracked or self._wc_tracked):
+ 384 # the object has no state to record, this is -currently-
+ 385 # unsupported
+ 386 raise RuntimeError('untracked item')
+ 387 elif self.merged_removed:
+ 388 return NONNORMAL
+ 389 elif self.from_p2_removed:
+ 390 return FROM_P2
+ 391 elif self.removed:
+ 392 return 0
+ 393 elif self.merged:
+ 394 return FROM_P2
+ 395 elif self.added:
+ 396 return NONNORMAL
+ 397 elif self.from_p2:
+ 398 return FROM_P2
+ 399 elif self._possibly_dirty:
+ 400 return self._size if self._size is not None else NONNORMAL
+ 401 else:
+ 402 return self._size
403
404 def v1_mtime(self):
405 """return a "mtime" suitable for v1 serialization"""
- 220 return self._mtime
+ 406 if not (self._p1_tracked or self._p2_tracked or self._wc_tracked):
+ 407 # the object has no state to record, this is -currently-
+ 408 # unsupported
+ 409 raise RuntimeError('untracked item')
+ 410 elif self.removed:
+ 411 return 0
+ 412 elif self._possibly_dirty:
+ 413 return AMBIGUOUS_TIME
+ 414 elif self.merged:
+ 415 return AMBIGUOUS_TIME
+ 416 elif self.added:
+ 417 return AMBIGUOUS_TIME
+ 418 elif self.from_p2:
+ 419 return AMBIGUOUS_TIME
+ 420 else:
+ 421 return self._mtime if self._mtime is not None else 0
422
423 def need_delay(self, now):
424 """True if the stored mtime would be ambiguous with the current time"""
- 224 return self._state == b'n' and self._mtime == now
+ 425 return self.v1_state() == b'n' and self.v1_mtime() == now
426
427
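Note: the `v1_state`/`v1_mode`/`v1_size`/`v1_mtime` quartet above produces exactly what `from_v1_data` consumes, so a round trip through the frozen v1 format should be stable. A hedged sketch (hypothetical test, not part of this changeset; it avoids the `mtime == -1` marker value):

    item = DirstateItem.new_normal(0o644, 42, 1000000)
    data = (item.v1_state(), item.v1_mode(), item.v1_size(), item.v1_mtime())
    assert data == (b'n', 0o644, 42, 1000000)
    copy = DirstateItem.from_v1_data(*data)
    assert (copy.v1_state(), copy.v1_mode(),
            copy.v1_size(), copy.v1_mtime()) == data
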
428 def gettype(q):
@@ -222,6 +222,15 b' if ispy3:'
222 >>> assert type(t) is bytes
223 """
224
+ 225 # Trick pytype into not demanding Iterable[int] be passed to __new__(),
+ 226 # since the appropriate bytes format is done internally.
+ 227 #
+ 228 # https://github.com/google/pytype/issues/500
+ 229 if TYPE_CHECKING:
+ 230
+ 231 def __init__(self, s=b''):
+ 232 pass
+ 233
234 def __new__(cls, s=b''):
235 if isinstance(s, bytestr):
236 return s
@@ -433,7 +433,7 b' def manifestrevlogs(repo):'
433 if scmutil.istreemanifest(repo):
434 # This logic is safe if treemanifest isn't enabled, but also
435 # pointless, so we skip it if treemanifest isn't enabled.
- 436 for t, unencoded, encoded, size in repo.store.datafiles():
+ 436 for t, unencoded, size in repo.store.datafiles():
437 if unencoded.startswith(b'meta/') and unencoded.endswith(
438 b'00manifest.i'
439 ):
@@ -441,7 +441,7 b' def manifestrevlogs(repo):'
441 yield repo.manifestlog.getstorage(dir)
442
443
- 444 def rebuildfncache(ui, repo):
+ 444 def rebuildfncache(ui, repo, only_data=False):
445 """Rebuilds the fncache file from repo history.
446
447 Missing entries will be added. Extra entries will be removed.
@@ -465,28 +465,40 b' def rebuildfncache(ui, repo):'
465 newentries = set()
466 seenfiles = set()
467
- 468 progress = ui.makeprogress(
- 469 _(b'rebuilding'), unit=_(b'changesets'), total=len(repo)
- 470 )
- 471 for rev in repo:
- 472 progress.update(rev)
+ 468 if only_data:
+ 469 # Trust the listing of .i from the fncache, but not the .d. This is
+ 470 # much faster, because we only need to stat every possible .d file,
+ 471 # instead of reading the full changelog
+ 472 for f in fnc:
+ 473 if f[:5] == b'data/' and f[-2:] == b'.i':
+ 474 seenfiles.add(f[5:-2])
+ 475 newentries.add(f)
+ 476 dataf = f[:-2] + b'.d'
+ 477 if repo.store._exists(dataf):
+ 478 newentries.add(dataf)
+ 479 else:
+ 480 progress = ui.makeprogress(
+ 481 _(b'rebuilding'), unit=_(b'changesets'), total=len(repo)
+ 482 )
+ 483 for rev in repo:
+ 484 progress.update(rev)
485
486 ctx = repo[rev]
487 for f in ctx.files():
488 # This is to minimize I/O.
489 if f in seenfiles:
490 continue
491 seenfiles.add(f)
492
493 i = b'data/%s.i' % f
494 d = b'data/%s.d' % f
495
496 if repo.store._exists(i):
497 newentries.add(i)
498 if repo.store._exists(d):
499 newentries.add(d)
500
501 progress.complete()
502
503 if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements:
504 # This logic is safe if treemanifest isn't enabled, but also
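
Note: the `only_data` fast path above trusts the fncache for `.i` entries and only stats for a matching `.d` file. A self-contained sketch of that filtering logic (hypothetical standalone helper, assuming byte-string entries such as b'data/foo.i'):

    def fast_entries(fnc, exists):
        """Return (seenfiles, newentries) from .i entries, checking .d by stat."""
        seenfiles, newentries = set(), set()
        for f in fnc:
            if f[:5] == b'data/' and f[-2:] == b'.i':
                seenfiles.add(f[5:-2])
                newentries.add(f)
                dataf = f[:-2] + b'.d'
                if exists(dataf):  # one stat instead of a changelog read
                    newentries.add(dataf)
        return seenfiles, newentries
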
@@ -824,7 +824,7 b' def repair_issue6528('
824 with context():
825 files = list(
826 (file_type, path)
- 827 for (file_type, path, _e, _s) in repo.store.datafiles()
+ 827 for (file_type, path, _s) in repo.store.datafiles()
828 if path.endswith(b'.i') and file_type & store.FILEFLAGS_FILELOG
829 )
830
@@ -689,7 +689,7 b" def revsingle(repo, revspec, default=b'."
689
690 l = revrange(repo, [revspec], localalias=localalias)
691 if not l:
- 692 raise error.Abort(_(b'empty revision set'))
+ 692 raise error.InputError(_(b'empty revision set'))
693 return repo[l.last()]
694
695
@@ -710,7 +710,7 b' def revpair(repo, revs):'
710 l = revrange(repo, revs)
711
712 if not l:
- 713 raise error.Abort(_(b'empty revision range'))
+ 713 raise error.InputError(_(b'empty revision range'))
714
715 first = l.first()
716 second = l.last()
@@ -720,7 +720,7 b' def revpair(repo, revs):'
720 and len(revs) >= 2
721 and not all(revrange(repo, [r]) for r in revs)
722 ):
- 723 raise error.Abort(_(b'empty revision on one side of range'))
+ 723 raise error.InputError(_(b'empty revision on one side of range'))
724
725 # if top-level is range expression, the result must always be a pair
726 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
@@ -1211,9 +1211,9 b' def addremove(repo, matcher, prefix, uip'
1211 try:
1212 similarity = float(opts.get(b'similarity') or 0)
1213 except ValueError:
- 1214 raise error.Abort(_(b'similarity must be a number'))
+ 1214 raise error.InputError(_(b'similarity must be a number'))
1215 if similarity < 0 or similarity > 100:
- 1216 raise error.Abort(_(b'similarity must be between 0 and 100'))
+ 1216 raise error.InputError(_(b'similarity must be between 0 and 100'))
1217 similarity /= 100.0
1218
1219 ret = 0
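
Note: `InputError` keeps `Abort` semantics but classifies the failure as bad user input. A hedged calling sketch (assuming `error.InputError` subclasses `error.Abort`, as it does elsewhere in Mercurial):

    from mercurial import error, scmutil

    try:
        ctx = scmutil.revsingle(repo, revspec)
    except error.InputError:
        # with ui.detailed-exit-code enabled this is reported with
        # exit code 10 ("input error") instead of the generic 255
        raise
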
@@ -472,7 +472,7 b' class basicstore(object):'
472 return self.path + b'/' + encodedir(f)
473
474 def _walk(self, relpath, recurse):
- 475 '''yields (unencoded, encoded, size)'''
+ 475 '''yields (revlog_type, unencoded, size)'''
476 path = self.path
477 if relpath:
478 path += b'/' + relpath
@@ -488,7 +488,7 b' class basicstore(object):'
488 rl_type = is_revlog(f, kind, st)
489 if rl_type is not None:
490 n = util.pconvert(fp[striplen:])
- 491 l.append((rl_type, decodedir(n), n, st.st_size))
+ 491 l.append((rl_type, decodedir(n), st.st_size))
492 elif kind == stat.S_IFDIR and recurse:
493 visit.append(fp)
494 l.sort()
@@ -505,26 +505,32 b' class basicstore(object):'
505 rootstore = manifest.manifestrevlog(repo.nodeconstants, self.vfs)
506 return manifest.manifestlog(self.vfs, repo, rootstore, storenarrowmatch)
507
- 508 def datafiles(self, matcher=None):
+ 508 def datafiles(self, matcher=None, undecodable=None):
+ 509 """Like walk, but excluding the changelog and root manifest.
+ 510
+ 511 When [undecodable] is None, revlog names that can't be
+ 512 decoded cause an exception. When it is provided, it should
+ 513 be a list and the filenames that can't be decoded are added
+ 514 to it instead. This is very rarely needed."""
515 files = self._walk(b'data', True) + self._walk(b'meta', True)
- 510 for (t, u, e, s) in files:
- 511 yield (FILEFLAGS_FILELOG | t, u, e, s)
+ 516 for (t, u, s) in files:
+ 517 yield (FILEFLAGS_FILELOG | t, u, s)
518
519 def topfiles(self):
520 # yield manifest before changelog
521 files = reversed(self._walk(b'', False))
- 516 for (t, u, e, s) in files:
+ 522 for (t, u, s) in files:
523 if u.startswith(b'00changelog'):
- 518 yield (FILEFLAGS_CHANGELOG | t, u, e, s)
+ 524 yield (FILEFLAGS_CHANGELOG | t, u, s)
525 elif u.startswith(b'00manifest'):
- 520 yield (FILEFLAGS_MANIFESTLOG | t, u, e, s)
+ 526 yield (FILEFLAGS_MANIFESTLOG | t, u, s)
527 else:
- 522 yield (FILETYPE_OTHER | t, u, e, s)
+ 528 yield (FILETYPE_OTHER | t, u, s)
529
530 def walk(self, matcher=None):
531 """return files related to data storage (ie: revlogs)
532
- 527 yields (file_type, unencoded, encoded, size)
+ 533 yields (file_type, unencoded, size)
534
535 if a matcher is passed, only storage files of tracked paths
536 which match the matcher are passed
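
Note: callers of `datafiles()` now unpack a 3-tuple and may opt in to collecting undecodable names. A usage sketch mirroring the verify code later in this series (the `process` consumer is hypothetical):

    undecodable = []
    for file_type, name, size in repo.store.datafiles(undecodable=undecodable):
        process(file_type, name, size)
    for name in undecodable:
        ui.warn(b"cannot decode filename '%s'\n" % name)
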
@@ -574,15 +580,20 b' class encodedstore(basicstore):'
580 # However that might change so we should probably add a test and encoding
581 # decoding for it too. see issue6548
582
- 577 def datafiles(self, matcher=None):
- 578 for t, a, b, size in super(encodedstore, self).datafiles():
+ 583 def datafiles(self, matcher=None, undecodable=None):
+ 584 for t, f1, size in super(encodedstore, self).datafiles():
585 try:
- 580 a = decodefilename(a)
+ 586 f2 = decodefilename(f1)
587 except KeyError:
- 582 a = None
- 583 if a is not None and not _matchtrackedpath(a, matcher):
+ 588 if undecodable is None:
+ 589 msg = _(b'undecodable revlog name %s') % f1
+ 590 raise error.StorageError(msg)
+ 591 else:
+ 592 undecodable.append(f1)
+ 593 continue
+ 594 if not _matchtrackedpath(f2, matcher):
595 continue
- 585 yield t, a, b, size
+ 596 yield t, f2, size
597
598 def join(self, f):
599 return self.path + b'/' + encodefilename(f)
@@ -770,7 +781,7 b' class fncachestore(basicstore):'
781 def getsize(self, path):
782 return self.rawvfs.stat(path).st_size
783
- 773 def datafiles(self, matcher=None):
+ 784 def datafiles(self, matcher=None, undecodable=None):
785 for f in sorted(self.fncache):
786 if not _matchtrackedpath(f, matcher):
787 continue
@@ -779,7 +790,7 b' class fncachestore(basicstore):'
790 t = revlog_type(f)
791 assert t is not None, f
792 t |= FILEFLAGS_FILELOG
- 782 yield t, f, ef, self.getsize(ef)
+ 793 yield t, f, self.getsize(ef)
794 except OSError as err:
795 if err.errno != errno.ENOENT:
796 raise
@@ -248,7 +248,7 b' def generatev1(repo):'
248 # Get consistent snapshot of repo, lock during scan.
249 with repo.lock():
250 repo.ui.debug(b'scanning\n')
- 251 for file_type, name, ename, size in _walkstreamfiles(repo):
+ 251 for file_type, name, size in _walkstreamfiles(repo):
252 if size:
253 entries.append((name, size))
254 total_bytes += size
@@ -650,7 +650,7 b' def _v2_walk(repo, includes, excludes, i'
650 if includes or excludes:
651 matcher = narrowspec.match(repo.root, includes, excludes)
652
- 653 for rl_type, name, ename, size in _walkstreamfiles(repo, matcher):
+ 653 for rl_type, name, size in _walkstreamfiles(repo, matcher):
654 if size:
655 ft = _fileappend
656 if rl_type & store.FILEFLAGS_VOLATILE:
@@ -201,7 +201,7 b' def _clonerevlogs('
201
202 # Perform a pass to collect metadata. This validates we can open all
203 # source files and allows a unified progress bar to be displayed.
- 204 for rl_type, unencoded, encoded, size in alldatafiles:
+ 204 for rl_type, unencoded, size in alldatafiles:
205 if not rl_type & store.FILEFLAGS_REVLOG_MAIN:
206 continue
207
@@ -449,8 +449,8 b' def mmapread(fp, size=None):'
449 return b''
450 elif size is None:
451 size = 0
- 452 fd = getattr(fp, 'fileno', lambda: fp)()
- 453 try:
+ 452 try:
+ 453 fd = getattr(fp, 'fileno', lambda: fp)()
454 return mmap.mmap(fd, size, access=mmap.ACCESS_READ)
455 except ValueError:
456 # Empty files cannot be mmapped, but mmapread should still work. Check
@@ -1225,6 +1225,8 b' def versiontuple(v=None, n=4):'
1225 if n == 4:
1226 return (vints[0], vints[1], vints[2], extra)
1227
+ 1228 raise error.ProgrammingError(b"invalid version part request: %d" % n)
+ 1229
1230
1231 def cachefunc(func):
1232 '''cache the result of function calls'''
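
Note: moving the `fileno()` call inside the `try` keeps all fallback handling in one place. A minimal self-contained sketch of the pattern (not Mercurial's exact implementation, which also stat-checks the empty-file case mentioned above):

    import mmap

    def mmapread_sketch(fp, size=None):
        if size == 0:
            return b''
        try:
            fd = getattr(fp, 'fileno', lambda: fp)()
            return mmap.mmap(fd, size or 0, access=mmap.ACCESS_READ)
        except ValueError:
            # mmap rejects empty files; fall back to a plain read
            fp.seek(0)
            return fp.read()
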
@@ -57,30 +57,11 b' else:'
57 try:
58 # importlib.resources exists from Python 3.7; see fallback in except clause
59 # further down
- 60 from importlib import resources
+ 60 from importlib import resources # pytype: disable=import-error
61
- 62 from .. import encoding
- 63
62 # Force loading of the resources module
63 resources.open_binary # pytype: disable=module-attr
64
- 67 def open_resource(package, name):
- 68 return resources.open_binary( # pytype: disable=module-attr
- 69 pycompat.sysstr(package), pycompat.sysstr(name)
- 70 )
- 71
- 72 def is_resource(package, name):
- 73 return resources.is_resource( # pytype: disable=module-attr
- 74 pycompat.sysstr(package), encoding.strfromlocal(name)
- 75 )
- 76
- 77 def contents(package):
- 78 # pytype: disable=module-attr
- 79 for r in resources.contents(pycompat.sysstr(package)):
- 80 # pytype: enable=module-attr
- 81 yield encoding.strtolocal(r)
- 82
- 83
65 except (ImportError, AttributeError):
66 # importlib.resources was not found (almost definitely because we're on a
67 # Python version before 3.7)
@@ -102,3 +83,23 b' except (ImportError, AttributeError):'
83
84 for p in os.listdir(path):
85 yield pycompat.fsencode(p)
+ 86
+ 87
+ 88 else:
+ 89 from .. import encoding
+ 90
+ 91 def open_resource(package, name):
+ 92 return resources.open_binary( # pytype: disable=module-attr
+ 93 pycompat.sysstr(package), pycompat.sysstr(name)
+ 94 )
+ 95
+ 96 def is_resource(package, name):
+ 97 return resources.is_resource( # pytype: disable=module-attr
+ 98 pycompat.sysstr(package), encoding.strfromlocal(name)
+ 99 )
+ 100
+ 101 def contents(package):
+ 102 # pytype: disable=module-attr
+ 103 for r in resources.contents(pycompat.sysstr(package)):
+ 104 # pytype: enable=module-attr
+ 105 yield encoding.strtolocal(r)
@@ -395,12 +395,13 b' class verifier(object):'
395 storefiles = set()
396 subdirs = set()
397 revlogv1 = self.revlogv1
- 398 for t, f, f2, size in repo.store.datafiles():
- 399 if not f:
- 400 self._err(None, _(b"cannot decode filename '%s'") % f2)
- 401 elif (size > 0 or not revlogv1) and f.startswith(b'meta/'):
+ 398 undecodable = []
+ 399 for t, f, size in repo.store.datafiles(undecodable=undecodable):
+ 400 if (size > 0 or not revlogv1) and f.startswith(b'meta/'):
401 storefiles.add(_normpath(f))
402 subdirs.add(os.path.dirname(f))
+ 403 for f in undecodable:
+ 404 self._err(None, _(b"cannot decode filename '%s'") % f)
405 subdirprogress = ui.makeprogress(
406 _(b'checking'), unit=_(b'manifests'), total=len(subdirs)
407 )
@@ -459,11 +460,12 b' class verifier(object):'
460 ui.status(_(b"checking files\n"))
461
462 storefiles = set()
- 462 for rl_type, f, f2, size in repo.store.datafiles():
- 463 if not f:
- 464 self._err(None, _(b"cannot decode filename '%s'") % f2)
- 465 elif (size > 0 or not revlogv1) and f.startswith(b'data/'):
+ 463 undecodable = []
+ 464 for t, f, size in repo.store.datafiles(undecodable=undecodable):
+ 465 if (size > 0 or not revlogv1) and f.startswith(b'data/'):
466 storefiles.add(_normpath(f))
+ 467 for f in undecodable:
+ 468 self._err(None, _(b"cannot decode filename '%s'") % f)
469
470 state = {
471 # TODO this assumes revlog storage for changelog.
@@ -175,7 +175,7 b" def posixfile(name, mode=b'r', buffering"
175 return mixedfilemodewrapper(fp)
176
177 return fp
- 178 except WindowsError as err:
+ 178 except WindowsError as err: # pytype: disable=name-error
179 # convert to a friendlier exception
180 raise IOError(
181 err.errno, '%s: %s' % (encoding.strfromlocal(name), err.strerror)
@@ -44,13 +44,9 b' def batchable(f):'
44 def sample(self, one, two=None):
45 # Build list of encoded arguments suitable for your wire protocol:
46 encoded_args = [('one', encode(one),), ('two', encode(two),)]
- 47 # Create future for injection of encoded result:
- 48 encoded_res_future = future()
- 49 # Return encoded arguments and future:
- 50 yield encoded_args, encoded_res_future
- 51 # Assuming the future to be filled with the result from the batched
- 52 # request now. Decode it:
- 53 yield decode(encoded_res_future.value)
+ 47 # Return it, along with a function that will receive the result
+ 48 # from the batched request.
+ 49 return encoded_args, decode
50
51 The decorator returns a function which wraps this coroutine as a plain
52 method, but adds the original method as an attribute called "batchable",
@@ -59,29 +55,19 b' def batchable(f):'
55 """
56
57 def plain(*args, **opts):
- 62 batchable = f(*args, **opts)
- 63 encoded_args_or_res, encoded_res_future = next(batchable)
- 64 if not encoded_res_future:
+ 58 encoded_args_or_res, decode = f(*args, **opts)
+ 59 if not decode:
60 return encoded_args_or_res # a local result in this case
61 self = args[0]
62 cmd = pycompat.bytesurl(f.__name__) # ensure cmd is ascii bytestr
- 68 encoded_res_future.set(self._submitone(cmd, encoded_args_or_res))
- 69 return next(batchable)
+ 63 encoded_res = self._submitone(cmd, encoded_args_or_res)
+ 64 return decode(encoded_res)
65
66 setattr(plain, 'batchable', f)
67 setattr(plain, '__name__', f.__name__)
68 return plain
69
70
- 76 class future(object):
- 77 '''placeholder for a value to be set later'''
- 78
- 79 def set(self, value):
- 80 if util.safehasattr(self, b'value'):
- 81 raise error.RepoError(b"future is already set")
- 82 self.value = value
- 83
- 84
71 def encodebatchcmds(req):
72 """Return a ``cmds`` argument value for the ``batch`` command."""
73 escapearg = wireprototypes.escapebatcharg
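
Note: under the new contract a `@batchable` method is an ordinary function returning `(encoded_args, decode)` instead of a coroutine. A minimal sketch following the `sample` docstring above (hypothetical peer method, with trivially fake encoding/decoding):

    class SomePeer(wirepeer):  # hypothetical subclass
        @batchable
        def sample(self, one, two=None):
            encoded_args = [(b'one', one), (b'two', two)]

            def decode(d):
                return d.strip()  # protocol-specific decoding goes here

            return encoded_args, decode
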
@@ -248,25 +234,18 b' class peerexecutor(object):'
234 continue
235
236 try:
- 251 batchable = fn.batchable(
+ 237 encoded_args_or_res, decode = fn.batchable(
238 fn.__self__, **pycompat.strkwargs(args)
239 )
240 except Exception:
241 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
242 return
243
- 258 # Encoded arguments and future holding remote result.
- 259 try:
- 260 encoded_args_or_res, fremote = next(batchable)
- 261 except Exception:
- 262 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
- 263 return
- 264
- 265 if not fremote:
+ 244 if not decode:
245 f.set_result(encoded_args_or_res)
246 else:
247 requests.append((command, encoded_args_or_res))
- 269 states.append((command, f, batchable, fremote))
+ 248 states.append((command, f, batchable, decode))
249
250 if not requests:
251 return
@@ -319,7 +298,7 b' class peerexecutor(object):'
298 def _readbatchresponse(self, states, wireresults):
299 # Executes in a thread to read data off the wire.
300
- 322 for command, f, batchable, fremote in states:
+ 301 for command, f, batchable, decode in states:
302 # Grab raw result off the wire and teach the internal future
303 # about it.
304 try:
@@ -334,11 +313,8 b' class peerexecutor(object):'
313 )
314 )
315 else:
- 337 fremote.set(remoteresult)
- 338
- 339 # And ask the coroutine to decode that value.
- 340 try:
- 341 result = next(batchable)
+ 316 try:
+ 317 result = decode(remoteresult)
318 except Exception:
319 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
320 else:
@@ -369,87 +345,90 b' class wirepeer(repository.peer):'
345 @batchable
346 def lookup(self, key):
347 self.requirecap(b'lookup', _(b'look up remote revision'))
- 372 f = future()
- 373 yield {b'key': encoding.fromlocal(key)}, f
- 374 d = f.value
- 375 success, data = d[:-1].split(b" ", 1)
- 376 if int(success):
- 377 yield bin(data)
- 378 else:
- 379 self._abort(error.RepoError(data))
+ 348
+ 349 def decode(d):
+ 350 success, data = d[:-1].split(b" ", 1)
+ 351 if int(success):
+ 352 return bin(data)
+ 353 else:
+ 354 self._abort(error.RepoError(data))
+ 355
+ 356 return {b'key': encoding.fromlocal(key)}, decode
357
358 @batchable
359 def heads(self):
- 383 f = future()
- 384 yield {}, f
- 385 d = f.value
- 386 try:
- 387 yield wireprototypes.decodelist(d[:-1])
- 388 except ValueError:
- 389 self._abort(error.ResponseError(_(b"unexpected response:"), d))
+ 360 def decode(d):
+ 361 try:
+ 362 return wireprototypes.decodelist(d[:-1])
+ 363 except ValueError:
+ 364 self._abort(error.ResponseError(_(b"unexpected response:"), d))
+ 365
+ 366 return {}, decode
367
368 @batchable
369 def known(self, nodes):
- 393 f = future()
- 394 yield {b'nodes': wireprototypes.encodelist(nodes)}, f
- 395 d = f.value
- 396 try:
- 397 yield [bool(int(b)) for b in pycompat.iterbytestr(d)]
- 398 except ValueError:
- 399 self._abort(error.ResponseError(_(b"unexpected response:"), d))
+ 370 def decode(d):
+ 371 try:
+ 372 return [bool(int(b)) for b in pycompat.iterbytestr(d)]
+ 373 except ValueError:
+ 374 self._abort(error.ResponseError(_(b"unexpected response:"), d))
+ 375
+ 376 return {b'nodes': wireprototypes.encodelist(nodes)}, decode
377
378 @batchable
379 def branchmap(self):
- 403 f = future()
- 404 yield {}, f
- 405 d = f.value
- 406 try:
- 407 branchmap = {}
- 408 for branchpart in d.splitlines():
- 409 branchname, branchheads = branchpart.split(b' ', 1)
- 410 branchname = encoding.tolocal(urlreq.unquote(branchname))
- 411 branchheads = wireprototypes.decodelist(branchheads)
- 412 branchmap[branchname] = branchheads
- 413 yield branchmap
- 414 except TypeError:
- 415 self._abort(error.ResponseError(_(b"unexpected response:"), d))
+ 380 def decode(d):
+ 381 try:
+ 382 branchmap = {}
+ 383 for branchpart in d.splitlines():
+ 384 branchname, branchheads = branchpart.split(b' ', 1)
+ 385 branchname = encoding.tolocal(urlreq.unquote(branchname))
+ 386 branchheads = wireprototypes.decodelist(branchheads)
+ 387 branchmap[branchname] = branchheads
+ 388 return branchmap
+ 389 except TypeError:
+ 390 self._abort(error.ResponseError(_(b"unexpected response:"), d))
+ 391
+ 392 return {}, decode
393
394 @batchable
395 def listkeys(self, namespace):
396 if not self.capable(b'pushkey'):
- 420 yield {}, None
- 421 f = future()
+ 397 return {}, None
398 self.ui.debug(b'preparing listkeys for "%s"\n' % namespace)
- 423 yield {b'namespace': encoding.fromlocal(namespace)}, f
- 424 d = f.value
+ 399
+ 400 def decode(d):
401 self.ui.debug(
402 b'received listkey for "%s": %i bytes\n' % (namespace, len(d))
403 )
- 428 yield pushkeymod.decodekeys(d)
+ 404 return pushkeymod.decodekeys(d)
+ 405
+ 406 return {b'namespace': encoding.fromlocal(namespace)}, decode
407
408 @batchable
409 def pushkey(self, namespace, key, old, new):
410 if not self.capable(b'pushkey'):
- 433 yield False, None
- 434 f = future()
+ 411 return False, None
412 self.ui.debug(b'preparing pushkey for "%s:%s"\n' % (namespace, key))
- 436 yield {
+ 413
+ 414 def decode(d):
+ 415 d, output = d.split(b'\n', 1)
+ 416 try:
+ 417 d = bool(int(d))
+ 418 except ValueError:
+ 419 raise error.ResponseError(
+ 420 _(b'push failed (unexpected response):'), d
+ 421 )
+ 422 for l in output.splitlines(True):
+ 423 self.ui.status(_(b'remote: '), l)
+ 424 return d
+ 425
+ 426 return {
427 b'namespace': encoding.fromlocal(namespace),
428 b'key': encoding.fromlocal(key),
429 b'old': encoding.fromlocal(old),
430 b'new': encoding.fromlocal(new),
- 441 }, f
- 442 d = f.value
- 443 d, output = d.split(b'\n', 1)
- 444 try:
- 445 d = bool(int(d))
- 446 except ValueError:
- 447 raise error.ResponseError(
- 448 _(b'push failed (unexpected response):'), d
- 449 )
- 450 for l in output.splitlines(True):
- 451 self.ui.status(_(b'remote: '), l)
- 452 yield d
- 453
+ 431 }, decode
432
433 def stream_out(self):
434 return self._callstream(b'stream_out')
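
Note: from a caller's perspective nothing changes; these commands still batch through the executor. A hedged usage sketch (assuming the standard `commandexecutor`/`callcommand` peer interface):

    with peer.commandexecutor() as e:
        f_heads = e.callcommand(b'heads', {})
        f_known = e.callcommand(b'known', {b'nodes': nodes})
    heads = f_heads.result()  # decoded by the per-command decode()
    known = f_known.result()
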
@@ -1579,7 +1579,7 b' def rawstorefiledata(repo, proto, files,'
1579
1580 # TODO this is a bunch of storage layer interface abstractions because
1581 # it assumes revlogs.
- 1582 for rl_type, name, encodedname, size in topfiles:
+ 1582 for rl_type, name, size in topfiles:
1583 # XXX use the `rl_type` for that
1584 if b'changelog' in files and name.startswith(b'00changelog'):
1585 pass
@@ -1,5 +1,6 b''
1 # This file is automatically @generated by Cargo.
2 # It is not intended for manual editing.
+ 3
4 [[package]]
5 name = "adler"
6 version = "0.2.3"
@@ -386,7 +387,7 b' dependencies = ['
387 "itertools",
388 "lazy_static",
389 "log",
- 389 "memmap",
+ 390 "memmap2",
391 "micro-timer",
392 "pretty_assertions",
393 "rand",
@@ -396,6 +397,7 b' dependencies = ['
397 "regex",
398 "same-file",
399 "sha-1",
+ 400 "stable_deref_trait",
401 "tempfile",
402 "twox-hash",
403 "zstd",
@@ -411,6 +413,7 b' dependencies = ['
413 "hg-core",
414 "libc",
415 "log",
+ 416 "stable_deref_trait",
417 ]
418
419 [[package]]
@@ -508,13 +511,13 b' source = "registry+https://github.com/ru'
511 checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525"
512
513 [[package]]
- 511 name = "memmap"
- 512 version = "0.7.0"
+ 514 name = "memmap2"
+ 515 version = "0.4.0"
516 source = "registry+https://github.com/rust-lang/crates.io-index"
- 514 checksum = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b"
+ 517 checksum = "de5d3112c080d58ce560081baeaab7e1e864ca21795ddbf533d5b1842bb1ecf8"
518 dependencies = [
519 "libc",
- 517 "winapi",
+ 520 "stable_deref_trait",
521 ]
522
523 [[package]]
@@ -865,6 +868,12 b' dependencies = ['
868 ]
869
870 [[package]]
+ 871 name = "stable_deref_trait"
+ 872 version = "1.2.0"
+ 873 source = "registry+https://github.com/rust-lang/crates.io-index"
+ 874 checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3"
+ 875
+ 876 [[package]]
877 name = "static_assertions"
878 version = "1.1.0"
879 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -24,11 +24,12 b' regex = "1.3.9"'
24 sha-1 = "0.9.6"
25 twox-hash = "1.5.0"
26 same-file = "1.0.6"
+ 27 stable_deref_trait = "1.2.0"
28 tempfile = "3.1.0"
29 crossbeam-channel = "0.4"
30 micro-timer = "0.3.0"
31 log = "0.4.8"
- 31 memmap = "0.7.0"
+ 32 memmap2 = {version = "0.4", features = ["stable_deref_trait"]}
33 zstd = "0.5.3"
34 format-bytes = "0.2.2"
35
@@ -5,7 +5,7 b''
5
6 //! Minimal `RevlogIndex`, readable from standard Mercurial file format
7 use hg::*;
- 8 use memmap::*;
+ 8 use memmap2::*;
9 use std::fs::File;
10 use std::ops::Deref;
11 use std::path::Path;
@@ -7,7 +7,7 b' use clap::*;'
7 use hg::revlog::node::*;
8 use hg::revlog::nodemap::*;
9 use hg::revlog::*;
- 10 use memmap::MmapOptions;
+ 10 use memmap2::MmapOptions;
11 use rand::Rng;
12 use std::fs::File;
13 use std::io;
@@ -13,7 +13,6 b' use crate::config::layer::{'
13 ConfigError, ConfigLayer, ConfigOrigin, ConfigValue,
14 };
15 use crate::utils::files::get_bytes_from_os_str;
- 16 use crate::utils::SliceExt;
16 use format_bytes::{write_bytes, DisplayBytes};
17 use std::collections::HashSet;
18 use std::env;
@@ -362,30 +361,14 b' impl Config {'
361 Ok(self.get_option(section, item)?.unwrap_or(false))
362 }
363
- 365 /// Returns the corresponding list-value in the config if found, or `None`.
- 366 ///
- 367 /// This is appropriate for new configuration keys. The value syntax is
- 368 /// **not** the same as most existing list-valued config, which has Python
- 369 /// parsing implemented in `parselist()` in
- 370 /// `mercurial/utils/stringutil.py`. Faithfully porting that parsing
- 371 /// algorithm to Rust (including behavior that are arguably bugs)
- 372 /// turned out to be non-trivial and hasn’t been completed as of this
- 373 /// writing.
- 374 ///
- 375 /// Instead, the "simple" syntax is: split on comma, then trim leading and
- 376 /// trailing whitespace of each component. Quotes or backslashes are not
- 377 /// interpreted in any way. Commas are mandatory between values. Values
- 378 /// that contain a comma are not supported.
- 379 pub fn get_simple_list(
+ 364 /// If there is an `item` value in `section`, parse and return a list of
+ 365 /// byte strings.
+ 366 pub fn get_list(
367 &self,
368 section: &[u8],
369 item: &[u8],
- 383 ) -> Option<impl Iterator<Item = &[u8]>> {
- 384 self.get(section, item).map(|value| {
- 385 value
- 386 .split(|&byte| byte == b',')
- 387 .map(|component| component.trim())
- 388 })
+ 370 ) -> Option<Vec<Vec<u8>>> {
+ 371 self.get(section, item).map(values::parse_list)
372 }
373
374 /// Returns the raw value bytes of the first one found, or `None`.
@@ -8,6 +8,8 b''
8 //! details about where the value came from (but omits details of what’s
9 //! invalid inside the value).
10
+ 11 use crate::utils::SliceExt;
+ 12
13 pub(super) fn parse_bool(v: &[u8]) -> Option<bool> {
14 match v.to_ascii_lowercase().as_slice() {
15 b"1" | b"yes" | b"true" | b"on" | b"always" => Some(true),
@@ -42,6 +44,216 b' pub(super) fn parse_byte_size(value: &[u'
44 value.parse().ok()
45 }
46
47 /// Parse a config value as a list of sub-values.
48 ///
49 /// Ported from `parselist` in `mercurial/utils/stringutil.py`
50
51 // Note: keep behavior in sync with the Python one.
52
53 // Note: this could return `Vec<Cow<[u8]>>` instead and borrow `input` when
54 // possible (when there are no backslash-escapes) but this is probably not worth
55 // the complexity as config is presumably not accessed inside
56 // performance-sensitive loops.
57 pub(super) fn parse_list(input: &[u8]) -> Vec<Vec<u8>> {
58 // Port of Python’s `value.lstrip(b' ,\n')`
59 // TODO: is this really what we want?
60 let input =
61 input.trim_start_matches(|b| b == b' ' || b == b',' || b == b'\n');
62 parse_list_without_trim_start(input)
63 }
64
65 fn parse_list_without_trim_start(input: &[u8]) -> Vec<Vec<u8>> {
66 // Start of port of Python’s `_configlist`
67 let input = input.trim_end_matches(|b| b == b' ' || b == b',');
68 if input.is_empty() {
69 return Vec::new();
70 }
71
72 // Just to make “a string” less confusable with “a list of strings”.
73 type ByteString = Vec<u8>;
74
75 // These correspond to Python’s…
76 let mut mode = ParserMode::Plain; // `parser`
77 let mut values = Vec::new(); // `parts[:-1]`
78 let mut next_value = ByteString::new(); // `parts[-1]`
79 let mut offset = 0; // `offset`
80
81 // Setting `parser` to `None` is instead handled by returning immediately
82 enum ParserMode {
83 Plain,
84 Quoted,
85 }
86
87 loop {
88 match mode {
89 ParserMode::Plain => {
90 // Start of port of Python’s `_parse_plain`
91 let mut whitespace = false;
92 while let Some(&byte) = input.get(offset) {
93 if is_space(byte) || byte == b',' {
94 whitespace = true;
95 offset += 1;
96 } else {
97 break;
98 }
99 }
100 if let Some(&byte) = input.get(offset) {
101 if whitespace {
102 values.push(std::mem::take(&mut next_value))
103 }
104 if byte == b'"' && next_value.is_empty() {
105 mode = ParserMode::Quoted;
106 } else {
107 if byte == b'"' && next_value.ends_with(b"\\") {
108 next_value.pop();
109 }
110 next_value.push(byte);
111 }
112 offset += 1;
113 } else {
114 values.push(next_value);
115 return values;
116 }
117 }
118 ParserMode::Quoted => {
119 // Start of port of Python’s `_parse_quote`
120 if let Some(&byte) = input.get(offset) {
121 if byte == b'"' {
122 // The input contains a quoted zero-length value `""`
123 debug_assert_eq!(next_value, b"");
124 values.push(std::mem::take(&mut next_value));
125 offset += 1;
126 while let Some(&byte) = input.get(offset) {
127 if is_space(byte) || byte == b',' {
128 offset += 1;
129 } else {
130 break;
131 }
132 }
133 mode = ParserMode::Plain;
134 continue;
135 }
136 }
137
138 while let Some(&byte) = input.get(offset) {
139 if byte == b'"' {
140 break;
141 }
142 if byte == b'\\' && input.get(offset + 1) == Some(&b'"') {
143 next_value.push(b'"');
144 offset += 2;
145 } else {
146 next_value.push(byte);
147 offset += 1;
148 }
149 }
150
151 if offset >= input.len() {
152 // We didn’t find a closing double-quote,
153 // so treat the opening one as part of an unquoted value
154 // instead of delimiting the start of a quoted value.
155
156 // `next_value` may have had some backslash-escapes
157 // unescaped. TODO: shouldn’t we use a slice of `input`
158 // instead?
159 let mut real_values =
160 parse_list_without_trim_start(&next_value);
161
162 if let Some(first) = real_values.first_mut() {
163 first.insert(0, b'"');
164 // Drop `next_value`
165 values.extend(real_values)
166 } else {
167 next_value.push(b'"');
168 values.push(next_value);
169 }
170 return values;
171 }
172
173 // We’re not at the end of the input, which means the `while`
174 // loop above ended at a double quote. Skip
175 // over that.
176 offset += 1;
177
178 while let Some(&byte) = input.get(offset) {
179 if byte == b' ' || byte == b',' {
180 offset += 1;
181 } else {
182 break;
183 }
184 }
185
186 if offset >= input.len() {
187 values.push(next_value);
188 return values;
189 }
190
191 if offset + 1 == input.len() && input[offset] == b'"' {
192 next_value.push(b'"');
193 offset += 1;
194 } else {
195 values.push(std::mem::take(&mut next_value));
196 }
197
198 mode = ParserMode::Plain;
199 }
200 }
201 }
202
203 // https://docs.python.org/3/library/stdtypes.html?#bytes.isspace
204 fn is_space(byte: u8) -> bool {
205 if let b' ' | b'\t' | b'\n' | b'\r' | b'\x0b' | b'\x0c' = byte {
206 true
207 } else {
208 false
209 }
210 }
211 }
212
213 #[test]
214 fn test_parse_list() {
215 // Make `assert_eq` error messages nicer
216 fn as_strings(values: &[Vec<u8>]) -> Vec<String> {
217 values
218 .iter()
219 .map(|v| std::str::from_utf8(v.as_ref()).unwrap().to_owned())
220 .collect()
221 }
222 macro_rules! assert_parse_list {
223 ( $input: expr => [ $( $output: expr ),* ] ) => {
224 assert_eq!(
225 as_strings(&parse_list($input)),
226 as_strings(&[ $( Vec::from(&$output[..]) ),* ]),
227 );
228 }
229 }
230
231 // Keep these Rust tests in sync with the Python ones in
232 // `tests/test-config-parselist.py`
233 assert_parse_list!(b"" => []);
234 assert_parse_list!(b"," => []);
235 assert_parse_list!(b"A" => [b"A"]);
236 assert_parse_list!(b"B,B" => [b"B", b"B"]);
237 assert_parse_list!(b", C, ,C," => [b"C", b"C"]);
238 assert_parse_list!(b"\"" => [b"\""]);
239 assert_parse_list!(b"\"\"" => [b"", b""]);
240 assert_parse_list!(b"D,\"" => [b"D", b"\""]);
241 assert_parse_list!(b"E,\"\"" => [b"E", b"", b""]);
242 assert_parse_list!(b"\"F,F\"" => [b"F,F"]);
243 assert_parse_list!(b"\"G,G" => [b"\"G", b"G"]);
244 assert_parse_list!(b"\"H \\\",\\\"H" => [b"\"H", b",", b"H"]);
245 assert_parse_list!(b"I,I\"" => [b"I", b"I\""]);
246 assert_parse_list!(b"J,\"J" => [b"J", b"\"J"]);
247 assert_parse_list!(b"K K" => [b"K", b"K"]);
248 assert_parse_list!(b"\"K\" K" => [b"K", b"K"]);
249 assert_parse_list!(b"L\tL" => [b"L", b"L"]);
250 assert_parse_list!(b"\"L\"\tL" => [b"L", b"", b"L"]);
251 assert_parse_list!(b"M\x0bM" => [b"M", b"M"]);
252 assert_parse_list!(b"\"M\"\x0bM" => [b"M", b"", b"M"]);
253 assert_parse_list!(b"\"N\" , ,\"" => [b"N\""]);
254 assert_parse_list!(b"\" ,O, " => [b"\"", b"O"]);
255 }
256
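Note: the tests above are kept in sync with `tests/test-config-parselist.py`; the same cases can be checked against the Python implementation this code ports (a hedged sketch, assuming `stringutil.parselist` is importable):

    from mercurial.utils import stringutil

    assert stringutil.parselist(b'') == []
    assert stringutil.parselist(b', C, ,C,') == [b'C', b'C']
    assert stringutil.parselist(b'"F,F"') == [b'F,F']
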
257 #[test]
258 fn test_parse_byte_size() {
259 assert_eq!(parse_byte_size(b""), None);
@@ -6,20 +6,21 b''
6 // GNU General Public License version 2 or any later version.
7
8 use crate::dirstate_tree::on_disk::DirstateV2ParseError;
- 9 use crate::errors::HgError;
9 use crate::revlog::node::NULL_NODE;
10 use crate::revlog::Node;
11 use crate::utils::hg_path::{HgPath, HgPathBuf};
12 use crate::FastHashMap;
- 14 use bytes_cast::{unaligned, BytesCast};
- 15 use std::convert::TryFrom;
+ 13 use bytes_cast::BytesCast;
14
15 pub mod dirs_multiset;
16 pub mod dirstate_map;
+ 17 pub mod entry;
18 pub mod parsers;
19 pub mod status;
20
- 22 #[derive(Debug, PartialEq, Clone, BytesCast)]
+ 21 pub use self::entry::*;
+ 22
+ 23 #[derive(Debug, PartialEq, Copy, Clone, BytesCast)]
24 #[repr(C)]
25 pub struct DirstateParents {
26 pub p1: Node,
@@ -33,68 +34,6 b' impl DirstateParents {'
34 };
35 }
36
- 36 /// The C implementation uses all signed types. This will be an issue
- 37 /// either when 4GB+ source files are commonplace or in 2038, whichever
- 38 /// comes first.
- 39 #[derive(Debug, PartialEq, Copy, Clone)]
- 40 pub struct DirstateEntry {
- 41 pub state: EntryState,
- 42 pub mode: i32,
- 43 pub mtime: i32,
- 44 pub size: i32,
- 45 }
- 46
- 47 impl DirstateEntry {
- 48 pub fn is_non_normal(&self) -> bool {
- 49 self.state != EntryState::Normal || self.mtime == MTIME_UNSET
- 50 }
- 51
- 52 pub fn is_from_other_parent(&self) -> bool {
- 53 self.state == EntryState::Normal && self.size == SIZE_FROM_OTHER_PARENT
- 54 }
- 55
- 56 // TODO: other platforms
- 57 #[cfg(unix)]
- 58 pub fn mode_changed(
- 59 &self,
- 60 filesystem_metadata: &std::fs::Metadata,
- 61 ) -> bool {
- 62 use std::os::unix::fs::MetadataExt;
- 63 const EXEC_BIT_MASK: u32 = 0o100;
- 64 let dirstate_exec_bit = (self.mode as u32) & EXEC_BIT_MASK;
- 65 let fs_exec_bit = filesystem_metadata.mode() & EXEC_BIT_MASK;
- 66 dirstate_exec_bit != fs_exec_bit
- 67 }
- 68
- 69 /// Returns a `(state, mode, size, mtime)` tuple as for
- 70 /// `DirstateMapMethods::debug_iter`.
- 71 pub fn debug_tuple(&self) -> (u8, i32, i32, i32) {
- 72 (self.state.into(), self.mode, self.size, self.mtime)
- 73 }
- 74 }
- 75
- 76 #[derive(BytesCast)]
- 77 #[repr(C)]
- 78 struct RawEntry {
- 79 state: u8,
- 80 mode: unaligned::I32Be,
- 81 size: unaligned::I32Be,
- 82 mtime: unaligned::I32Be,
- 83 length: unaligned::I32Be,
- 84 }
- 85
- 86 pub const V1_RANGEMASK: i32 = 0x7FFFFFFF;
- 87
- 88 pub const MTIME_UNSET: i32 = -1;
- 89
- 90 /// A `DirstateEntry` with a size of `-2` means that it was merged from the
- 91 /// other parent. This allows revert to pick the right status back during a
- 92 /// merge.
- 93 pub const SIZE_FROM_OTHER_PARENT: i32 = -2;
- 94 /// A special value used for internal representation of special case in
- 95 /// dirstate v1 format.
- 96 pub const SIZE_NON_NORMAL: i32 = -1;
- 97
37 pub type StateMap = FastHashMap<HgPathBuf, DirstateEntry>;
38 pub type StateMapIter<'a> = Box<
39 dyn Iterator<
@@ -109,52 +48,3 b" pub type CopyMapIter<'a> = Box<"
48 + Send
49 + 'a,
50 >;
- 112
- 113 #[derive(Copy, Clone, Debug, Eq, PartialEq)]
- 114 pub enum EntryState {
- 115 Normal,
- 116 Added,
- 117 Removed,
- 118 Merged,
- 119 Unknown,
- 120 }
- 121
- 122 impl EntryState {
- 123 pub fn is_tracked(self) -> bool {
- 124 use EntryState::*;
- 125 match self {
- 126 Normal | Added | Merged => true,
- 127 Removed | Unknown => false,
- 128 }
- 129 }
- 130 }
- 131
- 132 impl TryFrom<u8> for EntryState {
- 133 type Error = HgError;
- 134
- 135 fn try_from(value: u8) -> Result<Self, Self::Error> {
- 136 match value {
- 137 b'n' => Ok(EntryState::Normal),
- 138 b'a' => Ok(EntryState::Added),
- 139 b'r' => Ok(EntryState::Removed),
- 140 b'm' => Ok(EntryState::Merged),
- 141 b'?' => Ok(EntryState::Unknown),
- 142 _ => Err(HgError::CorruptedRepository(format!(
- 143 "Incorrect dirstate entry state {}",
- 144 value
- 145 ))),
- 146 }
- 147 }
- 148 }
- 149
- 150 impl Into<u8> for EntryState {
- 151 fn into(self) -> u8 {
- 152 match self {
- 153 EntryState::Normal => b'n',
- 154 EntryState::Added => b'a',
- 155 EntryState::Removed => b'r',
- 156 EntryState::Merged => b'm',
- 157 EntryState::Unknown => b'?',
- 158 }
- 159 }
- 160 }
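
Note: the magic v1 sizes moved out of this module keep their meaning. A hedged Python sketch of how they are interpreted (constants match the Rust ones above; the helper itself is hypothetical):

    SIZE_FROM_OTHER_PARENT = -2  # entry was merged from the other parent
    SIZE_NON_NORMAL = -1         # internal "non-normal" marker

    def is_from_other_parent(state, size):
        return state == b'n' and size == SIZE_FROM_OTHER_PARENT
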
@@ -33,7 +33,7 b' impl DirsMultiset {'
33 /// If `skip_state` is provided, skips dirstate entries with equal state.
33 /// If `skip_state` is provided, skips dirstate entries with equal state.
34 pub fn from_dirstate<I, P>(
34 pub fn from_dirstate<I, P>(
35 dirstate: I,
35 dirstate: I,
36 skip_state: Option<EntryState>,
36 only_tracked: bool,
37 ) -> Result<Self, DirstateError>
37 ) -> Result<Self, DirstateError>
38 where
38 where
39 I: IntoIterator<
39 I: IntoIterator<
@@ -48,8 +48,8 b' impl DirsMultiset {'
48 let (filename, entry) = item?;
48 let (filename, entry) = item?;
49 let filename = filename.as_ref();
49 let filename = filename.as_ref();
50 // This `if` is optimized out of the loop
50 // This `if` is optimized out of the loop
51 if let Some(skip) = skip_state {
51 if only_tracked {
52 if skip != entry.state {
52 if entry.state() != EntryState::Removed {
53 multiset.add_path(filename)?;
53 multiset.add_path(filename)?;
54 }
54 }
55 } else {
55 } else {
@@ -343,7 +343,7 b' mod tests {'
 
         let new = DirsMultiset::from_dirstate(
             StateMap::default().into_iter().map(Ok),
-            None,
+            false,
         )
         .unwrap();
         let expected = DirsMultiset {
@@ -372,12 +372,7 b' mod tests {'
         let input_map = ["b/x", "a/c", "a/d/x"].iter().map(|f| {
             Ok((
                 HgPathBuf::from_bytes(f.as_bytes()),
-                DirstateEntry {
-                    state: EntryState::Normal,
-                    mode: 0,
-                    mtime: 0,
-                    size: 0,
-                },
+                DirstateEntry::from_v1_data(EntryState::Normal, 0, 0, 0),
             ))
         });
         let expected_inner = [("", 2), ("a", 2), ("b", 1), ("a/d", 1)]
@@ -385,7 +380,7 b' mod tests {'
             .map(|(k, v)| (HgPathBuf::from_bytes(k.as_bytes()), *v))
             .collect();
 
-        let new = DirsMultiset::from_dirstate(input_map, None).unwrap();
+        let new = DirsMultiset::from_dirstate(input_map, false).unwrap();
         let expected = DirsMultiset {
             inner: expected_inner,
         };
@@ -404,24 +399,17 b' mod tests {'
             .map(|(f, state)| {
                 Ok((
                     HgPathBuf::from_bytes(f.as_bytes()),
-                    DirstateEntry {
-                        state: *state,
-                        mode: 0,
-                        mtime: 0,
-                        size: 0,
-                    },
+                    DirstateEntry::from_v1_data(*state, 0, 0, 0),
                 ))
             });
 
         // "a" incremented with "a/c" and "a/d/"
-        let expected_inner = [("", 1), ("a", 2)]
+        let expected_inner = [("", 1), ("a", 3)]
             .iter()
             .map(|(k, v)| (HgPathBuf::from_bytes(k.as_bytes()), *v))
             .collect();
 
-        let new =
-            DirsMultiset::from_dirstate(input_map, Some(EntryState::Normal))
-                .unwrap();
+        let new = DirsMultiset::from_dirstate(input_map, true).unwrap();
         let expected = DirsMultiset {
             inner: expected_inner,
         };
@@ -79,45 +79,45 b' impl DirstateMap {'
         from_p2: bool,
         possibly_dirty: bool,
     ) -> Result<(), DirstateError> {
-        let mut entry = entry;
+        let state;
+        let size;
+        let mtime;
         if added {
-            assert!(!merged);
             assert!(!possibly_dirty);
             assert!(!from_p2);
-            entry.state = EntryState::Added;
-            entry.size = SIZE_NON_NORMAL;
-            entry.mtime = MTIME_UNSET;
+            state = EntryState::Added;
+            size = SIZE_NON_NORMAL;
+            mtime = MTIME_UNSET;
         } else if merged {
             assert!(!possibly_dirty);
             assert!(!from_p2);
-            entry.state = EntryState::Merged;
-            entry.size = SIZE_FROM_OTHER_PARENT;
-            entry.mtime = MTIME_UNSET;
+            state = EntryState::Merged;
+            size = SIZE_FROM_OTHER_PARENT;
+            mtime = MTIME_UNSET;
         } else if from_p2 {
             assert!(!possibly_dirty);
-            entry.state = EntryState::Normal;
-            entry.size = SIZE_FROM_OTHER_PARENT;
-            entry.mtime = MTIME_UNSET;
+            state = EntryState::Normal;
+            size = SIZE_FROM_OTHER_PARENT;
+            mtime = MTIME_UNSET;
         } else if possibly_dirty {
-            entry.state = EntryState::Normal;
-            entry.size = SIZE_NON_NORMAL;
-            entry.mtime = MTIME_UNSET;
+            state = EntryState::Normal;
+            size = SIZE_NON_NORMAL;
+            mtime = MTIME_UNSET;
         } else {
-            entry.state = EntryState::Normal;
-            entry.size = entry.size & V1_RANGEMASK;
-            entry.mtime = entry.mtime & V1_RANGEMASK;
+            state = EntryState::Normal;
+            size = entry.size() & V1_RANGEMASK;
+            mtime = entry.mtime() & V1_RANGEMASK;
         }
-        let old_state = match self.get(filename) {
-            Some(e) => e.state,
-            None => EntryState::Unknown,
-        };
-        if old_state == EntryState::Unknown || old_state == EntryState::Removed
-        {
+        let mode = entry.mode();
+        let entry = DirstateEntry::from_v1_data(state, mode, size, mtime);
+
+        let old_state = self.get(filename).map(|e| e.state());
+        if old_state.is_none() || old_state == Some(EntryState::Removed) {
             if let Some(ref mut dirs) = self.dirs {
                 dirs.add_path(filename)?;
             }
         }
-        if old_state == EntryState::Unknown {
+        if old_state.is_none() {
             if let Some(ref mut all_dirs) = self.all_dirs {
                 all_dirs.add_path(filename)?;
             }
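The branch above is the whole flag-to-fields mapping; written out as a table-style sketch (a hypothetical helper, not part of the patch):

    // (added, merged, from_p2, possibly_dirty) -> (state, size, mtime)
    fn v1_fields(
        added: bool,
        merged: bool,
        from_p2: bool,
        possibly_dirty: bool,
        old: &DirstateEntry,
    ) -> (EntryState, i32, i32) {
        use EntryState::*;
        if added {
            (Added, SIZE_NON_NORMAL, MTIME_UNSET)
        } else if merged {
            (Merged, SIZE_FROM_OTHER_PARENT, MTIME_UNSET)
        } else if from_p2 {
            (Normal, SIZE_FROM_OTHER_PARENT, MTIME_UNSET)
        } else if possibly_dirty {
            (Normal, SIZE_NON_NORMAL, MTIME_UNSET)
        } else {
            // V1_RANGEMASK truncates values to what the v1 format can store.
            (Normal, old.size() & V1_RANGEMASK, old.mtime() & V1_RANGEMASK)
        }
    }
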
@@ -149,10 +149,7 b' impl DirstateMap {'
         in_merge: bool,
     ) -> Result<(), DirstateError> {
         let old_entry_opt = self.get(filename);
-        let old_state = match old_entry_opt {
-            Some(e) => e.state,
-            None => EntryState::Unknown,
-        };
+        let old_state = old_entry_opt.map(|e| e.state());
         let mut size = 0;
         if in_merge {
             // XXX we should not be able to have 'm' state and 'FROM_P2' if not
@@ -161,10 +158,10 b' impl DirstateMap {'
             // would be nice.
             if let Some(old_entry) = old_entry_opt {
                 // backup the previous state
-                if old_entry.state == EntryState::Merged {
+                if old_entry.state() == EntryState::Merged {
                     size = SIZE_NON_NORMAL;
-                } else if old_entry.state == EntryState::Normal
-                    && old_entry.size == SIZE_FROM_OTHER_PARENT
+                } else if old_entry.state() == EntryState::Normal
+                    && old_entry.size() == SIZE_FROM_OTHER_PARENT
                 {
                     // other parent
                     size = SIZE_FROM_OTHER_PARENT;
@@ -174,13 +171,12 b' impl DirstateMap {'
             }
         }
         }
-        if old_state != EntryState::Unknown && old_state != EntryState::Removed
-        {
+        if old_state.is_some() && old_state != Some(EntryState::Removed) {
             if let Some(ref mut dirs) = self.dirs {
                 dirs.delete_path(filename)?;
             }
         }
-        if old_state == EntryState::Unknown {
+        if old_state.is_none() {
             if let Some(ref mut all_dirs) = self.all_dirs {
                 all_dirs.add_path(filename)?;
             }
@@ -189,15 +185,8 b' impl DirstateMap {'
             self.copy_map.remove(filename);
         }
 
-        self.state_map.insert(
-            filename.to_owned(),
-            DirstateEntry {
-                state: EntryState::Removed,
-                mode: 0,
-                size,
-                mtime: 0,
-            },
-        );
+        self.state_map
+            .insert(filename.to_owned(), DirstateEntry::new_removed(size));
         self.get_non_normal_other_parent_entries()
             .0
             .insert(filename.to_owned());
@@ -210,14 +199,11 b' impl DirstateMap {'
         &mut self,
         filename: &HgPath,
     ) -> Result<bool, DirstateError> {
-        let old_state = match self.get(filename) {
-            Some(e) => e.state,
-            None => EntryState::Unknown,
-        };
+        let old_state = self.get(filename).map(|e| e.state());
         let exists = self.state_map.remove(filename).is_some();
 
         if exists {
-            if old_state != EntryState::Removed {
+            if old_state != Some(EntryState::Removed) {
                 if let Some(ref mut dirs) = self.dirs {
                     dirs.delete_path(filename)?;
                 }
@@ -334,7 +320,7 b' impl DirstateMap {'
         if self.all_dirs.is_none() {
             self.all_dirs = Some(DirsMultiset::from_dirstate(
                 self.state_map.iter().map(|(k, v)| Ok((k, *v))),
-                None,
+                false,
             )?);
         }
         Ok(())
@@ -344,7 +330,7 b' impl DirstateMap {'
         if self.dirs.is_none() {
             self.dirs = Some(DirsMultiset::from_dirstate(
                 self.state_map.iter().map(|(k, v)| Ok((k, *v))),
-                Some(EntryState::Removed),
+                true,
            )?);
         }
         Ok(())
@@ -428,12 +414,7 b' mod tests {'
 
         map.add_file(
             HgPath::new(b"meh"),
-            DirstateEntry {
-                state: EntryState::Normal,
-                mode: 1337,
-                mtime: 1337,
-                size: 1337,
-            },
+            DirstateEntry::from_v1_data(EntryState::Normal, 1337, 1337, 1337),
             false,
             false,
             false,
@@ -465,12 +446,7 b' mod tests {'
             .map(|(fname, (state, mode, size, mtime))| {
                 (
                     HgPathBuf::from_bytes(fname.as_ref()),
-                    DirstateEntry {
-                        state: *state,
-                        mode: *mode,
-                        size: *size,
-                        mtime: *mtime,
-                    },
+                    DirstateEntry::from_v1_data(*state, *mode, *size, *mtime),
                 )
             })
             .collect();
@@ -6,11 +6,11 b''
 use crate::errors::HgError;
 use crate::utils::hg_path::HgPath;
 use crate::{
-    dirstate::{CopyMap, EntryState, RawEntry, StateMap},
+    dirstate::{CopyMap, EntryState, StateMap},
     DirstateEntry, DirstateParents,
 };
 use byteorder::{BigEndian, WriteBytesExt};
-use bytes_cast::BytesCast;
+use bytes_cast::{unaligned, BytesCast};
 use micro_timer::timed;
 use std::convert::{TryFrom, TryInto};
 
@@ -48,6 +48,16 b" pub fn parse_dirstate(contents: &[u8]) -"
     Ok((parents, entries, copies))
 }
 
+#[derive(BytesCast)]
+#[repr(C)]
+struct RawEntry {
+    state: u8,
+    mode: unaligned::I32Be,
+    size: unaligned::I32Be,
+    mtime: unaligned::I32Be,
+    length: unaligned::I32Be,
+}
+
 pub fn parse_dirstate_entries<'a>(
     mut contents: &'a [u8],
     mut each_entry: impl FnMut(
@@ -63,12 +73,12 b" pub fn parse_dirstate_entries<'a>("
         let (raw_entry, rest) = RawEntry::from_bytes(contents)
             .map_err(|_| HgError::corrupted("Overflow in dirstate."))?;
 
-        let entry = DirstateEntry {
-            state: EntryState::try_from(raw_entry.state)?,
-            mode: raw_entry.mode.get(),
-            mtime: raw_entry.mtime.get(),
-            size: raw_entry.size.get(),
-        };
+        let entry = DirstateEntry::from_v1_data(
+            EntryState::try_from(raw_entry.state)?,
+            raw_entry.mode.get(),
+            raw_entry.size.get(),
+            raw_entry.mtime.get(),
+        );
         let (paths, rest) =
             u8::slice_from_bytes(rest, raw_entry.length.get() as usize)
                 .map_err(|_| HgError::corrupted("Overflow in dirstate."))?;
@@ -114,12 +124,13 b' pub fn pack_entry('
     packed: &mut Vec<u8>,
 ) {
     let length = packed_filename_and_copy_source_size(filename, copy_source);
+    let (state, mode, size, mtime) = entry.v1_data();
 
     // Unwrapping because `impl std::io::Write for Vec<u8>` never errors
-    packed.write_u8(entry.state.into()).unwrap();
-    packed.write_i32::<BigEndian>(entry.mode).unwrap();
-    packed.write_i32::<BigEndian>(entry.size).unwrap();
-    packed.write_i32::<BigEndian>(entry.mtime).unwrap();
+    packed.write_u8(state).unwrap();
+    packed.write_i32::<BigEndian>(mode).unwrap();
+    packed.write_i32::<BigEndian>(size).unwrap();
+    packed.write_i32::<BigEndian>(mtime).unwrap();
     packed.write_i32::<BigEndian>(length as i32).unwrap();
     packed.extend(filename.as_bytes());
     if let Some(source) = copy_source {
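Together with the relocated `RawEntry`, `pack_entry` pins down the v1 record layout: one state byte, four big-endian `i32`s, then `length` bytes of path data (the filename plus, for copies, a NUL-separated source; that separator detail is an assumption here, not shown in this hunk). A hedged decoding sketch:

    use byteorder::{BigEndian, ByteOrder};

    // Parse the fixed 17-byte prefix of one v1 dirstate record (see
    // `RawEntry` above), returning the fields and the path bytes.
    fn split_record(bytes: &[u8]) -> Option<(u8, i32, i32, i32, &[u8])> {
        if bytes.len() < 17 {
            return None;
        }
        let state = bytes[0];
        let mode = BigEndian::read_i32(&bytes[1..5]);
        let size = BigEndian::read_i32(&bytes[5..9]);
        let mtime = BigEndian::read_i32(&bytes[9..13]);
        let length = BigEndian::read_i32(&bytes[13..17]) as usize;
        let paths = bytes.get(17..17 + length)?;
        Some((state, mode, size, mtime, paths))
    }
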
@@ -131,33 +142,6 b' pub fn pack_entry('
131 /// Seconds since the Unix epoch
142 /// Seconds since the Unix epoch
132 pub struct Timestamp(pub i64);
143 pub struct Timestamp(pub i64);
133
144
134 impl DirstateEntry {
135 pub fn mtime_is_ambiguous(&self, now: i32) -> bool {
136 self.state == EntryState::Normal && self.mtime == now
137 }
138
139 pub fn clear_ambiguous_mtime(&mut self, now: i32) -> bool {
140 let ambiguous = self.mtime_is_ambiguous(now);
141 if ambiguous {
142 // The file was last modified "simultaneously" with the current
143 // write to dirstate (i.e. within the same second for file-
144 // systems with a granularity of 1 sec). This commonly happens
145 // for at least a couple of files on 'update'.
146 // The user could change the file without changing its size
147 // within the same second. Invalidate the file's mtime in
148 // dirstate, forcing future 'status' calls to compare the
149 // contents of the file if the size is the same. This prevents
150 // mistakenly treating such files as clean.
151 self.clear_mtime()
152 }
153 ambiguous
154 }
155
156 pub fn clear_mtime(&mut self) {
157 self.mtime = -1;
158 }
159 }
160
161 pub fn pack_dirstate(
145 pub fn pack_dirstate(
162 state_map: &mut StateMap,
146 state_map: &mut StateMap,
163 copy_map: &CopyMap,
147 copy_map: &CopyMap,
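The removed `impl DirstateEntry` block above only changes homes in this changeset; its comment documents a real same-second race. The guarded behaviour is easiest to see at a call site, in a hedged sketch equivalent to `clear_ambiguous_mtime`:

    // If the file's mtime equals the dirstate write time ("now"), a later
    // edit in the same second would be invisible to a size+mtime check, so
    // invalidate the mtime and force a content comparison on next status.
    if entry.mtime_is_ambiguous(now) {
        entry.clear_mtime(); // sets mtime to -1 (MTIME_UNSET)
    }
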
@@ -229,12 +213,12 b' mod tests {'
229 fn test_pack_dirstate_one_entry() {
213 fn test_pack_dirstate_one_entry() {
230 let expected_state_map: StateMap = [(
214 let expected_state_map: StateMap = [(
231 HgPathBuf::from_bytes(b"f1"),
215 HgPathBuf::from_bytes(b"f1"),
232 DirstateEntry {
216 DirstateEntry::from_v1_data(
233 state: EntryState::Normal,
217 EntryState::Normal,
234 mode: 0o644,
218 0o644,
235 size: 0,
219 0,
236 mtime: 791231220,
220 791231220,
237 },
221 ),
238 )]
222 )]
239 .iter()
223 .iter()
240 .cloned()
224 .cloned()
@@ -266,12 +250,12 b' mod tests {'
266 fn test_pack_dirstate_one_entry_with_copy() {
250 fn test_pack_dirstate_one_entry_with_copy() {
267 let expected_state_map: StateMap = [(
251 let expected_state_map: StateMap = [(
268 HgPathBuf::from_bytes(b"f1"),
252 HgPathBuf::from_bytes(b"f1"),
269 DirstateEntry {
253 DirstateEntry::from_v1_data(
270 state: EntryState::Normal,
254 EntryState::Normal,
271 mode: 0o644,
255 0o644,
272 size: 0,
256 0,
273 mtime: 791231220,
257 791231220,
274 },
258 ),
275 )]
259 )]
276 .iter()
260 .iter()
277 .cloned()
261 .cloned()
@@ -307,12 +291,12 b' mod tests {'
307 fn test_parse_pack_one_entry_with_copy() {
291 fn test_parse_pack_one_entry_with_copy() {
308 let mut state_map: StateMap = [(
292 let mut state_map: StateMap = [(
309 HgPathBuf::from_bytes(b"f1"),
293 HgPathBuf::from_bytes(b"f1"),
310 DirstateEntry {
294 DirstateEntry::from_v1_data(
311 state: EntryState::Normal,
295 EntryState::Normal,
312 mode: 0o644,
296 0o644,
313 size: 0,
297 0,
314 mtime: 791231220,
298 791231220,
315 },
299 ),
316 )]
300 )]
317 .iter()
301 .iter()
318 .cloned()
302 .cloned()
@@ -353,39 +337,34 b' mod tests {'
353 let mut state_map: StateMap = [
337 let mut state_map: StateMap = [
354 (
338 (
355 HgPathBuf::from_bytes(b"f1"),
339 HgPathBuf::from_bytes(b"f1"),
356 DirstateEntry {
340 DirstateEntry::from_v1_data(
357 state: EntryState::Normal,
341 EntryState::Normal,
358 mode: 0o644,
342 0o644,
359 size: 0,
343 0,
360 mtime: 791231220,
344 791231220,
361 },
345 ),
362 ),
346 ),
363 (
347 (
364 HgPathBuf::from_bytes(b"f2"),
348 HgPathBuf::from_bytes(b"f2"),
365 DirstateEntry {
349 DirstateEntry::from_v1_data(
366 state: EntryState::Merged,
350 EntryState::Merged,
367 mode: 0o777,
351 0o777,
368 size: 1000,
352 1000,
369 mtime: 791231220,
353 791231220,
370 },
354 ),
371 ),
355 ),
372 (
356 (
373 HgPathBuf::from_bytes(b"f3"),
357 HgPathBuf::from_bytes(b"f3"),
374 DirstateEntry {
358 DirstateEntry::from_v1_data(
375 state: EntryState::Removed,
359 EntryState::Removed,
376 mode: 0o644,
360 0o644,
377 size: 234553,
361 234553,
378 mtime: 791231220,
362 791231220,
379 },
363 ),
380 ),
364 ),
381 (
365 (
382 HgPathBuf::from_bytes(b"f4\xF6"),
366 HgPathBuf::from_bytes(b"f4\xF6"),
383 DirstateEntry {
367 DirstateEntry::from_v1_data(EntryState::Added, 0o644, -1, -1),
384 state: EntryState::Added,
385 mode: 0o644,
386 size: -1,
387 mtime: -1,
388 },
389 ),
368 ),
390 ]
369 ]
391 .iter()
370 .iter()
@@ -431,12 +410,12 b' mod tests {'
431 fn test_parse_pack_one_entry_with_copy_and_time_conflict() {
410 fn test_parse_pack_one_entry_with_copy_and_time_conflict() {
432 let mut state_map: StateMap = [(
411 let mut state_map: StateMap = [(
433 HgPathBuf::from_bytes(b"f1"),
412 HgPathBuf::from_bytes(b"f1"),
434 DirstateEntry {
413 DirstateEntry::from_v1_data(
435 state: EntryState::Normal,
414 EntryState::Normal,
436 mode: 0o644,
415 0o644,
437 size: 0,
416 0,
438 mtime: 15000000,
417 15000000,
439 },
418 ),
440 )]
419 )]
441 .iter()
420 .iter()
442 .cloned()
421 .cloned()
@@ -471,12 +450,12 b' mod tests {'
471 &parents,
450 &parents,
472 [(
451 [(
473 HgPathBuf::from_bytes(b"f1"),
452 HgPathBuf::from_bytes(b"f1"),
474 DirstateEntry {
453 DirstateEntry::from_v1_data(
475 state: EntryState::Normal,
454 EntryState::Normal,
476 mode: 0o644,
455 0o644,
477 size: 0,
456 0,
478 mtime: -1
457 -1
479 }
458 )
480 )]
459 )]
481 .iter()
460 .iter()
482 .cloned()
461 .cloned()
@@ -157,22 +157,19 b' fn dispatch_found('
     copy_map: &CopyMap,
     options: StatusOptions,
 ) -> Dispatch {
-    let DirstateEntry {
-        state,
-        mode,
-        mtime,
-        size,
-    } = entry;
+    match entry.state() {
+        EntryState::Normal => {
+            let mode = entry.mode();
+            let size = entry.size();
+            let mtime = entry.mtime();
 
             let HgMetadata {
                 st_mode,
                 st_size,
                 st_mtime,
                 ..
             } = metadata;
 
-    match state {
-        EntryState::Normal => {
             let size_changed = mod_compare(size, st_size as i32);
             let mode_changed =
                 (mode ^ st_mode as i32) & 0o100 != 0o000 && options.check_exec;
@@ -208,7 +205,6 b' fn dispatch_found('
             EntryState::Merged => Dispatch::Modified,
             EntryState::Added => Dispatch::Added,
             EntryState::Removed => Dispatch::Removed,
-            EntryState::Unknown => Dispatch::Unknown,
         }
     }
 
@@ -221,8 +217,6 b' fn dispatch_missing(state: EntryState) -'
         }
         // File was removed, everything is normal
         EntryState::Removed => Dispatch::Removed,
-        // File is unknown to Mercurial, everything is normal
-        EntryState::Unknown => Dispatch::Unknown,
     }
 }
 
@@ -473,7 +467,7 b' where'
473 if let Some(entry) = in_dmap {
467 if let Some(entry) = in_dmap {
474 return Some((
468 return Some((
475 Cow::Borrowed(normalized),
469 Cow::Borrowed(normalized),
476 dispatch_missing(entry.state),
470 dispatch_missing(entry.state()),
477 ));
471 ));
478 }
472 }
479 }
473 }
@@ -605,7 +599,10 b' where'
605 || self.matcher.matches(&filename)
599 || self.matcher.matches(&filename)
606 {
600 {
607 files_sender
601 files_sender
608 .send((filename.to_owned(), dispatch_missing(entry.state)))
602 .send((
603 filename.to_owned(),
604 dispatch_missing(entry.state()),
605 ))
609 .unwrap();
606 .unwrap();
610 }
607 }
611 }
608 }
@@ -635,7 +632,7 b' where'
635 files_sender
632 files_sender
636 .send((
633 .send((
637 directory.to_owned(),
634 directory.to_owned(),
638 dispatch_missing(entry.state),
635 dispatch_missing(entry.state()),
639 ))
636 ))
640 .unwrap();
637 .unwrap();
641 }
638 }
@@ -767,7 +764,7 b' where'
767 {
764 {
768 (
765 (
769 Cow::Borrowed(filename),
766 Cow::Borrowed(filename),
770 dispatch_missing(entry.state),
767 dispatch_missing(entry.state()),
771 )
768 )
772 }
769 }
773 Ok(m) => (
770 Ok(m) => (
@@ -791,7 +788,7 b' where'
791 // directory
788 // directory
792 (
789 (
793 Cow::Borrowed(filename),
790 Cow::Borrowed(filename),
794 dispatch_missing(entry.state),
791 dispatch_missing(entry.state()),
795 )
792 )
796 }
793 }
797 Err(e) => {
794 Err(e) => {
@@ -863,7 +860,7 b' where'
863 )
860 )
864 }
861 }
865 // File doesn't exist
862 // File doesn't exist
866 Err(_) => dispatch_missing(entry.state),
863 Err(_) => dispatch_missing(entry.state()),
867 },
864 },
868 ))
865 ))
869 } else {
866 } else {
@@ -871,7 +868,7 b' where'
871 // we, in this case, report as missing.
868 // we, in this case, report as missing.
872 Some((
869 Some((
873 Cow::Owned(filename.to_owned()),
870 Cow::Owned(filename.to_owned()),
874 dispatch_missing(entry.state),
871 dispatch_missing(entry.state()),
875 ))
872 ))
876 }
873 }
877 },
874 },
@@ -1,5 +1,7 b''
1 pub mod dirstate_map;
1 pub mod dirstate_map;
2 pub mod dispatch;
2 pub mod dispatch;
3 pub mod on_disk;
3 pub mod on_disk;
4 pub mod owning;
5 mod owning_dispatch;
4 pub mod path_with_basename;
6 pub mod path_with_basename;
5 pub mod status;
7 pub mod status;
@@ -328,7 +328,7 b" impl<'tree, 'on_disk> NodeRef<'tree, 'on"
328 ) -> Result<Option<EntryState>, DirstateV2ParseError> {
328 ) -> Result<Option<EntryState>, DirstateV2ParseError> {
329 match self {
329 match self {
330 NodeRef::InMemory(_path, node) => {
330 NodeRef::InMemory(_path, node) => {
331 Ok(node.data.as_entry().map(|entry| entry.state))
331 Ok(node.data.as_entry().map(|entry| entry.state()))
332 }
332 }
333 NodeRef::OnDisk(node) => node.state(),
333 NodeRef::OnDisk(node) => node.state(),
334 }
334 }
@@ -445,7 +445,7 b" impl<'on_disk> DirstateMap<'on_disk> {"
445 let parents = parse_dirstate_entries(
445 let parents = parse_dirstate_entries(
446 map.on_disk,
446 map.on_disk,
447 |path, entry, copy_source| {
447 |path, entry, copy_source| {
448 let tracked = entry.state.is_tracked();
448 let tracked = entry.state().is_tracked();
449 let node = Self::get_or_insert_node(
449 let node = Self::get_or_insert_node(
450 map.on_disk,
450 map.on_disk,
451 &mut map.unreachable_bytes,
451 &mut map.unreachable_bytes,
@@ -593,12 +593,13 b" impl<'on_disk> DirstateMap<'on_disk> {"
593 fn add_or_remove_file(
593 fn add_or_remove_file(
594 &mut self,
594 &mut self,
595 path: &HgPath,
595 path: &HgPath,
596 old_state: EntryState,
596 old_state: Option<EntryState>,
597 new_entry: DirstateEntry,
597 new_entry: DirstateEntry,
598 ) -> Result<(), DirstateV2ParseError> {
598 ) -> Result<(), DirstateV2ParseError> {
599 let had_entry = old_state != EntryState::Unknown;
599 let had_entry = old_state.is_some();
600 let was_tracked = old_state.map_or(false, |s| s.is_tracked());
600 let tracked_count_increment =
601 let tracked_count_increment =
601 match (old_state.is_tracked(), new_entry.state.is_tracked()) {
602 match (was_tracked, new_entry.state().is_tracked()) {
602 (false, true) => 1,
603 (false, true) => 1,
603 (true, false) => -1,
604 (true, false) => -1,
604 _ => 0,
605 _ => 0,
@@ -776,38 +777,39 b" impl<'on_disk> super::dispatch::Dirstate"
776 from_p2: bool,
777 from_p2: bool,
777 possibly_dirty: bool,
778 possibly_dirty: bool,
778 ) -> Result<(), DirstateError> {
779 ) -> Result<(), DirstateError> {
779 let mut entry = entry;
780 let state;
781 let size;
782 let mtime;
780 if added {
783 if added {
781 assert!(!possibly_dirty);
784 assert!(!possibly_dirty);
782 assert!(!from_p2);
785 assert!(!from_p2);
783 entry.state = EntryState::Added;
786 state = EntryState::Added;
784 entry.size = SIZE_NON_NORMAL;
787 size = SIZE_NON_NORMAL;
785 entry.mtime = MTIME_UNSET;
788 mtime = MTIME_UNSET;
786 } else if merged {
789 } else if merged {
787 assert!(!possibly_dirty);
790 assert!(!possibly_dirty);
788 assert!(!from_p2);
791 assert!(!from_p2);
789 entry.state = EntryState::Merged;
792 state = EntryState::Merged;
790 entry.size = SIZE_FROM_OTHER_PARENT;
793 size = SIZE_FROM_OTHER_PARENT;
791 entry.mtime = MTIME_UNSET;
794 mtime = MTIME_UNSET;
792 } else if from_p2 {
795 } else if from_p2 {
793 assert!(!possibly_dirty);
796 assert!(!possibly_dirty);
794 entry.state = EntryState::Normal;
797 state = EntryState::Normal;
795 entry.size = SIZE_FROM_OTHER_PARENT;
798 size = SIZE_FROM_OTHER_PARENT;
796 entry.mtime = MTIME_UNSET;
799 mtime = MTIME_UNSET;
797 } else if possibly_dirty {
800 } else if possibly_dirty {
798 entry.state = EntryState::Normal;
801 state = EntryState::Normal;
799 entry.size = SIZE_NON_NORMAL;
802 size = SIZE_NON_NORMAL;
800 entry.mtime = MTIME_UNSET;
803 mtime = MTIME_UNSET;
801 } else {
804 } else {
802 entry.state = EntryState::Normal;
805 state = EntryState::Normal;
803 entry.size = entry.size & V1_RANGEMASK;
806 size = entry.size() & V1_RANGEMASK;
804 entry.mtime = entry.mtime & V1_RANGEMASK;
807 mtime = entry.mtime() & V1_RANGEMASK;
805 }
808 }
809 let mode = entry.mode();
810 let entry = DirstateEntry::from_v1_data(state, mode, size, mtime);
806
811
807 let old_state = match self.get(filename)? {
812 let old_state = self.get(filename)?.map(|e| e.state());
808 Some(e) => e.state,
809 None => EntryState::Unknown,
810 };
811
813
812 Ok(self.add_or_remove_file(filename, old_state, entry)?)
814 Ok(self.add_or_remove_file(filename, old_state, entry)?)
813 }
815 }
@@ -818,10 +820,7 b" impl<'on_disk> super::dispatch::Dirstate"
818 in_merge: bool,
820 in_merge: bool,
819 ) -> Result<(), DirstateError> {
821 ) -> Result<(), DirstateError> {
820 let old_entry_opt = self.get(filename)?;
822 let old_entry_opt = self.get(filename)?;
821 let old_state = match old_entry_opt {
823 let old_state = old_entry_opt.map(|e| e.state());
822 Some(e) => e.state,
823 None => EntryState::Unknown,
824 };
825 let mut size = 0;
824 let mut size = 0;
826 if in_merge {
825 if in_merge {
827 // XXX we should not be able to have 'm' state and 'FROM_P2' if not
826 // XXX we should not be able to have 'm' state and 'FROM_P2' if not
@@ -830,10 +829,10 b" impl<'on_disk> super::dispatch::Dirstate"
830 // would be nice.
829 // would be nice.
831 if let Some(old_entry) = old_entry_opt {
830 if let Some(old_entry) = old_entry_opt {
832 // backup the previous state
831 // backup the previous state
833 if old_entry.state == EntryState::Merged {
832 if old_entry.state() == EntryState::Merged {
834 size = SIZE_NON_NORMAL;
833 size = SIZE_NON_NORMAL;
835 } else if old_entry.state == EntryState::Normal
834 } else if old_entry.state() == EntryState::Normal
836 && old_entry.size == SIZE_FROM_OTHER_PARENT
835 && old_entry.size() == SIZE_FROM_OTHER_PARENT
837 {
836 {
838 // other parent
837 // other parent
839 size = SIZE_FROM_OTHER_PARENT;
838 size = SIZE_FROM_OTHER_PARENT;
@@ -843,20 +842,14 b" impl<'on_disk> super::dispatch::Dirstate"
843 if size == 0 {
842 if size == 0 {
844 self.copy_map_remove(filename)?;
843 self.copy_map_remove(filename)?;
845 }
844 }
846 let entry = DirstateEntry {
845 let entry = DirstateEntry::new_removed(size);
847 state: EntryState::Removed,
848 mode: 0,
849 size,
850 mtime: 0,
851 };
852 Ok(self.add_or_remove_file(filename, old_state, entry)?)
846 Ok(self.add_or_remove_file(filename, old_state, entry)?)
853 }
847 }
854
848
855 fn drop_file(&mut self, filename: &HgPath) -> Result<bool, DirstateError> {
849 fn drop_file(&mut self, filename: &HgPath) -> Result<bool, DirstateError> {
856 let old_state = match self.get(filename)? {
850 let was_tracked = self
857 Some(e) => e.state,
851 .get(filename)?
858 None => EntryState::Unknown,
852 .map_or(false, |e| e.state().is_tracked());
859 };
860 struct Dropped {
853 struct Dropped {
861 was_tracked: bool,
854 was_tracked: bool,
862 had_entry: bool,
855 had_entry: bool,
@@ -921,7 +914,7 b" impl<'on_disk> super::dispatch::Dirstate"
921 was_tracked: node
914 was_tracked: node
922 .data
915 .data
923 .as_entry()
916 .as_entry()
924 .map_or(false, |entry| entry.state.is_tracked()),
917 .map_or(false, |entry| entry.state().is_tracked()),
925 had_entry,
918 had_entry,
926 had_copy_source: node.copy_source.take().is_some(),
919 had_copy_source: node.copy_source.take().is_some(),
927 };
920 };
@@ -956,7 +949,7 b" impl<'on_disk> super::dispatch::Dirstate"
956 }
949 }
957 Ok(dropped.had_entry)
950 Ok(dropped.had_entry)
958 } else {
951 } else {
959 debug_assert!(!old_state.is_tracked());
952 debug_assert!(!was_tracked);
960 Ok(false)
953 Ok(false)
961 }
954 }
962 }
955 }
@@ -1290,6 +1283,7 b" impl<'on_disk> super::dispatch::Dirstate"
 
     fn debug_iter(
         &self,
+        all: bool,
     ) -> Box<
         dyn Iterator<
             Item = Result<
@@ -1299,16 +1293,17 b" impl<'on_disk> super::dispatch::Dirstate"
             > + Send
             + '_,
     > {
-        Box::new(self.iter_nodes().map(move |node| {
-            let node = node?;
+        Box::new(filter_map_results(self.iter_nodes(), move |node| {
             let debug_tuple = if let Some(entry) = node.entry()? {
                 entry.debug_tuple()
+            } else if !all {
+                return Ok(None);
             } else if let Some(mtime) = node.cached_directory_mtime() {
                 (b' ', 0, -1, mtime.seconds() as i32)
             } else {
                 (b' ', 0, -1, -1)
             };
-            Ok((node.full_path(self.on_disk)?, debug_tuple))
+            Ok(Some((node.full_path(self.on_disk)?, debug_tuple)))
         }))
     }
 }
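`filter_map_results` is what lets the closure return `Ok(None)` to skip a node while still forwarding parse errors. A hedged sketch of an adapter with that contract (the real helper lives elsewhere in the crate; this is only its assumed semantics):

    // Apply `f` to each `Ok` item, keep `Some` results, pass errors through.
    fn filter_map_results<'a, I, T, U, E>(
        iter: I,
        f: impl Fn(T) -> Result<Option<U>, E> + 'a,
    ) -> impl Iterator<Item = Result<U, E>> + 'a
    where
        I: Iterator<Item = Result<T, E>> + 'a,
    {
        iter.filter_map(move |result| match result {
            Ok(item) => f(item).transpose(),
            Err(e) => Some(Err(e)),
        })
    }
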
@@ -290,13 +290,15 b' pub trait DirstateMapMethods {'
     /// node stored in this dirstate map, for the purpose of the `hg
     /// debugdirstate` command.
     ///
-    /// For nodes that don’t have an entry, `state` is the ASCII space.
+    /// If `all` is true, include nodes that don’t have an entry.
+    /// For such nodes `state` is the ASCII space.
     /// An `mtime` may still be present. It is used to optimize `status`.
     ///
     /// Because parse errors can happen during iteration, the iterated items
     /// are `Result`s.
     fn debug_iter(
         &self,
+        all: bool,
     ) -> Box<
         dyn Iterator<
             Item = Result<
538
540
539 fn debug_iter(
541 fn debug_iter(
540 &self,
542 &self,
543 all: bool,
541 ) -> Box<
544 ) -> Box<
542 dyn Iterator<
545 dyn Iterator<
543 Item = Result<
546 Item = Result<
@@ -547,6 +550,9 b' impl DirstateMapMethods for DirstateMap '
547 > + Send
550 > + Send
548 + '_,
551 + '_,
549 > {
552 > {
553 // Not used for the flat (not tree-based) DirstateMap
554 let _ = all;
555
550 Box::new(
556 Box::new(
551 (&**self)
557 (&**self)
552 .iter()
558 .iter()
@@ -265,7 +265,7 b" impl<'on_disk> Docket<'on_disk> {"
265 }
265 }
266
266
267 pub fn data_filename(&self) -> String {
267 pub fn data_filename(&self) -> String {
268 String::from_utf8(format_bytes!(b"dirstate.{}.d", self.uuid)).unwrap()
268 String::from_utf8(format_bytes!(b"dirstate.{}", self.uuid)).unwrap()
269 }
269 }
270 }
270 }
271
271
@@ -403,12 +403,15 b' impl Node {'
403 }
403 }
404
404
405 fn entry_with_given_state(&self, state: EntryState) -> DirstateEntry {
405 fn entry_with_given_state(&self, state: EntryState) -> DirstateEntry {
406 DirstateEntry {
406 // For now, the on-disk representation of DirstateEntry in dirstate-v2
407 // format is equivalent to that of dirstate-v1. When that changes, add
408 // a new constructor.
409 DirstateEntry::from_v1_data(
407 state,
410 state,
408 mode: self.data.mode.get(),
411 self.data.mode.get(),
409 mtime: self.data.mtime.get(),
412 self.data.size.get(),
410 size: self.data.size.get(),
413 self.data.mtime.get(),
411 }
414 )
412 }
415 }
413
416
414 pub(super) fn entry(
417 pub(super) fn entry(
@@ -640,11 +643,11 b" impl Writer<'_, '_> {"
640 NodeRef::InMemory(path, node) => {
643 NodeRef::InMemory(path, node) => {
641 let (state, data) = match &node.data {
644 let (state, data) = match &node.data {
642 dirstate_map::NodeData::Entry(entry) => (
645 dirstate_map::NodeData::Entry(entry) => (
643 entry.state.into(),
646 entry.state().into(),
644 Entry {
647 Entry {
645 mode: entry.mode.into(),
648 mode: entry.mode().into(),
646 mtime: entry.mtime.into(),
649 mtime: entry.mtime().into(),
647 size: entry.size.into(),
650 size: entry.size().into(),
648 },
651 },
649 ),
652 ),
650 dirstate_map::NodeData::CachedDirectory { mtime } => {
653 dirstate_map::NodeData::CachedDirectory { mtime } => {
@@ -1,11 +1,9 b''
-use cpython::PyBytes;
-use cpython::Python;
-use hg::dirstate_tree::dirstate_map::DirstateMap;
-use hg::DirstateError;
-use hg::DirstateParents;
+use super::dirstate_map::DirstateMap;
+use stable_deref_trait::StableDeref;
+use std::ops::Deref;
 
 /// Keep a `DirstateMap<'on_disk>` next to the `on_disk` buffer that it
-/// borrows. This is similar to the owning-ref crate.
+/// borrows.
 ///
 /// This is similar to [`OwningRef`] which is more limited because it
 /// represents exactly one `&T` reference next to the value it borrows, as
@@ -13,11 +11,11 b' use hg::DirstateParents;'
 /// arbitrarily-nested data structures.
 ///
 /// [`OwningRef`]: https://docs.rs/owning_ref/0.4.1/owning_ref/struct.OwningRef.html
-pub(super) struct OwningDirstateMap {
+pub struct OwningDirstateMap {
     /// Owned handle to a bytes buffer with a stable address.
     ///
     /// See <https://docs.rs/owning_ref/0.4.1/owning_ref/trait.StableAddress.html>.
-    on_disk: PyBytes,
+    on_disk: Box<dyn Deref<Target = [u8]> + Send>,
 
     /// Pointer for `Box<DirstateMap<'on_disk>>`, type-erased because the
     /// language cannot represent a lifetime referencing a sibling field.
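Boxing a `dyn Deref<Target = [u8]>` instead of `PyBytes` decouples this type from cpython; any address-stable (`StableDeref`) buffer works. The `repo.rs` hunk below uses it exactly as in this sketch:

    // Both a Vec<u8> and a memory map qualify: the [u8] address survives
    // moves of the owning handle for as long as the handle lives.
    let mut map = OwningDirstateMap::new_empty(std::fs::read(".hg/dirstate")?);
    let (on_disk, placeholder) = map.get_mut_pair();
    let (inner, _parents) = DirstateMap::new_v1(on_disk)?;
    *placeholder = inner;
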
@@ -28,12 +26,13 b' pub(super) struct OwningDirstateMap {'
28 }
26 }
29
27
30 impl OwningDirstateMap {
28 impl OwningDirstateMap {
31 pub fn new_v1(
29 pub fn new_empty<OnDisk>(on_disk: OnDisk) -> Self
32 py: Python,
30 where
33 on_disk: PyBytes,
31 OnDisk: Deref<Target = [u8]> + StableDeref + Send + 'static,
34 ) -> Result<(Self, Option<DirstateParents>), DirstateError> {
32 {
35 let bytes: &'_ [u8] = on_disk.data(py);
33 let on_disk = Box::new(on_disk);
36 let (map, parents) = DirstateMap::new_v1(bytes)?;
34 let bytes: &'_ [u8] = &on_disk;
35 let map = DirstateMap::empty(bytes);
37
36
38 // Like in `bytes` above, this `'_` lifetime parameter borrows from
37 // Like in `bytes` above, this `'_` lifetime parameter borrows from
39 // the bytes buffer owned by `on_disk`.
38 // the bytes buffer owned by `on_disk`.
@@ -42,30 +41,12 b' impl OwningDirstateMap {'
42 // Erase the pointed type entirely in order to erase the lifetime.
41 // Erase the pointed type entirely in order to erase the lifetime.
43 let ptr: *mut () = ptr.cast();
42 let ptr: *mut () = ptr.cast();
44
43
45 Ok((Self { on_disk, ptr }, parents))
44 Self { on_disk, ptr }
46 }
45 }
47
46
48 pub fn new_v2(
47 pub fn get_mut_pair<'a>(
49 py: Python,
48 &'a mut self,
50 on_disk: PyBytes,
49 ) -> (&'a [u8], &'a mut DirstateMap<'a>) {
51 data_size: usize,
52 tree_metadata: PyBytes,
53 ) -> Result<Self, DirstateError> {
54 let bytes: &'_ [u8] = on_disk.data(py);
55 let map =
56 DirstateMap::new_v2(bytes, data_size, tree_metadata.data(py))?;
57
58 // Like in `bytes` above, this `'_` lifetime parameter borrows from
59 // the bytes buffer owned by `on_disk`.
60 let ptr: *mut DirstateMap<'_> = Box::into_raw(Box::new(map));
61
62 // Erase the pointed type entirely in order to erase the lifetime.
63 let ptr: *mut () = ptr.cast();
64
65 Ok(Self { on_disk, ptr })
66 }
67
68 pub fn get_mut<'a>(&'a mut self) -> &'a mut DirstateMap<'a> {
69 // SAFETY: We cast the type-erased pointer back to the same type it had
50 // SAFETY: We cast the type-erased pointer back to the same type it had
70 // in `new`, except with a different lifetime parameter. This time we
51 // in `new`, except with a different lifetime parameter. This time we
71 // connect the lifetime to that of `self`. This cast is valid because
52 // connect the lifetime to that of `self`. This cast is valid because
@@ -76,7 +57,11 b' impl OwningDirstateMap {'
76 // SAFETY: we dereference that pointer, connecting the lifetime of the
57 // SAFETY: we dereference that pointer, connecting the lifetime of the
77 // new `&mut` to that of `self`. This is valid because the
58 // new `&mut` to that of `self`. This is valid because the
78 // raw pointer is to a boxed value, and `self` owns that box.
59 // raw pointer is to a boxed value, and `self` owns that box.
79 unsafe { &mut *ptr }
60 (&self.on_disk, unsafe { &mut *ptr })
61 }
62
63 pub fn get_mut<'a>(&'a mut self) -> &'a mut DirstateMap<'a> {
64 self.get_mut_pair().1
80 }
65 }
81
66
82 pub fn get<'a>(&'a self) -> &'a DirstateMap<'a> {
67 pub fn get<'a>(&'a self) -> &'a DirstateMap<'a> {
@@ -84,6 +69,10 b' impl OwningDirstateMap {'
84 let ptr: *mut DirstateMap<'a> = self.ptr.cast();
69 let ptr: *mut DirstateMap<'a> = self.ptr.cast();
85 unsafe { &*ptr }
70 unsafe { &*ptr }
86 }
71 }
72
73 pub fn on_disk<'a>(&'a self) -> &'a [u8] {
74 &self.on_disk
75 }
87 }
76 }
88
77
89 impl Drop for OwningDirstateMap {
78 impl Drop for OwningDirstateMap {
@@ -105,13 +94,12 b' impl Drop for OwningDirstateMap {'
105 fn _static_assert_is_send<T: Send>() {}
94 fn _static_assert_is_send<T: Send>() {}
106
95
107 fn _static_assert_fields_are_send() {
96 fn _static_assert_fields_are_send() {
108 _static_assert_is_send::<PyBytes>();
109 _static_assert_is_send::<Box<DirstateMap<'_>>>();
97 _static_assert_is_send::<Box<DirstateMap<'_>>>();
110 }
98 }
111
99
112 // SAFETY: we don’t get this impl implicitly because `*mut (): !Send` because
100 // SAFETY: we don’t get this impl implicitly because `*mut (): !Send` because
113 // thread-safety of raw pointers is unknown in the general case. However this
101 // thread-safety of raw pointers is unknown in the general case. However this
114 // particular raw pointer represents a `Box<DirstateMap<'on_disk>>` that we
102 // particular raw pointer represents a `Box<DirstateMap<'on_disk>>` that we
115 // own. Since that `Box` and `PyBytes` are both `Send` as shown in above, it
103 // own. Since that `Box` is `Send` as shown in above, it is sound to mark
116 // is sound to mark this struct as `Send` too.
104 // this struct as `Send` too.
117 unsafe impl Send for OwningDirstateMap {}
105 unsafe impl Send for OwningDirstateMap {}
@@ -1,18 +1,18 b''
1 use crate::dirstate::owning::OwningDirstateMap;
1 use crate::dirstate::parsers::Timestamp;
2 use hg::dirstate::parsers::Timestamp;
2 use crate::dirstate_tree::dispatch::DirstateMapMethods;
3 use hg::dirstate_tree::dispatch::DirstateMapMethods;
3 use crate::dirstate_tree::on_disk::DirstateV2ParseError;
4 use hg::dirstate_tree::on_disk::DirstateV2ParseError;
4 use crate::dirstate_tree::owning::OwningDirstateMap;
5 use hg::matchers::Matcher;
5 use crate::matchers::Matcher;
6 use hg::utils::hg_path::{HgPath, HgPathBuf};
6 use crate::utils::hg_path::{HgPath, HgPathBuf};
7 use hg::CopyMapIter;
7 use crate::CopyMapIter;
8 use hg::DirstateEntry;
8 use crate::DirstateEntry;
9 use hg::DirstateError;
9 use crate::DirstateError;
10 use hg::DirstateParents;
10 use crate::DirstateParents;
11 use hg::DirstateStatus;
11 use crate::DirstateStatus;
12 use hg::PatternFileWarning;
12 use crate::PatternFileWarning;
13 use hg::StateMapIter;
13 use crate::StateMapIter;
14 use hg::StatusError;
14 use crate::StatusError;
15 use hg::StatusOptions;
15 use crate::StatusOptions;
16 use std::path::PathBuf;
16 use std::path::PathBuf;
17
17
18 impl DirstateMapMethods for OwningDirstateMap {
18 impl DirstateMapMethods for OwningDirstateMap {
@@ -226,6 +226,7 b' impl DirstateMapMethods for OwningDirsta'
226
226
227 fn debug_iter(
227 fn debug_iter(
228 &self,
228 &self,
229 all: bool,
229 ) -> Box<
230 ) -> Box<
230 dyn Iterator<
231 dyn Iterator<
231 Item = Result<
232 Item = Result<
@@ -235,6 +236,6 b' impl DirstateMapMethods for OwningDirsta'
235 > + Send
236 > + Send
236 + '_,
237 + '_,
237 > {
238 > {
238 self.get().debug_iter()
239 self.get().debug_iter(all)
239 }
240 }
240 }
241 }
@@ -394,9 +394,6 b" impl<'a, 'tree, 'on_disk> StatusCommon<'"
394 .push(hg_path.detach_from_tree()),
394 .push(hg_path.detach_from_tree()),
395 EntryState::Normal => self
395 EntryState::Normal => self
396 .handle_normal_file(&dirstate_node, fs_metadata)?,
396 .handle_normal_file(&dirstate_node, fs_metadata)?,
397 // This variant is not used in DirstateMap
398 // nodes
399 EntryState::Unknown => unreachable!(),
400 }
397 }
401 } else {
398 } else {
402 // `node.entry.is_none()` indicates a "directory"
399 // `node.entry.is_none()` indicates a "directory"
@@ -506,11 +503,9 b" impl<'a, 'tree, 'on_disk> StatusCommon<'"
506 let hg_path = &dirstate_node.full_path_borrowed(self.dmap.on_disk)?;
503 let hg_path = &dirstate_node.full_path_borrowed(self.dmap.on_disk)?;
507 let mode_changed =
504 let mode_changed =
508 || self.options.check_exec && entry.mode_changed(fs_metadata);
505 || self.options.check_exec && entry.mode_changed(fs_metadata);
509 let size_changed = entry.size != truncate_u64(fs_metadata.len());
506 let size = entry.size();
510 if entry.size >= 0
507 let size_changed = size != truncate_u64(fs_metadata.len());
511 && size_changed
508 if size >= 0 && size_changed && fs_metadata.file_type().is_symlink() {
512 && fs_metadata.file_type().is_symlink()
513 {
514 // issue6456: Size returned may be longer due to encryption
509 // issue6456: Size returned may be longer due to encryption
515 // on EXT-4 fscrypt. TODO maybe only do it on EXT4?
510 // on EXT-4 fscrypt. TODO maybe only do it on EXT4?
516 self.outcome
511 self.outcome
@@ -520,7 +515,7 b" impl<'a, 'tree, 'on_disk> StatusCommon<'"
520 .push(hg_path.detach_from_tree())
515 .push(hg_path.detach_from_tree())
521 } else if dirstate_node.has_copy_source()
516 } else if dirstate_node.has_copy_source()
522 || entry.is_from_other_parent()
517 || entry.is_from_other_parent()
523 || (entry.size >= 0 && (size_changed || mode_changed()))
518 || (size >= 0 && (size_changed || mode_changed()))
524 {
519 {
525 self.outcome
520 self.outcome
526 .lock()
521 .lock()
@@ -529,7 +524,7 b" impl<'a, 'tree, 'on_disk> StatusCommon<'"
529 .push(hg_path.detach_from_tree())
524 .push(hg_path.detach_from_tree())
530 } else {
525 } else {
531 let mtime = mtime_seconds(fs_metadata);
526 let mtime = mtime_seconds(fs_metadata);
532 if truncate_i64(mtime) != entry.mtime
527 if truncate_i64(mtime) != entry.mtime()
533 || mtime == self.options.last_normal_time
528 || mtime == self.options.last_normal_time
534 {
529 {
535 self.outcome
530 self.outcome
@@ -36,6 +36,7 b' pub mod logging;'
36 pub mod operations;
36 pub mod operations;
37 pub mod revset;
37 pub mod revset;
38 pub mod utils;
38 pub mod utils;
39 pub mod vfs;
39
40
40 use crate::utils::hg_path::{HgPathBuf, HgPathError};
41 use crate::utils::hg_path::{HgPathBuf, HgPathError};
41 pub use filepatterns::{
42 pub use filepatterns::{
@@ -1,5 +1,5 b''
1 use crate::errors::{HgError, HgResultExt, IoErrorContext, IoResultExt};
1 use crate::errors::{HgError, HgResultExt, IoErrorContext, IoResultExt};
2 use crate::repo::Vfs;
2 use crate::vfs::Vfs;
3 use std::io::Write;
3 use std::io::Write;
4
4
5 /// A utility to append to a log file with the given name, and optionally
5 /// A utility to append to a log file with the given name, and optionally
@@ -5,17 +5,11 b''
5 // This software may be used and distributed according to the terms of the
5 // This software may be used and distributed according to the terms of the
6 // GNU General Public License version 2 or any later version.
6 // GNU General Public License version 2 or any later version.
7
7
8 use std::path::PathBuf;
9
10 use crate::repo::Repo;
8 use crate::repo::Repo;
11 use crate::revlog::changelog::Changelog;
12 use crate::revlog::manifest::Manifest;
13 use crate::revlog::path_encode::path_encode;
14 use crate::revlog::revlog::Revlog;
15 use crate::revlog::revlog::RevlogError;
9 use crate::revlog::revlog::RevlogError;
16 use crate::revlog::Node;
10 use crate::revlog::Node;
17 use crate::utils::files::get_path_from_bytes;
11
18 use crate::utils::hg_path::{HgPath, HgPathBuf};
12 use crate::utils::hg_path::HgPathBuf;
19
13
20 pub struct CatOutput {
14 pub struct CatOutput {
21 /// Whether any file in the manifest matched the paths given as CLI
15 /// Whether any file in the manifest matched the paths given as CLI
@@ -29,8 +23,6 b' pub struct CatOutput {'
29 pub node: Node,
23 pub node: Node,
30 }
24 }
31
25
32 const METADATA_DELIMITER: [u8; 2] = [b'\x01', b'\n'];
33
34 /// Output the given revision of files
26 /// Output the given revision of files
35 ///
27 ///
36 /// * `root`: Repository root
28 /// * `root`: Repository root
@@ -42,44 +34,24 b" pub fn cat<'a>("
42 files: &'a [HgPathBuf],
34 files: &'a [HgPathBuf],
43 ) -> Result<CatOutput, RevlogError> {
35 ) -> Result<CatOutput, RevlogError> {
44 let rev = crate::revset::resolve_single(revset, repo)?;
36 let rev = crate::revset::resolve_single(revset, repo)?;
45 let changelog = Changelog::open(repo)?;
37 let manifest = repo.manifest_for_rev(rev)?;
46 let manifest = Manifest::open(repo)?;
38 let node = *repo
47 let changelog_entry = changelog.get_rev(rev)?;
39 .changelog()?
48 let node = *changelog
49 .node_from_rev(rev)
40 .node_from_rev(rev)
50 .expect("should succeed when changelog.get_rev did");
41 .expect("should succeed when repo.manifest did");
51 let manifest_node =
52 Node::from_hex_for_repo(&changelog_entry.manifest_node()?)?;
53 let manifest_entry = manifest.get_node(manifest_node.into())?;
54 let mut bytes = vec![];
42 let mut bytes = vec![];
55 let mut matched = vec![false; files.len()];
43 let mut matched = vec![false; files.len()];
56 let mut found_any = false;
44 let mut found_any = false;
57
45
58 for (manifest_file, node_bytes) in manifest_entry.files_with_nodes() {
46 for (manifest_file, node_bytes) in manifest.files_with_nodes() {
59 for (cat_file, is_matched) in files.iter().zip(&mut matched) {
47 for (cat_file, is_matched) in files.iter().zip(&mut matched) {
60 if cat_file.as_bytes() == manifest_file.as_bytes() {
48 if cat_file.as_bytes() == manifest_file.as_bytes() {
61 *is_matched = true;
49 *is_matched = true;
62 found_any = true;
50 found_any = true;
63 let index_path = store_path(manifest_file, b".i");
51 let file_log = repo.filelog(manifest_file)?;
64 let data_path = store_path(manifest_file, b".d");
65
66 let file_log =
67 Revlog::open(repo, &index_path, Some(&data_path))?;
68 let file_node = Node::from_hex_for_repo(node_bytes)?;
52 let file_node = Node::from_hex_for_repo(node_bytes)?;
69 let file_rev = file_log.get_node_rev(file_node.into())?;
53 let entry = file_log.data_for_node(file_node)?;
70 let data = file_log.get_rev_data(file_rev)?;
54 bytes.extend(entry.data()?)
71 if data.starts_with(&METADATA_DELIMITER) {
72 let end_delimiter_position = data
73 [METADATA_DELIMITER.len()..]
74 .windows(METADATA_DELIMITER.len())
75 .position(|bytes| bytes == METADATA_DELIMITER);
76 if let Some(position) = end_delimiter_position {
77 let offset = METADATA_DELIMITER.len() * 2;
78 bytes.extend(data[position + offset..].iter());
79 }
80 } else {
81 bytes.extend(data);
82 }
83 }
55 }
84 }
56 }
85 }
57 }
@@ -97,9 +69,3 b" pub fn cat<'a>("
97 node,
69 node,
98 })
70 })
99 }
71 }
100
101 fn store_path(hg_path: &HgPath, suffix: &[u8]) -> PathBuf {
102 let encoded_bytes =
103 path_encode(&[b"data/", hg_path.as_bytes(), suffix].concat());
104 get_path_from_bytes(&encoded_bytes).into()
105 }
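The removed `store_path` helper made the revlog naming explicit; `repo.filelog(path)` now hides it. Roughly, with illustrative file names (the exact escaping is `path_encode`'s business):

    // path_encode(b"data/foo.txt.i") yields the store-relative revlog
    // index path, e.g. "data/foo.txt.i" under ".hg/store" once joined.
    let index_path = store_path(HgPath::new(b"foo.txt"), b".i");
    let data_path = store_path(HgPath::new(b"foo.txt"), b".d");
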
@@ -9,9 +9,7 b' use crate::dirstate::parsers::parse_dirs'
9 use crate::dirstate_tree::on_disk::{for_each_tracked_path, read_docket};
9 use crate::dirstate_tree::on_disk::{for_each_tracked_path, read_docket};
10 use crate::errors::HgError;
10 use crate::errors::HgError;
11 use crate::repo::Repo;
11 use crate::repo::Repo;
12 use crate::revlog::changelog::Changelog;
12 use crate::revlog::manifest::Manifest;
13 use crate::revlog::manifest::{Manifest, ManifestEntry};
14 use crate::revlog::node::Node;
15 use crate::revlog::revlog::RevlogError;
13 use crate::revlog::revlog::RevlogError;
16 use crate::utils::hg_path::HgPath;
14 use crate::utils::hg_path::HgPath;
17 use crate::DirstateError;
15 use crate::DirstateError;
@@ -53,7 +51,7 b' impl Dirstate {'
53 let _parents = parse_dirstate_entries(
51 let _parents = parse_dirstate_entries(
54 &self.content,
52 &self.content,
55 |path, entry, _copy_source| {
53 |path, entry, _copy_source| {
56 if entry.state.is_tracked() {
54 if entry.state().is_tracked() {
57 files.push(path)
55 files.push(path)
58 }
56 }
59 Ok(())
57 Ok(())
@@ -72,16 +70,10 b' pub fn list_rev_tracked_files('
72 revset: &str,
70 revset: &str,
73 ) -> Result<FilesForRev, RevlogError> {
71 ) -> Result<FilesForRev, RevlogError> {
74 let rev = crate::revset::resolve_single(revset, repo)?;
72 let rev = crate::revset::resolve_single(revset, repo)?;
75 let changelog = Changelog::open(repo)?;
73 Ok(FilesForRev(repo.manifest_for_rev(rev)?))
76 let manifest = Manifest::open(repo)?;
77 let changelog_entry = changelog.get_rev(rev)?;
78 let manifest_node =
79 Node::from_hex_for_repo(&changelog_entry.manifest_node()?)?;
80 let manifest_entry = manifest.get_node(manifest_node.into())?;
81 Ok(FilesForRev(manifest_entry))
82 }
74 }
83
75
84 pub struct FilesForRev(ManifestEntry);
76 pub struct FilesForRev(Manifest);
85
77
86 impl FilesForRev {
78 impl FilesForRev {
87 pub fn iter(&self) -> impl Iterator<Item = &HgPath> {
79 pub fn iter(&self) -> impl Iterator<Item = &HgPath> {
@@ -1,12 +1,22 b''
1 use crate::changelog::Changelog;
1 use crate::config::{Config, ConfigError, ConfigParseError};
2 use crate::config::{Config, ConfigError, ConfigParseError};
2 use crate::errors::{HgError, IoErrorContext, IoResultExt};
3 use crate::dirstate::DirstateParents;
4 use crate::dirstate_tree::dirstate_map::DirstateMap;
5 use crate::dirstate_tree::owning::OwningDirstateMap;
6 use crate::errors::HgError;
7 use crate::errors::HgResultExt;
3 use crate::exit_codes;
8 use crate::exit_codes;
4 use crate::requirements;
9 use crate::manifest::{Manifest, Manifestlog};
10 use crate::revlog::filelog::Filelog;
11 use crate::revlog::revlog::RevlogError;
5 use crate::utils::files::get_path_from_bytes;
12 use crate::utils::files::get_path_from_bytes;
13 use crate::utils::hg_path::HgPath;
6 use crate::utils::SliceExt;
14 use crate::utils::SliceExt;
7 use memmap::{Mmap, MmapOptions};
15 use crate::vfs::{is_dir, is_file, Vfs};
16 use crate::{requirements, NodePrefix};
17 use crate::{DirstateError, Revision};
18 use std::cell::{Cell, Ref, RefCell, RefMut};
8 use std::collections::HashSet;
19 use std::collections::HashSet;
9 use std::io::ErrorKind;
10 use std::path::{Path, PathBuf};
20 use std::path::{Path, PathBuf};
11
21
12 /// A repository on disk
22 /// A repository on disk
@@ -16,6 +26,11 b' pub struct Repo {'
16 store: PathBuf,
26 store: PathBuf,
17 requirements: HashSet<String>,
27 requirements: HashSet<String>,
18 config: Config,
28 config: Config,
29 // None means not known/initialized yet
30 dirstate_parents: Cell<Option<DirstateParents>>,
31 dirstate_map: LazyCell<OwningDirstateMap, DirstateError>,
32 changelog: LazyCell<Changelog, HgError>,
33 manifestlog: LazyCell<Manifestlog, HgError>,
19 }
34 }
20
35
21 #[derive(Debug, derive_more::From)]
36 #[derive(Debug, derive_more::From)]
@@ -38,12 +53,6 b' impl From<ConfigError> for RepoError {'
38 }
53 }
39 }
54 }
40
55
41 /// Filesystem access abstraction for the contents of a given "base" directory
42 #[derive(Clone, Copy)]
43 pub struct Vfs<'a> {
44 pub(crate) base: &'a Path,
45 }
46
47 impl Repo {
56 impl Repo {
48 /// tries to find nearest repository root in current working directory or
57 /// tries to find nearest repository root in current working directory or
49 /// its ancestors
58 /// its ancestors
@@ -127,7 +136,8 b' impl Repo {'
127 } else {
136 } else {
128 let bytes = hg_vfs.read("sharedpath")?;
137 let bytes = hg_vfs.read("sharedpath")?;
129 let mut shared_path =
138 let mut shared_path =
130 get_path_from_bytes(bytes.trim_end_newlines()).to_owned();
139 get_path_from_bytes(bytes.trim_end_matches(|b| b == b'\n'))
140 .to_owned();
131 if relative {
141 if relative {
132 shared_path = dot_hg.join(shared_path)
142 shared_path = dot_hg.join(shared_path)
133 }
143 }
@@ -192,6 +202,10 b' impl Repo {'
192 store: store_path,
202 store: store_path,
193 dot_hg,
203 dot_hg,
194 config: repo_config,
204 config: repo_config,
205 dirstate_parents: Cell::new(None),
206 dirstate_map: LazyCell::new(Self::new_dirstate_map),
207 changelog: LazyCell::new(Changelog::open),
208 manifestlog: LazyCell::new(Manifestlog::open),
195 };
209 };
196
210
197 requirements::check(&repo)?;
211 requirements::check(&repo)?;
@@ -234,82 +248,162 b' impl Repo {'
 
     }
 
-    pub fn dirstate_parents(
-        &self,
-    ) -> Result<crate::dirstate::DirstateParents, HgError> {
-        let dirstate = self.hg_vfs().mmap_open("dirstate")?;
-        if dirstate.is_empty() {
-            return Ok(crate::dirstate::DirstateParents::NULL);
-        }
-        let parents = if self.has_dirstate_v2() {
+    fn dirstate_file_contents(&self) -> Result<Vec<u8>, HgError> {
+        Ok(self
+            .hg_vfs()
+            .read("dirstate")
+            .io_not_found_as_none()?
+            .unwrap_or(Vec::new()))
+    }
+
+    pub fn dirstate_parents(&self) -> Result<DirstateParents, HgError> {
+        if let Some(parents) = self.dirstate_parents.get() {
+            return Ok(parents);
+        }
+        let dirstate = self.dirstate_file_contents()?;
+        let parents = if dirstate.is_empty() {
+            DirstateParents::NULL
+        } else if self.has_dirstate_v2() {
             crate::dirstate_tree::on_disk::read_docket(&dirstate)?.parents()
         } else {
             crate::dirstate::parsers::parse_dirstate_parents(&dirstate)?
                 .clone()
         };
+        self.dirstate_parents.set(Some(parents));
         Ok(parents)
     }
275
276 fn new_dirstate_map(&self) -> Result<OwningDirstateMap, DirstateError> {
277 let dirstate_file_contents = self.dirstate_file_contents()?;
278 if dirstate_file_contents.is_empty() {
279 self.dirstate_parents.set(Some(DirstateParents::NULL));
280 Ok(OwningDirstateMap::new_empty(Vec::new()))
281 } else if self.has_dirstate_v2() {
282 let docket = crate::dirstate_tree::on_disk::read_docket(
283 &dirstate_file_contents,
284 )?;
285 self.dirstate_parents.set(Some(docket.parents()));
286 let data_size = docket.data_size();
287 let metadata = docket.tree_metadata();
288 let mut map = if let Some(data_mmap) = self
289 .hg_vfs()
290 .mmap_open(docket.data_filename())
291 .io_not_found_as_none()?
292 {
293 OwningDirstateMap::new_empty(data_mmap)
294 } else {
295 OwningDirstateMap::new_empty(Vec::new())
296 };
297 let (on_disk, placeholder) = map.get_mut_pair();
298 *placeholder = DirstateMap::new_v2(on_disk, data_size, metadata)?;
299 Ok(map)
300 } else {
301 let mut map = OwningDirstateMap::new_empty(dirstate_file_contents);
302 let (on_disk, placeholder) = map.get_mut_pair();
303 let (inner, parents) = DirstateMap::new_v1(on_disk)?;
304 self.dirstate_parents
305 .set(Some(parents.unwrap_or(DirstateParents::NULL)));
306 *placeholder = inner;
307 Ok(map)
308 }
309 }
310
311 pub fn dirstate_map(
312 &self,
313 ) -> Result<Ref<OwningDirstateMap>, DirstateError> {
314 self.dirstate_map.get_or_init(self)
315 }
316
317 pub fn dirstate_map_mut(
318 &self,
319 ) -> Result<RefMut<OwningDirstateMap>, DirstateError> {
320 self.dirstate_map.get_mut_or_init(self)
321 }
322
323 pub fn changelog(&self) -> Result<Ref<Changelog>, HgError> {
324 self.changelog.get_or_init(self)
325 }
326
327 pub fn changelog_mut(&self) -> Result<RefMut<Changelog>, HgError> {
328 self.changelog.get_mut_or_init(self)
329 }
330
331 pub fn manifestlog(&self) -> Result<Ref<Manifestlog>, HgError> {
332 self.manifestlog.get_or_init(self)
333 }
334
335 pub fn manifestlog_mut(&self) -> Result<RefMut<Manifestlog>, HgError> {
336 self.manifestlog.get_mut_or_init(self)
337 }
338
339 /// Returns the manifest of the *changeset* with the given node ID
340 pub fn manifest_for_node(
341 &self,
342 node: impl Into<NodePrefix>,
343 ) -> Result<Manifest, RevlogError> {
344 self.manifestlog()?.data_for_node(
345 self.changelog()?
346 .data_for_node(node.into())?
347 .manifest_node()?
348 .into(),
349 )
350 }
351
352 /// Returns the manifest of the *changeset* with the given revision number
353 pub fn manifest_for_rev(
354 &self,
355 revision: Revision,
356 ) -> Result<Manifest, RevlogError> {
357 self.manifestlog()?.data_for_node(
358 self.changelog()?
359 .data_for_rev(revision)?
360 .manifest_node()?
361 .into(),
362 )
363 }
364
365 pub fn filelog(&self, path: &HgPath) -> Result<Filelog, HgError> {
366 Filelog::open(self, path)
367 }
252 }
368 }
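Editor's note: a hedged usage sketch for the new accessors (not part of the changeset). Each returned `Ref` guard borrows from the `Repo`'s interior cache, so guards must be dropped before calling a `_mut` accessor, or the underlying `RefCell` will panic on the conflicting borrow.

    fn demo(repo: &hg::repo::Repo) {
        // First calls open 00changelog.i / 00manifest.i; later calls
        // reuse the cached values.
        let changelog = repo.changelog().unwrap();
        let manifestlog = repo.manifestlog().unwrap();
        drop((changelog, manifestlog)); // release the read borrows
        let _dmap = repo.dirstate_map_mut().unwrap(); // now safe to borrow mutably
    }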
253
369
254 impl Vfs<'_> {
370 /// Lazily-initialized component of `Repo` with interior mutability
255 pub fn join(&self, relative_path: impl AsRef<Path>) -> PathBuf {
371 ///
256 self.base.join(relative_path)
372 /// This differs from `OnceCell` in that the value can still be "deinitialized"
257 }
373 /// later by setting its inner `Option` to `None`.
374 struct LazyCell<T, E> {
375 value: RefCell<Option<T>>,
376 // `Fn`s that don’t capture environment are zero-size, so this box does
377 // not allocate:
378 init: Box<dyn Fn(&Repo) -> Result<T, E>>,
379 }
258
380
259 pub fn read(
381 impl<T, E> LazyCell<T, E> {
260 &self,
382 fn new(init: impl Fn(&Repo) -> Result<T, E> + 'static) -> Self {
261 relative_path: impl AsRef<Path>,
383 Self {
262 ) -> Result<Vec<u8>, HgError> {
384 value: RefCell::new(None),
263 let path = self.join(relative_path);
385 init: Box::new(init),
264 std::fs::read(&path).when_reading_file(&path)
386 }
265 }
266
267 pub fn mmap_open(
268 &self,
269 relative_path: impl AsRef<Path>,
270 ) -> Result<Mmap, HgError> {
271 let path = self.base.join(relative_path);
272 let file = std::fs::File::open(&path).when_reading_file(&path)?;
273 // TODO: what are the safety requirements here?
274 let mmap = unsafe { MmapOptions::new().map(&file) }
275 .when_reading_file(&path)?;
276 Ok(mmap)
277 }
387 }
278
388
279 pub fn rename(
389 fn get_or_init(&self, repo: &Repo) -> Result<Ref<T>, E> {
280 &self,
390 let mut borrowed = self.value.borrow();
281 relative_from: impl AsRef<Path>,
391 if borrowed.is_none() {
282 relative_to: impl AsRef<Path>,
392 drop(borrowed);
283 ) -> Result<(), HgError> {
393 // Only take `borrow_mut` when initialization is actually needed:
284 let from = self.join(relative_from);
394 // if another borrow is outstanding but no mutation is needed,
285 let to = self.join(relative_to);
395 // `borrow_mut` would panic.
286 std::fs::rename(&from, &to)
396 *self.value.borrow_mut() = Some((self.init)(repo)?);
287 .with_context(|| IoErrorContext::RenamingFile { from, to })
397 borrowed = self.value.borrow()
398 }
399 Ok(Ref::map(borrowed, |option| option.as_ref().unwrap()))
400 }
401
402 pub fn get_mut_or_init(&self, repo: &Repo) -> Result<RefMut<T>, E> {
403 let mut borrowed = self.value.borrow_mut();
404 if borrowed.is_none() {
405 *borrowed = Some((self.init)(repo)?);
406 }
407 Ok(RefMut::map(borrowed, |option| option.as_mut().unwrap()))
288 }
408 }
289 }
409 }
290
291 fn fs_metadata(
292 path: impl AsRef<Path>,
293 ) -> Result<Option<std::fs::Metadata>, HgError> {
294 let path = path.as_ref();
295 match std::fs::metadata(path) {
296 Ok(meta) => Ok(Some(meta)),
297 Err(error) => match error.kind() {
298 // TODO: when we require a Rust version where `NotADirectory` is
299 // stable, invert this logic and return None for it and `NotFound`
300 // and propagate any other error.
301 ErrorKind::PermissionDenied => Err(error).with_context(|| {
302 IoErrorContext::ReadingMetadata(path.to_owned())
303 }),
304 _ => Ok(None),
305 },
306 }
307 }
308
309 fn is_dir(path: impl AsRef<Path>) -> Result<bool, HgError> {
310 Ok(fs_metadata(path)?.map_or(false, |meta| meta.is_dir()))
311 }
312
313 fn is_file(path: impl AsRef<Path>) -> Result<bool, HgError> {
314 Ok(fs_metadata(path)?.map_or(false, |meta| meta.is_file()))
315 }
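Editor's note: a minimal, self-contained sketch of the `LazyCell` pattern introduced above (not part of the changeset). The init closure here takes no `&Repo` argument and errors are plain `String`s; the borrow dance mirrors `get_or_init` as shown in the diff.

    use std::cell::{Ref, RefCell};

    struct Lazy<T, E> {
        value: RefCell<Option<T>>,
        init: Box<dyn Fn() -> Result<T, E>>,
    }

    impl<T, E> Lazy<T, E> {
        fn new(init: impl Fn() -> Result<T, E> + 'static) -> Self {
            Self { value: RefCell::new(None), init: Box::new(init) }
        }

        fn get_or_init(&self) -> Result<Ref<'_, T>, E> {
            // A plain `if` drops the temporary read borrow before the
            // body runs, so taking the write borrow below cannot
            // self-conflict.
            if self.value.borrow().is_none() {
                *self.value.borrow_mut() = Some((self.init)()?);
            }
            Ok(Ref::map(self.value.borrow(), |opt| opt.as_ref().unwrap()))
        }
    }

    fn main() -> Result<(), String> {
        let cell: Lazy<String, String> = Lazy::new(|| {
            println!("initializing"); // runs only once
            Ok("expensive value".to_owned())
        });
        println!("{}", *cell.get_or_init()?); // initializes here
        println!("{}", *cell.get_or_init()?); // cache hit
        Ok(())
    }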
@@ -1,6 +1,7 b''
1 use crate::errors::{HgError, HgResultExt};
1 use crate::errors::{HgError, HgResultExt};
2 use crate::repo::{Repo, Vfs};
2 use crate::repo::Repo;
3 use crate::utils::join_display;
3 use crate::utils::join_display;
4 use crate::vfs::Vfs;
4 use std::collections::HashSet;
5 use std::collections::HashSet;
5
6
6 fn parse(bytes: &[u8]) -> Result<HashSet<String>, HgError> {
7 fn parse(bytes: &[u8]) -> Result<HashSet<String>, HgError> {
@@ -11,6 +11,7 b' mod nodemap_docket;'
11 pub mod path_encode;
11 pub mod path_encode;
12 pub use node::{FromHexError, Node, NodePrefix};
12 pub use node::{FromHexError, Node, NodePrefix};
13 pub mod changelog;
13 pub mod changelog;
14 pub mod filelog;
14 pub mod index;
15 pub mod index;
15 pub mod manifest;
16 pub mod manifest;
16 pub mod patch;
17 pub mod patch;
@@ -12,22 +12,22 b' pub struct Changelog {'
12
12
13 impl Changelog {
13 impl Changelog {
14 /// Open the `changelog` of a repository given by its root.
14 /// Open the `changelog` of a repository given by its root.
15 pub fn open(repo: &Repo) -> Result<Self, RevlogError> {
15 pub fn open(repo: &Repo) -> Result<Self, HgError> {
16 let revlog = Revlog::open(repo, "00changelog.i", None)?;
16 let revlog = Revlog::open(repo, "00changelog.i", None)?;
17 Ok(Self { revlog })
17 Ok(Self { revlog })
18 }
18 }
19
19
20 /// Return the `ChangelogEntry` a given node id.
20 /// Return the `ChangelogEntry` for the given node ID.
21 pub fn get_node(
21 pub fn data_for_node(
22 &self,
22 &self,
23 node: NodePrefix,
23 node: NodePrefix,
24 ) -> Result<ChangelogEntry, RevlogError> {
24 ) -> Result<ChangelogEntry, RevlogError> {
25 let rev = self.revlog.get_node_rev(node)?;
25 let rev = self.revlog.rev_from_node(node)?;
26 self.get_rev(rev)
26 self.data_for_rev(rev)
27 }
27 }
28
28
29 /// Return the `ChangelogEntry` of a given node revision.
29 /// Return the `ChangelogEntry` of the given revision number.
30 pub fn get_rev(
30 pub fn data_for_rev(
31 &self,
31 &self,
32 rev: Revision,
32 rev: Revision,
33 ) -> Result<ChangelogEntry, RevlogError> {
33 ) -> Result<ChangelogEntry, RevlogError> {
@@ -36,7 +36,7 b' impl Changelog {'
36 }
36 }
37
37
38 pub fn node_from_rev(&self, rev: Revision) -> Option<&Node> {
38 pub fn node_from_rev(&self, rev: Revision) -> Option<&Node> {
39 Some(self.revlog.index.get_entry(rev)?.hash())
39 self.revlog.node_from_rev(rev)
40 }
40 }
41 }
41 }
42
42
@@ -57,9 +57,11 b' impl ChangelogEntry {'
57
57
58 /// Return the node id of the `manifest` referenced by this `changelog`
58 /// Return the node id of the `manifest` referenced by this `changelog`
59 /// entry.
59 /// entry.
60 pub fn manifest_node(&self) -> Result<&[u8], RevlogError> {
60 pub fn manifest_node(&self) -> Result<Node, HgError> {
61 self.lines()
61 Node::from_hex_for_repo(
62 .next()
62 self.lines()
63 .ok_or_else(|| HgError::corrupted("empty changelog entry").into())
63 .next()
64 .ok_or_else(|| HgError::corrupted("empty changelog entry"))?,
65 )
64 }
66 }
65 }
67 }
@@ -5,7 +5,6 b' use byteorder::{BigEndian, ByteOrder};'
5
5
6 use crate::errors::HgError;
6 use crate::errors::HgError;
7 use crate::revlog::node::Node;
7 use crate::revlog::node::Node;
8 use crate::revlog::revlog::RevlogError;
9 use crate::revlog::{Revision, NULL_REVISION};
8 use crate::revlog::{Revision, NULL_REVISION};
10
9
11 pub const INDEX_ENTRY_SIZE: usize = 64;
10 pub const INDEX_ENTRY_SIZE: usize = 64;
@@ -23,7 +22,7 b' impl Index {'
23 /// Calculate the start of each entry when is_inline is true.
22 /// Calculate the start of each entry when is_inline is true.
24 pub fn new(
23 pub fn new(
25 bytes: Box<dyn Deref<Target = [u8]> + Send>,
24 bytes: Box<dyn Deref<Target = [u8]> + Send>,
26 ) -> Result<Self, RevlogError> {
25 ) -> Result<Self, HgError> {
27 if is_inline(&bytes) {
26 if is_inline(&bytes) {
28 let mut offset: usize = 0;
27 let mut offset: usize = 0;
29 let mut offsets = Vec::new();
28 let mut offsets = Vec::new();
@@ -1,48 +1,60 b''
1 use crate::errors::HgError;
1 use crate::repo::Repo;
2 use crate::repo::Repo;
2 use crate::revlog::revlog::{Revlog, RevlogError};
3 use crate::revlog::revlog::{Revlog, RevlogError};
3 use crate::revlog::NodePrefix;
4 use crate::revlog::Revision;
4 use crate::revlog::Revision;
5 use crate::revlog::{Node, NodePrefix};
5 use crate::utils::hg_path::HgPath;
6 use crate::utils::hg_path::HgPath;
6
7
7 /// A specialized `Revlog` to work with `manifest` data format.
8 /// A specialized `Revlog` to work with `manifest` data format.
8 pub struct Manifest {
9 pub struct Manifestlog {
9 /// The generic `revlog` format.
10 /// The generic `revlog` format.
10 revlog: Revlog,
11 revlog: Revlog,
11 }
12 }
12
13
13 impl Manifest {
14 impl Manifestlog {
14 /// Open the `manifest` of a repository given by its root.
15 /// Open the `manifest` of a repository given by its root.
15 pub fn open(repo: &Repo) -> Result<Self, RevlogError> {
16 pub fn open(repo: &Repo) -> Result<Self, HgError> {
16 let revlog = Revlog::open(repo, "00manifest.i", None)?;
17 let revlog = Revlog::open(repo, "00manifest.i", None)?;
17 Ok(Self { revlog })
18 Ok(Self { revlog })
18 }
19 }
19
20
20 /// Return the `ManifestEntry` of a given node id.
21 /// Return the `Manifest` for the given node ID.
21 pub fn get_node(
22 ///
23 /// Note: this is a node ID in the manifestlog, typically found through
24 /// `ChangelogEntry::manifest_node`. It is *not* the node ID of any
25 /// changeset.
26 ///
27 /// See also `Repo::manifest_for_node`
28 pub fn data_for_node(
22 &self,
29 &self,
23 node: NodePrefix,
30 node: NodePrefix,
24 ) -> Result<ManifestEntry, RevlogError> {
31 ) -> Result<Manifest, RevlogError> {
25 let rev = self.revlog.get_node_rev(node)?;
32 let rev = self.revlog.rev_from_node(node)?;
26 self.get_rev(rev)
33 self.data_for_rev(rev)
27 }
34 }
28
35
29 /// Return the `ManifestEntry` of a given node revision.
36 /// Return the `Manifest` of a given revision number.
30 pub fn get_rev(
37 ///
38 /// Note: this is a revision number in the manifestlog, *not* of any
39 /// changeset.
40 ///
41 /// See also `Repo::manifest_for_rev`
42 pub fn data_for_rev(
31 &self,
43 &self,
32 rev: Revision,
44 rev: Revision,
33 ) -> Result<ManifestEntry, RevlogError> {
45 ) -> Result<Manifest, RevlogError> {
34 let bytes = self.revlog.get_rev_data(rev)?;
46 let bytes = self.revlog.get_rev_data(rev)?;
35 Ok(ManifestEntry { bytes })
47 Ok(Manifest { bytes })
36 }
48 }
37 }
49 }
38
50
39 /// `Manifest` entry which knows how to interpret the `manifest` data bytes.
51 /// `Manifestlog` entry which knows how to interpret the `manifest` data bytes.
40 #[derive(Debug)]
52 #[derive(Debug)]
41 pub struct ManifestEntry {
53 pub struct Manifest {
42 bytes: Vec<u8>,
54 bytes: Vec<u8>,
43 }
55 }
44
56
45 impl ManifestEntry {
57 impl Manifest {
46 /// Return an iterator over the lines of the entry.
58 /// Return an iterator over the lines of the entry.
47 pub fn lines(&self) -> impl Iterator<Item = &[u8]> {
59 pub fn lines(&self) -> impl Iterator<Item = &[u8]> {
48 self.bytes
60 self.bytes
@@ -73,4 +85,17 b' impl ManifestEntry {'
73 (HgPath::new(&line[..pos]), &line[hash_start..hash_end])
85 (HgPath::new(&line[..pos]), &line[hash_start..hash_end])
74 })
86 })
75 }
87 }
88
89 /// If the given path is in this manifest, return its filelog node ID
90 pub fn find_file(&self, path: &HgPath) -> Result<Option<Node>, HgError> {
91 // TODO: use binary search instead of linear scan. This may involve
92 // building (and caching) an index of the byte indices of each manifest
93 // line.
94 for (manifest_path, node) in self.files_with_nodes() {
95 if manifest_path == path {
96 return Ok(Some(Node::from_hex_for_repo(node)?));
97 }
98 }
99 Ok(None)
100 }
76 }
101 }
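Editor's note: a hedged sketch (not part of the changeset) tying the renamed APIs together, using only calls visible in this diff; error handling is collapsed to `unwrap()`.

    use hg::repo::Repo;
    use hg::utils::hg_path::HgPath;

    fn filelog_node_in_p1(repo: &Repo, path: &HgPath) {
        // p1 of the working directory, via the cached dirstate parents.
        let p1 = repo.dirstate_parents().unwrap().p1;
        // The changeset's manifest, resolved changelog -> manifestlog
        // as in `Repo::manifest_for_node` above.
        let manifest = repo.manifest_for_node(p1).unwrap();
        // Linear scan over manifest lines (see the binary-search TODO).
        match manifest.find_file(path).unwrap() {
            Some(node) => println!("tracked, filelog node {:x}", node.short()),
            None => println!("not in the p1 manifest"),
        }
    }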
@@ -1,10 +1,9 b''
1 use crate::errors::{HgError, HgResultExt};
1 use crate::errors::{HgError, HgResultExt};
2 use crate::requirements;
2 use crate::requirements;
3 use bytes_cast::{unaligned, BytesCast};
3 use bytes_cast::{unaligned, BytesCast};
4 use memmap::Mmap;
4 use memmap2::Mmap;
5 use std::path::{Path, PathBuf};
5 use std::path::{Path, PathBuf};
6
6
7 use super::revlog::RevlogError;
8 use crate::repo::Repo;
7 use crate::repo::Repo;
9 use crate::utils::strip_suffix;
8 use crate::utils::strip_suffix;
10
9
@@ -38,7 +37,7 b' impl NodeMapDocket {'
38 pub fn read_from_file(
37 pub fn read_from_file(
39 repo: &Repo,
38 repo: &Repo,
40 index_path: &Path,
39 index_path: &Path,
41 ) -> Result<Option<(Self, Mmap)>, RevlogError> {
40 ) -> Result<Option<(Self, Mmap)>, HgError> {
42 if !repo
41 if !repo
43 .requirements()
42 .requirements()
44 .contains(requirements::NODEMAP_REQUIREMENT)
43 .contains(requirements::NODEMAP_REQUIREMENT)
@@ -65,10 +64,9 b' impl NodeMapDocket {'
65 };
64 };
66
65
67 /// Treat any error as a parse error
66 /// Treat any error as a parse error
68 fn parse<T, E>(result: Result<T, E>) -> Result<T, RevlogError> {
67 fn parse<T, E>(result: Result<T, E>) -> Result<T, HgError> {
69 result.map_err(|_| {
68 result
70 HgError::corrupted("nodemap docket parse error").into()
69 .map_err(|_| HgError::corrupted("nodemap docket parse error"))
71 })
72 }
70 }
73
71
74 let (header, rest) = parse(DocketHeader::from_bytes(input))?;
72 let (header, rest) = parse(DocketHeader::from_bytes(input))?;
@@ -94,7 +92,7 b' impl NodeMapDocket {'
94 if mmap.len() >= data_length {
92 if mmap.len() >= data_length {
95 Ok(Some((docket, mmap)))
93 Ok(Some((docket, mmap)))
96 } else {
94 } else {
97 Err(HgError::corrupted("persistent nodemap too short").into())
95 Err(HgError::corrupted("persistent nodemap too short"))
98 }
96 }
99 } else {
97 } else {
100 // Even if .hg/requires opted in, some revlogs are deemed small
98 // Even if .hg/requires opted in, some revlogs are deemed small
@@ -18,6 +18,7 b' use super::patch;'
18 use crate::errors::HgError;
18 use crate::errors::HgError;
19 use crate::repo::Repo;
19 use crate::repo::Repo;
20 use crate::revlog::Revision;
20 use crate::revlog::Revision;
21 use crate::{Node, NULL_REVISION};
21
22
22 #[derive(derive_more::From)]
23 #[derive(derive_more::From)]
23 pub enum RevlogError {
24 pub enum RevlogError {
@@ -50,7 +51,7 b' pub struct Revlog {'
50 /// When index and data are not interleaved: bytes of the revlog index.
51 /// When index and data are not interleaved: bytes of the revlog index.
51 /// When index and data are interleaved: bytes of the revlog index and
52 /// When index and data are interleaved: bytes of the revlog index and
52 /// data.
53 /// data.
53 pub(crate) index: Index,
54 index: Index,
54 /// When index and data are not interleaved: bytes of the revlog data
55 /// When index and data are not interleaved: bytes of the revlog data
55 data_bytes: Option<Box<dyn Deref<Target = [u8]> + Send>>,
56 data_bytes: Option<Box<dyn Deref<Target = [u8]> + Send>>,
56 /// When present on disk: the persistent nodemap for this revlog
57 /// When present on disk: the persistent nodemap for this revlog
@@ -67,14 +68,14 b' impl Revlog {'
67 repo: &Repo,
68 repo: &Repo,
68 index_path: impl AsRef<Path>,
69 index_path: impl AsRef<Path>,
69 data_path: Option<&Path>,
70 data_path: Option<&Path>,
70 ) -> Result<Self, RevlogError> {
71 ) -> Result<Self, HgError> {
71 let index_path = index_path.as_ref();
72 let index_path = index_path.as_ref();
72 let index_mmap = repo.store_vfs().mmap_open(&index_path)?;
73 let index_mmap = repo.store_vfs().mmap_open(&index_path)?;
73
74
74 let version = get_version(&index_mmap);
75 let version = get_version(&index_mmap);
75 if version != 1 {
76 if version != 1 {
76 // A proper new version should have had a repo/store requirement.
77 // A proper new version should have had a repo/store requirement.
77 return Err(RevlogError::corrupted());
78 return Err(HgError::corrupted("corrupted revlog"));
78 }
79 }
79
80
80 let index = Index::new(Box::new(index_mmap))?;
81 let index = Index::new(Box::new(index_mmap))?;
@@ -118,12 +119,23 b' impl Revlog {'
118 self.index.is_empty()
119 self.index.is_empty()
119 }
120 }
120
121
121 /// Return the full data associated to a node.
122 /// Returns the node ID for the given revision number, if it exists in this
123 /// revlog
124 pub fn node_from_rev(&self, rev: Revision) -> Option<&Node> {
125 Some(self.index.get_entry(rev)?.hash())
126 }
127
128 /// Return the revision number for the given node ID, if it exists in this
129 /// revlog
122 #[timed]
130 #[timed]
123 pub fn get_node_rev(
131 pub fn rev_from_node(
124 &self,
132 &self,
125 node: NodePrefix,
133 node: NodePrefix,
126 ) -> Result<Revision, RevlogError> {
134 ) -> Result<Revision, RevlogError> {
135 if node.is_prefix_of(&NULL_NODE) {
136 return Ok(NULL_REVISION);
137 }
138
127 if let Some(nodemap) = &self.nodemap {
139 if let Some(nodemap) = &self.nodemap {
128 return nodemap
140 return nodemap
129 .find_bin(&self.index, node)?
141 .find_bin(&self.index, node)?
@@ -4,7 +4,6 b''
4
4
5 use crate::errors::HgError;
5 use crate::errors::HgError;
6 use crate::repo::Repo;
6 use crate::repo::Repo;
7 use crate::revlog::changelog::Changelog;
8 use crate::revlog::revlog::{Revlog, RevlogError};
7 use crate::revlog::revlog::{Revlog, RevlogError};
9 use crate::revlog::NodePrefix;
8 use crate::revlog::NodePrefix;
10 use crate::revlog::{Revision, NULL_REVISION, WORKING_DIRECTORY_HEX};
9 use crate::revlog::{Revision, NULL_REVISION, WORKING_DIRECTORY_HEX};
@@ -17,7 +16,7 b' pub fn resolve_single('
17 input: &str,
16 input: &str,
18 repo: &Repo,
17 repo: &Repo,
19 ) -> Result<Revision, RevlogError> {
18 ) -> Result<Revision, RevlogError> {
20 let changelog = Changelog::open(repo)?;
19 let changelog = repo.changelog()?;
21
20
22 match resolve_rev_number_or_hex_prefix(input, &changelog.revlog) {
21 match resolve_rev_number_or_hex_prefix(input, &changelog.revlog) {
23 Err(RevlogError::InvalidRevision) => {} // Try other syntax
22 Err(RevlogError::InvalidRevision) => {} // Try other syntax
@@ -46,8 +45,14 b' pub fn resolve_rev_number_or_hex_prefix('
46 input: &str,
45 input: &str,
47 revlog: &Revlog,
46 revlog: &Revlog,
48 ) -> Result<Revision, RevlogError> {
47 ) -> Result<Revision, RevlogError> {
48 // The Python equivalent of this is part of `revsymbol` in
49 // `mercurial/scmutil.py`
50
49 if let Ok(integer) = input.parse::<i32>() {
51 if let Ok(integer) = input.parse::<i32>() {
50 if integer >= 0 && revlog.has_rev(integer) {
52 if integer.to_string() == input
53 && integer >= 0
54 && revlog.has_rev(integer)
55 {
51 return Ok(integer);
56 return Ok(integer);
52 }
57 }
53 }
58 }
@@ -56,7 +61,7 b' pub fn resolve_rev_number_or_hex_prefix('
56 {
61 {
57 return Err(RevlogError::WDirUnsupported);
62 return Err(RevlogError::WDirUnsupported);
58 }
63 }
59 return revlog.get_node_rev(prefix);
64 return revlog.rev_from_node(prefix);
60 }
65 }
61 Err(RevlogError::InvalidRevision)
66 Err(RevlogError::InvalidRevision)
62 }
67 }
@@ -67,36 +67,35 b' where'
67 }
67 }
68
68
69 pub trait SliceExt {
69 pub trait SliceExt {
70 fn trim_end_newlines(&self) -> &Self;
71 fn trim_end(&self) -> &Self;
70 fn trim_end(&self) -> &Self;
72 fn trim_start(&self) -> &Self;
71 fn trim_start(&self) -> &Self;
72 fn trim_end_matches(&self, f: impl FnMut(u8) -> bool) -> &Self;
73 fn trim_start_matches(&self, f: impl FnMut(u8) -> bool) -> &Self;
73 fn trim(&self) -> &Self;
74 fn trim(&self) -> &Self;
74 fn drop_prefix(&self, needle: &Self) -> Option<&Self>;
75 fn drop_prefix(&self, needle: &Self) -> Option<&Self>;
75 fn split_2(&self, separator: u8) -> Option<(&[u8], &[u8])>;
76 fn split_2(&self, separator: u8) -> Option<(&[u8], &[u8])>;
76 }
77 fn split_2_by_slice(&self, separator: &[u8]) -> Option<(&[u8], &[u8])>;
77
78 #[allow(clippy::trivially_copy_pass_by_ref)]
79 fn is_not_whitespace(c: &u8) -> bool {
80 !(*c as char).is_whitespace()
81 }
78 }
82
79
83 impl SliceExt for [u8] {
80 impl SliceExt for [u8] {
84 fn trim_end_newlines(&self) -> &[u8] {
81 fn trim_end(&self) -> &[u8] {
85 if let Some(last) = self.iter().rposition(|&byte| byte != b'\n') {
82 self.trim_end_matches(|byte| byte.is_ascii_whitespace())
83 }
84
85 fn trim_start(&self) -> &[u8] {
86 self.trim_start_matches(|byte| byte.is_ascii_whitespace())
87 }
88
89 fn trim_end_matches(&self, mut f: impl FnMut(u8) -> bool) -> &Self {
90 if let Some(last) = self.iter().rposition(|&byte| !f(byte)) {
86 &self[..=last]
91 &self[..=last]
87 } else {
92 } else {
88 &[]
93 &[]
89 }
94 }
90 }
95 }
91 fn trim_end(&self) -> &[u8] {
96
92 if let Some(last) = self.iter().rposition(is_not_whitespace) {
97 fn trim_start_matches(&self, mut f: impl FnMut(u8) -> bool) -> &Self {
93 &self[..=last]
98 if let Some(first) = self.iter().position(|&byte| !f(byte)) {
94 } else {
95 &[]
96 }
97 }
98 fn trim_start(&self) -> &[u8] {
99 if let Some(first) = self.iter().position(is_not_whitespace) {
100 &self[first..]
99 &self[first..]
101 } else {
100 } else {
102 &[]
101 &[]
@@ -136,6 +135,14 b' impl SliceExt for [u8] {'
136 let b = iter.next()?;
135 let b = iter.next()?;
137 Some((a, b))
136 Some((a, b))
138 }
137 }
138
139 fn split_2_by_slice(&self, separator: &[u8]) -> Option<(&[u8], &[u8])> {
140 if let Some(pos) = find_slice_in_slice(self, separator) {
141 Some((&self[..pos], &self[pos + separator.len()..]))
142 } else {
143 None
144 }
145 }
139 }
146 }
140
147
141 pub trait Escaped {
148 pub trait Escaped {
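Editor's note: a standalone sketch of the generalized trimming helper above, written as a free function over byte slices. The `sharedpath` call site in the repo.rs hunk earlier (`trim_end_matches(|b| b == b'\n')`) shows why newline-only trimming replaced `trim_end_newlines`: other trailing whitespace in the path must survive.

    fn trim_end_matches(bytes: &[u8], mut f: impl FnMut(u8) -> bool) -> &[u8] {
        // Same logic as `SliceExt::trim_end_matches` in the diff above.
        match bytes.iter().rposition(|&b| !f(b)) {
            Some(last) => &bytes[..=last],
            None => &[],
        }
    }

    fn main() {
        // Strip trailing newlines only, preserving the trailing space.
        let raw = b"/path/to/shared repo \n\n";
        assert_eq!(trim_end_matches(raw, |b| b == b'\n'), b"/path/to/shared repo ");
        // Trimming everything yields the empty slice.
        assert_eq!(trim_end_matches(b"\n\n", |b| b == b'\n'), b"");
    }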
@@ -26,6 +26,7 b' hg-core = { path = "../hg-core"}'
26 libc = '*'
26 libc = '*'
27 log = "0.4.8"
27 log = "0.4.8"
28 env_logger = "0.7.1"
28 env_logger = "0.7.1"
29 stable_deref_trait = "1.2.0"
29
30
30 [dependencies.cpython]
31 [dependencies.cpython]
31 version = "0.6.0"
32 version = "0.6.0"
@@ -13,58 +13,7 b' use hg::copy_tracing::ChangedFiles;'
13 use hg::copy_tracing::CombineChangesetCopies;
13 use hg::copy_tracing::CombineChangesetCopies;
14 use hg::Revision;
14 use hg::Revision;
15
15
16 use self::pybytes_with_data::PyBytesWithData;
16 use crate::pybytes_deref::PyBytesDeref;
17
18 // Module to encapsulate private fields
19 mod pybytes_with_data {
20 use cpython::{PyBytes, Python};
21
22 /// Safe abstraction over a `PyBytes` together with the `&[u8]` slice
23 /// that borrows it.
24 ///
25 /// Calling `PyBytes::data` requires a GIL marker but we want to access the
26 /// data in a thread that (ideally) does not need to acquire the GIL.
27 /// This type allows separating the call and the use.
28 pub(super) struct PyBytesWithData {
29 #[allow(unused)]
30 keep_alive: PyBytes,
31
32 /// Borrows the buffer inside `self.keep_alive`,
33 /// but the borrow-checker cannot express self-referential structs.
34 data: *const [u8],
35 }
36
37 fn require_send<T: Send>() {}
38
39 #[allow(unused)]
40 fn static_assert_pybytes_is_send() {
41 require_send::<PyBytes>;
42 }
43
44 // Safety: PyBytes is Send. Raw pointers are not by default,
45 // but here sending one to another thread is fine since we ensure it stays
46 // valid.
47 unsafe impl Send for PyBytesWithData {}
48
49 impl PyBytesWithData {
50 pub fn new(py: Python, bytes: PyBytes) -> Self {
51 Self {
52 data: bytes.data(py),
53 keep_alive: bytes,
54 }
55 }
56
57 pub fn data(&self) -> &[u8] {
58 // Safety: the raw pointer is valid as long as the PyBytes is still
59 // alive, and the returned slice borrows `self`.
60 unsafe { &*self.data }
61 }
62
63 pub fn unwrap(self) -> PyBytes {
64 self.keep_alive
65 }
66 }
67 }
68
17
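Editor's note: `PyBytesDeref` itself is not shown in this diff; the removed `PyBytesWithData` above documents the pattern both presumably rely on. Here is a hedged, generic sketch of that pattern with a plain `Vec<u8>` owner standing in for `PyBytes`: keep the owner inside the struct and store a raw slice pointer, because the borrow checker cannot express the self-borrow.

    struct OwnedBytes {
        // Never touched again, but keeps the heap buffer alive.
        #[allow(unused)]
        keep_alive: Vec<u8>,
        // Points into `keep_alive`'s heap buffer, which does not move
        // when the Vec (or this struct) is moved.
        data: *const [u8],
    }

    // Safety: Vec<u8> is Send, and the pointer stays valid because the
    // owner travels together with it.
    unsafe impl Send for OwnedBytes {}

    impl OwnedBytes {
        fn new(bytes: Vec<u8>) -> Self {
            let data: *const [u8] = bytes.as_slice();
            Self { keep_alive: bytes, data }
        }

        fn data(&self) -> &[u8] {
            // Safety: `keep_alive` is alive and never mutated.
            unsafe { &*self.data }
        }
    }

    fn main() {
        let owned = OwnedBytes::new(b"changed-files data".to_vec());
        // The point of the exercise: the bytes can cross a thread
        // boundary without re-acquiring anything (the GIL, in the
        // PyBytes case).
        std::thread::spawn(move || assert_eq!(owned.data(), b"changed-files data"))
            .join()
            .unwrap();
    }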
69 /// Combines the copy information contained in revisions `revs` to build a copy
18 /// Combines the copy information contained in revisions `revs` to build a copy
70 /// map.
19 /// map.
@@ -123,7 +72,7 b' pub fn combine_changeset_copies_wrapper('
123 //
72 //
124 // TODO: tweak the bound?
73 // TODO: tweak the bound?
125 let (rev_info_sender, rev_info_receiver) =
74 let (rev_info_sender, rev_info_receiver) =
126 crossbeam_channel::bounded::<RevInfo<PyBytesWithData>>(1000);
75 crossbeam_channel::bounded::<RevInfo<PyBytesDeref>>(1000);
127
76
128 // This channel (going the other way around) however is unbounded.
77 // This channel (going the other way around) however is unbounded.
129 // If they were both bounded, there could be deadlocks
78 // If they were both bounded, there might potentially be deadlocks
@@ -143,7 +92,7 b' pub fn combine_changeset_copies_wrapper('
143 CombineChangesetCopies::new(children_count);
92 CombineChangesetCopies::new(children_count);
144 for (rev, p1, p2, opt_bytes) in rev_info_receiver {
93 for (rev, p1, p2, opt_bytes) in rev_info_receiver {
145 let files = match &opt_bytes {
94 let files = match &opt_bytes {
146 Some(raw) => ChangedFiles::new(raw.data()),
95 Some(raw) => ChangedFiles::new(raw.as_ref()),
147 // Python None was extracted to Option::None,
96 // Python None was extracted to Option::None,
148 // meaning there was no copy data.
97 // meaning there was no copy data.
149 None => ChangedFiles::new_empty(),
98 None => ChangedFiles::new_empty(),
@@ -169,7 +118,7 b' pub fn combine_changeset_copies_wrapper('
169
118
170 for rev_info in revs_info {
119 for rev_info in revs_info {
171 let (rev, p1, p2, opt_bytes) = rev_info?;
120 let (rev, p1, p2, opt_bytes) = rev_info?;
172 let opt_bytes = opt_bytes.map(|b| PyBytesWithData::new(py, b));
121 let opt_bytes = opt_bytes.map(|b| PyBytesDeref::new(py, b));
173
122
174 // We’d prefer to avoid the child thread calling into Python code,
123 // We’d prefer to avoid the child thread calling into Python code,
175 // but this avoids a potential deadlock on the GIL if it does:
124 // but this avoids a potential deadlock on the GIL if it does:
@@ -12,9 +12,7 b''
12 mod copymap;
12 mod copymap;
13 mod dirs_multiset;
13 mod dirs_multiset;
14 mod dirstate_map;
14 mod dirstate_map;
15 mod dispatch;
16 mod non_normal_entries;
15 mod non_normal_entries;
17 mod owning;
18 mod status;
16 mod status;
19 use crate::{
17 use crate::{
20 dirstate::{
18 dirstate::{
@@ -23,13 +21,11 b' use crate::{'
23 exceptions,
21 exceptions,
24 };
22 };
25 use cpython::{
23 use cpython::{
26 exc, PyBytes, PyDict, PyErr, PyList, PyModule, PyObject, PyResult,
24 PyBytes, PyDict, PyErr, PyList, PyModule, PyObject, PyResult, Python,
27 PySequence, Python,
28 };
25 };
29 use hg::dirstate_tree::on_disk::V2_FORMAT_MARKER;
26 use hg::dirstate_tree::on_disk::V2_FORMAT_MARKER;
30 use hg::{utils::hg_path::HgPathBuf, DirstateEntry, EntryState, StateMap};
27 use hg::DirstateEntry;
31 use libc::{c_char, c_int};
28 use libc::{c_char, c_int};
32 use std::convert::TryFrom;
33
29
34 // C code uses a custom `dirstate_tuple` type, checks in multiple instances
30 // C code uses a custom `dirstate_tuple` type, checks in multiple instances
35 // for this type, and raises a Python `Exception` if the check does not pass.
31 // for this type, and raises a Python `Exception` if the check does not pass.
@@ -52,62 +48,24 b' pub fn make_dirstate_item('
52 py: Python,
48 py: Python,
53 entry: &DirstateEntry,
49 entry: &DirstateEntry,
54 ) -> PyResult<PyObject> {
50 ) -> PyResult<PyObject> {
55 let &DirstateEntry {
56 state,
57 mode,
58 size,
59 mtime,
60 } = entry;
61 // Explicitly go through u8 first, then cast to platform-specific `c_char`
51 // Explicitly go through u8 first, then cast to platform-specific `c_char`
62 // because Into<u8> has a specific implementation while `as c_char` would
52 // because Into<u8> has a specific implementation while `as c_char` would
63 // just do a naive enum cast.
53 // just do a naive enum cast.
64 let state_code: u8 = state.into();
54 let state_code: u8 = entry.state().into();
65 make_dirstate_item_raw(py, state_code, mode, size, mtime)
66 }
67
55
68 pub fn make_dirstate_item_raw(
69 py: Python,
70 state: u8,
71 mode: i32,
72 size: i32,
73 mtime: i32,
74 ) -> PyResult<PyObject> {
75 let make = make_dirstate_item_capi::retrieve(py)?;
56 let make = make_dirstate_item_capi::retrieve(py)?;
76 let maybe_obj = unsafe {
57 let maybe_obj = unsafe {
77 let ptr = make(state as c_char, mode, size, mtime);
58 let ptr = make(
59 state_code as c_char,
60 entry.mode(),
61 entry.size(),
62 entry.mtime(),
63 );
78 PyObject::from_owned_ptr_opt(py, ptr)
64 PyObject::from_owned_ptr_opt(py, ptr)
79 };
65 };
80 maybe_obj.ok_or_else(|| PyErr::fetch(py))
66 maybe_obj.ok_or_else(|| PyErr::fetch(py))
81 }
67 }
82
68
83 pub fn extract_dirstate(py: Python, dmap: &PyDict) -> Result<StateMap, PyErr> {
84 dmap.items(py)
85 .iter()
86 .map(|(filename, stats)| {
87 let stats = stats.extract::<PySequence>(py)?;
88 let state = stats.get_item(py, 0)?.extract::<PyBytes>(py)?;
89 let state =
90 EntryState::try_from(state.data(py)[0]).map_err(|e| {
91 PyErr::new::<exc::ValueError, _>(py, e.to_string())
92 })?;
93 let mode = stats.get_item(py, 1)?.extract(py)?;
94 let size = stats.get_item(py, 2)?.extract(py)?;
95 let mtime = stats.get_item(py, 3)?.extract(py)?;
96 let filename = filename.extract::<PyBytes>(py)?;
97 let filename = filename.data(py);
98 Ok((
99 HgPathBuf::from(filename.to_owned()),
100 DirstateEntry {
101 state,
102 mode,
103 size,
104 mtime,
105 },
106 ))
107 })
108 .collect()
109 }
110
111 /// Create the module, with `__package__` given from parent
69 /// Create the module, with `__package__` given from parent
112 pub fn init_module(py: Python, package: &str) -> PyResult<PyModule> {
70 pub fn init_module(py: Python, package: &str) -> PyResult<PyModule> {
113 let dotted_name = &format!("{}.dirstate", package);
71 let dotted_name = &format!("{}.dirstate", package);
@@ -9,19 +9,15 b''
9 //! `hg-core` package.
9 //! `hg-core` package.
10
10
11 use std::cell::RefCell;
11 use std::cell::RefCell;
12 use std::convert::TryInto;
13
12
14 use cpython::{
13 use cpython::{
15 exc, ObjectProtocol, PyBytes, PyClone, PyDict, PyErr, PyObject, PyResult,
14 exc, ObjectProtocol, PyBytes, PyClone, PyDict, PyErr, PyObject, PyResult,
16 Python, UnsafePyLeaked,
15 Python, UnsafePyLeaked,
17 };
16 };
18
17
19 use crate::dirstate::extract_dirstate;
20 use hg::{
18 use hg::{
21 errors::HgError,
22 utils::hg_path::{HgPath, HgPathBuf},
19 utils::hg_path::{HgPath, HgPathBuf},
23 DirsMultiset, DirsMultisetIter, DirstateError, DirstateMapError,
20 DirsMultiset, DirsMultisetIter, DirstateMapError,
24 EntryState,
25 };
21 };
26
22
27 py_class!(pub class Dirs |py| {
23 py_class!(pub class Dirs |py| {
@@ -32,25 +28,11 b' py_class!(pub class Dirs |py| {'
32 def __new__(
28 def __new__(
33 _cls,
29 _cls,
34 map: PyObject,
30 map: PyObject,
35 skip: Option<PyObject> = None
36 ) -> PyResult<Self> {
31 ) -> PyResult<Self> {
37 let mut skip_state: Option<EntryState> = None;
32 let inner = if map.cast_as::<PyDict>(py).is_ok() {
38 if let Some(skip) = skip {
33 let err = "pathutil.dirs() with a dict should only be used by the Python dirstatemap \
39 skip_state = Some(
34 and should not be used when Rust is enabled";
40 skip.extract::<PyBytes>(py)?.data(py)[0]
35 return Err(PyErr::new::<exc::TypeError, _>(py, err.to_string()))
41 .try_into()
42 .map_err(|e: HgError| {
43 PyErr::new::<exc::ValueError, _>(py, e.to_string())
44 })?,
45 );
46 }
47 let inner = if let Ok(map) = map.cast_as::<PyDict>(py) {
48 let dirstate = extract_dirstate(py, &map)?;
49 let dirstate = dirstate.iter().map(|(k, v)| Ok((k, *v)));
50 DirsMultiset::from_dirstate(dirstate, skip_state)
51 .map_err(|e: DirstateError| {
52 PyErr::new::<exc::ValueError, _>(py, e.to_string())
53 })?
54 } else {
36 } else {
55 let map: Result<Vec<HgPathBuf>, PyErr> = map
37 let map: Result<Vec<HgPathBuf>, PyErr> = map
56 .iter(py)?
38 .iter(py)?
@@ -20,19 +20,19 b' use cpython::{'
20 use crate::{
20 use crate::{
21 dirstate::copymap::{CopyMap, CopyMapItemsIterator, CopyMapKeysIterator},
21 dirstate::copymap::{CopyMap, CopyMapItemsIterator, CopyMapKeysIterator},
22 dirstate::make_dirstate_item,
22 dirstate::make_dirstate_item,
23 dirstate::make_dirstate_item_raw,
24 dirstate::non_normal_entries::{
23 dirstate::non_normal_entries::{
25 NonNormalEntries, NonNormalEntriesIterator,
24 NonNormalEntries, NonNormalEntriesIterator,
26 },
25 },
27 dirstate::owning::OwningDirstateMap,
26 pybytes_deref::PyBytesDeref,
28 parsers::dirstate_parents_to_pytuple,
29 };
27 };
30 use hg::{
28 use hg::{
31 dirstate::parsers::Timestamp,
29 dirstate::parsers::Timestamp,
32 dirstate::MTIME_UNSET,
30 dirstate::MTIME_UNSET,
33 dirstate::SIZE_NON_NORMAL,
31 dirstate::SIZE_NON_NORMAL,
32 dirstate_tree::dirstate_map::DirstateMap as TreeDirstateMap,
34 dirstate_tree::dispatch::DirstateMapMethods,
33 dirstate_tree::dispatch::DirstateMapMethods,
35 dirstate_tree::on_disk::DirstateV2ParseError,
34 dirstate_tree::on_disk::DirstateV2ParseError,
35 dirstate_tree::owning::OwningDirstateMap,
36 revlog::Node,
36 revlog::Node,
37 utils::files::normalize_case,
37 utils::files::normalize_case,
38 utils::hg_path::{HgPath, HgPathBuf},
38 utils::hg_path::{HgPath, HgPathBuf},
@@ -62,8 +62,13 b' py_class!(pub class DirstateMap |py| {'
62 on_disk: PyBytes,
62 on_disk: PyBytes,
63 ) -> PyResult<PyObject> {
63 ) -> PyResult<PyObject> {
64 let (inner, parents) = if use_dirstate_tree {
64 let (inner, parents) = if use_dirstate_tree {
65 let (map, parents) = OwningDirstateMap::new_v1(py, on_disk)
65 let on_disk = PyBytesDeref::new(py, on_disk);
66 let mut map = OwningDirstateMap::new_empty(on_disk);
67 let (on_disk, map_placeholder) = map.get_mut_pair();
68
69 let (actual_map, parents) = TreeDirstateMap::new_v1(on_disk)
66 .map_err(|e| dirstate_error(py, e))?;
70 .map_err(|e| dirstate_error(py, e))?;
71 *map_placeholder = actual_map;
67 (Box::new(map) as _, parents)
72 (Box::new(map) as _, parents)
68 } else {
73 } else {
69 let bytes = on_disk.data(py);
74 let bytes = on_disk.data(py);
@@ -72,7 +77,11 b' py_class!(pub class DirstateMap |py| {'
72 (Box::new(map) as _, parents)
77 (Box::new(map) as _, parents)
73 };
78 };
74 let map = Self::create_instance(py, inner)?;
79 let map = Self::create_instance(py, inner)?;
75 let parents = parents.map(|p| dirstate_parents_to_pytuple(py, &p));
80 let parents = parents.map(|p| {
81 let p1 = PyBytes::new(py, p.p1.as_bytes());
82 let p2 = PyBytes::new(py, p.p2.as_bytes());
83 (p1, p2)
84 });
76 Ok((map, parents).to_py_object(py).into_object())
85 Ok((map, parents).to_py_object(py).into_object())
77 }
86 }
78
87
@@ -86,10 +95,13 b' py_class!(pub class DirstateMap |py| {'
86 let dirstate_error = |e: DirstateError| {
95 let dirstate_error = |e: DirstateError| {
87 PyErr::new::<exc::OSError, _>(py, format!("Dirstate error: {:?}", e))
96 PyErr::new::<exc::OSError, _>(py, format!("Dirstate error: {:?}", e))
88 };
97 };
89 let inner = OwningDirstateMap::new_v2(
98 let on_disk = PyBytesDeref::new(py, on_disk);
90 py, on_disk, data_size, tree_metadata,
99 let mut map = OwningDirstateMap::new_empty(on_disk);
100 let (on_disk, map_placeholder) = map.get_mut_pair();
101 *map_placeholder = TreeDirstateMap::new_v2(
102 on_disk, data_size, tree_metadata.data(py),
91 ).map_err(dirstate_error)?;
103 ).map_err(dirstate_error)?;
92 let map = Self::create_instance(py, Box::new(inner))?;
104 let map = Self::create_instance(py, Box::new(map))?;
93 Ok(map.into_object())
105 Ok(map.into_object())
94 }
106 }
95
107
@@ -122,12 +134,12 b' py_class!(pub class DirstateMap |py| {'
122 let filename = HgPath::new(f.data(py));
134 let filename = HgPath::new(f.data(py));
123 let state = item.getattr(py, "state")?.extract::<PyBytes>(py)?;
135 let state = item.getattr(py, "state")?.extract::<PyBytes>(py)?;
124 let state = state.data(py)[0];
136 let state = state.data(py)[0];
125 let entry = DirstateEntry {
137 let entry = DirstateEntry::from_v1_data(
126 state: state.try_into().expect("state is always valid"),
138 state.try_into().expect("state is always valid"),
127 mtime: item.getattr(py, "mtime")?.extract(py)?,
139 item.getattr(py, "mode")?.extract(py)?,
128 size: item.getattr(py, "size")?.extract(py)?,
140 item.getattr(py, "size")?.extract(py)?,
129 mode: item.getattr(py, "mode")?.extract(py)?,
141 item.getattr(py, "mtime")?.extract(py)?,
130 };
142 );
131 self.inner(py).borrow_mut().set_v1(filename, entry);
143 self.inner(py).borrow_mut().set_v1(filename, entry);
132 Ok(py.None())
144 Ok(py.None())
133 }
145 }
@@ -163,13 +175,7 b' py_class!(pub class DirstateMap |py| {'
163 } else {
175 } else {
164 mtime.extract(py)?
176 mtime.extract(py)?
165 };
177 };
166 let entry = DirstateEntry {
178 let entry = DirstateEntry::new_for_add_file(mode, size, mtime);
167 // XXX Arbitrary default value since the value is determined later
168 state: EntryState::Normal,
169 mode: mode,
170 size: size,
171 mtime: mtime,
172 };
173 let added = added.extract::<PyBool>(py)?.is_true();
179 let added = added.extract::<PyBool>(py)?.is_true();
174 let merged = merged.extract::<PyBool>(py)?.is_true();
180 let merged = merged.extract::<PyBool>(py)?.is_true();
175 let from_p2 = from_p2.extract::<PyBool>(py)?.is_true();
181 let from_p2 = from_p2.extract::<PyBool>(py)?.is_true();
@@ -409,7 +415,7 b' py_class!(pub class DirstateMap |py| {'
409 let dict = PyDict::new(py);
415 let dict = PyDict::new(py);
410 for item in self.inner(py).borrow_mut().iter() {
416 for item in self.inner(py).borrow_mut().iter() {
411 let (path, entry) = item.map_err(|e| v2_error(py, e))?;
417 let (path, entry) = item.map_err(|e| v2_error(py, e))?;
412 if entry.state != EntryState::Removed {
418 if entry.state() != EntryState::Removed {
413 let key = normalize_case(path);
419 let key = normalize_case(path);
414 let value = path;
420 let value = path;
415 dict.set_item(
421 dict.set_item(
@@ -599,14 +605,14 b' py_class!(pub class DirstateMap |py| {'
599 Ok(dirs)
605 Ok(dirs)
600 }
606 }
601
607
602 def debug_iter(&self) -> PyResult<PyList> {
608 def debug_iter(&self, all: bool) -> PyResult<PyList> {
603 let dirs = PyList::new(py, &[]);
609 let dirs = PyList::new(py, &[]);
604 for item in self.inner(py).borrow().debug_iter() {
610 for item in self.inner(py).borrow().debug_iter(all) {
605 let (path, (state, mode, size, mtime)) =
611 let (path, (state, mode, size, mtime)) =
606 item.map_err(|e| v2_error(py, e))?;
612 item.map_err(|e| v2_error(py, e))?;
607 let path = PyBytes::new(py, path.as_bytes());
613 let path = PyBytes::new(py, path.as_bytes());
608 let item = make_dirstate_item_raw(py, state, mode, size, mtime)?;
614 let item = (path, state, mode, size, mtime);
609 dirs.append(py, (path, item).to_py_object(py).into_object())
615 dirs.append(py, item.to_py_object(py).into_object())
610 }
616 }
611 Ok(dirs)
617 Ok(dirs)
612 }
618 }
@@ -35,7 +35,7 b' pub mod debug;'
35 pub mod dirstate;
35 pub mod dirstate;
36 pub mod discovery;
36 pub mod discovery;
37 pub mod exceptions;
37 pub mod exceptions;
38 pub mod parsers;
38 mod pybytes_deref;
39 pub mod revlog;
39 pub mod revlog;
40 pub mod utils;
40 pub mod utils;
41
41
@@ -58,11 +58,6 b' py_module_initializer!(rustext, initrust'
58 m.add(py, "discovery", discovery::init_module(py, &dotted_name)?)?;
58 m.add(py, "discovery", discovery::init_module(py, &dotted_name)?)?;
59 m.add(py, "dirstate", dirstate::init_module(py, &dotted_name)?)?;
59 m.add(py, "dirstate", dirstate::init_module(py, &dotted_name)?)?;
60 m.add(py, "revlog", revlog::init_module(py, &dotted_name)?)?;
60 m.add(py, "revlog", revlog::init_module(py, &dotted_name)?)?;
61 m.add(
62 py,
63 "parsers",
64 parsers::init_parsers_module(py, &dotted_name)?,
65 )?;
66 m.add(py, "GraphError", py.get_type::<exceptions::GraphError>())?;
61 m.add(py, "GraphError", py.get_type::<exceptions::GraphError>())?;
67 Ok(())
62 Ok(())
68 });
63 });
@@ -9,22 +9,14 b' use crate::error::CommandError;'
9 use crate::ui::Ui;
9 use crate::ui::Ui;
10 use clap::{Arg, SubCommand};
10 use clap::{Arg, SubCommand};
11 use hg;
11 use hg;
12 use hg::dirstate_tree::dirstate_map::DirstateMap;
12 use hg::dirstate_tree::dispatch::DirstateMapMethods;
13 use hg::dirstate_tree::on_disk;
13 use hg::errors::HgError;
14 use hg::errors::HgResultExt;
14 use hg::manifest::Manifest;
15 use hg::errors::IoResultExt;
16 use hg::matchers::AlwaysMatcher;
15 use hg::matchers::AlwaysMatcher;
17 use hg::operations::cat;
18 use hg::repo::Repo;
16 use hg::repo::Repo;
19 use hg::revlog::node::Node;
20 use hg::utils::hg_path::{hg_path_to_os_string, HgPath};
17 use hg::utils::hg_path::{hg_path_to_os_string, HgPath};
21 use hg::StatusError;
22 use hg::{HgPathCow, StatusOptions};
18 use hg::{HgPathCow, StatusOptions};
23 use log::{info, warn};
19 use log::{info, warn};
24 use std::convert::TryInto;
25 use std::fs;
26 use std::io::BufReader;
27 use std::io::Read;
28
20
29 pub const HELP_TEXT: &str = "
21 pub const HELP_TEXT: &str = "
30 Show changed files in the working directory
22 Show changed files in the working directory
@@ -166,40 +158,7 b' pub fn run(invocation: &crate::CliInvoca'
166 };
158 };
167
159
168 let repo = invocation.repo?;
160 let repo = invocation.repo?;
169 let dirstate_data_mmap;
161 let mut dmap = repo.dirstate_map_mut()?;
170 let (mut dmap, parents) = if repo.has_dirstate_v2() {
171 let docket_data =
172 repo.hg_vfs().read("dirstate").io_not_found_as_none()?;
173 let parents;
174 let dirstate_data;
175 let data_size;
176 let docket;
177 let tree_metadata;
178 if let Some(docket_data) = &docket_data {
179 docket = on_disk::read_docket(docket_data)?;
180 tree_metadata = docket.tree_metadata();
181 parents = Some(docket.parents());
182 data_size = docket.data_size();
183 dirstate_data_mmap = repo
184 .hg_vfs()
185 .mmap_open(docket.data_filename())
186 .io_not_found_as_none()?;
187 dirstate_data = dirstate_data_mmap.as_deref().unwrap_or(b"");
188 } else {
189 parents = None;
190 tree_metadata = b"";
191 data_size = 0;
192 dirstate_data = b"";
193 }
194 let dmap =
195 DirstateMap::new_v2(dirstate_data, data_size, tree_metadata)?;
196 (dmap, parents)
197 } else {
198 dirstate_data_mmap =
199 repo.hg_vfs().mmap_open("dirstate").io_not_found_as_none()?;
200 let dirstate_data = dirstate_data_mmap.as_deref().unwrap_or(b"");
201 DirstateMap::new_v1(dirstate_data)?
202 };
203
162
204 let options = StatusOptions {
163 let options = StatusOptions {
205 // TODO should be provided by the dirstate parsing and
164 // TODO should be provided by the dirstate parsing and
@@ -216,8 +175,7 b' pub fn run(invocation: &crate::CliInvoca'
216 collect_traversed_dirs: false,
175 collect_traversed_dirs: false,
217 };
176 };
218 let ignore_file = repo.working_directory_vfs().join(".hgignore"); // TODO hardcoded
177 let ignore_file = repo.working_directory_vfs().join(".hgignore"); // TODO hardcoded
219 let (mut ds_status, pattern_warnings) = hg::dirstate_tree::status::status(
178 let (mut ds_status, pattern_warnings) = dmap.status(
220 &mut dmap,
221 &AlwaysMatcher,
179 &AlwaysMatcher,
222 repo.working_directory_path().to_owned(),
180 repo.working_directory_path().to_owned(),
223 vec![ignore_file],
181 vec![ignore_file],
@@ -239,16 +197,12 b' pub fn run(invocation: &crate::CliInvoca'
239 if !ds_status.unsure.is_empty()
197 if !ds_status.unsure.is_empty()
240 && (display_states.modified || display_states.clean)
198 && (display_states.modified || display_states.clean)
241 {
199 {
242 let p1: Node = parents
200 let p1 = repo.dirstate_parents()?.p1;
243 .expect(
201 let manifest = repo.manifest_for_node(p1).map_err(|e| {
244 "Dirstate with no parents should not list any file to
202 CommandError::from((e, &*format!("{:x}", p1.short())))
245 be rechecked for modifications",
203 })?;
246 )
247 .p1
248 .into();
249 let p1_hex = format!("{:x}", p1);
250 for to_check in ds_status.unsure {
204 for to_check in ds_status.unsure {
251 if cat_file_is_modified(repo, &to_check, &p1_hex)? {
205 if cat_file_is_modified(repo, &manifest, &to_check)? {
252 if display_states.modified {
206 if display_states.modified {
253 ds_status.modified.push(to_check);
207 ds_status.modified.push(to_check);
254 }
208 }
@@ -309,39 +263,19 b' fn display_status_paths('
309 /// TODO: detect permission bits and similar metadata modifications
263 /// TODO: detect permission bits and similar metadata modifications
310 fn cat_file_is_modified(
264 fn cat_file_is_modified(
311 repo: &Repo,
265 repo: &Repo,
266 manifest: &Manifest,
312 hg_path: &HgPath,
267 hg_path: &HgPath,
313 rev: &str,
268 ) -> Result<bool, HgError> {
314 ) -> Result<bool, CommandError> {
269 let file_node = manifest
315 // TODO CatRev expects &[HgPathBuf], something like
270 .find_file(hg_path)?
316 // &[impl Deref<HgPath>] would be nicer and should avoid the copy
271 .expect("ambgious file not in p1");
317 let path_bufs = [hg_path.into()];
272 let filelog = repo.filelog(hg_path)?;
318 // TODO IIUC CatRev returns a simple Vec<u8> for all files
273 let filelog_entry = filelog.data_for_node(file_node).map_err(|_| {
319 // being able to tell them apart as (path, bytes) would be nicer
274 HgError::corrupted("filelog missing node from manifest")
320 // and OPTIM would allow manifest resolution just once.
275 })?;
321 let output = cat(repo, rev, &path_bufs).map_err(|e| (e, rev))?;
276 let contents_in_p1 = filelog_entry.data()?;
322
277
323 let fs_path = repo
278 let fs_path = hg_path_to_os_string(hg_path).expect("HgPath conversion");
324 .working_directory_vfs()
279 let fs_contents = repo.working_directory_vfs().read(fs_path)?;
325 .join(hg_path_to_os_string(hg_path).expect("HgPath conversion"));
280 return Ok(contents_in_p1 == &*fs_contents);
326 let hg_data_len: u64 = match output.concatenated.len().try_into() {
327 Ok(v) => v,
328 Err(_) => {
329 // conversion of data length to u64 failed,
330 // good luck for any file to have this content
331 return Ok(true);
332 }
333 };
334 let fobj = fs::File::open(&fs_path).when_reading_file(&fs_path)?;
335 if fobj.metadata().map_err(|e| StatusError::from(e))?.len() != hg_data_len
336 {
337 return Ok(true);
338 }
339 for (fs_byte, hg_byte) in
340 BufReader::new(fobj).bytes().zip(output.concatenated)
341 {
342 if fs_byte.map_err(|e| StatusError::from(e))? != hg_byte {
343 return Ok(true);
344 }
345 }
346 Ok(false)
347 }
281 }
@@ -567,11 +567,10 b' fn check_extensions(config: &Config) -> '
567 unsupported.remove(supported);
567 unsupported.remove(supported);
568 }
568 }
569
569
570 if let Some(ignored_list) =
570 if let Some(ignored_list) = config.get_list(b"rhg", b"ignored-extensions")
571 config.get_simple_list(b"rhg", b"ignored-extensions")
572 {
571 {
573 for ignored in ignored_list {
572 for ignored in ignored_list {
574 unsupported.remove(ignored);
573 unsupported.remove(ignored.as_slice());
575 }
574 }
576 }
575 }
577
576
@@ -34,7 +34,7 b' configitem('
34 )
34 )
35
35
36 parsers = policy.importmod('parsers')
36 parsers = policy.importmod('parsers')
37 rustmod = policy.importrust('parsers')
37 has_rust_dirstate = policy.importrust('dirstate') is not None
38
38
39
39
40 def pack_dirstate(fakenow, orig, dmap, copymap, pl, now):
40 def pack_dirstate(fakenow, orig, dmap, copymap, pl, now):
@@ -63,7 +63,7 b' def fakewrite(ui, func):'
63 # 'fakenow' value and 'touch -t YYYYmmddHHMM' argument easy
63 # 'fakenow' value and 'touch -t YYYYmmddHHMM' argument easy
64 fakenow = dateutil.parsedate(fakenow, [b'%Y%m%d%H%M'])[0]
64 fakenow = dateutil.parsedate(fakenow, [b'%Y%m%d%H%M'])[0]
65
65
66 if rustmod is not None:
66 if has_rust_dirstate:
67 # The Rust implementation does not use public parse/pack dirstate
67 # The Rust implementation does not use public parse/pack dirstate
68 # to prevent conversion round-trips
68 # to prevent conversion round-trips
69 orig_dirstatemap_write = dirstatemapmod.dirstatemap.write
69 orig_dirstatemap_write = dirstatemapmod.dirstatemap.write
@@ -85,7 +85,7 b' def fakewrite(ui, func):'
85 finally:
85 finally:
86 orig_module.pack_dirstate = orig_pack_dirstate
86 orig_module.pack_dirstate = orig_pack_dirstate
87 dirstate._getfsnow = orig_dirstate_getfsnow
87 dirstate._getfsnow = orig_dirstate_getfsnow
88 if rustmod is not None:
88 if has_rust_dirstate:
89 dirstatemapmod.dirstatemap.write = orig_dirstatemap_write
89 dirstatemapmod.dirstatemap.write = orig_dirstatemap_write
90
90
91
91
@@ -14,8 +14,6 b' setupcommon() {'
14 cat >> $HGRCPATH << EOF
14 cat >> $HGRCPATH << EOF
15 [extensions]
15 [extensions]
16 infinitepush=
16 infinitepush=
17 [ui]
18 ssh = "$PYTHON" "$TESTDIR/dummyssh"
19 [infinitepush]
17 [infinitepush]
20 branchpattern=re:scratch/.*
18 branchpattern=re:scratch/.*
21 EOF
19 EOF
@@ -1,8 +1,6 b''
1 cat >> $HGRCPATH <<EOF
1 cat >> $HGRCPATH <<EOF
2 [extensions]
2 [extensions]
3 narrow=
3 narrow=
4 [ui]
5 ssh="$PYTHON" "$RUNTESTDIR/dummyssh"
6 [experimental]
4 [experimental]
7 changegroup3 = True
5 changegroup3 = True
8 EOF
6 EOF
@@ -7,8 +7,6 b' debug=True'
7 remotefilelog=
7 remotefilelog=
8 rebase=
8 rebase=
9 strip=
9 strip=
10 [ui]
11 ssh="$PYTHON" "$TESTDIR/dummyssh"
12 [server]
10 [server]
13 preferuncompressed=True
11 preferuncompressed=True
14 [experimental]
12 [experimental]
@@ -1554,6 +1554,8 b' class Test(unittest.TestCase):'
1554 hgrc.write(b'merge = internal:merge\n')
1554 hgrc.write(b'merge = internal:merge\n')
1555 hgrc.write(b'mergemarkers = detailed\n')
1555 hgrc.write(b'mergemarkers = detailed\n')
1556 hgrc.write(b'promptecho = True\n')
1556 hgrc.write(b'promptecho = True\n')
1557 dummyssh = os.path.join(self._testdir, b'dummyssh')
1558 hgrc.write(b'ssh = "%s" "%s"\n' % (PYTHON, dummyssh))
1557 hgrc.write(b'timeout.warn=15\n')
1559 hgrc.write(b'timeout.warn=15\n')
1558 hgrc.write(b'[chgserver]\n')
1560 hgrc.write(b'[chgserver]\n')
1559 hgrc.write(b'idletimeout=60\n')
1561 hgrc.write(b'idletimeout=60\n')
@@ -665,20 +665,24 b' def issimplestorefile(f, kind, st):'
665
665
666
666
667 class simplestore(store.encodedstore):
667 class simplestore(store.encodedstore):
668 def datafiles(self):
668 def datafiles(self, undecodable=None):
669 for x in super(simplestore, self).datafiles():
669 for x in super(simplestore, self).datafiles():
670 yield x
670 yield x
671
671
672 # Supplement with non-revlog files.
672 # Supplement with non-revlog files.
673 extrafiles = self._walk('data', True, filefilter=issimplestorefile)
673 extrafiles = self._walk('data', True, filefilter=issimplestorefile)
674
674
675 for unencoded, encoded, size in extrafiles:
675 for f1, size in extrafiles:
676 try:
676 try:
677 unencoded = store.decodefilename(unencoded)
677 f2 = store.decodefilename(f1)
678 except KeyError:
678 except KeyError:
679 unencoded = None
679 if undecodable is None:
680 raise error.StorageError(b'undecodable revlog name %s' % f1)
681 else:
682 undecodable.append(f1)
683 continue
680
684
681 yield unencoded, encoded, size
685 yield f2, size
682
686
683
687
684 def reposetup(ui, repo):
688 def reposetup(ui, repo):
@@ -131,13 +131,13 b' should all fail'
131
131
132 $ hg addremove -s foo
132 $ hg addremove -s foo
133 abort: similarity must be a number
133 abort: similarity must be a number
134 [255]
134 [10]
135 $ hg addremove -s -1
135 $ hg addremove -s -1
136 abort: similarity must be between 0 and 100
136 abort: similarity must be between 0 and 100
137 [255]
137 [10]
138 $ hg addremove -s 1e6
138 $ hg addremove -s 1e6
139 abort: similarity must be between 0 and 100
139 abort: similarity must be between 0 and 100
140 [255]
140 [10]
141
141
142 $ cd ..
142 $ cd ..
143
143
@@ -455,7 +455,7 b' missing file'
455
455
456 $ hg ann nosuchfile
456 $ hg ann nosuchfile
457 abort: nosuchfile: no such file in rev e9e6b4fa872f
457 abort: nosuchfile: no such file in rev e9e6b4fa872f
458 [255]
458 [10]
459
459
460 annotate file without '\n' on last line
460 annotate file without '\n' on last line
461
461
@@ -15,6 +15,7 b' Create a repository:'
15 ui.merge=internal:merge
15 ui.merge=internal:merge
16 ui.mergemarkers=detailed
16 ui.mergemarkers=detailed
17 ui.promptecho=True
17 ui.promptecho=True
18 ui.ssh=* (glob)
18 ui.timeout.warn=15
19 ui.timeout.warn=15
19 web.address=localhost
20 web.address=localhost
20 web\.ipv6=(?:True|False) (re)
21 web\.ipv6=(?:True|False) (re)
@@ -214,14 +214,11 b' class remotething(thing):'
214 mangle(two),
214 mangle(two),
215 ),
215 ),
216 ]
216 ]
217 encoded_res_future = wireprotov1peer.future()
217 return encoded_args, unmangle
218 yield encoded_args, encoded_res_future
219 yield unmangle(encoded_res_future.value)
220
218
221 @wireprotov1peer.batchable
219 @wireprotov1peer.batchable
222 def bar(self, b, a):
220 def bar(self, b, a):
223 encresref = wireprotov1peer.future()
221 return [
224 yield [
225 (
222 (
226 b'b',
223 b'b',
227 mangle(b),
224 mangle(b),
@@ -230,8 +227,7 b' class remotething(thing):'
230 b'a',
227 b'a',
231 mangle(a),
228 mangle(a),
232 ),
229 ),
233 ], encresref
230 ], unmangle
234 yield unmangle(encresref.value)
235
231
236 # greet is coded directly. It therefore does not support batching. If it
232 # greet is coded directly. It therefore does not support batching. If it
237 # does appear in a batch, the batch is split around greet, and the call to
233 # does appear in a batch, the batch is split around greet, and the call to
@@ -12,16 +12,6 b' The data from the bookmark file are filt'
12 node known to the changelog. If the cache invalidation between these two bits
12 node known to the changelog. If the cache invalidation between these two bits
13 goes wrong, bookmark can be dropped.
13 goes wrong, bookmark can be dropped.
14
14
15 global setup
16 ------------
17
18 $ cat >> $HGRCPATH << EOF
19 > [ui]
20 > ssh = "$PYTHON" "$TESTDIR/dummyssh"
21 > [server]
22 > concurrent-push-mode=check-related
23 > EOF
24
25 Setup
15 Setup
26 -----
16 -----
27
17
@@ -490,6 +490,30 b' divergent bookmarks'
490 Y 0:4e3505fd9583
490 Y 0:4e3505fd9583
491 Z 1:0d2164f0ce0d
491 Z 1:0d2164f0ce0d
492
492
493 mirroring bookmarks
494
495 $ hg book
496 @ 1:9b140be10808
497 @foo 2:0d2164f0ce0d
498 X 1:9b140be10808
499 X@foo 2:0d2164f0ce0d
500 Y 0:4e3505fd9583
501 Z 2:0d2164f0ce0d
502 foo -1:000000000000
503 * foobar 1:9b140be10808
504 $ cp .hg/bookmarks .hg/bookmarks.bak
505 $ hg book -d X
506 $ hg pull ../a --config bookmarks.mirror=true
507 pulling from ../a
508 searching for changes
509 no changes found
510 $ hg book
511 @ 2:0d2164f0ce0d
512 X 2:0d2164f0ce0d
513 Y 0:4e3505fd9583
514 Z 2:0d2164f0ce0d
515 $ mv .hg/bookmarks.bak .hg/bookmarks
516
493 explicit pull should overwrite the local version (issue4439)
517 explicit pull should overwrite the local version (issue4439)
494
518
495 $ hg update -r X
519 $ hg update -r X
@@ -1142,8 +1166,6 b' Check hook preventing push (issue4455)'
1142 > local=../issue4455-dest/
1166 > local=../issue4455-dest/
1143 > ssh=ssh://user@dummy/issue4455-dest
1167 > ssh=ssh://user@dummy/issue4455-dest
1144 > http=http://localhost:$HGPORT/
1168 > http=http://localhost:$HGPORT/
1145 > [ui]
1146 > ssh="$PYTHON" "$TESTDIR/dummyssh"
1147 > EOF
1169 > EOF
1148 $ cat >> ../issue4455-dest/.hg/hgrc << EOF
1170 $ cat >> ../issue4455-dest/.hg/hgrc << EOF
1149 > [hooks]
1171 > [hooks]
@@ -1270,7 +1292,6 b' Test that pre-pushkey compat for bookmar'
1270
1292
1271 $ cat << EOF >> $HGRCPATH
1293 $ cat << EOF >> $HGRCPATH
1272 > [ui]
1294 > [ui]
1273 > ssh="$PYTHON" "$TESTDIR/dummyssh"
1274 > [server]
1295 > [server]
1275 > bookmarks-pushkey-compat = yes
1296 > bookmarks-pushkey-compat = yes
1276 > EOF
1297 > EOF
@@ -28,8 +28,6 b' enable obsolescence'
28 > evolution.createmarkers=True
28 > evolution.createmarkers=True
29 > evolution.exchange=True
29 > evolution.exchange=True
30 > bundle2-output-capture=True
30 > bundle2-output-capture=True
31 > [ui]
32 > ssh="$PYTHON" "$TESTDIR/dummyssh"
33 > [command-templates]
31 > [command-templates]
34 > log={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
32 > log={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
35 > [web]
33 > [web]
@@ -922,10 +920,6 b' Check abort from mandatory pushkey'
922
920
923 Test lazily acquiring the lock during unbundle
921 Test lazily acquiring the lock during unbundle
924 $ cp $TESTTMP/hgrc.orig $HGRCPATH
922 $ cp $TESTTMP/hgrc.orig $HGRCPATH
925 $ cat >> $HGRCPATH <<EOF
926 > [ui]
927 > ssh="$PYTHON" "$TESTDIR/dummyssh"
928 > EOF
929
923
930 $ cat >> $TESTTMP/locktester.py <<EOF
924 $ cat >> $TESTTMP/locktester.py <<EOF
931 > import os
925 > import os
@@ -233,8 +233,6 b' Create an extension to test bundle2 API'
233 > bundle2=$TESTTMP/bundle2.py
233 > bundle2=$TESTTMP/bundle2.py
234 > [experimental]
234 > [experimental]
235 > evolution.createmarkers=True
235 > evolution.createmarkers=True
236 > [ui]
237 > ssh="$PYTHON" "$TESTDIR/dummyssh"
238 > [command-templates]
236 > [command-templates]
239 > log={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
237 > log={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
240 > [web]
238 > [web]
@@ -37,7 +37,6 b''
37
37
38 $ cat >> $HGRCPATH <<EOF
38 $ cat >> $HGRCPATH <<EOF
39 > [ui]
39 > [ui]
40 > ssh = "$PYTHON" "$TESTDIR/dummyssh"
41 > username = nobody <no.reply@example.com>
40 > username = nobody <no.reply@example.com>
42 >
41 >
43 > [alias]
42 > [alias]
@@ -94,8 +94,6 b' Start a simple HTTP server to serve bund'
94 $ cat dumb.pid >> $DAEMON_PIDS
94 $ cat dumb.pid >> $DAEMON_PIDS
95
95
96 $ cat >> $HGRCPATH << EOF
96 $ cat >> $HGRCPATH << EOF
97 > [ui]
98 > ssh="$PYTHON" "$TESTDIR/dummyssh"
99 > [command-templates]
97 > [command-templates]
100 > log={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
98 > log={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
101 > EOF
99 > EOF
NO CONTENT: file renamed from tests/test-clone-uncompressed.t to tests/test-clone-stream.t
@@ -1125,7 +1125,7 b" Test that auto sharing doesn't cause fai"
1125 $ hg id -R remote -r 0
1125 $ hg id -R remote -r 0
1126 abort: repository remote not found
1126 abort: repository remote not found
1127 [255]
1127 [255]
1128 $ hg --config share.pool=share -q clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" a ssh://user@dummy/remote
1128 $ hg --config share.pool=share -q clone a ssh://user@dummy/remote
1129 $ hg -R remote id -r 0
1129 $ hg -R remote id -r 0
1130 acb14030fe0a
1130 acb14030fe0a
1131
1131
@@ -208,7 +208,7 b' by old clients.'
208
208
209 Feature works over SSH
209 Feature works over SSH
210
210
211 $ hg clone -U -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/server ssh-full-clone
211 $ hg clone -U ssh://user@dummy/server ssh-full-clone
212 applying clone bundle from http://localhost:$HGPORT1/full.hg
212 applying clone bundle from http://localhost:$HGPORT1/full.hg
213 adding changesets
213 adding changesets
214 adding manifests
214 adding manifests
@@ -226,6 +226,7 b' check that local configs for the cached '
226 ui.detailed-exit-code=True
226 ui.detailed-exit-code=True
227 ui.merge=internal:merge
227 ui.merge=internal:merge
228 ui.mergemarkers=detailed
228 ui.mergemarkers=detailed
229 ui.ssh=* (glob)
229 ui.timeout.warn=15
230 ui.timeout.warn=15
230 ui.foo=bar
231 ui.foo=bar
231 ui.nontty=true
232 ui.nontty=true
@@ -239,6 +240,7 b' check that local configs for the cached '
239 ui.detailed-exit-code=True
240 ui.detailed-exit-code=True
240 ui.merge=internal:merge
241 ui.merge=internal:merge
241 ui.mergemarkers=detailed
242 ui.mergemarkers=detailed
243 ui.ssh=* (glob)
242 ui.timeout.warn=15
244 ui.timeout.warn=15
243 ui.nontty=true
245 ui.nontty=true
244 #endif
246 #endif
@@ -316,7 +316,7 b' Show all commands + options'
316 debugpushkey:
316 debugpushkey:
317 debugpvec:
317 debugpvec:
318 debugrebuilddirstate: rev, minimal
318 debugrebuilddirstate: rev, minimal
319 debugrebuildfncache:
319 debugrebuildfncache: only-data
320 debugrename: rev
320 debugrename: rev
321 debugrequires:
321 debugrequires:
322 debugrevlog: changelog, manifest, dir, dump
322 debugrevlog: changelog, manifest, dir, dump
@@ -413,7 +413,7 b' Listing all config options'
413
413
414 The feature is experimental and behavior may vary. This test exists to make sure the code is run. We grep it to avoid too much variability in its current experimental state.
414 The feature is experimental and behavior may vary. This test exists to make sure the code is run. We grep it to avoid too much variability in its current experimental state.
415
415
416 $ hg config --exp-all-known | grep commit
416 $ hg config --exp-all-known | grep commit | grep -v ssh
417 commands.commit.interactive.git=False
417 commands.commit.interactive.git=False
418 commands.commit.interactive.ignoreblanklines=False
418 commands.commit.interactive.ignoreblanklines=False
419 commands.commit.interactive.ignorews=False
419 commands.commit.interactive.ignorews=False
@@ -644,14 +644,13 b' Test debugcapabilities command:'
644
644
645 Test debugpeer
645 Test debugpeer
646
646
647 $ hg --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" debugpeer ssh://user@dummy/debugrevlog
647 $ hg debugpeer ssh://user@dummy/debugrevlog
648 url: ssh://user@dummy/debugrevlog
648 url: ssh://user@dummy/debugrevlog
649 local: no
649 local: no
650 pushable: yes
650 pushable: yes
651
651
652 $ hg --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" --debug debugpeer ssh://user@dummy/debugrevlog
652 $ hg --debug debugpeer ssh://user@dummy/debugrevlog
653 running "*" "*/tests/dummyssh" 'user@dummy' 'hg -R debugrevlog serve --stdio' (glob) (no-windows !)
653 running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R debugrevlog serve --stdio['"] (re)
654 running "*" "*\tests/dummyssh" "user@dummy" "hg -R debugrevlog serve --stdio" (glob) (windows !)
655 devel-peer-request: hello+between
654 devel-peer-request: hello+between
656 devel-peer-request: pairs: 81 bytes
655 devel-peer-request: pairs: 81 bytes
657 sending hello command
656 sending hello command
@@ -119,7 +119,7 b' as pairs even if x == y, but not for "f('
119 +wdir
119 +wdir
120 $ hg diff -r "2 and 1"
120 $ hg diff -r "2 and 1"
121 abort: empty revision range
121 abort: empty revision range
122 [255]
122 [10]
123
123
124 $ cd ..
124 $ cd ..
125
125
@@ -13,13 +13,13 b' class dirstests(unittest.TestCase):'
13 (b'a/a/a', [b'a', b'a/a', b'']),
13 (b'a/a/a', [b'a', b'a/a', b'']),
14 (b'alpha/beta/gamma', [b'', b'alpha', b'alpha/beta']),
14 (b'alpha/beta/gamma', [b'', b'alpha', b'alpha/beta']),
15 ]:
15 ]:
16 d = pathutil.dirs({})
16 d = pathutil.dirs([])
17 d.addpath(case)
17 d.addpath(case)
18 self.assertEqual(sorted(d), sorted(want))
18 self.assertEqual(sorted(d), sorted(want))
19
19
20 def testinvalid(self):
20 def testinvalid(self):
21 with self.assertRaises(ValueError):
21 with self.assertRaises(ValueError):
22 d = pathutil.dirs({})
22 d = pathutil.dirs([])
23 d.addpath(b'a//b')
23 d.addpath(b'a//b')
24
24
25
25
@@ -87,7 +87,7 b' Specifying an empty revision should abor'
87
87
88 $ hg extdiff -p diff --patch --rev 'ancestor()' --rev 1
88 $ hg extdiff -p diff --patch --rev 'ancestor()' --rev 1
89 abort: empty revision on one side of range
89 abort: empty revision on one side of range
90 [255]
90 [10]
91
91
92 Test diff during merge:
92 Test diff during merge:
93
93
@@ -1692,6 +1692,26 b' Can load minimum version identical to cu'
1692 $ hg --config extensions.minversion=minversion3.py version 2>&1 | egrep '\(third'
1692 $ hg --config extensions.minversion=minversion3.py version 2>&1 | egrep '\(third'
1693 [1]
1693 [1]
1694
1694
1695 Don't explode on py3 with a bad version number (both str vs bytes, and not enough
1696 parts)
1697
1698 $ cat > minversion4.py << EOF
1699 > from mercurial import util
1700 > util.version = lambda: b'3.5'
1701 > minimumhgversion = '3'
1702 > EOF
1703 $ hg --config extensions.minversion=minversion4.py version -v
1704 Mercurial Distributed SCM (version 3.5)
1705 (see https://mercurial-scm.org for more information)
1706
1707 Copyright (C) 2005-* Olivia Mackall and others (glob)
1708 This is free software; see the source for copying conditions. There is NO
1709 warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
1710
1711 Enabled extensions:
1712
1713 minversion external
1714
1695 Restore HGRCPATH
1715 Restore HGRCPATH
1696
1716
1697 $ HGRCPATH=$ORGHGRCPATH
1717 $ HGRCPATH=$ORGHGRCPATH
@@ -458,7 +458,7 b' missing file'
458
458
459 $ hg ann nosuchfile
459 $ hg ann nosuchfile
460 abort: nosuchfile: no such file in rev e9e6b4fa872f
460 abort: nosuchfile: no such file in rev e9e6b4fa872f
461 [255]
461 [10]
462
462
463 annotate file without '\n' on last line
463 annotate file without '\n' on last line
464
464
@@ -1,6 +1,4 b''
1 $ cat >> $HGRCPATH << EOF
1 $ cat >> $HGRCPATH << EOF
2 > [ui]
3 > ssh = "$PYTHON" "$TESTDIR/dummyssh"
4 > [extensions]
2 > [extensions]
5 > fastannotate=
3 > fastannotate=
6 > [fastannotate]
4 > [fastannotate]
@@ -9,8 +9,6 b' Setup'
9
9
10 $ . "$TESTDIR/library-infinitepush.sh"
10 $ . "$TESTDIR/library-infinitepush.sh"
11 $ cat >> $HGRCPATH <<EOF
11 $ cat >> $HGRCPATH <<EOF
12 > [ui]
13 > ssh = "$PYTHON" "$TESTDIR/dummyssh"
14 > [alias]
12 > [alias]
15 > glog = log -GT "{rev}:{node|short} {desc}\n{phase}"
13 > glog = log -GT "{rev}:{node|short} {desc}\n{phase}"
16 > EOF
14 > EOF
@@ -123,7 +123,7 b' test failure'
123
123
124 init+push to remote2
124 init+push to remote2
125
125
126 $ hg init -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote2
126 $ hg init ssh://user@dummy/remote2
127 $ hg incoming -R remote2 local
127 $ hg incoming -R remote2 local
128 comparing with local
128 comparing with local
129 changeset: 0:08b9e9f63b32
129 changeset: 0:08b9e9f63b32
@@ -133,7 +133,7 b' init+push to remote2'
133 summary: init
133 summary: init
134
134
135
135
136 $ hg push -R local -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote2
136 $ hg push -R local ssh://user@dummy/remote2
137 pushing to ssh://user@dummy/remote2
137 pushing to ssh://user@dummy/remote2
138 searching for changes
138 searching for changes
139 remote: adding changesets
139 remote: adding changesets
@@ -143,7 +143,7 b' init+push to remote2'
143
143
144 clone to remote1
144 clone to remote1
145
145
146 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" local ssh://user@dummy/remote1
146 $ hg clone local ssh://user@dummy/remote1
147 searching for changes
147 searching for changes
148 remote: adding changesets
148 remote: adding changesets
149 remote: adding manifests
149 remote: adding manifests
@@ -151,7 +151,7 b' clone to remote1'
151 remote: added 1 changesets with 1 changes to 1 files
151 remote: added 1 changesets with 1 changes to 1 files
152
152
153 The largefiles extension doesn't crash
153 The largefiles extension doesn't crash
154 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" local ssh://user@dummy/remotelf --config extensions.largefiles=
154 $ hg clone local ssh://user@dummy/remotelf --config extensions.largefiles=
155 The fsmonitor extension is incompatible with the largefiles extension and has been disabled. (fsmonitor !)
155 The fsmonitor extension is incompatible with the largefiles extension and has been disabled. (fsmonitor !)
156 The fsmonitor extension is incompatible with the largefiles extension and has been disabled. (fsmonitor !)
156 The fsmonitor extension is incompatible with the largefiles extension and has been disabled. (fsmonitor !)
157 searching for changes
157 searching for changes
@@ -162,14 +162,14 b" The largefiles extension doesn't crash"
162
162
163 init to existing repo
163 init to existing repo
164
164
165 $ hg init -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote1
165 $ hg init ssh://user@dummy/remote1
166 abort: repository remote1 already exists
166 abort: repository remote1 already exists
167 abort: could not create remote repo
167 abort: could not create remote repo
168 [255]
168 [255]
169
169
170 clone to existing repo
170 clone to existing repo
171
171
172 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" local ssh://user@dummy/remote1
172 $ hg clone local ssh://user@dummy/remote1
173 abort: repository remote1 already exists
173 abort: repository remote1 already exists
174 abort: could not create remote repo
174 abort: could not create remote repo
175 [255]
175 [255]
@@ -283,7 +283,7 b' clone bookmarks'
283 $ hg -R local bookmark test
283 $ hg -R local bookmark test
284 $ hg -R local bookmarks
284 $ hg -R local bookmarks
285 * test 0:08b9e9f63b32
285 * test 0:08b9e9f63b32
286 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" local ssh://user@dummy/remote-bookmarks
286 $ hg clone local ssh://user@dummy/remote-bookmarks
287 searching for changes
287 searching for changes
288 remote: adding changesets
288 remote: adding changesets
289 remote: adding manifests
289 remote: adding manifests
@@ -124,7 +124,7 b' used all HGPORTs, kill all daemons'
124 #endif
124 #endif
125
125
126 vanilla clients locked out from largefiles ssh repos
126 vanilla clients locked out from largefiles ssh repos
127 $ hg --config extensions.largefiles=! clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/r4 r5
127 $ hg --config extensions.largefiles=! clone ssh://user@dummy/r4 r5
128 remote:
128 remote:
129 remote: This repository uses the largefiles extension.
129 remote: This repository uses the largefiles extension.
130 remote:
130 remote:
@@ -2516,10 +2516,9 b' New namespace is registered per repo ins'
2516 is global. So we shouldn't expect the namespace to always exist. Using
2516 is global. So we shouldn't expect the namespace to always exist. Using
2517 ssh:// makes sure a bundle repository is created from scratch. (issue6301)
2517 ssh:// makes sure a bundle repository is created from scratch. (issue6301)
2518
2518
2519 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" \
2519 $ hg clone -qr0 "ssh://user@dummy/`pwd`/a" a-clone
2520 > -qr0 "ssh://user@dummy/`pwd`/a" a-clone
2521 $ hg incoming --config extensions.names=names.py -R a-clone \
2520 $ hg incoming --config extensions.names=names.py -R a-clone \
2522 > -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" -T '{bars}\n' -l1
2521 > -T '{bars}\n' -l1
2523 comparing with ssh://user@dummy/$TESTTMP/a
2522 comparing with ssh://user@dummy/$TESTTMP/a
2524 searching for changes
2523 searching for changes
2525
2524
@@ -2,8 +2,6 b' Testing the functionality to pull remote'
2 =============================================
2 =============================================
3
3
4 $ cat >> $HGRCPATH << EOF
4 $ cat >> $HGRCPATH << EOF
5 > [ui]
6 > ssh = "$PYTHON" "$TESTDIR/dummyssh"
7 > [alias]
5 > [alias]
8 > glog = log -G -T '{rev}:{node|short} {desc}'
6 > glog = log -G -T '{rev}:{node|short} {desc}'
9 > [extensions]
7 > [extensions]
@@ -55,8 +55,8 b' Re-adding foo1 and bar:'
55 adding foo1
55 adding foo1
56
56
57 $ hg debugstate --no-dates
57 $ hg debugstate --no-dates
58 n 0 -2 unset bar
58 m 0 -2 unset bar
59 n 0 -2 unset foo1
59 m 0 -2 unset foo1
60 copy: foo -> foo1
60 copy: foo -> foo1
61
61
62 $ hg st -qC
62 $ hg st -qC
@@ -74,8 +74,8 b' Reverting foo1 and bar:'
74 reverting foo1
74 reverting foo1
75
75
76 $ hg debugstate --no-dates
76 $ hg debugstate --no-dates
77 n 0 -2 unset bar
77 m 0 -2 unset bar
78 n 0 -2 unset foo1
78 m 0 -2 unset foo1
79 copy: foo -> foo1
79 copy: foo -> foo1
80
80
81 $ hg st -qC
81 $ hg st -qC
@@ -24,10 +24,6 b" some capability (because it's running an"
24 > [extensions]
24 > [extensions]
25 > disable-lookup = $TESTTMP/disable-lookup.py
25 > disable-lookup = $TESTTMP/disable-lookup.py
26 > EOF
26 > EOF
27 $ cat >> .hg/hgrc <<EOF
28 > [ui]
29 > ssh = "$PYTHON" "$TESTDIR/dummyssh"
30 > EOF
31
27
32 $ hg pull ssh://user@dummy/repo1 -r tip -B a
28 $ hg pull ssh://user@dummy/repo1 -r tip -B a
33 pulling from ssh://user@dummy/repo1
29 pulling from ssh://user@dummy/repo1
@@ -1056,7 +1056,7 b' Simple case'
1056
1056
1057 No race condition
1057 No race condition
1058
1058
1059 $ hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone --debug | egrep '00(changelog|manifest)'
1059 $ hg clone -U --stream ssh://user@dummy/test-repo stream-clone --debug | egrep '00(changelog|manifest)'
1060 adding [s] 00manifest.n (62 bytes)
1060 adding [s] 00manifest.n (62 bytes)
1061 adding [s] 00manifest-*.nd (118 KB) (glob)
1061 adding [s] 00manifest-*.nd (118 KB) (glob)
1062 adding [s] 00changelog.n (62 bytes)
1062 adding [s] 00changelog.n (62 bytes)
@@ -1121,7 +1121,7 b' Prepare a commit'
1121
1121
1122 Do a mix of clone and commit at the same time so that the files listed on disk differ at actual transfer time.
1122 Do a mix of clone and commit at the same time so that the files listed on disk differ at actual transfer time.
1123
1123
1124 $ (hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone-race-1 --debug 2>> clone-output | egrep '00(changelog|manifest)' >> clone-output; touch $HG_TEST_STREAM_WALKED_FILE_3) &
1124 $ (hg clone -U --stream ssh://user@dummy/test-repo stream-clone-race-1 --debug 2>> clone-output | egrep '00(changelog|manifest)' >> clone-output; touch $HG_TEST_STREAM_WALKED_FILE_3) &
1125 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1
1125 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1
1126 $ hg -R test-repo/ commit -m foo
1126 $ hg -R test-repo/ commit -m foo
1127 $ touch $HG_TEST_STREAM_WALKED_FILE_2
1127 $ touch $HG_TEST_STREAM_WALKED_FILE_2
@@ -1218,7 +1218,7 b' Check the initial state'
1218 Perform a mix of clone and full refresh of the nodemap, so that the files
1218 Perform a mix of clone and full refresh of the nodemap, so that the files
1219 (and filenames) are different between listing time and actual transfer time.
1219 (and filenames) are different between listing time and actual transfer time.
1220
1220
1221 $ (hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone-race-2 --debug 2>> clone-output-2 | egrep '00(changelog|manifest)' >> clone-output-2; touch $HG_TEST_STREAM_WALKED_FILE_3) &
1221 $ (hg clone -U --stream ssh://user@dummy/test-repo stream-clone-race-2 --debug 2>> clone-output-2 | egrep '00(changelog|manifest)' >> clone-output-2; touch $HG_TEST_STREAM_WALKED_FILE_3) &
1222 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1
1222 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1
1223 $ rm test-repo/.hg/store/00changelog.n
1223 $ rm test-repo/.hg/store/00changelog.n
1224 $ rm test-repo/.hg/store/00changelog-*.nd
1224 $ rm test-repo/.hg/store/00changelog-*.nd
@@ -102,7 +102,6 b' A set of extension and shell functions e'
102
102
103 $ cat >> $HGRCPATH << EOF
103 $ cat >> $HGRCPATH << EOF
104 > [ui]
104 > [ui]
105 > ssh = "$PYTHON" "$TESTDIR/dummyssh"
106 > # simplify output
105 > # simplify output
107 > logtemplate = {node|short} {desc} ({branch})
106 > logtemplate = {node|short} {desc} ({branch})
108 > [phases]
107 > [phases]
@@ -132,7 +132,7 b' These fail:'
132
132
133 $ hg rebase --dest '1 & !1'
133 $ hg rebase --dest '1 & !1'
134 abort: empty revision set
134 abort: empty revision set
135 [255]
135 [10]
136
136
137 These work:
137 These work:
138
138
@@ -17,9 +17,16 b''
17 > try:
17 > try:
18 > for file in pats:
18 > for file in pats:
19 > if opts.get('normal_lookup'):
19 > if opts.get('normal_lookup'):
20 > repo.dirstate._normallookup(file)
20 > with repo.dirstate.parentchange():
21 > repo.dirstate.update_file(
22 > file,
23 > p1_tracked=True,
24 > wc_tracked=True,
25 > possibly_dirty=True,
26 > )
21 > else:
27 > else:
22 > repo.dirstate._drop(file)
28 > repo.dirstate._map.reset_state(file)
29 > repo.dirstate._dirty = True
23 >
30 >
24 > repo.dirstate.write(repo.currenttransaction())
31 > repo.dirstate.write(repo.currenttransaction())
25 > finally:
32 > finally:
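The hunk above swaps the private `_normallookup`/`_drop` calls for the newer dirstate API. A standalone sketch of the replacement pattern, assuming `repo` is an open localrepository and `path` a tracked file (helper names here are illustrative, not part of the API):

    def mark_possibly_dirty(repo, path):
        # update_file() must be called inside a parentchange() context so
        # the dirstate treats this as a deliberate, consistent update.
        with repo.dirstate.parentchange():
            repo.dirstate.update_file(
                path,
                p1_tracked=True,
                wc_tracked=True,
                possibly_dirty=True,
            )
        repo.dirstate.write(repo.currenttransaction())

    def drop_from_dirstate(repo, path):
        # reset_state() on the map forgets the entry; the dirstate must be
        # flagged dirty by hand since the private map was touched directly.
        repo.dirstate._map.reset_state(path)
        repo.dirstate._dirty = True
        repo.dirstate.write(repo.currenttransaction())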
@@ -840,7 +840,7 b' test usage in revpair (with "+")'
840
840
841 $ hg diff -r 'author("babar") or author("celeste")'
841 $ hg diff -r 'author("babar") or author("celeste")'
842 abort: empty revision range
842 abort: empty revision range
843 [255]
843 [10]
844
844
845 aliases:
845 aliases:
846
846
@@ -126,6 +126,9 b' Specifying revisions by changeset ID'
126 [255]
126 [255]
127 $ $NO_FALLBACK rhg cat -r d file-2
127 $ $NO_FALLBACK rhg cat -r d file-2
128 2
128 2
129 $ $NO_FALLBACK rhg cat -r 0000 file-2
130 abort: invalid revision identifier: 0000
131 [255]
129
132
130 Cat files
133 Cat files
131 $ cd $TESTTMP
134 $ cd $TESTTMP
@@ -160,7 +160,7 b' hg serve shared clone'
160 Cloning a shared repo via bundle2 results in a non-shared clone
160 Cloning a shared repo via bundle2 results in a non-shared clone
161
161
162 $ cd ..
162 $ cd ..
163 $ hg clone -q --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/`pwd`/repo2 cloned-via-bundle2
163 $ hg clone -q --stream ssh://user@dummy/`pwd`/repo2 cloned-via-bundle2
164 $ cat ./cloned-via-bundle2/.hg/requires | grep "shared"
164 $ cat ./cloned-via-bundle2/.hg/requires | grep "shared"
165 [1]
165 [1]
166 $ hg id --cwd cloned-via-bundle2 -r tip
166 $ hg id --cwd cloned-via-bundle2 -r tip
@@ -2,7 +2,6 b' test sparse'
2
2
3 $ cat >> $HGRCPATH << EOF
3 $ cat >> $HGRCPATH << EOF
4 > [ui]
4 > [ui]
5 > ssh = "$PYTHON" "$RUNTESTDIR/dummyssh"
6 > username = nobody <no.reply@fb.com>
5 > username = nobody <no.reply@fb.com>
7 > [extensions]
6 > [extensions]
8 > sparse=
7 > sparse=
@@ -9,7 +9,7 b' Checking that when lookup multiple bookm'
9 fails (thus causing the sshpeer to be stopped), the errors from the
9 fails (thus causing the sshpeer to be stopped), the errors from the
10 further lookups don't result in tracebacks.
10 further lookups don't result in tracebacks.
11
11
12 $ hg pull -r b0 -r nosuchbookmark $(for i in $($TESTDIR/seq.py 1 20); do echo -r b$i; done) -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/$(pwd)/../a
12 $ hg pull -r b0 -r nosuchbookmark $(for i in $($TESTDIR/seq.py 1 20); do echo -r b$i; done) ssh://user@dummy/$(pwd)/../a
13 pulling from ssh://user@dummy/$TESTTMP/b/../a
13 pulling from ssh://user@dummy/$TESTTMP/b/../a
14 abort: unknown revision 'nosuchbookmark'
14 abort: unknown revision 'nosuchbookmark'
15 [255]
15 [255]
@@ -52,7 +52,7 b' configure for serving'
52
52
53 repo not found error
53 repo not found error
54
54
55 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/nonexistent local
55 $ hg clone ssh://user@dummy/nonexistent local
56 remote: abort: repository nonexistent not found
56 remote: abort: repository nonexistent not found
57 abort: no suitable response from remote hg
57 abort: no suitable response from remote hg
58 [255]
58 [255]
@@ -60,7 +60,7 b' repo not found error'
60 non-existent absolute path
60 non-existent absolute path
61
61
62 #if no-msys
62 #if no-msys
63 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy//`pwd`/nonexistent local
63 $ hg clone ssh://user@dummy//`pwd`/nonexistent local
64 remote: abort: repository /$TESTTMP/nonexistent not found
64 remote: abort: repository /$TESTTMP/nonexistent not found
65 abort: no suitable response from remote hg
65 abort: no suitable response from remote hg
66 [255]
66 [255]
@@ -70,7 +70,7 b' clone remote via stream'
70
70
71 #if no-reposimplestore
71 #if no-reposimplestore
72
72
73 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --stream ssh://user@dummy/remote local-stream
73 $ hg clone --stream ssh://user@dummy/remote local-stream
74 streaming all changes
74 streaming all changes
75 4 files to transfer, 602 bytes of data (no-zstd !)
75 4 files to transfer, 602 bytes of data (no-zstd !)
76 transferred 602 bytes in * seconds (*) (glob) (no-zstd !)
76 transferred 602 bytes in * seconds (*) (glob) (no-zstd !)
@@ -94,7 +94,7 b' clone remote via stream'
94 clone bookmarks via stream
94 clone bookmarks via stream
95
95
96 $ hg -R local-stream book mybook
96 $ hg -R local-stream book mybook
97 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --stream ssh://user@dummy/local-stream stream2
97 $ hg clone --stream ssh://user@dummy/local-stream stream2
98 streaming all changes
98 streaming all changes
99 4 files to transfer, 602 bytes of data (no-zstd !)
99 4 files to transfer, 602 bytes of data (no-zstd !)
100 transferred 602 bytes in * seconds (*) (glob) (no-zstd !)
100 transferred 602 bytes in * seconds (*) (glob) (no-zstd !)
@@ -114,7 +114,7 b' clone bookmarks via stream'
114
114
115 clone remote via pull
115 clone remote via pull
116
116
117 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote local
117 $ hg clone ssh://user@dummy/remote local
118 requesting all changes
118 requesting all changes
119 adding changesets
119 adding changesets
120 adding manifests
120 adding manifests
@@ -142,14 +142,14 b' empty default pull'
142
142
143 $ hg paths
143 $ hg paths
144 default = ssh://user@dummy/remote
144 default = ssh://user@dummy/remote
145 $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\""
145 $ hg pull
146 pulling from ssh://user@dummy/remote
146 pulling from ssh://user@dummy/remote
147 searching for changes
147 searching for changes
148 no changes found
148 no changes found
149
149
150 pull from wrong ssh URL
150 pull from wrong ssh URL
151
151
152 $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/doesnotexist
152 $ hg pull ssh://user@dummy/doesnotexist
153 pulling from ssh://user@dummy/doesnotexist
153 pulling from ssh://user@dummy/doesnotexist
154 remote: abort: repository doesnotexist not found
154 remote: abort: repository doesnotexist not found
155 abort: no suitable response from remote hg
155 abort: no suitable response from remote hg
@@ -163,8 +163,6 b' local change'
163 updating rc
163 updating rc
164
164
165 $ echo "default-push = ssh://user@dummy/remote" >> .hg/hgrc
165 $ echo "default-push = ssh://user@dummy/remote" >> .hg/hgrc
166 $ echo "[ui]" >> .hg/hgrc
167 $ echo "ssh = \"$PYTHON\" \"$TESTDIR/dummyssh\"" >> .hg/hgrc
168
166
169 find outgoing
167 find outgoing
170
168
@@ -181,7 +179,7 b' find outgoing'
181
179
182 find incoming on the remote side
180 find incoming on the remote side
183
181
184 $ hg incoming -R ../remote -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/local
182 $ hg incoming -R ../remote ssh://user@dummy/local
185 comparing with ssh://user@dummy/local
183 comparing with ssh://user@dummy/local
186 searching for changes
184 searching for changes
187 changeset: 3:a28a9d1a809c
185 changeset: 3:a28a9d1a809c
@@ -194,7 +192,7 b' find incoming on the remote side'
194
192
195 find incoming on the remote side (using absolute path)
193 find incoming on the remote side (using absolute path)
196
194
197 $ hg incoming -R ../remote -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/`pwd`"
195 $ hg incoming -R ../remote "ssh://user@dummy/`pwd`"
198 comparing with ssh://user@dummy/$TESTTMP/local
196 comparing with ssh://user@dummy/$TESTTMP/local
199 searching for changes
197 searching for changes
200 changeset: 3:a28a9d1a809c
198 changeset: 3:a28a9d1a809c
@@ -241,7 +239,7 b' check remote tip'
241 test pushkeys and bookmarks
239 test pushkeys and bookmarks
242
240
243 $ cd $TESTTMP/local
241 $ cd $TESTTMP/local
244 $ hg debugpushkey --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote namespaces
242 $ hg debugpushkey ssh://user@dummy/remote namespaces
245 bookmarks
243 bookmarks
246 namespaces
244 namespaces
247 phases
245 phases
@@ -256,7 +254,7 b' test pushkeys and bookmarks'
256 no changes found
254 no changes found
257 exporting bookmark foo
255 exporting bookmark foo
258 [1]
256 [1]
259 $ hg debugpushkey --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote bookmarks
257 $ hg debugpushkey ssh://user@dummy/remote bookmarks
260 foo 1160648e36cec0054048a7edc4110c6f84fde594
258 foo 1160648e36cec0054048a7edc4110c6f84fde594
261 $ hg book -f foo
259 $ hg book -f foo
262 $ hg push --traceback
260 $ hg push --traceback
@@ -328,7 +326,7 b' clone bookmarks'
328 $ hg -R ../remote bookmark test
326 $ hg -R ../remote bookmark test
329 $ hg -R ../remote bookmarks
327 $ hg -R ../remote bookmarks
330 * test 4:6c0482d977a3
328 * test 4:6c0482d977a3
331 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote local-bookmarks
329 $ hg clone ssh://user@dummy/remote local-bookmarks
332 requesting all changes
330 requesting all changes
333 adding changesets
331 adding changesets
334 adding manifests
332 adding manifests
@@ -356,21 +354,21 b' hide outer repo'
356
354
357 Test remote paths with spaces (issue2983):
355 Test remote paths with spaces (issue2983):
358
356
359 $ hg init --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo"
357 $ hg init "ssh://user@dummy/a repo"
360 $ touch "$TESTTMP/a repo/test"
358 $ touch "$TESTTMP/a repo/test"
361 $ hg -R 'a repo' commit -A -m "test"
359 $ hg -R 'a repo' commit -A -m "test"
362 adding test
360 adding test
363 $ hg -R 'a repo' tag tag
361 $ hg -R 'a repo' tag tag
364 $ hg id --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo"
362 $ hg id "ssh://user@dummy/a repo"
365 73649e48688a
363 73649e48688a
366
364
367 $ hg id --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo#noNoNO"
365 $ hg id "ssh://user@dummy/a repo#noNoNO"
368 abort: unknown revision 'noNoNO'
366 abort: unknown revision 'noNoNO'
369 [255]
367 [255]
370
368
371 Test (non-)escaping of remote paths with spaces when cloning (issue3145):
369 Test (non-)escaping of remote paths with spaces when cloning (issue3145):
372
370
373 $ hg clone --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo"
371 $ hg clone "ssh://user@dummy/a repo"
374 destination directory: a repo
372 destination directory: a repo
375 abort: destination 'a repo' is not empty
373 abort: destination 'a repo' is not empty
376 [10]
374 [10]
@@ -462,8 +460,6 b' stderr from remote commands should be pr'
462 $ cat >> .hg/hgrc << EOF
460 $ cat >> .hg/hgrc << EOF
463 > [paths]
461 > [paths]
464 > default-push = ssh://user@dummy/remote
462 > default-push = ssh://user@dummy/remote
465 > [ui]
466 > ssh = "$PYTHON" "$TESTDIR/dummyssh"
467 > [extensions]
463 > [extensions]
468 > localwrite = localwrite.py
464 > localwrite = localwrite.py
469 > EOF
465 > EOF
@@ -486,7 +482,7 b' debug output'
486
482
487 $ hg pull --debug ssh://user@dummy/remote
483 $ hg pull --debug ssh://user@dummy/remote
488 pulling from ssh://user@dummy/remote
484 pulling from ssh://user@dummy/remote
489 running .* ".*/dummyssh" ['"]user@dummy['"] ('|")hg -R remote serve --stdio('|") (re)
485 running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R remote serve --stdio['"] (re)
490 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
486 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
491 sending hello command
487 sending hello command
492 sending between command
488 sending between command
@@ -583,11 +579,11 b' remote hook failure is attributed to rem'
583
579
584 $ echo "pretxnchangegroup.fail = python:$TESTTMP/failhook:hook" >> remote/.hg/hgrc
580 $ echo "pretxnchangegroup.fail = python:$TESTTMP/failhook:hook" >> remote/.hg/hgrc
585
581
586 $ hg -q --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" clone ssh://user@dummy/remote hookout
582 $ hg -q clone ssh://user@dummy/remote hookout
587 $ cd hookout
583 $ cd hookout
588 $ touch hookfailure
584 $ touch hookfailure
589 $ hg -q commit -A -m 'remote hook failure'
585 $ hg -q commit -A -m 'remote hook failure'
590 $ hg --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" push
586 $ hg push
591 pushing to ssh://user@dummy/remote
587 pushing to ssh://user@dummy/remote
592 searching for changes
588 searching for changes
593 remote: adding changesets
589 remote: adding changesets
@@ -607,7 +603,7 b' abort during pull is properly reported a'
607 > [extensions]
603 > [extensions]
608 > crash = ${TESTDIR}/crashgetbundler.py
604 > crash = ${TESTDIR}/crashgetbundler.py
609 > EOF
605 > EOF
610 $ hg --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" pull
606 $ hg pull
611 pulling from ssh://user@dummy/remote
607 pulling from ssh://user@dummy/remote
612 searching for changes
608 searching for changes
613 adding changesets
609 adding changesets
@@ -28,7 +28,7 b" creating 'remote' repo"
28 clone remote via stream
28 clone remote via stream
29
29
30 $ for i in 0 1 2 3 4 5 6 7 8; do
30 $ for i in 0 1 2 3 4 5 6 7 8; do
31 > hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --stream -r "$i" ssh://user@dummy/remote test-"$i"
31 > hg clone --stream -r "$i" ssh://user@dummy/remote test-"$i"
32 > if cd test-"$i"; then
32 > if cd test-"$i"; then
33 > hg verify
33 > hg verify
34 > cd ..
34 > cd ..
@@ -160,7 +160,7 b' clone remote via stream'
160 checked 9 changesets with 7 changes to 4 files
160 checked 9 changesets with 7 changes to 4 files
161 $ cd ..
161 $ cd ..
162 $ cd test-1
162 $ cd test-1
163 $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" -r 4 ssh://user@dummy/remote
163 $ hg pull -r 4 ssh://user@dummy/remote
164 pulling from ssh://user@dummy/remote
164 pulling from ssh://user@dummy/remote
165 searching for changes
165 searching for changes
166 adding changesets
166 adding changesets
@@ -175,7 +175,7 b' clone remote via stream'
175 crosschecking files in changesets and manifests
175 crosschecking files in changesets and manifests
176 checking files
176 checking files
177 checked 3 changesets with 2 changes to 1 files
177 checked 3 changesets with 2 changes to 1 files
178 $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote
178 $ hg pull ssh://user@dummy/remote
179 pulling from ssh://user@dummy/remote
179 pulling from ssh://user@dummy/remote
180 searching for changes
180 searching for changes
181 adding changesets
181 adding changesets
@@ -186,7 +186,7 b' clone remote via stream'
186 (run 'hg update' to get a working copy)
186 (run 'hg update' to get a working copy)
187 $ cd ..
187 $ cd ..
188 $ cd test-2
188 $ cd test-2
189 $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" -r 5 ssh://user@dummy/remote
189 $ hg pull -r 5 ssh://user@dummy/remote
190 pulling from ssh://user@dummy/remote
190 pulling from ssh://user@dummy/remote
191 searching for changes
191 searching for changes
192 adding changesets
192 adding changesets
@@ -201,7 +201,7 b' clone remote via stream'
201 crosschecking files in changesets and manifests
201 crosschecking files in changesets and manifests
202 checking files
202 checking files
203 checked 5 changesets with 3 changes to 1 files
203 checked 5 changesets with 3 changes to 1 files
204 $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote
204 $ hg pull ssh://user@dummy/remote
205 pulling from ssh://user@dummy/remote
205 pulling from ssh://user@dummy/remote
206 searching for changes
206 searching for changes
207 adding changesets
207 adding changesets
@@ -28,8 +28,6 b' protocols with inline conditional output'
28 > }
28 > }
29
29
30 $ cat >> $HGRCPATH << EOF
30 $ cat >> $HGRCPATH << EOF
31 > [ui]
32 > ssh = "$PYTHON" "$TESTDIR/dummyssh"
33 > [devel]
31 > [devel]
34 > debug.peer-request = true
32 > debug.peer-request = true
35 > [extensions]
33 > [extensions]
@@ -65,8 +63,7 b' Test a normal behaving server, for sanit'
65 $ cd ..
63 $ cd ..
66
64
67 $ hg --debug debugpeer ssh://user@dummy/server
65 $ hg --debug debugpeer ssh://user@dummy/server
68 running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
66 running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re)
69 running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
70 devel-peer-request: hello+between
67 devel-peer-request: hello+between
71 devel-peer-request: pairs: 81 bytes
68 devel-peer-request: pairs: 81 bytes
72 sending hello command
69 sending hello command
@@ -178,8 +175,7 b' SSH banner is not printed by default, ig'
178 --debug will print the banner
175 --debug will print the banner
179
176
180 $ SSHSERVERMODE=banner hg --debug debugpeer ssh://user@dummy/server
177 $ SSHSERVERMODE=banner hg --debug debugpeer ssh://user@dummy/server
181 running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
178 running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re)
182 running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
183 devel-peer-request: hello+between
179 devel-peer-request: hello+between
184 devel-peer-request: pairs: 81 bytes
180 devel-peer-request: pairs: 81 bytes
185 sending hello command
181 sending hello command
@@ -269,8 +265,7 b' The client should refuse, as we dropped '
269 servers.
265 servers.
270
266
271 $ SSHSERVERMODE=no-hello hg --debug debugpeer ssh://user@dummy/server
267 $ SSHSERVERMODE=no-hello hg --debug debugpeer ssh://user@dummy/server
272 running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
268 running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re)
273 running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
274 devel-peer-request: hello+between
269 devel-peer-request: hello+between
275 devel-peer-request: pairs: 81 bytes
270 devel-peer-request: pairs: 81 bytes
276 sending hello command
271 sending hello command
@@ -315,8 +310,7 b' Sending an unknown command to the server'
315 o> 1\n
310 o> 1\n
316
311
317 $ hg --config sshpeer.mode=extra-handshake-commands --config sshpeer.handshake-mode=pre-no-args --debug debugpeer ssh://user@dummy/server
312 $ hg --config sshpeer.mode=extra-handshake-commands --config sshpeer.handshake-mode=pre-no-args --debug debugpeer ssh://user@dummy/server
318 running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
313 running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re)
319 running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
320 sending no-args command
314 sending no-args command
321 devel-peer-request: hello+between
315 devel-peer-request: hello+between
322 devel-peer-request: pairs: 81 bytes
316 devel-peer-request: pairs: 81 bytes
@@ -385,8 +379,7 b' Send multiple unknown commands before he'
385 o> \n
379 o> \n
386
380
387 $ hg --config sshpeer.mode=extra-handshake-commands --config sshpeer.handshake-mode=pre-multiple-no-args --debug debugpeer ssh://user@dummy/server
381 $ hg --config sshpeer.mode=extra-handshake-commands --config sshpeer.handshake-mode=pre-multiple-no-args --debug debugpeer ssh://user@dummy/server
388 running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
382 running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re)
389 running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
390 sending unknown1 command
383 sending unknown1 command
391 sending unknown2 command
384 sending unknown2 command
392 sending unknown3 command
385 sending unknown3 command
@@ -961,8 +954,7 b' Send an upgrade request to a server that'
961 $ cd ..
954 $ cd ..
962
955
963 $ hg --config experimental.sshpeer.advertise-v2=true --debug debugpeer ssh://user@dummy/server
956 $ hg --config experimental.sshpeer.advertise-v2=true --debug debugpeer ssh://user@dummy/server
964 running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
957 running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re)
965 running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
966 sending upgrade request: * proto=exp-ssh-v2-0003 (glob)
958 sending upgrade request: * proto=exp-ssh-v2-0003 (glob)
967 devel-peer-request: hello+between
959 devel-peer-request: hello+between
968 devel-peer-request: pairs: 81 bytes
960 devel-peer-request: pairs: 81 bytes
@@ -1019,8 +1011,7 b' Send an upgrade request to a server that'
1019 $ cd ..
1011 $ cd ..
1020
1012
1021 $ hg --config experimental.sshpeer.advertise-v2=true --debug debugpeer ssh://user@dummy/server
1013 $ hg --config experimental.sshpeer.advertise-v2=true --debug debugpeer ssh://user@dummy/server
1022 running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
1014 running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re)
1023 running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
1024 sending upgrade request: * proto=exp-ssh-v2-0003 (glob)
1015 sending upgrade request: * proto=exp-ssh-v2-0003 (glob)
1025 devel-peer-request: hello+between
1016 devel-peer-request: hello+between
1026 devel-peer-request: pairs: 81 bytes
1017 devel-peer-request: pairs: 81 bytes
@@ -1038,8 +1029,7 b' Send an upgrade request to a server that'
1038 Verify the peer has capabilities
1029 Verify the peer has capabilities
1039
1030
1040 $ hg --config experimental.sshpeer.advertise-v2=true --debug debugcapabilities ssh://user@dummy/server
1031 $ hg --config experimental.sshpeer.advertise-v2=true --debug debugcapabilities ssh://user@dummy/server
1041 running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
1032 running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re)
1042 running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
1043 sending upgrade request: * proto=exp-ssh-v2-0003 (glob)
1033 sending upgrade request: * proto=exp-ssh-v2-0003 (glob)
1044 devel-peer-request: hello+between
1034 devel-peer-request: hello+between
1045 devel-peer-request: pairs: 81 bytes
1035 devel-peer-request: pairs: 81 bytes
@@ -4,13 +4,6 b' XXX-RHG this test hangs if `hg` is reall'
4 `alias hg=rhg` by run-tests.py. With that alias removed, this test is revealed
4 `alias hg=rhg` by run-tests.py. With that alias removed, this test is revealed
5 to be buggy. This needs to be resolved sooner rather than later.
5 to be buggy. This needs to be resolved sooner rather than later.
6
6
7 initial setup
8
9 $ cat << EOF >> $HGRCPATH
10 > [ui]
11 > ssh="$PYTHON" "$TESTDIR/dummyssh"
12 > EOF
13
14 repository itself is non-readable
7 repository itself is non-readable
15 ---------------------------------
8 ---------------------------------
16
9
@@ -42,18 +42,18 b' configure for serving'
42
42
43 repo not found error
43 repo not found error
44
44
45 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/nonexistent local
45 $ hg clone ssh://user@dummy/nonexistent local
46 remote: abort: repository nonexistent not found
46 remote: abort: repository nonexistent not found
47 abort: no suitable response from remote hg
47 abort: no suitable response from remote hg
48 [255]
48 [255]
49 $ hg clone -q -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/nonexistent local
49 $ hg clone -q ssh://user@dummy/nonexistent local
50 remote: abort: repository nonexistent not found
50 remote: abort: repository nonexistent not found
51 abort: no suitable response from remote hg
51 abort: no suitable response from remote hg
52 [255]
52 [255]
53
53
54 non-existent absolute path
54 non-existent absolute path
55
55
56 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/`pwd`/nonexistent local
56 $ hg clone ssh://user@dummy/`pwd`/nonexistent local
57 remote: abort: repository $TESTTMP/nonexistent not found
57 remote: abort: repository $TESTTMP/nonexistent not found
58 abort: no suitable response from remote hg
58 abort: no suitable response from remote hg
59 [255]
59 [255]
@@ -62,7 +62,7 b' clone remote via stream'
62
62
63 #if no-reposimplestore
63 #if no-reposimplestore
64
64
65 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --stream ssh://user@dummy/remote local-stream
65 $ hg clone --stream ssh://user@dummy/remote local-stream
66 streaming all changes
66 streaming all changes
67 8 files to transfer, 827 bytes of data (no-zstd !)
67 8 files to transfer, 827 bytes of data (no-zstd !)
68 transferred 827 bytes in * seconds (*) (glob) (no-zstd !)
68 transferred 827 bytes in * seconds (*) (glob) (no-zstd !)
@@ -84,7 +84,7 b' clone remote via stream'
84 clone bookmarks via stream
84 clone bookmarks via stream
85
85
86 $ hg -R local-stream book mybook
86 $ hg -R local-stream book mybook
87 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --stream ssh://user@dummy/local-stream stream2
87 $ hg clone --stream ssh://user@dummy/local-stream stream2
88 streaming all changes
88 streaming all changes
89 15 files to transfer, * of data (glob)
89 15 files to transfer, * of data (glob)
90 transferred * in * seconds (*) (glob)
90 transferred * in * seconds (*) (glob)
@@ -100,7 +100,7 b' clone bookmarks via stream'
100
100
101 clone remote via pull
101 clone remote via pull
102
102
103 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote local
103 $ hg clone ssh://user@dummy/remote local
104 requesting all changes
104 requesting all changes
105 adding changesets
105 adding changesets
106 adding manifests
106 adding manifests
@@ -128,14 +128,14 b' empty default pull'
128
128
129 $ hg paths
129 $ hg paths
130 default = ssh://user@dummy/remote
130 default = ssh://user@dummy/remote
131 $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\""
131 $ hg pull
132 pulling from ssh://user@dummy/remote
132 pulling from ssh://user@dummy/remote
133 searching for changes
133 searching for changes
134 no changes found
134 no changes found
135
135
136 pull from wrong ssh URL
136 pull from wrong ssh URL
137
137
138 $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/doesnotexist
138 $ hg pull ssh://user@dummy/doesnotexist
139 pulling from ssh://user@dummy/doesnotexist
139 pulling from ssh://user@dummy/doesnotexist
140 remote: abort: repository doesnotexist not found
140 remote: abort: repository doesnotexist not found
141 abort: no suitable response from remote hg
141 abort: no suitable response from remote hg
@@ -149,8 +149,6 b' local change'
149 updating rc
149 updating rc
150
150
151 $ echo "default-push = ssh://user@dummy/remote" >> .hg/hgrc
151 $ echo "default-push = ssh://user@dummy/remote" >> .hg/hgrc
152 $ echo "[ui]" >> .hg/hgrc
153 $ echo "ssh = \"$PYTHON\" \"$TESTDIR/dummyssh\"" >> .hg/hgrc
154
152
155 find outgoing
153 find outgoing
156
154
@@ -167,7 +165,7 b' find outgoing'
167
165
168 find incoming on the remote side
166 find incoming on the remote side
169
167
170 $ hg incoming -R ../remote -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/local
168 $ hg incoming -R ../remote ssh://user@dummy/local
171 comparing with ssh://user@dummy/local
169 comparing with ssh://user@dummy/local
172 searching for changes
170 searching for changes
173 changeset: 3:a28a9d1a809c
171 changeset: 3:a28a9d1a809c
@@ -180,7 +178,7 b' find incoming on the remote side'
180
178
181 find incoming on the remote side (using absolute path)
179 find incoming on the remote side (using absolute path)
182
180
183 $ hg incoming -R ../remote -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/`pwd`"
181 $ hg incoming -R ../remote "ssh://user@dummy/`pwd`"
184 comparing with ssh://user@dummy/$TESTTMP/local
182 comparing with ssh://user@dummy/$TESTTMP/local
185 searching for changes
183 searching for changes
186 changeset: 3:a28a9d1a809c
184 changeset: 3:a28a9d1a809c
@@ -227,7 +225,7 b' check remote tip'
227 test pushkeys and bookmarks
225 test pushkeys and bookmarks
228
226
229 $ cd $TESTTMP/local
227 $ cd $TESTTMP/local
230 $ hg debugpushkey --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote namespaces
228 $ hg debugpushkey ssh://user@dummy/remote namespaces
231 bookmarks
229 bookmarks
232 namespaces
230 namespaces
233 phases
231 phases
@@ -242,7 +240,7 b' test pushkeys and bookmarks'
242 no changes found
240 no changes found
243 exporting bookmark foo
241 exporting bookmark foo
244 [1]
242 [1]
245 $ hg debugpushkey --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote bookmarks
243 $ hg debugpushkey ssh://user@dummy/remote bookmarks
246 foo 1160648e36cec0054048a7edc4110c6f84fde594
244 foo 1160648e36cec0054048a7edc4110c6f84fde594
247 $ hg book -f foo
245 $ hg book -f foo
248 $ hg push --traceback
246 $ hg push --traceback
@@ -347,7 +345,7 b' clone bookmarks'
347 $ hg -R ../remote bookmark test
345 $ hg -R ../remote bookmark test
348 $ hg -R ../remote bookmarks
346 $ hg -R ../remote bookmarks
349 * test 4:6c0482d977a3
347 * test 4:6c0482d977a3
350 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote local-bookmarks
348 $ hg clone ssh://user@dummy/remote local-bookmarks
351 requesting all changes
349 requesting all changes
352 adding changesets
350 adding changesets
353 adding manifests
351 adding manifests
@@ -375,21 +373,21 b' hide outer repo'
375
373
376 Test remote paths with spaces (issue2983):
374 Test remote paths with spaces (issue2983):
377
375
378 $ hg init --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo"
376 $ hg init "ssh://user@dummy/a repo"
379 $ touch "$TESTTMP/a repo/test"
377 $ touch "$TESTTMP/a repo/test"
380 $ hg -R 'a repo' commit -A -m "test"
378 $ hg -R 'a repo' commit -A -m "test"
381 adding test
379 adding test
382 $ hg -R 'a repo' tag tag
380 $ hg -R 'a repo' tag tag
383 $ hg id --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo"
381 $ hg id "ssh://user@dummy/a repo"
384 73649e48688a
382 73649e48688a
385
383
386 $ hg id --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo#noNoNO"
384 $ hg id "ssh://user@dummy/a repo#noNoNO"
387 abort: unknown revision 'noNoNO'
385 abort: unknown revision 'noNoNO'
388 [255]
386 [255]
389
387
390 Test (non-)escaping of remote paths with spaces when cloning (issue3145):
388 Test (non-)escaping of remote paths with spaces when cloning (issue3145):
391
389
392 $ hg clone --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo"
390 $ hg clone "ssh://user@dummy/a repo"
393 destination directory: a repo
391 destination directory: a repo
394 abort: destination 'a repo' is not empty
392 abort: destination 'a repo' is not empty
395 [10]
393 [10]
@@ -515,8 +513,6 b' stderr from remote commands should be pr'
515 $ cat >> .hg/hgrc << EOF
513 $ cat >> .hg/hgrc << EOF
516 > [paths]
514 > [paths]
517 > default-push = ssh://user@dummy/remote
515 > default-push = ssh://user@dummy/remote
518 > [ui]
519 > ssh = "$PYTHON" "$TESTDIR/dummyssh"
520 > [extensions]
516 > [extensions]
521 > localwrite = localwrite.py
517 > localwrite = localwrite.py
522 > EOF
518 > EOF
@@ -540,7 +536,7 b' debug output'
540
536
541 $ hg pull --debug ssh://user@dummy/remote --config devel.debug.peer-request=yes
537 $ hg pull --debug ssh://user@dummy/remote --config devel.debug.peer-request=yes
542 pulling from ssh://user@dummy/remote
538 pulling from ssh://user@dummy/remote
543 running .* ".*/dummyssh" ['"]user@dummy['"] ('|")hg -R remote serve --stdio('|") (re)
539 running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R remote serve --stdio['"] (re)
544 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
540 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
545 devel-peer-request: hello+between
541 devel-peer-request: hello+between
546 devel-peer-request: pairs: 81 bytes
542 devel-peer-request: pairs: 81 bytes
@@ -670,11 +666,11 b' remote hook failure is attributed to rem'
670
666
671 $ echo "pretxnchangegroup.fail = python:$TESTTMP/failhook:hook" >> remote/.hg/hgrc
667 $ echo "pretxnchangegroup.fail = python:$TESTTMP/failhook:hook" >> remote/.hg/hgrc
672
668
673 $ hg -q --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" clone ssh://user@dummy/remote hookout
669 $ hg -q clone ssh://user@dummy/remote hookout
674 $ cd hookout
670 $ cd hookout
675 $ touch hookfailure
671 $ touch hookfailure
676 $ hg -q commit -A -m 'remote hook failure'
672 $ hg -q commit -A -m 'remote hook failure'
677 $ hg --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" push
673 $ hg push
678 pushing to ssh://user@dummy/remote
674 pushing to ssh://user@dummy/remote
679 searching for changes
675 searching for changes
680 remote: adding changesets
676 remote: adding changesets
@@ -695,7 +691,7 b' abort during pull is properly reported a'
695 > [extensions]
691 > [extensions]
696 > crash = ${TESTDIR}/crashgetbundler.py
692 > crash = ${TESTDIR}/crashgetbundler.py
697 > EOF
693 > EOF
698 $ hg --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" pull
694 $ hg pull
699 pulling from ssh://user@dummy/remote
695 pulling from ssh://user@dummy/remote
700 searching for changes
696 searching for changes
701 remote: abort: this is an exercise
697 remote: abort: this is an exercise
@@ -704,14 +700,14 b' abort during pull is properly reported a'
704
700
705 abort with no error hint when there is an ssh problem when pulling
701 abort with no error hint when there is an ssh problem when pulling
706
702
707 $ hg pull ssh://brokenrepository -e "\"$PYTHON\" \"$TESTDIR/dummyssh\""
703 $ hg pull ssh://brokenrepository
708 pulling from ssh://brokenrepository/
704 pulling from ssh://brokenrepository/
709 abort: no suitable response from remote hg
705 abort: no suitable response from remote hg
710 [255]
706 [255]
711
707
712 abort with configured error hint when there is an ssh problem when pulling
708 abort with configured error hint when there is an ssh problem when pulling
713
709
714 $ hg pull ssh://brokenrepository -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" \
710 $ hg pull ssh://brokenrepository \
715 > --config ui.ssherrorhint="Please see http://company/internalwiki/ssh.html"
711 > --config ui.ssherrorhint="Please see http://company/internalwiki/ssh.html"
716 pulling from ssh://brokenrepository/
712 pulling from ssh://brokenrepository/
717 abort: no suitable response from remote hg
713 abort: no suitable response from remote hg
@@ -14,7 +14,6 b' Test creating a consuming stream bundle '
14 > evolution.exchange=True
14 > evolution.exchange=True
15 > bundle2-output-capture=True
15 > bundle2-output-capture=True
16 > [ui]
16 > [ui]
17 > ssh="$PYTHON" "$TESTDIR/dummyssh"
18 > logtemplate={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
17 > logtemplate={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
19 > [web]
18 > [web]
20 > push_ssl = false
19 > push_ssl = false
@@ -186,7 +186,7 b' subrepo is referenced by absolute path.'
186
186
187 subrepo paths with ssh urls
187 subrepo paths with ssh urls
188
188
189 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/cloned sshclone
189 $ hg clone ssh://user@dummy/cloned sshclone
190 requesting all changes
190 requesting all changes
191 adding changesets
191 adding changesets
192 adding manifests
192 adding manifests
@@ -203,7 +203,7 b' subrepo paths with ssh urls'
203 new changesets 863c1745b441
203 new changesets 863c1745b441
204 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
204 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
205
205
206 $ hg -R sshclone push -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/`pwd`/cloned
206 $ hg -R sshclone push ssh://user@dummy/`pwd`/cloned
207 pushing to ssh://user@dummy/$TESTTMP/cloned
207 pushing to ssh://user@dummy/$TESTTMP/cloned
208 pushing subrepo sub to ssh://user@dummy/$TESTTMP/sub
208 pushing subrepo sub to ssh://user@dummy/$TESTTMP/sub
209 searching for changes
209 searching for changes
@@ -82,15 +82,14 b' and the second file.i entry should match'
82 date: Thu Jan 01 00:00:00 1970 +0000
82 date: Thu Jan 01 00:00:00 1970 +0000
83 summary: _
83 summary: _
84
84
85 $ hg verify
85 $ hg verify -q
86 checking changesets
87 checking manifests
88 crosschecking files in changesets and manifests
89 checking files
90 warning: revlog 'data/file.d' not in fncache!
86 warning: revlog 'data/file.d' not in fncache!
91 checked 2 changesets with 2 changes to 1 files
92 1 warnings encountered!
87 1 warnings encountered!
93 hint: run "hg debugrebuildfncache" to recover from corrupt fncache
88 hint: run "hg debugrebuildfncache" to recover from corrupt fncache
89 $ hg debugrebuildfncache --only-data
90 adding data/file.d
91 1 items added, 0 removed from fncache
92 $ hg verify -q
94 $ cd ..
93 $ cd ..
95
94
96
95
@@ -133,12 +132,7 b' where the data file is left as garbage.'
133 date: Thu Jan 01 00:00:00 1970 +0000
132 date: Thu Jan 01 00:00:00 1970 +0000
134 summary: _
133 summary: _
135
134
136 $ hg verify
135 $ hg verify -q
137 checking changesets
138 checking manifests
139 crosschecking files in changesets and manifests
140 checking files
141 checked 2 changesets with 2 changes to 1 files
142 $ cd ..
136 $ cd ..
143
137
144
138
@@ -170,13 +164,8 b' Repeat the original test but let hg roll'
170 date: Thu Jan 01 00:00:00 1970 +0000
164 date: Thu Jan 01 00:00:00 1970 +0000
171 summary: _
165 summary: _
172
166
173 $ hg verify
167 $ hg verify -q
174 checking changesets
175 checking manifests
176 crosschecking files in changesets and manifests
177 checking files
178 warning: revlog 'data/file.d' not in fncache!
168 warning: revlog 'data/file.d' not in fncache!
179 checked 2 changesets with 2 changes to 1 files
180 1 warnings encountered!
169 1 warnings encountered!
181 hint: run "hg debugrebuildfncache" to recover from corrupt fncache
170 hint: run "hg debugrebuildfncache" to recover from corrupt fncache
182 $ cd ..
171 $ cd ..
@@ -2,7 +2,7 b' Test that, when an hg push is interrupte'
2 the remote hg is able to successfully roll back the transaction.
2 the remote hg is able to successfully roll back the transaction.
3
3
4 $ hg init -q remote
4 $ hg init -q remote
5 $ hg clone -e "\"$PYTHON\" \"$RUNTESTDIR/dummyssh\"" -q ssh://user@dummy/`pwd`/remote local
5 $ hg clone -q ssh://user@dummy/`pwd`/remote local
6 $ SIGPIPE_REMOTE_DEBUG_FILE="$TESTTMP/DEBUGFILE"
6 $ SIGPIPE_REMOTE_DEBUG_FILE="$TESTTMP/DEBUGFILE"
7 $ SYNCFILE1="$TESTTMP/SYNCFILE1"
7 $ SYNCFILE1="$TESTTMP/SYNCFILE1"
8 $ SYNCFILE2="$TESTTMP/SYNCFILE2"
8 $ SYNCFILE2="$TESTTMP/SYNCFILE2"
@@ -36,7 +36,7 b' disconnecting. Then exit nonzero, to for'
36
36
37 (use quiet to avoid flaky output from the server)
37 (use quiet to avoid flaky output from the server)
38
38
39 $ hg push --quiet -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --remotecmd "$remotecmd"
39 $ hg push --quiet --remotecmd "$remotecmd"
40 abort: stream ended unexpectedly (got 0 bytes, expected 4)
40 abort: stream ended unexpectedly (got 0 bytes, expected 4)
41 [255]
41 [255]
42 $ cat $SIGPIPE_REMOTE_DEBUG_FILE
42 $ cat $SIGPIPE_REMOTE_DEBUG_FILE
@@ -1,8 +1,3 b''
1 $ cat << EOF >> $HGRCPATH
2 > [ui]
3 > ssh="$PYTHON" "$TESTDIR/dummyssh"
4 > EOF
5
6 Set up repo
1 Set up repo
7
2
8 $ hg --config experimental.treemanifest=True init repo
3 $ hg --config experimental.treemanifest=True init repo
@@ -75,9 +75,7 b' class clientpeer(wireprotov1peer.wirepee'
75
75
76 @wireprotov1peer.batchable
76 @wireprotov1peer.batchable
77 def greet(self, name):
77 def greet(self, name):
78 f = wireprotov1peer.future()
78 return {b'name': mangle(name)}, unmangle
79 yield {b'name': mangle(name)}, f
80 yield unmangle(f.value)
81
79
82
80
83 class serverrepo(object):
81 class serverrepo(object):
@@ -142,13 +142,13 b' HTTP without the httpheader capability:'
142
142
143 SSH (try to exercise the ssh functionality with a dummy script):
143 SSH (try to exercise the ssh functionality with a dummy script):
144
144
145 $ hg debugwireargs --ssh "\"$PYTHON\" $TESTDIR/dummyssh" ssh://user@dummy/repo uno due tre quattro
145 $ hg debugwireargs ssh://user@dummy/repo uno due tre quattro
146 uno due tre quattro None
146 uno due tre quattro None
147 $ hg debugwireargs --ssh "\"$PYTHON\" $TESTDIR/dummyssh" ssh://user@dummy/repo eins zwei --four vier
147 $ hg debugwireargs ssh://user@dummy/repo eins zwei --four vier
148 eins zwei None vier None
148 eins zwei None vier None
149 $ hg debugwireargs --ssh "\"$PYTHON\" $TESTDIR/dummyssh" ssh://user@dummy/repo eins zwei
149 $ hg debugwireargs ssh://user@dummy/repo eins zwei
150 eins zwei None None None
150 eins zwei None None None
151 $ hg debugwireargs --ssh "\"$PYTHON\" $TESTDIR/dummyssh" ssh://user@dummy/repo eins zwei --five fuenf
151 $ hg debugwireargs ssh://user@dummy/repo eins zwei --five fuenf
152 eins zwei None None None
152 eins zwei None None None
153
153
154 Explicitly kill daemons to let the test exit on Windows
154 Explicitly kill daemons to let the test exit on Windows
NO CONTENT: file was removed