branching: merge stable into default
marmoute
r51886:12c308c5 (merge, branch: default)
@@ -882,23 +882,143 b' def perfheads(ui, repo, **opts):'
882 882 fm.end()
883 883
884 884
885 def _default_clear_on_disk_tags_cache(repo):
886 from mercurial import tags
887
888 repo.cachevfs.tryunlink(tags._filename(repo))
889
890
891 def _default_clear_on_disk_tags_fnodes_cache(repo):
892 from mercurial import tags
893
894 repo.cachevfs.tryunlink(tags._fnodescachefile)
895
896
897 def _default_forget_fnodes(repo, revs):
898 """function used by the perf extension to prune some entries from the
899 fnodes cache"""
900 from mercurial import tags
901
902 missing_1 = b'\xff' * 4
903 missing_2 = b'\xff' * 20
904 cache = tags.hgtagsfnodescache(repo.unfiltered())
905 for r in revs:
906 cache._writeentry(r * tags._fnodesrecsize, missing_1, missing_2)
907 cache.write()
908
909
885 910 @command(
886 911 b'perf::tags|perftags',
887 912 formatteropts
888 913 + [
889 914 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
915 (
916 b'',
917 b'clear-on-disk-cache',
918 False,
919 b'clear on disk tags cache (DESTRUCTIVE)',
920 ),
921 (
922 b'',
923 b'clear-fnode-cache-all',
924 False,
925 b'clear on disk file node cache (DESTRUCTIVE)',
926 ),
927 (
928 b'',
929 b'clear-fnode-cache-rev',
930 [],
931 b'clear on disk file node cache (DESTRUCTIVE)',
932 b'REVS',
933 ),
934 (
935 b'',
936 b'update-last',
937 b'',
938 b'simulate an update over the last N revisions (DESTRUCTIVE)',
939 b'N',
940 ),
890 941 ],
891 942 )
892 943 def perftags(ui, repo, **opts):
944 """Benchmark tags retrieval in various situation
945
946 The options marked as (DESTRUCTIVE) will alter the on-disk caches, possibly
947 affecting performance after the command has run. However, they do not
948 destroy any stored data.
949 """
950 from mercurial import tags
951
893 952 opts = _byteskwargs(opts)
894 953 timer, fm = gettimer(ui, opts)
895 954 repocleartagscache = repocleartagscachefunc(repo)
896 955 clearrevlogs = opts[b'clear_revlogs']
956 clear_disk = opts[b'clear_on_disk_cache']
957 clear_fnode = opts[b'clear_fnode_cache_all']
958
959 clear_fnode_revs = opts[b'clear_fnode_cache_rev']
960 update_last_str = opts[b'update_last']
961 update_last = None
962 if update_last_str:
963 try:
964 update_last = int(update_last_str)
965 except ValueError:
966 msg = b'could not parse value for update-last: "%s"'
967 msg %= update_last_str
968 hint = b'value should be an integer'
969 raise error.Abort(msg, hint=hint)
970
971 clear_disk_fn = getattr(
972 tags,
973 "clear_cache_on_disk",
974 _default_clear_on_disk_tags_cache,
975 )
976 clear_fnodes_fn = getattr(
977 tags,
978 "clear_cache_fnodes",
979 _default_clear_on_disk_tags_fnodes_cache,
980 )
981 clear_fnodes_rev_fn = getattr(
982 tags,
983 "forget_fnodes",
984 _default_forget_fnodes,
985 )
986
987 clear_revs = []
988 if clear_fnode_revs:
989 clear_revs.extend(scmutil.revrange(repo, clear_fnode_revs))
990
991 if update_last:
992 revset = b'last(all(), %d)' % update_last
993 last_revs = repo.unfiltered().revs(revset)
994 clear_revs.extend(last_revs)
995
996 from mercurial import repoview
997
998 rev_filter = {(b'experimental', b'extra-filter-revs'): revset}
999 with repo.ui.configoverride(rev_filter, source=b"perf"):
1000 filter_id = repoview.extrafilter(repo.ui)
1001
1002 filter_name = b'%s%%%s' % (repo.filtername, filter_id)
1003 pre_repo = repo.filtered(filter_name)
1004 pre_repo.tags() # warm the cache
1005 old_tags_path = repo.cachevfs.join(tags._filename(pre_repo))
1006 new_tags_path = repo.cachevfs.join(tags._filename(repo))
1007
1008 clear_revs = sorted(set(clear_revs))
897 1009
898 1010 def s():
1011 if update_last:
1012 util.copyfile(old_tags_path, new_tags_path)
899 1013 if clearrevlogs:
900 1014 clearchangelog(repo)
901 1015 clearfilecache(repo.unfiltered(), 'manifest')
1016 if clear_disk:
1017 clear_disk_fn(repo)
1018 if clear_fnode:
1019 clear_fnodes_fn(repo)
1020 elif clear_revs:
1021 clear_fnodes_rev_fn(repo, clear_revs)
902 1022 repocleartagscache()
903 1023
904 1024 def t():
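A note on the shape of the new perf::tags code: the perf extension has to keep working against older Mercurial releases that do not ship the new cache-clearing helpers, so each helper is resolved with getattr() on the tags module and falls back to a local _default_* implementation. A minimal sketch of that compatibility pattern (resolve_helper is illustrative, not the extension's actual API):

    def resolve_helper(module, attr_name, fallback):
        """Prefer the in-core helper when the module provides it, else fall back."""
        return getattr(module, attr_name, fallback)

    # Usage mirroring the hunk above:
    #   from mercurial import tags
    #   clear_fnodes_fn = resolve_helper(
    #       tags, "clear_cache_fnodes", _default_clear_on_disk_tags_fnodes_cache
    #   )

With the perf extension enabled, the new flags can then be exercised with invocations such as 'hg perf::tags --clear-fnode-cache-all' or 'hg perf::tags --update-last 100'.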
@@ -99,6 +99,7 b' class blackboxlogger:'
99 99 def _log(self, ui, event, msg, opts):
100 100 default = ui.configdate(b'devel', b'default-date')
101 101 dateformat = ui.config(b'blackbox', b'date-format')
102 debug_to_stderr = ui.configbool(b'blackbox', b'debug.to-stderr')
102 103 if dateformat:
103 104 date = dateutil.datestr(default, dateformat)
104 105 else:
@@ -130,7 +131,10 b' class blackboxlogger:'
130 131 maxfiles=self._maxfiles,
131 132 maxsize=self._maxsize,
132 133 ) as fp:
133 fp.write(fmt % args)
134 msg = fmt % args
135 fp.write(msg)
136 if debug_to_stderr:
137 ui.write_err(msg)
134 138 except (IOError, OSError) as err:
135 139 # deactivate this to avoid failed logging again
136 140 self._trackedevents.clear()
@@ -896,7 +896,7 b' class unbundle20(unpackermixin):'
896 896 """utility to transfer a bundle2 as binary
897 897
898 898 This is made necessary by the fact the 'getbundle' command over 'ssh'
899 have no way to know then the reply end, relying on the bundle to be
899 has no way to know when the reply ends, relying on the bundle to be
900 900 interpreted to know its end. This is terrible and we are sorry, but we
901 901 needed to move forward to get general delta enabled.
902 902 """
@@ -2796,6 +2796,12 b' default = false'
2796 2796
2797 2797 [[items]]
2798 2798 section = "blackbox"
2799 name = "debug.to-stderr"
2800 default = false
2801 in_core_extension = "blackbox"
2802
2803 [[items]]
2804 section = "blackbox"
2799 2805 name = "dirty"
2800 2806 default = false
2801 2807 in_core_extension = "blackbox"
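With the item registered above, the new behaviour can be switched on from any hgrc; a minimal example, using the section and name exactly as declared (the value shown is simply the non-default setting):

    [blackbox]
    debug.to-stderr = true

When enabled, every line written to the blackbox log is also echoed to stderr by the logger change shown earlier.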
@@ -662,7 +662,8 b' def make_peer('
662 662 return inst
663 663 except error.RepoError as httpexception:
664 664 try:
665 r = statichttprepo.make_peer(ui, b"static-" + path.loc, create)
665 path = path.copy(new_raw_location=b"static-" + path.rawloc)
666 r = statichttprepo.make_peer(ui, path, create)
666 667 ui.note(_(b'(falling back to static-http)\n'))
667 668 return r
668 669 except error.RepoError:
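The fallback now rebuilds a proper path object instead of handing a raw prefixed string to the static-http peer, so downstream code keeps the usual path attributes. A condensed sketch of the surrounding pattern, using only names visible in the hunk; make_http_peer is a hypothetical stand-in for the real HTTP peer factory:

    from mercurial import error, statichttprepo

    def http_peer_with_static_fallback(ui, path, create, make_http_peer):
        try:
            return make_http_peer(ui, path, create)
        except error.RepoError:
            # Retry the same location through the static-http protocol.
            static_path = path.copy(new_raw_location=b"static-" + path.rawloc)
            return statichttprepo.make_peer(ui, static_path, create)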
@@ -190,10 +190,9 b' def findglobaltags(ui, repo):'
190 190 _updatetags(cachetags, alltags)
191 191 return alltags
192 192
193 has_node = repo.changelog.index.has_node
193 194 for head in reversed(heads): # oldest to newest
194 assert repo.changelog.index.has_node(
195 head
196 ), b"tag cache returned bogus head %s" % short(head)
195 assert has_node(head), b"tag cache returned bogus head %s" % short(head)
197 196 fnodes = _filterfnodes(tagfnode, reversed(heads))
198 197 alltags = _tagsfromfnodes(ui, repo, fnodes)
199 198
@@ -910,3 +909,24 b' class hgtagsfnodescache:'
910 909 )
911 910 finally:
912 911 lock.release()
912
913
914 def clear_cache_on_disk(repo):
915 """function used by the perf extension to "tags" cache"""
916 repo.cachevfs.tryunlink(_filename(repo))
917
918
919 def clear_cache_fnodes(repo):
920 """function used by the perf extension to clear "file node cache"""
921 repo.cachevfs.tryunlink(_filename(repo))
922
923
924 def forget_fnodes(repo, revs):
925 """function used by the perf extension to prune some entries from the fnodes
926 cache"""
927 missing_1 = b'\xff' * 4
928 missing_2 = b'\xff' * 20
929 cache = hgtagsfnodescache(repo.unfiltered())
930 for r in revs:
931 cache._writeentry(r * _fnodesrecsize, missing_1, missing_2)
932 cache.write()
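forget_fnodes invalidates entries by overwriting them with 0xff bytes. The sketch below spells out the record arithmetic this relies on, under the assumption that each record in the .hgtags fnodes cache is a 4-byte changeset-hash prefix followed by a 20-byte filenode (so _fnodesrecsize is 24) and that an all-0xff record means "unknown"; names are illustrative and it operates on an in-memory copy of the cache:

    RECORD_SIZE = 4 + 20          # changeset-hash prefix + filenode, per revision
    MISSING_RECORD = b'\xff' * RECORD_SIZE

    def forget_record(cache_bytes, rev):
        """Mark revision rev as unknown in a bytearray copy of the fnodes cache."""
        offset = rev * RECORD_SIZE
        cache_bytes[offset:offset + RECORD_SIZE] = MISSING_RECORD
        return cache_bytes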
@@ -289,8 +289,7 b' class sharesafe(requirementformatvariant'
289 289
290 290 postdowngrademessage = _(
291 291 b'repository downgraded to not use share safe mode, '
292 b'existing shares will not work and needs to'
293 b' be reshared.'
292 b'existing shares will not work and need to be reshared.'
294 293 )
295 294
296 295 postupgrademessage = _(
@@ -359,7 +358,7 b' class copiessdc(requirementformatvariant'
359 358 description = _(b'Stores copies information alongside changesets.')
360 359
361 360 upgrademessage = _(
362 b'Allows to use more efficient algorithm to deal with ' b'copy tracing.'
361 b'Allows to use more efficient algorithm to deal with copy tracing.'
363 362 )
364 363
365 364 touches_filelogs = False
@@ -236,6 +236,16 b' impl Revlog {'
236 236 data_path: Option<&Path>,
237 237 use_nodemap: bool,
238 238 ) -> Result<Self, HgError> {
239 Self::open_gen(store_vfs, index_path, data_path, use_nodemap, None)
240 }
241
242 fn open_gen(
243 store_vfs: &Vfs,
244 index_path: impl AsRef<Path>,
245 data_path: Option<&Path>,
246 use_nodemap: bool,
247 nodemap_for_test: Option<nodemap::NodeTree>,
248 ) -> Result<Self, HgError> {
239 249 let index_path = index_path.as_ref();
240 250 let index = {
241 251 match store_vfs.mmap_open_opt(&index_path)? {
@@ -273,6 +283,8 b' impl Revlog {'
273 283 )
274 284 };
275 285
286 let nodemap = nodemap_for_test.or(nodemap);
287
276 288 Ok(Revlog {
277 289 index,
278 290 data_bytes,
@@ -306,23 +318,13 b' impl Revlog {'
306 318 &self,
307 319 node: NodePrefix,
308 320 ) -> Result<Revision, RevlogError> {
309 let looked_up = if let Some(nodemap) = &self.nodemap {
321 if let Some(nodemap) = &self.nodemap {
310 322 nodemap
311 323 .find_bin(&self.index, node)?
312 324 .ok_or(RevlogError::InvalidRevision)
313 325 } else {
314 326 self.rev_from_node_no_persistent_nodemap(node)
315 };
316
317 if node.is_prefix_of(&NULL_NODE) {
318 return match looked_up {
319 Ok(_) => Err(RevlogError::AmbiguousPrefix),
320 Err(RevlogError::InvalidRevision) => Ok(NULL_REVISION),
321 res => res,
322 };
323 };
324
325 looked_up
327 }
326 328 }
327 329
328 330 /// Same as `rev_from_node`, without using a persistent nodemap
@@ -338,17 +340,23 b' impl Revlog {'
338 340 // TODO: consider building a non-persistent nodemap in memory to
339 341 // optimize these cases.
340 342 let mut found_by_prefix = None;
341 for rev in (0..self.len()).rev() {
343 for rev in (-1..self.len() as BaseRevision).rev() {
342 344 let rev = Revision(rev as BaseRevision);
343 let index_entry = self.index.get_entry(rev).ok_or_else(|| {
344 HgError::corrupted(
345 "revlog references a revision not in the index",
346 )
347 })?;
348 if node == *index_entry.hash() {
345 let candidate_node = if rev == Revision(-1) {
346 NULL_NODE
347 } else {
348 let index_entry =
349 self.index.get_entry(rev).ok_or_else(|| {
350 HgError::corrupted(
351 "revlog references a revision not in the index",
352 )
353 })?;
354 *index_entry.hash()
355 };
356 if node == candidate_node {
349 357 return Ok(rev);
350 358 }
351 if node.is_prefix_of(index_entry.hash()) {
359 if node.is_prefix_of(&candidate_node) {
352 360 if found_by_prefix.is_some() {
353 361 return Err(RevlogError::AmbiguousPrefix);
354 362 }
@@ -913,7 +921,13 b' mod tests {'
913 921 .flatten()
914 922 .collect_vec();
915 923 std::fs::write(temp.path().join("foo.i"), contents).unwrap();
916 let revlog = Revlog::open(&vfs, "foo.i", None, false).unwrap();
924
925 let mut idx = nodemap::tests::TestNtIndex::new();
926 idx.insert_node(Revision(0), node0).unwrap();
927 idx.insert_node(Revision(1), node1).unwrap();
928
929 let revlog =
930 Revlog::open_gen(&vfs, "foo.i", None, true, Some(idx.nt)).unwrap();
917 931
918 932 // accessing the data shows the corruption
919 933 revlog.get_entry(0.into()).unwrap().data().unwrap_err();
@@ -693,7 +693,7 b' impl NodeMap for NodeTree {'
693 693 }
694 694
695 695 #[cfg(test)]
696 mod tests {
696 pub mod tests {
697 697 use super::NodeMapError::*;
698 698 use super::*;
699 699 use crate::revlog::node::{hex_pad_right, Node};
@@ -871,29 +871,36 b' mod tests {'
871 871 Ok(())
872 872 }
873 873
874 struct TestNtIndex {
875 index: TestIndex,
876 nt: NodeTree,
874 pub struct TestNtIndex {
875 pub index: TestIndex,
876 pub nt: NodeTree,
877 877 }
878 878
879 879 impl TestNtIndex {
880 fn new() -> Self {
880 pub fn new() -> Self {
881 881 TestNtIndex {
882 882 index: HashMap::new(),
883 883 nt: NodeTree::default(),
884 884 }
885 885 }
886 886
887 fn insert(&mut self, rev: i32, hex: &str) -> Result<(), NodeMapError> {
887 pub fn insert_node(
888 &mut self,
889 rev: Revision,
890 node: Node,
891 ) -> Result<(), NodeMapError> {
892 self.index.insert(rev.into(), node);
893 self.nt.insert(&self.index, &node, rev)?;
894 Ok(())
895 }
896
897 pub fn insert(
898 &mut self,
899 rev: Revision,
900 hex: &str,
901 ) -> Result<(), NodeMapError> {
888 902 let node = pad_node(hex);
889 let rev: UncheckedRevision = rev.into();
890 self.index.insert(rev, node);
891 self.nt.insert(
892 &self.index,
893 &node,
894 self.index.check_revision(rev).unwrap(),
895 )?;
896 Ok(())
903 return self.insert_node(rev, node);
897 904 }
898 905
899 906 fn find_hex(
@@ -927,23 +934,23 b' mod tests {'
927 934 #[test]
928 935 fn test_insert_full_mutable() -> Result<(), NodeMapError> {
929 936 let mut idx = TestNtIndex::new();
930 idx.insert(0, "1234")?;
937 idx.insert(Revision(0), "1234")?;
931 938 assert_eq!(idx.find_hex("1")?, Some(R!(0)));
932 939 assert_eq!(idx.find_hex("12")?, Some(R!(0)));
933 940
934 941 // let's trigger a simple split
935 idx.insert(1, "1a34")?;
942 idx.insert(Revision(1), "1a34")?;
936 943 assert_eq!(idx.nt.growable.len(), 1);
937 944 assert_eq!(idx.find_hex("12")?, Some(R!(0)));
938 945 assert_eq!(idx.find_hex("1a")?, Some(R!(1)));
939 946
940 947 // reinserting is a no_op
941 idx.insert(1, "1a34")?;
948 idx.insert(Revision(1), "1a34")?;
942 949 assert_eq!(idx.nt.growable.len(), 1);
943 950 assert_eq!(idx.find_hex("12")?, Some(R!(0)));
944 951 assert_eq!(idx.find_hex("1a")?, Some(R!(1)));
945 952
946 idx.insert(2, "1a01")?;
953 idx.insert(Revision(2), "1a01")?;
947 954 assert_eq!(idx.nt.growable.len(), 2);
948 955 assert_eq!(idx.find_hex("1a"), Err(NodeMapError::MultipleResults));
949 956 assert_eq!(idx.find_hex("12")?, Some(R!(0)));
@@ -952,7 +959,7 b' mod tests {'
952 959 assert_eq!(idx.find_hex("1a12")?, None);
953 960
954 961 // now let's make it split and create more than one additional block
955 idx.insert(3, "1a345")?;
962 idx.insert(Revision(3), "1a345")?;
956 963 assert_eq!(idx.nt.growable.len(), 4);
957 964 assert_eq!(idx.find_hex("1a340")?, Some(R!(1)));
958 965 assert_eq!(idx.find_hex("1a345")?, Some(R!(3)));
@@ -966,7 +973,7 b' mod tests {'
966 973 #[test]
967 974 fn test_unique_prefix_len_zero_prefix() {
968 975 let mut idx = TestNtIndex::new();
969 idx.insert(0, "00000abcd").unwrap();
976 idx.insert(Revision(0), "00000abcd").unwrap();
970 977
971 978 assert_eq!(idx.find_hex("000"), Err(NodeMapError::MultipleResults));
972 979 // in the nodetree proper, this will be found at the first nybble
@@ -976,7 +983,7 b' mod tests {'
976 983 assert_eq!(idx.unique_prefix_len_hex("00000ab"), Ok(Some(6)));
977 984
978 985 // same with odd result
979 idx.insert(1, "00123").unwrap();
986 idx.insert(Revision(1), "00123").unwrap();
980 987 assert_eq!(idx.unique_prefix_len_hex("001"), Ok(Some(3)));
981 988 assert_eq!(idx.unique_prefix_len_hex("0012"), Ok(Some(3)));
982 989
@@ -1012,10 +1019,10 b' mod tests {'
1012 1019 #[test]
1013 1020 fn test_insert_partly_immutable() -> Result<(), NodeMapError> {
1014 1021 let mut idx = TestNtIndex::new();
1015 idx.insert(0, "1234")?;
1016 idx.insert(1, "1235")?;
1017 idx.insert(2, "131")?;
1018 idx.insert(3, "cafe")?;
1022 idx.insert(Revision(0), "1234")?;
1023 idx.insert(Revision(1), "1235")?;
1024 idx.insert(Revision(2), "131")?;
1025 idx.insert(Revision(3), "cafe")?;
1019 1026 let mut idx = idx.commit();
1020 1027 assert_eq!(idx.find_hex("1234")?, Some(R!(0)));
1021 1028 assert_eq!(idx.find_hex("1235")?, Some(R!(1)));
@@ -1024,7 +1031,7 b' mod tests {'
1024 1031 // we did not add anything since init from readonly
1025 1032 assert_eq!(idx.nt.masked_readonly_blocks(), 0);
1026 1033
1027 idx.insert(4, "123A")?;
1034 idx.insert(Revision(4), "123A")?;
1028 1035 assert_eq!(idx.find_hex("1234")?, Some(R!(0)));
1029 1036 assert_eq!(idx.find_hex("1235")?, Some(R!(1)));
1030 1037 assert_eq!(idx.find_hex("131")?, Some(R!(2)));
@@ -1034,7 +1041,7 b' mod tests {'
1034 1041 assert_eq!(idx.nt.masked_readonly_blocks(), 4);
1035 1042
1036 1043 eprintln!("{:?}", idx.nt);
1037 idx.insert(5, "c0")?;
1044 idx.insert(Revision(5), "c0")?;
1038 1045 assert_eq!(idx.find_hex("cafe")?, Some(R!(3)));
1039 1046 assert_eq!(idx.find_hex("c0")?, Some(R!(5)));
1040 1047 assert_eq!(idx.find_hex("c1")?, None);
@@ -1049,10 +1056,10 b' mod tests {'
1049 1056 #[test]
1050 1057 fn test_invalidate_all() -> Result<(), NodeMapError> {
1051 1058 let mut idx = TestNtIndex::new();
1052 idx.insert(0, "1234")?;
1053 idx.insert(1, "1235")?;
1054 idx.insert(2, "131")?;
1055 idx.insert(3, "cafe")?;
1059 idx.insert(Revision(0), "1234")?;
1060 idx.insert(Revision(1), "1235")?;
1061 idx.insert(Revision(2), "131")?;
1062 idx.insert(Revision(3), "cafe")?;
1056 1063 let mut idx = idx.commit();
1057 1064
1058 1065 idx.nt.invalidate_all();
@@ -1079,9 +1086,9 b' mod tests {'
1079 1086 #[test]
1080 1087 fn test_into_added_bytes() -> Result<(), NodeMapError> {
1081 1088 let mut idx = TestNtIndex::new();
1082 idx.insert(0, "1234")?;
1089 idx.insert(Revision(0), "1234")?;
1083 1090 let mut idx = idx.commit();
1084 idx.insert(4, "cafe")?;
1091 idx.insert(Revision(4), "cafe")?;
1085 1092 let (_, bytes) = idx.nt.into_readonly_and_added_bytes();
1086 1093
1087 1094 // only the root block has been changed
@@ -194,7 +194,7 b' perfstatus'
194 194 benchmark the full generation of a stream clone
195 195 perf::stream-locked-section
196 196 benchmark the initial, repo-locked, section of a stream-clone
197 perf::tags (no help text available)
197 perf::tags Benchmark tags retrieval in various situations
198 198 perf::templating
199 199 test the rendering time of a given template
200 200 perf::unbundle
@@ -106,11 +106,6 b''
106 106 # Test that warning is displayed when the repo path is malformed
107 107
108 108 $ printf "asdas\0das" >> $CACHEDIR/repos
109 #if py311
110 $ hg gc
111 finished: removed 0 of 4 files (0.00 GB to 0.00 GB)
112 #else
113 109 $ hg gc
114 110 abort: invalid path asdas\x00da: .*(null|NULL).* (re)
115 111 [255]
116 #endif
@@ -27,6 +27,8 b' Finding root'
27 27 Reading and setting configuration
28 28 $ echo "[ui]" >> $HGRCPATH
29 29 $ echo "username = user1" >> $HGRCPATH
30 $ echo "[extensions]" >> $HGRCPATH
31 $ echo "sparse =" >> $HGRCPATH
30 32 $ $NO_FALLBACK rhg config ui.username
31 33 user1
32 34 $ echo "[ui]" >> .hg/hgrc
@@ -309,6 +311,11 b' Persistent nodemap'
309 311 .hg/store/00changelog.i
310 312 .hg/store/00changelog.n
311 313
314 Rhg status on a sparse repo with nodemap (this specific combination used to crash in 6.5.2)
315
316 $ hg debugsparse -X excluded-dir
317 $ $NO_FALLBACK rhg status
318
312 319 Specifying revisions by changeset ID
313 320 $ $NO_FALLBACK rhg files -r c3ae8dec9fad
314 321 of
@@ -470,7 +470,7 b' Test that downgrading works too'
470 470 (it is safe to interrupt this process any time before data migration completes)
471 471 upgrading repository requirements
472 472 removing temporary repository $TESTTMP/non-share-safe/.hg/upgrade.* (glob)
473 repository downgraded to not use share safe mode, existing shares will not work and needs to be reshared.
473 repository downgraded to not use share safe mode, existing shares will not work and need to be reshared.
474 474
475 475 $ hg debugrequirements
476 476 dotencode
@@ -148,9 +148,17 b' test with empty repo (issue965)'
148 148 $ hg paths
149 149 default = static-http://localhost:$HGPORT/remotempty
150 150
151 test autodetecting static-http: scheme (issue6833)
152
153 $ cd ..
154 $ hg init actually-static
155 $ hg clone http://localhost:$HGPORT/actually-static local4
156 no changes found
157 updating to branch default
158 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
159
151 160 test with non-repo
152 161
153 $ cd ..
154 162 $ mkdir notarepo
155 163 $ hg clone static-http://localhost:$HGPORT/notarepo local3
156 164 abort: 'http://localhost:$HGPORT/notarepo' does not appear to be an hg repository
@@ -225,6 +233,15 b' List of files accessed over HTTP:'
225 233 /.hg/store/data/~2ehgsub.i (py37 !)
226 234 /.hg/store/data/~2ehgsubstate.i (py37 !)
227 235 /.hg/store/requires
236 /actually-static/.hg/bookmarks
237 /actually-static/.hg/bookmarks.current
238 /actually-static/.hg/dirstate
239 /actually-static/.hg/requires
240 /actually-static/.hg/store/00changelog.i
241 /actually-static/.hg/store/00manifest.i
242 /actually-static/.hg/store/requires
243 /actually-static/?cmd=capabilities
244 /actually-static?cmd=capabilities
228 245 /notarepo/.hg/00changelog.i
229 246 /notarepo/.hg/requires
230 247 /remote-with-names/.hg/bookmarks